| python_code (string, lengths 0–1.02M) | repo_name (string, lengths 9–48) | file_path (string, lengths 5–114) |
|---|---|---|
# Owner(s): ["module: __torch_dispatch__"]
import tempfile
import torch
from copy import deepcopy
from torch.library import Library
from torch.cuda.jiterator import _create_jit_fn
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS
from torch.utils._mode_utils import no_dispatch, find_outermost_mode, all_same_mode, all_same_mode_scope
from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \
log_input, capture_logs, capture_logs_with_logging_tensor_mode
from torch.utils._pytree import tree_map, tree_map_only
from torch.utils._python_dispatch import enable_torch_dispatch_mode, TorchDispatchMode
import logging
class TestPythonRegistration(TestCase):
    """Tests for registering/overriding ATen operator kernels from Python via torch.library.Library."""

    def test_override_aten_ops_with_multiple_libraries(self) -> None:
        """Two IMPL libraries can override different ops; deleting one must not affect the other."""
        x = torch.tensor([1, 2])
        my_lib1 = Library("aten", "IMPL")
        my_lib2 = Library("aten", "IMPL")

        # Example 1
        def my_neg(*args, **kwargs):
            return args[0]._neg_view()

        # Now we are secretly making the operator a view op so autograd needs to know how
        # to handle it
        my_lib1.impl('neg', my_neg, "AutogradCPU")

        self.assertTrue(torch.neg(x).is_neg())

        # RuntimeError: impl("aten::neg", ...):
        # Explicitly provided namespace (aten) in operator name does not match ...
        with self.assertRaisesRegex(RuntimeError, "operator name does not match namespace"):
            my_lib3 = Library("foo", "DEF")
            my_lib3.define("neg(Tensor self) -> Tensor")
            my_lib3.impl(torch.ops.aten.neg.default, my_neg, "AutogradCPU")
            del my_lib3

        # Example 2
        def my_mul(*args, **kwargs):
            return torch.zeros_like(args[0])

        # torch.ops.aten.mul.Tensor
        my_lib2.impl("aten::mul.Tensor", my_mul, "ZeroTensor")

        y = torch._efficientzerotensor(2)
        self.assertFalse(torch.mul(x, y)._is_zerotensor())

        # Assert that a user can't override the behavior of a (ns, op, dispatch_key)
        # combination if someone overrided the behavior for the same before them
        with self.assertRaisesRegex(RuntimeError, 'already a kernel registered from python'):
            my_lib2.impl(torch.ops.aten.mul.Tensor, my_mul, "ZeroTensor")

        del my_lib1

        # Validate that lib2 is not affected by removing lib1
        self.assertFalse(torch.mul(x, y)._is_zerotensor())

        del my_lib2

        # Validate that the old behavior is restored for neg and mul
        self.assertFalse(torch.neg(x).is_neg())
        self.assertTrue(torch.mul(x, y)._is_zerotensor())

    def test_error_if_fn_not_callable(self):
        """Passing a non-callable (a list) to Library.impl must raise TypeError."""
        with self.assertRaisesRegex(TypeError, "Input function is required to be a callable"):
            my_lib = Library("aten", "IMPL")
            my_lib.impl(torch.ops.aten.neg.default, [], "AutogradCPU")

    def test_override_cpu_sum(self) -> None:
        """Overriding aten::sum on CPU routes through the Python kernel; deletion restores it."""
        # Example 1
        run = [False]

        def my_sum(*args, **kwargs):
            run[0] = True
            return args[0]

        my_lib1 = Library("aten", "IMPL")
        my_lib1.impl('aten::sum', my_sum, "CPU")
        x = torch.tensor([1, 2])
        self.assertEqual(torch.sum(x), x)
        self.assertTrue(run[0])
        del my_lib1
        # Validate that the old behavior is restored for sum
        self.assertEqual(torch.sum(x), torch.tensor(3))

    def test_override_cuda_with_jiterator(self) -> None:
        """CUDA kernels can be replaced with Jiterator-generated kernels and restored on deletion."""
        def override_where_cuda() -> None:
            # Example 1: Invert the behavior of where's condition input
            not_where_code_string = '''
template <typename T> T inverted_where(bool cond, T a, T b){
return !cond ? a : b;
}
'''
            jitted_where = _create_jit_fn(not_where_code_string)

            CALLED = [False]

            def inverted_where(*args, **kwargs):
                CALLED[0] = True
                return jitted_where(*args, **kwargs)

            # overriding where's cuda kernel with Jiterator generated kernel
            my_lib = Library("aten", "IMPL")
            my_lib.impl('aten::where.self', inverted_where, "CUDA")

            device = 'cuda'
            cond = torch.tensor([True, True, False], device=device, dtype=torch.bool)
            x = torch.tensor([1, 2, 3], device=device)
            y = torch.tensor([-1, -2, -3], device=device)

            self.assertEqual(torch.where(cond, x, y), torch.tensor([-1, -2, 3]))
            self.assertTrue(CALLED[0])
            del my_lib

            # behavior restored after deregistration
            self.assertEqual(torch.where(cond, x, y), torch.tensor([1, 2, -3]))

        def override_gelu_cuda() -> None:
            # Example 2: Use relu to approximate gelu for faster compute
            fastest_gelu_code_string = '''
template <typename T> T fast_gelu(T a){
return a > 0 ? a : 0;
}
'''
            jitted_gelu = _create_jit_fn(fastest_gelu_code_string)

            CALLED = [False]

            def fast_gelu(*args, **kwargs):
                CALLED[0] = True
                return jitted_gelu(*args, **kwargs)

            # overriding gelu's cuda kernel with Jiterator generated relu kernel
            my_lib = Library("aten", "IMPL")
            my_lib.impl('aten::gelu', fast_gelu, "CUDA")

            x = torch.rand([3, 3], device='cuda', dtype=torch.float)
            self.assertEqual(torch.nn.functional.gelu(x), torch.nn.functional.relu(x))
            self.assertTrue(CALLED[0])
            del my_lib

            # behavior restored after deregistration
            self.assertNotEqual(torch.nn.functional.gelu(x), torch.nn.functional.relu(x))

        def override_exp_cuda() -> None:
            # Example 3: Preventing exp from exploding for float16
            clipped_exp_code_string = '''
template <typename T> T clipped_exp(T a){
return a > T(10.0) ? T(22026.4657948) : exp(a);
}
'''
            jitted_exp = _create_jit_fn(clipped_exp_code_string)

            CALLED = [False]

            def clipped_exp(*args, **kwargs):
                CALLED[0] = True
                return jitted_exp(*args, **kwargs)

            # overriding exp's cuda kernel with clipped_exp kernel
            my_lib = Library("aten", "IMPL")
            my_lib.impl('aten::exp', clipped_exp, "CUDA")

            x = torch.tensor([0.0, 100.0], device='cuda', dtype=torch.float16)
            self.assertEqual(torch.exp(x), torch.tensor([1.0, 22026.4657948], dtype=torch.float16))
            self.assertTrue(CALLED[0])
            del my_lib

            # behavior restored after deregistration
            self.assertEqual(torch.exp(x), torch.tensor([1.0, torch.inf], dtype=torch.float16))

        def override_add_cuda() -> None:
            # Example 4: simulate a hardware bug, where the adder is always off by 1
            buggy_add_code_string = '''
template <typename T> T buggy_add(T a, T b){
return a + b + T(1);
}
'''
            jitted_add = _create_jit_fn(buggy_add_code_string)

            CALLED = [False]

            def buggy_add(*args, **kwargs):
                CALLED[0] = True
                return jitted_add(*args, **kwargs)

            my_lib = Library("aten", "IMPL")
            my_lib.impl('aten::add.Tensor', buggy_add, "CUDA")

            x_cpu = torch.rand([3, 3], device='cpu')
            y_cpu = torch.rand([3], device='cpu')

            x_cuda = x_cpu.cuda()
            y_cuda = y_cpu.cuda()

            self.assertEqual(x_cuda + y_cuda, x_cpu + y_cpu + 1)
            self.assertTrue(CALLED[0])
            del my_lib

            # behavior restored after deregistration
            self.assertEqual(x_cuda + y_cuda, x_cpu + y_cpu)

        # Jiterator is CUDA-only and not supported on ROCm, so gate all four scenarios.
        if torch.cuda.is_available() and not TEST_WITH_ROCM:
            override_where_cuda()
            override_gelu_cuda()
            override_exp_cuda()
            override_add_cuda()

    def test_extend_library_with_dispatch_key_arg(self):
        """A Library created with dispatch_key= rejects impl() calls naming a different key."""
        def my_sum(*args, **kwargs):
            return args[0]

        my_lib1 = Library("aten", "IMPL", dispatch_key="CPU")

        # RuntimeError: Explicitly provided dispatch key (Conjugate) is
        # inconsistent with the dispatch key of the enclosing TORCH_LIBRARY_IMPL block
        with self.assertRaisesRegex(RuntimeError, "inconsistent with the dispatch key"):
            my_lib1.impl('sum', my_sum, "Conjugate")
        my_lib1.impl('aten::sum', my_sum)
        x = torch.tensor([1, 2])
        self.assertEqual(torch.sum(x), x)
        del my_lib1

    def test_create_new_library(self) -> None:
        """A DEF library can define a new op; separate IMPL libraries add per-key kernels."""
        my_lib1 = Library("foo", "DEF")

        my_lib1.define("sum(Tensor self) -> Tensor")

        # Example 1
        @torch.library.impl(my_lib1, "sum", "CPU")
        def my_sum(*args, **kwargs):
            return args[0]

        x = torch.tensor([1, 2])
        self.assertEqual(torch.ops.foo.sum(x), x)

        my_lib2 = Library("foo", "IMPL")

        # Example 2
        @torch.library.impl(my_lib2, torch.ops.foo.sum.default, "ZeroTensor")
        def my_sum_zt(*args, **kwargs):
            if args[0]._is_zerotensor():
                return torch._efficientzerotensor(args[0].shape)
            else:
                return args[0]

        y = torch._efficientzerotensor(3)
        self.assertTrue(torch.ops.foo.sum(y)._is_zerotensor())
        self.assertEqual(torch.ops.foo.sum(x), x)

        del my_lib2
        del my_lib1

    @unittest.skipIf(IS_WINDOWS, "Skipped under Windows")
    def test_alias_analysis(self):
        """The default alias_analysis makes ops dead-code-eliminable under TorchScript; CONSERVATIVE keeps them."""
        def test_helper(alias_analysis=""):
            my_lib1 = Library("foo", "DEF")

            called = [0]

            @torch.library.define(my_lib1, "_op() -> None", alias_analysis=alias_analysis)
            def _op(*args, **kwargs):
                called[0] += 1

            @torch.jit.script
            def _test():
                torch.ops.foo._op()

            assert "foo::_op" in str(_test.graph)

        with self.assertRaises(AssertionError):
            test_helper("")  # alias_analysis="FROM_SCHEMA"

        test_helper("CONSERVATIVE")

    def test_error_for_unsupported_ns_or_kind(self) -> None:
        """Library rejects unknown kinds and reserved namespaces."""
        with self.assertRaisesRegex(ValueError, "Unsupported kind"):
            my_lib1 = Library("myns", "BLA")

        with self.assertRaisesRegex(ValueError, "reserved namespace"):
            my_lib1 = Library("prim", "DEF")
class TestPythonDispatch(TestCase):
    def test_basic(self) -> None:
        """LoggingTensor records forward and backward ATen calls in order."""
        with capture_logs() as logs:
            x = LoggingTensor(torch.tensor([3.0]), requires_grad=True)
            log_input("x", x)
            y = x * x
            saved_x = y.grad_fn._saved_self
            grad_y = LoggingTensor(torch.tensor([1.0]))
            log_input("grad_y", grad_y)
            g, = torch.autograd.grad((y,), (x,), (grad_y,))

        self.assertEqual(g.elem, torch.tensor([6.0]))
        with torch.no_grad():
            self.assertEqual(saved_x, x)
            self.assertEqual(saved_x._version, x._version)
            x.add_(2)
            self.assertEqual(saved_x, x)
            # TODO: figure out why broken
            # self.assertEqual(saved_x._version, x._version)
        self.assertExpectedInline('\n'.join(logs), '''\
$0 = input('x')
$1 = torch._ops.aten.mul.Tensor($0, $0)
$2 = input('grad_y')
True = torch._ops.aten.is_same_size.default($1, $2)
$3 = torch._ops.aten.mul.Tensor($2, $0)
$4 = torch._ops.aten.mul.Tensor($2, $0)
$5 = torch._ops.aten.add.Tensor($4, $3)''')

    def test_out(self) -> None:
        """out= variants are logged with out as a keyword argument."""
        with capture_logs() as logs:
            x = LoggingTensor(torch.ones(1))
            y = LoggingTensor(torch.zeros(1))
            log_input("x", x)
            log_input("y", y)
            torch.abs(x, out=y)

        self.assertEqual(y.elem, torch.ones(1))
        # TODO: arguably this shouldn't pass and we should complain
        # that out isn't a kwarg
        self.assertExpectedInline('\n'.join(logs), '''\
$0 = input('x')
$1 = input('y')
$2 = torch._ops.aten.abs.out($0, out=$1)''')

    def test_kwarg_only(self) -> None:
        """Defaulted kwarg-only args (beta/alpha) are omitted from the log even when passed explicitly."""
        with capture_logs() as logs:
            x = LoggingTensor(torch.ones(1))
            y = LoggingTensor(torch.ones(1, 1))
            z = LoggingTensor(torch.ones(1))
            log_input("x", x)
            log_input("y", y)
            log_input("z", z)
            torch.addmv(x, y, z)
            torch.addmv(x, y, z, beta=1)
            torch.addmv(x, y, z, beta=2)
            torch.addmv(x, y, z, alpha=2)
            torch.addmv(x, y, z, beta=2, alpha=2)

        # The expectation is that beta/alpha don't show up when they're
        # defaulted.  This is even if the user explicitly specified it.
        self.assertExpectedInline('\n'.join(logs), '''\
$0 = input('x')
$1 = input('y')
$2 = input('z')
$3 = torch._ops.aten.addmv.default($0, $1, $2)
$4 = torch._ops.aten.addmv.default($0, $1, $2)
$5 = torch._ops.aten.addmv.default($0, $1, $2, beta=2)
$6 = torch._ops.aten.addmv.default($0, $1, $2, alpha=2)
$7 = torch._ops.aten.addmv.default($0, $1, $2, beta=2, alpha=2)''')

    def test_kwarg_only_and_positional_default(self) -> None:
        """Defaulted positional args are omitted from the log even when a later kwarg is set."""
        with capture_logs() as logs:
            x = LoggingTensor(torch.ones(1))
            log_input("x", x)
            torch.ops.aten._foobar(x)
            torch.ops.aten._foobar(x, False)
            torch.ops.aten._foobar(x, arg3=False)
            torch.ops.aten._foobar(x, False, arg3=False)

        # What we are testing here is that we omit arg2
        # if it is defaulted, even if a kwarg is set
        self.assertExpectedInline('\n'.join(logs), '''\
$0 = input('x')
$1 = torch._ops.aten._foobar.default($0)
$2 = torch._ops.aten._foobar.default($0, False)
$3 = torch._ops.aten._foobar.default($0, arg3=False)
$4 = torch._ops.aten._foobar.default($0, False, arg3=False)''')

    def test_produce_real_type(self) -> None:
        """Logged calls render real argument types (dtype, memory_format) rather than placeholders."""
        with capture_logs() as logs:
            x = LoggingTensor(torch.ones(2, 2))
            log_input("x", x)
            x.to(dtype=torch.double)  # non-optional dtype
            torch.cumprod(x, 0, dtype=torch.double)  # optional dtype
            x[:, 1].contiguous(memory_format=torch.contiguous_format)  # optional memory format
            # There doesn't appear to be any layout signatures which are
            # triggerable using tensor subclasses (need to use a mode)

        self.assertExpectedInline('\n'.join(logs), '''\
$0 = input('x')
$1 = torch._ops.aten._to_copy.default($0, dtype=torch.float64)
$2 = torch._ops.aten.cumprod.default($0, 0, dtype=torch.float64)
$3 = torch._ops.aten.slice.Tensor($0, 0, 0, 9223372036854775807)
$4 = torch._ops.aten.select.int($3, 1, 1)
$5 = torch._ops.aten.clone.default($4, memory_format=torch.contiguous_format)''')
    def test_list_ret(self) -> None:
        """Both list and tuple are permissible sequence returns from __torch_dispatch__."""
        # test all sequence types are permissible returns
        for list_type in (list, tuple):
            class A(torch._C._TensorBase):
                @staticmethod
                def __new__(cls, elem):
                    return torch.Tensor._make_subclass(cls, elem, elem.requires_grad)

                @classmethod
                def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
                    if func.overloadpacket == torch.ops.aten.split:
                        with no_dispatch():
                            return list_type(torch.split(*args))
                    else:
                        raise AssertionError(f"unrecognized func: {func}")

            self.assertEqual(
                torch.split(A(torch.tensor([0, 1])), 2),
                torch.split(torch.tensor([0, 1]), 2)
            )
def test_invalid_ret(self) -> None:
# test invalid return gets reasonable error message
class A(torch._C._TensorBase):
@staticmethod
def __new__(cls, elem):
return torch.Tensor._make_subclass(cls, elem, elem.requires_grad)
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
return "arf"
# Wobbles depending on NDEBUG mode of pybind11
self.assertRaisesRegex(
RuntimeError, "Unable to cast", lambda: A(torch.zeros(1)).neg(),
)
self.assertRaisesRegexp(
RuntimeError, "Unable to cast", lambda: A(torch.zeros(1)).detach(),
)
    def test_detach_appears_twice_when_called_once(self) -> None:
        """A single .detach() currently logs two detach ops; guard against it getting worse."""
        with capture_logs() as logs:
            x = LoggingTensor(torch.tensor([3.0]), requires_grad=True)
            log_input("x", x)
            x.detach()
        # FIXME: We actually want this to emit a single detach. However,
        # it currently emits two, for reasons unclear to us. Leaving
        # this test here to make sure we don't regress even further (it
        # would be bad if calling .detach() once emits 3+ detaches).
        self.assertExpectedInline('\n'.join(logs), '''\
$0 = input('x')
$1 = torch._ops.aten.detach.default($0)
$2 = torch._ops.aten.detach.default($1)''')

    def test_metadata_change_not_allowed(self) -> None:
        """Resizing the .data alias of a wrapper subclass must raise."""
        x = LoggingTensor(torch.ones(1))
        y = x.data
        self.assertIsInstance(y, LoggingTensor)
        self.assertRaises(RuntimeError, lambda: y.resize_(4))

    def test_storage(self) -> None:
        # For now, just make sure it doesn't crash.  Ideally, we should
        # return some virtual storage that is safe to work with
        x = LoggingTensor(torch.ones(1))
        self.assertRaises(RuntimeError, lambda: x.storage())

    def test_make_wrapper_subclass_noalloc(self) -> None:
        # This is ludicrously big (8TB) and this should pass because wrapper
        # subclasses don't allocate
        torch.Tensor._make_wrapper_subclass(LoggingTensor, (1000000000000,))

    def test_version(self) -> None:
        """In-place ops through detach()/.data still bump or preserve _version as expected."""
        x = LoggingTensor(torch.ones(1))
        prev_vc = x._version
        x.detach().add_(2)
        cur_vc = x._version
        self.assertNotEqual(prev_vc, cur_vc)
        x.data.add_(2)
        self.assertEqual(cur_vc, x._version)
    def test_subclass_priority(self) -> None:
        """The more-derived subclass (B) wins dispatch regardless of argument order."""
        class ErrorA(RuntimeError):
            pass

        class ErrorB(RuntimeError):
            pass

        # The big tests for code coverage are test_precedence_semantics in
        # test_overrides.py; this is just to make sure it is wired up at all
        # correctly for __torch_dispatch__
        class A(torch.Tensor):
            @staticmethod
            def __new__(cls, elem):
                return torch.Tensor._make_subclass(cls, elem, elem.requires_grad)

            @classmethod
            def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
                raise ErrorA

        class B(A):
            @staticmethod
            def __new__(cls, elem):
                return torch.Tensor._make_subclass(cls, elem, elem.requires_grad)

            @classmethod
            def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
                raise ErrorB

        self.assertRaises(ErrorA, lambda: torch.add(A(torch.empty(1)), A(torch.empty(1))))
        self.assertRaises(ErrorB, lambda: torch.add(A(torch.empty(1)), B(torch.empty(1))))
        self.assertRaises(ErrorB, lambda: torch.add(B(torch.empty(1)), A(torch.empty(1))))
        self.assertRaises(ErrorB, lambda: torch.add(B(torch.empty(1)), B(torch.empty(1))))
    def test_format(self) -> None:
        """str(), repr() and f-string formatting all agree for a wrapper subclass."""
        x = LoggingTensor(torch.ones(1))
        s1 = str(x)
        s2 = repr(x)
        s3 = f"{x}"
        self.assertExpectedInline(s1, """LoggingTensor(tensor([1.]))""")
        self.assertEqual(s1, s2)
        self.assertEqual(s1, s3)

    def test_custom_autograd(self) -> None:
        """A custom autograd.Function sees LoggingTensor in both forward and backward."""
        escape = [None]

        class Square(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x):
                y = x ** 2
                ctx.save_for_backward(x)
                return y

            @staticmethod
            def backward(ctx, grad_output):
                assert isinstance(grad_output, LoggingTensor)
                x, = ctx.saved_tensors
                assert isinstance(x, LoggingTensor)
                escape[0] = x
                return grad_output * 2 * x

        with capture_logs() as logs:
            x = LoggingTensor(torch.ones(1), requires_grad=True)
            log_input("x", x)
            x.grad = LoggingTensor(torch.zeros(1))
            log_input("x.grad", x.grad)
            y = Square.apply(x)
            grad_output = LoggingTensor(torch.ones(1))
            log_input("grad_output", grad_output)
            y.backward(grad_output)

        with torch.no_grad():
            self.assertEqual(escape[0], x)
            self.assertEqual(escape[0]._version, x._version)
            # TODO: figure out why x.requires_grad = False doesn't
            # trigger an error for LoggingTensor
            x.add_(2)
            self.assertEqual(escape[0], x)
            # TODO: figure out why this is broken
            # self.assertEqual(escape[0]._version, x._version)

        self.assertExpectedInline('\n'.join(logs), '''\
$0 = input('x')
$1 = input('x.grad')
$2 = torch._ops.aten.pow.Tensor_Scalar($0, 2)
$3 = input('grad_output')
True = torch._ops.aten.is_same_size.default($2, $3)
$4 = torch._ops.aten.mul.Tensor($3, 2)
$5 = torch._ops.aten.mul.Tensor($4, $0)
$6 = torch._ops.aten.add_.Tensor($1, $5)''')
    def test_subclass_creation(self):
        """Re-wrapping a tensor already associated with another Python subclass must error cleanly."""
        # Make sure these statements runs without error
        # In particular checking that when internal detach returns
        # subclasses, these are cleanly overwritten.
        class Foo(torch.Tensor):
            pass

        err_msg = "subclass Foo but.*already associated to a python object of type LoggingTensor"
        with self.assertRaisesRegex(RuntimeError, err_msg):
            a = torch.Tensor._make_subclass(Foo, LoggingTensor(torch.rand(2)))
        with self.assertRaisesRegex(RuntimeError, err_msg):
            b = LoggingTensor(torch.rand(2)).as_subclass(Foo)
        with self.assertRaisesRegex(RuntimeError, err_msg):
            Foo(LoggingTensor(torch.rand(2)))

        with self.assertRaisesRegex(TypeError, "Foo must define __torch_dispatch__"):
            torch.Tensor._make_wrapper_subclass(Foo, (2, 2))

    def test_new_ones(self) -> None:
        """new_ones() on a dispatch subclass returns the subclass type."""
        class MyTensor(torch.Tensor):
            __torch_function__ = torch._C._disabled_torch_function_impl

            @classmethod
            def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
                return MyTensor(3)

        self.assertEqual(type(MyTensor(2).new_ones(3)), MyTensor)

    def test_like(self) -> None:
        """*_like factory functions preserve the dispatch subclass type."""
        class MyTensor(torch.Tensor):
            __torch_function__ = torch._C._disabled_torch_function_impl

            @classmethod
            def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
                return MyTensor(3)

        for f in ["empty", "ones", "rand", "randn", "zeros"]:
            f_name = f + "_like"
            self.assertEqual(type(getattr(torch, f_name)(MyTensor(2))), MyTensor)

        self.assertEqual(type(torch.full_like(MyTensor(2), 1.)), MyTensor)
        self.assertEqual(type(torch.randint_like(MyTensor(2), high=3)), MyTensor)
    def test_make_wrapper_subclass_propagates_metadata(self) -> None:
        """_make_wrapper_subclass forwards size, non-contiguous strides and storage offset."""
        class WrapperTensor(torch.Tensor):
            elem: torch.Tensor

            __slots__ = ['elem']

            @staticmethod
            def __new__(cls, elem, *args, **kwargs):
                r = torch.Tensor._make_wrapper_subclass(  # type: ignore[attr-defined]
                    cls, elem.size(),
                    dtype=elem.dtype, layout=elem.layout,
                    device=elem.device, requires_grad=elem.requires_grad,
                    strides=elem.stride(), storage_offset=elem.storage_offset())
                r.elem = elem
                return r

            @classmethod
            def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
                raise RuntimeError("NYI")

        # non-contiguous strides, non-zero storage offset
        x = torch.randn(4, 6).t().diagonal(offset=2)
        y = WrapperTensor(x)
        self.assertEqual(y.size(), x.size())
        self.assertEqual(y.stride(), x.stride())
        self.assertEqual(y.storage_offset(), x.storage_offset())

    def test_wrapper_subclass_serializes(self) -> None:
        """torch.save/torch.load round-trips a wrapper subclass, preserving type and payload."""
        with tempfile.TemporaryFile() as f:
            x = LoggingTensor(torch.randn(3))
            torch.save(x, f)
            f.seek(0)
            x_loaded = torch.load(f)
            self.assertTrue(type(x_loaded) is type(x))
            self.assertEqual(x.elem, x_loaded.elem)
            self.assertFalse(x is x_loaded)

    def test_deepcopy_wrapper_subclass(self) -> None:
        """copy.deepcopy produces a distinct object of the same wrapper subclass."""
        x = LoggingTensor(torch.randn(3))
        x_copy = deepcopy(x)
        self.assertTrue(type(x_copy) is type(x))
        self.assertEqual(x.elem, x_copy.elem)
        self.assertFalse(x is x_copy)
    def test_deepcopy_wrapper_subclass_with_clone_returning_different_type(self) -> None:
        """deepcopy must error when clone() returns a different type than the subclass."""
        class MyWrapperTensor(torch.Tensor):
            elem: torch.Tensor

            __slots__ = ['elem']

            @staticmethod
            def __new__(cls, elem, *args, **kwargs):
                r = torch.Tensor._make_wrapper_subclass(  # type: ignore[attr-defined]
                    cls, elem.size(),
                    dtype=elem.dtype, layout=elem.layout,
                    device=elem.device, requires_grad=elem.requires_grad,
                    strides=elem.stride(), storage_offset=elem.storage_offset())
                r.elem = elem
                return r

            @classmethod
            def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
                if func.overloadpacket.__name__ == "clone":
                    # Return a plain tensor from clone().
                    return args[0].elem.clone()
                raise RuntimeError("NYI")

            # NB: The default Tensor.__torch_function__ implementation called for deepcopy
            # disables __torch_function__ by the time we get to clone(), so there is no need to
            # explicitly disable __torch_function__ for this subclass.

        x = MyWrapperTensor(torch.randn(3))
        with self.assertRaisesRegex(RuntimeError,
                                    "for which cloning returns another instance of the same subclass"):
            x_copy = deepcopy(x)

    def test_deepcopy_non_wrapper_subclass(self) -> None:
        """deepcopy of a non-wrapper subclass requires new_empty() to return the same type."""
        # Ensure correct error is thrown for common error cases.
        class SubTensorError1(torch.Tensor):
            # Default implementation of new_empty() returns a plain tensor.
            pass

        class SubTensorError2(torch.Tensor):
            # new_empty() incorrectly returns a different type (i.e. a plain tensor).
            def new_empty(self, shape):
                return torch.Tensor(shape)

        for error_cls in [SubTensorError1, SubTensorError2]:
            x = error_cls(3)
            with self.assertRaisesRegex(RuntimeError,
                                        "for which that function returns another instance of the same subclass"):
                x_copy = deepcopy(x)

        # Ensure a correctly implemented new_empty() causes deepcopy() to work.
        class SubTensorSuccess(torch.Tensor):
            def new_empty(self, shape):
                return type(self)(shape)

        x = SubTensorSuccess(3)
        x_copy = deepcopy(x)
        self.assertIs(type(x_copy), type(x))
    def test_index_put_where_only_index_is_subclass(self) -> None:
        """__torch_dispatch__ fires even when only the index argument (not self) is a subclass."""
        called_funcs = []

        class MyTensor(torch.Tensor):
            __torch_function__ = torch._C._disabled_torch_function_impl

            elem: torch.Tensor

            __slots__ = ['elem']

            @staticmethod
            def __new__(cls, elem, *args, **kwargs):
                r = torch.Tensor._make_wrapper_subclass(
                    cls, elem.size(),
                    dtype=elem.dtype, layout=elem.layout,
                    device=elem.device, requires_grad=elem.requires_grad
                )
                r.elem = elem
                return r

            @classmethod
            def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
                called_funcs.append(func)
                return MyTensor(torch.tensor(3))

        x = torch.randn(3, 3)
        idxs = (MyTensor(torch.tensor(0)),)
        v = torch.randn(1)
        res = x.index_put_(idxs, v)
        self.assertEqual(called_funcs, [torch.ops.aten.index_put_.default])
    def test_enable_torch_dispatch_mode_error(self) -> None:
        """Passing a tensor instance (not a mode/class) to enable_torch_dispatch_mode must raise."""
        z = LoggingTensor(torch.empty([]))
        with self.assertRaisesRegex(ValueError, "expected to get TorchDispatchMode, Tensor-like class, or None"):
            with enable_torch_dispatch_mode(z):
                pass

    def test_enable_torch_dispatch_mode_basic(self) -> None:
        """An active mode intercepts factory functions with no tensor arguments."""
        with capture_logs(is_mode=True) as logs:
            with enable_torch_dispatch_mode(LoggingTensorMode()):
                torch.empty([])
        self.assertExpectedInline('\n'.join(logs), """\
$0 = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), pin_memory=False)""")

    def test_enable_torch_dispatch_mode_unrelated_tensors(self) -> None:
        """An active mode also intercepts ops on plain tensors created outside it."""
        x = torch.randn([])
        y = torch.randn([])
        with capture_logs(is_mode=True) as logs:
            with enable_torch_dispatch_mode(LoggingTensorMode()):
                x + y
        self.assertExpectedInline('\n'.join(logs), """\
$2 = torch._ops.aten.add.Tensor($0, $1)""")

    def test_nested_push_regular(self):
        with LoggingTensorMode.push() as mode:
            # This previously errored
            with LoggingTensorMode():
                pass

    def test_nested_push_logging_tensor_mode(self):
        """Two nested LoggingTensorModes each log every op (hence duplicated lines)."""
        x = torch.randn([])
        y = torch.randn([])
        with capture_logs(is_mode=True) as logs:
            with LoggingTensorMode():
                with LoggingTensorMode():
                    torch.empty([])
                    x + y

        self.assertExpectedInline('\n'.join(logs), """\
$0 = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), pin_memory=False)
$0 = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), pin_memory=False)
$3 = torch._ops.aten.add.Tensor($1, $2)
$3 = torch._ops.aten.add.Tensor($1, $2)""")

    def test_capture_logs_with_torch_dispatch_mode(self):
        """capture_logs_with_logging_tensor_mode works standalone and nested (nested logs twice)."""
        x = torch.randn([])
        y = torch.randn([])
        with capture_logs_with_logging_tensor_mode() as logs:
            torch.empty([])
            x + y

        self.assertExpectedInline('\n'.join(logs), """\
$0 = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), pin_memory=False)
$3 = torch._ops.aten.add.Tensor($1, $2)""")

        x = torch.randn([])
        y = torch.randn([])

        with capture_logs_with_logging_tensor_mode() as logs1:
            with capture_logs_with_logging_tensor_mode() as logs2:
                torch.empty([])
                x + y

        self.assertExpectedInline('\n'.join(logs2), """\
$0 = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), pin_memory=False)
$0 = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), pin_memory=False)
$3 = torch._ops.aten.add.Tensor($1, $2)
$3 = torch._ops.aten.add.Tensor($1, $2)""")

        self.assertEqual(logs1, logs2)
    def test_enable_torch_dispatch_mode_subclass_priority(self) -> None:
        """Modes take precedence over tensor-argument subclasses, overriding normal B-over-A priority."""
        class ErrorA(RuntimeError):
            pass

        class ErrorB(RuntimeError):
            pass

        class A(torch.Tensor):
            @staticmethod
            def __new__(cls, elem):
                return torch.Tensor._make_subclass(cls, elem, elem.requires_grad)

            @classmethod
            def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
                raise ErrorA

        class B(A):
            @staticmethod
            def __new__(cls, elem):
                return torch.Tensor._make_subclass(cls, elem, elem.requires_grad)

            @classmethod
            def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
                raise ErrorB

        a = A(torch.empty(1))
        b = B(torch.empty(1))
        with self.assertRaises(ErrorA):
            a + a
        with self.assertRaises(ErrorB):
            a + b

        # B has precedence over A due to the subclass relationship yet
        # modes take precedence over arguments
        with self.assertRaises(ErrorA):
            with enable_torch_dispatch_mode(A):
                b + b
        with self.assertRaises(ErrorB):
            with enable_torch_dispatch_mode(B):
                a + a
        with self.assertRaises(ErrorB):
            with enable_torch_dispatch_mode(B):
                a + b

    def test_enable_torch_dispatch_mode_respects_no_dispatch(self) -> None:
        """Work done under no_dispatch() inside an active mode is not logged."""
        with capture_logs(is_mode=True) as logs1:
            with enable_torch_dispatch_mode(LoggingTensorMode()):
                torch.ones([2, 3])
                with no_dispatch():
                    torch.ones([2, 3])
        with capture_logs(is_mode=True) as logs2:
            with enable_torch_dispatch_mode(LoggingTensorMode()):
                torch.ones([2, 3])
        self.assertEqual(logs1, logs2)

    def test_enable_torch_dispatch_mode_instance(self) -> None:
        """A plain pass-through TorchDispatchMode instance works under enable_torch_dispatch_mode."""
        class TestMode(TorchDispatchMode):
            def __torch_dispatch__(self, func, types, args=(), kwargs=None):
                if kwargs is None:
                    kwargs = {}
                return func(*args, **kwargs)

        x = TestMode()
        y = torch.tensor([2.])
        with enable_torch_dispatch_mode(x):
            y + y
    def test_shallow_copy_and_detach(self) -> None:
        """Every tensor the mode sees as an input was previously produced under the same mode."""
        seen = set()
        test_case = self

        class TestMode(TorchDispatchMode):
            def __torch_dispatch__(self, func, types, args=(), kwargs=None):
                tree_map_only(torch.Tensor, lambda t: test_case.assertIn(t, seen), (args, kwargs))
                if kwargs is None:
                    kwargs = {}
                r = func(*args, **kwargs)
                tree_map_only(torch.Tensor, lambda t: seen.add(t), r)
                return r

        with TestMode():
            x = torch.randn(3, requires_grad=True)
            loss = (x * x).sum()
            loss.backward()

    def test_nested_enable_torch_dispatch_mode(self) -> None:
        """Nesting two distinct mode instances via enable_torch_dispatch_mode is an error."""
        class A(LoggingTensorMode):
            pass

        with self.assertRaisesRegex(ValueError, "there is already an active mode"):
            with enable_torch_dispatch_mode(LoggingTensorMode()):
                with enable_torch_dispatch_mode(A()):
                    pass

        # For nesting to be a noop, they need to be the same instance
        with self.assertRaisesRegex(ValueError, "there is already an active mode"):
            with enable_torch_dispatch_mode(LoggingTensorMode()):
                with enable_torch_dispatch_mode(LoggingTensorMode()):
                    pass

    def test_nesting_with_same_enable_torch_dispatch_mode(self) -> None:
        # "nested" enable_torch_dispatch_modes are allowed if they're the same mode (same instance).
        # It's the equivalent of a noop, so it will only write once to the log
        x = torch.tensor([3.])
        mode = LoggingTensorMode()
        with capture_logs(is_mode=True) as logs:
            log_input("x", x)
            with enable_torch_dispatch_mode(mode):
                with enable_torch_dispatch_mode(mode):
                    x + x
        self.assertExpectedInline('\n'.join(logs), '''\
$0 = input('x')
$1 = torch._ops.aten.add.Tensor($0, $0)''')
    def test_enable_torch_dispatch_mode_ignore_preexisting(self):
        """ignore_preexisting=True lets a new mode shadow an already-active one."""
        class A(TorchDispatchMode):
            def __torch_dispatch__(self, func, types, args=(), kwargs=None):
                raise AssertionError

        x = torch.tensor([3.])
        with capture_logs(is_mode=True) as logs:
            with enable_torch_dispatch_mode(A()):
                with enable_torch_dispatch_mode(LoggingTensorMode(), ignore_preexisting=True):
                    x + x
        self.assertExpectedInline('\n'.join(logs), """\
$1 = torch._ops.aten.add.Tensor($0, $0)""")

    def test_enable_torch_dispatch_mode_replace(self):
        """replace=<mode> swaps out the named active mode for the new one."""
        class A(TorchDispatchMode):
            def __torch_dispatch__(self, func, types, args=(), kwargs=None):
                raise AssertionError

        x = torch.tensor([3.])
        outer_mode = A()
        with capture_logs(is_mode=True) as logs:
            with enable_torch_dispatch_mode(outer_mode):
                with enable_torch_dispatch_mode(LoggingTensorMode(), replace=outer_mode):
                    x + x
        self.assertExpectedInline('\n'.join(logs), """\
$1 = torch._ops.aten.add.Tensor($0, $0)""")

    def test_exception_handling(self):
        """An exception raised inside __torch_dispatch__ leaves the mode usable afterwards."""
        class A(torch.Tensor):
            @staticmethod
            def __new__(cls, elem):
                return torch.Tensor._make_subclass(cls, elem, elem.requires_grad)

            @classmethod
            def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
                if func.__name__ == 'randn.default':
                    raise RuntimeError()
                return cls(torch.zeros(()))

        with enable_torch_dispatch_mode(A):
            try:
                torch.randn(())
            except RuntimeError:
                pass
            self.assertTrue(isinstance(torch.zeros(()), A))

    def test_ctor_no_inner(self):
        """A mode may replace a factory call's result wholesale (randn -> zeros)."""
        class A(TorchDispatchMode):
            def __torch_dispatch__(self, func, types, args=(), kwargs=None):
                return torch.zeros([])

        with enable_torch_dispatch_mode(A()):
            x = torch.randn((3, 4))

        self.assertEqual(x, torch.zeros([]))
    def test_with_mode(self):
        """A TorchDispatchMode used directly as a context manager intercepts ops."""
        class ErrorA(RuntimeError):
            pass

        class A(TorchDispatchMode):
            def __torch_dispatch__(self, func, types, args=(), kwargs=None):
                raise ErrorA()

        with self.assertRaises(ErrorA):
            with A():
                torch.empty([])

    def test_with_mode_created_separately(self):
        """A mode instance created before the with-statement still works."""
        class ErrorA(RuntimeError):
            pass

        class A(TorchDispatchMode):
            def __torch_dispatch__(self, func, types, args=(), kwargs=None):
                raise ErrorA()

        x = A()
        with self.assertRaises(ErrorA):
            with x:
                torch.empty([])

    def test_with_nested_modes(self):
        """With nested modes, the innermost mode handles the op first."""
        class ErrorA(RuntimeError):
            def __init__(self, msg):
                return super().__init__(msg)

        class A(TorchDispatchMode):
            def __init__(self, msg):
                self.msg = msg

            def __torch_dispatch__(self, func, types, args=(), kwargs=None):
                raise ErrorA(self.msg)

        with self.assertRaisesRegex(ErrorA, "layer2"):
            with A("layer1"):
                with A("layer2"):
                    torch.empty([])
    def test_make_subclass_with_modes(self):
        """A mode can wrap results in a _make_subclass-based tensor; the subclass re-enters the mode."""
        class ModeTensor(torch.Tensor):
            def __new__(cls, elem, mode):
                r = torch.Tensor._make_subclass(cls, elem, elem.requires_grad)
                r.elem = elem
                r.mode = mode
                return r

            @classmethod
            def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
                # Re-dispatch under the outermost mode attached to any ModeTensor argument.
                modes = (arg.mode for arg in args + tuple(kwargs.values()) if isinstance(arg, ModeTensor))
                outermost = find_outermost_mode(modes)
                with outermost.restore():
                    return func(*args, **kwargs)

        class Mode(TorchDispatchMode):
            def __torch_dispatch__(self, func, types, args=(), kwargs=None):
                def unwrap(e):
                    if isinstance(e, ModeTensor):
                        return e.elem
                    else:
                        return e

                def wrap(t):
                    if isinstance(t, torch.Tensor):
                        return ModeTensor(t, self)
                    else:
                        return t

                return wrap(func(*tuple(unwrap(a) for a in args), **kwargs))

        class BasicMode(TorchDispatchMode):
            def __torch_dispatch__(self, func, types, args=(), kwargs=None):
                return func(*args, **kwargs)

        x = torch.tensor(4.)
        with Mode():
            y = x + x
            z = y + y
        self.assertIsInstance(y, ModeTensor)
        self.assertIsInstance(z, ModeTensor)

        with Mode():
            with BasicMode():  # we can't nest two modes that call make_subclass because it only accepts vanilla tensors
                y = x + x
                z = y + y
        self.assertIsInstance(y, ModeTensor)
        self.assertIsInstance(z, ModeTensor)

        # NOTE(review): a bare `assert` on assertRaisesRegex with no callable is
        # suspect — with two arguments it returns a context manager, so nothing
        # is really verified here; confirm the intended check upstream.
        assert self.assertRaisesRegex(RuntimeError, "subclass Mode but.* associated to a python object of type Mode")
def test_notimplemented_mode(self):
    """A mode returning NotImplemented defers to the subclass's own __torch_dispatch__."""
    sub_count = 0

    class PoliteMode(TorchDispatchMode):
        def __init__(self):
            # Track entries vs. calls actually handled by the mode.
            self.pre_count = 0
            self.post_count = 0

        def __torch_dispatch__(self, func, types, args=(), kwargs=None):
            self.pre_count += 1
            if any(t is not torch.Tensor for t in types):
                # Bow out: let the tensor subclass listed in ``types`` handle it.
                return NotImplemented
            self.post_count += 1
            return func(*args, **kwargs)

    class SubTensor(torch.Tensor):
        def __new__(cls, elem):
            r = torch.Tensor._make_wrapper_subclass(cls, elem.shape)
            r.elem = elem
            return r

        @classmethod
        def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
            nonlocal sub_count
            sub_count += 1

            def unwrap(t):
                if isinstance(t, SubTensor):
                    return t.elem
                else:
                    return t

            return func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs))

        __torch_function__ = torch._C._disabled_torch_function_impl

    a = SubTensor(torch.randn(2))
    with PoliteMode() as mode:
        a.abs()

    # The mode is entered twice (once deferring to SubTensor, once for the
    # unwrapped call) but only handles the plain-tensor dispatch.
    self.assertEqual(mode.pre_count, 2)
    self.assertEqual(mode.post_count, 1)
    self.assertEqual(sub_count, 1)

    # make sure this doesn't error
    with PoliteMode():
        with PoliteMode():
            a.abs()
def test_disable_mode(self):
    """``enable_torch_dispatch_mode(None, replace=m)`` temporarily turns mode ``m`` off."""
    class FailEverythingMode(TorchDispatchMode):
        def __torch_dispatch__(self, func, types, args=(), kwargs=None):
            raise RuntimeError("arf")

    with FailEverythingMode() as m:
        self.assertRaises(RuntimeError, lambda: torch.ones([2, 3]))
        # Swapping the active mode for None disables interception entirely.
        with enable_torch_dispatch_mode(None, replace=m):
            torch.ones([2, 3])
def test_make_wrapper_subclass_with_modes(self):
    """Wrapper subclasses created via ``_make_wrapper_subclass`` interoperate with modes."""
    class ModeTensor(torch.Tensor):
        def __new__(cls, elem, mode):
            r = torch.Tensor._make_wrapper_subclass(cls, elem.shape)
            r.elem = elem
            r.mode = mode
            return r

        @classmethod
        def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
            # Re-enter the outermost mode attached to any ModeTensor argument.
            modes = (arg.mode for arg in args + tuple(kwargs.values()) if isinstance(arg, ModeTensor))
            outermost = find_outermost_mode(modes)
            with outermost.restore():
                return func(*args, **kwargs)

    class Mode(TorchDispatchMode):
        def __torch_dispatch__(self, func, types, args=(), kwargs=None):
            def unwrap(e):
                if isinstance(e, ModeTensor):
                    return e.elem
                else:
                    return e

            def wrap(t):
                if isinstance(t, torch.Tensor):
                    return ModeTensor(t, self)
                else:
                    return t

            return wrap(func(*tuple(unwrap(a) for a in args), **kwargs))

    x = torch.tensor(4.)
    with Mode():
        y = x + x
        z = y + y
    self.assertIsInstance(y, ModeTensor)
    self.assertIsInstance(z, ModeTensor)

    # Unlike the _make_subclass variant above, two Mode instances can nest here.
    with Mode():
        with Mode():
            y = x + x
            z = y + y
    self.assertIsInstance(y, ModeTensor)
    self.assertIsInstance(z, ModeTensor)
def test_error_using_same_mode(self):
    """Re-entering an already-active mode instance is rejected."""
    class A(TorchDispatchMode):
        pass

    x = A()
    with x:
        with self.assertRaisesRegex(RuntimeError, "has already been used as a mode. Please use a fresh version"):
            with x:
                pass
def test_error_using_class_method_on_mode(self):
    """A mode's __torch_dispatch__ must be an instance method, not a classmethod."""
    class A(TorchDispatchMode):
        @classmethod
        def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
            return func(args, kwargs)

    x = torch.tensor(5.)
    with self.assertRaisesRegex(RuntimeError, "should be a normal method not a class method"):
        with A():
            x + x
def test_error_with_ancestor(self):
    """A mode that has been entered and exited cannot be re-entered."""
    x = LoggingTensorMode()
    with x:
        pass
    with self.assertRaisesRegex(RuntimeError, "has already been used as a mode. Please use a fresh version"):
        with x:
            pass
def test_restore_errors(self):
    """restore() requires ancestors, and the current mode must be one of them."""
    # A never-entered mode has no ancestry to restore into.
    with self.assertRaisesRegex(RuntimeError, "does not have any ancestors. Use the standard version instead"):
        with LoggingTensorMode().restore():
            pass

    x = LoggingTensorMode()
    with LoggingTensorMode():
        with x:
            pass

    with LoggingTensorMode():  # a different mode instance than the one above
        with self.assertRaisesRegex(RuntimeError, "the current mode is not its ancestor"):
            with x.restore():
                pass
def test_restore_ancestor_mode(self):
    """restore() re-enters a previously used mode so new modes can nest under it."""
    x = LoggingTensorMode()
    y = LoggingTensorMode()
    with x:
        with y:
            pass

    z = LoggingTensorMode()
    # y can be restored after exiting; z then nests inside it.
    with y.restore():
        with z:
            pass

    # z now has x in its ancestry, so restoring both in order is legal.
    with x.restore():
        with z.restore():
            pass
def test_find_outermost_mode(self):
    """find_outermost_mode picks the most deeply nested mode of a shared scope."""
    self.assertIsNone(find_outermost_mode([None, None]))

    x = LoggingTensorMode()
    y = LoggingTensorMode()
    with x:
        with y:
            pass
    # y was entered inside x, so y wins between the two.
    self.assertEqual(find_outermost_mode([x, y]), y)

    z = LoggingTensorMode()
    with y.restore():
        with z:
            pass
    self.assertEqual(find_outermost_mode([z, x]), z)

    i = LoggingTensorMode()
    # A mode that was never entered has no ancestry to order against.
    with self.assertRaisesRegex(RuntimeError, "doesn't have ancestors set so the ordering with other modes"):
        find_outermost_mode([i, x, y, z])

    k = LoggingTensorMode()
    with k:
        pass
    # k was entered in an unrelated scope, so ordering is undefined.
    with self.assertRaisesRegex(RuntimeError, "don't come from the same scope"):
        find_outermost_mode([k, x, y, z])
def test_all_same_mode(self):
    """all_same_mode is True only when every entry is the very same mode object."""
    first = LoggingTensorMode()
    second = LoggingTensorMode()
    self.assertTrue(all_same_mode([first] * 3))
    self.assertFalse(all_same_mode([first, None]))
    self.assertFalse(all_same_mode([first, second]))
def test_all_same_mode_scope(self):
    """all_same_mode_scope checks modes against the scope of a reference mode."""
    x = LoggingTensorMode()
    y = LoggingTensorMode()
    z = LoggingTensorMode()
    with x:
        with y:
            pass
    # z is entered with x restored, so y and z end up as siblings under x.
    with x.restore():
        with z:
            pass
    i = LoggingTensorMode()

    self.assertTrue(all_same_mode_scope([x, y], y))
    self.assertTrue(all_same_mode_scope([x, z], z))
    self.assertFalse(all_same_mode_scope([x, y, z], y))
    self.assertFalse(all_same_mode_scope([x, y, z], z))
    self.assertFalse(all_same_mode_scope([x, y, i], y))

    # A mode that was never entered has no scope at all.
    no_ancestor = LoggingTensorMode()
    self.assertFalse(all_same_mode_scope([x, y, z], no_ancestor))
def test_tolist_numpy_with_torch_dispatch_mode(self) -> None:
    """Data-dependent exits (tolist/numpy) are rejected for tensor subclasses."""
    x = LoggingTensor(torch.tensor([2.0, 3.0]))
    with self.assertRaisesRegex(RuntimeError, "is not supported for tensor subclasses."):
        x.tolist()
    with self.assertRaisesRegex(RuntimeError, "is not supported for tensor subclasses."):
        x.numpy()
    # assertEqual ultimately needs element data too, so it also fails here.
    with self.assertRaises(AssertionError):
        self.assertEqual(x, None)
def test_enable_torch_dispatch_mode_subclass_autograd_device_check(self) -> None:
    """A non-wrapper subclass lying about its device still works with autograd."""
    class NonWrapperSubclass(torch.Tensor):
        elem: torch.Tensor

        __slots__ = ['elem']

        @staticmethod
        def __new__(cls, elem, *args, **kwargs):
            # Wrong device here!
            r = torch.Tensor._make_subclass(cls, elem.to("meta"), elem.requires_grad)
            # ...the real tensor is held as an element on the tensor.
            r.elem = elem
            return r

        @classmethod
        def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
            def unwrap(e):
                return e.elem if isinstance(e, NonWrapperSubclass) else e

            def wrap(e):
                return NonWrapperSubclass(e) if isinstance(e, torch.Tensor) else e

            rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs)))
            # Fix: the original passed an already-formatted f-string *plus*
            # extra positional args to ``Logger.info``; with no ``%s``
            # placeholders, logging raises a "not all arguments converted"
            # formatting error whenever the record is actually emitted.
            # Use lazy %-style arguments instead.
            logging.getLogger("NonWrapperSubclass").info(
                "%s.%s %s %s %s", func.__module__, func.__name__, args, kwargs, rs
            )
            return rs

    x = NonWrapperSubclass(torch.tensor([3.0, 4.0], requires_grad=True))
    y = torch.randn(2, requires_grad=True)
    z = x * y
    self.assertIsInstance(z, NonWrapperSubclass)
    z.sum().backward(torch.tensor(1))
    # d(x*y)/dx == y and d(x*y)/dy == x.
    self.assertEqual(x.grad, y)
    self.assertEqual(y.grad, x)
def test_none_wrapping(self):
    """An op whose __torch_dispatch__ returns None yields an undefined grad safely."""
    # A Tensor subclass that returns None when doing add
    # See LoggingTensor above for more details on the subclass
    class SubclassWithNone(torch.Tensor):
        @staticmethod
        def __new__(cls, elem, *args, **kwargs):
            r = torch.Tensor._make_wrapper_subclass(
                cls, elem.size(),
                dtype=elem.dtype, layout=elem.layout,
                device=elem.device, requires_grad=elem.requires_grad
            )
            r.elem = elem
            return r

        @classmethod
        def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
            def unwrap(e):
                return e.elem if isinstance(e, SubclassWithNone) else e

            def wrap(e):
                return SubclassWithNone(e) if isinstance(e, torch.Tensor) else e

            rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs)))
            # Deliberately drop the result of every ``add`` overload.
            if func.overloadpacket.__name__ == "add":
                return None
            else:
                return rs

    x = SubclassWithNone(torch.rand(2))
    # Make sure both run without error
    self.assertIsInstance(x * 2, SubclassWithNone)
    self.assertIsNone(x + 2)

    x.requires_grad_()
    out = x.acos().sum()

    # The backward of acos does add then rsqrt so here we make sure that the
    # undefined Tensor generated by the user code is nicely handled.
    # If acos formula changes in the future, this can be replaced by any other
    # function that does add then something in the backward in a composite way
    with self.assertRaisesRegex(RuntimeError, "but got None"):
        out.backward()
def test_storage_can_be_converted_to_python_object(self):
    """set_ with a freshly created Storage on a subclass must not crash."""
    s = torch.Storage()
    z = LoggingTensor(torch.empty([]))
    # The storage crosses the C++/Python boundary inside the subclass dispatch.
    z.set_(s)
def test_autograd_in_attr(self):
    """Autograd tracks the wrapped ``.elem`` tensor, not the reentrant wrapper."""
    # We want the wrapped Tensor to require gradients!
    true_t = torch.rand(2, requires_grad=True)
    t = LoggingTensorReentrant(true_t)

    out = t + 2

    # The outer tensor is detached from autograd...
    self.assertFalse(out.requires_grad)
    self.assertIsNone(out.grad_fn)
    # ...while the inner element carries the autograd graph.
    self.assertTrue(out.elem.requires_grad)
    self.assertIsNotNone(out.elem.grad_fn)

    with self.assertRaisesRegex(RuntimeError, "does not require grad"):
        out.sum().backward()

    out.elem.sum().backward()

    self.assertIsNone(t.grad)
    self.assertIsNotNone(t.elem.grad)
def test_dispatch_super_call(self):
    """super().__torch_dispatch__ redispatches to the default implementation."""
    called = []

    class SubTensor(torch.Tensor):
        @staticmethod
        def __new__(cls, elem):
            return torch.Tensor._make_subclass(cls, elem)

        __torch_function__ = torch._C._disabled_torch_function_impl

        @classmethod
        def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
            called.append(func)
            return super().__torch_dispatch__(func, types, args, kwargs)

    x = torch.randn(2)
    y = torch.randn(2)
    # The subclass result matches plain-tensor addition, dispatched exactly once.
    self.assertEqual(SubTensor(x) + SubTensor(y), x + y)
    self.assertEqual(called, [torch.ops.aten.add.Tensor])
def test_dispatch_super_call_list_arg(self):
    """super().__torch_dispatch__ also accepts args packed in a list."""
    called = []

    class SubTensorWithListArg(torch.Tensor):
        @staticmethod
        def __new__(cls, elem):
            return torch.Tensor._make_subclass(cls, elem)

        __torch_function__ = torch._C._disabled_torch_function_impl

        @classmethod
        def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
            called.append(func)
            # Pass args as a list (not a tuple) to exercise argument handling.
            return super().__torch_dispatch__(func, types, list(args), kwargs)

    x = torch.randn(2)
    self.assertEqual(SubTensorWithListArg(x).neg(), x.neg())
    self.assertEqual(called, [torch.ops.aten.neg.default])
def test_dispatch_super_dont_autograd(self):
    """Redispatching below Python must not run autograd on the result again."""
    called = []

    class SubTensor(torch.Tensor):
        @staticmethod
        def __new__(cls, elem):
            return torch.Tensor._make_subclass(cls, elem, elem.requires_grad)

        __torch_function__ = torch._C._disabled_torch_function_impl

        @classmethod
        def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
            called.append(func)
            # This argument still requires grad because it was passed
            # through directly...
            self.assertTrue(args[0].requires_grad)
            r = super().__torch_dispatch__(func, types, args, kwargs)
            # But the output better not require grad, because that means
            # you did autograd again in torch dispatch (oops)
            self.assertFalse(r.requires_grad)
            return r

    x = SubTensor(torch.randn(2, requires_grad=True))
    x.neg()
    self.assertEqual(called, [torch.ops.aten.neg.default])
def test_set_data(self):
    """``.data`` reads dispatch; assignment and ``set_`` keep the subclass type."""
    called = 0

    class SubTensor(torch.Tensor):
        __torch_function__ = torch._C._disabled_torch_function_impl

        @classmethod
        def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
            nonlocal called
            called += 1
            return super().__torch_dispatch__(func, types, args, kwargs)

    x = SubTensor(torch.empty(2))
    # Reading .data dispatches exactly once...
    x.data
    self.assertEqual(called, 1)
    # ...while assigning to .data does not dispatch at all.
    x.data = torch.empty(2)
    self.assertEqual(called, 1)
    x.data
    self.assertEqual(called, 2)
    # The Python object keeps its subclass type across the .data swap.
    self.assertIs(type(x), SubTensor)

    x.set_(torch.empty(2))
    self.assertEqual(called, 3)
    x.data
    self.assertEqual(called, 4)
    self.assertIs(type(x), SubTensor)
def test_construct_int_tensor(self):
class SubTensor(torch.Tensor):
pass
# should not fail
SubTensor(torch.zeros(2, dtype=torch.int))
def test_multiple_ops_subclass(self):
    """Chained conj()+exp() must not corrupt the dispatcher's saved Python TLS."""
    # This is a Direct Subclass, don't do that!
    class MySubclass(torch.Tensor):
        @staticmethod
        def __new__(cls, elem):
            r = torch.Tensor._make_subclass(cls, elem)
            return r

        __torch_function__ = torch._C._disabled_torch_function_impl

        @classmethod
        def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
            with no_dispatch():
                return func(*args, **kwargs)

    x = MySubclass(torch.rand(2, 2, dtype=torch.complex64))
    y = x.conj()
    # Details of the bug that this tests for:
    # Here, y dispatch keys are: {PythonTLSSnapshot, AutogradCPU, Conjugate, Python, CPU}
    # There are a few calls to the dispatcher that are going to happen here:
    #  - call_exp: User calling exp on y
    #    - PythonTLSSnapshot: records the TLS on entry and redispatch
    #    - AutogradCPU: no input requires grad, so does nothing and redispatch
    #    - Conjugate: no special implementation for exp: use the fallback that
    #      first clone the Tensor (to materialize the conj) then redispatch
    #      - call_clone: conjugate fallback calling clone on y
    #        - PythonTLSSnapshot: records the TLS on entry and redispatch
    #        - (AutogradCPU: skipped as autograd added itself to the exclude set above)
    #        - Conjugate: special implementation for clone: just skip this key
    #        - Python: Reset the TLS based on the snapshot above and call the user implementation (this
    #          actually calls into the dispatcher again but since we disable both our keys
    #          before, not detailed here)
    #        - exit Python: restore the TLS and exit
    #        - exit Conjugate: nothing was inplace so just exit
    #        - exit PythonTLSSnapshot: done with this call, reset the saved TLS to empty
    #    - Python: Reset the TLS again based on the snapshot. <- this used to fail
    #    - More steps....
    y.exp()
@staticmethod
def subclass_helper(cls, data, use_wrapper_subclass, **kwargs):
if use_wrapper_subclass:
kwargs["device"] = data.device
kwargs["dtype"] = data.dtype
kwargs["layout"] = data.layout
kwargs["requires_grad"] = True
return torch.Tensor._make_wrapper_subclass(cls, data.size(), **kwargs) # type: ignore[attr-defined]
else:
return torch.Tensor._make_subclass(cls, data, True, **kwargs)
def test_is_contiguous_slow_path(self):
    """With dispatch_sizes_strides_policy="strides", is_contiguous dispatches to Python."""
    data = torch.randn(3, 3)
    contiguous_data = data.clone()
    not_contiguous_data = torch.as_strided(data.clone(), (2, 2), (1, 2))

    for use_wrapper_subclass in [True, False]:
        class ExampleTensor1(torch.Tensor):
            @staticmethod
            def __new__(cls, data, wrapper):
                return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="strides")

            @classmethod
            def __torch_dispatch__(cls, func, types, args, kwargs):
                return NotImplemented

        class ExampleTensor2(torch.Tensor):
            @staticmethod
            def __new__(cls, data, wrapper):
                return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="strides")

            @classmethod
            def __torch_dispatch__(cls, func, types, args, kwargs):
                # Report the contiguous clone's answer, whatever the real data is.
                if func.overloadpacket == torch.ops.aten.is_contiguous:
                    return contiguous_data.is_contiguous()
                return NotImplemented

        class ExampleTensor3(torch.Tensor):
            @staticmethod
            def __new__(cls, data, wrapper):
                return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="strides")

            @classmethod
            def __torch_dispatch__(cls, func, types, args, kwargs):
                # Report the non-contiguous strided view's answer instead.
                if func.overloadpacket == torch.ops.aten.is_contiguous:
                    return not_contiguous_data.is_contiguous()
                return NotImplemented

        err_msg = "no implementation found for 'torch.ops.aten.is_contiguous'"
        e = ExampleTensor1(torch.randn(3, 3), use_wrapper_subclass)
        with self.assertRaisesRegex(TypeError, err_msg):
            e.is_contiguous()
        with self.assertRaisesRegex(TypeError, err_msg):
            e.contiguous()

        e = ExampleTensor2(torch.randn(3, 3), use_wrapper_subclass)
        self.assertEqual(e.is_contiguous(), True)
        e.contiguous()  # this will just return the original TensorImpl since is_contiguous = True

        err_msg = "no implementation found for"
        e = ExampleTensor3(torch.randn(3, 3), use_wrapper_subclass)
        self.assertEqual(e.is_contiguous(), False)
        # contiguous() on a "non-contiguous" tensor needs a clone, which is
        # not implemented here, hence the error.
        with self.assertRaisesRegex(TypeError, err_msg):
            e.contiguous()
def test_device_slowpath(self):
    """With dispatch_device=True, .device queries dispatch to Python."""
    for use_wrapper_subclass in [True]:
        class ExampleTensor1(torch.Tensor):
            @staticmethod
            def __new__(cls, data, wrapper):
                return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_device=True)

            @classmethod
            def __torch_dispatch__(cls, func, types, args, kwargs):
                return NotImplemented

        class ExampleTensor2(torch.Tensor):
            @staticmethod
            def __new__(cls, data, wrapper):
                return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_device=True)

            @classmethod
            def __torch_dispatch__(cls, func, types, args, kwargs):
                # Claim to live on the meta device regardless of the real data.
                if func.overloadpacket == torch.ops.prim.device:
                    return torch.device('meta')
                return NotImplemented

        # NOTE(review): ExampleTensor3 is byte-identical to ExampleTensor2;
        # presumably it was meant to differ — confirm against upstream history.
        class ExampleTensor3(torch.Tensor):
            @staticmethod
            def __new__(cls, data, wrapper):
                return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_device=True)

            @classmethod
            def __torch_dispatch__(cls, func, types, args, kwargs):
                if func.overloadpacket == torch.ops.prim.device:
                    return torch.device('meta')
                return NotImplemented

        err_msg = "no implementation found for 'torch.ops.prim.device'"
        with self.assertRaisesRegex(TypeError, err_msg):
            e = ExampleTensor1(torch.randn(3, 3), use_wrapper_subclass)
            e.device()

        ten = torch.rand([1])
        e = ExampleTensor2(torch.randn(3, 3, device='cpu'), use_wrapper_subclass)
        self.assertEqual(e.device.type, 'meta')
        self.assertEqual(ten.type_as(e).device.type, 'meta')

        e = ExampleTensor3(torch.randn(3, 3, device='cpu'), use_wrapper_subclass)
        self.assertEqual(e.device.type, 'meta')
        self.assertEqual(ten.type_as(e).device.type, 'meta')
def test_dim_slowpath(self):
    """With dispatch_sizes_strides_policy="sizes", dim() dispatches to Python."""
    data = torch.randn(3, 3)

    for use_wrapper_subclass in [True, False]:
        class DimNotImplementedTensor(torch.Tensor):
            @staticmethod
            def __new__(cls, data, wrapper):
                return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="sizes")

            @classmethod
            def __torch_dispatch__(cls, func, types, args, kwargs):
                return NotImplemented

        class DimImplementedTensor(torch.Tensor):
            @staticmethod
            def __new__(cls, data, wrapper):
                return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="sizes")

            @classmethod
            def __torch_dispatch__(cls, func, types, args, kwargs):
                # Answer with the captured reference tensor's rank.
                if func.overloadpacket == torch.ops.aten.dim:
                    return data.dim()
                return NotImplemented

        err_msg = "no implementation found for 'torch.ops.aten.dim'"
        e = DimNotImplementedTensor(torch.randn(3, 3), use_wrapper_subclass)
        with self.assertRaisesRegex(TypeError, err_msg):
            e.dim()

        t = DimImplementedTensor(torch.randn(3, 3), use_wrapper_subclass)
        self.assertEqual(t.dim(), 2)
def test_maybe_tuple_bug(self):
class T(torch.Tensor):
@classmethod
def __torch_function__(cls, *args, **kwargs):
pass
a = torch.rand(3)
a[[T(), T()]]
def test_standard_is_not_subclass(self):
# https://github.com/pytorch/pytorch/issues/79079
self.assertFalse(torch._C._dispatch_isTensorSubclassLike(torch.empty(0)))
def test_strides_slow_path(self):
    """With dispatch_sizes_strides_policy="strides", stride() dispatches to Python."""
    for use_wrapper_subclass in [True, False]:
        class StridesNotImplemented(torch.Tensor):
            @staticmethod
            def __new__(cls, data, wrapper):
                return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="strides")

            @classmethod
            def __torch_dispatch__(cls, func, types, args, kwargs):
                return NotImplemented

        class StridesCustomReturn(torch.Tensor):
            @staticmethod
            def __new__(cls, data, wrapper):
                return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="strides")

            @classmethod
            def __torch_dispatch__(cls, func, types, args, kwargs):
                # Report a fixed, made-up stride.
                if func == torch.ops.aten.stride.default:
                    return (4, 2)
                return NotImplemented

        class StridesDefaultReturn(torch.Tensor):
            @staticmethod
            def __new__(cls, data, wrapper):
                return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="strides")

            @classmethod
            def __torch_dispatch__(cls, func, types, args, kwargs):
                # Returning None falls back to the default (real) strides.
                if func == torch.ops.aten.stride.default:
                    return None
                return NotImplemented

        err_msg = "no implementation found for 'torch.ops.aten.stride'"
        e = StridesNotImplemented(torch.randn(3, 3), use_wrapper_subclass)
        with self.assertRaisesRegex(TypeError, err_msg):
            e.stride()

        e = StridesCustomReturn(torch.randn(3, 3), use_wrapper_subclass)
        self.assertEqual(e.stride(), (4, 2))

        e = StridesDefaultReturn(torch.randn(6, 2), use_wrapper_subclass)
        self.assertEqual(e.stride(), (2, 1))
def test_sizes_slow_path(self):
    """With dispatch_sizes_strides_policy="sizes", size() dispatches to Python."""
    for use_wrapper_subclass in [True, False]:
        data = torch.randn(6, 2)

        class SizesNotImplemented(torch.Tensor):
            @staticmethod
            def __new__(cls, data, wrapper):
                return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="sizes")

            @classmethod
            def __torch_dispatch__(cls, func, types, args, kwargs):
                if func.overloadpacket == torch.ops.aten.dim:
                    return data.dim()
                return NotImplemented

        class SizesCustomReturn(torch.Tensor):
            @staticmethod
            def __new__(cls, data, wrapper):
                return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="sizes")

            @classmethod
            def __torch_dispatch__(cls, func, types, args, kwargs):
                if func.overloadpacket == torch.ops.aten.dim:
                    return data.dim()
                # Report a size unrelated to the real data.
                if func.overloadpacket == torch.ops.aten.sym_size:
                    return (5, 3)
                return NotImplemented

        class SizesDefaultReturn(torch.Tensor):
            @staticmethod
            def __new__(cls, data, wrapper):
                return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="sizes")

            @classmethod
            def __torch_dispatch__(cls, func, types, args, kwargs):
                if func.overloadpacket == torch.ops.aten.dim:
                    return data.dim()
                # Returning None falls back to the default (real) sizes.
                if func.overloadpacket == torch.ops.aten.sym_size:
                    return None
                return NotImplemented

        err_msg = "no implementation found for 'torch.ops.aten.sym_size'"
        e = SizesNotImplemented(torch.randn(3, 3), use_wrapper_subclass)
        with self.assertRaisesRegex(RuntimeError, err_msg):
            e.size()

        e = SizesCustomReturn(torch.randn(3, 3), use_wrapper_subclass)
        self.assertEqual(e.size(), (5, 3))

        e = SizesDefaultReturn(torch.randn(4, 2), use_wrapper_subclass)
        self.assertEqual(e.size(), (4, 2))
def test_layout_slow_path(self):
    """With dispatch_layout=True, .layout queries dispatch to Python."""
    for use_wrapper_subclass in [True, False]:
        data = torch.randn(6, 2)

        class LayoutNotImplemented(torch.Tensor):
            @staticmethod
            def __new__(cls, data, wrapper):
                return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_layout=True)

            @classmethod
            def __torch_dispatch__(cls, func, types, args, kwargs):
                return NotImplemented

        class LayoutCustomReturn(torch.Tensor):
            @staticmethod
            def __new__(cls, data, wrapper):
                return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_layout=True)

            @classmethod
            def __torch_dispatch__(cls, func, types, args, kwargs):
                # Claim a sparse CSR layout regardless of the real data.
                if func.overloadpacket == torch.ops.prim.layout:
                    return torch.sparse_csr
                return NotImplemented

        class LayoutDefaultReturn(torch.Tensor):
            @staticmethod
            def __new__(cls, data, wrapper):
                return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_layout=True)

            @classmethod
            def __torch_dispatch__(cls, func, types, args, kwargs):
                # Report the captured reference tensor's real layout.
                if func.overloadpacket == torch.ops.prim.layout:
                    return data.layout
                return NotImplemented

        err_msg = "no implementation found for 'torch.ops.prim.layout'"
        e = LayoutNotImplemented(torch.randn(3, 3), use_wrapper_subclass)
        with self.assertRaisesRegex(TypeError, err_msg):
            e.layout

        e = LayoutCustomReturn(torch.randn(3, 3), use_wrapper_subclass)
        self.assertEqual(e.layout, torch.sparse_csr)

        e = LayoutDefaultReturn(torch.randn(4, 2), use_wrapper_subclass)
        self.assertEqual(e.layout, torch.strided)
# Allow running this test file directly (``python test_python_dispatch.py``).
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_python_dispatch.py |
# Owner(s): ["oncall: r2p"]
from torch.testing._internal.common_utils import (
TestCase, run_tests,
)
from datetime import timedelta, datetime
import tempfile
import time
from torch.monitor import (
Aggregation,
Event,
log_event,
register_event_handler,
unregister_event_handler,
Stat,
TensorboardEventHandler,
)
class TestMonitor(TestCase):
    """Smoke tests for the torch.monitor Stat/Event APIs."""

    def test_interval_stat(self) -> None:
        """A window-based Stat flushes an event once its time window elapses."""
        events = []

        def handler(event):
            events.append(event)

        handle = register_event_handler(handler)
        s = Stat(
            "asdf",
            (Aggregation.SUM, Aggregation.COUNT),
            timedelta(milliseconds=1),
        )
        self.assertEqual(s.name, "asdf")

        s.add(2)
        for _ in range(100):
            # NOTE: different platforms sleep may be inaccurate so we loop
            # instead (i.e. win)
            time.sleep(1 / 1000)  # ms
            s.add(3)
            if len(events) >= 1:
                break
        self.assertGreaterEqual(len(events), 1)
        unregister_event_handler(handle)

    def test_fixed_count_stat(self) -> None:
        """A Stat with window size 3 aggregates and resets after the third add."""
        s = Stat(
            "asdf",
            (Aggregation.SUM, Aggregation.COUNT),
            timedelta(hours=100),
            3,
        )
        s.add(1)
        s.add(2)
        name = s.name
        self.assertEqual(name, "asdf")
        self.assertEqual(s.count, 2)
        s.add(3)
        # The third add closes the window: pending count resets to zero...
        self.assertEqual(s.count, 0)
        # ...and the aggregated values become readable.
        self.assertEqual(s.get(), {Aggregation.SUM: 6.0, Aggregation.COUNT: 3})

    def test_log_event(self) -> None:
        """Events with mixed payload types can be constructed and logged."""
        e = Event(
            name="torch.monitor.TestEvent",
            timestamp=datetime.now(),
            data={
                "str": "a string",
                "float": 1234.0,
                "int": 1234,
            },
        )
        self.assertEqual(e.name, "torch.monitor.TestEvent")
        self.assertIsNotNone(e.timestamp)
        self.assertIsNotNone(e.data)
        log_event(e)

    def test_event_handler(self) -> None:
        """Registered handlers see each logged event; unregistered ones do not."""
        events = []

        def handler(event: Event) -> None:
            events.append(event)

        handle = register_event_handler(handler)
        e = Event(
            name="torch.monitor.TestEvent",
            timestamp=datetime.now(),
            data={},
        )
        log_event(e)
        self.assertEqual(len(events), 1)
        self.assertEqual(events[0], e)
        log_event(e)
        self.assertEqual(len(events), 2)

        unregister_event_handler(handle)
        log_event(e)
        # No further events are delivered after unregistering.
        self.assertEqual(len(events), 2)
class TestMonitorTensorboard(TestCase):
    """Integration test: torch.monitor stats flow into TensorBoard summaries."""

    def setUp(self):
        global SummaryWriter, event_multiplexer
        try:
            from torch.utils.tensorboard import SummaryWriter
            from tensorboard.backend.event_processing import (
                plugin_event_multiplexer as event_multiplexer,
            )
        except ImportError:
            return self.skipTest("Skip the test since TensorBoard is not installed")
        # Track every TemporaryDirectory handed out so tearDown can remove it.
        self.temp_dirs = []

    def create_summary_writer(self):
        """Create a SummaryWriter backed by a fresh temp dir (cleaned in tearDown)."""
        temp_dir = tempfile.TemporaryDirectory()  # noqa: P201
        self.temp_dirs.append(temp_dir)
        return SummaryWriter(temp_dir.name)

    def tearDown(self):
        # Remove directories created by SummaryWriter
        for temp_dir in self.temp_dirs:
            temp_dir.cleanup()

    def test_event_handler(self):
        """Stats logged via TensorboardEventHandler round-trip through event files."""
        with self.create_summary_writer() as w:
            handle = register_event_handler(TensorboardEventHandler(w))
            s = Stat(
                "asdf",
                (Aggregation.SUM, Aggregation.COUNT),
                timedelta(hours=1),
                5,
            )
            for i in range(10):
                s.add(i)
            self.assertEqual(s.count, 0)
            unregister_event_handler(handle)

        # Read the event files back and check the aggregated scalars.
        mul = event_multiplexer.EventMultiplexer()
        mul.AddRunsFromDirectory(self.temp_dirs[-1].name)
        mul.Reload()
        scalar_dict = mul.PluginRunToTagToContent("scalars")
        raw_result = {
            tag: mul.Tensors(run, tag)
            for run, run_dict in scalar_dict.items()
            for tag in run_dict
        }
        scalars = {
            tag: [e.tensor_proto.float_val[0] for e in events] for tag, events in raw_result.items()
        }
        self.assertEqual(scalars, {
            "asdf.sum": [10],
            "asdf.count": [5],
        })
# Allow running this test file directly (``python test_monitor.py``).
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_monitor.py |
# Owner(s): ["module: dataloader"]
import copy
import itertools
import os
import os.path
import pickle
import random
import sys
import tempfile
import warnings
from functools import partial
from typing import (
Any,
Awaitable,
Dict,
Generic,
Iterator,
List,
NamedTuple,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from unittest import skipIf
import numpy as np
import torch
import torch.utils.data.datapipes as dp
import torch.utils.data.graph
import torch.utils.data.graph_settings
from torch.testing._internal.common_utils import TestCase, run_tests, suppress_warnings
from torch.utils.data import (
DataLoader,
DataChunk,
IterDataPipe,
MapDataPipe,
RandomSampler,
argument_validation,
runtime_validation,
runtime_validation_disabled,
)
from torch.utils.data.graph import traverse
from torch.utils.data.datapipes.utils.common import StreamWrapper
from torch.utils.data.datapipes.utils.decoder import (
basichandlers as decoder_basichandlers,
)
from torch.utils.data.datapipes.utils.snapshot import (
_simple_graph_snapshot_restoration
)
from torch.utils.data.datapipes.dataframe import CaptureDataFrame
from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper
# Optional third-party dependencies: probe availability once at import time
# and build skip decorators so individual tests can opt out cleanly.
try:
    import dill
    # XXX: By default, dill writes the Pickler dispatch table to inject its
    # own logic there. This globally affects the behavior of the standard library
    # pickler for any user who transitively depends on this module!
    # Undo this extension to avoid altering the behavior of the pickler globally.
    dill.extend(use_dill=False)
    HAS_DILL = True
except ImportError:
    HAS_DILL = False
skipIfNoDill = skipIf(not HAS_DILL, "no dill")

try:
    import pandas  # type: ignore[import] # noqa: F401 F403
    HAS_PANDAS = True
except ImportError:
    HAS_PANDAS = False
skipIfNoDataFrames = skipIf(not HAS_PANDAS, "no dataframes (pandas)")

skipTyping = skipIf(True, "TODO: Fix typing bug")
# Covariant element type shared by the DataPipe helpers below.
T_co = TypeVar("T_co", covariant=True)
def create_temp_dir_and_files():
    """Create a temp dir holding one .txt/.byte/.empty file plus a sub dir
    holding one .txt/.byte file, and return their handles and file paths.

    The TemporaryDirectory handles are deliberately returned un-released:
    callers clean them up in tearDown().
    """
    def _reserve_file(directory, suffix):
        # Create-and-close immediately; only the reserved path is needed.
        with tempfile.NamedTemporaryFile(dir=directory, delete=False, suffix=suffix) as handle:
            return handle.name

    temp_dir = tempfile.TemporaryDirectory()  # noqa: P201
    dir_path = temp_dir.name
    temp_file1_name = _reserve_file(dir_path, '.txt')
    temp_file2_name = _reserve_file(dir_path, '.byte')
    temp_file3_name = _reserve_file(dir_path, '.empty')

    with open(temp_file1_name, 'w') as text_out:
        text_out.write('0123456789abcdef')
    with open(temp_file2_name, 'wb') as byte_out:
        byte_out.write(b"0123456789abcdef")

    temp_sub_dir = tempfile.TemporaryDirectory(dir=dir_path)  # noqa: P201
    sub_dir_path = temp_sub_dir.name
    temp_sub_file1_name = _reserve_file(sub_dir_path, '.txt')
    temp_sub_file2_name = _reserve_file(sub_dir_path, '.byte')

    with open(temp_sub_file1_name, 'w') as text_out:
        text_out.write('0123456789abcdef')
    with open(temp_sub_file2_name, 'wb') as byte_out:
        byte_out.write(b"0123456789abcdef")

    return [(temp_dir, temp_file1_name, temp_file2_name, temp_file3_name),
            (temp_sub_dir, temp_sub_file1_name, temp_sub_file2_name)]
def reset_after_n_next_calls(datapipe: Union[IterDataPipe[T_co], MapDataPipe[T_co]],
                             n: int) -> Tuple[List[T_co], List[T_co]]:
    """
    Consume ``n`` elements from ``datapipe``, then iterate it again from scratch.

    Returns a pair of lists: the first ``n`` elements seen before the reset,
    and every element produced by the fresh (reset) iteration.
    """
    iterator = iter(datapipe)
    consumed = [next(iterator) for _ in range(n)]
    # Starting a brand-new iteration resets the DataPipe.
    return consumed, list(datapipe)
def odd_or_even(x: int) -> int:
    """Group key mapping even integers to 0 and odd integers to 1."""
    remainder = x % 2
    return remainder
class TestDataChunk(TestCase):
    """DataChunk should behave exactly like the plain list it wraps."""

    def setUp(self):
        self.elements = list(range(10))
        random.shuffle(self.elements)
        self.chunk: DataChunk[int] = DataChunk(self.elements)

    def test_getitem(self):
        """Indexing mirrors the backing list."""
        for idx in range(10):
            self.assertEqual(self.chunk[idx], self.elements[idx])

    def test_iter(self):
        """Iteration yields the backing list's elements in order."""
        for expected, got in zip(self.elements, iter(self.chunk)):
            self.assertEqual(expected, got)

    def test_len(self):
        """Length matches the backing list."""
        self.assertEqual(len(self.chunk), len(self.elements))

    def test_as_string(self):
        """str() renders identically to the equivalent plain list."""
        self.assertEqual(str(self.chunk), str(self.elements))
        batch = [self.elements] * 3
        chunks: List[DataChunk[int]] = [DataChunk(self.elements)] * 3
        self.assertEqual(str(batch), str(chunks))

    def test_sort(self):
        """In-place sort works and preserves the DataChunk type."""
        chunk: DataChunk[int] = DataChunk(self.elements)
        chunk.sort()
        self.assertTrue(isinstance(chunk, DataChunk))
        for expected, value in enumerate(chunk):
            self.assertEqual(value, expected)

    def test_reverse(self):
        """In-place reverse works and preserves the DataChunk type."""
        chunk: DataChunk[int] = DataChunk(self.elements)
        chunk.reverse()
        self.assertTrue(isinstance(chunk, DataChunk))
        for idx in range(10):
            self.assertEqual(chunk[idx], self.elements[9 - idx])

    def test_random_shuffle(self):
        """random.shuffle permutes a DataChunk the same way as a plain list."""
        elements = list(range(10))
        chunk: DataChunk[int] = DataChunk(elements)
        rng = random.Random(0)
        rng.shuffle(chunk)
        rng = random.Random(0)
        rng.shuffle(elements)
        self.assertEqual(chunk, elements)
class TestStreamWrapper(TestCase):
    """StreamWrapper should proxy, auto-close, and pickle like the wrapped stream."""

    class _FakeFD:
        """Minimal file-descriptor stand-in that records open/close state."""

        def __init__(self, filepath):
            self.filepath = filepath
            self.opened = False
            self.closed = False

        def open(self):
            self.opened = True

        def read(self):
            if self.opened:
                # "Contents" are the digits 0-4 yielded by __iter__.
                return "".join(self)
            else:
                raise IOError("Cannot read from un-opened file descriptor")

        def __iter__(self):
            for i in range(5):
                yield str(i)

        def close(self):
            if self.opened:
                self.opened = False
                self.closed = True

        def __repr__(self):
            return "FakeFD"

    def test_dir(self):
        """dir() on the wrapper exposes the wrapped object's attributes."""
        fd = TestStreamWrapper._FakeFD("")
        wrap_fd = StreamWrapper(fd)

        s = set(dir(wrap_fd))
        for api in ['open', 'read', 'close']:
            self.assertTrue(api in s)

    def test_api(self):
        """Proxied calls reach the wrapped fd; dropping the wrapper closes it."""
        fd = TestStreamWrapper._FakeFD("")
        wrap_fd = StreamWrapper(fd)

        self.assertFalse(fd.opened)
        self.assertFalse(fd.closed)
        with self.assertRaisesRegex(IOError, "Cannot read from"):
            wrap_fd.read()

        wrap_fd.open()
        self.assertTrue(fd.opened)
        self.assertEqual("01234", wrap_fd.read())

        # Deleting the wrapper closes the underlying fd.
        del wrap_fd
        self.assertFalse(fd.opened)
        self.assertTrue(fd.closed)

    def test_pickle(self):
        """A wrapper pickles iff the wrapped stream does, raising the same error."""
        with tempfile.TemporaryFile() as f:
            with self.assertRaises(TypeError) as ctx1:
                pickle.dumps(f)
            wrap_f = StreamWrapper(f)
            with self.assertRaises(TypeError) as ctx2:
                pickle.dumps(wrap_f)
            # Same exception when pickle
            self.assertEqual(str(ctx1.exception), str(ctx2.exception))

        fd = TestStreamWrapper._FakeFD("")
        wrap_fd = StreamWrapper(fd)
        _ = pickle.loads(pickle.dumps(wrap_fd))

    def test_repr(self):
        """repr embeds the wrapped object's own repr."""
        fd = TestStreamWrapper._FakeFD("")
        wrap_fd = StreamWrapper(fd)
        self.assertEqual(str(wrap_fd), "StreamWrapper<FakeFD>")

        with tempfile.TemporaryFile() as f:
            wrap_f = StreamWrapper(f)
            self.assertEqual(str(wrap_f), "StreamWrapper<" + str(f) + ">")
class TestIterableDataPipeBasic(TestCase):
    """End-to-end checks for file-based IterDataPipes (FileLister, FileOpener,
    RoutedDecoder, Grouper, demux/mux, column-wise Mapper) over a temporary
    directory tree created in setUp."""
    def setUp(self):
        # create_temp_dir_and_files() returns ((dir, *files), (subdir, *subfiles)).
        ret = create_temp_dir_and_files()
        self.temp_dir = ret[0][0]
        self.temp_files = ret[0][1:]
        self.temp_sub_dir = ret[1][0]
        self.temp_sub_files = ret[1][1:]
    def tearDown(self):
        # Best-effort cleanup: a failure to remove the temp dirs only warns.
        try:
            self.temp_sub_dir.cleanup()
            self.temp_dir.cleanup()
        except Exception as e:
            warnings.warn("TestIterableDatasetBasic was not able to cleanup temp dir due to {}".format(str(e)))
    def test_listdirfiles_iterable_datapipe(self):
        """FileLister yields every file under the root, recurses on request,
        accepts a list of roots, and works via the `.list_files()` functional API."""
        temp_dir = self.temp_dir.name
        datapipe: IterDataPipe = dp.iter.FileLister(temp_dir, '')
        count = 0
        for pathname in datapipe:
            count = count + 1
            self.assertTrue(pathname in self.temp_files)
        self.assertEqual(count, len(self.temp_files))
        count = 0
        datapipe = dp.iter.FileLister(temp_dir, '', recursive=True)
        for pathname in datapipe:
            count = count + 1
            self.assertTrue((pathname in self.temp_files) or (pathname in self.temp_sub_files))
        self.assertEqual(count, len(self.temp_files) + len(self.temp_sub_files))
        temp_files = self.temp_files
        datapipe = dp.iter.FileLister([temp_dir, *temp_files])
        count = 0
        for pathname in datapipe:
            count += 1
            self.assertTrue(pathname in self.temp_files)
        self.assertEqual(count, 2 * len(self.temp_files))
        # test functional API
        datapipe = datapipe.list_files()
        count = 0
        for pathname in datapipe:
            count += 1
            self.assertTrue(pathname in self.temp_files)
        self.assertEqual(count, 2 * len(self.temp_files))
    def test_listdirfilesdeterministic_iterable_datapipe(self):
        """FileLister output order is deterministic across iterations."""
        temp_dir = self.temp_dir.name
        datapipe = dp.iter.FileLister(temp_dir, '')
        # The output order should be always the same.
        self.assertEqual(list(datapipe), list(datapipe))
        datapipe = dp.iter.FileLister(temp_dir, '', recursive=True)
        # The output order should be always the same.
        self.assertEqual(list(datapipe), list(datapipe))
    def test_openfilesfromdisk_iterable_datapipe(self):
        """FileOpener (class and `.open_files()` functional form) yields
        (path, stream) pairs whose contents match the files on disk."""
        # test import datapipe class directly
        from torch.utils.data.datapipes.iter import (
            FileLister,
            FileOpener,
        )
        temp_dir = self.temp_dir.name
        datapipe1 = FileLister(temp_dir, '')
        datapipe2 = FileOpener(datapipe1, mode='b')
        count = 0
        for rec in datapipe2:
            count = count + 1
            self.assertTrue(rec[0] in self.temp_files)
            with open(rec[0], 'rb') as f:
                self.assertEqual(rec[1].read(), f.read())
            rec[1].close()
        self.assertEqual(count, len(self.temp_files))
        # functional API
        datapipe3 = datapipe1.open_files(mode='b')
        count = 0
        for rec in datapipe3:
            count = count + 1
            self.assertTrue(rec[0] in self.temp_files)
            with open(rec[0], 'rb') as f:
                self.assertEqual(rec[1].read(), f.read())
            rec[1].close()
        self.assertEqual(count, len(self.temp_files))
        # __len__ Test
        with self.assertRaises(TypeError):
            len(datapipe3)
    def test_routeddecoder_iterable_datapipe(self):
        """RoutedDecoder dispatches to the handler matching each extension and
        closes the consumed byte streams."""
        temp_dir = self.temp_dir.name
        temp_pngfile_pathname = os.path.join(temp_dir, "test_png.png")
        png_data = np.array([[[1., 0., 0.], [1., 0., 0.]], [[1., 0., 0.], [1., 0., 0.]]], dtype=np.single)
        np.save(temp_pngfile_pathname, png_data)
        datapipe1 = dp.iter.FileLister(temp_dir, ['*.png', '*.txt'])
        datapipe2 = dp.iter.FileOpener(datapipe1, mode='b')
        def _png_decoder(extension, data):
            if extension != 'png':
                return None
            return np.load(data)
        def _helper(prior_dp, dp, channel_first=False):
            # Byte stream is not closed
            for inp in prior_dp:
                self.assertFalse(inp[1].closed)
            for inp, rec in zip(prior_dp, dp):
                ext = os.path.splitext(rec[0])[1]
                if ext == '.png':
                    expected = np.array([[[1., 0., 0.], [1., 0., 0.]], [[1., 0., 0.], [1., 0., 0.]]], dtype=np.single)
                    if channel_first:
                        expected = expected.transpose(2, 0, 1)
                    self.assertEqual(rec[1], expected)
                else:
                    with open(rec[0], 'rb') as f:
                        self.assertEqual(rec[1], f.read().decode('utf-8'))
                # Corresponding byte stream is closed by Decoder
                self.assertTrue(inp[1].closed)
        cached = list(datapipe2)
        with warnings.catch_warnings(record=True) as wa:
            datapipe3 = dp.iter.RoutedDecoder(cached, _png_decoder)
            datapipe3.add_handler(decoder_basichandlers)
            _helper(cached, datapipe3)
        cached = list(datapipe2)
        with warnings.catch_warnings(record=True) as wa:
            datapipe4 = dp.iter.RoutedDecoder(cached, decoder_basichandlers)
            datapipe4.add_handler(_png_decoder)
            _helper(cached, datapipe4, channel_first=True)
    def test_groupby_iterable_datapipe(self):
        """Grouper groups (path, stream) records by basename stem into pairs."""
        file_list = ["a.png", "b.png", "c.json", "a.json", "c.png", "b.json", "d.png",
                     "d.json", "e.png", "f.json", "g.png", "f.png", "g.json", "e.json",
                     "h.txt", "h.json"]
        import io
        datapipe1 = dp.iter.IterableWrapper([(filename, io.BytesIO(b'12345abcde')) for filename in file_list])
        def group_fn(data):
            filepath, _ = data
            return os.path.basename(filepath).split(".")[0]
        datapipe2 = dp.iter.Grouper(datapipe1, group_key_fn=group_fn, group_size=2)
        def order_fn(data):
            data.sort(key=lambda f: f[0], reverse=True)
            return data
        datapipe3 = dp.iter.Mapper(datapipe2, fn=order_fn)  # type: ignore[var-annotated]
        expected_result = [
            ("a.png", "a.json"), ("c.png", "c.json"), ("b.png", "b.json"), ("d.png", "d.json"),
            ("f.png", "f.json"), ("g.png", "g.json"), ("e.png", "e.json"), ("h.txt", "h.json")]
        count = 0
        for rec, expected in zip(datapipe3, expected_result):
            count = count + 1
            self.assertEqual(os.path.basename(rec[0][0]), expected[0])
            self.assertEqual(os.path.basename(rec[1][0]), expected[1])
            for i in [0, 1]:
                self.assertEqual(rec[i][1].read(), b'12345abcde')
                rec[i][1].close()
        self.assertEqual(count, 8)
    def test_demux_mux_datapipe(self):
        """demux splits by classifier and mux round-robins back together."""
        numbers = NumbersDataset(10)
        n1, n2 = numbers.demux(2, lambda x: x % 2)
        self.assertEqual([0, 2, 4, 6, 8], list(n1))
        self.assertEqual([1, 3, 5, 7, 9], list(n2))
        # Functional Test: demux and mux works sequentially as expected
        numbers = NumbersDataset(10)
        n1, n2, n3 = numbers.demux(3, lambda x: x % 3)
        n = n1.mux(n2, n3)
        self.assertEqual(list(range(9)), list(n))
        # Functional Test: Uneven DataPipes
        source_numbers = list(range(0, 10)) + [10, 12]
        numbers_dp = dp.iter.IterableWrapper(source_numbers)
        n1, n2 = numbers_dp.demux(2, lambda x: x % 2)
        self.assertEqual([0, 2, 4, 6, 8, 10, 12], list(n1))
        self.assertEqual([1, 3, 5, 7, 9], list(n2))
        n = n1.mux(n2)
        self.assertEqual(list(range(10)), list(n))
    @suppress_warnings  # Suppress warning for lambda fn
    def test_map_with_col_file_handle_datapipe(self):
        """Mapper with `input_col` applied to file-handle records matches a
        whole-record lambda, for both tuple and list records."""
        temp_dir = self.temp_dir.name
        datapipe1 = dp.iter.FileLister(temp_dir, '')
        datapipe2 = dp.iter.FileOpener(datapipe1)
        def _helper(datapipe):
            dp1 = datapipe.map(lambda x: x.read(), input_col=1)
            dp2 = datapipe.map(lambda x: (x[0], x[1].read()))
            self.assertEqual(list(dp1), list(dp2))
        # tuple
        _helper(datapipe2)
        # list
        datapipe3 = datapipe2.map(lambda x: list(x))
        _helper(datapipe3)
@skipIfNoDataFrames
class TestCaptureDataFrame(TestCase):
    """Checks that captured (lazy) DataFrame operations replay identically to
    eager dataframe execution."""
    def get_new_df(self):
        # Fresh single-row frame so capture and eager runs start identical.
        return df_wrapper.create_dataframe([[1, 2]], columns=['a', 'b'])
    def compare_capture_and_eager(self, operations):
        # Record `operations` on a CaptureDataFrame, replay them on a real
        # frame, and compare against applying `operations` eagerly.
        cdf = CaptureDataFrame()
        cdf = operations(cdf)
        df = self.get_new_df()
        cdf = cdf.apply_ops(df)
        df = self.get_new_df()
        df = operations(df)
        self.assertTrue(df.equals(cdf))
    def test_basic_capture(self):
        """Column arithmetic and assignment replay correctly."""
        def operations(df):
            df['c'] = df.b + df['a'] * 7
            # somehow swallows pandas UserWarning when `df.c = df.b + df['a'] * 7`
            return df
        self.compare_capture_and_eager(operations)
class TestDataFramesPipes(TestCase):
    """
    Most of these tests will fail if pandas is installed but dill is not
    available. Need to rework them to avoid multiple skips.
    """
    def _get_datapipe(self, range=10, dataframe_size=7):
        # Plain (non-dataframe) reference pipeline yielding (i, i % 3) tuples.
        return NumbersDataset(range) \
            .map(lambda i: (i, i % 3))
    def _get_dataframes_pipe(self, range=10, dataframe_size=7):
        # Same data as _get_datapipe, but packed into dataframes of
        # `dataframe_size` rows with columns ['i', 'j'].
        return NumbersDataset(range) \
            .map(lambda i: (i, i % 3)) \
            ._to_dataframes_pipe(
                columns=['i', 'j'],
                dataframe_size=dataframe_size)
    @skipIfNoDataFrames
    @skipIfNoDill  # TODO(VitalyFedyunin): Decouple tests from dill by avoiding lambdas in map
    def test_capture(self):
        """Captured dataframe column math yields the same rows as eager tuples."""
        dp_numbers = self._get_datapipe().map(lambda x: (x[0], x[1], x[1] + 3 * x[0]))
        df_numbers = self._get_dataframes_pipe()
        df_numbers['k'] = df_numbers['j'] + df_numbers.i * 3
        expected = list(dp_numbers)
        actual = list(df_numbers)
        self.assertEqual(expected, actual)
    @skipIfNoDataFrames
    @skipIfNoDill
    def test_shuffle(self):
        """shuffle() permutes rows but preserves the multiset of values."""
        # With non-zero (but extremely low) probability (when shuffle do nothing),
        # this test fails, so feel free to restart
        df_numbers = self._get_dataframes_pipe(range=1000).shuffle()
        dp_numbers = self._get_datapipe(range=1000)
        df_result = [tuple(item) for item in df_numbers]
        self.assertNotEqual(list(dp_numbers), df_result)
        self.assertEqual(list(dp_numbers), sorted(df_result))
    @skipIfNoDataFrames
    @skipIfNoDill
    def test_batch(self):
        """batch() over dataframe pipes produces a correct final short batch."""
        df_numbers = self._get_dataframes_pipe(range=100).batch(8)
        df_numbers_list = list(df_numbers)
        last_batch = df_numbers_list[-1]
        self.assertEqual(4, len(last_batch))
        unpacked_batch = [tuple(row) for row in last_batch]
        self.assertEqual([(96, 0), (97, 1), (98, 2), (99, 0)], unpacked_batch)
    @skipIfNoDataFrames
    @skipIfNoDill
    def test_unbatch(self):
        """unbatch(2) undoes two levels of batching back to plain rows."""
        df_numbers = self._get_dataframes_pipe(range=100).batch(8).batch(3)
        dp_numbers = self._get_datapipe(range=100)
        self.assertEqual(list(dp_numbers), list(df_numbers.unbatch(2)))
    @skipIfNoDataFrames
    @skipIfNoDill
    def test_filter(self):
        """filter() on a dataframe column keeps only matching rows."""
        df_numbers = self._get_dataframes_pipe(range=10).filter(lambda x: x.i > 5)
        actual = list(df_numbers)
        self.assertEqual([(6, 0), (7, 1), (8, 2), (9, 0)], actual)
    @skipIfNoDataFrames
    @skipIfNoDill
    def test_collate(self):
        """collate() applies per-column reducers to each batch; results are
        accessible both by unpacking and by attribute."""
        def collate_i(column):
            return column.sum()
        def collate_j(column):
            return column.prod()
        df_numbers = self._get_dataframes_pipe(range=30).batch(3)
        df_numbers = df_numbers.collate({'j': collate_j, 'i': collate_i})
        expected_i = [3,
                      12,
                      21,
                      30,
                      39,
                      48,
                      57,
                      66,
                      75,
                      84, ]
        actual_i = []
        for i, j in df_numbers:
            actual_i.append(i)
        self.assertEqual(expected_i, actual_i)
        actual_i = []
        for item in df_numbers:
            actual_i.append(item.i)
        self.assertEqual(expected_i, actual_i)
class IDP_NoLen(IterDataPipe):
    """An IterDataPipe deliberately lacking ``__len__``, used to exercise
    length-related error paths in the tests."""
    def __init__(self, input_dp):
        super().__init__()
        self.input_dp = input_dp

    def __iter__(self):
        # Deep-copy plain iterables so iteration cannot mutate the caller's
        # input in place; IterDataPipe inputs are passed through as-is.
        if isinstance(self.input_dp, IterDataPipe):
            source = self.input_dp
        else:
            source = copy.deepcopy(self.input_dp)
        yield from source
def _fake_fn(data):
return data
def _fake_add(constant, data):
return constant + data
def _fake_filter_fn(data):
return True
def _simple_filter_fn(data):
return data >= 5
def _fake_filter_fn_constant(constant, data):
return data >= constant
def _mul_10(x):
return x * 10
def _mod_3_test(x):
return x % 3 == 1
def _worker_init_fn(worker_id):
    # DataLoader worker-init hook: applies sharding so each worker processes
    # a distinct partition of the DataPipe graph. Must run inside a worker
    # process, where get_worker_info() returns a populated WorkerInfo.
    info = torch.utils.data.get_worker_info()
    num_workers = info.num_workers
    datapipe = info.dataset
    torch.utils.data.graph_settings.apply_sharding(datapipe, num_workers, worker_id)
# Deliberately module-level *lambdas* (not defs): the serialization tests
# below rely on them to check that plain pickle rejects lambdas while dill
# can serialize them.
lambda_fn1 = lambda x: x  # noqa: E731
lambda_fn2 = lambda x: x % 2  # noqa: E731
lambda_fn3 = lambda x: x >= 5  # noqa: E731
class TestFunctionalIterDataPipe(TestCase):
def _serialization_test_helper(self, datapipe, use_dill):
if use_dill:
serialized_dp = dill.dumps(datapipe)
deserialized_dp = dill.loads(serialized_dp)
else:
serialized_dp = pickle.dumps(datapipe)
deserialized_dp = pickle.loads(serialized_dp)
try:
self.assertEqual(list(datapipe), list(deserialized_dp))
except AssertionError as e:
print(f"{datapipe} is failing.")
raise e
def _serialization_test_for_single_dp(self, dp, use_dill=False):
# 1. Testing for serialization before any iteration starts
self._serialization_test_helper(dp, use_dill)
# 2. Testing for serialization after DataPipe is partially read
it = iter(dp)
_ = next(it)
self._serialization_test_helper(dp, use_dill)
# 3. Testing for serialization after DataPipe is fully read
it = iter(dp)
_ = list(it)
self._serialization_test_helper(dp, use_dill)
def _serialization_test_for_dp_with_children(self, dp1, dp2, use_dill=False):
# 1. Testing for serialization before any iteration starts
self._serialization_test_helper(dp1, use_dill)
self._serialization_test_helper(dp2, use_dill)
# 2. Testing for serialization after DataPipe is partially read
it1, it2 = iter(dp1), iter(dp2)
_, _ = next(it1), next(it2)
# Catch `fork`, `demux` "some child DataPipes are not exhausted" warning
with warnings.catch_warnings(record=True) as wa:
self._serialization_test_helper(dp1, use_dill)
self._serialization_test_helper(dp2, use_dill)
# 2.5. Testing for serialization after one child DataPipe is fully read
# (Only for DataPipes with children DataPipes)
it1 = iter(dp1)
_ = list(it1) # fully read one child
# Catch `fork`, `demux` "some child DataPipes are not exhausted" warning
with warnings.catch_warnings(record=True) as wa:
self._serialization_test_helper(dp1, use_dill)
self._serialization_test_helper(dp2, use_dill)
# 3. Testing for serialization after DataPipe is fully read
it2 = iter(dp2)
_ = list(it2) # fully read the other child
self._serialization_test_helper(dp1, use_dill)
self._serialization_test_helper(dp2, use_dill)
    def test_serializable(self):
        """Every core IterDataPipe must survive a pickle round-trip.

        Each entry is (DataPipe class, custom input or None, ctor args,
        ctor kwargs); None inputs default to IterableWrapper(range(10)).
        """
        picklable_datapipes: List = [
            (dp.iter.Batcher, None, (3, True,), {}),
            (dp.iter.Collator, None, (_fake_fn,), {}),
            (dp.iter.Concater, None, (dp.iter.IterableWrapper(range(5)),), {}),
            (dp.iter.Demultiplexer, None, (2, _simple_filter_fn), {}),
            (dp.iter.FileLister, ".", (), {}),
            (dp.iter.FileOpener, None, (), {}),
            (dp.iter.Filter, None, (_fake_filter_fn,), {}),
            (dp.iter.Filter, None, (partial(_fake_filter_fn_constant, 5),), {}),
            (dp.iter.Forker, None, (2,), {}),
            (dp.iter.Grouper, None, (_fake_filter_fn,), {"group_size": 2}),
            (dp.iter.IterableWrapper, range(10), (), {}),
            (dp.iter.Mapper, None, (_fake_fn,), {}),
            (dp.iter.Mapper, None, (partial(_fake_add, 1),), {}),
            (dp.iter.Multiplexer, None, (dp.iter.IterableWrapper(range(10)),), {}),
            (dp.iter.Sampler, None, (), {}),
            (dp.iter.Shuffler, dp.iter.IterableWrapper([0] * 10), (), {}),
            (dp.iter.StreamReader, None, (), {}),
            (dp.iter.UnBatcher, None, (0,), {}),
            (dp.iter.Zipper, None, (dp.iter.IterableWrapper(range(10)),), {}),
        ]
        # Skipping comparison for these DataPipes
        dp_skip_comparison = {dp.iter.FileOpener, dp.iter.StreamReader}
        # These DataPipes produce multiple DataPipes as outputs and those should be compared
        dp_compare_children = {dp.iter.Demultiplexer, dp.iter.Forker}
        for dpipe, custom_input, dp_args, dp_kwargs in picklable_datapipes:
            if custom_input is None:
                custom_input = dp.iter.IterableWrapper(range(10))
            if dpipe in dp_skip_comparison:  # Merely make sure they are picklable and loadable (no value comparison)
                datapipe = dpipe(custom_input, *dp_args, **dp_kwargs)  # type: ignore[call-arg]
                serialized_dp = pickle.dumps(datapipe)
                _ = pickle.loads(serialized_dp)
            elif dpipe in dp_compare_children:  # DataPipes that have children
                dp1, dp2 = dpipe(custom_input, *dp_args, **dp_kwargs)  # type: ignore[call-arg]
                self._serialization_test_for_dp_with_children(dp1, dp2)
            else:  # Single DataPipe that requires comparison
                datapipe = dpipe(custom_input, *dp_args, **dp_kwargs)  # type: ignore[call-arg]
                self._serialization_test_for_single_dp(datapipe)
    def test_serializable_with_dill(self):
        """Only for DataPipes that take in a function as argument"""
        # With dill installed, lambdas and local functions must serialize;
        # without it, construction warns and plain pickle must fail.
        input_dp = dp.iter.IterableWrapper(range(10))
        datapipes_with_lambda_fn: List[Tuple[Type[IterDataPipe], Tuple, Dict[str, Any]]] = [
            (dp.iter.Collator, (lambda_fn1,), {}),
            (dp.iter.Demultiplexer, (2, lambda_fn2,), {}),
            (dp.iter.Filter, (lambda_fn3,), {}),
            (dp.iter.Grouper, (lambda_fn3,), {}),
            (dp.iter.Mapper, (lambda_fn1,), {}),
        ]
        def _local_fns():
            # Local (nested) functions are, like lambdas, unpicklable by the
            # plain pickle module.
            def _fn1(x):
                return x
            def _fn2(x):
                return x % 2
            def _fn3(x):
                return x >= 5
            return _fn1, _fn2, _fn3
        fn1, fn2, fn3 = _local_fns()
        datapipes_with_local_fn: List[Tuple[Type[IterDataPipe], Tuple, Dict[str, Any]]] = [
            (dp.iter.Collator, (fn1,), {}),
            (dp.iter.Demultiplexer, (2, fn2,), {}),
            (dp.iter.Filter, (fn3,), {}),
            (dp.iter.Grouper, (fn3,), {}),
            (dp.iter.Mapper, (fn1,), {}),
        ]
        dp_compare_children = {dp.iter.Demultiplexer}
        if HAS_DILL:
            for dpipe, dp_args, dp_kwargs in datapipes_with_lambda_fn + datapipes_with_local_fn:
                if dpipe in dp_compare_children:
                    dp1, dp2 = dpipe(input_dp, *dp_args, **dp_kwargs)  # type: ignore[call-arg]
                    self._serialization_test_for_dp_with_children(dp1, dp2, use_dill=True)
                else:
                    datapipe = dpipe(input_dp, *dp_args, **dp_kwargs)  # type: ignore[call-arg]
                    self._serialization_test_for_single_dp(datapipe, use_dill=True)
        else:
            msgs = (
                r"^Lambda function is not supported by pickle",
                r"^Local function is not supported by pickle"
            )
            for dps, msg in zip((datapipes_with_lambda_fn, datapipes_with_local_fn), msgs):
                for dpipe, dp_args, dp_kwargs in dps:
                    with self.assertWarnsRegex(UserWarning, msg):
                        datapipe = dpipe(input_dp, *dp_args, **dp_kwargs)  # type: ignore[call-arg]
                    with self.assertRaises((pickle.PicklingError, AttributeError)):
                        pickle.dumps(datapipe)
    def test_iterable_wrapper_datapipe(self):
        """Exercises IterableWrapper: ordering, deep/shallow copy semantics,
        reset behavior, and ``__len__``."""
        input_ls = list(range(10))
        input_dp = dp.iter.IterableWrapper(input_ls)
        # Functional Test: values are unchanged and in the same order
        self.assertEqual(input_ls, list(input_dp))
        # Functional Test: deep copy by default when an iterator is initialized (first element is read)
        it = iter(input_dp)
        self.assertEqual(0, next(it))  # The deep copy only happens when the first element is read
        input_ls.append(50)
        self.assertEqual(list(range(1, 10)), list(it))
        # Functional Test: shallow copy
        input_ls2 = [1, 2, 3]
        input_dp_shallow = dp.iter.IterableWrapper(input_ls2, deepcopy=False)
        input_ls2.append(10)
        self.assertEqual([1, 2, 3, 10], list(input_dp_shallow))
        # Reset Test: reset the DataPipe
        input_ls = list(range(10))
        input_dp = dp.iter.IterableWrapper(input_ls)
        n_elements_before_reset = 5
        res_before_reset, res_after_reset = reset_after_n_next_calls(input_dp, n_elements_before_reset)
        self.assertEqual(input_ls[:n_elements_before_reset], res_before_reset)
        self.assertEqual(input_ls, res_after_reset)
        # __len__ Test: inherits length from sequence
        self.assertEqual(len(input_ls), len(input_dp))
    def test_concat_iterdatapipe(self):
        """Exercises `concat`: input validation, concatenation order, reset,
        and ``__len__`` error propagation."""
        input_dp1 = dp.iter.IterableWrapper(range(10))
        input_dp2 = dp.iter.IterableWrapper(range(5))
        # Functional Test: Raises exception for empty input
        with self.assertRaisesRegex(ValueError, r"Expected at least one DataPipe"):
            dp.iter.Concater()
        # Functional Test: Raises exception for non-IterDataPipe input
        with self.assertRaisesRegex(TypeError, r"Expected all inputs to be `IterDataPipe`"):
            dp.iter.Concater(input_dp1, ())  # type: ignore[arg-type]
        # Functional Test: Concatenate DataPipes as expected
        concat_dp = input_dp1.concat(input_dp2)
        self.assertEqual(len(concat_dp), 15)
        self.assertEqual(list(concat_dp), list(range(10)) + list(range(5)))
        # Reset Test: reset the DataPipe
        n_elements_before_reset = 5
        res_before_reset, res_after_reset = reset_after_n_next_calls(concat_dp, n_elements_before_reset)
        self.assertEqual(list(range(5)), res_before_reset)
        self.assertEqual(list(range(10)) + list(range(5)), res_after_reset)
        # __len__ Test: inherits length from source DataPipe
        input_dp_nl = IDP_NoLen(range(5))
        concat_dp = input_dp1.concat(input_dp_nl)
        with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
            len(concat_dp)
        self.assertEqual(list(concat_dp), list(range(10)) + list(range(5)))
    def test_fork_iterdatapipe(self):
        """Exercises `fork`: shared element references, buffer-size limits,
        reset semantics, warnings for unexhausted children, ``__len__``
        inheritance, and graph traversal."""
        input_dp = dp.iter.IterableWrapper(range(10))
        with self.assertRaises(ValueError):
            input_dp.fork(num_instances=0)
        # fork with a single instance is a no-op and returns the input itself.
        dp0 = input_dp.fork(num_instances=1, buffer_size=0)
        self.assertEqual(dp0, input_dp)
        # Functional Test: making sure all child DataPipe shares the same reference
        dp1, dp2, dp3 = input_dp.fork(num_instances=3)
        self.assertTrue(all(n1 is n2 and n1 is n3 for n1, n2, n3 in zip(dp1, dp2, dp3)))
        # Functional Test: one child DataPipe yields all value at a time
        output1, output2, output3 = list(dp1), list(dp2), list(dp3)
        self.assertEqual(list(range(10)), output1)
        self.assertEqual(list(range(10)), output2)
        self.assertEqual(list(range(10)), output3)
        # Functional Test: two child DataPipes yield value together
        dp1, dp2 = input_dp.fork(num_instances=2)
        output = []
        for n1, n2 in zip(dp1, dp2):
            output.append((n1, n2))
        self.assertEqual([(i, i) for i in range(10)], output)
        # Functional Test: one child DataPipe yields all value first, but buffer_size = 5 being too small
        dp1, dp2 = input_dp.fork(num_instances=2, buffer_size=4)
        it1 = iter(dp1)
        for _ in range(4):
            next(it1)
        with self.assertRaises(BufferError):
            next(it1)
        with self.assertRaises(BufferError):
            list(dp2)
        dp1, dp2 = input_dp.fork(num_instances=2, buffer_size=5)
        with self.assertRaises(BufferError):
            list(dp2)
        # Functional Test: one child DataPipe yields all value first with unlimited buffer
        with warnings.catch_warnings(record=True) as wa:
            dp1, dp2 = input_dp.fork(num_instances=2, buffer_size=-1)
        self.assertEqual(len(wa), 1)
        self.assertRegex(str(wa[0].message), r"Unlimited buffer size is set")
        l1, l2 = list(dp1), list(dp2)
        for d1, d2 in zip(l1, l2):
            self.assertEqual(d1, d2)
        # Functional Test: two child DataPipes yield value together with buffer size 1
        dp1, dp2 = input_dp.fork(num_instances=2, buffer_size=1)
        output = []
        for n1, n2 in zip(dp1, dp2):
            output.append((n1, n2))
        self.assertEqual([(i, i) for i in range(10)], output)
        # Functional Test: make sure logic related to slowest_ptr is working properly
        dp1, dp2, dp3 = input_dp.fork(num_instances=3)
        output1, output2, output3 = [], [], []
        for i, (n1, n2) in enumerate(zip(dp1, dp2)):
            output1.append(n1)
            output2.append(n2)
            if i == 4:  # yield all of dp3 when halfway through dp1, dp2
                output3 = list(dp3)
                break
        self.assertEqual(list(range(5)), output1)
        self.assertEqual(list(range(5)), output2)
        self.assertEqual(list(range(10)), output3)
        # Reset Test: DataPipe resets when a new iterator is created, even if this datapipe hasn't been read
        dp1, dp2 = input_dp.fork(num_instances=2)
        _ = iter(dp1)
        output2 = []
        with self.assertRaisesRegex(RuntimeError, r"iterator has been invalidated"):
            for i, n2 in enumerate(dp2):
                output2.append(n2)
                if i == 4:
                    with warnings.catch_warnings(record=True) as wa:
                        _ = iter(dp1)  # This will reset all child DataPipes
                    self.assertEqual(len(wa), 1)
                    self.assertRegex(str(wa[0].message), r"child DataPipes are not exhausted")
        self.assertEqual(list(range(5)), output2)
        # Reset Test: DataPipe resets when some of it has been read
        dp1, dp2 = input_dp.fork(num_instances=2)
        output1, output2 = [], []
        for i, (n1, n2) in enumerate(zip(dp1, dp2)):
            output1.append(n1)
            output2.append(n2)
            if i == 4:
                with warnings.catch_warnings(record=True) as wa:
                    _ = iter(dp1)  # Resets both child DataPipes
                self.assertEqual(len(wa), 1)
                self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
                break
        with warnings.catch_warnings(record=True) as wa:
            for i, (n1, n2) in enumerate(zip(dp1, dp2)):
                output1.append(n1)
                output2.append(n2)
            self.assertEqual(len(wa), 1)
            self.assertRegex(str(wa[0].message), r"child DataPipes are not exhausted")
        self.assertEqual(list(range(5)) + list(range(10)), output1)
        self.assertEqual(list(range(5)) + list(range(10)), output2)
        # Reset Test: DataPipe reset, even when some other child DataPipes are not read
        dp1, dp2, dp3 = input_dp.fork(num_instances=3)
        output1, output2 = list(dp1), list(dp2)
        self.assertEqual(list(range(10)), output1)
        self.assertEqual(list(range(10)), output2)
        with warnings.catch_warnings(record=True) as wa:
            self.assertEqual(list(range(10)), list(dp1))  # Resets even though dp3 has not been read
        self.assertEqual(len(wa), 1)
        self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
        output3 = []
        for i, n3 in enumerate(dp3):
            output3.append(n3)
            if i == 4:
                with warnings.catch_warnings(record=True) as wa:
                    output1 = list(dp1)  # Resets even though dp3 is only partially read
                self.assertEqual(len(wa), 1)
                self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
                self.assertEqual(list(range(5)), output3)
                self.assertEqual(list(range(10)), output1)
                break
        self.assertEqual(list(range(10)), list(dp3))  # dp3 has to read from the start again
        # __len__ Test: Each DataPipe inherits the source datapipe's length
        dp1, dp2, dp3 = input_dp.fork(num_instances=3)
        self.assertEqual(len(input_dp), len(dp1))
        self.assertEqual(len(input_dp), len(dp2))
        self.assertEqual(len(input_dp), len(dp3))
        # Pickle Test:
        dp1, dp2, dp3 = input_dp.fork(num_instances=3)
        traverse(dp1)  # This should not raise any error
        for _ in zip(dp1, dp2, dp3):
            pass
        traverse(dp2)  # This should not raise any error either
    def test_mux_iterdatapipe(self):
        """Exercises `mux`: round-robin interleaving that stops with the
        shortest input, plus ``__len__`` behavior."""
        # Functional Test: Elements are yielded one at a time from each DataPipe, until they are all exhausted
        input_dp1 = dp.iter.IterableWrapper(range(4))
        input_dp2 = dp.iter.IterableWrapper(range(4, 8))
        input_dp3 = dp.iter.IterableWrapper(range(8, 12))
        output_dp = input_dp1.mux(input_dp2, input_dp3)
        expected_output = [0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11]
        self.assertEqual(len(expected_output), len(output_dp))
        self.assertEqual(expected_output, list(output_dp))
        # Functional Test: Uneven input Data Pipes
        input_dp1 = dp.iter.IterableWrapper([1, 2, 3, 4])
        input_dp2 = dp.iter.IterableWrapper([10])
        input_dp3 = dp.iter.IterableWrapper([100, 200, 300])
        output_dp = input_dp1.mux(input_dp2, input_dp3)
        expected_output = [1, 10, 100]
        self.assertEqual(len(expected_output), len(output_dp))
        self.assertEqual(expected_output, list(output_dp))
        # Functional Test: Empty Data Pipe
        input_dp1 = dp.iter.IterableWrapper([0, 1, 2, 3])
        input_dp2 = dp.iter.IterableWrapper([])
        output_dp = input_dp1.mux(input_dp2)
        self.assertEqual(len(input_dp2), len(output_dp))
        self.assertEqual(list(input_dp2), list(output_dp))
        # __len__ Test: raises TypeError when __len__ is called and an input doesn't have __len__
        input_dp1 = dp.iter.IterableWrapper(range(10))
        input_dp_no_len = IDP_NoLen(range(10))
        output_dp = input_dp1.mux(input_dp_no_len)
        with self.assertRaises(TypeError):
            len(output_dp)
def test_demux_iterdatapipe(self):
input_dp = dp.iter.IterableWrapper(range(10))
with self.assertRaises(ValueError):
input_dp.demux(num_instances=0, classifier_fn=lambda x: 0)
# Functional Test: split into 2 DataPipes and output them one at a time
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
output1, output2 = list(dp1), list(dp2)
self.assertEqual(list(range(0, 10, 2)), output1)
self.assertEqual(list(range(1, 10, 2)), output2)
# Functional Test: split into 2 DataPipes and output them together
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
output = []
for n1, n2 in zip(dp1, dp2):
output.append((n1, n2))
self.assertEqual([(i, i + 1) for i in range(0, 10, 2)], output)
# Functional Test: values of the same classification are lumped together, and buffer_size = 3 being too small
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: 0 if x >= 5 else 1, buffer_size=4)
it1 = iter(dp1)
with self.assertRaises(BufferError):
next(it1) # Buffer raises because first 5 elements all belong to the a different child
with self.assertRaises(BufferError):
list(dp2)
# Functional Test: values of the same classification are lumped together, and buffer_size = 5 is just enough
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: 0 if x >= 5 else 1, buffer_size=5)
output1, output2 = list(dp1), list(dp2)
self.assertEqual(list(range(5, 10)), output1)
self.assertEqual(list(range(0, 5)), output2)
# Functional Test: values of the same classification are lumped together, and unlimited buffer
with warnings.catch_warnings(record=True) as wa:
dp1, dp2 = input_dp.demux(
num_instances=2,
classifier_fn=lambda x: 0 if x >= 5 else 1,
buffer_size=-1
)
exp_l = 1 if HAS_DILL else 2
self.assertEqual(len(wa), exp_l)
self.assertRegex(str(wa[-1].message), r"Unlimited buffer size is set")
output1, output2 = list(dp1), list(dp2)
self.assertEqual(list(range(5, 10)), output1)
self.assertEqual(list(range(0, 5)), output2)
# Functional Test: classifier returns a value outside of [0, num_instance - 1]
dp0 = input_dp.demux(num_instances=1, classifier_fn=lambda x: x % 2)
it = iter(dp0[0])
with self.assertRaises(ValueError):
next(it)
next(it)
# Reset Test: DataPipe resets when a new iterator is created, even if this datapipe hasn't been read
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
_ = iter(dp1)
output2 = []
with self.assertRaisesRegex(RuntimeError, r"iterator has been invalidated"):
for i, n2 in enumerate(dp2):
output2.append(n2)
if i == 4:
with warnings.catch_warnings(record=True) as wa:
_ = iter(dp1) # This will reset all child DataPipes
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"child DataPipes are not exhausted")
self.assertEqual(list(range(1, 10, 2)), output2)
# Reset Test: DataPipe resets when some of it has been read
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
output1, output2 = [], []
for n1, n2 in zip(dp1, dp2):
output1.append(n1)
output2.append(n2)
if n1 == 4:
break
with warnings.catch_warnings(record=True) as wa:
i1 = iter(dp1) # Reset all child DataPipes
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
for n1, n2 in zip(dp1, dp2):
output1.append(n1)
output2.append(n2)
self.assertEqual([0, 2, 4] + list(range(0, 10, 2)), output1)
self.assertEqual([1, 3, 5] + list(range(1, 10, 2)), output2)
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"child DataPipes are not exhausted")
# Reset Test: DataPipe reset, even when not all child DataPipes are exhausted
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
output1 = list(dp1)
self.assertEqual(list(range(0, 10, 2)), output1)
with warnings.catch_warnings(record=True) as wa:
self.assertEqual(list(range(0, 10, 2)), list(dp1)) # Reset even when dp2 is not read
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
output2 = []
for i, n2 in enumerate(dp2):
output2.append(n2)
if i == 1:
self.assertEqual(list(range(1, 5, 2)), output2)
with warnings.catch_warnings(record=True) as wa:
self.assertEqual(list(range(0, 10, 2)), list(dp1)) # Can reset even when dp2 is partially read
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
break
output2 = list(dp2) # output2 has to read from beginning again
self.assertEqual(list(range(1, 10, 2)), output2)
# Functional Test: drop_none = True
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2 if x % 5 != 0 else None,
drop_none=True)
self.assertEqual([2, 4, 6, 8], list(dp1))
self.assertEqual([1, 3, 7, 9], list(dp2))
# Functional Test: drop_none = False
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2 if x % 5 != 0 else None,
drop_none=False)
it1 = iter(dp1)
with self.assertRaises(ValueError):
next(it1)
# __len__ Test: __len__ not implemented
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
with self.assertRaises(TypeError):
len(dp1) # It is not implemented as we do not know length for each child in advance
with self.assertRaises(TypeError):
len(dp2)
# Pickle Test:
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=odd_or_even)
traverse(dp1) # This should not raise any error
for _ in zip(dp1, dp2):
pass
traverse(dp2) # This should not raise any error either
    def test_map_iterdatapipe(self):
        """Exercises `map`: plain functions, partials, ``__len__`` inheritance
        (including the no-length error path), and reset behavior."""
        target_length = 10
        input_dp = dp.iter.IterableWrapper(range(target_length))
        def fn(item, dtype=torch.float, *, sum=False):
            data = torch.tensor(item, dtype=dtype)
            return data if not sum else data.sum()
        # Functional Test: apply to each element correctly
        map_dp = input_dp.map(fn)
        self.assertEqual(target_length, len(map_dp))
        for x, y in zip(map_dp, range(target_length)):
            self.assertEqual(x, torch.tensor(y, dtype=torch.float))
        # Functional Test: works with partial function
        map_dp = input_dp.map(partial(fn, dtype=torch.int, sum=True))
        for x, y in zip(map_dp, range(target_length)):
            self.assertEqual(x, torch.tensor(y, dtype=torch.int).sum())
        # __len__ Test: inherits length from source DataPipe
        self.assertEqual(target_length, len(map_dp))
        input_dp_nl = IDP_NoLen(range(target_length))
        map_dp_nl = input_dp_nl.map(lambda x: x)
        for x, y in zip(map_dp_nl, range(target_length)):
            self.assertEqual(x, torch.tensor(y, dtype=torch.float))
        # __len__ Test: inherits length from source DataPipe - raises error when invalid
        with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
            len(map_dp_nl)
        # Reset Test: DataPipe resets properly
        n_elements_before_reset = 5
        res_before_reset, res_after_reset = reset_after_n_next_calls(map_dp, n_elements_before_reset)
        self.assertEqual(list(range(n_elements_before_reset)), res_before_reset)
        self.assertEqual(list(range(10)), res_after_reset)
@suppress_warnings  # Suppress warning for lambda fn
def test_map_tuple_list_with_col_iterdatapipe(self):
    """Tests `.map(fn, input_col, output_col)` over tuple/list rows.

    Naming convention: ``fn_<inputs><outputs>`` where ``n`` means multiple,
    e.g. ``fn_1n`` takes one value and returns a tuple of several.
    """
    def fn_11(d):
        return -d
    def fn_1n(d):
        return -d, d
    def fn_n1(d0, d1):
        return d0 + d1
    def fn_nn(d0, d1):
        return -d0, -d1, d0 + d1
    def _helper(ref_fn, fn, input_col=None, output_col=None, error=None):
        # Compares `.map(fn, input_col, output_col)` against a reference map
        # `ref_fn` that transforms whole rows. When `ref_fn` is None, asserts
        # that constructing/iterating raises `error` instead.
        for constr in (list, tuple):
            datapipe = dp.iter.IterableWrapper([constr((0, 1, 2)), constr((3, 4, 5)), constr((6, 7, 8))])
            if ref_fn is None:
                with self.assertRaises(error):
                    res_dp = datapipe.map(fn, input_col, output_col)
                    list(res_dp)
            else:
                res_dp = datapipe.map(fn, input_col, output_col)
                ref_dp = datapipe.map(ref_fn)
                self.assertEqual(list(res_dp), list(ref_dp))
                # Reset: a second full iteration must produce the same result
                self.assertEqual(list(res_dp), list(ref_dp))
    # Replacing with one input column and default output column
    _helper(lambda data: (data[0], -data[1], data[2]), fn_11, 1)
    _helper(lambda data: (data[0], (-data[1], data[1]), data[2]), fn_1n, 1)
    # The index of input column is out of range
    _helper(None, fn_1n, 3, error=IndexError)
    # Unmatched input columns with fn arguments
    _helper(None, fn_n1, 1, error=TypeError)
    # Replacing with multiple input columns and default output column (the left-most input column)
    _helper(lambda data: (data[1], data[2] + data[0]), fn_n1, [2, 0])
    _helper(lambda data: (data[0], (-data[2], -data[1], data[2] + data[1])), fn_nn, [2, 1])
    # output_col can only be specified when input_col is not None
    _helper(None, fn_n1, None, 1, error=ValueError)
    # output_col can only be single-element list or tuple
    _helper(None, fn_n1, None, [0, 1], error=ValueError)
    # Single-element list as output_col
    _helper(lambda data: (-data[1], data[1], data[2]), fn_11, 1, [0])
    # Replacing with one input column and single specified output column
    _helper(lambda data: (-data[1], data[1], data[2]), fn_11, 1, 0)
    _helper(lambda data: (data[0], data[1], (-data[1], data[1])), fn_1n, 1, 2)
    # The index of output column is out of range
    _helper(None, fn_1n, 1, 3, error=IndexError)
    _helper(lambda data: (data[0], data[0] + data[2], data[2]), fn_n1, [0, 2], 1)
    _helper(lambda data: ((-data[1], -data[2], data[1] + data[2]), data[1], data[2]), fn_nn, [1, 2], 0)
    # Appending the output at the end (output_col=-1)
    _helper(lambda data: (*data, -data[1]), fn_11, 1, -1)
    _helper(lambda data: (*data, (-data[1], data[1])), fn_1n, 1, -1)
    _helper(lambda data: (*data, data[0] + data[2]), fn_n1, [0, 2], -1)
    _helper(lambda data: (*data, (-data[1], -data[2], data[1] + data[2])), fn_nn, [1, 2], -1)
@suppress_warnings  # Suppress warning for lambda fn
def test_map_dict_with_col_iterdatapipe(self):
    """Tests `.map(fn, input_col, output_col)` over dict rows.

    Naming convention: ``fn_<inputs><outputs>`` where ``n`` means multiple.
    """
    def fn_11(d):
        return -d
    def fn_1n(d):
        return -d, d
    def fn_n1(d0, d1):
        return d0 + d1
    def fn_nn(d0, d1):
        return -d0, -d1, d0 + d1
    # Prevent modification in-place to support resetting
    def _dict_update(data, newdata, remove_idx=None):
        # Returns a copy of `data` with `newdata` merged in and (optionally)
        # the keys listed in `remove_idx` deleted.
        _data = dict(data)
        _data.update(newdata)
        if remove_idx:
            for idx in remove_idx:
                del _data[idx]
        return _data
    def _helper(ref_fn, fn, input_col=None, output_col=None, error=None):
        # Compares `.map(fn, input_col, output_col)` against a reference map
        # `ref_fn` applied to whole dict rows; if `ref_fn` is None, asserts
        # that `error` is raised instead.
        datapipe = dp.iter.IterableWrapper(
            [{"x": 0, "y": 1, "z": 2},
             {"x": 3, "y": 4, "z": 5},
             {"x": 6, "y": 7, "z": 8}]
        )
        if ref_fn is None:
            with self.assertRaises(error):
                res_dp = datapipe.map(fn, input_col, output_col)
                list(res_dp)
        else:
            res_dp = datapipe.map(fn, input_col, output_col)
            ref_dp = datapipe.map(ref_fn)
            self.assertEqual(list(res_dp), list(ref_dp))
            # Reset: a second full iteration must produce the same result
            self.assertEqual(list(res_dp), list(ref_dp))
    # Replacing with one input column and default output column
    _helper(lambda data: _dict_update(data, {"y": -data["y"]}), fn_11, "y")
    _helper(lambda data: _dict_update(data, {"y": (-data["y"], data["y"])}), fn_1n, "y")
    # The key of input column is not in dict
    _helper(None, fn_1n, "a", error=KeyError)
    # Unmatched input columns with fn arguments
    _helper(None, fn_n1, "y", error=TypeError)
    # Replacing with multiple input columns and default output column (the left-most input column)
    _helper(lambda data: _dict_update(data, {"z": data["x"] + data["z"]}, ["x"]), fn_n1, ["z", "x"])
    _helper(lambda data: _dict_update(
        data, {"z": (-data["z"], -data["y"], data["y"] + data["z"])}, ["y"]), fn_nn, ["z", "y"])
    # output_col can only be specified when input_col is not None
    _helper(None, fn_n1, None, "x", error=ValueError)
    # output_col can only be single-element list or tuple
    _helper(None, fn_n1, None, ["x", "y"], error=ValueError)
    # Single-element list as output_col
    _helper(lambda data: _dict_update(data, {"x": -data["y"]}), fn_11, "y", ["x"])
    # Replacing with one input column and single specified output column
    _helper(lambda data: _dict_update(data, {"x": -data["y"]}), fn_11, "y", "x")
    _helper(lambda data: _dict_update(data, {"z": (-data["y"], data["y"])}), fn_1n, "y", "z")
    _helper(lambda data: _dict_update(data, {"y": data["x"] + data["z"]}), fn_n1, ["x", "z"], "y")
    _helper(lambda data: _dict_update(
        data, {"x": (-data["y"], -data["z"], data["y"] + data["z"])}), fn_nn, ["y", "z"], "x")
    # Adding new key to dict for the output
    _helper(lambda data: _dict_update(data, {"a": -data["y"]}), fn_11, "y", "a")
    _helper(lambda data: _dict_update(data, {"a": (-data["y"], data["y"])}), fn_1n, "y", "a")
    _helper(lambda data: _dict_update(data, {"a": data["x"] + data["z"]}), fn_n1, ["x", "z"], "a")
    _helper(lambda data: _dict_update(
        data, {"a": (-data["y"], -data["z"], data["y"] + data["z"])}), fn_nn, ["y", "z"], "a")
def test_collate_iterdatapipe(self):
    """Tests `.collate`: default collation, custom/partial collate functions,
    reset behavior, and `__len__` inheritance."""
    arrs = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    input_dp = dp.iter.IterableWrapper(arrs)
    def _collate_fn(batch, default_type=torch.float):
        # Custom collation: sums each batch into a single scalar tensor.
        return torch.tensor(sum(batch), dtype=default_type)
    # Functional Test: defaults to the default collate function when a custom one is not specified
    collate_dp = input_dp.collate()
    for x, y in zip(arrs, collate_dp):
        self.assertEqual(torch.tensor(x), y)
    # Functional Test: custom collate function
    collate_dp = input_dp.collate(collate_fn=_collate_fn)
    for x, y in zip(arrs, collate_dp):
        self.assertEqual(torch.tensor(sum(x), dtype=torch.float), y)
    # Functional Test: custom, partial collate function (dtype overridden to int)
    collate_dp = input_dp.collate(partial(_collate_fn, default_type=torch.int))
    for x, y in zip(arrs, collate_dp):
        self.assertEqual(torch.tensor(sum(x), dtype=torch.int), y)
    # Reset Test: reset the DataPipe and results are still correct
    n_elements_before_reset = 1
    res_before_reset, res_after_reset = reset_after_n_next_calls(collate_dp, n_elements_before_reset)
    # sum of the first row [1, 2, 3] is 6
    self.assertEqual([torch.tensor(6, dtype=torch.int)], res_before_reset)
    for x, y in zip(arrs, res_after_reset):
        self.assertEqual(torch.tensor(sum(x), dtype=torch.int), y)
    # __len__ Test: __len__ is inherited
    self.assertEqual(len(input_dp), len(collate_dp))
    # __len__ Test: verify that it has no valid __len__ when the source doesn't have it
    input_dp_nl = IDP_NoLen(arrs)
    collate_dp_nl = input_dp_nl.collate()
    with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
        len(collate_dp_nl)
    for x, y in zip(arrs, collate_dp_nl):
        self.assertEqual(torch.tensor(x), y)
def test_batch_iterdatapipe(self):
    """Tests `.batch`: invalid batch_size, default keep-last behavior,
    drop_last, `__len__`, and reset."""
    arrs = list(range(10))
    input_dp = dp.iter.IterableWrapper(arrs)
    # Functional Test: raise error when input argument `batch_size = 0`
    with self.assertRaises(AssertionError):
        input_dp.batch(batch_size=0)
    # Functional Test: by default, do not drop the last batch
    bs = 3
    batch_dp = input_dp.batch(batch_size=bs)
    self.assertEqual(len(batch_dp), 4)  # ceil(10 / 3) == 4
    for i, batch in enumerate(batch_dp):
        # last batch holds only the single leftover element
        self.assertEqual(len(batch), 1 if i == 3 else bs)
        self.assertEqual(batch, arrs[i * bs: i * bs + len(batch)])
    # Functional Test: Drop the last batch when specified
    bs = 4
    batch_dp = input_dp.batch(batch_size=bs, drop_last=True)
    for i, batch in enumerate(batch_dp):
        self.assertEqual(batch, arrs[i * bs: i * bs + len(batch)])
    # __len__ test: verifying that the overall length and of each batch is correct
    for i, batch in enumerate(batch_dp):
        self.assertEqual(len(batch), bs)  # every batch is full since drop_last=True
    # __len__ Test: with drop_last, only the 2 full batches count
    self.assertEqual(len(batch_dp), 2)
    # __len__ Test: the length is missing if the source DataPipe doesn't have length
    input_dp_nl = IDP_NoLen(range(10))
    batch_dp_nl = input_dp_nl.batch(batch_size=2)
    with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
        len(batch_dp_nl)
    # Reset Test: Ensures that the DataPipe can properly reset
    n_elements_before_reset = 1
    res_before_reset, res_after_reset = reset_after_n_next_calls(batch_dp, n_elements_before_reset)
    self.assertEqual([[0, 1, 2, 3]], res_before_reset)
    self.assertEqual([[0, 1, 2, 3], [4, 5, 6, 7]], res_after_reset)
def test_unbatch_iterdatapipe(self):
    """Tests `.unbatch`: flattening one level, multiple levels, all levels
    (`unbatch_level=-1`), invalid levels, and reset."""
    target_length = 6
    prebatch_dp = dp.iter.IterableWrapper(range(target_length))
    # Functional Test: Unbatch DataPipe should be the same as pre-batch DataPipe
    input_dp = prebatch_dp.batch(3)
    unbatch_dp = input_dp.unbatch()
    self.assertEqual(len(list(unbatch_dp)), target_length)  # __len__ is as expected
    for i, res in zip(range(target_length), unbatch_dp):
        self.assertEqual(i, res)
    # Functional Test: unbatch works for an input with nested levels
    input_dp = dp.iter.IterableWrapper([[0, 1, 2], [3, 4, 5]])
    unbatch_dp = input_dp.unbatch()
    self.assertEqual(len(list(unbatch_dp)), target_length)
    for i, res in zip(range(target_length), unbatch_dp):
        self.assertEqual(i, res)
    input_dp = dp.iter.IterableWrapper([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
    # Functional Test: one-level unbatch on a doubly-nested input removes
    # only the outer level
    unbatch_dp = input_dp.unbatch()
    expected_dp = [[0, 1], [2, 3], [4, 5], [6, 7]]
    self.assertEqual(len(list(unbatch_dp)), 4)
    for j, res in zip(expected_dp, unbatch_dp):
        self.assertEqual(j, res)
    # Functional Test: unbatching multiple levels at the same time
    unbatch_dp = input_dp.unbatch(unbatch_level=2)
    expected_dp2 = [0, 1, 2, 3, 4, 5, 6, 7]
    self.assertEqual(len(list(unbatch_dp)), 8)
    for i, res in zip(expected_dp2, unbatch_dp):
        self.assertEqual(i, res)
    # Functional Test: unbatching all levels at the same time
    unbatch_dp = input_dp.unbatch(unbatch_level=-1)
    self.assertEqual(len(list(unbatch_dp)), 8)
    for i, res in zip(expected_dp2, unbatch_dp):
        self.assertEqual(i, res)
    # Functional Test: raises error when input unbatch_level is less than -1
    input_dp = dp.iter.IterableWrapper([[0, 1, 2], [3, 4, 5]])
    with self.assertRaises(ValueError):
        unbatch_dp = input_dp.unbatch(unbatch_level=-2)
        for i in unbatch_dp:
            print(i)
    # Functional Test: raises error when input unbatch_level is too high
    with self.assertRaises(IndexError):
        unbatch_dp = input_dp.unbatch(unbatch_level=5)
        for i in unbatch_dp:
            print(i)
    # Reset Test: unbatch_dp resets properly
    input_dp = dp.iter.IterableWrapper([[0, 1, 2], [3, 4, 5]])
    unbatch_dp = input_dp.unbatch(unbatch_level=-1)
    n_elements_before_reset = 3
    res_before_reset, res_after_reset = reset_after_n_next_calls(unbatch_dp, n_elements_before_reset)
    self.assertEqual([0, 1, 2], res_before_reset)
    self.assertEqual([0, 1, 2, 3, 4, 5], res_after_reset)
def test_filter_datapipe(self):
    """Tests `.filter`: partial predicates, non-bool predicate rejection,
    `input_col` selection, missing `__len__`, and reset.

    Fixes vs. previous version: removed the unused local `temp` and
    corrected the "Funtional" comment typo.
    """
    input_ds = dp.iter.IterableWrapper(range(10))

    def _filter_fn(data, val):
        return data >= val

    # Functional Test: filter works with partial function
    filter_dp = input_ds.filter(partial(_filter_fn, val=5))
    self.assertEqual(list(filter_dp), list(range(5, 10)))

    def _non_bool_fn(data):
        return 1

    # Functional Test: filter function must return bool
    filter_dp = input_ds.filter(filter_fn=_non_bool_fn)
    with self.assertRaises(ValueError):
        list(filter_dp)
    # Functional Test: Specify input_col
    tuple_input_ds = dp.iter.IterableWrapper([(d - 1, d, d + 1) for d in range(10)])
    # Single input_col: predicate sees only element 1 of each tuple
    input_col_1_dp = tuple_input_ds.filter(partial(_filter_fn, val=5), input_col=1)
    self.assertEqual(list(input_col_1_dp), [(d - 1, d, d + 1) for d in range(5, 10)])
    # Multiple input_col: predicate receives elements 0 and 2 as args
    def _mul_filter_fn(a, b):
        return a + b < 10

    input_col_2_dp = tuple_input_ds.filter(_mul_filter_fn, input_col=[0, 2])
    self.assertEqual(list(input_col_2_dp), [(d - 1, d, d + 1) for d in range(5)])
    # __len__ Test: DataPipe has no valid len
    with self.assertRaisesRegex(TypeError, r"has no len"):
        len(filter_dp)
    # Reset Test: DataPipe resets correctly
    filter_dp = input_ds.filter(partial(_filter_fn, val=5))
    n_elements_before_reset = 3
    res_before_reset, res_after_reset = reset_after_n_next_calls(filter_dp, n_elements_before_reset)
    self.assertEqual(list(range(5, 10))[:n_elements_before_reset], res_before_reset)
    self.assertEqual(list(range(5, 10)), res_after_reset)
def test_sampler_iterdatapipe(self):
    """Tests `dp.iter.Sampler`: default SequentialSampler, RandomSampler
    construction, and the `__len__` requirement on the source."""
    input_dp = dp.iter.IterableWrapper(range(10))
    # Default SequentialSampler: yields elements in order
    sampled_dp = dp.iter.Sampler(input_dp)  # type: ignore[var-annotated]
    self.assertEqual(len(sampled_dp), 10)
    for i, x in enumerate(sampled_dp):
        self.assertEqual(x, i)
    # RandomSampler
    # NOTE(review): `random_sampled_dp` is constructed but never iterated or
    # asserted on — presumably this only checks that construction succeeds.
    random_sampled_dp = dp.iter.Sampler(input_dp, sampler=RandomSampler, sampler_kwargs={
        'replacement': True})  # type: ignore[var-annotated] # noqa: B950
    # Requires `__len__` to build SamplerDataPipe
    input_dp_nolen = IDP_NoLen(range(10))
    with self.assertRaises(AssertionError):
        sampled_dp = dp.iter.Sampler(input_dp_nolen)
def test_stream_reader_iterdatapipe(self):
    """Tests `.read_from_stream` on (name, stream) pairs: whole-stream reads,
    fixed-size chunk reads, and absence of `__len__`."""
    from io import StringIO
    input_dp = dp.iter.IterableWrapper([("f1", StringIO("abcde")), ("f2", StringIO("bcdef"))])
    expected_res = ["abcde", "bcdef"]
    # Functional Test: Read full chunk
    dp1 = input_dp.read_from_stream()
    self.assertEqual([d[1] for d in dp1], expected_res)
    # Functional Test: Read one character at a time (chunk=1)
    dp2 = input_dp.read_from_stream(chunk=1)
    self.assertEqual([d[1] for d in dp2], [c for s in expected_res for c in s])
    # `__len__` Test: stream readers have no valid length
    with self.assertRaises(TypeError):
        len(dp1)
def test_shuffle_iterdatapipe(self):
    """Tests `.shuffle`: invalid buffer size, permutation correctness across
    buffer sizes, determinism under DataLoader with seeding, missing
    `__len__`, and `set_shuffle(False)`.

    Fixes vs. previous version: `_create_dp` now uses its `buffer_size`
    parameter instead of silently closing over the loop variable `bs`
    (same runtime behavior, since it was always called with `bs`).
    """
    exp = list(range(100))
    input_ds = dp.iter.IterableWrapper(exp)
    with self.assertRaises(AssertionError):
        shuffle_dp = input_ds.shuffle(buffer_size=0)

    def _create_dp(buffer_size):
        # Fresh source each time so each shuffled pipe is independent.
        input_ds = dp.iter.IterableWrapper(list(range(100)))
        return input_ds.shuffle(buffer_size=buffer_size).sharding_filter()

    for bs in (5, 20, 33):
        shuffle_dp = _create_dp(bs)
        self.assertEqual(len(shuffle_dp), len(exp))
        torch.manual_seed(123)
        res = list(shuffle_dp)
        # A shuffle is a permutation: same elements, any order.
        self.assertEqual(sorted(res), exp)
        # Test Deterministic behavior across DataLoader configurations
        for num_workers, pw in itertools.product((0, 1, 2), (True, False)):
            if num_workers == 0 and pw:
                continue  # persistent_workers requires num_workers > 0
            mp_ctx = "spawn" if num_workers > 0 else None
            dl = DataLoader(
                shuffle_dp,
                num_workers=num_workers,
                shuffle=True,
                multiprocessing_context=mp_ctx,
                worker_init_fn=_worker_init_fn,
                persistent_workers=pw
            )
            # No seed: still a permutation of the input
            dl_res_ns = list(dl)
            self.assertEqual(len(dl_res_ns), len(exp))
            self.assertEqual(sorted(dl_res_ns), sorted(exp))
            # Same seeds: identical order across epochs
            dl_res = []
            for epoch in range(2):
                torch.manual_seed(123)
                dl_res.append(list(dl))
            self.assertEqual(dl_res[0], dl_res[1])
            # Different seeds: same multiset, different order
            torch.manual_seed(321)
            dl_res.append(list(dl))
            self.assertEqual(len(dl_res[0]), len(dl_res[2]))
            self.assertNotEqual(dl_res[0], dl_res[2])
            self.assertEqual(sorted(dl_res[0]), sorted(dl_res[2]))
    shuffle_dp_nl = IDP_NoLen(range(20)).shuffle(buffer_size=5)
    with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
        len(shuffle_dp_nl)
    # Test: deactivate shuffling via set_shuffle
    unshuffled_dp = input_ds.shuffle().set_shuffle(False)
    self.assertEqual(list(unshuffled_dp), list(input_ds))
def test_zip_iterdatapipe(self):
    """Tests `dp.iter.Zipper`: input validation, truncation to the shortest
    input, `__len__` semantics, and reset behavior."""
    # Functional Test: raises TypeError when an input is not of type `IterDataPipe`
    with self.assertRaises(TypeError):
        dp.iter.Zipper(dp.iter.IterableWrapper(range(10)), list(range(10)))  # type: ignore[arg-type]
    # Functional Test: raises TypeError when an input does not have valid length
    zipped_dp = dp.iter.Zipper(dp.iter.IterableWrapper(
        range(10)), IDP_NoLen(range(5)))  # type: ignore[var-annotated]
    with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
        len(zipped_dp)
    # Functional Test: zips the results properly (pairs up to the shorter input)
    expected_pairs = [(i, i) for i in range(5)]
    self.assertEqual(list(zipped_dp), expected_pairs)
    # Functional Test: zips the inputs properly even when lengths are different (zips to the shortest)
    zipped_dp = dp.iter.Zipper(dp.iter.IterableWrapper(range(10)), dp.iter.IterableWrapper(range(5)))
    # __len__ Test: length matches the length of the shortest input
    self.assertEqual(len(zipped_dp), 5)
    # Reset Test: partial read, then full read after reset
    n_elements_before_reset = 3
    res_before_reset, res_after_reset = reset_after_n_next_calls(zipped_dp, n_elements_before_reset)
    self.assertEqual(expected_pairs[:n_elements_before_reset], res_before_reset)
    self.assertEqual(expected_pairs, res_after_reset)
class TestFunctionalMapDataPipe(TestCase):
    """Tests for functional-form MapDataPipes: serialization round-trips and
    the behavior of SequenceWrapper / Concater / Zipper / Shuffler / Mapper /
    Batcher.

    Fixes vs. previous version:
    - `test_zip_mapdatapipe` asserted on `zip_dp` twice instead of checking
      `zip_dp2` the second time (copy-paste bug).
    - `_serialization_test_helper` uses a bare `raise` to preserve the
      original traceback.
    """

    def _serialization_test_helper(self, datapipe, use_dill):
        # Round-trip `datapipe` through pickle (or dill) and verify the
        # deserialized copy yields the same elements.
        if use_dill:
            serialized_dp = dill.dumps(datapipe)
            deserialized_dp = dill.loads(serialized_dp)
        else:
            serialized_dp = pickle.dumps(datapipe)
            deserialized_dp = pickle.loads(serialized_dp)
        try:
            self.assertEqual(list(datapipe), list(deserialized_dp))
        except AssertionError:
            # Identify the failing DataPipe, then re-raise with the
            # original traceback intact.
            print(f"{datapipe} is failing.")
            raise

    def _serialization_test_for_single_dp(self, dp, use_dill=False):
        # NOTE: the parameter name `dp` shadows the module-level datapipes
        # alias; kept for signature compatibility.
        # 1. Testing for serialization before any iteration starts
        self._serialization_test_helper(dp, use_dill)
        # 2. Testing for serialization after DataPipe is partially read
        it = iter(dp)
        _ = next(it)
        self._serialization_test_helper(dp, use_dill)
        # 3. Testing for serialization after DataPipe is fully read
        _ = list(it)
        self._serialization_test_helper(dp, use_dill)

    def test_serializable(self):
        """Every standard MapDataPipe must survive pickling at any stage of
        iteration. Entries are (class, custom_input_or_None, args, kwargs)."""
        picklable_datapipes: List = [
            (dp.map.Batcher, None, (2,), {}),
            (dp.map.Concater, None, (dp.map.SequenceWrapper(range(10)),), {}),
            (dp.map.Mapper, None, (), {}),
            (dp.map.Mapper, None, (_fake_fn,), {}),
            (dp.map.Mapper, None, (partial(_fake_add, 1),), {}),
            (dp.map.SequenceWrapper, range(10), (), {}),
            (dp.map.Shuffler, dp.map.SequenceWrapper([0] * 5), (), {}),
            (dp.map.Zipper, None, (dp.map.SequenceWrapper(range(10)),), {}),
        ]
        for dpipe, custom_input, dp_args, dp_kwargs in picklable_datapipes:
            if custom_input is None:
                custom_input = dp.map.SequenceWrapper(range(10))
            datapipe = dpipe(custom_input, *dp_args, **dp_kwargs)  # type: ignore[call-arg]
            self._serialization_test_for_single_dp(datapipe)

    def test_serializable_with_dill(self):
        """Only for DataPipes that take in a function as argument"""
        input_dp = dp.map.SequenceWrapper(range(10))
        datapipes_with_lambda_fn: List[
            Tuple[Type[MapDataPipe], Tuple, Dict[str, Any]]
        ] = [
            (dp.map.Mapper, (lambda_fn1,), {}),
        ]

        def _local_fns():
            def _fn1(x):
                return x
            return _fn1

        fn1 = _local_fns()
        datapipes_with_local_fn: List[
            Tuple[Type[MapDataPipe], Tuple, Dict[str, Any]]
        ] = [
            (dp.map.Mapper, (fn1,), {}),
        ]
        if HAS_DILL:
            # dill can serialize lambdas and local functions
            for dpipe, dp_args, dp_kwargs in datapipes_with_lambda_fn + datapipes_with_local_fn:
                _ = dill.dumps(dpipe(input_dp, *dp_args, **dp_kwargs))  # type: ignore[call-arg]
        else:
            # Without dill, construction warns and pickling fails
            msgs = (
                r"^Lambda function is not supported by pickle",
                r"^Local function is not supported by pickle"
            )
            for dps, msg in zip((datapipes_with_lambda_fn, datapipes_with_local_fn), msgs):
                for dpipe, dp_args, dp_kwargs in dps:
                    with self.assertWarnsRegex(UserWarning, msg):
                        datapipe = dpipe(input_dp, *dp_args, **dp_kwargs)  # type: ignore[call-arg]
                    with self.assertRaises((pickle.PicklingError, AttributeError)):
                        pickle.dumps(datapipe)

    def test_sequence_wrapper_datapipe(self):
        """SequenceWrapper: element equality, deepcopy semantics, reset, len."""
        seq = list(range(10))
        input_dp = dp.map.SequenceWrapper(seq)
        # Functional Test: all elements are equal in the same order
        self.assertEqual(seq, list(input_dp))
        # Functional Test: confirm deepcopy works by default
        seq.append(11)
        self.assertEqual(list(range(10)), list(input_dp))  # input_dp shouldn't have 11
        # Functional Test: non-deepcopy version is working
        seq2 = [1, 2, 3]
        input_dp_non_deep = dp.map.SequenceWrapper(seq2, deepcopy=False)
        seq2.append(4)
        self.assertEqual(list(seq2), list(input_dp_non_deep))  # should have 4
        # Reset Test: reset the DataPipe (rebind seq to the expected contents)
        seq = list(range(10))
        n_elements_before_reset = 5
        res_before_reset, res_after_reset = reset_after_n_next_calls(input_dp, n_elements_before_reset)
        self.assertEqual(list(range(5)), res_before_reset)
        self.assertEqual(seq, res_after_reset)
        # __len__ Test: inherits length from sequence
        self.assertEqual(len(seq), len(input_dp))

    def test_concat_mapdatapipe(self):
        """Concater: input validation, indexed access, total length."""
        input_dp1 = dp.map.SequenceWrapper(range(10))
        input_dp2 = dp.map.SequenceWrapper(range(5))
        with self.assertRaisesRegex(ValueError, r"Expected at least one DataPipe"):
            dp.map.Concater()
        with self.assertRaisesRegex(TypeError, r"Expected all inputs to be `MapDataPipe`"):
            dp.map.Concater(input_dp1, ())  # type: ignore[arg-type]
        concat_dp = input_dp1.concat(input_dp2)
        self.assertEqual(len(concat_dp), 15)
        for index in range(15):
            self.assertEqual(concat_dp[index], (list(range(10)) + list(range(5)))[index])
        self.assertEqual(list(concat_dp), list(range(10)) + list(range(5)))

    def test_zip_mapdatapipe(self):
        """Zipper: input validation, tuple contents, out-of-range index,
        interaction with Shuffler, and shortest-input length."""
        input_dp1 = dp.map.SequenceWrapper(range(10))
        input_dp2 = dp.map.SequenceWrapper(range(5))
        input_dp3 = dp.map.SequenceWrapper(range(15))
        # Functional Test: requires at least one input DataPipe
        with self.assertRaisesRegex(ValueError, r"Expected at least one DataPipe"):
            dp.map.Zipper()
        # Functional Test: all inputs must be MapDataPipes
        with self.assertRaisesRegex(TypeError, r"Expected all inputs to be `MapDataPipe`"):
            dp.map.Zipper(input_dp1, ())  # type: ignore[arg-type]
        # Functional Test: Zip the elements up as a tuples
        zip_dp = input_dp1.zip(input_dp2, input_dp3)
        self.assertEqual([(i, i, i) for i in range(5)], [zip_dp[i] for i in range(5)])
        # Functional Test: Raise IndexError when index equal or exceed the length of the shortest DataPipe
        with self.assertRaisesRegex(IndexError, r"out of range"):
            input_dp1.zip(input_dp2, input_dp3)[5]
        # Functional Test: Ensure `zip` can combine `Shuffler` with others
        dp1 = dp.map.SequenceWrapper(range(10))
        shuffle_dp1 = dp1.shuffle()
        dp2 = dp.map.SequenceWrapper(range(10))
        shuffle_dp2 = dp2.shuffle()
        zip_dp = shuffle_dp1.zip(shuffle_dp2)
        self.assertEqual(10, len(list(zip_dp)))
        zip_dp2 = shuffle_dp1.zip(dp2)
        # Bug fix: previously re-asserted on `zip_dp` instead of `zip_dp2`.
        self.assertEqual(10, len(list(zip_dp2)))
        # __len__ Test: returns the length of the shortest DataPipe
        zip_dp = input_dp1.zip(input_dp2, input_dp3)
        self.assertEqual(5, len(zip_dp))

    def test_shuffler_mapdatapipe(self):
        """Shuffler: default 0-based indices, custom indices, reset, len."""
        input_dp1 = dp.map.SequenceWrapper(range(10))
        input_dp2 = dp.map.SequenceWrapper({'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5})
        # Functional Test: Assumes 0-index when indices is not given
        shuffler_dp = input_dp1.shuffle()
        self.assertEqual(set(range(10)), set(shuffler_dp))
        # Functional Test: Custom indices are working
        shuffler_dp = dp.map.Shuffler(input_dp2, indices=['a', 'b', 'c', 'd', 'e'])
        self.assertEqual(set(range(1, 6)), set(shuffler_dp))
        # Reset Test:
        shuffler_dp = input_dp1.shuffle()
        n_elements_before_reset = 5
        res_before_reset, res_after_reset = reset_after_n_next_calls(shuffler_dp, n_elements_before_reset)
        self.assertEqual(5, len(res_before_reset))
        for x in res_before_reset:
            self.assertTrue(x in set(range(10)))
        self.assertEqual(set(range(10)), set(res_after_reset))
        # __len__ Test: returns the length of the input DataPipe
        shuffler_dp = input_dp1.shuffle()
        self.assertEqual(10, len(shuffler_dp))

    def test_map_mapdatapipe(self):
        """Mapper over a MapDataPipe: indexed access applies fn per element."""
        arr = range(10)
        input_dp = dp.map.SequenceWrapper(arr)

        def fn(item, dtype=torch.float, *, sum=False):
            data = torch.tensor(item, dtype=dtype)
            return data if not sum else data.sum()

        map_dp = input_dp.map(fn)
        self.assertEqual(len(input_dp), len(map_dp))
        for index in arr:
            self.assertEqual(
                map_dp[index], torch.tensor(input_dp[index], dtype=torch.float)
            )
        map_dp = input_dp.map(partial(fn, dtype=torch.int, sum=True))
        self.assertEqual(len(input_dp), len(map_dp))
        for index in arr:
            self.assertEqual(
                map_dp[index], torch.tensor(input_dp[index], dtype=torch.int).sum()
            )

    def test_batch_mapdatapipe(self):
        """Batcher over a MapDataPipe: keep/drop last, nesting, reset, len."""
        arr = list(range(13))
        input_dp = dp.map.SequenceWrapper(arr)
        # Functional Test: batches top level by default
        batch_dp = dp.map.Batcher(input_dp, batch_size=2)
        self.assertEqual([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12]], list(batch_dp))
        # Functional Test: drop_last on command
        batch_dp = dp.map.Batcher(input_dp, batch_size=2, drop_last=True)
        self.assertEqual([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]], list(batch_dp))
        # Functional Test: nested batching
        batch_dp_2 = batch_dp.batch(batch_size=3)
        self.assertEqual([[[0, 1], [2, 3], [4, 5]], [[6, 7], [8, 9], [10, 11]]], list(batch_dp_2))
        # Reset Test:
        n_elements_before_reset = 3
        res_before_reset, res_after_reset = reset_after_n_next_calls(batch_dp, n_elements_before_reset)
        self.assertEqual([[0, 1], [2, 3], [4, 5]], res_before_reset)
        self.assertEqual([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]], res_after_reset)
        # __len__ Test:
        self.assertEqual(6, len(batch_dp))
        self.assertEqual(2, len(batch_dp_2))
# Whether `class X(Generic[T], NamedTuple)` can be defined on this interpreter:
# - Python 3.6: metaclass conflict between Generic and NamedTuple
# - Python 3.9+: multiple inheritance with NamedTuple is not supported
# so it is only allowed on 3.7 <= version < 3.9.
_generic_namedtuple_allowed = sys.version_info >= (3, 7) and sys.version_info < (3, 9)
if _generic_namedtuple_allowed:
    # A generic NamedTuple, used by typing tests to exercise unsupported
    # type annotations on DataPipes.
    class InvalidData(Generic[T_co], NamedTuple):
        name: str
        data: T_co
class TestTyping(TestCase):
def test_isinstance(self):
    """Distinct IterDataPipe subclasses are distinguishable by isinstance."""
    class A(IterDataPipe):
        pass

    class B(IterDataPipe):
        pass

    instance = A()
    self.assertIsInstance(instance, A)
    self.assertNotIsInstance(instance, B)
def test_protocol(self):
    """An IterDataPipe can be parametrized with a Protocol type without error."""
    # `typing.Protocol` only exists on newer Pythons; fall back to the
    # private `_Protocol` otherwise.
    try:
        from typing import Protocol  # type: ignore[attr-defined]
    except ImportError:
        from typing import _Protocol  # type: ignore[attr-defined]
        Protocol = _Protocol

    class MyProtocol(Protocol):
        pass

    class ProtocolDP(IterDataPipe[MyProtocol]):
        pass
@skipTyping
def test_subtype(self):
    """Exercises the custom `issubtype` relation used by typed DataPipes."""
    from torch.utils.data.datapipes._typing import issubtype
    basic_type = (int, str, bool, float, complex,
                  list, tuple, dict, set, T_co)
    # Reflexivity; Any is a supertype of everything; only T_co accepts Any.
    for t in basic_type:
        self.assertTrue(issubtype(t, t))
        self.assertTrue(issubtype(t, Any))
        if t == T_co:
            self.assertTrue(issubtype(Any, t))
        else:
            self.assertFalse(issubtype(Any, t))
    # Distinct basic types are unrelated unless the supertype is T_co.
    for t1, t2 in itertools.product(basic_type, basic_type):
        if t1 == t2 or t2 == T_co:
            self.assertTrue(issubtype(t1, t2))
        else:
            self.assertFalse(issubtype(t1, t2))
    T = TypeVar('T', int, str)
    S = TypeVar('S', bool, Union[str, int], Tuple[int, T])  # type: ignore[valid-type]
    # (subtype, supertype) pairs where the relation holds one way only.
    types = ((int, Optional[int]),
             (List, Union[int, list]),
             (Tuple[int, str], S),
             (Tuple[int, str], tuple),
             (T, S),
             (S, T_co),
             (T, Union[S, Set]))
    for sub, par in types:
        self.assertTrue(issubtype(sub, par))
        self.assertFalse(issubtype(par, sub))
    # Subscripted generics preserve the relation element-wise.
    subscriptable_types = {
        List: 1,
        Tuple: 2,  # use 2 parameters
        Set: 1,
        Dict: 2,
    }
    for subscript_type, n in subscriptable_types.items():
        for ts in itertools.combinations(types, n):
            subs, pars = zip(*ts)
            sub = subscript_type[subs]  # type: ignore[index]
            par = subscript_type[pars]  # type: ignore[index]
            self.assertTrue(issubtype(sub, par))
            self.assertFalse(issubtype(par, sub))
            # Non-recursive check: only the outer container type is compared
            self.assertTrue(issubtype(par, sub, recursive=False))
@skipTyping
def test_issubinstance(self):
    """Exercises `issubinstance`, the runtime value-vs-type check for typed
    DataPipes, over basic types, containers, dicts, and tuples."""
    from torch.utils.data.datapipes._typing import issubinstance
    basic_data = (1, '1', True, 1., complex(1., 0.))
    basic_type = (int, str, bool, float, complex)
    S = TypeVar('S', bool, Union[str, int])
    for d in basic_data:
        # Every value matches Any and the covariant T_co.
        self.assertTrue(issubinstance(d, Any))
        self.assertTrue(issubinstance(d, T_co))
        # S is constrained to bool and Union[str, int] only.
        if type(d) in (bool, int, str):
            self.assertTrue(issubinstance(d, S))
        else:
            self.assertFalse(issubinstance(d, S))
        # Exact-type matches only.
        for t in basic_type:
            if type(d) == t:
                self.assertTrue(issubinstance(d, t))
            else:
                self.assertFalse(issubinstance(d, t))
    # list/set: mixed element types match the bare and T_co-parametrized
    # containers but not the int-parametrized ones
    dt = (([1, '1', 2], List), (set({1, '1', 2}), Set))
    for d, t in dt:
        self.assertTrue(issubinstance(d, t))
        self.assertTrue(issubinstance(d, t[T_co]))  # type: ignore[index]
        self.assertFalse(issubinstance(d, t[int]))  # type: ignore[index]
    # dict: value types are heterogeneous (int and float)
    d = dict({'1': 1, '2': 2.})
    self.assertTrue(issubinstance(d, Dict))
    self.assertTrue(issubinstance(d, Dict[str, T_co]))
    self.assertFalse(issubinstance(d, Dict[str, int]))
    # tuple: element-wise positional matching
    d = (1, '1', 2)
    self.assertTrue(issubinstance(d, Tuple))
    self.assertTrue(issubinstance(d, Tuple[int, str, T_co]))
    self.assertFalse(issubinstance(d, Tuple[int, Any]))
    self.assertFalse(issubinstance(d, Tuple[int, int, int]))
# Static checking annotation
@skipTyping
def test_compile_time(self):
    """Checks the class-creation-time validation that compares a DataPipe's
    declared type parameter against its `__iter__` return annotation."""
    # Invalid: `__iter__` must be annotated to return an Iterator
    with self.assertRaisesRegex(TypeError, r"Expected 'Iterator' as the return"):
        class InvalidDP1(IterDataPipe[int]):
            def __iter__(self) -> str:  # type: ignore[misc, override]
                yield 0
    # Invalid: Iterator element type must be a subtype of the declared param
    with self.assertRaisesRegex(TypeError, r"Expected return type of '__iter__'"):
        class InvalidDP2(IterDataPipe[Tuple]):
            def __iter__(self) -> Iterator[int]:  # type: ignore[override]
                yield 0
    with self.assertRaisesRegex(TypeError, r"Expected return type of '__iter__'"):
        class InvalidDP3(IterDataPipe[Tuple[int, str]]):
            def __iter__(self) -> Iterator[tuple]:  # type: ignore[override]
                yield (0,)
    if _generic_namedtuple_allowed:
        # Generic NamedTuples cannot be used as type parameters
        with self.assertRaisesRegex(TypeError, r"is not supported by Python typing"):
            class InvalidDP4(IterDataPipe["InvalidData[int]"]):  # type: ignore[type-arg, misc]
                pass

    # Valid: annotation matches the declared parameter exactly
    class DP1(IterDataPipe[Tuple[int, str]]):
        def __init__(self, length):
            self.length = length

        def __iter__(self) -> Iterator[Tuple[int, str]]:
            for d in range(self.length):
                yield d, str(d)

    self.assertTrue(issubclass(DP1, IterDataPipe))
    dp1 = DP1(10)
    # Instances share the class-level type (mutual subtype == equality)
    self.assertTrue(DP1.type.issubtype(dp1.type) and dp1.type.issubtype(DP1.type))  # type: ignore[attr-defined]
    dp1_ = DP1(5)
    self.assertEqual(dp1.type, dp1_.type)
    # A concretely-typed DataPipe cannot be re-subscripted
    with self.assertRaisesRegex(TypeError, r"is not a generic class"):
        class InvalidDP5(DP1[tuple]):  # type: ignore[type-arg]
            def __iter__(self) -> Iterator[tuple]:  # type: ignore[override]
                yield (0,)

    # Valid: plain TypeVar parameter
    class DP2(IterDataPipe[T_co]):
        def __iter__(self) -> Iterator[T_co]:
            for d in range(10):
                yield d  # type: ignore[misc]

    self.assertTrue(issubclass(DP2, IterDataPipe))
    dp2 = DP2()  # type: ignore[var-annotated]
    self.assertTrue(DP2.type.issubtype(dp2.type) and dp2.type.issubtype(DP2.type))  # type: ignore[attr-defined]
    dp2_ = DP2()  # type: ignore[var-annotated]
    self.assertEqual(dp2.type, dp2_.type)

    class DP3(IterDataPipe[Tuple[T_co, str]]):
        r""" DataPipe without fixed type with __init__ function"""
        def __init__(self, datasource):
            self.datasource = datasource

        def __iter__(self) -> Iterator[Tuple[T_co, str]]:
            for d in self.datasource:
                yield d, str(d)

    self.assertTrue(issubclass(DP3, IterDataPipe))
    dp3 = DP3(range(10))  # type: ignore[var-annotated]
    self.assertTrue(DP3.type.issubtype(dp3.type) and dp3.type.issubtype(DP3.type))  # type: ignore[attr-defined]
    dp3_ = DP3(5)  # type: ignore[var-annotated]
    self.assertEqual(dp3.type, dp3_.type)

    class DP4(IterDataPipe[tuple]):
        r""" DataPipe without __iter__ annotation"""
        def __iter__(self):
            raise NotImplementedError

    self.assertTrue(issubclass(DP4, IterDataPipe))
    dp4 = DP4()
    # Unannotated __iter__ falls back to the declared class parameter
    self.assertTrue(dp4.type.param == tuple)

    class DP5(IterDataPipe):
        r""" DataPipe without type annotation"""
        def __iter__(self) -> Iterator[str]:
            raise NotImplementedError

    self.assertTrue(issubclass(DP5, IterDataPipe))
    dp5 = DP5()
    from torch.utils.data.datapipes._typing import issubtype
    # Unparametrized DataPipe defaults to Any (mutual subtype with Any)
    self.assertTrue(issubtype(dp5.type.param, Any) and issubtype(Any, dp5.type.param))

    class DP6(IterDataPipe[int]):
        r""" DataPipe with plain Iterator"""
        def __iter__(self) -> Iterator:
            raise NotImplementedError

    self.assertTrue(issubclass(DP6, IterDataPipe))
    dp6 = DP6()
    # A bare Iterator annotation keeps the declared class parameter
    self.assertTrue(dp6.type.param == int)

    class DP7(IterDataPipe[Awaitable[T_co]]):
        r""" DataPipe with abstract base class"""

    self.assertTrue(issubclass(DP7, IterDataPipe))
    self.assertTrue(DP7.type.param == Awaitable[T_co])  # type: ignore[attr-defined]

    class DP8(DP7[str]):
        r""" DataPipe subclass from a DataPipe with abc type"""

    self.assertTrue(issubclass(DP8, IterDataPipe))
    # Subscripting the parent substitutes the TypeVar
    self.assertTrue(DP8.type.param == Awaitable[str])  # type: ignore[attr-defined]
@skipTyping
def test_construct_time(self):
    """Verify that ``@argument_validation`` enforces DataPipe type hints at
    construction time (``__init__``), before any iteration happens.

    Raises are expected for (a) a non-DataPipe argument annotated as a
    DataPipe and (b) a DataPipe whose declared element type is not a
    subtype of the annotated one.
    """
    class DP0(IterDataPipe[Tuple]):
        @argument_validation
        def __init__(self, dp: IterDataPipe):
            self.dp = dp

        def __iter__(self) -> Iterator[Tuple]:
            for d in self.dp:
                yield d, str(d)

    class DP1(IterDataPipe[int]):
        @argument_validation
        def __init__(self, dp: IterDataPipe[Tuple[int, str]]):
            self.dp = dp

        def __iter__(self) -> Iterator[int]:
            for a, b in self.dp:
                yield a

    # Non-DataPipe input with DataPipe hint
    datasource = [(1, '1'), (2, '2'), (3, '3')]
    with self.assertRaisesRegex(TypeError, r"Expected argument 'dp' as a IterDataPipe"):
        dp0 = DP0(datasource)

    # DP0 is declared as IterDataPipe[Tuple], which is not a subtype of the
    # IterDataPipe[Tuple[int, str]] that DP1's __init__ requires.
    dp0 = DP0(dp.iter.IterableWrapper(range(10)))
    with self.assertRaisesRegex(TypeError, r"Expected type of argument 'dp' as a subtype"):
        dp1 = DP1(dp0)
@skipTyping
def test_runtime(self):
    """Verify ``@runtime_validation`` on ``__iter__``: conforming elements
    pass through, non-conforming elements raise ``RuntimeError``, and
    ``runtime_validation_disabled()`` (including nested use) suppresses the
    check only while the context is active.
    """
    class DP(IterDataPipe[Tuple[int, T_co]]):
        def __init__(self, datasource):
            self.ds = datasource

        @runtime_validation
        def __iter__(self) -> Iterator[Tuple[int, T_co]]:
            for d in self.ds:
                yield d

    # Both datasets conform to Tuple[int, T_co]
    dss = ([(1, '1'), (2, '2')],
           [(1, 1), (2, '2')])
    for ds in dss:
        dp0 = DP(ds)  # type: ignore[var-annotated]
        self.assertEqual(list(dp0), ds)
        # Reset __iter__
        self.assertEqual(list(dp0), ds)

    # None of these element shapes match Tuple[int, T_co]
    dss = ([(1, 1), ('2', 2)],  # type: ignore[assignment, list-item]
           [[1, '1'], [2, '2']],  # type: ignore[list-item]
           [1, '1', 2, '2'])
    for ds in dss:
        dp0 = DP(ds)
        with self.assertRaisesRegex(RuntimeError, r"Expected an instance as subtype"):
            list(dp0)

        # Disabling validation lets invalid data through; nesting the
        # context manager must also work.
        with runtime_validation_disabled():
            self.assertEqual(list(dp0), ds)
            with runtime_validation_disabled():
                self.assertEqual(list(dp0), ds)

        # Validation is re-enabled once the context exits.
        with self.assertRaisesRegex(RuntimeError, r"Expected an instance as subtype"):
            list(dp0)
@skipTyping
def test_reinforce(self):
    """Verify ``reinforce_type``: it narrows a TypeVar-typed DataPipe to a
    concrete type, rejects non-types and non-subtypes, and the reinforced
    type is enforced at runtime (unless validation is disabled).
    """
    T = TypeVar('T', int, str)

    class DP(IterDataPipe[T]):
        def __init__(self, ds):
            self.ds = ds

        @runtime_validation
        def __iter__(self) -> Iterator[T]:
            for d in self.ds:
                yield d

    ds = list(range(10))

    # Valid type reinforcement
    dp0 = DP(ds).reinforce_type(int)
    # NOTE(review): `assertTrue(dp0.type, int)` passes `int` as the failure
    # *message*, not as a comparand — likely meant `assertEqual`; confirm.
    self.assertTrue(dp0.type, int)
    self.assertEqual(list(dp0), ds)

    # Invalid type
    with self.assertRaisesRegex(TypeError, r"'expected_type' must be a type"):
        dp1 = DP(ds).reinforce_type(1)

    # Type is not subtype
    with self.assertRaisesRegex(TypeError, r"Expected 'expected_type' as subtype of"):
        dp2 = DP(ds).reinforce_type(float)

    # Invalid data at runtime: ints yielded but str was reinforced
    dp3 = DP(ds).reinforce_type(str)
    with self.assertRaisesRegex(RuntimeError, r"Expected an instance as subtype"):
        list(dp3)

    # Context Manager to disable the runtime validation
    with runtime_validation_disabled():
        self.assertEqual(list(d for d in dp3), ds)
class NumbersDataset(IterDataPipe):
    """Minimal IterDataPipe fixture yielding the integers ``0 .. size - 1``."""

    def __init__(self, size=10):
        self.size = size

    def __iter__(self):
        yield from range(self.size)
class TestGraph(TestCase):
    """Tests for ``torch.utils.data.graph.traverse`` over DataPipe graphs,
    including forked, mixed Map/Iter, circular-reference, and unhashable
    DataPipes."""

    class CustomIterDataPipe(IterDataPipe):
        # Bound-method reference handed to `.map` creates a reference cycle:
        # self -> self._dp -> map fn (self.add_v) -> self
        def add_v(self, x):
            return x + self.v

        def __init__(self, source_dp, v=1):
            self._dp = source_dp.map(self.add_v)
            # NOTE(review): the `v` argument is ignored; `self.v` is always 1
            # — confirm this is intentional.
            self.v = 1

        def __iter__(self):
            yield from self._dp

        def __hash__(self):
            # Deliberately unhashable so traversal cannot rely on hashing.
            raise NotImplementedError

    def test_simple_traverse(self):
        numbers_dp = NumbersDataset(size=50)
        mapped_dp = numbers_dp.map(lambda x: x * 10)
        graph = torch.utils.data.graph.traverse(mapped_dp, only_datapipe=True)
        # traverse() returns nested dicts of the form {id: (datapipe, children)}
        expected: Dict[Any, Any] = {id(mapped_dp): (mapped_dp, {id(numbers_dp): (numbers_dp, {})})}
        self.assertEqual(expected, graph)

        dps = torch.utils.data.graph_settings.get_all_graph_pipes(graph)
        self.assertEqual(len(dps), 2)
        self.assertTrue(numbers_dp in dps)
        self.assertTrue(mapped_dp in dps)

    def test_traverse_forked(self):
        # fork() introduces _ChildDataPipe nodes whose `main_datapipe` chain
        # leads back to the shared source; traversal must include them all.
        numbers_dp = NumbersDataset(size=50)
        dp0, dp1, dp2 = numbers_dp.fork(num_instances=3)
        dp0_upd = dp0.map(lambda x: x * 10)
        dp1_upd = dp1.filter(lambda x: x % 3 == 1)
        combined_dp = dp0_upd.mux(dp1_upd, dp2)
        graph = torch.utils.data.graph.traverse(combined_dp, only_datapipe=True)
        expected = {
            id(combined_dp): (combined_dp, {
                id(dp0_upd): (dp0_upd, {
                    id(dp0): (dp0, {
                        id(dp0.main_datapipe): (dp0.main_datapipe, {
                            id(dp0.main_datapipe.main_datapipe): (dp0.main_datapipe.main_datapipe, {})
                        })
                    })
                }),
                id(dp1_upd): (dp1_upd, {
                    id(dp1): (dp1, {
                        id(dp1.main_datapipe): (dp1.main_datapipe, {
                            id(dp1.main_datapipe.main_datapipe): (dp1.main_datapipe.main_datapipe, {})
                        })
                    })
                }),
                id(dp2): (dp2, {
                    id(dp2.main_datapipe): (dp2.main_datapipe, {
                        id(dp2.main_datapipe.main_datapipe): (dp2.main_datapipe.main_datapipe, {})
                    })
                })
            })
        }
        self.assertEqual(expected, graph)

        dps = torch.utils.data.graph_settings.get_all_graph_pipes(graph)
        self.assertEqual(len(dps), 8)
        for _dp in [numbers_dp, dp0.main_datapipe, dp0, dp1, dp2, dp0_upd, dp1_upd, combined_dp]:
            self.assertTrue(_dp in dps)

    def test_traverse_mapdatapipe(self):
        source_dp = dp.map.SequenceWrapper(range(10))
        map_dp = source_dp.map(partial(_fake_add, 1))
        graph = torch.utils.data.graph.traverse(map_dp)
        expected: Dict[Any, Any] = {id(map_dp): (map_dp, {id(source_dp): (source_dp, {})})}
        self.assertEqual(expected, graph)

    def test_traverse_mixdatapipe(self):
        # IterDataPipe wrapping a MapDataPipe: both must appear in the graph.
        source_map_dp = dp.map.SequenceWrapper(range(10))
        iter_dp = dp.iter.IterableWrapper(source_map_dp)
        graph = torch.utils.data.graph.traverse(iter_dp)
        expected: Dict[Any, Any] = {id(iter_dp): (iter_dp, {id(source_map_dp): (source_map_dp, {})})}
        self.assertEqual(expected, graph)

    def test_traverse_circular_datapipe(self):
        source_iter_dp = dp.iter.IterableWrapper(list(range(10)))
        circular_dp = TestGraph.CustomIterDataPipe(source_iter_dp)
        graph = torch.utils.data.graph.traverse(circular_dp, only_datapipe=True)
        # See issue: https://github.com/pytorch/data/issues/535
        expected: Dict[Any, Any] = {
            id(circular_dp): (circular_dp, {
                id(circular_dp._dp): (circular_dp._dp, {
                    id(source_iter_dp): (source_iter_dp, {})
                })
            })
        }
        self.assertEqual(expected, graph)

        dps = torch.utils.data.graph_settings.get_all_graph_pipes(graph)
        self.assertEqual(len(dps), 3)
        for _dp in [circular_dp, circular_dp._dp, source_iter_dp]:
            self.assertTrue(_dp in dps)

    def test_traverse_unhashable_datapipe(self):
        # Traversal must not depend on hash(); CustomIterDataPipe raises there.
        source_iter_dp = dp.iter.IterableWrapper(list(range(10)))
        unhashable_dp = TestGraph.CustomIterDataPipe(source_iter_dp)
        graph = torch.utils.data.graph.traverse(unhashable_dp, only_datapipe=True)
        with self.assertRaises(NotImplementedError):
            hash(unhashable_dp)
        expected: Dict[Any, Any] = {
            id(unhashable_dp): (unhashable_dp, {
                id(unhashable_dp._dp): (unhashable_dp._dp, {
                    id(source_iter_dp): (source_iter_dp, {})
                })
            })
        }
        self.assertEqual(expected, graph)
def unbatch(x):
    """Collate helper for ``batch_size=1`` loaders: unwrap the batch and
    return its single (first) element."""
    first_element = x[0]
    return first_element
class TestSerialization(TestCase):
    """Spawn-based multiprocessing DataLoader tests over DataPipes that hold
    lambdas (requires dill, since lambdas are not picklable)."""

    @skipIfNoDill
    def test_spawn_lambdas_iter(self):
        pipeline = dp.iter.IterableWrapper(range(3)).map(lambda x: x + 1).shuffle()
        loader = DataLoader(pipeline, num_workers=2, shuffle=True,
                            multiprocessing_context='spawn', collate_fn=unbatch, batch_size=1)
        # Two workers each replay the pipe, so every value appears twice.
        collected = sorted(loader)
        self.assertEqual([1, 1, 2, 2, 3, 3], collected)

    @skipIfNoDill
    def test_spawn_lambdas_map(self):
        pipeline = dp.map.SequenceWrapper(range(6)).map(lambda x: x + 1).shuffle()
        loader = DataLoader(pipeline, num_workers=2, shuffle=True,
                            multiprocessing_context='spawn', collate_fn=unbatch, batch_size=1)
        # MapDataPipe indices are sharded across workers: each value once.
        collected = sorted(loader)
        self.assertEqual([1, 2, 3, 4, 5, 6], collected)
class TestCircularSerialization(TestCase):
    """Serialization tests for DataPipes whose graphs contain reference
    cycles (a pipe holding bound methods of itself), with both pickle and
    dill, and the corresponding ``traverse`` results."""

    class CustomIterDataPipe(IterDataPipe):
        @staticmethod
        def add_one(x):
            return x + 1

        @classmethod
        def classify(cls, x):
            # All elements routed to demux output 0.
            return 0

        def add_v(self, x):
            return x + self.v

        def __init__(self, fn, source_dp=None):
            self.fn = fn
            self.source_dp = source_dp if source_dp else dp.iter.IterableWrapper([1, 2, 4])
            # Bound methods (self.add_v, self.classify) create cycles back to self.
            self._dp = self.source_dp.map(self.add_one).map(self.add_v).demux(2, self.classify)[0]
            self.v = 1

        def __iter__(self):
            yield from self._dp

    def test_circular_serialization_with_pickle(self):
        # Test for circular reference issue with pickle
        dp1 = TestCircularSerialization.CustomIterDataPipe(fn=_fake_fn)
        self.assertTrue(list(dp1) == list(pickle.loads(pickle.dumps(dp1))))

        # Unpack the internal pipeline: demux child -> demultiplexer -> map -> map -> source.
        child_1 = dp1._dp
        dm_1 = child_1.main_datapipe
        m2_1 = dm_1.main_datapipe
        m1_1 = m2_1.datapipe
        src_1 = m1_1.datapipe

        res1 = traverse(dp1, only_datapipe=True)
        res2 = traverse(dp1, only_datapipe=False)
        # only_datapipe=True follows DataPipe attributes only.
        exp_res_1 = {id(dp1): (dp1, {
            id(src_1): (src_1, {}),
            id(child_1): (child_1, {id(dm_1): (dm_1, {
                id(m2_1): (m2_1, {id(m1_1): (m1_1, {id(src_1): (src_1, {})})})
            })})
        })}
        # only_datapipe=False also follows pipes reachable through captured
        # functions, so src_1 appears again under m2_1.
        exp_res_2 = {id(dp1): (dp1, {
            id(src_1): (src_1, {}),
            id(child_1): (child_1, {id(dm_1): (dm_1, {
                id(m2_1): (m2_1, {
                    id(m1_1): (m1_1, {id(src_1): (src_1, {})}),
                    id(src_1): (src_1, {})
                })
            })})
        })}
        self.assertEqual(res1, exp_res_1)
        self.assertEqual(res2, exp_res_2)

        # Stack a second CustomIterDataPipe on top of the first.
        dp2 = TestCircularSerialization.CustomIterDataPipe(fn=_fake_fn, source_dp=dp1)
        self.assertTrue(list(dp2) == list(pickle.loads(pickle.dumps(dp2))))

        child_2 = dp2._dp
        dm_2 = child_2.main_datapipe
        m2_2 = dm_2.main_datapipe
        m1_2 = m2_2.datapipe

        res3 = traverse(dp2, only_datapipe=True)
        res4 = traverse(dp2, only_datapipe=False)
        exp_res_3 = {id(dp2): (dp2, {
            id(dp1): (dp1, {
                id(src_1): (src_1, {}),
                id(child_1): (child_1, {id(dm_1): (dm_1, {
                    id(m2_1): (m2_1, {id(m1_1): (m1_1, {id(src_1): (src_1, {})})})
                })})
            }),
            id(child_2): (child_2, {id(dm_2): (dm_2, {
                id(m2_2): (m2_2, {id(m1_2): (m1_2, {
                    id(dp1): (dp1, {
                        id(src_1): (src_1, {}),
                        id(child_1): (child_1, {id(dm_1): (dm_1, {
                            id(m2_1): (m2_1, {id(m1_1): (m1_1, {id(src_1): (src_1, {})})})
                        })})
                    }),
                })})
            })})
        })}
        exp_res_4 = {id(dp2): (dp2, {
            id(dp1): (dp1, {
                id(src_1): (src_1, {}),
                id(child_1): (child_1, {id(dm_1): (dm_1, {
                    id(m2_1): (m2_1, {
                        id(m1_1): (m1_1, {id(src_1): (src_1, {})}),
                        id(src_1): (src_1, {})
                    })
                })})
            }),
            id(child_2): (child_2, {id(dm_2): (dm_2, {
                id(m2_2): (m2_2, {
                    id(m1_2): (m1_2, {
                        id(dp1): (dp1, {
                            id(src_1): (src_1, {}),
                            id(child_1): (child_1, {id(dm_1): (dm_1, {
                                id(m2_1): (m2_1, {
                                    id(m1_1): (m1_1, {id(src_1): (src_1, {})}),
                                    id(src_1): (src_1, {})
                                })
                            })})
                        })
                    }),
                    id(dp1): (dp1, {
                        id(src_1): (src_1, {}),
                        id(child_1): (child_1, {id(dm_1): (dm_1, {
                            id(m2_1): (m2_1, {
                                id(m1_1): (m1_1, {id(src_1): (src_1, {})}),
                                id(src_1): (src_1, {})
                            })
                        })})
                    })
                })
            })})
        })}
        self.assertEqual(res3, exp_res_3)
        self.assertEqual(res4, exp_res_4)

    class LambdaIterDataPipe(CustomIterDataPipe):
        # Variant that additionally captures lambdas, which pickle cannot
        # serialize — exercised only under dill.
        def __init__(self, fn, source_dp=None):
            super().__init__(fn, source_dp)
            self.container = [lambda x: x + 1, ]
            self.lambda_fn = lambda x: x + 1
            self._dp = self.source_dp.map(self.add_one).map(self.lambda_fn).map(self.add_v).demux(2, self.classify)[0]

    @skipIfNoDill
    @skipIf(True, "Dill Tests")
    def test_circular_serialization_with_dill(self):
        # Test for circular reference issue with dill
        dp1 = TestCircularSerialization.LambdaIterDataPipe(lambda x: x + 1)
        self.assertTrue(list(dp1) == list(dill.loads(dill.dumps(dp1))))

        # Same pipeline unpacking as the pickle test above.
        child_1 = dp1._dp
        dm_1 = child_1.main_datapipe
        m2_1 = dm_1.main_datapipe
        m1_1 = m2_1.datapipe
        src_1 = m1_1.datapipe

        res1 = traverse(dp1, only_datapipe=True)
        res2 = traverse(dp1, only_datapipe=False)
        exp_res_1 = {id(dp1): (dp1, {
            id(src_1): (src_1, {}),
            id(child_1): (child_1, {id(dm_1): (dm_1, {
                id(m2_1): (m2_1, {id(m1_1): (m1_1, {id(src_1): (src_1, {})})})
            })})
        })}
        exp_res_2 = {id(dp1): (dp1, {
            id(src_1): (src_1, {}),
            id(child_1): (child_1, {id(dm_1): (dm_1, {
                id(m2_1): (m2_1, {
                    id(m1_1): (m1_1, {id(src_1): (src_1, {})}),
                    id(src_1): (src_1, {})
                })
            })})
        })}
        self.assertEqual(res1, exp_res_1)
        self.assertEqual(res2, exp_res_2)

        dp2 = TestCircularSerialization.LambdaIterDataPipe(fn=_fake_fn, source_dp=dp1)
        self.assertTrue(list(dp2) == list(dill.loads(dill.dumps(dp2))))

        child_2 = dp2._dp
        dm_2 = child_2.main_datapipe
        m2_2 = dm_2.main_datapipe
        m1_2 = m2_2.datapipe

        res3 = traverse(dp2, only_datapipe=True)
        res4 = traverse(dp2, only_datapipe=False)
        exp_res_3 = {id(dp2): (dp2, {
            id(dp1): (dp1, {
                id(src_1): (src_1, {}),
                id(child_1): (child_1, {id(dm_1): (dm_1, {
                    id(m2_1): (m2_1, {id(m1_1): (m1_1, {id(src_1): (src_1, {})})})
                })})
            }),
            id(child_2): (child_2, {id(dm_2): (dm_2, {
                id(m2_2): (m2_2, {id(m1_2): (m1_2, {
                    id(dp1): (dp1, {
                        id(src_1): (src_1, {}),
                        id(child_1): (child_1, {id(dm_1): (dm_1, {
                            id(m2_1): (m2_1, {id(m1_1): (m1_1, {id(src_1): (src_1, {})})})
                        })})
                    }),
                })})
            })})
        })}
        exp_res_4 = {id(dp2): (dp2, {
            id(dp1): (dp1, {
                id(src_1): (src_1, {}),
                id(child_1): (child_1, {id(dm_1): (dm_1, {
                    id(m2_1): (m2_1, {
                        id(m1_1): (m1_1, {id(src_1): (src_1, {})}),
                        id(src_1): (src_1, {})
                    })
                })})
            }),
            id(child_2): (child_2, {id(dm_2): (dm_2, {
                id(m2_2): (m2_2, {
                    id(m1_2): (m1_2, {
                        id(dp1): (dp1, {
                            id(src_1): (src_1, {}),
                            id(child_1): (child_1, {id(dm_1): (dm_1, {
                                id(m2_1): (m2_1, {
                                    id(m1_1): (m1_1, {id(src_1): (src_1, {})}),
                                    id(src_1): (src_1, {})
                                })
                            })})
                        })
                    }),
                    id(dp1): (dp1, {
                        id(src_1): (src_1, {}),
                        id(child_1): (child_1, {id(dm_1): (dm_1, {
                            id(m2_1): (m2_1, {
                                id(m1_1): (m1_1, {id(src_1): (src_1, {})}),
                                id(src_1): (src_1, {})
                            })
                        })})
                    })
                })
            })})
        })}
        self.assertEqual(res3, exp_res_3)
        self.assertEqual(res4, exp_res_4)
class TestSharding(TestCase):
    """Tests for ``sharding_filter`` / ``apply_sharding`` over forked graphs."""

    def _get_pipeline(self):
        # fork -> (map, filter) -> mux, built from picklable module-level helpers.
        source = NumbersDataset(size=10)
        branch_a, branch_b = source.fork(num_instances=2)
        return branch_a.map(_mul_10).mux(branch_b.filter(_mod_3_test))

    def _get_dill_pipeline(self):
        # Same shape as _get_pipeline but with lambdas (needs dill to pickle).
        source = NumbersDataset(size=10)
        branch_a, branch_b = source.fork(num_instances=2)
        return branch_a.map(lambda x: x * 10).mux(branch_b.filter(lambda x: x % 3 == 1))

    def test_simple_sharding(self):
        sharded = self._get_pipeline().sharding_filter()
        torch.utils.data.graph_settings.apply_sharding(sharded, 3, 1)
        self.assertEqual([1, 20], list(sharded))

        # The union of all three shards covers every element exactly once.
        all_items = [0, 1, 10, 4, 20, 7]
        collected = []
        for rank in range(3):
            shard = self._get_pipeline().sharding_filter()
            torch.utils.data.graph_settings.apply_sharding(shard, 3, rank)
            collected += list(shard)
        self.assertEqual(sorted(all_items), sorted(collected))

    def test_sharding_length(self):
        # 13 elements over 3 shards -> lengths 5, 4, 4.
        numbers = dp.iter.IterableWrapper(range(13))
        shards = []
        for rank in range(3):
            shard = numbers.sharding_filter()
            torch.utils.data.graph_settings.apply_sharding(shard, 3, rank)
            shards.append(shard)
        self.assertEqual(13, len(numbers))
        self.assertEqual(5, len(shards[0]))
        self.assertEqual(4, len(shards[1]))
        self.assertEqual(4, len(shards[2]))

        # A single element lands entirely in shard 0.
        singleton = dp.iter.IterableWrapper(range(1))
        first = singleton.sharding_filter()
        torch.utils.data.graph_settings.apply_sharding(first, 2, 0)
        second = singleton.sharding_filter()
        torch.utils.data.graph_settings.apply_sharding(second, 2, 1)
        self.assertEqual(1, len(first))
        self.assertEqual(0, len(second))

    def test_old_dataloader(self):
        expected = list(self._get_pipeline())

        sharded = self._get_pipeline().sharding_filter()
        loader = DataLoader(sharded, batch_size=1, shuffle=False, num_workers=2)
        collected = [item for item in loader]
        self.assertEqual(sorted(expected), sorted(collected))
class TestIterDataPipeSingletonConstraint(TestCase):
    r"""
    Each `IterDataPipe` can only have one active iterator. Whenever a new iterator is created, older
    iterators are invalidated. These tests aim to ensure `IterDataPipe` follows this behavior.
    """

    def _check_single_iterator_invalidation_logic(self, source_dp: IterDataPipe):
        r"""
        Given a IterDataPipe, verifies that the iterator can be read, reset, and the creation of
        a second iterator invalidates the first one.
        """
        it1 = iter(source_dp)
        self.assertEqual(list(range(10)), list(it1))
        it1 = iter(source_dp)
        self.assertEqual(list(range(10)), list(it1))  # A fresh iterator can be read in full again
        it1 = iter(source_dp)
        self.assertEqual(0, next(it1))
        it2 = iter(source_dp)  # This should invalidate `it1`
        self.assertEqual(0, next(it2))  # Should read from the beginning again
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            next(it1)

    def test_iterdatapipe_singleton_generator(self):
        r"""
        Testing for the case where IterDataPipe's `__iter__` is a generator function.
        """
        # Functional Test: Check if invalidation logic is correct
        source_dp: IterDataPipe = dp.iter.IterableWrapper(range(10))
        self._check_single_iterator_invalidation_logic(source_dp)

        # Functional Test: extend the test to a pipeline
        dps = source_dp.map(_fake_fn).filter(_fake_filter_fn)
        self._check_single_iterator_invalidation_logic(dps)

        # Functional Test: multiple simultaneous references to the same DataPipe fails
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            for _ in zip(source_dp, source_dp):
                pass

        # Function Test: sequential references work
        for _ in zip(list(source_dp), list(source_dp)):
            pass

    def test_iterdatapipe_singleton_self_next(self):
        r"""
        Testing for the case where IterDataPipe's `__iter__` returns `self` and there is a `__next__` method
        Note that the following DataPipe by is singleton by default (because `__iter__` returns `self`).
        """
        class _CustomIterDP_Self(IterDataPipe):
            def __init__(self, iterable):
                self.source = iterable
                self.iterable = iter(iterable)

            def __iter__(self):
                self.reset()
                return self

            def __next__(self):
                return next(self.iterable)

            def reset(self):
                self.iterable = iter(self.source)

        # Functional Test: Check that every `__iter__` call returns the same object
        source_dp = _CustomIterDP_Self(range(10))
        res = list(source_dp)
        it = iter(source_dp)
        self.assertEqual(res, list(it))

        # Functional Test: Check if invalidation logic is correct
        source_dp = _CustomIterDP_Self(range(10))
        self._check_single_iterator_invalidation_logic(source_dp)
        self.assertEqual(1, next(source_dp))  # `source_dp` is still valid and can be read

        # Functional Test: extend the test to a pipeline
        source_dp = _CustomIterDP_Self(dp.iter.IterableWrapper(range(10)).map(_fake_fn).filter(_fake_filter_fn))
        self._check_single_iterator_invalidation_logic(source_dp)
        self.assertEqual(1, next(source_dp))  # `source_dp` is still valid and can be read

        # Functional Test: multiple simultaneous references to the same DataPipe fails
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            for _ in zip(source_dp, source_dp):
                pass

    def test_iterdatapipe_singleton_new_object(self):
        r"""
        Testing for the case where IterDataPipe's `__iter__` isn't a generator nor returns `self`,
        and there isn't a `__next__` method.
        """
        class _CustomIterDP(IterDataPipe):
            def __init__(self, iterable):
                self.iterable = iter(iterable)

            def __iter__(self):  # Note that this doesn't reset
                return self.iterable  # Intentionally not returning `self`

        # Functional Test: Check if invalidation logic is correct
        # (no reset, so `it2` continues where `it1` left off)
        source_dp = _CustomIterDP(range(10))
        it1 = iter(source_dp)
        self.assertEqual(0, next(it1))
        it2 = iter(source_dp)
        self.assertEqual(1, next(it2))
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            next(it1)

        # Functional Test: extend the test to a pipeline
        source_dp = _CustomIterDP(dp.iter.IterableWrapper(range(10)).map(_fake_fn).filter(_fake_filter_fn))
        it1 = iter(source_dp)
        self.assertEqual(0, next(it1))
        it2 = iter(source_dp)
        self.assertEqual(1, next(it2))
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            next(it1)

        # Functional Test: multiple simultaneous references to the same DataPipe fails
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            for _ in zip(source_dp, source_dp):
                pass

    def test_iterdatapipe_singleton_buggy(self):
        r"""
        Buggy test case case where IterDataPipe's `__iter__` returns a new object, but also has
        a `__next__` method.
        """
        class _CustomIterDP(IterDataPipe):
            def __init__(self, iterable):
                self.source = iterable
                self.iterable = iter(iterable)

            def __iter__(self):
                return iter(self.source)  # Intentionally not returning `self`

            def __next__(self):
                return next(self.iterable)

        # Functional Test: Check if invalidation logic is correct
        source_dp = _CustomIterDP(range(10))
        self._check_single_iterator_invalidation_logic(source_dp)
        self.assertEqual(0, next(source_dp))  # `__next__` is unrelated with `__iter__`

        # Functional Test: Special case to show `__next__` is unrelated with `__iter__`
        source_dp = _CustomIterDP(range(10))
        self.assertEqual(0, next(source_dp))
        it1 = iter(source_dp)
        self.assertEqual(0, next(it1))
        self.assertEqual(1, next(source_dp))
        it2 = iter(source_dp)  # invalidates both `it1`
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            next(it1)
        self.assertEqual(2, next(source_dp))  # not impacted by the creation of `it2`
        self.assertEqual(list(range(10)), list(it2))  # `it2` still works because it is a new object

    def test_iterdatapipe_singleton_constraint_multiple_outputs(self):
        r"""
        Testing for the case where IterDataPipe has multiple child DataPipes as outputs.
        """
        # Functional Test: all previous related iterators should be invalidated when a new iterator
        #                  is created from a ChildDataPipe
        source_dp: IterDataPipe = dp.iter.IterableWrapper(range(10))
        cdp1, cdp2 = source_dp.fork(num_instances=2)
        it1, it2 = iter(cdp1), iter(cdp2)
        self.assertEqual(list(range(10)), list(it1))
        self.assertEqual(list(range(10)), list(it2))
        it1, it2 = iter(cdp1), iter(cdp2)
        with warnings.catch_warnings(record=True) as wa:
            it3 = iter(cdp1)  # This should invalidate `it1` and `it2`
            self.assertEqual(len(wa), 1)
            self.assertRegex(str(wa[0].message), r"child DataPipes are not exhausted")
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            next(it1)
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            next(it2)
        self.assertEqual(0, next(it3))

        # The next line should not invalidate anything, as there was no new iterator created
        # for `cdp2` after `it2` was invalidated
        it4 = iter(cdp2)
        self.assertEqual(1, next(it3))  # An error shouldn't be raised here
        self.assertEqual(list(range(10)), list(it4))

        # Functional Test: invalidation when a new iterator is created from `source_dp`
        source_dp = dp.iter.IterableWrapper(range(10))
        cdp1, cdp2 = source_dp.fork(num_instances=2)
        it1, it2 = iter(cdp1), iter(cdp2)
        self.assertEqual(list(range(10)), list(it1))
        self.assertEqual(list(range(10)), list(it2))
        it1, it2 = iter(cdp1), iter(cdp2)
        self.assertEqual(0, next(it1))
        self.assertEqual(0, next(it2))
        it3 = iter(source_dp)  # note that a new iterator is created from `source_dp`
        self.assertEqual(0, next(it3))  # `it3` should invalidate `it1` and `it2` since they both use `source_dp`
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            next(it1)
        self.assertEqual(1, next(it3))

        # Function Test: Extending test to pipeline
        source_dp = dp.iter.IterableWrapper(range(10)).map(_fake_fn).filter(_fake_filter_fn)
        cdp1, cdp2 = source_dp.fork(num_instances=2)
        it1, it2 = iter(cdp1), iter(cdp2)
        self.assertEqual(list(range(10)), list(it1))
        self.assertEqual(list(range(10)), list(it2))
        it1, it2 = iter(cdp1), iter(cdp2)
        with warnings.catch_warnings(record=True) as wa:
            it3 = iter(cdp1)  # This should invalidate `it1` and `it2`
            self.assertEqual(len(wa), 1)
            self.assertRegex(str(wa[0].message), r"child DataPipes are not exhausted")
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            next(it1)
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            next(it2)
        with warnings.catch_warnings(record=True) as wa:
            it1, it2 = iter(cdp1), iter(cdp2)
            self.assertEqual(len(wa), 1)
            self.assertRegex(str(wa[0].message), r"child DataPipes are not exhausted")
        self.assertEqual(0, next(it1))
        self.assertEqual(0, next(it2))
        it3 = iter(source_dp)  # note that a new iterator is created from `source_dp`
        self.assertEqual(0, next(it3))  # `it3` should invalidate `it1` and `it2` since they both use `source_dp`
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            next(it1)
        self.assertEqual(1, next(it3))
class TestIterDataPipeCountSampleYielded(TestCase):
    """Checks that `_number_of_samples_yielded` is tracked correctly across
    all `__iter__` styles (generator, returned iterator, self + `__next__`),
    including partial reads, resets, and mid-iteration exceptions."""

    def _yield_count_test_helper(self, datapipe, n_expected_samples):
        # Functional Test: Check if number of samples yielded is as expected
        res = list(datapipe)
        self.assertEqual(len(res), datapipe._number_of_samples_yielded)

        # Functional Test: Check if the count is correct when DataPipe is partially read
        it = iter(datapipe)
        res = []
        for i, value in enumerate(it):
            res.append(value)
            if i == n_expected_samples - 1:
                break
        self.assertEqual(n_expected_samples, datapipe._number_of_samples_yielded)

        # Functional Test: Check for reset behavior and if iterator also works
        it = iter(datapipe)  # reset the DataPipe
        res = list(it)
        self.assertEqual(len(res), datapipe._number_of_samples_yielded)

    def test_iterdatapipe_sample_yielded_generator_function(self):
        # Functional Test: `__iter__` is a generator function
        datapipe: IterDataPipe = dp.iter.IterableWrapper(range(10))
        self._yield_count_test_helper(datapipe, n_expected_samples=5)

    def test_iterdatapipe_sample_yielded_generator_function_exception(self):
        # Functional Test: `__iter__` is a custom generator function with exception
        class _CustomGeneratorFnDataPipe(IterDataPipe):
            # This class's `__iter__` has a Runtime Error
            def __iter__(self):
                yield 0
                yield 1
                yield 2
                raise RuntimeError("Custom test error after yielding 3 elements")
                yield 3  # unreachable; only the 3 yielded elements are counted

        # Functional Test: Ensure the count is correct even when exception is raised
        datapipe: IterDataPipe = _CustomGeneratorFnDataPipe()
        with self.assertRaisesRegex(RuntimeError, "Custom test error after yielding 3 elements"):
            list(datapipe)
        self.assertEqual(3, datapipe._number_of_samples_yielded)

        # Functional Test: Check for reset behavior and if iterator also works
        it = iter(datapipe)  # reset the DataPipe
        with self.assertRaisesRegex(RuntimeError, "Custom test error after yielding 3 elements"):
            list(it)
        self.assertEqual(3, datapipe._number_of_samples_yielded)

    def test_iterdatapipe_sample_yielded_return_self(self):
        class _CustomGeneratorDataPipe(IterDataPipe):
            # This class's `__iter__` is not a generator function
            def __init__(self):
                self.source = iter(range(10))

            def __iter__(self):
                return self.source

            def reset(self):
                self.source = iter(range(10))

        datapipe: IterDataPipe = _CustomGeneratorDataPipe()
        self._yield_count_test_helper(datapipe, n_expected_samples=5)

    def test_iterdatapipe_sample_yielded_next(self):
        class _CustomNextDataPipe(IterDataPipe):
            # This class's `__iter__` returns `self` and has a `__next__`
            def __init__(self):
                self.source = iter(range(10))

            def __iter__(self):
                return self

            def __next__(self):
                return next(self.source)

            def reset(self):
                self.source = iter(range(10))

        datapipe: IterDataPipe = _CustomNextDataPipe()
        self._yield_count_test_helper(datapipe, n_expected_samples=5)

    def test_iterdatapipe_sample_yielded_next_exception(self):
        class _CustomNextDataPipe(IterDataPipe):
            # This class's `__iter__` returns `self` and has a `__next__`
            def __init__(self):
                self.source = iter(range(10))
                self.count = 0

            def __next__(self):
                # Raises once 3 elements have been produced.
                if self.count == 3:
                    raise RuntimeError("Custom test error after yielding 3 elements")
                self.count += 1
                return next(self.source)

            def __iter__(self):
                return self

            def reset(self):
                self.count = 0
                self.source = iter(range(10))

        # Functional Test: Ensure the count is correct even when exception is raised
        datapipe: IterDataPipe = _CustomNextDataPipe()
        with self.assertRaisesRegex(RuntimeError, "Custom test error after yielding 3 elements"):
            list(datapipe)
        self.assertEqual(3, datapipe._number_of_samples_yielded)

        # Functional Test: Check for reset behavior and if iterator also works
        it = iter(datapipe)  # reset the DataPipe
        with self.assertRaisesRegex(RuntimeError, "Custom test error after yielding 3 elements"):
            list(it)
        self.assertEqual(3, datapipe._number_of_samples_yielded)
class _CustomNonGeneratorTestDataPipe(IterDataPipe):
    """Fixture whose ``__iter__`` returns a plain list iterator instead of
    being a generator function. Attribute names (``n``, ``source``) are part
    of the fixture's contract and are kept unchanged."""

    def __init__(self):
        self.n = 10
        self.source = [value for value in range(self.n)]

    # This class's `__iter__` is not a generator function
    def __iter__(self):
        return iter(self.source)

    def __len__(self):
        return self.n
class _CustomSelfNextTestDataPipe(IterDataPipe):
    """Fixture implementing the self-iterator protocol: ``__iter__`` returns
    ``self`` and ``__next__`` pulls from an internal iterator."""

    def __init__(self):
        self.n = 10
        self.iter = iter(range(self.n))

    def __iter__(self):
        return self

    def __next__(self):
        return next(self.iter)

    def reset(self):
        # Rewinds the internal iterator; presumably invoked by the snapshot
        # restoration machinery — confirm against _simple_graph_snapshot_restoration.
        self.iter = iter(range(self.n))

    def __len__(self):
        return self.n
class TestIterDataPipeGraphFastForward(TestCase):
    def _fast_forward_graph_test_helper(self, datapipe, fast_forward_fn, expected_res, n_iterations=3, rng=None):
        """Fast-forward ``datapipe`` by ``n_iterations`` via ``fast_forward_fn``
        and check the remaining output equals ``expected_res[n_iterations:]``,
        both when listing the pipe directly and via an explicit iterator.
        """
        if rng is None:
            rng = torch.Generator()
        rng = rng.manual_seed(0)
        torch.utils.data.graph_settings.apply_shuffle_seed(datapipe, rng)

        # Test Case: fast forward works with list
        rng.manual_seed(0)
        fast_forward_fn(datapipe, n_iterations, rng)
        actual_res = list(datapipe)
        self.assertEqual(len(datapipe) - n_iterations, len(actual_res))
        self.assertEqual(expected_res[n_iterations:], actual_res)

        # Test Case: fast forward works with iterator
        rng.manual_seed(0)
        fast_forward_fn(datapipe, n_iterations, rng)
        it = iter(datapipe)
        actual_res = list(it)
        self.assertEqual(len(datapipe) - n_iterations, len(actual_res))
        self.assertEqual(expected_res[n_iterations:], actual_res)
        # Exhausted iterator must signal StopIteration, not restart.
        with self.assertRaises(StopIteration):
            next(it)
    def test_simple_snapshot_graph(self):
        """Exercise snapshot restoration over progressively deeper graphs:
        wrapper -> map -> shuffle -> map -> batch -> fork/zip -> fork/concat."""
        graph1 = dp.iter.IterableWrapper(range(10))
        res1 = list(range(10))
        self._fast_forward_graph_test_helper(graph1, _simple_graph_snapshot_restoration,
                                             expected_res=res1)

        graph2 = graph1.map(_mul_10)
        res2 = [10 * x for x in res1]
        self._fast_forward_graph_test_helper(graph2, _simple_graph_snapshot_restoration,
                                             expected_res=res2)

        # Seed, then materialize the shuffled order once to know what to expect.
        rng = torch.Generator()
        graph3 = graph2.shuffle()
        rng.manual_seed(0)
        torch.utils.data.graph_settings.apply_shuffle_seed(graph3, rng)
        res3 = list(graph3)
        self._fast_forward_graph_test_helper(graph3, _simple_graph_snapshot_restoration,
                                             expected_res=res3)

        graph4 = graph3.map(_mul_10)
        res4 = [10 * x for x in res3]
        self._fast_forward_graph_test_helper(graph4, _simple_graph_snapshot_restoration,
                                             expected_res=res4)

        batch_size = 2
        graph5 = graph4.batch(batch_size)
        res5 = [res4[i:i + batch_size] for i in range(0, len(res4), batch_size)]  # .batch(2)
        self._fast_forward_graph_test_helper(graph5, _simple_graph_snapshot_restoration,
                                             expected_res=res5)

        # With `fork` and `zip`
        cdp1, cdp2 = graph5.fork(2)
        graph6 = cdp1.zip(cdp2)
        rng = rng.manual_seed(100)
        torch.utils.data.graph_settings.apply_shuffle_seed(graph6, rng)
        res6 = [(x, x) for x in res5]
        self._fast_forward_graph_test_helper(graph6, _simple_graph_snapshot_restoration,
                                             expected_res=res6)

        # With `fork` and `concat`
        graph7 = cdp1.concat(cdp2)
        res7 = res5 * 2
        self._fast_forward_graph_test_helper(graph7, _simple_graph_snapshot_restoration,
                                             expected_res=res7)

        # Raises an exception if the graph has already been restored
        # NOTE(review): both calls sit inside the assertRaises block, so the
        # second call only runs if the first unexpectedly succeeds.
        with self.assertRaisesRegex(RuntimeError, "Snapshot restoration cannot be applied."):
            _simple_graph_snapshot_restoration(graph7, 1)
            _simple_graph_snapshot_restoration(graph7, 1)
    def test_simple_snapshot_custom_non_generator(self):
        # Snapshot restoration on a pipe whose __iter__ returns a list iterator.
        graph = _CustomNonGeneratorTestDataPipe()
        self._fast_forward_graph_test_helper(graph, _simple_graph_snapshot_restoration, expected_res=range(10))
    def test_simple_snapshot_custom_self_next(self):
        # Snapshot restoration on a pipe using the self-iterator (__next__) protocol.
        graph = _CustomSelfNextTestDataPipe()
        self._fast_forward_graph_test_helper(graph, _simple_graph_snapshot_restoration, expected_res=range(10))
    def _snapshot_test_helper(self, datapipe, expected_res, n_iter=3, rng=None):
        """
        Extend the previous test with serialization and deserialization test.

        Reads ``n_iter`` elements, pickles the pipe mid-iteration, restores the
        deserialized copy to the same position, and checks both the live
        iterator and the restored copy yield ``expected_res[n_iter:]``.
        """
        if rng is None:
            rng = torch.Generator()
        rng.manual_seed(0)
        torch.utils.data.graph_settings.apply_shuffle_seed(datapipe, rng)
        it = iter(datapipe)
        for _ in range(n_iter):
            next(it)
        serialized_graph = pickle.dumps(datapipe)
        deserialized_graph = pickle.loads(serialized_graph)
        # The yield counter must survive serialization.
        self.assertEqual(n_iter, datapipe._number_of_samples_yielded)
        self.assertEqual(n_iter, deserialized_graph._number_of_samples_yielded)

        # Restore with an identically-seeded generator so the shuffle order matches.
        rng_for_deserialized = torch.Generator()
        rng_for_deserialized.manual_seed(0)
        _simple_graph_snapshot_restoration(deserialized_graph, n_iter, rng=rng_for_deserialized)
        self.assertEqual(expected_res[n_iter:], list(it))
        self.assertEqual(expected_res[n_iter:], list(deserialized_graph))
def test_simple_snapshot_graph_with_serialization(self):
graph1 = dp.iter.IterableWrapper(range(10))
res1 = list(range(10))
self._snapshot_test_helper(graph1, expected_res=res1)
graph2 = graph1.map(_mul_10)
res2 = [10 * x for x in res1]
self._snapshot_test_helper(graph2, expected_res=res2)
rng = torch.Generator()
graph3 = graph2.shuffle()
rng.manual_seed(0)
torch.utils.data.graph_settings.apply_shuffle_seed(graph3, rng)
res3 = list(graph3)
self._snapshot_test_helper(graph3, expected_res=res3)
graph4 = graph3.map(_mul_10)
res4 = [10 * x for x in res3]
self._snapshot_test_helper(graph4, expected_res=res4)
batch_size = 2
graph5 = graph4.batch(batch_size)
res5 = [res4[i:i + batch_size] for i in range(0, len(res4), batch_size)] # .batch(2)
self._snapshot_test_helper(graph5, expected_res=res5)
# With `fork` and `zip`
cdp1, cdp2 = graph5.fork(2)
graph6 = cdp1.zip(cdp2)
res6 = [(x, x) for x in res5]
self._snapshot_test_helper(graph6, expected_res=res6)
# With `fork` and `concat`
graph7 = cdp1.concat(cdp2)
res7 = res5 * 2
self._snapshot_test_helper(graph7, expected_res=res7)
def test_simple_snapshot_graph_repeated(self):
cdp1, cdp2 = dp.iter.IterableWrapper(range(10)).map(_mul_10).shuffle().map(_mul_10).map(_mul_10).fork(2)
graph = cdp1.zip(cdp2)
rng = torch.Generator()
rng.manual_seed(0)
torch.utils.data.graph_settings.apply_shuffle_seed(graph, rng)
# Get expected result
expected_res = list(graph)
rng.manual_seed(0)
torch.utils.data.graph_settings.apply_shuffle_seed(graph, rng)
it = iter(graph)
n_iter = 3
for _ in range(n_iter):
next(it)
# First serialization/deserialization
serialized_graph = pickle.dumps(graph)
deserialized_graph = pickle.loads(serialized_graph)
rng_for_deserialized = torch.Generator()
rng_for_deserialized.manual_seed(0)
_simple_graph_snapshot_restoration(deserialized_graph, deserialized_graph._number_of_samples_yielded,
rng=rng_for_deserialized)
it = iter(deserialized_graph)
# Get the next element and ensure it is as expected
self.assertEqual(expected_res[3], next(it))
# Serializalize/Deserialize and fast-forward again after to ensure it works
serialized_graph2 = pickle.dumps(deserialized_graph)
deserialized_graph2 = pickle.loads(serialized_graph2)
rng_for_deserialized = torch.Generator()
rng_for_deserialized.manual_seed(0)
_simple_graph_snapshot_restoration(deserialized_graph2, deserialized_graph._number_of_samples_yielded,
rng=rng_for_deserialized)
# Get the next element and ensure it is as expected
self.assertEqual(expected_res[4:], list(deserialized_graph2))
# Running this file directly executes the whole test suite.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_datapipe.py |
# -*- coding: utf-8 -*-
# Owner(s): ["module: linear algebra"]
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import random
from random import randrange
from itertools import product
from functools import reduce, partial, wraps
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state, IS_ARM64)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver, dtypesIfMPS)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
floating_and_complex_types_and, floating_types_and, complex_types,
)
from torch.testing._internal.common_cuda import SM53OrLater, tf32_on_and_off, CUDA11OrLater, CUDA9, _get_magma_version, \
_get_torch_cuda_version
from torch.distributions.binomial import Binomial
# Protects against includes accidentally setting the default dtype
# NOTE: jit_metaprogramming_utils sets the default dtype to double!
torch.set_default_dtype(torch.float32)
assert torch.get_default_dtype() is torch.float32

# SciPy is optional; comparisons against scipy.linalg are gated on TEST_SCIPY.
if TEST_SCIPY:
    import scipy
def setLinalgBackendsToDefaultFinally(fn):
    """Decorator: run *fn*, then always reset the preferred linalg backend.

    A test that switches ``torch.backends.cuda.preferred_linalg_library`` and
    then fails would otherwise leak the non-default backend into every
    subsequent linalg test.
    """
    @wraps(fn)
    def _fn(*args, **kwargs):
        try:
            # Propagate the wrapped function's return value (the original
            # wrapper silently discarded it).
            return fn(*args, **kwargs)
        finally:
            # Set linalg backend back to default to make sure potential failures in one test
            # doesn't affect other linalg tests
            torch.backends.cuda.preferred_linalg_library('default')
    return _fn
@unittest.skipIf(IS_ARM64, "Issue with numpy version on arm")
class TestLinalg(TestCase):
def setUp(self):
super(self.__class__, self).setUp()
torch.backends.cuda.matmul.allow_tf32 = False
def tearDown(self):
torch.backends.cuda.matmul.allow_tf32 = True
super(self.__class__, self).tearDown()
exact_dtype = True
@dtypes(torch.float, torch.cfloat)
@precisionOverride({torch.float: 1e-06, torch.cfloat: 1e-06})
@tf32_on_and_off(5e-3)
def test_inner(self, device, dtype):
def check(a_sizes_, b_sizes_):
for a_sizes, b_sizes in ((a_sizes_, b_sizes_), (b_sizes_, a_sizes_)):
a = torch.randn(a_sizes, dtype=dtype, device=device)
b = torch.randn(b_sizes, dtype=dtype, device=device)
res = torch.inner(a, b)
ref = np.inner(a.cpu().numpy(), b.cpu().numpy())
self.assertEqual(res.cpu(), torch.from_numpy(np.array(ref)))
out = torch.zeros_like(res)
torch.inner(a, b, out=out)
self.assertEqual(res, out)
check([], []) # scalar x scalar
check([], [0]) # scalar x empty
check([], [3]) # scalar x 1D
check([], [2, 3, 4]) # scalar x 3D
check([0], [0]) # empty x empty
check([0], [2, 0]) # empty x 2D
check([2], [2]) # 1D x 1D
check([2], [3, 1, 2]) # 1D x 3D
check([2], [3, 0, 2]) # 1D x 3D empty
check([1, 2], [3, 2]) # 2D x 2D
check([1, 2], [3, 4, 2]) # 2D x 3D
check([2, 1, 3, 2], [1, 3, 2, 2]) # 4D x 4D
# Test error message
with self.assertRaisesRegex(RuntimeError,
r"inner\(\) the last dimension must match on both "
r"input tensors but got shapes \[2, 3\] and \[2, 2\]"):
torch.randn(2, 3, device=device, dtype=dtype).inner(torch.randn(2, 2, device=device, dtype=dtype))
# Tests torch.outer, and its alias, torch.ger, vs. NumPy
@precisionOverride({torch.bfloat16: 1e-1})
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
def test_outer(self, device, dtype):
def run_test_case(a, b):
if dtype == torch.bfloat16:
a_np = a.to(torch.double).cpu().numpy()
b_np = b.to(torch.double).cpu().numpy()
exact_dtype = False
else:
a_np = a.cpu().numpy()
b_np = b.cpu().numpy()
exact_dtype = True
expected = np.outer(a_np, b_np)
self.assertEqual(torch.outer(a, b), expected, exact_dtype=False)
self.assertEqual(torch.Tensor.outer(a, b), expected, exact_dtype=False)
self.assertEqual(torch.ger(a, b), expected, exact_dtype=False)
self.assertEqual(torch.Tensor.ger(a, b), expected, exact_dtype=False)
# test out variant
out = torch.empty(a.size(0), b.size(0), device=device, dtype=dtype)
torch.outer(a, b, out=out)
self.assertEqual(out, expected, exact_dtype=False)
out = torch.empty(a.size(0), b.size(0), device=device, dtype=dtype)
torch.ger(a, b, out=out)
self.assertEqual(out, expected, exact_dtype=False)
a = torch.randn(50).to(device=device, dtype=dtype)
b = torch.randn(50).to(device=device, dtype=dtype)
run_test_case(a, b)
# test 0 strided tensor
zero_strided = torch.randn(1).to(device=device, dtype=dtype).expand(50)
run_test_case(zero_strided, b)
run_test_case(a, zero_strided)
def test_solve_removed_error(self, device):
a = make_tensor(5, 5, device=device, dtype=torch.float32)
b = make_tensor(5, 1, device=device, dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "This function was deprecated since version 1.9 and is now removed"):
torch.solve(b, a)
with self.assertRaisesRegex(RuntimeError, "This function was deprecated since version 1.9 and is now removed"):
b.solve(a)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_linalg_lstsq(self, device, dtype):
        """Cross-check torch.linalg.lstsq against SciPy/NumPy references for
        every available LAPACK driver, over batched shapes and rcond settings.
        """
        from torch.testing._internal.common_utils import random_well_conditioned_matrix
        # Only the 'gels' driver has CUDA support; CPU supports all of them.
        if self.device_type == 'cpu':
            drivers = ('gels', 'gelsy', 'gelsd', 'gelss', None)
        else:
            drivers = ('gels', None)

        def check_solution_correctness(a, b, sol):
            # The least-squares solution should agree with the pseudo-inverse formula.
            sol2 = a.pinverse() @ b
            self.assertEqual(sol, sol2, atol=1e-5, rtol=1e-5)

        def check_correctness_ref(a, b, res, ref, driver="default"):
            # Compare a (possibly batched) torch result against a non-batched
            # reference implementation `ref` (SciPy or NumPy), matrix by matrix.
            def apply_if_not_empty(t, f):
                if t.numel():
                    return f(t)
                else:
                    return t

            def select_if_not_empty(t, i):
                selected = apply_if_not_empty(t, lambda x: x.select(0, i))
                return selected

            m = a.size(-2)
            n = a.size(-1)
            nrhs = b.size(-1)
            batch_size = int(np.prod(a.shape[:-2]))
            if batch_size == 0:
                batch_size = 1
            # Flatten all batch dimensions so we can iterate matrix-by-matrix.
            a_3d = a.view(batch_size, m, n)
            b_3d = b.view(batch_size, m, nrhs)
            solution_3d = res.solution.view(batch_size, n, nrhs)
            residuals_2d = apply_if_not_empty(res.residuals, lambda t: t.view(-1, nrhs))
            rank_1d = apply_if_not_empty(res.rank, lambda t: t.view(-1))
            singular_values_2d = res.singular_values.view(batch_size, res.singular_values.shape[-1])

            if a.numel() > 0:
                for i in range(batch_size):
                    sol, residuals, rank, singular_values = ref(
                        a_3d.select(0, i).numpy(),
                        b_3d.select(0, i).numpy()
                    )
                    # Singular values are None when lapack_driver='gelsy' in SciPy
                    if singular_values is None:
                        singular_values = []
                    self.assertEqual(sol, solution_3d.select(0, i), atol=1e-5, rtol=1e-5)
                    self.assertEqual(rank, select_if_not_empty(rank_1d, i), atol=1e-5, rtol=1e-5)
                    self.assertEqual(singular_values, singular_values_2d.select(0, i), atol=1e-5, rtol=1e-5)

                    # SciPy and NumPy operate only on non-batched input and
                    # return an empty array with shape (0,) if rank(a) != n
                    # in PyTorch the batched inputs are supported and
                    # matrices in the batched input can have different ranks
                    # we compute residuals only if all matrices have rank == n
                    # see https://github.com/pytorch/pytorch/issues/56483
                    if m > n:
                        if torch.all(rank_1d == n):
                            self.assertEqual(
                                residuals, select_if_not_empty(residuals_2d, i), atol=1e-5, rtol=1e-5, exact_dtype=False
                            )
                        else:
                            self.assertTrue(residuals_2d.numel() == 0)
            else:
                # Empty input: only output shapes can be validated.
                self.assertEqual(res.solution.shape, (*a.shape[:-2], n, nrhs))
                self.assertEqual(res.rank.shape, a.shape[:-2])

                # residuals are not always computed (and have non-zero shape)
                if m > n and driver != "gelsy":
                    self.assertEqual(res.residuals.shape, (*a.shape[:-2], 0))
                else:
                    self.assertEqual(res.residuals.shape, (0, ))

                # singular_values are not always computed (and have non-zero shape)
                if driver == "default" or driver == "gelsd" or driver == "gelss":
                    self.assertEqual(res.singular_values.shape, (*a.shape[:-2], min(m, n)))
                else:
                    self.assertEqual(res.singular_values.shape, (0, ))

        def check_correctness_scipy(a, b, res, driver, cond):
            # SciPy provides 3 driver options: gelsd, gelss, gelsy
            if TEST_SCIPY and driver in ('gelsd', 'gelss', 'gelsy'):
                import scipy.linalg

                def scipy_ref(a, b):
                    return scipy.linalg.lstsq(a, b, lapack_driver=driver, cond=cond)
                check_correctness_ref(a, b, res, scipy_ref, driver=driver)

        def check_correctness_numpy(a, b, res, driver, rcond):
            # NumPy uses only gelsd routine
            if driver == 'gelsd':

                def numpy_ref(a, b):
                    return np.linalg.lstsq(a, b, rcond=rcond)
                check_correctness_ref(a, b, res, numpy_ref)

        version = torch.testing._internal.common_cuda._get_torch_cuda_version()
        cusolver_available = (version >= (10, 2))

        ms = [2 ** i for i in range(5)]
        m_ge_n_sizes = [(m, m // 2) for m in ms] + [(m, m) for m in ms]
        # cases m < n are only supported on CPU and for cuSOLVER path on CUDA
        m_l_n_sizes = [(m // 2, m) for m in ms]
        include_m_l_n_case = (cusolver_available or device == 'cpu')
        matrix_sizes = m_ge_n_sizes + (m_l_n_sizes if include_m_l_n_case else [])
        batches = [(), (2,), (2, 2), (2, 2, 2)]
        # we generate matrices with singular values sampled from a normal distribution,
        # that is why we use `cond=1.0`, the mean to cut roughly half of all
        # the singular values and compare whether torch.linalg.lstsq agrees with
        # SciPy and NumPy.
        # if rcond is True then set value for it based on the used algorithm
        # rcond == -1 or any other negative value forces LAPACK to use machine precision tolerance
        rconds = (None, True, -1)

        for batch, matrix_size, driver, rcond in itertools.product(batches, matrix_sizes, drivers, rconds):
            # keep the rcond value if it is None or -1, set the driver specific value if it is True
            if rcond and rcond != -1:
                if driver in ('gelss', 'gelsd'):
                    # SVD based algorithm; set to zero roughly half of all the singular values
                    rcond = 1.0
                else:
                    # driver == 'gelsy'
                    # QR based algorithm; setting the value too high might lead to non-unique solutions and flaky tests
                    # so we skip this case
                    continue

            # specifying rcond value has no effect for gels driver so no need to run the tests again
            if driver == 'gels' and rcond is not None:
                continue

            shape = batch + matrix_size
            a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device)
            b = torch.rand(*shape, dtype=dtype, device=device)

            m = a.size(-2)
            n = a.size(-1)
            res = torch.linalg.lstsq(a, b, rcond=rcond, driver=driver)
            sol = res.solution

            # Only checks gelsd, gelss, gelsy drivers
            check_correctness_scipy(a, b, res, driver, rcond)

            # Only checks gelsd driver
            check_correctness_numpy(a, b, res, driver, rcond)

            # gels driver is not checked by comparing to NumPy or SciPy implementation
            # because NumPy and SciPy do not implement this driver
            if driver == 'gels' and rcond is None:
                check_solution_correctness(a, b, sol)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_linalg_lstsq_batch_broadcasting(self, device, dtype):
from torch.testing._internal.common_utils import random_well_conditioned_matrix
def check_correctness(a, b):
sol = torch.linalg.lstsq(a, b).solution
sol2 = a.pinverse() @ b
self.assertEqual(sol, sol2, rtol=1e-5, atol=1e-5)
ms = [2 ** i for i in range(5)]
batches = [(), (0,), (2,), (2, 2), (2, 2, 2)]
# the case when a single matrix is batch-broadcasted over the rhs
for m, batch in itertools.product(ms, batches):
a = random_well_conditioned_matrix(m, m, dtype=dtype, device=device).view(*([1] * len(batch)), m, m)
b = torch.rand(*(batch + (m, m)), dtype=dtype, device=device)
check_correctness(a, b)
# cases with broadcastable shapes
for m in ms:
a = random_well_conditioned_matrix(1, 3, 1, 3, m, m, dtype=dtype, device=device)
b = torch.rand(3, 1, 3, 1, m, m // 2, dtype=dtype, device=device)
check_correctness(a, b)
# rhs are vectors, not matrices in this test
b = torch.rand(3, 1, 3, 1, m, dtype=dtype, device=device)
# unsqueeze for b because `check_correctness` checks against
# a.pinverse() @ b, which requires b to be a matrix
check_correctness(a, b.unsqueeze(-1))
a = random_well_conditioned_matrix(3, 1, 3, 1, m, m, dtype=dtype, device=device)
b = torch.rand(1, 3, 1, 3, m, m // 2, dtype=dtype, device=device)
check_correctness(a, b)
# rhs are vectors, not matrices in this test
b = torch.rand(1, 3, 1, 3, m, dtype=dtype, device=device)
check_correctness(a, b.unsqueeze(-1))
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_linalg_lstsq_input_checks(self, device, dtype):
        """Validate torch.linalg.lstsq input checking: empty inputs produce
        zero-sized solutions; shape/dtype/device mismatches and bad driver
        names raise RuntimeError.
        """
        # check empty inputs
        # empty batches
        a = torch.rand(0, 0, 3, 3, dtype=dtype, device=device)
        b = torch.rand(0, 0, 3, 2, dtype=dtype, device=device)
        self.assertEqual(
            torch.linalg.lstsq(a, b)[0],
            torch.zeros(0, 0, 3, 2, dtype=dtype, device=device)
        )
        # empty a and b
        a = torch.rand(2, 2, 0, 0, dtype=dtype, device=device)
        b = torch.rand(2, 2, 0, 0, dtype=dtype, device=device)
        self.assertEqual(
            torch.linalg.lstsq(a, b)[0],
            torch.zeros(2, 2, 0, 0, dtype=dtype, device=device)
        )
        # empty a and b
        a = torch.rand(2, 2, 3, 0, dtype=dtype, device=device)
        b = torch.rand(2, 2, 3, 0, dtype=dtype, device=device)
        self.assertEqual(
            torch.linalg.lstsq(a, b)[0],
            torch.zeros(2, 2, 0, 0, dtype=dtype, device=device)
        )
        # empty a but not b
        a = torch.rand(2, 2, 3, 0, dtype=dtype, device=device)
        b = torch.rand(2, 2, 3, 2, dtype=dtype, device=device)
        self.assertEqual(
            torch.linalg.lstsq(a, b)[0],
            torch.zeros(2, 2, 0, 2, dtype=dtype, device=device)
        )
        # empty a and b
        if torch.device(device).type == 'cpu':
            # only CPU since CUDA does not support overdetermined systems
            a = torch.rand(2, 2, 0, 3, dtype=dtype, device=device)
            b = torch.rand(2, 2, 0, 3, dtype=dtype, device=device)
            self.assertEqual(
                torch.linalg.lstsq(a, b)[0],
                torch.zeros(2, 2, 3, 3, dtype=dtype, device=device)
            )

        # Dimensionality / shape-compatibility errors.
        a = torch.rand(2, 3, dtype=dtype, device=device)
        b = torch.rand(3, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, 'input must have at least 2 dimensions'):
            torch.linalg.lstsq(b, b)
        with self.assertRaisesRegex(RuntimeError, 'other must have at least 1 dimension'):
            torch.linalg.lstsq(a, torch.tensor(1, dtype=dtype, device=device))
        with self.assertRaisesRegex(RuntimeError, r'input.size\(-2\) should match other.size\(-1\)'):
            torch.linalg.lstsq(a, b)
        with self.assertRaisesRegex(RuntimeError, r'input.size\(-2\) should match other.size\(-2\)'):
            torch.linalg.lstsq(a, b.unsqueeze(-1))

        def complement_device(device):
            # The "other" device, used to build a deliberate device mismatch.
            if device == 'cpu' and torch.cuda.is_available():
                return 'cuda'
            else:
                return 'cpu'

        a = torch.rand(2, 2, 2, 2, dtype=dtype, device=device)
        b = torch.rand(2, 2, 2, dtype=dtype, device=complement_device(device))
        if a.device != b.device:
            with self.assertRaisesRegex(RuntimeError, 'be on the same device'):
                torch.linalg.lstsq(a, b)

        # Mixed dtypes are rejected.
        b = (torch.rand(2, 2, 2, dtype=dtype, device=device) * 100).long()
        with self.assertRaisesRegex(RuntimeError, 'the same dtype'):
            torch.linalg.lstsq(a, b)

        # Invalid driver names: message differs between CPU and CUDA.
        a = torch.rand(2, 2, 2, 2, dtype=dtype, device=device)
        b = torch.rand(2, 2, 2, dtype=dtype, device=device)

        if device != 'cpu':
            with self.assertRaisesRegex(RuntimeError, '`driver` other than `gels` is not supported on CUDA'):
                torch.linalg.lstsq(a, b, driver='fictitious_driver')
        # if on cpu
        else:
            with self.assertRaisesRegex(RuntimeError, r'parameter `driver` should be one of \(gels, gelsy, gelsd, gelss\)'):
                torch.linalg.lstsq(a, b, driver='fictitious_driver')

        # cuSOLVER path supports underdetermined systems
        version = torch.testing._internal.common_cuda._get_torch_cuda_version()
        cusolver_not_available = (version < (10, 1))

        if device != 'cpu' and cusolver_not_available:
            a = torch.rand(2, 3, dtype=dtype, device=device)
            b = torch.rand(2, 1, dtype=dtype, device=device)
            with self.assertRaisesRegex(RuntimeError, r'only overdetermined systems'):
                torch.linalg.lstsq(a, b)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_cholesky(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
def run_test(shape, batch, contiguous):
A = random_hermitian_pd_matrix(shape, *batch, dtype=dtype, device=device)
if A.numel() > 0 and not contiguous:
A = A.mT
self.assertFalse(A.is_contiguous())
expected_L = np.linalg.cholesky(A.cpu().numpy())
actual_L = torch.linalg.cholesky(A)
# For fp32 individual entries in matrices can differ between PyTorch and NumPy
# Let's compare the norms of matrices instead
if A.numel() > 0 and dtype in [torch.float32, torch.complex64]:
# axis is specified to calculate matrix norm for batched input
expected_norm = np.linalg.norm(expected_L, ord=1, axis=(-2, -1))
actual_norm = torch.linalg.norm(actual_L, ord=1, axis=(-2, -1))
# Compare the norms with standard tolerances
self.assertEqual(actual_norm, expected_norm)
# and individual values with a higher tolerance
self.assertEqual(actual_L, expected_L, atol=1e-2, rtol=1e-5)
else:
self.assertEqual(actual_L, expected_L)
shapes = (0, 3, 5)
batches = ((), (3, ), (2, 2))
larger_input_case = [(100, (5, ), True)]
for shape, batch, contiguous in list(itertools.product(shapes, batches, (True, False))) + larger_input_case:
run_test(shape, batch, contiguous)
# check the out= variant
A = random_hermitian_pd_matrix(3, 3, dtype=dtype, device=device)
out = torch.empty_like(A)
ans = torch.linalg.cholesky(A, out=out)
self.assertEqual(ans, out)
expected = torch.linalg.cholesky(A)
self.assertEqual(expected, out)
# check the upper= variant
expected = torch.linalg.cholesky(A).mH
actual = torch.linalg.cholesky(A, upper=True)
self.assertEqual(expected, actual)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_cholesky_errors_and_warnings(self, device, dtype):
        """torch.linalg.cholesky error paths: non-square and <2-D input,
        non-positive-definite input (single and batched), out= resize
        warnings, and dtype/device mismatch errors.
        """
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix

        # cholesky requires the input to be a square matrix or batch of square matrices
        A = torch.randn(2, 3, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
            torch.linalg.cholesky(A)
        A = torch.randn(2, 2, 3, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
            torch.linalg.cholesky(A)
        # NumPy agrees that such input is invalid.
        with self.assertRaisesRegex(np.linalg.LinAlgError, r'Last 2 dimensions of the array must be square'):
            np.linalg.cholesky(A.cpu().numpy())

        # cholesky requires the input to be at least 2 dimensional tensor
        A = torch.randn(2, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, r'must have at least 2 dimensions'):
            torch.linalg.cholesky(A)
        with self.assertRaisesRegex(np.linalg.LinAlgError,
                                    r'1-dimensional array given\. Array must be at least two-dimensional'):
            np.linalg.cholesky(A.cpu().numpy())

        # if the input matrix is not positive definite, an error should be raised
        A = torch.eye(3, 3, dtype=dtype, device=device)
        A[-1, -1] = 0  # Now A is not positive definite
        with self.assertRaisesRegex(torch.linalg.LinAlgError, r'minor of order 3 is not positive-definite'):
            torch.linalg.cholesky(A)
        with self.assertRaisesRegex(np.linalg.LinAlgError, r'Matrix is not positive definite'):
            np.linalg.cholesky(A.cpu().numpy())

        # if at least one matrix in the batch is singular, an error should be raised
        A = torch.eye(3, 3, dtype=dtype, device=device)
        A = A.reshape((1, 3, 3))
        A = A.repeat(5, 1, 1)
        A[4, -1, -1] = 0  # Now A[4] is not positive definite
        with self.assertRaisesRegex(torch.linalg.LinAlgError, r'\(Batch element 4\): The factorization could not be completed'):
            torch.linalg.cholesky(A)

        # if out tensor with wrong shape is passed a warning is given
        A = random_hermitian_pd_matrix(3, dtype=dtype, device=device)
        out = torch.empty(2, 3, dtype=dtype, device=device)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.cholesky(A, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

        # dtypes should be safely castable
        out = torch.empty(*A.shape, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got int instead"):
            torch.linalg.cholesky(A, out=out)

        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, device=wrong_device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
                torch.linalg.cholesky(A, out=out)
# NOTE: old_cholesky* tests were moved here from test_torch.py and test_autograd.py
@slowTest
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_old_cholesky_batched_many_batches(self, device, dtype):
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
def cholesky_test_helper(n, batchsize, device, upper):
A = random_symmetric_pd_matrix(n, batchsize, dtype=dtype, device=device)
chol_fact = torch.cholesky(A, upper=upper)
if upper:
# Correctness check
self.assertEqual(A, chol_fact.mT.matmul(chol_fact))
# Upper triangular check
self.assertEqual(chol_fact, chol_fact.triu())
else:
# Correctness check
self.assertEqual(A, chol_fact.matmul(chol_fact.mT))
# Lower triangular check
self.assertEqual(chol_fact, chol_fact.tril())
for upper, batchsize in itertools.product([True, False], [262144, 524288]):
cholesky_test_helper(2, batchsize, device, upper)
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_old_cholesky_batched(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
def cholesky_test_helper(n, batch_dims, upper):
A = random_hermitian_pd_matrix(n, *batch_dims, dtype=dtype, device=device)
cholesky_exp = torch.stack([m.cholesky(upper=upper) for m in A.reshape(-1, n, n)])
cholesky_exp = cholesky_exp.reshape_as(A)
self.assertEqual(cholesky_exp, torch.cholesky(A, upper=upper))
for upper, batchsize in itertools.product([True, False], [(3,), (3, 4), (2, 3, 4)]):
cholesky_test_helper(3, batchsize, upper)
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@tf32_on_and_off(0.01)
def test_old_cholesky(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
A = random_hermitian_pd_matrix(10, dtype=dtype, device=device)
# default Case
C = torch.cholesky(A)
B = torch.mm(C, C.t().conj())
self.assertEqual(A, B, atol=1e-14, rtol=0)
# test Upper Triangular
U = torch.cholesky(A, True)
B = torch.mm(U.t().conj(), U)
self.assertEqual(A, B, atol=1e-14, rtol=0, msg='cholesky (upper) did not allow rebuilding the original matrix')
# test Lower Triangular
L = torch.cholesky(A, False)
B = torch.mm(L, L.t().conj())
self.assertEqual(A, B, atol=1e-14, rtol=0, msg='cholesky (lower) did not allow rebuilding the original matrix')
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_old_cholesky_empty(self, device, dtype):
def run_test(upper):
A = torch.empty(0, 0, dtype=dtype, device=device)
chol = torch.cholesky(A, upper)
chol_A = torch.matmul(chol, chol.t().conj())
self.assertEqual(A, chol_A)
for upper in [True, False]:
run_test(upper)
# Test for issue
# https://github.com/pytorch/pytorch/issues/57032
# torch.cholesky with upper=True for batched CUDA inputs was wrong
# it was using the lower triangular part instead of the upper one
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
def test_old_cholesky_batched_upper(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
batchsize = 2
A = random_hermitian_pd_matrix(3, batchsize, dtype=dtype, device=device)
A_triu = A.triu() # fill the lower triangular part with zero
U = torch.cholesky(A_triu, upper=True)
reconstruct_A = U.mH @ U
self.assertEqual(A, reconstruct_A)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_cholesky_ex(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
def run_test(n, batch):
A = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device)
expected_L = np.linalg.cholesky(A.cpu().numpy())
expected_info = torch.zeros(A.shape[:-2], dtype=torch.int32, device=device)
actual_L, actual_info = torch.linalg.cholesky_ex(A)
# For fp32 individual entries in matrices can differ between PyTorch and NumPy
# Let's compare the norms of matrices instead
if A.numel() > 0 and dtype in [torch.float32, torch.complex64]:
# axis is specified to calculate matrix norm for batched input
expected_norm = np.linalg.norm(expected_L, ord=1, axis=(-2, -1))
actual_norm = torch.linalg.norm(actual_L, ord=1, axis=(-2, -1))
# Compare the norms with standard tolerances
self.assertEqual(actual_norm, expected_norm)
# and individual values with a higher tolerance
self.assertEqual(actual_L, expected_L, atol=1e-2, rtol=1e-5)
else:
self.assertEqual(actual_L, expected_L)
self.assertEqual(actual_info, expected_info)
ns = (0, 3, 5)
batches = ((), (2, ), (2, 1))
for n, batch in itertools.product(ns, batches):
run_test(n, batch)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_cholesky_ex_non_pd(self, device, dtype):
# if the input matrix is not positive definite, info with positive integer is returned
A = torch.eye(3, 3, dtype=dtype, device=device)
A[-1, -1] = 0 # Now A is singular
_, info = torch.linalg.cholesky_ex(A)
self.assertEqual(info, 3)
with self.assertRaisesRegex(torch.linalg.LinAlgError, r'minor of order 3 is not positive-definite'):
torch.linalg.cholesky_ex(A, check_errors=True)
# if at least one matrix in the batch is not positive definite,
# batched info with positive integer for the corresponding matrix is returned
A = torch.eye(3, 3, dtype=dtype, device=device)
A = A.reshape((1, 3, 3))
A = A.repeat(5, 1, 1)
A[3, -2, -2] = 0 # Now A[3] is singular
_, info = torch.linalg.cholesky_ex(A)
expected_info = torch.zeros(A.shape[:-2], dtype=torch.int32, device=device)
expected_info[3] = 2
self.assertEqual(info, expected_info)
with self.assertRaisesRegex(torch.linalg.LinAlgError, r'\(Batch element 3\): The factorization could not be completed'):
torch.linalg.cholesky_ex(A, check_errors=True)
    def _test_addr_vs_numpy(self, device, dtype, beta=1, alpha=1):
        """Compare torch.addr against the NumPy formula
        ``beta * m + alpha * np.outer(a, b)`` for the given scalars.

        Also exercises the out= variant, transposed/zero-strided/scalar
        inputs, and (for beta == 0) that nan/inf in ``m`` do not leak into
        the result.
        """
        def check(m, a, b, beta, alpha):
            # np.outer lacks bfloat16 support; compare in double and relax
            # the dtype requirement instead.
            if dtype == torch.bfloat16:
                a_np = a.to(torch.double).cpu().numpy()
                b_np = b.to(torch.double).cpu().numpy()
                m_np = m.to(torch.double).cpu().numpy()
                exact_dtype = False
            else:
                a_np = a.cpu().numpy()
                b_np = b.cpu().numpy()
                m_np = m.cpu().numpy()
                exact_dtype = True
            # beta == 0 drops the m term entirely (so nan/inf in m vanish).
            if beta == 0:
                expected = alpha * np.outer(a_np, b_np)
            else:
                expected = beta * m_np + alpha * np.outer(a_np, b_np)

            res = torch.addr(m, a, b, beta=beta, alpha=alpha)
            self.assertEqual(res, expected, exact_dtype=exact_dtype)

            # Test out variant
            out = torch.empty_like(res)
            torch.addr(m, a, b, beta=beta, alpha=alpha, out=out)
            self.assertEqual(out, expected, exact_dtype=exact_dtype)

        m = make_tensor((50, 50), device=device, dtype=dtype, low=-2, high=2)
        a = make_tensor((50,), device=device, dtype=dtype, low=-2, high=2)
        b = make_tensor((50,), device=device, dtype=dtype, low=-2, high=2)

        check(m, a, b, beta, alpha)

        # test transpose
        m_transpose = torch.transpose(m, 0, 1)
        check(m_transpose, a, b, beta, alpha)

        # test 0 strided tensor
        zero_strided = make_tensor((1,), device=device, dtype=dtype, low=-2, high=2).expand(50)
        check(m, zero_strided, b, beta, alpha)

        # test scalar
        m_scalar = torch.tensor(1, device=device, dtype=dtype)
        check(m_scalar, a, b, beta, alpha)

        # test nans and infs are not propagated to the output when beta == 0
        float_and_complex_dtypes = floating_and_complex_types_and(torch.half, torch.bfloat16)
        if beta == 0 and dtype in float_and_complex_dtypes:
            m[0][10] = m[10][10] = m[20][20] = float('inf')
            m[1][10] = m[11][10] = m[21][20] = float('nan')
            check(m, a, b, 0, alpha)
@dtypes(torch.bool)
def test_addr_bool(self, device, dtype):
self._test_addr_vs_numpy(device, dtype, beta=True, alpha=False)
self._test_addr_vs_numpy(device, dtype, beta=False, alpha=True)
self._test_addr_vs_numpy(device, dtype, beta=False, alpha=False)
self._test_addr_vs_numpy(device, dtype, beta=True, alpha=True)
@dtypes(*integral_types())
def test_addr_integral(self, device, dtype):
with self.assertRaisesRegex(RuntimeError,
'argument beta must not be a floating point number.'):
self._test_addr_vs_numpy(device, dtype, beta=2., alpha=1)
with self.assertRaisesRegex(RuntimeError,
'argument alpha must not be a floating point number.'):
self._test_addr_vs_numpy(device, dtype, beta=2, alpha=1.)
with self.assertRaisesRegex(RuntimeError,
'Boolean beta only supported for Boolean results.'):
self._test_addr_vs_numpy(device, dtype, beta=True, alpha=1)
with self.assertRaisesRegex(RuntimeError,
'Boolean alpha only supported for Boolean results.'):
self._test_addr_vs_numpy(device, dtype, beta=2, alpha=True)
# when beta is zero
self._test_addr_vs_numpy(device, dtype, beta=0, alpha=2)
# when beta is not zero
self._test_addr_vs_numpy(device, dtype, beta=2, alpha=2)
@precisionOverride({torch.bfloat16: 1e-1})
@dtypes(*floating_and_complex_types_and(torch.half, torch.bfloat16))
def test_addr_float_and_complex(self, device, dtype):
with self.assertRaisesRegex(RuntimeError,
'Boolean beta only supported for Boolean results.'):
self._test_addr_vs_numpy(device, dtype, beta=True, alpha=1)
with self.assertRaisesRegex(RuntimeError,
'Boolean alpha only supported for Boolean results.'):
self._test_addr_vs_numpy(device, dtype, beta=2, alpha=True)
# when beta is zero
self._test_addr_vs_numpy(device, dtype, beta=0., alpha=2)
# when beta is not zero
self._test_addr_vs_numpy(device, dtype, beta=0.5, alpha=2)
if dtype in complex_types():
self._test_addr_vs_numpy(device, dtype, beta=(0 + 0.1j), alpha=(0.2 - 0.2j))
@dtypes(*itertools.product(all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
                           all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool)))
def test_outer_type_promotion(self, device, dtypes):
    """outer/ger (function and method forms) must follow torch.result_type."""
    lhs = torch.randn(5).to(device=device, dtype=dtypes[0])
    rhs = torch.randn(5).to(device=device, dtype=dtypes[1])
    expected_dtype = torch.result_type(lhs, rhs)
    for op in (torch.outer, torch.Tensor.outer, torch.ger, torch.Tensor.ger):
        self.assertEqual(op(lhs, rhs).dtype, expected_dtype)
# don't use @dtypes decorator to avoid generating ~1700 tests per device
def test_addr_type_promotion(self, device):
    """addr must promote its three operands' dtypes pairwise via promote_types."""
    candidate_dtypes = all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool)
    for dtypes0, dtypes1, dtypes2 in product(candidate_dtypes, repeat=3):
        vec1 = make_tensor((5,), device=device, dtype=dtypes0, low=-2, high=2)
        vec2 = make_tensor((5,), device=device, dtype=dtypes1, low=-2, high=2)
        mat = make_tensor((5, 5), device=device, dtype=dtypes2, low=-2, high=2)
        desired_dtype = torch.promote_types(torch.promote_types(dtypes0, dtypes1), dtypes2)
        for op in (torch.addr, torch.Tensor.addr):
            self.assertEqual(op(mat, vec1, vec2).dtype, desired_dtype)
# Tests migrated from test_torch.py
# 1) test the shape of the result tensor when there is empty input tensor
# 2) test the Runtime Exception when there is scalar input tensor
def test_outer_ger_addr_legacy_tests(self, device):
    # Empty operands: the result keeps the (possibly zero-sized) outer shape.
    for size in ((0, 0), (0, 5), (5, 0)):
        lhs = torch.rand(size[0], device=device)
        rhs = torch.rand(size[1], device=device)
        self.assertEqual(torch.outer(lhs, rhs).shape, size)
        self.assertEqual(torch.ger(lhs, rhs).shape, size)
        mat = torch.empty(size, device=device)
        self.assertEqual(torch.addr(mat, lhs, rhs).shape, size)
    # Scalar operands: every variant must raise.
    mat = torch.randn(5, 6, device=device)
    vec = torch.randn(5, device=device)
    scalar = torch.tensor(6, device=device)
    for call in (lambda: torch.outer(vec, scalar),
                 lambda: torch.outer(scalar, vec),
                 lambda: torch.ger(vec, scalar),
                 lambda: torch.ger(scalar, vec),
                 lambda: torch.addr(mat, vec, scalar),
                 lambda: torch.addr(mat, scalar, vec)):
        self.assertRaises(RuntimeError, call)
# Tests torch.det and its alias, torch.linalg.det, vs. NumPy
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double, torch.cdouble)
def test_det(self, device, dtype):
    """Compare torch.det / Tensor.det / torch.linalg.det against np.linalg.det."""
    tensors = (
        torch.randn((2, 2), device=device, dtype=dtype),
        torch.randn((129, 129), device=device, dtype=dtype),
        torch.randn((3, 52, 52), device=device, dtype=dtype),
        torch.randn((4, 2, 26, 26), device=device, dtype=dtype))

    ops = (torch.det, torch.Tensor.det,
           torch.linalg.det)
    for t in tensors:
        expected = np.linalg.det(t.cpu().numpy())
        for op in ops:
            actual = op(t)
            self.assertEqual(actual, expected)
            self.compare_with_numpy(op, np.linalg.det, t)

    # NOTE: det requires a 2D+ tensor
    # BUG FIX: the original ran this check with the stale loop variable `op`,
    # so only the last op in `ops` was exercised; check every op rejects 1D input.
    t = torch.randn(1, device=device, dtype=dtype)
    for op in ops:
        with self.assertRaises(RuntimeError):
            op(t)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
def test_eigh(self, device, dtype):
    """Check torch.linalg.eigh (values, vectors, out= variant) against NumPy."""
    from torch.testing._internal.common_utils import random_hermitian_matrix

    def run_test(shape, batch, uplo):
        matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
        expected_w, expected_v = np.linalg.eigh(matrix.cpu().numpy(), UPLO=uplo)
        actual_w, actual_v = torch.linalg.eigh(matrix, UPLO=uplo)
        self.assertEqual(actual_w, expected_w)
        # sign of eigenvectors is not unique and therefore absolute values are compared
        self.assertEqual(abs(actual_v), abs(expected_v))
        # additionally we can multiply the eigenvector with a phase factor e^{i\phi} and then compare the values
        # let's choose the convention that the first element of the eigenvectors from torch and numpy be the same
        # for real inputs, this phase factor is plus or minus one
        if matrix.numel() > 0:
            # phase[i] rotates torch's i-th eigenvector so its first entry matches NumPy's
            phase = torch.from_numpy(expected_v[..., 0, :]).to(device=device).div(actual_v[..., 0, :])
            actual_v_rotated = actual_v * phase.unsqueeze(-2).expand_as(actual_v)
            self.assertEqual(actual_v_rotated, expected_v)

        # check the out= variant
        out_w = torch.empty_like(actual_w)
        out_v = torch.empty_like(actual_v)
        ans_w, ans_v = torch.linalg.eigh(matrix, UPLO=uplo, out=(out_w, out_v))
        self.assertEqual(ans_w, out_w)
        self.assertEqual(ans_v, out_v)
        self.assertEqual(ans_w, actual_w)
        self.assertEqual(abs(ans_v), abs(actual_v))

    # shape 0 exercises empty matrices; batches cover no-batch, 1-D and 2-D batching
    shapes = (0, 3, 5)
    batches = ((), (3, ), (2, 2))
    uplos = ["U", "L"]
    for shape, batch, uplo in itertools.product(shapes, batches, uplos):
        run_test(shape, batch, uplo)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
def test_eigh_lower_uplo(self, device, dtype):
    """eigh must also accept lower-case 'u'/'l' for its UPLO argument."""
    def run_test(shape, batch, uplo):
        # check lower case uplo
        # use non-symmetric input to check whether uplo argument is working as intended
        # NOTE(review): dims here are (shape, shape, *batch), unlike the
        # (*batch, shape, shape) layout used by random_hermitian_matrix elsewhere;
        # eigh treats the last two dims as the matrices either way — confirm intended.
        matrix = torch.randn(shape, shape, *batch, dtype=dtype, device=device)
        expected_w, expected_v = np.linalg.eigh(matrix.cpu().numpy(), UPLO=uplo)
        actual_w, actual_v = torch.linalg.eigh(matrix, UPLO=uplo)
        self.assertEqual(actual_w, expected_w)
        # eigenvector sign is not unique, so only magnitudes are compared
        self.assertEqual(abs(actual_v), abs(expected_v))

    uplos = ["u", "l"]
    for uplo in uplos:
        run_test(3, (2, 2), uplo)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_eigh_errors_and_warnings(self, device, dtype):
    """Validate eigh's error messages, out= resize warnings, and dtype/device checks."""
    from torch.testing._internal.common_utils import random_hermitian_matrix

    # eigh requires a square matrix
    t = torch.randn(2, 3, device=device, dtype=dtype)
    with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
        torch.linalg.eigh(t)

    # eigh requires 'uplo' parameter to be 'U' or 'L'
    t = torch.randn(3, 3, device=device, dtype=dtype)
    for uplo in ["a", "wrong"]:
        with self.assertRaisesRegex(RuntimeError, "be \'L\' or \'U\'"):
            torch.linalg.eigh(t, UPLO=uplo)
        with self.assertRaisesRegex(ValueError, "be \'L\' or \'U\'"):
            np.linalg.eigh(t.cpu().numpy(), UPLO=uplo)

    # if non-empty out tensor with wrong shape is passed a warning is given
    a = random_hermitian_matrix(3, dtype=dtype, device=device)
    # eigenvalues of a Hermitian input are real, so out_w uses the real dtype
    real_dtype = a.real.dtype if dtype.is_complex else dtype
    out_w = torch.empty(7, 7, dtype=real_dtype, device=device)
    out_v = torch.empty(7, 7, dtype=dtype, device=device)
    with warnings.catch_warnings(record=True) as w:
        # Trigger warning
        torch.linalg.eigh(a, out=(out_w, out_v))
        # Check warning occurs: one resize warning per out tensor
        self.assertEqual(len(w), 2)
        self.assertTrue("An output with one or more elements was resized" in str(w[-2].message))
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

    # dtypes should be safely castable
    out_w = torch.empty(0, dtype=real_dtype, device=device)
    out_v = torch.empty(0, dtype=torch.int, device=device)
    with self.assertRaisesRegex(RuntimeError, "but got int instead"):
        torch.linalg.eigh(a, out=(out_w, out_v))

    out_w = torch.empty(0, dtype=torch.int, device=device)
    out_v = torch.empty(0, dtype=dtype, device=device)
    with self.assertRaisesRegex(RuntimeError, "but got int instead"):
        torch.linalg.eigh(a, out=(out_w, out_v))

    # device should match
    if torch.cuda.is_available():
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        out_w = torch.empty(0, device=wrong_device, dtype=dtype)
        out_v = torch.empty(0, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.linalg.eigh(a, out=(out_w, out_v))
        out_w = torch.empty(0, device=device, dtype=dtype)
        out_v = torch.empty(0, device=wrong_device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.linalg.eigh(a, out=(out_w, out_v))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
def test_eigvalsh(self, device, dtype):
    """Compare torch.linalg.eigvalsh (and its out= variant) against NumPy."""
    from torch.testing._internal.common_utils import random_hermitian_matrix

    def check_case(shape, batch, uplo):
        matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
        reference = np.linalg.eigvalsh(matrix.cpu().numpy(), UPLO=uplo)
        computed = torch.linalg.eigvalsh(matrix, UPLO=uplo)
        self.assertEqual(computed, reference)
        # the out= variant must fill the provided tensor and return it
        preallocated = torch.empty_like(computed)
        returned = torch.linalg.eigvalsh(matrix, UPLO=uplo, out=preallocated)
        self.assertEqual(returned, preallocated)
        self.assertEqual(returned, computed)

    # empty (0), small and medium matrices; no-batch, 1-D and 2-D batch shapes
    for shape, batch, uplo in itertools.product((0, 3, 5), ((), (3, ), (2, 2)), ["U", "L"]):
        check_case(shape, batch, uplo)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_eigvalsh_errors_and_warnings(self, device, dtype):
    """Validate eigvalsh's error messages, out= resize warning, and dtype/device checks."""
    # eigvalsh requires a square matrix
    t = torch.randn(2, 3, device=device, dtype=dtype)
    with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
        torch.linalg.eigvalsh(t)

    # eigvalsh requires 'uplo' parameter to be 'U' or 'L'
    t = torch.randn(3, 3, device=device, dtype=dtype)
    for uplo in ["a", "wrong"]:
        with self.assertRaisesRegex(RuntimeError, "be \'L\' or \'U\'"):
            torch.linalg.eigvalsh(t, UPLO=uplo)
        with self.assertRaisesRegex(ValueError, "be \'L\' or \'U\'"):
            np.linalg.eigvalsh(t.cpu().numpy(), UPLO=uplo)

    # if non-empty out tensor with wrong shape is passed a warning is given
    # eigenvalues of a Hermitian input are real, so out uses the real dtype
    real_dtype = t.real.dtype if dtype.is_complex else dtype
    out = torch.empty_like(t).to(real_dtype)
    with warnings.catch_warnings(record=True) as w:
        # Trigger warning
        torch.linalg.eigvalsh(t, out=out)
        # Check warning occurs
        self.assertEqual(len(w), 1)
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

    # dtypes should be safely castable
    out = torch.empty(0, dtype=torch.int, device=device)
    with self.assertRaisesRegex(RuntimeError, "but got int instead"):
        torch.linalg.eigvalsh(t, out=out)

    # device should match
    if torch.cuda.is_available():
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        out = torch.empty(0, device=wrong_device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.linalg.eigvalsh(t, out=out)
@dtypes(*floating_and_complex_types())
def test_kron(self, device, dtype):
    """torch.kron must match np.kron across shape pairs, including out=."""
    def check(a_shape, b_shape):
        first = torch.rand(a_shape, dtype=dtype, device=device)
        second = torch.rand(b_shape, dtype=dtype, device=device)
        reference = np.kron(first.cpu().numpy(), second.cpu().numpy())
        computed = torch.kron(first, second)
        self.assertEqual(computed, reference)
        # check the out= variant
        preallocated = torch.empty_like(computed)
        returned = torch.kron(first, second, out=preallocated)
        self.assertEqual(returned, preallocated)
        self.assertEqual(returned, computed)

    shapes = [(4,), (2, 2), (1, 2, 3), (1, 2, 3, 3)]
    for a_shape, b_shape in itertools.product(shapes, reversed(shapes)):
        check(a_shape, b_shape)
@dtypes(*floating_and_complex_types())
def test_kron_empty(self, device, dtype):
    """kron with an empty operand must yield an empty result of the right shape."""
    identity = torch.eye(3, dtype=dtype, device=device)
    for empty_shape in [(0,), (2, 0), (1, 0, 3)]:
        empty = torch.empty(empty_shape, dtype=dtype, device=device)
        computed = torch.kron(identity, empty)
        reference = np.kron(identity.cpu().numpy(), empty.cpu().numpy())
        self.assertEqual(computed, reference)
        # NumPy doesn't work if the first argument is empty, so only shapes are compared
        self.assertEqual(torch.kron(empty, identity).shape, reference.shape)
@dtypes(*floating_and_complex_types())
def test_kron_errors_and_warnings(self, device, dtype):
    """kron must warn on out= resize and reject non-castable out dtypes."""
    first = torch.eye(3, dtype=dtype, device=device)
    second = torch.ones((2, 2), dtype=dtype, device=device)
    # a non-empty out tensor of the wrong shape triggers a resize warning
    out = torch.empty_like(first)
    with warnings.catch_warnings(record=True) as w:
        torch.kron(first, second, out=out)
        self.assertEqual(len(w), 1)
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
    # the result cannot be downcast into an integer out tensor
    out = torch.empty_like(first).to(torch.int)
    with self.assertRaisesRegex(RuntimeError, "can't be cast to the desired output type"):
        torch.kron(first, second, out=out)
# This test confirms that torch.linalg.norm's dtype argument works
# as expected, according to the function's documentation
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble, torch.bfloat16, torch.float16)
def test_norm_dtype(self, device, dtype):
    """Check linalg.norm's dtype= argument against casting the input up front."""
    make_arg = partial(make_tensor, dtype=dtype, device=device)

    def run_test_case(input_size, ord, keepdim, to_dtype):
        msg = (
            f'input_size={input_size}, ord={ord}, keepdim={keepdim}, '
            f'dtype={dtype}, to_dtype={to_dtype}')
        input = make_arg(input_size)
        result = torch.linalg.norm(input, ord, keepdim=keepdim)
        # the default result dtype is the input's real dtype
        self.assertEqual(result.dtype, input.real.dtype, msg=msg)

        result_out = torch.empty((0), dtype=result.dtype, device=device)
        torch.linalg.norm(input, ord, keepdim=keepdim, out=result_out)
        self.assertEqual(result, result_out, msg=msg)

        # dtype=to_dtype must be equivalent to casting the input first
        result = torch.linalg.norm(input.to(to_dtype), ord, keepdim=keepdim)
        result_with_dtype = torch.linalg.norm(input, ord, keepdim=keepdim, dtype=to_dtype)
        self.assertEqual(result, result_with_dtype, msg=msg)

        result_out_with_dtype = torch.empty_like(result_with_dtype)
        torch.linalg.norm(input, ord, keepdim=keepdim, dtype=to_dtype, out=result_out_with_dtype)
        self.assertEqual(result_with_dtype, result_out_with_dtype, msg=msg)

    ord_vector = [0, 1, -1, 2, -2, 3, -3, 4.5, -4.5, inf, -inf, None]
    # In these orders we are computing the 10-th power and 10-th root of numbers.
    # We avoid them for half-precision types as it makes the tests above too badly conditioned
    if dtype != torch.float16 and dtype != torch.bfloat16:
        ord_vector.extend([0.1, -0.1])
    ord_matrix = ['fro', 'nuc', 1, -1, 2, -2, inf, -inf, None]
    S = 10

    # dtype= candidates: same (real/complex) family, equal or wider precision
    if dtype == torch.cfloat:
        norm_dtypes = (torch.cfloat, torch.cdouble)
    elif dtype == torch.cdouble:
        norm_dtypes = (torch.cdouble,)
    elif dtype in (torch.float16, torch.bfloat16, torch.float):
        norm_dtypes = (torch.float, torch.double)
    elif dtype == torch.double:
        norm_dtypes = (torch.double,)
    else:
        raise RuntimeError("Unsupported dtype")

    for ord, keepdim, norm_dtype in product(ord_vector, (True, False), norm_dtypes):
        run_test_case((S,) , ord, keepdim, norm_dtype)

    for ord, keepdim, norm_dtype in product(ord_matrix, (True, False), norm_dtypes):
        if ord in [2, -2, 'nuc']:
            # We need torch.svdvals
            if dtype == torch.float16 or dtype == torch.bfloat16:
                continue
            # We need LAPACK or equivalent
            if ((torch.device(device).type == 'cuda' and not torch.cuda.has_magma and not has_cusolver()) or
               (torch.device(device).type == 'cpu' and not torch._C.has_lapack)):
                continue
        run_test_case((S, S) , ord, keepdim, norm_dtype)
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble, torch.bfloat16, torch.float16)
def test_vector_norm(self, device, dtype):
    # This test compares torch.linalg.vector_norm's output with
    # torch.linalg.norm given a flattened tensor
    ord_vector = [0, 0.9, 1, 2, 3, inf, -0.5, -1, -2, -3, -inf]
    input_sizes = [
        (10, ),
        (4, 5),
        (3, 4, 5),
        (0, ),
        (0, 10),
        (0, 0),
        (10, 0, 10),
    ]

    def vector_norm_reference(input, ord, dim=None, keepdim=False, dtype=None):
        """Emulate vector_norm via linalg.norm on a flattened input."""
        if dim is None:
            input_maybe_flat = input.flatten(0, -1)
        else:
            input_maybe_flat = input

        result = torch.linalg.norm(input_maybe_flat, ord, dim=dim, keepdim=keepdim, dtype=dtype)
        if keepdim and dim is None:
            # restore a broadcastable all-ones shape for the flattened case
            result = result.reshape([1] * input.dim())
        return result

    def run_test_case(input, ord, dim, keepdim, norm_dtype):
        if (input.numel() == 0 and
            (ord < 0. or ord == inf) and
           (dim is None or input.shape[dim] == 0)):
            # The operation does not have an identity.
            error_msg = "linalg.vector_norm cannot compute"
            with self.assertRaisesRegex(RuntimeError, error_msg):
                torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim)
        else:
            msg = (f'input.size()={input.size()}, ord={ord}, dim={dim}, '
                   f'keepdim={keepdim}, dtype={dtype}, norm_dtype={norm_dtype}')
            result_dtype_reference = vector_norm_reference(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)
            result_dtype = torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)
            if dtype.is_complex:
                # norms are real-valued; compare against the reference's real part
                result_dtype_reference = result_dtype_reference.real
            self.assertEqual(result_dtype, result_dtype_reference, msg=msg)

            if norm_dtype is not None:
                # dtype= must be equivalent to casting the input first
                ref = torch.linalg.vector_norm(input.to(norm_dtype), ord, dim=dim, keepdim=keepdim)
                actual = torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)
                self.assertEqual(ref, actual, msg=msg)

    # dtype= candidates: same (real/complex) family, equal or wider precision
    if dtype == torch.cfloat:
        norm_dtypes = (None, torch.cfloat, torch.cdouble)
    elif dtype == torch.cdouble:
        norm_dtypes = (None, torch.cdouble)
    elif dtype in (torch.float16, torch.bfloat16, torch.float):
        norm_dtypes = (None, torch.float, torch.double)
    elif dtype == torch.double:
        norm_dtypes = (None, torch.double)
    else:
        raise RuntimeError("Unsupported dtype")

    for input_size, ord, keepdim, norm_dtype in product(input_sizes, ord_vector, [True, False], norm_dtypes):
        input = make_tensor(input_size, dtype=dtype, device=device, low=-9, high=9)
        # dim=None plus one randomly chosen valid dim per size
        for dim in [None, random.randint(0, len(input_size) - 1)]:
            run_test_case(
                input,
                ord,
                dim,
                keepdim,
                norm_dtype)
def test_vector_norm_dim_tuple_arg(self, device):
    """vector_norm's dim= must accept tuples/lists and validate their contents."""
    test_cases = [
        # input size, dim, error, error message
        ((4, ), (0, ), None, None),
        ((4, ), (1, ), IndexError, r'Dimension out of range'),
        ((4, ), (-2, ), IndexError, r'Dimension out of range'),
        ((4, 3), (0, -1), None, None),
        ((4, 3), (0, 0), RuntimeError, r'dim 0 appears multiple times in the list of dims'),
        ((4, 3), (0, -2), RuntimeError, r'dim 0 appears multiple times in the list of dims'),
        ((4, 3), (0, 1.0), TypeError, r"argument 'dim' must be tuple of ints"),
        ((4, 3), (None, ), TypeError, r"argument 'dim' must be tuple of ints"),
    ]
    for input_size, dim_tuple, error, error_msg in test_cases:
        input = torch.randn(input_size, device=device)
        # vector_norm should accept a tuple or a list for dim arg
        for dim in [dim_tuple, list(dim_tuple)]:
            if error is None:
                torch.linalg.vector_norm(input, dim=dim)
            else:
                # BUG FIX: error_msg was defined per case but never used;
                # assert the message too, not just the exception type.
                with self.assertRaisesRegex(error, error_msg):
                    torch.linalg.vector_norm(input, dim=dim)
# This test compares torch.linalg.norm and numpy.linalg.norm to ensure that
# their vector norm results match
@dtypes(torch.float, torch.double)
def test_norm_vector(self, device, dtype):
    """Vector norms from torch.linalg.norm (and out= variant) must agree with NumPy."""
    def run_test_case(input, ord, dim, keepdim):
        # BUG FIX: the parameter was named `p` but the body silently used the
        # enclosing loop's `ord`; name and use the parameter consistently.
        result = torch.linalg.norm(input, ord, dim, keepdim)
        input_numpy = input.cpu().numpy()
        result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
        msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
        self.assertEqual(result, result_numpy, msg=msg)

        # the out= variant must produce the same values
        result_out = torch.empty_like(result)
        torch.linalg.norm(input, ord, dim, keepdim, out=result_out)
        self.assertEqual(result, result_out, msg=msg)

    ord_vector = [0, 1, -1, 2, -2, 3, -3, 4.5, -4.5, inf, -inf]
    S = 10
    test_cases = [
        # input size, p settings, dim
        ((S, ), ord_vector, None),
        ((S, ), ord_vector, 0),
        ((S, S, S), ord_vector, 0),
        ((S, S, S), ord_vector, 1),
        ((S, S, S), ord_vector, 2),
        ((S, S, S), ord_vector, -1),
        ((S, S, S), ord_vector, -2),
    ]
    # a large 1D input additionally exercises multi-block reductions
    L = 1_000_000
    if dtype == torch.double:
        test_cases.append(((L, ), ord_vector, None))
    for keepdim in [True, False]:
        for input_size, ord_settings, dim in test_cases:
            input = torch.randn(*input_size, dtype=dtype, device=device)
            for ord in ord_settings:
                run_test_case(input, ord, dim, keepdim)
# This test compares torch.linalg.norm, torch.linalg.matrix_norm and numpy.linalg.norm to
# ensure that their matrix norm results match.
@skipMeta  # https://github.com/pytorch/pytorch/issues/54082
@skipCUDAIfNoMagma
@dtypes(torch.float, torch.double)
@precisionOverride({torch.float32: 2e-4})
def test_norm_matrix(self, device, dtype):
    """Matrix norms from linalg.norm / linalg.matrix_norm must agree with NumPy."""
    make_arg = partial(make_tensor, dtype=dtype, device=device)

    def run_test_case(input, ord, dim, keepdim):
        msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
        # BUG FIX: the norm was previously computed twice (identical calls
        # before and after the NumPy reference); compute it once.
        result = torch.linalg.norm(input, ord, dim, keepdim)
        input_numpy = input.cpu().numpy()
        result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
        self.assertEqual(result, result_numpy, msg=msg)
        # matrix_norm requires an explicit ord and a 2-tuple dim
        if ord is not None and dim is not None:
            result = torch.linalg.matrix_norm(input, ord, dim, keepdim)
            self.assertEqual(result, result_numpy, msg=msg)

    ord_matrix = [1, -1, 2, -2, inf, -inf, 'nuc', 'fro']
    S = 10
    test_cases = [
        # input size, dim
        ((S, S), None),
        ((S, S), (0, 1)),
        ((S, S), (1, 0)),
        ((S, S, S, S), (2, 0)),
        ((S, S, S, S), (-1, -2)),
        ((S, S, S, S), (-1, -3)),
        ((S, S, S, S), (-3, 2)),
    ]
    for (shape, dim), keepdim, ord in product(test_cases, [True, False], ord_matrix):
        if ord in [2, -2, 'nuc']:
            # We need torch.svdvals
            if dtype == torch.float16 or dtype == torch.bfloat16:
                continue
            # We need LAPACK or equivalent
            if ((torch.device(device).type == 'cuda' and not torch.cuda.has_magma and not has_cusolver()) or
               (torch.device(device).type == 'cpu' and not torch._C.has_lapack)):
                continue
        run_test_case(make_arg(shape), ord, dim, keepdim)
@onlyCUDA
@dtypes(torch.bfloat16, torch.float16)
def test_norm_fused_type_promotion(self, device, dtype):
    """norm with dtype= on half/bfloat16 input must not emit a separate aten::to cast."""
    x = torch.randn(10, device=device, dtype=dtype)

    def profile_and_check(fn, x, kwargs, fn_name):
        with torch.profiler.profile(activities=(torch.profiler.ProfilerActivity.CPU,)) as p:
            fn(x, **kwargs, dtype=torch.float)
        # smoke check that profiler returned some events
        self.assertTrue(fn_name in map(lambda e: e.name, p.events()))
        # test that there was no explicit copy
        self.assertFalse("aten::to" in map(lambda e: e.name, p.events()))

    # torch.norm takes p=, vector_norm takes ord positionally (default 2 here)
    for f, kwargs, fn_name in zip((torch.norm, torch.linalg.vector_norm), ({"p" : 2}, {}),
                                  ("aten::norm", "aten::linalg_vector_norm")):
        profile_and_check(f, x, kwargs, fn_name)
@skipMeta  # https://github.com/pytorch/pytorch/issues/53739
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3})
def test_cond(self, device, dtype):
    """Compare torch.linalg.cond against np.linalg.cond across norm types and shapes."""
    def run_test_case(input, p):
        result = torch.linalg.cond(input, p)
        result_numpy = np.linalg.cond(input.cpu().numpy(), p)
        self.assertEqual(result, result_numpy, rtol=1e-2, atol=self.precision, exact_dtype=False)
        self.assertEqual(result.shape, result_numpy.shape)
        # test out= variant
        out = torch.empty_like(result)
        ans = torch.linalg.cond(input, p, out=out)
        self.assertEqual(ans, out)
        self.assertEqual(ans, result)

    norm_types = [1, -1, 2, -2, inf, -inf, 'fro', 'nuc', None]
    input_sizes = [(32, 32), (2, 3, 3, 3)]
    for input_size in input_sizes:
        input = torch.randn(*input_size, dtype=dtype, device=device)
        for p in norm_types:
            run_test_case(input, p)

    # test empty batch sizes
    input_sizes = [(0, 3, 3), (0, 2, 5, 5)]
    for input_size in input_sizes:
        input = torch.randn(*input_size, dtype=dtype, device=device)
        for p in norm_types:
            run_test_case(input, p)

    # test non-square input (only p in {2, -2, None} is exercised here)
    input_sizes = [(16, 32), (32, 16), (2, 3, 5, 3), (2, 3, 3, 5)]
    for input_size in input_sizes:
        input = torch.randn(*input_size, dtype=dtype, device=device)
        for p in [2, -2, None]:
            run_test_case(input, p)

    # test for singular input
    a = torch.eye(3, dtype=dtype, device=device)
    a[-1, -1] = 0  # make 'a' singular
    for p in norm_types:
        try:
            run_test_case(a, p)
        except np.linalg.LinAlgError:
            # Numpy may fail to converge for some BLAS backends (although this is very rare)
            # See the discussion in https://github.com/pytorch/pytorch/issues/67675
            pass

    # test for 0x0 matrices. NumPy doesn't work for such input, we return 0
    input_sizes = [(0, 0), (2, 5, 0, 0)]
    for input_size in input_sizes:
        input = torch.randn(*input_size, dtype=dtype, device=device)
        for p in ['fro', 2]:
            # NOTE(review): `a.real.dtype` reuses `a` from the singular-input block
            # above; it shares `input`'s dtype so the value is correct, but
            # `input.real.dtype` would be clearer — confirm intent.
            expected_dtype = a.real.dtype if dtype.is_complex else dtype
            expected = torch.zeros(input_size[:-2], dtype=expected_dtype, device=device)
            actual = torch.linalg.cond(input, p)
            self.assertEqual(actual, expected)
@skipMeta  # https://github.com/pytorch/pytorch/issues/53739
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3})
def test_cond_errors_and_warnings(self, device, dtype):
    """Validate cond's dimension/squareness checks, out= handling, and singular batches."""
    norm_types = [1, -1, 2, -2, inf, -inf, 'fro', 'nuc', None]

    # cond expects the input to be at least 2-dimensional
    a = torch.ones(3, dtype=dtype, device=device)
    for p in norm_types:
        with self.assertRaisesRegex(RuntimeError, r'at least 2 dimensions'):
            torch.linalg.cond(a, p)

    # for some norm types cond expects the input to be square
    a = torch.ones(3, 2, dtype=dtype, device=device)
    norm_types = [1, -1, inf, -inf, 'fro', 'nuc']
    for p in norm_types:
        with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
            torch.linalg.cond(a, p)

    # if non-empty out tensor with wrong shape is passed a warning is given
    a = torch.ones((2, 2), dtype=dtype, device=device)
    for p in ['fro', 2]:
        # cond's result is real-valued even for complex inputs
        real_dtype = a.real.dtype if dtype.is_complex else dtype
        out = torch.empty(a.shape, dtype=real_dtype, device=device)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.cond(a, p, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

    # dtypes should be safely castable
    out = torch.empty(0, dtype=torch.int, device=device)
    for p in ['fro', 2]:
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.linalg.cond(a, p, out=out)

    # device should match
    if torch.cuda.is_available():
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        out = torch.empty(0, dtype=dtype, device=wrong_device)
        for p in ['fro', 2]:
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.cond(a, p, out=out)

    # for batched input if at least one matrix in the batch is not invertible,
    # we can't get the result for all other (possibly) invertible matrices in the batch without an explicit for loop.
    # this should change when at::inverse works with silent errors
    # NumPy works fine in this case because it's possible to silence the error and get the inverse matrix results
    # possibly filled with NANs
    batch_dim = 3
    a = torch.eye(3, 3, dtype=dtype, device=device)
    a = a.reshape((1, 3, 3))
    a = a.repeat(batch_dim, 1, 1)
    a[1, -1, -1] = 0  # now a[1] is singular
    for p in [1, -1, inf, -inf, 'fro', 'nuc']:
        result = torch.linalg.cond(a, p)
        self.assertEqual(result[1], float('inf'))

    # check invalid norm type
    a = torch.ones(3, 3, dtype=dtype, device=device)
    for p in ['wrong_norm', 5]:
        with self.assertRaisesRegex(RuntimeError, f"linalg.cond got an invalid norm type: {p}"):
            torch.linalg.cond(a, p)
# This test calls torch.linalg.norm and numpy.linalg.norm with illegal arguments
# to ensure that they both throw errors
@dtypes(torch.float, torch.double)
def test_norm_errors(self, device, dtype):
    """Both torch.linalg.norm and np.linalg.norm must reject invalid arguments."""
    def run_error_test_case(input, ord, dim, keepdim, error_type, error_regex):
        test_case_info = (
            f'test case input.size()={input.size()}, ord={ord}, dim={dim}, '
            f'keepdim={keepdim}, dtype={dtype}')

        with self.assertRaisesRegex(error_type, error_regex, msg=test_case_info):
            torch.linalg.norm(input, ord, dim, keepdim)

        input_numpy = input.cpu().numpy()

        # BUG FIX: `msg` was built but `test_case_info` was passed instead;
        # use the more descriptive message on the NumPy check.
        msg = f'numpy does not raise error but pytorch does, for case "{test_case_info}"'
        with self.assertRaises(Exception, msg=msg):
            np.linalg.norm(input_numpy, ord, dim, keepdim)

    S = 10
    error_test_cases = [
        # input size, p settings, dim, error type, error regex
        ((S, ), ['fro', 'nuc'], None, RuntimeError, r'A must have at least 2 dimensions'),
        ((S, S), [3.5], None, RuntimeError, r'matrix_norm: Order 3.5 not supported'),
        ((S, S), [0], None, RuntimeError, r'matrix_norm: Order 0 not supported'),
        ((S, S), ['fail'], None, RuntimeError, r'matrix_norm: Order fail not supported'),
        ((S, S), ['fro', 'nuc'], 0, RuntimeError, r'matrix_norm: dim must be a 2-tuple'),
        ((S, S), ['fro', 'nuc', 2], (0, 0), RuntimeError, r'dims must be different'),
        ((S, S), ['fro', 'nuc', 2], (-1, 1), RuntimeError, r'dims must be different'),
        ((S, S), ['fro', 'nuc', 2], (0, 4), IndexError, r'Dimension out of range'),
        ((S, ), [0], (4, ), IndexError, r'Dimension out of range'),
        ((S, ), [None], (0, 0), RuntimeError, r'dim 0 appears multiple times'),
        ((S, S, S), [1], (0, 1, 2), RuntimeError, r"If dim is specified, it must be of length 1 or 2."),
        ((S, S, S), [1], None, RuntimeError, r"If dim is not specified but ord is, the input must be 1D or 2D"),
    ]
    for keepdim in [True, False]:
        for input_size, ord_settings, dim, error_type, error_regex in error_test_cases:
            input = torch.randn(*input_size, dtype=dtype, device=device)
            for ord in ord_settings:
                run_error_test_case(input, ord, dim, keepdim, error_type, error_regex)
# Test complex number inputs for linalg.norm
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.cfloat, torch.cdouble)
@precisionOverride({torch.cfloat: 2e-4})
def test_norm_complex(self, device, dtype):
    """Compare vector and matrix norms of complex inputs (and out=) with NumPy."""
    def gen_error_message(input_size, ord, keepdim, dim=None):
        return "complex norm failed for input size %s, ord=%s, keepdim=%s, dim=%s" % (
            input_size, ord, keepdim, dim)

    vector_ords = [None, 0, 1, 2, 3, inf, -1, -2, -3, -inf]
    matrix_ords = [None, 'fro', 'nuc', 1, 2, inf, -1, -2, -inf]

    # Test supported ords
    for keepdim in [False, True]:
        # vector norm
        x = torch.randn(25, device=device, dtype=dtype)
        xn = x.cpu().numpy()
        for ord in vector_ords:
            res = torch.linalg.norm(x, ord, keepdim=keepdim).cpu()
            expected = np.linalg.norm(xn, ord, keepdims=keepdim)
            msg = gen_error_message(x.size(), ord, keepdim)
            self.assertEqual(res.shape, expected.shape, msg=msg)
            # values may agree while dtypes differ, hence exact_dtype=False
            self.assertEqual(res, expected, msg=msg, exact_dtype=False)

            res_out = torch.tensor([], device=device, dtype=res.dtype)
            torch.linalg.norm(x, ord, keepdim=keepdim, out=res_out)
            self.assertEqual(res_out.shape, expected.shape, msg=msg)
            self.assertEqual(res_out, expected, msg=msg)

        # matrix norm
        x = torch.randn(25, 25, device=device, dtype=dtype)
        xn = x.cpu().numpy()
        for ord in matrix_ords:
            res = torch.linalg.norm(x, ord, keepdim=keepdim).cpu()
            expected = np.linalg.norm(xn, ord, keepdims=keepdim)
            msg = gen_error_message(x.size(), ord, keepdim)
            self.assertEqual(res.shape, expected.shape, msg=msg)
            self.assertEqual(res, expected, msg=msg, exact_dtype=False)

            res_out = torch.tensor([], device=device, dtype=res.dtype)
            torch.linalg.norm(x, ord, keepdim=keepdim, out=res_out)
            self.assertEqual(res_out.shape, expected.shape, msg=msg)
            self.assertEqual(res_out, expected, msg=msg)
# Test that linalg.vector_norm gives the same result as numpy when inputs
# contain extreme values (inf, -inf, nan)
def test_vector_norm_extreme_values(self, device):
    vector_ords = [0, 1, 2, 3, inf, -1, -2, -3, -inf]
    # every length-2 vector over {inf, -inf, 0, nan, 1}
    vectors = [list(pair) for pair in itertools.product([inf, -inf, 0.0, nan, 1.0], repeat=2)]
    for vector in vectors:
        x = torch.tensor(vector, device=device)
        x_n = x.cpu().numpy()
        for ord in vector_ords:
            msg = f'ord={ord}, vector={vector}'
            actual = torch.linalg.vector_norm(x, ord=ord)
            expected = np.linalg.norm(x_n, ord=ord)
            self.assertEqual(actual, expected, msg=msg)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double)
@precisionOverride({torch.float32: 2e-5})
def test_matrix_norm(self, device, dtype):
    # Test only inputs for which torch.linalg.matrix_norm diverges from torch.linalg.norm
    A = make_tensor((2, 2, 2), dtype=dtype, device=device)
    # invalid arguments: too few dims, 1-tuple dim, unsupported orders
    error_cases = (
        (r'linalg.matrix_norm:.*must have at least 2 dimensions.*',
         lambda: torch.linalg.matrix_norm(make_tensor((2,), dtype=dtype, device=device))),
        (r'linalg.matrix_norm:.*must be a 2-tuple.*',
         lambda: torch.linalg.matrix_norm(A, dim=(0,))),
        (r'.*not supported.*', lambda: torch.linalg.matrix_norm(A, ord=0)),
        (r'.*not supported.*', lambda: torch.linalg.matrix_norm(A, ord=3.0)),
    )
    for pattern, call in error_cases:
        with self.assertRaisesRegex(RuntimeError, pattern):
            call()
    # Test dim=None behavior: defaults to the trailing two dimensions
    ref = torch.linalg.norm(A, dim=(-2, -1))
    res = torch.linalg.matrix_norm(A)
    self.assertEqual(ref, res)
# Test that linalg.norm gives the same result as numpy when inputs
# contain extreme values (inf, -inf, nan)
@unittest.skipIf(IS_WINDOWS, "Skipped on Windows!")
@unittest.skipIf(IS_MACOS, "Skipped on MacOS!")
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
def test_norm_extreme_values(self, device):
    """Compare linalg.norm against NumPy on inputs containing inf/-inf/nan."""
    vector_ords = [0, 1, 2, 3, inf, -1, -2, -3, -inf]
    # matrix_ords 'nuc', 2, -2 are skipped currently
    # See issue https://github.com/pytorch/pytorch/issues/71911
    matrix_ords = ['fro', 1, inf, -1, -inf]
    vectors = []
    matrices = []
    # all length-2 combinations of {inf, -inf, 0, nan, 1}, as a vector,
    # a 1x2 matrix, and a 2x1 matrix
    for pair in itertools.product([inf, -inf, 0.0, nan, 1.0], repeat=2):
        vectors.append(list(pair))
        matrices.append([[pair[0], pair[1]]])
        matrices.append([[pair[0]], [pair[1]]])

    for vector in vectors:
        x = torch.tensor(vector).to(device)
        x_n = x.cpu().numpy()
        for ord in vector_ords:
            msg = f'ord={ord}, vector={vector}'
            result = torch.linalg.norm(x, ord=ord)
            result_n = np.linalg.norm(x_n, ord=ord)
            self.assertEqual(result, result_n, msg=msg)

    # TODO: Remove this function once the broken cases are fixed
    def is_broken_matrix_norm_case(ord, x):
        if self.device_type == 'cuda':
            if x.size() == torch.Size([1, 2]):
                if ord in ['nuc', 2, -2] and isnan(x[0][0]) and x[0][1] == 1:
                    # These cases are broken because of an issue with svd
                    # https://github.com/pytorch/pytorch/issues/43567
                    return True
            if ord in ['nuc', 2, -2]:
                # These cases are broken because of another issue with svd
                # https://github.com/pytorch/pytorch/issues/52633
                return True
        return False

    for matrix in matrices:
        x = torch.tensor(matrix).to(device)
        x_n = x.cpu().numpy()
        for ord in matrix_ords:
            msg = f'ord={ord}, matrix={matrix}'
            if is_broken_matrix_norm_case(ord, x):
                continue
            else:
                result_n = np.linalg.norm(x_n, ord=ord)
                result = torch.linalg.norm(x, ord=ord)
                self.assertEqual(result, result_n, msg=msg)
# Test degenerate shape results match numpy for linalg.norm vector norms
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@unittest.skipIf(TEST_WITH_ASAN, "Skipped on ASAN since it checks for undefined behavior.")
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_norm_vector_degenerate_shapes(self, device, dtype):
def run_test_case(input, ord, dim, keepdim):
msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
if (input.numel() == 0 and
(ord < 0. or ord == inf) and
(dim is None or input.shape[dim] == 0)):
with self.assertRaises(RuntimeError):
torch.linalg.norm(input, ord, dim, keepdim)
else:
input_numpy = input.cpu().numpy()
result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
result = torch.linalg.norm(input, ord, dim, keepdim)
self.assertEqual(result, result_numpy, msg=msg)
ord_vector = [0, 0.5, 1, 2, 3, inf, -0.5, -1, -2, -3, -inf]
S = 10
test_cases = [
# input size, dim
((0, ), None),
((0, S), 0),
((0, S), 1),
((S, 0), 0),
((S, 0), 1),
]
for keepdim in [True, False]:
for input_size, dim in test_cases:
input = torch.randn(*input_size, dtype=dtype, device=device)
for ord in ord_vector:
run_test_case(input, ord, dim, keepdim)
    # Test degenerate shape results match numpy for linalg.norm matrix norms
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_norm_matrix_degenerate_shapes(self, device, dtype):
        # If NumPy raises ValueError for a degenerate case, torch is expected
        # to raise IndexError for the same case; otherwise results must match.
        def run_test_case(input, ord, dim, keepdim, should_error):
            msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
            input_numpy = input.cpu().numpy()
            ops = [torch.linalg.norm]
            # matrix_norm needs an explicit ord and dim, so only exercise it
            # when both are provided
            if ord is not None and dim is not None:
                ops.append(torch.linalg.matrix_norm)
            if should_error:
                with self.assertRaises(ValueError):
                    np.linalg.norm(input_numpy, ord, dim, keepdim)
                for op in ops:
                    with self.assertRaises(IndexError):
                        op(input, ord, dim, keepdim)
            else:
                result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
                for op in ops:
                    result = op(input, ord, dim, keepdim)
                    self.assertEqual(result, result_numpy, msg=msg)
        ord_matrix = ['fro', 'nuc', 1, 2, inf, -1, -2, -inf, None]
        S = 10
        test_cases = [
            # input size, p settings that cause error, dim
            ((0, 0), [1, 2, inf, -1, -2, -inf], None),
            ((0, S), [2, inf, -2, -inf], None),
            ((S, 0), [1, 2, -1, -2], None),
            ((S, S, 0), [], (0, 1)),
            ((1, S, 0), [], (0, 1)),
            ((0, 0, S), [1, 2, inf, -1, -2, -inf], (0, 1)),
            ((0, 0, S), [1, 2, inf, -1, -2, -inf], (1, 0)),
        ]
        for keepdim in [True, False]:
            for input_size, error_ords, dim in test_cases:
                input = torch.randn(*input_size, dtype=dtype, device=device)
                for ord in ord_matrix:
                    run_test_case(input, ord, dim, keepdim, ord in error_ords)
def test_norm_fastpaths(self, device):
x = torch.randn(3, 5, device=device)
# slow path
result = torch.linalg.norm(x, 4.5, 1)
expected = torch.pow(x.abs().pow(4.5).sum(1), 1.0 / 4.5)
self.assertEqual(result, expected)
# fast 0-norm
result = torch.linalg.norm(x, 0, 1)
expected = (x != 0).type_as(x).sum(1)
self.assertEqual(result, expected)
# fast 1-norm
result = torch.linalg.norm(x, 1, 1)
expected = x.abs().sum(1)
self.assertEqual(result, expected)
# fast 2-norm
result = torch.linalg.norm(x, 2, 1)
expected = torch.sqrt(x.pow(2).sum(1))
self.assertEqual(result, expected)
# fast 3-norm
result = torch.linalg.norm(x, 3, 1)
expected = torch.pow(x.pow(3).abs().sum(1), 1.0 / 3.0)
self.assertEqual(result, expected)
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(*floating_and_complex_types())
    def test_old_eig_basic(self, device, dtype):
        # Sanity checks for the deprecated torch.eig: the plain, eigenvector,
        # and out= variants must all agree, and must match numpy.linalg.eig.
        a = torch.tensor([[1.96, 0.00, 0.00, 0.00, 0.00],
                          [-6.49, 3.80, 0.00, 0.00, 0.00],
                          [-0.47, -6.39, 4.17, 0.00, 0.00],
                          [-7.20, 1.50, -1.51, 5.70, 0.00],
                          [-0.65, -6.34, 2.67, 1.80, -7.10]],
                         dtype=dtype, device=device).t()
        e = torch.eig(a)[0]
        ee, vv = torch.eig(a, True)
        te = torch.tensor((), dtype=dtype, device=device)
        tv = torch.tensor((), dtype=dtype, device=device)
        eee, vvv = torch.eig(a, True, out=(te, tv))
        # all variants return the same eigenvalues/eigenvectors, and the out=
        # tensors hold the same values as the returned ones
        self.assertEqual(e, ee, atol=1e-12, rtol=0)
        self.assertEqual(ee, eee, atol=1e-12, rtol=0)
        self.assertEqual(ee, te, atol=1e-12, rtol=0)
        self.assertEqual(vv, vvv, atol=1e-12, rtol=0)
        self.assertEqual(vv, tv, atol=1e-12, rtol=0)
        #
        # compare with numpy
        np_e, np_v = np.linalg.eig(a.cpu().numpy())
        if dtype.is_complex:
            self.assertEqual(ee, np_e)
        else:
            # ee.shape == (n, 2), where each column contain the real and
            # imaginary parts of the result
            self.assertEqual(ee[:, 0], np_e)  # real part
            self.assertEqual(ee[:, 1], torch.zeros(ee.shape[0], dtype=dtype))  # imaginary part
        self.assertEqual(vv, np_v)
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(torch.double, torch.float)
    def test_old_eig_reuse(self, device, dtype):
        # Calling the deprecated torch.eig twice with the same out= tensors
        # must reuse them and still reconstruct X as V diag(e) V^T.
        X = torch.randn(4, 4, dtype=dtype, device=device)
        X = torch.mm(X.t(), X)  # X^T X is symmetric positive semi-definite
        e = torch.zeros(4, 2, dtype=dtype, device=device)
        v = torch.zeros(4, 4, dtype=dtype, device=device)
        torch.eig(X, True, out=(e, v))
        # e.select(1, 0) is the real part of the eigenvalues (column 0)
        Xhat = np.matmul(np.matmul(v.cpu(), torch.diag(e.select(1, 0)).cpu()), v.t().cpu())
        if dtype is torch.float:
            atol = 1e-7
            rtol = 1e-5
        else:
            atol = 1e-8
            rtol = 0
        self.assertEqual(X, Xhat, atol=atol, rtol=rtol, msg='VeV\' wrong')
        self.assertTrue(v.is_contiguous(), 'V is not contiguous')
        # second call reuses the same out tensors
        torch.eig(X, True, out=(e, v))
        Xhat = np.matmul(v.cpu(), np.matmul(e.select(1, 0).diag().cpu(), v.t().cpu()))
        self.assertEqual(X, Xhat, atol=atol, rtol=rtol, msg='VeV\' wrong')
        self.assertTrue(v.is_contiguous(), 'V is not contiguous')
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(torch.double, torch.float)
def test_old_eig_invalid_input(self, device, dtype):
# test invalid input
self.assertRaisesRegex(
RuntimeError,
'input should be 2 dimensional',
lambda: torch.eig(torch.ones((2))))
self.assertRaisesRegex(
RuntimeError,
'input should be square',
lambda: torch.eig(torch.ones((2, 3))))
self.assertRaisesRegex(
RuntimeError,
'input should not contain infs or NaNs',
lambda: torch.eig(np.inf * torch.ones((2, 2))))
self.assertRaisesRegex(
RuntimeError,
'input should not contain infs or NaNs',
lambda: torch.eig(np.nan * torch.ones((2, 2))))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double, torch.float)
def test_old_eig_out(self, device, dtype):
# the out version of torch.eig needs to be tested manually: we can't
# use the "test_out=True" parameter to tensor_op_tests because the
# signature is irregular (since we have *two* output vectors)
t = torch.randn(10, 10, dtype=dtype, device=device)
evals, evecs = torch.eig(t, eigenvectors=True)
#
# check that the out= version computes the same values as the normal one
out_evals = torch.empty_like(evals)
out_evecs = torch.empty_like(evecs)
evals2, evecs2 = torch.eig(t, eigenvectors=True, out=(out_evals, out_evecs))
# check that the out tensors were used in-place
self.assertEqual(evals2.data_ptr(), out_evals.data_ptr())
self.assertEqual(evecs2.data_ptr(), out_evecs.data_ptr())
# check that the result is the same as the non-out version
self.assertEqual(evals, out_evals)
self.assertEqual(evecs, out_evecs)
#
# check what happens in the eigenvectors=False case
out_evals = torch.empty_like(evals)
out_evecs = torch.tensor([1, 2, 3], dtype=dtype, device=device)
evals2, evecs2 = torch.eig(t, eigenvectors=False, out=(out_evals, out_evecs))
# check that the out_evals was used in-place
self.assertEqual(evals2.data_ptr(), out_evals.data_ptr())
self.assertEqual(evals, out_evals)
# check that out_evecs was NOT touched at all
assert out_evecs.tolist() == [1, 2, 3]
#
# check that we complain if we pass an out vector of the wrong dtype
wrong_out = torch.empty((0, 0), dtype=int)
with self.assertRaisesRegex(RuntimeError, r"Expected .* but got .*"):
torch.eig(t, eigenvectors=True, out=(wrong_out, out_evecs))
with self.assertRaisesRegex(RuntimeError, r"Expected .* but got .*"):
torch.eig(t, eigenvectors=True, out=(out_evals, wrong_out))
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    # NumPy computes only in float64 and complex128 precisions
    # for float32 or complex64 results might be very different from float64 or complex128
    @dtypes(torch.float64, torch.complex128)
    def test_eig_numpy(self, device, dtype):
        # Compares torch.linalg.eig against numpy.linalg.eig after sorting the
        # eigenvalues of both, since neither library guarantees an order.
        def run_test(shape, *, symmetric=False):
            from torch.testing._internal.common_utils import random_symmetric_matrix
            if not dtype.is_complex and symmetric:
                # for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero
                # unlike NumPy the result is not cast to float32 or float64 dtype in this case
                a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
            else:
                a = make_tensor(shape, dtype=dtype, device=device)
            actual = torch.linalg.eig(a)
            # compare with NumPy
            # the eigenvalues are not necessarily ordered
            # so order of NumPy and PyTorch can be different
            expected = np.linalg.eig(a.cpu().numpy())
            # sort NumPy output
            # ind[:, None] broadcasts the eigenvalue ordering over the rows of
            # the eigenvector matrix so its columns are permuted to match
            ind = np.argsort(expected[0], axis=-1)[::-1]
            expected = (np.take_along_axis(expected[0], ind, axis=-1), np.take_along_axis(expected[1], ind[:, None], axis=-1))
            # sort PyTorch output
            # torch.argsort doesn't work with complex inputs, NumPy sorting on CPU is used instead
            # RuntimeError: _th_sort not supported on CUDAType for ComplexDouble
            # RuntimeError: "sorting_kernel_method_name" not implemented for 'ComplexDouble'
            ind = np.argsort(actual[0].cpu().numpy(), axis=-1)[::-1]
            actual_np = [x.cpu().numpy() for x in actual]
            sorted_actual = (
                np.take_along_axis(actual_np[0], ind, axis=-1),
                np.take_along_axis(actual_np[1], ind[:, None], axis=-1))
            # eigenvectors are compared only up to absolute value, since each
            # is determined only up to a scalar phase factor
            self.assertEqual(expected[0], sorted_actual[0], exact_dtype=False)
            self.assertEqual(abs(expected[1]), abs(sorted_actual[1]), exact_dtype=False)
        shapes = [(0, 0), # Empty matrix
                  (5, 5), # Single matrix
                  (0, 0, 0), (0, 5, 5), # Zero batch dimension tensors
                  (2, 5, 5), # 3-dim tensors
                  (2, 1, 5, 5)] # 4-dim tensors
        for shape in shapes:
            run_test(shape)
            run_test(shape, symmetric=True)
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
def test_eig_compare_backends(self, device, dtype):
def run_test(shape, *, symmetric=False):
from torch.testing._internal.common_utils import random_symmetric_matrix
if not dtype.is_complex and symmetric:
# for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero
a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
else:
a = make_tensor(shape, dtype=dtype, device=device)
actual = torch.linalg.eig(a)
complementary_device = 'cpu'
# compare with CPU
expected = torch.linalg.eig(a.to(complementary_device))
self.assertEqual(expected[0], actual[0])
self.assertEqual(expected[1], actual[1])
shapes = [(0, 0), # Empty matrix
(5, 5), # Single matrix
(0, 0, 0), (0, 5, 5), # Zero batch dimension tensors
(2, 5, 5), # 3-dim tensors
(2, 1, 5, 5)] # 4-dim tensors
for shape in shapes:
run_test(shape)
run_test(shape, symmetric=True)
@slowTest
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(torch.float32)
def test_eig_check_magma(self, device, dtype):
# For CUDA inputs only matrices of size larger than 2048x2048 actually call MAGMA library
shape = (2049, 2049)
a = make_tensor(shape, dtype=dtype, device=device)
w, v = torch.linalg.eig(a)
# check correctness using eigendecomposition identity
self.assertEqual(a.to(v.dtype) @ v, w * v, atol=1e-3, rtol=1e-3)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_eig_errors_and_warnings(self, device, dtype):
        # Error/warning behavior of torch.linalg.eig: shape validation, out=
        # dtype checks, resize warnings, and device-mismatch errors.
        # eig requires the input to be at least 2 dimensional tensor
        a = make_tensor(2, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
            torch.linalg.eig(a)
        # eig requires a square matrix
        a = make_tensor((2, 3), dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            torch.linalg.eig(a)
        # if out tensor with floating dtype is passed for complex output an error is thrown
        if not dtype.is_complex:
            # The characteristic equation is p(λ) = λ^2 − 2λ + 5 = 0, with roots λ = 1±2i
            a = torch.tensor([[3., -2.], [4., -1.]], dtype=dtype, device=device)
            out0 = torch.empty(0, device=device, dtype=dtype)
            out1 = torch.empty(0, device=device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "Expected eigenvalues to be safely castable"):
                torch.linalg.eig(a, out=(out0, out1))
            # a complex eigenvalues tensor alone is not enough: the
            # eigenvectors out tensor is still real-valued here
            out0 = torch.empty(0, device=device, dtype=torch.complex128)
            with self.assertRaisesRegex(RuntimeError, "Expected eigenvectors to be safely castable"):
                torch.linalg.eig(a, out=(out0, out1))
        # dtypes should be safely castable
        a = make_tensor((3, 3), dtype=dtype, device=device)
        out0 = torch.empty(0, dtype=torch.int, device=device)
        out1 = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got eigenvalues with dtype Int"):
            torch.linalg.eig(a, out=(out0, out1))
        out0 = torch.empty(0, dtype=torch.complex128, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got eigenvectors with dtype Int"):
            torch.linalg.eig(a, out=(out0, out1))
        # if non-empty out tensor with wrong shape is passed a warning is given
        a = make_tensor((3, 3), dtype=dtype, device=device)
        out0 = torch.empty(1, device=device, dtype=torch.complex128)
        out1 = torch.empty(1, device=device, dtype=torch.complex128)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.eig(a, out=(out0, out1))
            # Check warning occurs
            # one resize warning per out tensor
            self.assertEqual(len(w), 2)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
            self.assertTrue("An output with one or more elements was resized" in str(w[-2].message))
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out_w = torch.empty(0, device=wrong_device, dtype=torch.complex128)
            out_v = torch.empty(0, device=device, dtype=torch.complex128)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.eig(a, out=(out_w, out_v))
            out_w = torch.empty(0, device=device, dtype=torch.complex128)
            out_v = torch.empty(0, device=wrong_device, dtype=torch.complex128)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.eig(a, out=(out_w, out_v))
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
def test_eig_with_nan(self, device, dtype):
for val in [np.inf, np.nan]:
for batch_dim in [(), (10,)]:
a = make_tensor((*batch_dim, 5, 5), device=device, dtype=dtype)
a[..., -1, -1] = val
with self.assertRaisesRegex(RuntimeError, "torch.linalg.eig: input tensor should not"):
torch.linalg.eig(a)
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    # NumPy computes only in float64 and complex128 precisions
    # for float32 or complex64 results might be very different from float64 or complex128
    @dtypes(torch.float64, torch.complex128)
    def test_eigvals_numpy(self, device, dtype):
        # Compares torch.linalg.eigvals with numpy.linalg.eigvals after
        # sorting both outputs, since neither library guarantees an order.
        def run_test(shape, *, symmetric=False):
            from torch.testing._internal.common_utils import random_symmetric_matrix
            if not dtype.is_complex and symmetric:
                # for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero
                # unlike NumPy the result is not cast to float32 or float64 dtype in this case
                a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
            else:
                a = make_tensor(shape, dtype=dtype, device=device)
            actual = torch.linalg.eigvals(a)
            # compare with NumPy
            # the eigenvalues are not necessarily ordered
            # so order of NumPy and PyTorch can be different
            expected = np.linalg.eigvals(a.cpu().numpy())
            # sort NumPy output
            ind = np.argsort(expected, axis=-1)[::-1]
            expected = np.take_along_axis(expected, ind, axis=-1)
            # sort PyTorch output
            # torch.argsort doesn't work with complex inputs, NumPy sorting on CPU is used instead
            # RuntimeError: _th_sort not supported on CUDAType for ComplexDouble
            # RuntimeError: "sorting_kernel_method_name" not implemented for 'ComplexDouble'
            ind = np.argsort(actual.cpu().numpy(), axis=-1)[::-1]
            actual_np = actual.cpu().numpy()
            sorted_actual = np.take_along_axis(actual_np, ind, axis=-1)
            self.assertEqual(expected, sorted_actual, exact_dtype=False)
        shapes = [(0, 0), # Empty matrix
                  (5, 5), # Single matrix
                  (0, 0, 0), (0, 5, 5), # Zero batch dimension tensors
                  (2, 5, 5), # 3-dim tensors
                  (2, 1, 5, 5)] # 4-dim tensors
        for shape in shapes:
            run_test(shape)
            run_test(shape, symmetric=True)
    @onlyCUDA
    @skipCUDAIfNoMagma
    @dtypes(*floating_and_complex_types())
    def test_eigvals_compare_backends(self, device, dtype):
        # Checks that CUDA torch.linalg.eigvals matches the CPU backend, and
        # exercises the out= variant with contiguous and non-contiguous outs.
        def run_test(shape, *, symmetric=False):
            from torch.testing._internal.common_utils import random_symmetric_matrix
            if not dtype.is_complex and symmetric:
                # for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero
                a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
            else:
                a = make_tensor(shape, dtype=dtype, device=device)
            actual = torch.linalg.eigvals(a)
            complementary_device = 'cpu'
            # compare with CPU
            expected = torch.linalg.eigvals(a.to(complementary_device))
            self.assertEqual(expected, actual)
            # check out= variant
            # eigenvalues of a real matrix may be complex, so the out tensor
            # uses the complex counterpart of a real input dtype
            complex_dtype = dtype
            if not dtype.is_complex:
                complex_dtype = torch.complex128 if dtype == torch.float64 else torch.complex64
            out = torch.empty(0, dtype=complex_dtype, device=device)
            ans = torch.linalg.eigvals(a, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(expected.to(complex_dtype), out)
            # check non-contiguous out
            if a.numel() > 0:
                # slicing with step 2 produces a non-contiguous out tensor
                out = torch.empty(2 * shape[0], *shape[1:-1], dtype=complex_dtype, device=device)[::2]
                self.assertFalse(out.is_contiguous())
                ans = torch.linalg.eigvals(a, out=out)
                self.assertEqual(ans, out)
                self.assertEqual(expected.to(complex_dtype), out)
        shapes = [(0, 0), # Empty matrix
                  (5, 5), # Single matrix
                  (0, 0, 0), (0, 5, 5), # Zero batch dimension tensors
                  (2, 5, 5), # 3-dim tensors
                  (2, 1, 5, 5)] # 4-dim tensors
        for shape in shapes:
            run_test(shape)
            run_test(shape, symmetric=True)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_eigvals_errors_and_warnings(self, device, dtype):
        # Error/warning behavior of torch.linalg.eigvals: shape validation,
        # out= dtype checks, resize warnings, and device-mismatch errors.
        # eigvals requires the input to be at least 2 dimensional tensor
        a = make_tensor(2, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
            torch.linalg.eigvals(a)
        # eigvals requires a square matrix
        a = make_tensor((2, 3), dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            torch.linalg.eigvals(a)
        # if out tensor with floating dtype is passed for complex output an error is thrown
        if not dtype.is_complex:
            # The characteristic equation is p(λ) = λ^2 − 2λ + 5 = 0, with roots λ = 1±2i
            a = torch.tensor([[3., -2.], [4., -1.]], dtype=dtype, device=device)
            out = torch.empty(0, device=device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "Expected eigenvalues to be safely castable"):
                torch.linalg.eigvals(a, out=out)
        # dtypes should be safely castable
        a = make_tensor((3, 3), dtype=dtype, device=device)
        out = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got eigenvalues with dtype Int"):
            torch.linalg.eigvals(a, out=out)
        # if non-empty out tensor with wrong shape is passed a warning is given
        out = torch.empty(1, device=device, dtype=torch.complex128)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.eigvals(a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out_w = torch.empty(0, device=wrong_device, dtype=torch.complex128)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.eigvals(a, out=out_w)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
def test_norm_old(self, device):
def gen_error_message(input_size, p, keepdim, dim=None):
return "norm failed for input size %s, p=%s, keepdim=%s, dim=%s" % (
input_size, p, keepdim, dim)
for keepdim in [False, True]:
# full reduction
x = torch.randn(25, device=device)
xn = x.cpu().numpy()
for p in [0, 1, 2, 3, 4, inf, -inf, -1, -2, -3, 1.5]:
res = x.norm(p, keepdim=keepdim).cpu()
expected = np.linalg.norm(xn, p, keepdims=keepdim)
self.assertEqual(res, expected, atol=1e-5, rtol=0, msg=gen_error_message(x.size(), p, keepdim))
# one dimension
x = torch.randn(25, 25, device=device)
xn = x.cpu().numpy()
for p in [0, 1, 2, 3, 4, inf, -inf, -1, -2, -3]:
dim = 1
res = x.norm(p, dim, keepdim=keepdim).cpu()
expected = np.linalg.norm(xn, p, dim, keepdims=keepdim)
msg = gen_error_message(x.size(), p, keepdim, dim)
self.assertEqual(res.shape, expected.shape, msg=msg)
self.assertEqual(res, expected, msg=msg)
# matrix norm
for p in ['fro', 'nuc']:
res = x.norm(p, keepdim=keepdim).cpu()
expected = np.linalg.norm(xn, p, keepdims=keepdim)
msg = gen_error_message(x.size(), p, keepdim)
self.assertEqual(res.shape, expected.shape, msg=msg)
self.assertEqual(res, expected, msg=msg)
# zero dimensions
x = torch.randn((), device=device)
xn = x.cpu().numpy()
res = x.norm(keepdim=keepdim).cpu()
expected = np.linalg.norm(xn, keepdims=keepdim)
msg = gen_error_message(x.size(), None, keepdim)
self.assertEqual(res.shape, expected.shape, msg=msg)
self.assertEqual(res, expected, msg=msg)
# larger tensor sanity check
self.assertEqual(
2 * torch.norm(torch.ones(10000), keepdim=keepdim),
torch.norm(torch.ones(40000), keepdim=keepdim))
# matrix norm with non-square >2-D tensors, all combinations of reduction dims
x = torch.randn(5, 6, 7, 8, device=device)
xn = x.cpu().numpy()
for p in ['fro', 'nuc']:
for dim in itertools.product(*[list(range(4))] * 2):
if dim[0] == dim[1]:
continue
res = x.norm(p=p, dim=dim, keepdim=keepdim).cpu()
expected = np.linalg.norm(xn, ord=p, axis=dim, keepdims=keepdim)
msg = gen_error_message(x.size(), p, keepdim, dim)
self.assertEqual(res.shape, expected.shape, msg=msg)
self.assertEqual(res, expected, msg=msg)
# Test that torch.norm with p=+/-inf propagates NaN
def test_norm_old_nan_propagation(self, device):
ords = [inf, -inf]
for pair in itertools.product([0.0, nan, 1.0], repeat=2):
x = torch.tensor(list(pair), device=device)
for ord in ords:
result = torch.norm(x, p=ord)
result_check = torch.linalg.norm(x, ord=ord)
self.assertEqual(result, result_check)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    def test_norm_complex_old(self, device):
        # Legacy torch.norm on complex inputs must match numpy.linalg.norm
        # for vector norms and for the 'fro'/'nuc' matrix norms.
        def gen_error_message(input_size, p, keepdim, dim=None):
            return "complex norm failed for input size %s, p=%s, keepdim=%s, dim=%s" % (
                input_size, p, keepdim, dim)
        for keepdim in [False, True]:
            # vector norm
            x = torch.randn(25, device=device) + 1j * torch.randn(25, device=device)
            xn = x.cpu().numpy()
            for p in [0, 1, 2, 3, inf, -1, -2, -3, -inf]:
                res = x.norm(p, keepdim=keepdim).cpu()
                expected = np.linalg.norm(xn, p, keepdims=keepdim)
                msg = gen_error_message(x.size(), p, keepdim)
                self.assertEqual(res.shape, expected.shape, msg=msg)
                self.assertEqual(res, expected, msg=msg)
            # matrix norm
            x = torch.randn(25, 25, device=device) + 1j * torch.randn(25, 25, device=device)
            xn = x.cpu().numpy()
            for p in ['nuc', 'fro']:
                res = x.norm(p, keepdim=keepdim).cpu()
                expected = np.linalg.norm(xn, p, keepdims=keepdim)
                msg = gen_error_message(x.size(), p, keepdim)
                self.assertEqual(res.shape, expected.shape, msg=msg)
                # looser tolerances than the vector-norm checks above —
                # presumably to absorb backend differences in the SVD-based
                # 'nuc' computation (TODO confirm)
                self.assertEqual(res, expected, msg=msg, rtol=4e-6, atol=6e-4)
    # Ensure torch.norm with p='fro' and p=2 give the same results for mutually supported input combinations
    @dtypes(torch.float)
    def test_norm_fro_2_equivalence_old(self, device, dtype):
        input_sizes = [
            (0,),
            (10,),
            (0, 0),
            (4, 30),
            (0, 45),
            (100, 0),
            (45, 10, 23),
            (0, 23, 59),
            (23, 0, 37),
            (34, 58, 0),
            (0, 0, 348),
            (0, 3434, 0),
            (0, 0, 0),
            (5, 3, 8, 1, 3, 5)]
        for input_size in input_sizes:
            a = make_tensor(input_size, dtype=dtype, device=device, low=-9, high=9)
            # Try full reduction
            dim_settings = [None]
            # Try all possible 1-D reductions
            dim_settings += list(range(-a.dim(), a.dim()))
            # Normalizes a possibly-negative dim to its non-negative
            # equivalent so that e.g. -1 and a.dim()-1 compare equal below.
            def wrap_dim(dim, ndims):
                assert (dim < ndims) and (dim >= -ndims)
                if dim >= 0:
                    return dim
                else:
                    return dim + ndims
            # Try all possible 2-D reductions
            # (skip pairs that name the same axis twice after wrapping)
            dim_settings += [
                (d0, d1) for d0, d1 in itertools.combinations(range(-a.dim(), a.dim()), 2)
                if wrap_dim(d0, a.dim()) != wrap_dim(d1, a.dim())]
            for dim in dim_settings:
                for keepdim in [True, False]:
                    a_norm_2 = torch.norm(a, p=2, dim=dim, keepdim=keepdim)
                    a_norm_fro = torch.norm(a, p='fro', dim=dim, keepdim=keepdim)
                    self.assertEqual(a_norm_fro, a_norm_2)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    def test_nuclear_norm_axes_small_brute_force_old(self, device):
        # Brute-force check of torch.norm(p='nuc') against NumPy over all
        # 2-dim axis permutations of small 2-D/3-D/4-D tensors, covering C,
        # Fortran, and non-contiguous layouts, plus the out= variant.
        def check_single_nuclear_norm(x, axes):
            if self.device_type != 'cpu' and randrange(100) < 95:
                return  # too many cpu <==> device copies
            # NOTE(review): np.array(..., copy=False) raises under NumPy 2.x
            # when a copy cannot be avoided — verify against the pinned NumPy.
            a = np.array(x.cpu(), copy=False)
            expected = np.linalg.norm(a, "nuc", axis=axes)
            ans = torch.norm(x, "nuc", dim=axes)
            self.assertTrue(ans.is_contiguous())
            self.assertEqual(ans.shape, expected.shape)
            self.assertEqual(ans.cpu(), expected, rtol=1e-02, atol=1e-03, equal_nan=True)
            # the out= variant must return the out tensor itself, filled with
            # the same values
            out = torch.zeros(expected.shape, dtype=x.dtype, device=x.device)
            ans = torch.norm(x, "nuc", dim=axes, out=out)
            self.assertIs(ans, out)
            self.assertTrue(ans.is_contiguous())
            self.assertEqual(ans.shape, expected.shape)
            self.assertEqual(ans.cpu(), expected, rtol=1e-02, atol=1e-03, equal_nan=True)
        for n in range(1, 3):
            for m in range(1, 3):
                for axes in itertools.permutations([0, 1], 2):
                    # 2d, inner dimensions C
                    x = torch.randn(n, m, device=device)
                    check_single_nuclear_norm(x, axes)
                    # 2d, inner dimensions Fortran
                    x = torch.randn(m, n, device=device).mT
                    check_single_nuclear_norm(x, axes)
                    # 2d, inner dimensions non-contiguous
                    x = torch.randn(n, 2 * m, device=device)[:, ::2]
                    check_single_nuclear_norm(x, axes)
                    # 2d, all dimensions non-contiguous
                    x = torch.randn(7 * n, 2 * m, device=device)[::7, ::2]
                    check_single_nuclear_norm(x, axes)
                for o in range(1, 3):
                    for axes in itertools.permutations([0, 1, 2], 2):
                        # 3d, inner dimensions C
                        x = torch.randn(o, n, m, device=device)
                        check_single_nuclear_norm(x, axes)
                        # 3d, inner dimensions Fortran
                        x = torch.randn(o, m, n, device=device).mT
                        check_single_nuclear_norm(x, axes)
                        # 3d, inner dimensions non-contiguous
                        x = torch.randn(o, n, 2 * m, device=device)[:, :, ::2]
                        check_single_nuclear_norm(x, axes)
                        # 3d, all dimensions non-contiguous
                        x = torch.randn(7 * o, 5 * n, 2 * m, device=device)[::7, ::5, ::2]
                        check_single_nuclear_norm(x, axes)
                    for r in range(1, 3):
                        for axes in itertools.permutations([0, 1, 2, 3], 2):
                            # 4d, inner dimensions C
                            x = torch.randn(r, o, n, m, device=device)
                            check_single_nuclear_norm(x, axes)
                            # 4d, inner dimensions Fortran
                            x = torch.randn(r, o, n, m, device=device).mT
                            check_single_nuclear_norm(x, axes)
                            # 4d, inner dimensions non-contiguous
                            x = torch.randn(r, o, n, 2 * m, device=device)[:, :, :, ::2]
                            check_single_nuclear_norm(x, axes)
                            # 4d, all dimensions non-contiguous
                            x = torch.randn(7 * r, 5 * o, 11 * n, 2 * m, device=device)[::7, ::5, ::11, ::2]
                            check_single_nuclear_norm(x, axes)
@skipCUDAIfNoMagma
def test_nuclear_norm_exceptions_old(self, device):
for lst in [], [1], [1, 2]:
x = torch.tensor(lst, dtype=torch.double, device=device)
for axes in (), (0,):
self.assertRaises(RuntimeError, torch.norm, x, "nuc", axes)
self.assertRaises(IndexError, torch.norm, x, "nuc", (0, 1))
x = torch.tensor([[0, 1, 2], [3, 4, 5]], dtype=torch.double, device=device)
self.assertRaisesRegex(RuntimeError, "duplicate or invalid", torch.norm, x, "nuc", (0, 0))
self.assertRaisesRegex(IndexError, "Dimension out of range", torch.norm, x, "nuc", (0, 2))
@skipCUDAIfNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_svd_lowrank(self, device, dtype):
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
def run_subtest(actual_rank, matrix_size, batches, device, svd_lowrank, **options):
density = options.pop('density', 1)
if isinstance(matrix_size, int):
rows = columns = matrix_size
else:
rows, columns = matrix_size
if density == 1:
a_input = random_lowrank_matrix(actual_rank, rows, columns, *batches, device=device, dtype=dtype)
a = a_input
else:
assert batches == ()
a_input = random_sparse_matrix(rows, columns, density, device=device, dtype=dtype)
a = a_input.to_dense()
q = min(*size)
u, s, v = svd_lowrank(a_input, q=q, **options)
# check if u, s, v is a SVD
u, s, v = u[..., :q], s[..., :q], v[..., :q]
A = u.matmul(s.diag_embed()).matmul(v.mT)
self.assertEqual(A, a, rtol=1e-7, atol=2e-7)
# check if svd_lowrank produces same singular values as torch.svd
U, S, V = torch.svd(a)
self.assertEqual(s.shape, S.shape)
self.assertEqual(u.shape, U.shape)
self.assertEqual(v.shape, V.shape)
self.assertEqual(s, S)
if density == 1:
# actual_rank is known only for dense inputs
#
# check if pairs (u, U) and (v, V) span the same
# subspaces, respectively
u, s, v = u[..., :actual_rank], s[..., :actual_rank], v[..., :actual_rank]
U, S, V = U[..., :actual_rank], S[..., :actual_rank], V[..., :actual_rank]
self.assertEqual(u.mT.matmul(U).det().abs(), torch.ones(batches, device=device, dtype=dtype))
self.assertEqual(v.mT.matmul(V).det().abs(), torch.ones(batches, device=device, dtype=dtype))
all_batches = [(), (1,), (3,), (2, 3)]
for actual_rank, size, all_batches in [
(2, (17, 4), all_batches),
(4, (17, 4), all_batches),
(4, (17, 17), all_batches),
(10, (100, 40), all_batches),
(7, (1000, 1000), [()]),
]:
# dense input
for batches in all_batches:
run_subtest(actual_rank, size, batches, device, torch.svd_lowrank)
if size != size[::-1]:
run_subtest(actual_rank, size[::-1], batches, device, torch.svd_lowrank)
# sparse input
for size in [(17, 4), (4, 17), (17, 17), (100, 40), (40, 100), (1000, 1000)]:
for density in [0.005, 0.1]:
run_subtest(None, size, (), device, torch.svd_lowrank, density=density)
# jitting support
jitted = torch.jit.script(torch.svd_lowrank)
actual_rank, size, batches = 2, (17, 4), ()
run_subtest(actual_rank, size, batches, device, jitted)
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @precisionOverride({torch.float: 1e-4, torch.cfloat: 2e-4})
    @setLinalgBackendsToDefaultFinally
    @dtypes(*floating_and_complex_types())
    def test_svd(self, device, dtype):
        # tests linalg.svd, svd, linalg.svdvals
        make_arg = partial(make_tensor, dtype=dtype, device=device)
        backends = ["default"]
        if torch.device(device).type == 'cuda':
            if torch.cuda.has_magma:
                backends.append("magma")
            if has_cusolver():
                backends.append("cusolver")
        ns = (12, 4, 2, 0)
        batches = ((), (0,), (1,), (2,), (2, 1), (0, 2))
        drivers = (None, 'gesvd', 'gesvdj', 'gesvda')
        for backend in backends:
            torch.backends.cuda.preferred_linalg_library(backend)
            for batch, m, n, driver in product(batches, ns, ns, drivers):
                if not (backend == 'cusolver' or driver is None):
                    # only test cases below and skip otherwise:
                    # - backend == 'cusolver' (driver can be anything)
                    # - backend != 'cusolver' (driver should only be None)
                    continue
                shape = batch + (m, n)
                k = min(m, n)
                A = make_arg(shape)
                # reduced (full_matrices=False) SVD must reconstruct A
                U, S, Vh = torch.linalg.svd(A, full_matrices=False, driver=driver)
                self.assertEqual((U @ S.to(A.dtype).diag_embed()) @ Vh, A)
                # full SVD: same singular values, and the first k columns/rows
                # reconstruct A
                U_f, S_f, Vh_f = torch.linalg.svd(A, full_matrices=True, driver=driver)
                self.assertEqual(S_f, S)
                self.assertEqual((U_f[..., :k] @ S_f.to(A.dtype).diag_embed()) @ Vh_f[..., :k, :], A)
                # svdvals alone must match the singular values from svd
                S_s = torch.linalg.svdvals(A, driver=driver)
                self.assertEqual(S_s, S)
                # deprecated torch.svd API (returns V rather than Vh, hence .mH)
                U, S, V = torch.svd(A, some=True)
                self.assertEqual((U @ S.to(A.dtype).diag_embed()) @ V.mH, A)
                U_f, S_f, V_f = torch.svd(A, some=False)
                self.assertEqual(S_f, S)
                self.assertEqual((U_f[..., :k] @ S_f.to(A.dtype).diag_embed()) @ V_f[..., :k].mH, A)
                S_s = torch.svd(A, compute_uv=False).S
                self.assertEqual(S_s, S)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.complex128)
def test_invariance_error_spectral_decompositions(self, device, dtype):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=True)
A = make_arg((3, 3))
with self.assertRaisesRegex(RuntimeError, "ill-defined"):
U, _, Vh = torch.linalg.svd(A, full_matrices=False)
(U + Vh).sum().backward()
A = make_arg((3, 3))
with self.assertRaisesRegex(RuntimeError, "ill-defined"):
V = torch.linalg.eig(A).eigenvectors
V.sum().backward()
A = make_arg((3, 3))
A = A + A.mH
with self.assertRaisesRegex(RuntimeError, "ill-defined"):
Q = torch.linalg.eigh(A).eigenvectors
Q.sum().backward()
@skipCUDAIfNoCusolver # MAGMA backend doesn't work in this case
@skipCUDAIfRocm
@precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_svd_memory_allocation(self, device, dtype):
# test for https://github.com/pytorch/pytorch/issues/61949
# the problem was that tensors of incorrect size were allocated and then narrowed
m = 3
n = 2**20
a = make_tensor((m, n), dtype=dtype, device=device)
# the following should run without errors
S = torch.linalg.svdvals(a)
result = torch.linalg.svd(a, full_matrices=False)
self.assertEqual(result.S, S)
# This test doesn't work with MAGMA backend https://github.com/pytorch/pytorch/issues/72106
@skipMeta
@skipCUDAIfRocm
@skipCUDAIfNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_svd_nan_error(self, device, dtype):
for svd in [torch.svd, torch.linalg.svd]:
# if input contains NaN then an error is triggered for svd
# When cuda < 11.5, cusolver raises CUSOLVER_STATUS_EXECUTION_FAILED when input contains nan.
# When cuda >= 11.5, cusolver normally finishes execution and sets info array indicating convergence issue.
error_msg = r'(CUSOLVER_STATUS_EXECUTION_FAILED|The algorithm failed to converge)'
a = torch.full((3, 3), float('nan'), dtype=dtype, device=device)
a[0] = float('nan')
with self.assertRaisesRegex(torch.linalg.LinAlgError, error_msg):
svd(a)
error_msg = r'(CUSOLVER_STATUS_EXECUTION_FAILED|\(Batch element 1\): The algorithm failed to converge)'
a = torch.randn(3, 33, 33, dtype=dtype, device=device)
a[1, 0, 0] = float('nan')
with self.assertRaisesRegex(torch.linalg.LinAlgError, error_msg):
svd(a)
def cholesky_solve_test_helper(self, A_dims, b_dims, upper, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
b = torch.randn(*b_dims, dtype=dtype, device=device)
A = random_hermitian_pd_matrix(*A_dims, dtype=dtype, device=device)
L = torch.cholesky(A, upper=upper)
return b, A, L
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_cholesky_solve(self, device, dtype):
for (k, n), upper in itertools.product(zip([2, 3, 5], [3, 5, 7]), [True, False]):
b, A, L = self.cholesky_solve_test_helper((n,), (n, k), upper, device, dtype)
x = torch.cholesky_solve(b, L, upper=upper)
self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_cholesky_solve_batched(self, device, dtype):
def cholesky_solve_batch_helper(A_dims, b_dims, upper):
b, A, L = self.cholesky_solve_test_helper(A_dims, b_dims, upper, device, dtype)
x_exp_list = []
for i in range(b_dims[0]):
x_exp_list.append(torch.cholesky_solve(b[i], L[i], upper=upper))
x_exp = torch.stack(x_exp_list) # Stacked output
x_act = torch.cholesky_solve(b, L, upper=upper) # Actual output
self.assertEqual(x_act, x_exp) # Equality check
Ax = np.matmul(A.cpu(), x_act.cpu())
self.assertEqual(b, Ax) # Correctness check
for upper, batchsize in itertools.product([True, False], [1, 3, 4]):
cholesky_solve_batch_helper((5, batchsize), (batchsize, 5, 10), upper)
@slowTest
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_cholesky_solve_batched_many_batches(self, device, dtype):
for A_dims, b_dims in zip([(5, 256, 256), (5,)], [(5, 10), (512, 512, 5, 10)]):
for upper in [True, False]:
b, A, L = self.cholesky_solve_test_helper(A_dims, b_dims, upper, device, dtype)
x = torch.cholesky_solve(b, L, upper)
Ax = torch.matmul(A, x)
self.assertEqual(Ax, b.expand_as(Ax))
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_cholesky_solve_batched_broadcasting(self, device, dtype):
        """cholesky_solve must broadcast the batch dimensions of A and b;
        each case is cross-checked against numpy.linalg.solve on the full system."""
        from numpy.linalg import solve
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix
        def run_test(A_dims, b_dims, upper):
            A_matrix_size = A_dims[-1]
            A_batch_dims = A_dims[:-2]
            # Build on CPU first so NumPy sees the exact same operands.
            A = random_hermitian_pd_matrix(A_matrix_size, *A_batch_dims,
                                           dtype=dtype, device='cpu')
            b = torch.randn(*b_dims, dtype=dtype, device='cpu')
            x_exp = torch.tensor(solve(A.numpy(), b.numpy()), dtype=dtype, device=device)
            A, b = A.to(dtype=dtype, device=device), b.to(dtype=dtype, device=device)
            L = torch.linalg.cholesky(A, upper=upper)
            x = torch.cholesky_solve(b, L, upper=upper)
            self.assertEqual(x, x_exp)
            # https://github.com/pytorch/pytorch/issues/42695
            x = torch.cholesky_solve(b, L, upper=upper, out=x)
            self.assertEqual(x, x_exp)
        # test against numpy.linalg.solve
        for upper in [True, False]:
            run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6), upper)  # no broadcasting
            run_test((2, 1, 3, 4, 4), (4, 6), upper)  # broadcasting b
            run_test((4, 4), (2, 1, 3, 4, 2), upper)  # broadcasting A
            run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5), upper)  # broadcasting A & b
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_cholesky_solve_out_errors_and_warnings(self, device, dtype):
        """The out= variant of cholesky_solve must reject an out tensor of a
        non-castable dtype or wrong device, and warn (not fail) on wrong shape."""
        # dtypes should be safely castable
        a = torch.eye(2, dtype=dtype, device=device)
        b = torch.randn(2, 1, dtype=dtype, device=device)
        out = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.cholesky_solve(b, a, out=out)
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, dtype=dtype, device=wrong_device)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.cholesky_solve(b, a, out=out)
        # if out tensor with wrong shape is passed a warning is given
        with warnings.catch_warnings(record=True) as w:
            out = torch.empty(1, dtype=dtype, device=device)
            # Trigger warning
            torch.cholesky_solve(b, a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float32: 2e-3, torch.complex64: 2e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_inverse(self, device, dtype):
        """Check torch.inverse, torch.linalg.inv and linalg.inv_ex against NumPy
        and against the identity A @ A^-1 == I, for a grid of batch shapes and
        sizes, including non-contiguous and strided inputs and the out= variant."""
        make_fullrank = make_fullrank_matrices_with_distinct_singular_values
        make_arg = partial(make_fullrank, device=device, dtype=dtype)
        def run_test(torch_inverse, matrix, batches, n):
            matrix_inverse = torch_inverse(matrix)
            # Compare against NumPy output
            # NumPy uses 'gesv' LAPACK routine solving the equation A A_inv = I
            # But in PyTorch 'gertf' + 'getri' is used causing element-wise differences
            expected = np.linalg.inv(matrix.cpu().numpy())
            self.assertEqual(matrix_inverse, expected, atol=self.precision, rtol=self.precision)
            # Additional correctness tests, check matrix*matrix_inverse == identity
            identity = torch.eye(n, dtype=dtype, device=device)
            self.assertEqual(identity.expand_as(matrix), np.matmul(matrix.cpu(), matrix_inverse.cpu()))
            self.assertEqual(identity.expand_as(matrix), np.matmul(matrix_inverse.cpu(), matrix.cpu()))
            # check the out= variant
            # prepare the expected out tensor
            # (column-major layout so the backend can write into it directly)
            matrix_inverse_out = torch.empty(*batches, n, n, dtype=dtype, device=device)
            matrix_inverse_out_t = matrix_inverse_out.mT.clone(memory_format=torch.contiguous_format)
            matrix_inverse_out = matrix_inverse_out_t.mT
            ans = torch_inverse(matrix, out=matrix_inverse_out)
            self.assertEqual(matrix_inverse_out, ans, atol=0, rtol=0)
            self.assertEqual(matrix_inverse_out, matrix_inverse, atol=0, rtol=0)
            # batched matrices: 3+ dimensional tensors, check matrix_inverse same as single-inverse for each matrix
            if matrix.ndim > 2 and batches[0] != 0:
                expected_inv_list = []
                p = int(np.prod(batches))  # use `p` instead of -1, so that the test works for empty input as well
                for mat in matrix.contiguous().view(p, n, n):
                    expected_inv_list.append(torch_inverse(mat))
                expected_inv = torch.stack(expected_inv_list).view(*batches, n, n)
                if self.device_type == 'cuda' and dtype in [torch.float32, torch.complex64]:
                    # single-inverse is done using cuSOLVER, while batched inverse is done using MAGMA
                    # individual values can be significantly different for fp32, hence rather high rtol is used
                    # the important thing is that torch_inverse passes above checks with identity
                    self.assertEqual(matrix_inverse, expected_inv, atol=1e-1, rtol=1e-2)
                else:
                    self.assertEqual(matrix_inverse, expected_inv)
        # helper function for testing torch.linalg.inv_ex
        def test_inv_ex(input, out=None):
            if out is not None:
                info = torch.empty(0, dtype=torch.int32, device=device)
                return torch.linalg.inv_ex(input, out=(out, info)).inverse
            return torch.linalg.inv_ex(input).inverse
        for torch_inverse in [torch.inverse, torch.linalg.inv, test_inv_ex]:
            for batches, n in itertools.product(
                [[], [0], [2], [2, 1]],
                [0, 5]
            ):
                matrices = make_arg(*batches, n, n)
                run_test(torch_inverse, matrices, batches, n)
                # test non-contiguous input
                run_test(torch_inverse, matrices.mT, batches, n)
                if n > 0:
                    # strided view: every other row/column of a double-sized matrix
                    run_test(
                        torch_inverse,
                        make_arg(*batches, 2 * n, 2 * n)
                        .view(-1, n * 2, n * 2)[:, ::2, ::2].view(*batches, n, n),
                        batches, n
                    )
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_inv_ex_info_device(self, device, dtype):
A = torch.eye(3, 3, dtype=dtype, device=device)
info = torch.linalg.inv_ex(A).info
self.assertTrue(info.device == A.device)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_inv_ex_singular(self, device, dtype):
# if the input matrix is not invertible, info with positive integer is returned
A = torch.eye(3, 3, dtype=dtype, device=device)
A[-1, -1] = 0 # Now A is singular
info = torch.linalg.inv_ex(A).info
self.assertEqual(info, 3)
with self.assertRaisesRegex(torch.linalg.LinAlgError,
r'diagonal element 3 is zero, the inversion could not be completed'):
torch.linalg.inv_ex(A, check_errors=True)
# if at least one matrix in the batch is not positive definite,
# batched info with positive integer for the corresponding matrix is returned
A = torch.eye(3, 3, dtype=dtype, device=device)
A = A.reshape((1, 3, 3))
A = A.repeat(5, 1, 1)
A[3, -2, -2] = 0 # Now A[3] is singular
info = torch.linalg.inv_ex(A).info
expected_info = torch.zeros(A.shape[:-2], dtype=torch.int32, device=device)
expected_info[3] = 2
self.assertEqual(info, expected_info)
with self.assertRaisesRegex(torch.linalg.LinAlgError, r'\(Batch element 3\): The diagonal element 2 is zero'):
torch.linalg.inv_ex(A, check_errors=True)
@slowTest
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@skipCUDAIfRocm
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 2e-3, torch.complex64: 2e-3,
torch.float64: 1e-5, torch.complex128: 1e-5})
def test_inverse_many_batches(self, device, dtype):
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_arg = partial(make_fullrank, device=device, dtype=dtype)
def test_inverse_many_batches_helper(torch_inverse, b, n):
matrices = make_arg(b, n, n)
matrices_inverse = torch_inverse(matrices)
# Compare against NumPy output
expected = np.linalg.inv(matrices.cpu().numpy())
self.assertEqual(matrices_inverse, expected, atol=self.precision, rtol=1e-3)
for torch_inverse in [torch.inverse, torch.linalg.inv]:
test_inverse_many_batches_helper(torch_inverse, 5, 256)
test_inverse_many_batches_helper(torch_inverse, 3, 512)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@onlyNativeDeviceTypes # TODO: XLA doesn't raise exception
@dtypes(*floating_and_complex_types())
def test_inverse_errors(self, device, dtype):
# inverse expects batches of square matrices as input
with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
torch.inverse(torch.randn(2, 3, 4, 3))
# if input is not invertible, RuntimeError is raised mentioning the first non-invertible batch
def run_test_singular_input(batch_dim, n):
x = torch.eye(3, 3, dtype=dtype, device=device).reshape((1, 3, 3)).repeat(batch_dim, 1, 1)
x[n, -1, -1] = 0
with self.assertRaisesRegex(torch.linalg.LinAlgError, rf'\(Batch element {n}\): The diagonal element 3 is zero'):
torch.inverse(x)
for params in [(1, 0), (2, 0), (2, 1), (4, 0), (4, 2), (10, 2)]:
run_test_singular_input(*params)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@onlyNativeDeviceTypes # TODO: XLA doesn't raise exception
@skipCUDAIfRocm
@skipCUDAVersionIn([(11, 3), (11, 6), (11, 7)]) # https://github.com/pytorch/pytorch/issues/57482
@dtypes(*floating_and_complex_types())
def test_inverse_errors_large(self, device, dtype):
# Test batched inverse of singular matrices reports errors without crashing (gh-51930)
x = torch.empty((8, 10, 616, 616), dtype=dtype, device=device)
x[:] = torch.eye(616, dtype=dtype, device=device)
x[..., 10, 10] = 0
with self.assertRaisesRegex(torch.linalg.LinAlgError, r'\(Batch element 0\): The diagonal element 11 is zero'):
torch.inverse(x)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3, torch.float64: 1e-7, torch.complex128: 1e-7})
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_pinv(self, device, dtype):
        """Verify linalg.pinv against the Moore-Penrose defining identities and
        against numpy.linalg.pinv, for square/fat/thin/empty matrices, several
        rcond forms (float, tensor of various dtypes, broadcast), and both the
        hermitian=False and hermitian=True paths, plus the out= variant."""
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix
        def run_test_main(A, hermitian):
            # Testing against definition for pseudo-inverses
            A_pinv = torch.linalg.pinv(A, hermitian=hermitian)
            np_A = A.cpu().numpy()
            np_A_pinv = A_pinv.cpu().numpy()
            if A.numel() > 0:
                # Moore-Penrose conditions: A A+ A == A, A+ A A+ == A+, and
                # both A A+ and A+ A are Hermitian.
                self.assertEqual(A, np_A @ np_A_pinv @ np_A, atol=self.precision, rtol=self.precision)
                self.assertEqual(A_pinv, np_A_pinv @ np_A @ np_A_pinv, atol=self.precision, rtol=self.precision)
                self.assertEqual(np_A @ np_A_pinv, (np_A @ np_A_pinv).conj().swapaxes(-2, -1))
                self.assertEqual(np_A_pinv @ np_A, (np_A_pinv @ np_A).conj().swapaxes(-2, -1))
            else:
                self.assertEqual(A.shape, A_pinv.shape[:-2] + (A_pinv.shape[-1], A_pinv.shape[-2]))
            # Check out= variant
            out = torch.empty_like(A_pinv)
            ans = torch.linalg.pinv(A, hermitian=hermitian, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, A_pinv)
        def run_test_numpy(A, hermitian):
            # Check against NumPy output
            # Test float rcond, and specific value for each matrix
            rconds = [float(torch.rand(1)), ]
            # Test different types of rcond tensor
            for rcond_type in all_types():
                rconds.append(torch.rand(A.shape[:-2], dtype=torch.double, device=device).to(rcond_type))
            # Test broadcasting of rcond
            if A.ndim > 2:
                rconds.append(torch.rand(A.shape[-3], device=device))
            for rcond in rconds:
                actual = torch.linalg.pinv(A, rcond=rcond, hermitian=hermitian)
                # rcond and rtol are aliases; both spellings must agree
                torch_rtol = torch.linalg.pinv(A, rtol=rcond, hermitian=hermitian)
                self.assertEqual(actual, torch_rtol)
                numpy_rcond = rcond if isinstance(rcond, float) else rcond.cpu().numpy()
                expected = np.linalg.pinv(A.cpu().numpy(), rcond=numpy_rcond, hermitian=hermitian)
                self.assertEqual(actual, expected, atol=self.precision, rtol=1e-5)
        for sizes in [(5, 5), (3, 5, 5), (3, 2, 5, 5),  # square matrices
                      (3, 2), (5, 3, 2), (2, 5, 3, 2),  # fat matrices
                      (2, 3), (5, 2, 3), (2, 5, 2, 3),  # thin matrices
                      (0, 0), (0, 2), (2, 0), (3, 0, 0), (0, 3, 0), (0, 0, 3)]:  # zero numel matrices
            A = torch.randn(*sizes, dtype=dtype, device=device)
            hermitian = False
            run_test_main(A, hermitian)
            run_test_numpy(A, hermitian)
        # Check hermitian = True
        for sizes in [(5, 5), (3, 5, 5), (3, 2, 5, 5),  # square matrices
                      (0, 0), (3, 0, 0), ]:  # zero numel square matrices
            A = random_hermitian_pd_matrix(sizes[-1], *sizes[:-2], dtype=dtype, device=device)
            hermitian = True
            run_test_main(A, hermitian)
            run_test_numpy(A, hermitian)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_pinv_errors_and_warnings(self, device, dtype):
        """linalg.pinv must reject <2D input, non-castable out dtype, mismatched
        devices, and complex rcond/atol/rtol; a wrong-shape out only warns."""
        # pinv requires at least 2D tensor
        a = torch.randn(1, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "expected a tensor with 2 or more dimensions"):
            torch.linalg.pinv(a)
        # if non-empty out tensor with wrong shape is passed a warning is given
        a = torch.randn(3, 3, dtype=dtype, device=device)
        out = torch.empty(7, 7, dtype=dtype, device=device)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.pinv(a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes of out and input should be safely castable
        out = torch.empty_like(a).to(torch.int)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.linalg.pinv(a, out=out)
        if torch.cuda.is_available():
            # device of out and input should match
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty_like(a).to(wrong_device)
            with self.assertRaisesRegex(RuntimeError, "Expected result and input tensors to be on the same device"):
                torch.linalg.pinv(a, out=out)
            # device of rcond and input should match
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            rcond = torch.full((), 1e-2, device=wrong_device)
            with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
                torch.linalg.pinv(a, rcond=rcond)
        # rcond can't be complex
        rcond = torch.full((), 1j, device=device)
        with self.assertRaisesRegex(RuntimeError, "rcond tensor of complex type is not supported"):
            torch.linalg.pinv(a, rcond=rcond)
        # atol can't be complex
        atol = torch.full((), 1j, device=device)
        with self.assertRaisesRegex(RuntimeError, "atol tensor of complex type is not supported"):
            torch.linalg.pinv(a, atol=atol)
        # rtol can't be complex
        rtol = torch.full((), 1j, device=device)
        with self.assertRaisesRegex(RuntimeError, "rtol tensor of complex type is not supported"):
            torch.linalg.pinv(a, rtol=rtol)
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_inv_errors_and_warnings(self, device, dtype):
        """linalg.inv must raise on non-square/<2D/singular input and on a
        non-castable or wrong-device out tensor; a wrong-shape out only warns."""
        # inv expects batches of square matrices as input
        a = torch.randn(2, 3, 4, 3, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            torch.linalg.inv(a)
        # inv requires the input to be at least 2 dimensional tensor
        a = torch.randn(2, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
            torch.linalg.inv(a)
        # if input is not invertible, RuntimeError is raised mentioning the first non-invertible batch
        def run_test_singular_input(batch_dim, n):
            a = torch.eye(3, 3, dtype=dtype, device=device).reshape((1, 3, 3)).repeat(batch_dim, 1, 1)
            a[n, -1, -1] = 0
            with self.assertRaisesRegex(torch.linalg.LinAlgError, rf"\(Batch element {n}\): The diagonal element 3 is zero"):
                torch.linalg.inv(a)
        for params in [(1, 0), (2, 0), (2, 1), (4, 0), (4, 2), (10, 2)]:
            run_test_singular_input(*params)
        # dtypes should match
        a = torch.eye(2, dtype=dtype, device=device)
        out = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "got result with dtype Int"):
            torch.linalg.inv(a, out=out)
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, device=wrong_device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.inv(a, out=out)
        # if out tensor with wrong shape is passed a warning is given
        with warnings.catch_warnings(record=True) as w:
            a = torch.eye(2, dtype=dtype, device=device)
            out = torch.empty(1, dtype=dtype, device=device)
            # Trigger warning
            torch.linalg.inv(a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # if out tensor is in batched column major format but has the wrong shape, a warning is given
        with warnings.catch_warnings(record=True) as w:
            a = torch.eye(2, dtype=dtype, device=device)
            out = torch.empty(3, 3, dtype=dtype, device=device)
            out = out.mT.clone(memory_format=torch.contiguous_format)
            out = out.mT
            self.assertTrue(out.mT.is_contiguous())
            # Trigger warning
            torch.linalg.inv(a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
def solve_test_helper(self, A_dims, b_dims, device, dtype):
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_A = partial(make_fullrank, device=device, dtype=dtype)
b = torch.randn(*b_dims, dtype=dtype, device=device)
A = make_A(*A_dims)
return b, A
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3})
def test_solve(self, device, dtype):
def run_test(n, batch, rhs):
A_dims = (*batch, n, n)
b_dims = (*batch, n, *rhs)
b, A = self.solve_test_helper(A_dims, b_dims, device, dtype)
# Correctness test
x = torch.linalg.solve(A, b)
if rhs == ():
Ax = np.matmul(A.cpu(), x.unsqueeze(-1).cpu())
Ax.squeeze_(-1)
else:
Ax = np.matmul(A.cpu(), x.cpu())
self.assertEqual(b.expand_as(Ax), Ax)
# Check against NumPy
expected = np.linalg.solve(A.cpu().numpy(), b.expand_as(x).cpu().numpy())
self.assertEqual(x, expected)
batches = [(), (0, ), (3, ), (2, 3)]
ns = [0, 5, 32]
nrhs = [(), (1, ), (5, )]
for n, batch, rhs in itertools.product(ns, batches, nrhs):
run_test(n, batch, rhs)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_solve_batched_broadcasting(self, device, dtype):
from numpy.linalg import solve
def run_test(A_dims, B_dims):
A_matrix_size = A_dims[-1]
A_batch_dims = A_dims[:-2]
B, A = self.solve_test_helper(A_batch_dims + (A_matrix_size, A_matrix_size), B_dims, device, dtype)
actual = torch.linalg.solve(A, B)
expected = solve(A.cpu().numpy(), B.cpu().numpy())
self.assertEqual(actual, expected)
# test against numpy.linalg.solve
run_test((5, 5), (2, 0, 5, 3)) # broadcasting with 0 batch dim
run_test((2, 0, 5, 5), (5, 3)) # broadcasting with 0 batch dim
run_test((2, 1, 3, 4, 4), (4, 6)) # broadcasting B
run_test((4, 4), (2, 1, 3, 4, 2)) # broadcasting A
run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5)) # broadcasting A & B
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
@precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})
def test_tensorsolve(self, device, dtype):
def run_test(a_shape, dims):
a = torch.randn(a_shape, dtype=dtype, device=device)
b = torch.randn(a_shape[:2], dtype=dtype, device=device)
result = torch.linalg.tensorsolve(a, b, dims=dims)
expected = np.linalg.tensorsolve(a.cpu().numpy(), b.cpu().numpy(), axes=dims)
self.assertEqual(result, expected)
# check the out= variant
out = torch.empty_like(result)
ans = torch.linalg.tensorsolve(a, b, dims=dims, out=out)
self.assertEqual(ans, out)
self.assertEqual(ans, result)
a_shapes = [(2, 3, 6), (3, 4, 4, 3)]
dims = [None, (0, 2)]
for a_shape, d in itertools.product(a_shapes, dims):
run_test(a_shape, d)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_tensorsolve_empty(self, device, dtype):
# Check for empty inputs. NumPy does not work for these cases.
a = torch.empty(0, 0, 1, 2, 3, 0, dtype=dtype, device=device)
b = torch.empty(a.shape[:2], dtype=dtype, device=device)
x = torch.linalg.tensorsolve(a, b)
self.assertEqual(torch.tensordot(a, x, dims=len(x.shape)), b)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32)
    def test_tensorsolve_errors_and_warnings(self, device, dtype):
        """tensorsolve must reject inputs that don't reshape to a square system,
        a non-castable out dtype, and a wrong-device out; wrong-shape out warns."""
        # tensorsolve expects the input that can be reshaped to a square matrix
        a = torch.eye(2 * 3 * 4, dtype=dtype, device=device).reshape((2 * 3, 4, 2, 3, 4))
        b = torch.randn(8, 4, dtype=dtype, device=device)
        # sanity check: shapes genuinely incompatible for this case
        self.assertTrue(np.prod(a.shape[2:]) != np.prod(b.shape))
        with self.assertRaisesRegex(RuntimeError, r'Expected self to satisfy the requirement'):
            torch.linalg.tensorsolve(a, b)
        # if non-empty out tensor with wrong shape is passed a warning is given
        out = torch.empty_like(a)
        b = torch.randn(6, 4, dtype=dtype, device=device)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.tensorsolve(a, b, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes should be safely castable
        out = torch.empty_like(a).to(torch.int)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.linalg.tensorsolve(a, b, out=out)
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, dtype=dtype, device=wrong_device)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.tensorsolve(a, b, out=out)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float: 1e-3, torch.cfloat: 1e-3})
def test_tensorinv(self, device, dtype):
def run_test(a_shape, ind):
a = torch.randn(a_shape, dtype=dtype, device=device)
a_numpy = a.cpu().numpy()
result = torch.linalg.tensorinv(a, ind=ind)
expected = np.linalg.tensorinv(a_numpy, ind=ind)
self.assertEqual(result, expected)
# check the out= variant
out = torch.empty_like(result)
ans = torch.linalg.tensorinv(a, ind=ind, out=out)
self.assertEqual(ans, out)
self.assertEqual(ans, result)
# compare to NumPy output
run_test((12, 3, 4), ind=1)
run_test((3, 8, 24), ind=2)
run_test((18, 3, 3, 2), ind=1)
run_test((1, 4, 2, 2), ind=2)
run_test((2, 3, 5, 30), ind=3)
run_test((24, 2, 2, 3, 2), ind=1)
run_test((3, 4, 2, 3, 2), ind=2)
run_test((1, 2, 3, 2, 3), ind=3)
run_test((3, 2, 1, 2, 12), ind=4)
@skipMeta # See https://github.com/pytorch/pytorch/issues/53739
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_tensorinv_empty(self, device, dtype):
for ind in range(1, 4):
# Check for empty inputs. NumPy does not work for these cases.
a = torch.empty(0, 0, 1, 2, 3, 0, dtype=dtype, device=device)
a_inv = torch.linalg.tensorinv(a, ind=ind)
self.assertEqual(a_inv.shape, a.shape[ind:] + a.shape[:ind])
    @skipMeta  # See https://github.com/pytorch/pytorch/issues/53739
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_tensorinv_errors_and_warnings(self, device, dtype):
        """tensorinv must reject shapes that don't flatten to a square matrix,
        a non-positive ind, a non-castable out dtype, and a wrong-device out;
        a wrong-shape out only warns."""
        def check_shape(a_shape, ind):
            # tensorinv requires the input to satisfy
            # prod(a.shape[ind:]) == prod(a.shape[:ind])
            a = torch.randn(a_shape, dtype=dtype, device=device)
            with self.assertRaisesRegex(RuntimeError, "Expected self to satisfy the requirement"):
                torch.linalg.tensorinv(a, ind=ind)
        def check_ind(a_shape, ind):
            # ind must be a strictly positive integer
            a = torch.randn(a_shape, dtype=dtype, device=device)
            with self.assertRaisesRegex(RuntimeError, "Expected a strictly positive integer"):
                torch.linalg.tensorinv(a, ind=ind)
        def check_out(a_shape, ind):
            # if non-empty out tensor with wrong shape is passed a warning is given
            a = torch.randn(a_shape, dtype=dtype, device=device)
            out = torch.empty_like(a)
            with warnings.catch_warnings(record=True) as w:
                # Trigger warning
                torch.linalg.tensorinv(a, ind=ind, out=out)
                # Check warning occurs
                self.assertEqual(len(w), 1)
                self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
            # dtypes should be safely castable
            out = torch.empty(0, dtype=torch.int, device=device)
            with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
                torch.linalg.tensorinv(a, ind=ind, out=out)
            # device should match
            if torch.cuda.is_available():
                wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
                out = torch.empty(0, dtype=dtype, device=wrong_device)
                with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                    torch.linalg.tensorinv(a, ind=ind, out=out)
        # test for invalid shape
        check_shape((2, 3, 4), ind=1)
        check_shape((1, 2, 3, 4), ind=3)
        # test for invalid ind
        check_ind((12, 3, 4), ind=-1)
        check_ind((18, 3, 3, 2), ind=0)
        # test for invalid out tensor
        check_out((12, 3, 4), ind=1)
        check_out((3, 8, 24), ind=2)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_tensorinv_singular_input(self, device, dtype):
def check_singular_input(a_shape, ind):
prod_ind_end = np.prod(a_shape[ind:])
a = torch.eye(prod_ind_end, dtype=dtype, device=device)
a[-1, -1] = 0 # Now `a` is singular
a = a.reshape(a_shape)
with self.assertRaisesRegex(torch.linalg.LinAlgError, "The diagonal element"):
torch.linalg.tensorinv(a, ind=ind)
# test for non-invertible input
check_singular_input((12, 3, 4), ind=1)
check_singular_input((3, 6, 18), ind=2)
    def _test_dot_vdot_vs_numpy(self, device, dtype, torch_fn, np_fn):
        """Common driver for dot/vdot: compare `torch_fn` against `np_fn` on
        empty, contiguous, 0-strided (expanded) and 2-strided inputs, and check
        the out= variant."""
        def check(x, y):
            # Compare with numpy
            res = torch_fn(x, y)
            if x.dtype == torch.bfloat16:
                # NumPy has no bfloat16: compute the reference in float32
                ref = torch.from_numpy(np.array(np_fn(x.cpu().float().numpy(), y.cpu().float().numpy())))
            else:
                ref = torch.from_numpy(np.array(np_fn(x.cpu().numpy(), y.cpu().numpy())))
            if res.dtype == torch.bfloat16:
                # round the reference back down to bfloat16 before comparing
                self.assertEqual(res.cpu(), ref.bfloat16())
            else:
                self.assertEqual(res.cpu(), ref)
            # Test out variant
            out = torch.empty_like(res)
            torch_fn(x, y, out=out)
            self.assertEqual(out, res)
        # Empty
        x = torch.tensor([], dtype=dtype, device=device)
        y = torch.tensor([], dtype=dtype, device=device)
        check(x, y)
        # Contiguous
        x = 0.1 * torch.randn(5000, dtype=dtype, device=device)
        y = 0.1 * torch.randn(5000, dtype=dtype, device=device)
        check(x, y)
        # 0 strided
        y = 0.1 * torch.randn(1, dtype=dtype, device=device).expand(5000)
        check(x, y)
        # 2 strided
        check(x[::2], y[::2])
    @dtypes(torch.float, torch.cfloat, torch.bfloat16)
    @dtypesIfCUDA(torch.float, torch.cfloat)
    @precisionOverride({torch.cfloat: 1e-4, torch.float32: 5e-5, torch.bfloat16: 1e-0})
    def test_dot_vs_numpy(self, device, dtype):
        """torch.dot agrees with numpy.dot (delegates to the shared driver)."""
        self._test_dot_vdot_vs_numpy(device, dtype, torch.dot, np.dot)
    @dtypes(torch.float, torch.cfloat)
    @precisionOverride({torch.cfloat: 1e-4, torch.float32: 5e-5})
    def test_vdot_vs_numpy(self, device, dtype):
        """torch.vdot agrees with numpy.vdot (delegates to the shared driver)."""
        self._test_dot_vdot_vs_numpy(device, dtype, torch.vdot, np.vdot)
def _test_dot_vdot_invalid_args(self, device, torch_fn, complex_dtypes=False):
def check(x, y, regex):
with self.assertRaisesRegex(RuntimeError, regex):
torch_fn(x, y)
if complex_dtypes:
x = torch.randn(1, dtype=torch.cfloat, device=device)
y = torch.randn(3, dtype=torch.cdouble, device=device)
else:
x = torch.randn(1, dtype=torch.float, device=device)
y = torch.randn(3, dtype=torch.double, device=device)
check(x, y, 'dot : expected both vectors to have same dtype')
check(x.reshape(1, 1), y, '1D tensors expected')
check(x.expand(9), y.to(x.dtype), 'inconsistent tensor size')
if self.device_type != 'cpu':
x_cpu = x.expand(3).cpu()
check(x_cpu, y.to(x.dtype), 'Expected all tensors to be on the same device')
    @onlyNativeDeviceTypes
    def test_vdot_invalid_args(self, device):
        """torch.vdot rejects invalid args for both real and complex dtypes."""
        self._test_dot_vdot_invalid_args(device, torch.vdot)
        self._test_dot_vdot_invalid_args(device, torch.vdot, complex_dtypes=True)
    @onlyNativeDeviceTypes
    def test_dot_invalid_args(self, device):
        """torch.dot rejects invalid args for both real and complex dtypes."""
        self._test_dot_vdot_invalid_args(device, torch.dot)
        self._test_dot_vdot_invalid_args(device, torch.dot, complex_dtypes=True)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_matrix_rank(self, device, dtype):
        """linalg.matrix_rank: rank(a) == rank(a^H), the hermitian=True fast path
        agrees on Gram matrices, results match numpy.linalg.matrix_rank (with and
        without tol), and the out= variant works."""
        matrix_rank = torch.linalg.matrix_rank
        def run_test(shape0, shape1, batch):
            a = torch.randn(*batch, shape0, shape1, dtype=dtype, device=device)
            rank_a = matrix_rank(a)
            # rank is invariant under conjugate transposition
            self.assertEqual(rank_a, matrix_rank(a.mH))
            # a @ a^H is Hermitian, so both code paths must agree
            aaH = torch.matmul(a, a.mH)
            rank_aaH = matrix_rank(aaH)
            rank_aaH_hermitian = matrix_rank(aaH, hermitian=True)
            self.assertEqual(rank_aaH, rank_aaH_hermitian)
            aHa = torch.matmul(a.mH, a)
            self.assertEqual(matrix_rank(aHa), matrix_rank(aHa, hermitian=True))
            # check against NumPy
            self.assertEqual(rank_a, np.linalg.matrix_rank(a.cpu().numpy()))
            self.assertEqual(matrix_rank(a, 0.01), np.linalg.matrix_rank(a.cpu().numpy(), 0.01))
            self.assertEqual(rank_aaH, np.linalg.matrix_rank(aaH.cpu().numpy()))
            self.assertEqual(matrix_rank(aaH, 0.01), np.linalg.matrix_rank(aaH.cpu().numpy(), 0.01))
            # hermitian flag for NumPy was added in 1.14.0
            if np.lib.NumpyVersion(np.__version__) >= '1.14.0':
                self.assertEqual(rank_aaH_hermitian,
                                 np.linalg.matrix_rank(aaH.cpu().numpy(), hermitian=True))
                self.assertEqual(matrix_rank(aaH, 0.01, True),
                                 np.linalg.matrix_rank(aaH.cpu().numpy(), 0.01, True))
            # check out= variant
            out = torch.empty(a.shape[:-2], dtype=torch.int64, device=device)
            ans = matrix_rank(a, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, rank_a)
        shapes = (3, 13)
        batches = ((), (0, ), (4, ), (3, 5, ))
        for (shape0, shape1), batch in zip(itertools.product(shapes, reversed(shapes)), batches):
            run_test(shape0, shape1, batch)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_matrix_rank_atol(self, device, dtype):
        """Check that matrix_rank's `atol` agrees with the `tol` spelling and with
        NumPy, for float tolerances and tensor tolerances of various dtypes."""
        def run_test_atol(shape0, shape1, batch):
            a = make_tensor((*batch, shape0, shape1), dtype=dtype, device=device)
            # Check against NumPy output
            # Test float tol, and specific value for each matrix
            tolerances = [float(torch.rand(1)), ]
            # Test different types of tol tensor
            for tol_type in all_types():
                tolerances.append(make_tensor(a.shape[:-2], dtype=tol_type, device=device, low=0))
            # Test broadcasting of tol
            if a.ndim > 2:
                tolerances.append(make_tensor(a.shape[-3], dtype=torch.float32, device=device, low=0))
            for tol in tolerances:
                actual = torch.linalg.matrix_rank(a, atol=tol)
                # the `tol=` spelling must agree with `atol=`
                actual_tol = torch.linalg.matrix_rank(a, tol=tol)
                self.assertEqual(actual, actual_tol)
                numpy_tol = tol if isinstance(tol, float) else tol.cpu().numpy()
                expected = np.linalg.matrix_rank(a.cpu().numpy(), tol=numpy_tol)
                self.assertEqual(actual, expected)
        shapes = (3, 13)
        batches = ((), (0, ), (4, ), (3, 5, ))
        for (shape0, shape1), batch in zip(itertools.product(shapes, reversed(shapes)), batches):
            run_test_atol(shape0, shape1, batch)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float64)
    def test_matrix_rank_atol_rtol(self, device, dtype):
        """Check the interaction of matrix_rank's `atol` and `rtol` arguments on a
        matrix with known, distinct singular values."""
        make_fullrank = make_fullrank_matrices_with_distinct_singular_values
        make_arg = partial(make_fullrank, device=device, dtype=dtype)
        # creates a full-rank n x n matrix whose distinct singular values lie in
        # the range [2/3, 3/2]: 1 + 1/2, 1 - 1/3, 1 + 1/4, 1 - 1/5, ...
        # (per the helper's naming; the assertions below rely on that spectrum)
        n = 9
        a = make_arg(n, n)
        # test float and tensor variants
        for tol_value in [0.81, torch.tensor(0.81, device=device)]:
            # using rtol (relative tolerance) takes into account the largest singular value (1.5 in this case)
            result = torch.linalg.matrix_rank(a, rtol=tol_value)
            self.assertEqual(result, 2)  # there are 2 singular values above 1.5*0.81 = 1.215
            # atol is used directly to compare with singular values
            result = torch.linalg.matrix_rank(a, atol=tol_value)
            self.assertEqual(result, 7)  # there are 7 singular values above 0.81
            # when both are specified the maximum tolerance is used
            result = torch.linalg.matrix_rank(a, atol=tol_value, rtol=tol_value)
            self.assertEqual(result, 2)  # there are 2 singular values above max(0.81, 1.5*0.81)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @skipCUDAVersionIn([(11, 6), (11, 7)]) # https://github.com/pytorch/pytorch/issues/75391
    @dtypes(*floating_and_complex_types())
    def test_matrix_rank_empty(self, device, dtype):
        """matrix_rank on matrices with zero rows and/or columns must report rank 0
        for every batch element and every calling variant."""
        matrix_rank = torch.linalg.matrix_rank
        # NumPy doesn't work for input with no elements
        def run_test(shape0, shape1, batch):
            a = torch.randn(*batch, shape0, shape1, dtype=dtype, device=device)
            rank_a = matrix_rank(a)
            # one rank value (0) per batch element
            expected = torch.zeros(batch, dtype=torch.int64, device=device)
            self.assertEqual(rank_a, matrix_rank(a.mH))
            aaH = torch.matmul(a, a.mH)
            rank_aaH = matrix_rank(aaH)
            rank_aaH_hermitian = matrix_rank(aaH, hermitian=True)
            self.assertEqual(rank_aaH, rank_aaH_hermitian)
            aHa = torch.matmul(a.mH, a)
            self.assertEqual(matrix_rank(aHa), matrix_rank(aHa, hermitian=True))
            # plain, tol and hermitian variants must all report rank 0
            self.assertEqual(rank_a, expected)
            self.assertEqual(matrix_rank(a, 0.01), expected)
            self.assertEqual(rank_aaH, expected)
            self.assertEqual(matrix_rank(aaH, 0.01), expected)
            self.assertEqual(rank_aaH_hermitian, expected)
            self.assertEqual(matrix_rank(aaH, 0.01, True), expected)
        batches = ((), (4, ), (3, 5, ))
        for batch in batches:
            run_test(0, 0, batch)
            run_test(0, 3, batch)
            run_test(3, 0, batch)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_matrix_rank_out_errors_and_warnings(self, device, dtype):
        """Check error and warning behavior of matrix_rank's out= argument:
        wrong dtype raises, wrong device raises, wrong shape only warns."""
        # dtypes should be safely castable
        a = torch.eye(2, dtype=dtype, device=device)
        out = torch.empty(0, dtype=torch.bool, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Bool"):
            torch.linalg.matrix_rank(a, out=out)
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, dtype=dtype, device=wrong_device)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.matrix_rank(a, out=out)
        # if out tensor with wrong shape is passed a warning is given
        with warnings.catch_warnings(record=True) as w:
            out = torch.empty(3, dtype=dtype, device=device)
            # Trigger warning
            torch.linalg.matrix_rank(a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_matrix_rank_basic(self, device, dtype):
matrix_rank = torch.linalg.matrix_rank
a = torch.eye(10, dtype=dtype, device=device)
self.assertEqual(matrix_rank(a).item(), 10)
self.assertEqual(matrix_rank(a, hermitian=True).item(), 10)
a[5, 5] = 0
self.assertEqual(matrix_rank(a).item(), 9)
self.assertEqual(matrix_rank(a, hermitian=True).item(), 9)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_old_matrix_rank(self, device, dtype):
        """Exercise the legacy torch.matrix_rank API (pre torch.linalg spelling)
        and compare it against NumPy."""
        a = torch.eye(10, dtype=dtype, device=device)
        self.assertEqual(torch.matrix_rank(a).item(), 10)
        # the bool positional arg selects the symmetric/hermitian path
        # (compared against NumPy's hermitian flag below)
        self.assertEqual(torch.matrix_rank(a, True).item(), 10)
        # zeroing one diagonal entry of the identity reduces the rank by one
        a[5, 5] = 0
        self.assertEqual(torch.matrix_rank(a).item(), 9)
        self.assertEqual(torch.matrix_rank(a, True).item(), 9)
        a = torch.randn(24, 42, dtype=dtype, device=device)
        # rank is invariant under transposition
        self.assertEqual(torch.matrix_rank(a), torch.matrix_rank(a.t()))
        aaT = torch.mm(a, a.conj().t())
        self.assertEqual(torch.matrix_rank(aaT), torch.matrix_rank(aaT, True))
        aTa = torch.mm(a.conj().t(), a)
        self.assertEqual(torch.matrix_rank(aTa), torch.matrix_rank(aTa, True))
        a = torch.randn(35, 75, dtype=dtype, device=device)
        # cross-check against NumPy with and without an explicit tolerance
        self.assertEqual(torch.matrix_rank(a), np.linalg.matrix_rank(a.cpu().numpy()))
        self.assertEqual(torch.matrix_rank(a, 0.01), np.linalg.matrix_rank(a.cpu().numpy(), 0.01))
        aaT = torch.mm(a, a.conj().t())
        self.assertEqual(torch.matrix_rank(aaT), np.linalg.matrix_rank(aaT.cpu().numpy()))
        self.assertEqual(torch.matrix_rank(aaT, 0.01), np.linalg.matrix_rank(aaT.cpu().numpy(), 0.01))
        # hermitian flag for NumPy was added in 1.14.0
        if np.lib.NumpyVersion(np.__version__) >= '1.14.0':
            self.assertEqual(torch.matrix_rank(aaT, True), np.linalg.matrix_rank(aaT.cpu().numpy(), True))
            self.assertEqual(torch.matrix_rank(aaT, 0.01, True), np.linalg.matrix_rank(aaT.cpu().numpy(), 0.01, True))
@onlyNativeDeviceTypes
@dtypes(torch.double)
# This tests only the cases where torch.chain_matmul differs from torch.linalg.multi_dot which this is an "alias" for.
def test_chain_matmul(self, device, dtype):
# chain_matmul accepts a single input tensor while multi_dot does not
t = make_tensor((2, 2), dtype=dtype, device=device)
self.assertEqual(t, torch.chain_matmul(t))
with self.assertRaisesRegex(RuntimeError, r"chain_matmul\(\): Expected one or more matrices"):
torch.chain_matmul()
# chain_matmul expects all tensors to be 2D whereas multi_dot allows the first and last tensors to
# be either 1D or 2D
with self.assertRaisesRegex(RuntimeError, r"Tensor dimension is 1, expected 2 instead"):
torch.chain_matmul(make_tensor(1, dtype=dtype, device=device), make_tensor(1, dtype=dtype, device=device))
@onlyNativeDeviceTypes
@dtypes(torch.double, torch.cdouble)
def test_multi_dot(self, device, dtype):
def check(*shapes):
tensors = [make_tensor(shape, dtype=dtype, device=device) for shape in shapes]
np_arrays = [tensor.cpu().numpy() for tensor in tensors]
res = torch.linalg.multi_dot(tensors).cpu()
ref = torch.from_numpy(np.array(np.linalg.multi_dot(np_arrays)))
self.assertEqual(res, ref)
# test for inputs with empty dimensions
check([0], [0])
check([2], [2, 0])
check([1, 0], [0])
check([0, 2], [2, 1])
check([2, 2], [2, 0])
check([2, 0], [0, 3])
check([0, 0], [0, 1])
check([4, 2], [2, 0], [0, 3], [3, 2])
# test variable output shapes
check([2], [2])
check([1, 2], [2])
check([2], [2, 1])
check([1, 2], [2, 1])
check([3, 2], [2, 4])
# test multiple input tensors
check([3], [3, 4], [4, 2], [2, 5], [5])
check([1, 2], [2, 2], [2, 3], [3, 1])
# test large tensors
check([10, 100], [100, 5], [5, 50])
check([10, 20], [20, 30], [30, 5])
    @onlyNativeDeviceTypes
    @dtypes(torch.float)
    def test_multi_dot_errors(self, device, dtype):
        """Check linalg.multi_dot's input validation: arity, dimensionality,
        dtype/device consistency, out= constraints, and shape mismatches."""
        def check(tensors, out, msg):
            with self.assertRaisesRegex(RuntimeError, msg):
                torch.linalg.multi_dot(tensors, out=out)
        a = make_tensor(2, dtype=dtype, device=device)
        # at least two operands are required
        check([], None, "expected at least 2 tensors")
        check([a], None, "expected at least 2 tensors")
        # first/last operands may be 1D or 2D, never 0D
        check([torch.tensor(1, device=device, dtype=dtype), a], None, "the first tensor must be 1D or 2D")
        check([a, torch.tensor(1, device=device, dtype=dtype)], None, "the last tensor must be 1D or 2D")
        # middle operands must be strictly 2D
        check([a, a, a], None, "tensor 1 must be 2D")
        check([a, make_tensor((2, 2, 2), dtype=dtype, device=device), a], None, "tensor 1 must be 2D")
        # dtypes of operands and out= must be consistent
        check([a, make_tensor(2, dtype=torch.double, device=device)], None, "all tensors must have be the same dtype")
        check([a, a], torch.empty(0, device=device, dtype=torch.double), "expected out tensor to have dtype")
        if self.device_type == 'cuda':
            # devices of operands and out= must be consistent
            check([a, make_tensor(2, dtype=dtype, device="cpu")], None, "all tensors must be on the same device")
            check([a, a], torch.empty(0, dtype=dtype), "expected out tensor to be on device")
        # inner dimensions of the chain must line up
        check([a, make_tensor(3, dtype=dtype, device=device)], None, "cannot be multiplied")
        check([a, make_tensor((3, 2), dtype=dtype, device=device), a], None, "cannot be multiplied")
    @precisionOverride({torch.float32: 5e-6, torch.complex64: 5e-6})
    @skipCUDAIfNoCusolver
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_qr(self, device, dtype):
        """Validate torch.qr: output shapes, the A = QR identity, the out=
        variant, orthonormality of Q and upper-triangularity of R."""
        def run_test(tensor_dims, some):
            A = torch.randn(*tensor_dims, dtype=dtype, device=device)
            Q, R = torch.qr(A, some=some)
            # Check0: Q[-2:] = (m, n_columns), R[-2:] = (n_columns, n)
            m, n = tensor_dims[-2:]
            # some=False requests the complete decomposition (square m x m Q)
            n_columns = m if (not some) and m > n else min(m, n)
            self.assertEqual(Q.size(-2), m)
            self.assertEqual(R.size(-1), n)
            self.assertEqual(Q.size(-1), n_columns)
            A_ = A.cpu().numpy()
            Q_ = Q.cpu().numpy()
            R_ = R.cpu().numpy()
            # Check1: A = QR
            self.assertEqual(A_, np.matmul(Q_, R_))
            # Check2: A = QR (with out)
            Q_out, R_out = torch.full_like(Q, math.nan), torch.full_like(R, math.nan)
            torch.qr(A, some=some, out=(Q_out, R_out))
            Q_out_ = Q_out.cpu().numpy()
            R_out_ = R_out.cpu().numpy()
            self.assertEqual(A_, np.matmul(Q_out_, R_out_))
            # Check3: Q == Q_out, R == R_out
            self.assertEqual(Q_, Q_out_)
            self.assertEqual(R_, R_out_)
            # Check4: Q^{T}Q = I, triu(R) = R
            eye = torch.eye(n_columns, device=device, dtype=dtype).expand(Q.shape[:-2] + (n_columns, n_columns)).cpu().numpy()
            self.assertEqual(np.matmul(Q_.swapaxes(-1, -2).conj(), Q_), eye)
            self.assertEqual(R.triu(), R)
        tensor_dims_list = [(0, 5), (0, 0), (5, 0),  # Empty Tensors
                            (2, 1, 0, 5), (2, 1, 0, 0), (2, 1, 5, 0), (2, 0, 5, 5),  # Batched empty Tensors
                            (3, 5), (5, 5), (5, 3),  # Single matrix
                            (7, 3, 5), (7, 5, 5), (7, 5, 3),  # 3-dim Tensors
                            (7, 5, 3, 5), (7, 5, 5, 5), (7, 5, 5, 3)]  # 4-dim Tensors
        for tensor_dims, some in itertools.product(tensor_dims_list, [True, False]):
            run_test(tensor_dims, some)
    @skipCUDAIfNoCusolver
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_qr_vs_numpy(self, device, dtype):
        """
        Compare torch.linalg.qr against numpy.linalg.qr for the 'reduced',
        'complete' and 'r' modes, including empty inputs.
        """
        sizes_to_test = [
            (7, 5),
            (5, 7),
            (5, 0),  # empty
            (0, 5),  # empty
        ]
        for size in sizes_to_test:
            t = torch.randn(size, device=device, dtype=dtype)
            np_t = t.cpu().numpy()
            for mode in ['reduced', 'complete']:
                exp_q, exp_r = np.linalg.qr(np_t, mode=mode)
                q, r = torch.linalg.qr(t, mode=mode)
                self.assertEqual(q, exp_q)
                self.assertEqual(r, exp_r)
            #
            # for mode='r' we need a special logic because numpy returns only r
            exp_r = np.linalg.qr(np_t, mode='r')
            q, r = torch.linalg.qr(t, mode='r')
            # check that q is empty
            self.assertEqual(q.shape, (0,))
            self.assertEqual(q.dtype, t.dtype)
            self.assertEqual(q.device, t.device)
            # check r
            self.assertEqual(r, exp_r)
@skipCUDAIfNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float)
def test_linalg_qr_autograd_errors(self, device, dtype):
# torch.linalg.qr(mode='r') returns only 'r' and discards 'q', but
# without 'q' you cannot compute the backward pass. Check that
# linalg_qr_backward complains cleanly in that case.
inp = torch.randn((5, 7), device=device, dtype=dtype, requires_grad=True)
q, r = torch.linalg.qr(inp, mode='r')
self.assertEqual(q.shape, (0,)) # empty tensor
b = torch.sum(r)
with self.assertRaisesRegex(RuntimeError,
"The derivative of linalg.qr depends on Q"):
b.backward()
#
inp = torch.randn((7, 5), device=device, dtype=dtype, requires_grad=True)
q, r = torch.linalg.qr(inp, mode='complete')
b = torch.sum(r)
with self.assertRaisesRegex(RuntimeError,
"The QR decomposition is not differentiable when mode='complete' and nrows > ncols"):
b.backward()
    @skipCUDAIfNoCusolver
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_qr_batched(self, device, dtype):
        """
        test torch.linalg.qr vs numpy.linalg.qr. We need some special logic
        because numpy does not support batched qr
        """
        def np_qr_batched(a, mode):
            """poor's man batched version of np.linalg.qr"""
            all_q = []
            all_r = []
            # factor each matrix in the batch independently and re-stack
            for matrix in a:
                result = np.linalg.qr(matrix, mode=mode)
                if mode == 'r':
                    # mode='r' returns only the R factor
                    all_r.append(result)
                else:
                    q, r = result
                    all_q.append(q)
                    all_r.append(r)
            if mode == 'r':
                return np.array(all_r)
            else:
                return np.array(all_q), np.array(all_r)
        t = torch.randn((3, 7, 5), device=device, dtype=dtype)
        np_t = t.cpu().numpy()
        for mode in ['reduced', 'complete']:
            exp_q, exp_r = np_qr_batched(np_t, mode=mode)
            q, r = torch.linalg.qr(t, mode=mode)
            self.assertEqual(q, exp_q)
            self.assertEqual(r, exp_r)
        # for mode='r' we need a special logic because numpy returns only r
        exp_r = np_qr_batched(np_t, mode='r')
        q, r = torch.linalg.qr(t, mode='r')
        # check that q is empty
        self.assertEqual(q.shape, (0,))
        self.assertEqual(q.dtype, t.dtype)
        self.assertEqual(q.device, t.device)
        # check r
        self.assertEqual(r, exp_r)
@skipCUDAIfNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float)
def test_qr_error_cases(self, device, dtype):
t1 = torch.randn(5, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, 'linalg.qr: The input tensor A must have at least 2 dimensions.'):
torch.linalg.qr(t1)
t2 = torch.randn((5, 7), device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "qr received unrecognized mode 'hello'"):
torch.linalg.qr(t2, mode='hello')
def _check_einsum(self, *args, np_args=None):
if np_args is None:
np_args = [arg.cpu().numpy() if isinstance(arg, torch.Tensor) else arg for arg in args]
res = torch.einsum(*args)
ref = np.einsum(*np_args)
self.assertEqual(torch.from_numpy(np.array(ref)), res)
    @dtypes(torch.double, torch.cdouble)
    def test_einsum(self, device, dtype):
        """Compare torch.einsum with np.einsum on vector, matrix, tensor,
        diagonal, ellipsis and non-contiguous cases."""
        # Test cases from https://gist.github.com/rockt/15ee013889d65342088e9260a377dc8f
        x = make_tensor((5,), dtype=dtype, device=device)
        y = make_tensor((7,), dtype=dtype, device=device)
        A = make_tensor((3, 5), dtype=dtype, device=device)
        B = make_tensor((2, 5), dtype=dtype, device=device)
        C = make_tensor((2, 3, 5), dtype=dtype, device=device)
        D = make_tensor((2, 5, 7), dtype=dtype, device=device)
        E = make_tensor((7, 9), dtype=dtype, device=device)
        F = make_tensor((2, 3, 3, 5), dtype=dtype, device=device)
        G = make_tensor((5, 4, 6), dtype=dtype, device=device)
        H = make_tensor((4, 4), dtype=dtype, device=device)
        I = make_tensor((2, 3, 2), dtype=dtype, device=device)
        # Vector operations
        self._check_einsum('i->', x) # sum
        self._check_einsum('i,i->', x, x) # dot
        self._check_einsum('i,i->i', x, x) # vector element-wise mul
        self._check_einsum('i,j->ij', x, y) # outer
        # Matrix operations
        self._check_einsum("ij->ji", A) # transpose
        self._check_einsum("ij->j", A) # row sum
        self._check_einsum("ij->i", A) # col sum
        self._check_einsum("ij,ij->ij", A, A) # matrix element-wise mul
        self._check_einsum("ij,j->i", A, x) # matrix vector multiplication
        self._check_einsum("ij,kj->ik", A, B) # matmul
        self._check_einsum("ij,ab->ijab", A, E) # matrix outer product
        # Tensor operations
        self._check_einsum("Aij,Ajk->Aik", C, D) # batch matmul
        self._check_einsum("ijk,jk->i", C, A) # tensor matrix contraction
        self._check_einsum("aij,jk->aik", D, E) # tensor matrix contraction
        self._check_einsum("abCd,dFg->abCFg", F, G) # tensor tensor contraction
        self._check_einsum("ijk,jk->ik", C, A) # tensor matrix contraction with double indices
        self._check_einsum("ijk,jk->ij", C, A) # tensor matrix contraction with double indices
        self._check_einsum("ijk,ik->j", C, B) # non contiguous
        self._check_einsum("ijk,ik->jk", C, B) # non contiguous with double indices
        # Test diagonals
        self._check_einsum("ii", H) # trace
        self._check_einsum("ii->i", H) # diagonal
        self._check_einsum('iji->j', I) # non-contiguous trace
        self._check_einsum('ngrg...->nrg...', make_tensor((2, 1, 3, 1, 4), dtype=dtype, device=device))
        # Test ellipsis
        self._check_einsum("i...->...", H)
        self._check_einsum("ki,...k->i...", A.t(), B)
        self._check_einsum("k...,jk->...", A.t(), B)
        self._check_einsum('...ik, ...j -> ...ij', C, x)
        self._check_einsum('Bik,k...j->i...j', C, make_tensor((5, 3), dtype=dtype, device=device))
        self._check_einsum('i...j, ij... -> ...ij', C, make_tensor((2, 5, 2, 3), dtype=dtype, device=device))
        # torch.bilinear with noncontiguous tensors
        l = make_tensor((5, 10), dtype=dtype, device=device, noncontiguous=True)
        r = make_tensor((5, 20), dtype=dtype, device=device, noncontiguous=True)
        w = make_tensor((15, 10, 20), dtype=dtype, device=device)
        self._check_einsum("bn,anm,bm->ba", l, w, r)
        # with strided tensors
        self._check_einsum("bn,Anm,bm->bA", l[:, ::2], w[:, ::2, ::2], r[:, ::2])
@dtypes(torch.double, torch.cdouble)
def test_einsum_sublist_format(self, device, dtype):
x = make_tensor((5,), dtype=dtype, device=device)
y = make_tensor((7,), dtype=dtype, device=device)
A = make_tensor((3, 5), dtype=dtype, device=device)
B = make_tensor((2, 5), dtype=dtype, device=device)
C = make_tensor((2, 1, 3, 1, 4), dtype=dtype, device=device)
self._check_einsum(x, [0])
self._check_einsum(x, [0], [])
self._check_einsum(x, [0], y, [1], [0, 1])
self._check_einsum(A, [0, 1], [1, 0])
self._check_einsum(A, [0, 1], x, [1], [0])
self._check_einsum(A, [0, 1], B, [2, 1])
self._check_einsum(A, [0, 1], B, [2, 1], [0, 2])
self._check_einsum(C, [0, 1, 2, 1, Ellipsis], [0, 2, 1, Ellipsis])
self._check_einsum(A.t(), [0, 1], B, [Ellipsis, 0])
self._check_einsum(A.t(), [0, 1], B, [Ellipsis, 0], [1, Ellipsis])
self._check_einsum(A.t(), [0, Ellipsis], B, [1, 0], [Ellipsis])
# torch.bilinear with noncontiguous tensors
l = make_tensor((5, 10), dtype=dtype, device=device, noncontiguous=True)
r = make_tensor((5, 20), dtype=dtype, device=device, noncontiguous=True)
w = make_tensor((15, 10, 20), dtype=dtype, device=device)
self._check_einsum(l, [40, 41], w, [2, 41, 50], r, [40, 50], [40, 2])
    @dtypes(torch.double, torch.cdouble)
    def test_einsum_random(self, device, dtype):
        """Fuzz torch.einsum against np.einsum with randomly generated equations:
        random labels, sizes, diagonals, ellipses, broadcasting, and both the
        equation and the sublist calling formats, with and without an explicit
        output specification."""
        def convert_label(label):
            # map an integer label to the subscript letter used in equation format
            if label == ...:
                return '...'
            elif label < 26:
                return chr(ord('A') + label)
            else:
                return chr(ord('a') + label - 26)
        def convert_sublist(sublist):
            return ''.join(convert_label(label) for label in sublist)
        def test(n=10,  # how many tests to generate
                 n_labels=5,  # how many labels available
                 min_ops=1, max_ops=3,  # min and max number of operands per test
                 min_dims=1, max_dims=3,  # min and max number of dimensions per operand
                 min_size=1, max_size=8,  # min and max size of each dimension
                 max_out_dim=3,  # max number of dimensions for the output
                 enable_diagonals=True,  # controls if labels can be repeated for diagonals
                 ellipsis_prob=0.5,  # probability of including ellipsis in operand
                 broadcasting_prob=0.1):  # probability of turning some dim sizes 1 for broadcasting
            all_labels = torch.arange(52)
            assert 0 <= n
            assert 0 <= n_labels < len(all_labels)
            assert 0 < min_ops <= max_ops
            assert 0 <= min_dims <= max_dims
            assert 0 <= min_size <= max_size
            assert 0 <= max_out_dim
            assert enable_diagonals or max_dims <= n_labels
            for _ in range(n):
                # Select a subset of labels for this test and give them random sizes
                possible_labels = all_labels[torch.randperm(len(all_labels))[:n_labels]]
                labels_size = torch.randint_like(all_labels, min_size, max_size + 1)
                ellipsis_shape = torch.randint(min_size, max_size + 1, (max_dims - min_dims,))
                operands = []
                sublists = []
                ell_size = 0
                valid_labels = set()
                # create random input operands
                for _ in range(random.randint(min_ops, max_ops)):
                    n_dim = random.randint(min_dims, max_dims)
                    labels_idx = torch.ones(len(possible_labels)).multinomial(n_dim, enable_diagonals)
                    labels = possible_labels[labels_idx]
                    valid_labels.update(labels.tolist())
                    shape = labels_size[labels]
                    # turn some dimensions to size 1 for testing broadcasting
                    mask = Binomial(probs=broadcasting_prob).sample((n_dim,))
                    broadcast_labels = torch.unique(labels[mask == 1])
                    shape[(labels[..., None] == broadcast_labels).any(-1)] = 1
                    labels = labels.tolist()
                    shape = shape.tolist()
                    # include ellipsis if not all dimensions were assigned a label already
                    if n_dim < max_dims and torch.rand(1) < ellipsis_prob:
                        ell_num_dim = random.randint(1, max_dims - n_dim)
                        ell_size = max(ell_size, ell_num_dim)
                        ell_shape = ellipsis_shape[-ell_num_dim:]
                        # again, turn some dimensions to size 1 for broadcasting
                        mask = Binomial(probs=broadcasting_prob).sample((ell_num_dim,))
                        ell_shape[mask == 1] = 1
                        ell_index = random.randint(0, n_dim)
                        shape[ell_index:ell_index] = ell_shape
                        labels.insert(ell_index, ...)
                    operands.append(make_tensor(shape, dtype=dtype, device=device))
                    sublists.append(labels)
                # NumPy has a bug with the sublist format so for now we compare PyTorch sublist
                # implementation against the equation format implementation of NumPy
                # see https://github.com/numpy/numpy/issues/10926
                np_operands = [op.cpu().numpy() for op in operands]
                # test equation format
                equation = ','.join(convert_sublist(l) for l in sublists)
                self._check_einsum(equation, *operands, np_args=(equation, *np_operands))
                # test sublist format
                args = [*itertools.chain(*zip(operands, sublists))]
                self._check_einsum(*args, np_args=(equation, *np_operands))
                # generate an explicit output
                out_sublist = []
                num_out_labels = max(0, random.randint(0, min(max_out_dim, len(valid_labels))) - ell_size)
                if num_out_labels > 0:
                    out_labels_idx = torch.ones(len(valid_labels)).multinomial(num_out_labels)
                    out_sublist = torch.tensor(list(valid_labels))[out_labels_idx].tolist()
                out_sublist.insert(random.randint(0, num_out_labels), ...)
                # test equation format with explicit output
                equation += '->' + convert_sublist(out_sublist)
                self._check_einsum(equation, *operands, np_args=(equation, *np_operands))
                # test sublist format with explicit output
                args.append(out_sublist)
                self._check_einsum(*args, np_args=(equation, *np_operands))
        test(100)
    def test_einsum_corner_cases(self, device):
        """Pin torch.einsum's behavior on degenerate equations: scalars, extra
        whitespace, zero-sized dimensions, broadcasting, and bare ellipses."""
        def check(equation, *operands, expected_output):
            # scalars/lists become tensors; shape tuples become random tensors
            tensors = [torch.tensor(operand, device=device, dtype=torch.float32) if not isinstance(operand, tuple)
                       else make_tensor(operand, dtype=torch.float32, device=device) for operand in operands]
            output = torch.einsum(equation, tensors)
            self.assertEqual(output, torch.tensor(expected_output, dtype=torch.float32, device=device))
        # Test equation variations
        check(' ', 1, expected_output=1)
        check(' -> ', 1, expected_output=1)
        check(' , ', 2, 2, expected_output=4)
        check(' , , ', 2, 2, 2, expected_output=8)
        check(' , -> ', 2, 2, expected_output=4)
        check(' i ', [1], expected_output=[1])
        check(' i -> ', [1], expected_output=1)
        check(' i -> i ', [1], expected_output=[1])
        check(' i , i ', [2], [2], expected_output=4)
        check(' i , i -> i ', [2], [2], expected_output=[4])
        # Test tensors with 0 size dimensions
        check('i', [], expected_output=[])
        check(' i j -> j', [[], []], expected_output=[])
        check('ij->i', [[], []], expected_output=[0., 0.])
        check(' i j k  ,  k  -> i j ', (3, 0, 6), (6,), expected_output=[[], [], []])
        # Test broadcasting
        check('i,j', [2], [1, 2], expected_output=[[2, 4]])
        check('i,ij->ij', [1, 2], [[1, 2, 3], [2, 3, 4]], expected_output=[[1, 2, 3], [4, 6, 8]])
        # Test ellipsis broadcasting
        check('...', 1, expected_output=1)
        check('...->', 1, expected_output=1)
        check('...->...', 1, expected_output=1)
        check('...', [1], expected_output=[1])
        check('...->', [1], expected_output=1)
        check('z...->z', [1], expected_output=[1])
        check('Z...->...Z', [1], expected_output=[1])
        check('...a->', [[2], [4]], expected_output=6)
        check('a...b->ab', [[[1], [2]], [[3], [4]]], expected_output=[[3], [7]])
    def test_einsum_error_cases(self, device):
        """Check that malformed einsum equations and sublists raise with the
        expected error messages (all prefixed with 'einsum():')."""
        def check(*args, regex, exception=RuntimeError):
            with self.assertRaisesRegex(exception, r'einsum\(\):.*' + regex):
                torch.einsum(*args)
        x = make_tensor((2,), dtype=torch.float32, device=device)
        y = make_tensor((2, 3), dtype=torch.float32, device=device)
        # equation-format errors
        check('', [], regex=r'at least one operand', exception=ValueError)
        check('. ..', [x], regex=r'found \'.\' for operand 0 that is not part of any ellipsis')
        check('... ...', [x], regex=r'found \'.\' for operand 0 for which an ellipsis was already found')
        check('1', [x], regex=r'invalid subscript given at index 0')
        check(',', [x], regex=r'fewer operands were provided than specified in the equation')
        check('', [x, x], regex=r'more operands were provided than specified in the equation')
        check('', [x], regex=r'the number of subscripts in the equation \(0\) does not match the number '
              r'of dimensions \(1\) for operand 0 and no ellipsis was given')
        check('ai', [x], regex=r'the number of subscripts in the equation \(2\) does not match the number '
              r'of dimensions \(1\) for operand 0 and no ellipsis was given')
        check('ai...', [x], regex=r'the number of subscripts in the equation \(2\) is more than the number '
              r'of dimensions \(1\) for operand 0')
        # output-subscript errors
        check('a->... .', [x], regex=r'found \'.\' for output but an ellipsis \(...\) was already found')
        check('a->..', [x], regex=r'found \'.\' for output that is not part of any ellipsis \(...\)')
        check('a->1', [x], regex=r'invalid subscript given at index 3')
        check('a->aa', [x], regex=r'output subscript a appears more than once in the output')
        check('a->i', [x], regex=r'output subscript i does not appear in the equation for any input operand')
        # size-consistency errors
        check('aa', [y], regex=r'subscript a is repeated for operand 0 but the sizes don\'t match, 3 != 2')
        check('a, ba', [x, y], regex=r'operands do not broadcast with remapped shapes \[original->remapped\]: '
              r'\[2\]->\[1, 2\] \[2, 3\]->\[2, 3\]')
        # sublist-format errors
        check(x, [-1], regex=r'not within the valid range \[0, 52\)', exception=ValueError)
        check(x, [52], regex=r'not within the valid range \[0, 52\)', exception=ValueError)
    def _gen_shape_inputs_linalg_triangular_solve(self, shape, dtype, device, well_conditioned=False):
        """Yield (A, B, left, upper, uni) inputs for solve_triangular covering
        every combination of side, unitriangularity, batch-expansion,
        transposition and conjugation of the two operands.

        `shape` is (batch, n, k). With well_conditioned=True, A is taken from
        an LU factorization so that the triangular systems are well behaved
        numerically.
        """
        make_arg = partial(make_tensor, dtype=dtype, device=device)
        make_randn = partial(torch.randn, dtype=dtype, device=device)
        b, n, k = shape
        for left, uni, expand_a, tr_a, conj_a, expand_b, tr_b, conj_b in product((True, False), repeat=8):
            # expand means that we generate a batch of matrices with a stride of zero in the batch dimension
            if (conj_a or conj_b) and not dtype.is_complex:
                continue
            # We just expand on the batch size
            if (expand_a or expand_b) and b == 1:
                continue
            size_a = (b, n, n) if left else (b, k, k)
            size_b = (b, n, k) if not tr_b else (b, k, n)
            # If expand_a or expand_b, we'll expand them to the correct size later
            if b == 1 or expand_a:
                size_a = size_a[1:]
            if b == 1 or expand_b:
                size_b = size_b[1:]
            if well_conditioned:
                PLU = torch.linalg.lu(make_randn(*size_a))
                if uni:
                    # A = L from PLU
                    A = PLU[1].transpose(-2, -1).contiguous()
                else:
                    # A = U from PLU
                    A = PLU[2].contiguous()
            else:
                # random upper-triangular A; force the diagonal away from zero
                # so the system is solvable
                A = make_arg(size_a)
                A.triu_()
                diag = A.diagonal(0, -2, -1)
                if uni:
                    diag.fill_(1.)
                else:
                    diag[diag.abs() < 1e-6] = 1.
            B = make_arg(size_b)
            if tr_a:
                A.transpose_(-2, -1)
            if tr_b:
                B.transpose_(-2, -1)
            if conj_a:
                A = A.conj()
            if conj_b:
                B = B.conj()
            if expand_a:
                A = A.expand(b, *size_a)
            if expand_b:
                B = B.expand(b, n, k)
            # A was generated upper triangular, so upper is True iff it was not transposed
            yield A, B, left, not tr_a, uni
def _test_linalg_solve_triangular(self, A, B, upper, left, uni):
X = torch.linalg.solve_triangular(A, B, upper=upper, left=left, unitriangular=uni)
if left:
self.assertEqual(A @ X, B)
else:
self.assertEqual(X @ A, B)
out = B
# B may be expanded
if not B.is_contiguous() and not B.transpose(-2, -1).is_contiguous():
out = B.clone()
torch.linalg.solve_triangular(A, B, upper=upper, left=left, unitriangular=uni, out=out)
self.assertEqual(X, out)
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-1, torch.complex64: 1e-1,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_linalg_solve_triangular(self, device, dtype):
# This exercises the API + BLAS CPU + batched cuBLAS
ks = (3, 1, 0)
ns = (5, 0)
bs = (1, 2, 0)
gen_inputs = self._gen_shape_inputs_linalg_triangular_solve
for b, n, k in product(bs, ns, ks):
for A, B, left, upper, uni in gen_inputs((b, n, k), dtype, device):
self._test_linalg_solve_triangular(A, B, upper, left, uni)
@onlyCUDA
@skipCUDAIfNoMagma # Magma needed for the PLU decomposition
@skipCUDAIfRocm # There is a memory access bug in rocBLAS in the (non-batched) solve_triangular
@skipCUDAVersionIn([(11, 3), (11, 6), (11, 7)]) # Tracked in https://github.com/pytorch/pytorch/issues/70111
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-2, torch.complex64: 1e-2,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_linalg_solve_triangular_large(self, device, dtype):
# Exercises magma and cublas
magma = (9, 513, 1)
iterative_cublas = (2, 64, 1)
gen_inputs = self._gen_shape_inputs_linalg_triangular_solve
for shape in (magma, iterative_cublas):
for A, B, left, upper, uni in gen_inputs(shape, dtype, device, well_conditioned=True):
self._test_linalg_solve_triangular(A, B, upper, left, uni)
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float32: 1e-2, torch.complex64: 1e-2,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_linalg_solve_triangular_broadcasting(self, device, dtype):
        """solve_triangular must broadcast the batch dimensions of A and B."""
        make_arg = partial(make_tensor, dtype=dtype, device=device)
        # pairs of (A shape, B shape) whose batch dims broadcast against each other
        sizes = (((2, 1, 3, 4, 4), (2, 1, 3, 4, 6)),
                 ((2, 1, 3, 4, 4), (4, 6)),
                 ((4, 4), (2, 1, 3, 4, 2)),
                 ((1, 3, 1, 4, 4), (2, 1, 3, 4, 5)))
        for size_A, size_B in sizes:
            for left, upper, uni in itertools.product([True, False], repeat=3):
                A = make_arg(size_A)
                # make A triangular with a diagonal safely away from zero
                if upper:
                    A.triu_()
                else:
                    A.tril_()
                diag = A.diagonal(0, -2, -1)
                if uni:
                    diag.fill_(1.)
                else:
                    diag[diag.abs() < 1e-6] = 1.
                B = make_arg(size_B)
                if not left:
                    B.transpose_(-2, -1)
                X = torch.linalg.solve_triangular(A, B, upper=upper, left=left, unitriangular=uni)
                # multiplying the solution back must reproduce B up to broadcasting
                if left:
                    B_other = A @ X
                else:
                    B_other = X @ A
                self.assertEqual(*torch.broadcast_tensors(B, B_other))
def triangular_solve_test_helper(self, A_dims, b_dims, upper, unitriangular,
device, dtype):
triangle_function = torch.triu if upper else torch.tril
b = torch.randn(*b_dims, dtype=dtype, device=device)
A = torch.randn(*A_dims, dtype=dtype, device=device)
# create positive definite matrix
A = torch.matmul(A, A.mT)
A_triangular = triangle_function(A)
if unitriangular:
A_triangular.diagonal(dim1=-2, dim2=-1).fill_(1.)
return b, A_triangular
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_triangular_solve(self, device, dtype):
        """torch.triangular_solve solves A x = b (A^T x = b when transpose=True);
        verify the residual via a NumPy matmul, including empty n/k sizes."""
        ks = [0, 1, 3]  # number of right-hand-side columns (includes empty)
        ns = [0, 5]     # matrix size (includes empty)
        for k, n, (upper, unitriangular, transpose) in itertools.product(ks, ns,
                                                                         itertools.product([True, False], repeat=3)):
            b, A = self.triangular_solve_test_helper((n, n), (n, k), upper,
                                                     unitriangular, device, dtype)
            x = torch.triangular_solve(b, A, upper=upper, unitriangular=unitriangular, transpose=transpose)[0]
            if transpose:
                # transpose=True solved A^T x = b, so reconstruct with A^T
                self.assertEqual(b, np.matmul(A.t().cpu(), x.cpu()))
            else:
                self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_triangular_solve_batched(self, device, dtype):
        """Batched triangular_solve must match a loop of single-sample solves and
        must handle empty matrices and zero-sized batches."""
        def triangular_solve_batch_helper(A_dims, b_dims, upper, unitriangular, transpose):
            # Compare the batched solve against per-sample solves stacked together,
            # then check the residual A x == b.
            b, A = self.triangular_solve_test_helper(A_dims, b_dims, upper,
                                                     unitriangular, device, dtype)
            x_exp_list = []
            for i in range(b_dims[0]):
                x_exp_list.append(torch.triangular_solve(b[i], A[i], upper=upper,
                                                         unitriangular=unitriangular,
                                                         transpose=transpose)[0])
            x_exp = torch.stack(x_exp_list)  # Stacked output
            x_act = torch.triangular_solve(b, A, upper=upper,
                                           unitriangular=unitriangular,
                                           transpose=transpose)[0]  # Actual output
            self.assertEqual(x_act, x_exp)  # Equality check
            if transpose:
                # transpose=True solved A^T x = b, so reconstruct with A^T
                A = A.mT
            Ax = np.matmul(A.cpu(), x_act.cpu())
            self.assertEqual(b, Ax)
        def triangular_solve_zero_batch_helper(A_dims, b_dims, upper, unitriangular, transpose):
            # Zero-sized batch: only the output shape is meaningful.
            b, A = self.triangular_solve_test_helper(A_dims, b_dims, upper,
                                                     unitriangular, device, dtype)
            x = torch.triangular_solve(b, A, upper=upper,
                                       unitriangular=unitriangular,
                                       transpose=transpose)[0]
            self.assertTrue(x.shape == b.shape)
        for upper, unitriangular, transpose in itertools.product([True, False], repeat=3):
            batchsize = 3
            triangular_solve_batch_helper((batchsize, 5, 5), (batchsize, 5, 10),
                                          upper, unitriangular, transpose)
            # test empty input
            triangular_solve_batch_helper((batchsize, 0, 0), (batchsize, 0, 10),
                                          upper, unitriangular, transpose)
            triangular_solve_batch_helper((batchsize, 0, 0), (batchsize, 0, 0),
                                          upper, unitriangular, transpose)
            # test zero batch case
            batchsize = 0
            triangular_solve_zero_batch_helper((batchsize, 5, 5), (batchsize, 5, 10),
                                               upper, unitriangular, transpose)
    @slowTest
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_triangular_solve_batched_many_batches(self, device, dtype):
        """Stress triangular_solve with very large batch counts — once with A
        batched (b broadcast) and once with only b batched — checking the
        residual A x == b."""
        for upper, transpose, unitriangular in itertools.product([True, False], repeat=3):
            # test batched A case
            b, A = self.triangular_solve_test_helper((256, 256, 5, 5), (5, 1),
                                                     upper, unitriangular, device, dtype)
            x, _ = torch.triangular_solve(b, A,
                                          upper=upper, transpose=transpose, unitriangular=unitriangular)
            if transpose:
                A = A.mT
            Ax = torch.matmul(A, x)
            # single-precision dtypes accumulate more error over the large batch
            rtol = 1e-2 if dtype in [torch.float32, torch.complex64] else self.precision
            self.assertEqual(Ax, b.expand_as(Ax), atol=self.precision, rtol=rtol)
            # test batched b case
            b, A = self.triangular_solve_test_helper((3, 3), (512, 512, 3, 1),
                                                     upper, unitriangular, device, dtype)
            x, _ = torch.triangular_solve(b, A, upper=upper, transpose=transpose,
                                          unitriangular=unitriangular)
            if transpose:
                A = A.mT
            self.assertEqual(torch.matmul(A, x), b)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @unittest.skipIf(not TEST_SCIPY, "SciPy not found")
    @dtypes(*floating_and_complex_types())
    def test_triangular_solve_batched_broadcasting(self, device, dtype):
        """Broadcasting batched triangular_solve must agree with a
        scipy.linalg.solve_triangular reference applied sample by sample."""
        from scipy.linalg import solve_triangular as tri_solve
        def scipy_tri_solve_batched(A, B, upper, trans, diag):
            # Emulate torch's batch broadcasting in NumPy: broadcast the batch
            # dims of A and B, flatten, solve each pair, then restore B's shape.
            batch_dims_A, batch_dims_B = A.shape[:-2], B.shape[:-2]
            single_dim_A, single_dim_B = A.shape[-2:], B.shape[-2:]
            expand_dims = tuple(torch._C._infer_size(torch.Size(batch_dims_A),
                                                     torch.Size(batch_dims_B)))
            expand_A = np.broadcast_to(A, expand_dims + single_dim_A)
            expand_B = np.broadcast_to(B, expand_dims + single_dim_B)
            flat_A = expand_A.reshape((-1,) + single_dim_A)
            flat_B = expand_B.reshape((-1,) + single_dim_B)
            flat_X = np.vstack([tri_solve(a, b, lower=(not upper), trans=int(trans), unit_diagonal=diag)
                                for a, b in zip(flat_A, flat_B)])
            return flat_X.reshape(expand_B.shape)
        def run_test(A_dims, b_dims, device, upper, transpose, unitriangular):
            b, A = self.triangular_solve_test_helper(A_dims, b_dims, upper,
                                                     unitriangular, device, dtype)
            x_exp = torch.as_tensor(scipy_tri_solve_batched(A.cpu().numpy(), b.cpu().numpy(),
                                                            upper, transpose, unitriangular))
            x = torch.triangular_solve(b, A, upper=upper, transpose=transpose, unitriangular=unitriangular)[0]
            self.assertEqual(x, x_exp.to(device))
        for upper, transpose, unitriangular in itertools.product([True, False], repeat=3):
            # test against scipy.linalg.solve_triangular
            run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6), device, upper, transpose, unitriangular) # no broadcasting
            run_test((2, 1, 3, 4, 4), (4, 6), device, upper, transpose, unitriangular) # broadcasting b
            run_test((4, 4), (2, 1, 3, 4, 2), device, upper, transpose, unitriangular) # broadcasting A
            run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5), device, upper, transpose, unitriangular) # broadcasting A & b
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_triangular_solve_out_errors_and_warnings(self, device, dtype):
        """out= variants of triangular_solve must reject mismatched dtypes and
        devices and warn when a non-empty out tensor needs resizing."""
        # dtypes should be safely castable
        a = torch.eye(2, dtype=dtype, device=device)
        b = torch.randn(2, 1, dtype=dtype, device=device)
        out = torch.empty_like(b).to(torch.int)
        clone_a = torch.empty_like(a)
        with self.assertRaisesRegex(RuntimeError, "Expected out tensor to have dtype"):
            torch.triangular_solve(b, a, out=(out, clone_a))
        out = torch.empty_like(b)
        clone_a = clone_a.to(torch.int)
        with self.assertRaisesRegex(RuntimeError, "Expected out tensor to have dtype"):
            torch.triangular_solve(b, a, out=(out, clone_a))
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, dtype=dtype, device=wrong_device)
            clone_a = torch.empty_like(a)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.triangular_solve(b, a, out=(out, clone_a))
            out = torch.empty(0, dtype=dtype, device=device)
            clone_a = torch.empty_like(a).to(wrong_device)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.triangular_solve(b, a, out=(out, clone_a))
        # Trigger the WARN_ONCE deprecation error
        # (fired here, outside the recorder below, so the once-only warning
        # cannot pollute the recorded warning count)
        torch.triangular_solve(b, a)
        # if out tensor with wrong shape is passed a warning is given
        with warnings.catch_warnings(record=True) as w:
            out = torch.empty(1, dtype=dtype, device=device)
            clone_a = torch.empty(1, dtype=dtype, device=device)
            # Trigger warning
            torch.triangular_solve(b, a, out=(out, clone_a))
            # Check warning occurs
            # Both out tensors (solution and cloned A) are resized -> two warnings.
            self.assertEqual(len(w), 2)
            self.assertTrue("An output with one or more elements was resized" in str(w[0].message))
            self.assertTrue("An output with one or more elements was resized" in str(w[1].message))
def check_single_matmul(self, x, y):
def assertEqual(answer, expected):
if x.dtype.is_floating_point or x.dtype.is_complex:
k = max(x.shape[-1], 1) # Scale the atol with the size of the matrix
self.assertEqual(answer, expected,
msg=f"{x.shape} x {y.shape} = {answer.shape}",
atol=k * 5e-5,
rtol=1e-4)
else:
self.assertEqual(answer, expected, msg=f"{x.shape} x {y.shape} = {answer.shape}")
# test x @ y
expected = np.matmul(x.cpu(), y.cpu())
ans = torch.matmul(x, y)
self.assertTrue(ans.is_contiguous())
assertEqual(ans, expected)
# test out
out = torch.empty_like(ans)
ans = torch.matmul(x, y, out=out)
self.assertIs(ans, out)
self.assertTrue(ans.is_contiguous())
assertEqual(ans, expected)
def gen_sizes_matmul(self, x_dim, y_dim=4, matrix_size=4, batch_size=3):
"""
Generates sequences of tuples (x, y) of with size(x) = x_dim and
size(y) <= y_dim that are compatible wrt. matmul
"""
assert x_dim >= 1
assert y_dim >= 2
x = x_dim
for y in range(1, y_dim + 1):
for batch, mn in product(product(range(batch_size), repeat=max(x - 2, y - 2, 0)),
product(range(matrix_size), repeat=min(y, 2))):
if x == 1:
size_x = mn[:1]
size_y = batch + mn
yield size_x, size_y
else:
for k in range(matrix_size):
size_x = (k,) + mn[:1]
if x > 2:
size_x = batch[-(x - 2):] + size_x
size_y = mn
if y > 2:
size_y = batch[-(y - 2):] + size_y
yield size_x, size_y
@dtypesIfCUDA(torch.float, torch.complex64) # Integer matmul just supported on CPU
@dtypes(torch.int64, torch.float, torch.complex64)
def test_matmul_small_brute_force_1d_Nd(self, device, dtype):
make_arg = partial(make_tensor, device=device, dtype=dtype)
for (size_x, size_y), nctg_x, nctg_y in product(self.gen_sizes_matmul(1), (True, False), (True, False)):
x = make_arg(size_x, noncontiguous=nctg_x)
y = make_arg(size_y, noncontiguous=nctg_y)
self.check_single_matmul(x, y)
@dtypesIfCUDA(torch.float, torch.complex64) # Integer matmul just supported on CPU
@dtypes(torch.int64, torch.float, torch.complex64)
def test_matmul_small_brute_force_2d_Nd(self, device, dtype):
make_arg = partial(make_tensor, device=device, dtype=dtype)
for (size_x, size_y), nctg_x, nctg_y in product(self.gen_sizes_matmul(2), (True, False), (True, False)):
x = make_arg(size_x, noncontiguous=nctg_x)
y = make_arg(size_y, noncontiguous=nctg_y)
self.check_single_matmul(x, y)
@dtypesIfCUDA(torch.float, torch.complex64) # Integer matmul just supported on CPU
@dtypes(torch.int64, torch.float, torch.complex64)
def test_matmul_small_brute_force_3d_Nd(self, device, dtype):
make_arg = partial(make_tensor, device=device, dtype=dtype)
for (size_x, size_y), nctg_x, nctg_y in product(self.gen_sizes_matmul(3), (True, False), (True, False)):
x = make_arg(size_x, noncontiguous=nctg_x)
y = make_arg(size_y, noncontiguous=nctg_y)
self.check_single_matmul(x, y)
def test_linear_algebra_scalar_raises(self, device) -> None:
m = torch.randn(5, 5, device=device)
v = torch.randn(5, device=device)
s = torch.tensor(7, device=device)
self.assertRaises(RuntimeError, lambda: torch.mv(m, s))
self.assertRaises(RuntimeError, lambda: torch.addmv(v, m, s))
@dtypes(torch.float32, torch.complex64)
def test_cross(self, device, dtype):
x = torch.rand(100, 3, 100, dtype=dtype, device=device)
y = torch.rand(100, 3, 100, dtype=dtype, device=device)
res1 = torch.cross(x, y)
res2 = torch.tensor((), dtype=dtype, device=device)
torch.cross(x, y, out=res2)
self.assertEqual(res1, res2)
    @dtypes(torch.float32, torch.complex64)
    def test_linalg_cross(self, device, dtype):
        """torch.linalg.cross: out= must agree with the functional form, batch
        dims must broadcast, and several non-contiguous layouts must match the
        np.cross reference."""
        x = torch.rand(100, 3, 100, dtype=dtype, device=device)
        y = torch.rand(100, 3, 100, dtype=dtype, device=device)
        res1 = torch.linalg.cross(x, y, dim=1)
        res2 = torch.tensor((), dtype=dtype, device=device)
        torch.linalg.cross(x, y, dim=1, out=res2)
        self.assertEqual(res1, res2)
        # test for broadcastable inputs
        x = torch.rand(1, 3, 2, dtype=dtype, device=device)
        y = torch.rand(4, 3, 1, dtype=dtype, device=device)
        res1 = torch.linalg.cross(x, y, dim=1)
        res2 = torch.tensor((), dtype=dtype, device=device)
        torch.linalg.cross(x, y, dim=1, out=res2)
        self.assertEqual(res1, res2)
        # non contiguous case 1
        x = torch.rand((4, 4, 4, 3), dtype=dtype,
                       device=device).contiguous(memory_format=torch.channels_last) # non-contiguous
        y = torch.rand((4, 4, 4, 3), dtype=dtype,
                       device=device).contiguous(memory_format=torch.channels_last) # non-contiguous
        np_expected_ref = np.cross(x.cpu().numpy(), y.cpu().numpy(), axis=-1)
        res = torch.linalg.cross(x, y, dim=-1)
        # numpy reference compared to torch result
        self.assertEqual(res.cpu().numpy(), np_expected_ref)
        # non contiguous case 2: one contiguous, one permuted operand
        x = torch.rand(1, 3, 2, dtype=dtype, device=device) # contiguous
        y = torch.rand(1, 3, 4, dtype=dtype, device=device).permute(2, 1, 0) # non-contiguous
        np_expected_ref = np.cross(x.cpu().numpy(), y.cpu().numpy(), axis=1)
        res = torch.linalg.cross(x, y, dim=1)
        # numpy reference compared to torch result
        self.assertEqual(res.cpu().numpy(), np_expected_ref)
        # non contiguous case 3: both operands permuted
        x = torch.rand(2, 3, 1, dtype=dtype, device=device).permute(2, 1, 0) # non-contiguous
        y = torch.rand(1, 3, 4, dtype=dtype, device=device).permute(2, 1, 0) # non-contiguous
        np_expected_ref = np.cross(x.cpu().numpy(), y.cpu().numpy(), axis=1)
        res = torch.linalg.cross(x, y, dim=1)
        # numpy reference compared to torch result
        self.assertEqual(res.cpu().numpy(), np_expected_ref)
        # non contiguous case 4: strided row slices
        x = torch.randn(12, 3, device=device, dtype=dtype)[::2, :] # non-contiguous
        y = torch.randn(18, 3, device=device, dtype=dtype)[::3, :] # non-contiguous
        np_expected_ref = np.cross(x.cpu().numpy(), y.cpu().numpy(), axis=1)
        res = torch.linalg.cross(x, y, dim=1)
        # numpy reference compared to torch result
        self.assertEqual(res.cpu().numpy(), np_expected_ref)
        # non contiguous case 5: 1-element x broadcast against a strided y
        x = torch.randn(1, device=device, dtype=dtype) # contiguous
        y = torch.randn(6, device=device, dtype=dtype)[::2] # non-contiguous
        np_expected_ref = np.cross(x.expand(3).cpu().numpy(), y.cpu().numpy())
        res = torch.linalg.cross(x, y)
        # numpy reference compared to torch result
        self.assertEqual(res.cpu().numpy(), np_expected_ref)
@dtypes(torch.float32, torch.complex64)
def test_cross_with_and_without_dim(self, device, dtype):
x = torch.rand(100, 3, dtype=dtype, device=device)
y = torch.rand(100, 3, dtype=dtype, device=device)
res1 = torch.cross(x, y, dim=1)
res2 = torch.cross(x, y, dim=-1)
res3 = torch.cross(x, y)
self.assertEqual(res1, res2)
self.assertEqual(res1, res3)
@dtypes(torch.float32, torch.complex64)
def test_linalg_cross_with_and_without_dim(self, device, dtype):
x = torch.rand(100, 3, dtype=dtype, device=device)
y = torch.rand(100, 3, dtype=dtype, device=device)
res1 = torch.linalg.cross(x, y, dim=1)
res2 = torch.linalg.cross(x, y, dim=-1)
res3 = torch.linalg.cross(x, y)
self.assertEqual(res1, res2)
self.assertEqual(res1, res3)
    def test_cross_errors(self, device):
        """torch.cross must raise the documented errors for mismatched shapes,
        missing size-3 dims, wrong explicit dims and out-of-range dims."""
        # rank mismatch between the operands
        self.assertRaisesRegex(
            RuntimeError, "must match the size of tensor",
            lambda: torch.cross(torch.rand(100, 3, device=device), torch.rand(100, 3, 10, device=device)))
        # same rank but incompatible sizes
        self.assertRaisesRegex(
            RuntimeError, "must match the size of tensor",
            lambda: torch.cross(torch.rand(5, 3, device=device), torch.rand(3, 5, device=device)))
        # implicit dim: no dimension of size 3 to pick
        self.assertRaisesRegex(
            RuntimeError, "no dimension of size 3 in input",
            lambda: torch.cross(torch.rand(5, 4, device=device), torch.rand(5, 4, device=device)))
        # explicit dim that does not have size 3
        self.assertRaisesRegex(
            RuntimeError, "dimension 0 does not have size 3",
            lambda: torch.cross(torch.rand(5, 4, 3, device=device), torch.rand(5, 4, 3, device=device), dim=0))
        self.assertRaisesRegex(
            RuntimeError, "dimension -1 does not have size 3",
            lambda: torch.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device), dim=-1))
        # dim outside the valid range for a 3-D tensor
        self.assertRaisesRegex(
            IndexError, "Dimension out of range",
            lambda: torch.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device), dim=-5))
    def test_linalg_cross_errors(self, device):
        """torch.linalg.cross error cases; unlike torch.cross, the default dim
        is -1, so the first case errors out without an explicit dim."""
        # default dim=-1 does not have size 3
        self.assertRaisesRegex(
            RuntimeError, "dimension -1 does not have size 3",
            lambda: torch.linalg.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device)))
        # rank mismatch between the operands
        self.assertRaisesRegex(
            RuntimeError, "must match the size of tensor",
            lambda: torch.linalg.cross(torch.rand(100, 3, device=device), torch.rand(100, 3, 10, device=device)))
        # same rank but incompatible sizes
        self.assertRaisesRegex(
            RuntimeError, "must match the size of tensor",
            lambda: torch.linalg.cross(torch.rand(5, 3, device=device), torch.rand(3, 5, device=device)))
        # explicit dim that does not have size 3
        self.assertRaisesRegex(
            RuntimeError, "dimension 0 does not have size 3",
            lambda: torch.linalg.cross(torch.rand(5, 4, 3, device=device), torch.rand(5, 4, 3, device=device), dim=0))
        self.assertRaisesRegex(
            RuntimeError, "dimension -1 does not have size 3",
            lambda: torch.linalg.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device), dim=-1))
        # dim outside the valid range for a 3-D tensor
        self.assertRaisesRegex(
            IndexError, "Dimension out of range",
            lambda: torch.linalg.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device), dim=-5))
def test_renorm(self, device):
m1 = torch.randn(20, 20, device=device) # big enough to exercise vectorized path
res1 = torch.tensor((), device=device)
def renorm(matrix, value, dim, max_norm):
m1 = matrix.transpose(dim, 0).contiguous()
# collapse non-dim dimensions.
m2 = m1.clone().resize_(m1.size(0), int(math.floor(m1.nelement() / m1.size(0))))
norms = m2.norm(value, 1, True)
# clip
new_norms = norms.clone()
new_norms[torch.gt(norms, max_norm)] = max_norm
new_norms.div_(norms.add_(1e-7))
# renormalize
m1.mul_(new_norms.expand_as(m1))
return m1.transpose(dim, 0)
# note that the axis fed to torch.renorm is different (2~=1)
maxnorm = m1.norm(2, 1).mean()
m2 = renorm(m1, 2, 1, maxnorm)
m1.renorm_(2, 1, maxnorm)
self.assertEqual(m1, m2, atol=1e-5, rtol=0)
self.assertEqual(m1.norm(2, 0), m2.norm(2, 0), atol=1e-5, rtol=0)
m1 = torch.randn(3, 4, 5, device=device)
m2 = m1.transpose(1, 2).contiguous().clone().resize_(15, 4)
maxnorm = m2.norm(2, 0).mean()
m2 = renorm(m2, 2, 1, maxnorm)
m1.renorm_(2, 1, maxnorm)
m3 = m1.transpose(1, 2).contiguous().clone().resize_(15, 4)
self.assertEqual(m3, m2)
self.assertEqual(m3.norm(2, 0), m2.norm(2, 0))
    @skipCPUIfNoLapack
    @skipCUDAIfNoCusolver
    @dtypes(*floating_and_complex_types())
    def test_ormqr(self, device, dtype):
        """torch.ormqr applies the Householder Q encoded by geqrf's output; check
        all four (left, transpose) combinations against an explicit Q from
        linalg.qr, plus the tau == 0 identity case."""
        def run_test(batch, m, n, fortran_contiguous):
            A = make_tensor((*batch, m, n), dtype=dtype, device=device)
            reflectors, tau = torch.geqrf(A)
            if not fortran_contiguous:
                # geqrf yields column-major reflectors (mT is contiguous);
                # re-layout to row-major to exercise the other code path.
                self.assertTrue(reflectors.mT.is_contiguous())
                reflectors = reflectors.contiguous()
            # Q is of size m x m
            Q, _ = torch.linalg.qr(A, mode='complete')
            C_right = make_tensor((*batch, m, n), dtype=dtype, device=device)
            C_left = make_tensor((*batch, n, m), dtype=dtype, device=device)
            expected = Q @ C_right
            actual = torch.ormqr(reflectors, tau, C_right, left=True, transpose=False)
            self.assertEqual(expected, actual)
            expected = C_left @ Q
            actual = torch.ormqr(reflectors, tau, C_left, left=False, transpose=False)
            self.assertEqual(expected, actual)
            # transpose=True applies Q^H (conjugate transpose for complex dtypes)
            expected = Q.mH @ C_right
            actual = torch.ormqr(reflectors, tau, C_right, left=True, transpose=True)
            self.assertEqual(expected, actual)
            expected = C_left @ Q.mH
            actual = torch.ormqr(reflectors, tau, C_left, left=False, transpose=True)
            self.assertEqual(expected, actual)
            # if tau is all zeros then the implicit matrix Q is the identity matrix
            # so the actual result should be C_right in this case
            zero_tau = torch.zeros_like(tau)
            actual = torch.ormqr(reflectors, zero_tau, C_right, left=True, transpose=False)
            self.assertEqual(C_right, actual)
        # include empty batches/matrices and both memory layouts
        batches = [(), (0, ), (2, ), (2, 1)]
        ns = [5, 2, 0]
        for batch, (m, n), fortran_contiguous in product(batches, product(ns, ns), [True, False]):
            run_test(batch, m, n, fortran_contiguous)
@skipCPUIfNoLapack
@skipCUDAIfNoCusolver
@dtypes(*floating_and_complex_types())
def test_ormqr_errors_and_warnings(self, device, dtype):
test_cases = [
# input1 size, input2 size, input3 size, error regex
((10,), (2,), (2,), r"input must have at least 2 dimensions"),
((2, 2), (2,), (2,), r"other must have at least 2 dimensions"),
((10, 6), (20,), (10, 6), r"other.shape\[-2\] must be greater than or equal to tau.shape\[-1\]"),
((6, 6), (5,), (5, 5), r"other.shape\[-2\] must be equal to input.shape\[-2\]"),
((1, 2, 2), (2, 2), (1, 2, 2), r"batch dimensions of tau to be equal to input.shape\[:-2\]"),
((1, 2, 2), (1, 2), (2, 2, 2), r"batch dimensions of other to be equal to input.shape\[:-2\]"),
]
for a_size, tau_size, c_size, error_regex in test_cases:
a = make_tensor(a_size, dtype=dtype, device=device)
tau = make_tensor(tau_size, dtype=dtype, device=device)
c = make_tensor(c_size, dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, error_regex):
torch.ormqr(a, tau, c)
def test_blas_empty(self, device):
def fn(torchfn, *args, test_out=False, **kwargs):
def call_torch_fn(*args, **kwargs):
return torchfn(*tuple(torch.randn(shape, device=device) if isinstance(shape, tuple) else shape
for shape in args), **kwargs)
result = call_torch_fn(*args, **kwargs)
if not test_out:
return result
else:
out = torch.full_like(result, math.nan)
out1 = call_torch_fn(*args, **kwargs, out=out)
return out
# mm, addmm
self.assertEqual((0, 0), fn(torch.mm, (0, 0), (0, 0)).shape)
self.assertEqual((0, 5), fn(torch.mm, (0, 0), (0, 5)).shape)
self.assertEqual((5, 0), fn(torch.mm, (5, 0), (0, 0)).shape)
self.assertEqual((3, 0), fn(torch.mm, (3, 2), (2, 0)).shape)
self.assertEqual(torch.zeros((5, 6), device=device), fn(torch.mm, (5, 0), (0, 6)))
self.assertEqual(torch.zeros((5, 6), device=device), fn(torch.mm, (5, 0), (0, 6), test_out=True))
self.assertEqual((0, 0), fn(torch.addmm, (0, 0), (0, 0), (0, 0)).shape)
self.assertEqual((0, 1), fn(torch.addmm, (1, ), (0, 17), (17, 1)).shape)
t = torch.randn((5, 6), device=device)
self.assertEqual(t, fn(torch.addmm, t, (5, 0), (0, 6)))
self.assertEqual(t, fn(torch.addmm, t, (5, 0), (0, 6), test_out=True))
# mv, addmv
self.assertEqual((0,), fn(torch.mv, (0, 0), (0,)).shape)
self.assertEqual((0,), fn(torch.mv, (0, 2), (2,)).shape)
self.assertEqual(torch.zeros((3,), device=device), fn(torch.mv, (3, 0), (0,)))
self.assertEqual(torch.zeros((3,), device=device), fn(torch.mv, (3, 0), (0,), test_out=True))
self.assertEqual((0,), fn(torch.addmv, (0,), (0, 0), (0,)).shape)
t = torch.randn((3,), device=device)
self.assertEqual(t, fn(torch.addmv, t, (3, 0), (0,)))
self.assertEqual(t, fn(torch.addmv, t, (3, 0), (0,), test_out=True))
# bmm, baddbmm
self.assertEqual((0, 0, 0), fn(torch.bmm, (0, 0, 0), (0, 0, 0)).shape)
self.assertEqual((3, 0, 5), fn(torch.bmm, (3, 0, 0), (3, 0, 5)).shape)
self.assertEqual((0, 5, 6), fn(torch.bmm, (0, 5, 0), (0, 0, 6)).shape)
self.assertEqual(torch.zeros((3, 5, 6), device=device), fn(torch.bmm, (3, 5, 0), (3, 0, 6)))
self.assertEqual(torch.zeros((3, 5, 6), device=device), fn(torch.bmm, (3, 5, 0), (3, 0, 6), test_out=True))
self.assertEqual((0, 0, 0), fn(torch.baddbmm, (0, 0, 0), (0, 0, 0), (0, 0, 0)).shape)
self.assertEqual((3, 0, 5), fn(torch.baddbmm, (3, 0, 5), (3, 0, 0), (3, 0, 5)).shape)
self.assertEqual((0, 5, 6), fn(torch.baddbmm, (0, 5, 6), (0, 5, 0), (0, 0, 6)).shape)
self.assertEqual((3, 5, 6), fn(torch.baddbmm, (3, 5, 6), (3, 5, 0), (3, 0, 6)).shape)
c = torch.arange(30, dtype=torch.float32, device=device).reshape(3, 2, 5)
self.assertEqual(-2 * c, fn(torch.baddbmm, c, (3, 2, 0), (3, 0, 5), beta=-2)) # Issue #33467
self.assertEqual(-2 * c, fn(torch.baddbmm, c, (3, 2, 0), (3, 0, 5), beta=-2, test_out=True)) # Issue #33467
# addbmm
self.assertEqual((0, 0), fn(torch.addbmm, (0, 0), (0, 0, 0), (0, 0, 0)).shape)
self.assertEqual((0, 5), fn(torch.addbmm, (0, 5), (3, 0, 0), (3, 0, 5)).shape)
t = torch.randn((5, 6), device=device)
self.assertEqual(t, fn(torch.addbmm, t, (0, 5, 0), (0, 0, 6)))
self.assertEqual(t, fn(torch.addbmm, t, (0, 5, 0), (0, 0, 6), test_out=True))
# matmul
self.assertEqual(torch.tensor(0., device=device), fn(torch.matmul, (0,), (0,)))
self.assertEqual(torch.tensor(0., device=device), fn(torch.matmul, (0,), (0,), test_out=True))
self.assertEqual((0, 0), fn(torch.matmul, (0, 0), (0, 0)).shape)
self.assertEqual((0, 0, 0), fn(torch.matmul, (0, 0, 0), (0, 0, 0)).shape)
self.assertEqual((5, 0, 0), fn(torch.matmul, (5, 0, 0), (5, 0, 0)).shape)
self.assertEqual(torch.zeros((5, 3, 4), device=device), fn(torch.matmul, (5, 3, 0), (5, 0, 4)))
self.assertEqual(torch.zeros((5, 3, 4), device=device), fn(torch.matmul, (5, 3, 0), (5, 0, 4), test_out=True))
# dot
self.assertEqual(torch.tensor(0., device=device), fn(torch.dot, (0,), (0,)))
self.assertEqual(torch.tensor(0., device=device), fn(torch.dot, (0,), (0,), test_out=True))
    @precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 0.6,
                        torch.half: 1e-1, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
    @dtypesIfCUDA(*floating_and_complex_types_and(
        *[torch.half] if not CUDA9 else [],
        *[torch.bfloat16] if CUDA11OrLater and SM53OrLater else []
    ))
    @dtypes(*all_types_and_complex_and(torch.bfloat16))
    def test_corner_cases_of_cublasltmatmul(self, device, dtype):
        """Exercise addmm/linear shapes that historically hit cuBLASLt corner
        cases: leading dimensions much larger than the row count (produced here
        by slicing out of a wider buffer) and a dim beyond 65535.
        Only checks that the calls run; no value assertions."""
        # common case
        M = torch.randn(128, device=device).to(dtype)
        m1 = torch.randn(2048, 2400, device=device).to(dtype)
        m2 = torch.randn(128, 2400, device=device).to(dtype)
        torch.nn.functional.linear(m1, m2, M)
        # Ntrans_B has ld >> rows
        # (slicing rows out of the transposed buffer leaves a large stride)
        m1 = torch.rand([128, 2400]).to(dtype).to(device).t()
        m2 = torch.rand([2048, 25272]).to(dtype).to(device).t()[21940:24340]
        M = torch.rand([128]).to(dtype).to(device)
        torch.addmm(M, m2.t(), m1)
        # trans_A has ld >> rows
        m1 = torch.rand([128, 25272]).to(dtype).to(device)[:, 21940:24340].t()
        m2 = torch.randn(2048, 2400, device=device).to(dtype)
        M = torch.rand([128]).to(dtype).to(device)
        torch.addmm(M, m2, m1)
        # large tensor dim > 65535
        M = torch.randn(16, device=device).to(dtype)
        m1 = torch.randn(32, 131071 , device=device).to(dtype)
        m2 = torch.randn(16, 131071, device=device).to(dtype)
        torch.nn.functional.linear(m1, m2, M)
    @dtypesIfCUDA(*floating_and_complex_types_and(
        *[torch.half] if not CUDA9 else [],
        *[torch.bfloat16] if CUDA11OrLater and SM53OrLater else []
    ))
    @dtypes(*all_types_and_complex_and(torch.bfloat16))
    def test_blas_alpha_beta_empty(self, device, dtype):
        """addmv/addmm with an empty reduction dimension must reduce to
        beta * input: the alpha term contributes nothing when mat/vec are empty."""
        # This test is disabled on CUDA 9 due to:
        # See: https://github.com/pytorch/pytorch/issues/31006
        if dtype is torch.bfloat16 and self.device_type == 'xla':
            # TODO (@zasdfgbnm): this causes the following error on test
            # TestTorchDeviceTypeXLA.test_blas_alpha_beta_empty_xla_bfloat16:
            #
            # RuntimeError: _th_equal not supported on CPUType for BFloat16
            return
        # ensure beta is respected
        value = 11
        input = torch.full((2,), value, dtype=dtype, device=device)
        mat = torch.ones((2, 0), dtype=dtype, device=device)
        vec = torch.ones((0,), dtype=dtype, device=device)
        out = torch.empty((2,), dtype=dtype, device=device)
        if dtype.is_complex:
            alpha = 6 + 7j
            beta = 3 + 4j
        else:
            alpha = 6
            beta = 3
        # torch.addmv: functional and out= forms must both equal beta * input
        self.assertEqual(torch.full((2,), beta * value, dtype=dtype, device=device),
                         torch.addmv(input=input, mat=mat, vec=vec, alpha=alpha, beta=beta))
        self.assertEqual(torch.full((2,), beta * value, dtype=dtype, device=device),
                         torch.addmv(input=input, mat=mat, vec=vec, alpha=alpha, beta=beta, out=out))
        # torch.addmm
        # (reuses the empty `mat` above as mat1 so the k-dim is 0)
        input = torch.full((2, 3), value, dtype=dtype, device=device)
        mat2 = torch.ones((0, 3), dtype=dtype, device=device)
        out = torch.empty((2, 3), dtype=dtype, device=device)
        self.assertEqual(torch.full((2, 3), beta * value, dtype=dtype, device=device),
                         torch.addmm(input=input, mat1=mat, mat2=mat2, alpha=alpha, beta=beta))
        self.assertEqual(torch.full((2, 3), beta * value, dtype=dtype, device=device),
                         torch.addmm(input=input, mat1=mat, mat2=mat2, alpha=alpha, beta=beta, out=out))
@dtypes(*floating_and_complex_types_and(torch.half, torch.bfloat16))
def test_blas_nan_out(self, device, dtype):
# These functions should work correctly with NaN filled outputs,
# but need special handling, see [NOTE: cpu_zero]
b = 3
n = 5
m = 7
p = 11
# torch.mv
nm = torch.randn((m, n), device=device).t()
_m = torch.randn((), device=device).expand(m)
_m_out = torch.full((m,), float('nan'), device=device)
self.assertEqual(torch.mv(nm, _m), torch.mv(nm, _m, out=_m_out))
self.assertEqual(0, torch.isnan(torch.mv(nm, _m)).sum())
# torch.mm
mp = torch.randn((p, m), device=device).t()
np_out = torch.full((n, p), float('nan'), device=device)
self.assertEqual(torch.mm(nm, mp), torch.mm(nm, mp, out=np_out))
# torch.bmm
bnm = torch.randn((b, m, n), device=device).transpose(1, 2)
bmp = torch.randn((b, p, m), device=device).transpose(1, 2)
bnp_out = torch.full((b, n, p), float('nan'), device=device)
self.assertEqual(torch.bmm(bnm, bmp), torch.bmm(bnm, bmp, out=bnp_out))
@onlyCPU # not supported by CUBLAS
def test_blas_mv_large_input(self, device):
# This would previously fail if the allocated output had NaNs, see:
# https://github.com/pytorch/pytorch/issues/31663 and [NOTE: cpu_zero]
n = 3000
m = 200
nm = torch.randn((m, n), device=device).t()
_m = torch.randn((), device=device).expand(m)
_m_out = torch.full((m,), 0., device=device)
self.assertEqual(torch.mv(nm, _m), torch.mv(nm, _m, out=_m_out))
@onlyCPU
def test_renorm_ps(self, device):
# full reduction
x = torch.randn(5, 5)
xn = x.numpy()
for p in [1, 2, 3, 4, inf]:
res = x.renorm(p, 1, 1)
expected = x / x.norm(p, 0, keepdim=True).clamp(min=1)
self.assertEqual(res, expected, msg="renorm failed for {}-norm".format(p))
    @skipCPUIfNoLapack
    @skipCUDAIfNoCusolver
    @dtypes(*floating_and_complex_types())
    def test_householder_product(self, device, dtype):
        """linalg.householder_product must reconstruct Q from geqrf-style
        reflectors/tau; checked against linalg.qr, the empty-tau identity case,
        and the out= variant, over empty/batched/zero-batch shapes."""
        def generate_reflectors_and_tau(A):
            """
            This function uses numpy.linalg.qr with mode "raw" to extract output of LAPACK's geqrf.
            There is torch.geqrf function but it doesn't work with complex-valued input.
            """
            if A.numel() > 0:
                A_cpu = A.cpu()
                flattened_batch_shape = [-1, *A_cpu.shape[-2:]]
                reflectors = torch.empty_like(A_cpu).view(*flattened_batch_shape)
                tau_shape = [*A_cpu.shape[:-2], A_cpu.shape[-1]]
                tau = torch.empty(tau_shape, dtype=dtype).view(-1, A_cpu.shape[-1])
                for A_i, reflectors_i, tau_i in zip(A_cpu.contiguous().view(*flattened_batch_shape), reflectors, tau):
                    # np.linalg.qr(..., mode='raw') returns (h, tau) with h transposed
                    reflectors_tmp, tau_i[:] = map(torch.from_numpy, np.linalg.qr(A_i, mode='raw'))
                    reflectors_i[:] = reflectors_tmp.T
                reflectors = reflectors.view(*A_cpu.shape)
                tau = tau.view(tau_shape)
                return reflectors.to(A.device), tau.to(A.device)
            # zero-element input: only the shapes matter, contents are unused
            reflectors = torch.empty_like(A)
            tau = torch.empty(*A.shape[:-2], A.shape[-1], dtype=dtype, device=device)
            return reflectors, tau
        def run_test(shape):
            A = torch.randn(*shape, dtype=dtype, device=device)
            reflectors, tau = generate_reflectors_and_tau(A)
            expected, _ = torch.linalg.qr(A)
            actual = torch.linalg.householder_product(reflectors, tau)
            # torch.linalg.qr does not work correctly for zero batch dimension tensors
            # see https://github.com/pytorch/pytorch/issues/50576
            if (A.numel() > 0):
                self.assertEqual(expected, actual)
            else:
                self.assertTrue(actual.shape == shape)
            # if tau is empty and A is not the result should be a matrix with ones on the diagonal
            if (A.numel() > 0):
                tau_empty = torch.empty(*shape[:-2], 0, dtype=dtype, device=device)
                identity_mat = torch.zeros_like(reflectors)
                identity_mat.diagonal(dim1=-1, dim2=-2)[:] = 1
                actual = torch.linalg.householder_product(reflectors, tau_empty)
                self.assertEqual(actual, identity_mat)
            # out= variant must alias the returned tensor and match the reference
            out = torch.empty_like(A)
            ans = torch.linalg.householder_product(reflectors, tau, out=out)
            self.assertEqual(ans, out)
            if (A.numel() > 0):
                self.assertEqual(expected, out)
        shapes = [(0, 0), (5, 0),  # Empty matrix
                  (5, 5), (5, 3),  # Single matrix
                  (0, 0, 0), (0, 5, 5), (0, 5, 3),  # Zero batch dimension tensors
                  (2, 5, 5), (2, 5, 3),  # 3-dim tensors
                  (2, 1, 5, 5), (2, 1, 5, 3)]  # 4-dim tensors
        for shape in shapes:
            run_test(shape)
    @skipCPUIfNoLapack
    @skipCUDAIfNoCusolver
    def test_householder_product_errors_and_warnings(self, device):
        """householder_product must raise on malformed shapes, dtypes and
        devices, and warn when a non-empty out tensor is resized."""
        test_cases = [
            # input1 size, input2 size, error regex
            ((10,), (2,), r"input must have at least 2 dimensions"),
            ((10, 6), (20,), r"input.shape\[-1\] must be greater than or equal to tau.shape\[-1\]"),
            ((6, 10), (5,), r"input.shape\[-2\] must be greater than or equal to input.shape\[-1\]"),
        ]
        for a_size, tau_size, error_regex in test_cases:
            a = torch.rand(*a_size, device=device)
            tau = torch.rand(*tau_size, device=device)
            with self.assertRaisesRegex(RuntimeError, error_regex):
                torch.linalg.householder_product(a, tau)
        # if out tensor with wrong shape is passed a warning is given
        reflectors = torch.randn(3, 3, device=device)
        tau = torch.randn(3, device=device)
        out = torch.empty(2, 3, device=device)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.householder_product(reflectors, tau, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes should be safely castable
        out = torch.empty_like(reflectors).to(torch.int)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.linalg.householder_product(reflectors, tau, out=out)
        with self.assertRaisesRegex(RuntimeError, "tau dtype Int does not match input dtype"):
            torch.linalg.householder_product(reflectors, tau.to(torch.int))
        if torch.cuda.is_available():
            # device of out and input should match
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty_like(reflectors).to(wrong_device)
            with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
                torch.linalg.householder_product(reflectors, tau, out=out)
            # device of tau and input should match
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            tau = tau.to(wrong_device)
            with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
                torch.linalg.householder_product(reflectors, tau)
    @precisionOverride({torch.float32: 1e-2, torch.complex64: 1e-2})
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_linalg_lu_family(self, device, dtype):
        """Cross-validate the LU-factorization family of ops against each other.

        Covers torch.lu, torch.linalg.lu_factor, torch.linalg.lu_factor_ex,
        torch.lu_unpack, torch.linalg.lu, torch.linalg.lu_solve and
        torch.linalg.solve over square/rectangular, batched, empty and
        singular inputs, with and without pivoting (pivot=False is CUDA-only).
        """
        # Tests torch.lu
        #       torch.linalg.lu_factor
        #       torch.linalg.lu_factor_ex
        #       torch.lu_unpack
        #       torch.linalg.lu_solve
        #       torch.linalg.solve
        make_arg_full = partial(make_fullrank_matrices_with_distinct_singular_values, device=device, dtype=dtype)
        make_arg = partial(make_tensor, device=device, dtype=dtype)
        def run_test(A, pivot, singular, fn):
            # k is the number of pivots for an (..., m, n) input.
            k = min(A.shape[-2:])
            batch = A.shape[:-2]
            # Only torch.linalg.lu_factor raises on singular input; the other
            # factorizations report failure through an `info` tensor instead.
            check_errors = (fn == torch.linalg.lu_factor)
            if singular and check_errors:
                # It may or may not throw as the LU decomposition without pivoting
                # may still succeed for singular matrices
                try:
                    LU, pivots = fn(A, pivot=pivot)
                except RuntimeError:
                    return
            else:
                LU, pivots = fn(A, pivot=pivot)[:2]
            self.assertEqual(LU.size(), A.shape)
            self.assertEqual(pivots.size(), batch + (k,))
            if not pivot:
                # Without pivoting, the pivots must be the identity permutation.
                self.assertEqual(pivots, torch.arange(1, 1 + k, device=device, dtype=torch.int32).expand(batch + (k, )))
            P, L, U = torch.lu_unpack(LU, pivots, unpack_pivots=pivot)
            # Reconstruction check: A == P @ L @ U (or L @ U when not pivoting).
            self.assertEqual(P @ L @ U if pivot else L @ U, A)
            # torch.linalg.lu must agree with lu_factor + lu_unpack.
            PLU = torch.linalg.lu(A, pivot=pivot)
            self.assertEqual(P, PLU.P)
            self.assertEqual(L, PLU.L)
            self.assertEqual(U, PLU.U)
            if not singular and A.size(-2) == A.size(-1):
                # Solve A X = B (and left/right, adjoint variants); only
                # meaningful for invertible square A.
                nrhs = ((), (1,), (3,))
                for left, rhs in product((True, False), nrhs):
                    # Vector case when left = False is not allowed
                    if not left and rhs == ():
                        continue
                    if left:
                        shape_B = A.shape[:-1] + rhs
                    else:
                        shape_B = A.shape[:-2] + rhs + A.shape[-1:]
                    B = make_arg(shape_B)
                    # Test linalg.lu_solve. It does not support vectors as rhs
                    # See https://github.com/pytorch/pytorch/pull/74045#issuecomment-1112304913
                    if rhs != ():
                        for adjoint in (True, False):
                            X = torch.linalg.lu_solve(LU, pivots, B, left=left, adjoint=adjoint)
                            A_adj = A.mH if adjoint else A
                            if left:
                                self.assertEqual(B, A_adj @ X)
                            else:
                                self.assertEqual(B, X @ A_adj)
                    # Test linalg.solve
                    X = torch.linalg.solve(A, B, left=left)
                    # Unsqueeze the vector rhs so the matmul checks below work.
                    X_ = X.unsqueeze(-1) if rhs == () else X
                    B_ = B.unsqueeze(-1) if rhs == () else B
                    if left:
                        self.assertEqual(B_, A @ X_)
                    else:
                        self.assertEqual(B_, X_ @ A)
        sizes = ((3, 3), (5, 5), (4, 2), (3, 4), (0, 0), (0, 1), (1, 0))
        batches = ((0,), (), (1,), (2,), (3,), (1, 0), (3, 5))
        # Non pivoting just implemented for CUDA
        pivots = (True, False) if self.device_type == "cuda" else (True,)
        fns = (partial(torch.lu, get_infos=True), torch.linalg.lu_factor, torch.linalg.lu_factor_ex)
        for ms, batch, pivot, singular, fn in itertools.product(sizes, batches, pivots, (True, False), fns):
            shape = batch + ms
            A = make_arg(shape) if singular else make_arg_full(*shape)
            # Just do one of them on singular matrices
            if A.numel() == 0 and not singular:
                continue
            run_test(A, pivot, singular, fn)
            # Reproducer of a magma bug,
            # see https://bitbucket.org/icl/magma/issues/13/getrf_batched-kernel-produces-nans-on
            # This is also a bug in cuSOLVER < 11.3
            if (dtype == torch.double
                    and singular
                    and (torch.version.cuda is None or
                         torch.version.cuda.split('.') >= ["11", "3"])):
                A = torch.ones(batch + ms, dtype=dtype, device=device)
                run_test(A, pivot, singular, fn)
        # Info should be positive for rank deficient matrices
        A = torch.ones(5, 3, 3, device=device)
        self.assertTrue((torch.linalg.lu_factor_ex(A, pivot=True).info >= 0).all())
        if self.device_type == 'cpu':
            # Error checking, no pivoting variant on CPU
            fns = [torch.lu, torch.linalg.lu_factor, torch.linalg.lu_factor_ex, torch.linalg.lu]
            for f in fns:
                with self.assertRaisesRegex(RuntimeError, 'LU without pivoting is not implemented on the CPU'):
                    f(torch.empty(1, 2, 2), pivot=False)
    @precisionOverride({torch.float32: 1e-2, torch.complex64: 1e-2})
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @setLinalgBackendsToDefaultFinally
    @dtypes(*floating_and_complex_types())
    def test_linalg_lu_solve(self, device, dtype):
        """Check torch.linalg.lu_solve across every available linalg backend.

        Factors each generated A once, then verifies the solve reproduces B
        for all left/adjoint combinations under the default backend and, on
        CUDA, under magma / cusolver when present.
        """
        make_arg = partial(make_tensor, dtype=dtype, device=device)
        backends = ["default"]
        if torch.device(device).type == 'cuda':
            if torch.cuda.has_magma:
                backends.append("magma")
            if has_cusolver():
                backends.append("cusolver")
        def gen_matrices():
            # Square systems over a handful of batch shapes and sizes,
            # including empty batches and n == 0.
            rhs = 3
            ns = (5, 2, 0)
            batches = ((), (0,), (1,), (2,), (2, 1), (0, 2))
            for batch, n in product(batches, ns):
                yield make_arg(batch + (n, n)), make_arg(batch + (n, rhs))
            # Shapes to exercise all the paths
            shapes = ((1, 64), (2, 128), (1025, 2))
            for b, n in shapes:
                yield make_arg((b, n, n)), make_arg((b, n, rhs))
        for A, B in gen_matrices():
            LU, pivots = torch.linalg.lu_factor(A)
            for backend in backends:
                torch.backends.cuda.preferred_linalg_library(backend)
                for left, adjoint in product((True, False), repeat=2):
                    # When solving from the right the rhs must be (..., rhs, n).
                    B_left = B if left else B.mT
                    X = torch.linalg.lu_solve(LU, pivots, B_left, left=left, adjoint=adjoint)
                    A_adj = A.mH if adjoint else A
                    if left:
                        self.assertEqual(B_left, A_adj @ X)
                    else:
                        self.assertEqual(B_left, X @ A_adj)
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(torch.double)
def test_lu_unpack_check_input(self, device, dtype):
x = torch.rand(5, 5, 5, device=device, dtype=dtype)
lu_data, lu_pivots = torch.linalg.lu_factor(x)
with self.assertRaisesRegex(RuntimeError, "torch.int32 dtype"):
torch.lu_unpack(lu_data, lu_pivots.long())
# check that onces flags are unset, Nones are returned
p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_data=False)
self.assertTrue(l.numel() == 0 and u.numel() == 0)
p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_pivots=False)
self.assertTrue(p.numel() == 0)
p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_data=False, unpack_pivots=False)
self.assertTrue(p.numel() == 0 and l.numel() == 0 and u.numel() == 0)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.double)
    def test_lobpcg_basic(self, device, dtype):
        """Run the shared LOBPCG checks using the 'basic' iteration method."""
        self._test_lobpcg_method(device, dtype, 'basic')
    @skipCUDAIfNoCusolver
    @skipCPUIfNoLapack
    @dtypes(torch.double)
    def test_lobpcg_ortho(self, device, dtype):
        """Run the shared LOBPCG checks using the 'ortho' iteration method."""
        self._test_lobpcg_method(device, dtype, 'ortho')
    def _test_lobpcg_method(self, device, dtype, method):
        """Shared LOBPCG checks for the given iteration `method` ('basic'/'ortho').

        Runs classical and generalized eigenvalue problems on dense and sparse
        symmetric positive-definite inputs, validating convergence, residuals,
        B-orthogonality, and the computed extreme eigenvalues.
        """
        from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
        from torch._linalg_utils import matmul, qform
        from torch._lobpcg import lobpcg
        def test_tracker(worker):
            # Called by lobpcg each iteration; asserts solver invariants once
            # at least k eigenpairs have converged.
            k = worker.iparams['k']
            nc = worker.ivars['converged_count']
            if k <= nc:
                tol = worker.fparams['tol']
                rerr = worker.tvars['rerr']
                X = worker.X
                E = worker.E
                B = worker.B
                A = worker.A
                dtype = X.dtype
                device = X.device
                # Check convergence
                self.assertLessEqual(rerr[:k].max(), tol)
                # Check B-orthogonality
                I = torch.eye(k, k, dtype=dtype, device=device)
                self.assertEqual(qform(B, X[:, :k]), I)
                # Check block equation
                self.assertEqual(qform(A, X[:, :k]) / E[:k], I, atol=0.2, rtol=0)
        orig_lobpcg = lobpcg
        def lobpcg(*args, **kwargs):
            # Wrapper pinning the tracker, iteration budget, method and tol
            # for every call below.
            kwargs['tracker'] = test_tracker
            kwargs['niter'] = 1000
            kwargs['method'] = method
            kwargs['tol'] = 1e-8
            return orig_lobpcg(*args, **kwargs)
        prec = 5e-4
        # check dense input
        mm = torch.matmul
        for batches in [(), (2,), (2, 3)]:
            for m, n, k in [
                    (9, 3, 1),
                    (9, 3, 2),
                    (9, 2, 2),
                    (100, 15, 5),
            ]:
                # skip tests that are known to fail with the basic
                # LOBPCG method due to calling cholesky on singular
                # input
                if method == 'basic' and (m, n, k) in [(9, 2, 2), (100, 15, 5)]:
                    continue
                A = random_symmetric_pd_matrix(m, *batches, device=device, dtype=dtype)
                B = random_symmetric_pd_matrix(m, *batches, device=device, dtype=dtype)
                # classical eigenvalue problem, smallest eigenvalues
                E, V = lobpcg(A, k=k, n=n, largest=False)
                self.assertEqual(E.shape, batches + (k,))
                self.assertEqual(V.shape, batches + (m, k))
                # Residual of the eigen-equation A V = V diag(E).
                self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)
                e = torch.symeig(A)[0]
                e_smallest = e[..., :k]
                self.assertEqual(E, e_smallest)
                # classical eigenvalue problem, largest eigenvalues
                E, V = lobpcg(A, k=k, n=n, largest=True)
                e_largest, _ = torch.sort(e[..., -k:], descending=True)
                self.assertEqual(E, e_largest, atol=prec, rtol=0)
                self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)
                # generalized eigenvalue problem, smallest eigenvalues
                E, V = lobpcg(A, B=B, k=k, n=n, largest=False)
                self.assertEqual(matmul(A, V), mm(matmul(B, V), E.diag_embed()), atol=prec, rtol=0)
                # generalized eigenvalue problem, largest eigenvalues
                E, V = lobpcg(A, B=B, k=k, n=n, largest=True)
                # Normalize by E.max() to keep the comparison well-scaled.
                self.assertEqual(matmul(A, V) / E.max(), mm(matmul(B, V), (E / E.max()).diag_embed()),
                                 atol=prec, rtol=0)
        # check sparse input
        for m, n, k, density in [
                (5, 1, 1, 0.8),
                (9, 3, 2, 0.5),
                (100, 1, 1, 0.1),
                (1000, 7, 3, 0.01),
        ]:
            # skip tests that are known to fail with the basic LOBCG
            # method due to insufficient accuracy
            if method == 'basic' and (m, n, k, density) in [(1000, 7, 3, 0.01)]:
                continue
            A = random_sparse_pd_matrix(m, density=density, device=device, dtype=dtype)
            B = random_sparse_pd_matrix(m, density=density, device=device, dtype=dtype)
            # assumes random_sparse_pd_matrix produces eigenvalues i/m for
            # i = 1..m — the exact-eigenvalue assertions below rely on it;
            # TODO confirm against common_utils.
            A_eigenvalues = torch.arange(1, m + 1, dtype=dtype) / m
            e_smallest = A_eigenvalues[..., :k]
            e_largest, _ = torch.sort(A_eigenvalues[..., -k:], descending=True)
            # classical eigenvalue problem, smallest eigenvalues
            E, V = lobpcg(A, k=k, n=n, largest=False)
            self.assertEqual(E, e_smallest)
            self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)
            # classical eigenvalue problem, largest eigenvalues
            E, V = lobpcg(A, k=k, n=n, largest=True)
            self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)
            self.assertEqual(E, e_largest)
            # generalized eigenvalue problem, smallest eigenvalues
            E, V = lobpcg(A, B=B, k=k, n=n, largest=False)
            self.assertEqual(matmul(A, V), matmul(B, mm(V, E.diag_embed())), atol=prec, rtol=0)
            # generalized eigenvalue problem, largest eigenvalues
            E, V = lobpcg(A, B=B, k=k, n=n, largest=True)
            self.assertEqual(matmul(A, V) / E.max(), mm(matmul(B, V), (E / E.max()).diag_embed()),
                             atol=prec, rtol=0)
@skipCPUIfNoLapack
@onlyCPU
@dtypes(torch.double)
def test_lobpcg_torchscript(self, device, dtype):
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
lobpcg = torch.jit.script(torch.lobpcg)
m = 500
k = 5
A1 = random_sparse_pd_matrix(m, density=2.0 / m, device=device, dtype=dtype)
X1 = torch.randn((m, k), dtype=dtype, device=device)
E1, V1 = lobpcg(A1, X=X1)
eq_err = torch.norm((mm(A1, V1) - V1 * E1), 2) / E1.max()
self.assertLess(eq_err, 1e-6)
    @unittest.skipIf(not TEST_SCIPY or (TEST_SCIPY and scipy.__version__ < '1.4.1'), "Scipy not found or older than 1.4.1")
    @skipCPUIfNoLapack
    @onlyCPU
    @dtypes(torch.double)
    def test_lobpcg_scipy(self, device, dtype):
        """Compare torch and scipy.sparse.linalg implementations of lobpcg
        """
        import time
        from torch.testing._internal.common_utils import random_sparse_pd_matrix
        from torch._linalg_utils import matmul as mm
        from scipy.sparse.linalg import lobpcg as scipy_lobpcg
        import scipy.sparse
        def toscipy(A):
            # Convert a torch tensor (sparse COO or dense) to its scipy equivalent.
            if A.layout == torch.sparse_coo:
                values = A.coalesce().values().cpu().numpy().copy()
                indices = A.coalesce().indices().cpu().numpy().copy()
                return scipy.sparse.coo_matrix((values, (indices[0], indices[1])), A.shape)
            return A.cpu().numpy().copy()
        niter = 1000
        repeat = 10
        m = 500  # size of the square matrix
        k = 7  # the number of requested eigenpairs
        A1 = random_sparse_pd_matrix(m, density=2.0 / m, device=device, dtype=dtype)
        B1 = random_sparse_pd_matrix(m, density=2.0 / m, device=device, dtype=dtype)
        X1 = torch.randn((m, k), dtype=dtype, device=device)
        A2 = toscipy(A1)
        B2 = toscipy(B1)
        X2 = toscipy(X1)
        lambdas1 = []
        def tracker(worker):
            # Record eigenvalue estimates every iteration so we can compare
            # iteration counts against scipy's retLambdaHistory.
            lambdas1.append(worker.E[:])
        tol = 1e-8
        # tol for scipy lobpcg will be chosen so that the number of
        # iterations will be equal or very close to pytorch lobpcg
        # (that is around 170-180)
        # Standard eigenvalue problem
        E1, V1 = torch.lobpcg(A1, X=X1, niter=niter, largest=True, tracker=tracker, tol=tol)
        E2, V2, lambdas2 = scipy_lobpcg(A2, X2, maxiter=niter, largest=True, retLambdaHistory=True, tol=1.1 * tol)
        iters1 = len(lambdas1)
        iters2 = len(lambdas2)
        # Iteration counts should agree to within 5%.
        self.assertLess(abs(iters1 - iters2), 0.05 * max(iters1, iters2))
        E2a, V2a = scipy_lobpcg(A2, X2, maxiter=niter, largest=False)
        eq_err = torch.norm((mm(A1, V1) - V1 * E1), 2) / E1.max()
        eq_err_scipy = (abs(A2.dot(V2) - V2 * E2)**2).sum() ** 0.5 / E2.max()
        self.assertLess(eq_err, 1e-6)  # std
        self.assertLess(eq_err_scipy, 1e-6)  # std
        self.assertEqual(E1, torch.from_numpy(E2.copy()))
        # Generalized eigenvalue problem
        lambdas1 = []
        def tracker(worker):
            lambdas1.append(worker.E[:])
        E1, V1 = torch.lobpcg(A1, B=B1, X=X1, niter=niter, largest=True, tracker=tracker, tol=tol)
        E2, V2, lambdas2 = scipy_lobpcg(A2, X2, B=B2, maxiter=niter, largest=True, retLambdaHistory=True, tol=39 * tol)
        E2a, V2a = scipy_lobpcg(A2, X2, B=B2, maxiter=niter, largest=False)
        iters1 = len(lambdas1)
        iters2 = len(lambdas2)
        self.assertLess(abs(iters1 - iters2), 0.05 * max(iters1, iters2))
        eq_err = torch.norm((mm(A1, V1) - mm(B1, V1) * E1), 2) / E1.max()
        eq_err_scipy = (abs(A2.dot(V2) - B2.dot(V2) * E2)**2).sum() ** 0.5 / E2.max()
        self.assertLess(eq_err, 1e-6)  # general
        self.assertLess(eq_err_scipy, 1e-6)  # general
        self.assertEqual(E1, torch.from_numpy(E2.copy()))
        # Timings
        elapsed_ortho = 0
        elapsed_ortho_general = 0
        elapsed_scipy = 0
        elapsed_general_scipy = 0
        for i in range(repeat):
            start = time.time()
            torch.lobpcg(A1, X=X1, niter=niter, method='ortho', tol=tol)
            end = time.time()
            elapsed_ortho += end - start
            start = time.time()
            torch.lobpcg(A1, X=X1, B=B1, niter=niter, method='ortho', tol=tol)
            end = time.time()
            elapsed_ortho_general += end - start
            start = time.time()
            scipy_lobpcg(A2, X2, maxiter=niter, tol=1.1 * tol)
            end = time.time()
            elapsed_scipy += end - start
            start = time.time()
            scipy_lobpcg(A2, X2, B=B2, maxiter=niter, tol=39 * tol)
            end = time.time()
            elapsed_general_scipy += end - start
        # Average per-call wall times in milliseconds.
        elapsed_ortho_ms = 1000.0 * elapsed_ortho / repeat
        elapsed_ortho_general_ms = 1000.0 * elapsed_ortho_general / repeat
        elapsed_scipy_ms = 1000.0 * elapsed_scipy / repeat
        elapsed_general_scipy_ms = 1000.0 * elapsed_general_scipy / repeat
        print('''
CPU timings: torch.lobpcg vs scipy.sparse.linalg.lobpcg
-------------------------------------------------------
              | standard | generalized | method
torch.lobpcg | {:10.2f} | {:10.2f} | ortho
scipy_lobpcg | {:10.2f} | {:10.2f} | N/A
-(input size: {:4}, eigenpairs:{:2}, units: ms per call)-
        '''.format(elapsed_ortho_ms, elapsed_ortho_general_ms,
                   elapsed_scipy_ms, elapsed_general_scipy_ms,
                   m, k))
        # Handling of very small tolerance
        tol = 1e-100
        lambdas1 = []
        def tracker(worker):
            lambdas1.append(worker.E[:])
        E1, V1 = torch.lobpcg(A1, X=X1, niter=niter, largest=True, tracker=tracker, tol=tol)
        iters1 = len(lambdas1)
        eq_err = torch.norm((mm(A1, V1) - V1 * E1), 2) / E1.max()
        # scipy may fail outright at this tolerance; record sentinel values.
        try:
            E2, V2, lambdas2 = scipy_lobpcg(A2, X2, maxiter=niter, largest=True, retLambdaHistory=True, tol=tol)
            iters2 = len(lambdas2)
            eq_err_scipy = (abs(A2.dot(V2) - V2 * E2)**2).sum() ** 0.5 / E2.max()
        except Exception as msg:
            print('Calling scipy_lobpcg failed [standard]:', msg)
            iters2 = -1
            eq_err_scipy = -1
        lambdas1 = []
        def tracker(worker):
            lambdas1.append(worker.E[:])
        E1, V1 = torch.lobpcg(A1, X=X1, B=B1, niter=niter, largest=True, tracker=tracker, tol=tol)
        iters1_general = len(lambdas1)
        eq_err_general = torch.norm((mm(A1, V1) - mm(B1, V1) * E1), 2) / E1.max()
        try:
            E2, V2, lambdas2 = scipy_lobpcg(A2, X2, B=B2, maxiter=niter, largest=True, retLambdaHistory=True, tol=tol)
            iters2_general = len(lambdas2)
            eq_err_general_scipy = (abs(A2.dot(V2) - B2.dot(V2) * E2)**2).sum() ** 0.5 / E2.max()
        except Exception as msg:
            print('Calling scipy_lobpcg failed [generalized]:', msg)
            iters2_general = -1
            eq_err_general_scipy = -1
        print('''\
Handling of small tol={:6.0e}: torch.lobpcg vs scipy.sparse.linalg.lobpcg
----------------------------------------------------------------------------
              | standard | generalized | niter | method
torch.lobpcg | {:10.2e} | {:10.2e} | {:6} | ortho
scipy_lobpcg | {:10.2e} | {:10.2e} | {:6} | N/A
---(input size: {:4}, eigenpairs:{:2}, units: relative error, maxiter={:4})---
'''.format(tol, eq_err, eq_err_general, iters1, eq_err_scipy, eq_err_general_scipy, iters2, m, k, niter))
def _test_addmm_addmv(self, f, t, m, v, *, alpha=None, beta=None, transpose_out=False, activation=None):
dtype = t.dtype
numpy_dtype = dtype
if dtype in {torch.bfloat16}:
numpy_dtype = torch.float
if dtype.is_complex:
alpha = 0.9 + 0.3j if alpha is None else alpha
beta = 0.5 + 0.6j if beta is None else beta
else:
alpha = 1.2 if alpha is None else alpha
beta = 0.8 if beta is None else beta
res1 = f(t, m, v, alpha=alpha, beta=beta)
res2 = torch.full_like(res1, math.nan)
if transpose_out:
res2 = res2.t().clone(memory_format=torch.contiguous_format).t()
f(t, m, v, alpha=alpha, beta=beta, out=res2)
res3 = alpha * (m.to(numpy_dtype).cpu().numpy() @ v.to(numpy_dtype).cpu().numpy())
if beta != 0:
res3 += (beta * t).to(numpy_dtype).cpu().numpy()
if activation == "relu":
res3 = res3 * (res3 > 0)
else:
assert activation is None, f"unsupported activation {activation}"
res3 = torch.from_numpy(res3).to(dtype)
self.assertEqual(res1, res2)
self.assertEqual(res1, res3)
    @precisionOverride({torch.bfloat16: 1e-0, torch.half: 5e-4, torch.float: 1e-4, torch.double: 1e-8,
                        torch.cfloat: 1e-4, torch.cdouble: 1e-8})
    @dtypesIfCUDA(*floating_and_complex_types_and(
        *[torch.bfloat16] if TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater) else [],
        *[torch.half]))
    @dtypes(torch.bfloat16, torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_addmv(self, device, dtype):
        """torch.addmv over contiguous, expanded (0-strided) and transposed
        operands, including the beta=0 path with a NaN-filled input vector."""
        # have to use torch.randn(...).to(bfloat16) instead of
        # torch.randn(..., dtype=bfloat16). randn does not support
        # bfloat16 yet.
        # "*0.2" to reduce errors for low precision
        ts = [
            0.2 * torch.randn(50, device=device).to(dtype),
            0.2 * torch.randn(1, device=device).to(dtype).expand(50),
        ]
        vs = [
            0.2 * torch.randn(100, device=device).to(dtype),
            0.2 * torch.ones(1, device=device).to(dtype).expand(100),  # to reduce errors for low precision
        ]
        ms = [
            # 0d
            0.2 * torch.ones((), device=device).to(dtype).expand(50, 100),  # to reduce errors for low precision
            # 1d
            0.2 * torch.randn((1, 100), device=device).to(dtype).expand(50, 100),
            # this initialization reduces errors for low precision for broadcasted matrices
            # by making sure that intermediate and result values are exactly representable
            # in low precision type
            0.2 * torch.randint(3, (50, 1), dtype=torch.float, device=device).to(dtype).expand(50, 100),
            # 2d
            0.2 * torch.randn((50, 100), device=device).to(dtype),
            0.2 * torch.randn((100, 50), device=device).to(dtype).t(),
        ]
        for m, v, t in itertools.product(ms, vs, ts):
            self._test_addmm_addmv(torch.addmv, t, m, v)
        # Test beta=0, t=nan: with beta=0 the NaN input must be ignored entirely.
        t = torch.full((50,), math.nan, device=device).to(dtype)
        for m, v in itertools.product(ms, vs):
            self._test_addmm_addmv(torch.addmv, t, m, v, beta=0)
@dtypesIfCUDA(*floating_types_and(*[torch.bfloat16] if TEST_WITH_ROCM or (CUDA11OrLater and
SM53OrLater) else []))
@dtypes(torch.float, torch.double)
def test_addmv_rowmajor_colmajor_incx_incy_lda(self, device, dtype):
# tests (o, s)*(s). o is output size, s is summed size.
o = 5
s = 3
a_data = torch.arange(1, o * s + 1, device=device, dtype=dtype).view(o, s)
x_data = torch.arange(1, s + 1, 1, device=device, dtype=dtype)
y_data = torch.ones(o, device=device, dtype=dtype)
control = torch.tensor([15., 33., 51., 69., 87.], device=device, dtype=dtype)
def _test(row_major, incx, incy, lda_tail):
if row_major:
a_storage = torch.full((o, s + lda_tail), float('nan'), device=device, dtype=dtype)
else:
a_storage = torch.full((s, o + lda_tail), float('nan'), device=device, dtype=dtype).permute(1, 0)
a = a_storage[:o, :s].copy_(a_data)
x_storage = torch.full((s, incx), float('nan'), device=device, dtype=dtype)
x = x_storage[:, 0].copy_(x_data)
y_storage = torch.full((o, incy), float('nan'), device=device, dtype=dtype)
y = y_storage[:, 0].copy_(y_data)
self._test_addmm_addmv(torch.addmv, y, a, x)
for row_major, incx, incy, lda_tail in itertools.product((False, True), (1, 2), (1, 2), (0, 1)):
_test(row_major, incx, incy, lda_tail)
def _test_addmm_impl(self, func, activation, device, dtype):
M = torch.randn(10, 25, device=device).to(dtype)
m1 = torch.randn(10, 50, device=device).to(dtype)
m2 = torch.randn(50, 25, device=device).to(dtype)
self._test_addmm_addmv(func, M, m1, m2, activation=activation)
# Test 0-strided
M = torch.randn(10, 1, device=device).to(dtype).expand(10, 25)
m1 = torch.randn(10, 1, device=device).to(dtype).expand(10, 50)
m2 = torch.randn(50, 25, device=device).to(dtype)
self._test_addmm_addmv(func, M, m1, m2, activation=activation)
# Test beta=0, M=nan
M = torch.full((10, 25), math.nan, device=device).to(dtype)
m1 = torch.randn(10, 50, device=device).to(dtype)
m2 = torch.randn(50, 25, device=device).to(dtype)
self._test_addmm_addmv(func, M, m1, m2, beta=0, activation=activation)
# Test transpose
for t1, t2, t3, t4 in itertools.product([True, False], repeat=4):
def maybe_transpose(cond, m):
if not cond:
return m
return m.t().clone(memory_format=torch.contiguous_format).t()
M = maybe_transpose(t1, torch.randn(10, 25, device=device).to(dtype))
m1 = maybe_transpose(t2, torch.randn(10, 50, device=device).to(dtype))
m2 = maybe_transpose(t3, torch.randn(50, 25, device=device).to(dtype))
self._test_addmm_addmv(func, M, m1, m2, transpose_out=t4, activation=activation)
    @precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 0.6,
                        torch.half: 1e-1, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
    @dtypesIfMPS(torch.float32)
    @dtypesIfCUDA(*floating_and_complex_types_and(
        *[torch.bfloat16] if TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater) else []))
    @dtypes(*floating_and_complex_types_and(torch.bfloat16))
    @tf32_on_and_off(0.05)
    def test_addmm(self, device, dtype):
        """torch.addmm without an activation epilogue; see _test_addmm_impl."""
        self._test_addmm_impl(torch.addmm, None, device, dtype)
    @precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 0.6,
                        torch.half: 1e-1, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
    @dtypesIfCUDA(*floating_types_and(
        *[torch.bfloat16] if TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater) else []))
    @dtypes(*floating_types_and(torch.bfloat16))
    @tf32_on_and_off(0.05)
    def test_addmm_activation(self, device, dtype):
        """torch._addmm_activation with the fused "relu" epilogue; see _test_addmm_impl."""
        self._test_addmm_impl(torch._addmm_activation, "relu", device, dtype)
@dtypes(torch.float, torch.double)
@dtypesIfCUDA(*floating_and_complex_types())
@tf32_on_and_off(0.005)
def test_addmm_sizes(self, device, dtype):
for m in [0, 1, 25]:
for n in [0, 1, 10]:
for k in [0, 1, 8]:
M = torch.randn(n, m, device=device).to(dtype)
m1 = torch.randn(n, k, device=device).to(dtype)
m2 = torch.randn(k, m, device=device).to(dtype)
self._test_addmm_addmv(torch.addmm, M, m1, m2)
m1 = torch.randn(n, k + 1, device=device).to(dtype)
m2 = torch.randn(k, m, device=device).to(dtype)
self.assertRaisesRegex(RuntimeError, f"{n}x{k + 1}.*{k}x{m}", lambda: torch.addmm(M, m1, m2))
self.assertRaisesRegex(RuntimeError, f"{n}x{k + 1}.*{k}x{m}", lambda: torch.mm(m1, m2))
@dtypes(torch.half)
@onlyCUDA
def test_addmm_baddbmm_overflow(self, device, dtype):
orig = torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
inp = torch.zeros(128, 128, dtype=torch.half, device=device)
mat1 = torch.ones(128, 1000, dtype=torch.half, device=device) * 100
mat2 = torch.ones(1000, 128, dtype=torch.half, device=device) * 100
out = torch.addmm(inp, mat1, mat2, alpha=0.001, beta=0.)
# just check for no overflow on ROCM
if TEST_WITH_ROCM:
self.assertFalse(out.isinf().any())
else:
self.assertTrue((out == 10000.).all())
inp = torch.zeros(3, 128, 128, dtype=torch.half, device=device)
mat1 = torch.ones(3, 128, 1000, dtype=torch.half, device=device) * 100
mat2 = torch.ones(3, 1000, 128, dtype=torch.half, device=device) * 100
out = torch.baddbmm(inp, mat1, mat2, alpha=0.001, beta=0.)
if TEST_WITH_ROCM:
self.assertFalse(out.isinf().any())
else:
self.assertTrue((out == 10000.).all())
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = orig
@unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "cublas runtime error")
@onlyCUDA
def test_matmul_45724(self, device):
# https://github.com/pytorch/pytorch/issues/45724
a = torch.rand(65537, 22, 64, device=device, dtype=torch.half)
b = torch.rand(65537, 64, 22, device=device, dtype=torch.half)
c = torch.full((65537, 22, 22), math.nan, dtype=torch.half, device=device)
cpu_result = torch.matmul(a.cpu().float(), b.cpu().float()).cuda().half()
torch.matmul(a, b, out=c)
self.assertEqual(c, cpu_result)
    @slowTest
    @onlyNativeDeviceTypes
    # bfloat16 doesn't have sufficient precision to pass this test
    @dtypes(torch.float32, torch.float64, torch.int32, torch.int64, torch.cfloat, torch.cdouble)
    @dtypesIfCUDA(torch.float32, torch.float64, torch.cfloat, torch.cdouble)
    @tf32_on_and_off(0.01)
    def test_mm(self, device, dtype):
        """torch.mm against a naive triple-loop reference, over contiguous,
        transposed, zero-strided inputs and the out= variant."""
        def _test_mm(n, m, p, dtype, genf):
            # helper function
            def matrixmultiply(mat1, mat2):
                # O(n*m*p) reference multiply built from scalar indexing.
                n = mat1.size(0)
                m = mat1.size(1)
                p = mat2.size(1)
                res = torch.zeros(n, p, dtype=dtype, device=device)
                for i, j in iter_indices(res):
                    res[i, j] = sum(mat1[i, k] * mat2[k, j] for k in range(m))
                return res
            # contiguous case
            mat1 = genf(n, m)
            mat2 = genf(m, p)
            res = torch.mm(mat1, mat2)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)
            # non contiguous case 1
            mat1 = genf(n, m)
            mat2 = genf(p, m).t()
            res = torch.mm(mat1, mat2)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)
            # non contiguous case 2
            mat1 = genf(m, n).t()
            mat2 = genf(m, p)
            res = torch.mm(mat1, mat2)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)
            # non contiguous case 3
            mat1 = genf(m, n).t()
            mat2 = genf(p, m).t()
            res = torch.mm(mat1, mat2)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)
            # test with zero stride
            mat1 = genf(n, m)
            mat2 = genf(m, 1).expand(m, p)
            res = torch.mm(mat1, mat2)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)
            # explicitly exercise the _out variant in torch.mm().
            # contiguous case
            mat1 = genf(n, m)
            mat2 = genf(m, p)
            res = genf(n, p)
            torch.mm(mat1, mat2, out=res)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)
            # explicitly exercise the _out variant in torch.mm().
            # non contiguous case 3
            mat1 = genf(m, n).t()
            mat2 = genf(p, m).t()
            res = genf(n, p)
            torch.mm(mat1, mat2, out=res)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)
        # Input generators per dtype family.
        def genf_int(x, y):
            return torch.randint(0, 100, (x, y), dtype=dtype, device=device)
        def genf_bfloat(x, y):
            # scale down to limit bf16 rounding error against the reference
            return torch.randn(x, y, dtype=torch.float32, device=device).to(dtype) * 0.1
        def genf_float(x, y):
            return torch.randn(x, y, dtype=dtype, device=device)
        for (n, m, p) in [(20, 10, 15), (15, 20, 10), (25, 18, 10)]:
            if (dtype == torch.int32) or (dtype == torch.int64):
                genf = genf_int
            elif (dtype == torch.bfloat16):
                genf = genf_bfloat
            else:
                genf = genf_float
            _test_mm(n, m, p, dtype, genf)
    @onlyNativeDeviceTypes
    def test_mm_bmm_non_memory_dense(self, device):
        """mm/bmm with conjugate-view inputs that are not memory-dense.

        A lazily-conjugated sliced view (torch.conj) must give the same result
        as a materialized conjugate (torch.conj_physical), including into
        transposed out= buffers and with broadcast batch inputs.
        """
        def _slice(tensor, fn):
            # Step-2 slice makes the operand non-memory-dense.
            return fn(tensor)[..., ::2]
        A = torch.randn(3, 6, dtype=torch.cfloat, device=device)
        B = torch.randn(3, 3, dtype=torch.cfloat, device=device)
        out = torch.empty(3, 3, device=device, dtype=torch.complex64).t()
        # NOTE(review): out1 appears unused below — possibly leftover.
        out1 = torch.empty(3, 3, device=device, dtype=torch.complex64).t()
        A_conj = _slice(A, torch.conj)
        A_conj_physical = _slice(A, torch.conj_physical)
        self.assertEqual(torch.mm(A_conj, B, out=out), torch.mm(A_conj_physical, B, out=out))
        self.assertEqual(torch.mm(A_conj.t(), B, out=out), torch.mm(A_conj_physical.t(), B, out=out))
        # Batched (bmm) variants.
        Ab = torch.randn(2, 3, 6, dtype=torch.cfloat, device=device)
        Bb = torch.randn(2, 3, 3, dtype=torch.cfloat, device=device)
        Bb_ = torch.randn(1, 3, 3, dtype=torch.cfloat, device=device).expand(2, 3, 3)
        out_b = torch.empty(2, 3, 3, device=device, dtype=torch.complex64).mT
        Ab_conj = _slice(Ab, torch.conj)
        Ab_conj_physical = _slice(Ab, torch.conj_physical)
        def t_b(tensor):
            # Batch transpose (swap the last two dims).
            return tensor.mT
        self.assertEqual(torch.bmm(Ab_conj, Bb, out=out_b), torch.bmm(Ab_conj_physical, Bb, out=out_b))
        self.assertEqual(torch.bmm(t_b(Ab_conj), Bb, out=out_b), torch.bmm(t_b(Ab_conj_physical), Bb, out=out_b))
        # test broadcasting
        self.assertEqual(torch.bmm(Ab_conj, Bb_, out=out_b), torch.bmm(Ab_conj_physical, Bb_, out=out_b))
        self.assertEqual(torch.bmm(t_b(Ab_conj), Bb_, out=out_b), torch.bmm(t_b(Ab_conj_physical), Bb_, out=out_b))
@onlyNativeDeviceTypes
@dtypes(torch.float32, torch.float64)
def test_strided_mm_bmm(self, device, dtype):
# Tests strided view case with stride smaller than corresponding dimension size
x = torch.tensor([[1., 2., 3.], [4., 5., 6.]], dtype=dtype, device=device)
new_shape = [2, 2, 2]
new_stride = [3, 1, 1]
sx = torch.as_strided(x, size=new_shape, stride=new_stride)
torch_fn = lambda x: torch.bmm(x, x) # noqa: E731
np_fn = lambda x: np.matmul(x, x) # noqa: E731
self.compare_with_numpy(torch_fn, np_fn, sx)
torch_fn = lambda x: torch.mm(x, x) # noqa: E731
self.compare_with_numpy(torch_fn, np_fn, sx[0])
    @precisionOverride({torch.half: 0.05, torch.bfloat16: 0.05})
    @onlyNativeDeviceTypes
    @dtypes(*floating_and_complex_types_and(torch.bfloat16))
    @tf32_on_and_off(0.05)
    def test_bmm(self, device, dtype):
        """torch.bmm against a numpy reference over permuted, broadcast-expanded
        and zero-sized batched inputs, plus permuted NaN-prefilled out= buffers."""
        if self.device_type == 'cuda' and dtype is torch.bfloat16 and CUDA11OrLater and not SM53OrLater:
            # cuBLAS does not guarantee BFloat16 support on SM < 53.
            # So on PyTorch, we consider BFloat16 support on SM < 53 as
            # undefined behavior
            return
        batch_sizes = [1, 10]
        M, N, O = 23, 15, 12
        numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32
        is_supported = True
        if dtype == torch.bfloat16 and self.device_type == 'cuda':
            is_supported = TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater)
        if not is_supported:
            # Unsupported dtype/arch combinations must raise, not crash.
            for num_batches in batch_sizes:
                b1 = torch.randn(num_batches, M, N, device=device).to(dtype)
                b2 = torch.randn(num_batches, N, O, device=device).to(dtype)
                self.assertRaisesRegex(RuntimeError, "type|Type|not implemented|CUBLAS_STATUS_NOT_SUPPORTED",
                                       lambda: torch.bmm(b1, b2))
            return
        def invert_perm(p):
            # Inverse of a length-3 permutation, returned as a tuple.
            d = {x: i for i, x in enumerate(p)}
            return (d[0], d[1], d[2])
        def generate_inputs(num_batches):
            # transposed tensors
            for perm1, perm2 in itertools.product(itertools.permutations((0, 1, 2)), repeat=2):
                b1 = make_tensor((num_batches, M, N), dtype=dtype, device=device, low=-0.1, high=0.1)
                b2 = make_tensor((num_batches, N, O), dtype=dtype, device=device, low=-0.1, high=0.1)
                b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
                b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
                yield b1, b2
            # broadcasting tensors
            for b1, b2, b3, b4, b5, b6 in itertools.product((True, False), repeat=6):
                shape1 = (num_batches if b1 else 1, M if b2 else 1, N if b3 else 1)
                shape2 = (num_batches if b4 else 1, N if b5 else 1, O if b6 else 1)
                b1 = make_tensor(shape1, dtype=dtype, device=device, low=-0.1, high=0.1).expand(num_batches, M, N)
                b2 = make_tensor(shape2, dtype=dtype, device=device, low=-0.1, high=0.1).expand(num_batches, N, O)
                yield b1, b2
            # zero-sized tensors
            for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
                # z1 and z3 are shared between the two shapes so the pair
                # stays multiplication-compatible.
                shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
                shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
                b1 = torch.randn(shape1, dtype=dtype, device=device)
                b2 = torch.randn(shape2, dtype=dtype, device=device)
                yield b1, b2
        for num_batches in batch_sizes:
            for (b1, b2), perm3 in itertools.product(generate_inputs(num_batches), itertools.permutations((0, 1, 2))):
                res1 = torch.bmm(b1, b2)
                # NaN-prefilled, permuted out= buffer so stale values can't pass.
                res2 = torch.full((num_batches, M, O), math.nan, dtype=dtype, device=device) \
                    .permute(perm3).contiguous().permute(invert_perm(perm3))
                torch.bmm(b1, b2, out=res2)
                expect = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
                self.assertEqual(expect, res1)
                self.assertEqual(expect, res2)
                if self.device_type == 'cuda':
                    # check that mixed arguments are rejected
                    self.assertRaises(RuntimeError, lambda: torch.bmm(b1, b2.cpu()))
                    self.assertRaises(RuntimeError, lambda: torch.bmm(b1.cpu(), b2))
                    self.assertRaises(RuntimeError, lambda: torch.bmm(b1, b2, out=res2.cpu()))
def _test_addbmm_baddbmm(self, func, b1, b2, ref, out_tensor):
    """Shared checks for addbmm/baddbmm.

    Args:
        func: operation name, either "addbmm" or "baddbmm".
        b1, b2: batched matmul operands.
        ref: expected b1 @ b2 result (batch-reduced for addbmm).
        out_tensor: zero-initialized accumulator tensor.
    """
    # In-place with defaults beta=1, alpha=1: out accumulates ref
    getattr(out_tensor, func + "_")(b1, b2)
    self.assertEqual(out_tensor, ref)
    res3 = out_tensor.clone()

    # Deprecated positional-beta overload: func_(beta, b1, b2)
    with self.assertWarnsOnceRegex(
            UserWarning, f"This overload of {func}_ is deprecated"):
        getattr(out_tensor, func + "_")(1, b1, b2)
    # NOTE(fix): the original had a stray trailing comma here, which made the
    # assertion a dead 1-tuple expression; the comma is removed.
    self.assertEqual(out_tensor, ref * 2)
    getattr(res3, func + "_")(b1, b2, beta=1)
    self.assertEqual(out_tensor, res3)

    # Deprecated positional beta/alpha overload: func_(beta, alpha, b1, b2)
    with self.assertWarnsOnceRegex(
            UserWarning, f"This overload of {func}_ is deprecated"):
        getattr(out_tensor, func + "_")(1., .5, b1, b2)
    self.assertEqual(out_tensor, ref * 2.5)
    getattr(res3, func + "_")(b1, b2, beta=1., alpha=.5)
    self.assertEqual(out_tensor, res3)

    # Deprecated positional overload of the out-of-place op
    with self.assertWarnsOnceRegex(
            UserWarning, f"This overload of {func} is deprecated"):
        self.assertEqual(out_tensor, getattr(torch, func)(1, out_tensor, 0, b1, b2))

    res4 = getattr(torch, func)(out_tensor, b1, b2, beta=1, alpha=.5)
    # NOTE(fix): stray trailing comma removed here as well.
    self.assertEqual(res4, ref * 3)

    # beta=0 must ignore (not propagate) NaNs in the input tensor
    nan = torch.full_like(out_tensor, math.nan)
    res5 = getattr(torch, func)(nan, b1, b2, beta=0, alpha=1)
    self.assertEqual(res5, ref)

    if b1.is_complex():
        res6 = getattr(torch, func)(out_tensor, b1, b2, beta=.1j, alpha=.5j)
        self.assertEqual(res6, out_tensor * .1j + .5j * ref)
    else:
        res6 = getattr(torch, func)(out_tensor, b1, b2, beta=.1, alpha=.5)
        self.assertEqual(res6, out_tensor * .1 + .5 * ref)

    # out= variant with beta=0 must also ignore NaNs in the input
    res7 = torch.full_like(out_tensor, math.nan)
    getattr(torch, func)(nan, b1, b2, beta=0, out=res7)
    self.assertEqual(res7, ref)
@precisionOverride({torch.half: 0.05, torch.bfloat16: 0.05})
@onlyNativeDeviceTypes
@dtypes(*floating_and_complex_types_and(torch.bfloat16))
@tf32_on_and_off(0.05)
def test_addbmm(self, device, dtype):
    """torch.addbmm vs a NumPy batched-matmul-then-sum reference over
    transposed, broadcast-expanded and zero-sized operand layouts."""
    if self.device_type == 'cuda' and dtype is torch.bfloat16 and CUDA11OrLater and not SM53OrLater:
        # cuBLAS does not guarantee BFloat16 support on SM < 53.
        # So on PyTorch, we consider BFloat16 support on SM < 53 as
        # undefined behavior
        return
    num_batches = 2
    M, N, O = 16, 17, 18

    is_supported = True
    if dtype == torch.bfloat16:
        if self.device_type == 'cpu':
            self.precision = 1  # 43 vs 43.75
        else:
            is_supported = TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater)

    if not is_supported:
        # Unsupported configuration: addbmm is expected to raise, not compute.
        b1 = make_tensor((num_batches, M, N), dtype=dtype, device=device, low=-1, high=1)
        b2 = make_tensor((num_batches, N, O), dtype=dtype, device=device, low=-1, high=1)
        t = make_tensor((M, O), dtype=dtype, device=device, low=-1, high=1)
        self.assertRaisesRegex(RuntimeError, "type|Type|not implemented|CUBLAS_STATUS_NOT_SUPPORTED",
                               lambda: torch.addbmm(t, b1, b2))
        return

    def invert_perm(p):
        # inverse permutation: position of each axis within p
        d = {x: i for i, x in enumerate(p)}
        return (d[0], d[1], d[2])

    def generate_tensor():
        """Yield (b1, b2, ref, out_tensor) cases; ref is computed with NumPy
        (in float32 for bfloat16 inputs) and summed over the batch dim."""
        numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32
        # transposed tensors
        for perm1, perm2 in itertools.product(itertools.permutations((0, 1, 2)), repeat=2):
            for perm3 in itertools.permutations((0, 1)):
                b1 = make_tensor((num_batches, M, N), dtype=dtype, device=device, low=-1, high=1) * 0.1
                b2 = make_tensor((num_batches, N, O), dtype=dtype, device=device, low=-1, high=1) * 0.1
                b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
                b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
                ref = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()
                ).to(device=device, dtype=dtype).sum(0)
                # 2-D permutations are their own inverse, so applying perm3
                # twice round-trips the layout
                out_tensor = torch.zeros_like(ref).permute(perm3).contiguous().permute(perm3)
                yield b1, b2, ref, out_tensor
        # broadcasting tensors
        for s1, s2, s3, s4, s5, s6 in itertools.product((True, False), repeat=6):
            shape1 = (num_batches if s1 else 1, M if s2 else 1, N if s3 else 1)
            shape2 = (num_batches if s4 else 1, N if s5 else 1, O if s6 else 1)
            b1 = make_tensor(shape1, dtype=dtype, device=device, low=-1, high=1).expand(num_batches, M, N) * 0.1
            b2 = make_tensor(shape2, dtype=dtype, device=device, low=-1, high=1).expand(num_batches, N, O) * 0.1
            ref = torch.from_numpy(
                b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()
            ).to(device=device, dtype=dtype).sum(0)
            out_tensor = torch.zeros_like(ref)
            yield b1, b2, ref, out_tensor
        # zero-sized tensors (z1/z3 are shared between the two shapes so the
        # pair stays matmul-compatible)
        for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
            shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
            shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
            b1 = make_tensor(shape1, dtype=dtype, device=device, low=-1, high=1) * 0.1
            b2 = make_tensor(shape2, dtype=dtype, device=device, low=-1, high=1) * 0.1
            ref = torch.from_numpy(
                b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()
            ).to(device=device, dtype=dtype).sum(0)
            out_tensor = torch.zeros_like(ref)
            yield b1, b2, ref, out_tensor

    for b1, b2, ref, out_tensor in generate_tensor():
        self._test_addbmm_baddbmm("addbmm", b1, b2, ref, out_tensor)
@precisionOverride({torch.half: 0.1, torch.bfloat16: 0.5})
@onlyNativeDeviceTypes
@dtypes(*floating_and_complex_types_and(torch.bfloat16))
@tf32_on_and_off(0.05)
def test_baddbmm(self, device, dtype):
    """torch.baddbmm vs a NumPy batched-matmul reference over transposed,
    broadcast-expanded and zero-sized operand layouts."""
    if self.device_type == 'cuda' and dtype is torch.bfloat16 and CUDA11OrLater and not SM53OrLater:
        # cuBLAS does not guarantee BFloat16 support on SM < 53.
        # So on PyTorch, we consider BFloat16 support on SM < 53 as
        # undefined behavior
        return
    num_batches = 10
    M, N, O = 12, 8, 50

    is_supported = True
    if dtype == torch.bfloat16 and self.device_type == 'cuda':
        is_supported = TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater)

    if not is_supported:
        # Unsupported configuration: baddbmm is expected to raise, not compute.
        b1 = make_tensor((num_batches, M, N), dtype=dtype, device=device, low=-1, high=1)
        b2 = make_tensor((num_batches, N, O), dtype=dtype, device=device, low=-1, high=1)
        t = make_tensor((num_batches, M, O), dtype=dtype, device=device, low=-1, high=1)
        self.assertRaisesRegex(RuntimeError, "type|Type|not implemented|CUBLAS_STATUS_NOT_SUPPORTED",
                               lambda: torch.baddbmm(t, b1, b2))
        return

    def invert_perm(p):
        # inverse permutation: position of each axis within p
        d = {x: i for i, x in enumerate(p)}
        return (d[0], d[1], d[2])

    def generate_tensor():
        """Yield (b1, b2, ref, out_tensor) cases; ref is computed with NumPy
        (in float32 for bfloat16 inputs)."""
        numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32
        # transposed tensors
        for perm1, perm2, perm3 in itertools.product(itertools.permutations((0, 1, 2)), repeat=3):
            b1 = make_tensor((num_batches, M, N), dtype=dtype, device=device, low=-1, high=1)
            b2 = make_tensor((num_batches, N, O), dtype=dtype, device=device, low=-1, high=1)
            b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
            b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
            ref = torch.from_numpy(
                b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
            out_tensor = torch.zeros_like(ref)
            # exercise every memory layout of the out tensor too
            out_tensor = out_tensor.permute(perm3).contiguous().permute(invert_perm(perm3))
            yield b1, b2, ref, out_tensor
        # broadcasting tensors
        for s1, s2, s3, s4, s5, s6 in itertools.product((True, False), repeat=6):
            shape1 = (num_batches if s1 else 1, M if s2 else 1, N if s3 else 1)
            shape2 = (num_batches if s4 else 1, N if s5 else 1, O if s6 else 1)
            b1 = make_tensor(shape1, dtype=dtype, device=device, low=-1, high=1).expand(num_batches, M, N)
            b2 = make_tensor(shape2, dtype=dtype, device=device, low=-1, high=1).expand(num_batches, N, O)
            ref = torch.from_numpy(
                b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
            out_tensor = torch.zeros_like(ref)
            yield b1, b2, ref, out_tensor
        # zero-sized tensors (z1/z3 are shared between the two shapes so the
        # pair stays matmul-compatible)
        for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
            shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
            shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
            b1 = make_tensor(shape1, dtype=dtype, device=device, low=-2, high=2)
            b2 = make_tensor(shape2, dtype=dtype, device=device, low=-2, high=2)
            ref = torch.from_numpy(
                b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
            out_tensor = torch.zeros_like(ref)
            yield b1, b2, ref, out_tensor

    for b1, b2, ref, out_tensor in generate_tensor():
        self._test_addbmm_baddbmm("baddbmm", b1, b2, ref, out_tensor)
@precisionOverride({torch.float32: 5e-3, torch.complex64: 1e-3})
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_pinverse(self, device, dtype):
    """Check torch.pinverse against the Moore-Penrose conditions, and
    against the exact inverse for invertible matrices."""
    make_fullrank = make_fullrank_matrices_with_distinct_singular_values
    make_arg = partial(make_fullrank, device=device, dtype=dtype)

    def run_test(M):
        # Testing against definition for pseudo-inverses
        MPI = torch.pinverse(M)
        MPI_ = MPI.cpu().numpy()
        M_ = M.cpu().numpy()
        if M.numel() > 0:
            # Moore-Penrose conditions: M M+ M == M, M+ M M+ == M+,
            # and both products are Hermitian.
            self.assertEqual(M_, np.matmul(np.matmul(M_, MPI_), M_))
            self.assertEqual(MPI_, np.matmul(np.matmul(MPI_, M_), MPI_))
            self.assertEqual(np.matmul(M_, MPI_), np.matmul(M_, MPI_).swapaxes(-2, -1).conj())
            self.assertEqual(np.matmul(MPI_, M_), np.matmul(MPI_, M_).swapaxes(-2, -1).conj())
        else:
            # Empty input: only the (swapped) output shape can be verified.
            self.assertEqual(M.shape, MPI.shape[:-2] + (MPI.shape[-1], MPI.shape[-2]))
    for sizes in [(5, 5), (3, 5, 5), (3, 7, 5, 5),  # square matrices
                  (3, 2), (5, 3, 2), (7, 5, 3, 2),  # fat matrices
                  (2, 3), (5, 2, 3), (7, 5, 2, 3),  # thin matrices
                  (0, 0), (0, 2), (2, 0), (3, 0, 0), (0, 3, 0), (0, 0, 3)]:  # zero numel matrices
        M = torch.randn(*sizes, dtype=dtype, device=device)
        run_test(M)

    # Test inverse and pseudo-inverse for invertible matrix
    for sizes in [(5, 5), (3, 5, 5), (3, 7, 5, 5)]:
        matsize = sizes[-1]
        batchdims = sizes[:-2]
        M = make_arg(*batchdims, matsize, matsize)
        self.assertEqual(torch.eye(matsize, dtype=dtype, device=device).expand(sizes), M.pinverse().matmul(M),
                         atol=1e-7, rtol=0, msg='pseudo-inverse for invertible matrix')
@skipCPUIfNoLapack
@skipCUDAIfNoMagmaAndNoCusolver
@dtypes(torch.double, torch.cdouble)
def test_matrix_power_non_negative(self, device, dtype):
    """Compare torch.linalg.matrix_power with exponents 0..7 against NumPy."""
    def check(*size):
        mat = make_tensor(size, dtype=dtype, device=device)
        for exponent in range(8):
            actual = torch.linalg.matrix_power(mat, exponent)
            expected = np.linalg.matrix_power(mat.cpu().numpy(), exponent)
            self.assertEqual(actual.cpu(), torch.from_numpy(expected))

    # empty, trivial, plain, empty-batched and batched square inputs
    for shape in [(0, 0), (1, 1), (5, 5), (0, 3, 3), (2, 3, 3)]:
        check(*shape)
@skipCPUIfNoLapack
@skipCUDAIfNoMagmaAndNoCusolver
@dtypes(torch.double, torch.cdouble)
def test_matrix_power_negative(self, device, dtype):
    """Compare torch.linalg.matrix_power with negative exponents against NumPy."""
    make_fullrank = make_fullrank_matrices_with_distinct_singular_values
    make_arg = partial(make_fullrank, device=device, dtype=dtype)

    def check(*size):
        # full-rank input so the inverse implied by a negative power exists
        mat = make_arg(*size)
        for exponent in range(-7, 0):
            actual = torch.linalg.matrix_power(mat, exponent)
            expected = np.linalg.matrix_power(mat.cpu().numpy(), exponent)
            self.assertEqual(actual.cpu(), torch.from_numpy(expected))

    for shape in [(0, 0), (5, 5), (2, 0, 0), (0, 3, 3), (2, 3, 3), (2, 3, 5, 5)]:
        check(*shape)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.complex64)
def test_linalg_matrix_exp_utils(self, device, dtype):
    """Check torch._compute_linear_combination against an explicit
    broadcast-multiply-and-sum reference, including the `out=` variant."""
    # test linear combination
    def run_test(coeff_shape, data_shape):
        coeffs = torch.rand(*coeff_shape, device=device, dtype=torch.float)
        x = torch.rand(coeff_shape[1], *data_shape, device=device, dtype=dtype)

        actual = torch._compute_linear_combination(x, coeffs)
        broadcast_coeffs = coeffs.view(*coeff_shape, *([1] * len(data_shape)))
        reference = (x.unsqueeze(0) * broadcast_coeffs).sum(1)
        self.assertEqual(actual, reference, atol=1e-5, rtol=0.0)

        # check `out=` version starting from zeros
        out_zero = torch.zeros(coeff_shape[0], *data_shape, device=device, dtype=dtype)
        torch._compute_linear_combination(x, coeffs, out=out_zero)
        self.assertEqual(actual, out_zero, atol=1e-5, rtol=0.0)

        # `out=` adds into the pre-existing values (result - 1 == actual)
        out_ones = torch.ones(coeff_shape[0], *data_shape, device=device, dtype=dtype)
        torch._compute_linear_combination(x, coeffs, out=out_ones)
        self.assertEqual(actual, out_ones - 1.0, atol=1e-5, rtol=0.0)

        # same accumulation check, expressed via a snapshot of the out tensor
        out_acc = torch.ones(coeff_shape[0], *data_shape, device=device, dtype=dtype)
        out_acc_snapshot = out_acc.clone()
        torch._compute_linear_combination(x, coeffs, out=out_acc)
        self.assertEqual(actual, out_acc - out_acc_snapshot, atol=1e-5, rtol=0.0)

    for coeff_shape, data_shape in [
        ([1, 3], [2, 2]),
        ([3, 1], [2, 2]),
        ([1, 10], [10, 10]),
        ([10, 1], [10, 10]),
        ([5, 3], [2, 2]),
        ([5, 3], [100, 100]),
        ([3, 4], [3, 3, 3]),
        ([3, 4], [3, 3, 3, 3]),
    ]:
        run_test(coeff_shape, data_shape)
@onlyCPU
@skipCPUIfNoLapack
@dtypes(torch.complex64)
def test_linalg_matrix_exp_no_warnings(self, device, dtype):
    # this tests https://github.com/pytorch/pytorch/issues/80948
    with freeze_rng_state():
        torch.manual_seed(42)
        mats = 0.5 * torch.randn(10, 3, 3, dtype=dtype, device=device)
        # symmetrize the batch
        mats = 0.5 * (mats.transpose(-1, -2) + mats)
        with warnings.catch_warnings(record=True) as caught:
            # writing matrix_exp of the imaginary part back into .imag
            # must not emit any warning
            mats.imag = torch.matrix_exp(mats.imag)
            self.assertFalse(len(caught))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.complex64, torch.complex128)
def test_linalg_matrix_exp_boundary_cases(self, device, dtype):
    """matrix_exp input validation plus the trivial 1x1 case."""
    expm = torch.linalg.matrix_exp
    # integer dtypes are rejected
    with self.assertRaisesRegex(RuntimeError, "Expected a floating point or complex tensor"):
        expm(torch.randn(3, 3).type(torch.int))
    # 1-D inputs are rejected
    with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
        expm(torch.randn(3))
    # non-square trailing dimensions are rejected
    with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
        expm(torch.randn(3, 2, 1))
    # check 1x1 matrices: matrix_exp reduces to elementwise exp
    batch = torch.randn(3, 3, 1, 1)
    self.assertEqual(expm(batch), batch.exp())
@slowTest
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_linalg_matrix_exp_analytic(self, device, dtype):
    """Check matrix_exp against the analytic answer Q diag(exp(d)) Q^-1 for
    diagonalizable inputs, at norms bracketing each degree threshold."""
    expm = torch.linalg.matrix_exp
    # check zero matrix: exp(0) == I exactly
    x = torch.zeros(20, 20, dtype=dtype, device=device)
    self.assertTrue((expm(x) == torch.eye(20, 20, dtype=dtype, device=device)).all().item())

    def normalize_to_1_operator_norm(sample, desired_norm):
        # rescale so the induced 1-norm (max column abs-sum) equals desired_norm
        sample_norm, _ = sample.abs().sum(-2).max(-1)
        sample_to_1_norm = sample / sample_norm.unsqueeze(-1).unsqueeze(-1)
        return sample_to_1_norm * desired_norm

    def gen_good_cond_number_matrices(*n):
        """
        Generates a diagonally-dominant matrix
        with the eigenvalues centered at 1
        and the radii at most (n[-1] - 1) / (n[-2] ** 2)
        """
        identity = torch.eye(n[-2], n[-1], dtype=dtype, device=device).expand(*n)
        x = torch.rand(*n, dtype=dtype, device=device) / (n[-1] ** 2)
        x = (x - x * identity) + identity
        return x

    def run_test(*n):
        # NOTE(review): `thetas` presumably mirror the norm thresholds at
        # which the implementation switches approximation degree -- confirm
        # against the matrix_exp kernel if they drift.
        if dtype == torch.float:
            thetas = [
                1.192092800768788e-07,  # deg 1
                5.978858893805233e-04,  # deg 2
                5.116619363445086e-02,  # deg 4
                5.800524627688768e-01,  # deg 8
                1.461661507209034e+00,  # deg 12
                3.010066362817634e+00  # deg 18
            ]
        else:  # if torch.double
            thetas = [
                2.220446049250313e-16,  # deg 1
                2.580956802971767e-08,  # deg 2
                3.397168839976962e-04,  # deg 4
                4.991228871115323e-02,  # deg 8
                2.996158913811580e-01,  # deg 12
                1.090863719290036e+00  # deg 18
            ]

        # generate input: x = Q diag(d) Q^-1 with a well-conditioned Q
        q = gen_good_cond_number_matrices(*n)
        q_ = q.cpu().numpy()
        qinv = torch.inverse(q)
        qinv_ = qinv.cpu().numpy()
        d = torch.randn(n[:-1], dtype=dtype, device=device)
        x = torch.from_numpy(
            np.matmul(q_, np.matmul(torch.diag_embed(d).cpu().numpy(), qinv_))).to(device)
        x_norm, _ = x.abs().sum(-2).max(-1)

        # test simple analytic whatever norm generated
        mexp = expm(x)
        mexp_analytic = np.matmul(
            q_,
            np.matmul(
                torch.diag_embed(d.exp()).cpu().numpy(),
                qinv_
            )
        )
        self.assertEqual(mexp, mexp_analytic, atol=1e-3, rtol=0.0)

        # generate norms to test different degree expansions
        sample_norms = []
        for i in range(len(thetas) - 1):
            sample_norms.append(0.5 * (thetas[i] + thetas[i + 1]))
        sample_norms = [thetas[0] / 2] + sample_norms + [thetas[-1] * 2]

        # rescale the same matrix to each target norm; the eigenvalues
        # rescale by the same factor, so the analytic answer is known
        for sample_norm in sample_norms:
            x_normalized = normalize_to_1_operator_norm(x, sample_norm)
            mexp = expm(x_normalized)
            mexp_analytic = np.matmul(
                q_,
                np.matmul(
                    torch.diag_embed((d / x_norm.unsqueeze(-1) * sample_norm).exp()).cpu().numpy(),
                    qinv_
                )
            )
            self.assertEqual(mexp, mexp_analytic, atol=1e-3, rtol=0.0)

    # single matrix
    run_test(2, 2)
    run_test(3, 3)
    run_test(4, 4)
    run_test(5, 5)
    run_test(100, 100)
    run_test(200, 200)

    # small batch of matrices
    run_test(3, 2, 2)
    run_test(3, 3, 3)
    run_test(3, 4, 4)
    run_test(3, 5, 5)
    run_test(3, 100, 100)
    run_test(3, 200, 200)

    # large batch of matrices
    run_test(3, 3, 2, 2)
    run_test(3, 3, 3, 3)
    run_test(3, 3, 4, 4)
    run_test(3, 3, 5, 5)
    run_test(3, 3, 100, 100)
    run_test(3, 3, 200, 200)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double)
def test_linalg_matrix_exp_batch(self, device, dtype):
    """Batched matrix_exp must match matrix_exp applied per matrix."""
    def run_test(*n):
        batch = torch.zeros(n, dtype=dtype, device=device).view(-1, n[-2], n[-1])
        count = batch.size(0)
        # draw each matrix individually, then copy into the batch
        singles = []
        for _ in range(count):
            singles.append(torch.randn(n[-2], n[-1], dtype=dtype, device=device))
        for idx in range(count):
            batch[idx, ...] = singles[idx]
        per_matrix = (torch.linalg.matrix_exp(m) for m in singles)
        batched = torch.linalg.matrix_exp(batch)
        for idx, expected in enumerate(per_matrix):
            self.assertEqual(batched[idx, ...], expected)

    # small batch of matrices
    for shape in [(3, 2, 2), (3, 3, 3), (3, 4, 4), (3, 5, 5)]:
        run_test(*shape)
    # large batch of matrices
    for shape in [(3, 3, 2, 2), (3, 3, 3, 3), (3, 3, 4, 4), (3, 3, 5, 5)]:
        run_test(*shape)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_linalg_matrix_exp_compare_with_taylor(self, device, dtype):
    """Compare matrix_exp against a NumPy scale-and-square Taylor
    approximation at norms bracketing each degree threshold."""
    def normalize_to_1_operator_norm(sample, desired_norm):
        # rescale so the induced 1-norm (max column abs-sum) equals desired_norm
        sample_norm, _ = sample.abs().sum(-2).max(-1)
        sample_to_1_norm = sample / sample_norm.unsqueeze(-1).unsqueeze(-1)
        return sample_to_1_norm * desired_norm

    def gen_good_cond_number_matrices(*n):
        """
        Generates a diagonally-dominant matrix
        with the eigenvalues centered at 1
        and the radii at most (n[-1] - 1) / (n[-2] ** 2)
        """
        identity = torch.eye(n[-2], n[-1], dtype=dtype, device=device).expand(*n)
        x = torch.rand(*n, dtype=dtype, device=device) / (n[-1] ** 2)
        x = (x - x * identity) + identity
        return x

    def get_taylor_approximation(a, deg):
        # truncated Taylor series of exp(a) up to degree `deg`, in NumPy
        a_ = a.cpu().numpy()
        identity = torch.eye(a.size(-2), a.size(-1), dtype=dtype, device=device).expand_as(a)
        res = identity.cpu().numpy()
        taylor_term = identity.cpu().numpy()

        for i in range(1, deg + 1):
            taylor_term = np.matmul(a_, taylor_term) / i
            res = res + taylor_term

        return res

    def scale_square(a, deg):
        # scaling-and-squaring: for large inputs compute exp(a / 2^s) with a
        # high-degree Taylor series, then square s times
        if a.abs().pow(2).sum().sqrt() < 1.0:
            return get_taylor_approximation(a, 12)
        else:
            s = int(torch.log2(a.abs().pow(2).sum().sqrt()).ceil().item())
            b = a / (2 ** s)
            b = get_taylor_approximation(b, 18)
            for _ in range(s):
                b = np.matmul(b, b)
            return torch.from_numpy(b).to(a.device)

    def run_test(*n):
        degs = [1, 2, 4, 8, 12, 18]
        # NOTE(review): `thetas` presumably mirror the norm thresholds at
        # which the implementation switches approximation degree -- confirm
        # against the matrix_exp kernel if they drift.
        if dtype == torch.float:
            thetas = [
                1.192092800768788e-07,  # deg 1
                5.978858893805233e-04,  # deg 2
                5.116619363445086e-02,  # deg 4
                5.800524627688768e-01,  # deg 8
                1.461661507209034e+00,  # deg 12
                3.010066362817634e+00  # deg 18
            ]
        else:  # if torch.double
            thetas = [
                2.220446049250313e-16,  # deg 1
                2.580956802971767e-08,  # deg 2
                3.397168839976962e-04,  # deg 4
                4.991228871115323e-02,  # deg 8
                2.996158913811580e-01,  # deg 12
                1.090863719290036e+00  # deg 18
            ]

        # generate norms to test different degree expansions
        sample_norms = []
        for i in range(len(thetas) - 1):
            sample_norms.append(0.5 * (thetas[i] + thetas[i + 1]))
        sample_norms = [thetas[0] / 2] + sample_norms + [thetas[-1] * 2]
        degs = [degs[0]] + degs

        for sample_norm, deg in zip(sample_norms, degs):
            x = gen_good_cond_number_matrices(*n)
            x = normalize_to_1_operator_norm(x, sample_norm)
            mexp = torch.linalg.matrix_exp(x)
            mexp_taylor = scale_square(x, deg)
            self.assertEqual(mexp, mexp_taylor, atol=1e-2, rtol=0.0)

    # single matrix
    run_test(2, 2)
    run_test(3, 3)
    run_test(4, 4)
    run_test(5, 5)

    # small batch of matrices
    run_test(3, 2, 2)
    run_test(3, 3, 3)
    run_test(3, 4, 4)
    run_test(3, 5, 5)

    # large batch of matrices
    run_test(3, 3, 2, 2)
    run_test(3, 3, 3, 3)
    run_test(3, 3, 4, 4)
    run_test(3, 3, 5, 5)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                    torch.float64: 1e-8, torch.complex128: 1e-8})
def test_slogdet(self, device, dtype):
    """torch.linalg.slogdet vs np.linalg.slogdet over batches of matrices
    with assorted characteristics, plus the out= variant."""
    from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
                                                      random_hermitian_pd_matrix, random_square_matrix_of_rank)
    # mat_chars denotes matrix characteristics
    # possible values are: hermitian, hermitian_psd, hermitian_pd, singular, non_singular
    def run_test(matsize, batchdims, mat_chars):
        num_matrices = np.prod(batchdims)
        list_of_matrices = []
        if num_matrices != 0:
            for idx in range(num_matrices):
                # cycle through the requested characteristics
                mat_type = idx % len(mat_chars)
                if mat_chars[mat_type] == 'hermitian':
                    list_of_matrices.append(random_hermitian_matrix(matsize, dtype=dtype, device=device))
                elif mat_chars[mat_type] == 'hermitian_psd':
                    list_of_matrices.append(random_hermitian_psd_matrix(matsize, dtype=dtype, device=device))
                elif mat_chars[mat_type] == 'hermitian_pd':
                    list_of_matrices.append(random_hermitian_pd_matrix(matsize, dtype=dtype, device=device))
                elif mat_chars[mat_type] == 'singular':
                    list_of_matrices.append(torch.ones(matsize, matsize, dtype=dtype, device=device))
                elif mat_chars[mat_type] == 'non_singular':
                    list_of_matrices.append(random_square_matrix_of_rank(matsize, matsize, dtype=dtype, device=device))
            full_tensor = torch.stack(list_of_matrices, dim=0).reshape(batchdims + (matsize, matsize))
        else:
            # empty batch: content is irrelevant, only shapes are exercised
            full_tensor = torch.randn(*batchdims, matsize, matsize, dtype=dtype, device=device)

        actual_value = torch.linalg.slogdet(full_tensor)
        expected_value = np.linalg.slogdet(full_tensor.cpu().numpy())
        self.assertEqual(expected_value[0], actual_value[0], atol=self.precision, rtol=self.precision)
        self.assertEqual(expected_value[1], actual_value[1], atol=self.precision, rtol=self.precision)

        # test out=variant
        sign_out = torch.empty_like(actual_value[0])
        logabsdet_out = torch.empty_like(actual_value[1])
        ans = torch.linalg.slogdet(full_tensor, out=(sign_out, logabsdet_out))
        # the returned tuple must alias the provided out tensors
        self.assertEqual(ans[0], sign_out)
        self.assertEqual(ans[1], logabsdet_out)
        self.assertEqual(sign_out, actual_value[0])
        self.assertEqual(logabsdet_out, actual_value[1])

    for matsize, batchdims in itertools.product([0, 3, 5], [(0,), (3,), (5, 3)]):
        run_test(matsize, batchdims, mat_chars=['hermitian_pd'])
        run_test(matsize, batchdims, mat_chars=['singular'])
        run_test(matsize, batchdims, mat_chars=['non_singular'])
        run_test(matsize, batchdims, mat_chars=['hermitian', 'hermitian_pd', 'hermitian_psd'])
        run_test(matsize, batchdims, mat_chars=['singular', 'non_singular'])
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_slogdet_errors_and_warnings(self, device, dtype):
    """slogdet input validation, the out=-resize warning, and cross-device
    out= rejection."""
    # slogdet requires the input to be a square matrix or batch of square matrices
    a = torch.randn(2, 3, device=device, dtype=dtype)
    with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
        torch.linalg.slogdet(a)

    # slogdet requires the input to be at least 2 dimensional tensor
    a = torch.randn(2, device=device, dtype=dtype)
    with self.assertRaisesRegex(RuntimeError, r'must have at least 2 dimensions'):
        torch.linalg.slogdet(a)

    # low-precision dtypes (e.g. bfloat16) are rejected
    a = torch.randn(2, 2, device=device, dtype=torch.bfloat16)
    with self.assertRaisesRegex(RuntimeError, r'Low precision dtypes not supported'):
        torch.linalg.slogdet(a)

    # if non-empty out tensor with wrong shape is passed a warning is given
    a = torch.randn(2, 3, 3, device=device, dtype=dtype)
    sign_out = torch.empty(1, device=device, dtype=dtype)
    # logabsdet is real-valued even for complex inputs
    real_dtype = a.real.dtype if dtype.is_complex else dtype
    logabsdet_out = torch.empty(1, device=device, dtype=real_dtype)
    with warnings.catch_warnings(record=True) as w:
        # Trigger warning
        torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))
        # Check warning occurs
        self.assertEqual(len(w), 1)
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

    # device should match
    if torch.cuda.is_available():
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        sign_out = torch.empty(0, device=wrong_device, dtype=dtype)
        logabsdet_out = torch.empty(0, device=wrong_device, dtype=real_dtype)
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))
# NOTE(fix): the original compared version components as strings
# (split(".") < ["11", "3"]), which orders "9" after "11"; compare as ints.
@skipCUDAIf(torch.version.cuda is not None
            and [int(v) for v in torch.version.cuda.split(".")[:2]] < [11, 3], "There's a bug in cuSOLVER < 11.3")
# FIXME One of the backends of lu_factor fails in windows. I haven't investigated which or why
# https://github.com/pytorch/pytorch/issues/75225
@unittest.skipIf(IS_WINDOWS, "Skipped on Windows!")
@skipCUDAIfNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_det_logdet_slogdet(self, device, dtype):
    """Cross-check det/logdet/slogdet/linalg.slogdet against a NumPy
    reference, under row/column scaling, duplication and exchange, which
    transform the determinant in known ways."""
    def reference_slogdet(M):
        # NumPy ground truth, returned as tensors matching M
        sdet, logabsdet = np.linalg.slogdet(M.detach().cpu().numpy())
        return M.new_tensor(sdet), M.new_tensor(logabsdet)

    def test_single_det(M, target, desc):
        target_sdet, target_logabsdet = target

        det = M.det()
        logdet = M.logdet()
        sdet, logabsdet = M.slogdet()
        linalg_sdet, linalg_logabsdet = torch.linalg.slogdet(M)

        # Test det
        self.assertEqual(det, target_sdet * target_logabsdet.exp(),
                         atol=1e-6, rtol=0, msg='{} (det)'.format(desc))

        # Test slogdet
        # Compare the overall value rather than individual parts because of
        # precision issues when det is near zero.
        self.assertEqual(sdet * logabsdet.exp(), target_sdet * target_logabsdet.exp(),
                         atol=1e-6, rtol=0, msg='{} (slogdet)'.format(desc))
        self.assertEqual(linalg_sdet * linalg_logabsdet.exp(), target_sdet * target_logabsdet.exp(),
                         atol=1e-6, rtol=0, msg='{} (linalg_slogdet)'.format(desc))

        # Test logdet
        # Compare logdet against our own pytorch slogdet because they should
        # be consistent, while it may behave slightly differently with other
        # slogdet implementations when det is near zero due to precision
        # issues.
        if sdet.item() < 0:
            # logdet of a negative-determinant matrix is NaN (NaN != NaN)
            self.assertTrue(logdet.item() != logdet.item(), '{} (logdet negative case)'.format(desc))
        else:
            self.assertEqual(logdet.exp(), target_logabsdet.exp(),
                             atol=1e-6, rtol=0, msg='{} (logdet non-negative case)'.format(desc))

    eye = torch.eye(5, dtype=dtype, device=device)
    test_single_det(eye, (torch.ones((), dtype=dtype, device=device), torch.zeros((), dtype=dtype, device=device)), 'identity')
    # Testing bug in #34061 (https://github.com/pytorch/pytorch/issues/34061)
    for n in range(250, 551, 100):
        mat = torch.randn(n, n, dtype=dtype, device=device)
        # NOTE(fix): torch.qr is deprecated; torch.linalg.qr computes the same
        # reduced decomposition.
        q, _ = torch.linalg.qr(mat)
        ref_det, ref_logabsdet = reference_slogdet(q)
        test_single_det(q, (ref_det, ref_logabsdet), 'orthogonal')

    def test(M):
        assert M.size(0) >= 5, 'this helper fn assumes M to be at least 5x5'
        M = M.to(device)

        ref_M_sdet, ref_M_logabsdet = reference_slogdet(M)

        test_single_det(M, (ref_M_sdet, ref_M_logabsdet), 'basic')
        if ref_M_logabsdet.exp().item() >= 1e-6:  # skip singular
            M_inv = M.inverse()
            test_single_det(M_inv, reference_slogdet(M_inv), 'inverse')

        test_single_det(M, (ref_M_sdet, ref_M_logabsdet), 'transpose')

        # scaling one row/column scales det by the same factor (sign flips
        # for negative factors, det becomes 0 for factor 0)
        for x in [0, 2, 4]:
            for scale in [-2, -0.1, 0, 10]:
                if scale > 0:
                    target = ref_M_sdet, ref_M_logabsdet + math.log(scale)
                elif scale == 0:
                    target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)
                else:
                    target = ref_M_sdet.neg(), ref_M_logabsdet + math.log(-scale)

                # dim 0
                M_clone = M.clone()
                M_clone[:, x] *= scale
                test_single_det(M_clone, target, 'scale a row')
                # dim 1
                M_clone = M.clone()
                M_clone[x, :] *= scale
                test_single_det(M_clone, target, 'scale a column')

        for x1, x2 in [(0, 3), (4, 1), (3, 2)]:
            assert x1 != x2, 'x1 and x2 needs to be different for this test'
            # duplicated rows/columns make the matrix singular
            target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)
            # dim 0
            M_clone = M.clone()
            M_clone[:, x2] = M_clone[:, x1]
            test_single_det(M_clone, target, 'two rows are same')
            # dim 1
            M_clone = M.clone()
            M_clone[x2, :] = M_clone[x1, :]
            test_single_det(M_clone, target, 'two columns are same')

            for scale1, scale2 in [(0.3, -1), (0, 2), (10, 0.1)]:
                # row exchange with scaling: det scales by -scale1*scale2
                det_scale = scale1 * scale2 * -1
                if det_scale > 0:
                    target = ref_M_sdet, ref_M_logabsdet + math.log(det_scale)
                elif det_scale == 0:
                    target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)
                else:
                    target = ref_M_sdet.neg(), ref_M_logabsdet + math.log(-det_scale)

                # dim 0
                M_clone = M.clone()
                t = M_clone[:, x1] * scale1
                M_clone[:, x1] += M_clone[:, x2] * scale2
                M_clone[:, x2] = t
                test_single_det(M_clone, target, 'exchanging rows')
                # dim 1
                M_clone = M.clone()
                t = M_clone[x1, :] * scale1
                M_clone[x1, :] += M_clone[x2, :] * scale2
                M_clone[x2, :] = t
                test_single_det(M_clone, target, 'exchanging columns')

    def get_random_mat_scale(n):
        # For matrices with values i.i.d. with 0 mean, unit variance, and
        # subexponential tail, we have:
        #   E[log det(A^2)] \approx log((n-1)!)
        #
        # Notice:
        #   log Var[det(A)] = log E[det(A^2)] >= E[log det(A^2)]
        #
        # So:
        #   stddev[det(A)] >= sqrt( (n-1)! )
        #
        # We use this as an intuitive guideline to scale random generated
        # matrices so our closeness tests can work more robustly:
        #   scale by sqrt( (n-1)! )^(-1/n) = ( (n-1)! )^(-1/(2n))
        #
        # source: https://arxiv.org/pdf/1112.0752.pdf
        # TODO: technically we need subexponential distn for this to hold,
        #       but we mostly use gaussian entries below. Consider switching
        #       to Chi-sq if this turns out not stable enough, since Chi-sq
        #       is easy enough to sample from.
        return math.factorial(n - 1) ** (-1.0 / (2 * n))

    for n in [5, 10, 25]:
        scale = get_random_mat_scale(n)
        test(torch.randn(n, n, dtype=dtype, device=device) * scale)
        r = torch.randn(n, n, dtype=dtype, device=device) * scale
        # symmetric psd
        test(r.mm(r.t()))
        # symmetric pd
        r = torch.randn(n, n, dtype=dtype, device=device) * scale
        test(r.mm(r.t()) + torch.eye(n, dtype=dtype, device=device) * 1e-6)
        # symmetric
        r = torch.randn(n, n, dtype=dtype, device=device) * scale
        for i in range(n):
            for j in range(i):
                r[i, j] = r[j, i]
        test(r)
        # non-contiguous
        test((torch.randn(n, n, n + 1, dtype=dtype, device=device) * scale)[:, 2, 1:])
        # det = 0: zero out the smallest singular value
        # NOTE(review): Tensor.svd is deprecated in favor of torch.linalg.svd;
        # left as-is because linalg.svd returns V^H rather than V.
        r = torch.randn(n, n, dtype=dtype, device=device) * scale
        u, s, v = r.svd()
        if reference_slogdet(u)[0] < 0:
            u = -u
        if reference_slogdet(v)[0] < 0:
            v = -v
            s[0] *= -1
        s[-1] = 0
        test(u.mm(s.diag()).mm(v))

    # Small values to test numerical stability. Note that we don't scale
    # this matrix.
    r = torch.randn(512, 512, dtype=dtype, device=device)
    u, s, v = r.svd()
    s.fill_(1. / (100 * s.numel()))
    test(u.mm(s.diag()).mm(v))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_det_logdet_slogdet_batched(self, device, dtype):
    """Batched det/logdet/slogdet must agree with the per-matrix results."""
    from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
                                                      random_symmetric_pd_matrix, random_square_matrix_of_rank)
    # mat_chars denotes matrix characteristics
    # possible values are: sym, sym_psd, sym_pd, sing, non_sing
    def run_test(matsize, batchdims, mat_chars):
        num_matrices = reduce(lambda x, y: x * y, batchdims, 1)
        list_of_matrices = []

        for idx in range(num_matrices):
            # cycle through the requested characteristics
            mat_type = idx % len(mat_chars)
            if mat_chars[mat_type] == 'sym':
                list_of_matrices.append(random_symmetric_matrix(matsize, dtype=dtype, device=device))
            elif mat_chars[mat_type] == 'sym_psd':
                list_of_matrices.append(random_symmetric_psd_matrix(matsize, dtype=dtype, device=device))
            elif mat_chars[mat_type] == 'sym_pd':
                list_of_matrices.append(random_symmetric_pd_matrix(matsize, dtype=dtype, device=device))
            elif mat_chars[mat_type] == 'sing':
                list_of_matrices.append(torch.ones(matsize, matsize, dtype=dtype, device=device))
            elif mat_chars[mat_type] == 'non_sing':
                list_of_matrices.append(random_square_matrix_of_rank(matsize, matsize, dtype=dtype, device=device))
        full_tensor = torch.stack(list_of_matrices, dim=0).reshape(batchdims + (matsize, matsize))
        # Scaling adapted from `get_random_mat_scale` in _test_det_logdet_slogdet
        full_tensor *= (math.factorial(matsize - 1) ** (-1.0 / (2 * matsize)))

        for fn in [torch.det, torch.logdet, torch.slogdet, torch.linalg.slogdet]:
            expected_value = []
            actual_value = fn(full_tensor)
            for full_idx in itertools.product(*map(lambda x: list(range(x)), batchdims)):
                expected_value.append(fn(full_tensor[full_idx]))

            if fn == torch.slogdet or fn == torch.linalg.slogdet:
                # slogdet variants return (sign, logabsdet); compare both parts
                sign_value = torch.stack([tup[0] for tup in expected_value], dim=0).reshape(batchdims)
                expected_value = torch.stack([tup[1] for tup in expected_value], dim=0).reshape(batchdims)
                self.assertEqual(sign_value, actual_value[0])
                self.assertEqual(expected_value, actual_value[1])
            else:
                expected_value = torch.stack(expected_value, dim=0).reshape(batchdims)
                self.assertEqual(actual_value, expected_value)

    for matsize, batchdims in itertools.product([3, 5], [(3,), (5, 3)]):
        run_test(matsize, batchdims, mat_chars=['sym_pd'])
        run_test(matsize, batchdims, mat_chars=['sing'])
        run_test(matsize, batchdims, mat_chars=['non_sing'])
        run_test(matsize, batchdims, mat_chars=['sym', 'sym_pd', 'sym_psd'])
        run_test(matsize, batchdims, mat_chars=['sing', 'non_sing'])
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_cholesky_inverse(self, device, dtype):
    """torch.cholesky_inverse vs torch.inverse on Hermitian PD inputs,
    covering upper/lower factors and both out= code paths."""
    from torch.testing._internal.common_utils import random_hermitian_pd_matrix

    def run_test(shape, batch, upper, contiguous):
        A = random_hermitian_pd_matrix(shape, *batch, dtype=dtype, device=device)
        if A.numel() > 0 and not contiguous:
            # transpose view makes the input non-contiguous
            A = A.mT
            self.assertFalse(A.is_contiguous())
        L = torch.linalg.cholesky(A)
        expected_inverse = torch.inverse(A)
        # use the upper-triangular factor (conjugate transpose) when requested
        L = L.mH if upper else L
        actual_inverse = torch.cholesky_inverse(L, upper)
        self.assertEqual(actual_inverse, expected_inverse)

    shapes = (0, 3, 5)
    batches = ((), (0,), (3, ), (2, 2))
    for shape, batch, upper, contiguous in list(itertools.product(shapes, batches, (True, False), (True, False))):
        run_test(shape, batch, upper, contiguous)

    # check the out= variant
    A = random_hermitian_pd_matrix(3, 2, dtype=dtype, device=device)
    L = torch.linalg.cholesky(A)

    # There are two code paths currently for the out= variant
    # 1. When 'out' tensor is in Fortran (column-major) memory format
    # then the fast route is taken and the storage is reused directly in the computations
    # 2. When 'out' tensor is not in Fortran format then a temporary tensor is allocated internally
    # and the result is copied from the temporary tensor to 'out' tensor
    #
    # This test checks the first code path
    out = torch.empty_like(A)
    out_t = out.mT.clone(memory_format=torch.contiguous_format)
    out = out_t.mT  # column-major (Fortran-contiguous) view
    ans = torch.cholesky_inverse(L, out=out)
    self.assertEqual(ans, out)
    expected = torch.inverse(A)
    self.assertEqual(expected, out)

    # This test checks the second code path
    out = torch.empty_like(A)
    ans = torch.cholesky_inverse(L, out=out)
    self.assertEqual(ans, out)
    expected = torch.inverse(A)
    self.assertEqual(expected, out)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_cholesky_inverse_errors_and_warnings(self, device, dtype):
        """Validate the error messages and resize warnings raised by
        torch.cholesky_inverse for malformed inputs and out= tensors."""
        # cholesky_inverse requires the input to be at least 2 dimensional tensor
        a = torch.randn(2, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
            torch.cholesky_inverse(a)
        # cholesky_inverse requires a square matrix
        a = torch.randn(2, 3, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            torch.cholesky_inverse(a)
        # if non-empty out tensor with wrong shape is passed a warning is given
        a = torch.randn(3, 3, device=device, dtype=dtype)
        out = torch.empty(2, 3, device=device, dtype=dtype)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.cholesky_inverse(a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes should be safely castable
        out = torch.empty(*a.shape, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.cholesky_inverse(a, out=out)
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, device=wrong_device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
                torch.cholesky_inverse(a, out=out)
        # cholesky_inverse raises an error for invalid inputs on CPU
        # for example if at least one diagonal element is zero
        a = torch.randn(3, 3, device=device, dtype=dtype)
        a[1, 1] = 0
        if self.device_type == 'cpu':
            with self.assertRaisesRegex(torch.linalg.LinAlgError, r"cholesky_inverse: The diagonal element 2 is zero"):
                torch.cholesky_inverse(a)
        # cholesky_inverse on GPU does not raise an error for this case
        elif self.device_type == 'cuda':
            # The result simply contains inf/nan entries instead.
            out = torch.cholesky_inverse(a)
            self.assertTrue(out.isinf().any() or out.isnan().any())
def _select_broadcastable_dims(self, dims_full=None):
# select full dimensionality
if dims_full is None:
dims_full = []
ndims = random.randint(1, 4)
dims_full = [random.randint(1, 8) for _ in range(ndims)]
else:
ndims = len(dims_full)
# select actual dimensions for ops:
# larger: full ndims, individual sizes may be reduced
# smaller: possibly reduced ndims, sizes may be reduced
smaller_ndims = random.randint(1, ndims)
dims_small = []
dims_large = []
for i in range(ndims - 1, -1, -1):
j = random.randint(1, 3)
if j == 1: # no reduced singleton dimension
ds = dims_full[i]
dl = dims_full[i]
elif j == 2: # larger may have reduced singleton dimension
ds = dims_full[i]
dl = 1 if len(dims_small) < smaller_ndims else dims_full[i]
elif j == 3: # smaller may have reduced singleton dimension
ds = 1
dl = dims_full[i]
dims_large = [dl] + dims_large
if len(dims_small) < smaller_ndims:
dims_small = [ds] + dims_small
return (dims_small, dims_large, dims_full)
def test_broadcast_fused_matmul(self, device):
fns = ["baddbmm", "addbmm", "addmm", "addmv", "addr"]
for fn in fns:
batch_dim = random.randint(1, 8)
n_dim = random.randint(1, 8)
m_dim = random.randint(1, 8)
p_dim = random.randint(1, 8)
def dims_full_for_fn():
if fn == "baddbmm":
return ([batch_dim, n_dim, p_dim], [batch_dim, n_dim, m_dim], [batch_dim, m_dim, p_dim])
elif fn == "addbmm":
return ([n_dim, p_dim], [batch_dim, n_dim, m_dim], [batch_dim, m_dim, p_dim])
elif fn == "addmm":
return ([n_dim, p_dim], [n_dim, m_dim], [m_dim, p_dim])
elif fn == "addmv":
return ([n_dim], [n_dim, m_dim], [m_dim])
elif fn == "addr":
return ([n_dim, m_dim], [n_dim], [m_dim])
else:
raise AssertionError("unknown function")
(t0_dims_full, t1_dims, t2_dims) = dims_full_for_fn()
(t0_dims_small, _, _) = self._select_broadcastable_dims(t0_dims_full)
t0_small = torch.randn(*t0_dims_small, device=device).float()
t1 = torch.randn(*t1_dims, device=device).float()
t2 = torch.randn(*t2_dims, device=device).float()
t0_full = t0_small.expand(*t0_dims_full).to(device)
fntorch = getattr(torch, fn)
r0 = fntorch(t0_small, t1, t2)
r1 = fntorch(t0_full, t1, t2)
self.assertEqual(r0, r1)
    @tf32_on_and_off(0.001)
    def test_broadcast_batched_matmul(self, device):
        """Exhaustively checks torch.matmul broadcasting of batch dimensions,
        including 1-D operands, against an explicitly expanded reference and
        against torch.bmm on flattened batches."""
        n_dim = random.randint(1, 8)
        m_dim = random.randint(1, 8)
        p_dim = random.randint(1, 8)
        full_batch_dims = [random.randint(1, 3) for i in range(random.randint(1, 3))]
        (batch_dims_small, _, _) = self._select_broadcastable_dims(full_batch_dims)
        def verify_batched_matmul(full_lhs, one_dimensional):
            # full_lhs: whether the lhs (True) or the rhs (False) carries the
            # full batch dims; one_dimensional: whether the non-full operand
            # is a vector rather than a matrix.
            if not one_dimensional:
                lhs_dims = [n_dim, m_dim]
                rhs_dims = [m_dim, p_dim]
                result_dims = [n_dim, p_dim]
            else:
                lhs_dims = [n_dim, m_dim] if full_lhs else [m_dim]
                rhs_dims = [m_dim, p_dim] if not full_lhs else [m_dim]
                result_dims = [n_dim] if full_lhs else [p_dim]
            # Matrix-shaped equivalents of the operands (a 1-D operand is
            # promoted to a one-row / one-column matrix).
            lhs_mat_dims = lhs_dims if len(lhs_dims) != 1 else [1, m_dim]
            rhs_mat_dims = rhs_dims if len(rhs_dims) != 1 else [m_dim, 1]
            full_mat_dims = lhs_mat_dims if full_lhs else rhs_mat_dims
            dim0_dims = rhs_dims if full_lhs else lhs_dims
            small_dims = batch_dims_small + (rhs_mat_dims if full_lhs else lhs_mat_dims)
            small = torch.randn(*(small_dims), device=device).float()
            dim0 = torch.randn(*(dim0_dims), device=device).float()
            full = torch.randn(*(full_batch_dims + full_mat_dims), device=device).float()
            if not one_dimensional:
                (lhsTensors, rhsTensors) = ((full,), (small, dim0)) if full_lhs else ((small, dim0), (full,))
            else:
                (lhsTensors, rhsTensors) = ((full,), (dim0,)) if full_lhs else ((dim0,), (full,))
            def maybe_squeeze_result(l, r, result):
                # Undo the matrix promotion of a 1-D operand in the reference result.
                if len(lhs_dims) == 1 and l.dim() != 1:
                    return result.squeeze(-2)
                elif len(rhs_dims) == 1 and r.dim() != 1:
                    return result.squeeze(-1)
                else:
                    return result
            for lhs in lhsTensors:
                lhs_expanded = lhs.expand(*(torch.Size(full_batch_dims) + torch.Size(lhs_mat_dims)))
                lhs_expanded_matmul_fn = lhs_expanded.matmul
                for rhs in rhsTensors:
                    rhs_expanded = ((rhs if len(rhs_dims) != 1 else rhs.unsqueeze(-1)).
                                    expand(*(torch.Size(full_batch_dims) + torch.Size(rhs_mat_dims))))
                    # The fully-expanded product is the ground truth.
                    truth = maybe_squeeze_result(lhs_expanded, rhs_expanded, lhs_expanded_matmul_fn(rhs_expanded))
                    for l in (lhs, lhs_expanded):
                        for r in (rhs, rhs_expanded):
                            l_matmul_fn = l.matmul
                            result = maybe_squeeze_result(l, r, l_matmul_fn(r))
                            self.assertEqual(truth, result)
                            # test torch.matmul function as well
                            torch_result = maybe_squeeze_result(l, r, torch.matmul(l, r))
                            self.assertEqual(truth, torch_result)
                            # test torch.matmul with out
                            out = torch.zeros_like(torch_result)
                            torch.matmul(l, r, out=out)
                            self.assertEqual(truth, maybe_squeeze_result(l, r, out))
                    # compare to bmm
                    bmm_result = (torch.bmm(lhs_expanded.contiguous().view(-1, *lhs_mat_dims),
                                            rhs_expanded.contiguous().view(-1, *rhs_mat_dims)))
                    self.assertEqual(truth.view(-1, *result_dims), bmm_result.view(-1, *result_dims))
        for indices in itertools.product((True, False), repeat=2):
            verify_batched_matmul(*indices)
def lu_solve_test_helper(self, A_dims, b_dims, pivot, device, dtype):
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_A = partial(make_fullrank, device=device, dtype=dtype)
b = torch.randn(*b_dims, dtype=dtype, device=device)
A = make_A(*A_dims)
LU_data, LU_pivots, info = torch.linalg.lu_factor_ex(A)
self.assertEqual(info, torch.zeros_like(info))
return b, A, LU_data, LU_pivots
@skipCPUIfNoLapack
@skipCUDAIfNoMagmaAndNoCusolver
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_lu_solve(self, device, dtype):
def sub_test(pivot):
for k, n in zip([2, 3, 5], [3, 5, 7]):
b, A, LU_data, LU_pivots = self.lu_solve_test_helper((n, n), (n, k), pivot, device, dtype)
x = torch.lu_solve(b, LU_data, LU_pivots)
self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))
sub_test(True)
if self.device_type == 'cuda':
sub_test(False)
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagmaAndNoCusolver
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_lu_solve_batched(self, device, dtype):
        """Batched lu_solve must match per-sample lu_solve results and satisfy
        A @ x == b; also covers zero-sized batched inputs."""
        def sub_test(pivot):
            def lu_solve_batch_test_helper(A_dims, b_dims, pivot):
                b, A, LU_data, LU_pivots = self.lu_solve_test_helper(A_dims, b_dims, pivot, device, dtype)
                x_exp_list = []
                # Reference: solve each batch element independently.
                for i in range(b_dims[0]):
                    x_exp_list.append(torch.lu_solve(b[i], LU_data[i], LU_pivots[i]))
                x_exp = torch.stack(x_exp_list)  # Stacked output
                x_act = torch.lu_solve(b, LU_data, LU_pivots)  # Actual output
                self.assertEqual(x_exp, x_act)  # Equality check
                # Residual check via numpy matmul.
                Ax = np.matmul(A.cpu(), x_act.cpu())
                self.assertEqual(b, Ax)
            for batchsize in [1, 3, 4]:
                lu_solve_batch_test_helper((batchsize, 5, 5), (batchsize, 5, 10), pivot)
        # Tests tensors with 0 elements
        b = torch.randn(3, 0, 3, dtype=dtype, device=device)
        A = torch.randn(3, 0, 0, dtype=dtype, device=device)
        LU_data, LU_pivots = torch.linalg.lu_factor(A)
        self.assertEqual(torch.empty_like(b), b.lu_solve(LU_data, LU_pivots))
        sub_test(True)
        # The non-pivoting path only exists on CUDA.
        if self.device_type == 'cuda':
            sub_test(False)
@skipCUDAIfRocm # ROCm: test was exceptionally slow, even for slow tests. Skip until triage.
@slowTest
@skipCPUIfNoLapack
@skipCUDAIfNoMagmaAndNoCusolver
@dtypes(*floating_and_complex_types())
def test_lu_solve_batched_many_batches(self, device, dtype):
def run_test(A_dims, b_dims):
b, A, LU_data, LU_pivots = self.lu_solve_test_helper(A_dims, b_dims, True, device, dtype)
x = torch.lu_solve(b, LU_data, LU_pivots)
Ax = torch.matmul(A, x)
self.assertEqual(Ax, b.expand_as(Ax))
run_test((65536, 5, 5), (65536, 5, 10))
run_test((262144, 5, 5), (262144, 5, 10))
@skipCPUIfNoLapack
@skipCUDAIfNoMagmaAndNoCusolver
@dtypes(*floating_and_complex_types())
def test_lu_solve_batched_broadcasting(self, device, dtype):
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_A = partial(make_fullrank, device=device, dtype=dtype)
def run_test(A_dims, b_dims, pivot=True):
A_matrix_size = A_dims[-1]
A_batch_dims = A_dims[:-2]
A = make_A(*A_batch_dims, A_matrix_size, A_matrix_size)
b = make_tensor(b_dims, dtype=dtype, device=device)
x_exp = np.linalg.solve(A.cpu(), b.cpu())
LU_data, LU_pivots = torch.linalg.lu_factor(A)
x = torch.lu_solve(b, LU_data, LU_pivots)
self.assertEqual(x, x_exp)
# test against numpy.linalg.solve
run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6)) # no broadcasting
run_test((2, 1, 3, 4, 4), (4, 6)) # broadcasting b
run_test((4, 4), (2, 1, 3, 4, 2)) # broadcasting A
run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5)) # broadcasting A & b
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
# this tests https://github.com/pytorch/pytorch/issues/36921
def test_lu_solve_large_matrices(self, device, dtype):
def run_test(A_dims, b_dims):
b, A, LU_data, LU_pivots = self.lu_solve_test_helper(A_dims, b_dims, True, device, dtype)
x = torch.lu_solve(b, LU_data, LU_pivots)
Ax = torch.matmul(A, x)
self.assertEqual(Ax, b.expand_as(Ax))
run_test((1, 1), (1, 1, 1025))
    @precisionOverride({torch.float32: 1e-5, torch.complex64: 1e-5})
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_symeig(self, device, dtype):
        """Check the (deprecated) torch.symeig out= variant and the Tensor
        method form on Hermitian inputs, including reconstruction from
        eigenpairs and non-contiguous inputs."""
        from torch.testing._internal.common_utils import random_hermitian_matrix
        def run_test(dims, eigenvectors, upper):
            # dims is (matrix_size,) + batch_dims (see the driver loop below).
            x = random_hermitian_matrix(*dims, dtype=dtype, device=device)
            # Eigenvalues of a Hermitian matrix are real-valued.
            if dtype.is_complex:
                real_dtype = torch.float32 if dtype is torch.complex64 else torch.float64
            else:
                real_dtype = dtype
            # Output shapes: eigenvalues batch + (n,), eigenvectors batch + (n, n).
            oute = torch.empty(dims[1:] + dims[:1], dtype=real_dtype, device=device)
            outv = torch.empty(dims[1:] + dims[:1] * 2, dtype=dtype, device=device)
            torch.symeig(x, eigenvectors=eigenvectors, upper=upper, out=(oute, outv))
            if eigenvectors:
                # V @ diag(e) @ V^H must reconstruct the input.
                outv_ = outv.cpu().numpy()
                x_recon = np.matmul(np.matmul(outv_, torch.diag_embed(oute.to(dtype)).cpu().numpy()),
                                    outv_.swapaxes(-2, -1).conj())
                self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using V @ diag(e) @ V.T')
            else:
                # Without eigenvectors, compare eigenvalues against the
                # eigenvectors=True call and expect an empty V.
                eigvals, _ = torch.symeig(x, eigenvectors=True, upper=upper)
                self.assertEqual(eigvals, oute, msg='Eigenvalues mismatch')
                self.assertEqual(torch.empty(0, device=device, dtype=dtype), outv, msg='Eigenvector matrix not empty')
            rese, resv = x.symeig(eigenvectors=eigenvectors, upper=upper)
            self.assertEqual(rese, oute, msg="outputs of symeig and symeig with out don't match")
            self.assertEqual(resv, outv, msg="outputs of symeig and symeig with out don't match")
            # test non-contiguous
            x = random_hermitian_matrix(*dims, dtype=dtype, device=device)
            n_dim = len(dims) + 1
            # Reverse the batch dimensions and the matrix dimensions and then concat them
            x = x.permute(tuple(range(n_dim - 3, -1, -1)) + (n_dim - 1, n_dim - 2))
            assert not x.is_contiguous(), "x is intentionally non-contiguous"
            rese, resv = torch.symeig(x, eigenvectors=eigenvectors, upper=upper)
            if eigenvectors:
                resv_ = resv.cpu().numpy()
                x_recon = np.matmul(np.matmul(resv_, torch.diag_embed(rese.to(dtype)).cpu().numpy()),
                                    resv_.swapaxes(-2, -1).conj())
                self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using V @ diag(e) @ V.T')
            else:
                eigvals, _ = torch.symeig(x, eigenvectors=True, upper=upper)
                self.assertEqual(eigvals, rese, msg='Eigenvalues mismatch')
                self.assertEqual(torch.empty(0, device=device, dtype=dtype), resv, msg='Eigenvector matrix not empty')
        batch_dims_set = [(), (3,), (3, 5), (5, 3, 5)]
        for batch_dims, eigenvectors, upper in itertools.product(batch_dims_set, (True, False), (True, False)):
            run_test((5,) + batch_dims, eigenvectors, upper)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_symeig_out_errors_and_warnings(self, device, dtype):
        """Validate resize warnings, dtype errors, and device-mismatch errors
        for the out= variant of (deprecated) torch.symeig."""
        from torch.testing._internal.common_utils import random_hermitian_matrix
        # if non-empty out tensor with wrong shape is passed a warning is given
        a = random_hermitian_matrix(3, dtype=dtype, device=device)
        real_dtype = a.real.dtype if dtype.is_complex else dtype
        out_w = torch.empty(7, 7, dtype=real_dtype, device=device)
        out_v = torch.empty(7, 7, dtype=dtype, device=device)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.symeig(a, out=(out_w, out_v))
            # One resize warning per output tensor is expected.
            self.assertTrue("An output with one or more elements was resized" in str(w[-2].message))
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes should be safely castable
        out_w = torch.empty(0, dtype=real_dtype, device=device)
        out_v = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got eigenvectors with dtype Int"):
            torch.symeig(a, out=(out_w, out_v))
        out_w = torch.empty(0, dtype=torch.int, device=device)
        out_v = torch.empty(0, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got eigenvalues with dtype Int"):
            torch.symeig(a, out=(out_w, out_v))
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out_w = torch.empty(0, device=wrong_device, dtype=dtype)
            out_v = torch.empty(0, device=device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.symeig(a, out=(out_w, out_v))
            out_w = torch.empty(0, device=device, dtype=dtype)
            out_v = torch.empty(0, device=wrong_device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.symeig(a, out=(out_w, out_v))
    @skipCUDAIfNoCusolver
    @skipCPUIfNoLapack
    def test_pca_lowrank(self, device):
        """Check torch.pca_lowrank output shapes, centering behavior, and
        rank detection on dense and sparse inputs, plus TorchScript support."""
        from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
        dtype = torch.double
        def run_subtest(guess_rank, actual_rank, matrix_size, batches, device, pca, **options):
            # density < 1 selects the sparse-input path (actual_rank is then
            # unknown and passed as None by the driver below).
            density = options.pop('density', 1)
            if isinstance(matrix_size, int):
                rows = columns = matrix_size
            else:
                rows, columns = matrix_size
            if density == 1:
                a_input = random_lowrank_matrix(actual_rank, rows, columns, *batches, device=device, dtype=dtype)
                a = a_input
            else:
                a_input = random_sparse_matrix(rows, columns, density, device=device, dtype=dtype)
                a = a_input.to_dense()
            u, s, v = pca(a_input, q=guess_rank, **options)
            # Shape contract: u is (..., rows, q), s is (..., q), v is (..., columns, q).
            self.assertEqual(s.shape[-1], guess_rank)
            self.assertEqual(u.shape[-2], rows)
            self.assertEqual(u.shape[-1], guess_rank)
            self.assertEqual(v.shape[-1], guess_rank)
            self.assertEqual(v.shape[-2], columns)
            # u @ diag(s) @ v^T must reconstruct the column-centered input.
            A1 = u.matmul(s.diag_embed()).matmul(v.mT)
            ones_m1 = torch.ones(batches + (rows, 1), dtype=a.dtype, device=device)
            c = a.sum(axis=-2) / rows
            c = c.reshape(batches + (1, columns))
            A2 = a - ones_m1.matmul(c)
            self.assertEqual(A1, A2)
            if density == 1:
                # actual rank is known only for dense input
                detect_rank = (s.abs() > 1e-5).sum(axis=-1)
                self.assertEqual(actual_rank * torch.ones(batches, device=device, dtype=torch.int64), detect_rank)
                # Leading singular values must agree with a full SVD.
                S = torch.linalg.svdvals(A2)
                self.assertEqual(s[..., :actual_rank], S[..., :actual_rank])
        all_batches = [(), (1,), (3,), (2, 3)]
        # NOTE(review): the loop variable below shadows the `all_batches` list
        # defined above; the list literal is evaluated first, so the tuples
        # still reference the original list.
        for actual_rank, size, all_batches in [
                (2, (17, 4), all_batches),
                (2, (100, 4), all_batches),
                (6, (100, 40), all_batches),
                (12, (1000, 1000), [()]),
        ]:
            for batches in all_batches:
                for guess_rank in [
                        actual_rank,
                        actual_rank + 2,
                        actual_rank + 6,
                ]:
                    if guess_rank <= min(*size):
                        run_subtest(guess_rank, actual_rank, size, batches, device, torch.pca_lowrank)
                        run_subtest(guess_rank, actual_rank, size[::-1], batches, device, torch.pca_lowrank)
        # sparse input
        for guess_rank, size in [
                (4, (17, 4)), (4, (4, 17)), (16, (17, 17)),
                (21, (100, 40)), (20, (40, 100)), (600, (1000, 1000))]:
            for density in [0.005, 0.1]:
                run_subtest(guess_rank, None, size, (), device, torch.pca_lowrank, density=density)
        # jitting support
        jitted = torch.jit.script(torch.pca_lowrank)
        guess_rank, actual_rank, size, batches = 2, 2, (17, 4), ()
        run_subtest(guess_rank, actual_rank, size, batches, device, jitted)
# Ensure that nuclear_norm's out variant gives the same result as the non-out
@onlyNativeDeviceTypes
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64)
def test_nuclear_norm_out(self, device, dtype):
test_cases = [
# input size, dim
((25, 25), None),
((25, 25), (0, 1)),
((25, 25), (1, 0)),
((25, 25, 25), (2, 0)),
((25, 25, 25), (0, 1)),
]
for keepdim in [False, True]:
for input_size, dim in test_cases:
msg = f'input_size: {input_size}, dim: {dim}, keepdim: {keepdim}'
x = torch.randn(*input_size, device=device, dtype=dtype)
result_out = torch.empty(0, device=device, dtype=dtype)
if dim is None:
result = torch.nuclear_norm(x, keepdim=keepdim)
torch.nuclear_norm(x, keepdim=keepdim, out=result_out)
else:
result = torch.nuclear_norm(x, keepdim=keepdim, dim=dim)
torch.nuclear_norm(x, keepdim=keepdim, dim=dim, out=result_out)
self.assertEqual(result, result_out, msg=msg)
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_geqrf(self, device, dtype):
        """Compare torch.geqrf against numpy.linalg.qr(mode='raw') over
        batched, rectangular, and empty inputs."""
        def run_test(shape):
            # numpy.linalg.qr with mode = 'raw' computes the same operation as torch.geqrf
            # so this test compares against that function
            A = make_tensor(shape, dtype=dtype, device=device)
            # numpy.linalg.qr doesn't work with batched input
            m, n = A.shape[-2:]
            # tau has min(m, n) elements, expressed via the gufunc signature.
            tau_size = "n" if m > n else "m"
            np_dtype = A.cpu().numpy().dtype
            ot = [np_dtype, np_dtype]
            # Vectorize the 2-D numpy call across leading batch dimensions.
            numpy_geqrf_batched = np.vectorize(
                lambda x: np.linalg.qr(x, mode='raw'),
                otypes=ot,
                signature=f'(m,n)->(n,m),({tau_size})')
            expected = numpy_geqrf_batched(A.cpu())
            actual = torch.geqrf(A)
            # numpy.linalg.qr returns transposed result
            self.assertEqual(expected[0].swapaxes(-2, -1), actual[0])
            self.assertEqual(expected[1], actual[1])
        batches = [(), (0, ), (2, ), (2, 1)]
        ns = [5, 2, 0]
        for batch, (m, n) in product(batches, product(ns, ns)):
            run_test((*batch, m, n))
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.double)
    def test_lstsq(self, device, dtype):
        """Check the (deprecated) torch.lstsq on under- and over-determined
        systems, including the out= variant and in-place reuse of inputs."""
        def _test_underdetermined(a, b, expectedNorm):
            # underdetermined systems are only supported on CPU
            if self.device_type != 'cpu':
                return
            m = a.size()[0]
            n = a.size()[1]
            assert(m <= n)
            a_copy = a.clone()
            b_copy = b.clone()
            res1 = torch.lstsq(b, a)[0]
            # Non-out call must not mutate its inputs.
            self.assertEqual(a, a_copy, atol=0, rtol=0)
            self.assertEqual(b, b_copy, atol=0, rtol=0)
            self.assertEqual((torch.mm(a, res1) - b).norm(), expectedNorm, atol=1e-8, rtol=0)
            ta = torch.tensor((), dtype=dtype, device=device)
            tb = torch.tensor((), dtype=dtype, device=device)
            res2 = torch.lstsq(b, a, out=(tb, ta))[0]
            self.assertEqual(a, a_copy, atol=0, rtol=0)
            self.assertEqual(b, b_copy, atol=0, rtol=0)
            self.assertEqual((torch.mm(a, res1) - b).norm(), expectedNorm, atol=1e-8, rtol=0)
            # Passing the inputs as out= overwrites them with the solution.
            res3 = torch.lstsq(b, a, out=(b, a))[0]
            self.assertEqual((torch.mm(a_copy, b) - b_copy).norm(), expectedNorm, atol=1e-8, rtol=0)
            # All three call forms must agree exactly.
            self.assertEqual(res1, tb, atol=0, rtol=0)
            self.assertEqual(res1, b, atol=0, rtol=0)
            self.assertEqual(res1, res2, atol=0, rtol=0)
            self.assertEqual(res1, res3, atol=0, rtol=0)
        def _test_overdetermined(a, b, expectedNorm):
            m = a.size()[0]
            n = a.size()[1]
            assert(m > n)
            def check_norm(a, b, expected_norm, gels_result):
                # Checks |ax - b| and the residual info from the result
                # The first n rows is the least square solution.
                # Rows n to m-1 contain residual information.
                x = gels_result[:n]
                resid_info = gels_result[n:]
                resid_norm = (torch.mm(a, x) - b).norm()
                # NOTE(review): the `expected_norm` parameter is unused; the
                # closure variable `expectedNorm` is compared instead.  All
                # callers pass the same value, so behavior is unaffected.
                self.assertEqual(resid_norm, expectedNorm, atol=1e-8, rtol=0)
                self.assertEqual(resid_info.norm(), resid_norm, atol=1e-8, rtol=0)
            a_copy = a.clone()
            b_copy = b.clone()
            res1 = torch.lstsq(b, a)[0]
            self.assertEqual(a, a_copy, atol=0, rtol=0)
            self.assertEqual(b, b_copy, atol=0, rtol=0)
            check_norm(a, b, expectedNorm, res1)
            ta = torch.tensor((), dtype=dtype, device=device)
            tb = torch.tensor((), dtype=dtype, device=device)
            res2 = torch.lstsq(b, a, out=(tb, ta))[0]
            self.assertEqual(a, a_copy, atol=0, rtol=0)
            self.assertEqual(b, b_copy, atol=0, rtol=0)
            check_norm(a, b, expectedNorm, res2)
            res3 = torch.lstsq(b, a, out=(b, a))[0]
            check_norm(a_copy, b_copy, expectedNorm, res3)
            self.assertEqual(res1, tb, atol=0, rtol=0)
            self.assertEqual(res1, b, atol=0, rtol=0)
            self.assertEqual(res1, res2, atol=0, rtol=0)
            self.assertEqual(res1, res3, atol=0, rtol=0)
        # basic test
        expectedNorm = 0
        a = torch.tensor(((1.44, -9.96, -7.55, 8.34),
                          (-7.84, -0.28, 3.24, 8.09),
                          (-4.39, -3.24, 6.27, 5.28),
                          (4.53, 3.83, -6.64, 2.06)), dtype=dtype, device=device).t()
        b = torch.tensor(((8.58, 8.26, 8.48, -5.28),
                          (9.35, -4.43, -0.70, -0.26)), dtype=dtype, device=device).t()
        _test_underdetermined(a, b, expectedNorm)
        # test overdetermined
        expectedNorm = 17.390200628863
        a = torch.tensor(((1.44, -9.96, -7.55, 8.34, 7.08, -5.45),
                          (-7.84, -0.28, 3.24, 8.09, 2.52, -5.70),
                          (-4.39, -3.24, 6.27, 5.28, 0.74, -1.19),
                          (4.53, 3.83, -6.64, 2.06, -2.47, 4.70)), dtype=dtype, device=device).t()
        b = torch.tensor(((8.58, 8.26, 8.48, -5.28, 5.72, 8.93),
                          (9.35, -4.43, -0.70, -0.26, -7.36, -2.52)), dtype=dtype, device=device).t()
        _test_overdetermined(a, b, expectedNorm)
        # test underdetermined
        expectedNorm = 0
        a = torch.tensor(((1.44, -9.96, -7.55),
                          (-7.84, -0.28, 3.24),
                          (-4.39, -3.24, 6.27),
                          (4.53, 3.83, -6.64)), dtype=dtype, device=device).t()
        b = torch.tensor(((8.58, 8.26, 8.48),
                          (9.35, -4.43, -0.70)), dtype=dtype, device=device).t()
        _test_underdetermined(a, b, expectedNorm)
        # test reuse
        expectedNorm = 0
        a = torch.tensor(((1.44, -9.96, -7.55, 8.34),
                          (-7.84, -0.28, 3.24, 8.09),
                          (-4.39, -3.24, 6.27, 5.28),
                          (4.53, 3.83, -6.64, 2.06)), dtype=dtype, device=device).t()
        b = torch.tensor(((8.58, 8.26, 8.48, -5.28),
                          (9.35, -4.43, -0.70, -0.26)), dtype=dtype, device=device).t()
        ta = torch.tensor((), dtype=dtype, device=device)
        tb = torch.tensor((), dtype=dtype, device=device)
        # Repeated calls with the same out= tensors must keep producing
        # correct results.
        torch.lstsq(b, a, out=(tb, ta))
        self.assertEqual((torch.mm(a, tb) - b).norm(), expectedNorm, atol=1e-8, rtol=0)
        torch.lstsq(b, a, out=(tb, ta))
        self.assertEqual((torch.mm(a, tb) - b).norm(), expectedNorm, atol=1e-8, rtol=0)
        torch.lstsq(b, a, out=(tb, ta))
        self.assertEqual((torch.mm(a, tb) - b).norm(), expectedNorm, atol=1e-8, rtol=0)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    def test_lapack_empty(self, device):
        """Spot-check that a selection of LAPACK-backed ops handle zero-sized
        dimensions gracefully (or raise where unsupported)."""
        # FIXME: these are just a selection of LAPACK functions -- we need a general strategy here.
        # The LAPACK functions themselves generally do NOT work with zero sized dimensions, although
        # numpy/sci often has a direct wrapper (e.g. lu_factor) and a wrapper that "does the right thing"
        # (e.g. lu). We often name our functions identically to the lapack function, so it will take work
        # to name / migrate-to better wrappers.
        def fn(torchfn, *args):
            # Tuple args become random tensors of that shape; other args pass through.
            return torchfn(*tuple(torch.randn(shape, device=device) if isinstance(shape, tuple) else shape
                                  for shape in args))
        # inverse, pinverse
        self.assertEqual((0, 0), fn(torch.inverse, (0, 0)).shape)
        self.assertEqual((5, 0), fn(torch.pinverse, (0, 5)).shape)
        self.assertEqual((0, 5), fn(torch.pinverse, (5, 0)).shape)
        self.assertEqual((0, 0), fn(torch.pinverse, (0, 0)).shape)
        # det, logdet, slogdet
        # det of an empty matrix is 1 (empty product), so logdet is 0.
        self.assertEqual(torch.tensor(1., device=device), fn(torch.det, (0, 0)))
        self.assertEqual(torch.tensor(0., device=device), fn(torch.logdet, (0, 0)))
        self.assertEqual((torch.tensor(1., device=device), torch.tensor(0., device=device)),
                         fn(torch.slogdet, (0, 0)))
        # eig, symeig
        evalues, evectors = fn(torch.eig, (0, 0), True)
        self.assertEqual([(0, 2), (0, 0)], [evalues.shape, evectors.shape])
        evalues, evectors = fn(torch.symeig, (0, 0), True)
        self.assertEqual([(0,), (0, 0)], [evalues.shape, evectors.shape])
        # lstsq
        # lstsq rejects empty inputs outright.
        self.assertRaises(RuntimeError, lambda: torch.lstsq(torch.randn(0, 0), torch.randn(0, 0)))
        self.assertRaises(RuntimeError, lambda: torch.lstsq(torch.randn(0,), torch.randn(0, 0)))
@tf32_on_and_off(0.005)
def test_tensordot(self, device):
a = torch.arange(60., device=device).reshape(3, 4, 5)
b = torch.arange(24., device=device).reshape(4, 3, 2)
c = torch.tensordot(a, b, dims=([1, 0], [0, 1])).cpu()
cn = torch.from_numpy(np.tensordot(a.cpu().numpy(), b.cpu().numpy(),
axes=([1, 0], [0, 1])))
self.assertEqual(c, cn)
cout = torch.zeros((5, 2), device=device)
torch.tensordot(a, b, dims=([1, 0], [0, 1]), out=cout).cpu()
self.assertEqual(c, cout)
a = torch.randn(2, 3, 4, 5, device=device)
b = torch.randn(4, 5, 6, 7, device=device)
c = torch.tensordot(a, b, dims=2).cpu()
cn = torch.from_numpy(np.tensordot(a.cpu().numpy(), b.cpu().numpy(),
axes=2))
with self.assertRaisesRegex(RuntimeError, "expects dims >= 0"):
torch.tensordot(a, b, dims=-1)
self.assertEqual(c, cn)
c = torch.tensordot(a, b).cpu()
cn = torch.from_numpy(np.tensordot(a.cpu().numpy(), b.cpu().numpy()))
self.assertEqual(c, cn)
a = torch.tensordot(torch.tensor(0.), torch.tensor(0.), 0)
an = torch.from_numpy(np.tensordot(np.zeros((), dtype=np.float32), np.zeros((), dtype=np.float32), 0))
self.assertEqual(a, an)
    @skipCUDAIfNoCusolver
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @skipCUDAIfRocm
    @dtypes(*floating_and_complex_types())
    def test_ldl_factor(self, device, dtype):
        """Check torch.linalg.ldl_factor_ex: reconstruct A from L, D and
        compare against scipy.linalg.ldl where available."""
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix
        def run_test(shape, batch, hermitian):
            A = random_hermitian_pd_matrix(shape, *batch, dtype=dtype, device=device)
            actual_factors, actual_pivots, info = torch.linalg.ldl_factor_ex(A, hermitian=hermitian)
            # L is the strictly-lower part of the factors with a unit diagonal.
            actual_L = torch.tril(actual_factors, diagonal=-1)
            actual_L.diagonal(0, -2, -1).fill_(1.0)
            # This test is designed only for inputs with 1x1 block diagonal matrix D.
            # That is for positive definite input matrices, the pivots tensor is always > 0.
            # If negative pivots are encountered, it means that the input matrix is not positive definite.
            # And matrix D is a 2x2 block diagonal matrix.
            self.assertTrue((actual_pivots > 0).all())
            # Construct a 1x1 block diagonal matrix D from factors.
            actual_D = torch.diag_embed(actual_factors.diagonal(0, -2, -1))
            def T(x):
                # Conjugate transpose for Hermitian mode, plain transpose otherwise.
                return x.mH if hermitian else x.mT
            A_reconstructed = actual_L @ actual_D @ T(actual_L)
            def symmetric(A):
                # Symmetrize from the lower triangle.
                return A.tril() + A.tril(-1).mT
            self.assertEqual(symmetric(A) if not hermitian else A, A_reconstructed)
            # Now test against SciPy implementation
            if TEST_SCIPY:
                from scipy.linalg import ldl as scipy_ldl
                A_np = A.cpu().numpy()
                np_dtype = A_np.dtype
                # Vectorize scipy's 2-D routine over batch dimensions.
                scipy_ldl_batched = np.vectorize(
                    lambda x: scipy_ldl(x, hermitian=hermitian, lower=True),
                    otypes=[np_dtype, np_dtype, np.dtype('int64')],
                    signature='(m,m)->(m,m),(m,m),(m)')
                expected = scipy_ldl_batched(A_np)
                expected_L, expected_D, expected_pivots = expected
                # scipy returns a permutation; apply it to compare L directly.
                if expected_pivots.ndim > 1:
                    permuted_expected_L = np.stack(
                        [expected_L[i][expected_pivots[i], :] for i in range(expected_pivots.shape[0])]
                    )
                else:
                    permuted_expected_L = expected_L[expected_pivots, :]
                self.assertEqual(actual_L, permuted_expected_L)
                self.assertEqual(actual_D, expected_D)
            else:
                # Without scipy only the output shapes can be checked.
                self.assertEqual(actual_factors.shape, A.shape)
                self.assertEqual(actual_pivots.shape, A.shape[:-1])
                self.assertEqual(info.shape, A.shape[:-2])
        # hermitian=True for complex inputs on CUDA is supported only with MAGMA 2.5.4+
        magma_254_available = self.device_type == 'cuda' and _get_magma_version() >= (2, 5, 4)
        hermitians = (True, False) if dtype.is_complex and (self.device_type == 'cpu' or magma_254_available) else (False,)
        shapes = (5,)
        batches = ((), (4,),)
        for shape, batch, hermitian in itertools.product(shapes, batches, hermitians):
            run_test(shape, batch, hermitian)
@skipCUDAIfNoCusolver
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@skipCUDAIfRocm
@skipCUDAIf(_get_torch_cuda_version() < (11, 4), "not available before CUDA 11.3.1")
@dtypes(*floating_and_complex_types())
def test_ldl_solve(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
def run_test(shape, batch, nrhs, hermitian):
A = random_hermitian_pd_matrix(shape, *batch, dtype=dtype, device=device)
B = make_tensor((*A.shape[:-1], nrhs), dtype=dtype, device=device)
factors, pivots, info = torch.linalg.ldl_factor_ex(A, hermitian=hermitian)
X = torch.linalg.ldl_solve(factors, pivots, B, hermitian=hermitian)
def symmetric(A):
return A.tril() + A.tril(-1).mT
# verify A @ X == B
expected_B = symmetric(A) @ X if not hermitian else A @ X
self.assertEqual(B, expected_B)
# hermitian=True is not supported on CUDA yet
hermitians = (True, False) if dtype.is_complex and self.device_type == 'cpu' else (False,)
shapes = (5,)
batches = ((), (4,), (2, 2))
nrhss = (1, 7)
for shape, batch, nrhs, hermitian in itertools.product(shapes, batches, nrhss, hermitians):
run_test(shape, batch, nrhs, hermitian)
    @onlyCUDA
    @skipCUDAIfNoMagma
    @skipCUDAIfNoCusolver
    @setLinalgBackendsToDefaultFinally
    def test_preferred_linalg_library(self):
        # The main purpose of this test is to make sure these "backend" calls work normally without raising exceptions.
        x = torch.randint(2, 5, (2, 4, 4), device='cuda', dtype=torch.double)
        # Switch through each backend and run an op under each one.
        torch.backends.cuda.preferred_linalg_library('cusolver')
        out1 = torch.linalg.inv(x)
        torch.backends.cuda.preferred_linalg_library('magma')
        out2 = torch.linalg.inv(x)
        torch.backends.cuda.preferred_linalg_library('default')
        # Although linalg preferred flags doesn't affect CPU currently,
        # we set this to make sure the flag can switch back to default normally.
        out_ref = torch.linalg.inv(x.cpu())
        # All backends must agree with the CPU reference and with each other.
        self.assertEqual(out_ref, out1.cpu())
        self.assertEqual(out1, out2)
# Generate per-device (CPU/CUDA) variants of every TestLinalg test into this module.
instantiate_device_type_tests(TestLinalg, globals())
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_linalg.py |
# Owner(s): ["module: mkldnn"]
import copy
import itertools
import functools
import unittest
# torchvision is an optional dependency; tests that need it are decorated
# with skipIfNoTorchVision and skipped when it is not installed.
try:
    import torchvision
    HAS_TORCHVISION = True
except ImportError:
    HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS
# batched grad doesn't support mkldnn
# Pre-bind check_batched_grad=False so every gradcheck/gradgradcheck call in
# this file skips batched-gradient checking.
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
# OneDNN's bf16 kernels require an Intel CPU providing at least the avx512bw,
# avx512vl and avx512dq instruction-set extensions; bf16 test paths are
# skipped (or expected to raise) when these are absent.
@functools.lru_cache(maxsize=None)
def has_bf16_support():
    """Return True iff this Linux machine advertises the AVX512 features OneDNN bf16 needs."""
    import sys
    # /proc/cpuinfo only exists on Linux; treat every other platform as unsupported.
    if sys.platform != 'linux':
        return False
    required_flags = ("avx512bw", "avx512vl", "avx512dq")
    with open("/proc/cpuinfo", encoding="ascii") as cpuinfo:
        cpu_flags = cpuinfo.read()
    return all(flag in cpu_flags for flag in required_flags)
types = [torch.float, torch.bfloat16]
# Comment the line below to find out the CI machines having MKL-DNN build disabled
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
    def test_conversion(self):
        """Round-trip dense <-> mkldnn conversion for float/bfloat16, both contiguous
        and non-contiguous (sliced) inputs, checking dtype, size, numel, element_size
        and that mkldnn tensors expose no data pointer."""
        for cpu_tensor in [torch.randn((1, 2, 3, 4),
                                       dtype=torch.float, device=torch.device('cpu')),
                           torch.randn((1, 2, 3, 4, 5),
                                       dtype=torch.float, device=torch.device('cpu'))[:, :, :, :, 1]]:
            cpu_tensor.requires_grad_()
            # float cpu tensor to mkldnn float tensor or bfloat tensor.
            for dtype1 in types:
                mkldnn_tensor = cpu_tensor.to_mkldnn(dtype1)
                self.assertEqual(mkldnn_tensor.dtype, dtype1)
                cpu_tensor_1 = mkldnn_tensor.to_dense()
                # not given dtype for to_dense, mkldnn tensor has same dtype with cpu tensor
                self.assertEqual(mkldnn_tensor.dtype, cpu_tensor_1.dtype)
                # mkldnn float/bfloat tensor to cpu float or bfloat tensor
                for dtype2 in types:
                    cpu_tensor_2 = mkldnn_tensor.to_dense(dtype2)
                    self.assertEqual(cpu_tensor_2.dtype, dtype2)
                    # looser tolerance whenever bfloat16 was involved in the round trip
                    atol = 1e-5 if dtype1 == torch.float and dtype2 == torch.float else 1e-2
                    self.assertEqual(cpu_tensor, cpu_tensor_2.float(), atol=atol, rtol=0)
                self.assertEqual(mkldnn_tensor.device, torch.device('cpu'))
                self.assertEqual(mkldnn_tensor.size(), torch.Size([1, 2, 3, 4]))
                self.assertEqual(mkldnn_tensor.numel(), cpu_tensor.numel())
                # bfloat16 elements are half the size of float32 elements
                if dtype1 == torch.float:
                    self.assertEqual(mkldnn_tensor.element_size(), cpu_tensor.element_size())
                else:
                    self.assertEqual(mkldnn_tensor.element_size(), cpu_tensor.element_size() / 2)
                self.assertRaisesRegex(RuntimeError,
                                       "Cannot access data pointer of Tensor that doesn't have storage",
                                       lambda: mkldnn_tensor.data_ptr() != 0)
            # bfloat cpu tensor to mkldnn float tensor or bfloat tensor.
            cpu_tensor_bf16 = cpu_tensor.bfloat16()
            for dtype1 in types:
                mkldnn_tensor = cpu_tensor_bf16.to_mkldnn(dtype1)
                self.assertEqual(mkldnn_tensor.dtype, dtype1)
                cpu_tensor_1 = mkldnn_tensor.to_dense()
                # not given dtype for to_dense, mkldnn tensor has same dtype with cpu tensor
                self.assertEqual(mkldnn_tensor.dtype, cpu_tensor_1.dtype)
                # mkldnn float/bfloat tensor to cpu float or bfloat tensor
                for dtype2 in types:
                    cpu_tensor_2 = mkldnn_tensor.to_dense(dtype2)
                    self.assertEqual(cpu_tensor_2.dtype, dtype2)
                    self.assertEqual(cpu_tensor_bf16, cpu_tensor_2.bfloat16(), atol=1e-5, rtol=0)
                self.assertEqual(mkldnn_tensor.device, torch.device('cpu'))
                self.assertEqual(mkldnn_tensor.size(), torch.Size([1, 2, 3, 4]))
                self.assertEqual(mkldnn_tensor.numel(), cpu_tensor.numel())
                if dtype1 == torch.bfloat16:
                    self.assertEqual(mkldnn_tensor.element_size(), cpu_tensor_bf16.element_size())
                else:
                    self.assertEqual(mkldnn_tensor.element_size(), cpu_tensor_bf16.element_size() * 2)
                self.assertRaisesRegex(RuntimeError,
                                       "Cannot access data pointer of Tensor that doesn't have storage",
                                       lambda: mkldnn_tensor.data_ptr() != 0)
    def test_copy(self):
        """copy_ between mkldnn tensors works for matching sizes and raises for
        mismatched sizes or mixed mkldnn/dense layouts."""
        x = torch.randn(4, 5, dtype=torch.float32)
        mkldnn_x = x.to_mkldnn()
        mkldnn_y = torch.randn(4, 5, dtype=torch.float32).to_mkldnn()
        mkldnn_z = torch.randn(4, 10, dtype=torch.float32).to_mkldnn()
        mkldnn_y.copy_(mkldnn_x)
        self.assertEqual(x, mkldnn_y.to_dense())
        # size mismatch must raise
        self.assertRaisesRegex(RuntimeError,
                               "copy_mkldnn_: only support same size tensor.",
                               lambda: mkldnn_z.copy_(mkldnn_x))
        # dense <- mkldnn and mkldnn <- dense are both unimplemented
        self.assertRaisesRegex(RuntimeError,
                               "copy_mkldnn_: between mkldnn layout and dense Tensors is not implemented! "
                               "Found self type = torch.FloatTensor and src type = Mkldnntorch.FloatTensor",
                               lambda: x.copy_(mkldnn_x))
        self.assertRaisesRegex(RuntimeError,
                               "copy_mkldnn_: between mkldnn layout and dense Tensors is not implemented! "
                               "Found self type = Mkldnntorch.FloatTensor and src type = torch.FloatTensor",
                               lambda: mkldnn_x.copy_(x))
def test_unsupported(self):
# unsupported types and unsupported types with gpu
for dtype in [torch.double, torch.half, torch.uint8, torch.int8,
torch.short, torch.int, torch.long]:
with self.assertRaises(RuntimeError) as context:
torch.randn(1, 2, 3, 4, dtype=dtype, device=torch.device('cpu')).to_mkldnn()
if torch.cuda.is_available():
with self.assertRaises(RuntimeError) as context:
torch.randn(1, 2, 3, 4, dtype=dtype, device=torch.device('cuda')).to_mkldnn()
# supported type with gpu
if torch.cuda.is_available():
with self.assertRaises(RuntimeError) as context:
torch.randn(1, 2, 3, 4, dtype=torch.float, device=torch.device('cuda')).to_mkldnn()
# some factory functions
for creator in [torch.ones, torch.randn, torch.rand]:
with self.assertRaises(RuntimeError) as context:
creator(1, 2, 3, 4, dtype=torch.float, device=torch.device('cpu'), layout=torch._mkldnn)
    def test_mkldnn_conv_shapecheck(self):
        """Each column of `options` describes one invalid conv configuration
        (negative padding, zero stride/dilation/groups, or weight/bias shapes
        inconsistent with the input); every one must raise."""
        input = torch.full((1, 1, 1, 24,), 1, dtype=torch.float32)
        w1 = torch.full((1, 1, 1, 24,), 1, dtype=torch.float32)
        b1 = torch.full((1,), 1, dtype=torch.float32)
        w2 = torch.full((1, 1, 2, 24,), 1, dtype=torch.float32)
        b2 = torch.full((2,), 1, dtype=torch.float32)
        options = zip([-1, 0, 0, 0, 0, 0, 0],  # padding
                      [1, 0, 1, 1, 1, 1, 1],  # stride
                      [1, 1, 0, 1, 1, 1, 1],  # dilation
                      [1, 1, 1, 0, 2, 1, 1],  # groups
                      [w1, w1, w1, w1, w1, w1, w2],  # weight
                      [b1, b1, b1, b1, b1, b2, b1])  # bias
        for pad, st, dil, gr, w, b in options:
            with self.assertRaises(RuntimeError) as _:
                torch.mkldnn_convolution(input, w, b, [pad] * 2, [st] * 2, [dil] * 2, gr)
def test_autograd_to_mkldnn(self):
# MKLDNN only supports float32
root = torch.randn(4, 5, dtype=torch.float32, requires_grad=True)
def func(root):
return root.to_mkldnn().to_dense()
# because MKLDNN only supports float32, we need to lessen the precision.
# these numbers are just empirical results that seem to work.
self.assertWarnsRegex(UserWarning,
'double precision floating point',
lambda: gradcheck(func, [root], atol=4e-2, rtol=1e-2))
self.assertWarnsRegex(UserWarning,
'double precision floating point',
lambda: gradgradcheck(func, [root], atol=4e-2, rtol=1e-2))
    def test_autograd_from_mkldnn(self):
        """Gradcheck to_dense() starting from an mkldnn leaf tensor."""
        # MKLDNN only supports float32
        root = torch.randn(4, 5, dtype=torch.float32).to_mkldnn().requires_grad_()
        def func(root):
            return root.to_dense()
        # because MKLDNN only supports float32, we need to lessen the precision.
        # these numbers are just empirical results that seem to work.
        self.assertWarnsRegex(UserWarning,
                              'double precision floating point',
                              lambda: gradcheck(func, [root], atol=4e-2, rtol=1e-2))
    def test_detach(self):
        """detach() returns a grad-free tensor without touching the source;
        detach_() strips requires_grad in place."""
        root = torch.randn(4, 5, dtype=torch.float32).to_mkldnn().requires_grad_()
        detach = root.detach()
        self.assertEqual((4, 5), detach.size())
        self.assertFalse(detach.requires_grad)
        self.assertTrue(root.requires_grad)
        detach_ = root.detach_()
        self.assertEqual((4, 5), detach_.size())
        self.assertFalse(detach_.requires_grad)
        self.assertFalse(root.requires_grad)
    def test_repr(self):
        # repr of an mkldnn tensor must mention its layout
        self.assertTrue("layout=torch._mkldnn" in str(torch.randn((1, 2, 3, 4),
                                                                  dtype=torch.float, device=torch.device('cpu')).to_mkldnn()))
    def _test_conv_base(self, dim):
        """Compare mkldnn ConvNd against the aten (thnn) reference for `dim` in {1,2,3},
        over train/eval, bias, dilation and groups; checks forward, backward,
        serialization and tracing where supported."""
        conv_module = {1: torch.nn.Conv1d, 2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
        input_shapes = {1: (224,), 2: (224, 224), 3: (55, 55, 55)}
        options = itertools.product([True, False], [True, False], [1, 2], [1, 4])
        for train, bias, dilation, groups in options:
            N = torch.randint(3, 10, (1,)).item()
            # channel counts must be divisible by groups
            M = torch.randint(1, 3, (1,)).item() * groups
            C = torch.randint(1, 3, (1,)).item() * groups
            x_shape = (N, C) + input_shapes[dim]
            x = torch.randn(x_shape, dtype=torch.float32)
            conv = conv_module[dim](in_channels=C,
                                    out_channels=M,
                                    kernel_size=3,
                                    stride=2,
                                    padding=1,
                                    dilation=dilation,
                                    bias=bias,
                                    groups=groups).float()
            x1 = x.clone()
            x2 = x.clone().to_mkldnn()
            if not train:
                # inference path: weights converted to mkldnn layout up front
                mkldnn_conv = mkldnn_utils.to_mkldnn(copy.deepcopy(conv))
            elif train and dim != 1:
                # TODO: enable conv1d training.
                x1.requires_grad_()
                x2.requires_grad_()
                mkldnn_conv = copy.deepcopy(conv)
            with torch.backends.mkldnn.flags(enabled=False):
                # reference result with the mkldnn fast path disabled
                y_aten = conv(x1)
                if train and dim != 1:
                    loss1 = y_aten.sum()
                    loss1.backward()
            if not train or (train and dim != 1):
                y_mkldnn = mkldnn_conv(x2).to_dense()
                self.assertEqual(y_aten, y_mkldnn)
            if not train:
                self._test_serialization(mkldnn_conv, (x.to_mkldnn(),))
                self._test_tracing(mkldnn_conv, (x.to_mkldnn(),))
            elif dim != 1:
                loss2 = y_mkldnn.sum()
                loss2.backward()
                self.assertTrue(x2.grad.is_mkldnn)
                self.assertEqual(x1.grad, x2.grad.to_dense())
                self.assertEqual(conv.weight.grad,
                                 mkldnn_conv.weight.grad,
                                 atol=1e-3,
                                 rtol=1e-3)
                if bias:
                    self.assertEqual(conv.bias.grad, mkldnn_conv.bias.grad)
    def test_conv1d(self):
        self._test_conv_base(dim=1)
    def test_conv2d(self):
        self._test_conv_base(dim=2)
    def test_conv3d(self):
        self._test_conv_base(dim=3)
    @unittest.skipIf(IS_WINDOWS, "Limit support for bf16 path")
    def _test_conv_bf16_base(self, dim):
        """Compare bf16 mkldnn ConvNd against its fp32 mkldnn counterpart when the
        CPU supports bf16; otherwise expect the conversion/run to raise."""
        conv_module = {1: torch.nn.Conv1d, 2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
        input_shapes = {1: (224,), 2: (224, 224), 3: (55, 55, 55)}
        options = itertools.product([True, False], [1, 2], [1, 4])
        for bias, dilation, groups in options:
            N = torch.randint(3, 10, (1,)).item()
            M = torch.randint(1, 3, (1,)).item() * groups
            C = torch.randint(1, 3, (1,)).item() * groups
            x_shape = (N, C) + input_shapes[dim]
            x = torch.randn(x_shape, dtype=torch.float32)
            conv = conv_module[dim](in_channels=C,
                                    out_channels=M,
                                    kernel_size=3,
                                    stride=2,
                                    padding=1,
                                    dilation=dilation,
                                    bias=bias,
                                    groups=groups).float()
            x_bf16 = x.bfloat16()
            if has_bf16_support():
                mkldnn_conv = mkldnn_utils.to_mkldnn(copy.deepcopy(conv))
                mkldnn_conv_bf16 = mkldnn_utils.to_mkldnn(copy.deepcopy(conv), torch.bfloat16)
                y = mkldnn_conv(x.to_mkldnn()).to_dense()
                y_bf16 = mkldnn_conv_bf16(x_bf16.to_mkldnn()).to_dense(torch.float32)
                # loose atol: bf16 has ~3 decimal digits of precision
                self.assertEqual(y, y_bf16, atol=1e-1, rtol=1e-3)
            else:
                msg = r"bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
                with self.assertRaisesRegex(RuntimeError, msg):
                    mkldnn_conv_bf16 = mkldnn_utils.to_mkldnn(copy.deepcopy(conv), torch.bfloat16)
                    y_bf16 = mkldnn_conv_bf16(x_bf16.to_mkldnn()).to_dense(torch.float32)
    def test_conv1d_bf16(self):
        self._test_conv_bf16_base(dim=1)
    def test_conv2d_bf16(self):
        self._test_conv_bf16_base(dim=2)
    def test_conv3d_bf16(self):
        self._test_conv_bf16_base(dim=3)
    def _test_conv2d_nhwc_base(self, dtype):
        """Compare Conv2d in contiguous (NCHW) vs channels-last (NHWC) memory format,
        forward and backward, over train/bias/dilation/groups."""
        conv_module = torch.nn.Conv2d
        input_shapes = (224, 224)
        options = itertools.product([True, False], [True, False], [1, 2], [1, 4])
        for train, bias, dilation, groups in options:
            N = torch.randint(3, 10, (1,)).item()
            M = torch.randint(1, 3, (1,)).item() * groups
            C = torch.randint(1, 3, (1,)).item() * groups
            x_shape = (N, C) + input_shapes
            x = torch.randn(x_shape, dtype=dtype)
            # conv1: mkldnn conv2d in contiguous memory format (nchw)
            # conv2: mkldnn conv2d in channels last memory format (nhwc)
            conv1 = conv_module(in_channels=C,
                                out_channels=M,
                                kernel_size=3,
                                stride=2,
                                padding=1,
                                dilation=dilation,
                                bias=bias,
                                groups=groups).to(dtype=dtype)
            conv2 = copy.deepcopy(conv1).to(memory_format=torch.channels_last)
            x1 = x.clone()
            x2 = x.clone().to(memory_format=torch.channels_last)
            if train:
                x1.requires_grad_()
                x2.requires_grad_()
            y1 = conv1(x1)
            y2 = conv2(x2)
            self.assertEqual(y1, y2)
            if train:
                y1.sum().backward()
                y2.sum().backward()
                # gradient of the channels-last input must stay channels-last
                self.assertTrue(x2.grad.is_contiguous(memory_format=torch.channels_last))
                self.assertEqual(conv1.weight.grad,
                                 conv2.weight.grad,
                                 atol=1e-3,
                                 rtol=1e-3)
                if bias:
                    self.assertEqual(conv1.bias.grad, conv2.bias.grad)
                self.assertEqual(x1.grad, x2.grad)
    def test_conv2d_nhwc(self):
        self._test_conv2d_nhwc_base(dtype=torch.float32)
    @unittest.skipIf(IS_WINDOWS, "Limit support for bf16 path")
    def test_conv2d_nhwc_bf16(self):
        # when has_bf16_support() returns false, bf16 CPU conv will fall back to thnn impl
        if has_bf16_support():
            self._test_conv2d_nhwc_base(dtype=torch.bfloat16)
    def test_conv2d_legacy_jit_model(self):
        """
        MKLDNN integration used to serialize models with 5d weight for grouped
        convolutions, we'd like to preserve this behavior
        """
        g = 4
        conv2d = torch.nn.Conv2d(16, 16, 3, groups=g)
        conv2d_mkldnn = torch.utils.mkldnn.to_mkldnn(conv2d)
        # contrive legacy conv2d module with a 5-d weight
        o, i, h, w = conv2d.weight.shape
        weight_5d = conv2d.weight.reshape((g, o // g, i, h, w))
        conv2d_mkldnn.weight = weight_5d.to_mkldnn()
        x = torch.randn(1, 16, 8, 8)
        with TemporaryFileName() as fname:
            torch.jit.save(conv2d_mkldnn, fname)
            conv2d_loaded = torch.jit.load(fname)
            # save keeps the legacy 5-d weight; load normalizes it back to 4-d
            self.assertEqual(conv2d_mkldnn.weight.ndimension(), 5)
            self.assertEqual(conv2d_loaded.weight.ndimension(), 4)
            self.assertEqual(
                conv2d(x),
                conv2d_loaded(x.to_mkldnn()).to_dense())
# This test is to check whether 1D conv is supported for mkldnn tensor,
# which is exposed by Issue https://github.com/pytorch/pytorch/issues/68034.
def test_conv1d_functional(self):
input = torch.randn(2, 3, 10).to_mkldnn()
weight = torch.randn(3, 3, 3).to_mkldnn()
bias = torch.randn(3).to_mkldnn()
output = torch.nn.functional.conv1d(input, weight, bias)
self.assertEqual(output.size(), torch.Size([2, 3, 8]))
def test_relu(self):
x = torch.randn((4, 5), dtype=torch.float32) * 10
x1 = x.clone().requires_grad_()
x2 = x.clone().to_mkldnn().requires_grad_()
y1 = torch.relu(x1)
y2 = torch.relu(x2).to_dense()
loss1 = y1.sum()
loss2 = y2.sum()
loss1.backward()
loss2.backward()
self.assertEqual(y1, y2)
self.assertEqual(x1.grad, x2.grad.to_dense())
def test_relu_(self):
x = torch.randn((4, 5), dtype=torch.float32) * 10
x1 = x.clone().requires_grad_()
x2 = x.clone().to_mkldnn().requires_grad_()
y1 = torch.relu_(x1.clone())
y2 = torch.relu_(x2.clone()).to_dense()
loss1 = y1.sum()
loss2 = y2.sum()
loss1.backward()
loss2.backward()
self.assertEqual(y1, y2)
self.assertEqual(x1.grad, x2.grad.to_dense())
    @unittest.skipIf(IS_WINDOWS, "Limit support for bf16 path")
    def _test_relu_bf16_base(self, name):
        """Run torch.<name> (relu or relu_) on fp32 and bf16 mkldnn tensors and
        compare; on CPUs without bf16 support the bf16 call must raise."""
        x = torch.randn((4, 5), dtype=torch.float32) * 10
        x_bf16 = x.bfloat16()
        fn = getattr(torch, name)
        if has_bf16_support():
            y = fn(x.to_mkldnn()).to_dense()
            y_bf16 = fn(x_bf16.to_mkldnn()).to_dense(torch.float32)
            self.assertEqual(y, y_bf16, atol=1e-1, rtol=1e-3)
        else:
            msg = r"bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
            self.assertRaisesRegex(RuntimeError,
                                   msg,
                                   lambda: fn(x_bf16.to_mkldnn()))
    def test_relu_bf16(self):
        self._test_relu_bf16_base("relu")
    def test_relu_inplace_bf16(self):
        self._test_relu_bf16_base("relu_")
def test_gelu(self):
m = torch.nn.GELU()
x = torch.randn((4, 5), dtype=torch.float32) * 10
x1 = x.clone().requires_grad_()
x2 = x.clone().to_mkldnn().requires_grad_()
y1 = m(x1)
y2 = m(x2).to_dense()
loss1 = y1.sum()
loss2 = y2.sum()
loss1.backward()
loss2.backward()
self.assertEqual(y1, y2)
self.assertEqual(x1.grad, x2.grad.to_dense())
    @unittest.skipIf(IS_WINDOWS, "Limit support for bf16 path")
    def test_gelu_bf16(self):
        """Compare fp32 vs bf16 mkldnn GELU (forward and backward) on bf16-capable
        CPUs; otherwise the bf16 call must raise."""
        m = torch.nn.GELU()
        x = torch.randn((4, 5), dtype=torch.float32) * 10
        x1 = x.clone().to_mkldnn().requires_grad_()
        x2 = x.clone().to_mkldnn(torch.bfloat16).requires_grad_()
        if has_bf16_support():
            y1 = m(x1).to_dense()
            y2 = m(x2).to_dense()
            loss1 = y1.sum()
            loss2 = y2.sum()
            loss1.backward()
            loss2.backward()
            # loose tolerances: bf16 has ~3 decimal digits of precision
            self.assertEqual(y1, y2.to(torch.float32), atol=1e-1, rtol=0)
            self.assertEqual(x1.grad.to_dense(), x2.grad.to_dense(torch.float32), atol=1e-2, rtol=0)
        else:
            msg = r"bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
            self.assertRaisesRegex(RuntimeError,
                                   msg,
                                   lambda: m(x2))
    def _test_prelu_base(self, size, num_channels):
        """Compare PReLU three ways: dense, fully-mkldnn (converted module), and
        mkldnn input with dense (aten) weight; forward and backward."""
        x = torch.randn(size, dtype=torch.float32)
        x1 = x.clone().requires_grad_()
        x2 = x.clone().to_mkldnn().requires_grad_()
        x3 = x.clone().to_mkldnn().requires_grad_()
        m1 = torch.nn.PReLU(num_channels)
        m2 = mkldnn_utils.to_mkldnn(copy.deepcopy(m1))
        m3 = copy.deepcopy(m1)
        y1 = m1(x1)
        y2 = m2(x2).to_dense()
        y3 = m3(x3).to_dense()  # Only convert data to mkldnn, weight is Aten tensor
        loss1 = y1.sum()
        loss1.backward()
        loss2 = y2.sum()
        loss2.backward()
        loss3 = y3.sum()
        loss3.backward()
        self.assertEqual(y1, y2)
        self.assertEqual(y1, y3)
        self.assertEqual(x1.grad, x2.grad.to_dense())
        self.assertEqual(x1.grad, x3.grad.to_dense())
    def test_prelu(self):
        # (size, num_channels) pairs covering 1-d through 5-d inputs, with both
        # a single shared parameter and one parameter per channel.
        self._test_prelu_base(torch.Size([16]), 1)
        self._test_prelu_base(torch.Size([16, 64]), 1)
        self._test_prelu_base(torch.Size([16, 64]), 64)
        self._test_prelu_base(torch.Size([16, 64, 112]), 1)
        self._test_prelu_base(torch.Size([16, 64, 112]), 64)
        self._test_prelu_base(torch.Size([16, 64, 112, 112]), 1)
        self._test_prelu_base(torch.Size([16, 64, 112, 112]), 64)
        self._test_prelu_base(torch.Size([16, 64, 112, 112, 1]), 1)
        self._test_prelu_base(torch.Size([16, 64, 112, 112, 1]), 64)
    @unittest.skipIf(IS_WINDOWS, "Limit support for bf16 path")
    def _test_prelu_bf16_base(self, size, num_channels):
        """Compare fp32 vs bf16 mkldnn PReLU (forward and backward) on bf16-capable
        CPUs; otherwise the bf16 call must raise."""
        if has_bf16_support():
            x = torch.randn(size, dtype=torch.float32)
            x_fp32 = x.clone().to_mkldnn().requires_grad_()
            x_bf16 = x.clone().to_mkldnn(torch.bfloat16).requires_grad_()
            m = mkldnn_utils.to_mkldnn(torch.nn.PReLU())
            m_bf16 = mkldnn_utils.to_mkldnn(torch.nn.PReLU(), torch.bfloat16)
            y = m(x_fp32).to_dense()
            y_bf16 = m_bf16(x_bf16).to_dense()
            self.assertEqual(y, y_bf16.to(torch.float32), atol=1e-1, rtol=1e-3)
            loss = y.sum()
            loss.backward()
            loss_bf16 = y_bf16.sum()
            loss_bf16.backward()
            self.assertEqual(x_fp32.grad.to_dense(), x_bf16.grad.to_dense(torch.float32))
        else:
            x_bf16 = torch.randn(size, dtype=torch.bfloat16).requires_grad_()
            m_bf16 = mkldnn_utils.to_mkldnn(torch.nn.PReLU(), torch.bfloat16)
            msg = r"bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
            self.assertRaisesRegex(RuntimeError,
                                   msg,
                                   lambda: m_bf16(x_bf16))
    def test_prelu_bf16(self):
        # NOTE(review): unlike test_prelu, the [16, 64, 112, 112] 4-d shapes are
        # not covered here — possibly to bound runtime; confirm this is intentional.
        self._test_prelu_bf16_base(torch.Size([16]), 1)
        self._test_prelu_bf16_base(torch.Size([16, 64]), 1)
        self._test_prelu_bf16_base(torch.Size([16, 64]), 64)
        self._test_prelu_bf16_base(torch.Size([16, 64, 112]), 1)
        self._test_prelu_bf16_base(torch.Size([16, 64, 112]), 64)
        self._test_prelu_bf16_base(torch.Size([16, 64, 112, 112, 1]), 1)
        self._test_prelu_bf16_base(torch.Size([16, 64, 112, 112, 1]), 64)
    def _test_max_pool_base(self, dim, input):
        """Compare MaxPool{2,3}d on mkldnn vs dense input over strides and
        ceil_mode, forward and backward."""
        pool_module = {2: torch.nn.MaxPool2d, 3: torch.nn.MaxPool3d}
        for stride in [1, 2, 3]:
            for ceil_mode in [False, True]:
                # larger kernel with ceil_mode exercises the partial-window edge case
                max_pool = pool_module[dim](
                    kernel_size=3 if not ceil_mode else 7,
                    stride=stride,
                    padding=1,
                    ceil_mode=ceil_mode)
                x1 = input.clone().requires_grad_()
                x2 = input.clone().to_mkldnn().requires_grad_()
                y1 = max_pool(x1)
                y2 = max_pool(x2).to_dense()
                loss1 = y1.sum()
                loss2 = y2.sum()
                loss1.backward()
                loss2.backward()
                self.assertEqual(y1, y2)
                self.assertEqual(x1.grad, x2.grad.to_dense())
    def test_max_pool2d(self):
        N = torch.randint(3, 10, (1,)).item()
        C = torch.randint(3, 10, (1,)).item()
        for H, W in [(64, 64), (35, 39), (16, 19), [7, 8]]:
            x = torch.randn(N, C, H, W, dtype=torch.float32) * 10
            self._test_max_pool_base(dim=2, input=x)
    def test_max_pool3d(self):
        N = torch.randint(3, 10, (1,)).item()
        C = torch.randint(3, 10, (1,)).item()
        for D, H, W in [(64, 64, 64), (35, 39, 35), (16, 19, 20), [7, 8, 9]]:
            x = torch.randn(N, C, D, H, W, dtype=torch.float32) * 10
            self._test_max_pool_base(dim=3, input=x)
    @unittest.skipIf(IS_WINDOWS, "Limit support for bf16 path")
    def _test_max_pool_bf16_base(self, dim, input):
        """Compare fp32 vs bf16 mkldnn MaxPool{2,3}d (forward only) on bf16-capable
        CPUs; otherwise the bf16 call must raise."""
        pool_module = {2: torch.nn.MaxPool2d, 3: torch.nn.MaxPool3d}
        x_bf16 = input.bfloat16()
        for stride in [1, 2, 3]:
            for ceil_mode in [False, True]:
                max_pool = pool_module[dim](
                    kernel_size=3 if not ceil_mode else 7,
                    stride=stride,
                    padding=1,
                    ceil_mode=ceil_mode)
                if has_bf16_support():
                    y = max_pool(input.to_mkldnn()).to_dense()
                    y_bf16 = max_pool(x_bf16.to_mkldnn()).to_dense(torch.float32)
                    self.assertEqual(y, y_bf16, atol=0.1, rtol=1e-3)
                else:
                    msg = "mkldnn_max_pool%dd: bf16 path needs the cpu support avx512bw, avx512vl and avx512dq" % dim
                    self.assertRaisesRegex(RuntimeError,
                                           msg,
                                           lambda: max_pool(x_bf16.to_mkldnn()))
    def test_max_pool2d_bf16(self):
        N = torch.randint(3, 10, (1,)).item()
        C = torch.randint(3, 10, (1,)).item()
        for H, W in [(64, 64), (35, 39), (16, 19), [7, 8]]:
            x = torch.randn(N, C, H, W, dtype=torch.float32) * 10
            self._test_max_pool_bf16_base(dim=2, input=x)
    def test_max_pool3d_bf16(self):
        N = torch.randint(3, 10, (1,)).item()
        C = torch.randint(3, 10, (1,)).item()
        for D, H, W in [(64, 64, 64), (35, 39, 35), (16, 19, 20), [7, 8, 9]]:
            x = torch.randn(N, C, D, H, W, dtype=torch.float32) * 10
            self._test_max_pool_bf16_base(dim=3, input=x)
def test_max_pool2d_stride_none(self):
N = torch.randint(3, 10, (1,)).item()
C = torch.randint(3, 10, (1,)).item()
for H, W in [(64, 64), (35, 39), (16, 19), [7, 8]]:
x = torch.randn(N, C, H, W, dtype=torch.float32) * 10
for ceil_mode in [False, True]:
y1 = F.max_pool2d(
x,
kernel_size=3 if not ceil_mode else 7,
stride=None,
padding=1,
ceil_mode=ceil_mode)
y2 = F.max_pool2d(
x.to_mkldnn(),
kernel_size=3 if not ceil_mode else 7,
stride=None,
padding=1,
ceil_mode=ceil_mode)
self.assertEqual(y1, y2.to_dense())
    def test_max_pool_unsupported(self):
        # OneDNN not support dilation max_pooling, will be available in v2.0.
        N = torch.randint(3, 10, (1,)).item()
        C = torch.randint(3, 10, (1,)).item()
        # 2d dilation case
        x = torch.randn(N, C, 7, 7, dtype=torch.float32).to_mkldnn()
        max_pool2d = torch.nn.MaxPool2d(
            kernel_size=3,
            stride=3,
            padding=1,
            dilation=2)
        self.assertRaisesRegex(RuntimeError,
                               'mkldnn_max_pool2d does not support dilation case',
                               lambda: max_pool2d(x))
        # 3d dilation case
        x = torch.randn(N, C, 7, 7, 7, dtype=torch.float32).to_mkldnn()
        max_pool3d = torch.nn.MaxPool3d(
            kernel_size=3,
            stride=3,
            padding=1,
            dilation=2)
        self.assertRaisesRegex(RuntimeError,
                               'mkldnn_max_pool3d does not support dilation case',
                               lambda: max_pool3d(x))
    def _test_avg_pool_base(self, dim, input):
        """Compare AvgPool{2,3}d on mkldnn vs dense input for both
        count_include_pad settings, forward and backward."""
        avg_module = {2: torch.nn.AvgPool2d, 3: torch.nn.AvgPool3d}
        for count_include_pad in [True, False]:
            avg_pool = avg_module[dim](
                kernel_size=3,
                stride=2,
                padding=1,
                count_include_pad=count_include_pad)
            x1 = input.clone().requires_grad_()
            x2 = input.clone().to_mkldnn().requires_grad_()
            y1 = avg_pool(x1)
            y2 = avg_pool(x2).to_dense()
            loss1 = y1.sum()
            loss2 = y2.sum()
            loss1.backward()
            loss2.backward()
            self.assertEqual(y1, y2)
            self.assertEqual(x1.grad, x2.grad.to_dense())
    def test_avg_pool2d(self):
        N = torch.randint(3, 10, (1,)).item()
        C = torch.randint(3, 10, (1,)).item()
        x = torch.randn(N, C, 64, 64, dtype=torch.float32) * 10
        self._test_avg_pool_base(dim=2, input=x)
    def test_avg_pool3d(self):
        N = torch.randint(3, 10, (1,)).item()
        C = torch.randint(3, 10, (1,)).item()
        x = torch.randn(N, C, 64, 64, 64, dtype=torch.float32) * 10
        self._test_avg_pool_base(dim=3, input=x)
    @unittest.skipIf(IS_WINDOWS, "Limit support for bf16 path")
    def _test_avg_pool_bf16_base(self, dim, input):
        """Compare fp32 vs bf16 mkldnn AvgPool{2,3}d (forward only) on bf16-capable
        CPUs; otherwise the bf16 call must raise."""
        avg_module = {2: torch.nn.AvgPool2d, 3: torch.nn.AvgPool3d}
        x_bf16 = input.bfloat16()
        for count_include_pad in [True, False]:
            avg_pool = avg_module[dim](
                kernel_size=3,
                stride=2,
                padding=1,
                count_include_pad=count_include_pad)
            if has_bf16_support():
                y = avg_pool(input.to_mkldnn()).to_dense()
                y_bf16 = avg_pool(x_bf16.to_mkldnn()).to_dense(torch.float)
                self.assertEqual(y, y_bf16, atol=1e-1, rtol=1e-3)
            else:
                msg = "mkldnn_avg_pool%dd: bf16 path needs the cpu support avx512bw, avx512vl and avx512dq" % dim
                self.assertRaisesRegex(RuntimeError,
                                       msg,
                                       lambda: avg_pool(x_bf16.to_mkldnn()))
    def test_avg_pool2d_bf16(self):
        N = torch.randint(3, 10, (1,)).item()
        C = torch.randint(3, 10, (1,)).item()
        x = torch.randn(N, C, 64, 64, dtype=torch.float32) * 10
        self._test_avg_pool_bf16_base(dim=2, input=x)
    def test_avg_pool3d_bf16(self):
        N = torch.randint(3, 10, (1,)).item()
        C = torch.randint(3, 10, (1,)).item()
        x = torch.randn(N, C, 64, 64, 64, dtype=torch.float32) * 10
        self._test_avg_pool_bf16_base(dim=3, input=x)
def test_avg_pool2d_stride_none(self):
N = torch.randint(3, 10, (1,)).item()
C = torch.randint(3, 10, (1,)).item()
x = torch.randn(N, C, 64, 64, dtype=torch.float32) * 10
for count_include_pad in [True, False]:
y1 = F.avg_pool2d(
x,
kernel_size=3,
stride=None,
padding=1,
count_include_pad=count_include_pad)
y2 = F.avg_pool2d(
x.to_mkldnn(),
kernel_size=3,
stride=None,
padding=1,
count_include_pad=count_include_pad)
self.assertEqual(y1, y2.to_dense())
def test_adaptive_avg_pool2d(self):
N = torch.randint(3, 10, (1,)).item()
C = torch.randint(3, 10, (1,)).item()
x = torch.randn(N, C, 224, 224, dtype=torch.float32) * 100
adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d(7)
x1 = x.clone().requires_grad_()
x2 = x.clone().to_mkldnn().requires_grad_()
y1 = adaptive_avg_pool2d(x1)
y2 = adaptive_avg_pool2d(x2).to_dense()
loss1 = y1.sum()
loss2 = y2.sum()
loss1.backward()
loss2.backward()
self.assertEqual(y1, y2)
self.assertEqual(x1.grad, x2.grad.to_dense())
@unittest.skipIf(IS_WINDOWS, "Limit support for bf16 path")
def test_adaptive_avg_pool2d_bf16(self):
N = torch.randint(3, 10, (1,)).item()
C = torch.randint(3, 10, (1,)).item()
x = torch.randn(N, C, 224, 224, dtype=torch.float32) * 100
x_bf16 = x.bfloat16()
adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d(7)
if has_bf16_support():
y = adaptive_avg_pool2d(x.to_mkldnn()).to_dense()
y_bf16 = adaptive_avg_pool2d(x.to_mkldnn()).to_dense(torch.float32)
self.assertEqual(y, y_bf16, atol=1e-1, rtol=1e-3)
else:
msg = "mkldnn_adaptive_avg_pool2d: bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
self.assertRaisesRegex(RuntimeError,
msg,
lambda: adaptive_avg_pool2d(x_bf16.to_mkldnn()))
    def _test_batch_norm_base(self, dim, channels, input):
        """Inference-mode BatchNorm{2,3}d: converted mkldnn module must match the
        dense module, and must survive serialization and tracing."""
        bn_module = {2 : torch.nn.BatchNorm2d, 3 : torch.nn.BatchNorm3d}
        bn = bn_module[dim](channels).float().train(False)
        mkldnn_bn = mkldnn_utils.to_mkldnn(copy.deepcopy(bn))
        self.assertEqual(
            bn(input),
            mkldnn_bn(input.to_mkldnn()).to_dense())
        self._test_serialization(mkldnn_bn, (input.to_mkldnn(),))
        self._test_tracing(mkldnn_bn, (input.to_mkldnn(),))
    def _test_batch_norm_train_base(self, dim, channels, input):
        """Training-mode BatchNorm2d on mkldnn input must match dense: outputs,
        input/weight gradients, and (if tracked) running statistics."""
        # TODO: support 3d batchnorm training.
        bn_module = {2 : torch.nn.BatchNorm2d}
        # TODO: support none affine.
        options = itertools.product([True], [True, False])
        for affine, track_running_stats in options:
            bn = bn_module[dim](
                num_features=channels,
                affine=affine,
                track_running_stats=track_running_stats).float().train(True)
            mkldnn_bn = copy.deepcopy(bn)
            x1 = input.clone().requires_grad_()
            x2 = input.clone().to_mkldnn().requires_grad_()
            y1 = bn(x1)
            y2 = mkldnn_bn(x2).to_dense()
            loss1 = y1.sum()
            loss2 = y2.sum()
            loss1.backward()
            loss2.backward()
            self.assertEqual(y1, y2)
            self.assertEqual(x1.grad, x2.grad.to_dense())
            self.assertEqual(bn.weight.grad, mkldnn_bn.weight.grad, rtol=1e-3, atol=1e-3)
            if track_running_stats:
                # running stats are updated in place during the forward pass
                self.assertEqual(bn.running_mean, mkldnn_bn.running_mean)
                self.assertEqual(bn.running_var, mkldnn_bn.running_var, rtol=1e-5, atol=1e-5)
    def test_batch_norm_2d(self):
        N = torch.randint(3, 10, (1,)).item()
        C = torch.randint(3, 100, (1,)).item()
        x = torch.randn(N, C, 35, 45, dtype=torch.float32) * 10
        self._test_batch_norm_base(dim=2, channels=C, input=x)
        self._test_batch_norm_train_base(dim=2, channels=C, input=x)
    def test_batch_norm_3d(self):
        N = torch.randint(3, 10, (1,)).item()
        C = torch.randint(3, 100, (1,)).item()
        x = torch.randn(N, C, 30, 30, 30, dtype=torch.float32) * 10
        self._test_batch_norm_base(dim=3, channels=C, input=x)
    @unittest.skipIf(IS_WINDOWS, "Limit support for bf16 path")
    def _test_batch_norm_bf16_base(self, dim, channels, input):
        """Intended to compare the fp32 vs bf16 batch-norm paths on bf16-capable
        CPUs; on other CPUs the bf16 mkldnn call must raise."""
        bn_module = {2 : torch.nn.BatchNorm2d, 3 : torch.nn.BatchNorm3d}
        x_bf16 = input.bfloat16()
        # TODO: support training
        for train in [False]:
            bn = bn_module[dim](channels).float().train(train)
            mkldnn_bn = mkldnn_utils.to_mkldnn(copy.deepcopy(bn))
            if has_bf16_support():
                # NOTE(review): both lines below run the dense `bn` on fp32 data
                # (`to_mkldnn().to_dense()` is a round trip), so `mkldnn_bn` and
                # `x_bf16` are unused on this path and the comparison is
                # fp32-vs-fp32 — confirm whether the bf16 mkldnn path was meant
                # to be exercised here.
                y = bn(input.to_mkldnn().to_dense())
                y_bf16 = bn(input.to_mkldnn().to_dense(torch.float))
                self.assertEqual(y, y_bf16, atol=1e-1, rtol=1e-3)
            else:
                msg = "mkldnn_batch_norm: bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
                self.assertRaisesRegex(RuntimeError,
                                       msg,
                                       lambda: bn(x_bf16.to_mkldnn()))
    def test_batch_norm_2d_bf16(self):
        N = torch.randint(3, 10, (1,)).item()
        C = torch.randint(3, 100, (1,)).item()
        x = torch.randn(N, C, 35, 45, dtype=torch.float32) * 10
        self._test_batch_norm_bf16_base(dim=2, channels=C, input=x)
    def test_batch_norm_3d_bf16(self):
        N = torch.randint(3, 10, (1,)).item()
        C = torch.randint(3, 100, (1,)).item()
        x = torch.randn(N, C, 30, 30, 30, dtype=torch.float32) * 10
        self._test_batch_norm_bf16_base(dim=3, channels=C, input=x)
    def test_add(self):
        """torch.add on mkldnn tensors must match dense: plain add, alpha-scaled
        add, in-place add_, and add with an out= argument (including out aliasing
        either input)."""
        N = torch.randint(3, 10, (1,)).item()
        C = torch.randint(3, 100, (1,)).item()
        alpha = torch.randn(1, dtype=torch.float32).item()
        x = torch.randn(N, C, 35, 45, dtype=torch.float32) * 10
        y = torch.randn(N, C, 35, 45, dtype=torch.float32) * 10
        mx = x.to_mkldnn()
        my = y.to_mkldnn()
        # add
        self.assertEqual(
            x + y,
            (mx + my).to_dense())
        self.assertEqual(
            torch.add(x, y, alpha=alpha),
            torch.add(mx, my, alpha=alpha).to_dense())
        # add_ (mutates x and mx in lockstep; later checks build on the mutated values)
        x += y
        mx += my
        self.assertEqual(x, mx.to_dense())
        # add_out
        out = x.clone()
        mkldnn_out = out.to_mkldnn()
        torch.add(x, y, alpha=alpha, out=out)
        torch.add(mx, my, alpha=alpha, out=mkldnn_out)
        self.assertEqual(out, mkldnn_out.to_dense())
        # add_out inplace case: first input
        torch.add(x, y, alpha=alpha, out=x)
        torch.add(mx, my, alpha=alpha, out=mx)
        self.assertEqual(x, mx.to_dense())
        # add_out inplace case: second input
        torch.add(x, y, alpha=alpha, out=y)
        torch.add(mx, my, alpha=alpha, out=my)
        self.assertEqual(y, my.to_dense())
    def test_mul(self):
        """torch.mul on mkldnn tensors must match dense: tensor*tensor,
        tensor*scalar, in-place mul_, and mul with an out= argument."""
        N = torch.randint(3, 10, (1,)).item()
        C = torch.randint(3, 100, (1,)).item()
        value = torch.randn(1, dtype=torch.float32).item()
        x = torch.randn(N, C, 35, 45, dtype=torch.float32) * 10
        y = torch.randn(N, C, 35, 45, dtype=torch.float32) * 10
        mx = x.to_mkldnn()
        my = y.to_mkldnn()
        # mul
        self.assertEqual(
            x * y,
            (mx * my).to_dense())
        self.assertEqual(
            x * value,
            (mx * value).to_dense())
        self.assertEqual(
            torch.mul(x, y),
            torch.mul(mx, my).to_dense())
        self.assertEqual(
            torch.mul(x, value),
            torch.mul(mx, value).to_dense())
        # mul_ (mutates x and mx in lockstep)
        x *= y
        mx *= my
        self.assertEqual(x, mx.to_dense())
        x *= value
        mx *= value
        self.assertEqual(x, mx.to_dense())
        # mul_out
        out = x.clone()
        mkldnn_out = out.to_mkldnn()
        torch.mul(x, y, out=out)
        torch.mul(mx, my, out=mkldnn_out)
        self.assertEqual(out, mkldnn_out.to_dense())
        out = x.clone()
        mkldnn_out = out.to_mkldnn()
        torch.mul(x, value, out=out)
        torch.mul(mx, value, out=mkldnn_out)
        self.assertEqual(out, mkldnn_out.to_dense())
    def test_0_dimension_tensor(self):
        """Ops involving zero-element tensors (a size-0 dimension, not rank 0)
        should work in inference but raise during training."""
        x = torch.rand([20, 20, 1, 1], dtype=torch.float)
        # y has a size-0 dimension, so it contains zero elements.
        y = torch.rand([20, 20, 0, 1], dtype=torch.float)
        # unary ops work without modification
        out_relu = torch.relu(y)
        out_relu_mkldnn = torch.relu(y.to_mkldnn()).to_dense()
        self.assertEqual(out_relu, out_relu_mkldnn)
        out_mul = x * y
        out_mul_mkldnn = (x.to_mkldnn() * y.to_mkldnn()).to_dense()
        self.assertEqual(out_mul, out_mul_mkldnn)
        out_add = x + y
        out_add_mkldnn = (x.to_mkldnn() + y.to_mkldnn()).to_dense()
        self.assertEqual(out_add, out_add_mkldnn)
        # With autograd enabled, zero-element mkldnn tensors are rejected.
        x.requires_grad_(True)
        y.requires_grad_(True)
        with self.assertRaisesRegex(RuntimeError, "0-dimension Tensor in training"):
            x.to_mkldnn() + y.to_mkldnn()
        # Shape mismatch still reports the usual broadcast error.
        with self.assertRaisesRegex(RuntimeError, "must match"):
            torch.rand([5]).to_mkldnn() + torch.rand([0]).to_mkldnn()
        # Conv with an empty batch should still agree with the eager path.
        C = 7
        m = torch.nn.Conv2d(C, C, 3)
        x = torch.randn(0, C, C, 8, dtype=torch.float)
        out_eager = m(x)
        out_mkldnn = mkldnn_utils.to_mkldnn(m)(x)
        self.assertEqual(out_eager, out_mkldnn)
def test_view(self):
x = torch.randn(3, 4, 5, dtype=torch.float32).to_mkldnn()
self.assertRaisesRegex(RuntimeError,
"Change to use reshape",
lambda: x.view(x.size(0), -1))
    def test_reshape(self):
        """reshape on an mkldnn tensor matches the dense result."""
        x = torch.randn(3, 4, 5, dtype=torch.float32) * 10
        size = (x.size(0), -1)
        self.assertEqual(
            x.reshape(size),
            x.to_mkldnn().reshape(size).to_dense(),
        )
        # test whether share same memory for plain format tensor
        # (the in-place add_ on the reshaped view must be visible through a
        # fresh reshape of y, which only holds if memory is shared)
        y = x.to_mkldnn()
        z = y.reshape(size).add_(y.reshape(size))
        self.assertEqual(
            y.reshape(size).to_dense(),
            z.to_dense(),
        )
    def test_reshape_blocked_format(self):
        """reshape must also work on mkldnn tensors in blocked (non-plain) layout."""
        # construct an mkldnn blocked tensor with mkldnn conv2d
        C = 7
        m = mkldnn_utils.to_mkldnn(torch.nn.Conv2d(C, C, 3))
        x = torch.randn(1, C, 8, 8).to_mkldnn()
        # mkldnn tensor w/ blocked format
        y_block = m(x)
        # aten tensor w/ plain format
        y_plain = y_block.to_dense()
        y_block_reshape = y_block.reshape(C, -1)
        y_plain_reshape = y_plain.reshape(C, -1)
        self.assertEqual(y_plain_reshape, y_block_reshape.to_dense())
    def test_reshape_backward(self):
        """Gradients flowing through an mkldnn reshape match the dense path."""
        x = torch.randn(3, 4, 5, dtype=torch.float32) * 10
        # Flatten trailing dims: (3, 4, 5) -> (3, 20), matching in_features below.
        size = (x.size(0), -1)
        x1 = x.clone().requires_grad_()
        x2 = x.clone().to_mkldnn().requires_grad_()
        in_features = 20
        out_features = torch.randint(3, 100, (1,)).item()
        linear = torch.nn.Linear(in_features, out_features).float()
        y1 = linear(x1.reshape(size)).sum()
        y2 = linear(x2.reshape(size).to_dense()).sum()
        y1.backward()
        y2.backward()
        self.assertEqual(x1.grad, x2.grad.to_dense())
def test_clone(self):
x = torch.randn(4, 5, dtype=torch.float32) * 10
self.assertEqual(
x.clone(),
x.to_mkldnn().clone().to_dense(),
)
# test whether share same memory
y = x.to_mkldnn()
z = y.clone().add_(y)
self.assertNotEqual(
y.to_dense(),
z.to_dense(),
)
def test_transpose(self):
x = torch.randn(3, 4, 5, dtype=torch.float32) * 10
for dim1 in range(x.ndim):
for dim2 in range(x.ndim):
self.assertEqual(
x.transpose(dim1, dim2),
x.to_mkldnn().transpose(dim1, dim2).to_dense(),
)
def test_linear_non_contiguous_weight(self):
in_features = torch.randint(3, 10, (1,)).item()
out_features = torch.randint(3, 100, (1,)).item()
x = torch.randn(3, in_features, dtype=torch.float32) * 10
w = torch.randn(in_features, out_features, dtype=torch.float32)
for bias in [True, False]:
x1 = x.clone().requires_grad_()
x2 = x.clone().to_mkldnn().requires_grad_()
linear = torch.nn.Linear(in_features, out_features).float()
linear.weight = torch.nn.Parameter(w.t())
mkldnn_linear = copy.deepcopy(linear)
y1 = linear(x1).sum()
y2 = mkldnn_linear(x2).to_dense().sum()
y1.backward()
y2.backward()
self.assertEqual(x1.grad, x2.grad.to_dense())
self.assertEqual(linear.weight.grad, mkldnn_linear.weight.grad)
if bias:
self.assertEqual(linear.bias.grad, mkldnn_linear.bias.grad)
    def test_linear(self):
        """mkldnn-converted Linear matches eager Linear, and survives
        serialization and jit tracing."""
        in_features = torch.randint(3, 10, (1,)).item()
        out_features = torch.randint(3, 100, (1,)).item()
        x = torch.randn(3, in_features, dtype=torch.float32) * 10
        for bias in [True, False]:
            linear = torch.nn.Linear(in_features, out_features, bias=bias).float()
            # deepcopy first so converting to mkldnn does not mutate `linear`
            mkldnn_linear = mkldnn_utils.to_mkldnn(copy.deepcopy(linear))
            self.assertEqual(
                linear(x),
                mkldnn_linear(x.to_mkldnn()).to_dense())
            self._test_serialization(mkldnn_linear, (x.to_mkldnn(),))
            self._test_tracing(mkldnn_linear, (x.to_mkldnn(),))
def test_linear_backward(self):
in_features = torch.randint(3, 10, (1,)).item()
out_features = torch.randint(3, 100, (1,)).item()
x = torch.randn(3, in_features, dtype=torch.float32) * 10
for bias in [True, False]:
x1 = x.clone().requires_grad_()
x2 = x.clone().to_mkldnn().requires_grad_()
linear = torch.nn.Linear(in_features, out_features).float()
mkldnn_linear = copy.deepcopy(linear)
y1 = linear(x1).sum()
y2 = mkldnn_linear(x2).to_dense().sum()
y1.backward()
y2.backward()
self.assertEqual(x1.grad, x2.grad.to_dense())
self.assertEqual(linear.weight.grad, mkldnn_linear.weight.grad)
if bias:
self.assertEqual(linear.bias.grad, mkldnn_linear.bias.grad)
    @unittest.skipIf(IS_WINDOWS, "Limit support for bf16 path")
    def test_linear_bf16(self):
        """bf16 mkldnn Linear should approximate the fp32 result on supported
        CPUs, and raise a clear error on CPUs without bf16 support."""
        in_features = torch.randint(3, 10, (1,)).item()
        out_features = torch.randint(3, 100, (1,)).item()
        x = torch.randn(3, in_features, dtype=torch.float32) * 10
        x_bf16 = x.bfloat16()
        for bias in [True, False]:
            linear = torch.nn.Linear(in_features, out_features, bias=bias).float()
            mkldnn_linear = mkldnn_utils.to_mkldnn(copy.deepcopy(linear))
            mkldnn_linear_bf16 = mkldnn_utils.to_mkldnn(copy.deepcopy(linear), torch.bfloat16)
            # has_bf16_support: presumably a module-level CPU-capability probe
            # defined earlier in this file.
            if has_bf16_support():
                y = mkldnn_linear(x.to_mkldnn()).to_dense()
                y_bf16 = mkldnn_linear_bf16(x_bf16.to_mkldnn()).to_dense(torch.float32)
                # Loose tolerance: bf16 has ~8 bits of mantissa.
                self.assertEqual(y, y_bf16, atol=1e-1, rtol=1e-3)
            else:
                msg = "mkldnn_linear: bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
                self.assertRaisesRegex(RuntimeError,
                                       msg,
                                       lambda: mkldnn_linear_bf16(x_bf16.to_mkldnn()))
def test_softmax(self):
x = torch.randn(3, 4, 5, dtype=torch.float32) * 10
for dim in range(x.ndim):
softmax = torch.nn.Softmax(dim=dim)
self.assertEqual(
softmax(x),
softmax(x.to_mkldnn()).to_dense())
def test_sigmoid(self):
x = torch.randn(4, 5, dtype=torch.float32) * 10
mkldnn_x = x.to_mkldnn()
self.assertEqual(
torch.sigmoid(x),
torch.sigmoid(mkldnn_x).to_dense(),
)
# inplace
torch.sigmoid_(x)
torch.sigmoid_(mkldnn_x)
self.assertEqual(x, mkldnn_x.to_dense())
def test_tanh(self):
x = torch.randn(4, 5, dtype=torch.float32) * 10
mkldnn_x = x.to_mkldnn()
self.assertEqual(
torch.tanh(x),
torch.tanh(mkldnn_x).to_dense(),
)
# inplace
torch.tanh_(x)
torch.tanh_(mkldnn_x)
self.assertEqual(x, mkldnn_x.to_dense())
    def _test_serialization(self, module, inputs):
        """Round-trip `module` through torch.jit.save/load and check outputs match.

        `module` must return an mkldnn tensor (to_dense is called on outputs).
        TemporaryFileName is presumably a helper imported earlier in this file.
        """
        with TemporaryFileName() as fname:
            torch.jit.save(module, fname)
            loaded = torch.jit.load(fname)
            self.assertEqual(
                module(*inputs).to_dense(),
                loaded(*inputs).to_dense())
def _test_tracing(self, module, inputs):
traced = torch.jit.trace(module, inputs)
self.assertEqual(
module(*inputs).to_dense(),
traced(*inputs).to_dense())
    def test_set_data_tensorimpl_type(self):
        """Assigning an mkldnn tensor to a dense tensor's .data must fail."""
        # Dense tensor has impl of type `TensorImpl`, while MKL-DNN tensor has impl
        # of type `OpaqueTensorImpl<IDeepTensorWrapperPtr>`.
        x = torch.randn((1, 2), dtype=torch.float, device=torch.device('cpu'))
        x_mkldnn = x.to_mkldnn()
        with self.assertRaisesRegex(RuntimeError, 'incompatible tensor type'):
            x.data = x_mkldnn
def test_empty(self):
x1 = torch.empty(4, 5, 2, 3, dtype=torch.float32)
x2 = torch.empty(4, 5, 2, 3, dtype=torch.float32, layout=torch._mkldnn)
self.assertEqual(x1.size(), x2.to_dense().size())
self.assertEqual(x1.dtype, x2.to_dense().dtype)
def test_zero_(self):
x1 = torch.randn(4, 5, dtype=torch.float32) * 10
x2 = x1.clone().to_mkldnn()
self.assertEqual(
x1.zero_(),
x2.zero_().to_dense(),
)
def test_is_mkldnn(self):
x = torch.randn(1, dtype=torch.float32)
self.assertFalse(x.is_mkldnn)
self.assertTrue(x.to_mkldnn().is_mkldnn)
    # legacy constructor/new doesn't support mkldnn tensors
    def test_legacy_new_failure(self):
        """Every overload of the legacy Tensor.new() must raise on mkldnn tensors."""
        x = torch.randn(1, dtype=torch.float32)
        x_mkldnn = x.to_mkldnn()
        self.assertRaises(RuntimeError, lambda: x_mkldnn.new(device='cpu'))
        self.assertRaises(RuntimeError, lambda: x_mkldnn.new(x.storage()))
        self.assertRaises(RuntimeError, lambda: x_mkldnn.new(x))
        self.assertRaises(RuntimeError, lambda: x_mkldnn.new(torch.Size([2, 3])))
        self.assertRaises(RuntimeError, lambda: x_mkldnn.new([6]))
    def test_is_mkldnn_jit(self):
        """is_mkldnn and to_mkldnn() must be usable inside TorchScript."""
        class EnsureMkldnn(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                # Convert only when the input is not already mkldnn.
                if not x.is_mkldnn:
                    x = x.to_mkldnn()
                return x
        m = EnsureMkldnn()
        x = torch.randn(1, dtype=torch.float32)
        self.assertTrue(m(x).is_mkldnn)
        self.assertTrue(m(x.to_mkldnn()).is_mkldnn)
    def _test_imagenet_model(self, model):
        """Run `model` in eval mode on a single ImageNet-sized input and check
        the mkldnn-converted copy produces the same output."""
        model = model.train(False).float()
        mkldnn_model = mkldnn_utils.to_mkldnn(copy.deepcopy(model))
        x = torch.randn(1, 3, 224, 224, dtype=torch.float32)
        with torch.no_grad():
            self.assertEqual(
                model(x),
                mkldnn_model(x.to_mkldnn()).to_dense(),
            )
    @skipIfNoTorchVision
    def test_resnet18(self):
        """End-to-end mkldnn parity check on torchvision ResNet-18."""
        model = torchvision.models.resnet.resnet18(pretrained=False)
        self._test_imagenet_model(model)
    @skipIfNoTorchVision
    def test_resnext50_32x4d(self):
        """End-to-end mkldnn parity check on torchvision ResNeXt-50 32x4d."""
        model = torchvision.models.resnet.resnext50_32x4d(pretrained=False)
        self._test_imagenet_model(model)
# Standard PyTorch test-suite entry point.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_mkldnn.py |
# Owner(s): ["module: unknown"]
import sys
import os
import re
import shutil
import random
import subprocess
import tempfile
import textwrap
import unittest
from typing import List
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
import torch.cuda
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
import torch.utils.cpp_extension
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_utils import load_tests, IS_SANDCASTLE, IS_WINDOWS
# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import TestCase, run_tests
class RandomDatasetMock(torch.utils.data.Dataset):
    """Fixed-length mock dataset yielding a 2-element random tensor per index."""

    def __getitem__(self, index):
        # One value from torch's RNG, one from Python's random module, so both
        # RNG streams participate in DataLoader seeding tests.
        torch_value = torch.rand(1).item()
        python_value = random.uniform(0, 1)
        return torch.tensor([torch_value, python_value])

    def __len__(self):
        return 1000
class TestCheckpoint(TestCase):
    """Tests for torch.utils.checkpoint.checkpoint / checkpoint_sequential."""
    # This runs checkpoint_sequential on each of the nets in
    # module_lists_to_compare, and compares them against the uncheckpointed model.
    # To compare, it checks outputs as well as input gradients and parameter gradients
    def _check_checkpoint_sequential(
        self,
        model,
        module_lists_to_compare,
        num_chunks,
        input,
    ):
        # not checkpointed
        out = model(input)
        out_not_checkpointed = out.detach().clone()
        model.zero_grad()
        out.sum().backward()
        grad_not_checkpointed = {
            name: param.grad.detach().clone()
            for name, param in model.named_parameters()
        }
        input_grad_not_checkpointed = input.grad.detach().clone()
        for model_to_compare in module_lists_to_compare:
            # checkpointed model by passing list of modules
            detached = input.detach()
            detached.requires_grad = True
            # pass list of modules to checkpoint
            out = checkpoint_sequential(model_to_compare, num_chunks, detached)
            out_checkpointed = out.detach().clone()
            model.zero_grad()
            out.sum().backward()
            grad_checkpointed = {
                name: param.grad.detach().clone()
                for name, param in model.named_parameters()
            }
            input_grad_checkpointed = detached.grad.detach().clone()
            # compare outputs as well as the gradients of input and parameters
            self.assertEqual(out_checkpointed, out_not_checkpointed)
            self.assertEqual(input_grad_not_checkpointed, input_grad_checkpointed)
            for name in grad_checkpointed:
                self.assertEqual(grad_checkpointed[name], grad_not_checkpointed[name])
    # Test whether checkpoint is being triggered or not. For this, we check
    # the number of times forward pass happens
    def test_checkpoint_trigger(self):
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                # Counts how many times forward() ran on this module.
                self.counter = 0
            def forward(self, input_var):
                self.counter += 1
                return input_var
        # checkpointed
        modules = [Net() for _ in range(10)]
        for m in modules:
            self.assertEqual(m.counter, 0)
        input_var = torch.randn(3, 4, requires_grad=True)
        out = checkpoint_sequential(modules, 2, input_var)
        for m in modules:
            self.assertEqual(m.counter, 1)
        out.sum().backward()
        # The checkpointed first half is re-run during backward (counter == 2);
        # the remaining modules are not recomputed.
        for m in modules[:(len(modules) // 2)]:
            self.assertEqual(m.counter, 2)
        for m in modules[(len(modules) // 2):]:
            self.assertEqual(m.counter, 1)
    def test_checkpoint_valid(self):
        """Reentrant checkpointing must reject double-backward (create_graph)."""
        model = nn.Sequential(
            nn.Linear(100, 50),
            nn.ReLU(),
            nn.Linear(50, 20),
            nn.ReLU(),
            nn.Linear(20, 5),
            nn.ReLU()
        )
        input_var = torch.randn(1, 100, requires_grad=True)
        # checkpointed
        chunks = 2
        modules = list(model.children())
        out = checkpoint_sequential(modules, chunks, input_var)
        with self.assertRaisesRegex(RuntimeError, "Checkpointing is not compatible"):
            torch.autograd.grad(
                outputs=[out], grad_outputs=[torch.ones(1, 5)], inputs=[input_var], create_graph=True
            )
    def test_checkpoint(self):
        model = nn.Sequential(
            nn.Linear(100, 50),
            nn.ReLU(),
            nn.Linear(50, 20),
            nn.ReLU(),
            nn.Linear(20, 5),
            nn.ReLU()
        )
        # Compare uncheckpointed model with its checkpointed counterparts
        # In addition to running checkpoint_sequential on the nn.Sequential
        # instance, we also run the function on the list of functions within
        # the module.
        self._check_checkpoint_sequential(
            model,
            [list(model.children()), model],
            2,
            torch.randn(1, 100, requires_grad=True)
        )
    def test_checkpoint_module_list(self):
        """checkpoint_sequential must also accept nn.ModuleList contents."""
        class ModuleListNet(nn.Module):
            def __init__(self):
                super(ModuleListNet, self).__init__()
                module_list = [
                    nn.Linear(100, 50),
                    nn.ReLU(),
                    nn.Linear(50, 20),
                    nn.ReLU(),
                    nn.Linear(20, 5),
                    nn.ReLU(),
                ]
                self.module_list = nn.ModuleList(module_list)
            def forward(self, input):
                for layer in self.module_list:
                    input = layer(input)
                return input
        model = ModuleListNet()
        # Compare uncheckpointed model with its checkpointed counterparts.
        self._check_checkpoint_sequential(
            model,
            [list(model.module_list.children()), model.module_list],
            2,
            torch.randn(1, 100, requires_grad=True),
        )
    def test_checkpoint_sequential_deprecated_multiple_args(self):
        """Passing multiple positional inputs is no longer supported."""
        class Two(nn.Module):
            def forward(self, a, b):
                return a, b
        model = nn.Sequential(Two())
        a = torch.randn(1, 100, requires_grad=True)
        b = torch.randn(1, 100, requires_grad=True)
        with self.assertRaises(TypeError):
            checkpoint_sequential(model, 1, a, b)  # type: ignore[call-arg]
    def test_checkpoint_sequential_deprecated_no_args(self):
        """Calling without an input tensor is no longer supported."""
        class Noop(nn.Module):
            def forward(self):
                pass
        model = nn.Sequential(Noop())
        with self.assertRaises(TypeError):
            checkpoint_sequential(model, 1)  # type: ignore[call-arg]
    def test_checkpoint_rng_cpu(self):
        """With preserve_rng_state (default), dropout inside a checkpoint must
        produce the same gradients as the uncheckpointed run."""
        for _ in range(5):
            inp = torch.randn(20000, device='cpu').requires_grad_()
            phase1 = torch.nn.Dropout()
            phase2 = torch.nn.Dropout()
            def run_fn(input):
                return phase2(input)
            # Snapshot RNG so the non-checkpointed rerun sees identical draws.
            state = torch.get_rng_state()
            out = phase1(inp)
            out = checkpoint(run_fn, out)
            out.sum().backward()
            grad_with_checkpointing = inp.grad
            torch.set_rng_state(state)
            inp.grad = None
            out = phase1(inp)
            out = run_fn(out)
            out.sum().backward()
            grad_no_checkpointing = inp.grad
            self.assertEqual(grad_with_checkpointing, grad_no_checkpointing)
    @unittest.skipIf(not HAS_CUDA, 'No CUDA')
    def test_checkpoint_rng_cuda(self):
        """Same as test_checkpoint_rng_cpu, but for the CUDA RNG stream."""
        for _ in range(5):
            inp = torch.randn(20000, device='cuda').requires_grad_()
            phase1 = torch.nn.Dropout()
            phase2 = torch.nn.Dropout()
            def run_fn(input):
                return phase2(input)
            state = torch.cuda.get_rng_state()
            out = phase1(inp)
            out = checkpoint(run_fn, out)
            out.sum().backward()
            grad_with_checkpointing = inp.grad
            torch.cuda.set_rng_state(state)
            inp.grad = None
            out = phase1(inp)
            out = run_fn(out)
            out.sum().backward()
            grad_no_checkpointing = inp.grad
            self.assertEqual(grad_with_checkpointing, grad_no_checkpointing)
    @unittest.skipIf(not HAS_CUDA, 'No CUDA')
    def test_checkpoint_not_preserve_rng_state_and_without_reentrant(self):
        inp = torch.randn(2, device='cuda').requires_grad_()
        layer = torch.nn.Dropout()
        def run_fn(input):
            return layer(input)
        out = checkpoint(run_fn, inp, use_reentrant=False, preserve_rng_state=False)
        out.sum().backward()
        # This should run without error
    def test_checkpoint_non_tensor(self):
        """checkpoint must accept None among its inputs."""
        def run_fn(tensor1, tensor2):
            if tensor2 is None:
                return tensor1
            return tensor1 + tensor2
        input_var = torch.randn(1, 100, requires_grad=True)
        out = checkpoint(run_fn, input_var, None)
        out.sum().backward()
    def test_checkpoint_non_tensor_inputs_outputs(self):
        """checkpoint must pass through non-tensor inputs/outputs (ints, None,
        bools, strings) while still computing correct gradients for tensors."""
        def foo(t1, t2, scale, t3):
            t4 = t1 + t2 * t3
            t5 = t1 * t2 + t3
            t4 *= scale
            t5 *= scale
            return scale, t4, None, True, t5, "bar", t1
        t1 = torch.rand(10, requires_grad=True)
        t2 = torch.rand(10, requires_grad=True)
        t3 = torch.rand(10)
        scale = random.randint(0, 10)
        res = checkpoint(foo, t1, t2, scale, t3)
        self.assertEqual(scale, res[0])
        self.assertEqual((t1 + t2 * t3) * scale, res[1])
        self.assertEqual(None, res[2])
        self.assertEqual(True, res[3])
        self.assertEqual((t1 * t2 + t3) * scale, res[4])
        self.assertEqual("bar", res[5])
        self.assertEqual(t1, res[6])
        # Validate running backward.
        res[1].sum().backward(retain_graph=True)
        res[4].sum().backward(retain_graph=True)
        res[6].sum().backward()
        with self.assertRaisesRegex(RuntimeError, "Trying to backward through the graph a second time"):
            res[6].sum().backward()
        t1_grad = t1.grad
        t2_grad = t2.grad
        # Reset grads, run without checkpoint and validate we receive same grads.
        t1.grad = None
        t2.grad = None
        res = foo(t1, t2, scale, t3)
        torch.autograd.backward([res[1].sum(), res[4].sum(), res[6].sum()])
        self.assertEqual(t1.grad, t1_grad)
        self.assertEqual(t2.grad, t2_grad)
    def test_checkpoint_no_tensors(self):
        """checkpoint must work when no input is a tensor at all."""
        def foo(t1, t2, scale, t3):
            t4 = t1 + t2 * t3
            t5 = t1 * t2 + t3
            t4 *= scale
            t5 *= scale
            return scale, t4, None, True, t5, "bar", t1
        t1 = random.random()
        t2 = random.random()
        t3 = random.random()
        scale = random.randint(0, 10)
        res = checkpoint(foo, t1, t2, scale, t3)
        self.assertEqual(scale, res[0])
        self.assertEqual((t1 + t2 * t3) * scale, res[1])
        self.assertEqual(None, res[2])
        self.assertEqual(True, res[3])
        self.assertEqual((t1 * t2 + t3) * scale, res[4])
        self.assertEqual("bar", res[5])
        self.assertEqual(t1, res[6])
    def test_checkpoint_partial_grad(self):
        """Backward through a subset of outputs works; checkpoint raises when
        no output requires grad."""
        def run_fn(tensor1, tensor2):
            # tensor 2 is used for other application logic
            return tensor1, tensor2
        input_var = torch.randn(1, 4, requires_grad=True)
        input_var2 = torch.randn(1, 4, requires_grad=False)
        out = checkpoint(run_fn, input_var, input_var2)
        out[0].sum().backward()
        def run_fn2(tensor1, tensor2):
            return tensor1
        input_var = torch.randn(1, 4, requires_grad=False)
        input_var2 = torch.randn(1, 4, requires_grad=True)
        with self.assertRaisesRegex(
            RuntimeError,
            r"none of output has requires_grad=True, this checkpoint\(\) is not necessary"
        ):
            out = checkpoint(run_fn2, input_var, input_var2)
            out.sum().backward()
    @unittest.skipIf(not torch.cuda.is_available(), "Test requires CUDA")
    def test_checkpointing_without_reentrant_early_free(self):
        """Non-reentrant checkpointing should free recomputed buffers during
        backward just like a regular backward does."""
        # I don't know how to check if the temporary saved variable buffer
        # get de-allocated directly. So using cuda memory usage as a proxy
        def _do_test(fn, should_free):
            stats: List[int] = []
            def track(x, idx):
                # Track that at each step of the backward, some Tensor were
                # de-allocated (which correspond to the checkpoint storage being
                # emptied at each step)
                def hook(_unused):
                    self.assertEqual(len(stats), idx)
                    torch.cuda.synchronize()
                    stats.append(torch.cuda.memory_allocated())
                    if idx > 0:
                        if should_free:
                            self.assertLess(stats[idx], stats[idx - 1])
                        else:
                            self.assertEqual(stats[idx], stats[idx - 1])
                x.register_hook(hook)
            def test_fn(x):
                # The main property of this function is that it contains multiple
                # operations that save gradients in a chain.
                x = x ** 2
                track(x, 2)
                x = x ** 2
                track(x, 1)
                x = x ** 2
                track(x, 0)
                x = x ** 2
                return x.sum()
            fn(test_fn)
            return stats
        x = torch.zeros(10, device="cuda", requires_grad=True)
        x.grad = torch.zeros_like(x)
        # In a regular backward, buffers get eagerly freed
        non_retain_stats = _do_test(lambda fn: fn(x).backward(), True)
        # In a retain_grad backward, buffers get preserved
        retain_stats = _do_test(lambda fn: fn(x).backward(retain_graph=True), False)
        # In a regular backward with checkpoint, buffers get eagerly freed
        checkpoint_non_retain_stats = _do_test(lambda fn: checkpoint(fn, x, use_reentrant=False).backward(), True)
        # In a retain_grad backward with checkpoint, buffers get preserved
        checkpoint_retain_stats = _do_test(lambda fn: checkpoint(fn, x, use_reentrant=False).backward(retain_graph=True), False)
        self.assertEqual(non_retain_stats, checkpoint_non_retain_stats)
        self.assertEqual(retain_stats, checkpoint_retain_stats)
class TestDataLoaderUtils(TestCase):
    """Basic DataLoader behavior: seeding, batching, drop_last, workers."""
    def setUp(self):
        # A plain Tensor is used as the dataset; see the note in test_single_keep.
        self.dataset = torch.randn(5, 3, 3, 2)
        self.batch_size = 3
    def test_random_seed(self):
        """Same manual seed => same first batch, even with shuffling workers."""
        def run():
            dataloader = torch.utils.data.DataLoader(RandomDatasetMock(),
                                                     batch_size=2,
                                                     num_workers=4,
                                                     shuffle=True)
            return next(iter(dataloader))
        torch.manual_seed(2018)
        x1 = run()
        torch.manual_seed(2018)
        x2 = run()
        self.assertEqual(x1, x2)
    def test_single_keep(self):
        # self.dataset is a Tensor here; technically not a valid input because
        # not a Dataset subclass, but needs to stay working so add ignore's
        # for type checking with mypy
        dataloader : DataLoader = DataLoader(self.dataset, # type: ignore[arg-type]
                                             batch_size=self.batch_size,
                                             num_workers=0,
                                             drop_last=False)
        dataiter = iter(dataloader)
        # 5 samples / batch_size 3 -> 2 batches when the remainder is kept.
        self.assertEqual(len(list(dataiter)), 2)
    def test_single_drop(self):
        dataloader : DataLoader = DataLoader(self.dataset, # type: ignore[arg-type]
                                             batch_size=self.batch_size,
                                             num_workers=0,
                                             drop_last=True)
        dataiter = iter(dataloader)
        # drop_last discards the final partial batch -> 1 batch.
        self.assertEqual(len(list(dataiter)), 1)
    @unittest.skip("FIXME: Intermittent CUDA out-of-memory error on Windows and time-out under ASAN")
    def test_multi_keep(self):
        dataloader : DataLoader = DataLoader(self.dataset, # type: ignore[arg-type]
                                             batch_size=self.batch_size,
                                             num_workers=2,
                                             drop_last=False)
        dataiter = iter(dataloader)
        self.assertEqual(len(list(dataiter)), 2)
    def test_multi_drop(self):
        dataloader : DataLoader = DataLoader(self.dataset, # type: ignore[arg-type]
                                             batch_size=self.batch_size,
                                             num_workers=2,
                                             drop_last=True)
        dataiter = iter(dataloader)
        self.assertEqual(len(list(dataiter)), 1)
# Absolute path of the directory containing this test file.
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
@unittest.skipIf('SKIP_TEST_BOTTLENECK' in os.environ, 'SKIP_TEST_BOTTLENECK is set')
class TestBottleneck(TestCase):
    """Smoke tests for `python -m torch.utils.bottleneck` output sections."""
    def _run(self, command, timeout=30):
        """Returns (return-code, stdout, stderr)"""
        # Fix: dropped the redundant function-local `import subprocess`
        # (the module is already imported at the top of this file).
        p = subprocess.Popen(command, stdout=subprocess.PIPE, # noqa: P204
                             stderr=subprocess.PIPE, shell=True)
        try:
            output, err = p.communicate(timeout=timeout)
        except subprocess.TimeoutExpired:
            p.kill()
            output, err = p.communicate()
        rc = p.returncode
        output_str = output.decode("ascii")
        err_str = err.decode("ascii")
        return (rc, output_str, err_str)
    def _run_bottleneck(self, test_file, scriptargs=''):
        """Run the bottleneck module on `test_file`, forwarding `scriptargs`."""
        curdir = os.path.dirname(os.path.abspath(__file__))
        filepath = '{}/{}'.format(curdir, test_file)
        if scriptargs != '':
            scriptargs = ' {}'.format(scriptargs)
        rc, out, err = self._run(
            '{} -m torch.utils.bottleneck {}{}'.format(sys.executable, filepath, scriptargs))
        return rc, out, err
    def _check_run_args(self):
        # Check that this fails due to missing args
        rc, out, err = self._run_bottleneck('bottleneck_test/test_args.py')
        self.assertEqual(rc, 2, atol=0, rtol=0, msg=self._fail_msg('Missing args should error', out + err))
        # This should succeed
        rc, out, err = self._run_bottleneck('bottleneck_test/test_args.py', '--foo foo --bar bar')
        self.assertEqual(rc, 0, atol=0, rtol=0, msg=self._fail_msg('Should pass args to script', out + err))
    def _fail_msg(self, msg, output):
        return '{}, output was:\n{}'.format(msg, output)
    def _check_environment_summary(self, output):
        results = re.search('Environment Summary', output)
        self.assertIsNotNone(results, self._fail_msg('Should have Environment Summary', output))
        # Up to five lines away from the heading, there should be the version number
        results = re.search(r'Environment Summary.*(\n.*){,5}\nPyTorch \d+\.\d+', output)
        self.assertIsNotNone(results, self._fail_msg('Should have PyTorch version', output))
    def _check_cprof_summary(self, output):
        results = re.search('cProfile output', output)
        self.assertIsNotNone(results, self._fail_msg('Should have cProfile output', output))
        # This assumes that after the cProfile output section we have
        # the autograd profiler output
        results = re.search(r'cProfile output.*(\n.*){6,50}\n.*autograd profiler output', output)
        self.assertIsNotNone(results, self._fail_msg(
            'Distance between cProfile and autograd prof out not in [6, 50] lines', output))
    def _check_autograd_summary(self, output):
        results = re.search('autograd profiler output', output)
        self.assertIsNotNone(results, self._fail_msg('Should have autograd profiler output', output))
        # This assumes that after the autograd profiler output is the end of the
        # output.
        results = re.search(r'autograd profiler output.*(\n.*){6,100}', output)
        self.assertIsNotNone(results, self._fail_msg(
            'Distance between autograd prof output and end of output not in [6, 100] lines', output))
    def _check_cuda(self, output):
        # The CUDA section must appear exactly when CUDA is available.
        if HAS_CUDA:
            results = re.search('CUDA mode', output)
            self.assertIsNotNone(results, self._fail_msg('Should tell users CUDA', output))
        else:
            results = re.search('CUDA mode', output)
            self.assertIsNone(results, self._fail_msg('Should not tell users about CUDA', output))
    @unittest.skipIf(HAS_CUDA, 'CPU-only test')
    def test_bottleneck_cpu_only(self):
        rc, out, err = self._run_bottleneck('bottleneck_test/test.py')
        self.assertEqual(rc, 0, msg='Run failed with\n{}'.format(err))
        self._check_run_args()
        self._check_environment_summary(out)
        self._check_autograd_summary(out)
        self._check_cprof_summary(out)
        self._check_cuda(out)
    @unittest.skipIf(not HAS_CUDA, 'No CUDA')
    def test_bottleneck_cuda(self):
        rc, out, err = self._run_bottleneck('bottleneck_test/test_cuda.py')
        self.assertEqual(rc, 0, msg='Run failed with\n{}'.format(err))
        self._check_run_args()
        self._check_environment_summary(out)
        self._check_autograd_summary(out)
        self._check_cprof_summary(out)
        self._check_cuda(out)
from torch.utils.collect_env import get_pretty_env_info
class TestCollectEnv(TestCase):
    """Smoke test for torch.utils.collect_env."""
    def test_smoke(self):
        info_output = get_pretty_env_info()
        # A well-formed environment report spans many lines; 17 is a loose floor.
        self.assertTrue(info_output.count('\n') >= 17)
class TestONNXUtils(TestCase):
    """Tests for ONNX export helpers: padding conversion and broadcast checks."""
    def test_prepare_onnx_paddings(self):
        # PyTorch pads last-dim-first in (begin, end) pairs; ONNX wants
        # all begins then all ends, first-dim-first.
        sizes = [2, 3, 4]
        pad = [1, 2, 3, 4]
        paddings = _prepare_onnx_paddings(len(sizes), pad)
        self.assertEqual(paddings, [0, 3, 1, 0, 4, 2])
    def test_check_onnx_broadcast(self):
        def try_check_onnx_broadcast(dims1, dims2, expect_broadcast, expect_fail):
            # Wraps check_onnx_broadcast, capturing whether it reported a
            # broadcast and whether it raised ValueError.
            broadcast = True
            fail = False
            try:
                broadcast = check_onnx_broadcast(dims1, dims2)
            except ValueError:
                fail = True
            self.assertEqual(broadcast, expect_broadcast)
            self.assertEqual(fail, expect_fail)
        # Case 1, check the case when len(dims1) < len(dims2) and numel(dims2) > 1
        dims1 = [3, 4]
        dims2 = [2, 3, 4]
        try_check_onnx_broadcast(dims1, dims2, True, True)
        # Case 2, check the case when len(dims1) < len(dims2) and numel(dims2) == 1
        dims1 = [3, 4]
        dims2 = [1, 1, 1]
        try_check_onnx_broadcast(dims1, dims2, True, False)
        # Case 3, check the case when len(dims1) > len(dims2) and numel(dims2) == 1
        dims1 = [1, 1]
        dims2 = [1]
        try_check_onnx_broadcast(dims1, dims2, True, False)
        # Case 4, check the case when len(dims1) > len(dims2) and dims1[x:] == dims2
        dims1 = [2, 3, 4]
        dims2 = [3, 4]
        try_check_onnx_broadcast(dims1, dims2, True, False)
        # Case 5, check the case when len(dims1) > len(dims2), but dims1[x:] != dims2
        dims1 = [2, 3, 4]
        dims2 = [1, 4]
        try_check_onnx_broadcast(dims1, dims2, True, True)
        # Case 6, check the equal case, no broadcast
        dims1 = [3, 4]
        dims2 = [3, 4]
        try_check_onnx_broadcast(dims1, dims2, False, False)
        # Case 7, check the case when len(dims1) == len(dims2), but dims1 != dims2
        dims1 = [3, 4]
        dims2 = [1, 4]
        try_check_onnx_broadcast(dims1, dims2, True, True)
        # Case 8, check the case when len(dims1) == len(dims2) and numel(s2) == 1
        dims1 = [3, 4]
        dims2 = [1, 1]
        try_check_onnx_broadcast(dims1, dims2, True, False)
class TestHipify(TestCase):
    """Import smoke test for the hipify tooling."""
    def test_import_hipify(self):
        # Merely importing must not raise.
        from torch.utils.hipify import hipify_python  # noqa: F401
class TestAssert(TestCase):
    """Tests for torch._assert with bool and tensor conditions, incl. scripting."""
    def test_assert_true(self):
        # verify assertions work as expected
        # bool argument
        torch._assert(True, "foo")
        with self.assertRaisesRegex(AssertionError, "bar"):
            torch._assert(False, "bar")
        # tensor argument
        torch._assert(torch.tensor([True], dtype=torch.bool), "foo")
        with self.assertRaisesRegex(AssertionError, "bar"):
            torch._assert(torch.tensor([False], dtype=torch.bool), "bar")
    def test_assert_scriptable(self):
        class M(torch.nn.Module):
            def forward(self, x):
                torch._assert(x.sum() > 0, "foo")
                return x
        m = M()
        # scriptable
        ms = torch.jit.script(m)
        # data can be passed without errors
        x = torch.randn(4, 4).fill_(1.0)
        ms(x)
        # Scripted asserts surface as torch.jit.Error, not AssertionError.
        with self.assertRaisesRegex(torch.jit.Error, "foo"):
            ms(torch.tensor([False], dtype=torch.bool))
@unittest.skipIf(IS_SANDCASTLE, "cpp_extension is OSS only")
class TestStandaloneCPPJIT(TestCase):
    """Builds and runs a standalone (non-Python-module) C++ executable via
    torch.utils.cpp_extension.load."""
    def test_load_standalone(self):
        build_dir = tempfile.mkdtemp()
        try:
            src_path = os.path.join(build_dir, "main.cpp")
            src = textwrap.dedent("""\
                #include <iostream>
                #include <torch/torch.h>
                int main() {
                    auto x = torch::eye(3);
                    std::cout << x << std::endl;
                }
            """)
            with open(src_path, "wt") as f:
                f.write(src)
            # is_standalone=True makes load() return the executable path
            # instead of importing a Python extension module.
            exec_path = torch.utils.cpp_extension.load(
                "standalone_load_test",
                src_path,
                build_directory=build_dir,
                is_python_module=False,
                is_standalone=True,
            )
            ext = ".exe" if IS_WINDOWS else ""
            self.assertEqual(
                exec_path,
                os.path.join(build_dir, f"standalone_load_test{ext}")
            )
            for shell in [True, False]:
                r = subprocess.run(
                    [exec_path],
                    shell=shell,
                    stdout=subprocess.PIPE,
                )
                self.assertEqual(r.returncode, 0)
                self.assertEqual(
                    # Windows prints "\r\n" for newlines.
                    textwrap.dedent(r.stdout.decode("utf-8")).replace("\r\n", "\n"),
                    textwrap.dedent("""\
                     1  0  0
                     0  1  0
                     0  0  1
                    [ CPUFloatType{3,3} ]
                    """)
                )
        finally:
            shutil.rmtree(build_dir)
class DummyXPUModule(object):
    """Minimal stand-in device module for _register_device_module tests."""

    @staticmethod
    def is_available():
        # Always report the fake backend as present.
        return True
class TestExtensionUtils(TestCase):
    """Tests for torch._register_device_module validation and registration."""
    def test_external_module_register(self):
        # Built-in module
        with self.assertRaisesRegex(RuntimeError, "The runtime module of"):
            torch._register_device_module('cuda', torch.cuda)
        # Wrong device type ('dummmy' is deliberately not a valid device name)
        with self.assertRaisesRegex(RuntimeError, "Expected one of cpu"):
            torch._register_device_module('dummmy', DummyXPUModule)
        with self.assertRaises(AttributeError):
            torch.xpu.is_available()  # type: ignore[attr-defined]
        torch._register_device_module('xpu', DummyXPUModule)
        torch.xpu.is_available()  # type: ignore[attr-defined]
        # No supporting for override
        with self.assertRaisesRegex(RuntimeError, "The runtime module of"):
            torch._register_device_module('xpu', DummyXPUModule)
class TestCppExtensionUtils(TestCase):
    """Compiler-detection helpers in torch.utils.cpp_extension."""
    def test_cpp_compiler_is_ok(self):
        self.assertTrue(torch.utils.cpp_extension.check_compiler_ok_for_platform('c++'))
    def test_cc_compiler_is_ok(self):
        self.assertTrue(torch.utils.cpp_extension.check_compiler_ok_for_platform('cc'))
# Standard PyTorch test-suite entry point.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_utils.py |
from _pytest.junitxml import LogXML, _NodeReporter, bin_xml_escape
from _pytest.terminal import _get_raw_skip_reason
from _pytest.stash import StashKey
from _pytest.reports import TestReport
from _pytest.config.argparsing import Parser
from _pytest.config import filename_arg
from _pytest.config import Config
from _pytest._code.code import ReprFileLocation
from typing import Union
from typing import Optional
import xml.etree.ElementTree as ET
import functools
# a lot of this file is copied from _pytest.junitxml and modified to get rerun info
# Stash key under which the LogXMLReruns reporter instance is stored on the config.
xml_key = StashKey["LogXMLReruns"]()
def pytest_addoption(parser: Parser) -> None:
    """Register the rerun-aware variants of the JUnit XML options/ini settings.

    Mirrors _pytest.junitxml.pytest_addoption with ``*_reruns`` names so the
    two plugins can coexist.
    """
    reporting_group = parser.getgroup("terminal reporting")
    reporting_group.addoption(
        "--junit-xml-reruns",
        action="store",
        dest="xmlpath_reruns",
        metavar="path",
        type=functools.partial(filename_arg, optname="--junit-xml-reruns"),
        default=None,
        help="create junit-xml style report file at given path.",
    )
    reporting_group.addoption(
        "--junit-prefix-reruns",
        action="store",
        metavar="str",
        default=None,
        help="prepend prefix to classnames in junit-xml output",
    )

    # ini settings: (name, help text, extra keyword arguments)
    ini_settings = (
        ("junit_suite_name_reruns",
         "Test suite name for JUnit report",
         {"default": "pytest"}),
        ("junit_logging_reruns",
         "Write captured log messages to JUnit report: "
         "one of no|log|system-out|system-err|out-err|all",
         {"default": "no"}),
        ("junit_log_passing_tests_reruns",
         "Capture log information for passing tests to JUnit report: ",
         {"type": "bool", "default": True}),
        ("junit_duration_report_reruns",
         "Duration time to report: one of total|call",
         {"default": "total"}),
        ("junit_family_reruns",
         "Emit XML for schema: one of legacy|xunit1|xunit2",
         {"default": "xunit2"}),
    )
    for ini_name, ini_help, extra_kwargs in ini_settings:
        parser.addini(ini_name, ini_help, **extra_kwargs)
def pytest_configure(config: Config) -> None:
    """Instantiate and register the rerun-aware JUnit XML reporter.

    Mirrors _pytest.junitxml.pytest_configure, but reads the ``*_reruns``
    variants of the options/ini settings registered in pytest_addoption.
    """
    xmlpath = config.option.xmlpath_reruns
    # Prevent opening xmllog on worker nodes (xdist).
    if xmlpath and not hasattr(config, "workerinput"):
        junit_family = config.getini("junit_family_reruns")
        config.stash[xml_key] = LogXMLReruns(
            xmlpath,
            # NOTE(review): this reads the stock --junit-prefix option, while the
            # --junit-prefix-reruns option registered above is never consumed —
            # confirm which prefix is actually intended here.
            config.option.junitprefix,
            config.getini("junit_suite_name_reruns"),
            config.getini("junit_logging_reruns"),
            config.getini("junit_duration_report_reruns"),
            junit_family,
            config.getini("junit_log_passing_tests_reruns"),
        )
        config.pluginmanager.register(config.stash[xml_key])
def pytest_unconfigure(config: Config) -> None:
    """Deregister and drop the rerun-aware XML reporter, if one was installed."""
    reporter = config.stash.get(xml_key, None)
    if not reporter:
        return
    del config.stash[xml_key]
    config.pluginmanager.unregister(reporter)
class _NodeReporterReruns(_NodeReporter):
    """Node reporter that writes captured content verbatim into the XML."""

    def _prepare_content(self, content: str, header: str) -> str:
        # Skip the "----- Captured ... -----" banner the base class prepends.
        return content

    def _write_content(self, report: TestReport, content: str, jheader: str) -> None:
        if not content:
            return
        element = ET.Element(jheader)
        element.text = bin_xml_escape(content)
        self.append(element)
class LogXMLReruns(LogXML):
    """LogXML subclass that also records 'rerun' outcomes in the JUnit XML."""

    def __init__(self, *args, **kwargs):
        # No extra state; kept for symmetry with the base class.
        super().__init__(*args, **kwargs)

    def append_rerun(self, reporter: _NodeReporter, report: TestReport) -> None:
        """Attach a <rerun> entry (or an xfail 'skipped' entry) for `report`."""
        if hasattr(report, "wasxfail"):
            reporter._add_simple("skipped", "xfail-marked test passes unexpectedly")
        else:
            assert report.longrepr is not None
            # Prefer the short crash location/message when available.
            reprcrash: Optional[ReprFileLocation] = getattr(
                report.longrepr, "reprcrash", None
            )
            if reprcrash is not None:
                message = reprcrash.message
            else:
                message = str(report.longrepr)
            message = bin_xml_escape(message)
            reporter._add_simple("rerun", message, str(report.longrepr))

    def pytest_runtest_logreport(self, report: TestReport) -> None:
        # Let the base class record the standard outcomes first.
        super().pytest_runtest_logreport(report)
        if report.outcome == "rerun":
            reporter = self._opentestcase(report)
            self.append_rerun(reporter, report)
        if report.outcome == "skipped":
            if isinstance(report.longrepr, tuple):
                # Prefix the skip reason with the test's nodeid for readability.
                fspath, lineno, reason = report.longrepr
                reason = f"{report.nodeid}: {_get_raw_skip_reason(report)}"
                report.longrepr = (fspath, lineno, reason)

    def node_reporter(self, report: Union[TestReport, str]) -> _NodeReporterReruns:
        """Return (creating if needed) the per-node reporter for `report`."""
        nodeid: Union[str, TestReport] = getattr(report, "nodeid", report)
        # Local hack to handle xdist report order.
        workernode = getattr(report, "node", None)
        key = nodeid, workernode
        if key in self.node_reporters:
            # TODO: breaks for --dist=each
            return self.node_reporters[key]
        reporter = _NodeReporterReruns(nodeid, self)
        self.node_reporters[key] = reporter
        self.node_reporters_ordered.append(reporter)
        return reporter
| pytorch-master | test/conftest.py |
# Owner(s): ["module: tests"]
import torch
import numpy as np
import random
from torch._six import nan
from itertools import permutations, product
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import all_types, all_types_and, floating_types_and
from torch.testing._internal.common_utils import \
(TestCase, run_tests, slowTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, onlyNativeDeviceTypes,
onlyCUDA, dtypesIfCUDA, dtypesIfCPU, onlyCPU, largeTensorTest)
# TODO: remove this
# Default edge length for the random tensors used by the tests below.
SIZE = 100
class TestSortAndSelect(TestCase):
def assertIsOrdered(self, order, x, mxx, ixx, task):
    """Assert that (mxx, ixx) is a correct row-wise sort of `x`.

    Args:
        order: 'ascending' or 'descending'.
        x: the original 2-D input tensor.
        mxx: sorted values as returned by torch.sort.
        ixx: sort indices as returned by torch.sort.
        task: human-readable label used in failure messages.

    Raises:
        ValueError: if `order` is not one of the two accepted strings.
    """
    SIZE = x.size(1)
    if order == 'descending':
        def check_order(a, b):
            # `a != a` because we put NaNs
            # at the end of ascending sorted lists,
            # and the beginning of descending ones.
            return ((a != a) | (a >= b)).all().item()
    elif order == 'ascending':
        def check_order(a, b):
            # see above
            return ((b != b) | (a <= b)).all().item()
    else:
        # Was `error(...)` — a Lua-ism left over from the torch7 port; `error`
        # is undefined in Python, so the old code raised NameError instead of
        # a meaningful exception.
        raise ValueError('unknown order "{}", must be "ascending" or "descending"'.format(order))

    # Adjacent columns of the sorted values must respect the requested order.
    for k in range(1, SIZE):
        self.assertTrue(check_order(mxx[:, k - 1], mxx[:, k]),
                        'torch.sort ({}) values unordered for {}'.format(order, task))

    # Verify the indices: each row of `ixx` must be a permutation mapping the
    # original values onto the sorted values.
    seen = set()
    size0 = x.size(0)
    size = x.size(x.dim() - 1)
    x = x.tolist()
    mxx = mxx.tolist()
    ixx = ixx.tolist()
    for k in range(size0):
        seen.clear()
        for j in range(size):
            self.assertEqual(x[k][ixx[k][j]], mxx[k][j],
                             msg='torch.sort ({}) indices wrong for {}'.format(order, task))
            seen.add(ixx[k][j])
        self.assertEqual(len(seen), size)
def test_sort(self, device):
    """End-to-end torch.sort checks: in-place via out=, preallocated result
    tensors, argsort consistency, descending order, duplicate keys, stable
    sort indices, and NaN placement."""
    # on CUDA 2048 vs >2048 have different code path for the dim being sorted
    for SIZE in (4, 2049):
        x = torch.rand(4, SIZE, device=device)
        res1val, res1ind = torch.sort(x)

        # Test inplace
        y = x.clone()
        y_inds = torch.tensor((), dtype=torch.int64, device=device)
        torch.sort(y, out=(y, y_inds))
        x_vals, x_inds = torch.sort(x)
        self.assertEqual(x_vals, y)
        self.assertEqual(x_inds, y_inds)

        # Test use of result tensor
        res2val = torch.tensor((), device=device)
        res2ind = torch.tensor((), device=device, dtype=torch.long)
        torch.sort(x, out=(res2val, res2ind))
        self.assertEqual(res1val, res2val, atol=0, rtol=0)
        self.assertEqual(res1ind, res2ind, atol=0, rtol=0)
        self.assertEqual(torch.argsort(x), res1ind)
        self.assertEqual(x.argsort(), res1ind)

        # Test sorting of random numbers
        self.assertIsOrdered('ascending', x, res2val, res2ind, 'random')

        # Test simple sort
        self.assertEqual(
            torch.sort(torch.tensor((50, 40, 30, 20, 10), device=device))[0],
            torch.tensor((10, 20, 30, 40, 50), device=device),
            atol=0, rtol=0
        )

        # Test that we still have proper sorting with duplicate keys
        x = torch.floor(torch.rand(4, SIZE, device=device) * 10)
        torch.sort(x, out=(res2val, res2ind))
        self.assertIsOrdered('ascending', x, res2val, res2ind, 'random with duplicate keys')

        # DESCENDING SORT
        x = torch.rand(4, SIZE, device=device)
        res1val, res1ind = torch.sort(x, x.dim() - 1, True)

        # Test use of result tensor
        res2val = torch.tensor((), device=device)
        res2ind = torch.tensor((), device=device, dtype=torch.long)
        torch.sort(x, x.dim() - 1, True, out=(res2val, res2ind))
        self.assertEqual(res1val, res2val, atol=0, rtol=0)
        self.assertEqual(res1ind, res2ind, atol=0, rtol=0)
        self.assertEqual(torch.argsort(x, x.dim() - 1, True), res1ind)
        self.assertEqual(x.argsort(x.dim() - 1, True), res1ind)

        # Test sorting of random numbers
        self.assertIsOrdered('descending', x, res2val, res2ind, 'random')

        # Test simple sort task
        self.assertEqual(
            torch.sort(torch.tensor((10, 20, 30, 40, 50), device=device), 0, True)[0],
            torch.tensor((50, 40, 30, 20, 10), device=device),
            atol=0, rtol=0
        )

        # Test that we still have proper sorting with duplicate keys
        self.assertIsOrdered('descending', x, res2val, res2ind, 'random with duplicate keys')

        # Test argument sorting with and without stable
        x = torch.tensor([1, 10, 2, 2, 3, 7, 7, 8, 9, 9] * 3)
        self.assertEqual(torch.argsort(x, stable=True), torch.sort(x, stable=True).indices)
        self.assertEqual(torch.argsort(x, stable=False), torch.sort(x, stable=False).indices)
        self.assertEqual(torch.argsort(x), torch.sort(x).indices)

        # Test sorting with NaNs
        x = torch.rand(4, SIZE, device=device)
        x[1][2] = float('NaN')
        x[3][0] = float('NaN')
        torch.sort(x, out=(res2val, res2ind))
        self.assertIsOrdered('ascending', x, res2val, res2ind,
                             'random with NaNs')
        torch.sort(x, out=(res2val, res2ind), descending=True)
        self.assertIsOrdered('descending', x, res2val, res2ind,
                             'random with NaNs')
@onlyCUDA
def test_sort_large_slice(self, device):
    """Sorting a very wide dim exercises the direct cub path on CUDA."""
    inp = torch.randn(4, 1024000, device=device)
    # assertIsOrdered would be too slow here, so compare against the CPU
    # result for both directions.
    for descending in (False, True):
        got_vals, got_inds = torch.sort(inp, descending=descending, stable=True)
        torch.cuda.synchronize()
        ref_vals, ref_inds = torch.sort(inp.cpu(), descending=descending, stable=True)
        self.assertEqual(got_vals, ref_vals.cuda())
        self.assertEqual(got_inds, ref_inds.cuda())
# FIXME: remove torch.bool from unsupported types once support is added for cub sort
@dtypes(*all_types_and(torch.half, torch.bfloat16))
def test_stable_sort(self, device, dtype):
    """A stable sort of an alternating 0/1 pattern must keep relative order:
    the zeros' original (even) positions come first, then the ones' (odd)."""
    for ncopies in (100, 1000, 10000):
        seq = torch.tensor([0, 1] * ncopies, dtype=dtype, device=device)
        _, order = seq.sort(stable=True)
        even_positions = torch.arange(start=0, end=2 * ncopies, step=2, device=device)
        odd_positions = torch.arange(start=1, end=2 * ncopies, step=2, device=device)
        self.assertEqual(order[:ncopies], even_positions)
        self.assertEqual(order[ncopies:], odd_positions)
@onlyCUDA
@dtypes(torch.uint8)
@largeTensorTest('200GB')  # Unfortunately 80GB A100 is not large enough
def test_sort_large(self, device, dtype):
    """Sort a tensor of 2**18+1 identical rows; every row must produce the
    same sorted values/indices (zero variance across rows)."""
    # randperm(8192) truncated to uint8, so each byte value repeats.
    t0 = torch.randperm(8192, device=device).to(dtype)
    t = t0.view(1, 8192).expand(2 ** 18 + 1, -1).contiguous()
    v, i = t.sort()
    del t  # free as we go; the intermediates are enormous
    iv, im = i.var_mean(dim=0)
    del i
    vv, vm = v.var_mean(dim=0)
    del v
    # All rows are identical, so variance across rows must be exactly zero.
    self.assertEqual(vv, torch.zeros_like(vv))
    self.assertEqual(iv, torch.zeros_like(iv))
    # NOTE(review): vm is the mean sorted row with 8192 entries, while
    # torch.arange(255) has 255 — confirm this comparison is what's intended.
    self.assertEqual(vm, torch.arange(255, dtype=dtype, device=device))
    self.assertEqual(im, t0.sort().indices)
@dtypes(torch.float32)
def test_sort_restride(self, device, dtype):
    """out= tensors passed to sort are resized and restrided to dense layouts."""
    # Non-contiguous 3-element input: column 0 of a (3, 5) matrix (stride 5).
    src = torch.randn((3, 5), dtype=dtype, device=device)[:, 0]
    # 0-dim outputs must be resized, which restrides them as well.
    out_vals = torch.tensor(0, dtype=dtype, device=device)
    out_inds = torch.tensor(0, dtype=torch.long, device=device)
    torch.sort(src, out=(out_vals, out_inds))
    # Both outputs should now be dense 1-D tensors...
    self.assertEqual(out_vals.stride(), (1,))
    self.assertEqual(out_inds.stride(), (1,))
    # ...and indexing the input with the returned indices recovers the values.
    self.assertEqual(src[out_inds], out_vals)
def _test_sort_discontiguous(self, device, dtype):
    """Sort every permuted (non-contiguous) layout of a 3-D tensor along each
    dim and compare against sorting a contiguous copy."""
    # on CUDA 2048 vs >2048 have different code path for the dim being sorted
    sizes = (5, 7, 2049)
    for shape in permutations(sizes):
        for perm in permutations((0, 1, 2)):
            for dim in range(3):
                t = torch.randn(shape, device=device, dtype=dtype).permute(perm)
                r1 = t.sort(dim=dim)
                r2 = t.contiguous().sort(dim=dim)
                self.assertEqual(r1, r2)
                n = t.size(dim)

                # assert ordered
                self.assertTrue((r1.values.narrow(dim, 1, n - 1) >= r1.values.narrow(dim, 0, n - 1)).all())

                # assert that different segments does not mix, which can easily happen
                # if the stride is not handled correctly
                self.assertTrue((t.unsqueeze(-1).transpose(dim, -1) == r1.values.unsqueeze(-1)).any(dim=dim).any(dim=-1).all())

                # assert stride is preserved
                if self.device_type == 'cuda':
                    # FIXME: this behavior should be true for all cases, not
                    # just the one specified in if condition
                    self.assertEqual(r1.values.stride(), t.stride())
                    self.assertEqual(r1.indices.stride(), t.stride())
@onlyCUDA
@dtypes(torch.float32)
def test_sort_discontiguous(self, device, dtype):
    # Thin wrapper: the shared checks are fast enough to run on CUDA.
    self._test_sort_discontiguous(device, dtype)
@slowTest  # this test is slow on CPU, but not on CUDA
@onlyCPU
@dtypes(torch.float32)
def test_sort_discontiguous_slow(self, device, dtype):
    # Same checks as test_sort_discontiguous, restricted to CPU and marked slow.
    self._test_sort_discontiguous(device, dtype)
@dtypes(torch.float32)
def test_sort_1d_output_discontiguous(self, device, dtype):
    """sort(out=...) must handle strided (non-contiguous) output tensors."""
    src = torch.randn(12, device=device, dtype=dtype)[:6]
    strided_vals = torch.empty_like(src)[::2]
    strided_inds = torch.empty(18, device=device, dtype=torch.long)[::3]
    torch.sort(src, out=(strided_vals, strided_inds))
    dense_vals, dense_inds = src.sort()
    self.assertEqual(strided_inds, dense_inds)
    self.assertEqual(strided_vals, dense_vals)
@dtypes(torch.float32)
def test_topk_1d_output_discontiguous(self, device, dtype):
    """topk(out=...) must handle strided (non-contiguous) output tensors."""
    src = torch.randn(12, device=device, dtype=dtype)
    strided_vals = torch.empty_like(src)[::2]
    strided_inds = torch.empty(18, device=device, dtype=torch.long)[::3]
    # (renamed loop variable to avoid shadowing the builtin `sorted`)
    for is_sorted in (True, False):
        # outputs of `sorted=False` test are not guaranteed to be the same,
        # but with current implementation they are
        torch.topk(src, 6, sorted=is_sorted, out=(strided_vals, strided_inds))
        dense_vals, dense_inds = src.topk(6, sorted=is_sorted)
        self.assertEqual(strided_inds, dense_inds)
        self.assertEqual(strided_vals, dense_vals)
# FIXME: remove torch.bool from unsupported types once support is added for cub sort
@dtypes(*all_types_and(torch.half, torch.bfloat16))
def test_stable_sort_against_numpy(self, device, dtype):
    """Compare stable-sort indices against np.argsort(kind='stable') on
    inputs seeded with inf/-inf/NaN at random positions."""
    if dtype in floating_types_and(torch.float16, torch.bfloat16):
        inf = float('inf')
        neg_inf = -float('inf')
        nan = float('nan')
    else:
        if dtype != torch.bool:
            # no torch.iinfo support for torch.bool
            inf = torch.iinfo(dtype).max
            neg_inf = torch.iinfo(dtype).min
        else:
            inf = True
            # NOTE(review): `~True` evaluates to -2, not False — this looks
            # like it intended `not inf`. The branch is currently unreachable
            # since torch.bool is not in the @dtypes list above; confirm
            # before re-enabling bool.
            neg_inf = ~inf
        # no nan for integral types, we use inf instead for simplicity
        nan = inf

    def generate_samples():
        from itertools import chain, combinations

        for sizes in [(1025,), (10000,)]:
            size = sizes[0]
            # binary strings
            yield (torch.tensor([0, 1] * size, dtype=dtype, device=device), 0)

        # Everything below this point is skipped on CUDA devices.
        if self.device_type == 'cuda':
            return

        yield (torch.tensor([0, 1] * 100, dtype=dtype, device=device), 0)

        def repeated_index_fill(t, dim, idxs, vals):
            res = t
            for idx, val in zip(idxs, vals):
                res = res.index_fill(dim, idx, val)
            return res

        for sizes in [(1, 10), (10, 1), (10, 10), (10, 10, 10)]:
            size = min(*sizes)
            x = (torch.randn(*sizes, device=device) * size).to(dtype)
            yield (x, 0)

            # Generate tensors which are being filled at random locations
            # with values from the non-empty subsets of the set (inf, neg_inf, nan)
            # for each dimension.
            n_fill_vals = 3  # cardinality of (inf, neg_inf, nan)
            for dim in range(len(sizes)):
                idxs = (torch.randint(high=size, size=(size // 10,)) for i in range(n_fill_vals))
                vals = (inf, neg_inf, nan)
                subsets = chain.from_iterable(combinations(list(zip(idxs, vals)), r)
                                              for r in range(1, n_fill_vals + 1))
                for subset in subsets:
                    idxs_subset, vals_subset = zip(*subset)
                    yield (repeated_index_fill(x, dim, idxs_subset, vals_subset), dim)

    for sample, dim in generate_samples():
        _, idx_torch = sample.sort(dim=dim, stable=True)
        if dtype is torch.bfloat16:
            # NumPy has no bfloat16; upcast to float32 for the reference.
            sample_numpy = sample.float().cpu().numpy()
        else:
            sample_numpy = sample.cpu().numpy()
        idx_numpy = np.argsort(sample_numpy, axis=dim, kind='stable')
        self.assertEqual(idx_torch, idx_numpy)
@dtypes(*all_types_and(torch.half, torch.bfloat16))
def test_msort(self, device, dtype):
    """Compare torch.msort against NumPy's definition: sort along axis 0."""
    def test(shape):
        tensor = make_tensor(shape, dtype=dtype, device=device, low=-9, high=9)
        if tensor.size() != torch.Size([]):
            # np.msort was deprecated in NumPy 1.24 and removed in 2.0;
            # np.sort(a, axis=0) is its documented exact equivalent.
            if dtype is torch.bfloat16:
                # NumPy has no bfloat16; round-trip through float32.
                expected = torch.from_numpy(np.sort(tensor.float().cpu().numpy(), axis=0)).bfloat16()
            else:
                expected = torch.from_numpy(np.sort(tensor.cpu().numpy(), axis=0))
        else:
            expected = tensor  # numpy sorting along axis 0 does not support 0-d tensors

        result = torch.msort(tensor)
        self.assertEqual(result, expected)

        # Same result through the out= overload.
        out = torch.empty_like(result)
        torch.msort(tensor, out=out)
        self.assertEqual(out, expected)

    shapes = (
        [],
        [0, ],
        [20, ],
        [1, 20],
        [30, 30],
        [10, 20, 30]
    )
    for shape in shapes:
        test(shape)
def test_topk(self, device):
    """Validate topk against a sort-based reference over random shapes,
    random transposes, every dim, and both directions."""
    def topKViaSort(t, k, dim, dir):
        # Reference implementation: full sort, then take the first k.
        sorted, indices = t.sort(dim, dir)
        return sorted.narrow(dim, 0, k), indices.narrow(dim, 0, k)

    def compareTensors(t, res1, ind1, res2, ind2, dim):
        # Values should be exactly equivalent
        self.assertEqual(res1, res2, atol=0, rtol=0)

        # Indices might differ based on the implementation, since there is
        # no guarantee of the relative order of selection
        if not ind1.eq(ind2).all():
            # To verify that the indices represent equivalent elements,
            # gather from the input using the topk indices and compare against
            # the sort indices
            vals = t.gather(dim, ind2)
            self.assertEqual(res1, vals, atol=0, rtol=0)

    def compare(t, k, dim, dir):
        topKVal, topKInd = t.topk(k, dim, dir, True)
        sortKVal, sortKInd = topKViaSort(t, k, dim, dir)
        compareTensors(t, sortKVal, sortKInd, topKVal, topKInd, dim)

    t = torch.rand(random.randint(1, SIZE),
                   random.randint(1, SIZE),
                   random.randint(1, SIZE), device=device)

    for _kTries in range(3):
        for _dimTries in range(3):
            for transpose in (True, False):
                for dir in (True, False):
                    testTensor = t
                    if transpose:
                        # Swap two distinct random dims.
                        dim1 = random.randrange(t.ndimension())
                        dim2 = dim1
                        while dim1 == dim2:
                            dim2 = random.randrange(t.ndimension())
                        testTensor = t.transpose(dim1, dim2)

                    dim = random.randrange(testTensor.ndimension())
                    k = random.randint(1, testTensor.size(dim))
                    compare(testTensor, k, dim, dir)

    # This tests the code path where on CUDA, topk is implemented with sort.
    t = torch.randn((2, 100000), device=device)
    compare(t, 2000, 1, True)
    compare(t, 2000, 1, False)

    # This tests the code path where on CUDA, topk is implemented with multiblock
    t = torch.randn((2, 10000), device=device)
    compare(t, 2000, 1, True)
    compare(t, 2000, 1, False)
def test_topk_arguments(self, device):
    """Passing a bool where `dim` is expected must raise TypeError."""
    inp = torch.randn(10, 2, 10, device=device)
    # Make sure True isn't mistakenly taken as the 2nd dimension (interpreted as 1)
    with self.assertRaises(TypeError):
        inp.topk(4, True)
def test_unique_dim(self, device):
    """Exhaustive checks of torch.unique(..., dim=...) over dims 0/1/2, all
    return_inverse/return_counts combinations, empty and ill-formed-empty
    inputs, NaN handling, and torch.unique_consecutive(dim=0) — across
    float/double/long/uint8/bool dtypes."""
    self.assertFalse(hasattr(torch, 'unique_dim'))

    def run_test(device, dtype):
        # 2 identical "pages" of 4 rows; rows 1 and 3 are duplicates too.
        x = torch.tensor([[[1., 1.],
                           [0., 1.],
                           [2., 1.],
                           [0., 1.]],
                          [[1., 1.],
                           [0., 1.],
                           [2., 1.],
                           [0., 1.]]],
                         dtype=dtype,
                         device=device)
        x_empty = torch.empty(5, 0, dtype=dtype, device=device)
        x_ill_formed_empty = torch.empty(5, 0, 0, dtype=dtype, device=device)
        x_ill_formed_empty_another = torch.empty(5, 0, 5, dtype=dtype, device=device)
        if dtype in floating_types_and(torch.float16, torch.bfloat16):
            x_nan = torch.tensor([float("nan"), 0, 0, float("nan"), float("nan"), 1], dtype=dtype, device=device)
        expected_unique_dim0 = torch.tensor([[[1., 1.],
                                              [0., 1.],
                                              [2., 1.],
                                              [0., 1.]]],
                                            dtype=dtype,
                                            device=device)
        expected_inverse_dim0 = torch.tensor([0, 0])
        expected_counts_dim0 = torch.tensor([2])
        expected_unique_dim1 = torch.tensor([[[0., 1.],
                                              [1., 1.],
                                              [2., 1.]],
                                             [[0., 1.],
                                              [1., 1.],
                                              [2., 1.]]],
                                            dtype=dtype,
                                            device=device)
        expected_unique_dim1_bool = torch.tensor([[[False, True], [True, True]],
                                                  [[False, True], [True, True]]],
                                                 dtype=torch.bool,
                                                 device=device)
        expected_inverse_dim1 = torch.tensor([1, 0, 2, 0])
        expected_inverse_dim1_bool = torch.tensor([1, 0, 1, 0])
        expected_counts_dim1 = torch.tensor([2, 1, 1])
        expected_counts_dim1_bool = torch.tensor([2, 2])
        expected_unique_dim2 = torch.tensor([[[1., 1.],
                                              [0., 1.],
                                              [2., 1.],
                                              [0., 1.]],
                                             [[1., 1.],
                                              [0., 1.],
                                              [2., 1.],
                                              [0., 1.]]],
                                            dtype=dtype,
                                            device=device)
        expected_inverse_dim2 = torch.tensor([0, 1])
        expected_counts_dim2 = torch.tensor([1, 1])
        expected_unique_empty = torch.empty(5, 0, dtype=dtype, device=device)
        expected_inverse_empty = torch.tensor([], dtype=torch.long, device=device)
        expected_counts_empty = torch.tensor([], dtype=torch.long, device=device)
        if dtype in floating_types_and(torch.float16, torch.bfloat16):
            # NaN != NaN, so every NaN is its own unique "row".
            expected_unique_nan = torch.tensor([float("nan"), 0, float("nan"), float("nan"), 1], dtype=dtype, device=device)
            expected_inverse_nan = torch.tensor([0, 1, 1, 2, 3, 4], dtype=torch.long, device=device)
            expected_counts_nan = torch.tensor([1, 2, 1, 1, 1], dtype=torch.long, device=device)

        # dim0
        x_unique = torch.unique(x, dim=0)
        self.assertEqual(expected_unique_dim0, x_unique)

        x_unique, x_inverse = torch.unique(
            x,
            return_inverse=True,
            dim=0)
        self.assertEqual(expected_unique_dim0, x_unique)
        self.assertEqual(expected_inverse_dim0, x_inverse)

        x_unique, x_counts = torch.unique(
            x,
            return_inverse=False,
            return_counts=True,
            dim=0)
        self.assertEqual(expected_unique_dim0, x_unique)
        self.assertEqual(expected_counts_dim0, x_counts)

        x_unique, x_inverse, x_counts = torch.unique(
            x,
            return_inverse=True,
            return_counts=True,
            dim=0)
        self.assertEqual(expected_unique_dim0, x_unique)
        self.assertEqual(expected_inverse_dim0, x_inverse)
        self.assertEqual(expected_counts_dim0, x_counts)

        # dim1
        x_unique = torch.unique(x, dim=1)
        if x.dtype == torch.bool:
            self.assertEqual(expected_unique_dim1_bool, x_unique)
        else:
            self.assertEqual(expected_unique_dim1, x_unique)

        x_unique, x_inverse = torch.unique(
            x,
            return_inverse=True,
            dim=1)
        if x.dtype == torch.bool:
            self.assertEqual(expected_unique_dim1_bool, x_unique)
            self.assertEqual(expected_inverse_dim1_bool, x_inverse)
        else:
            self.assertEqual(expected_unique_dim1, x_unique)
            self.assertEqual(expected_inverse_dim1, x_inverse)

        x_unique, x_counts = torch.unique(
            x,
            return_inverse=False,
            return_counts=True,
            dim=1)
        if x.dtype == torch.bool:
            self.assertEqual(expected_unique_dim1_bool, x_unique)
            self.assertEqual(expected_counts_dim1_bool, x_counts)
        else:
            self.assertEqual(expected_unique_dim1, x_unique)
            self.assertEqual(expected_counts_dim1, x_counts)

        x_unique, x_inverse, x_counts = torch.unique(
            x,
            return_inverse=True,
            return_counts=True,
            dim=1)
        if x.dtype == torch.bool:
            self.assertEqual(expected_unique_dim1_bool, x_unique)
            self.assertEqual(expected_inverse_dim1_bool, x_inverse)
            self.assertEqual(expected_counts_dim1_bool, x_counts)
        else:
            self.assertEqual(expected_unique_dim1, x_unique)
            self.assertEqual(expected_inverse_dim1, x_inverse)
            self.assertEqual(expected_counts_dim1, x_counts)

        # dim2
        x_unique = torch.unique(x, dim=2)
        self.assertEqual(expected_unique_dim2, x_unique)

        x_unique, x_inverse = torch.unique(
            x,
            return_inverse=True,
            dim=2)
        self.assertEqual(expected_unique_dim2, x_unique)
        self.assertEqual(expected_inverse_dim2, x_inverse)

        x_unique, x_counts = torch.unique(
            x,
            return_inverse=False,
            return_counts=True,
            dim=2)
        self.assertEqual(expected_unique_dim2, x_unique)
        self.assertEqual(expected_counts_dim2, x_counts)

        x_unique, x_inverse, x_counts = torch.unique(
            x,
            return_inverse=True,
            return_counts=True,
            dim=2)
        self.assertEqual(expected_unique_dim2, x_unique)
        self.assertEqual(expected_inverse_dim2, x_inverse)
        self.assertEqual(expected_counts_dim2, x_counts)

        # test empty tensor
        x_unique, x_inverse, x_counts = torch.unique(
            x_empty,
            return_inverse=True,
            return_counts=True,
            dim=1)
        self.assertEqual(expected_unique_empty, x_unique)
        self.assertEqual(expected_inverse_empty, x_inverse)
        self.assertEqual(expected_counts_empty, x_counts)

        # test tensor with nan
        if dtype in floating_types_and(torch.float16, torch.bfloat16):
            x_unique, x_inverse, x_counts = torch.unique(
                x_nan,
                return_inverse=True,
                return_counts=True,
                dim=0)
            self.assertEqual(expected_unique_nan, x_unique)
            self.assertEqual(expected_inverse_nan, x_inverse)
            self.assertEqual(expected_counts_nan, x_counts)

        # test not a well formed tensor
        # Checking for runtime error, as this is the expected behaviour
        with self.assertRaises(RuntimeError):
            torch.unique(
                x_ill_formed_empty,
                return_inverse=True,
                return_counts=True,
                dim=1)

        # test along dim2
        with self.assertRaises(RuntimeError):
            torch.unique(
                x_ill_formed_empty_another,
                return_inverse=True,
                return_counts=True,
                dim=2)

        # test consecutive version
        y = torch.tensor(
            [[0, 1],
             [0, 1],
             [0, 1],
             [1, 2],
             [1, 2],
             [3, 4],
             [0, 1],
             [0, 1],
             [3, 4],
             [1, 2]],
            dtype=dtype,
            device=device
        )
        # test tensor with nan
        if dtype in floating_types_and(torch.float16, torch.bfloat16):
            y_nan = torch.tensor([float("nan"), 0, 0, float("nan"), float("nan"), 1], dtype=dtype, device=device)
        expected_y_unique = torch.tensor(
            [[0, 1],
             [1, 2],
             [3, 4],
             [0, 1],
             [3, 4],
             [1, 2]],
            dtype=dtype,
            device=device
        )
        expected_y_inverse = torch.tensor([0, 0, 0, 1, 1, 2, 3, 3, 4, 5], dtype=torch.int64, device=device)
        expected_y_counts = torch.tensor([3, 2, 1, 2, 1, 1], dtype=torch.int64, device=device)
        expected_y_inverse_bool = torch.tensor([0, 0, 0, 1, 1, 1, 2, 2, 3, 3], dtype=torch.int64, device=device)
        expected_y_counts_bool = torch.tensor([3, 3, 2, 2], dtype=torch.int64, device=device)
        if dtype in floating_types_and(torch.float16, torch.bfloat16):
            expected_y_unique_nan = torch.tensor([float("nan"), 0, float("nan"), float("nan"), 1], dtype=dtype, device=device)
            expected_y_inverse_nan = torch.tensor([0, 1, 1, 2, 3, 4], dtype=torch.long, device=device)
            expected_y_counts_nan = torch.tensor([1, 2, 1, 1, 1], dtype=torch.long, device=device)

        y_unique, y_inverse, y_counts = torch.unique_consecutive(y, return_inverse=True, return_counts=True, dim=0)
        if x.dtype == torch.bool:
            self.assertEqual(expected_y_inverse_bool, y_inverse)
            self.assertEqual(expected_y_counts_bool, y_counts)
        else:
            self.assertEqual(expected_y_inverse, y_inverse)
            self.assertEqual(expected_y_counts, y_counts)

        # test tensor with nan
        if dtype in floating_types_and(torch.float16, torch.bfloat16):
            y_unique, y_inverse, y_counts = torch.unique_consecutive(
                y_nan,
                return_inverse=True,
                return_counts=True,
                dim=0)
            self.assertEqual(expected_y_unique_nan, y_unique)
            self.assertEqual(expected_y_inverse_nan, y_inverse)
            self.assertEqual(expected_y_counts_nan, y_counts)

    run_test(device, torch.float)
    run_test(device, torch.double)
    run_test(device, torch.long)
    run_test(device, torch.uint8)
    run_test(device, torch.bool)
@onlyCUDA
def test_topk_noncontiguous_gpu(self, device):
    """Non-contiguous inputs must match topk on a contiguous copy across the
    single-block, multi-block, and sort-based CUDA paths."""
    strided_inputs = (
        torch.randn(20, device=device)[::2],      # single-block path
        torch.randn(20000, device=device)[::2],   # multi-block path
        torch.randn(200000, device=device)[::2],  # sort-based path
    )
    for strided in strided_inputs:
        for k in (5, 2000, 10000):
            if k >= strided.shape[0]:
                continue
            vals_nc, inds_nc = strided.topk(k)
            vals_c, inds_c = strided.contiguous().topk(k)
            self.assertEqual(vals_nc, vals_c)
            self.assertEqual(inds_nc, inds_c)
def _test_topk_dtype(self, device, dtype, integral, size):
    """Check topk(size // 2) of a random 1-D tensor against a full sort."""
    if integral:
        info = torch.iinfo(dtype)
        sample = torch.randint(info.min, info.max,
                               size=(size,), dtype=dtype, device=device)
    else:
        sample = torch.randn(size=(size,), dtype=dtype, device=device)

    k = size // 2
    # Largest k values in descending order, derived from an ascending sort.
    expected = sample.sort()[0][-k:].flip(0)
    vals, inds = sample.topk(k)
    self.assertEqual(expected, vals)          # check values
    self.assertEqual(expected, sample[inds])  # check indices
@dtypes(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)
def test_topk_integral(self, device, dtype):
    """Run the dtype check at small/large/very-large sizes; 8192 elements
    triggers the multi-block topk path on CUDA."""
    for curr_size in (10, 4096, 8192):
        self._test_topk_dtype(device, dtype, True, curr_size)
@onlyCUDA
@dtypes(torch.bfloat16)
def test_topk_bfloat16(self, device, dtype):
    """bfloat16 variant of the topk dtype check; 8192 elements triggers the
    multi-block topk path on CUDA."""
    for curr_size in (10, 4096, 8192):
        self._test_topk_dtype(device, dtype, False, curr_size)
@dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
@dtypes(torch.float, torch.double, torch.bfloat16)
def test_topk_nonfinite(self, device, dtype):
    """NaN ranks above +inf for largest=True and is excluded for largest=False."""
    x = torch.tensor([float('nan'), float('inf'), 1e4, 0, -1e4, -float('inf')], device=device, dtype=dtype)
    cases = (
        # (largest, expected values, expected indices)
        (True, [float('nan'), float('inf'), 1e4, 0], [0, 1, 2, 3]),
        (False, [-float('inf'), -1e4, 0, 1e4], [5, 4, 3, 2]),
    )
    for largest, exp_vals, exp_idx in cases:
        val, idx = x.topk(4, largest=largest)
        self.assertEqual(val, torch.tensor(exp_vals, device=device, dtype=dtype))
        self.assertEqual(idx, exp_idx)
def test_topk_4d(self, device):
    """topk along dim=1 of a 4-D tensor must pick the two boosted channels."""
    for channels in (128, 8192):  # small and large channel counts
        x = torch.ones(2, channels, 2, 2, device=device)
        x[:, 1, :, :] *= 2.     # largest entry
        x[:, 10, :, :] *= 1.5   # second largest
        val, ind = torch.topk(x, k=2, dim=1)
        expected_ind = torch.ones(2, 2, 2, 2, dtype=torch.long, device=device)
        expected_ind[:, 1, :, :] = 10
        expected_val = torch.ones(2, 2, 2, 2, device=device)
        expected_val[:, 0, :, :] *= 2.
        expected_val[:, 1, :, :] *= 1.5
        self.assertEqual(val, expected_val, atol=0, rtol=0)
        self.assertEqual(ind, expected_ind, atol=0, rtol=0)
@onlyNativeDeviceTypes
@dtypesIfCUDA(*all_types_and(torch.bfloat16))
@dtypes(*all_types())
def test_topk_zero(self, device, dtype):
    """k=0 must yield empty value/index tensors of shape (2, 0).

    Regression test for https://github.com/pytorch/pytorch/issues/49205
    """
    inp = torch.rand(2, 2, device=device).to(dtype=dtype)
    vals, inds = torch.topk(inp, k=0, largest=False)
    empty_shape = torch.Size([2, 0])
    self.assertEqual(vals.size(), empty_shape)
    self.assertEqual(inds.size(), empty_shape)
def _test_unique_scalar_empty(self, dtype, device, f):
    """Check `f` (a unique-like callable) on a 0-d scalar and a 0-sized tensor."""
    def check(inp, exp_unique, exp_inverse, exp_counts):
        unique, inverse, counts = f(inp, return_inverse=True, return_counts=True)
        self.assertEqual(unique, exp_unique)
        self.assertEqual(inverse, exp_inverse)
        self.assertEqual(counts, exp_counts)

    # test scalar
    check(
        torch.tensor(0, dtype=dtype, device=device),
        torch.tensor([0], dtype=dtype, device=device),
        torch.tensor(0, device=device),
        torch.tensor([1], device=device),
    )

    # test zero sized tensor
    check(
        torch.zeros((0, 0, 3), dtype=dtype, device=device),
        torch.tensor([], dtype=dtype, device=device),
        torch.empty((0, 0, 3), dtype=torch.long, device=device),
        torch.tensor([], dtype=torch.long, device=device),
    )
def _test_unique_with_expects(self, device, dtype, f, x, expected_unique, expected_inverse, expected_counts, additional_shape):
    """Run `f` (a unique-like callable) on `x` with every combination of
    return_inverse/return_counts and compare against the expected tensors;
    then repeat on `x` reshaped to `additional_shape`."""
    def ensure_tuple(x):
        # Normalize: with no optional returns, f yields a bare Tensor.
        if isinstance(x, torch.Tensor):
            return (x,)
        return x

    for return_inverse in [True, False]:
        for return_counts in [True, False]:
            # test with expected
            ret = ensure_tuple(f(x, return_inverse=return_inverse, return_counts=return_counts))
            self.assertEqual(len(ret), 1 + int(return_inverse) + int(return_counts))
            self.assertEqual(expected_unique, ret[0])
            if return_inverse:
                self.assertEqual(expected_inverse, ret[1])
            if return_counts:
                count_index = 1 + int(return_inverse)
                self.assertEqual(expected_counts, ret[count_index])

            # tests per-element unique on a higher rank tensor.
            y = x.view(additional_shape)
            y_unique, y_inverse, y_counts = f(y, return_inverse=True, return_counts=True)
            self.assertEqual(expected_unique, y_unique)
            self.assertEqual(expected_inverse.view(additional_shape), y_inverse)
            self.assertEqual(expected_counts, y_counts)
@dtypesIfCPU(*all_types_and(torch.bool, torch.bfloat16))
@dtypes(*all_types_and(torch.half, torch.bool))
def test_unique(self, device, dtype):
    """torch.unique / Tensor.unique with sorted=True (exact expected output)
    and sorted=False (order-insensitive validation), on contiguous and
    strided inputs."""
    def ensure_tuple(x):
        if isinstance(x, torch.Tensor):
            return (x,)
        return x

    if dtype is torch.bool:
        x = torch.tensor([True, False, False, False, True, False, True, False], dtype=torch.bool, device=device)
        expected_unique = torch.tensor([False, True], dtype=torch.bool, device=device)
        expected_inverse = torch.tensor([1, 0, 0, 0, 1, 0, 1, 0], dtype=torch.long, device=device)
        expected_counts = torch.tensor([5, 3], dtype=torch.long, device=device)
    else:
        x = torch.tensor([1, 2, 3, 2, 8, 5, 2, 3], dtype=dtype, device=device)
        expected_unique = torch.tensor([1, 2, 3, 5, 8], dtype=dtype, device=device)
        expected_inverse = torch.tensor([0, 1, 2, 1, 4, 3, 1, 2], device=device)
        expected_counts = torch.tensor([1, 3, 2, 1, 1], device=device)

    # test sorted unique
    fs = (
        lambda x, **kwargs: torch.unique(x, sorted=True, **kwargs),
        lambda x, **kwargs: x.unique(sorted=True, **kwargs),
    )
    # Also exercise a non-contiguous (stride-2) copy of the same data.
    x_sliced = torch.empty(x.size(0) * 2, dtype=dtype, device=device)[::2].copy_(x)
    xs = (x, x_sliced)
    for f, x in product(fs, xs):
        self._test_unique_with_expects(device, dtype, f, x, expected_unique, expected_inverse, expected_counts, (2, 2, 2))
        self._test_unique_scalar_empty(dtype, device, f)

    # test unsorted unique
    fs = (
        lambda x, **kwargs: torch.unique(x, sorted=False, **kwargs),
        lambda x, **kwargs: x.unique(sorted=False, **kwargs)
    )
    for f, x in product(fs, xs):
        self._test_unique_scalar_empty(dtype, device, f)
        for return_inverse, return_counts in product((True, False), repeat=2):
            ret = ensure_tuple(f(x, return_inverse=return_inverse, return_counts=return_counts))
            self.assertEqual(len(ret), 1 + int(return_inverse) + int(return_counts))
            # Output order is unspecified: compare after sorting, and check
            # inverse/counts element-by-element against the input list.
            x_list = x.tolist()
            x_unique_list = ret[0].tolist()
            self.assertEqual(expected_unique.tolist(), sorted(x_unique_list))
            if return_inverse:
                x_inverse_list = ret[1].tolist()
                for i, j in enumerate(x_inverse_list):
                    self.assertEqual(x_list[i], x_unique_list[j])
            if return_counts:
                count_index = 1 + int(return_inverse)
                x_counts_list = ret[count_index].tolist()
                for i, j in zip(x_unique_list, x_counts_list):
                    count = 0
                    for k in x_list:
                        if k == i:
                            count += 1
                    self.assertEqual(j, count)
@dtypesIfCPU(*all_types_and(torch.bool, torch.bfloat16))
@dtypes(*all_types_and(torch.half, torch.bool))
def test_unique_consecutive(self, device, dtype):
if dtype is torch.bool:
x = torch.tensor([True, False, False, False, True, True, False, False, False], dtype=torch.bool, device=device)
expected_unique = torch.tensor([True, False, True, False], dtype=torch.bool, device=device)
expected_inverse = torch.tensor([0, 1, 1, 1, 2, 2, 3, 3, 3], dtype=torch.long, device=device)
expected_counts = torch.tensor([1, 3, 2, 3], dtype=torch.long, device=device)
else:
x = torch.tensor([1, 2, 2, 2, 5, 5, 2, 2, 3], dtype=dtype, device=device)
expected_unique = torch.tensor([1, 2, 5, 2, 3], dtype=dtype, device=device)
expected_inverse = torch.tensor([0, 1, 1, 1, 2, 2, 3, 3, 4], device=device)
expected_counts = torch.tensor([1, 3, 2, 2, 1], device=device)
for f in [torch.unique_consecutive, lambda x, **kwargs: x.unique_consecutive(**kwargs)]:
self._test_unique_with_expects(device, dtype, f, x, expected_unique, expected_inverse, expected_counts, (3, 3))
self._test_unique_scalar_empty(dtype, device, f)
    @dtypes(torch.double)
    def test_kthvalue(self, device, dtype):
        """Validate torch.kthvalue against a full torch.sort along the same dim.

        Covers: default dim, `out=` result tensors, non-default dim, two
        non-contiguous layouts, input immutability, repeated values, and NaNs.
        NOTE: the random `k` values make this test order-sensitive w.r.t. the
        `random` module's stream; statement order must be preserved.
        """
        SIZE = 50
        x = torch.rand(SIZE, SIZE, SIZE, dtype=dtype, device=device)
        # Keep a pristine copy to verify kthvalue does not mutate its input.
        x0 = x.clone()
        k = random.randint(1, SIZE)
        res1val, res1ind = torch.kthvalue(x, k, keepdim=False)
        # Reference: the k-th smallest along the last dim is column k-1 of a sort.
        res2val, res2ind = torch.sort(x)
        self.assertEqual(res1val[:, :], res2val[:, :, k - 1], atol=0, rtol=0)
        self.assertEqual(res1ind[:, :], res2ind[:, :, k - 1], atol=0, rtol=0)
        # test use of result tensors
        k = random.randint(1, SIZE)
        res1val = torch.tensor([], dtype=dtype, device=device)
        res1ind = torch.tensor([], dtype=torch.long, device=device)
        torch.kthvalue(x, k, keepdim=False, out=(res1val, res1ind))
        res2val, res2ind = torch.sort(x)
        self.assertEqual(res1val[:, :], res2val[:, :, k - 1], atol=0, rtol=0)
        self.assertEqual(res1ind[:, :], res2ind[:, :, k - 1], atol=0, rtol=0)
        # test non-default dim
        k = random.randint(1, SIZE)
        res1val, res1ind = torch.kthvalue(x, k, 0, keepdim=False)
        res2val, res2ind = torch.sort(x, 0)
        self.assertEqual(res1val, res2val[k - 1], atol=0, rtol=0)
        self.assertEqual(res1ind, res2ind[k - 1], atol=0, rtol=0)
        # non-contiguous
        y = x.narrow(1, 0, 1)
        y0 = y.contiguous()
        k = random.randint(1, SIZE)
        res1val, res1ind = torch.kthvalue(y, k)
        res2val, res2ind = torch.kthvalue(y0, k)
        self.assertEqual(res1val, res2val, atol=0, rtol=0)
        self.assertEqual(res1ind, res2ind, atol=0, rtol=0)
        # non-contiguous [Reference: https://github.com/pytorch/pytorch/issues/45721]
        non_contig_t = torch.tensor([0, -1, 1, -2, 2], dtype=dtype, device=device)[::2]
        expected_val, expected_ind = non_contig_t.contiguous().kthvalue(2)
        non_contig_cpu_t = non_contig_t.cpu()
        expected_val_cpu, expected_ind_cpu = non_contig_cpu_t.kthvalue(2)
        out_val, out_ind = non_contig_t.kthvalue(2)
        self.assertEqual(expected_val, out_val, atol=0, rtol=0)
        self.assertEqual(expected_ind, out_ind, atol=0, rtol=0)
        self.assertEqual(expected_val_cpu, out_val, atol=0, rtol=0)
        self.assertEqual(expected_ind_cpu, out_ind, atol=0, rtol=0)
        # check that the input wasn't modified
        self.assertEqual(x, x0, atol=0, rtol=0)
        # simple test case (with repetitions)
        y = torch.tensor((3., 5, 4, 1, 1, 5), dtype=dtype, device=device)
        self.assertEqual(torch.kthvalue(y, 3)[0], 3, atol=0, rtol=0)
        self.assertEqual(torch.kthvalue(y, 2)[0], 1, atol=0, rtol=0)
        # simple test case (with NaN)
        SIZE = 50
        x = torch.rand(SIZE, SIZE, SIZE, dtype=dtype, device=device)
        x[torch.arange(SIZE), :, torch.randint(50, (50,))] = nan
        ks = [random.randint(1, SIZE), 1, SIZE, SIZE - 1]
        res2val, res2ind = torch.sort(x)
        for k in ks:
            res1val, res1ind = torch.kthvalue(x, k, keepdim=False)
            self.assertEqual(res1val[:, :], res2val[:, :, k - 1], atol=0, rtol=0)
            self.assertEqual(res1ind[:, :], res2ind[:, :, k - 1], atol=0, rtol=0)
@dtypes(torch.float)
@onlyNativeDeviceTypes # Fails on XLA
def test_kthvalue_scalar(self, device, dtype):
# Test scalar input (test case from https://github.com/pytorch/pytorch/issues/30818)
# Tests that passing a scalar tensor or 1D tensor with 1 element work either way
res = torch.tensor(2, device=device, dtype=dtype).kthvalue(1)
ref = torch.tensor([2], device=device, dtype=dtype).kthvalue(1)
self.assertEqual(res[0], ref[0].squeeze())
self.assertEqual(res[1], ref[1].squeeze())
    @dtypes(*all_types())
    @dtypesIfCUDA(*all_types_and(torch.half))
    def test_isin(self, device, dtype):
        """Validate torch.isin against numpy's np.isin reference, plus the
        assume_unique/invert flag combinations adapted from numpy's in1d tests."""
        def assert_isin_equal(a, b):
            # Compare to the numpy reference implementation.
            x = torch.isin(a, b)
            a = a.cpu().numpy() if torch.is_tensor(a) else np.array(a)
            b = b.cpu().numpy() if torch.is_tensor(b) else np.array(b)
            y = np.isin(a, b)
            self.assertEqual(x, y)
        # multi-dim tensor, multi-dim tensor
        a = torch.arange(24, device=device, dtype=dtype).reshape([2, 3, 4])
        b = torch.tensor([[10, 20, 30], [0, 1, 3], [11, 22, 33]], device=device, dtype=dtype)
        assert_isin_equal(a, b)
        # zero-dim tensor
        zero_d = torch.tensor(3, device=device, dtype=dtype)
        assert_isin_equal(zero_d, b)
        assert_isin_equal(a, zero_d)
        assert_isin_equal(zero_d, zero_d)
        # empty tensor
        empty = torch.tensor([], device=device, dtype=dtype)
        assert_isin_equal(empty, b)
        assert_isin_equal(a, empty)
        assert_isin_equal(empty, empty)
        # scalar
        assert_isin_equal(a, 6)
        assert_isin_equal(5, b)
        def define_expected(lst, invert=False):
            # Build the expected membership mask; invert=True flips it,
            # mirroring what isin(..., invert=True) should produce.
            expected = torch.tensor(lst, device=device)
            if invert:
                expected = expected.logical_not()
            return expected
        # Adapted from numpy's in1d tests
        for mult in [1, 10]:
            for invert in [False, True]:
                a = torch.tensor([5, 7, 1, 2], device=device, dtype=dtype)
                b = torch.tensor([2, 4, 3, 1, 5] * mult, device=device, dtype=dtype)
                ec = define_expected([True, False, True, True], invert=invert)
                c = torch.isin(a, b, assume_unique=True, invert=invert)
                self.assertEqual(c, ec)
                # NOTE: `a` is mutated in place below, so these sub-cases are
                # order-dependent.
                a[0] = 8
                ec = define_expected([False, False, True, True], invert=invert)
                c = torch.isin(a, b, assume_unique=True, invert=invert)
                self.assertEqual(c, ec)
                a[0], a[3] = 4, 8
                ec = define_expected([True, False, True, False], invert=invert)
                c = torch.isin(a, b, assume_unique=True, invert=invert)
                self.assertEqual(c, ec)
                a = torch.tensor([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5], device=device, dtype=dtype)
                b = torch.tensor([2, 3, 4] * mult, device=device, dtype=dtype)
                ec = define_expected([False, True, False, True, True, True, True, True, True,
                                      False, True, False, False, False], invert=invert)
                c = torch.isin(a, b, invert=invert)
                self.assertEqual(c, ec)
                b = torch.tensor([2, 3, 4] * mult + [5, 5, 4] * mult, device=device, dtype=dtype)
                ec = define_expected([True, True, True, True, True, True, True, True, True, True,
                                      True, False, True, True], invert=invert)
                c = torch.isin(a, b, invert=invert)
                self.assertEqual(c, ec)
                a = torch.tensor([5, 7, 1, 2], device=device, dtype=dtype)
                b = torch.tensor([2, 4, 3, 1, 5] * mult, device=device, dtype=dtype)
                ec = define_expected([True, False, True, True], invert=invert)
                c = torch.isin(a, b, invert=invert)
                self.assertEqual(c, ec)
                a = torch.tensor([5, 7, 1, 1, 2], device=device, dtype=dtype)
                b = torch.tensor([2, 4, 3, 3, 1, 5] * mult, device=device, dtype=dtype)
                ec = define_expected([True, False, True, True, True], invert=invert)
                c = torch.isin(a, b, invert=invert)
                self.assertEqual(c, ec)
                a = torch.tensor([5, 5], device=device, dtype=dtype)
                b = torch.tensor([2, 2] * mult, device=device, dtype=dtype)
                ec = define_expected([False, False], invert=invert)
                c = torch.isin(a, b, invert=invert)
                self.assertEqual(c, ec)
                # multi-dimensional input case using sort-based algo
                for assume_unique in [False, True]:
                    a = torch.arange(6, device=device, dtype=dtype).reshape([2, 3])
                    b = torch.arange(3, 30, device=device, dtype=dtype)
                    ec = define_expected([[False, False, False], [True, True, True]], invert=invert)
                    c = torch.isin(a, b, invert=invert, assume_unique=assume_unique)
                    self.assertEqual(c, ec)
def test_isin_different_dtypes(self, device):
supported_types = all_types() if device == 'cpu' else all_types_and(torch.half)
for mult in [1, 10]:
for assume_unique in [False, True]:
for dtype1, dtype2 in product(supported_types, supported_types):
a = torch.tensor([1, 2, 3], device=device, dtype=dtype1)
b = torch.tensor([3, 4, 5] * mult, device=device, dtype=dtype2)
ec = torch.tensor([False, False, True], device=device)
c = torch.isin(a, b, assume_unique=assume_unique)
self.assertEqual(c, ec)
@onlyCUDA
@dtypes(*all_types())
def test_isin_different_devices(self, device, dtype):
a = torch.arange(6, device=device, dtype=dtype).reshape([2, 3])
b = torch.arange(3, 30, device='cpu', dtype=dtype)
with self.assertRaises(RuntimeError):
torch.isin(a, b)
c = torch.arange(6, device='cpu', dtype=dtype).reshape([2, 3])
d = torch.arange(3, 30, device=device, dtype=dtype)
with self.assertRaises(RuntimeError):
torch.isin(c, d)
# Generate the concrete per-device test classes (e.g. TestSortAndSelectCPU)
# from the generic TestSortAndSelect template.
instantiate_device_type_tests(TestSortAndSelect, globals())
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_sort_and_select.py |
# Owner(s): ["module: dataloader"]
import math
import sys
import errno
import os
import ctypes
import faulthandler
import torch
import gc
import time
import signal
import unittest
import itertools
import warnings
import tempfile
from torch import multiprocessing as mp
from torch.utils.data import (
ChainDataset,
ConcatDataset,
DataLoader,
DataLoader2,
Dataset,
IterableDataset,
IterDataPipe,
Subset,
TensorDataset,
communication,
_utils
)
from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL
from torch.utils.data.dataset import random_split
from torch.utils.data.datapipes.iter import IterableWrapper
from torch.utils.data.datapipes.map import SequenceWrapper
from torch._utils import ExceptionWrapper
from torch.testing._internal.common_utils import (TestCase, run_tests, TEST_NUMPY, IS_WINDOWS,
IS_CI, NO_MULTIPROCESSING_SPAWN, skipIfRocm, slowTest,
load_tests, TEST_WITH_ASAN, TEST_WITH_TSAN, IS_SANDCASTLE,
IS_MACOS)
# psutil is optional: only the tests that must inspect/kill worker processes
# externally (e.g. TestDataLoader.test_proper_exit) need it. Missing psutil is
# a hard error on CI but only a warning locally.
try:
    import psutil
    HAS_PSUTIL = True
except ImportError:
    HAS_PSUTIL = False
    err_msg = ("psutil not found. Some critical data loader tests relying on it "
               "(e.g., TestDataLoader.test_proper_exit) will not run.")
    if IS_CI:
        raise ImportError(err_msg) from None
    else:
        warnings.warn(err_msg)
# dill is optional; tests that need it are skipped via skipIfNoDill below.
try:
    import dill
    # XXX: By default, dill writes the Pickler dispatch table to inject its
    # own logic there. This globally affects the behavior of the standard library
    # pickler for any user who transitively depends on this module!
    # Undo this extension to avoid altering the behavior of the pickler globally.
    dill.extend(use_dill=False)
    HAS_DILL = True
except ImportError:
    HAS_DILL = False
skipIfNoDill = unittest.skipIf(not HAS_DILL, "no dill")
# NumPy is optional; tests that need it are skipped via skipIfNoNumpy below.
try:
    import numpy as np
    HAS_NUMPY = True
except ImportError:
    HAS_NUMPY = False
skipIfNoNumpy = unittest.skipIf(not HAS_NUMPY, "no NumPy")
# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# We cannot import TEST_CUDA from torch.testing._internal.common_cuda here, because if we do that,
# the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed multiple times
# as well during the execution of this test suite, and it will cause
# CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()
if TEST_CUDA:
    dev_name = torch.cuda.get_device_name(torch.cuda.current_device()).lower()
    IS_JETSON = 'xavier' in dev_name or 'nano' in dev_name or 'jetson' in dev_name or 'tegra' in dev_name
else:
    IS_JETSON = False
if not NO_MULTIPROCESSING_SPAWN:
    # We want to use `spawn` if able because some of our tests check that the
    # data loader terminiates gracefully. To prevent hanging in the testing
    # process, such data loaders are run in a separate subprocess.
    #
    # We also want to test the `pin_memory=True` configuration, thus `spawn` is
    # required to launch such processes and they initialize the CUDA context.
    #
    # Mixing different start method is a recipe for disaster (e.g., using a fork
    # `mp.Event` with a spawn `mp.Process` segfaults). So we set this globally
    # to avoid bugs.
    #
    # Get a multiprocessing context because some test / third party library will
    # set start_method when imported, and setting again triggers `RuntimeError`.
    mp = mp.get_context(method='spawn')
    # NOTE: from here on, the module-level name `mp` refers to the spawn
    # context, not the bare torch.multiprocessing module.
# 60s of timeout?
# Yes, in environments where physical CPU resources are shared, e.g., CI, the
# time for a inter-process communication can be highly varying. With 15~17s of
# timeout, we have observed flakiness in some CI builds (see
# pytorch/pytorch#14501, pytorch/pytorch#16608). We follow the CPython
# multiprocessing setup and set the timeout to 60s here:
#
# https://github.com/python/cpython/blob/e8113f51a8bdf33188ee30a1c038a298329e7bfa/Lib/test/_test_multiprocessing.py#L73
JOIN_TIMEOUT = 60.0  # seconds
supported_multiprocessing_contexts = [None] + list(torch.multiprocessing.get_all_start_methods())
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDatasetRandomSplit(TestCase):
    """Tests for torch.utils.data.dataset.random_split (integer-length and
    fractional splits) and for Subset slicing behavior."""
    def test_lengths_must_equal_dataset_size(self):
        # Requested lengths sum to 3 but the dataset has 4 items -> ValueError.
        with self.assertRaises(ValueError):
            random_split([1, 2, 3, 4], [1, 2])
    def test_splits_have_correct_size(self):
        splits = random_split([1, 2, 3, 4, 5, 6], [2, 4])
        self.assertEqual(len(splits), 2)
        self.assertEqual(len(splits[0]), 2)
        self.assertEqual(len(splits[1]), 4)
        # Fractional API: fractions of the dataset length instead of counts.
        splits = random_split([1, 2, 3, 4, 5, 6], [0.5, 0.5])
        self.assertEqual(len(splits), 2)
        self.assertEqual(len(splits[0]), 3)
        self.assertEqual(len(splits[1]), 3)
        # Odd size splits
        self.assertEqual(
            len(random_split(range(3), [0.5, 0.5], generator=torch.Generator().manual_seed(1))),
            2
        )
        # Odd sized round-robin splits
        splits = random_split(range(106), [0.1, 0.2, 0.3, 0.4],
                              generator=torch.Generator().manual_seed(1))
        self.assertEqual(len(splits[0]), 11)
        self.assertEqual(len(splits[1]), 22)
        self.assertEqual(len(splits[2]), 31)
        self.assertEqual(len(splits[3]), 42)
    def test_splits_are_mutually_exclusive(self):
        # Every element must land in exactly one split: the sorted union of the
        # splits must equal the sorted input.
        data = [5, 2, 3, 4, 1, 6]
        splits = random_split(data, [2, 4])
        all_values = []
        all_values.extend(list(splits[0]))
        all_values.extend(list(splits[1]))
        data.sort()
        all_values.sort()
        self.assertListEqual(data, all_values)
        splits = random_split(data, [0.33, 0.67])
        all_values = []
        all_values.extend(list(splits[0]))
        all_values.extend(list(splits[1]))
        data.sort()
        all_values.sort()
        self.assertListEqual(data, all_values)
        data = [1, 2, 3, 4]
        splits = random_split(data, [0.25, 0.75])
        all_values = []
        all_values.extend(list(splits[0]))
        all_values.extend(list(splits[1]))
        data.sort()
        all_values.sort()
        self.assertListEqual(data, all_values)
    def test_splits_indexing_type(self):
        r"""Indices generated by random_split
        should be of integer type
        """
        class CustomDataset():
            def __init__(self, test_object, custom_list):
                self.data = custom_list
                self.test_object = test_object
            def __getitem__(self, key):
                # The Subset machinery must index with plain Python ints.
                self.test_object.assertEqual(type(key), type(0))
                return self.data[key]
            def __len__(self):
                return len(self.data)
        x = [1, 2, 3, 4, 5]
        dataset = CustomDataset(self, x)
        dataset = random_split(dataset, [5])[0]
        data_loader = DataLoader(dataset)
        for batch in data_loader:
            pass
        # fractional splitting
        dataset = CustomDataset(self, x)
        dataset = random_split(dataset, [1.0])[0]
        data_loader = DataLoader(dataset)
        for batch in data_loader:
            pass
    def test_splits_reproducibility(self):
        # Identical generator seeds must produce identical splits.
        self.assertEqual(
            [list(x) for x in random_split(range(10), [3, 7], generator=torch.Generator().manual_seed(1))],
            [[5, 6, 1], [2, 0, 8, 9, 3, 7, 4]],
        )
        self.assertEqual(
            random_split(range(100), [60, 40], generator=torch.Generator().manual_seed(42)),
            random_split(range(100), [60, 40], generator=torch.Generator().manual_seed(42)),
        )
        self.assertEqual(
            random_split(range(100), [0.5, 0.5], generator=torch.Generator().manual_seed(42)),
            random_split(range(100), [0.5, 0.5], generator=torch.Generator().manual_seed(42)),
        )
        self.assertEqual(
            random_split(range(100), [0.33, 0.33, 0.34], generator=torch.Generator().manual_seed(42)),
            random_split(range(100), [0.33, 0.33, 0.34], generator=torch.Generator().manual_seed(42)),
        )
    def test_incomplete_fractional_splits(self):
        with self.assertRaises(ValueError):
            # should raise since the sum of fractions is not 1
            random_split([1, 2, 3, 4], [0.1])
        with self.assertRaises(ValueError):
            # should raise since fraction > 1
            random_split([1, 2, 3, 4], [1.1])
    def test_splits_generator(self):
        # A random_split without a specific generator should affect the default one
        state = torch.get_rng_state()
        a = torch.rand(10)
        torch.set_rng_state(state)
        random_split(range(10), [5, 5])
        b = torch.rand(10)
        self.assertNotEqual(a, b)
        # A random_split with a specific generator should not affect the default one
        state = torch.get_rng_state()
        a = torch.rand(10)
        torch.set_rng_state(state)
        random_split(range(10), [5, 5], generator=torch.Generator().manual_seed(42))
        b = torch.rand(10)
        self.assertEqual(a, b)
    def test_slicing_of_subset_of_dataset(self):
        # Testing slicing a subset initialized with a dataset
        dataset = TensorDataset(torch.tensor([1, 2, 3, 4, 5]))
        subset_of_dataset = Subset(dataset, [0, 1, 2, 3, 4])
        self.assertEqual(subset_of_dataset[:], dataset[:])
        self.assertEqual(subset_of_dataset[1:2], dataset[1:2])
        self.assertEqual(subset_of_dataset[0:-1:2], dataset[0:-1:2])
        # Testing slicing of subset from random split
        subset1, subset2 = random_split(dataset, [3, 2])
        self.assertEqual(subset1[:], dataset[subset1.indices[:]])
        self.assertEqual(subset1[0:2], dataset[subset1.indices[0:2]])
        self.assertEqual(subset1[0:-1:2], dataset[subset1.indices[0:-1:2]])
    def test_slicing_of_subset_of_subset(self):
        # Testing slicing a subset initialized with a subset
        dataset = TensorDataset(torch.tensor([1, 2, 3, 4, 5]))
        subset_of_dataset = Subset(dataset, [0, 1, 2, 3, 4])
        subset_of_subset = Subset(subset_of_dataset, [0, 1, 2, 3, 4])
        self.assertEqual(subset_of_subset[:], dataset[:])
        self.assertEqual(subset_of_subset[0:2], dataset[0:2])
        self.assertEqual(subset_of_subset[0:-1:2], dataset[0:-1:2])
        # Testing slicing of subset of subset from random split
        subset1, subset2 = random_split(dataset, [4, 1])
        subset_of_subset1, subset_of_subset2 = random_split(subset1, [3, 1])
        # Compose the two index maps to get indices into the base dataset.
        idx = [subset1.indices[i] for i in subset_of_subset1.indices]
        self.assertEqual(subset_of_subset1[:], dataset[idx[:]])
        self.assertEqual(subset_of_subset1[0:2], dataset[idx[0:2]])
        self.assertEqual(subset_of_subset1[0:-1:2], dataset[idx[0:-1:2]])
class CUDACountingDataset(Dataset):
    """Map-style dataset of `n` items where item `i` is the CUDA tensor `i`.

    Used to exercise DataLoader paths that receive device (non-CPU) samples.
    Requires a CUDA device at __getitem__ time.
    """
    def __init__(self, n):
        # Modernized from py2-style super(CUDACountingDataset, self) —
        # zero-argument super() is the Python 3 idiom; behavior is unchanged.
        super().__init__()
        self.n = n
    def __getitem__(self, i):
        return torch.as_tensor(i, device='cuda')
    def __len__(self):
        return self.n
class CountingDataset(Dataset):
    """Map-style dataset of `n` items where item `i` is simply the int `i`."""
    def __init__(self, n):
        # Modernized from py2-style super(CountingDataset, self) —
        # zero-argument super() is the Python 3 idiom; behavior is unchanged.
        super().__init__()
        self.n = n
    def __getitem__(self, i):
        return i
    def __len__(self):
        return self.n
class CountingIterableDataset(IterableDataset):
    """Iterable-style dataset yielding 0..n-1; also sized via __len__."""
    def __init__(self, n):
        # Modernized from py2-style super(CountingIterableDataset, self) —
        # zero-argument super() is the Python 3 idiom; behavior is unchanged.
        super().__init__()
        self.n = n
    def __iter__(self):
        return iter(range(self.n))
    def __len__(self):
        return self.n
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestTensorDataset(TestCase):
    """Tests for TensorDataset: length and row-wise indexing over one or
    several equally-sized tensors."""
    def test_len(self):
        source = TensorDataset(torch.randn(15, 10, 2, 3, 4, 5), torch.randperm(15))
        self.assertEqual(len(source), 15)
    def test_getitem(self):
        # source[i] must be the tuple (t[i], l[i]).
        t = torch.randn(15, 10, 2, 3, 4, 5)
        l = torch.randn(15, 10)
        source = TensorDataset(t, l)
        for i in range(15):
            self.assertEqual(t[i], source[i][0])
            self.assertEqual(l[i], source[i][1])
    def test_getitem_1d(self):
        # Same contract with 1-D tensors (items are 0-dim tensors).
        t = torch.randn(15)
        l = torch.randn(15)
        source = TensorDataset(t, l)
        for i in range(15):
            self.assertEqual(t[i], source[i][0])
            self.assertEqual(l[i], source[i][1])
    def test_single_tensor(self):
        # A single tensor still yields 1-tuples per item.
        t = torch.randn(5, 10)
        source = TensorDataset(t)
        self.assertEqual(len(source), 5)
        for i in range(5):
            self.assertEqual(t[i], source[i][0])
    def test_many_tensors(self):
        t0 = torch.randn(5, 10, 2, 3, 4, 5)
        t1 = torch.randn(5, 10)
        t2 = torch.randn(5, 10, 2, 5)
        t3 = torch.randn(5, 10, 3, 7)
        source = TensorDataset(t0, t1, t2, t3)
        self.assertEqual(len(source), 5)
        for i in range(5):
            self.assertEqual(t0[i], source[i][0])
            self.assertEqual(t1[i], source[i][1])
            self.assertEqual(t2[i], source[i][2])
            self.assertEqual(t3[i], source[i][3])
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestConcatDataset(TestCase):
    """Tests for ConcatDataset: length, index mapping across constituent
    datasets, `+` composition, and rejection of IterableDatasets."""
    def test_concat_two_singletons(self):
        result = ConcatDataset([[0], [1]])
        self.assertEqual(2, len(result))
        self.assertEqual(0, result[0])
        self.assertEqual(1, result[1])
    def test_concat_two_non_singletons(self):
        result = ConcatDataset([[0, 1, 2, 3, 4],
                                [5, 6, 7, 8, 9]])
        self.assertEqual(10, len(result))
        self.assertEqual(0, result[0])
        self.assertEqual(5, result[5])
    def test_concat_two_non_singletons_with_empty(self):
        # Adding an empty dataset somewhere is correctly handled
        result = ConcatDataset([[0, 1, 2, 3, 4],
                                [],
                                [5, 6, 7, 8, 9]])
        self.assertEqual(10, len(result))
        self.assertEqual(0, result[0])
        self.assertEqual(5, result[5])
    def test_concat_raises_index_error(self):
        result = ConcatDataset([[0, 1, 2, 3, 4],
                                [5, 6, 7, 8, 9]])
        with self.assertRaises(IndexError):
            # this one goes to 11
            result[11]
    def test_add_dataset(self):
        # Dataset.__add__ is expected to build a ConcatDataset-like view.
        d1 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
        d2 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
        d3 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
        result = d1 + d2 + d3
        self.assertEqual(21, len(result))
        self.assertEqual(0, (d1[0][0] - result[0][0]).abs().sum())
        self.assertEqual(0, (d2[0][0] - result[7][0]).abs().sum())
        self.assertEqual(0, (d3[0][0] - result[14][0]).abs().sum())
    def test_iterable_dataset_err(self):
        # ConcatDataset is index-based; iterable-style datasets must be rejected.
        d1 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
        it1 = CountingIterableDataset(5)
        it2 = CountingIterableDataset(10)
        with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"):
            ConcatDataset([d1, it2, it1])
        with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"):
            ConcatDataset([it2])
        with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"):
            ConcatDataset([it1, d1])
# takes in dummy var so this can also be used as a `worker_init_fn`
def set_faulthander_if_available(_=None):
faulthandler.enable(sys.__stderr__)
if not IS_WINDOWS:
# windows does not have faulthandler.register
# chain=False prevents the default behavior of killing the process
faulthandler.register(signal.SIGUSR1, file=sys.__stderr__, chain=False)
set_faulthander_if_available()
# Process `pid` must have called `set_faulthander_if_available`
def print_traces_of_all_threads(pid):
    """Ask process `pid` to dump all of its thread stacks to its real stderr."""
    if IS_WINDOWS:
        # No SIGUSR1 hook on Windows: fall back to the handler installed by
        # faulthandler.enable(), at the cost of killing the process.
        os.kill(pid, signal.SIGSEGV)
    else:
        # Non-fatal path: the custom SIGUSR1 handler just prints traces.
        os.kill(pid, signal.SIGUSR1)
    # wait in parent process to give subprocess some time to print
    time.sleep(5)
# The following `ErrorTrackingProcess` stores the first encountered exception in
# its `.exception` attribute.
# Inspired by https://stackoverflow.com/a/33599967
class ErrorTrackingProcess(mp.Process):
    # Why no *args?
    # py2 doesn't support def fn(x, *args, key=val, **kwargs)
    # Setting disable_stderr=True may generate a lot of unrelated error outputs
    # but could be helpful for debugging.
    def __init__(self, disable_stderr=True, **kwargs):
        super(ErrorTrackingProcess, self).__init__(**kwargs)
        # Pipe from child to parent: the child sends None on success or an
        # ExceptionWrapper on failure; the parent polls via `exception`.
        self._pconn, self._cconn = mp.Pipe()
        self._exception = None
        self.disable_stderr = disable_stderr
    def run(self):
        set_faulthander_if_available()
        if self.disable_stderr:
            # Disable polluting stderr with errors that are supposed to happen.
            with open(os.devnull, 'w') as devnull:
                os.dup2(devnull.fileno(), sys.stderr.fileno())
        try:
            super(ErrorTrackingProcess, self).run()
            self._cconn.send(None)
        except Exception:
            # Ship the exception info to the parent, then re-raise so the
            # child still exits with a failure status.
            self._cconn.send(ExceptionWrapper(sys.exc_info()))
            raise
    def print_traces_of_all_threads(self):
        assert self.is_alive(), "can only use print_traces_of_all_threads if the process is alive"
        assert not self.disable_stderr, "do not disable stderr if you use print_traces_of_all_threads"
        # On platforms without `SIGUSR1`, `set_faulthander_if_available` sets
        # `faulthandler.enable()`, and `print_traces_of_all_threads` may kill
        # the process. So let's poll the exception first
        _ = self.exception
        print_traces_of_all_threads(self.pid)
    @property
    def exception(self):
        # Lazily drain the pipe, then reconstruct an instance of the original
        # exception type from its recorded type and message (or None).
        if self._pconn.poll():
            self._exception = self._pconn.recv()
        if self._exception is None:
            return None
        else:
            return self._exception.exc_type(self._exception.exc_msg)
    # ESRCH means that os.kill can't finds alive proc
    def send_signal(self, signum, ignore_ESRCH=False):
        try:
            os.kill(self.pid, signum)
        except OSError as e:
            if not ignore_ESRCH or e.errno != errno.ESRCH:
                raise
class ErrorDataset(Dataset):
    """Dataset that reports a length but implements no __getitem__, so any
    attempt to actually fetch an item fails (exercises worker error paths)."""
    def __init__(self, size):
        # Only the reported length; there is nothing to fetch.
        self.size = size
    def __len__(self):
        return self.size
class SegfaultDataset(Dataset):
    """Dataset whose __getitem__ dereferences address 0, crashing the calling
    (worker) process with a segmentation fault — used by test_segfault."""
    def __init__(self, size):
        self.size = size
    def __len__(self):
        return self.size
    def __getitem__(self, idx):
        # Reading a C string at the NULL address segfaults immediately.
        return ctypes.string_at(0)
class SleepDataset(Dataset):
    """Dataset whose first fetch blocks for `sleep_sec` seconds (to trigger
    DataLoader timeouts); later fetches return the index immediately."""
    def __init__(self, size, sleep_sec):
        self.size = size
        self.sleep_sec = sleep_sec
        # Flips to True after the one-time delay (name kept for compatibility).
        self.sleeped = False
    def __len__(self):
        return self.size
    def __getitem__(self, idx):
        if not self.sleeped:
            time.sleep(self.sleep_sec)
            self.sleeped = True
        return idx
class SeedDataset(Dataset):
    """Every item is the fetching process's torch.initial_seed(); used to
    verify per-worker seeding."""
    def __init__(self, size):
        self.size = size
    def __len__(self):
        return self.size
    def __getitem__(self, idx):
        return torch.initial_seed()
class WorkerSpecificIterableDataset(IterableDataset):
    """Iterable dataset where worker `i` yields range(sizes_for_all_workers[i]).

    __iter__ must run inside a DataLoader worker: it requires
    torch.utils.data.get_worker_info() to be populated.
    """
    def __init__(self, sizes_for_all_workers):
        self.sizes_for_all_workers = sizes_for_all_workers
    def __iter__(self):
        worker_info = torch.utils.data.get_worker_info()
        assert worker_info is not None
        my_size = self.sizes_for_all_workers[worker_info.id]
        return iter(range(my_size))
    def __len__(self):
        # Total item count across all workers.
        return sum(self.sizes_for_all_workers)
# Inspired by https://stackoverflow.com/a/26703365
# If all workers will call `sync_once`, they will be blocked until all workers
# reach the call (i.e., acting like a barrier).
# This can be used to ensure that each worker at least processes one data.
class SynchronizedDataset(Dataset):
    def __init__(self, size, batch_size, num_workers):
        # Each worker must have at least one full batch to fetch, otherwise
        # some worker never reaches sync_once and the barrier deadlocks.
        assert size >= num_workers * batch_size
        # Shared counter + semaphore implement the barrier across processes.
        self.count = mp.Value('i', 0, lock=True)
        self.barrier = mp.Semaphore(0)
        self.num_workers = num_workers
        self.size = size
    def sync_once(self):
        # The last arriving worker releases the semaphore once; every waiter
        # then re-releases it so all blocked workers eventually pass.
        with self.count.get_lock():
            self.count.value += 1
            if self.count.value == self.num_workers:
                self.barrier.release()
        self.barrier.acquire()
        self.barrier.release()
    def __getitem__(self, idx):
        # Abstract: subclasses decide what an item looks like.
        raise NotImplementedError
    def __len__(self):
        return self.size
class EmptyTensorDataset(torch.utils.data.Dataset):
    """Dataset of `len` items, each a fresh 0-element tensor.

    NOTE(review): the parameter name `len` shadows the builtin; it is kept
    unchanged for keyword-argument compatibility with existing callers.
    """
    def __init__(self, len):
        self.len = len
    def __len__(self):
        return self.len
    def __getitem__(self, any):
        return torch.empty(0)
class SynchronizedSeedDataset(SynchronizedDataset):
    # Each fetch first waits at the worker barrier (guaranteeing every worker
    # serves at least one item), then reports this worker's initial seed.
    def __getitem__(self, idx):
        self.sync_once()
        return torch.initial_seed()
def _test_timeout(persistent_workers):
    """Fetching must raise: the first item takes 3s but the timeout is 1s.

    Run in a subprocess by the test driver, which checks for the raise.
    """
    loader = DataLoader(
        SleepDataset(10, 3),
        batch_size=2,
        num_workers=2,
        timeout=1,
        persistent_workers=persistent_workers,
    )
    _ = next(iter(loader))
def _test_timeout_pin_memory(persistent_workers):
    """Same as _test_timeout, but with the pin_memory thread in the loop."""
    loader = DataLoader(
        SleepDataset(10, 3),
        batch_size=2,
        num_workers=2,
        timeout=1,
        pin_memory=True,
        persistent_workers=persistent_workers,
    )
    _ = next(iter(loader))
def _test_large_sampler_indices(persistent_workers):
    """Iterate a very large dataset to exercise huge sampler index batches.

    Regression scenario for https://github.com/pytorch/pytorch/issues/48666.
    Raises deliberately at the end so the parent test can confirm the loop
    actually completed (the error message is asserted on by the caller).
    """
    loader = torch.utils.data.DataLoader(
        EmptyTensorDataset(10000000),
        batch_size=40960,
        persistent_workers=persistent_workers,
        num_workers=1)
    for batch in iter(loader):
        assert batch.numel() == 0
    raise RuntimeError('My Error')
def disable_stderr(worker_id):
    r"""
    Avoids printing "ERROR: Unexpected segmentation fault encountered in worker."
    from workers. Since worker signal handler prints with low-level write(),
    this has to be done on OS level via dup.
    This is used as worker_init_fn for test_segfault.
    """
    # Flush Python-level buffers first; dup2 operates below them.
    sys.stderr.flush()
    # dup2 repoints fd 2 at /dev/null. Closing the `devnull` file object when
    # the with-block ends does NOT undo that redirection: fd 2 keeps its own
    # duplicated descriptor.
    with open(os.devnull, 'w') as devnull:
        os.dup2(devnull.fileno(), sys.stderr.fileno())
def _test_segfault():
    """Fetch one batch from a dataset that segfaults its workers.

    Run in a subprocess; the parent asserts on the resulting worker crash.
    """
    loader = DataLoader(
        SegfaultDataset(10),
        batch_size=2,
        num_workers=2,
        worker_init_fn=disable_stderr,
    )
    _ = next(iter(loader))
def _test_no_segfault():
    """Fork-start workers while the parent uses multiple intra-op threads;
    must complete without crashing."""
    dataset = [1, 2, 3]
    # Ensure the parent runs with at least 4 intra-op threads before forking.
    torch.set_num_threads(max(torch.get_num_threads(), 4))
    mp_ctx = torch.multiprocessing.get_context(method='fork')
    loader = DataLoader(dataset, num_workers=1, worker_init_fn=disable_stderr,
                        multiprocessing_context=mp_ctx)
    _ = next(iter(loader))
class TestProperExitDataset(Dataset):
    """Map-style dataset that, once `error_event` is set, raises from the
    last worker only — used to exercise DataLoader shutdown paths."""
    def __init__(self, size, error_event):
        self.size = size
        # mp.Event (or None) that the parent sets to trigger the worker error.
        self.error_event = error_event
    def __len__(self):
        return self.size
    def __getitem__(self, idx):
        worker_info = torch.utils.data.get_worker_info()
        if self.error_event is not None and self.error_event.is_set() and \
                worker_info.id == worker_info.num_workers - 1:
            # only error in the last worker
            raise RuntimeError('Worker error')
        return torch.tensor([idx])
class TestProperExitIterableDataset(IterableDataset):
    """Iterable counterpart of TestProperExitDataset: yields `size` dummy
    tensors per worker, and raises from the last worker once `error_event`
    is set."""
    def __init__(self, size, error_event):
        # mp.Event (or None) that the parent sets to trigger the worker error.
        self.error_event = error_event
        self.size = size
        # Per-iterator countdown (each worker gets its own dataset copy).
        self.remaining = size
    def __len__(self):
        return self.size
    def __iter__(self):
        return self
    def __next__(self):
        worker_info = torch.utils.data.get_worker_info()
        if self.error_event is not None and self.error_event.is_set() and \
                worker_info.id == worker_info.num_workers - 1:
            # only error in the last worker
            raise RuntimeError('Worker error')
        self.remaining -= 1
        if self.remaining < 0:
            raise StopIteration
        return torch.tensor(-1000)
# See TestDataLoader.test_proper_exit for usage
def _test_proper_exit(is_iterable_dataset, use_workers, pin_memory, exit_method,
                      hold_iter_reference, loader_setup_event, tester_setup_event,
                      persistent_workers):
    """Drive one DataLoader shutdown scenario; run inside a subprocess.

    `exit_method` selects how iteration terminates: None (clean), or
    'loader_error' / 'loader_kill' / 'worker_error' / 'worker_kill'.
    The two events synchronize with the parent test process, which then
    checks that all involved processes exit properly.
    """
    num_workers = 2 if use_workers else 0
    if exit_method == 'worker_error' or exit_method == 'worker_kill':
        assert use_workers is True
    if exit_method == 'worker_error':
        worker_error_event = mp.Event()
    else:
        worker_error_event = None
    if is_iterable_dataset:
        ds = TestProperExitIterableDataset(7, worker_error_event)
    else:
        ds = TestProperExitDataset(12, worker_error_event)
    loader = DataLoader(ds, batch_size=1, shuffle=False,
                        num_workers=num_workers, pin_memory=pin_memory,
                        worker_init_fn=set_faulthander_if_available,
                        persistent_workers=persistent_workers)
    error_it = 2
    # Sanity-check there are enough batches left AFTER the error iteration,
    # so the prefetch pipeline is still active when the exit is triggered.
    if use_workers:
        # 2 is the magical per-worker prefetch number...
        # FIXME: change this after the number becomes configurable.
        if is_iterable_dataset:
            assert len(ds) * num_workers > (error_it + 2 + 1)
        else:
            assert len(loader) > (error_it + 2 + 1) * num_workers
    else:
        if is_iterable_dataset:
            assert len(ds) > error_it + 1
        else:
            assert len(loader) > error_it + 1
    it = iter(loader)
    if use_workers:
        # Keep a handle on the worker processes even if `it` is deleted below.
        workers = it._workers
    def kill_pid(pid):
        # Hard-kill `pid` and wait until it is really gone.
        psutil_p = psutil.Process(pid)
        psutil_p.kill()
        psutil_p.wait(JOIN_TIMEOUT)
        assert not psutil_p.is_running()
    for i, _ in enumerate(it):
        if i == 0:
            # After the first batch, optionally drop our references, then
            # rendezvous with the parent test process.
            if not hold_iter_reference:
                del it
                del loader
            loader_setup_event.set()
            tester_setup_event.wait()
            # ensure that the workers are still alive
            if use_workers:
                for w in workers:
                    assert w.is_alive()
            if worker_error_event is not None:
                worker_error_event.set()
        if i == error_it:
            if exit_method == 'loader_error':
                raise RuntimeError('Loader error')
            elif exit_method == 'loader_kill':
                kill_pid(os.getpid())
            elif exit_method == 'worker_kill':
                kill_pid(workers[-1].pid)  # kill last worker
    if not hold_iter_reference:
        # Tries to trigger the __del__ clean-up rather than the automatic
        # exiting of daemonic children. Technically it should be automatically
        # triggered, but I don't want to rely on the implementation detail of
        # Python gc.
        gc.collect()
class TestWorkerInfoDataset(SynchronizedDataset):
    # Waits at the worker barrier, then returns `self.value`. `value` is only
    # set on the worker's dataset copy by _test_worker_info_init_fn; fetching
    # without that worker_init_fn raises AttributeError.
    def __getitem__(self, idx):
        self.sync_once()
        return torch.tensor(self.value)
# Should be used as worker_init_fn with TestWorkerInfoDataset.
# See _test_get_worker_info below for usage.
def _test_worker_info_init_fn(worker_id):
    """Runs inside each DataLoader worker; validates get_worker_info().

    Checks id/num_workers/seed consistency, that each worker has its own
    dataset copy, that WorkerInfo is read-only, and finally tags the
    worker's dataset copy with ``[worker_id, pid]`` so the parent test can
    verify which worker produced each item.
    """
    worker_info = torch.utils.data.get_worker_info()
    assert worker_id == worker_info.id, "worker_init_fn and worker_info should have consistent id"
    assert worker_id < worker_info.num_workers, "worker_init_fn and worker_info should have valid id"
    assert worker_info.seed == torch.initial_seed(), "worker_init_fn and worker_info should have consistent seed"
    dataset = worker_info.dataset
    assert isinstance(dataset, TestWorkerInfoDataset), "worker_info should have correct dataset copy"
    assert not hasattr(dataset, 'value'), "worker_info should have correct dataset copy"
    # test that WorkerInfo attributes are read-only
    try:
        worker_info.id = 3999
    except RuntimeError as e:
        assert str(e) == "Cannot assign attributes to WorkerInfo objects"
    else:
        # BUG FIX: previously a *successful* assignment went unnoticed and the
        # test silently passed; fail loudly if WorkerInfo becomes writable.
        raise AssertionError("WorkerInfo.id was unexpectedly assignable")
    try:
        worker_info.a = 3
    except RuntimeError as e:
        assert str(e) == "Cannot assign attributes to WorkerInfo objects"
    else:
        raise AssertionError("WorkerInfo attributes should not be assignable")
    for k in ['id', 'num_workers', 'seed', 'dataset']:
        assert "{}=".format(k) in repr(worker_info)
    dataset.value = [worker_id, os.getpid()]
def _test_get_worker_info():
    """Spawn-safe driver (run via ErrorTrackingProcess) for worker_info checks.

    Verifies get_worker_info() is None in the main process, that workers tag
    their private dataset copies via _test_worker_info_init_fn, and that the
    main-process dataset copy is left untouched.
    """
    # get_worker_info returns None in main proc
    assert torch.utils.data.get_worker_info() is None
    num_workers = 2
    batch_size = 2
    dataset = TestWorkerInfoDataset(6, batch_size, num_workers)
    dataloader = DataLoader(dataset, batch_size=batch_size,
                            num_workers=num_workers,
                            worker_init_fn=_test_worker_info_init_fn)
    it = iter(dataloader)
    data = list(it)
    worker_pids = [w.pid for w in it._workers]
    data = torch.cat(data, 0)
    for d in data:
        # each `d` is a [worker_id, worker_pid] pair, which is set in
        # _test_worker_info_init_fn
        assert d[1] == worker_pids[d[0]]
    # get_worker_info returns None in main proc after data loading
    assert torch.utils.data.get_worker_info() is None
    # main proc dataset was never assigned this attribute
    assert not hasattr(dataset, 'value')
    saw_attribute_error = False
    try:
        dataset[0]
    except AttributeError:
        saw_attribute_error = True
    if not saw_attribute_error:
        raise RuntimeError('Expected AttributeError')
# test custom init function
def init_fn(worker_id):
    """Worker-init hook that pins the RNG to a fixed seed, making every
    worker's random state identical (exercised by test_worker_init_fn)."""
    torch.manual_seed(12345)
# used with test_error_in_init
class ErrorIterableDataset(IterableDataset):
    """Iterable dataset whose __iter__ always fails, used to check that
    dataset iteration errors propagate out of the DataLoader."""

    def __iter__(self):
        raise RuntimeError("Error in __iter__")
# used with test_error_in_init
def error_worker_init_fn(_):
    """worker_init_fn that always fails, used to check that worker startup
    errors propagate out of the DataLoader."""
    raise RuntimeError("Error in worker_init_fn")
class BulkLoadingDataset(Dataset):
    """Map-style dataset indexed by *whole batches* of indices at once.

    Used together with BulkLoadingSampler and ``batch_size=None`` so the
    DataLoader fetches one batch per __getitem__ call.
    """

    def __init__(self, length):
        self.length = length

    def __getitem__(self, indices):
        # Only list/tuple batch indices are supported — never a lone int.
        assert isinstance(indices, (list, tuple))
        return torch.as_tensor(indices)

    def __len__(self):
        return self.length
class BulkLoadingSampler(torch.utils.data.Sampler):
    """Sampler yielding whole batches of shuffled indices as Python lists
    (pairs with BulkLoadingDataset)."""

    def __init__(self, dataset, batch_size):
        self.dataset = dataset
        self.batch_size = batch_size

    def __iter__(self):
        perm = torch.randperm(len(self.dataset))
        for chunk in perm.split(self.batch_size):
            yield chunk.tolist()

    def __len__(self):
        # number of batches, last one possibly partial
        return int(math.ceil(len(self.dataset) / float(self.batch_size)))
class TestMultiEpochDataset(IterableDataset):
    """Each worker yields its own worker id ``length // num_workers`` times,
    letting a test verify that worker scheduling is stable across epochs."""

    def __init__(self, length):
        self.length = length

    def __iter__(self):
        info = torch.utils.data.get_worker_info()
        # Only meaningful inside a worker process.
        assert info is not None
        per_worker = self.length // info.num_workers
        for _ in range(per_worker):
            yield info.id

    def __len__(self):
        return self.length
class CustomList(list):
    """Marker subclass of ``list`` — distinguishable from a plain list by
    type while behaving identically."""
    pass
class CustomDict(dict):
    """Marker subclass of ``dict`` — distinguishable from a plain dict by
    type while behaving identically."""
    pass
def row_processor(row):
    """Add 1 to every element of *row*; defined at module scope so it can be
    pickled for multiprocess DataLoader use."""
    return np.add(row, 1)
def filter_len(row):
    """Predicate keeping only rows of length 4; module-level so it is
    picklable for multiprocess DataLoader use."""
    return len(row) == 4
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
@unittest.skipIf(
TEST_WITH_ASAN,
"DataLoader tests hang in ASAN, see: https://github.com/pytorch/pytorch/issues/66223")
class TestDataLoader(TestCase):
    def setUp(self):
        # Fixture: 100 samples of shape (2, 3, 5) paired with 50 distinct
        # labels, each label appearing exactly twice.  Statement order matters:
        # randn and randperm consume the global RNG stream in sequence.
        super(TestDataLoader, self).setUp()
        self.data = torch.randn(100, 2, 3, 5)
        self.labels = torch.randperm(50).repeat(2)
        self.dataset = TensorDataset(self.data, self.labels)
        # Default for _get_data_loader; presumably flipped by a subclass that
        # re-runs the suite with persistent workers — confirm elsewhere in file.
        self.persistent_workers = False
def _get_data_loader(self, dataset, **kwargs):
persistent_workers = kwargs.get('persistent_workers', self.persistent_workers)
if persistent_workers and kwargs.get('num_workers', 0) == 0:
persistent_workers = False
kwargs['persistent_workers'] = persistent_workers
return DataLoader(dataset, **kwargs)
def _test_sequential(self, loader):
batch_size = loader.batch_size
if batch_size is None:
for idx, (sample, target) in enumerate(loader):
self.assertEqual(sample, self.data[idx])
self.assertEqual(target, self.labels[idx])
self.assertEqual(idx, len(self.dataset) - 1)
else:
for i, (sample, target) in enumerate(loader):
idx = i * batch_size
self.assertEqual(sample, self.data[idx:idx + batch_size])
self.assertEqual(target, self.labels[idx:idx + batch_size])
self.assertEqual(i, math.floor((len(self.dataset) - 1) / batch_size))
    def _test_shuffle(self, loader):
        """Assert *loader* yields every (sample, label) pair exactly once,
        matching each returned sample back to ``self.data`` by value.

        Handles both batch_size=None (one sample per iteration) and
        auto-batched loaders.
        """
        # Per-index counters: how many times each data/label index was seen.
        found_data = {i: 0 for i in range(self.data.size(0))}
        found_labels = {i: 0 for i in range(self.labels.size(0))}
        batch_size = loader.batch_size
        if batch_size is None:
            for i, (batch_samples, batch_targets) in enumerate(loader):
                sample, target = (batch_samples, batch_targets)
                for data_point_idx, data_point in enumerate(self.data):
                    if data_point.eq(sample).all():
                        self.assertFalse(found_data[data_point_idx])
                        found_data[data_point_idx] += 1
                        break
                # NOTE: data_point_idx is deliberately read after the inner
                # loop — it is the index of the matched data point.
                self.assertEqual(target, self.labels[data_point_idx])
                found_labels[data_point_idx] += 1
                self.assertEqual(sum(found_data.values()), (i + 1))
                self.assertEqual(sum(found_labels.values()), (i + 1))
            self.assertEqual(i, (len(self.dataset) - 1))
        else:
            for i, (batch_samples, batch_targets) in enumerate(loader):
                for sample, target in zip(batch_samples, batch_targets):
                    for data_point_idx, data_point in enumerate(self.data):
                        if data_point.eq(sample).all():
                            self.assertFalse(found_data[data_point_idx])
                            found_data[data_point_idx] += 1
                            break
                    # Same leaked-loop-variable pattern as above.
                    self.assertEqual(target, self.labels[data_point_idx])
                    found_labels[data_point_idx] += 1
                self.assertEqual(sum(found_data.values()), (i + 1) * batch_size)
                self.assertEqual(sum(found_labels.values()), (i + 1) * batch_size)
            self.assertEqual(i, math.floor((len(self.dataset) - 1) / batch_size))
def _test_error(self, loader):
it = iter(loader)
errors = 0
while True:
try:
next(it)
except NotImplementedError:
errors += 1
except StopIteration:
self.assertEqual(errors,
math.ceil(float(len(loader.dataset)) / loader.batch_size))
return
def test_error_in_init(self):
for num_workers in [0, 2]:
loader = self._get_data_loader(ErrorIterableDataset(), num_workers=num_workers)
with self.assertRaisesRegex(RuntimeError, 'Error in __iter__'):
list(iter(loader))
loader = self._get_data_loader(self.dataset, num_workers=2, worker_init_fn=error_worker_init_fn)
with self.assertRaisesRegex(RuntimeError, 'Error in worker_init_fn'):
list(iter(loader))
def test_typing(self):
from typing import List
# Make sure there is no TypeError
class SomeDatasetClass(Dataset[List[torch.Tensor]]):
pass
def _create_dataloader(is_train: bool) -> DataLoader[List[torch.Tensor]]:
pass
    @unittest.skipIf(IS_SANDCASTLE, "subprocess doesn't work in FB internal CI")
    @unittest.skipIf(IS_WINDOWS, "No 'resource' module on Windows")
    def test_fd_limit_exceeded(self):
        # See NOTE [ DataLoader on Linux and open files limit ]
        # Runs a child interpreter that lowers RLIMIT_NOFILE to 100 and keeps
        # many shared tensors from a fork worker alive.  If the fd limit is
        # hit, the loader must raise a RuntimeError mentioning the remedies
        # ("ulimit -n" / set_sharing_strategy) instead of failing obscurely.
        import subprocess
        subprocess.check_output([sys.executable, '-c', """\
import torch
import resource
from torch.utils.data import DataLoader, IterableDataset

class RandomDataset(IterableDataset):
    def __init__(self, len, size):
        super(RandomDataset).__init__()
        self.len = len
        self.size = size

    def __iter__(self):
        return self

    def __next__(self):
        if self.len <= 0:
            raise StopIteration
        self.len -= 1
        return torch.randn(self.size)

try:
    keep_fds_alive = []
    resource.setrlimit(resource.RLIMIT_NOFILE, (100, 100))
    for random_t in DataLoader(RandomDataset(200, (2,2)), multiprocessing_context="fork",
                               num_workers=1):
      random_t.max(dim=0)
      keep_fds_alive.append(random_t)
except RuntimeError as e:
    assert "ulimit -n" in str(e)
    assert "set_sharing_strategy" in str(e)
"""])
def test_invalid_assign_after_init(self):
dl = self._get_data_loader(self.dataset)
for attr in ('batch_size', 'sampler', 'batch_sampler', 'drop_last', 'dataset'):
def fn():
setattr(dl, attr, {})
self.assertRaises(ValueError, fn)
def test_sequential_nonbatch(self):
self._test_sequential(self._get_data_loader(self.dataset, batch_size=None))
def test_sequential_batch(self):
self._test_sequential(self._get_data_loader(self.dataset))
self._test_sequential(self._get_data_loader(self.dataset, batch_size=2))
def test_bulk_loading_nobatch(self):
n = 35
bs = 4
ds = BulkLoadingDataset(n)
sampler = BulkLoadingSampler(ds, batch_size=4)
for num_workers in [0, 4]:
dl = self._get_data_loader(ds, num_workers=num_workers, batch_size=None, sampler=sampler, pin_memory=TEST_CUDA)
self.assertFalse(dl._auto_collation)
samples = list(dl)
self.assertEqual(samples[0].is_pinned(), TEST_CUDA)
self.assertEqual(set(torch.cat(samples, 0).tolist()), set(range(n)))
def test_growing_dataset(self):
dataset = [torch.ones(4) for _ in range(4)]
dataloader_seq = self._get_data_loader(dataset, shuffle=False)
dataloader_shuffle = self._get_data_loader(dataset, shuffle=True)
dataset.append(torch.ones(4))
self.assertEqual(len(dataloader_seq), 5)
self.assertEqual(len(dataloader_shuffle), 5)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_sequential_pin_memory(self):
loader = self._get_data_loader(self.dataset, batch_size=2, pin_memory=True)
for input, target in loader:
self.assertTrue(input.is_pinned())
self.assertTrue(target.is_pinned())
def test_multiple_dataloaders(self):
for multiprocessing_context in supported_multiprocessing_contexts:
loader1_it = iter(self._get_data_loader(self.dataset, num_workers=1))
loader2_it = iter(self._get_data_loader(self.dataset, num_workers=2, multiprocessing_context=multiprocessing_context))
next(loader1_it)
next(loader1_it)
next(loader2_it)
next(loader2_it)
next(loader1_it)
next(loader2_it)
del loader1_it
del loader2_it
def test_segfault(self):
p = ErrorTrackingProcess(target=_test_segfault)
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertNotEqual(p.exitcode, 0)
if IS_WINDOWS:
self.assertIsInstance(p.exception, OSError)
self.assertRegex(str(p.exception), r'access violation reading ')
else:
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'DataLoader worker \(pid \d+\) is killed by signal: ')
finally:
p.terminate()
# Tests if the child process forked by the DataLoader segfaults due to having more than 3 threads
# in the parent process after at least one set_num_threads invocation in the parent process.
# After forking, set_num_threads(1) in the child process entails handling some inherited data-structures
# of the Caffe2 thread-pool of the parent process, culminating in a segfault.
# Reference: https://github.com/pytorch/pytorch/issues/54752
@unittest.skipIf(IS_WINDOWS, "Needs fork")
def test_no_segfault(self):
p = ErrorTrackingProcess(target=_test_no_segfault)
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
if p.exception:
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'DataLoader worker \(pid \d+\) is killed by signal: ')
self.fail("Segfault occurred in worker process after fork")
finally:
p.terminate()
def test_timeout(self):
if TEST_CUDA and not NO_MULTIPROCESSING_SPAWN:
# This test runs in a subprocess, which can only initialize CUDA with spawn.
# _test_timeout_pin_memory with pin_memory=True initializes CUDA when the iterator is
# constructed.
targets = (_test_timeout, _test_timeout_pin_memory)
else:
targets = (_test_timeout,)
for target in targets:
p = ErrorTrackingProcess(target=target, args=(self.persistent_workers,))
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertNotEqual(p.exitcode, 0)
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'DataLoader timed out after \d+ seconds')
finally:
p.terminate()
def test_large_sampler_indices(self):
# Test that the data loader cleanly exit when the process errors
# 1. having an reference to the iterator
# 2. using a sampler that yields big elements s.t. _index_queues putters block
#
# More context: https://github.com/pytorch/pytorch/issues/48666
p = ErrorTrackingProcess(target=_test_large_sampler_indices, args=(self.persistent_workers,))
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertNotEqual(p.exitcode, 0)
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'My Error')
finally:
p.terminate()
    def test_invalid_ctor_args_combinations(self):
        """Every invalid DataLoader constructor combination raises the
        documented ValueError/TypeError with the documented message."""
        # general
        with self.assertRaisesRegex(ValueError, "num_workers option should be non-negative"):
            self._get_data_loader(self.dataset, num_workers=-1)
        with self.assertRaisesRegex(ValueError, "timeout option should be non-negative"):
            self._get_data_loader(self.dataset, timeout=-1)
        # disable auto-batching
        with self.assertRaisesRegex(ValueError,
                                    "batch_size=None option disables auto-batching and is mutually exclusive"):
            self._get_data_loader(self.dataset, batch_size=None, drop_last=True)
        # any valid start method works for probing the context-validation paths
        valid_ctx = list(torch.multiprocessing.get_all_start_methods())[-1]
        with self.assertRaisesRegex(ValueError, r"multi-process loading \(num_workers > 0\), but got"):
            self._get_data_loader(self.dataset, num_workers=0, multiprocessing_context=valid_ctx)
        with self.assertRaisesRegex(ValueError, "should specify a valid start method in"):
            self._get_data_loader(self.dataset, num_workers=1, multiprocessing_context='bad')
        with self.assertRaisesRegex(TypeError, "multiprocessing_context option should be a valid context "):
            self._get_data_loader(self.dataset, num_workers=1, multiprocessing_context=object())
        # map-style
        sampler = torch.utils.data.SequentialSampler(self.dataset)
        batch_sampler = torch.utils.data.BatchSampler(sampler, 3, False)
        with self.assertRaisesRegex(ValueError, "sampler option is mutually exclusive with shuffle"):
            self._get_data_loader(self.dataset, batch_size=11, sampler=sampler, shuffle=True)
        with self.assertRaisesRegex(ValueError, "sampler option is mutually exclusive with shuffle"):
            self._get_data_loader(self.dataset, batch_sampler=batch_sampler, sampler=sampler, shuffle=True)
        with self.assertRaisesRegex(ValueError, "sampler option is mutually exclusive with shuffle"):
            # shuffle=3: any truthy value must be rejected, not just True
            self._get_data_loader(self.dataset, batch_sampler=batch_sampler, sampler=sampler, shuffle=3)
        with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
            self._get_data_loader(self.dataset, batch_size=11, batch_sampler=batch_sampler)
        with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
            self._get_data_loader(self.dataset, shuffle=True, batch_sampler=batch_sampler)
        with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
            self._get_data_loader(self.dataset, drop_last=True, batch_sampler=batch_sampler)
        with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
            self._get_data_loader(self.dataset, drop_last=3, batch_sampler=batch_sampler)
        # iterable-style
        dataset = CountingIterableDataset(20)
        with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified shuffle"):
            self._get_data_loader(dataset, shuffle=True)
        with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified shuffle"):
            self._get_data_loader(dataset, shuffle=3)
        with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified sampler"):
            self._get_data_loader(dataset, sampler=torch.utils.data.SequentialSampler(dataset))
        with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified sampler"):
            self._get_data_loader(dataset, sampler=3)
        with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified batch_sampler"):
            self._get_data_loader(dataset, batch_sampler=torch.utils.data.BatchSampler(
                torch.utils.data.SequentialSampler(dataset), 3, False))
        with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified batch_sampler"):
            self._get_data_loader(dataset, batch_sampler=3)
def test_builtin_collection_conversion(self):
for coll_ty in (list, tuple):
for num_workers in (0, 1):
# map-style dataset
dataset = CountingDataset(20)
# no auto-batching
fetched = coll_ty(self._get_data_loader(dataset, batch_size=None, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(range(20)))
# auto-batching
fetched = coll_ty(self._get_data_loader(dataset, batch_size=2, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(torch.tensor([i, i + 1]) for i in range(0, 20, 2)))
# iterable-style dataset
dataset = CountingIterableDataset(20)
# no auto-batching
fetched = coll_ty(self._get_data_loader(dataset, batch_size=None, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(range(20)))
# auto-batching
# this IterableDataset isn't configured for each worker, so for
# the equality test below to be valid, we cannot have more than 1 workers.
assert num_workers in [0, 1], "invalid test"
fetched = coll_ty(self._get_data_loader(dataset, batch_size=2, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(torch.tensor([i, i + 1]) for i in range(0, 20, 2)))
    def test_iterable_style_dataset(self):
        """End-to-end coverage for IterableDataset loading.

        Exercises every combination of {no auto-batching, auto-batching,
        auto-batching + drop_last} x {single-process, multiprocess}, the
        len()/warning semantics of IterableDataset loaders, and graceful
        worker shutdown after iterator deletion.
        """
        # [no auto-batching] single process loading
        dataset = CountingIterableDataset(20)
        dataloader = self._get_data_loader(dataset, batch_size=None)
        fetched = list(dataloader)
        self.assertEqual(len(fetched), 20)
        for i, d in enumerate(fetched):
            # non-batched should not convert ints into tensors
            self.assertIsInstance(d, int)
            self.assertEqual(d, i)
        # DataLoader should match len of the iterable-style dataset (if implemented)
        self.assertEqual(len(dataloader), len(dataset))

        # [no auto-batching] multiprocessing loading
        num_workers = 3
        sizes_for_all_workers = [0, 4, 20]
        expected = sorted(sum((list(range(s)) for s in sizes_for_all_workers), []))
        assert len(sizes_for_all_workers) == num_workers, 'invalid test case'
        for prefetch_factor in [2, 3, 4]:
            dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
            dataloader = self._get_data_loader(dataset, num_workers=num_workers, batch_size=None,
                                               worker_init_fn=set_faulthander_if_available,
                                               prefetch_factor=prefetch_factor)
            dataloader_iter = iter(dataloader)
            # worker arrival order is nondeterministic, so compare sorted
            fetched = sorted(dataloader_iter)
            for a, b in zip(fetched, expected):
                # non-batched should not convert ints into tensors
                self.assertIsInstance(a, int)
                self.assertEqual(a, b)
            # DataLoader should match len of the iterable-style dataset (if implemented)
            self.assertEqual(len(dataloader), len(dataset))

            # When loading more than len(dataset) data, after accessing len(dataloader),
            # we should get a warning. See NOTE [ IterableDataset and __len__ ].
            dataset = CountingIterableDataset(20)
            dataloader = self._get_data_loader(dataset, num_workers=num_workers,
                                               worker_init_fn=set_faulthander_if_available,
                                               prefetch_factor=prefetch_factor)
            it = iter(dataloader)
            for _ in range(40):
                self.assertNotWarn(lambda: next(it), "Should not warn before accessing len(dataloader)")
            self.assertEqual(len(dataloader), len(dataset))
            self.assertEqual(len(dataloader), 20)
            it = iter(dataloader)
            for _ in range(20):
                self.assertNotWarn(lambda: next(it), "Should not warn before exceeding length")
            for _ in range(3):
                with self.assertWarnsRegex(
                    UserWarning,
                        r"but [0-9]+ samples have been fetched\. For multiprocessing data-loading, this",
                        msg="Should always warn after exceeding length"):
                    next(it)
            # [no auto-batching] test that workers exit gracefully
            workers = dataloader_iter._workers
            del dataloader_iter
            del dataloader
            try:
                for w in workers:
                    w.join(JOIN_TIMEOUT)
                    self.assertFalse(w.is_alive())
                    self.assertEqual(w.exitcode, 0)
            finally:
                for w in workers:
                    w.terminate()

        # [auto-batching] single process loading
        dataset = CountingIterableDataset(20)
        fetched = list(self._get_data_loader(dataset, batch_size=7))
        self.assertEqual(len(fetched), 3)
        self.assertEqual(fetched[0].tolist(), list(range(7)))
        self.assertEqual(fetched[1].tolist(), list(range(7, 14)))
        self.assertEqual(fetched[2].tolist(), list(range(14, 20)))

        # [auto-batching] multiprocessing loading
        num_workers = 3
        sizes_for_all_workers = [0, 4, 20]
        expected = sorted(sum((list(range(s)) for s in sizes_for_all_workers), []))
        assert len(sizes_for_all_workers) == num_workers, 'invalid test case'
        for prefetch_factor in [2, 3, 4]:
            dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
            # worker 0 should return 0 batches
            # worker 1 should return 1 batches
            # worker 2 should return 3 batches
            dataloader = self._get_data_loader(dataset, num_workers=num_workers, batch_size=7, prefetch_factor=prefetch_factor)
            dataloader_iter = iter(dataloader)
            fetched = list(dataloader_iter)
            self.assertEqual(len(fetched), 4)
            fetched = set(tuple(t.tolist()) for t in fetched)
            self.assertEqual(fetched, {tuple(range(4)), tuple(range(7)), tuple(range(7, 14)), tuple(range(14, 20))})

            # [auto-batching] test that workers exit gracefully
            workers = dataloader_iter._workers
            del dataloader_iter
            del dataloader
            try:
                for w in workers:
                    w.join(JOIN_TIMEOUT)
                    self.assertFalse(w.is_alive())
                    self.assertEqual(w.exitcode, 0)
            finally:
                for w in workers:
                    w.terminate()

        # [auto-batching & drop_last] single process loading
        dataset = CountingIterableDataset(20)
        fetched = list(self._get_data_loader(dataset, batch_size=7, drop_last=True))
        self.assertEqual(len(fetched), 2)
        self.assertEqual(fetched[0].tolist(), list(range(7)))
        self.assertEqual(fetched[1].tolist(), list(range(7, 14)))

        # [auto-batching & drop_last] multiprocessing loading
        num_workers = 3
        sizes_for_all_workers = [0, 4, 20]
        expected = sorted(sum((list(range(s)) for s in sizes_for_all_workers), []))
        assert len(sizes_for_all_workers) == num_workers, 'invalid test case'
        for prefetch_factor in [2, 3, 4]:
            dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
            # worker 0 should return 0 batches
            # worker 1 should return 1 batches
            # worker 2 should return 3 batches
            dataloader = self._get_data_loader(dataset, num_workers=num_workers, batch_size=7, drop_last=True,
                                               worker_init_fn=set_faulthander_if_available,
                                               prefetch_factor=prefetch_factor)
            dataloader_iter = iter(dataloader)
            fetched = list(dataloader_iter)
            self.assertEqual(len(fetched), 2)
            fetched = set(tuple(t.tolist()) for t in fetched)
            self.assertEqual(fetched, {tuple(range(7)), tuple(range(7, 14))})

            # [auto-batching & drop_last] test that workers exit gracefully
            workers = dataloader_iter._workers
            del dataloader_iter
            del dataloader
            try:
                for w in workers:
                    w.join(JOIN_TIMEOUT)
                    self.assertFalse(w.is_alive())
                    self.assertEqual(w.exitcode, 0)
            finally:
                for w in workers:
                    w.terminate()
def test_chain_iterable_style_dataset(self):
# chaining (concatenation)
dataset1 = CountingIterableDataset(20)
dataset2 = CountingIterableDataset(15)
expected = list(range(20)) + list(range(15))
for num_workers in [0, 1]:
for chained_dataset in [dataset1 + dataset2, ChainDataset([dataset1, dataset2])]:
fetched = list(self._get_data_loader(chained_dataset, num_workers=num_workers))
self.assertEqual(len(fetched), len(expected))
for e, d in zip(expected, fetched):
self.assertIsInstance(d, torch.Tensor)
self.assertEqual(e, d)
with self.assertRaisesRegex(AssertionError, "ChainDataset only supports IterableDataset"):
list(iter(dataset1 + self.dataset))
with self.assertRaisesRegex(AssertionError, "ChainDataset only supports IterableDataset"):
list(iter(ChainDataset([dataset1, self.dataset])))
@unittest.skipIf(IS_MACOS, "Not working on macos")
def test_multiprocessing_contexts(self):
reference = [
torch.arange(3),
torch.arange(3, 6),
torch.arange(6, 9),
torch.arange(9, 11),
]
counting_ds_n = 11
dl_common_args = dict(num_workers=3, batch_size=3, pin_memory=(not TEST_CUDA))
for ctx in supported_multiprocessing_contexts:
# windows and jetson devices don't support sharing cuda tensor; ROCm does not yet fully support IPC
if ctx in ['spawn', 'forkserver'] and TEST_CUDA and not IS_WINDOWS and not IS_JETSON:
ds_cls = CUDACountingDataset
else:
ds_cls = CountingDataset
self.assertEqual(
reference, list(self._get_data_loader(ds_cls(counting_ds_n), multiprocessing_context=ctx, **dl_common_args)))
if ctx is not None:
# test ctx object
ctx = mp.get_context(ctx)
self.assertEqual(
reference, list(self._get_data_loader(ds_cls(counting_ds_n), multiprocessing_context=ctx, **dl_common_args)))
    @skipIfNoNumpy
    def test_multiprocessing_iterdatapipe(self):
        """Multiprocess loading of an IterDataPipe pipeline.

        Testing to make sure that function from global scope (e.g. imported
        from library) can be serialized and used with multiprocess DataLoader.
        """
        reference = [torch.as_tensor([[2, 3, 4, 5]], dtype=torch.int64),
                     torch.as_tensor([[2, 3, 4, 5]], dtype=torch.int64)]
        datapipe: IterDataPipe = IterableWrapper([[1, 2, 3, 4], [1, 2, 3, 4, 5, 6]])
        datapipe = datapipe.map(row_processor)
        # lambdas only serialize when dill is available; otherwise fall back
        # to the module-level (picklable) filter_len with the same predicate
        datapipe = datapipe.filter(lambda row: len(row) == 4) if HAS_DILL else datapipe.filter(filter_len)

        dl_common_args = dict(num_workers=2, batch_size=2, shuffle=True, pin_memory=(not TEST_CUDA))
        for ctx in supported_multiprocessing_contexts:
            self.assertEqual(reference,
                             [t.type(torch.int64)
                              for t in self._get_data_loader(datapipe, multiprocessing_context=ctx, **dl_common_args)])
            if ctx is not None:
                # test ctx object
                ctx = mp.get_context(ctx)
                self.assertEqual(reference,
                                 [t.type(torch.int64)
                                  for t in
                                  self._get_data_loader(datapipe, multiprocessing_context=ctx, **dl_common_args)])
def test_worker_seed(self):
num_workers = 6
batch_size = 1
dataset = SynchronizedSeedDataset(num_workers, batch_size, num_workers)
dataloader = self._get_data_loader(dataset, batch_size=batch_size, num_workers=num_workers)
seeds = set()
for batch in dataloader:
seeds.add(batch[0])
self.assertEqual(len(seeds), num_workers)
def test_worker_seed_reproducibility(self):
def get_dataloader():
return DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, generator=torch.Generator().manual_seed(42))
num_workers = 6
batch_size = 1
dataset = SynchronizedSeedDataset(num_workers, batch_size, num_workers)
self.assertEqual(set(int(batch) for batch in get_dataloader()), set(int(batch) for batch in get_dataloader()))
def test_multi_epochs_reproducibility(self):
num_workers = 2
batch_size = 10
num_epochs = 3
dataset = TestMultiEpochDataset(batch_size * num_workers)
dataloader = self._get_data_loader(dataset, batch_size=batch_size,
shuffle=False, num_workers=num_workers)
for ind in range(num_epochs):
for batch_idx, sample in enumerate(dataloader):
self.assertEqual(sample.tolist(), [batch_idx % num_workers] * batch_size)
def test_worker_init_fn(self):
dataset = SeedDataset(4)
dataloader = self._get_data_loader(dataset, batch_size=2, num_workers=2,
worker_init_fn=init_fn)
for batch in dataloader:
self.assertEqual(12345, batch[0])
self.assertEqual(12345, batch[1])
def test_get_worker_info(self):
p = ErrorTrackingProcess(target=_test_get_worker_info)
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertEqual(p.exitcode, 0)
finally:
p.terminate()
def test_shuffle(self):
self._test_shuffle(self._get_data_loader(self.dataset, shuffle=True))
def test_shuffle_batch_none(self):
self._test_shuffle(DataLoader(self.dataset, batch_size=None, shuffle=True))
def test_shuffle_batch(self):
self._test_shuffle(self._get_data_loader(self.dataset, batch_size=2, shuffle=True))
def test_shuffle_reproducibility(self):
for fn in (
lambda: DataLoader(self.dataset, shuffle=True, num_workers=0, generator=torch.Generator().manual_seed(42)),
lambda: DataLoader(self.dataset, shuffle=True, num_workers=2, generator=torch.Generator().manual_seed(42)),
):
self.assertEqual(list(fn()), list(fn()))
def test_sequential_workers(self):
self._test_sequential(self._get_data_loader(self.dataset, num_workers=4))
def test_seqential_batch_workers(self):
self._test_sequential(self._get_data_loader(self.dataset, batch_size=2, num_workers=4))
def test_seqential_batch_workers_prefetch(self):
self._test_sequential(DataLoader(self.dataset, batch_size=2, num_workers=4, prefetch_factor=3))
def test_shuffle_workers(self):
self._test_shuffle(self._get_data_loader(self.dataset, shuffle=True, num_workers=4))
def test_shuffle_batch_workers(self):
self._test_shuffle(self._get_data_loader(self.dataset, batch_size=2, shuffle=True, num_workers=4))
def test_shuffle_batch_workers_prefetch(self):
self._test_shuffle(DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, prefetch_factor=3))
def test_random_sampler(self):
from collections import Counter
from torch.utils.data import RandomSampler
def sample_stat(sampler, num_samples):
counts = Counter(sampler)
count_repeated = sum(val > 1 for val in counts.values())
return (count_repeated, min(counts.keys()), max(counts.keys()), sum(counts.values()))
# test sample with replacement
n = len(self.dataset) + 1 # ensure at least one sample is drawn more than once
sampler_with_replacement = RandomSampler(self.dataset, replacement=True, num_samples=n)
count_repeated, minval, maxval, count_total = sample_stat(sampler_with_replacement, n)
self.assertTrue(count_repeated > 0)
self.assertTrue(minval >= 0)
self.assertTrue(maxval < len(self.dataset))
self.assertTrue(count_total == n)
# test sample without replacement and without specified num_samples
sampler_without_replacement = RandomSampler(self.dataset)
count_repeated, minval, maxval, count_total = sample_stat(sampler_without_replacement, len(self.dataset))
self.assertTrue(count_repeated == 0)
self.assertTrue(minval == 0)
self.assertTrue(maxval == len(self.dataset) - 1)
self.assertTrue(count_total == len(self.dataset))
# test sample without replacement and with specified num_samples
n = len(self.dataset) * 2
sampler_without_replacement = RandomSampler(self.dataset, num_samples=n)
count_repeated, minval, maxval, count_total = sample_stat(sampler_without_replacement, len(self.dataset))
self.assertTrue(count_repeated == len(self.dataset))
self.assertTrue(minval == 0)
self.assertTrue(maxval == len(self.dataset) - 1)
self.assertTrue(count_total == n)
n = len(self.dataset) - 1
sampler_without_replacement = RandomSampler(self.dataset, num_samples=n)
count_repeated, minval, maxval, count_total = sample_stat(sampler_without_replacement, len(self.dataset))
self.assertTrue(count_repeated == 0)
self.assertTrue(minval >= 0)
self.assertTrue(maxval < len(self.dataset))
self.assertTrue(count_total == n)
n = len(self.dataset) + 1
sampler_without_replacement = RandomSampler(self.dataset, num_samples=n)
count_repeated, minval, maxval, count_total = sample_stat(sampler_without_replacement, len(self.dataset))
self.assertTrue(count_repeated == 1)
self.assertTrue(minval == 0)
self.assertTrue(maxval == len(self.dataset) - 1)
self.assertTrue(count_total == n)
# raise error when replacement is non-boolean
with self.assertRaisesRegex(TypeError, "replacement should be a boolean value, but got replacement=0"):
RandomSampler(self.dataset, replacement=0)
def test_random_sampler_len_with_replacement(self):
from torch.utils.data import RandomSampler
# add 5 extra samples
num_samples = len(self.dataset) + 5
sampler = RandomSampler(self.dataset,
replacement=True,
num_samples=num_samples)
# test len method
self.assertEqual(num_samples, len(sampler))
# test with iteration
count_num_samples = sum(1 for _ in sampler)
self.assertEqual(num_samples, count_num_samples)
# test with dataloader, batch_size = 1
batch_size = 1
count_num_samples_in_data_loader = len(self._get_data_loader(
self.dataset, batch_size=batch_size, sampler=sampler))
self.assertEqual(num_samples, count_num_samples_in_data_loader)
# test with dataloader, batch_size = 6
batch_size = 6
count_num_samples_in_data_loader = len(self._get_data_loader(
self.dataset, batch_size=batch_size, sampler=sampler))
self.assertEqual(int(math.ceil(float(num_samples) / batch_size)),
count_num_samples_in_data_loader)
    def test_random_sampler_len_without_replacement(self):
        """Same as the with-replacement case: len(sampler) must track
        ``num_samples`` even when it exceeds the dataset size (the sampler
        then repeats permutations to reach the requested count)."""
        from torch.utils.data import RandomSampler
        # add 5 extra samples
        num_samples = len(self.dataset) + 5
        sampler = RandomSampler(self.dataset,
                                replacement=False,
                                num_samples=num_samples)
        # test len method
        self.assertEqual(num_samples, len(sampler))
        # test with iteration
        count_num_samples = sum(1 for _ in sampler)
        self.assertEqual(num_samples, count_num_samples)
        # test with dataloader, batch_size = 1
        batch_size = 1
        count_num_samples_in_data_loader = len(self._get_data_loader(
            self.dataset, batch_size=batch_size, sampler=sampler))
        self.assertEqual(num_samples, count_num_samples_in_data_loader)
        # test with dataloader, batch_size = 6
        batch_size = 6
        count_num_samples_in_data_loader = len(self._get_data_loader(
            self.dataset, batch_size=batch_size, sampler=sampler))
        # ceil(num_samples / batch_size), spelled with integer arithmetic
        self.assertEqual(num_samples // batch_size + (num_samples % batch_size > 0),
                         count_num_samples_in_data_loader)
    def test_distributed_sampler_invalid_rank(self):
        """DistributedSampler must reject ranks outside [0, num_replicas - 1]."""
        from torch.utils.data.distributed import DistributedSampler
        dataset = torch.IntTensor(range(10))
        # rank == num_replicas is one past the last valid rank
        with self.assertRaisesRegex(ValueError, "Invalid rank"):
            sampler = DistributedSampler(dataset, 3, 3)
        # negative ranks are always invalid
        with self.assertRaisesRegex(ValueError, "Invalid rank"):
            sampler = DistributedSampler(dataset, 3, -1)
    def test_duplicating_data_with_drop_last(self):
        """With drop_last=True, the per-rank shards must be pairwise disjoint."""
        from torch.utils.data.distributed import DistributedSampler
        num_processes = 4
        num_batches = 9
        data_set = torch.IntTensor(range(num_batches))
        scanned_data = torch.IntTensor([])
        # Simulate each rank in turn and concatenate everything each one yields.
        for i in range(num_processes):
            s = DistributedSampler(data_set, num_processes, i)
            d_loader = self._get_data_loader(data_set, batch_size=int(num_batches / num_processes), drop_last=True, sampler=s)
            for data in d_loader:
                scanned_data = torch.cat((scanned_data, data), 0)
        # If any element appeared on two ranks, unique() would shrink the tensor.
        self.assertEqual(scanned_data.size(), scanned_data.unique().size())
    def test_sampler_reproducibility(self):
        """Samplers must be reproducible both via an explicit seeded generator
        and via the global torch seed, including two interleaved iterations."""
        from torch.utils.data import RandomSampler, WeightedRandomSampler, SubsetRandomSampler
        weights = [0.1, 0.9, 0.4, 0.7, 3.0, 0.6]
        for fn in (
            lambda: RandomSampler(self.dataset, num_samples=5, replacement=True, generator=torch.Generator().manual_seed(42)),
            lambda: RandomSampler(self.dataset, replacement=False, generator=torch.Generator().manual_seed(42)),
            lambda: WeightedRandomSampler(weights, num_samples=5, replacement=True, generator=torch.Generator().manual_seed(42)),
            lambda: WeightedRandomSampler(weights, num_samples=5, replacement=False, generator=torch.Generator().manual_seed(42)),
            lambda: SubsetRandomSampler(range(10), generator=torch.Generator().manual_seed(42)),
        ):
            # Two samplers built from identically-seeded generators must agree.
            self.assertEqual(list(fn()), list(fn()))
        for sampler in (
            RandomSampler(self.dataset, num_samples=5, replacement=True),
            RandomSampler(self.dataset, replacement=False),
            WeightedRandomSampler(weights, num_samples=5, replacement=True),
            WeightedRandomSampler(weights, num_samples=5, replacement=False),
            SubsetRandomSampler(range(10)),
        ):
            # Without an explicit generator, the global seed governs the order.
            torch.manual_seed(0)
            l1 = list(sampler) + list(sampler)
            torch.manual_seed(0)
            l2 = list(sampler) + list(sampler)
            self.assertEqual(l1, l2)
            # Two iterators advanced in lockstep, each seeded just before its
            # first next(), must produce identical sequences.
            its = (iter(sampler), iter(sampler))
            ls = ([], [])
            for idx in range(len(sampler)):
                for i in range(2):
                    if idx == 0:
                        torch.manual_seed(0)
                    ls[i].append(next(its[i]))
            self.assertEqual(ls[0], ls[1])
    def _test_sampler(self, **kwargs):
        """Shared body: a plain iterable of indices can serve as ``sampler``.

        ``kwargs`` are forwarded to the DataLoader (num_workers, context, ...).
        """
        indices = range(2, 12)  # using a regular iterable
        dl = self._get_data_loader(self.dataset, sampler=indices, batch_size=2, **kwargs)
        self.assertEqual(len(dl), 5)
        for i, (input, _target) in enumerate(dl):
            self.assertEqual(len(input), 2)
            # Batches must come back in sampler order: [2, 3], [4, 5], ...
            self.assertEqual(input, self.data[i * 2 + 2:i * 2 + 4])
def test_sampler(self):
self._test_sampler()
self._test_sampler(num_workers=4)
if not NO_MULTIPROCESSING_SPAWN:
self._test_batch_sampler(num_workers=4, multiprocessing_context='spawn')
    def _test_batch_sampler(self, **kwargs):
        """Shared body: a plain iterable of index tuples can serve as
        ``batch_sampler``, including batches of unequal sizes.

        ``kwargs`` are forwarded to the DataLoader (num_workers, context, ...).
        """
        # [(0, 1), (2, 3, 4), (5, 6), (7, 8, 9), ...]
        batches = []  # using a regular iterable
        for i in range(0, 20, 5):
            batches.append(tuple(range(i, i + 2)))
            batches.append(tuple(range(i + 2, i + 5)))
        dl = self._get_data_loader(self.dataset, batch_sampler=batches, **kwargs)
        self.assertEqual(len(dl), 8)
        for i, (input, _target) in enumerate(dl):
            if i % 2 == 0:
                # Even positions hold the 2-element batches.
                offset = i * 5 // 2
                self.assertEqual(len(input), 2)
                self.assertEqual(input, self.data[offset:offset + 2])
            else:
                # Odd positions hold the 3-element batches.
                offset = i * 5 // 2
                self.assertEqual(len(input), 3)
                self.assertEqual(input, self.data[offset:offset + 3])
    def test_batch_sampler(self):
        """Run the ``batch_sampler`` checks single- and multi-process (incl. spawn)."""
        self._test_batch_sampler()
        self._test_batch_sampler(num_workers=4)
        if not NO_MULTIPROCESSING_SPAWN:
            self._test_batch_sampler(num_workers=4, multiprocessing_context='spawn')
    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_shuffle_pin_memory(self):
        """With pin_memory=True, every collated tensor must be in pinned memory."""
        loader = self._get_data_loader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
        for input, target in loader:
            self.assertTrue(input.is_pinned())
            self.assertTrue(target.is_pinned())
    @unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
    def test_numpy(self):
        """float64 numpy samples must collate into a DoubleTensor of batched shape."""
        import numpy as np

        class TestDataset(torch.utils.data.Dataset):
            def __getitem__(self, i):
                # Each sample is a (2, 3, 4) float64 array filled with its index.
                return np.ones((2, 3, 4)) * i

            def __len__(self):
                return 1000

        loader = self._get_data_loader(TestDataset(), batch_size=12)
        batch = next(iter(loader))
        self.assertIsInstance(batch, torch.DoubleTensor)
        self.assertEqual(batch.size(), torch.Size([12, 2, 3, 4]))
    @unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
    def test_numpy_gen_state(self):
        """_generate_state must reproduce the NumPy-derived golden values below
        for each (worker_id, base_seed) pair."""
        from torch.utils.data._utils.worker import _generate_state
        # Using NumPy generated states as the reference to test `_generate_state`
        # having the same result.
        # Test case: ((worker_id, base_seed), expected_state)
        test_cases = [
            ((4, 13434589827475259383), (2884386318, 1088094898, 3523808998, 3860348662)),
            ((1, 15014285634777110771), (1934848465, 763213760, 2959016433, 179751970)),
            ((10, 978296274032934101), (1759791917, 3550927336, 1225977135, 1036538043)),
            ((12, 11868770762134256968), (3974661794, 3331131333, 3630387033, 2885815368)),
            ((9, 15378787925219019706), (3815056996, 3162224466, 2735102421, 3190253477)),
            ((5, 9055612723125076328), (3522565701, 3368424109, 959377806, 621878693)),
            ((15, 14617792358407278405), (3402479508, 1588702753, 1169536393, 3675067356)),
            ((9, 17363320784006640087), (957989458, 2518334477, 1421725660, 3086155459)),
            ((12, 480002904169484764), (2732851467, 1762620729, 4055801988, 1277640511)),
            ((15, 16803975943592702950), (3479415043, 4022359553, 295994005, 3358606349)),
            ((9, 11704776406047813044), (1968928009, 710113752, 2442656196, 1587420279)),
            ((10, 16357891985431864516), (1271733898, 4197047399, 3727213786, 2338547348)),
            ((2, 17423369006318065007), (544294336, 1911284083, 3299147734, 3231058347)),
            ((2, 2889492011444113593), (3721591783, 2595811276, 2212881745, 977682627)),
            ((0, 8979703111668486195), (4276723937, 2556068849, 2962827292, 233130238)),
            ((6, 6269787272229682235), (2548857855, 1216457374, 1012973562, 2999759647))
        ]
        for (worker_id, base_seed), exp in test_cases:
            self.assertEqual(exp, _generate_state(base_seed, worker_id))
    def test_error(self):
        """Errors raised in __getitem__ must propagate from a single-process loader."""
        self._test_error(self._get_data_loader(ErrorDataset(100), batch_size=2, shuffle=True))
    def test_error_workers(self):
        """Errors raised in worker processes must propagate to the main process."""
        self._test_error(self._get_data_loader(ErrorDataset(41), batch_size=2, shuffle=True, num_workers=4))
    @unittest.skipIf(IS_WINDOWS, "FIXME: stuck test")
    def test_partial_workers(self):
        r"""Check that workers exit even if the iterator is not exhausted."""
        if TEST_CUDA:
            pin_memory_configs = (True, False)
        else:
            pin_memory_configs = (False,)
        for pin_memory in pin_memory_configs:
            loader = iter(self._get_data_loader(self.dataset, batch_size=2, num_workers=4, pin_memory=pin_memory))
            # Grab private handles before dropping the loader reference so we
            # can observe the teardown from outside.
            workers = loader._workers
            if pin_memory:
                pin_memory_thread = loader._pin_memory_thread
            for i, _ in enumerate(loader):
                if i == 10:
                    break
            assert i == 10
            # Dropping the last reference must tear down workers promptly.
            del loader
            for w in workers:
                w.join(JOIN_TIMEOUT)
                self.assertFalse(w.is_alive(), 'subprocess not terminated')
            if pin_memory:
                pin_memory_thread.join(JOIN_TIMEOUT)
                self.assertFalse(pin_memory_thread.is_alive())
    # Takes 2.5min to finish, see https://github.com/pytorch/pytorch/issues/46065
    @skipIfRocm
    @unittest.skipIf(not HAS_PSUTIL, "psutil not found")
    @slowTest
    def test_proper_exit(self):
        (r'''There might be ConnectionResetError or leaked semaphore warning '''
         r'''(due to dirty process exit), but they are all safe to ignore''')
        # TODO: test the case where the pin_memory_thread triggers an
        # error/fatal signal. I haven't found out how to properly do that.
        # Sweep the full cross-product of loader configurations.
        for is_iterable_dataset, use_workers, pin_memory, hold_iter_reference in \
                itertools.product([True, False], repeat=4):
            # `hold_iter_reference` specifies whether we hold a reference to the
            # iterator. This is interesting because Python3 error traces holds a
            # reference to the frames, which hold references to all the local
            # variables including the iterator, and then the iterator dtor may
            # not be called before process end. It is important to see that the
            # processes still exit in both cases.
            if pin_memory and (not TEST_CUDA or NO_MULTIPROCESSING_SPAWN or IS_WINDOWS):
                # This test runs in a subprocess, which can only initialize CUDA with spawn.
                # DataLoader with pin_memory=True initializes CUDA when its iterator is constructed.
                # For windows, pin_memory sometimes causes CUDA oom.
                continue
            # `exit_method` controls the way the loader process ends.
            # - `*_kill` means that `*` is killed by OS.
            # - `*_error` means that `*` raises an error.
            # - `None` means that no error happens.
            # In all cases, all processes should end properly.
            if use_workers:
                # TODO: Fix test for 'loader_kill' that would cause running out of shared memory.
                # Killing loader process would prevent DataLoader iterator clean up all queues
                # and worker processes
                exit_methods = [None, 'loader_error', 'worker_error', 'worker_kill']
                persistent_workers = self.persistent_workers
            else:
                exit_methods = [None, 'loader_error', 'loader_kill']
                persistent_workers = False
            for exit_method in exit_methods:
                if exit_method == 'worker_kill':
                    # FIXME: This sometimes hangs. See #16608.
                    continue
                # Human-readable description of this configuration for failure messages.
                desc = []
                desc.append('is_iterable_dataset={}'.format(is_iterable_dataset))
                desc.append('use_workers={}'.format(use_workers))
                desc.append('pin_memory={}'.format(pin_memory))
                desc.append('hold_iter_reference={}'.format(hold_iter_reference))
                desc.append('exit_method={}'.format(exit_method))
                desc = 'test_proper_exit with ' + ', '.join(desc)
                # Event that the loader process uses to signal testing process
                # that various things are setup, including that the worker pids
                # are specified in `worker_pids` array.
                loader_setup_event = mp.Event()
                # Event that this process has finished setting up, and the
                # loader process can now proceed to trigger error events or
                # finish normally.
                tester_setup_event = mp.Event()
                loader_p = ErrorTrackingProcess(target=_test_proper_exit,
                                                args=(is_iterable_dataset, use_workers, pin_memory,
                                                      exit_method, hold_iter_reference,
                                                      loader_setup_event, tester_setup_event,
                                                      persistent_workers),
                                                disable_stderr=False)
                loader_p.start()
                loader_psutil_p = psutil.Process(loader_p.pid)
                # Wait for loader process to set everything up, e.g., starting
                # workers.
                loader_setup_event.wait(timeout=JOIN_TIMEOUT)
                if not loader_setup_event.is_set():
                    fail_msg = desc + ': loader process failed to setup within given time'
                    if loader_p.exception is not None:
                        fail_msg += ', and had exception {}'.format(loader_p.exception)
                    elif not loader_p.is_alive():
                        fail_msg += ', and exited with code {} but had no exception'.format(loader_p.exitcode)
                    else:
                        fail_msg += ', and is still alive.'
                    if loader_p.is_alive():
                        # this may kill the process, needs to run after the above lines
                        loader_p.print_traces_of_all_threads()
                    self.fail(fail_msg)
                # We are certain that the workers have started now.
                worker_psutil_ps = loader_psutil_p.children()

                def fail(reason):
                    # Collect rich diagnostics for whichever process misbehaved.
                    report_psutil_attrs = ['pid', 'name', 'cpu_times', 'io_counters',
                                           'memory_full_info', 'num_ctx_switches',
                                           'open_files', 'threads', 'status',
                                           'nice', 'ionice']
                    if reason is None:
                        err_msg = desc
                    else:
                        err_msg = '{}: {}'.format(desc, reason)
                    err_msg += '\nLoader info:\n\t'
                    if loader_psutil_p.is_running():
                        err_msg += str(loader_psutil_p.as_dict(attrs=report_psutil_attrs))
                        # this may kill the process, needs to run after the above line
                        loader_p.print_traces_of_all_threads()
                    else:
                        err_msg += 'exited with code {}'.format(loader_p.exitcode)
                    if use_workers:
                        err_msg += '\nWorker(s) info:'
                        for idx, worker_psutil_p in enumerate(worker_psutil_ps):
                            err_msg += '\n\tWorker {}:\n\t\t'.format(idx)
                            if worker_psutil_p.is_running():
                                err_msg += str(worker_psutil_p.as_dict(attrs=report_psutil_attrs))
                                # this may kill the process, needs to run after the above line
                                print_traces_of_all_threads(worker_psutil_p.pid)
                            else:
                                err_msg += 'exited with unknown code'
                    self.fail(err_msg)

                tester_setup_event.set()
                try:
                    loader_p.join(JOIN_TIMEOUT + MP_STATUS_CHECK_INTERVAL)
                    if loader_p.is_alive():
                        fail_reason = 'loader process did not terminate'
                        if loader_p.exception is not None:
                            fail(fail_reason + ', and had exception {}'.format(loader_p.exception))
                        else:
                            fail(fail_reason + ', and had no exception')
                    _, alive = psutil.wait_procs(worker_psutil_ps, timeout=(MP_STATUS_CHECK_INTERVAL + JOIN_TIMEOUT))
                    if len(alive) > 0:
                        fail('worker process (pid(s) {}) did not terminate'.format(
                            ', '.join(str(p.pid) for p in alive)))
                    if exit_method is None:
                        if loader_p.exitcode != 0:
                            fail('loader process had nonzero exitcode {}'.format(loader_p.exitcode))
                    else:
                        if loader_p.exitcode == 0:
                            fail('loader process had zero exitcode')
                        if exit_method == 'loader_error':
                            if not isinstance(loader_p.exception, RuntimeError) or \
                                    'Loader error' not in str(loader_p.exception):
                                fail('loader process did not raise expected exception, but had {}'.format(
                                    loader_p.exception))
                        elif exit_method == 'worker_kill':
                            if isinstance(loader_p.exception, RuntimeError):
                                if 'DataLoader worker (pid' not in str(loader_p.exception):
                                    fail('loader process did not raise expected exception, but had {}'.format(
                                        loader_p.exception))
                            elif isinstance(loader_p.exception, ConnectionRefusedError):
                                # Sometimes, when the worker is being killed and is freeing its
                                # resources, the unpickling in loader process will be met an
                                # a `ConnectionRefusedError` as it can not open a socket to receive
                                # resource. In such cases, the worker may not have fully exited,
                                # and the loader can't know this via `is_alive` check or `SIGCHLD`
                                # handler. So we permit this as an allowed error as well.
                                # After all, we are happy as long as it terminates.
                                pass
                            else:
                                fail('loader process did not raise expected exception, but had {}'.format(
                                    loader_p.exception))
                        elif exit_method == 'worker_error':
                            if not isinstance(loader_p.exception, RuntimeError) or \
                                    'Worker error' not in str(loader_p.exception):
                                fail('loader process did not raise expected exception, but had {}'.format(
                                    loader_p.exception))
                finally:
                    loader_p.terminate()
def test_len(self):
def check_len(dl, expected):
self.assertEqual(len(dl), expected)
n = 0
for _ in dl:
n += 1
self.assertEqual(n, expected)
check_len(self.dataset, 100)
check_len(self._get_data_loader(self.dataset, batch_size=2), 50)
check_len(self._get_data_loader(self.dataset, batch_size=3), 34)
def test_iterabledataset_len(self):
class IterableDataset(torch.utils.data.IterableDataset):
def __len__(self):
return 10
def __iter__(self):
return iter(range(10))
iterable_loader = DataLoader(IterableDataset(), batch_size=1)
self.assertEqual(len(iterable_loader), 10)
iterable_loader = DataLoader(IterableDataset(), batch_size=1, drop_last=True)
self.assertEqual(len(iterable_loader), 10)
iterable_loader = DataLoader(IterableDataset(), batch_size=2)
self.assertEqual(len(iterable_loader), 5)
iterable_loader = DataLoader(IterableDataset(), batch_size=2, drop_last=True)
self.assertEqual(len(iterable_loader), 5)
iterable_loader = DataLoader(IterableDataset(), batch_size=3)
self.assertEqual(len(iterable_loader), 4)
iterable_loader = DataLoader(IterableDataset(), batch_size=3, drop_last=True)
self.assertEqual(len(iterable_loader), 3)
    @unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
    def test_numpy_scalars(self):
        """Each numpy scalar dtype must collate into the matching legacy tensor type."""
        import numpy as np

        class ScalarDataset(torch.utils.data.Dataset):
            def __init__(self, dtype):
                self.dtype = dtype

            def __getitem__(self, i):
                # Calling the dtype yields a zero-valued numpy scalar.
                return self.dtype()

            def __len__(self):
                return 4

        # numpy scalar dtype -> expected collated tensor type
        dtypes = {
            np.float64: torch.DoubleTensor,
            np.float32: torch.FloatTensor,
            np.float16: torch.HalfTensor,
            np.int64: torch.LongTensor,
            np.int32: torch.IntTensor,
            np.int16: torch.ShortTensor,
            np.int8: torch.CharTensor,
            np.uint8: torch.ByteTensor,
        }
        for dt, tt in dtypes.items():
            dset = ScalarDataset(dt)
            loader = self._get_data_loader(dset, batch_size=2)
            batch = next(iter(loader))
            self.assertIsInstance(batch, tt)
    def test_default_convert_mapping_keep_type(self):
        """default_convert must return the same mapping subclass it was given."""
        data = CustomDict({"a": 1, "b": 2})
        converted = _utils.collate.default_convert(data)
        self.assertEqual(converted, data)
    def test_default_convert_sequence_keep_type(self):
        """default_convert must return the same sequence subclass it was given."""
        data = CustomList([1, 2, 3])
        converted = _utils.collate.default_convert(data)
        self.assertEqual(converted, data)
    def test_default_convert_sequence_dont_keep_type(self):
        """range cannot be reconstructed from converted elements, so
        default_convert must fall back to a plain list."""
        data = range(2)
        converted = _utils.collate.default_convert(data)
        self.assertEqual(converted, [0, 1])
    def test_default_collate_dtype(self):
        """default_collate must infer int64 / float64 / bool dtypes from Python
        scalars, and leave lists of strings untouched."""
        arr = [1, 2, -1]
        collated = _utils.collate.default_collate(arr)
        self.assertEqual(collated, torch.tensor(arr))
        self.assertEqual(collated.dtype, torch.int64)
        arr = [1.1, 2.3, -0.9]
        collated = _utils.collate.default_collate(arr)
        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
        self.assertEqualIgnoreType(collated, torch.tensor(arr))
        self.assertEqual(collated.dtype, torch.float64)
        arr = [True, False]
        collated = _utils.collate.default_collate(arr)
        self.assertEqual(collated, torch.tensor(arr))
        self.assertEqual(collated.dtype, torch.bool)
        # Should be a no-op
        arr = ['a', 'b', 'c']
        self.assertEqual(arr, _utils.collate.default_collate(arr))
    def test_default_collate_mapping_keep_type(self):
        """Collating dict subclasses must produce the same subclass, key-wise."""
        batch = [CustomDict({"a": 1, "b": 2}), CustomDict({"a": 3, "b": 4})]
        collated = _utils.collate.default_collate(batch)
        expected = CustomDict({"a": torch.tensor([1, 3]), "b": torch.tensor([2, 4])})
        self.assertEqual(collated, expected)
    def test_default_collate_sequence_keep_type(self):
        """Collating list subclasses must produce the same subclass, element-wise."""
        batch = [CustomList([1, 2, 3]), CustomList([4, 5, 6])]
        collated = _utils.collate.default_collate(batch)
        # Elements are transposed: position i of each sample is batched together.
        expected = CustomList([
            torch.tensor([1, 4]),
            torch.tensor([2, 5]),
            torch.tensor([3, 6]),
        ])
        self.assertEqual(collated, expected)
    def test_default_collate_sequence_dont_keep_type(self):
        """range samples collate into a plain list of batched tensors."""
        batch = [range(2), range(2)]
        collated = _utils.collate.default_collate(batch)
        self.assertEqual(collated, [torch.tensor([0, 0]), torch.tensor([1, 1])])
    @unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
    def test_default_collate_bad_numpy_types(self):
        """String/object ndarrays cannot become tensors: 1-D string arrays pass
        through unchanged, nested ones must raise TypeError."""
        import numpy as np
        # Should be a no-op
        arr = np.array(['a', 'b', 'c'])
        self.assertEqual(arr, _utils.collate.default_collate(arr))
        arr = np.array([[['a', 'b', 'c']]])
        self.assertRaises(TypeError, lambda: _utils.collate.default_collate(arr))
        arr = np.array([object(), object(), object()])
        self.assertRaises(TypeError, lambda: _utils.collate.default_collate(arr))
        arr = np.array([[[object(), object(), object()]]])
        self.assertRaises(TypeError, lambda: _utils.collate.default_collate(arr))
    @unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
    def test_default_collate_numpy_memmap(self):
        """Rows of a read-only numpy memmap must collate into a regular tensor."""
        import numpy as np
        with tempfile.TemporaryFile() as f:
            arr = np.array([[0, 1], [2, 3], [4, 5], [6, 7]])
            # Round-trip through a file-backed memmap, then reopen read-only.
            arr_memmap = np.memmap(f, dtype=arr.dtype, mode='w+', shape=arr.shape)
            arr_memmap[:] = arr[:]
            arr_new = np.memmap(f, dtype=arr.dtype, mode='r', shape=arr.shape)
            tensor = _utils.collate.default_collate(list(arr_new))
        self.assertTrue((tensor == tensor.new_tensor([[0, 1], [2, 3], [4, 5], [6, 7]])).all().item())
    def test_default_collate_bad_sequence_type(self):
        """Ragged nested sequences must raise, regardless of element order."""
        batch = [['X'], ['X', 'X']]
        self.assertRaises(RuntimeError, lambda: _utils.collate.default_collate(batch))
        self.assertRaises(RuntimeError, lambda: _utils.collate.default_collate(batch[::-1]))
    @unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
    def test_default_collate_shared_tensor(self):
        """default_collate moves results into shared memory only when it believes
        it runs inside a worker process."""
        import numpy as np
        t_in = torch.zeros(1)
        n_in = np.zeros(1)
        self.assertEqual(t_in.is_shared(), False)
        self.assertEqual(_utils.collate.default_collate([t_in]).is_shared(), False)
        self.assertEqual(_utils.collate.default_collate([n_in]).is_shared(), False)
        # FIXME: fix the following hack that makes `default_collate` believe
        #        that it is in a worker process (since it tests
        #        `get_worker_info() != None`), even though it is not.
        old = _utils.worker._worker_info
        try:
            _utils.worker._worker_info = 'x'
            self.assertEqual(_utils.collate.default_collate([t_in]).is_shared(), True)
            self.assertEqual(_utils.collate.default_collate([n_in]).is_shared(), True)
        finally:
            # Always restore the global, even if an assertion failed above.
            _utils.worker._worker_info = old
    def test_excessive_thread_creation_warning(self):
        """Requesting an absurd num_workers must emit the slow/freeze warning."""
        with self.assertWarnsRegex(
                UserWarning,
                r"excessive worker creation might get DataLoader running slow or even freeze"):
            dataloader = DataLoader(self.dataset, batch_size=2, num_workers=1000)
# Define a global function for testing purposes since local functions cannot be pickled
def identity(x):
    """Return *x* unchanged — a picklable stand-in for a trivial collate_fn."""
    return x
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDataLoader2(TestCase):
    """Parity checks between DataLoader and the prototype DataLoader2."""

    @skipIfNoDill
    def test_basics(self):
        # TODO(VitalyFedyunin): This test will start breaking if we remove guaranteed order
        # of traversing workers
        dp = IterableWrapper(list(range(1000))).sharding_filter()
        dl = DataLoader(dp, batch_size=3, collate_fn=identity, num_workers=2)
        dl2 = DataLoader2(dp, batch_size=3, collate_fn=identity, num_workers=2)
        dl2_threading = DataLoader2(dp, batch_size=3, collate_fn=identity, num_workers=2, parallelism_mode='thread')
        # Both process- and thread-parallel DataLoader2 must match DataLoader.
        self.assertEqual(list(dl), list(dl2))
        self.assertEqual(list(dl), list(dl2_threading))

    class Sorter(IterDataPipe):
        # Helper pipe that globally sorts its input; used below to undo shuffling.
        def __init__(self, datapipe):
            self.datapipe = datapipe

        def __iter__(self):
            return iter(sorted(self.datapipe))

    def test_shuffle(self):
        """shuffle=True must permute (not drop) items; shuffle=False keeps order."""
        items = list(range(1000))
        dp = IterableWrapper(items).sharding_filter().shuffle()
        dl = DataLoader2(dp, batch_size=None, num_workers=2, shuffle=False)
        self.assertEqual(items, list(dl))
        dl = DataLoader2(dp, batch_size=None, num_workers=2, shuffle=True)
        self.assertNotEqual(items, list(dl))
        self.assertEqual(items, sorted(list(dl)))
        dl = DataLoader2(dp, batch_size=None, num_workers=2, shuffle=True)
        self.assertNotEqual(items, list(dl))
        self.assertEqual(items, sorted(list(dl)))
        # A trailing Sorter restores global order even with shuffle=True.
        dl = DataLoader2(self.Sorter(dp), batch_size=None, num_workers=2, shuffle=True)
        self.assertEqual(list(dl), items)
        dl = DataLoader2(self.Sorter(dp), batch_size=None, num_workers=2, shuffle=True)
        self.assertEqual(list(dl), items)
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDataLoader2_EventLoop(TestCase):
    """Exercises the thread-based event loop serving DataPipes over queues."""

    @skipIfNoDill
    def test_basic_threading(self):
        """An IterDataPipe served from a spawned thread yields the same elements."""
        def clean_me(process, req_queue, res_queue):
            # Ask the event loop to shut down, drain the ack, then join.
            req_queue.put(communication.messages.TerminateRequest())
            _ = res_queue.get()
            process.join()

        it = list(range(100))
        numbers_dp = IterableWrapper(it)
        (process, req_queue, res_queue, _thread_local_datapipe) = communication.eventloop.SpawnThreadForDataPipeline(numbers_dp)

        process.start()
        local_datapipe = communication.iter.QueueWrapper(
            communication.protocol.IterDataPipeQueueProtocolClient(req_queue, res_queue))

        actual = list(local_datapipe)
        clean_me(process, req_queue, res_queue)

        # Compare against the exact input rather than a duplicated magic constant.
        self.assertEqual(it, actual)

    @skipIfNoDill
    def test_basic_mapdatapipe_threading(self):
        """A MapDataPipe served from a thread supports items, errors and len()."""
        def clean_me(process, req_queue, res_queue):
            req_queue.put(communication.messages.TerminateRequest())
            _ = res_queue.get()
            process.join()

        input_len = 100
        it = list(range(input_len))
        numbers_dp = SequenceWrapper(it)
        (process, req_queue, res_queue, _thread_local_datapipe) = communication.eventloop.SpawnThreadForDataPipeline(
            numbers_dp)

        process.start()

        # Functional Test: Ensure that you can retrieve every element from the Queue and DataPipe
        local_datapipe = communication.map.QueueWrapperForMap(
            communication.protocol.MapDataPipeQueueProtocolClient(req_queue, res_queue))
        actual = list(local_datapipe)
        # Use input_len rather than repeating the literal 100 so the expected
        # value cannot drift from the constructed input.
        self.assertEqual([(x, x) for x in range(input_len)], actual)

        # Functional Test: raise Error when input
        local_datapipe = communication.map.QueueWrapperForMap(
            communication.protocol.MapDataPipeQueueProtocolClient(req_queue, res_queue))
        with self.assertRaisesRegex(IndexError, "out of bound"):
            local_datapipe[1000]

        # __len__ Test: Ensure that the correct length is returned
        local_datapipe = communication.map.QueueWrapperForMap(
            communication.protocol.MapDataPipeQueueProtocolClient(req_queue, res_queue))
        self.assertEqual(input_len, len(local_datapipe))

        clean_me(process, req_queue, res_queue)
class StringDataset(Dataset):
    """Tiny map-style dataset yielding ``(character, index)`` pairs from '12345'."""

    def __init__(self):
        self.s = '12345'

    def __len__(self):
        # One sample per character.
        return len(self.s)

    def __getitem__(self, ndx):
        char = self.s[ndx]
        return (char, ndx)
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestStringDataLoader(TestCase):
    """Mixed str/int samples: strings pass through collation, ints get pinned."""

    def setUp(self):
        super(TestStringDataLoader, self).setUp()
        self.dataset = StringDataset()

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_shuffle_pin_memory(self):
        loader = DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
        for (s, n) in loader:
            # Strings cannot be pinned; they must survive collation unchanged.
            self.assertIsInstance(s[0], str)
            self.assertTrue(n.is_pinned())
class DictDataset(Dataset):
    """Map-style dataset whose samples are nested dicts: a (4, 2) tensor filled
    with the index plus the raw index one level deeper."""

    def __len__(self):
        return 4

    def __getitem__(self, ndx):
        tensor_part = torch.empty(4, 2).fill_(ndx)
        return {'a_tensor': tensor_part,
                'another_dict': {'a_number': ndx}}
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDictDataLoader(TestCase):
    """Collation and pinning of dict-valued samples (see DictDataset above)."""

    def setUp(self):
        super(TestDictDataLoader, self).setUp()
        self.dataset = DictDataset()

    def test_sequential_batch(self):
        """Dict samples must collate key-wise, preserving nested structure."""
        for persistent_workers in (False, True):
            if persistent_workers:
                loader = DataLoader(self.dataset, batch_size=2, shuffle=False,
                                    persistent_workers=persistent_workers, num_workers=1)
            else:
                loader = DataLoader(self.dataset, batch_size=2, shuffle=False,
                                    persistent_workers=persistent_workers)
            batch_size = loader.batch_size
            for i, sample in enumerate(loader):
                # First dataset index covered by this batch.
                idx = i * batch_size
                self.assertEqual(set(sample.keys()), {'a_tensor', 'another_dict'})
                self.assertEqual(set(sample['another_dict'].keys()), {'a_number'})
                t = sample['a_tensor']
                self.assertEqual(t.size(), torch.Size([batch_size, 4, 2]))
                self.assertTrue((t[0] == idx).all())
                self.assertTrue((t[1] == idx + 1).all())
                n = sample['another_dict']['a_number']
                self.assertEqual(n.size(), torch.Size([batch_size]))
                self.assertEqual(n[0], idx)
                self.assertEqual(n[1], idx + 1)

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_pin_memory(self):
        """pin_memory=True must pin tensors at every nesting level."""
        loader = DataLoader(self.dataset, batch_size=2, pin_memory=True)
        for sample in loader:
            self.assertTrue(sample['a_tensor'].is_pinned())
            self.assertTrue(sample['another_dict']['a_number'].is_pinned())

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_pin_memory_device(self):
        """pin_memory with an explicit pin_memory_device must also pin."""
        loader = DataLoader(self.dataset, batch_size=2, pin_memory=True, pin_memory_device='cuda')
        for sample in loader:
            self.assertTrue(sample['a_tensor'].is_pinned(device='cuda'))
            self.assertTrue(sample['another_dict']['a_number'].is_pinned(device='cuda'))

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_pin_memory_with_only_device(self):
        """pin_memory_device alone (without pin_memory=True) must NOT pin."""
        loader = DataLoader(self.dataset, batch_size=2, pin_memory_device='cuda')
        for sample in loader:
            self.assertFalse(sample['a_tensor'].is_pinned(device='cuda'))
            self.assertFalse(sample['another_dict']['a_number'].is_pinned(device='cuda'))
class DummyDataset(torch.utils.data.Dataset):
    """Dataset of the ints 0..9 that asserts ``self.start`` is still 0 on access.

    Used to check that persistent workers keep the dataset object they were
    spawned with: mutating ``start`` in the parent process afterwards must not
    be visible inside the workers.
    """

    def __init__(self):
        self.data = list(range(10))

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # Accept tensor indices by converting them to plain Python values.
        key = idx.tolist() if torch.is_tensor(idx) else idx
        # The persistent workers always maintain the original
        # dataset through the dataloader lifetime
        # so the attributes will remain the same as the
        # first time the workers where spawned (dataloader iteration)
        assert self.start == 0
        return self.data[key]
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
@unittest.skipIf(
    TEST_WITH_ASAN, "DataLoader tests hang in ASAN, see: https://github.com/pytorch/pytorch/issues/66223")
class TestDataLoaderPersistentWorkers(TestDataLoader):
    """Re-runs the whole TestDataLoader suite with persistent_workers=True."""

    def setUp(self):
        super(TestDataLoaderPersistentWorkers, self).setUp()
        # Flag consumed by inherited tests via self.persistent_workers.
        self.persistent_workers = True

    @unittest.skipIf(IS_SANDCASTLE, "subprocess doesn't work in FB internal CI")
    @unittest.skipIf(IS_WINDOWS, "No 'resource' module on Windows")
    def test_fd_limit_exceeded(self):
        """Exhausting RLIMIT_NOFILE must surface the helpful error that mentions
        ulimit and set_sharing_strategy; run in a subprocess so the lowered
        limit cannot poison this test process."""
        # See NOTE [ DataLoader on Linux and open files limit ]
        import subprocess
        # NOTE(review): inside the embedded script, `super(RandomDataset).__init__()`
        # is missing `self`; harmless here but worth confirming upstream.
        subprocess.check_output([sys.executable, '-c', """\
import torch
import resource
from torch.utils.data import DataLoader, IterableDataset
class RandomDataset(IterableDataset):
    def __init__(self, len, size):
        super(RandomDataset).__init__()
        self.len = len
        self.size = size
    def __iter__(self):
        return self
    def __next__(self):
        if self.len <= 0:
            raise StopIteration
        self.len -= 1
        return torch.randn(self.size)
try:
    keep_fds_alive = []
    resource.setrlimit(resource.RLIMIT_NOFILE, (100, 100))
    for random_t in DataLoader(RandomDataset(200, (2,2)), multiprocessing_context="fork",
                               num_workers=1, persistent_workers=True):
        random_t.max(dim=0)
        keep_fds_alive.append(random_t)
except RuntimeError as e:
    assert "ulimit -n" in str(e)
    assert "set_sharing_strategy" in str(e)
"""])

    def test_dataset_not_reset(self):
        """Persistent workers must keep the dataset snapshot from first spawn
        (DummyDataset asserts start == 0 inside the workers)."""
        dataset = DummyDataset()
        pin_memory_configs = [False]
        if TEST_CUDA:
            pin_memory_configs.append(True)
        for pin_memory in pin_memory_configs:
            dataloader = self._get_data_loader(dataset, num_workers=2, pin_memory=pin_memory)
            dataset.start = 0
            for i in range(10):
                for x in dataloader:
                    pass
                # Changing the start value here doesn't have any effect in the dataset
                # cached by the workers. since they are not recreated between epochs
                # and can cache values safely
                dataset.start = i

    @unittest.skipIf(IS_SANDCASTLE, "subprocess doesn't work in FB internal CI")
    @unittest.skipIf(IS_WINDOWS, "Needs fork")
    def test_early_exit(self):
        """Breaking out after the first batch must not hang a persistent,
        pinned, multi-worker loader (run in a subprocess to detect hangs)."""
        import subprocess
        proc = subprocess.check_output([sys.executable, '-c', """\
import torch
from torch.utils.data import DataLoader, IterableDataset
class RandomDataset(IterableDataset):
    def __init__(self, len, size):
        super(RandomDataset).__init__()
        self.len = len
        self.size = size
    def __iter__(self):
        return self
    def __next__(self):
        if self.len <= 0:
            raise StopIteration
        self.len -= 1
        return torch.randn(self.size)
if __name__ == '__main__':
    dl = DataLoader(
        RandomDataset(64, (28, 28)),
        batch_size=16,
        num_workers=2,
        pin_memory=True,
        persistent_workers=True,
        multiprocessing_context="fork",
    )
    for _ in dl:
        break
"""])
class NamedTupleDataset(Dataset):
    """Dataset returning nested namedtuples so collation must preserve their types."""
    from collections import namedtuple
    Batch = namedtuple('Batch', ['data', 'label', 'random_tensor'])
    Data = namedtuple('Data', ['positive', 'negative'])

    def __len__(self):
        return 4

    def __getitem__(self, ndx):
        # Inner pair carries the index and its negation; label is the index as str.
        pair = self.Data(positive=ndx, negative=-ndx)
        return self.Batch(data=pair, label=str(ndx), random_tensor=torch.randn(3))
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestNamedTupleDataLoader(TestCase):
    """Collation must preserve (nested) namedtuple types, with and without
    auto-collation, and pin only the tensor fields when requested."""

    def setUp(self):
        super(TestNamedTupleDataLoader, self).setUp()
        self.dataset = NamedTupleDataset()

    def test_dataloader_with_namedtuple(self):
        # auto-collation
        loader = DataLoader(self.dataset, batch_size=2, pin_memory=TEST_CUDA)
        for batch in loader:
            self.assertIsInstance(batch, NamedTupleDataset.Batch)
            self.assertEqual(batch.random_tensor.is_pinned(), TEST_CUDA)
            self.assertIsInstance(batch.data, NamedTupleDataset.Data)
            self.assertIsInstance(batch.data.positive, torch.Tensor)
            self.assertEqual(batch.data.positive.is_pinned(), TEST_CUDA)
        # no auto-collation
        loader = DataLoader(self.dataset, batch_size=None, pin_memory=TEST_CUDA)
        for batch in loader:
            self.assertIsInstance(batch, NamedTupleDataset.Batch)
            self.assertEqual(batch.random_tensor.is_pinned(), TEST_CUDA)
            self.assertIsInstance(batch.data, NamedTupleDataset.Data)
            # Without collation the int fields stay plain Python ints.
            self.assertNotIsInstance(batch.data.positive, torch.Tensor)
class SimpleCustomBatch(object):
    """Custom batch type exposing pin_memory()/is_pinned() so DataLoader can
    pin user-defined batch objects."""

    def __init__(self, data):
        columns = tuple(zip(*data))
        self.inp = torch.stack(columns[0], 0)
        self.tgt = torch.stack(columns[1], 0)

    def pin_memory(self):
        self.inp = self.inp.pin_memory()
        self.tgt = self.tgt.pin_memory()
        return self

    def is_pinned(self):
        # pinned only when both halves are pinned
        return self.inp.is_pinned() and self.tgt.is_pinned()
# Workaround for https://github.com/pytorch/pytorch/issues/50661
# Classes from `__main__` can not be correctly unpickled from spawned module
# See https://docs.python.org/3/library/multiprocessing.html#multiprocessing-programming
self_module = __import__(os.path.splitext(os.path.basename(__file__))[0])


def collate_wrapper(batch):
    # Resolve SimpleCustomBatch through the re-imported module (not __main__)
    # so spawned DataLoader workers can pickle/unpickle the batch object.
    return self_module.SimpleCustomBatch(batch)
def collate_into_packed_sequence(batch):
    """Collate samples into a seq-major PackedSequence with random lengths."""
    # Stack along dim 1 -> data is (seq_len, batch).
    data = torch.stack([sample[0] for sample in batch], 1)
    t, b = data.size()
    # Random, unsorted lengths in [1, t-1] (randint high bound is exclusive);
    # enforce_sorted=False lets pack_padded_sequence handle the unsorted case.
    lengths = torch.randint(1, t, size=(b,), dtype=torch.int64)
    return torch.nn.utils.rnn.pack_padded_sequence(data, lengths, enforce_sorted=False)
def collate_into_packed_sequence_batch_first(batch):
    """Collate samples into a batch-first PackedSequence with random lengths."""
    # Stack along dim 0 -> data is (batch, seq_len), hence batch_first=True below.
    data = torch.stack([sample[0] for sample in batch], 0)
    b, t = data.size()
    # Random, unsorted lengths in [1, t-1] (randint high bound is exclusive).
    lengths = torch.randint(1, t, size=(b,), dtype=torch.int64)
    return torch.nn.utils.rnn.pack_padded_sequence(data, lengths, batch_first=True, enforce_sorted=False)
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestCustomPinFn(TestCase):
    """Tests pin_memory support for custom batch types and PackedSequence."""

    def setUp(self):
        super(TestCustomPinFn, self).setUp()
        inps = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
        tgts = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
        self.dataset = TensorDataset(inps, tgts)

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_custom_batch_pin(self):
        """Each custom-collated batch must report is_pinned() True."""
        test_cases = [
            (collate_wrapper, self_module.SimpleCustomBatch),
            (collate_into_packed_sequence, torch.nn.utils.rnn.PackedSequence),
            (collate_into_packed_sequence_batch_first, torch.nn.utils.rnn.PackedSequence),
        ]
        for collate_fn, elem_cls in test_cases:
            loader = DataLoader(self.dataset, batch_size=2, collate_fn=collate_fn,
                                pin_memory=True)
            for sample in loader:
                self.assertIsInstance(sample, elem_cls)
                self.assertTrue(sample.is_pinned())

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_custom_batch_pin_worker(self):
        """Same as above, but with batches produced by a worker process."""
        test_cases = [
            (collate_wrapper, self_module.SimpleCustomBatch),
            (collate_into_packed_sequence, torch.nn.utils.rnn.PackedSequence),
            (collate_into_packed_sequence_batch_first, torch.nn.utils.rnn.PackedSequence),
        ]
        for collate_fn, elem_cls in test_cases:
            loader = DataLoader(self.dataset, batch_size=2, collate_fn=collate_fn,
                                pin_memory=True, num_workers=1)
            for sample in loader:
                self.assertIsInstance(sample, elem_cls)
                self.assertTrue(sample.is_pinned())
class TestWorkerQueueDataset(Dataset):
    """Dataset that records the worker id set via worker_init_fn and returns
    it alongside each sample."""

    def __init__(self, data):
        self.data = data
        self.worker_id = None  # filled in by worker_init_fn inside each worker

    def worker_init_fn(self, worker_id):
        self.worker_id = worker_id

    def __getitem__(self, index):
        return (self.worker_id, self.data[index])

    def __len__(self):
        return len(self.data)
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
@unittest.skipIf(
    TEST_WITH_ASAN,
    "Flaky with ASAN, see https://github.com/pytorch/pytorch/issues/65727")
class TestIndividualWorkerQueue(TestCase):
    """Checks that batches are dispatched to workers in strict round-robin order."""

    def setUp(self):
        super(TestIndividualWorkerQueue, self).setUp()
        self.dataset = TestWorkerQueueDataset(list(range(128)))

    def _run_ind_worker_queue_test(self, batch_size, num_workers):
        loader = DataLoader(
            self.dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers,
            timeout=5, worker_init_fn=self.dataset.worker_init_fn
        )
        current_worker_idx = 0
        for i, (worker_ids, sample) in enumerate(loader):
            # every element of batch i must come from the expected worker,
            # and data must arrive in order since shuffle=False
            self.assertEqual(worker_ids.tolist(), [current_worker_idx] * batch_size)
            self.assertEqual(sample.tolist(), list(range(i * batch_size, (i + 1) * batch_size)))
            current_worker_idx += 1
            if current_worker_idx == num_workers:
                current_worker_idx = 0

    def test_ind_worker_queue(self):
        # Derive a worker-count cap from the CPU affinity/count when available.
        max_num_workers = None
        if hasattr(os, 'sched_getaffinity'):
            try:
                max_num_workers = len(os.sched_getaffinity(0))
            except Exception:
                pass
        if max_num_workers is None:
            cpu_count = os.cpu_count()
            if cpu_count is not None:
                # Use half number of CPUs
                max_num_workers = cpu_count // 2
        if max_num_workers is None:
            max_num_workers = 1
        for batch_size in (8, 16, 32, 64):
            for num_workers in range(0, min(6, max_num_workers)):
                self._run_ind_worker_queue_test(batch_size=batch_size, num_workers=num_workers + 1)
class SetAffinityDataset(IterableDataset):
    """Iterable dataset that yields the calling process's CPU affinity mask."""

    def __iter__(self):
        # touch the torch RNG before querying affinity (mirrors worker behavior)
        torch.randperm(1)
        return iter(os.sched_getaffinity(0))
@unittest.skipIf(
    not hasattr(os, 'sched_setaffinity'),
    "os.sched_setaffinity is not available")
class TestSetAffinity(TestCase):
    """Verifies that worker_init_fn can change a worker's CPU affinity."""

    def test_set_affinity_in_worker_init(self):
        # Query the current affinity mask to avoid setting a disallowed one
        old_affinity = os.sched_getaffinity(0)
        if not old_affinity:
            self.skipTest("No affinity information")
        # Choose any
        expected_affinity = list(old_affinity)[-1]

        def worker_set_affinity(_):
            os.sched_setaffinity(0, [expected_affinity])

        dataset = SetAffinityDataset()
        dataloader = torch.utils.data.DataLoader(
            dataset, num_workers=2, worker_init_fn=worker_set_affinity)
        for sample in dataloader:
            # each worker should report exactly the one CPU it was pinned to
            self.assertEqual(sample, [expected_affinity])
class ConvDataset(Dataset):
    """Single-item dataset whose construction already runs a conv1d in the
    parent process (repro fixture for conv-after-fork crashes)."""

    def __init__(self):
        self.x = torch.ones(1, 1, 24000)
        # Call convolution on parent process
        self[0]

    def __len__(self):
        return 1

    def __getitem__(self, index):
        kernel = torch.ones(1, 1, 2)
        return torch.nn.functional.conv1d(self.x, kernel)
@unittest.skipIf(IS_WINDOWS, "Needs fork")
class TestConvAfterFork(TestCase):
    # Tests crash reported in https://github.com/pytorch/pytorch/issues/53565
    def test_conv_after_fork(self):
        loader = DataLoader(ConvDataset(), num_workers=1)
        for x in loader:
            # dataset item is (1, 1, 23999); default collate prepends a batch dim
            self.assertEqual(x.shape, (1, 1, 1, 23999))
# Standard PyTorch test-suite entry point.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_dataloader.py |
# Owner(s): ["module: fx.passes"]
from dataclasses import dataclass
import operator
import logging
import torch
from torch.fx._symbolic_trace import symbolic_trace
from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner
from torch.fx.passes.operator_support import OperatorSupport
from torch.fx.passes.utils.fuser_utils import fuse_by_partitions
from torch.fx.passes.utils.matcher_utils import SubgraphMatcher
from torch.testing._internal.common_utils import run_tests, parametrize, instantiate_parametrized_tests
from torch.testing._internal.jit_utils import JitTestCase
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)
class TestModule(torch.nn.Module):
    """Fixture module for the fuser tests.

    NOTE: the partition lists in this file reference the FX node names that
    tracing this exact graph produces (add, add_1, ..., linear, linear2,
    param, relu), so the op structure/order here must not change.
    """

    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 4)
        self.linear2 = torch.nn.Linear(4, 4)
        self.param = torch.nn.Parameter(torch.rand(4, 4))

    def forward(self, a, b, c):
        add = a + b
        linear_1 = self.linear(add)
        add_1 = add + c
        add_2 = add_1 + self.param
        add_3 = add_1 + linear_1
        add_4 = add_2 + add_3
        linear_2 = self.linear2(add_4)
        add_5 = linear_2 + add_4
        add_6 = add_5 + a
        relu = add_6.relu()
        return add_4, add_6, relu
class TestPartitionFunctions:
    """Graphs exercising CapabilityBasedPartitioner.

    Each forward* builds a specific graph topology; the expected partitions
    (lists of FX node names) are declared in the parametrization of
    TestFXGraphPasses.test_partitioner, so the op order here must not change.
    """

    @staticmethod
    def forward1(a, b, c):
        add = a + b
        add_1 = add + b
        add_2 = add_1 + c
        relu_1 = add_2.relu()
        add_3 = add_1 + add_2
        add_4 = add_1 + relu_1 + add_3
        relu_2 = add_4.relu()
        add_5 = relu_2 + add_4
        add_6 = add_5 + add_4
        return add_4, add_6

    @staticmethod
    def forward2(a, b, _):
        add = a + b
        add_1 = add + b
        relu_1 = add_1.relu()  # blocked by this
        add_3 = add_1 + relu_1
        add_4 = add_1 + add_3
        return add_4, add_1

    @staticmethod
    def forward3(a, b, c):
        add = a + b
        add_1 = a + c
        add_2 = b + c
        return add, add_1, add_2

    @staticmethod
    def forward4(a, b, c):
        add = a + b
        add_1 = a + c
        add_2 = b + c
        return torch.where(add > 0, add_1, add_2)

    @staticmethod
    def forward5(a, b, c):
        # add should be fused right branch, as left branch is not supported
        add = a + 1
        # left branch
        relu = add.relu()
        # right branch
        add_1 = add + 2
        return relu, add_1

    @staticmethod
    def forward6(a, b, c):
        # add should have its own partition, as neither branchs are supported
        add = a + 1
        # left branch
        relu = add.relu()
        # right branch
        relu_1 = add.relu()
        return relu, relu_1

    @staticmethod
    def forward7(a, b, c):
        # both branches are supported, all adds should be fused together
        add = a + 1
        # left branch
        add_1 = add + 2
        # right branch is larger
        add_2 = add + 1
        add_3 = add_2 + 1
        return add_3, add_1

    @staticmethod
    def forward8(a, b, c):
        # both branches are in the same partition, add should join the same partition
        add = a + 1
        # left branch
        add_1 = add + 2
        # right branch
        add_2 = add + 1
        # left and right branch merges
        add_3 = add_2 + add_1
        return add_3

    @staticmethod
    def forward9(a, b, c):
        add = a + 1
        # branch 1
        add_1 = add + 1
        # branch 2
        add_2 = add + 1
        # branch_3
        add_3 = add + 1
        out = torch.stack([add_1, add_2, add_3])
        return out

    @staticmethod
    def forward10(a, b, c):
        add = a + 1
        # branch 1
        add_1 = add + 1
        # branch 2
        add_2 = add + 1
        # branch 3: depends on branch 2
        add_3 = add + add_2
        out = torch.stack([add_1, add_2, add_3])
        return out

    @staticmethod
    def forward11(a, b, c):
        add = a + 1
        # branch 1
        add_1 = add.relu()
        # branch 2 depends on branch 1
        add_2 = add + add_1
        # branch 3
        add_3 = add.relu()
        out = torch.stack([add_1, add_2, add_3])
        return out
# A mock OperatorSupport class, where only operator.add is supported
class MockOperatorSupport(OperatorSupport):
    def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
        """Treat only `operator.add` call_function nodes as supported."""
        if node.op != "call_function":
            return False
        return node.target in {operator.add}
@instantiate_parametrized_tests
class TestFXGraphPasses(JitTestCase):
    """Partitioner and fuser tests over the fixture graphs above."""

    @parametrize("fn, expected_partition", [
        (TestPartitionFunctions.forward1, [["add_7", "add_6"], ["add_5", "add_4", "add_3"], ["add_2", "add_1", "add"]]),
        (TestPartitionFunctions.forward2, [["add_3", "add_2"], ["add_1", "add"]]),
        # 2 branches cases
        (TestPartitionFunctions.forward5, [["add_1", "add"]]),
        (TestPartitionFunctions.forward6, [["add"]]),
        (TestPartitionFunctions.forward7, [["add_3", "add_2", "add", "add_1"]]),
        (TestPartitionFunctions.forward8, [["add_3", "add_2", "add", "add_1"]]),
        # 3 branch cases
        (TestPartitionFunctions.forward9, [['add_3', 'add_2', 'add_1', 'add']]),
        (TestPartitionFunctions.forward10, [['add_3', 'add_2', 'add', 'add_1']]),
        (TestPartitionFunctions.forward11, [['add_1'], ['add']]),
    ])
    def test_partitioner(self, fn, expected_partition):
        """Proposed partitions must match expectation, and fusing them must
        preserve the traced function's output."""
        traced = symbolic_trace(fn)

        supported_ops = MockOperatorSupport()
        partitioner = CapabilityBasedPartitioner(traced, supported_ops, allows_single_node_partition=True)
        partitions = partitioner.propose_partitions()

        partitions_name = [[node.name for node in partition.nodes] for partition in partitions]
        assert len(partitions_name) == len(expected_partition)
        for i in range(len(partitions_name)):
            assert set(partitions_name[i]) == set(expected_partition[i])

        fused_graph = partitioner.fuse_partitions(partitions)

        a, b, c = torch.rand(4), torch.rand(4), torch.rand(4)

        expected = fn(a, b, c)
        result = fused_graph(a, b, c)
        torch.testing.assert_close(expected, result)

    @parametrize("fn, expected_partition", [
        # horizontal fusion without a common downstream node, not supported yet
        (TestPartitionFunctions.forward3, [["add_2", "add_1", "add"]]),
        # horizontal fusion with a common downstream node, not supported yet
        (TestPartitionFunctions.forward4, [["add_2", "add_1", "add"]]),
    ])
    def test_partitioner_xfail(self, fn, expected_partition):
        """Cases the partitioner does not support yet; the assertion is
        expected to fail."""
        traced = symbolic_trace(fn)

        supported_ops = MockOperatorSupport()
        partitioner = CapabilityBasedPartitioner(traced, supported_ops, allows_single_node_partition=True)
        partitions = partitioner.propose_partitions()

        partitions_name = [[node.name for node in partition.nodes] for partition in partitions]
        with self.assertRaises(Exception):
            assert len(partitions_name) == len(expected_partition)

    @parametrize("partition", [
        [['add', 'add_1'], ['add_5', 'add_6']],
        [['add', 'add_1', 'add_2']],  # vertical fusion
        [['add_2', 'add_3']],         # horizontal fusion
        [['add_3', 'add_4']],
        [['add_6', 'add_5']],         # arbitray node order
        [['add_4', 'add_1', 'add_3', 'add_2']],           # arbitray node order
        [['add_5', 'add_6'], ['add_1', 'add_2', 'add_3', 'add_4']],  # arbitray partition order
        [['add_5', 'linear2']],   # includes call_function + call_module node
        [['add_6', 'relu']],      # includes call_function + call_module node
        [['param', 'add_2']],     # includes get_attr + call_module nodes
        [['param', 'add_1', 'linear']],  # includes get_attr + call_function + call_module nodes
        [["add", "linear", "add_1", "param", "add_2", "add_3", "add_4", "linear2", "add_5", "add_6", "relu"]],  # full graph
    ])
    def test_fuser_util(self, partition):
        """Fusing valid hand-written partitions of TestModule must preserve output."""
        m = TestModule()
        gm = symbolic_trace(m)

        nodes_by_name = {node.name : node for node in gm.graph.nodes}

        partitions = []
        for node_names in partition:
            partitions.append([nodes_by_name[name] for name in node_names])

        fused_graph = fuse_by_partitions(gm, partitions)

        a, b, c = torch.rand(4), torch.rand(4), torch.rand(4)

        expected = m(a, b, c)
        result = fused_graph(a, b, c)

        torch.testing.assert_close(expected, result)

    @parametrize("partition", [
        [['add', 'add_1'], ['add_1', 'add_5', 'add_6']],  # add_1 exists in multiple partitions
        [['add', 'add_1', 'add_3']],   # invalid partition: circular dependency
        [['add_4', 'add_5']],          # invalid partition: circular dependency
        [['relu', 'add_5']],           # invalid partition: circular dependency
    ])
    def test_fuser_util_xfail(self, partition):
        """Invalid partitions (overlap or circular deps) must raise."""
        m = TestModule()
        gm = symbolic_trace(m)

        nodes_by_name = {node.name : node for node in gm.graph.nodes}

        partitions = []
        for node_names in partition:
            partitions.append([nodes_by_name[name] for name in node_names])

        with self.assertRaises(Exception):
            fuse_by_partitions(gm, partitions)
@dataclass
class TestCase:
    """Expected SubgraphMatcher behavior for one matcher configuration."""
    # whether pattern output nodes must also be matched
    match_output: bool
    # whether pattern placeholder nodes must also be matched
    match_placeholder: bool
    # expected number of matches found
    num_matches: int
    # whether overlapping matches are deduplicated before counting
    remove_overlapping_matches: bool = True
class SingleNodePattern:
    """Pattern is a single neg op; the target graph uses its result twice."""
    @staticmethod
    def forward(x):
        val = torch.neg(x)
        return torch.add(val, val)

    @staticmethod
    def pattern(a):
        return torch.neg(a)

    test_cases = [
        # match_output, match_placeholder, num_matches
        TestCase(False, False, 1),
        TestCase(True, False, 0),
        TestCase(False, True, 1),
        TestCase(True, True, 0)
    ]
class SimplePattern:
    """cat+sum pattern occurring three times in the target graph."""
    @staticmethod
    def forward(x, w1, w2):
        m1 = torch.cat([w1, w2]).sum()
        m2 = torch.cat([w2, w1]).sum()
        m3 = torch.cat([m1, m2]).sum()
        return x + torch.max(m1) + torch.max(m2) + m3

    @staticmethod
    def pattern(a, b):
        return torch.cat([a, b]).sum()

    test_cases = [
        # match_output, match_placeholder, num_matches
        TestCase(False, False, 3),
        TestCase(True, False, 0),
        TestCase(False, True, 2),
        TestCase(True, True, 0)
    ]
class SimpleFullGraphMatching:
    """Pattern is identical to the entire target graph."""
    @staticmethod
    def forward(x):
        a = torch.neg(x)
        return torch.add(a, a)

    @staticmethod
    def pattern(x):
        a = torch.neg(x)
        return torch.add(a, a)

    test_cases = [
        # match_output, match_placeholder, num_matches
        TestCase(False, False, 1),
        TestCase(True, False, 1),
        TestCase(False, True, 1),
        TestCase(True, True, 1)
    ]
class DiamondShapePatternTestCase:
    """Diamond-shaped pattern: relu fans out to sigmoid/relu which rejoin in add."""
    @staticmethod
    def forward(x):
        a = torch.neg(x)

        a = a.relu()
        left = a.sigmoid()
        right = a.relu()
        out = left + right

        return out

    @staticmethod
    def pattern(a):
        a = a.relu()
        left = a.sigmoid()
        right = a.relu()
        out = left + right
        return out

    test_cases = [
        # match_output, match_placeholder, num_matches
        TestCase(False, False, 1),
        TestCase(True, False, 1),
        TestCase(False, True, 0),
        TestCase(True, True, 0)
    ]
class NonFullyContainedMatches:
    """One fully-contained match plus one leaking match (m3 is used outside)."""
    @staticmethod
    def forward(x, w1, w2, b1, b2):
        # fully contained matched subgraph
        m1 = torch.cat([w1, w2])
        m2 = torch.cat([x, b2])
        t0 = torch.addmm(b1, m1, m2.t())
        t0_sum = torch.sum(t0)   # use of t0 is not leaking

        # leaking matched subgraph, m3 is leaked
        m3 = torch.cat([w1, w2])
        m4 = torch.cat([x, b2])
        t1 = torch.addmm(b1, m3, m4.t())
        m3_sum = torch.sum(m3)

        return t0_sum, m3_sum

    @staticmethod
    def pattern(x, w1, w2, b1, b2):
        m1 = torch.cat([w1, w2])
        m2 = torch.cat([x, b2])
        return torch.addmm(b1, m1, m2.t())

    test_cases = [
        # match_output, match_placeholder, num_matches
        TestCase(False, False, 1),
        TestCase(True, False, 0),
        TestCase(False, True, 1),     # leaked used of placeholder is not leaking
    ]
class ChainRepeatedPattern:
    """Chain of four sigmoids matched by a two-sigmoid pattern (overlaps possible)."""
    @staticmethod
    def forward(x):
        x = torch.sigmoid(x)
        x = torch.sigmoid(x)
        x = torch.sigmoid(x)
        return torch.sigmoid(x)

    @staticmethod
    def pattern(x):
        return torch.sigmoid(torch.sigmoid(x))

    test_cases = [
        # match_output, match_placeholder, num_matches
        TestCase(False, False, 3, remove_overlapping_matches=False),
        TestCase(False, False, 2, remove_overlapping_matches=True),
        TestCase(True, False, 1),
        TestCase(False, True, 1),
        TestCase(True, True, 0)
    ]
class QuantizationModel:
    """dequantize -> sigmoid -> to(float16) tail pattern."""
    @staticmethod
    def forward(x):
        x += 3
        x = x.dequantize()
        x = torch.sigmoid(x)
        x = x.to(torch.float16)
        return x

    @staticmethod
    def pattern(x):
        x = x.dequantize()
        x = torch.sigmoid(x)
        x = x.to(torch.float16)
        return x

    test_cases = [
        # match_output, match_placeholder, num_matches
        TestCase(False, False, 1),
        TestCase(True, False, 1),
        TestCase(False, True, 0),
        TestCase(True, True, 0)
    ]
class MultipleOutputsWithDependency:
    """Two-output pattern where one output feeds the other."""
    @staticmethod
    def forward(x):
        y = x.relu()
        z = y.sigmoid()
        return z, y

    @staticmethod
    def pattern(a):
        b = a.relu()
        c = b.sigmoid()
        return b, c     # outputs have data dependency

    test_cases = [
        # match_output, match_placeholder, num_matches
        TestCase(False, False, 1),
        TestCase(True, False, 0),
        TestCase(False, True, 1),
        TestCase(True, True, 0)
    ]
class MultipleOutputsWithoutDependency:
    """Pattern with two independent outputs (sigmoid and sum of the same relu)."""
    @staticmethod
    def forward(x):
        x = x + 1

        # target subgraph to match
        x = x.relu()
        z = x.sum()
        y = x.sigmoid()

        out = y.sigmoid() + z.sum()
        return out

    @staticmethod
    def pattern(a):
        a = a.relu()
        b = a.sigmoid()
        c = a.sum()
        return b, c

    test_cases = [
        # match_output, match_placeholder, num_matches
        TestCase(False, False, 1),
        TestCase(True, False, 0),
        TestCase(False, True, 0),
        TestCase(True, True, 0)
    ]
class MultipleOutputsMultipleOverlappingMatches:
    """relu with duplicated sum/sigmoid consumers, yielding overlapping matches."""
    @staticmethod
    def forward(x):
        x = x + 1

        # target subgraph to match
        x = x.relu()
        z = x.sum()
        z1 = x.sum()
        y = x.sigmoid()
        y1 = x.sigmoid()

        return z + z1 + y + y1

    @staticmethod
    def pattern(a):
        a = a.relu()
        b = a.sigmoid()
        c = a.sum()
        return a, b, c

    test_cases = [
        # match_output, match_placeholder, num_matches
        TestCase(False, False, 4, remove_overlapping_matches=False),
        TestCase(False, False, 1, remove_overlapping_matches=True),
    ]
class MultipleOutputsMultipleNonOverlappingMatches:
    """Two consecutive relu/sum/sigmoid sections matched by a two-output pattern."""
    @staticmethod
    def forward(x):
        x = x + 1

        # target subgraph to match
        x = x.relu()
        z = x.sum()
        y = x.sigmoid()

        x = x.relu()
        z1 = x.sum()
        y1 = x.sigmoid()

        return z + z1 + y + y1

    @staticmethod
    def pattern(a):
        a = a.relu()
        b = a.sigmoid()
        c = a.sum()
        return b, c

    test_cases = [
        # match_output, match_placeholder, num_matches
        TestCase(False, False, 1),
    ]
class MultipleOutputsIdenticalAnchor:
    """Pattern whose two outputs are identical sigmoid anchors."""
    @staticmethod
    def forward(x):
        x = x + 1

        # target subgraph to match
        x = x.relu()
        y = x.sigmoid()
        y1 = x.sigmoid()

        return y, y1

    @staticmethod
    def pattern(a):
        a = a.relu()
        b = a.sigmoid()
        b1 = a.sigmoid()
        return b, b1

    test_cases = [
        # match_output, match_placeholder, num_matches
        # (False, False, 2),  # FIXME: currently still matches to 2, should fix to 1
        TestCase(True, False, 1),
        TestCase(False, True, 0),
    ]
class MultipleOutputsHorizontalPattern:
    """Horizontal two-output pattern: relu and sigmoid of the same input."""
    @staticmethod
    def forward(x):
        x = x + 1

        # target subgraph to match
        y1 = x.relu()
        y2 = x.sigmoid()

        return y1, y2

    @staticmethod
    def pattern(a):
        b1 = a.relu()
        b2 = a.sigmoid()
        return b1, b2

    test_cases = [
        # match_output, match_placeholder, num_matches
        TestCase(False, False, 1),
        TestCase(True, False, 1),
        TestCase(False, True, 0),
        TestCase(True, True, 0)
    ]
class PatternWithPseudoAny:
    """Pattern containing a `torch.ops.pseudo.any` node after the relu."""
    @staticmethod
    def forward(x):
        x = x.relu()
        x = x.sigmoid()

        y = x.relu()
        y = y + 1

        z = y.relu()
        z = z.relu()
        return z

    @staticmethod
    def pattern(a):
        y = a.relu()
        z = torch.ops.pseudo.any(y)
        return z

    test_cases = [
        # match_output, match_placeholder, num_matches
        TestCase(False, False, 3),
        TestCase(True, False, 1),
        TestCase(False, True, 1),
        TestCase(True, True, 0)
    ]
class PatternWithPseudoOneof:
    """Pattern containing a `torch.ops.pseudo.oneof` node restricted to
    torch.sigmoid / operator.add targets."""
    @staticmethod
    def forward(x):
        x = x.relu()
        x = torch.sigmoid(x)

        z = x.relu()
        z = torch.relu(z)

        y = x.relu()
        y = y + 1
        return y

    @staticmethod
    def pattern(a):
        y = a.relu()
        z = torch.ops.pseudo.oneof(y, targets=["torch.sigmoid", "operator.add"])
        return z

    test_cases = [
        # match_output, match_placeholder, num_matches
        TestCase(False, False, 2),
        TestCase(True, False, 1),
        TestCase(False, True, 1),
        TestCase(True, True, 0)
    ]
@instantiate_parametrized_tests
class TestFXMatcherUtils(JitTestCase):
    """Runs SubgraphMatcher over every fixture's (forward, pattern, test_cases)."""

    @parametrize("test_model", [
        SingleNodePattern,
        SimplePattern,
        SimpleFullGraphMatching,
        DiamondShapePatternTestCase,
        NonFullyContainedMatches,
        ChainRepeatedPattern,
        QuantizationModel,
        MultipleOutputsWithDependency,
        MultipleOutputsWithoutDependency,
        MultipleOutputsMultipleOverlappingMatches,
        MultipleOutputsMultipleNonOverlappingMatches,
        MultipleOutputsIdenticalAnchor,
        MultipleOutputsHorizontalPattern,
        PatternWithPseudoAny,
        PatternWithPseudoOneof,
    ])
    def test_subgraph_matcher(self, test_model):
        """Each configured TestCase must yield exactly num_matches matches, and
        every pattern node (minus skipped placeholders/outputs) must appear in
        each match's nodes_map."""
        traced = symbolic_trace(test_model.forward)
        pattern_traced = symbolic_trace(test_model.pattern)

        for test_case in test_model.test_cases:
            matcher = SubgraphMatcher(pattern_traced.graph,
                                      match_output=test_case.match_output,
                                      match_placeholder=test_case.match_placeholder,
                                      remove_overlapping_matches=test_case.remove_overlapping_matches)
            matches = matcher.match(traced.graph)

            assert len(matches) == test_case.num_matches

            for match in matches:
                for node in pattern_traced.graph.nodes:
                    if not test_case.match_placeholder and node.op == "placeholder":
                        continue
                    if not test_case.match_output and node.op == "output":
                        continue
                    assert node in match.nodes_map
# Standard PyTorch test-suite entry point.
if __name__ == "__main__":
    run_tests()
| pytorch-master | test/test_fx_passes.py |
# Owner(s): ["module: unknown"]
import collections
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.autocast_test_lists import AutocastCPUTestLists
class TestAutocastCPU(TestCase):
    """Runs the op lists from AutocastCPUTestLists under torch.cpu.amp.autocast
    and checks output dtypes plus numerical parity with manual casting."""

    def setUp(self):
        super(TestAutocastCPU, self).setUp()
        self.autocast_lists = AutocastCPUTestLists(torch.device('cpu'))

    def tearDown(self):
        del self.autocast_lists
        super(TestAutocastCPU, self).tearDown()

    def _run_autocast_outofplace(self, op, args, run_as_type, out_type=None, module=torch, add_kwargs=None):
        """Run `op` under CPU autocast and verify that:
          1) tensor outputs have dtype `out_type` (defaults to `run_as_type`),
          2) the torch.* and Tensor.* variants agree when both exist,
          3) results match explicitly-casted execution with autocast disabled.
        """
        # helper to cast args
        def cast(val, to_type):
            if isinstance(val, torch.Tensor):
                # only floating-point tensors participate in autocast casting
                return val.to(to_type) if val.is_floating_point() else val
            elif isinstance(val, collections.abc.Iterable):
                return type(val)(cast(v, to_type) for v in val)
            else:
                return val

        if add_kwargs is None:
            add_kwargs = {}

        self.assertFalse(torch.is_autocast_cpu_enabled())
        with torch.cpu.amp.autocast():
            self.assertTrue(torch.is_autocast_cpu_enabled())
            out_type = out_type if out_type is not None else run_as_type
            output = output_method = None

            # Try module.* variant, if requested:
            if module is not None and hasattr(module, op):
                output = getattr(module, op)(*args, **add_kwargs)
                if isinstance(output, torch.Tensor):
                    self.assertTrue(out_type == output.dtype,
                                    "autocast for torch.{} produced {}, should produce {}"
                                    .format(op, output.dtype, out_type))
            # Try Tensor.* variant:
            if hasattr(torch.Tensor, op):
                output_method = getattr(args[0], op)(*args[1:], **add_kwargs)
                if isinstance(output_method, torch.Tensor):
                    self.assertTrue(out_type == output_method.dtype,
                                    "autocast for torch.{} produced {}, should produce torch.{}"
                                    .format(op, output_method.dtype, out_type))

            self.assertTrue((output is not None) or (output_method is not None),
                            "{} not found as an attribute on either Tensor or the requested module {}".format(
                            op, module))

            # Accounts for ops that return Tensors, iterables, and other non-Tensors.
            # For example, lstm_cell returns a tuple and equal returns bool.
            def compare(first, second):
                if isinstance(first, torch.Tensor):
                    return torch.equal(first, second)
                elif isinstance(first, collections.abc.Iterable):
                    return all(compare(f, s) for f, s in zip(first, second))
                else:
                    return first == second

            # If both torch.* and Tensor.* variants were found, check outputs are identical
            if (output is not None) and (output_method is not None):
                self.assertTrue(type(output) == type(output_method))
                comparison = compare(output, output_method)
                self.assertTrue(comparison, "torch.{0} result did not match Tensor.{0} result".format(op))

            # Compare numerics to Python-side "autocasting" that (we expect) does the same thing
            # as the C++-side autocasting, and should be bitwise accurate.
            output_to_compare = output if output is not None else output_method
            with torch.cpu.amp.autocast(enabled=False):
                self.assertFalse(torch.is_autocast_cpu_enabled())

                if module is not None and hasattr(module, op):
                    control = getattr(module, op)(*cast(args, run_as_type), **add_kwargs)
                else:
                    control = getattr(args[0].to(run_as_type), op)(*cast(args[1:], run_as_type), **add_kwargs)
                self.assertTrue(type(output_to_compare) == type(control))
                comparison = compare(output_to_compare, control)
                self.assertTrue(comparison, "torch.{} result did not match control".format(op))
            # re-entering the outer context: autocast must be enabled again
            self.assertTrue(torch.is_autocast_cpu_enabled())
        self.assertFalse(torch.is_autocast_cpu_enabled())

    def args_maybe_kwargs(self, op_with_args):
        """Split an op spec into (op, args, kwargs); kwargs default to {}."""
        if len(op_with_args) == 2:
            return op_with_args[0], op_with_args[1], {}
        else:
            return op_with_args[0], op_with_args[1], op_with_args[2]

    def test_autocast_torch_expect_builtin_promote(self):
        for op, args, out_type in self.autocast_lists.torch_expect_builtin_promote:
            self._run_autocast_outofplace(op, args, torch.float32, out_type=out_type)

    def test_autocast_methods_expect_builtin_promote(self):
        for op, args, out_type in self.autocast_lists.methods_expect_builtin_promote:
            self._run_autocast_outofplace(op, args, torch.float32, module=None, out_type=out_type)

    def test_autocast_torch_bf16(self):
        for op_with_args in self.autocast_lists.torch_bf16:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_outofplace(op, args, torch.bfloat16, add_kwargs=maybe_kwargs)

    def test_autocast_nn_bf16(self):
        for op_with_args in self.autocast_lists.nn_bf16:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn, add_kwargs=maybe_kwargs)

    def test_autocast_torch_fp32(self):
        for op_with_args in self.autocast_lists.torch_fp32:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs)

    def test_autocast_nn_fp32(self):
        for op_with_args in self.autocast_lists.nn_fp32:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_outofplace(op, args, torch.float32, module=torch._C._nn, add_kwargs=maybe_kwargs)

    def test_autocast_torch_need_autocast_promote(self):
        for op, args in self.autocast_lists.torch_need_autocast_promote:
            self._run_autocast_outofplace(op, args, torch.float32)
class TestTorchAutocast(TestCase):
    """Checks the default (fast) autocast dtypes for CUDA and CPU."""

    def test_autocast_fast_dtype(self):
        fast_dtypes = {
            "gpu": torch.get_autocast_gpu_dtype(),
            "cpu": torch.get_autocast_cpu_dtype(),
        }
        # Defaults: float16 on CUDA, bfloat16 on CPU.
        self.assertEqual(fast_dtypes["gpu"], torch.half)
        self.assertEqual(fast_dtypes["cpu"], torch.bfloat16)
# Standard PyTorch test-suite entry point.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_autocast.py |
# Owner(s): ["module: unknown"]
import unittest
from typing import Dict, Optional
import numpy as np
import torch
from torch import nn
from torch.testing._internal.common_utils import TestCase, run_tests
from typing import List
class StaticModule:
    """Thin Python wrapper around the static-runtime module produced by
    torch._C._jit_to_static_module for a scripted module or graph."""

    def __init__(self, scripted):
        # A scripted nn.Module carries its C++ module in `_c`; a scripted
        # function only exposes a graph.
        target = scripted._c if hasattr(scripted, "_c") else scripted.graph
        self.static_module = torch._C._jit_to_static_module(target)

    def __call__(self, *args, **kwargs):
        return self.static_module(*args, **kwargs)

    def benchmark(self, args, kwargs, warmup_runs, main_runs):
        self.static_module.benchmark(args, kwargs, warmup_runs, main_runs)

    def runAsync(self, args, kwargs):
        return self.static_module.runAsync(args, kwargs)

    def benchmark_individual_ops(self, args, kwargs, warmup_runs, main_runs):
        return self.static_module.benchmark_individual_ops(
            args, kwargs, warmup_runs, main_runs)
def linear_shim(
    input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] = None
) -> torch.Tensor:
    """Reference implementation of F.linear: input @ weight.T (+ bias)."""
    result = input.matmul(weight.t())
    if bias is not None:
        result = result + bias
    return result


# Route F.linear through the shim above for the tests in this file.
torch.nn.functional.linear = linear_shim
class MultiHeadAttentionLayer(nn.Module):
    """Multi-head attention block (Q/K/V projections + output projection).

    Dropout is commented out, so the module is deterministic as written;
    `mask` is accepted but unused (the masked_fill line is commented out).
    """

    def __init__(self, hid_dim, n_heads, dropout, device):
        super().__init__()
        assert hid_dim % n_heads == 0
        self.hid_dim = hid_dim
        self.n_heads = n_heads
        self.head_dim = hid_dim // n_heads
        self.fc_q = nn.Linear(hid_dim, hid_dim)
        self.fc_k = nn.Linear(hid_dim, hid_dim)
        self.fc_v = nn.Linear(hid_dim, hid_dim)
        self.fc_o = nn.Linear(hid_dim, hid_dim)
        # self.dropout = nn.Dropout(dropout)
        self.scale = torch.sqrt(torch.FloatTensor([self.head_dim])).to(device)

    def forward(self, query, key, value, mask):
        batch_size = query.shape[0]
        Q = self.fc_q(query)
        K = self.fc_k(key)
        V = self.fc_v(value)
        # split heads: (batch, seq, hid) -> (batch, heads, seq, head_dim)
        Q = Q.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
        K = K.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
        V = V.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
        # scaled dot-product attention scores
        energy = torch.matmul(Q, K.permute(0, 1, 3, 2)) / self.scale
        # energy = energy.masked_fill(mask == 0, -1e10)
        attention = torch.softmax(energy, dim=-1)
        # x = torch.matmul(self.dropout(attention), V)
        x = torch.matmul(attention, V)
        # merge heads back: (batch, seq, hid_dim)
        x = x.permute(0, 2, 1, 3).contiguous()
        x = x.view(batch_size, -1, self.hid_dim)
        x = self.fc_o(x)
        return x, attention
# Taken from https://github.com/facebookresearch/dlrm/blob/master/dlrm_s_pytorch.py
def create_mlp(ln, sigmoid_layer):
    """Build a scripted MLP from the layer sizes in `ln`.

    Layer index `sigmoid_layer` gets a Sigmoid activation; all other layers
    get ReLU. Weights/biases are drawn from numpy normal distributions, so
    the result is nondeterministic unless np.random is seeded.
    """
    layers = nn.ModuleList()
    for i in range(0, len(ln) - 1):
        n = ln[i]
        m = ln[i + 1]
        LL = nn.Linear(int(n), int(m), bias=True)
        mean = 0.0  # std_dev = np.sqrt(variance)
        std_dev = np.sqrt(2 / (m + n))  # np.sqrt(1 / m) # np.sqrt(1 / n)
        W = np.random.normal(mean, std_dev, size=(m, n)).astype(np.float32)
        std_dev = np.sqrt(1 / m)  # np.sqrt(2 / (m + 1))
        bt = np.random.normal(mean, std_dev, size=m).astype(np.float32)
        LL.weight.data = torch.tensor(W, requires_grad=True)
        LL.bias.data = torch.tensor(bt, requires_grad=True)
        layers.append(LL)
        if i == sigmoid_layer:
            layers.append(nn.Sigmoid())
        else:
            layers.append(nn.ReLU())
    with torch.no_grad():
        s = torch.jit.script(torch.nn.Sequential(*layers))
    s.eval()
    return s
def trivial_graph(a, b, c):
    """Compute a + b*c plus a constant 2x2 tensor of threes."""
    threes = torch.tensor([[3, 3], [3, 3]])
    return a + b * c + threes
def elementwise_square_addition(input1, input2):
    """Return the elementwise sum of squares of the two inputs."""
    square1 = input1 * input1
    square2 = input2 * input2
    return square1 + square2
def fork_wait_graph1(input1, input2):
    """Fork a single sum-of-squares task and wait for its result."""
    fut = torch.jit.fork(elementwise_square_addition, input1, input2)
    return torch.jit.wait(fut)
def fork_wait_graph2(input1, input2):
    """Fork loop_graph with 5 iterations and wait for its result."""
    fut = torch.jit.fork(loop_graph, input1, input2, 5)
    return torch.jit.wait(fut)
"""
graph with multiple fork/wait operations
:param input: torch.tensor input to forked subgraph
:param iters: number of future/wait pairs to be created
"""
def fork_wait_graph3(input, iters: int):
futures : List[torch.jit.Future[torch.Tensor]] = []
for _ in range(iters):
futures.append(torch.jit.fork(torch.neg, input))
results = []
for future in futures:
results.append(torch.jit.wait(future))
return torch.sum(torch.stack(results))
"""
graph with multi-level fork/wait operations
:param input: torch.tensor input to forked subgraph
:param num_forks: number of top level forks
:param num_child_forks: number of child forks per parent fork
"""
def fork_wait_graph4(input, num_forks: int, num_child_forks: int):
futures : List[torch.jit.Future[torch.Tensor]] = []
for _ in range(num_forks):
futures.append(torch.jit.fork(fork_wait_graph3, input, num_child_forks))
results = []
for future in futures:
results.append(torch.jit.wait(future))
return torch.sum(torch.stack(results))
def add_tensor(input1, input2):
    """Elementwise sum of the two inputs."""
    total = input1 + input2
    return total
def fork_wait_graph_exception(input1, input2):
    """Fork a simple tensor addition and wait for the result."""
    fut = torch.jit.fork(add_tensor, input1, input2)
    return torch.jit.wait(fut)
def loop_graph(a, b, iters: int):
    """Start from a + 2*b, then repeatedly add b, double, and subtract a."""
    acc = a + b * 2
    for _ in range(iters):
        acc = acc + b
        acc = acc * 2
        acc = acc - a
    return acc
def output_graph(a, b, c, iters: int):
    """Return ``{i: a + b * c + [[3, 3], [3, 3]] + i}`` for i in range(iters)."""
    threes = torch.tensor([[3, 3], [3, 3]])
    base = a + b * c + threes
    out: Dict[int, torch.Tensor] = {}
    for idx in range(iters):
        out[idx] = base + idx
    return out
class SubModule(nn.Module):
    """Module that adds the constant a + b (= 13) to its input."""

    def __init__(self):
        super().__init__()
        self.a = 11
        self.b = 2

    def forward(self, x):
        offset = self.a + self.b
        return offset + x
class SubModule2(nn.Module):
    """Module whose forward overwrites ``self.b`` with 30 before summing,
    exercising attribute mutation (prim::SetAttr) inside forward."""

    def __init__(self):
        super().__init__()
        self.a = 12
        self.b = 2

    def forward(self, x):
        # b is rewritten on every call; the runtime must honor the new value.
        self.b = 30
        offset = self.a + self.b
        return offset + x
class TestModule(nn.Module):
    """Composite fixture combining two submodules with attribute mutation in
    its own forward, so both self and submodule SetAttr paths are exercised."""

    def __init__(self):
        super().__init__()
        self.sub1 = SubModule()
        self.sub2 = SubModule2()
        self.a = 3
        self.b = 4

    def forward(self, x):
        # b is rewritten to 20 on every forward call
        self.b = 20
        return self.sub1(x) + self.a + self.b + self.sub2(x)
class TestStaticModule(TestCase):
    """Compares Static Runtime (StaticModule) execution against the reference
    TorchScript interpreter: fork/wait graphs (sync and runAsync), attention
    layers, MLPs, attribute mutation, and (currently disabled) fusion."""

    """
    Test Case: To test simple fork/wait operation in a graph
    fork is called on simple addition operation on input tensors
    """
    def test_fork_wait_1(self):
        inp1 = torch.ones(5, 5)
        inp2 = torch.randn(5, 5)
        torch_graph = torch.jit.script(fork_wait_graph1)
        output_ref = torch_graph(inp1, inp2)
        static_runtime_module = StaticModule(torch_graph)
        output_test = static_runtime_module(inp1, inp2)
        torch.testing.assert_close(output_test, output_ref)

    """
    Test Case: To test simple fork/wait operation with
    StaticRuntime runAsync API returning future
    """
    def test_fork_wait_1_async(self):
        inp1 = torch.ones(5, 5)
        inp2 = torch.randn(5, 5)
        torch_graph = torch.jit.script(fork_wait_graph1)
        output_ref = torch_graph(inp1, inp2)
        static_runtime_module = StaticModule(torch_graph)
        output_test = static_runtime_module.runAsync((inp1, inp2), {})
        output_test.wait()
        torch.testing.assert_close(output_test.value(), output_ref)

    """
    Test Case: To test fork/wait operation in a graph on
    a loop subgraph performing mix of operations
    """
    def test_fork_wait_2(self):
        inp1 = torch.randn(5, 5)
        inp2 = torch.randn(5, 5)
        torch_graph = torch.jit.script(fork_wait_graph2)
        output_ref = torch_graph(inp1, inp2)
        static_runtime_module = StaticModule(torch_graph)
        output_test = static_runtime_module(inp1, inp2)
        torch.testing.assert_close(output_test, output_ref)

    """
    Test Case: To test fork/wait operation on a loop
    subgraph with StaticRuntime runAsync API returning future
    """
    def test_fork_wait_2_async(self):
        inp1 = torch.randn(5, 5)
        inp2 = torch.randn(5, 5)
        torch_graph = torch.jit.script(fork_wait_graph2)
        output_ref = torch_graph(inp1, inp2)
        static_runtime_module = StaticModule(torch_graph)
        output_test = static_runtime_module.runAsync((inp1, inp2), {})
        output_test.wait()
        torch.testing.assert_close(output_test.value(), output_ref)

    """
    Test Case: To test fork/wait operation in a graph on
    having multiple fork/wait operations
    """
    def test_fork_wait_3(self):
        input = torch.ones(3, 3)
        num_forks = 10
        torch_graph = torch.jit.script(fork_wait_graph3)
        output_ref = torch_graph(input, num_forks)
        static_runtime_module = StaticModule(torch_graph)
        output_test = static_runtime_module(input, num_forks)
        torch.testing.assert_close(output_test, output_ref)

    """
    Test Case: To test fork/wait operation in a graph with
    multiple fork/wait operations on runAsync API returning future
    """
    def test_fork_wait_3_async(self):
        input = torch.ones(3, 3)
        num_forks = 10
        torch_graph = torch.jit.script(fork_wait_graph3)
        output_ref = torch_graph(input, num_forks)
        static_runtime_module = StaticModule(torch_graph)
        output_test = static_runtime_module.runAsync((input, num_forks), {})
        output_test.wait()
        torch.testing.assert_close(output_test.value(), output_ref)

    """
    Test Case: To test fork/wait operation in a graph on
    multiple nested fork/wait operations
    """
    def test_fork_wait_4(self):
        input = torch.ones(3, 3)
        num_forks = 10
        num_child_forks = 10
        torch_graph = torch.jit.script(fork_wait_graph4)
        static_runtime_module = StaticModule(torch_graph)
        output_ref = torch_graph(input, num_forks, num_child_forks)
        output_test = static_runtime_module(input, num_forks, num_child_forks)
        torch.testing.assert_close(output_test, output_ref)

    """
    Test Case: To test fork/wait operation in a graph with multiple
    nested fork/wait operations on runAsync API returning future
    """
    def test_fork_wait_4_async(self):
        input = torch.ones(3, 3)
        num_forks = 10
        num_child_forks = 10
        torch_graph = torch.jit.script(fork_wait_graph4)
        static_runtime_module = StaticModule(torch_graph)
        output_ref = torch_graph(input, num_forks, num_child_forks)
        output_test = static_runtime_module.runAsync(
            (input, num_forks, num_child_forks), {})
        output_test.wait()
        torch.testing.assert_close(output_test.value(), output_ref)

    """
    Test Case: To test exception handling in fork/wait
    operation. Add.Tensor op is called for tensors with
    non-matching dims on the forked subgraph and the
    exception raised by subgraph is set on future returned
    by prim::fork to parent graph. Returned exception is
    checked for substring expected_error_msg as declared below
    """
    def test_fork_wait_exception(self):
        # incompatible tensors for add due to shape mismatch
        input1 = torch.randn(4, 7)
        input2 = torch.randn(4, 5)
        torch_graph = torch.jit.script(fork_wait_graph_exception)
        try:
            static_runtime_module = StaticModule(torch_graph)
            output_test = static_runtime_module(input1, input2)
        except Exception as error:
            expected_error_msg = (
                "The size of tensor a (7) must match the size "
                "of tensor b (5) at non-singleton dimension 1"
            )
            # test fails if error does not contain expected substr
            if str(error).find(expected_error_msg) == -1:
                raise RuntimeError(
                    "Tried execution of add.Tensors with incompatible shape. "
                    "Exception raised by forked runtime execution does "
                    f"not contain expected substring: \"{expected_error_msg}\""
                ) from error
        else:
            # BUGFIX: previously the test passed vacuously when no exception
            # was raised at all; the shape mismatch must surface as an error.
            self.fail("Static Runtime did not raise for mismatched add.Tensor shapes")

    """
    Test Case: To test exception handling in fork/wait
    operation with runAsync API. Add.Tensor op is called for
    tensors with non-matching dims on the forked subgraph
    and the exception raised by subgraph is set on future returned
    by prim::fork to parent graph. Returned exception is
    checked for substring expected_error_msg as declared below
    """
    def test_fork_wait_exception_async(self):
        # incompatible tensors for add due to shape mismatch
        input1 = torch.randn(4, 7)
        input2 = torch.randn(4, 5)
        torch_graph = torch.jit.script(fork_wait_graph_exception)
        try:
            static_runtime_module = StaticModule(torch_graph)
            output_test = static_runtime_module.runAsync(
                (input1, input2), {})
            # BUGFIX: the exception is delivered via the returned future, so
            # it is wait() that re-raises it in the caller; without this call
            # the except branch could never be reached.
            output_test.wait()
        except Exception as error:
            expected_error_msg = (
                "The size of tensor a (7) must match the size "
                "of tensor b (5) at non-singleton dimension 1"
            )
            # test fails if error does not contain expected substr
            if str(error).find(expected_error_msg) == -1:
                raise RuntimeError(
                    "Tried execution of add.Tensors with incompatible shape. "
                    "Exception raised by forked runtime execution does "
                    f"not contain expected substring: \"{expected_error_msg}\""
                ) from error
        else:
            # BUGFIX: fail instead of passing vacuously when nothing raised.
            self.fail("Static Runtime did not raise for mismatched add.Tensor shapes")

    def test_multihead_attention_layer(self):
        HID_DIM = 256
        QUERY_LEN = 8
        BATCH_SIZE = 128
        LAYERS = 3
        HEADS = 8
        DROPOUT = 0.1
        device = torch.device("cpu")
        attention = MultiHeadAttentionLayer(HID_DIM, HEADS, DROPOUT, device).to(device)
        with torch.no_grad():
            src = torch.randn(BATCH_SIZE, QUERY_LEN, HID_DIM).to(device)
            src_mask = (src > 0)[:, :, 0].unsqueeze(1).unsqueeze(2).to(device)
            attention.eval()
            attention = torch.jit.script(attention)
            attention.eval()
            o_ref = attention(src, src, src, src_mask)
            attention_a = StaticModule(attention)
            o_test = attention_a(src, src, src, src_mask)
            # also exercise keyword-argument dispatch through StaticModule
            o_test_kw = attention_a(src, src, value=src, mask=src_mask)
            for a, b in zip(o_ref, o_test):
                torch.testing.assert_close(a, b)
            for a, b in zip(o_ref, o_test_kw):
                torch.testing.assert_close(a, b)

    def test_multihead_attention_layer_benchmark(self):
        HID_DIM = 256
        QUERY_LEN = 8
        BATCH_SIZE = 128
        LAYERS = 3
        HEADS = 8
        DROPOUT = 0.1
        device = torch.device("cpu")
        attention = MultiHeadAttentionLayer(HID_DIM, HEADS, DROPOUT, device).to(device)
        with torch.no_grad():
            src = torch.randn(BATCH_SIZE, QUERY_LEN, HID_DIM).to(device)
            src_mask = (src > 0)[:, :, 0].unsqueeze(1).unsqueeze(2).to(device)
            attention.eval()
            attention = torch.jit.script(attention)
            attention_a = StaticModule(attention)
            # smoke-test both benchmark entry points (2 warmup, 2 main iters)
            attention_a.benchmark([src, src, src, src_mask], {}, 2, 2)
            metrics = attention_a.benchmark_individual_ops(
                [src, src, src, src_mask], {}, 2, 2
            )

    def test_mlp(self):
        # Arguments taken from benchmark script, ./bench/dlrm_s_benchmark.sh
        ln_bot = [512, 512, 64]
        sigmoid_bot = -1
        ln_top = [100, 1024, 1024, 1024, 1]
        sigmoid_top = 3
        bot_l = create_mlp(ln_bot, sigmoid_bot)
        bot_l_acc = StaticModule(bot_l)
        top_l = create_mlp(ln_top, sigmoid_top)
        top_l_acc = StaticModule(top_l)
        with torch.no_grad():
            bot_inp = torch.randn(2048, 512)  # torch.Size([2048, 512])
            top_inp = torch.randn(2048, 100)  # torch.Size([2048, 100])
        ref_bot = bot_l(bot_inp)
        acc_bot = bot_l_acc(bot_inp)
        torch.testing.assert_close(acc_bot, ref_bot)
        ref_top = top_l(top_inp)
        acc_top = top_l_acc(top_inp)
        torch.testing.assert_close(acc_top, ref_top)
        # re-run with fresh inputs to confirm StaticModule memory reuse does
        # not leak state between invocations
        for _ in range(5):
            with torch.no_grad():
                bot_inp = torch.randn(2048, 512)  # torch.Size([2048, 512])
                top_inp = torch.randn(2048, 100)  # torch.Size([2048, 100])
            ref_bot = bot_l(bot_inp)
            acc_bot = bot_l_acc(bot_inp)
            torch.testing.assert_close(acc_bot, ref_bot)
            ref_top = top_l(top_inp)
            acc_top = top_l_acc(top_inp)
            torch.testing.assert_close(acc_top, ref_top)

    def test_trivial_graph(self):
        s = torch.full((2, 2), 2)
        tg = torch.jit.script(trivial_graph)
        o_ref = tg(s, s, s)
        tg_a = StaticModule(tg)
        o_test = tg_a(s, s, s)
        torch.testing.assert_close(o_ref, o_test)

    def test_leaky_relu(self):
        s = torch.randn(5, 5)
        tg = torch.jit.script(nn.LeakyReLU(0.1))
        o_ref = tg(s)
        tg_a = StaticModule(tg)
        o_test = tg_a(s)
        torch.testing.assert_close(o_ref, o_test)

    def test_attr(self):
        """
        TorchScript IR of TestModule() after freezing:
        graph(%self : __torch__.test_static_runtime.___torch_mangle_0.TestModule,
              %x.1 : Tensor):
            %18 : int = prim::Constant[value=30]()
            %30 : int = prim::Constant[value=13]()
            %3 : int = prim::Constant[value=20]()
            %2 : int = prim::Constant[value=1]()
            %self.sub2.a : int = prim::Constant[value=12]()
            %self.a : int = prim::Constant[value=3]()
            = prim::SetAttr[name="b"](%self, %3)
            %17 : Tensor = aten::add(%x.1, %30, %2)
            %7 : Tensor = aten::add(%17, %self.a, %2)
            %b.1 : int = prim::GetAttr[name="b"](%self)
            %9 : Tensor = aten::add(%7, %b.1, %2)
            %sub2 : __torch__.test_static_runtime.___torch_mangle_2.SubModule2 = prim::GetAttr[name="sub2"](%self)
            = prim::SetAttr[name="b"](%sub2, %18)
            %b : int = prim::GetAttr[name="b"](%sub2)
            %22 : int = aten::add(%self.sub2.a, %b)
            %23 : Tensor = aten::add(%x.1, %22, %2)
            %12 : Tensor = aten::add(%9, %23, %2)
            return (%12)
        """
        # test prim::SetAttr and prim::GetAttr impl in Static Runtime
        m = TestModule()
        m.eval()
        input = torch.randn(2, 2)
        output_s = m.forward(input)
        ms = torch.jit.script(m)
        sm = StaticModule(ms)
        output_sm = sm(input)
        torch.testing.assert_close(output_s, output_sm)
        # both positional and keyword calling conventions must benchmark
        sm.benchmark([input], {}, 2, 2)
        sm.benchmark_individual_ops([input], {}, 2, 2)
        sm.benchmark([], {"x": input}, 2, 2)
        sm.benchmark_individual_ops([], {"x": input}, 2, 2)

    @unittest.skip("Temporarily disabled")
    def test_fusion_trivial_graph(self):
        s = torch.full((2, 2), 2)
        tg = torch.jit.script(trivial_graph)
        o_ref = tg(s, s, s)
        torch._C._fuse_to_static_module(tg.graph)
        assert "StaticSubgraph" in str(tg.graph)
        o_test = tg(s, s, s)
        torch.testing.assert_close(o_ref, o_test)

    @unittest.skip("Temporarily disabled")
    def test_fusion_multihead_attention_layer(self):
        HID_DIM = 256
        QUERY_LEN = 8
        BATCH_SIZE = 128
        LAYERS = 3
        HEADS = 8
        DROPOUT = 0.1
        device = torch.device("cpu")
        attention = MultiHeadAttentionLayer(HID_DIM, HEADS, DROPOUT, device).to(device)
        with torch.no_grad():
            src = torch.randn(BATCH_SIZE, QUERY_LEN, HID_DIM).to(device)
            src_mask = (src > 0)[:, :, 0].unsqueeze(1).unsqueeze(2).to(device)
            attention.eval()
            attention = torch.jit.script(attention)
            attention.eval()
            o_ref = attention(src, src, src, src_mask)
            torch._C._fuse_to_static_module(attention._c)
            o_test = attention(src, src, src, src_mask)
            for a, b in zip(o_ref, o_test):
                torch.testing.assert_close(a, b)

    @unittest.skip("Temporarily disabled")
    def test_fusion_loop(self):
        a = torch.randn(5, 5)
        b = torch.randn(5, 5)
        c = 4
        lg = torch.jit.script(loop_graph)
        o_ref = lg(a, b, c)
        torch._C._fuse_to_static_module(lg.graph)
        assert "StaticSubgraph" in str(lg.graph)
        o_test = lg(a, b, c)
        torch.testing.assert_close(o_ref, o_test)

    @unittest.skip("Temporarily disabled")
    def test_fusion_outputs(self):
        a = torch.randn(2, 2)
        b = torch.randn(2, 2)
        c = 4
        og = torch.jit.script(output_graph)
        o_ref = og(a, b, b, c)
        torch._C._fuse_to_static_module(og.graph)
        assert "StaticSubgraph" in str(og.graph)
        o_test = og(a, b, b, c)
        for i in o_ref.keys():
            torch.testing.assert_close(o_ref[i], o_test[i])

    def test_create_object(self):
        class Foo:  # noqa: B903
            def __init__(self, x: torch.Tensor) -> None:
                self.x = x

        class Mod(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()

            def forward(self, y: torch.Tensor) -> torch.Tensor:
                foo = Foo(y)
                return y * foo.x

        mod = torch.jit.script(Mod()).eval()
        y = torch.randn((1, ))
        expected = mod(y)
        static_mod = StaticModule(torch.jit.freeze(mod))
        actual = static_mod(y)
        self.assertEqual(expected, actual)
# Standard PyTorch test entry point: dispatches to the common test runner.
if __name__ == "__main__":
    run_tests()
| pytorch-master | test/test_static_runtime.py |
# Owner(s): ["module: tests"]
import torch
import numpy as np
import math
from typing import Dict, List, Sequence
import random
from functools import partial
from itertools import product, combinations, permutations
import warnings
from torch._six import inf, nan
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, integral_types, complex_types, floating_types_and,
integral_types_and, floating_and_complex_types_and, all_types_and, all_types,
)
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoSciPy, slowTest, torch_to_numpy_dtype_dict,
IS_WINDOWS)
from torch.testing._internal.common_device_type import (
OpDTypes, expectedFailureMeta, instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU,
onlyNativeDeviceTypes, onlyCUDA, largeTensorTest, ops, precisionOverride)
from torch.testing._internal.common_methods_invocations import (
ReductionOpInfo, reduction_ops, reference_masked_ops)
# TODO: replace with make_tensor
def _generate_input(shape, dtype, device, with_extremal):
if shape == ():
x = torch.tensor((), dtype=dtype, device=device)
else:
if dtype.is_floating_point or dtype.is_complex:
# work around torch.randn not being implemented for bfloat16
if dtype == torch.bfloat16:
x = torch.randn(*shape, device=device) * random.randint(30, 100)
x = x.to(torch.bfloat16)
else:
x = torch.randn(*shape, dtype=dtype, device=device) * random.randint(30, 100)
x[torch.randn(*shape) > 0.5] = 0
if with_extremal and dtype.is_floating_point:
# Use extremal values
x[torch.randn(*shape) > 0.5] = float('nan')
x[torch.randn(*shape) > 0.5] = float('inf')
x[torch.randn(*shape) > 0.5] = float('-inf')
elif with_extremal and dtype.is_complex:
x[torch.randn(*shape) > 0.5] = complex('nan')
x[torch.randn(*shape) > 0.5] = complex('inf')
x[torch.randn(*shape) > 0.5] = complex('-inf')
elif dtype == torch.bool:
x = torch.zeros(shape, dtype=dtype, device=device)
x[torch.randn(*shape) > 0.5] = True
else:
x = torch.randint(15, 100, shape, dtype=dtype, device=device)
return x
# TODO: replace with make_tensor
def _rand_shape(dim, min_size, max_size):
shape = []
for i in range(dim):
shape.append(random.randint(min_size, max_size))
return tuple(shape)
def _reduced_shape(shape, dim=None, keepdim=False):
"""Computes the expected reduced shape given dim and keepdim
Args:
shape: The shape to reduce
dim : The dimensions to reduce
keepdim: If true, reduced dimensions have size 1 in the reduced shape,
otherwise they are removed from the reduced shape.
Returns:
The reduced shape
"""
if dim is None:
return [1] * len(shape) if keepdim else []
# Wrap negative dims
dim = dim if isinstance(dim, Sequence) else [dim]
dim = set(i if i >= 0 else len(shape) + i for i in dim)
result = []
for i, size in enumerate(shape):
if i not in dim:
result.append(size)
elif keepdim:
result.append(1)
return result
class TestReductions(TestCase):
###########################################################################
# ReductionOpInfo unit tests
###########################################################################
    def _test_dim_keepdim(self, op: ReductionOpInfo, device, *, ndim, **dim_keepdim):
        """Tests output shape for input with ndim and dim and keepdim kwargs"""
        # Random shape with `ndim` dimensions, each of size 2-4.
        shape = torch.randint(2, 5, (ndim,)).tolist()
        t = make_tensor(shape, dtype=torch.float, device=device)
        # One representative (args, kwargs) sample from the op's generator.
        args, kwargs = next(op.generate_args_kwargs(t, **dim_keepdim))
        result = op(t, *args, **dim_keepdim, **kwargs)
        # Compare against the shape predicted by the pure-Python oracle.
        expected_shape = _reduced_shape(shape, **dim_keepdim)
        self.assertEqual(result.shape, expected_shape, f"""
        expected output shape to be {expected_shape} but got {list(result.shape)}
        for input shape {shape} and {dim_keepdim}
        """)
    # TODO(@heitorschueroff) combine cases with and without keepdim once
    # there's support for a @parametrize decorator.
    # The methods below sweep the `dim`/`keepdim` kwarg space: default, None,
    # single int, empty list, multi-dim list (sorted and unsorted), plus the
    # error cases (duplicate dims, multi-dim on unsupporting ops, off-bounds).
    @ops(reduction_ops, dtypes=OpDTypes.none)
    def test_dim_default(self, device, op: ReductionOpInfo):
        """Tests that the default dim reduces all dimensions."""
        for ndim in range(3):
            self._test_dim_keepdim(op, device, ndim=ndim)
    @ops(reduction_ops, dtypes=OpDTypes.none)
    def test_dim_default_keepdim(self, device, op: ReductionOpInfo):
        """Tests that the default dim, when keepdim=True, reduces all dimensions to size 1."""
        for ndim in range(3):
            self._test_dim_keepdim(op, device, ndim=ndim, keepdim=True)
    @ops(reduction_ops, dtypes=OpDTypes.none)
    def test_dim_none(self, device, op: ReductionOpInfo):
        """Tests that dim=None reduces all dimensions."""
        for ndim in range(3):
            self._test_dim_keepdim(op, device, ndim=ndim, dim=None)
    @ops(reduction_ops, dtypes=OpDTypes.none)
    def test_dim_none_keepdim(self, device, op: ReductionOpInfo):
        """Tests that dim=None, when keepdim=True, reduces all dimensions to size 1."""
        for ndim in range(3):
            self._test_dim_keepdim(op, device, ndim=ndim, dim=None, keepdim=True)
    @ops(reduction_ops, dtypes=OpDTypes.none)
    def test_dim_single(self, device, op: ReductionOpInfo):
        """Tests that dim=i reduces dimension i."""
        self._test_dim_keepdim(op, device, ndim=0, dim=0)
        self._test_dim_keepdim(op, device, ndim=1, dim=0)
        self._test_dim_keepdim(op, device, ndim=2, dim=-1)
        self._test_dim_keepdim(op, device, ndim=3, dim=1)
    @ops(reduction_ops, dtypes=OpDTypes.none)
    def test_dim_single_keepdim(self, device, op: ReductionOpInfo):
        """Tests that dim=i, when keepdim=True, reduces dimension i to size 1."""
        self._test_dim_keepdim(op, device, ndim=0, dim=0, keepdim=True)
        self._test_dim_keepdim(op, device, ndim=1, dim=0, keepdim=True)
        self._test_dim_keepdim(op, device, ndim=2, dim=-1, keepdim=True)
        self._test_dim_keepdim(op, device, ndim=3, dim=1, keepdim=True)
    # Multi-dim variants only run for ops that accept a list of dims, hence
    # the `supports_multiple_dims` filter on the op list below.
    @ops(filter(lambda op: op.supports_multiple_dims, reduction_ops), dtypes=OpDTypes.none)
    def test_dim_empty(self, device, op: ReductionOpInfo):
        """Tests that dim=[] is a no-op"""
        self._test_dim_keepdim(op, device, ndim=0, dim=[])
        self._test_dim_keepdim(op, device, ndim=2, dim=[])
    @ops(filter(lambda op: op.supports_multiple_dims, reduction_ops), dtypes=OpDTypes.none)
    def test_dim_empty_keepdim(self, device, op: ReductionOpInfo):
        """Tests that dim=[], when keepdim=True, is a no-op"""
        self._test_dim_keepdim(op, device, ndim=0, dim=[], keepdim=True)
        self._test_dim_keepdim(op, device, ndim=2, dim=[], keepdim=True)
    @ops(filter(lambda op: op.supports_multiple_dims, reduction_ops), dtypes=OpDTypes.none)
    def test_dim_multi(self, device, op: ReductionOpInfo):
        """Tests that dim=[i, j, ...] reduces dimensions i, j, ...."""
        self._test_dim_keepdim(op, device, ndim=1, dim=[0])
        self._test_dim_keepdim(op, device, ndim=3, dim=[0, 2])
    @ops(filter(lambda op: op.supports_multiple_dims, reduction_ops), dtypes=OpDTypes.none)
    def test_dim_multi_keepdim(self, device, op: ReductionOpInfo):
        """Tests that dim=[i, j, ...], when keepdim=True, reduces dimensions i, j, .... to size 1."""
        self._test_dim_keepdim(op, device, ndim=1, dim=[0], keepdim=True)
        self._test_dim_keepdim(op, device, ndim=3, dim=[0, 2], keepdim=True)
    @ops(filter(lambda op: op.supports_multiple_dims, reduction_ops), dtypes=OpDTypes.none)
    def test_dim_multi_unsorted(self, device, op: ReductionOpInfo):
        """Tests that operator correctly handles unsorted dim list."""
        self._test_dim_keepdim(op, device, ndim=4, dim=[3, 0, 2])
    @ops(filter(lambda op: op.supports_multiple_dims, reduction_ops), dtypes=OpDTypes.none)
    def test_dim_multi_unsorted_keepdim(self, device, op: ReductionOpInfo):
        """Tests that operator correctly handles unsorted dim list when keepdim=True."""
        self._test_dim_keepdim(op, device, ndim=4, dim=[3, 0, 2], keepdim=True)
    @ops(filter(lambda op: op.supports_multiple_dims, reduction_ops), dtypes=OpDTypes.none)
    def test_dim_multi_duplicate(self, device, op: ReductionOpInfo):
        """Tests that an error is raised if dim has duplicate entries."""
        with self.assertRaises(RuntimeError):
            self._test_dim_keepdim(op, device, ndim=3, dim=[0, 1, 1, 2])
    @ops(filter(lambda op: not op.supports_multiple_dims, reduction_ops), dtypes=OpDTypes.none)
    def test_dim_multi_unsupported(self, device, op: ReductionOpInfo):
        """Tests that ops claiming to not support multi dim actually don't."""
        with self.assertRaises(TypeError):
            self._test_dim_keepdim(op, device, ndim=3, dim=[0, 2])
    @ops(reduction_ops, dtypes=OpDTypes.none)
    def test_dim_offbounds(self, device, op: ReductionOpInfo):
        """Tests that passing an off-bounds dim throws"""
        with self.assertRaises(IndexError):
            self._test_dim_keepdim(op, device, ndim=2, dim=2)
    @ops(reduction_ops, dtypes=OpDTypes.none)
    def test_dim_ndim_limit(self, device, op: ReductionOpInfo):
        """Tests that an exception is raised when reducing a tensor with more
        than 64 dims along some specific dimensions. dim=None is ok"""
        t = make_tensor([1] * 65, dtype=torch.float, device=device)
        with self.assertRaisesRegex(RuntimeError, "only tensors with up to 64 dims are supported"):
            op(t, dim=0)
    @ops(filter(lambda op: op.identity is not None, reduction_ops), dtypes=OpDTypes.supported)
    def test_identity(self, device, dtype, op: ReductionOpInfo):
        """Tests that the identity value is an identity for the operator"""
        t = make_tensor((10,), dtype=dtype, device=device)
        # Overwrite every odd element with the identity; reducing the full
        # tensor must then equal reducing only the untouched even elements.
        t[1::2] = op.identity
        args, kwargs = next(op.generate_args_kwargs(t))
        result = op(t[::2], *args, **kwargs)
        result_with_identity = op(t, *args, **kwargs)
        self.assertEqual(result, result_with_identity, """
        Adding identity value to the input tensor should not change the result.
        """)
    # TODO(@heitorschueroff) Update these to use the nan_policy kwarg once
    # it is added to reduction operators.
    @ops(filter(lambda op: op.nan_policy == 'propagate', reduction_ops), dtypes=OpDTypes.supported,
         allowed_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16))
    def test_nan_policy_propagate(self, device, dtype, op: ReductionOpInfo):
        """Tests that nan is propagated to the output by default"""
        t = make_tensor((5,), dtype=dtype, device=device)
        # A single NaN anywhere must poison the whole reduction.
        t[2] = torch.nan
        args, kwargs = next(op.generate_args_kwargs(t))
        result = op(t, *args, **kwargs)
        self.assertTrue(result.isnan())
    @ops(filter(lambda op: op.nan_policy == 'omit', reduction_ops), dtypes=OpDTypes.supported,
         allowed_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16))
    def test_nan_policy_omit(self, device, dtype, op: ReductionOpInfo):
        """Tests that NaN values do not affect the result."""
        t = make_tensor((10,), dtype=dtype, device=device)
        # NaNs at odd indices must be ignored, so reducing the full tensor
        # equals reducing just the even (finite) elements.
        t[1::2] = torch.nan
        args, kwargs = next(op.generate_args_kwargs(t))
        result = op(t[::2], *args, **kwargs)
        result_with_nan = op(t, *args, **kwargs)
        self.assertEqual(result, result_with_nan)
    @ops(reduction_ops, dtypes=OpDTypes.supported)
    def test_result_dtype(self, device, dtype, op: ReductionOpInfo):
        """Tests that the result has the correct dtype"""
        t = make_tensor((5,), dtype=dtype, device=device)
        args, kwargs = next(op.generate_args_kwargs(t))
        result: torch.Tensor = op(t, *args, **kwargs)
        is_integral = dtype in integral_types_and(torch.bool)
        # Promotion rules, checked in priority order: int->float promotion,
        # int->int64 promotion, an op-declared result_dtype, complex->real
        # (e.g. norm-like ops), otherwise the input dtype is preserved.
        if op.promotes_int_to_float and is_integral:
            self.assertTrue(torch.is_floating_point(result))
        elif op.promotes_int_to_int64 and is_integral:
            self.assertEqual(result.dtype, torch.int64)
        elif op.result_dtype is not None:
            self.assertEqual(result.dtype, op.result_dtype)
        elif op.complex_to_real:
            _complex_to_real_dtype_map = {
                torch.complex128: torch.float64,
                torch.complex64: torch.float32,
                torch.complex32: torch.float16,
            }
            self.assertEqual(result.dtype, _complex_to_real_dtype_map.get(dtype, dtype))
        else:
            self.assertEqual(result.dtype, dtype)
@ops(reduction_ops, dtypes=OpDTypes.none)
def test_empty_tensor_empty_slice(self, device, op: ReductionOpInfo):
"""Tests for consistent behavior when reducing over an empty slice.
The rules for reducing over an empty slice are as follows:
- Return the identity value if the operator has one
- Otherwise, return NaN if the operator promotes integral dtype to
floating point dtypes.
- Otherwise, raise an error
See discussion here https://github.com/pytorch/pytorch/issues/61901
"""
t = make_tensor((0, 2, 3), dtype=torch.float, device=device)
for dim in [0] + [[0, 2]] if op.supports_multiple_dims else []:
args, kwargs = next(op.generate_args_kwargs(t, dim=dim))
if op.identity is not None:
# Reducing along empty slice should return identity
result = op(t, *args, dim=dim, **kwargs)
self.assertEqual(result, torch.full_like(result, op.identity))
elif op.promotes_int_to_float:
# Reducing along empty slice should return NaN
result = op(t, *args, dim=dim, **kwargs)
self.assertEqual(result, torch.full_like(result, torch.nan))
else:
# Reducing along empty slice should raise an error
with self.assertRaises(IndexError):
op(t, *args, dim=dim, **kwargs)
@ops(reduction_ops, dtypes=OpDTypes.none)
def test_empty_tensor_nonempty_slice(self, device, op: ReductionOpInfo):
"""Tests that reducing a nonempty slice of an empty tensor returns an
empty tensor with the dimensions reduced."""
t = make_tensor((0, 2, 3), dtype=torch.float, device=device)
for dim in [1] + [[1, 2]] if op.supports_multiple_dims else []:
args, kwargs = next(op.generate_args_kwargs(t, dim=dim))
result = op(t, *args, dim=dim, **kwargs)
self.assertEqual(result.shape, _reduced_shape(t.shape, dim))
def _test_noncontiguous(self, op: ReductionOpInfo, t: torch.Tensor, **reduction_kwargs):
"""Helper method to test noncontiguous input tensors."""
assert not t.is_contiguous()
t_contig = t.contiguous()
for args, kwargs in op.generate_args_kwargs(t_contig, **reduction_kwargs):
kwargs.update(reduction_kwargs)
result = op(t, *args, **kwargs)
expected = op(t_contig, *args, **kwargs)
self.assertEqual(result, expected)
    # Noncontiguity variants: strided inner/outer dims, full stride slicing,
    # transposition, and zero-stride expanded views.
    @ops(reduction_ops)
    def test_noncontiguous_innermost(self, device, dtype, op: ReductionOpInfo):
        """Tests reducing along noncontiguous innermost dimension."""
        t = make_tensor((10, 10), dtype=dtype, device=device, low=-1, high=1)
        self._test_noncontiguous(op, t[:, ::2], dim=1)
    @ops(reduction_ops)
    def test_noncontiguous_outermost(self, device, dtype, op: ReductionOpInfo):
        """Tests reducing along noncontiguous outermost dimension."""
        t = make_tensor((10, 10), dtype=dtype, device=device, low=-1, high=1)
        self._test_noncontiguous(op, t[::2, :], dim=0)
    @ops(reduction_ops)
    def test_noncontiguous_all(self, device, dtype, op: ReductionOpInfo):
        """Tests reducing all dimensions of a noncontiguous tensor."""
        t = make_tensor((5, 5, 5), dtype=dtype, device=device, low=-1, high=1)
        self._test_noncontiguous(op, t[::2, ::3, 1:-1:2])
    @ops(reduction_ops)
    def test_noncontiguous_transposed(self, device, dtype, op: ReductionOpInfo):
        """Tests reducing a transposed tensor."""
        t = make_tensor((5, 5), dtype=dtype, device=device, low=-1, high=1)
        self._test_noncontiguous(op, t.T)
    @ops(reduction_ops)
    def test_noncontiguous_expanded(self, device, dtype, op: ReductionOpInfo):
        """Tests reducing a tensor with expanded singleton dimensions."""
        t = make_tensor((2, 3), dtype=dtype, device=device, low=-1, high=1)
        self._test_noncontiguous(op, t.unsqueeze(1).expand(-1, 5, -1))
    # NumPy does not support BFloat16 so we don't test that against reference
    # implementations. We also don't compare dtypes or test for different
    # keepdim because we already have other tests covering those.
    # The test_reference_testing in test_ops.py only uses the samples from
    # sample_inputs_func which do not test as exhaustively as these tests.
def _test_ref(self, op: ReductionOpInfo, t: torch.Tensor, **reduction_kwargs):
"""Compares op against op.ref for the given input and reduction kwargs"""
for args, kwargs in op.generate_args_kwargs(t, **reduction_kwargs):
kwargs.update(reduction_kwargs)
result = op(t, *args, **kwargs)
expected = op.ref(t.detach().cpu().numpy(), *args, **kwargs)
self.assertEqual(result, expected, exact_dtype=False)
    # Reference-comparison sweep: scalar, small, large-1D (stability),
    # large-2D (parallelism), 64-bit indexing, duplicates, extremal values.
    @ops(filter(lambda op: op.ref is not None, reduction_ops),
         allowed_dtypes=all_types_and_complex_and(torch.half, torch.bool))
    def test_ref_scalar_input(self, device, dtype, op: ReductionOpInfo):
        """Compares op against reference for scalar input tensors"""
        self._test_ref(op, make_tensor([], dtype=dtype, device=device))
    @ops(filter(lambda op: op.ref is not None, reduction_ops),
         allowed_dtypes=all_types_and_complex_and(torch.half, torch.bool))
    def test_ref_small_input(self, device, dtype, op: ReductionOpInfo):
        """Compares op against reference for small input tensors"""
        t = make_tensor((5, 3, 4, 2), dtype=dtype, device=device, low=-2, high=2, exclude_zero=True)
        self._test_ref(op, t)
        for dim in [0, 1, 3] + ([[0, 2], [1, 3]] if op.supports_multiple_dims else []):
            self._test_ref(op, t, dim=dim)
    @ops(filter(lambda op: op.ref is not None, reduction_ops),
         allowed_dtypes=[torch.float64])
    def test_ref_large_input_1D(self, device, dtype, op: ReductionOpInfo):
        """Compares op against reference for a large 1D input tensor to check stability"""
        self._test_ref(op, make_tensor((2 ** 20,), dtype=dtype, device=device, low=-1, high=1, exclude_zero=True))
    @ops(filter(lambda op: op.ref is not None, reduction_ops),
         allowed_dtypes=[torch.float64])
    def test_ref_large_input_2D(self, device, dtype, op: ReductionOpInfo):
        """Compares op against reference for a large 2D input tensor to test parallelism"""
        t = make_tensor((32, 2 ** 16), dtype=dtype, device=device, low=-1, high=1, exclude_zero=True)
        self._test_ref(op, t, dim=1)
    @largeTensorTest("8gb")
    @ops(filter(lambda op: op.ref is not None, reduction_ops),
         allowed_dtypes=[torch.float64])
    def test_ref_large_input_64bit_indexing(self, device, dtype, op: ReductionOpInfo):
        """Compares op against reference for a very large input tensor that requires 64 bit indexing"""
        self._test_ref(op, make_tensor((275000000,), dtype=dtype, device=device, low=-1, high=1, exclude_zero=True))
    @ops(filter(lambda op: op.ref is not None, reduction_ops),
         allowed_dtypes=all_types_and_complex_and(torch.half, torch.bool))
    def test_ref_duplicate_values(self, device, dtype, op: ReductionOpInfo):
        """Compares op against reference for input tensors with duplicate values"""
        t = make_tensor((4, 4), dtype=dtype, device=device, low=-2, high=2, exclude_zero=True)
        # Mirror one checkerboard onto the other so duplicates are guaranteed.
        t[::2, ::2] = t[1::2, 1::2]
        self._test_ref(op, t)
        self._test_ref(op, t, dim=0)
        self._test_ref(op, t, dim=1)
    @ops(filter(lambda op: op.ref is not None, reduction_ops),
         allowed_dtypes=[torch.float32, torch.complex64])
    def test_ref_extremal_values(self, device, dtype, op: ReductionOpInfo):
        """Compares op against reference for input tensors with extremal values"""
        t = make_tensor((5,), dtype=dtype, device=device, exclude_zero=True)
        extremals = [0, 1, nan, inf, -inf]
        for extremal in extremals:
            t[2] = extremal
            self._test_ref(op, t)
    ###########################################################################
    # TODO: Legacy tests - port to ReductionOpInfo
    ###########################################################################

    def test_var_unbiased(self, device):
        # var()/std() with no dim must match the explicit dim=0 form, for both
        # unbiased (Bessel-corrected) and biased estimators.
        tensor = torch.randn(100, device=device)
        self.assertEqual(tensor.var(0), tensor.var(0, unbiased=True))
        self.assertEqual(tensor.var(), tensor.var(unbiased=True))
        self.assertEqual(tensor.var(unbiased=False), tensor.var(0, unbiased=False))
        # hand-computed references: var([1,2]) = 0.5 (n-1) vs 0.25 (n)
        tensor = torch.tensor([1.0, 2.0], device=device)
        self.assertEqual(tensor.var(unbiased=True), 0.5)
        self.assertEqual(tensor.var(unbiased=False), 0.25)
        tensor = torch.tensor([1.0, 2.0, 3.0], device=device)
        self.assertEqual(tensor.var(unbiased=True), 1.0)
        self.assertEqual(tensor.var(unbiased=False), 2.0 / 3.0)
        tensor = torch.randn(100, device=device)
        self.assertEqual(tensor.std(0), tensor.std(0, unbiased=True))
        self.assertEqual(tensor.std(), tensor.std(unbiased=True))
        self.assertEqual(tensor.std(unbiased=False), tensor.std(0, unbiased=False))
    def test_var_stability(self, device):
        # Large mean with tiny variance: catches catastrophic cancellation in
        # naive one-pass variance implementations.
        tensor = torch.tensor([2281.5, 2281.25], device=device)
        self.assertEqual(tensor.var(dim=0), 0.03125)
        self.assertEqual(tensor.var(), 0.03125)
    def test_sum_dim_reduction_uint8_overflow(self, device):
        # uint8 sums wrap modulo 256; also note -1 wraps to 255 on conversion.
        example = [[-1, 2, 1], [5, 3, 6]]
        x = torch.tensor(example, dtype=torch.uint8, device=device)
        self.assertEqual(x.sum(dtype=torch.uint8).item(), 16)
        self.assertEqual(x.sum(0, dtype=torch.uint8), torch.tensor([4, 5, 7], dtype=torch.uint8, device=device))
        self.assertEqual(x.sum(1, dtype=torch.uint8), torch.tensor([2, 14], dtype=torch.uint8, device=device))
        y = torch.tensor(example, dtype=torch.uint8, device=device)
        # out= variant must agree with the functional form
        torch.sum(x, 0, out=y)
        self.assertEqual(x.sum(0, dtype=torch.uint8), y)
def test_dim_reduction_less_than_64(self, device):
sizes = [1] * 65
x = torch.randn(sizes, device=device)
ops = [torch.mean, torch.sum, torch.nansum, torch.std, torch.logsumexp, torch.std, torch.var,
torch.norm]
for op in ops:
with self.assertRaisesRegex(RuntimeError, "only tensors with up to 64 dims are supported"):
op(x, 64)
with self.assertRaisesRegex(RuntimeError, "only tensors with up to 64 dims are supported"):
op(x, -1)
@onlyCPU
@dtypes(torch.float, torch.bfloat16)
def test_dim_reduction_lastdim(self, device, dtype):
    """Last-dim reductions must agree between strided and contiguous inputs."""
    # Slice with step 2 so the last dim is non-contiguous.
    strided = torch.randn(3, 5, 40, device=device, dtype=dtype)[:, :, 0:40:2]
    dense = strided.contiguous()
    for reduce_fn in (torch.norm, torch.argmax, torch.argmin):
        self.assertEqual(reduce_fn(strided, dim=-1), reduce_fn(dense, dim=-1))
@skipIfNoSciPy
def test_logsumexp(self, device):
    """logsumexp matches scipy, writes through out= views, and promotes ints."""
    from scipy.special import logsumexp
    a = torch.randn(5, 4, device=device)
    a[0, 0] = inf
    a[1, :] = -inf
    actual = a.logsumexp(1)
    expected = logsumexp(a.cpu().numpy(), 1)
    self.assertEqual(expected.shape, actual.shape)
    self.assertEqual(expected, actual)
    # out= aliased to a column view must write in place.
    b = torch.zeros(5, 2, device=device)
    col = b[:, 0]
    torch.logsumexp(a, 1, out=col)
    self.assertEqual(expected, b[:, 0])
    # Integral inputs are promoted to floating point.
    ints = torch.randint(-100, 100, [5, 4], device=device)
    actual = ints.logsumexp(1).to(torch.float64)
    expected = logsumexp(ints.cpu().numpy(), 1)
    self.assertEqual(expected.shape, actual.shape)
    self.assertEqual(expected, actual)
@onlyCPU
def test_sum_parallel(self, device):
    # To exercise the parallel branches we need tensors larger than the
    # grain size defined in Parallel.h; even on a single-core machine
    # this still gives signal on correctness.
    def check(size):
        for dim in range(len(size) + 1):
            npy = np.round(np.random.rand(*size))  # only 0s and 1s
            ten = torch.from_numpy(npy)
            # Parallelism is only used past the grain size (32768).
            self.assertTrue(ten.numel() > 32768)
            if dim == len(size):
                npy_sum, ten_sum = npy.sum(), ten.sum()
            else:
                npy_sum, ten_sum = npy.sum(dim), ten.sum(dim)
            # 0/1 inputs sum exactly, so the difference must be exactly zero.
            self.assertEqual(np.abs(npy_sum - ten_sum.numpy()).sum(), 0)
    check([2, 3, 3, 3, 3, 2, 2, 3, 2, 3, 2, 3, 3])
    check([4, 4, 4, 4, 4, 4, 4, 4, 4, 4])
    check([1, 32 * 8 * 32 * 8])
    check([1, 32770])
# TODO: kill map2_ (and similar) uses and update to compare with NumPy
# only works on CPU since this uses map2_, which is only supported on CPU
def _testCSelection(self, torchfn, mathfn):
# Two tensors
size = (100, 100)
a = torch.rand(*size)
b = torch.rand(*size)
c = torchfn(a, b)
expected_c = torch.zeros(*size)
expected_c.map2_(a, b, lambda _, a, b: mathfn(a, b))
self.assertEqual(expected_c, c, atol=0, rtol=0)
@onlyCPU
def test_max_elementwise(self, device):
    # Elementwise (two-tensor) torch.max checked against Python's builtin max.
    self._testCSelection(torch.max, max)
@onlyCPU
def test_min_elementwise(self, device):
    # Elementwise (two-tensor) torch.min checked against Python's builtin min.
    self._testCSelection(torch.min, min)
def test_all_any(self, device):
def test(size):
x = torch.ones(*size, device=device).byte()
self.assertTrue(x.all())
self.assertTrue(x.any())
x[3] = 0
self.assertFalse(x.all())
self.assertTrue(x.any())
x.zero_()
self.assertFalse(x.all())
self.assertFalse(x.any())
x.fill_(2)
self.assertTrue(x.all())
self.assertTrue(x.any())
x = torch.ones(*size, device=device).bool()
self.assertTrue(x.all())
self.assertTrue(x.any())
x[3] = False
self.assertFalse(x.all())
self.assertTrue(x.any())
test((10,))
test((5, 5))
def test_all_any_with_dim(self, device):
def test(x):
r1 = x.prod(dim=0, keepdim=False).byte()
r2 = x.all(dim=0, keepdim=False)
self.assertEqual(r1.shape, r2.shape)
self.assertTrue((r1 == r2).all())
r3 = x.sum(dim=1, keepdim=True).clamp(0, 1).byte()
r4 = x.any(dim=1, keepdim=True)
self.assertEqual(r3.shape, r4.shape)
self.assertTrue((r3 == r4).all())
test(torch.tensor([[0, 0, 0],
[0, 0, 1],
[0, 1, 1],
[1, 1, 1]], device=device, dtype=torch.uint8))
def test_numpy_named_args(self, device):
x1 = torch.randn(10, device=device)
x2 = torch.randn(10, device=device)
res1 = torch.add(input=x1, other=x2)
res2 = torch.add(x1=x1, x2=x2)
self.assertEqual(res1, res2)
x1 = torch.randn(10, 10, 10, device=device)
res1 = x1.sum(dim=(0, 2), keepdim=True)
res2 = x1.sum(axis=(0, 2), keepdims=True)
self.assertEqual(res1, res2)
# TODO: kill map2_-style helpers and replace with common creation ops
def _make_tensors(self, shape, val_range=(-100, 100), use_floating=True, use_integral=True,
                  use_complex=False) -> Dict[str, List[torch.Tensor]]:
    """Builds contiguous, non-contiguous, and sliced test tensors of ``shape``
    for every enabled dtype family, with values kept inside ``val_range``.

    Returns a dict with keys "cont", "noncont", and "slice", each mapping to
    one tensor per selected dtype.
    """
    float_types = [torch.double,
                   torch.float]
    int_types = [torch.int64,
                 torch.int32,
                 torch.int16]
    complex_types = [torch.complex64,
                     torch.complex128]

    def make_contiguous(shape, dtype) -> torch.Tensor:
        # Floating dtypes: scale/shift randn output into val_range, then clamp.
        if dtype in float_types:
            val = torch.randn(shape, dtype=dtype)
            val = val * ((val_range[1] - val_range[0]) / (math.pi * 2.0))
            val = val + ((val_range[1] - val_range[0]) / 2.0)
            val = torch.clamp(val, min=val_range[0], max=val_range[1])
            return val
        # All other dtypes (including complex) take the apply_/randint path.
        # NOTE(review): apply_ support for complex dtypes should be confirmed.
        result = torch.zeros(shape, dtype=dtype)
        result.apply_(lambda x: random.randint(val_range[0], val_range[1]))
        return result

    def make_non_contiguous(shape, dtype) -> torch.Tensor:
        # Allocate extra trailing dims and select into them so the result
        # has the requested shape but non-dense strides.
        contig = make_contiguous(shape, dtype)
        non_contig = torch.empty(shape + (2, 2), dtype=dtype)[..., 0]
        non_contig = non_contig.select(-1, -1)
        non_contig.copy_(contig)
        self.assertFalse(non_contig.is_contiguous())
        return non_contig

    def make_contiguous_slice(size, dtype) -> torch.Tensor:
        contig = make_contiguous((1, size), dtype)
        non_contig = contig[:1, 1:size - 1]
        self.assertTrue(non_contig.is_contiguous())
        # NOTE(review): returns the full tensor, not the slice checked above —
        # presumably intentional (the slice only verifies contiguity); confirm.
        return contig

    types = []
    if use_floating:
        types += float_types
    if use_integral:
        types += int_types
    if use_complex:
        types += complex_types
    tensors: Dict[str, List[torch.Tensor]] = {"cont": [], "noncont": [], "slice": []}
    for dtype in types:
        tensors["cont"].append(make_contiguous(shape, dtype))
        tensors["noncont"].append(make_non_contiguous(shape, dtype))
        # The "slice" variant is 1 x sum(shape) elements.
        tensors["slice"].append(make_contiguous_slice(sum(list(shape)), dtype))
    return tensors
# TODO: refactor this to use comparators from common_utils
def _assert_matches_numpy(self, t, n):
self.assertEqual(n.shape, t.shape)
if t.dtype == torch.float:
self.assertEqual(n, t, rtol=1e-03, atol=1e-05, equal_nan=True)
else:
self.assertEqual(n, t, equal_nan=True)
# TODO: update this and tests that use it to use the device argument properly
def _test_dim_ops(self, pytorch_op, numpy_op,
                  use_floating=True, use_integral=True, use_complex=False):
    """Compares a dimensional reduction against its numpy reference across
    contiguous/non-contiguous/sliced inputs, many shapes, and dim choices."""
    def check(tensors_by_category, dim):
        for category, tensor_list in tensors_by_category.items():
            if category == "slice":
                # Sliced inputs are 1 x N; always reduce their leading dim.
                dim = 0
            for tensor in tensor_list:
                # we have no control over NumPy warnings...
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    expected = numpy_op(tensor.cpu().numpy(), dim)
                actual = pytorch_op(tensor, dim)
                self._assert_matches_numpy(actual, expected)
                if torch.cuda.is_available():
                    self._assert_matches_numpy(pytorch_op(tensor.cuda(), dim).cpu(), expected)

    # (shape, dim) cases, including multi-dim and negative-dim reductions.
    cases = [((5, 400000), 1),
             ((3, 5, 7), 0),
             ((3, 5, 7), 1),
             ((3, 5, 7), 2),
             ((100000,), -1),
             ((50, 50, 50), 0),
             ((50, 50, 50), 1),
             ((50, 50, 50), 2),
             ((50, 50, 50), (1, 2)),
             ((50, 50, 50), (1, -1)),
             ((50, 50, 50), (0, 2)),
             ((50, 50, 50), (0, 2, 1))]
    for shape, dim in cases:
        check(self._make_tensors(shape, use_floating=use_floating,
                                 use_integral=use_integral, use_complex=use_complex), dim)
@slowTest
@onlyCPU
def test_sum_dim(self, device):
    # Dimensional sum vs numpy across float, int, and complex dtypes.
    self._test_dim_ops(
        lambda t, d: t.sum(d),
        lambda n, d: n.sum(d),
        use_floating=True, use_integral=True, use_complex=True)
@onlyCPU
def test_mean_dim(self, device):
    # Dimensional mean vs numpy; integral inputs are excluded.
    self._test_dim_ops(
        lambda t, d: t.mean(d),
        lambda n, d: n.mean(d),
        use_integral=False,
        use_complex=True)
@onlyCPU
def test_std_dim(self, device):
    # Dimensional std vs numpy; torch's unbiased flag maps to numpy's ddof=1.
    for unbiased in [False, True]:
        self._test_dim_ops(
            lambda t, d: t.std(d, unbiased=unbiased),
            lambda n, d: n.std(d, ddof=1 if unbiased else 0),
            use_integral=False)
@onlyCPU
def test_var_dim(self, device):
    # Dimensional var vs numpy; torch's unbiased flag maps to numpy's ddof=1.
    for unbiased in [False, True]:
        self._test_dim_ops(
            lambda t, d: t.var(d, unbiased=unbiased),
            lambda n, d: n.var(d, ddof=1 if unbiased else 0),
            use_integral=False)
@onlyCPU
@skipIfNoSciPy
def test_logsumexp_dim(self, device):
    # Dimensional logsumexp vs the scipy reference implementation.
    from scipy.special import logsumexp
    self._test_dim_ops(
        lambda t, d: t.logsumexp(d),
        lambda n, d: logsumexp(n, d),
        use_integral=False)
@onlyCPU
def test_mean_int_with_optdtype(self, device):
    """mean(dtype=...) on integral input is equivalent to casting first."""
    ints = make_tensor((3, 4, 5), dtype=torch.int64, device=device)
    # Supplying the optional output dtype must match an explicit up-front cast.
    self.assertEqual(ints.to(torch.float32).mean(), ints.mean(dtype=torch.float32))
# TODO: update this and tests that use it to handle device properly
def _test_reduce_integer_upcast(self, fn, has_out=True, test_complex=True):
    """Checks dtype-promotion rules of a reduction ``fn``.

    Integral inputs are expected to upcast to int64 by default; an explicit
    ``dtype=`` argument overrides that, and with ``out=`` the out tensor's
    dtype wins (a conflicting ``dtype=`` must raise).
    NOTE(review): ``test_complex`` is never read in this body — confirm intent.
    """
    shape = (3, 4, 5)
    reduced_shape = fn(torch.ones(shape)).shape

    # NOTE: reads ``x`` from the enclosing loop below via closure; only
    # call it after ``x`` has been bound for the current dtype.
    def _test_out(dtype, other_dtype):
        out = torch.ones(reduced_shape, dtype=dtype)
        result = fn(x, out=out)
        self.assertIs(out.dtype, result.dtype)
        self.assertEqual(fn(x.to(dtype)), result, exact_dtype=False)
        result = fn(x, out=out, dtype=dtype)
        self.assertIs(out.dtype, result.dtype)
        self.assertEqual(fn(x.to(dtype)), result, exact_dtype=False)
        # 'out' is favored over dtype, check error
        self.assertRaises(RuntimeError, lambda: fn(x, out=out, dtype=other_dtype))

    for dtype in [dtype for dtype in get_all_math_dtypes('cpu') if dtype != torch.float16]:
        x = torch.ones(shape, dtype=dtype)
        # Default promotion: floating/complex keep their dtype, ints go to int64.
        expected_dtype = dtype if dtype.is_floating_point or dtype.is_complex else torch.int64
        self.assertIs(expected_dtype, fn(x).dtype)
        self.assertEqual(fn(x.to(expected_dtype)), fn(x))
        # Pick a same-family dtype different from the input's.
        if dtype.is_floating_point:
            other_dtype = torch.float32 if dtype == torch.float64 else torch.float64
        elif dtype.is_complex:
            other_dtype = torch.complex64 if dtype == torch.complex128 else torch.complex128
        else:
            other_dtype = torch.int32 if dtype != torch.int32 else torch.int16
        self.assertIs(other_dtype, fn(x, dtype=other_dtype).dtype)
        self.assertEqual(fn(x.to(other_dtype)), fn(x, dtype=other_dtype), exact_dtype=False)
        # test mixed int/float/complex
        if dtype.is_floating_point:
            mixed_dtypes = [torch.int32, torch.complex64]
        elif dtype.is_complex:
            mixed_dtypes = [torch.int32, torch.float32]
        else:
            mixed_dtypes = [torch.float32, torch.complex64]
        for mixed_dtype in mixed_dtypes:
            self.assertIs(mixed_dtype, fn(x, dtype=mixed_dtype).dtype)
            self.assertEqual(fn(x.to(mixed_dtype)), fn(x, dtype=mixed_dtype), exact_dtype=False)
        if has_out:
            # ``mixed_dtype`` here is the last value left over from the loop above.
            _test_out(dtype, other_dtype)
            _test_out(dtype, mixed_dtype)
@onlyCPU
def test_sum_integer_upcast(self, device):
    # Full and dimensional sum must follow integer-upcast promotion rules.
    self._test_reduce_integer_upcast(lambda x, **kwargs: torch.sum(x, **kwargs), False)
    self._test_reduce_integer_upcast(lambda x, **kwargs: torch.sum(x, 0, **kwargs))
@onlyCPU
def test_prod_integer_upcast(self, device):
    # Full and dimensional prod must follow integer-upcast promotion rules.
    self._test_reduce_integer_upcast(lambda x, **kwargs: torch.prod(x, **kwargs), False)
    self._test_reduce_integer_upcast(lambda x, **kwargs: torch.prod(x, 0, **kwargs))
@onlyCPU
def test_cumsum_integer_upcast(self, device):
    # cumsum must follow integer-upcast promotion rules.
    self._test_reduce_integer_upcast(lambda x, **kwargs: torch.cumsum(x, 0, **kwargs))
@onlyCPU
def test_cumprod_integer_upcast(self, device):
    # cumprod must follow integer-upcast promotion rules.
    self._test_reduce_integer_upcast(lambda x, **kwargs: torch.cumprod(x, 0, **kwargs))
@dtypes(*all_types())
def test_mode(self, device, dtype):
    # Builds a matrix whose first two rows and first two columns are all 1s,
    # making 1 the most frequent element along both the default (last) dim
    # and dim 0, then checks values, indices, out= variants, and that the
    # input is left untouched.
    SIZE = 10
    x = torch.arange(1., SIZE * SIZE + 1, device=device, dtype=dtype).clone().resize_(SIZE, SIZE)
    x[:2] = 1
    x[:, :2] = 1
    x0 = x.clone()

    # Pre-calculated results.
    res1val = torch.ones(SIZE, device=device, dtype=dtype)
    # The indices are the position of the last appearance of the mode element.
    res1ind = torch.ones(SIZE, device=device, dtype=torch.long)
    res1ind[0] = SIZE - 1
    res1ind[1] = SIZE - 1

    res2val, res2ind = torch.mode(x, keepdim=False)
    self.assertEqual(res1val, res2val, atol=0, rtol=0)
    self.assertEqual(res1ind, res2ind, atol=0, rtol=0)

    # Test use of result tensor
    res2val = torch.tensor((), device=device, dtype=dtype)
    res2ind = torch.tensor((), device=device, dtype=torch.long)
    torch.mode(x, keepdim=False, out=(res2val, res2ind))
    self.assertEqual(res1val, res2val, atol=0, rtol=0)
    self.assertEqual(res1ind, res2ind, atol=0, rtol=0)

    # Test non-default dim
    res2val, res2ind = torch.mode(x, 0, False)
    self.assertEqual(res1val, res2val, atol=0, rtol=0)
    self.assertEqual(res1ind, res2ind, atol=0, rtol=0)

    # input unchanged
    self.assertEqual(x, x0, atol=0, rtol=0)
def _test_mode_intervals(self, shape, intervals, device, dtype, v=1):
x = torch.arange(0, shape[1], device=device, dtype=dtype).expand(shape)
x = x.contiguous()
x[:, v] = intervals[0][0]
# Set the value of each interval to the mode "v"
for (beg, end) in intervals:
x[:, beg:end] = v
values, indices = torch.mode(x, -1, False)
# Check whether the returned indices correspond to the returned values
self.assertTrue((x.gather(1, indices.unsqueeze(1)).t() == values).all())
# Check whether the returned values are the mode
self.assertTrue((values == v).all().item())
@onlyCUDA
@dtypes(*all_types_and(torch.half, torch.bfloat16))
def test_mode_large(self, device, dtype):
    """Exercises the CUDA mode kernels across block- and slice-size boundaries."""
    # i should be less than (d - 2) / 2 so the planted intervals stay valid.
    def run_shape(shape, i):
        d = shape[-1]
        # Mode only in the middle.
        self._test_mode_intervals(shape, [(i, d - i)], device, dtype)
        # Mode in discontiguous parts of the input.
        self._test_mode_intervals(shape, [(0, i), (i + 1, d - i - 1), (d - i, d)], device, dtype)
    # More than one line of (65535) thread blocks
    run_shape((65536, 10), 3)
    # Max slice size (2048)
    run_shape((10, 2048), 10)
    # Naive kernel for big slice sizes (> 2048)
    run_shape((10, 4096), 10)
def test_mode_boolean(self, device):
shapes = [
(10, 10),
(4, 2048),
(1, 4096),
]
for shape in shapes:
a = torch.zeros(shape, device=device, dtype=torch.bool)
a[:, (shape[1] - 1) // 2:] = True
values, indices = a.mode(-1)
self.assertEqual(values, torch.ones(shape[0], dtype=torch.bool))
print(indices)
indexed = a.gather(1, indices.unsqueeze(1)).squeeze(1)
self.assertEqual(values, indexed)
a.fill_(False)
a[:, shape[1] // 2 + 1:] = True
values, indices = a.mode(-1)
print(indices)
self.assertEqual(values, torch.zeros(shape[0], dtype=torch.bool))
indexed = a.gather(1, indices.unsqueeze(1)).squeeze(1)
self.assertEqual(values, indexed)
@expectedFailureMeta  # mode only supports CPU and CUDA device type
@onlyNativeDeviceTypes
def test_mode_wrong_dtype(self, device):
    """mode(out=...) must reject out tensors whose dtypes do not match."""
    def check(x_ty, v_ty, i_ty, message):
        x = torch.ones(10, device=device, dtype=x_ty)
        v = torch.ones(10, device=device, dtype=v_ty)
        i = torch.ones(10, device=device, dtype=i_ty)
        with self.assertRaisesRegex(RuntimeError, message):
            torch.mode(x, -1, True, out=(v, i))
    err_msg = "expected scalar type .* but got .* for "
    values_err = err_msg + "values"
    indices_err = err_msg + "indices"
    # Mismatched values dtype (indices correctly long).
    for x_ty, v_ty in ((torch.uint8, torch.int8), (torch.int8, torch.int16),
                       (torch.int32, torch.float32), (torch.float32, torch.float64)):
        check(x_ty, v_ty, torch.long, values_err)
    # Correct values dtype but mismatched indices dtype.
    for x_ty, i_ty in ((torch.uint8, torch.int8), (torch.int8, torch.int16),
                       (torch.int32, torch.float32), (torch.float32, torch.float64)):
        check(x_ty, x_ty, i_ty, indices_err)
@onlyCUDA
def test_mode_wrong_device(self, device):
    """mode(out=...) must reject out tensors on a different device than the input."""
    cpu_input = torch.ones(2)
    with self.assertRaisesRegex(RuntimeError,
                                "expected device .* but got .* for values"):
        wrong_values = torch.tensor([], device=device)
        torch.mode(cpu_input, -1, True, out=(wrong_values, torch.tensor([], dtype=torch.long)))
    with self.assertRaisesRegex(RuntimeError,
                                "expected device .* but got .* for indices"):
        wrong_indices = torch.tensor([], device=device)
        torch.mode(cpu_input, -1, True, out=(torch.tensor([]), wrong_indices))
# TODO: make work on CUDA, too
@onlyCPU
def test_accreal_type(self, device) -> None:
    """sum().item() yields float for floating dtypes and int for integral ones."""
    x = torch.ones(2, 3, 4)
    for convert, expected_py_type in ((x.double, float), (x.float, float),
                                      (x.long, int), (x.int, int), (x.short, int),
                                      (x.char, int), (x.byte, int)):
        self.assertIsInstance(convert().sum().item(), expected_py_type)
def test_var_mean_some_dims(self, device):
sizes = (4, 6, 7, 5, 3)
dims = len(sizes)
x = torch.rand(sizes, device=device)
for num_of_dims in range(2, dims):
dim_list = list(combinations(list(range(dims)), r=num_of_dims))
for dim in dim_list:
for unbiased in [False, True]:
for keepdim in [False, True]:
var1, mean1 = torch.var_mean(x, dim=dim, unbiased=unbiased, keepdim=keepdim)
var2 = x.var(dim=dim, unbiased=unbiased, keepdim=keepdim)
mean2 = x.mean(dim=dim, keepdim=keepdim)
self.assertEqual(var1, var2)
self.assertEqual(mean1, mean2)
# TODO: this should be a generic opinfo test
def test_all_any_empty(self, device):
x = torch.ByteTensor().to(device)
self.assertTrue(x.all())
self.assertFalse(x.any())
x = torch.BoolTensor().to(device)
self.assertTrue(x.all())
self.assertFalse(x.any())
@dtypesIfCUDA(torch.half, torch.bfloat16, torch.float, torch.double)
@dtypes(torch.half, torch.bfloat16, torch.float, torch.double)
def test_max_with_inf(self, device, dtype):
    """max/amax must propagate +inf per-dim and over the full tensor."""
    a = torch.tensor([[-inf, -inf, inf, 3], [inf, inf, -inf, -1]], dtype=dtype, device=device)
    # Each row contains +inf, so every per-row maximum is +inf.
    self.assertTrue(torch.all(torch.max(a, dim=1).values == inf).item())
    self.assertTrue(torch.all(torch.amax(a, dim=1) == inf).item())
    # Full reductions must be +inf as well.
    self.assertTrue(torch.max(a).item() == inf)
    self.assertTrue(torch.amax(a).item() == inf)
@dtypesIfCUDA(torch.half, torch.bfloat16, torch.float, torch.double)
@dtypes(torch.half, torch.float, torch.bfloat16, torch.double)
def test_min_with_inf(self, device, dtype):
    """min/amin must propagate -inf per-dim and over the full tensor."""
    a = torch.tensor([[-inf, -inf, inf, 3], [inf, inf, -inf, -1]], dtype=dtype, device=device)
    # Each row contains -inf, so every per-row minimum is -inf.
    self.assertTrue(torch.all(torch.min(a, dim=1).values == (-inf)).item())
    self.assertTrue(torch.all(torch.amin(a, dim=1) == (-inf)).item())
    # Full reductions must be -inf as well.
    self.assertTrue(torch.min(a).item() == -inf)
    self.assertTrue(torch.amin(a).item() == -inf)
def _test_minmax_helper(self, torchfn, reffn, device, dtype, skip_indices=False):
    """Compares a torch min/max-style op against a numpy reference.

    Covers contiguous and non-contiguous inputs, dimensional reductions with
    index self-consistency checks, and NaN propagation for floating dtypes.
    ``skip_indices`` disables the per-dim and NaN-index checks for ops whose
    dim variant cannot be exercised here.
    """
    def create_input(shape, device, dtype):
        if dtype.is_floating_point:
            return torch.randn(*shape, device=device, dtype=dtype)
        else:
            # Bools draw from {0, 1}; other integrals from [-1000, 1000).
            low = 0 if dtype == torch.bool else -1000
            high = 2 if dtype == torch.bool else 1000
            return torch.randint(low, high, shape, device=device, dtype=dtype)
    x = create_input((100, 100), device, dtype)
    self.compare_with_numpy(torchfn, reffn, x)
    # non contiguous
    x = create_input((10, 10, 10), device, dtype)
    x = x[:, 4]
    self.compare_with_numpy(torchfn, reffn, x)

    def get_values(x):
        # Dim variants of min/max return (values, indices); amin/amax return values.
        if isinstance(x, tuple):
            return x[0]
        return x
    # indices
    if not skip_indices:
        size = 5
        x = create_input((size, size), device, dtype)
        inputs = (x, x.t())
        dims = (0, 1)
        for xinp, d in product(inputs, dims):
            self.compare_with_numpy(lambda x: get_values(torchfn(x, d, False)), lambda x: reffn(x, d, keepdims=False), xinp)
            result = torchfn(xinp, d, False)
            if isinstance(result, tuple):
                v, i = result
                # Indexing the input with the returned indices must recover the values.
                if d == 1:
                    self.assertEqual(xinp[torch.arange(size), i], v, atol=0, rtol=0)
                else:
                    self.assertEqual(xinp[i, torch.arange(size)], v, atol=0, rtol=0)
    # nan
    if dtype.is_floating_point:
        # A single NaN anywhere (start, middle, end) must win the reduction.
        for index in (0, 4, 99):
            x = create_input((100,), device, dtype)
            x[index] = nan
            if not skip_indices:
                result = torchfn(x, 0)
                v = get_values(result)
                self.assertEqual(v, nan)
                if isinstance(result, tuple):
                    i = result[1]
                    self.assertEqual(i, index)
            self.assertEqual(torchfn(x), nan)
@dtypesIfCPU(torch.float, torch.double, torch.long, torch.bool, torch.half)
@dtypesIfCUDA(torch.half, torch.float, torch.long, torch.bool)
@dtypes(torch.half, torch.float, torch.double)
def test_max(self, device, dtype):
    # torch.max (full and dim variants, with indices) vs numpy amax.
    self._test_minmax_helper(torch.max, np.amax, device, dtype)
@dtypesIfCPU(torch.float, torch.double, torch.long, torch.bool, torch.half)
@dtypesIfCUDA(torch.half, torch.float, torch.long, torch.bool)
@dtypes(torch.half, torch.float, torch.double)
def test_min(self, device, dtype):
    # torch.min (full and dim variants, with indices) vs numpy amin.
    self._test_minmax_helper(torch.min, np.amin, device, dtype)
@dtypesIfCPU(torch.half, torch.float, torch.double, torch.int, torch.long, torch.bool)
@dtypesIfCUDA(torch.half, torch.float, torch.int, torch.long, torch.bool)
@dtypes(torch.half, torch.float, torch.double)
def test_amin(self, device, dtype):
    # torch.amin (values-only reduction) vs numpy amin.
    self._test_minmax_helper(torch.amin, np.amin, device, dtype)
@dtypesIfCPU(torch.half, torch.float, torch.double, torch.int, torch.long, torch.bool)
@dtypesIfCUDA(torch.half, torch.float, torch.int, torch.long, torch.bool)
@dtypes(torch.float, torch.double)
def test_amax(self, device, dtype):
    # torch.amax (values-only reduction) vs numpy amax.
    self._test_minmax_helper(torch.amax, np.amax, device, dtype)
@onlyNativeDeviceTypes
@dtypes(torch.float, torch.double)
@dtypesIfCUDA(torch.half, torch.float, torch.bfloat16)
def test_aminmax(self, device, dtype):
    """Deprecated torch._aminmax halves agree with numpy amin/amax and warn once."""
    def make_wrapper(which):
        # which == 0 selects the min half of the returned pair, 1 the max half.
        def wrapper(x, dim=None, keepdims=False):
            with self.assertWarnsOnceRegex(UserWarning, "_aminmax is deprecated"):
                if dim is None:
                    return torch._aminmax(x)[which]
                else:
                    return torch._aminmax(x, dim, keepdims)[which]
        return wrapper
    self._test_minmax_helper(make_wrapper(0), np.amin, device, dtype)
    self._test_minmax_helper(make_wrapper(1), np.amax, device, dtype)
# TODO: bincount isn't a classic reduction -- maybe this test suite is
# reductions and summary ops?
def test_bincount(self, device):
    """End-to-end checks for torch.bincount: argument validation, empty
    inputs, weights, minlength, non-contiguous inputs, overflow avoidance,
    and large bin/input counts."""
    # negative input throws
    with self.assertRaisesRegex(RuntimeError, '1-d non-negative integral'):
        torch.bincount(torch.tensor([1, -1], device=device))
    # n-d input, with n > 1 throws
    with self.assertRaisesRegex(RuntimeError, '1-d non-negative integral'):
        torch.bincount(torch.tensor([[1, 2], [3, 4]], device=device))
    # floating input type throws
    with self.assertRaisesRegex(RuntimeError, 'not implemented'):
        torch.bincount(torch.tensor([1., 0.3], device=device))
    # minlength < 0 throws
    with self.assertRaisesRegex(RuntimeError, 'minlength should be >= 0'):
        torch.bincount(torch.tensor([1, 3], device=device),
                       torch.tensor([.2, .2], device=device),
                       minlength=-1)
    # input and weights dim mismatch
    with self.assertRaisesRegex(RuntimeError, 'same length'):
        torch.bincount(torch.tensor([1, 0], device=device),
                       torch.tensor([1., 0.3, 0.5], device=device))
    # 1-d input with no elements and default minlength
    self.assertEqual(torch.bincount(torch.tensor([], device=device, dtype=torch.long)),
                     torch.zeros(0, dtype=torch.long, device=device))
    # 1-d input with no elements and specified minlength
    self.assertEqual(torch.bincount(torch.tensor([], device=device, dtype=torch.long), minlength=10),
                     torch.zeros(10, dtype=torch.long, device=device))

    # test tensor method without weights
    long_counts = torch.tensor(
        [0, 3, 2, 1, 3], dtype=torch.uint8, device=device).bincount()
    self.assertEqual(
        torch.tensor([1, 1, 1, 2], dtype=torch.int64, device=device),
        long_counts)
    # test avoiding overflow for uint8 (#76979)
    count_uint8 = torch.tensor([0, 1, 2, 3, 255], dtype=torch.uint8, device=device).bincount()
    count_int16 = torch.tensor([0, 1, 2, 3, 255], dtype=torch.int16, device=device).bincount()
    self.assertEqual(count_uint8, count_int16)
    # test minlength functionality
    int_counts = torch.bincount(
        torch.tensor([1, 1, 1, 1], device=device), minlength=5)
    self.assertEqual(
        torch.tensor([0, 4, 0, 0, 0], dtype=torch.int64, device=device),
        int_counts)
    # test weights
    byte_counts = torch.bincount(
        torch.tensor([0, 1, 1, 1, 4], device=device),
        torch.tensor([.1, .2, .3, .4, .5], device=device))
    self.assertEqual(
        torch.tensor([0.1, 0.9, 0, 0, 0.5], device=device), byte_counts)
    byte_counts = torch.bincount(
        torch.tensor([0, 1, 1, 1, 4], device=device),
        torch.tensor([1, 2, 3, 4, 5], dtype=torch.int8, device=device))
    self.assertEqual(
        torch.tensor([1, 9, 0, 0, 5], device=device, dtype=torch.float64), byte_counts)
    # test non-contiguous inputs and weights
    inputs = torch.tensor([[0, 0], [3, 1], [2, 1], [1, 1], [3, 4]], device=device)
    weights = torch.tensor([[.1, 1], [.2, 2], [.3, 3], [.4, 4], [.5, 5]], device=device)
    for i in [0, 1]:
        assert not inputs[:, i].is_contiguous(), "Inputs are supposed to be non-contiguous"
        assert not weights[:, i].is_contiguous(), "Weights are supposed to be non-contiguous"
    # inputs are non-contiguous but weights are contiguous
    self.assertEqual(inputs[:, 0].bincount(), torch.tensor([1, 1, 1, 2]))
    # inputs and weights are non-contiguous
    self.assertEqual(
        inputs[:, 1].bincount(weights[:, 1]),
        torch.tensor([1, 9, 0, 0, 5], dtype=torch.float32))
    # weights are non-contiguous but inputs are contiguous
    self.assertEqual(inputs[:, 1].contiguous().bincount(weights[:, 1]),
                     torch.tensor([1, 9, 0, 0, 5], dtype=torch.float32))

    # test bincount on non-contiguous slices
    all0s = torch.zeros((32, 2), dtype=torch.int64, device=device)
    self.assertEqual(all0s[:, 0].bincount(), torch.tensor([32]))

    all1s = torch.ones((32, 2), dtype=torch.int64, device=device)
    self.assertEqual(all1s[:, 0].bincount(), torch.tensor([0, 32]))

    # test large number of bins - global memory use
    big_exp = torch.zeros(10000000, device=device)
    big_exp[-1] = 50.0
    big_w = torch.tensor([.5] * 100, device=device)
    big_out = torch.tensor([9999999] * 100, device=device).bincount(big_w)
    self.assertEqual(big_exp, big_out)
    # test large input size
    big_exp = torch.zeros(2, device=device, dtype=torch.int64)
    big_exp[1] = 1000000
    big_out = torch.ones(1000000, dtype=torch.int8, device=device).bincount()
    self.assertEqual(big_exp, big_out)
# TODO: how many var stability tests are there?
def test_var_stability2(self, device):
tensor = torch.FloatTensor([2281.5, 2281.25]).to(device)
# Stability for inner dim
self.assertEqual(tensor.var(0), 0.03125)
# General stability
self.assertEqual(tensor.var(), 0.03125)
# Stability for outer dimensions
tensor = tensor.unsqueeze(1)
self.assertEqual(tensor.var(0), 0.03125)
@onlyCPU
@dtypes(torch.bool, torch.double)
def test_sum_all(self, device, dtype) -> None:
    """Full-tensor sum() matches Python's sum over the flattened values."""
    def check(tensor: torch.Tensor) -> None:
        self.assertEqual(tensor.sum(), sum(tensor.reshape(-1).tolist()))
    if dtype != torch.bool:
        check(torch.tensor([1, 2, 3, 4, 5], dtype=dtype, device=device))
        check(torch.randn(200000, dtype=dtype, device=device))
        # Non-contiguous column view.
        check(torch.randn(2000, 2, dtype=dtype, device=device)[:, 0])
    else:
        check(torch.tensor([True, False, True], dtype=torch.bool, device=device))
def _test_memory_format_transformations(self, device, input_generator_fn, transformation_fn,
                                        memory_format, compare_data=True, default_is_preserve=False):
    """Checks that ``transformation_fn`` honors memory_format semantics:
    preserve_format keeps a channels-last-like layout, contiguous_format
    densifies, the default follows ``default_is_preserve``, and
    preserve_format keeps strides of arbitrarily permuted dense tensors.
    """
    assert(memory_format == torch.channels_last or memory_format == torch.channels_last_3d)

    # xc is a channels last tensor
    xc = input_generator_fn(device)
    # xc is not memory dense, but looks like channels last
    if memory_format == torch.channels_last:
        xc = xc[..., ::2, ::2]
    else:
        xc = xc[..., ::2, ::2, ::2]

    clone = transformation_fn(xc, memory_format=torch.preserve_format)
    self.assertFalse(clone.is_contiguous())
    self.assertTrue(clone.is_contiguous(memory_format=memory_format))
    self.assertFalse(xc.is_contiguous())
    self.assertFalse(xc.is_contiguous(memory_format=memory_format))
    if compare_data:
        self.assertEqual(xc, clone.to(xc))

    xc = input_generator_fn(device)
    # contiguous_format must densify regardless of the input layout.
    clone = transformation_fn(xc, memory_format=torch.contiguous_format)
    self.assertTrue(clone.is_contiguous())
    self.assertFalse(clone.is_contiguous(memory_format=memory_format))
    if compare_data:
        self.assertEqual(xc, clone.to(xc))

    xc = input_generator_fn(device)
    # No memory_format argument: behavior depends on the op's default.
    clone = transformation_fn(xc)
    if default_is_preserve:
        self.assertFalse(clone.is_contiguous())
        self.assertTrue(clone.is_contiguous(memory_format=memory_format))
    else:
        self.assertTrue(clone.is_contiguous())
        self.assertFalse(clone.is_contiguous(memory_format=memory_format))
    if compare_data:
        self.assertEqual(xc, clone.to(xc))

    # preserve_format must keep the strides of randomly permuted dense tensors.
    x = torch.randn((3, 4, 5, 6, 7, 8, 9), device=device)
    for _ in range(10):
        permutation = list(range(len(x.shape)))
        random.shuffle(permutation)
        x = x.permute(permutation)
        self.assertEqual(x.stride(), transformation_fn(x, memory_format=torch.preserve_format).stride())
@onlyCPU
@dtypes(torch.double)
def test_sum_out(self, device, dtype: torch.dtype) -> None:
    """sum(..., out=) resizes the out tensor and matches the functional form."""
    x = torch.rand(100, 100, dtype=dtype, device=device)
    out = torch.tensor((), dtype=dtype, device=device)
    torch.sum(x, 1, out=out)
    self.assertEqual(torch.sum(x, 1), out)
    # Multi-dim reduction into out.
    x = torch.rand(100, 100, 100, dtype=dtype, device=device)
    out = torch.tensor((), dtype=dtype, device=device)
    torch.sum(x, (2, 1), out=out)
    self.assertEqual(x.sum(2).sum(1), out)
@onlyCUDA
@dtypes(torch.float16, torch.float32)
def test_prod_gpu(self, device, dtype):
    """prod supports every fp16/fp32 input/output dtype combination on CUDA."""
    x = torch.tensor([2, 3, 6, 9, 8], dtype=dtype, device=device)
    # 2*3*6*9*8 == 2592, exactly representable in both fp16 and fp32.
    for out_dtype in (torch.float16, torch.float32):
        expected = torch.tensor(2592, dtype=out_dtype, device=device)
        self.assertEqual(torch.prod(x, dtype=out_dtype), expected)
        self.assertEqual(x.prod(dtype=out_dtype), expected)
@onlyCPU
@dtypes(torch.float)
def test_prod(self, device, dtype):
    """prod(dim, out=) matches the functional result."""
    x = torch.rand(100, 100, dtype=dtype, device=device)
    out = torch.tensor((), dtype=dtype, device=device)
    torch.prod(x, 1, out=out)
    self.assertEqual(torch.prod(x, 1), out)
def test_prod_bool(self, device):
vals = [[True, True], [True, False], [False, False], []]
for val in vals:
result = torch.prod(torch.tensor(val, device=device), dtype=torch.bool).item()
expect = np.prod(np.array(val), dtype=np.bool)
self.assertEqual(result, expect)
result = torch.prod(torch.tensor(val, device=device)).item()
expect = np.prod(np.array(val))
self.assertEqual(result, expect)
@onlyCPU
def test_max_mixed_devices(self, device):
a = torch.randn(10, device=device)
if torch.cuda.is_available():
values = torch.randn(10).cuda()
indices = torch.cuda.LongTensor()
self.assertRaises(RuntimeError,
lambda: torch.max(a, 0, out=(values, indices)))
self.assertRaises(RuntimeError,
lambda: torch.amax(a, 0, out=values))
@onlyCPU
def test_min_mixed_devices(self, device):
a = torch.randn(10, device=device)
if torch.cuda.is_available():
values = torch.randn(10).cuda()
indices = torch.cuda.LongTensor()
self.assertRaises(RuntimeError,
lambda: torch.min(a, 0, out=(values, indices)))
self.assertRaises(RuntimeError,
lambda: torch.amin(a, 0, out=values))
    # TODO: consider refactoring with bincount test
    def test_bucketization(self, device):
        """Exercise torch.bucketize / torch.searchsorted against hand-computed
        expectations: multi-dim values, out= variants, right/side semantics,
        empty and NaN inputs, type promotion, scalar inputs, and error paths.
        """
        values_1d = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9], device=device)
        values_3d = torch.tensor([[[1, 3, 5], [2, 4, 6]], [[1, 2, 3], [4, 5, 6]]], device=device)

        # simple 1d boundary and 3d input value
        boundaries = torch.tensor([1, 2, 3, 4, 5, 6], device=device)
        expected_result = torch.tensor([[[0, 2, 4], [1, 3, 5]], [[0, 1, 2], [3, 4, 5]]], device=device)
        output = torch.empty(2, 2, 3, device=device, dtype=torch.int64)
        self.assertEqual(torch.bucketize(values_3d, boundaries), expected_result)
        self.assertEqual(torch.bucketize(values_3d, boundaries, out=output), expected_result)
        # right=True yields the upper insertion point for exact matches
        expected_result = torch.tensor([[[1, 3, 5], [2, 4, 6]], [[1, 2, 3], [4, 5, 6]]], device=device)
        self.assertEqual(torch.bucketize(values_3d, boundaries, right=True), expected_result)
        self.assertEqual(torch.bucketize(values_3d, boundaries, out=output, right=True), expected_result)

        # simple float 1d boundary and 1d input with output int32 type
        for dtype in [torch.float32, torch.float16]:
            values_1d_float = values_1d.to(dtype)
            boundaries = torch.tensor([0.9, 1, 2, 2, 3, 3, 4, 4.1, 9, 9], device=device, dtype=dtype)
            expected_result = torch.tensor([1, 2, 4, 6, 8, 8, 8, 8, 8], device=device, dtype=torch.int32)
            self.assertEqual(torch.searchsorted(boundaries, values_1d_float, out_int32=True), expected_result)
            self.assertEqual(torch.bucketize(values_1d_float, boundaries, out_int32=True), expected_result)

        # multiple dimension input with 0 elements
        boundaries = torch.tensor([1, 2, 3, 4, 5, 6], device=device, dtype=torch.int64)
        values_0_el = torch.tensor([[[]]], device=device, dtype=torch.int64)
        expected_result = values_0_el.to(torch.int64)
        self.assertEqual(torch.searchsorted(boundaries, values_0_el), expected_result)
        self.assertEqual(torch.bucketize(values_0_el, boundaries), expected_result)

        # nan input — NaN is placed after every boundary (index 4)
        values_nan = torch.tensor([1.0, float('nan'), 2.0, float('nan')], device=device, dtype=torch.float64)
        boundaries = torch.tensor([0.0, 1.0, 2.0, 3.0], device=device, dtype=torch.float64)
        expected_result = torch.tensor([1, 4, 2, 4], device=device)
        self.assertEqual(torch.searchsorted(boundaries, values_nan), expected_result)
        expected_result = torch.tensor([2, 4, 3, 4], device=device)
        self.assertEqual(torch.searchsorted(boundaries, values_nan, right=True), expected_result)
        # side='right' is the string spelling of right=True
        self.assertEqual(torch.searchsorted(boundaries, values_nan, side='right'), expected_result)

        # type promotion and non contiguous tensors
        values_3d_permute = values_3d.permute(2, 1, 0).to(torch.int32)
        boundaries_permute = values_3d.permute(2, 1, 0).to(torch.float64)
        expected_result = torch.tensor([[[0, 0], [0, 1]], [[2, 0], [0, 1]], [[2, 0], [0, 0]]], device=device)
        if self.device_type != 'xla':
            self.assertWarnsRegex(
                UserWarning, "tensor is non-contiguous",
                lambda: self.assertEqual(torch.searchsorted(boundaries_permute, values_3d_permute), expected_result))
        else:
            # All tensors in XLA is contiguous even doing permute, no warning msg will be generate in XLA
            self.assertEqual(torch.searchsorted(boundaries_permute, values_3d_permute), expected_result)

        # scalar type
        boundaries = torch.tensor([1.5, 2.5, 3.5], device=device)
        expected_result = torch.tensor(1, device=device)
        self.assertEqual(torch.searchsorted(boundaries, 2), expected_result)
        self.assertEqual(torch.bucketize(torch.tensor(2, device=device), boundaries), expected_result)
        expected_result = torch.tensor(3, device=device)
        scalar_tensor_nan = torch.tensor(float('nan'), device=device)
        self.assertEqual(torch.searchsorted(boundaries, scalar_tensor_nan), expected_result)
        self.assertEqual(torch.bucketize(float('nan'), boundaries, right=True), expected_result)

        # invalid input dimensions
        boundaries = torch.tensor([[1, 2, 3], [4, 5, 6]], device=device)
        with self.assertRaisesRegex(
                RuntimeError, "first N-1 dimensions of boundaries tensor and input value tensor must match"):
            torch.searchsorted(boundaries, values_3d)
        with self.assertRaisesRegex(
                RuntimeError, "boundaries tensor must be 1 dimension"):
            torch.bucketize(values_3d, boundaries)
        with self.assertRaisesRegex(
                RuntimeError, "only when boundaries tensor dimension is 1"):
            torch.searchsorted(boundaries, 1)

        # incompatible output tensor's dtype
        def test_output_dtype(dtype, is_int32):
            # out= with a mismatched dtype must be rejected
            output = values_1d.to(dtype)
            with self.assertRaisesRegex(
                    RuntimeError, "output tensor's dtype is wrong"):
                torch.searchsorted(values_1d, values_1d, out=output, out_int32=is_int32)

        test_output_dtype(torch.float32, False)
        test_output_dtype(torch.int32, False)
        test_output_dtype(torch.int64, True)

        # invalid side argument
        with self.assertRaisesRegex(RuntimeError, "side can only be 'left' or 'right'"):
            torch.searchsorted(values_1d, values_1d, side='bad')

        # invalid sorter argument, wrong size
        with self.assertRaisesRegex(RuntimeError, "boundary and sorter must have the same size"):
            sequence = torch.rand_like(values_1d, dtype=torch.float)
            _, sorted_idx = torch.sort(sequence)
            torch.searchsorted(sequence, values_1d, sorter=sorted_idx[:-1])

        # invalid sorter argument, is not dtype long
        with self.assertRaisesRegex(RuntimeError, "sorter must be a tensor of long dtype"):
            sequence = torch.rand_like(values_1d, dtype=torch.float)
            _, sorted_idx = torch.sort(sequence)
            torch.searchsorted(sequence, values_1d, sorter=sorted_idx.to(torch.float32))

        # scalar type bfloat16
        if self.device_type == 'cpu':
            def test_dtype_bfloat16(values_bf16=False, boundaries_bf16=False):
                # bfloat16 on either side still buckets like float32 here
                values_1d_float = values_1d.to(torch.float32)
                boundaries = torch.tensor([0.9, 1, 2, 2, 3, 3, 4, 4.1, 9, 9], device=device, dtype=torch.float32)
                if values_bf16:
                    values_1d_float = values_1d_float.to(torch.bfloat16)
                if boundaries_bf16:
                    boundaries = boundaries.to(torch.bfloat16)
                expected_result = torch.tensor([1, 2, 4, 6, 8, 8, 8, 8, 8], device=device, dtype=torch.int32)
                self.assertEqual(torch.bucketize(values_1d_float, boundaries, out_int32=True), expected_result)

            test_dtype_bfloat16(True, False)
            test_dtype_bfloat16(False, True)
            test_dtype_bfloat16(True, True)
    @dtypes(*all_types_and(torch.half, torch.bfloat16))
    def test_nansum(self, device, dtype):
        """nansum equals a plain sum over the tensor with NaNs replaced by zero."""
        args = product(
            (True, False),  # noncontiguous
            (0, 1, None),  # dim
        )
        zero = torch.zeros((), device=device, dtype=dtype)
        for noncontiguous, dim in args:
            # Randomly scale the values
            scale = random.randint(10, 100)
            x = make_tensor((17, 17), device=device, dtype=dtype,
                            low=-scale, high=scale, noncontiguous=noncontiguous)
            if dtype.is_floating_point:
                # Poke NaNs into roughly the lower fifth of the value range and
                # keep a NaN-free copy (NaNs replaced with zero) as reference.
                nan_mask = x < 0.2 * scale
                x_nonan = torch.where(nan_mask, zero, x)
                x[nan_mask] = np.nan
            else:
                # Integral tensors cannot hold NaN; nansum degenerates to sum.
                x_nonan = x
            dim_kwargs = {} if dim is None else {"dim": dim}
            expect = torch.sum(x_nonan, **dim_kwargs)
            actual = torch.nansum(x, **dim_kwargs)
            self.assertEqual(expect, actual)
    def _test_reduction_function_with_numpy(self, torch_func, np_func, device, dtype,
                                            with_extremal=False, atol=None, rtol=None,
                                            exact_dtype=True, with_keepdim=False):
        """Compare a torch reduction against its NumPy counterpart.

        Sweeps 0-d through 3-d random inputs and, for each, every combination
        and ordering of reduction dims; the empty combination exercises the
        default ``dim=None`` path. ``atol``/``rtol``/``exact_dtype`` are
        forwarded to ``compare_with_numpy``.
        """
        # Test 0-d to 3-d tensors.
        for ndims in range(0, 4):
            shape = _rand_shape(ndims, min_size=5, max_size=10)
            for n in range(ndims + 1):
                for c in combinations(list(range(ndims)), n):
                    for count_dim in permutations(c):
                        # Generate Input.
                        x = _generate_input(shape, dtype, device, with_extremal)
                        if count_dim == ():
                            # Default `dims=None` case
                            self.compare_with_numpy(torch_func, np_func, x, device=None, dtype=None,
                                                    atol=atol, rtol=rtol, exact_dtype=exact_dtype)
                        else:
                            # With `dims: tuple of ints` case
                            if with_keepdim:
                                # NumPy spells it `keepdims`, torch `keepdim`.
                                torch_func_partial = partial(torch_func, keepdim=True, dim=count_dim)
                                np_func_partial = partial(np_func, keepdims=True, axis=count_dim)
                            else:
                                torch_func_partial = partial(torch_func, dim=count_dim)
                                np_func_partial = partial(np_func, axis=count_dim)
                            self.compare_with_numpy(torch_func_partial, np_func_partial, x, device=None, dtype=None,
                                                    atol=atol, rtol=rtol, exact_dtype=exact_dtype)
@dtypes(*all_types_and_complex_and(torch.half))
def test_count_nonzero(self, device, dtype):
self._test_reduction_function_with_numpy(torch.count_nonzero, np.count_nonzero, device, dtype)
self._test_reduction_function_with_numpy(torch.count_nonzero, np.count_nonzero, device, dtype, True)
def _test_sum_reduction_vs_numpy(self, torch_fn, np_fn, device, dtype, with_keepdim=False, with_extremal=False):
def is_integral(dtype):
return dtype in integral_types()
# On Windows CI, the current version of `numpy` promotes all lower integers
# dtypes to int32 while `torch` promotes them to int64. Hence we skip on checking
# the exact dtype.
# Reference : https://dr.pytorch.org/api/view-log-full?build_id=122051580
# PR : https://github.com/pytorch/pytorch/pull/38628#issuecomment-655905370
exact_dtype = False if (IS_WINDOWS and is_integral(dtype)) else True
if dtype == torch.uint8:
with self.assertRaises(TypeError):
self._test_reduction_function_with_numpy(torch_fn, np_fn, device, dtype, with_extremal=with_extremal)
else:
# TODO: Investigate why the output is not close to numpy.
if dtype == torch.float16:
atol = 0.4
rtol = 1e-2
elif dtype == torch.float32:
atol = 7e-05
rtol = 3e-06
else:
# Default values
atol = None
rtol = None
self._test_reduction_function_with_numpy(torch_fn, np_fn, device, dtype,
atol=atol, rtol=rtol, exact_dtype=exact_dtype,
with_keepdim=with_keepdim, with_extremal=with_extremal)
@onlyNativeDeviceTypes
@dtypes(*all_types_and(torch.half))
def test_sum_vs_numpy(self, device, dtype):
self._test_sum_reduction_vs_numpy(torch.sum, np.sum, device, dtype)
self._test_sum_reduction_vs_numpy(torch.sum, np.sum, device, dtype, with_extremal=True)
self._test_sum_reduction_vs_numpy(torch.sum, np.sum, device, dtype, with_keepdim=True)
@onlyNativeDeviceTypes
@dtypes(*all_types_and(torch.half))
def test_nansum_vs_numpy(self, device, dtype):
self._test_sum_reduction_vs_numpy(torch.nansum, np.nansum, device, dtype)
self._test_sum_reduction_vs_numpy(torch.nansum, np.nansum, device, dtype, with_extremal=True)
self._test_sum_reduction_vs_numpy(torch.nansum, np.nansum, device, dtype, with_keepdim=True)
@dtypes(*complex_types())
def test_nansum_complex(self, device, dtype):
x = torch.randn((3, 3, 3), device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "nansum does not support complex inputs"):
torch.nansum(x)
@dtypes(*all_types_and(torch.half))
def test_nansum_out_dtype(self, device, dtype):
out_dtype = dtype
inp_dtypes = all_types_and(torch.half) if out_dtype.is_floating_point else integral_types()
for inp_dtype in inp_dtypes:
shape = _rand_shape(random.randint(2, 5), min_size=5, max_size=10)
x = _generate_input(shape, inp_dtype, device, with_extremal=False)
torch_fn = partial(torch.nansum, dtype=out_dtype)
np_out_dtype = torch_to_numpy_dtype_dict[out_dtype]
np_fn = partial(np.nansum, dtype=np_out_dtype)
self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)
@dtypes(*all_types_and(torch.half))
def test_argminmax_multiple(self, device, dtype):
# Case: All Ones
t = torch.ones(3, 3, device=device, dtype=dtype)
self.compare_with_numpy(torch.argmax, np.argmax, t)
self.compare_with_numpy(torch.argmin, np.argmin, t)
# Case: With single `nan` present.
if dtype in floating_types_and(torch.half, torch.bfloat16):
t[2, 2] = float('nan')
self.compare_with_numpy(torch.argmax, np.argmax, t)
self.compare_with_numpy(torch.argmin, np.argmin, t)
# Case: Randomly Generated Tensors
for ndims in range(1, 5):
shape = _rand_shape(ndims, min_size=5, max_size=10)
for with_extremal in [False, True]:
for contiguous in [False, True]:
# Generate Input.
x = _generate_input(shape, dtype, device, with_extremal)
if dtype == torch.half:
max_val = torch.max(x.to(torch.float))
min_val = torch.min(x.to(torch.float))
else:
max_val = torch.max(x)
min_val = torch.min(x)
mask = torch.randn(x.shape) > 0.5
x[mask] = torch.tensor(max_val + 1, dtype=dtype)
mask = torch.randn(x.shape) > 0.5
x[mask] = torch.tensor(min_val - 1, dtype=dtype)
if not contiguous:
x = x.T
self.compare_with_numpy(torch.argmax, np.argmax, x, device=None, dtype=None)
self.compare_with_numpy(torch.argmin, np.argmin, x, device=None, dtype=None)
# Verify indices returned by max and min.
if dtype != torch.half:
rand_dim = random.randint(0, ndims - 1)
self.compare_with_numpy(lambda x: torch.max(x, dim=rand_dim)[1],
lambda x: np.argmax(x, axis=rand_dim), x, device=None, dtype=None)
self.compare_with_numpy(lambda x: torch.min(x, dim=rand_dim)[1],
lambda x: np.argmin(x, axis=rand_dim), x, device=None, dtype=None)
def verify_against_numpy(t):
# Argmax
torch_fn = partial(torch.argmax, dim=1)
np_fn = partial(np.argmax, axis=1)
self.compare_with_numpy(torch_fn, np_fn, t)
# Non-contiguous input
self.compare_with_numpy(torch_fn, np_fn, t.T)
# Verify indices returned by max.
if dtype != torch.half:
self.compare_with_numpy(lambda x: torch.max(x, dim=1)[1], np_fn, x, device=None, dtype=None)
self.compare_with_numpy(lambda x: torch.max(x, dim=1)[1], np_fn, x.T, device=None, dtype=None)
# Argmin
torch_fn = partial(torch.argmin, dim=1)
np_fn = partial(np.argmin, axis=1)
self.compare_with_numpy(torch_fn, np_fn, t)
# Non-contiguous input
self.compare_with_numpy(torch_fn, np_fn, t.T)
# Verify indices returned by min.
if dtype != torch.half:
self.compare_with_numpy(lambda x: torch.min(x, dim=1)[1], np_fn, x, device=None, dtype=None)
self.compare_with_numpy(lambda x: torch.min(x, dim=1)[1], np_fn, x.T, device=None, dtype=None)
# Case: Sample from issue: https://github.com/pytorch/pytorch/issues/41998
t = torch.tensor([[1, 5],
[2, 10],
[3, 3]], device=device, dtype=dtype)
verify_against_numpy(t)
# Case: Sample from issue: https://github.com/pytorch/pytorch/issues/41998
t = torch.tensor([[1, 5],
[2, 10],
[0, 0]], device=device, dtype=dtype)
verify_against_numpy(t)
@dtypes(*all_types_and_complex_and(torch.half, torch.bool))
def test_all_any_vs_numpy(self, device, dtype):
# Note [all, any uint8 compatibility]: However for compatibility reason,
# for `uint8`, they return Tensor of same dtype `uint8`.
# Reference: https://github.com/pytorch/pytorch/pull/47878#issuecomment-747108561
exact_dtype = True if dtype != torch.uint8 else False
def _test_all_any(x):
self.compare_with_numpy(torch.all, np.all, x)
self.compare_with_numpy(torch.any, np.any, x)
def _test_all_any_with_dim(x, dim):
torch_fn = partial(torch.all, dim=dim)
np_fn = partial(np.all, axis=dim)
self.compare_with_numpy(torch_fn, np_fn, x, exact_dtype=exact_dtype)
torch_fn = partial(torch.any, dim=dim)
np_fn = partial(np.any, axis=dim)
self.compare_with_numpy(torch_fn, np_fn, x, exact_dtype=exact_dtype)
def _test_out_variant(x, dim):
out = torch.empty_like(x)
if dtype == torch.bool or dtype == torch.uint8:
expected = torch.all(x, dim)
torch.all(x, dim, out=out)
self.assertEqual(expected, out)
expected = torch.any(x, dim)
torch.any(x, dim, out=out)
self.assertEqual(expected, out)
else:
with self.assertRaisesRegex(RuntimeError, "all only supports bool tensor for result, got"):
torch.all(x, dim, out=out)
with self.assertRaisesRegex(RuntimeError, "any only supports bool tensor for result, got"):
torch.any(x, dim, out=out)
def _test_all_any_with_dim_keepdim(x, dim, keepdim):
torch_fn = partial(torch.all, dim=dim, keepdim=keepdim)
np_fn = partial(np.all, axis=dim, keepdims=keepdim)
self.compare_with_numpy(torch_fn, np_fn, x, exact_dtype=exact_dtype)
torch_fn = partial(torch.any, dim=dim, keepdim=keepdim)
np_fn = partial(np.any, axis=dim, keepdims=keepdim)
self.compare_with_numpy(torch_fn, np_fn, x, exact_dtype=exact_dtype)
def _test_output_dtype(x):
# This test will fail once the functions return bool output
# for uint8 input.
expected_dtype = torch.uint8 if dtype == torch.uint8 else torch.bool
self.assertEqual(torch.all(x).dtype, expected_dtype)
self.assertEqual(torch.any(x).dtype, expected_dtype)
self.assertEqual(torch.all(x, dim=0).dtype, expected_dtype)
self.assertEqual(torch.any(x, dim=0).dtype, expected_dtype)
for ndim in range(5):
shape = _rand_shape(ndim, 1, 5)
x = _generate_input(shape, dtype, device, with_extremal=False)
_test_all_any(x)
_test_all_any(x.T)
_test_all_any(x[..., ::2])
x = _generate_input(shape, dtype, device, with_extremal=True)
_test_all_any(x)
_test_all_any(x.T)
_test_all_any(x[..., ::2])
x = torch.zeros_like(x)
_test_all_any(x)
_test_all_any(x.T)
_test_all_any(x[..., ::2])
x = torch.ones_like(x)
_test_all_any(x)
_test_all_any(x.T)
_test_all_any(x[..., ::2])
_test_output_dtype(x)
for dim in range(ndim):
x = _generate_input(shape, dtype, device, with_extremal=False)
_test_all_any_with_dim(x, dim)
_test_all_any_with_dim(x.T, dim)
_test_all_any_with_dim(x[..., ::2], dim)
_test_out_variant(x, dim)
_test_all_any_with_dim_keepdim(x, dim, keepdim=True)
_test_all_any_with_dim_keepdim(x, dim, keepdim=False)
x = _generate_input(shape, dtype, device, with_extremal=True)
_test_all_any_with_dim(x, dim)
_test_all_any_with_dim(x.T, dim)
_test_all_any_with_dim(x[..., ::2], dim)
_test_out_variant(x, dim)
_test_all_any_with_dim_keepdim(x, dim, keepdim=True)
_test_all_any_with_dim_keepdim(x, dim, keepdim=False)
x = torch.zeros_like(x)
_test_all_any_with_dim(x, dim)
_test_all_any_with_dim(x.T, dim)
_test_all_any_with_dim(x[..., ::2], dim)
_test_out_variant(x, dim)
_test_all_any_with_dim_keepdim(x, dim, keepdim=True)
_test_all_any_with_dim_keepdim(x, dim, keepdim=False)
x = torch.ones_like(x)
_test_all_any_with_dim(x, dim)
_test_all_any_with_dim(x.T, dim)
_test_all_any_with_dim(x[..., ::2], dim)
_test_out_variant(x, dim)
_test_all_any_with_dim_keepdim(x, dim, keepdim=True)
_test_all_any_with_dim_keepdim(x, dim, keepdim=False)
# TODO: part of this test covers torch.norm, with should be covered by test_linalg
@onlyNativeDeviceTypes
def test_repeated_dim(self, device):
ops = [torch.mean, torch.sum, torch.nansum, torch.std, torch.logsumexp, torch.std, torch.var,
torch.norm]
x = torch.randn(3, 3, 3, 3, device=device)
error_msg = r'appears multiple times in the list of dims'
norm_error_msg = r'Expected dims to be different, got'
for op in ops:
for dim in [(0, 0), (0, -4)]:
e_msg = norm_error_msg if op == torch.norm else error_msg
with self.assertRaisesRegex(RuntimeError, e_msg):
op(x, dim=dim)
# TODO: update this test to comapre against NumPy
@onlyCUDA
def test_var(self, device):
cpu_tensor = torch.randn(2, 3, 3)
device_tensor = cpu_tensor.to(device)
self.assertEqual(device_tensor.var(), cpu_tensor.var())
self.assertEqual(device_tensor.var(1), cpu_tensor.var(1))
self.assertEqual(device_tensor.var(2), cpu_tensor.var(2))
self.assertEqual(device_tensor.std(), cpu_tensor.std())
self.assertEqual(device_tensor.std(1), cpu_tensor.std(1))
self.assertEqual(device_tensor.var(2), cpu_tensor.var(2))
cpu_tensor = torch.randn(100)
device_tensor = cpu_tensor.to(device)
self.assertEqual(device_tensor.var(), cpu_tensor.var())
# TODO: update this test to compare against NumPy
@onlyCUDA
def test_var_large_input(self, device):
# Large, not-nice input
cpu_tensor = torch.randn(2 * 32 * 1024 + 1, 2, 67)
device_tensor = cpu_tensor.to(device)
self.assertEqual(cpu_tensor.var(2), device_tensor.var(2))
# TODO: update this to compare against NumPy instead of CPU
@onlyCUDA
@dtypes(torch.double)
def test_sum_noncontig(self, device, dtype):
x = torch.randn(1, 75, 57, 20, dtype=dtype, device=device).permute(0, 3, 1, 2)
y = x.cpu()
self.assertEqual(x.sum().cpu(), y.sum())
self.assertEqual(x.sum(dim=(-1, -2)).cpu(), y.sum(dim=(-1, -2)))
self.assertEqual(x.sum(dim=(1, 3)).cpu(), y.sum(dim=(1, 3)))
# TODO: update this to compare against NumPy instead of CPU
@onlyCUDA
def test_min_max_nan(self, device):
tests = [(lambda x: x.min(), 'min'),
(lambda x: x.max(), 'max'),
(lambda x: x.amin(), 'amin'),
(lambda x: x.amax(), 'amax'),
(lambda x: x.min(0).values, 'min_dim'),
(lambda x: x.max(0).values, 'max_dim'),
(lambda x: x.amin(0), 'amin_dim'),
(lambda x: x.amax(0), 'amax_dim')]
for f, name in tests:
a = torch.arange(25.0).view(5, 5)
a[2, 2] = nan
actual = f(a.to(device)).cpu()
expected = f(a).cpu()
self.assertEqual(torch.isnan(actual), torch.isnan(expected), msg='nans for {}'.format(name))
self.assertEqual(actual[~torch.isnan(actual)],
expected[~torch.isnan(expected)], msg='nans for {}'.format(name))
# TODO: make this test generic using OpInfos
@onlyCUDA
def test_sum_cpu_device_mismatch(self, device):
x = torch.randn(20, dtype=torch.float32, device=device)
y = torch.randn(1, dtype=torch.float32)
err_string = f"Expected out tensor to have device {device}, but got cpu instead"
with self.assertRaisesRegex(RuntimeError, err_string):
torch.sum(x, dim=[0], dtype=torch.float32, out=y)
# tests half to float promotion
if self.device_type == 'cuda':
x = x.half()
with self.assertRaisesRegex(RuntimeError, err_string):
torch.sum(x, dim=[0], dtype=torch.float32, out=y)
# Assert for illegal dtype would not be raised on XLA
@onlyNativeDeviceTypes
def test_minmax_illegal_dtype(self, device):
x = torch.randn(5, 5, dtype=torch.float32, device=device)
valid_values = torch.empty(5, dtype=torch.float32, device=device)
valid_indices = torch.empty(5, dtype=torch.long, device=device)
illegal_values = torch.empty(5, dtype=torch.int, device=device)
illegal_indices = torch.empty(5, dtype=torch.double, device=device)
torch.max(x, dim=0, out=(valid_values, valid_indices))
torch.min(x, dim=0, out=(valid_values, valid_indices))
torch.amax(x, dim=0, out=valid_values)
torch.amin(x, dim=0, out=valid_values)
rmsg = r'scalar type|dtype'
with self.assertRaisesRegex(RuntimeError, rmsg):
torch.max(x, dim=0, out=(illegal_values, valid_indices))
with self.assertRaisesRegex(RuntimeError, rmsg):
torch.min(x, dim=0, out=(illegal_values, valid_indices))
with self.assertRaisesRegex(RuntimeError, rmsg):
torch.max(x, dim=0, out=(valid_values, illegal_indices))
with self.assertRaisesRegex(RuntimeError, rmsg):
torch.min(x, dim=0, out=(valid_values, illegal_indices))
with self.assertRaisesRegex(RuntimeError, rmsg):
torch.max(x, dim=0, out=(illegal_values, illegal_indices))
with self.assertRaisesRegex(RuntimeError, rmsg):
torch.min(x, dim=0, out=(illegal_values, illegal_indices))
@dtypes(*all_types_and(torch.half, torch.bfloat16))
def test_dim_arg_reduction_scalar(self, device, dtype):
example = 4.0
x = torch.tensor(example, device=device, dtype=dtype)
self.assertEqual(x.argmax().item(), 0)
self.assertEqual(x.argmax(dim=None).item(), 0)
self.assertEqual(x.argmax(dim=0).item(), 0)
self.assertEqual(x.argmax(dim=0, keepdim=True), torch.tensor(0, dtype=torch.int64))
x = torch.tensor(example, device=device, dtype=dtype)
self.assertEqual(x.argmin().item(), 0)
self.assertEqual(x.argmin(dim=None).item(), 0)
self.assertEqual(x.argmin(dim=0).item(), 0)
self.assertEqual(x.argmin(dim=0, keepdim=True), torch.tensor(0, dtype=torch.int64))
    @precisionOverride({torch.float16: 1e-2, torch.bfloat16: 1e-2})
    @dtypes(*set(all_types_and(torch.half, torch.bfloat16)) - {torch.uint8})
    def test_dim_reduction(self, device, dtype):
        """Smoke-test dim reductions (sum/mean/prod/min/max/arg*/a* and friends)
        on a small fixed example, including keepdim, out= and non-contiguous
        inputs, then sweep a list of reduction names generically.
        """
        example = [[-1, 2, 1], [5, 3, 6]]

        # Accumulation dtype torch uses for a sum over each input dtype
        # (small integer types promote to int64).
        sum_dtype = {
            torch.bfloat16: torch.bfloat16,
            torch.double: torch.double,
            torch.float: torch.float,
            torch.half: torch.half,
            torch.int64: torch.int64,
            torch.int32: torch.int64,
            torch.int16: torch.int64,
            torch.int8: torch.int64
        }

        # This won't test for 256bit instructions, since we usually
        # only work on 1 cacheline (512bit) at a time and these
        # examples aren't big enough to trigger that.
        x = torch.tensor(example, device=device, dtype=dtype)
        self.assertEqual(x.sum().item(), 16)
        self.assertEqual(x.sum(0), torch.tensor([4, 5, 7], dtype=sum_dtype[dtype]))
        self.assertEqual(x.sum(1), torch.tensor([2, 14], dtype=sum_dtype[dtype]))
        y = torch.tensor(example, device=device, dtype=sum_dtype[dtype])
        torch.sum(x, 0, out=y)
        self.assertEqual(x.sum(0), y)

        # Mean not supported for Int types
        if dtype in [torch.float16, torch.bfloat16, torch.float32, torch.float64]:
            x = torch.tensor(example, device=device, dtype=dtype)
            self.assertEqual(x.mean().item(), 16.0 / 6)
            self.assertEqual(x.mean(0), torch.tensor([2.0, 2.5, 7.0 / 2], dtype=dtype))
            self.assertEqual(x.mean(1), torch.tensor([2.0 / 3, 14.0 / 3], dtype=dtype))
            self.assertEqual(x.mean(), x.mean((0, 1)))

        # Accumulation dtype for prod, analogous to sum_dtype above.
        prod_dtype = {
            torch.bfloat16: torch.bfloat16,
            torch.double: torch.double,
            torch.float: torch.float,
            torch.float16: torch.float16,
            torch.int64: torch.int64,
            torch.int32: torch.int64,
            torch.int16: torch.int64,
            torch.int8: torch.int64,
        }

        # prod is not supported for float16 & bfloat16 on CPU
        if not (self.device_type == 'cpu' and dtype in [torch.float16, torch.bfloat16]):
            x = torch.tensor(example, device=device, dtype=dtype)
            self.assertEqual(x.prod().item(), -180)
            self.assertEqual(x.prod(0), torch.tensor([-5, 6, 6], dtype=prod_dtype[dtype]))
            self.assertEqual(x.prod(1), torch.tensor([-2, 90], dtype=prod_dtype[dtype]))

        x = torch.tensor(example, device=device, dtype=dtype)

        self.assertEqual(x.min().item(), -1)
        self.assertEqual(x.argmin().item(), 0)

        # TODO: torch.min does not support the same operation as argmin
        # for the same case, should we enable it?
        self.assertEqual(x.argmin(dim=None).item(), 0)

        self.assertEqual(x.min(0), (torch.tensor([-1, 2, 1], dtype=dtype),
                                    torch.tensor([0, 0, 0], dtype=torch.int64)))
        self.assertEqual(x.amin(0), torch.tensor([-1, 2, 1], dtype=dtype))
        self.assertEqual(x.argmin(0), torch.tensor([0, 0, 0], dtype=torch.int64))

        self.assertEqual(x.min(dim=0, keepdim=True), (torch.tensor([[-1, 2, 1]], dtype=dtype),
                                                      torch.tensor([[0, 0, 0]], dtype=torch.int64)))
        self.assertEqual(x.amin(dim=0, keepdim=True), torch.tensor([[-1, 2, 1]], dtype=dtype))
        self.assertEqual(x.argmin(dim=0, keepdim=True), torch.tensor([[0, 0, 0]], dtype=torch.int64))

        self.assertEqual(x.min(1), (torch.tensor([-1, 3], dtype=dtype),
                                    torch.tensor([0, 1], dtype=torch.int64)))
        self.assertEqual(x.amin(1), torch.tensor([-1, 3], dtype=dtype))
        self.assertEqual(x.argmin(1), torch.tensor([0, 1], dtype=torch.int64))

        self.assertEqual(x.min(dim=1, keepdim=True), (torch.tensor([[-1], [3]], dtype=dtype),
                                                      torch.tensor([[0], [1]], dtype=torch.int64)))
        self.assertEqual(x.amin(dim=1, keepdim=True), torch.tensor([[-1], [3]], dtype=dtype))
        self.assertEqual(x.argmin(dim=1, keepdim=True), torch.tensor([[0], [1]], dtype=torch.int64))

        # test that non-contiguous tensors work
        self.assertEqual(x[:, :2].min().item(), -1)
        self.assertEqual(x[:, :2].amin().item(), -1)
        self.assertEqual(x[:, :2].argmin().item(), 0)

        x = torch.tensor(example, device=device, dtype=dtype)

        self.assertEqual(x.max().item(), 6)
        self.assertEqual(x.amax().item(), 6)
        self.assertEqual(x.argmax().item(), 5)

        self.assertEqual(x.max(0), (torch.tensor([5, 3, 6], dtype=dtype),
                                    torch.tensor([1, 1, 1], dtype=torch.int64)))
        self.assertEqual(x.amax(0), torch.tensor([5, 3, 6], dtype=dtype))
        self.assertEqual(x.argmax(dim=0), torch.tensor([1, 1, 1], dtype=torch.int64))

        self.assertEqual(x.max(dim=0, keepdim=True), (torch.tensor([[5, 3, 6]], dtype=dtype),
                                                      torch.tensor([[1, 1, 1]], dtype=torch.int64)))
        self.assertEqual(x.amax(dim=0, keepdim=True), torch.tensor([[5, 3, 6]], dtype=dtype))
        self.assertEqual(x.argmax(dim=0, keepdim=True), torch.tensor([[1, 1, 1]], dtype=torch.int64))

        self.assertEqual(x.max(1), (torch.tensor([2, 6], dtype=dtype),
                                    torch.tensor([1, 2], dtype=torch.int64)))
        self.assertEqual(x.amax(1), torch.tensor([2, 6], dtype=dtype))
        self.assertEqual(x.argmax(dim=1), torch.tensor([1, 2], dtype=torch.int64))

        self.assertEqual(x.max(1, keepdim=True), (torch.tensor([[2], [6]], dtype=dtype),
                                                  torch.tensor([[1], [2]], dtype=torch.int64)))
        self.assertEqual(x.amax(1, keepdim=True), torch.tensor([[2], [6]], dtype=dtype))
        self.assertEqual(x.argmax(dim=1, keepdim=True), torch.tensor([[1], [2]], dtype=torch.int64))

        # test that non-contiguous tensors work
        self.assertEqual(x[:, :2].max().item(), 5)
        self.assertEqual(x[:, :2].amax().item(), 5)
        self.assertEqual(x[:, :2].argmax().item(), 2)

        dim_red_fns = [
            "mean", "median", "nanmedian", "mode", "norm", "prod",
            "std", "sum", "var", "max", "min", "amax", "amin"]

        def normfn_attr(t, dim, keepdim=False, out=None):
            # torch.norm takes p as the second positional arg; fix p=2 so it
            # matches the generic (t, dim, keepdim, out) calling convention.
            attr = torch.norm
            return attr(t, 2, dim, keepdim, out=out)

        for fn_name in dim_red_fns:
            fn_attr = getattr(torch, fn_name) if fn_name != "norm" else normfn_attr

            def fn(x, dim, keepdim=False, out=None):
                # Normalize tuple-returning reductions (min/max/mode/...) to
                # their values tensor.
                ans = fn_attr(x, dim, keepdim=keepdim, out=out)
                return ans if not isinstance(ans, tuple) else ans[0]

            def fn_tuple(x, dim, keepdim=False, out=None):
                return fn_attr(x, dim, keepdim=keepdim, out=out)

            def test_multidim(x, dim):
                self.assertEqual(fn(x, dim).unsqueeze(dim), fn(x, dim, keepdim=True))
                self.assertEqual(x.ndimension() - 1, fn(x, dim).ndimension())
                self.assertEqual(x.ndimension(), fn(x, dim, keepdim=True).ndimension())

            # general case
            x = torch.randn(3, 4, 5, device=device)
            dim = random.randint(0, 2)
            test_multidim(x, dim)

            # check 1-d behavior
            x = torch.randn(1, device=device)
            dim = 0
            self.assertEqual(fn(x, dim).shape, ())
            self.assertEqual(fn(x, dim, keepdim=True).shape, (1,))

            # check reducing of a singleton dimension
            dims = [3, 4, 5]
            singleton_dim = random.randint(0, 2)
            dims[singleton_dim] = 1
            x = torch.randn(dims, device=device)
            test_multidim(x, singleton_dim)

            # check reducing with output kwargs
            if fn_name in ['median', 'nanmedian', 'mode', 'max', 'min']:
                # Tuple-returning reductions: check both out tensors (written
                # through non-contiguous column views).
                y = torch.randn(5, 3, device=device)
                values = torch.randn(5, 3, device=device)
                indices = torch.zeros(5, 3, device=device).long() - 1
                fn_tuple(y, 1, keepdim=False, out=(values[:, 1], indices[:, 1]))
                values_expected, indices_expected = fn_tuple(y, 1, keepdim=False)
                self.assertEqual(values[:, 1], values_expected,
                                 msg='{} values with out= kwarg'.format(fn_name))
                self.assertEqual(indices[:, 1], indices_expected,
                                 msg='{} indices with out= kwarg'.format(fn_name))
                continue

            x = torch.randn(5, 3, device=device)
            y = torch.randn(5, 3, device=device)
            fn(y, 1, keepdim=False, out=x[:, 1])
            expected = fn(y, 1, keepdim=False)
            self.assertEqual(x[:, 1], expected, msg='{} with out= kwarg'.format(fn_name))
@onlyCUDA
@largeTensorTest('10GB')
def test_reduction_split(self, device):
# Test reduction when there is a 32bit-indexing split
# https://github.com/pytorch/pytorch/issues/37583
input_ = torch.randn(5, 14400, 14400, device=device)
result = input_.sum(dim=0)
expect = input_[0] + input_[1] + input_[2] + input_[3] + input_[4]
self.assertEqual(result, expect)
    @onlyCUDA
    @dtypes(torch.half, torch.float, torch.double, torch.bfloat16)
    def test_reduction_vectorize_along_input_corner(self, device, dtype):
        """Corner cases for CUDA reductions over the input dimension: sizes
        just past a power of two, and views offset by a small shift, so the
        vectorized paths hit misaligned starts and ragged tails.
        """
        # 1D case: sum
        size = 1024 * 1024 * 64 + 3
        shift = 1
        x = torch.zeros(size, dtype=dtype, device=device)
        y = x[shift:]  # misaligned view sharing storage with x
        for i in range(100):
            x.zero_()
            x[i] = 1
            self.assertEqual(x.sum(), 1.0)
            if i < shift:
                # The hot element falls before the view's start.
                self.assertEqual(y.sum(), 0.0)
            else:
                self.assertEqual(y.sum(), 1.0)
        for i in range(1, 100):
            x.zero_()
            x[-i] = 1
            self.assertEqual(x.sum(), 1.0)
            self.assertEqual(y.sum(), 1.0)
        # 1D case: argmax
        size = 1024 * 1024 * 64 + 3
        shift = 1
        ysize = size - shift
        x = torch.zeros(size, dtype=dtype, device=device)
        y = x[shift:]
        for i in range(100):
            x.zero_()
            x[i] = 1
            self.assertEqual(x.argmax().item(), i)
            if i >= shift:
                # The view's index space is offset by `shift`.
                self.assertEqual(y.argmax().item(), i - shift)
        for i in range(1, 100):
            x.zero_()
            x[-i] = 1
            self.assertEqual(x.argmax().item(), size - i)
            self.assertEqual(y.argmax().item(), ysize - i)
        # 2D case: sum
        size = (7, 1024 * 1024 + 3)
        x = torch.zeros(size, dtype=dtype, device=device)
        for i in range(100):
            x.zero_()
            for j in range(7):
                x[j][i] = j
            xs = x.sum(dim=-1)
            for j in range(7):
                self.assertEqual(xs[j].item(), float(j))
        for i in range(100):
            x.zero_()
            for j in range(7):
                x[j][-i] = j
            xs = x.sum(dim=-1)
            for j in range(7):
                self.assertEqual(xs[j].item(), float(j))
        # 2D case: max/argmax
        size = (7, 1024 * 1024 + 3)
        x = torch.zeros(size, dtype=dtype, device=device)
        for i in range(100):
            x.zero_()
            for j in range(7):
                x[j][i] = j + 1
            xs1 = x.argmax(dim=-1)
            xs2 = x.max(dim=-1).indices
            for j in range(7):
                self.assertEqual(xs1[j].item(), i)
                self.assertEqual(xs2[j].item(), i)
        for i in range(1, 100):
            x.zero_()
            for j in range(7):
                x[j][-i] = j + 1
            xs1 = x.argmax(dim=-1)
            xs2 = x.max(dim=-1).indices
            for j in range(7):
                self.assertEqual(xs1[j].item(), size[1] - i)
                self.assertEqual(xs2[j].item(), size[1] - i)
        # 2D case: min/argmin
        size = (7, 1024 * 1024 + 3)
        x = torch.zeros(size, dtype=dtype, device=device)
        for i in range(100):
            x.zero_()
            for j in range(7):
                x[j][i] = -(j + 1)
            xs1 = x.argmin(dim=-1)
            xs2 = x.min(dim=-1).indices
            for j in range(7):
                self.assertEqual(xs1[j].item(), i)
                self.assertEqual(xs2[j].item(), i)
        for i in range(1, 100):
            x.zero_()
            for j in range(7):
                x[j][-i] = -(j + 1)
            xs1 = x.argmin(dim=-1)
            xs2 = x.min(dim=-1).indices
            for j in range(7):
                self.assertEqual(xs1[j].item(), size[1] - i)
                self.assertEqual(xs2[j].item(), size[1] - i)
@onlyCUDA
@dtypes(torch.half, torch.float, torch.double, torch.bfloat16)
def test_reduction_vectorize_along_output(self, device, dtype):
def run_test(input_):
M, N = input_.shape
input_.zero_()
for i in range(min(M, N)):
input_[i][i] = 1
output1 = input_.argmax(dim=0)
output2 = input_.sum(dim=0)
for i in range(min(M, N)):
self.assertEqual(output1[i], i)
self.assertEqual(output2[i], 1)
# vec 4
run_test(torch.zeros(64, 64, dtype=dtype, device=device))
# vec 2
run_test(torch.zeros(64 * 64 + 2, dtype=dtype, device=device)[2:].view(64, 64))
run_test(torch.zeros(64, 62, dtype=dtype, device=device))
run_test(torch.zeros(64, 2, dtype=dtype, device=device))
# vec 1
run_test(torch.zeros(64 * 64 + 1, dtype=dtype, device=device)[1:].view(64, 64))
run_test(torch.zeros(64, 61, dtype=dtype, device=device))
run_test(torch.zeros(64, 1, dtype=dtype, device=device))
@onlyCUDA
def test_argminmax_large_axis(self, device):
# Regression test for gh-32863
x = torch.zeros(2**31, device=device, dtype=torch.int8)
x[-1] = 1
self.assertEqual(x.argmax(0), x.shape[0] - 1)
self.assertEqual(x.max(0).indices, x.shape[0] - 1)
x[-1] = -1
self.assertEqual(x.argmin(0), x.shape[0] - 1)
self.assertEqual(x.min(0).indices, x.shape[0] - 1)
def test_argminmax_axis_with_dim_one(self, device):
# See: https://github.com/pytorch/pytorch/issues/38922
n = 32768
x = torch.zeros(1, n)
self.assertEqual(x.argmax(dim=0), torch.zeros(n, dtype=torch.int64))
self.assertEqual(x.argmin(dim=0), torch.zeros(n, dtype=torch.int64))
self.assertEqual(x.argmax(dim=-2), torch.zeros(n, dtype=torch.int64))
self.assertEqual(x.argmin(dim=-2), torch.zeros(n, dtype=torch.int64))
self.assertEqual(x.argmax(dim=0, keepdim=True), torch.zeros(1, n, dtype=torch.int64))
self.assertEqual(x.argmin(dim=0, keepdim=True), torch.zeros(1, n, dtype=torch.int64))
self.assertEqual(x.argmax(dim=-2, keepdim=True), torch.zeros(1, n, dtype=torch.int64))
self.assertEqual(x.argmin(dim=-2, keepdim=True), torch.zeros(1, n, dtype=torch.int64))
    @dtypes(torch.int, torch.long, torch.float, torch.double)
    @dtypesIfCUDA(torch.int, torch.long, torch.half, torch.float, torch.double)
    def test_median_real_values(self, device, dtype):
        """Check median/nanmedian on NaN-free inputs against a sorted-tensor
        reference and, for odd element counts, against numpy.median."""
        # Generate random 0-3D sizes
        sizes = [random.sample(range(1, 32), i) for i in range(4) for _ in range(2)]
        for size in sizes:
            # Create random input tensor
            t = torch.randn(size, device=device).type(dtype)
            t_numpy = t.cpu().numpy()
            res = t.median()
            # Without NaNs, median and nanmedian must agree.
            self.assertEqual(res, t.nanmedian())
            # k is the index of the lower median in the sorted, flattened tensor.
            k = int((t.numel() - 1) / 2)
            self.assertEqual(res, t.view(-1).sort()[0][k])
            if t.numel() % 2 == 1:
                # We can only test against numpy for odd reductions because numpy
                # returns the mean of the two medians and torch returns the lower
                self.assertEqual(res.cpu().numpy(), np.median(t_numpy))
            for dim in range(t.ndim):
                # Dim-wise median with keepdim=True; returns (values, indices).
                res = t.median(dim, True)
                self.assertEqual(res, t.nanmedian(dim, True))
                size = t.size(dim) if t.ndim > 0 else 1
                k = int((size - 1) / 2)
                self.assertEqual(res[0], (t.sort(dim)[0]).select(dim, k).unsqueeze_(dim))
                # The returned indices must gather back to the returned values.
                self.assertEqual(res[0], t.gather(dim, res[1]))
                if size % 2 == 1:
                    # We can only test against numpy for odd reductions because numpy
                    # returns the mean of the two medians and torch returns the lower
                    self.assertEqual(res[0].cpu().numpy(), np.median(t_numpy, dim, keepdims=True), exact_dtype=False)
    @dtypes(torch.float, torch.double)
    @dtypesIfCUDA(torch.half, torch.float, torch.double)
    def test_median_nan_values(self, device, dtype):
        """Check median/nanmedian on inputs containing NaNs.

        torch.median propagates NaN (NaNs sort to the end, so the last sorted
        element is selected when any NaN is present), while torch.nanmedian
        ignores them; both are checked against a sorted-tensor reference and,
        for odd effective element counts, against the numpy equivalent."""
        # Generate random 0-3D sizes
        sizes = [random.sample(range(1, 32), i) for i in range(4) for _ in range(2)]
        for size in sizes:
            # Create random input tensor with nan values
            t = torch.rand(size, device=device, dtype=dtype)
            t.masked_fill_(t < 0.1, float('nan'))
            t_numpy = t.cpu().numpy()
            for op in [torch.median, torch.nanmedian]:
                numpy_op = np.median if op == torch.median else np.nanmedian
                res = op(t)
                num_nan = t.isnan().sum()
                if op == torch.median and num_nan > 0:
                    # NaNs sort last, so median-with-NaN selects the final element.
                    k = t.numel() - 1
                else:
                    # Lower median among the non-NaN elements.
                    k = int((t.numel() - num_nan - 1) / 2)
                self.assertEqual(res, t.view(-1).sort()[0][k])
                if (t.numel() - num_nan) % 2 == 1:
                    # We can only test against numpy for odd reductions because numpy
                    # returns the mean of the two medians and torch returns the lower
                    self.assertEqual(res.item(), numpy_op(t.cpu().numpy()))
                for dim in range(t.ndim):
                    res = op(t, dim, True)
                    size = t.size(dim) if t.ndim > 0 else 1
                    num_nan = t.isnan().sum(dim, True)
                    if op == torch.median:
                        # Per-slice: pick the last element if the slice has a NaN.
                        k = torch.where(num_nan > 0, size - 1, int((size - 1) / 2))
                    else:
                        k = ((size - num_nan - 1) / 2).type(torch.long)
                    self.assertEqual(res[0], (t.sort(dim)[0]).gather(dim, k))
                    # Returned indices must gather back to the returned values.
                    self.assertEqual(res[0], t.gather(dim, res[1]))
                    # We can only test against numpy for odd reductions because numpy
                    # returns the mean of the two medians and torch returns the lower
                    mask = (size - num_nan) % 2 == 1
                    res = res[0].masked_select(mask).cpu()
                    ref = numpy_op(t_numpy, dim, keepdims=True)[mask.cpu().numpy()]
                    self.assertEqual(res, torch.from_numpy(ref))
def test_median_corner_cases(self, device):
def check(op, a, args, key):
t = torch.tensor(a, device=device)
res = op(t, *args)
if not args:
key = torch.tensor(key, device=device)
else:
if len(key) == 1:
key = torch.tensor(key[0], device=device)
res = res[0]
else:
key = (torch.tensor(key[0], device=device), torch.tensor(key[1], device=device))
self.assertEqual(res, key)
nan = float('nan')
check(torch.median, nan, [], nan)
check(torch.median, [], [], nan)
check(torch.nanmedian, nan, [], nan)
check(torch.median, nan, [0], [nan, 0])
check(torch.nanmedian, nan, [0], [nan, 0])
check(torch.median, [nan], [0, True], [[nan], [0]])
check(torch.nanmedian, [nan], [0, True], [[nan], [0]])
check(torch.median, [nan], [0, True], [[nan], [0]])
check(torch.nanmedian, [nan], [0, True], [[nan], [0]])
# Indices are not deterministic here so can only check values
check(torch.median, [[nan, nan], [1, 2]], [0], [[nan, nan]])
check(torch.nanmedian, [[nan, nan], [1, 2]], [0], [[1, 2.]])
check(torch.median, [[nan, nan], [1, 2]], [1], [[nan, 1]])
check(torch.nanmedian, [[nan, nan], [1, 2]], [1], [[nan, 1.]])
# Discontiguous and strided tensors
a = torch.arange(12, device=device)
self.assertEqual(a[::2].median(), torch.tensor(4, device=device))
self.assertEqual(a[::2].nanmedian(), torch.tensor(4, device=device))
a.resize_(3, 4)
self.assertEqual(a.T.median(), torch.tensor(5, device=device))
self.assertEqual(a.T.nanmedian(), torch.tensor(5, device=device))
self.assertEqual(a[::2, ::2].median(-1)[0], torch.tensor([0, 8], device=device))
self.assertEqual(a[::2, ::2].nanmedian(-1)[0], torch.tensor([0, 8], device=device))
a.resize_(2, 3, 2)
self.assertEqual(a.T.median(), torch.tensor(5, device=device))
self.assertEqual(a.T.nanmedian(), torch.tensor(5, device=device))
self.assertEqual(a[:, ::2, :].median(-1)[0], torch.tensor([[0, 4], [6, 10]], device=device))
self.assertEqual(a[:, ::2, :].nanmedian(-1)[0], torch.tensor([[0, 4], [6, 10]], device=device))
    @onlyNativeDeviceTypes
    @dtypes(torch.float, torch.double)
    def test_quantile(self, device, dtype):
        """Cross-check torch.quantile/nanquantile against the numpy functions
        of the same name over random shapes, q values, dims, keepdim and all
        interpolation modes; also checks the out= variant."""
        # Generate some random test cases
        ops = ['quantile', 'nanquantile']
        inputs = [tuple(np.random.randint(2, 10, size=i)) for i in range(1, 4)]
        quantiles = [tuple(np.random.rand(i)) for i in range(0, 5)]
        keepdims = [True, False]
        # Add corner cases
        inputs.extend([0.75, (1,), (1, 1), (1, 2, 1)])
        inputs.extend([[float('nan')], [[float('nan'), float('nan')], [1, 2]]])
        inputs.extend([[[float('nan'), float('nan')], [float('nan'), 2]]])
        quantiles.extend([0.5, [0., 1.], np.random.rand(10)])
        # Enumerate all input combinations
        for op, x, q, keepdim in product(ops, inputs, quantiles, keepdims):
            # A tuple entry is a shape for a random tensor; anything else is
            # used verbatim as tensor data.
            if type(x) is tuple:
                a = torch.randn(x, dtype=dtype, device=device)
                # Make some random elements NaN
                a.masked_fill_(torch.randint_like(a, 20) == 0, float('nan'))
            else:
                a = torch.tensor(x, dtype=dtype, device=device)
            q = torch.tensor(q, dtype=dtype, device=device)
            torch_op = getattr(torch, op)
            numpy_op = getattr(np, op)
            # Compute quantile along every dimension and flattened tensor
            interpolations = ('linear', 'lower', 'higher', 'midpoint', 'nearest')
            for interpolation, dim in product(interpolations,
                                              [None] + list(range(a.ndim))):
                result = torch_op(a, q, dim=dim, keepdim=keepdim, interpolation=interpolation)
                expected = numpy_op(a.cpu().numpy(), q.cpu().numpy(), dim,
                                    interpolation=interpolation, keepdims=keepdim)
                self.assertEqual(result.cpu(), torch.from_numpy(np.array(expected)).type(result.type()))
                # Test out variation
                out = torch.empty_like(result)
                torch_op(a, q, dim=dim, keepdim=keepdim, interpolation=interpolation, out=out)
                self.assertEqual(out.cpu(), result.cpu())
def test_quantile_backward(self, device):
def check(a, q, dim, expected_grad, ops=(torch.quantile, torch.nanquantile)):
for op in ops:
t = torch.tensor(a, device=device, requires_grad=True)
op(t, torch.tensor(q, device=device), dim).sum().backward()
self.assertEqual(t.grad, expected_grad)
check([1., 2, 3], 0.5, 0, [0, 1, 0])
check([1., 2, 3, 4], 0.5, 0, [0, 0.5, 0.5, 0])
check([3., 1, 4, 2], 0.5, 0, [0.5, 0, 0, 0.5])
check([1., 2, 3, 4], [0.25, 0.5, 0.75], 0, [0.25, 1.25, 1.25, 0.25])
check([[1., 2], [2, 1]], 0., 0, [[1, 0], [0, 1]])
check([[1., 2], [4, 3]], 1., 1, [[0, 1], [1, 0]])
check([1, float('nan'), 2], 0.5, 0, [0, 1, 0], [torch.quantile])
check([1, float('nan'), 2], 0.5, 0, [0.5, 0, 0.5], [torch.nanquantile])
    def test_quantile_error(self, device):
        """Check that torch.quantile raises RuntimeError with the expected
        message for invalid inputs, q values, dtypes, out tensors, and
        cross-device arguments."""
        def check(a, q, args, kwargs, message):
            # Every quantile error message is prefixed with 'quantile() '.
            with self.assertRaisesRegex(RuntimeError, r'quantile\(\) ' + message):
                at = torch.tensor(a, device=device)
                qt = torch.tensor(q, device=device) if isinstance(q, list) else q
                torch.quantile(at, qt, *args, **kwargs)
        check([], 0.5, [], {}, r'input tensor must be non-empty')
        check([1.], [[1.]], [], {}, r'q must be a scalar or 1D tensor')
        check([1], 0.5, [], {}, r'input tensor must be either float or double dtype')
        check([1.], [1], [], {}, r'q tensor must be same dtype as the input tensor')
        check([1.], -1., [], {}, r'q must be in the range \[0, 1\] but got -1')
        check([1.], 1.1, [], {}, r'q must be in the range \[0, 1\] but got 1.1')
        check([1.], 0.5, [], {'out': torch.empty([], dtype=torch.int32, device=device)},
              r'out tensor must be same dtype as the input tensor')
        check([1.], [1.], [None, False], {'interpolation': 'random_mode'},
              r"interpolation must be one of linear, lower, higher, midpoint or nearest, but got random_mode")
        # q-range validation of tensor q happens on the CPU only.
        if self.device_type == "cpu":
            check([1.], [0.5, 1.1, -1], [], {}, r'q values must be in the range \[0, 1\]')
        # Cross-device argument checks only make sense with a second device.
        if self.device_type == "cuda":
            with self.assertRaisesRegex(
                    RuntimeError, r'quantile\(\) q tensor must be on the same device as the input tensor'):
                torch.randn(1, device=device).quantile(torch.tensor(0.5))
            with self.assertRaisesRegex(
                    RuntimeError, r'quantile\(\) out tensor must be on the same device as the input tensor'):
                torch.quantile(torch.randn(1, device=device), 0.5, out=torch.scalar_tensor(1))
def test_std_mean(self, device):
x = torch.rand(100, 50, 20, device=device)
for dim in range(x.dim()):
for unbiased in [False, True]:
for keepdim in [False, True]:
std1, mean1 = torch.std_mean(x, dim=dim, unbiased=unbiased, keepdim=keepdim)
std2 = x.std(dim=dim, unbiased=unbiased, keepdim=keepdim)
mean2 = x.mean(dim=dim, keepdim=keepdim)
self.assertEqual(std1, std2)
self.assertEqual(mean1, mean2)
def test_std_mean_all_dims(self, device):
x = torch.rand(100, 50, 20, device=device)
for unbiased in [False, True]:
std1, mean1 = torch.std_mean(x, unbiased=unbiased)
std2 = x.std(unbiased=unbiased)
mean2 = x.mean()
self.assertEqual(std1, std2)
self.assertEqual(mean1, mean2)
def test_var_mean(self, device):
x = torch.rand(100, 300, 50, device=device)
for dim in range(x.dim()):
for unbiased in [False, True]:
for keepdim in [False, True]:
var1, mean1 = torch.var_mean(x, dim=dim, unbiased=unbiased, keepdim=keepdim)
var2 = x.var(dim=dim, unbiased=unbiased, keepdim=keepdim)
mean2 = x.mean(dim=dim, keepdim=keepdim)
self.assertEqual(var1, var2)
self.assertEqual(mean1, mean2)
def test_var_mean_all_dims(self, device):
x = torch.rand(100, 50, 20, device=device)
for unbiased in [False, True]:
var1, mean1 = torch.var_mean(x, unbiased=unbiased)
var2 = x.var(unbiased=unbiased)
mean2 = x.mean()
self.assertEqual(var1, var2)
self.assertEqual(mean1, mean2)
def test_std_mean_some_dims(self, device):
sizes = (4, 6, 7, 5, 3)
dims = len(sizes)
x = torch.rand(sizes, device=device)
for num_of_dims in range(2, dims):
dim_list = list(combinations(list(range(dims)), r=num_of_dims))
for dim in dim_list:
for unbiased in [False, True]:
for keepdim in [False, True]:
std1, mean1 = torch.std_mean(x, dim=dim, unbiased=unbiased, keepdim=keepdim)
std2 = x.std(dim=dim, unbiased=unbiased, keepdim=keepdim)
mean2 = x.mean(dim=dim, keepdim=keepdim)
self.assertEqual(std1, std2)
self.assertEqual(mean1, mean2)
def _compare_std_var_with_numpy(self, op, device, dtype, input, dim,
keepdim, unbiased, use_out):
a = input.cpu().numpy() if input.dtype is not torch.bfloat16 else input.float().cpu().numpy()
numpy_kwargs = {
'axis' : dim,
'keepdims' : keepdim,
'ddof' : 1 if unbiased else 0,
}
if dim is None:
del numpy_kwargs['axis']
del numpy_kwargs['keepdims']
if op == 'var':
torch_op = torch.var
numpy_op = np.var
elif op == 'std':
torch_op = torch.std
numpy_op = np.std
else:
self.fail("Unknown op!")
numpy_result = numpy_op(a, **numpy_kwargs)
if dim is None and use_out is False:
torch_result = torch_op(input, unbiased)
elif dim is not None and use_out is False:
torch_result = torch_op(input, dim, unbiased, keepdim)
elif dim is not None and use_out is True:
out = torch.empty(0, device=device, dtype=dtype)
torch_result = torch_op(input, dim, unbiased, keepdim, out=out)
else:
out = torch.empty(0, device=device, dtype=dtype)
torch_result = torch_op(input, dim, unbiased, keepdim, out=out)
exact_dtype = input.dtype not in (torch.bfloat16, torch.complex32, torch.complex64, torch.complex128)
self.assertEqual(torch_result, numpy_result, exact_dtype=exact_dtype)
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_var_vs_numpy(self, device, dtype):
_size = (20, 20)
for test_case in product((torch.randn(_size, device=device, dtype=dtype),),
(None, 0, 1),
(False, True),
(False, True),
(False, True),):
self._compare_std_var_with_numpy('var', device, dtype, *test_case)
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_std_vs_numpy(self, device, dtype):
_size = (20, 20)
for test_case in product((torch.randn(_size, device=device, dtype=dtype),),
(None, 0, 1),
(False, True),
(False, True),
(False, True),):
self._compare_std_var_with_numpy('std', device, dtype, *test_case)
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_var_correction_vs_numpy(self, device, dtype):
        """Compare torch.var's `correction` argument (including None and a
        negative value) with numpy's `ddof`."""
        _size = (20, 20)
        test_args = [
            *product(
                # dim
                (None, 0, 1),
                # correction
                (None, 0, 10, 30),
                # keepdim
                (False, True),
            ),
            [None, -100, True],  # Negative correction
        ]
        tensor = make_tensor(_size, device=device, dtype=dtype)
        array = tensor.cpu().numpy()
        for dim, correction, keepdim in test_args:
            numpy_kwargs = dict(axis=dim, ddof=correction, keepdims=keepdim)
            if correction is None:
                # NumPy's default ddof=0 is not compatible with torch.var's
                # default correction=1 (gh-50010)
                numpy_kwargs['ddof'] = 1
            numpy_res = np.asarray(np.var(array, **numpy_kwargs))
            torch_res = torch.var(tensor, dim=dim, correction=correction, keepdim=keepdim)
            # inf vs. nan results are sensitive to machine precision,
            # just treat them as equivalent
            numpy_res[np.isinf(numpy_res)] = np.nan
            torch_res[torch_res.isinf()] = np.nan
            self.assertEqual(torch_res, numpy_res)
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_std_correction_vs_numpy(self, device, dtype):
        """Compare torch.std's `correction` argument (including None and a
        negative value) with numpy's `ddof`."""
        _size = (20, 20)
        test_args = [
            *product(
                # dim
                (None, 0, 1),
                # correction
                (None, 0, 10, 30),
                # keepdim
                (False, True),
            ),
            [None, -100, True],  # Negative correction
        ]
        tensor = make_tensor(_size, device=device, dtype=dtype)
        array = tensor.cpu().numpy()
        for dim, correction, keepdim in test_args:
            numpy_kwargs = dict(axis=dim, ddof=correction, keepdims=keepdim)
            if correction is None:
                # NumPy default is incompatible with torch.std (gh-50010)
                numpy_kwargs['ddof'] = 1
            numpy_res = np.asarray(np.std(array, **numpy_kwargs))
            torch_res = torch.std(tensor, dim=dim, correction=correction, keepdim=keepdim)
            # inf vs. nan results are sensitive to machine precision,
            # just treat them as equivalent
            numpy_res[np.isinf(numpy_res)] = np.nan
            torch_res[torch_res.isinf()] = np.nan
            self.assertEqual(torch_res, numpy_res)
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_std_mean_correction(self, device, dtype):
        """torch.std_mean with a `correction` argument must agree with
        torch.std and torch.mean computed separately."""
        _size = (20, 20)
        test_args = [
            *product(
                # dim
                (None, 0, 1),
                # correction
                (None, 0, 10, 30),
                # keepdim
                (False, True),
            ),
            [None, -100, True],  # Negative correction
        ]
        tensor = make_tensor(_size, device=device, dtype=dtype)
        for dim, correction, keepdim in test_args:
            kwargs = dict(dim=dim, correction=correction, keepdim=keepdim)
            std1 = torch.std(tensor, **kwargs)
            if dim is not None:
                mean1 = torch.mean(tensor, dim=dim, keepdim=keepdim)
            else:
                mean1 = torch.mean(tensor)
                if keepdim:
                    # A full reduction with keepdim keeps every dim as size 1,
                    # while plain mean() returns a scalar; reshape to match.
                    mean1 = mean1.reshape((1,) * tensor.ndim)
            std2, mean2 = torch.std_mean(tensor, **kwargs)
            self.assertEqual(std1, std2)
            self.assertEqual(mean1, mean2)
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_var_mean_correction(self, device, dtype):
_size = (20, 20)
test_args = [
*product(
# dim
(None, 0, 1),
# correction
(None, 0, 10, 30),
# keepdim
(False, True),
),
[None, -100, True], # Negative correction
]
tensor = make_tensor(_size, device=device, dtype=dtype)
for dim, correction, keepdim in test_args:
kwargs = dict(dim=dim, correction=correction, keepdim=keepdim)
var1 = torch.var(tensor, **kwargs)
if dim is not None:
mean1 = torch.mean(tensor, dim=dim, keepdim=keepdim)
else:
mean1 = torch.mean(tensor)
if keepdim:
mean1 = mean1.reshape((1,) * tensor.ndim)
var2, mean2 = torch.var_mean(tensor, **kwargs)
self.assertEqual(var1, var2)
self.assertEqual(mean1, mean2)
    def test_amin_amax_some_dims(self, device):
        """amin/amax over a multi-dim subset must equal reducing one dim at a
        time over the same subset."""
        sizes = (4, 6, 7, 5, 3)
        dims = len(sizes)
        x = torch.rand(sizes, device=device)
        for num_of_dims in range(2, dims):
            dim_list = list(combinations(list(range(dims)), r=num_of_dims))
            for dim in dim_list:
                for keepdim in [False, True]:
                    amin1 = torch.amin(x, dim=dim, keepdim=keepdim)
                    amax1 = torch.amax(x, dim=dim, keepdim=keepdim)
                    amin2 = x
                    amax2 = x
                    for i, d in enumerate(dim):
                        if not keepdim:
                            # The i earlier single-dim reductions removed i
                            # leading dims, so shift the index accordingly.
                            d -= i
                        amin2 = torch.amin(amin2, dim=d, keepdim=keepdim)
                        amax2 = torch.amax(amax2, dim=d, keepdim=keepdim)
                    self.assertEqual(amin1, amin2)
                    self.assertEqual(amax1, amax2)
    def test_histc(self, device):
        """Exercise torch.histc: error cases, corner-case inputs (empty,
        constant, scalar, inf/nan), dtype handling, and a cross-check against
        numpy.histogram on random / non-contiguous / expanded inputs."""
        # negative nbins throws
        with self.assertRaisesRegex(RuntimeError, 'bins must be > 0'):
            torch.histc(torch.tensor([1], dtype=torch.float, device=device), bins=-1)
        # empty tensor
        actual = torch.histc(torch.tensor([], device=device), min=0, max=3)
        expected = torch.zeros(100, dtype=torch.float, device=device)
        self.assertEqual(expected, actual)
        # without nbins
        actual = torch.histc(
            torch.tensor([2, 5], dtype=torch.float, device=device))
        expected = torch.zeros(100, dtype=torch.float, device=device)
        expected[0] = 1
        expected[99] = 1
        self.assertEqual(expected, actual)
        # tensor with the same element
        actual = torch.histc(torch.ones(5, dtype=torch.float, device=device), bins=5)
        self.assertEqual(
            torch.tensor([0, 0, 5, 0, 0], dtype=torch.float, device=device),
            actual)
        # no element falls between [min, max]
        actual = torch.histc(
            torch.ones(5, dtype=torch.float, device=device), bins=5, min=2, max=3)
        self.assertEqual(
            torch.tensor([0, 0, 0, 0, 0], dtype=torch.float, device=device),
            actual)
        # integral values with a caller-specified [min, max] range
        actual = torch.histc(
            torch.tensor([2, 4, 2, 2, 5, 4], dtype=torch.float, device=device),
            bins=5, min=1, max=5)
        self.assertEqual(
            torch.tensor([0, 3, 0, 2, 1], dtype=torch.float, device=device),
            actual)
        # non-integral bin size
        actual = torch.histc(
            torch.tensor([1, 2, 1], dtype=torch.float, device=device),
            bins=4, min=0, max=3)
        self.assertEqual(
            torch.tensor([0, 2, 1, 0], dtype=torch.float, device=device),
            actual)
        # double input
        actual = torch.histc(
            torch.tensor([1, 2, 1], dtype=torch.double, device=device), bins=4, min=0, max=3)
        self.assertEqual(
            torch.tensor([0, 2, 1, 0], dtype=torch.double, device=device),
            actual)
        self.assertEqual(actual.dtype, torch.double)
        # mixed input
        actual = torch.histc(
            torch.tensor([1., 2, 1], dtype=torch.float, device=device),
            bins=4, min=0, max=3)
        self.assertEqual(
            torch.tensor([0, 2, 1, 0], dtype=torch.float, device=device),
            actual)
        self.assertEqual(actual.dtype, torch.float)
        # scalar input and 1 bin -- should return a 1-dimensional tensor, not a scalar.
        actual = torch.histc(
            torch.tensor(0, dtype=torch.float, device=device),
            bins=1, min=0, max=3)
        self.assertEqual(
            torch.tensor([1], dtype=torch.float, device=device),
            actual)
        # tensors with inf; min, max not provided -- should throw a RuntimeError
        with self.assertRaisesRegex(RuntimeError, r'range of \[inf, inf\] is not finite'):
            torch.histc(torch.tensor([float("inf")], dtype=torch.float, device=device))
        with self.assertRaisesRegex(RuntimeError, r'range of \[1, inf\] is not finite'):
            torch.histc(torch.tensor([1., 2., float("inf")], dtype=torch.float, device=device))
        # tensors with inf; min, max provided
        self.assertEqual(
            torch.histc(torch.tensor([float("inf")], dtype=torch.float, device=device),
                        bins=1, min=0, max=3),
            torch.tensor([0], dtype=torch.float, device=device))
        self.assertEqual(
            torch.histc(torch.tensor([1., 2., float("inf")], dtype=torch.float, device=device),
                        bins=4, max=3),
            torch.tensor([0, 1, 1, 0], dtype=torch.float, device=device))
        # tensor with nan -- should throw a RuntimeError
        with self.assertRaisesRegex(RuntimeError, r'range of \[nan, nan\] is not finite'):
            torch.histc(torch.tensor([float("nan")], dtype=torch.float, device=device))
        # tensors with min > max -- should throw a RuntimeError
        with self.assertRaisesRegex(RuntimeError, "max must be larger than min"):
            torch.histc(torch.tensor([1., 2., 3.], dtype=torch.float, device=device),
                        bins=4, min=5, max=1)
        # test against numpy.histogram()
        def test_against_np(tensor, bins=100, min=0, max=0):
            # min == max == 0 means "derive the range from the data",
            # matching histc's own default behavior.
            if min == 0 and max == 0:
                min = tensor.min().item()
                max = tensor.max().item()
            nparr = tensor.cpu().numpy()
            actual = torch.histc(tensor, bins=bins, min=min, max=max)
            expected = torch.from_numpy(np.histogram(nparr, bins=bins, range=(min, max))[0])
            actual_cpu = actual.cpu()
            # NB: Numpy returns a int64 tensor, like normal people...
            self.assertEqual(actual, expected.to(actual_cpu))
        test_against_np(torch.tensor([1., 2, 1], device=device))
        test_against_np(torch.randn(5000, device=device))
        # Test bins arg
        test_against_np(torch.randn(301, device=device), bins=10)
        # Test truncated range
        test_against_np(torch.randn(201, device=device), min=0.1, max=1)
        noncontig = torch.randn(100, 3, device=device)[:, 2]
        test_against_np(noncontig)
        multidim = torch.randn(3, 5, 7, 2, device=device)
        test_against_np(multidim)
        expanded = torch.randn(1, 5, 1, 2, device=device).expand(3, 5, 7, 2)
        test_against_np(expanded)
@onlyCPU
def test_histc_bfloat16(self, device):
actual = torch.histc(
torch.tensor([1, 2, 1], dtype=torch.bfloat16, device=device), bins=4, min=0, max=3)
self.assertEqual(
torch.tensor([0, 2, 1, 0], dtype=torch.bfloat16, device=device),
actual)
self.assertEqual(actual.dtype, torch.bfloat16)
"""
Runs torch.histogram and numpy.histogram on the specified input parameters
and asserts that their output is equal.
"""
def _test_histogram_numpy(self, t, bins, bin_range, weights, density):
def to_np(t):
if not torch.is_tensor(t):
return t
else:
return t.cpu().numpy()
# Wrapper around numpy.histogram performing conversions between torch tensors and numpy arrays.
def reference_histogram(self, t, bins, bin_range, weights, density, dtype):
(np_t, np_bins, np_weights) = map(to_np, [t, bins, weights])
(np_hist, np_bin_edges) = np.histogram(np_t, np_bins, range=bin_range, weights=np_weights, density=density)
return (torch.from_numpy(np_hist).to(dtype), torch.from_numpy(np_bin_edges).to(dtype))
# Doesn't pass a 'range' kwarg unless necessary because the override of histogram with Tensor bins doesn't accept one
if bin_range:
(actual_hist, actual_bin_edges) = torch.histogram(t, bins, range=bin_range, weight=weights, density=density)
else:
(actual_hist, actual_bin_edges) = torch.histogram(t, bins, weight=weights, density=density)
(expected_hist, expected_bin_edges) = reference_histogram(self, t, bins, bin_range, weights, density, actual_hist.dtype)
"""
Works around linspace discrepancies by passing torch's constructed bin_edges to numpy.
When bin edges are not explicitly defined, histogram uses the linspace operator internally
to construct the sequence of bin edges. In some cases, torch.linspace output differs slightly
from numpy.linspace output.
Issue: https://github.com/pytorch/pytorch/issues/58758
"""
if not torch.is_tensor(bins):
self.assertEqual(actual_bin_edges, expected_bin_edges, atol=1e-5, rtol=1e-5)
# Calls numpy.histogram again, passing torch's actual_bin_edges as the bins argument
(expected_hist, expected_bin_edges) = reference_histogram(
self, t, actual_bin_edges, bin_range, weights, density, actual_hist.dtype)
self.assertEqual(actual_hist, expected_hist)
self.assertEqual(actual_bin_edges, expected_bin_edges)
# Test passing non-contiguous output tensors
hist_out = make_tensor(expected_hist.shape, device=expected_hist.device, dtype=expected_hist.dtype,
noncontiguous=True)
bin_edges_out = make_tensor(expected_bin_edges.shape, device=expected_bin_edges.device, dtype=expected_bin_edges.dtype,
noncontiguous=True)
# Doesn't pass a 'range' kwarg unless necessary because the override of histogram with Tensor bins doesn't accept one
if bin_range:
torch.histogram(t, bins, range=bin_range, weight=weights, density=density, out=(hist_out, bin_edges_out))
else:
torch.histogram(t, bins, weight=weights, density=density, out=(hist_out, bin_edges_out))
self.assertEqual(hist_out, expected_hist)
self.assertEqual(bin_edges_out, expected_bin_edges)
    @onlyCPU
    @dtypes(torch.float32)
    def test_histogram(self, device, dtype):
        """Drive _test_histogram_numpy over combinations of input shapes,
        contiguity, bin counts, explicit bin edges, weights, density, and
        degenerate ranges; also check the default-argument values."""
        shapes = (
            (),
            (0,),
            (1,),
            (1, 5),
            (3, 5),
            (1, 5, 1),
            (2, 3, 5))
        for contig, bins_contig, bin_ct, weighted, density, shape in \
                product([True, False], [True, False], range(1, 10), [True, False], [True, False], shapes):
            values = make_tensor(shape, dtype=dtype, device=device, low=-9, high=9, noncontiguous=not contig)
            weights = make_tensor(shape, dtype=dtype, device=device, low=0, high=9, noncontiguous=not contig) if weighted else None
            # Tests passing just the bin_ct
            self._test_histogram_numpy(values, bin_ct, None, weights, density)
            # Tests with caller-specified histogram range
            bin_range = sorted((random.uniform(-9, 9), random.uniform(-9, 9)))
            self._test_histogram_numpy(values, bin_ct, bin_range, weights, density)
            # Tests with range min=max
            bin_range[1] = bin_range[0]
            self._test_histogram_numpy(values, bin_ct, bin_range, weights, density)
            # Tests with caller-specified bin edges
            bin_edges = make_tensor(bin_ct + 1, dtype=dtype, device=device, low=-9, high=9).msort()
            if not bins_contig:
                # Necessary because msort always produces contiguous output
                bin_edges_noncontig = make_tensor(bin_ct + 1, dtype=dtype, device=device, noncontiguous=not bins_contig)
                bin_edges_noncontig.copy_(bin_edges)
                bin_edges = bin_edges_noncontig
            self.assertEqual(bin_edges.is_contiguous(), bins_contig)
            self._test_histogram_numpy(values, bin_edges, None, weights, density)
            # Tests with input tensor in which all elements are equal
            elt = random.uniform(-9, 9)
            values = make_tensor(shape, dtype=dtype, device=device, low=elt, high=elt, noncontiguous=not contig)
            self._test_histogram_numpy(values, bin_ct, bin_range, weights, density)
            self._test_histogram_numpy(values, bin_edges, None, weights, density)
            # Tests with input equal to bin_edges
            weights = (
                make_tensor(bin_ct + 1, dtype=dtype, device=device, low=0, high=9, noncontiguous=not contig)
                if weighted
                else None
            )
            self._test_histogram_numpy(bin_edges, bin_edges, None, weights, density)
        # Tests values of default args
        for bin_ct, shape in product(range(1, 10), shapes):
            values = make_tensor(shape, dtype=dtype, device=device, low=-9, high=9)
            (actual_hist, actual_bin_edges) = torch.histogram(values, bin_ct)
            (expected_hist, expected_bin_edges) = torch.histogram(
                values, bin_ct, range=None, weight=None, density=False)
            self.assertEqual(actual_hist, expected_hist)
            self.assertEqual(actual_bin_edges, expected_bin_edges)
"""
Runs torch.histogramdd and numpy.histogramdd on the specified input parameters
and asserts that their output is equal.
"""
def _test_histogramdd_numpy(self, t, bins, bin_range, weights, density):
def to_np(t):
if type(t) == list:
return list(map(to_np, t))
if not torch.is_tensor(t):
return t
return t.cpu().numpy()
# Wrapper around numpy.histogram performing conversions between torch tensors and numpy arrays.
def reference_histogramdd(t, bins, bin_range, weights, density, dtype):
(np_t, np_bins, np_weights) = map(to_np, [t, bins, weights])
# numpy.histogramdd accepts only (N, D) shapes
D = np_t.shape[-1]
N = np.prod(np_t.shape[:-1])
reshaped_t = np.reshape(np_t, (N, D))
reshaped_wt = np.reshape(np_weights, (N,)) if np_weights is not None else None
# numpy.histogramdd throws an error for D=0
if D == 0:
return (torch.tensor(float('nan') if density else 0.), [])
# numpy.histogramdd expects range to be specified as a sequence of D (lower, upper) tuples
reshaped_range = None if not bin_range else [(bin_range[2 * i], bin_range[2 * i + 1]) for i in range(D)]
(np_hist, np_bin_edges) = np.histogramdd(reshaped_t, np_bins,
range=reshaped_range, weights=reshaped_wt, density=density)
return (torch.from_numpy(np_hist).to(dtype), [torch.from_numpy(t).to(dtype) for t in np_bin_edges])
(actual_hist, actual_bin_edges) = torch.histogramdd(t, bins, range=bin_range, weight=weights, density=density)
(expected_hist, expected_bin_edges) = reference_histogramdd(t, bins, bin_range, weights, density, actual_hist.dtype)
D = len(actual_bin_edges)
self.assertEqual(D, len(expected_bin_edges))
"""
Works around linspace discrepancies by passing torch's constructed bin_edges to numpy.
When bin edges are not explicitly defined, histogram uses the linspace operator internally
to construct the sequence of bin edges. In some cases, torch.linspace output differs slightly
from numpy.linspace output.
Issue: https://github.com/pytorch/pytorch/issues/58758
"""
if not torch.is_tensor(bins):
for dim in range(D):
self.assertEqual(actual_bin_edges[dim], expected_bin_edges[dim], atol=1e-5, rtol=1e-5)
# Calls numpy.histogram again, passing torch's actual_bin_edges as the bins argument
(expected_hist, expected_bin_edges) = reference_histogramdd(
t, actual_bin_edges, bin_range, weights, density, actual_hist.dtype)
self.assertEqual(D, len(expected_bin_edges))
self.assertEqual(actual_hist, expected_hist)
for dim in range(D):
self.assertEqual(actual_bin_edges[dim], expected_bin_edges[dim])
    @onlyCPU
    @dtypes(torch.float32)
    def test_histogramdd(self, device, dtype):
        """Drive _test_histogramdd_numpy over combinations of input shapes
        (including zero-sized dims), contiguity, weights, density, bin counts
        (shared and per-dimension), ranges, and explicit per-dim bin edges."""
        shapes = (
            (1, 5),
            (3, 5),
            (1, 5, 1),
            (2, 3, 5),
            (7, 7, 7, 7),
            (16, 8, 4, 2),
            (10, 10, 10),
            (7, 0, 3),
            (5, 0),)
        for contig, bins_contig, weighted, density, shape in \
                product([True, False], [True, False], [True, False], [True, False], shapes):
            # The trailing dim of the input is the histogram dimensionality D.
            D = shape[-1]
            values = make_tensor(shape, dtype=dtype, device=device, low=-9, high=9, noncontiguous=not contig)
            weights = (
                make_tensor(shape[:-1], dtype=dtype, device=device, low=0, high=9, noncontiguous=not contig)
                if weighted
                else None
            )
            # Tests passing a single bin count
            bin_ct = random.randint(1, 5)
            self._test_histogramdd_numpy(values, bin_ct, None, weights, density)
            # Tests passing a bin count for each dimension
            bin_ct = [random.randint(1, 5) for dim in range(D)]
            self._test_histogramdd_numpy(values, bin_ct, None, weights, density)
            # Tests with caller-specified histogram range
            bin_range_tuples = [sorted((random.uniform(-9, 9), random.uniform(-9, 9))) for dim in range(D)]
            bin_range = [elt for t in bin_range_tuples for elt in t]
            self._test_histogramdd_numpy(values, bin_ct, bin_range, weights, density)
            # Tests with range min=max
            for dim in range(D):
                bin_range[2 * dim + 1] = bin_range[2 * dim]
            self._test_histogramdd_numpy(values, bin_ct, bin_range, weights, density)
            # Tests with caller-specified bin edges
            bin_edges = [make_tensor(ct + 1, dtype=dtype, device=device, low=-9, high=9).msort() for ct in bin_ct]
            if not bins_contig:
                # Necessary because msort always produces contiguous output
                bin_edges_noncontig = [
                    make_tensor(ct + 1, dtype=dtype, device=device, noncontiguous=not bins_contig)
                    for ct in bin_ct
                ]
                for dim in range(D):
                    bin_edges_noncontig[dim].copy_(bin_edges[dim])
                bin_edges = bin_edges_noncontig
            for dim in range(D):
                self.assertEqual(bin_edges[dim].is_contiguous(), bins_contig)
            self._test_histogramdd_numpy(values, bin_edges, None, weights, density)
@onlyCPU
@dtypes(torch.float32)
def test_histogram_error_handling(self, device, dtype):
    """Check that torch.histogram raises the documented errors for invalid
    dtypes, shapes, bin counts, ranges, and argument combinations."""
    # Integral inputs are unsupported.
    with self.assertRaisesRegex(RuntimeError, 'not implemented for'):
        values = make_tensor((), dtype=torch.int32, device=device)
        torch.histogram(values, 1)

    # Pick any float dtype different from the one under test.
    inconsistent_dtype = torch.float32 if dtype != torch.float32 else torch.float64

    with self.assertRaisesRegex(RuntimeError, 'input tensor and bins tensors should have the same dtype'):
        values = make_tensor((), dtype=dtype, device=device)
        bins = make_tensor((), dtype=inconsistent_dtype, device=device)
        torch.histogram(values, bins)

    with self.assertRaisesRegex(RuntimeError, 'input tensor and weight tensor should have the same dtype'):
        values = make_tensor((), dtype=dtype, device=device)
        weight = make_tensor((), dtype=inconsistent_dtype, device=device)
        torch.histogram(values, 1, weight=weight)

    with self.assertRaisesRegex(RuntimeError, 'input tensor and hist tensor should have the same dtype'):
        values = make_tensor((), dtype=dtype, device=device)
        hist = make_tensor((), dtype=inconsistent_dtype, device=device)
        bin_edges = make_tensor((), dtype=dtype, device=device)
        torch.histogram(values, 1, out=(hist, bin_edges))

    with self.assertRaisesRegex(RuntimeError, 'input tensor and bin_edges tensor should have the same dtype'):
        values = make_tensor((), dtype=dtype, device=device)
        hist = make_tensor((), dtype=dtype, device=device)
        bin_edges = make_tensor((), dtype=inconsistent_dtype, device=device)
        torch.histogram(values, 1, out=(hist, bin_edges))

    with self.assertRaisesRegex(RuntimeError, 'bins tensor should have one dimension'):
        t = make_tensor((2, 2), dtype=dtype, device=device)
        torch.histogram(t, t)

    with self.assertRaisesRegex(RuntimeError, 'bins tensor should have at least 1 element'):
        t = make_tensor((0), dtype=dtype, device=device)
        torch.histogram(t, t)

    with self.assertRaisesRegex(RuntimeError, 'bins must be > 0'):
        values = make_tensor((), dtype=dtype, device=device)
        torch.histogram(values, -1)

    # NOTE: the backslash continues the regex string literal onto the next line.
    with self.assertRaisesRegex(RuntimeError, 'if weight tensor is provided it should have the same shape \
as the input tensor excluding its innermost dimension'):
        values = make_tensor((2, 2), dtype=dtype, device=device)
        weight = make_tensor((1), dtype=dtype, device=device)
        torch.histogram(values, 1, weight=weight)

    # range= is only valid together with an integer bin count, not bin edges.
    with self.assertRaisesRegex(TypeError, 'received an invalid combination of arguments'):
        values = make_tensor((), dtype=dtype, device=device)
        bin_edges = make_tensor((), dtype=dtype, device=device)
        torch.histogram(values, bin_edges, range=(0, 1))

    with self.assertRaisesRegex(RuntimeError, 'min should not exceed max'):
        values = make_tensor((), dtype=dtype, device=device)
        torch.histogram(values, 2, range=(1, 0))

    with self.assertRaisesRegex(RuntimeError, r'range \[nan, nan\] is not finite'):
        values = torch.tensor([float("nan")], device=device, dtype=dtype)
        torch.histogram(values, 2)
# Tests to ensure that reduction functions employing comparison operators are usable when there
# exists a zero dimension (i.e. when the tensors are empty) in the tensor. These tests specifically
# cater to functions where specifying the `dim` parameter is necessary.
def test_tensor_compare_ops_empty(self, device):
    """Comparison-based reductions (amax/amin/max/min/median) over a tensor
    with a zero-sized dimension, checked for shape and against numpy."""
    shape = (2, 0, 4)
    master_input = torch.randn(shape, device=device)
    np_input = np.empty(shape)
    test_functions = [
        ('amax', torch.amax, np.amax),
        ('amin', torch.amin, np.amin),
        # max/min/median return (values, indices); compare the values only.
        ('max', lambda *args, **kwargs: torch.max(*args, **kwargs).values, np.max),
        ('min', lambda *args, **kwargs: torch.min(*args, **kwargs).values, np.min),
        ('median', lambda *args, **kwargs: torch.median(*args, **kwargs).values, np.median),
    ]
    for name, fn, np_function in test_functions:
        # Check if reduction happens along the specified dim with and without keepdim. Check with
        # numpy to maintain compatibility with numpy functions.
        error_msg = f"test function: {name}"
        self.assertEqual(torch.empty((2, 0), device=device), fn(master_input, dim=2), msg=error_msg)
        self.assertEqual(np_function(np_input, axis=2),
                         fn(master_input, dim=2).cpu().numpy(), msg=error_msg, exact_dtype=False)

        self.assertEqual(torch.empty((2, 0), device=device), fn(master_input, dim=-1), msg=error_msg)
        self.assertEqual(np_function(np_input, axis=-1),
                         fn(master_input, dim=-1).cpu().numpy(), msg=error_msg, exact_dtype=False)

        self.assertEqual(torch.empty((2, 0, 1), device=device), fn(master_input, dim=2, keepdim=True),
                         msg=error_msg)
        self.assertEqual(np_function(np_input, axis=2, keepdims=True),
                         fn(master_input, dim=2, keepdim=True).cpu().numpy(), msg=error_msg, exact_dtype=False)

        self.assertEqual(torch.empty((2, 0, 1), device=device), fn(master_input, dim=-1, keepdim=True),
                         msg=error_msg)
        self.assertEqual(np_function(np_input, axis=-1, keepdims=True),
                         fn(master_input, dim=-1, keepdim=True).cpu().numpy(), msg=error_msg, exact_dtype=False)

        # Check if function raises error on specified zero'd dimension as reduction dim.
        self.assertRaisesRegex(IndexError, "Expected reduction dim", lambda: fn(master_input, dim=1))
# Tests to ensure that reduction of zero-dim tensors (i.e. empty tensors) using comparison operators
# raises an error if no `dim` parameter is specified. This exists separately from tests in
# test_tensor_compare_ops_empty because not specifying a `dim` parameter in the former tests does
# not throw errors. Also, checking the return type of argmax requires supplying a different dtype
# argument than that for the input tensor. There is also variation in numpy testing.
# NOTE(review): "argmix" in the method name is a typo for "argmin", kept to
# avoid renaming a discoverable test.
def test_tensor_compare_ops_argmax_argmix_kthvalue_dim_empty(self, device):
    shape = (2, 0, 4)
    master_input = torch.randn(shape, device=device)
    np_input = np.empty(shape)
    # Entries: (name, torch fn, kwargs for the expected empty result, numpy reference)
    test_functions = [
        ('argmax', torch.argmax, {'dtype': torch.int64}, np.argmax),
        ('argmin', torch.argmin, {'dtype': torch.int64}, np.argmin),
        ('kthvalue', lambda *args, k=1, **kwargs: torch.kthvalue(*args, k=1, **kwargs).values,
         {}, lambda *args, k=1, axis=None, **kwargs: np.partition(*args, k, **kwargs).take(k - 1, axis=axis))
    ]
    for name, fn, dtype, np_function in test_functions:
        error_msg = f"test function: {name}"
        self.assertEqual(torch.empty((2, 0), device=device, **dtype), fn(master_input, dim=2), msg=error_msg)
        self.assertEqual(
            np_function(np_input, axis=2), fn(master_input, dim=2).cpu().numpy(), msg=error_msg, exact_dtype=False
        )

        self.assertEqual(torch.empty((2, 0), device=device, **dtype), fn(master_input, dim=-1), msg=error_msg)
        self.assertEqual(
            np_function(np_input, axis=-1), fn(master_input, dim=-1).cpu().numpy(), msg=error_msg, exact_dtype=False
        )

        # keepdim variant does not exist for numpy
        self.assertEqual(torch.empty((2, 0, 1), device=device, **dtype), fn(master_input, dim=2, keepdim=True),
                         msg=error_msg)
        self.assertEqual(torch.empty((2, 0, 1), device=device, **dtype), fn(master_input, dim=-1, keepdim=True),
                         msg=error_msg)

        # Check if function raises error on specified zero'd dimension as reduction dim.
        self.assertRaisesRegex(IndexError, "Expected reduction dim", lambda: fn(master_input, dim=1))
        if name != 'kthvalue':
            # Without dim=, reducing an empty tensor must also raise.
            self.assertRaisesRegex(IndexError, "Expected reduction dim", lambda: fn(master_input))
# Tests to ensure that reduction of zero-dim tensors (i.e. empty tensors) using math operators works when a
# non-zero dim is specified for the reduction and throws an error when the dim specified is 0. Although
# there is some repetition with test_tensor_compare_ops_optional_dim_empty and test_tensor_compare_ops_empty,
# these tests are kept separate since tests for math operators also require checking for correctness of the
# returned data using allclose() or isinf() which do not exist in the former tests.
@skipIfNoSciPy
def test_tensor_reduce_ops_empty(self, device):
    """Math reductions on a tensor with a zero-sized dim: reducing a
    non-empty dim matches numpy/scipy; reducing the empty dim yields each
    op's identity value (e.g. 1 for prod, 0 for sum, nan for mean)."""
    from scipy.special import logsumexp
    shape = (2, 0, 4)
    master_input = torch.randn(shape, device=device)
    np_input = np.empty(shape)
    # Entries: (name, torch fn, identity value produced when reducing the
    # empty dim, numpy/scipy reference)
    test_functions = [
        ('prod', torch.prod, 1., np.prod),
        ('sum', torch.sum, 0., np.sum),
        ('norm', torch.norm, 0., np.linalg.norm),
        ('mean', torch.mean, nan, np.mean),
        ('var', torch.var, nan, np.var),
        ('std', torch.std, nan, np.std),
        ('logsumexp', torch.logsumexp, -inf, logsumexp),
    ]
    for name, fn, return_value, np_function in test_functions:
        # Check if reduction happens along the specified dimension.
        error_msg = f"test function: {name}"
        self.assertEqual(torch.empty((2, 0), device=device), fn(master_input, dim=2), msg=error_msg)
        self.assertEqual(np_function(np_input, axis=2), fn(master_input, dim=2).cpu().numpy(), msg=error_msg,
                         exact_dtype=False)

        self.assertEqual(torch.empty((2, 0), device=device), fn(master_input, dim=-1), msg=error_msg)
        self.assertEqual(np_function(np_input, axis=-1), fn(master_input, dim=-1).cpu().numpy(), msg=error_msg,
                         exact_dtype=False)

        self.assertEqual(torch.empty((2, 0, 1), device=device), fn(master_input, dim=2, keepdim=True),
                         msg=error_msg)
        self.assertEqual(np_function(np_input, axis=2, keepdims=True), fn(master_input, dim=2, keepdim=True),
                         msg=error_msg, exact_dtype=False)

        self.assertEqual(torch.empty((2, 0, 1), device=device), fn(master_input, dim=-1, keepdim=True),
                         msg=error_msg)
        self.assertEqual(np_function(np_input, axis=-1, keepdims=True), fn(master_input, dim=-1, keepdim=True),
                         msg=error_msg, exact_dtype=False)

        # Reducing the zero-sized dim (dim=1) yields the op's identity value.
        self.assertEqual(torch.full((2, 4), return_value, device=device), fn(master_input, dim=1), msg=error_msg)
        self.assertEqual(torch.full((2, 4), return_value, device=device), fn(master_input, dim=-2), msg=error_msg)
        self.assertEqual(torch.full((2, 1, 4), return_value, device=device), fn(master_input, dim=1, keepdim=True),
                         msg=error_msg)
        self.assertEqual(torch.full((2, 1, 4), return_value, device=device), fn(master_input, dim=-2, keepdim=True),
                         msg=error_msg)

        if name != 'logsumexp':
            # The scipy function does not work for reduction the zero dimension
            self.assertEqual(np.float32(np_function(np_input, axis=1)), fn(master_input, dim=1).cpu().numpy(),
                             msg=error_msg)
            self.assertEqual(np.float32(np_function(np_input, axis=-2)), fn(master_input, dim=-2).cpu().numpy(),
                             msg=error_msg)
            self.assertEqual(np.float32(np_function(np_input, axis=1, keepdims=True)),
                             fn(master_input, dim=1, keepdim=True).cpu().numpy(),
                             msg=error_msg)
            self.assertEqual(np.float32(np_function(np_input, axis=-2, keepdims=True)),
                             fn(master_input, dim=-2, keepdim=True).cpu().numpy(),
                             msg=error_msg)

            # logsumexp throws a type error when not specifying dim so test separately.
            self.assertEqual(torch.full((), return_value, device=device), fn(master_input), msg=error_msg)
        else:
            self.assertRaises(TypeError, lambda: fn(master_input))
# Tests to ensure that any() and all() functions work with zero-dim tensors. Kept separate from
# other tests for checking reduction with zero-dim tensors because these tests have significantly
# different testing behaviour than that used for the former tests.
def test_reduction_empty_any_all(self, device):
    """any()/all() over a tensor with a zero-sized dimension.

    Reducing the empty dim yields the reduction identity (all-False for
    any(), all-True for all()), the output shape follows the usual
    reduction rules, and the result dtype is bool for every input dtype
    except uint8, which is preserved for compatibility.
    """
    shape = (2, 0, 4)
    x = torch.randn(shape, device=device)

    for dtype in all_types_and_complex_and(torch.half, torch.bool):
        # Refer: [all, any uint8 compatibility]
        if dtype == torch.uint8:
            out_dtype = torch.uint8
        else:
            out_dtype = torch.bool  # output of all/any is bool irrespective of input dtype

        # (The original code also computed an unused second conversion
        # `yb = x.to(dtype)`; removed as dead code.)
        xb = x.to(dtype)

        # any
        self.assertEqual((2, 0), xb.any(2).shape)
        self.assertEqual((2, 0, 1), xb.any(2, keepdim=True).shape)
        self.assertEqual(torch.zeros((2, 4), device=device, dtype=out_dtype), xb.any(1))
        self.assertEqual(torch.zeros((2, 1, 4), device=device, dtype=out_dtype), xb.any(1, keepdim=True))
        self.assertEqual(torch.zeros((), device=device, dtype=out_dtype), xb.any())

        # all
        self.assertEqual((2, 0), xb.all(2).shape)
        self.assertEqual((2, 0, 1), xb.all(2, keepdim=True).shape)
        self.assertEqual(torch.ones((2, 4), device=device, dtype=out_dtype), xb.all(1))
        self.assertEqual(torch.ones((2, 1, 4), device=device, dtype=out_dtype), xb.all(1, keepdim=True))
        self.assertEqual(torch.ones((), device=device, dtype=out_dtype), xb.all())
# TODO: can these be merged with their respective OpInfos?
def test_reduce_dtype(self, device):
    """Check that passing dtype= to a reduction produces the same gradient
    (and that the gradient keeps the input's dtype) as reducing without it."""
    def test_reduction(op, has_no_dim, takes_dtype=True):
        x = torch.randn(3, 3, dtype=torch.float, requires_grad=True, device=device)

        # Full (dim-less) reduction, only for ops that support it.
        if has_no_dim:
            grad1, = torch.autograd.grad([op(x)], [x])
            grad2, = torch.autograd.grad([op(x, dtype=torch.double)], [x])
            self.assertEqual(grad1, grad2)
            self.assertEqual(grad2.dtype, torch.float)

        # Dim-wise reduction; gi is the incoming gradient for the output.
        gi = torch.randn(op(x, dim=0).shape, dtype=torch.float, device=device)
        grad1, = torch.autograd.grad([op(x, dim=0)], [x], gi)
        if takes_dtype:
            grad2, = torch.autograd.grad([op(x, dim=0, dtype=torch.double)], [x], gi.double())
        else:
            # Op has no dtype kwarg (e.g. logcumsumexp): upcast the input instead.
            grad2, = torch.autograd.grad([op(x.double(), dim=0)], [x], gi.double())
        self.assertEqual(grad1, grad2)
        self.assertEqual(grad2.dtype, torch.float)

    test_reduction(torch.sum, True)
    test_reduction(torch.prod, True)
    test_reduction(torch.cumsum, False)
    test_reduction(torch.cumprod, False)
    test_reduction(torch.logcumsumexp, False, takes_dtype=False)
@ops(reference_masked_ops)
def test_reference_masked(self, device, dtype, op):
    """Test masked reduction operations on strided-only tensors using
    numpy reductions as reference.
    """
    def to_numpy(input):
        # bfloat16 has no numpy counterpart; round-trip through float32.
        if input.dtype is torch.bfloat16:
            return input.cpu().to(torch.float32).numpy()
        else:
            return input.cpu().numpy()

    samples = op.sample_inputs_func(op, device, dtype, requires_grad=False)
    for sample_input in samples:
        t = sample_input.input
        actual = op(t, *sample_input.args, **sample_input.kwargs)
        # Dtypes can't be compared exactly when the reference computes in a
        # wider type (bfloat16 input, or int inputs promoted to float).
        exact_dtype = not (t.dtype is torch.bfloat16
                           or (op.promotes_int_to_float and not torch.is_floating_point(t)))
        expected = op.ref(to_numpy(t), *sample_input.args,
                          **dict(
                              # `identity` is mapped to numpy reduction `initial` argument
                              identity=torch._masked._reduction_identity(op.name, t),
                              **sample_input.kwargs))

        # Workaround https://github.com/pytorch/pytorch/issues/66556
        expected = np.asarray(expected)  # transform numpy scalars to numpy.ndarray instances

        # Only embed tensor contents in the failure message for small inputs.
        msg = ("Failed to produce expected results! Input tensor was"
               " {0}, torch result is {1}, and reference result is"
               " {2}.").format(t, actual, expected) if t.numel() < 10 else None

        self.assertEqual(actual, expected, msg, exact_dtype=exact_dtype)
# Generate per-device (CPU/CUDA/...) variants of the tests defined above.
instantiate_device_type_tests(TestReductions, globals())

if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_reductions.py |
# Owner(s): ["module: codegen"]
import torch
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfTorchDynamo, TEST_WITH_TORCHDYNAMO
from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs
from torch.utils._pytree import tree_map
from torch.fx.experimental.proxy_tensor import make_fx
from torch.fx.passes.reinplace import reinplace
import unittest
def are_aliased(x, y):
    """Return True iff `x` and `y` alias each other through tensor views.

    Two tensors alias when one is a view of the other (`._base` points at
    the other tensor) or both are views of the same base tensor.  Two
    tensors that are both non-views are reported as not aliased.
    """
    base_x, base_y = x._base, y._base
    if base_x is None and base_y is None:
        return False
    if base_y is None:
        # x is a view; they alias iff y is its base.
        return base_x is y
    if base_x is None:
        # y is a view; they alias iff x is its base.
        return base_y is x
    # Both are views; they alias iff they share a base.
    return base_x is base_y
# We can unify testing and use functionalize() here instead
# if/when functorch moves into core.
# This is basically a crappy version of `functionalize()` for single-tensor-arg inputs.
def _functionalize(f, *, reapply_views: bool):
    """Wrap a single-tensor-argument function `f` so it runs under
    functionalization.

    The wrapper wraps the input in a functional tensor, enables
    functionalization while `f` runs, then syncs and unwraps the results.
    Data mutations that `f` made to its input are copied back onto the
    caller's tensor; metadata mutations are not propagated (known
    deficiency, see comment below).
    """
    def wrapped(a):
        input_functional = torch._to_functional_tensor(a)
        torch._enable_functionalization(reapply_views=reapply_views)
        try:
            out = f(input_functional)
        finally:
            # Always restore global state, even if `f` raised.
            torch._disable_functionalization()
        # Flush pending mutations on the input before unwrapping it.
        torch._sync(input_functional)
        inpt_new = torch._from_functional_tensor(input_functional)
        if inpt_new is not a:
            # Existing deficiency in functionalize():
            # we don't correctly mutate input metadata (yet?)
            if inpt_new.shape == a.shape:
                a.copy_(inpt_new)
        tree_map(torch._sync, out)
        out_unwrapped = tree_map(torch._from_functional_tensor, out)
        return out_unwrapped
    return wrapped
@unittest.skipIf(TEST_WITH_TORCHDYNAMO, "https://github.com/pytorch/pytorch/issues/81457")
class TestFunctionalization(TestCase):
def get_logs(self, func, inpt, *, reapply_views=False, run_reinplace=False):
    """Trace `func` under functionalization and return the generated FX code.

    When `run_reinplace` is True, the reinplacing pass is applied to the
    traced graph first (only valid together with reapply_views=True).
    """
    saved_input = inpt.clone()
    traced = make_fx(_functionalize(func, reapply_views=reapply_views))(inpt)
    if run_reinplace:
        # Reinplacing needs an unmutated copy of the input, since make_fx
        # mutates the tensor it traces with.
        traced = reinplace(traced, saved_input)
    return traced.code
def assert_functionalization(self, func, inpt, *, reapply_views=False, mutated_input_metadata=False):
    """Run `func` eagerly, functionalized, and functionalized+reinplaced,
    then assert that all three agree on outputs and input mutations.

    Set `mutated_input_metadata=True` for functions that mutate input
    metadata, which the _functionalize() shim does not propagate; the
    input-mutation checks are skipped in that case.
    """
    input_clone = inpt.clone()
    input_clone2 = inpt.clone()
    input_clone3 = inpt.clone()

    # Compare outputs (and mutated inputs), with and without functionalization.
    out_ref = func(inpt)
    out_functional = _functionalize(func, reapply_views=reapply_views)(input_clone)

    # The reinplacing pass is only valid to run with reapply_views=True.
    functional_func = make_fx(_functionalize(func, reapply_views=True))(input_clone2)
    reinplace_func = reinplace(make_fx(_functionalize(func, reapply_views=True))(input_clone2), input_clone2)

    # NOTE: for now, need to pass in fresh inputs here, because make_fx
    # will directly mutate the inputs that you trace with.
    # Once this is fixed we can clean this up.
    out_reinplace = reinplace_func(input_clone3)

    # functionalize() deficiency: input metadata mutations aren't propagated properly,
    # so we just need to skip checks here for the tests that exercise that.
    if not mutated_input_metadata:
        self.assertEqual(inpt, input_clone)  # input mutations should still occur
        self.assertEqual(inpt, input_clone3)

    # Handle tests with multi-tensor outputs
    if isinstance(out_ref, tuple):
        out_refs, out_functionals, out_reinplaces = list(out_ref), list(out_functional), list(out_reinplace)
    else:
        out_refs, out_functionals, out_reinplaces = [out_ref], [out_functional], [out_reinplace]

    for out_ref_, out_functional_, out_reinplace_ in zip(out_refs, out_functionals, out_reinplaces):
        self.assertEqual(out_ref_, out_functional_)
        self.assertEqual(out_ref_, out_reinplace_)
def test_save_for_backwards_segfault(self):
    """Regression test: calling exp() on a functional wrapper around a
    LoggingTensor (which saves the input for backward) must not segfault."""
    wrapped = torch._to_functional_tensor(LoggingTensor(torch.randn(2, 2))).requires_grad_(True)
    wrapped.exp()
def test_multiple_views_of_same_base(self):
    """Two views of one base must both observe an in-place mutation of
    that base under functionalization."""
    def f(x):
        first_view = x.view(-1)
        second_view = x.view(-1)
        x.add_(1)
        # Both views should now reflect the base mutation.
        from_first = first_view + 1
        from_second = second_view + 1
        return from_second
    self.assert_functionalization(f, torch.ones(4))
def test_simple(self):
    """One view op plus one in-place op on the view: the mutation must be
    scattered back into the base, and the traced graph must match the
    expected functionalized (view_copy) and reinplaced (view) forms."""
    def f(x):
        # simple test: 1 view op, 1 inplace op
        tmp = torch.ones(4, 2)
        y = x.view(4, 2)
        y.add_(tmp)
        z = x * x
        return y
    self.assert_functionalization(f, torch.ones(4, 2))
    logs = self.get_logs(f, torch.ones(4, 2))
    self.assertExpectedInline(logs, """\
def forward(self, a_1):
ones = torch.ops.aten.ones.default([4, 2], device = device(type='cpu'), pin_memory = False)
view_copy_default = torch.ops.aten.view_copy.default(a_1, [4, 2])
add_tensor = torch.ops.aten.add.Tensor(view_copy_default, ones); view_copy_default = ones = None
view_copy_default_1 = torch.ops.aten.view_copy.default(add_tensor, [4, 2])
mul_tensor = torch.ops.aten.mul.Tensor(view_copy_default_1, view_copy_default_1)
copy__default = torch.ops.aten.copy_.default(a_1, view_copy_default_1); a_1 = view_copy_default_1 = None
return add_tensor
""")

    # With reapply_views=True + reinplacing, view_copy ops become plain views.
    reinplaced_logs = self.get_logs(f, torch.ones(4, 2), reapply_views=True, run_reinplace=True)
    self.assertExpectedInline(reinplaced_logs, """\
def forward(self, a_1):
ones = torch.ops.aten.ones.default([4, 2], device = device(type='cpu'), pin_memory = False)
view_default = torch.ops.aten.view.default(a_1, [4, 2])
add_tensor = torch.ops.aten.add.Tensor(view_default, ones); view_default = ones = None
view_default_1 = torch.ops.aten.view.default(add_tensor, [4, 2])
mul_tensor = torch.ops.aten.mul.Tensor(view_default_1, view_default_1)
copy__default = torch.ops.aten.copy_.default(a_1, view_default_1); a_1 = view_default_1 = None
return add_tensor
""")
def test_simple_out(self):
    """out= variant: the out tensor is resized from size 0; the traced
    graph should compute the result functionally (no resize/copy into z)."""
    def f(x):
        tmp = torch.ones(4, 2)
        y = x.view(4, 2)
        # the out= tensor will get resized, since it has size=0 to start.
        z = torch.empty(())
        torch.add(y, tmp, out=z)
        w = z * z
        return w
    self.assert_functionalization(f, torch.ones(4, 2))
    logs = self.get_logs(f, torch.ones(4, 2))
    self.assertExpectedInline(logs, """\
def forward(self, a_1):
ones = torch.ops.aten.ones.default([4, 2], device = device(type='cpu'), pin_memory = False)
view_copy_default = torch.ops.aten.view_copy.default(a_1, [4, 2]); a_1 = None
empty = torch.ops.aten.empty.memory_format([], device = device(type='cpu'), pin_memory = False)
add_tensor = torch.ops.aten.add.Tensor(view_copy_default, ones); view_copy_default = ones = None
mul_tensor = torch.ops.aten.mul.Tensor(add_tensor, add_tensor); add_tensor = None
return mul_tensor
""")

    reinplaced_logs = self.get_logs(f, torch.ones(4, 2), reapply_views=True, run_reinplace=True)
    self.assertExpectedInline(reinplaced_logs, """\
def forward(self, a_1):
ones = torch.ops.aten.ones.default([4, 2], device = device(type='cpu'), pin_memory = False)
view_default = torch.ops.aten.view.default(a_1, [4, 2]); a_1 = None
empty = torch.ops.aten.empty.memory_format([], device = device(type='cpu'), pin_memory = False)
add_tensor = torch.ops.aten.add.Tensor(view_default, ones); view_default = ones = None
mul_tensor = torch.ops.aten.mul.Tensor(add_tensor, add_tensor); add_tensor = None
return mul_tensor
""")
def test_multi_out(self):
    """out= op returning a tuple of tensors (aminmax) must be
    functionalized into the plain op plus tuple unpacking."""
    def f(x):
        # aminmax.out returns a tuple of tensors.
        # functionalization should properly handle the tuple.
        out_min = torch.empty(4)
        out_max = torch.empty(4)
        torch.aminmax(x, dim=0, out=(out_max, out_min))
        return out_max
    self.assert_functionalization(f, torch.arange(8, dtype=torch.float32))
    logs = self.get_logs(f, torch.arange(8, dtype=torch.float32))
    self.assertExpectedInline(logs, """\
def forward(self, a_1):
empty = torch.ops.aten.empty.memory_format([4], device = device(type='cpu'), pin_memory = False)
empty_1 = torch.ops.aten.empty.memory_format([4], device = device(type='cpu'), pin_memory = False)
aminmax_default = torch.ops.aten.aminmax.default(a_1, dim = 0); a_1 = None
getitem = aminmax_default[0]
getitem_1 = aminmax_default[1]; aminmax_default = None
return getitem
""")

    reinplaced_logs = self.get_logs(f, torch.arange(8, dtype=torch.float32), reapply_views=True, run_reinplace=True)
    self.assertExpectedInline(reinplaced_logs, """\
def forward(self, a_1):
empty = torch.ops.aten.empty.memory_format([4], device = device(type='cpu'), pin_memory = False)
empty_1 = torch.ops.aten.empty.memory_format([4], device = device(type='cpu'), pin_memory = False)
aminmax_default = torch.ops.aten.aminmax.default(a_1, dim = 0); a_1 = None
getitem = aminmax_default[0]
getitem_1 = aminmax_default[1]; aminmax_default = None
return getitem
""")
def test_tensor_ctr(self):
    """Tensor created inside the traced function (torch.tensor literal):
    it is lifted as a graph constant via lift_fresh_copy, and mutations of
    its view are functionalized."""
    def f(x):
        y = torch.tensor((1, 2, 3))
        z = y.view(-1)
        z.add_(1)
        return y

    inpt = torch.arange(3, dtype=torch.float32)
    self.assert_functionalization(f, inpt)

    logs = self.get_logs(f, inpt)
    self.assertExpectedInline(logs, """\
def forward(self, a_1):
_tensor_constant0 = self._tensor_constant0
lift_fresh = torch.ops.aten.lift_fresh_copy.default(_tensor_constant0); _tensor_constant0 = None
view_copy_default = torch.ops.aten.view_copy.default(lift_fresh, [-1]); lift_fresh = None
add_tensor = torch.ops.aten.add.Tensor(view_copy_default, 1); view_copy_default = None
view_copy_default_1 = torch.ops.aten.view_copy.default(add_tensor, [3]); add_tensor = None
return view_copy_default_1
""")

    # Reinplaced form may mutate the lifted constant's copy in place (add_).
    reinplaced_logs = self.get_logs(f, inpt, reapply_views=True, run_reinplace=True)
    self.assertExpectedInline(reinplaced_logs, """\
def forward(self, a_1):
_tensor_constant0 = self._tensor_constant0
lift_fresh = torch.ops.aten.lift_fresh_copy.default(_tensor_constant0); _tensor_constant0 = None
view_default = torch.ops.aten.view.default(lift_fresh, [-1]); lift_fresh = None
add_tensor = torch.ops.aten.add_.Tensor(view_default, 1)
view_default_1 = torch.ops.aten.view.default(view_default, [3]); view_default = None
return view_default_1
""")
def test_tensor_list_mixed_functional_nonfunctional(self):
nonfunctional_tensor = torch.ones(2, dtype=torch.long)
def f(x):
# simple test: 1 view op, 1 inplace op
functional_tensor = torch.ones(2, dtype=torch.long)
out = x[functional_tensor, nonfunctional_tensor]
return out
out = f(torch.ones(2, 2))
out_functional = _functionalize(f, reapply_views=True)(torch.ones(2, 2))
self.assertEqual(out, out_functional)
def test_inplace_on_non_view(self):
    """In-place op applied directly to the input (not to a view): the
    input's empty ViewMeta stack is a special case; the mutation becomes a
    functional add followed by a copy_ back into the input."""
    def f(x):
        # test for the case where we functionalize an inplace op on the other tensor - not a view.
        # This is worth checking because the tensor will have an empty ViewMeta stack, which needs to be special cased.
        tmp = torch.ones(4, 2)
        y = x.view(4, 2)
        x.add_(tmp)
        return y
    self.assert_functionalization(f, torch.ones(4, 2))
    logs = self.get_logs(f, torch.ones(4, 2))
    self.assertExpectedInline(logs, """\
def forward(self, a_1):
ones = torch.ops.aten.ones.default([4, 2], device = device(type='cpu'), pin_memory = False)
view_copy_default = torch.ops.aten.view_copy.default(a_1, [4, 2])
add_tensor = torch.ops.aten.add.Tensor(a_1, ones); ones = None
copy__default = torch.ops.aten.copy_.default(a_1, add_tensor); a_1 = None
view_copy_default_1 = torch.ops.aten.view_copy.default(add_tensor, [4, 2]); add_tensor = None
return view_copy_default_1
""")

    reinplaced_logs = self.get_logs(f, torch.ones(4, 2), reapply_views=True, run_reinplace=True)
    self.assertExpectedInline(reinplaced_logs, """\
def forward(self, a_1):
ones = torch.ops.aten.ones.default([4, 2], device = device(type='cpu'), pin_memory = False)
view_default = torch.ops.aten.view.default(a_1, [4, 2])
add_tensor = torch.ops.aten.add.Tensor(a_1, ones); ones = None
copy__default = torch.ops.aten.copy_.default(a_1, add_tensor); a_1 = None
view_default_1 = torch.ops.aten.view.default(add_tensor, [4, 2]); add_tensor = None
return view_default_1
""")
# Some ops that are mutable are neither inplace nor out= ops.
# They also need special handling.
def test_mutable_op_not_inplace_or_other(self):
    """_fused_moving_avg_obs_fq_helper mutates several arguments without
    being an inplace or out= op; functionalization must route it through
    the *_functional variant and copy the mutated state back."""
    def f(x):
        return torch._fused_moving_avg_obs_fq_helper(x, x, x, x, x, x, x, 1.0, 0, 1, 0)

    logs = self.get_logs(f, torch.ones(1))
    self.assertExpectedInline(logs, """\
def forward(self, a_1):
_fused_moving_avg_obs_fq_helper_functional_default = torch.ops.aten._fused_moving_avg_obs_fq_helper_functional.default(a_1, a_1, a_1, a_1, a_1, a_1, a_1, 1.0, 0, 1, 0)
getitem = _fused_moving_avg_obs_fq_helper_functional_default[0]
getitem_1 = _fused_moving_avg_obs_fq_helper_functional_default[1]
getitem_2 = _fused_moving_avg_obs_fq_helper_functional_default[2]
getitem_3 = _fused_moving_avg_obs_fq_helper_functional_default[3]
getitem_4 = _fused_moving_avg_obs_fq_helper_functional_default[4]
getitem_5 = _fused_moving_avg_obs_fq_helper_functional_default[5]; _fused_moving_avg_obs_fq_helper_functional_default = None
copy__default = torch.ops.aten.copy_.default(a_1, getitem_5); a_1 = getitem_5 = None
return (getitem, getitem_1)
""")  # noqa: B950
def test_as_strided(self):
    """Mutating an as_strided view must be functionalized into
    as_strided_copy + as_strided_scatter back into the base."""
    def f(x):
        y = x.as_strided((2,), (2,), 1)
        y.add_(1)
        return x
    self.assert_functionalization(f, torch.ones(9))
    logs = self.get_logs(f, torch.ones(9))
    self.assertExpectedInline(logs, """\
def forward(self, a_1):
as_strided_copy_default = torch.ops.aten.as_strided_copy.default(a_1, [2], [2], 1)
add_tensor = torch.ops.aten.add.Tensor(as_strided_copy_default, 1); as_strided_copy_default = None
as_strided_scatter_default = torch.ops.aten.as_strided_scatter.default(a_1, add_tensor, [2], [2], 1); add_tensor = None
copy__default = torch.ops.aten.copy_.default(a_1, as_strided_scatter_default); a_1 = None
return as_strided_scatter_default
""")
def test_tensor_list_composite(self):
    """Op taking a TensorList input (block_diag) must functionalize cleanly."""
    def f(x):
        # Test an op with TensorList input
        y = torch.block_diag(x, x)
        return y
    self.assert_functionalization(f, torch.ones(2, 2))
    logs = self.get_logs(f, torch.ones(2, 2))
    self.assertExpectedInline(logs, """\
def forward(self, a_1):
block_diag_default = torch.ops.aten.block_diag.default([a_1, a_1]); a_1 = None
return block_diag_default
""")
def test_cat(self):
    """cat with out=: the size-0 out tensor is resized by the op; the
    traced graph computes cat functionally with no copy into `out`."""
    def f(x):
        out = torch.empty(0)
        torch.cat((x,), out=out)
        return out
    self.assert_functionalization(f, torch.ones(2, 2))
    logs = self.get_logs(f, torch.ones(2, 2))
    self.assertExpectedInline(logs, """\
def forward(self, a_1):
empty = torch.ops.aten.empty.memory_format([0], device = device(type='cpu'), pin_memory = False)
cat_default = torch.ops.aten.cat.default([a_1]); a_1 = None
return cat_default
""")

    reinplaced_logs = self.get_logs(f, torch.ones(2, 2), reapply_views=True, run_reinplace=True)
    self.assertExpectedInline(reinplaced_logs, """\
def forward(self, a_1):
empty = torch.ops.aten.empty.memory_format([0], device = device(type='cpu'), pin_memory = False)
cat_default = torch.ops.aten.cat.default([a_1]); a_1 = None
return cat_default
""")
def test_diagonal(self):
    """View op selecting a subset of the base (diagonal) on a clone: the
    mutation affects only the clone, so no scatter back to the input."""
    def f(x):
        # test: view ops that take a subset of the original tensor (select/diagonal)
        tmp = torch.ones(2)
        y = x.clone().diagonal()
        y.add_(tmp)
        z = x * x
        return z
    self.assert_functionalization(f, torch.ones(2, 2))
    logs = self.get_logs(f, torch.ones(2, 2))
    self.assertExpectedInline(logs, """\
def forward(self, a_1):
ones = torch.ops.aten.ones.default([2], device = device(type='cpu'), pin_memory = False)
clone_default = torch.ops.aten.clone.default(a_1)
diagonal_copy_default = torch.ops.aten.diagonal_copy.default(clone_default); clone_default = None
add_tensor = torch.ops.aten.add.Tensor(diagonal_copy_default, ones); diagonal_copy_default = ones = None
mul_tensor = torch.ops.aten.mul.Tensor(a_1, a_1); a_1 = None
return mul_tensor
""")

    reinplaced_logs = self.get_logs(f, torch.ones(2, 2), reapply_views=True, run_reinplace=True)
    self.assertExpectedInline(reinplaced_logs, """\
def forward(self, a_1):
ones = torch.ops.aten.ones.default([2], device = device(type='cpu'), pin_memory = False)
clone_default = torch.ops.aten.clone.default(a_1)
diagonal_default = torch.ops.aten.diagonal.default(clone_default); clone_default = None
add_tensor = torch.ops.aten.add_.Tensor(diagonal_default, ones); diagonal_default = ones = None
mul_tensor = torch.ops.aten.mul.Tensor(a_1, a_1); a_1 = None
return mul_tensor
""")
def test_diagonal_mutated_input(self):
    """Mutating a diagonal view of the input itself: requires a
    diagonal_scatter back into the base plus a copy_ onto the input."""
    def f(x):
        # simple test: there are pending updates afterwards, which the test syncs manually
        tmp = torch.ones(2)
        y = x.diagonal()
        y.add_(tmp)
        return x
    x = torch.ones(2, 2)
    self.assert_functionalization(f, x)
    logs = self.get_logs(f, torch.ones(2, 2))
    self.assertExpectedInline(logs, """\
def forward(self, a_1):
ones = torch.ops.aten.ones.default([2], device = device(type='cpu'), pin_memory = False)
diagonal_copy_default = torch.ops.aten.diagonal_copy.default(a_1)
add_tensor = torch.ops.aten.add.Tensor(diagonal_copy_default, ones); diagonal_copy_default = ones = None
diagonal_scatter_default = torch.ops.aten.diagonal_scatter.default(a_1, add_tensor); add_tensor = None
copy__default = torch.ops.aten.copy_.default(a_1, diagonal_scatter_default); a_1 = None
return diagonal_scatter_default
""")
def test_split(self):
    """View op returning multiple tensors (split), with a mutation on a
    diagonal of one chunk: the update propagates back through
    diagonal_scatter + slice_scatter into the input."""
    def f(x):
        # test: view ops that return multiple tensors (split)
        tmp = torch.ones(2)
        y1, y2 = x.split(2)
        y3 = y2.diagonal()
        y3.add_(tmp)
        z = x * x
        return y3
    self.assert_functionalization(f, torch.ones(4, 2))
    logs = self.get_logs(f, torch.ones(4, 2))
    self.assertExpectedInline(logs, """\
def forward(self, a_1):
ones = torch.ops.aten.ones.default([2], device = device(type='cpu'), pin_memory = False)
split_copy_tensor = torch.ops.aten.split_copy.Tensor(a_1, 2)
getitem = split_copy_tensor[0]
getitem_1 = split_copy_tensor[1]; split_copy_tensor = None
diagonal_copy_default = torch.ops.aten.diagonal_copy.default(getitem_1); getitem_1 = None
add_tensor = torch.ops.aten.add.Tensor(diagonal_copy_default, ones); diagonal_copy_default = ones = None
split_copy_tensor_1 = torch.ops.aten.split_copy.Tensor(a_1, 2)
getitem_2 = split_copy_tensor_1[0]
getitem_3 = split_copy_tensor_1[1]; split_copy_tensor_1 = None
diagonal_scatter_default = torch.ops.aten.diagonal_scatter.default(getitem_3, add_tensor); getitem_3 = None
slice_scatter_default = torch.ops.aten.slice_scatter.default(a_1, diagonal_scatter_default, 0, 2, 4); diagonal_scatter_default = None
mul_tensor = torch.ops.aten.mul.Tensor(slice_scatter_default, slice_scatter_default)
copy__default = torch.ops.aten.copy_.default(a_1, slice_scatter_default); a_1 = slice_scatter_default = None
return add_tensor
""")  # noqa: B950
def test_view_inplace(self):
    """In-place metadata mutation (transpose_) on the input followed by a
    view mutation; input metadata mutations are not propagated, hence
    mutated_input_metadata=True."""
    def f(x):
        # test: view + inplace op (transpose_)
        tmp = torch.ones(4)
        x.transpose_(1, 0)
        y = x[0]
        y.add_(tmp)
        return x
    self.assert_functionalization(f, torch.ones(4, 2), mutated_input_metadata=True)
    logs = self.get_logs(f, torch.ones(4, 2))
    self.assertExpectedInline(logs, """\
def forward(self, a_1):
ones = torch.ops.aten.ones.default([4], device = device(type='cpu'), pin_memory = False)
transpose_copy_int = torch.ops.aten.transpose_copy.int(a_1, 1, 0)
select_copy_int = torch.ops.aten.select_copy.int(transpose_copy_int, 0, 0); transpose_copy_int = None
add_tensor = torch.ops.aten.add.Tensor(select_copy_int, ones); select_copy_int = ones = None
transpose_copy_int_1 = torch.ops.aten.transpose_copy.int(a_1, 1, 0); a_1 = None
select_scatter_default = torch.ops.aten.select_scatter.default(transpose_copy_int_1, add_tensor, 0, 0); transpose_copy_int_1 = add_tensor = None
transpose_copy_int_2 = torch.ops.aten.transpose_copy.int(select_scatter_default, 1, 0); select_scatter_default = None
transpose_copy_int_3 = torch.ops.aten.transpose_copy.int(transpose_copy_int_2, 1, 0); transpose_copy_int_2 = None
return transpose_copy_int_3
""")  # noqa: B950
# Ops taking List[Optional[Tensor]] (index_put_) must functionalize
# correctly: the in-place index_put_ on a view becomes an out-of-place
# index_put whose result is view'd back and copied into the input.
def test_optional_tensor_list(self):
def f(x):
# test: an operator that takes in a List[Optional[Tensor]] argument
# (index_put)
y = x.view(8)
indices = torch.arange(4)
values = torch.arange(4, dtype=y.dtype)
y.index_put_((indices,), values, accumulate=False)
return y
self.assert_functionalization(f, torch.ones(4, 2))
logs = self.get_logs(f, torch.ones(4, 2))
self.assertExpectedInline(logs, """\
def forward(self, a_1):
view_copy_default = torch.ops.aten.view_copy.default(a_1, [8])
arange = torch.ops.aten.arange.default(4, device = device(type='cpu'), pin_memory = False)
arange_1 = torch.ops.aten.arange.default(4, dtype = torch.float32, device = device(type='cpu'), pin_memory = False)
index_put_default = torch.ops.aten.index_put.default(view_copy_default, [arange], arange_1); view_copy_default = arange = arange_1 = None
view_copy_default_1 = torch.ops.aten.view_copy.default(index_put_default, [4, 2])
copy__default = torch.ops.aten.copy_.default(a_1, view_copy_default_1); a_1 = view_copy_default_1 = None
return index_put_default
""") # noqa: B950
# Scalar (non-tensor) arguments to in-place ops (add_(1), div_(1)) must be
# threaded through functionalization unchanged as plain Python numbers.
def test_scalars(self):
def f(x):
# test: the pass can handle scalar inputs properly
tmp = torch.ones(4, 2)
y = x.view(4, 2)
y.add_(1)
z = 2 * y
z.div_(1)
return z
self.assert_functionalization(f, torch.ones(4, 2))
logs = self.get_logs(f, torch.ones(4, 2))
self.assertExpectedInline(logs, """\
def forward(self, a_1):
ones = torch.ops.aten.ones.default([4, 2], device = device(type='cpu'), pin_memory = False)
view_copy_default = torch.ops.aten.view_copy.default(a_1, [4, 2])
add_tensor = torch.ops.aten.add.Tensor(view_copy_default, 1); view_copy_default = None
mul_tensor = torch.ops.aten.mul.Tensor(add_tensor, 2)
div_tensor = torch.ops.aten.div.Tensor(mul_tensor, 1); mul_tensor = None
view_copy_default_1 = torch.ops.aten.view_copy.default(add_tensor, [4, 2]); add_tensor = None
copy__default = torch.ops.aten.copy_.default(a_1, view_copy_default_1); a_1 = view_copy_default_1 = None
return div_tensor
""")
# In-place ops that change the input's dtype (ge_ produces bool) must be
# detected: functionalization emits an out-of-place ge followed by a
# _to_copy back to the original dtype.
@skipIfTorchDynamo("Test does not work with TorchDynamo")
def test_metadata_change(self):
def f(x):
# ops like ge_() are allowed to change the dtype of the input.
# functionalization should pick up on that.
y = x.clone()
out = y.ge_(0)
return out
self.assert_functionalization(f, torch.ones(4, 2))
logs = self.get_logs(f, torch.ones(4, 2))
self.assertExpectedInline(logs, """\
def forward(self, a_1):
clone_default = torch.ops.aten.clone.default(a_1); a_1 = None
ge_scalar = torch.ops.aten.ge.Scalar(clone_default, 0); clone_default = None
_to_copy_default = torch.ops.aten._to_copy.default(ge_scalar, dtype = torch.float32, layout = torch.strided); ge_scalar = None
return _to_copy_default
""")
# NOTE(review): this call uses a (2, 2) input while the rest of the test
# uses (4, 2); the expected trace below contains no shapes so either works,
# but presumably this was meant to match — confirm.
reinplaced_logs = self.get_logs(f, torch.ones(2, 2), reapply_views=True, run_reinplace=True)
self.assertExpectedInline(reinplaced_logs, """\
def forward(self, a_1):
clone_default = torch.ops.aten.clone.default(a_1); a_1 = None
ge_scalar = torch.ops.aten.ge_.Scalar(clone_default, 0)
_to_copy_default = torch.ops.aten._to_copy.default(clone_default, dtype = torch.float32, layout = torch.strided); clone_default = None
return _to_copy_default
""") # noqa: B950
@skipIfTorchDynamo("Test does not work with TorchDynamo")
def test_metadata_change_out_op(self):
    # out= variants whose `out` tensor has mismatched metadata (here a
    # float out buffer resized/retyped by add on int inputs) must still
    # functionalize to the same result as eager mode.
    def f(t, y):
        out_1 = torch.ones(1)
        return torch.add(t, y, out=out_1)

    lhs, rhs = torch.tensor([1]), torch.tensor([1])
    expected = f(lhs, rhs)

    lhs_func = torch._to_functional_tensor(lhs)
    rhs_func = torch._to_functional_tensor(rhs)
    torch._enable_functionalization(reapply_views=True)
    try:
        actual_func = f(lhs_func, rhs_func)
    finally:
        torch._disable_functionalization()

    self.assertEqual(expected, torch._from_functional_tensor(actual_func))
# If the input is never mutated, functionalization must not emit extra
# view-regeneration ops: the trace contains exactly one op.
def test_only_one_view(self):
def f(x):
# This tests that we don't have any unnecessary views in the trace.
# If the input wasn't mutated, we don't need to regenerate it,
# so there should be a total of 1 op in the output trace.
return x.view(4, 2)
logs = self.get_logs(f, torch.ones(4, 2))
self.assertExpectedInline(logs, """\
def forward(self, a_1):
view_copy_default = torch.ops.aten.view_copy.default(a_1, [4, 2]); a_1 = None
return view_copy_default
""")
# Stress test combining view chains (view/reshape/transpose), metadata
# mutations (unsqueeze_/squeeze_), multi-output views (split), and data
# mutation (add_); checked both in copy-view form and with reapplied
# views + reinplacing.
def test_everything(self):
def f(x):
# test: everything
tmp = torch.ones(2, 2)
x2 = x + x
y = x2.view(8)
z0 = y.reshape(2, 4)
z1 = z0.transpose(1, 0)
z1.unsqueeze_(0)
z1.squeeze_()
z2, z3 = z1.split(2)
z2.add_(tmp)
z4 = z0[0] + z2.reshape(4)
return z2
self.assert_functionalization(f, torch.ones(4, 2))
logs = self.get_logs(f, torch.ones(4, 2))
self.assertExpectedInline(logs, """\
def forward(self, a_1):
ones = torch.ops.aten.ones.default([2, 2], device = device(type='cpu'), pin_memory = False)
add_tensor = torch.ops.aten.add.Tensor(a_1, a_1); a_1 = None
view_copy_default = torch.ops.aten.view_copy.default(add_tensor, [8])
_reshape_alias_copy_default = torch.ops.aten._reshape_alias_copy.default(view_copy_default, [2, 4], [4, 1]); view_copy_default = None
transpose_copy_int = torch.ops.aten.transpose_copy.int(_reshape_alias_copy_default, 1, 0)
unsqueeze_copy_default = torch.ops.aten.unsqueeze_copy.default(transpose_copy_int, 0); transpose_copy_int = None
squeeze_copy_default = torch.ops.aten.squeeze_copy.default(unsqueeze_copy_default); unsqueeze_copy_default = None
split_copy_tensor = torch.ops.aten.split_copy.Tensor(squeeze_copy_default, 2); squeeze_copy_default = None
getitem = split_copy_tensor[0]
getitem_1 = split_copy_tensor[1]; split_copy_tensor = None
add_tensor_1 = torch.ops.aten.add.Tensor(getitem, ones); getitem = ones = None
select_copy_int = torch.ops.aten.select_copy.int(_reshape_alias_copy_default, 0, 0); _reshape_alias_copy_default = None
clone_default = torch.ops.aten.clone.default(add_tensor_1, memory_format = torch.contiguous_format)
_unsafe_view_default = torch.ops.aten._unsafe_view.default(clone_default, [4]); clone_default = None
view_copy_default_1 = torch.ops.aten.view_copy.default(add_tensor, [8]); add_tensor = None
_reshape_alias_copy_default_1 = torch.ops.aten._reshape_alias_copy.default(view_copy_default_1, [2, 4], [4, 1]); view_copy_default_1 = None
transpose_copy_int_1 = torch.ops.aten.transpose_copy.int(_reshape_alias_copy_default_1, 1, 0); _reshape_alias_copy_default_1 = None
unsqueeze_copy_default_1 = torch.ops.aten.unsqueeze_copy.default(transpose_copy_int_1, 0); transpose_copy_int_1 = None
squeeze_copy_default_1 = torch.ops.aten.squeeze_copy.default(unsqueeze_copy_default_1); unsqueeze_copy_default_1 = None
slice_scatter_default = torch.ops.aten.slice_scatter.default(squeeze_copy_default_1, add_tensor_1, 0, 0, 2); squeeze_copy_default_1 = None
unsqueeze_copy_default_2 = torch.ops.aten.unsqueeze_copy.default(slice_scatter_default, 0); slice_scatter_default = None
squeeze_copy_dim = torch.ops.aten.squeeze_copy.dim(unsqueeze_copy_default_2, 0); unsqueeze_copy_default_2 = None
transpose_copy_int_2 = torch.ops.aten.transpose_copy.int(squeeze_copy_dim, 1, 0); squeeze_copy_dim = None
_reshape_alias_copy_default_2 = torch.ops.aten._reshape_alias_copy.default(transpose_copy_int_2, [8], [1]); transpose_copy_int_2 = None
view_copy_default_2 = torch.ops.aten.view_copy.default(_reshape_alias_copy_default_2, [4, 2]); _reshape_alias_copy_default_2 = None
view_copy_default_3 = torch.ops.aten.view_copy.default(view_copy_default_2, [8]); view_copy_default_2 = None
_reshape_alias_copy_default_3 = torch.ops.aten._reshape_alias_copy.default(view_copy_default_3, [2, 4], [4, 1]); view_copy_default_3 = None
select_copy_int_1 = torch.ops.aten.select_copy.int(_reshape_alias_copy_default_3, 0, 0); _reshape_alias_copy_default_3 = None
add_tensor_2 = torch.ops.aten.add.Tensor(select_copy_int_1, _unsafe_view_default); select_copy_int_1 = _unsafe_view_default = None
return add_tensor_1
""") # noqa: B950
# Same program with views reapplied (no *_copy ops) and mutations reinplaced.
reinplaced_logs = self.get_logs(f, torch.ones(4, 2), reapply_views=True, run_reinplace=True)
self.assertExpectedInline(reinplaced_logs, """\
def forward(self, a_1):
ones = torch.ops.aten.ones.default([2, 2], device = device(type='cpu'), pin_memory = False)
add_tensor = torch.ops.aten.add.Tensor(a_1, a_1); a_1 = None
view_default = torch.ops.aten.view.default(add_tensor, [8])
_reshape_alias_default = torch.ops.aten._reshape_alias.default(view_default, [2, 4], [4, 1]); view_default = None
transpose_int = torch.ops.aten.transpose.int(_reshape_alias_default, 1, 0)
unsqueeze_default = torch.ops.aten.unsqueeze.default(transpose_int, 0); transpose_int = None
squeeze_default = torch.ops.aten.squeeze.default(unsqueeze_default); unsqueeze_default = None
split_tensor = torch.ops.aten.split.Tensor(squeeze_default, 2); squeeze_default = None
getitem = split_tensor[0]
getitem_1 = split_tensor[1]; split_tensor = None
add_tensor_1 = torch.ops.aten.add_.Tensor(getitem, ones); ones = None
select_int = torch.ops.aten.select.int(_reshape_alias_default, 0, 0); _reshape_alias_default = None
clone_default = torch.ops.aten.clone.default(getitem, memory_format = torch.contiguous_format)
_unsafe_view_default = torch.ops.aten._unsafe_view.default(clone_default, [4]); clone_default = None
view_default_1 = torch.ops.aten.view.default(add_tensor, [8]); add_tensor = None
_reshape_alias_default_1 = torch.ops.aten._reshape_alias.default(view_default_1, [2, 4], [4, 1]); view_default_1 = None
transpose_int_1 = torch.ops.aten.transpose.int(_reshape_alias_default_1, 1, 0); _reshape_alias_default_1 = None
unsqueeze_default_1 = torch.ops.aten.unsqueeze.default(transpose_int_1, 0); transpose_int_1 = None
squeeze_default_1 = torch.ops.aten.squeeze.default(unsqueeze_default_1); unsqueeze_default_1 = None
unsqueeze_default_2 = torch.ops.aten.unsqueeze.default(squeeze_default_1, 0); squeeze_default_1 = None
squeeze_dim = torch.ops.aten.squeeze.dim(unsqueeze_default_2, 0); unsqueeze_default_2 = None
transpose_int_2 = torch.ops.aten.transpose.int(squeeze_dim, 1, 0); squeeze_dim = None
_reshape_alias_default_2 = torch.ops.aten._reshape_alias.default(transpose_int_2, [8], [1]); transpose_int_2 = None
view_default_2 = torch.ops.aten.view.default(_reshape_alias_default_2, [4, 2]); _reshape_alias_default_2 = None
view_default_3 = torch.ops.aten.view.default(view_default_2, [8]); view_default_2 = None
_reshape_alias_default_3 = torch.ops.aten._reshape_alias.default(view_default_3, [2, 4], [4, 1]); view_default_3 = None
select_int_1 = torch.ops.aten.select.int(_reshape_alias_default_3, 0, 0); _reshape_alias_default_3 = None
add_tensor_2 = torch.ops.aten.add.Tensor(select_int_1, _unsafe_view_default); select_int_1 = _unsafe_view_default = None
return getitem
""")
# With reapply_views=True the trace uses real view ops (aten.view) rather
# than the *_copy variants, while mutations are still made out-of-place.
def test_reapply_views_simple(self):
def f(x):
tmp = torch.ones(4, 2)
y = x.view(4, 2)
y.add_(tmp)
z = x * x
return y
self.assert_functionalization(f, torch.ones(4, 2), reapply_views=True)
logs = self.get_logs(f, torch.ones(4, 2), reapply_views=True)
self.assertExpectedInline(logs, """\
def forward(self, a_1):
ones = torch.ops.aten.ones.default([4, 2], device = device(type='cpu'), pin_memory = False)
view_default = torch.ops.aten.view.default(a_1, [4, 2])
add_tensor = torch.ops.aten.add.Tensor(view_default, ones); view_default = ones = None
view_default_1 = torch.ops.aten.view.default(add_tensor, [4, 2])
mul_tensor = torch.ops.aten.mul.Tensor(view_default_1, view_default_1)
copy__default = torch.ops.aten.copy_.default(a_1, view_default_1); a_1 = view_default_1 = None
return add_tensor
""")
def test_aliases_maintained_after_pass_when_reapplying_views(self):
    # Two views of the same input alias each other inside f; after the
    # pass runs (with views reapplied), the unwrapped outputs must still
    # share storage.
    def f(x):
        tmp = torch.ones(4, 2)
        y = x.view(4, 2)
        z = x.view(4, 2)
        y.add_(tmp)
        return y, z

    wrapped_input = torch._to_functional_tensor(torch.ones(4, 2))
    torch._enable_functionalization(reapply_views=True)
    try:
        out_y, out_z = f(wrapped_input)
        torch._sync(out_y)
        torch._sync(out_z)
    finally:
        torch._disable_functionalization()

    unwrapped_y = torch._from_functional_tensor(out_y)
    unwrapped_z = torch._from_functional_tensor(out_z)
    self.assertTrue(are_aliased(unwrapped_y, unwrapped_z))
# copy_() gets its own test, because it is special cased in functionalization.
# self.copy_(src) decomposes into src.to(self).expand_as(self).
# The four sub-cases below cover every combination of matching/mismatching
# dtype and shape between src and self; each checks both the copy-view
# trace and the reapplied-views + reinplaced trace.
def test_copy_(self):
def f(x):
tmp = torch.zeros(2, 2)
tmp_slice = tmp.diagonal()
y = tmp_slice.copy_(x)
z = y.add_(x)
return z
# Test 1: copy_() with same dtype and shape
# to() is a composite op that noops when the dtype/shape match, so nothing gets logged.
# self.assert_functionalization(f, torch.ones(2))
logs = self.get_logs(f, torch.ones(2))
self.assertExpectedInline(logs, """\
def forward(self, a_1):
zeros = torch.ops.aten.zeros.default([2, 2], device = device(type='cpu'), pin_memory = False)
diagonal_copy_default = torch.ops.aten.diagonal_copy.default(zeros); zeros = None
add_tensor = torch.ops.aten.add.Tensor(a_1, a_1); a_1 = None
return add_tensor
""")
reinplaced_logs = self.get_logs(f, torch.ones(2), reapply_views=True, run_reinplace=True)
self.assertExpectedInline(reinplaced_logs, """\
def forward(self, a_1):
zeros = torch.ops.aten.zeros.default([2, 2], device = device(type='cpu'), pin_memory = False)
diagonal_default = torch.ops.aten.diagonal.default(zeros); zeros = None
add_tensor = torch.ops.aten.add.Tensor(a_1, a_1); a_1 = None
return add_tensor
""")
# Test 2: copy_() with same dtype, different shape
self.assert_functionalization(f, torch.ones(1))
logs = self.get_logs(f, torch.ones(1))
self.assertExpectedInline(logs, """\
def forward(self, a_1):
zeros = torch.ops.aten.zeros.default([2, 2], device = device(type='cpu'), pin_memory = False)
diagonal_copy_default = torch.ops.aten.diagonal_copy.default(zeros); zeros = None
expand_copy_default = torch.ops.aten.expand_copy.default(a_1, [2])
add_tensor = torch.ops.aten.add.Tensor(expand_copy_default, a_1); expand_copy_default = a_1 = None
return add_tensor
""")
reinplaced_logs = self.get_logs(f, torch.ones(1), reapply_views=True, run_reinplace=True)
self.assertExpectedInline(reinplaced_logs, """\
def forward(self, a_1):
zeros = torch.ops.aten.zeros.default([2, 2], device = device(type='cpu'), pin_memory = False)
diagonal_default = torch.ops.aten.diagonal.default(zeros); zeros = None
expand_copy_default = torch.ops.aten.expand_copy.default(a_1, [2])
add_tensor = torch.ops.aten.add_.Tensor(expand_copy_default, a_1); a_1 = None
return expand_copy_default
""")
# Test 3: copy_() with different dtype, same shape
self.assert_functionalization(f, torch.ones(2, dtype=torch.long))
logs = self.get_logs(f, torch.ones(2, dtype=torch.long))
self.assertExpectedInline(logs, """\
def forward(self, a_1):
zeros = torch.ops.aten.zeros.default([2, 2], device = device(type='cpu'), pin_memory = False)
diagonal_copy_default = torch.ops.aten.diagonal_copy.default(zeros); zeros = None
_to_copy_default = torch.ops.aten._to_copy.default(a_1, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
add_tensor = torch.ops.aten.add.Tensor(_to_copy_default, a_1); _to_copy_default = a_1 = None
return add_tensor
""") # noqa: B950
reinplaced_logs = self.get_logs(f, torch.ones(2, dtype=torch.long), reapply_views=True, run_reinplace=True)
self.assertExpectedInline(reinplaced_logs, """\
def forward(self, a_1):
zeros = torch.ops.aten.zeros.default([2, 2], device = device(type='cpu'), pin_memory = False)
diagonal_default = torch.ops.aten.diagonal.default(zeros); zeros = None
_to_copy_default = torch.ops.aten._to_copy.default(a_1, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
add_tensor = torch.ops.aten.add_.Tensor(_to_copy_default, a_1); a_1 = None
return _to_copy_default
""") # noqa: B950
# Test 4: copy_() with different dtype, different shape
self.assert_functionalization(f, torch.ones(1, dtype=torch.long))
logs = self.get_logs(f, torch.ones(1, dtype=torch.long))
self.assertExpectedInline(logs, """\
def forward(self, a_1):
zeros = torch.ops.aten.zeros.default([2, 2], device = device(type='cpu'), pin_memory = False)
diagonal_copy_default = torch.ops.aten.diagonal_copy.default(zeros); zeros = None
_to_copy_default = torch.ops.aten._to_copy.default(a_1, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
expand_copy_default = torch.ops.aten.expand_copy.default(_to_copy_default, [2]); _to_copy_default = None
add_tensor = torch.ops.aten.add.Tensor(expand_copy_default, a_1); expand_copy_default = a_1 = None
return add_tensor
""") # noqa: B950
reinplaced_logs = self.get_logs(f, torch.ones(1, dtype=torch.long), reapply_views=True, run_reinplace=True)
self.assertExpectedInline(reinplaced_logs, """\
def forward(self, a_1):
zeros = torch.ops.aten.zeros.default([2, 2], device = device(type='cpu'), pin_memory = False)
diagonal_default = torch.ops.aten.diagonal.default(zeros); zeros = None
_to_copy_default = torch.ops.aten._to_copy.default(a_1, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
expand_copy_default = torch.ops.aten.expand_copy.default(_to_copy_default, [2]); _to_copy_default = None
add_tensor = torch.ops.aten.add_.Tensor(expand_copy_default, a_1); a_1 = None
return expand_copy_default
""") # noqa: B950
# expand() with sizes taken from the input's own size() — the trace should
# bake them in as concrete ints (no symbolic shapes yet; see TODO below).
def test_expand_symint(self):
# Once some existing SymInt bugs are ironed out, we should update
# this test to plumb FakeSymbolicTensors through it
def f(x):
return x.expand(x.size(0), x.size(1))
self.assert_functionalization(f, torch.ones(2, 2))
logs = self.get_logs(f, torch.ones(2, 2))
self.assertExpectedInline(logs, """\
def forward(self, a_1):
expand_copy_default = torch.ops.aten.expand_copy.default(a_1, [2, 2]); a_1 = None
return expand_copy_default
""")
# fill_() on a view of an intermediate: the trace scatters the filled
# diagonal back into the base (diagonal_scatter); the reinplaced version
# keeps the in-place fill_ since the base is not a graph input.
def test_fill_(self):
def f(x):
y = x + x
z = y.diagonal()
z.fill_(0)
return y
self.assert_functionalization(f, torch.ones(2, 2))
logs = self.get_logs(f, torch.ones(2, 2))
self.assertExpectedInline(logs, """\
def forward(self, a_1):
add_tensor = torch.ops.aten.add.Tensor(a_1, a_1); a_1 = None
diagonal_copy_default = torch.ops.aten.diagonal_copy.default(add_tensor)
fill_scalar = torch.ops.aten.fill.Scalar(diagonal_copy_default, 0); diagonal_copy_default = None
diagonal_scatter_default = torch.ops.aten.diagonal_scatter.default(add_tensor, fill_scalar); add_tensor = fill_scalar = None
return diagonal_scatter_default
""")
reinplaced_logs = self.get_logs(f, torch.ones(2, 2), reapply_views=True, run_reinplace=True)
self.assertExpectedInline(reinplaced_logs, """\
def forward(self, a_1):
add_tensor = torch.ops.aten.add.Tensor(a_1, a_1); a_1 = None
diagonal_default = torch.ops.aten.diagonal.default(add_tensor)
fill_scalar = torch.ops.aten.fill_.Scalar(diagonal_default, 0); diagonal_default = None
return add_tensor
""")
# resize_() to a *smaller* size on a view: expressed in the trace as
# resize + as_strided(_copy), with mutations written back through
# as_strided_scatter.
def test_resize_smaller(self):
def f(w):
# Resizing to a smaller size doesn't affect storage
x = w + 1
y = x.view(4, 4)
y.resize_(3, 3)
y2 = y.view(-1)
y2.add_(1)
z = y + 1
return z
self.assert_functionalization(f, torch.ones(8, 2))
logs = self.get_logs(f, torch.ones(8, 2))
self.assertExpectedInline(logs, """\
def forward(self, a_1):
add_tensor = torch.ops.aten.add.Tensor(a_1, 1); a_1 = None
view_copy_default = torch.ops.aten.view_copy.default(add_tensor, [4, 4])
resize_default = torch.ops.aten.resize.default(view_copy_default, [3, 3])
as_strided_copy_default = torch.ops.aten.as_strided_copy.default(view_copy_default, [3, 3], [3, 1]); view_copy_default = None
view_copy_default_1 = torch.ops.aten.view_copy.default(as_strided_copy_default, [-1]); as_strided_copy_default = None
add_tensor_1 = torch.ops.aten.add.Tensor(view_copy_default_1, 1); view_copy_default_1 = None
view_copy_default_2 = torch.ops.aten.view_copy.default(add_tensor, [4, 4]); add_tensor = None
as_strided_copy_default_1 = torch.ops.aten.as_strided_copy.default(view_copy_default_2, [3, 3], [3, 1])
view_copy_default_3 = torch.ops.aten.view_copy.default(add_tensor_1, [3, 3]); add_tensor_1 = None
as_strided_scatter_default = torch.ops.aten.as_strided_scatter.default(view_copy_default_2, view_copy_default_3, [3, 3], [3, 1]); view_copy_default_2 = view_copy_default_3 = None
view_copy_default_4 = torch.ops.aten.view_copy.default(as_strided_scatter_default, [8, 2]); as_strided_scatter_default = None
view_copy_default_5 = torch.ops.aten.view_copy.default(view_copy_default_4, [4, 4]); view_copy_default_4 = None
as_strided_copy_default_2 = torch.ops.aten.as_strided_copy.default(view_copy_default_5, [3, 3], [3, 1]); view_copy_default_5 = None
add_tensor_2 = torch.ops.aten.add.Tensor(as_strided_copy_default_2, 1); as_strided_copy_default_2 = None
return add_tensor_2
""") # noqa: B950
reinplaced_logs = self.get_logs(f, torch.ones(8, 2), reapply_views=True, run_reinplace=True)
self.assertExpectedInline(reinplaced_logs, """\
def forward(self, a_1):
add_tensor = torch.ops.aten.add.Tensor(a_1, 1); a_1 = None
view_default = torch.ops.aten.view.default(add_tensor, [4, 4])
resize_default = torch.ops.aten.resize.default(view_default, [3, 3])
as_strided_default = torch.ops.aten.as_strided.default(view_default, [3, 3], [3, 1]); view_default = None
view_default_1 = torch.ops.aten.view.default(as_strided_default, [-1]); as_strided_default = None
add_tensor_1 = torch.ops.aten.add_.Tensor(view_default_1, 1)
view_default_2 = torch.ops.aten.view.default(add_tensor, [4, 4]); add_tensor = None
as_strided_default_1 = torch.ops.aten.as_strided.default(view_default_2, [3, 3], [3, 1])
view_default_3 = torch.ops.aten.view.default(view_default_1, [3, 3]); view_default_1 = None
view_default_4 = torch.ops.aten.view.default(view_default_2, [8, 2]); view_default_2 = None
view_default_5 = torch.ops.aten.view.default(view_default_4, [4, 4]); view_default_4 = None
as_strided_default_2 = torch.ops.aten.as_strided.default(view_default_5, [3, 3], [3, 1]); view_default_5 = None
add_tensor_2 = torch.ops.aten.add_.Tensor(as_strided_default_2, 1)
return as_strided_default_2
""")
# resize_() to a *larger* size is allowed only on tensors with no
# outstanding views (see Note [resize_() in functionalization pass]);
# mutations through aliases created after the resize must still propagate.
def test_resize_larger_valid(self):
def f(x):
y = x + 1
# resizing a tensor to a larger size is only currently allowed
# if the tensor-to-resize is not a view / has no outstanding views.
# See Note [resize_() in functionalization pass]
y.resize_(5, 5)
y2 = y.view(25)
# Do a mutation to ensure that aliases of the output of resize_()
# propagate mutations correctly.
# I'm using fill_ specifically because I want to guarantee that
# none of the output has uninitialized memory at the end
# (since these tests compare the data output against a reference impl)
y2.fill_(1)
out = y + 1
return y, out
self.assert_functionalization(f, torch.ones(8, 2))
logs = self.get_logs(f, torch.ones(8, 2))
self.assertExpectedInline(logs, """\
def forward(self, a_1):
add_tensor = torch.ops.aten.add.Tensor(a_1, 1); a_1 = None
resize_default = torch.ops.aten.resize.default(add_tensor, [5, 5]); add_tensor = None
view_copy_default = torch.ops.aten.view_copy.default(resize_default, [25]); resize_default = None
fill_scalar = torch.ops.aten.fill.Scalar(view_copy_default, 1); view_copy_default = None
view_copy_default_1 = torch.ops.aten.view_copy.default(fill_scalar, [5, 5]); fill_scalar = None
add_tensor_1 = torch.ops.aten.add.Tensor(view_copy_default_1, 1)
return (view_copy_default_1, add_tensor_1)
""")
reinplaced_logs = self.get_logs(f, torch.ones(8, 2), reapply_views=True, run_reinplace=True)
self.assertExpectedInline(reinplaced_logs, """\
def forward(self, a_1):
add_tensor = torch.ops.aten.add.Tensor(a_1, 1); a_1 = None
resize_default = torch.ops.aten.resize_.default(add_tensor, [5, 5])
view_default = torch.ops.aten.view.default(add_tensor, [25]); add_tensor = None
fill_scalar = torch.ops.aten.fill_.Scalar(view_default, 1)
view_default_1 = torch.ops.aten.view.default(view_default, [5, 5]); view_default = None
add_tensor_1 = torch.ops.aten.add.Tensor(view_default_1, 1)
return (view_default_1, add_tensor_1)
""")
def test_resize_larger_invalid(self):
    # Growing a *view* tensor with resize_() is unsupported by the
    # functionalization pass (see Note [resize_() in functionalization
    # pass]), so running f under the pass must raise.
    def f(x):
        y = x + 1
        z = y.view(4, 4)
        # resizing a tensor to a larger size is only currently allowed
        # if the tensor-to-resize is not a view / has no outstanding views.
        # See Note [resize_() in functionalization pass]
        # This should fail
        z.resize_(5, 5)
        z2 = z.view(25)
        z2.fill_(1)
        out = z + 1
        return y, out

    expected_error = r'Attempted to resize a view tensor to a larger size. This is not allowed in the functionalization pass'
    with self.assertRaisesRegex(RuntimeError, expected_error):
        self.assert_functionalization(f, torch.ones(8, 2))
def test_nested_functions_propagate_updates(self):
    # A mutation made through a short-lived view inside a helper must be
    # visible to the caller even after that view has been deallocated.
    def mutate_first_row(t):
        # Create a view of t and mutate through it; the view dies when
        # this helper returns.
        row = t[0]
        row.add_(1)

    def f(t):
        # Calling the helper should mutate t.
        mutate_first_row(t)
        # t must be synced here, even though the alias created in the
        # helper has been deallocated!
        return t + t

    self.assert_functionalization(f, torch.ones(2, 2))
# Mixing a plain LoggingTensor with a functional-wrapped one: the kernels
# must still run (logged below) and the output must be wrapped correctly.
def test_mixed_wrappers_valid(self):
def f(x, y):
z = x + y
z.add_(1)
return z
x1_not_functional = LoggingTensor(torch.ones(4))
x2_functional = torch._to_functional_tensor(LoggingTensor(torch.ones(4)))
with capture_logs() as logs:
y = f(x1_not_functional, x2_functional)
# Make sure that functionalization ran the "+" kernel
# with a functional + non-functional tensor, and wrapped the output appropriately.
self.assertExpectedInline('\n'.join(logs), """\
$2 = torch._ops.aten.add.Tensor($0, $1)
$3 = torch._ops.aten.add.Tensor($2, 1)""")
def test_mixed_wrappers_invalid(self):
    plain = torch.ones(4)
    wrapped = torch._to_functional_tensor(torch.ones(4))
    # With mixed functional + non-functional tensors,
    # plain.add_(wrapped) is invalid: the plain tensor would need to be
    # "promoted" to a functional tensor, which is not supported.
    with self.assertRaises(RuntimeError):
        plain.add_(wrapped)
# Mutating an indexed slice of a tensor created *inside* f (not a graph
# input): the trace uses select_scatter; the reinplaced trace keeps the
# in-place fill_ and returns the base directly.
def test_index_mutation_on_non_input(self):
def f(x):
tmp = torch.zeros(10)
tmp[5].fill_(1)
return tmp
self.assert_functionalization(f, torch.ones(2))
logs = self.get_logs(f, torch.ones(2))
self.assertExpectedInline(logs, """\
def forward(self, a_1):
zeros = torch.ops.aten.zeros.default([10], device = device(type='cpu'), pin_memory = False)
select_copy_int = torch.ops.aten.select_copy.int(zeros, 0, 5)
fill_scalar = torch.ops.aten.fill.Scalar(select_copy_int, 1); select_copy_int = None
select_scatter_default = torch.ops.aten.select_scatter.default(zeros, fill_scalar, 0, 5); zeros = fill_scalar = None
return select_scatter_default
""") # noqa: B950
reinplaced_logs = self.get_logs(f, torch.ones(2), reapply_views=True, run_reinplace=True)
self.assertExpectedInline(reinplaced_logs, """\
def forward(self, a_1):
zeros = torch.ops.aten.zeros.default([10], device = device(type='cpu'), pin_memory = False)
select_int = torch.ops.aten.select.int(zeros, 0, 5)
fill_scalar = torch.ops.aten.fill_.Scalar(select_int, 1); select_int = None
return zeros
""")
# Standard PyTorch test-file entry point.
if __name__ == '__main__':
run_tests()
| pytorch-master | test/test_functionalization.py |
# Owner(s): ["module: primTorch"]
from collections import defaultdict
from torch import Tensor
import torch.autograd
from torch.utils._python_dispatch import enable_torch_dispatch_mode
from torch._decomp import decomposition_table
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils._mode_utils import no_dispatch
from torch.testing._internal.common_utils import (
is_iterable_of_tensors,
TestCase,
skipIfCrossRef,
suppress_warnings,
TEST_WITH_ASAN,
run_tests,
skipIfSlowGradcheckEnv,
)
from torch.testing._internal.common_device_type import (
onlyNativeDeviceTypes,
ops,
instantiate_device_type_tests,
)
from torch.testing._internal.common_methods_invocations import op_db
import itertools
import functools
from functools import partial
import unittest
aten = torch.ops.aten


# TODO: this isn't going to work with non-aten namespaces
def overload_to_aten_name(overload):
    """Strip the namespace from an overload's schema name, e.g. 'aten::add' -> 'add'."""
    qualified_name = overload._schema.name
    return qualified_name.split("::")[1]
# All operators that can have decomp tests
decomposition_names = {overload_to_aten_name(k) for k in decomposition_table}
# OpInfos whose forward or backward aten op has a registered decomposition.
_decomp_test_ops = [
op
for op in op_db
if op.aten_name in decomposition_names
or op.aten_backward_name in decomposition_names
]
def diff_arg(arg, requires_grad=True):
def is_differentiable_arg(arg):
if requires_grad:
return arg.requires_grad
else:
return arg.is_floating_point() or arg.is_complex()
if is_iterable_of_tensors(arg):
if all([is_differentiable_arg(a) for a in arg]):
return True
if all([not is_differentiable_arg(a) for a in arg]):
return False
raise RuntimeError("NYI: The test runner can't handle this")
return isinstance(arg, Tensor) and is_differentiable_arg(arg)
# Version of autograd.grad with some differences:
# - pytree inputs is allowed (but leaves of the pytree have to all
# be tensors)
# - if an input is not used as part of derivatives, we will return a
# zero-filled tensor for the result
def _autograd_grad(
outputs, inputs, grad_outputs=None, retain_graph=False, create_graph=True
):
inputs, inputs_spec = tree_flatten(inputs)
diff_inputs = tuple(inp for inp in inputs if inp.requires_grad)
if grad_outputs is None:
diff_outputs = tuple(out for out in outputs if out.requires_grad)
else:
diff_grad_outputs = [
(out, go) for out, go in zip(outputs, grad_outputs) if out.requires_grad
]
if len(diff_grad_outputs) == 0:
diff_outputs, grad_outputs = (), ()
else:
diff_outputs, grad_outputs = zip(*diff_grad_outputs)
grad_inputs = torch.autograd.grad(
diff_outputs,
diff_inputs,
grad_outputs,
retain_graph=retain_graph,
create_graph=create_graph,
allow_unused=True,
)
result = []
grad_inputs_iter = iter(grad_inputs)
for inp in inputs:
if inp.requires_grad:
grad_input = next(grad_inputs_iter)
if grad_input is None:
result.append(torch.zeros_like(inp))
else:
result.append(grad_input)
else:
result.append(torch.zeros_like(inp))
return tree_unflatten(result, inputs_spec)
def _as_tuple(val):
if isinstance(val, tuple):
return val
return (val,)
def ref_vjp_no_create(f, *primals):
    """Run ``f(*primals)`` and return (result, vjp_fn).

    The returned vjp_fn maps cotangents to per-primal gradients via
    _autograd_grad with ``create_graph=False`` (no graph for double-grad).
    """
    result = f(*primals)

    def vjp_fn(cotangents):
        return _autograd_grad(
            _as_tuple(result), primals, _as_tuple(cotangents), create_graph=False
        )

    return result, vjp_fn
# Per-dtype (rtol, atol) precision table used by _getDefaultRtolAndAtol below.
dtype_precisions = {
torch.float16: (0.001, 1e-5),
torch.bfloat16: (0.016, 1e-4),
torch.float32: (1.3e-6, 1e-5),
torch.float64: (1e-7, 1e-7),
torch.complex32: (0.001, 1e-5),
torch.complex64: (1.3e-6, 1e-5),
torch.complex128: (1e-7, 1e-7),
}
# Returns the "default" rtol and atol for comparing scalars or
# tensors of the given dtypes: the looser of the two dtypes' precisions
# wins (unknown dtypes contribute (0, 0), i.e. exact comparison).
def _getDefaultRtolAndAtol(dtype0, dtype1):
    precision0 = dtype_precisions.get(dtype0, (0, 0))
    precision1 = dtype_precisions.get(dtype1, (0, 0))
    rtol = max(precision0[0], precision1[0])
    atol = max(precision0[1], precision1[1])
    return rtol, atol
# Compare a decomposition's output against the original kernel, using `ref`
# (a higher-precision reference result) as the yardstick: the decomp's max
# deviation from ref may exceed the original kernel's by at most `atol`.
# `i` is the output index, used only in error messages.
def op_assert_ref(test_case, op, test_dtype, i, orig, decomp, ref, args, kwargs):
assert orig.dtype == decomp.dtype, f"{i} Operation: {op}"
if orig.numel() == 0 or decomp.numel() == 0:
assert orig.numel() == decomp.numel()
return
assert orig.shape == decomp.shape, f"{i} Operation: {op}"
# Extra slack allowed for specific (dtype, op) pairs on top of the 1e-7 default.
tol_table = {
(torch.bfloat16, torch.ops.aten.native_layer_norm.default): 1e-5,
(torch.float16, torch.ops.aten.native_layer_norm.default): 1e-5,
(torch.bfloat16, torch.ops.aten.native_batch_norm.default): 1e-5,
(torch.float16, torch.ops.aten.native_batch_norm.default): 1e-5,
(torch.bfloat16, torch.ops.aten.linalg_vector_norm.default): 1e-6,
(torch.float16, torch.ops.aten.linalg_vector_norm.default): 1e-6,
}
if ref.is_floating_point():
orig_diff = (orig - ref).abs().max()
decomp_diff = (decomp - ref).abs().max()
atol = tol_table.get((test_dtype, op), 1e-7)
if decomp_diff > orig_diff + atol:
raise RuntimeError(
f"Difference from float64 is larger with decomposition {op.__name__}"
f" than original on output {i}. Original max diff: {orig_diff}, Decomp max diff: {decomp_diff}\n"
f"atol = {atol}\n"
f"args = {args}\n"
f"kwargs = {kwargs}"
)
else:
# Non-floating outputs (bool/int) must match exactly.
test_case.assertEqual(
orig, decomp, msg=f"{op.__name__}\nargs = {args}\nkwargs = {kwargs}"
)
def op_assert_equal(test_case, op, test_dtype, orig, decomp, args, kwargs):
    """Assert the decomposition result ``decomp`` matches the eager result
    ``orig``: identical dtype, and values equal within (rtol, atol).

    ``test_dtype`` is the dtype the test runs under and selects the per-op
    tolerance override, if any.
    """
    test_case.assertEqual(
        orig.dtype, decomp.dtype, f"Operation: {op}, orig.dtype: {orig.dtype}, decomp.dtype: {decomp.dtype}, {args}, {kwargs}")
    # Before adding an entry to this table, make sure your decomposition is right :)
    tol_table = {
        # Due to strange epsilon behaviors, see https://github.com/pytorch/pytorch/issues/73161
        (torch.float32, torch.ops.aten.native_layer_norm.default): (1e-3, 1e-3),
        (torch.float32, torch.ops.aten.native_layer_norm_backward.default): (
            1e-3,
            1e-3,
        ),
    }
    if (test_dtype, op) in tol_table:
        # BUG FIX: the lookup previously keyed on (decomp.dtype, op), which
        # raised KeyError whenever decomp.dtype != test_dtype even though the
        # membership test above used (test_dtype, op). Use the same key.
        rtol, atol = tol_table[(test_dtype, op)]
    else:
        rtol, atol = _getDefaultRtolAndAtol(orig.dtype, decomp.dtype)
    test_case.assertEqual(orig, decomp, rtol=rtol, atol=atol, msg=f"{op.__name__}\nargs = {args}\nkwargs = {kwargs}")
# Given f, returns an f' such that:
# - f' takes only positional arguments
# - All arguments to f' are floating-point Tensors
# - All outputs of f' are floating-point Tensors
def normalize_op_input_output2(
    f, args, kwargs, output_process_fn_grad=None, requires_grad=True
):
    """Flatten ``args``, select the differentiable tensor leaves as primals,
    and return ``(wrapped_f, primals)`` where ``wrapped_f`` takes only those
    primals positionally."""
    flat_args, args_spec = tree_flatten(args)
    diff_argnums = tuple(
        idx
        for idx, leaf in enumerate(flat_args)
        if diff_arg(leaf, requires_grad=requires_grad)
    )
    assert len(diff_argnums) > 0
    primals = tuple(flat_args[idx] for idx in diff_argnums)

    @functools.wraps(f)
    def wrapped(*primals):
        # Splice the (possibly replaced) primals back into the full arg list.
        replaced_args = list(flat_args)
        for slot, value in zip(diff_argnums, primals):
            replaced_args[slot] = value
        result = f(*tree_unflatten(replaced_args, args_spec), **kwargs)
        if output_process_fn_grad is not None:
            result = output_process_fn_grad(result)
        if isinstance(result, tuple):
            # TODO: Remove the following hack for namedtuples
            result = tuple(result)
            # Keep only floating/complex tensor outputs for autograd.
            result = tuple(
                out
                for out in result
                if isinstance(out, Tensor)
                and (out.is_floating_point() or out.is_complex())
            )
            assert len(result) > 0
        return result

    return wrapped, primals
# NB: This also upcasts dtype arguments
# TODO: handle complex correctly
def upcast_tensor(x, dtype=torch.float32):
    """Return ``x`` cast to ``dtype`` when it is a floating-point tensor,
    return ``dtype`` when ``x`` is itself a low-precision float dtype, and
    return ``x`` unchanged otherwise."""
    if isinstance(x, torch.Tensor) and x.dtype.is_floating_point:
        return x.to(dtype=dtype)
    low_precision_dtypes = (torch.float16, torch.bfloat16, torch.float)
    if isinstance(x, torch.dtype) and x in low_precision_dtypes:
        return dtype
    return x
def normalize_op_input_output(f, sample, requires_grad=True):
    """Like normalize_op_input_output2, but extracts args/kwargs from an
    OpInfo SampleInput."""
    full_args = (sample.input, *sample.args)
    return normalize_op_input_output2(
        f,
        full_args,
        sample.kwargs,
        sample.output_process_fn_grad,
        requires_grad=requires_grad,
    )
# (device_type, dtype, op_name) triples that do_cross_ref skips; a None field
# acts as a wildcard for that position (see the membership checks in
# do_cross_ref).
CROSS_REF_EXCLUDE_SET = {
    # CUBLAS_STATUS_NOT_SUPPORTED when calling
    # `cublasGemmStridedBatchedExFix(handle, opa, opb, (int)m, (int)n, (int)k,
    # (void*)&falpha, a, CUDA_R_16BF, (int)lda, stridea, b, CUDA_R_16BF,
    # (int)ldb, strideb, (void*)&fbeta, c, CUDA_R_16BF, (int)ldc, stridec,
    # (int)num_batches, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)`
    ("cuda", torch.bfloat16, "nn.functional.bilinear"),
    # randomness
    ("cuda", torch.float16, "nn.functional.dropout"),
    ("cuda", torch.bfloat16, "nn.functional.dropout"),
    ("cuda", torch.float64, "nn.functional.dropout"),
    ("cuda", torch.float32, "nn.functional.dropout"),
    (None, None, "new_empty"),
    # decomp has problem even with opmath
    # doesn't work
    ("cuda", torch.bfloat16, "nn.functional.embedding"),
    # CompositeAutogradImplicit
    # See https://github.com/pytorch/pytorch/issues/81669
    (None, None, "nn.functional.relu6"),
    (None, None, "meshgrid"),
}
all_decomposed = set()
all_called = defaultdict(int)
# Helpful snippet for testing coverage
"""
import atexit
def check_coverage():
print("missing coverage:")
print("\n".join(map(str, decomposition_table.keys() - all_decomposed)))
atexit.register(check_coverage)
"""
# Helpful snippet for Horace to create his google sheet :)
"""
import atexit
def dump_ops():
with open('run_ops.txt', 'w') as f, open('count_ops.txt', 'w') as g:
for op, count in sorted(all_called.items(), key=lambda x: x[0].__name__):
f.write(f'{op.__name__}\n')
g.write(f'{count}\n')
with open('run_decompositions.txt', 'w') as f:
for op in sorted([i.__name__ for i in all_decomposed]):
f.write(f'{op}\n')
atexit.register(dump_ops)
"""
def any_unsupported(args, kwargs):
    """Return True if any tensor (or tensor-like) leaf in args/kwargs is of a
    kind the decomposition tests do not handle (sparse, mkldnn, quantized,
    nested, functional, or a Tensor subclass)."""
    def test_unsupported(t):
        # Exact type checks are deliberate: subclasses fall through to the
        # tensor-like branch below.
        if type(t) is torch.Tensor or type(t) is torch.nn.Parameter:
            # These are all things that we haven't coded decompositions
            # to handle correctly. Maybe they should.
            # (Short-circuiting or-chain instead of any([...]) — avoids
            # building a throwaway list and evaluating every flag.)
            return (
                t.is_sparse_csr
                or t.is_sparse
                or t.is_mkldnn
                or t.is_quantized
                or t.is_nested
                or torch._is_functional_tensor(t)
            )
        elif torch.overrides.is_tensor_like(t):
            # Decompositions will generally change the behavior of Tensor-like
            # subclasses, so bypass tests in this case too
            return True
        else:
            return False
    flat_args, _ = tree_flatten(args)
    flat_kwargs, _ = tree_flatten(kwargs)
    return any(test_unsupported(x) for x in itertools.chain(flat_args, flat_kwargs))
@skipIfSlowGradcheckEnv
class TestDecomp(TestCase):
    """Cross-reference tests: run each sampled op normally and through its
    registered decomposition (via a __torch_dispatch__ interposition mode) and
    check that the two agree."""
    longMessage = True

    # NB: This actually overlaps with test_comprehensive, but it only
    # runs on things that are definitely decomposed so it's a lot faster
    # to run
    @unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
    @onlyNativeDeviceTypes
    @skipIfCrossRef
    @suppress_warnings
    @ops(_decomp_test_ops)
    def test_quick(self, device, dtype, op):
        self.do_cross_ref(device, dtype, op, run_all=False)

    @unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
    @onlyNativeDeviceTypes
    @skipIfCrossRef
    @suppress_warnings
    @ops(op_db)
    def test_comprehensive(self, device, dtype, op):
        self.do_cross_ref(device, dtype, op, run_all=True)

    def do_cross_ref(self, device, dtype, op, *, run_all):
        """Run ``op`` under a dispatch mode that executes every aten call both
        normally and through its decomposition, comparing the results.

        With ``run_all=False`` this additionally asserts that a decomposition
        actually fired for the op under test.
        """
        if (torch.device(device).type, dtype, op.name) in CROSS_REF_EXCLUDE_SET or (
            None,
            dtype,
            op.name,
        ) in CROSS_REF_EXCLUDE_SET or (None, None, op.name) in CROSS_REF_EXCLUDE_SET:
            self.skipTest(f"{op.name} in {dtype} not supported")
        test_dtype = dtype
        # We check the correctness of each decomposition right after running it.
        # So, when we encounter a decomposition, we run the function normally, and
        # then run the decomposition, and ensure they're identical.
        called = set()
        decomposed = set()
        saved_precision = self.precision
        saved_rel_tol = self.rel_tol

        class DecompCrossRefMode(torch.Tensor):
            @classmethod
            def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
                # Run the cross-ref logic with dispatch disabled so the inner
                # func/decomposition calls don't re-enter this mode.
                with no_dispatch():
                    return cls._torch_dispatch(func, types, args, kwargs)

            @classmethod
            def _torch_dispatch(cls, func, types, args=(), kwargs=None):
                # Reset precision to the values captured at the start of
                # do_cross_ref before each comparison.
                self.precision = saved_precision
                self.rel_tol = saved_rel_tol
                called.add(func)
                all_called[func] += 1
                # Stuff we shouldn't bother testing
                # (TODO: remove detach from the decomp table?)
                if func not in decomposition_table or func in [
                    torch.ops.aten.detach.default,
                    # non-deterministic ops
                    torch.ops.aten.new_empty.default,
                    torch.ops.aten.new_empty.SymInt
                ] or any_unsupported(args, kwargs):
                    return func(*args, **kwargs)
                decomposed.add(func)
                all_decomposed.add(func)
                # We take 2 main strategies for verifying correctness/numerical stability of decompositions
                # The first one is simply tolerance checking between decomp_out and pytorch_out
                # However, for fp16/bf16 and reductions, this becomes very
                # finicky, as there are not many guarantees we can make.
                # So, for fp16/bf16, we instead compare the difference of
                # {decomp_out, pytorch_out_64} and {pytorch_out,
                # pytorch_out_64}. In other words, we compare how far the
                # decomposition and pytorch are from the "ground truth" (i.e.
                # fp64). If the decomposition results in more error, we error
                decomposition = decomposition_table[func]
                do_relative_check = test_dtype in [torch.float16, torch.bfloat16]
                real_out_unflat = func(*args, **kwargs)
                real_out, _ = tree_flatten(real_out_unflat)
                decomp_out, _ = tree_flatten(decomposition(*args, **kwargs))
                assert len(real_out) == len(decomp_out)
                if do_relative_check:
                    upcast = partial(upcast_tensor, dtype=torch.float64)
                    real_out_double, _ = tree_flatten(
                        func(*tree_map(upcast, args), **tree_map(upcast, kwargs))
                    )
                    for i, orig, decomp, ref in zip(range(len(real_out)), real_out, decomp_out, real_out_double):
                        if not isinstance(orig, torch.Tensor):
                            assert type(orig) == type(decomp)
                            assert orig == decomp
                            continue
                        op_assert_ref(self, func, test_dtype, i, orig, decomp, ref, args, kwargs)
                else:
                    for orig, decomp in zip(real_out, decomp_out):
                        if not isinstance(orig, torch.Tensor):
                            assert type(orig) == type(decomp)
                            assert orig == decomp
                            continue
                        op_assert_equal(self, func, test_dtype, orig, decomp, args, kwargs)
                return real_out_unflat

        requires_grad = (
            op.supports_autograd
            and dtype in op.supported_backward_dtypes(torch.device(device).type)
            # TODO: OpInfo really ought to error out for this case, but it's
            # not exercised in test_ops_gradients atm. The problem is not
            # complex32 per-se (which is supported by data movement only ops)
            # but that when we do backwards we expect other ops like add to work
            and not dtype == torch.complex32
        )
        samples = op.sample_inputs(device, test_dtype, requires_grad=requires_grad)

        def check_decomposed(aten_name):
            self.assertTrue(
                any(overload_to_aten_name(c) == aten_name for c in decomposed),
                msg=(f"aten.{aten_name} was not decomposed, saw calls for: "
                     f"{', '.join(map(str, list(called)))}. If your op is "
                     f"CompositeImplicitAutograd you should skip this test "
                     "by updating CROSS_REF_EXCLUDE_SET.")
            )

        aten_name = op.decomp_aten_name or op.aten_name
        func = op.get_op()
        for sample_input in samples:
            if requires_grad:
                if None in sample_input.args:
                    continue
                fn, primals = normalize_op_input_output(func, sample_input)
                # (A tree_map with an identity lambda used to sit here; it was
                # a no-op and has been removed.)
                # Once https://github.com/pytorch/pytorch/pull/75965/ I can
                # store the called list on the mode object instance and no
                # explicit clearing is necessary as I will create a fresh mode
                # for each region
                decomposed.clear()
                with enable_torch_dispatch_mode(DecompCrossRefMode):
                    decomp_out, decomp_vjp_fn = ref_vjp_no_create(fn, *primals)
                if aten_name in decomposition_names:
                    check_decomposed(aten_name)
                if op.aten_backward_name in decomposition_names or run_all:
                    cotangents = tree_map(lambda x: torch.randn_like(x), decomp_out)
                    decomposed.clear()
                    with enable_torch_dispatch_mode(DecompCrossRefMode):
                        decomp_vjp_fn(cotangents)
                    if not run_all:
                        check_decomposed(op.aten_backward_name)
            elif aten_name in decomposition_names or run_all:
                args = [sample_input.input] + list(sample_input.args)
                kwargs = sample_input.kwargs
                decomposed.clear()
                with enable_torch_dispatch_mode(DecompCrossRefMode):
                    func(*args, **kwargs)
                if not run_all:
                    check_decomposed(aten_name)
            else:
                assert op.supports_autograd
                self.skipTest(
                    "only backwards is decomposed, but dtype doesn't support AD"
                )
# Generate per-device-type variants of TestDecomp (e.g. CPU/CUDA classes).
instantiate_device_type_tests(TestDecomp, globals())
if __name__ == "__main__":
    run_tests()
| pytorch-master | test/test_decomp.py |
# Owner(s): ["module: unknown"]
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
import torch
from torch.testing._internal.common_utils import TestCase
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
class PruningOpTest(TestCase):
    """Tests torch._rowwise_prune against a pure-Python reference result."""

    # Generate rowwise mask vector based on indicator and threshold value.
    # indicator is a vector that contains one value per weight row and it
    # represents the importance of a row.
    # We mask a row if its indicator value is less than the threshold.
    def _generate_rowwise_mask(self, embedding_rows):
        indicator = torch.from_numpy((np.random.random_sample(embedding_rows)).astype(np.float32))
        threshold = np.random.random_sample()
        # Vectorized comparison yields the same bool tensor as the old
        # per-element `True if val >= threshold else False` comprehension.
        mask = indicator >= threshold
        return mask

    def _test_rowwise_prune_op(self, embedding_rows, embedding_dims, indices_type, weights_dtype):
        """Build random weights + mask, then compare torch._rowwise_prune's
        (pruned_weights, compressed_idx) with the reference implementation."""
        # randint for integral dtypes, rand for floating ones.
        if weights_dtype in [torch.int8, torch.int16, torch.int32, torch.int64]:
            embedding_weights = torch.randint(0, 100, (embedding_rows, embedding_dims), dtype=weights_dtype)
        else:
            embedding_weights = torch.rand((embedding_rows, embedding_dims), dtype=weights_dtype)
        mask = self._generate_rowwise_mask(embedding_rows)

        def get_pt_result(embedding_weights, mask, indices_type):
            return torch._rowwise_prune(embedding_weights, mask, indices_type)

        # Reference implementation.
        def get_reference_result(embedding_weights, mask, indices_type):
            num_embeddings = mask.size()[0]
            compressed_idx_out = torch.zeros(num_embeddings, dtype=indices_type)
            pruned_weights_out = embedding_weights[mask[:]]
            idx = 0
            for i in range(mask.size()[0]):
                if mask[i]:
                    # Kept rows get their position in the pruned output ...
                    compressed_idx_out[i] = idx
                    idx = idx + 1
                else:
                    # ... pruned rows are marked with -1.
                    compressed_idx_out[i] = -1
            return (pruned_weights_out, compressed_idx_out)

        pt_pruned_weights, pt_compressed_indices_map = get_pt_result(
            embedding_weights, mask, indices_type)
        ref_pruned_weights, ref_compressed_indices_map = get_reference_result(
            embedding_weights, mask, indices_type)
        torch.testing.assert_close(pt_pruned_weights, ref_pruned_weights)
        self.assertEqual(pt_compressed_indices_map, ref_compressed_indices_map)
        self.assertEqual(pt_compressed_indices_map.dtype, indices_type)

    @given(
        embedding_rows=st.integers(1, 100),
        embedding_dims=st.integers(1, 100),
        weights_dtype=st.sampled_from([torch.float64, torch.float32,
                                       torch.float16, torch.int8,
                                       torch.int16, torch.int32, torch.int64])
    )
    def test_rowwise_prune_op_32bit_indices(self, embedding_rows, embedding_dims, weights_dtype):
        self._test_rowwise_prune_op(embedding_rows, embedding_dims, torch.int, weights_dtype)

    @given(
        embedding_rows=st.integers(1, 100),
        embedding_dims=st.integers(1, 100),
        weights_dtype=st.sampled_from([torch.float64, torch.float32,
                                       torch.float16, torch.int8,
                                       torch.int16, torch.int32, torch.int64])
    )
    def test_rowwise_prune_op_64bit_indices(self, embedding_rows, embedding_dims, weights_dtype):
        self._test_rowwise_prune_op(embedding_rows, embedding_dims, torch.int64, weights_dtype)
| pytorch-master | test/test_pruning_op.py |
# Owner(s): ["module: unknown"]
from functools import partial
from textwrap import dedent
import torch
from torch.testing import FileCheck
from torch.testing._internal.common_utils import \
(run_tests, IS_SANDCASTLE, clone_input_helper, first_sample, skipIfSlowGradcheckEnv)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase, check_against_reference
from torch.testing._internal.jit_metaprogramming_utils import create_script_fn, create_traced_fn, check_alias_annotation
from torch.testing._internal.jit_utils import disable_autodiff_subgraph_inlining, is_lambda
# Module-level setup for the JIT/eager consistency tests below.
# TODO: fixme https://github.com/pytorch/pytorch/issues/68972
torch.set_default_dtype(torch.float32)
# variant testing is only done with torch.float and torch.cfloat to avoid
# excessive test times and maximize signal to noise ratio
# (used below as the decorator for test_variant_consistency_jit)
_variant_ops = partial(ops, dtypes=OpDTypes.supported,
                       allowed_dtypes=(torch.float, torch.cfloat))
# Tests operators for consistency between JIT and eager, also checks
# correctness of JIT specific alias schemas and intended
# autodifferentiation behavior.
# Inherits from JitCommonTestCase instead of TestCase directly to share
# functionality with original test_jit.py method operator tests
@skipIfSlowGradcheckEnv
class TestJit(JitCommonTestCase):
    """Checks eager vs. traced/scripted consistency for OpInfo ops, plus JIT
    alias-schema remapping and autodiff node placement."""
    exact_dtype = True
    # Tests that the forward and backward passes of operations produce the
    # same values for the cross-product of op variants (function, method, inplace)
    # and runtimes (eager, traced, scripted).
    # TODO WARNING: inplace x {traced, scripted} not currently tested
    @_variant_ops(op_db)
    def test_variant_consistency_jit(self, device, dtype, op):
        _requires_grad = (dtype in op.supported_backward_dtypes(torch.device(device).type))
        include_conjugated_inputs = op.test_conjugated_samples and dtype.is_complex
        samples = op.sample_inputs(device, dtype, requires_grad=_requires_grad, include_conjugated_inputs=include_conjugated_inputs)
        # Acquires variants to test
        func = op.get_op()
        method = op.get_method()
        variants = {
            # TODO: inplace tests currently fail, fix and add inplace variant
            'function': func, 'method': method,
        }
        # TODO: find better way to standardize on op registration itself..
        has_fake_function = op.name in ["resize_", 'resize_as_']
        if has_fake_function:
            variants = {'method': getattr(torch.Tensor, op.name)}
            samples = op.sample_inputs(device, dtype, requires_grad=False)
        tested = False
        for sample in samples:
            # Test traced and scripted consistency
            for func_type, variant in variants.items():
                if variant is None:
                    continue
                # scripting and check_alias_analysis do not work with lambdas
                # lambdas are typically used as a way to simulate methods without
                # functional variants, so rely on the other variant for testing
                # for now
                if is_lambda(variant):
                    continue
                tested = True
                try:
                    self.indiv_variant_test_jit(device, dtype, op, sample, func_type, variant, has_fake_function)
                except Exception as e:
                    # Wrap any failure with the op/variant/sample context.
                    variant_error_info = dedent(f"""
                        Error testing {op.name} {func_type} variant
                        with dtype: {dtype}
                        with inputs {sample}:
                    """)
                    raise Exception(variant_error_info) from e
        assert tested, "JIT Test does not execute any logic"
    def indiv_variant_test_jit(self, device, dtype, op, sample, func_type, variant, has_fake_function):
        """Run one (op, sample, variant) combination through scripting/tracing
        consistency, alias-annotation, shape-analysis and autodiff checks."""
        _requires_grad = (dtype in op.supported_backward_dtypes(torch.device(device).type))
        support_script = op.supports_scripting
        # Create accessor for script function variant
        name = op.name + '_' if func_type == 'inplace' else op.name
        # run with disable_autodiff_subgraph_inlining(True) to test
        # autodiff support. Context manager forces the graph to contain
        # DifferentiableGraph nodes if they are present
        with disable_autodiff_subgraph_inlining():
            # Check scripted forward, grad, and grad grad
            if support_script:
                script_fn = create_script_fn(self, name, func_type)
            def out_fn(output):
                # Processes the output for autograd
                if sample.output_process_fn_grad is not None:
                    return sample.output_process_fn_grad(output)
                return output
            def get_sample():
                # In-place ops clone the input so the sample isn't mutated.
                return clone_input_helper(sample.input) if op.name[-1] == '_' else sample.input
            if support_script:
                check_against_reference(self,
                                        script_fn,
                                        op.get_op(),
                                        out_fn,
                                        (get_sample(),) + sample.args,
                                        sample.kwargs,
                                        no_grad=not _requires_grad, no_gradgrad=not op.supports_gradgrad)
            # Check traced forward, grad, and grad grad
            # TODO: fix tracing here
            supports_tracing = op.supports_tracing and not has_fake_function
            if op.assert_jit_shape_analysis:
                self.assertTrue(supports_tracing)
            if supports_tracing:
                traced_fn = create_traced_fn(self, variant)
                check_against_reference(self,
                                        traced_fn,
                                        op.get_op(),
                                        out_fn,
                                        (get_sample(),) + sample.args,
                                        sample.kwargs,
                                        no_grad=not _requires_grad, no_gradgrad=not op.supports_gradgrad)
            # Check alias annotation schema for correctness (make
            # sure inputs that aren't supposed to be modified aren't)
            # Note: only runs in float32 because schema isn't affected by dtype,
            # so running it on all dtypes is would be excessive
            if dtype == torch.float32:
                # TODO: no reason why we cant run this with tracing graph
                if support_script and op.name != "rsub":
                    check_alias_annotation(name, (get_sample(),) + sample.args, sample.kwargs,
                                           func_type=func_type, aten_name=op.aten_name)
                # TODO: use script graph as well
                checked_shape_analysis = False
                if supports_tracing:
                    out = variant(get_sample(), *sample.args, **sample.kwargs)
                    # right now, tuple of outputs and tensor output supported
                    # TODO: list of tensor outputs
                    tuple_of_tensors = isinstance(out, tuple) and all([isinstance(elem, torch.Tensor) for elem in out])
                    if isinstance(out, torch.Tensor) or tuple_of_tensors:
                        if tuple_of_tensors:
                            sizes = [elem.size() for elem in out]
                        else:
                            sizes = out.size()
                        self.checkShapeAnalysis(sizes, traced_fn.graph, op.assert_jit_shape_analysis)
                        checked_shape_analysis = True
                if op.assert_jit_shape_analysis:
                    self.assertTrue(checked_shape_analysis)
            # Check autodifferentiation of nodes for traced and scripted graphs, only need to check once per sample
            if dtype is torch.float32:
                # Sandcastle doesn't fuse nodes
                if IS_SANDCASTLE:
                    # fusible nodes are expected to be found in FusionGroups in the DifferentiableGraphs
                    nonfusible_nodes = op.autodiff_nonfusible_nodes + op.autodiff_fusible_nodes
                    fusible_nodes = []
                else:
                    nonfusible_nodes = op.autodiff_nonfusible_nodes
                    fusible_nodes = op.autodiff_fusible_nodes
                if supports_tracing:
                    self.assertAutodiffNode(traced_fn.last_graph, op.assert_autodiffed, nonfusible_nodes, fusible_nodes)
                if support_script:
                    self.assertAutodiffNode(script_fn.last_graph, op.assert_autodiffed, nonfusible_nodes, fusible_nodes)
    # alias testing is only done with torch.float for the same reason
    _alias_ops = partial(ops, dtypes=OpDTypes.supported,
                         allowed_dtypes=(torch.float,))
    @_alias_ops((op for op in op_db if op.aliases))
    def test_jit_alias_remapping(self, device, dtype, op):
        """Checks that scripted/traced graphs record the canonical aten name
        rather than the alias used in the source."""
        # NOTE: only tests on first sample
        samples = op.sample_inputs(device, dtype, requires_grad=True)
        sample = first_sample(self, samples)
        # [Scripting Data Preparation]
        # Prepare data for test scripting
        # Below we prepare strings of args/kwargs with and without type annotations.
        # These strings are inserted into function template strings which is then torch scripted.
        # - args string is ["t0"] corresponding to the "input" tensor required by the op
        # - args_kw is the value of args and strings of kwargs used to call the op (without type annotations), for example,
        # ["to", "1.0", "(1,)", "True", "tensor(1.0)"] -> def fn(t0): return variant(t0, 1.0, (1,), True, tensor(1.0))
        args = ["t0"]
        def quote_strs(v):
            if isinstance(v, str):
                return f"'{v}'"
            return str(v)
        args_kw = args + \
            [f"{v}" for v in sample.args] + \
            [f"{k}={quote_strs(v)}" for k, v in sample.kwargs.items()]
        # Prepare data for test tracing
        sample_args_kwargs = ()
        if len(sample.args) > 0:
            sample_args_kwargs += (sample.args, )
        if len(sample.kwargs) > 0:
            sample_args_kwargs += (sample.kwargs, )
        original_name = op.aten_name
        original_name_inplace = original_name + "_"
        expected_dtype = op(sample.input, *sample.args, **sample.kwargs).dtype
        for a_op in op.aliases:
            inplace = a_op.inplace_variant
            method_or_inplace = [a_op.inplace_variant, a_op.method_variant]
            # NOTE(review): `variants` is a generator; the scripting loop below
            # exhausts it, so the "Test tracing" loop further down iterates
            # zero times. Materialize as a tuple if tracing coverage is
            # intended — confirm before changing.
            variants = (v for v in (a_op.op, a_op.method_variant, a_op.inplace_variant) if v is not None)
            # Test scripting:
            for variant in variants:
                variant_name = variant.__name__
                op_name = original_name_inplace if variant is inplace else original_name
                if variant in method_or_inplace:
                    fn_template = '''
                        def _fn(t0{c}):
                            return t0.{alias_name}({args_kw})
                    '''
                    # remove the first input tensor
                    script = fn_template.format(
                        c=", " if len(args_kw[1:]) > 1 else "",
                        args_kw=", ".join(args_kw[1:]),
                        alias_name=variant_name,
                    )
                else:
                    fn_template = '''
                        def _fn({args}):
                            return variant({args_kw})
                    '''
                    script = fn_template.format(
                        args=", ".join(args),
                        args_kw=", ".join(args_kw),
                    )
                # Required to avoid undefined value: tensor error in JIT
                # compilation of the function template
                script = script.replace("tensor(", "torch.tensor(")
                scripted = torch.jit.CompilationUnit(script)._fn
                if (variant is inplace and not torch.can_cast(expected_dtype, dtype)):
                    try:
                        inp = clone_input_helper(sample.input)
                        scripted(inp)
                    except Exception as e:
                        continue
                    self.fail("Inplace operation on integer tensor that should be promoted to float didn't fail!")
                inp = clone_input_helper(sample.input)
                scripted(inp)
                inp = clone_input_helper(sample.input)
                graph = scripted.graph_for(inp)
                FileCheck().check(op.aten_name).check_not(variant_name).run(graph)
            # Test tracing:
            for variant in variants:
                variant_name = variant.__name__
                op_name = original_name_inplace if variant is inplace else original_name
                def _fn(*sample_args, **sample_kwargs):
                    return variant(*sample_args, **sample_kwargs)
                inp = (clone_input_helper(sample.input),) + sample_args_kwargs
                traced = torch.jit.trace(_fn, *inp)
                inp = (clone_input_helper(sample.input),) + sample_args_kwargs
                traced(*inp)
                inp = (clone_input_helper(sample.input),) + sample_args_kwargs
                graph = traced.graph_for(*inp)
                FileCheck().check(op_name).check_not(variant_name).run(graph)
# Generate per-device-type variants of TestJit (e.g. CPU/CUDA classes).
instantiate_device_type_tests(TestJit, globals())
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_ops_jit.py |
# Owner(s): ["oncall: package/deploy"]
def load_tests(loader, standard_tests, pattern):
    """Load all tests from `test/package/`

    unittest load_tests protocol hook: discovers the package sub-suite and
    appends it to the standard suite.
    """
    if pattern is None:
        # Use the default pattern if none is specified by the test loader.
        pattern = "test*.py"
    package_tests = loader.discover("package", pattern=pattern)
    standard_tests.addTests(package_tests)
    return standard_tests
# Entry point: delegate to the shared PyTorch test runner.
if __name__ == "__main__":
    from torch.testing._internal.common_utils import run_tests
    run_tests()
| pytorch-master | test/test_package.py |
# Owner(s): ["oncall: jit"]
import sys
# Re-run the whole test_jit suite with the legacy executor selected via argv
# (NOTE(review): presumably parsed during test_jit/common_utils import — confirm).
sys.argv.append("--jit_executor=legacy")
from test_jit import *  # noqa: F403
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_jit_legacy.py |
# Owner(s): ["module: unknown"]
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
class LoggingTest(TestCase):
    """Smoke test for PyTorch API-usage logging (PYTORCH_API_USAGE)."""
    def testApiUsage(self):
        """
        This test verifies that api usage logging is not triggered via static
        initialization. Since it's triggered at first invocation only - we just
        subprocess
        """
        # Importing torch in a fresh process should emit the usage log line.
        s = TestCase.runWithPytorchAPIUsageStderr("import torch")
        self.assertRegex(s, "PYTORCH_API_USAGE.*import")
        # import the shared library directly - it triggers static init but doesn't call anything
        s = TestCase.runWithPytorchAPIUsageStderr("from ctypes import CDLL; CDLL('{}')".format(torch._C.__file__))
        self.assertNotRegex(s, "PYTORCH_API_USAGE")
# Entry point: delegate to the shared PyTorch test runner.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_logging.py |
# Owner(s): ["oncall: jit"]
import sys
import os
import contextlib
import subprocess
from torch.testing._internal.common_utils import TestCase, run_tests, TemporaryFileName
@contextlib.contextmanager
def _jit_disabled():
cur_env = os.environ.get("PYTORCH_JIT", "1")
os.environ["PYTORCH_JIT"] = "0"
try:
yield
finally:
os.environ["PYTORCH_JIT"] = cur_env
class TestJitDisabled(TestCase):
    """
    These tests are separate from the rest of the JIT tests because we need
    run a new subprocess and `import torch` with the correct environment
    variables set.
    """
    def compare_enabled_disabled(self, src):
        """
        Runs the script in `src` with PYTORCH_JIT enabled and disabled and
        compares their stdout for equality.
        """
        # Write `src` out to a temporary so our source inspection logic works
        # correctly.
        with TemporaryFileName() as fname:
            with open(fname, 'w') as f:
                f.write(src)
            with _jit_disabled():
                out_disabled = subprocess.check_output([
                    sys.executable,
                    fname])
            out_enabled = subprocess.check_output([
                sys.executable,
                fname])
            self.assertEqual(out_disabled, out_enabled)
    def test_attribute(self):
        """ScriptModule with a jit.Attribute prints identically either way."""
        _program_string = """
import torch
class Foo(torch.jit.ScriptModule):
    def __init__(self, x):
        super(Foo, self).__init__()
        self.x = torch.jit.Attribute(x, torch.Tensor)
    def forward(self, input):
        return input
s = Foo(torch.ones(2, 3))
print(s.x)
"""
        self.compare_enabled_disabled(_program_string)
    def test_script_module_construction(self):
        """Constructing a ScriptModule succeeds with the JIT disabled too."""
        _program_string = """
import torch
class AModule(torch.jit.ScriptModule):
    def __init__(self):
        super(AModule, self).__init__()
    @torch.jit.script_method
    def forward(self, input):
        pass
AModule()
print("Didn't throw exception")
"""
        self.compare_enabled_disabled(_program_string)
    def test_recursive_script(self):
        """torch.jit.script on an nn.Module succeeds with the JIT disabled."""
        _program_string = """
import torch
class AModule(torch.nn.Module):
    def __init__(self):
        super(AModule, self).__init__()
    def forward(self, input):
        pass
sm = torch.jit.script(AModule())
print("Didn't throw exception")
"""
        self.compare_enabled_disabled(_program_string)
# Entry point: delegate to the shared PyTorch test runner.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_jit_disabled.py |
# Owner(s): ["NNC"]
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import unittest
import itertools
from torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests
from torch.testing._internal.jit_utils import JitTestCase, TensorExprTestOptions
class BaseTestClass(JitTestCase):
    """Shared setup for the tensorexpr (NNC) fuser tests: applies
    TensorExprTestOptions for each test (restored in tearDown) and records
    the device list to exercise."""
    def setUp(self):
        super(BaseTestClass, self).setUp()
        self.tensorexpr_options = TensorExprTestOptions()
        # Also run on CUDA when it is available.
        self.devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
    def tearDown(self):
        # Undo whatever TensorExprTestOptions changed in setUp.
        self.tensorexpr_options.restore()
        super(BaseTestClass, self).tearDown()
    def assertLastGraphAllFused(self):
        self.assertAllFused(torch.jit.last_executed_optimized_graph())
def warmup_and_run_forward(f, *args):
    """Invoke ``f(*args)`` enough times to pass the profiling runs (so the
    optimized graph is used) and return the final call's result."""
    total_runs = torch._C._jit_get_num_profiled_runs() + 1
    for _ in range(total_runs):
        out = f(*args)
    return out
class TestTensorExprFuser(BaseTestClass):
def test_easy(self):
def easy(x, y):
aaa = torch.add(x, y)
return aaa
traced = torch.jit.trace(easy, (torch.rand(1024), torch.rand(1024)))
a = torch.rand(1024)
b = torch.rand(1024)
x = warmup_and_run_forward(traced, a, b)
self.assertLastGraphAllFused()
np.testing.assert_allclose(a.numpy() + b.numpy(), x.numpy())
def test_three_arg(self):
def easy(x, y, z):
aaa = torch.add(x, y)
bbb = torch.add(aaa, z)
return bbb
traced = torch.jit.trace(
easy, (torch.rand(1024), torch.rand(1024), torch.rand(1024))
)
a = torch.rand(1024)
b = torch.rand(1024)
c = torch.rand(1024)
x = warmup_and_run_forward(traced, a, b, c)
self.assertLastGraphAllFused()
npr = a.numpy() + b.numpy() + c.numpy()
np.testing.assert_allclose(npr, x.numpy())
def test_four_arg(self):
def run_addcmul(x, y, z, w):
c = torch.addcmul(torch.add(x, y), z, w)
return c
for dev in self.devices:
rand_a = torch.rand(1024, dtype=torch.float, device=dev)
rand_b = torch.rand(1024, dtype=torch.float, device=dev)
rand_c = torch.rand(1024, dtype=torch.float, device=dev)
rand_d = torch.rand(1024, dtype=torch.float, device=dev)
traced = torch.jit.trace(
run_addcmul,
(
torch.zeros(1024, dtype=torch.float, device=dev),
torch.zeros(1024, dtype=torch.float, device=dev),
torch.zeros(1024, dtype=torch.float, device=dev),
torch.zeros(1024, dtype=torch.float, device=dev),
),
)
x = warmup_and_run_forward(traced, rand_a, rand_b, rand_c, rand_d)
self.assertLastGraphAllFused()
y = run_addcmul(rand_a, rand_b, rand_c, rand_d)
np.testing.assert_allclose(x.cpu().numpy(), y.cpu().numpy(), atol=1e-6)
def test_three_arg2(self):
for device in self.devices:
def test(x, y, z):
aaa = torch.add(x, y)
bbb = torch.add(aaa, z)
return bbb
M = 32
N = 32
traced = torch.jit.trace(
test,
(
torch.rand(M, N, device=device),
torch.rand(M, N, device=device),
torch.rand(M, N, device=device),
),
)
a = torch.rand(M, N, device=device)
b = torch.rand(M, N, device=device)
c = torch.rand(M, N, device=device)
x = traced(a, b, c)
x = warmup_and_run_forward(traced, a, b, c)
self.assertLastGraphAllFused()
npr = a.cpu().numpy() + b.cpu().numpy() + c.cpu().numpy()
np.testing.assert_allclose(npr, x.cpu().numpy())
def test_broadcast3(self):
for device in self.devices:
def test_body(M, N, L, K):
def test(x, y, z):
v1 = torch.add(x, y)
v2 = torch.add(v1, z)
return v2
a_shape = [M, N]
b_shape = [L, M, 1]
c_shape = [K, L, 1, 1]
traced = torch.jit.trace(
test,
(
torch.rand(*a_shape, device=device),
torch.rand(*b_shape, device=device),
torch.rand(*c_shape, device=device),
),
)
a = torch.rand(*a_shape, device=device)
b = torch.rand(*b_shape, device=device)
c = torch.rand(*c_shape, device=device)
x = warmup_and_run_forward(traced, a, b, c)
self.assertLastGraphAllFused()
npr = a.cpu().numpy() + b.cpu().numpy() + c.cpu().numpy()
np.testing.assert_allclose(npr, x.cpu().numpy())
test_configs = [[5, 2, 7, 3], [8, 8, 8, 8]]
for test_config in test_configs:
test_body(*test_config)
def test_all_combos(self):
def easy(x, y, z):
a = torch.add(x, y)
b = torch.add(a, z)
c = torch.add(x, b)
d = torch.add(c, a)
return d
def np_easy(x, y, z):
a = x + y
b = a + z
c = x + b
d = c + a
return d
traced = torch.jit.trace(
easy, (torch.rand(1024), torch.rand(1024), torch.rand(1024))
)
a = torch.rand(1024)
b = torch.rand(1024)
c = torch.rand(1024)
x = warmup_and_run_forward(traced, a, b, c)
self.assertLastGraphAllFused()
npr = np_easy(a.numpy(), b.numpy(), c.numpy())
np.testing.assert_allclose(npr, x.numpy())
def test_rank_two(self):
def easy(x, y, z):
a = torch.add(x, y)
b = torch.add(a, z)
c = torch.add(x, b)
d = torch.add(c, a)
return d
def np_easy(x, y, z):
a = x + y
b = a + z
c = x + b
d = c + a
return d
shape = 32, 32
traced = torch.jit.trace(
easy, (torch.rand(shape), torch.rand(shape), torch.rand(shape))
)
a = torch.rand(shape)
b = torch.rand(shape)
c = torch.rand(shape)
x = warmup_and_run_forward(traced, a, b, c)
self.assertLastGraphAllFused()
npr = np_easy(a.numpy(), b.numpy(), c.numpy())
np.testing.assert_allclose(npr, x.numpy())
def test_broadcast(self):
    """Fuse adds where a rank-1 operand broadcasts against rank-2 operands."""
    def easy(x, y, z):
        a = torch.add(x, y)
        b = torch.add(a, z)
        return b
    # Pure-NumPy mirror of `easy`, used as the reference result.
    def np_easy(x, y, z):
        a = x + y
        b = a + z
        return b
    N = 32
    # `b` is 1-D and broadcasts against the (N, N) inputs.
    traced = torch.jit.trace(easy, (torch.rand(N, N), torch.rand(N), torch.rand(N, N)))
    a = torch.rand(N, N)
    b = torch.rand(N)
    c = torch.rand(N, N)
    x = warmup_and_run_forward(traced, a, b, c)
    self.assertLastGraphAllFused()
    npr = np_easy(a.numpy(), b.numpy(), c.numpy())
    np.testing.assert_allclose(npr, x.numpy())
def test_broadcast_2(self):
    """Broadcast fusion where a captured 1-element tensor is closed over by the traced fn."""
    zero = torch.tensor([0.0], dtype=torch.float)
    def foo(x, y, z):
        aaa = torch.add(x, y)
        bbb = torch.add(zero, aaa)  # `zero` is a trace-time constant
        return torch.add(bbb, z)
    # Pure-NumPy mirror of `foo`, used as the reference result.
    def foo_np(x, y, z):
        a = x + y
        b = zero.numpy() + a
        return b + z
    x = torch.rand(3, 4)
    y = torch.ones(3, 1)
    z = torch.rand(4)
    traced = torch.jit.trace(foo, (x, y, z))
    r = warmup_and_run_forward(traced, x, y, z)
    self.assertLastGraphAllFused()
    rnp = foo_np(x.numpy(), y.numpy(), z.numpy())
    np.testing.assert_allclose(r, rnp)
def test_broadcast_big2(self):
    """Same captured-constant broadcast as test_broadcast_2, on larger (32x1024) inputs."""
    zero = torch.tensor([0.0], dtype=torch.float)
    def foo(x, y, z):
        aaa = torch.add(x, y)
        bbb = torch.add(zero, aaa)  # `zero` is a trace-time constant
        return torch.add(bbb, z)
    # Pure-NumPy mirror of `foo`, used as the reference result.
    def foo_np(x, y, z):
        a = x + y
        b = zero.numpy() + a
        return b + z
    x = torch.rand(32, 1024)
    y = torch.ones(32, 1)
    z = torch.rand(1024)
    traced = torch.jit.trace(foo, (x, y, z))
    r = warmup_and_run_forward(traced, x, y, z)
    self.assertLastGraphAllFused()
    rnp = foo_np(x.numpy(), y.numpy(), z.numpy())
    np.testing.assert_allclose(r, rnp)
def test_alpha(self):
    """torch.add with a non-default `alpha` scaling factor: x + alpha*x."""
    def alpha(x):
        aaa = torch.add(x, x, alpha=2.0)
        return aaa
    traced = torch.jit.trace(alpha, (torch.tensor([1.0])))
    a = torch.tensor([1.0])
    x = traced(a)
    # add(x, x, alpha=2.0) == x + 2.0 * x
    np.testing.assert_allclose(a.numpy() + 2.0 * a.numpy(), x.numpy())
@suppress_warnings
def test_constant(self):
    """Fuse an add against a tensor constructed inside the traced function."""
    def constant(x):
        bbb = torch.tensor([1.0])  # becomes a constant in the traced graph
        aaa = torch.add(x, bbb)
        return aaa
    traced = torch.jit.trace(constant, (torch.tensor([1.0])))
    a = torch.tensor([1.0])
    x = warmup_and_run_forward(traced, a)
    self.assertLastGraphAllFused()
    np.testing.assert_allclose(a.numpy() + 1.0, x.numpy())
def test_add_sub(self):
    """Fuse an add followed by a sub and compare against NumPy."""
    def easy(x, y, z):
        aaa = torch.add(x, y)
        bbb = torch.sub(aaa, z)
        return bbb
    traced = torch.jit.trace(
        easy, (torch.rand(1024), torch.rand(1024), torch.rand(1024))
    )
    a = torch.rand(1024)
    b = torch.rand(1024)
    c = torch.rand(1024)
    x = warmup_and_run_forward(traced, a, b, c)
    self.assertLastGraphAllFused()
    np.testing.assert_allclose(a.numpy() + b.numpy() - c.numpy(), x.numpy())
def test_promotion(self):
    """Fused add must apply int32 -> float32 type promotion like eager mode."""
    def easy(x, y):
        aaa = torch.add(x, y)
        return aaa
    traced = torch.jit.trace(
        easy,
        (torch.zeros(1024, dtype=torch.int32), torch.rand(1024, dtype=torch.float32)),
    )
    a = torch.zeros(1024, dtype=torch.int32)
    b = torch.rand(1024, dtype=torch.float32)
    x = warmup_and_run_forward(traced, a, b)
    self.assertLastGraphAllFused()
    np.testing.assert_allclose(a.numpy() + b.numpy(), x.numpy())
def test_double(self):
    """Fusion of add + mul on float64 (double) inputs."""
    TENSOR_LEN = 8
    def easy(x, y):
        aaa = torch.add(x, y)
        bbb = torch.mul(aaa, y)
        return bbb
    traced = torch.jit.trace(
        easy,
        (torch.rand(TENSOR_LEN, dtype=torch.float64), torch.full((TENSOR_LEN,), 0.5, dtype=torch.float64)),
    )
    # torch.double is an alias of torch.float64, matching the trace inputs.
    a = torch.rand(TENSOR_LEN, dtype=torch.double)
    b = torch.full((TENSOR_LEN,), 0.5, dtype=torch.double)
    x = warmup_and_run_forward(traced, a, b)
    self.assertLastGraphAllFused()
    np.testing.assert_allclose((a.numpy() + b.numpy()) * b.numpy(), x.numpy())
def test_short(self):
    """Fusion of add + mul on int16 (short) inputs."""
    TENSOR_LEN = 8
    def easy(x, y):
        aaa = torch.add(x, y)
        bbb = torch.mul(aaa, y)
        return bbb
    traced = torch.jit.trace(
        easy,
        (torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int16),
         torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int16)),
    )
    a = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int16)
    b = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int16)
    x = warmup_and_run_forward(traced, a, b)
    self.assertLastGraphAllFused()
    np.testing.assert_allclose((a.numpy() + b.numpy()) * b.numpy(), x.numpy())
def test_char(self):
    """Fusion of add + mul on int8 (char) inputs."""
    TENSOR_LEN = 8
    def easy(x, y):
        aaa = torch.add(x, y)
        bbb = torch.mul(aaa, y)
        return bbb
    traced = torch.jit.trace(
        easy,
        (torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8),
         torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8)),
    )
    a = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8)
    b = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8)
    x = warmup_and_run_forward(traced, a, b)
    self.assertLastGraphAllFused()
    np.testing.assert_allclose((a.numpy() + b.numpy()) * b.numpy(), x.numpy())
def test_int64_promotion(self):
    """Fused int8 + int64 arithmetic must promote to int64 like eager mode."""
    TENSOR_LEN = 8
    def easy(x, y):
        aaa = torch.add(x, y)
        bbb = torch.mul(aaa, y)
        return bbb
    traced = torch.jit.trace(
        easy,
        (torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8),
         torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int64)),
    )
    a = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8)
    b = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int64)
    x = warmup_and_run_forward(traced, a, b)
    self.assertLastGraphAllFused()
    np.testing.assert_allclose((a.numpy() + b.numpy()) * b.numpy(), x.numpy())
def test_eq(self):
    """Fused elementwise equality: equal int32 inputs compare all-true."""
    def easy(x, y):
        c = torch.eq(x, y)
        return c
    # Traced with float inputs, then run with int32 — the profiling executor
    # re-specializes on the actual dtypes.
    traced = torch.jit.trace(easy, (torch.zeros(1024), torch.zeros(1024)))
    a = torch.zeros(1024, dtype=torch.int32)
    b = torch.zeros(1024, dtype=torch.int32)
    x = warmup_and_run_forward(traced, a, b)
    self.assertLastGraphAllFused()
    np.testing.assert_allclose(np.ones(1024), x.numpy())
def test_ne(self):
    """Fused elementwise inequality: zeros vs ones compare all-true."""
    def easy(x, y):
        c = torch.ne(x, y)
        return c
    traced = torch.jit.trace(easy, (torch.zeros(1024), torch.zeros(1024)))
    a = torch.zeros(1024, dtype=torch.int32)
    b = torch.ones(1024, dtype=torch.int32)
    x = warmup_and_run_forward(traced, a, b)
    self.assertLastGraphAllFused()
    np.testing.assert_allclose(np.ones(1024), x.numpy())
def test_ge(self):
    """Fused elementwise >= : fives vs zeros compare all-true."""
    def easy(x, y):
        c = torch.ge(x, y)
        return c
    traced = torch.jit.trace(easy, (torch.zeros(1024), torch.zeros(1024)))
    # Build the all-5 input via NumPy to also exercise from_numpy tensors.
    aa = np.empty([1024], dtype=np.int32)
    aa.fill(5)
    a = torch.from_numpy(aa)
    b = torch.zeros(1024, dtype=torch.int32)
    x = warmup_and_run_forward(traced, a, b)
    self.assertLastGraphAllFused()
    np.testing.assert_allclose(np.ones(1024), x.numpy())
def test_gt(self):
    """Fused elementwise > : ones vs zeros compare all-true."""
    def easy(x, y):
        c = torch.gt(x, y)
        return c
    traced = torch.jit.trace(easy, (torch.zeros(1024), torch.zeros(1024)))
    a = torch.ones(1024, dtype=torch.int32)
    b = torch.zeros(1024, dtype=torch.int32)
    x = warmup_and_run_forward(traced, a, b)
    self.assertLastGraphAllFused()
    np.testing.assert_allclose(np.ones(1024), x.numpy())
def test_le(self):
    """Fused elementwise <= : fives vs zeros compare all-false."""
    def easy(x, y):
        c = torch.le(x, y)
        return c
    traced = torch.jit.trace(easy, (torch.zeros(1024), torch.zeros(1024)))
    aa = np.empty([1024], dtype=np.int32)
    aa.fill(5)
    a = torch.from_numpy(aa)
    b = torch.zeros(1024, dtype=torch.int32)
    x = warmup_and_run_forward(traced, a, b)
    self.assertLastGraphAllFused()
    np.testing.assert_allclose(np.zeros(1024), x.numpy())
def test_lt(self):
    """Fused elementwise < on every configured device: ones vs zeros compare all-false."""
    def easy(x, y):
        c = torch.lt(x, y)
        return c
    for dev in self.devices:
        traced = torch.jit.trace(easy, (torch.zeros(1024, device=dev), torch.zeros(1024, device=dev)))
        a = torch.ones(1024, dtype=torch.int32, device=dev)
        b = torch.zeros(1024, dtype=torch.int32, device=dev)
        x = warmup_and_run_forward(traced, a, b)
        self.assertLastGraphAllFused()
        np.testing.assert_allclose(np.zeros(1024), x.cpu().numpy())
@suppress_warnings
def test_min_max(self):
    """Fused elementwise min/max with a captured scalar-tensor clamp floor of 4.0."""
    def test(x, y):
        return torch.max(torch.min(x, y), torch.tensor([4.0]))
    traced = torch.jit.trace(test, (torch.zeros(1024), torch.zeros(1024)))
    # Inputs in [0, 8) so the 4.0 floor is hit for roughly half the elements.
    a = 8.0 * torch.rand(1024)
    b = 8.0 * torch.rand(1024)
    np.testing.assert_allclose(
        warmup_and_run_forward(traced, a, b), np.maximum(np.minimum(a.numpy(), b.numpy()), [4.0])
    )
    self.assertLastGraphAllFused()
def test_min_max_reduction(self):
    """Full min/max reductions (functional form) fused into one kernel."""
    def test(x):
        return torch.min(x) + torch.max(x)
    traced = torch.jit.trace(test, (torch.zeros(1024)))
    a = 8.0 * torch.rand(1024)
    np.testing.assert_allclose(warmup_and_run_forward(traced, a), np.amin(a.numpy()) + np.amax(a.numpy()))
    self.assertLastGraphAllFused()
def test_min_max_reduction2(self):
    """Full min/max reductions (method form) fused into one kernel."""
    def test(x):
        return x.min() + x.max()
    traced = torch.jit.trace(test, (torch.zeros(1024)))
    a = 8.0 * torch.rand(1024)
    np.testing.assert_allclose(warmup_and_run_forward(traced, a), np.amin(a.numpy()) + np.amax(a.numpy()))
    self.assertLastGraphAllFused()
def test_min_max_reduction_dim1(self):
    """min/max reductions along dim 1; [0] selects the values from the (values, indices) pair."""
    def test(x):
        return torch.min(x, 1)[0] + torch.max(x, 1)[0]
    traced = torch.jit.trace(test, (torch.zeros(16, 16)))
    a = 8.0 * torch.rand(16, 16)
    np.testing.assert_allclose(warmup_and_run_forward(traced, a), np.amin(
        a.numpy(), axis=1) + np.amax(a.numpy(), axis=1))
    self.assertLastGraphAllFused()
def test_min_max_reduction_dim1_2(self):
    """Dim reduction fed by a fused elementwise producer (x * x)."""
    def test(x):
        return torch.min(x * x, 1)
    traced = torch.jit.trace(test, (torch.zeros(16, 16)))
    a = 8.0 * torch.rand(16, 16)
    # torch.min(_, 1) returns (values, indices); compare values only.
    np.testing.assert_allclose(warmup_and_run_forward(traced, a)[0], np.amin((a * a).numpy(), axis=1))
    self.assertLastGraphAllFused()
def test_clamp(self):
    """Fused clamp with both bounds on every configured device."""
    def test(x):
        return torch.clamp(x + 3.0, 0.0, 6.0)
    for dev in self.devices:
        traced = torch.jit.trace(test, (torch.zeros(1024, device=dev)))
        # Inputs in [-10, 10) so both clamp bounds are exercised.
        a = 20.0 * torch.rand(1024, device=dev) - 10.0
        an = a.cpu().numpy()
        np.testing.assert_allclose(warmup_and_run_forward(traced, a).cpu(), np.clip(an + 3.0, 0.0, 6.0))
        self.assertLastGraphAllFused()
def test_relu(self):
    """Fused relu followed by clamp, checked against the NumPy equivalent."""
    def test(x):
        return torch.clamp(F.relu(x), 0, 0.5)
    for dev in self.devices:
        traced = torch.jit.trace(test, (torch.zeros(1024, device=dev)))
        a = 20.0 * torch.rand(1024, device=dev) - 10.0
        an = a.cpu().numpy()
        # relu == maximum(0, x); the outer clip mirrors torch.clamp.
        np.testing.assert_allclose(warmup_and_run_forward(traced, a).cpu(), np.clip((np.maximum(0, an)), 0, 0.5))
        self.assertLastGraphAllFused()
def test_reps(self):
    """Run the same fused kernel many times to check repeated-execution stability."""
    def easy(x, y):
        c = torch.add(x, y)
        return c
    traced = torch.jit.trace(easy, (torch.rand(1024), torch.rand(1024)))
    for _ in range(32):
        a = torch.ones(1024)
        b = torch.zeros(1024)
        x = warmup_and_run_forward(traced, a, b)
        np.testing.assert_allclose(np.ones(1024), x.numpy())
def test_add_const_rhs(self):
    """Fuse an add whose right-hand side is a Python scalar constant."""
    def test(x):
        return x + 3.0
    traced = torch.jit.trace(test, torch.rand(4))
    x = torch.rand(4)
    y = warmup_and_run_forward(traced, x)
    self.assertLastGraphAllFused()
    np.testing.assert_allclose(x.numpy() + 3.0, y.numpy())
def test_int_output(self):
    """Fused kernel producing an integer-typed output (int32 multiplies)."""
    def test(x, y, z):
        return x * y * z
    # Random ints in [1, 4) to avoid zeros dominating the product.
    xs = [(torch.rand(4) * 3 + 1).to(torch.int32) for i in range(3)]
    x, y, z = xs
    xn, yn, zn = [t.numpy() for t in xs]
    traced = torch.jit.trace(test, (x, y, z))
    res = warmup_and_run_forward(traced, x, y, z)
    self.assertLastGraphAllFused()
    np.testing.assert_allclose(xn * yn * zn, res.numpy())
def test_binary_ops(self):
    """Fuse each supported binary op (composed with an add so a fusion group forms)
    and compare the fused result to eager mode on every configured device."""
    def test_atan2(x, y):
        c = torch.atan2(torch.add(x, y), y)
        return c
    def test_gt(x, y):
        c = torch.gt(torch.add(x, y), y)
        return c
    def test_ge(x, y):
        c = torch.ge(torch.add(x, y), y)
        return c
    def test_lt(x, y):
        c = torch.lt(torch.add(x, y), y)
        return c
    def test_le(x, y):
        c = torch.le(torch.add(x, y), y)
        return c
    def test_lerp(x, y):
        c = torch.lerp(torch.add(x, 1), x, 2.0)
        return c
    def test_mul(x, y):
        c = torch.mul(torch.add(x, y), y)
        return c
    def test_ne(x, y):
        c = torch.ne(torch.add(x, y), y)
        return c
    def test_div(x, y):
        c = torch.div(torch.add(x, y), 2)
        return c
    def test_eq(x, y):
        c = torch.eq(torch.add(x, y), y)
        return c
    def test_fmod(x, y):
        c = torch.fmod(torch.add(x, y), 2)
        return c
    def test_sub(x, y):
        c = torch.sub(torch.add(x, y), x)
        return c
    def test_remainder(x, y):
        c = torch.remainder(torch.add(x, y), 3.0)
        return c
    def test_pow(x, y):
        c = torch.pow(torch.add(x, y), 2.0)
        return c
    def test_type_as(x, y):
        return x.type_as(torch.add(x, y))
    # Set of all op-wrappers to exercise (iteration order is irrelevant).
    fns = {
        test_atan2,
        test_gt,
        test_ge,
        test_lt,
        test_le,
        test_lerp,
        test_mul,
        test_ne,
        test_div,
        test_eq,
        test_fmod,
        test_sub,
        test_remainder,
        test_pow,
        test_type_as,
    }
    for torch_fn in fns:
        for dev in self.devices:
            rand_a = torch.rand(1024, device=dev)
            rand_b = torch.rand(1024, device=dev)
            # Trace on different (larger-range) data than we evaluate on.
            in1 = 20 * torch.rand(1024, device=dev)
            in2 = 20 * torch.rand(1024, device=dev)
            traced = torch.jit.trace(torch_fn, (in1, in2))
            x = warmup_and_run_forward(traced, rand_a, rand_b)
            self.assertLastGraphAllFused()
            # Eager mode is the reference.
            y = torch_fn(rand_a, rand_b)
            np.testing.assert_allclose(x.cpu().numpy(), y.cpu().numpy(), atol=2e-3)
def test_unary_ops(self):
    """Fuse each supported unary op (composed with an add so a fusion group forms)
    and compare the fused result to eager mode on every configured device."""
    # NOTE(review): test_cast_float and the duplicate test_rqrt below are
    # defined but deliberately left out of `fns`.
    def test_cast_float(x, y):
        c = torch.ops.aten._cast_Float(torch.add(x, y))
        return c
    def test_round(x, y):
        c = torch.round(torch.add(x, y))
        return c
    def test_sin(x, y):
        c = torch.sin(torch.add(x, y))
        return c
    def test_asin(x, y):
        c = torch.asin(torch.add(x, y))
        return c
    def test_sinh(x, y):
        c = torch.sinh(torch.add(x, y))
        return c
    def test_cos(x, y):
        c = torch.cos(torch.add(x, y))
        return c
    def test_acos(x, y):
        c = torch.acos(torch.add(x, y))
        return c
    def test_cosh(x, y):
        c = torch.cosh(torch.add(x, y))
        return c
    def test_tan(x, y):
        c = torch.tan(torch.add(x, y))
        return c
    def test_atan(x, y):
        c = torch.atan(torch.add(x, y))
        return c
    def test_tanh(x, y):
        c = torch.tanh(torch.add(x, y))
        return c
    def test_sqrt(x, y):
        c = torch.sqrt(torch.add(x, y))
        return c
    def test_rsqrt(x, y):
        c = torch.rsqrt(torch.add(x, y))
        return c
    def test_floor(x, y):
        c = torch.floor(torch.add(x, y))
        return c
    def test_ceil(x, y):
        c = torch.ceil(torch.add(x, y))
        return c
    def test_trunc(x, y):
        c = torch.trunc(torch.add(x, y))
        return c
    def test_abs(x, y):
        c = torch.abs(torch.add(x, y))
        return c
    def test_log(x, y):
        c = torch.log(torch.add(x, y))
        return c
    def test_log2(x, y):
        c = torch.log2(torch.add(x, y))
        return c
    def test_log10(x, y):
        c = torch.log10(torch.add(x, y))
        return c
    def test_log1p(x, y):
        c = torch.log1p(torch.add(x, y))
        return c
    def test_rqrt(x, y):
        c = torch.rsqrt(torch.add(x, y))
        return c
    def test_erf(x, y):
        c = torch.erf(torch.add(x, y))
        return c
    def test_exp(x, y):
        c = torch.exp(torch.add(x, y))
        return c
    def test_expm1(x, y):
        c = torch.expm1(torch.add(x, y))
        return c
    def test_erfc(x, y):
        c = torch.erfc(torch.add(x, y))
        return c
    def test_frac(x, y):
        c = torch.frac(torch.add(x, y))
        return c
    def test_lgamma(x, y):
        c = torch.lgamma(torch.add(x, y))
        return c
    def test_sigmoid(x, y):
        c = torch.sigmoid(torch.add(x, y))
        return c
    def test_reciprocal(x, y):
        c = torch.reciprocal(torch.add(x, y))
        return c
    def test_neg(x, y):
        c = torch.neg(torch.add(x, y))
        return c
    def test_relu(x, y):
        c = torch.relu(torch.add(x, y))
        return c
    def test_hardtanh(x, y):
        c = F.hardtanh(torch.add(x, y), -1.0, 1.0)
        return c
    def test_threshold(x, y):
        c = F.threshold(torch.add(x, y), 0.5, 10)
        return c
    # Set of all op-wrappers to exercise (iteration order is irrelevant).
    fns = {
        test_round,
        test_sin,
        test_asin,
        test_sinh,
        test_cos,
        test_acos,
        test_cosh,
        test_tan,
        test_atan,
        test_sqrt,
        test_floor,
        test_ceil,
        test_trunc,
        test_abs,
        test_log,
        test_log2,
        test_log10,
        test_log1p,
        test_rsqrt,
        test_exp,
        test_expm1,
        test_erf,
        test_erfc,
        test_frac,
        test_lgamma,
        test_reciprocal,
        test_neg,
        test_threshold,
        test_relu,
        test_tanh,
        test_hardtanh,
        test_sigmoid,
    }
    for torch_fn in fns:
        for dev in self.devices:
            # print(torch_fn, dev)
            # Evaluate on [0, 1) inputs so ops like log/asin stay in-domain.
            rand_a = torch.rand(1024, device=dev)
            rand_b = torch.rand(1024, device=dev)
            ins = 20 * torch.rand(1024, device=dev)
            cc = np.empty([1024], dtype=np.float32)
            cc.fill(np.nan)
            nans = torch.from_numpy(cc).to(dev)  # reserved for the disabled NaN checks below
            traced = torch.jit.trace(torch_fn, (ins, ins))
            x = warmup_and_run_forward(traced, rand_a, rand_b)
            self.assertLastGraphAllFused()
            y = torch_fn(rand_a, rand_b)
            np.testing.assert_allclose(x.cpu().numpy(), y.cpu().numpy(), atol=2e-3)
            # nans
            # TODO: reenable. Currently all of the tests fail
            # traced = torch.jit.trace(torch_fn, (ins, ins))
            # x = warmup_and_run_forward(traced, rand_a, rand_b)
            # y = torch_fn(nans, rand_b)
            # try:
            #     np.testing.assert_allclose(x.cpu().numpy(), y.cpu().numpy())
            #     print("Succeeded on dev=", dev, "function=", torch_fn)
            # except AssertionError:
            #     # Print extra info before exiting:
            #     print("Failed on dev=", dev, "function=", torch_fn)
            #     # np.testing.assert_allclose(x.cpu().numpy(), y.cpu().numpy())
def test_rand_like(self):
    """Fused rand_like: statistically check the input's moments match U(0,1).

    The traced output is random, so the assertions validate moments of the
    uniform distribution (E[x^k] = 1/(k+1)) rather than exact values.
    """
    N = 1 << 16
    def run_rand_like(x, y):
        return torch.rand_like(torch.add(x, y))
    for device in self.devices:
        x = torch.rand(N, device=device)
        # check_trace=False because the output is nondeterministic.
        traced = torch.jit.trace(run_rand_like, (x, x), check_trace=False)
        x_v = warmup_and_run_forward(traced, x, x)
        self.assertLastGraphAllFused()
        x_np = x.cpu().numpy()
        x1_mean = np.mean(x_np)
        x2_mean = np.mean(x_np ** 2)
        x3_mean = np.mean(x_np ** 3)
        np.testing.assert_allclose(x1_mean, 1. / 2, rtol=2e-2)
        np.testing.assert_allclose(x2_mean, 1. / 3, rtol=2e-2)
        np.testing.assert_allclose(x3_mean, 1. / 4, rtol=2e-2)
def test_nans(self):
    """Fused min/max must propagate NaN regardless of argument order."""
    def test_max(x, y):
        return torch.max(2 * x, 2 * y)
    def test_min(x, y):
        return torch.min(2 * x, 2 * y)
    tmax = torch.jit.trace(test_max, (torch.rand(1), torch.rand(1)))
    tmin = torch.jit.trace(test_min, (torch.rand(1), torch.rand(1)))
    x = torch.tensor([np.nan])
    y = torch.tensor([1.0])
    # NaN must win in both operand positions.
    assert np.isnan(warmup_and_run_forward(tmin, x, y).item())
    assert np.isnan(warmup_and_run_forward(tmin, y, x).item())
    self.assertLastGraphAllFused()
    assert np.isnan(warmup_and_run_forward(tmax, x, y).item())
    assert np.isnan(warmup_and_run_forward(tmax, y, x).item())
    self.assertLastGraphAllFused()
def test_double_intrinsics(self):
    """Fuse a pow intrinsic on float64 inputs on every configured device."""
    def do_pow(x):
        return torch.pow(x, 7)
    for device in self.devices:
        x = torch.rand(10, dtype=torch.double, device=device)
        traced = torch.jit.trace(do_pow, (x))
        x = warmup_and_run_forward(traced, x)
        self.assertLastGraphAllFused()
def test_remainder(self):
    """Fused torch.remainder vs eager mode for random, zero, and NaN numerators.

    Three scenarios are checked against the eager result: ordinary random
    floats, a zero numerator (division-by-zero path inside remainder), and
    all-NaN numerators/denominators.
    """
    def run_remainder(x, y):
        c = torch.remainder(torch.add(x, y), x)
        return c

    a = torch.rand(1024, dtype=float)
    b = torch.rand(1024, dtype=float)
    zeros = torch.zeros(1024, dtype=float)
    # BUG FIX: `np.array(1024, dtype=float)` created a 0-d scalar array (value
    # 1024.0), so `nans` was a scalar NaN tensor instead of a 1024-element NaN
    # vector. Use np.empty([1024]) as the sibling tests (test_ge, test_le,
    # test_unary_ops) do.
    cc = np.empty([1024], dtype=float)
    cc.fill(np.nan)
    nans = torch.from_numpy(cc)

    # random floats
    traced = torch.jit.trace(run_remainder, (torch.zeros(1024), torch.zeros(1024)))
    x = warmup_and_run_forward(traced, a, b)
    self.assertLastGraphAllFused()
    y = run_remainder(a, b)
    np.testing.assert_allclose(x.numpy(), y.numpy())

    # div by 0
    traced = torch.jit.trace(run_remainder, (torch.zeros(1024), torch.zeros(1024)))
    x = warmup_and_run_forward(traced, zeros, a)
    self.assertLastGraphAllFused()
    y = run_remainder(zeros, a)
    np.testing.assert_allclose(x.numpy(), y.numpy())

    # numerators and denominators are nan
    traced = torch.jit.trace(run_remainder, (torch.zeros(1024), torch.zeros(1024)))
    x = warmup_and_run_forward(traced, nans, a)
    self.assertLastGraphAllFused()
    y = run_remainder(nans, a)
    np.testing.assert_allclose(x.numpy(), y.numpy())
def test_multioutput(self):
    """A fusion group with two outputs, one feeding the other."""
    def easy(x):
        b = x + 1
        c = b + b
        return (b, c)
    traced = torch.jit.trace(easy, (torch.zeros(1024)))
    a = torch.zeros(1024)
    b, c = warmup_and_run_forward(traced, a)
    self.assertLastGraphAllFused()
    bp = a.numpy() + 1
    cp = bp + bp
    np.testing.assert_allclose(b.numpy(), bp)
    np.testing.assert_allclose(c.numpy(), cp)
def test_chunk(self):
    """Fuse elementwise ops around a torch.chunk split."""
    def easy(x):
        y = x + 1
        aaa, bbb = torch.chunk(y, 2)
        return aaa + bbb
    # Traced at (1024, 1024) but run at (32, 32); the profiling executor
    # re-specializes on the actual shape.
    traced = torch.jit.trace(easy, (torch.zeros(1024, 1024)))
    a = torch.zeros(32, 32)
    x = warmup_and_run_forward(traced, a)
    self.assertLastGraphAllFused()
    npr = a.numpy()
    npr2 = npr + 1
    # np.array_split along axis 0 mirrors torch.chunk(y, 2).
    npr_a, npr_b = np.array_split(npr2, 2)
    np.testing.assert_allclose(npr_a + npr_b, x.numpy())
def test_cat(self):
    """Fuse aten::cat together with elementwise producers and consumers."""
    for device in self.devices:
        def foo(*args):
            # Per-input offset so each concatenated slice is distinguishable.
            args_2 = [v + i for i, v in enumerate(args)]
            v = torch.cat(args_2, dim=1)
            return v * v
        M = 16
        Ns = [128, 16, 1]
        values = [torch.zeros(M, N, device=device) for N in Ns]
        traced = torch.jit.trace(foo, values)
        x = warmup_and_run_forward(traced, *values)
        self.assertLastGraphAllFused()
        ref = foo(*values)
        np.testing.assert_allclose(ref.cpu().numpy(), x.cpu().numpy())
# This test checks that we correctly handle fusion group with just aten::cat in it.
# Note that the test only makes sense with min_fusion_group=1, otherwise no
# fusion groups would be formed at all.
# TODO: Fix and re-enable the test.
@unittest.skip("cat is broken with fusion group inlining disabled")
def test_cat_only(self):
    """A fusion group containing nothing but aten::cat (currently skipped)."""
    for device in self.devices:
        def foo(*args):
            args_2 = [v + i for i, v in enumerate(args)]
            v = torch.cat(args_2, dim=1)
            return v
        M = 16
        Ns = [128, 16, 1]
        values = [torch.zeros(M, N, device=device) for N in Ns]
        traced = torch.jit.trace(foo, values)
        x = warmup_and_run_forward(traced, *values)
        self.assertLastGraphAllFused()
        ref = foo(*values)
        np.testing.assert_allclose(ref.cpu().numpy(), x.cpu().numpy())
def test_cat_negative_dim(self):
    """Fused aten::cat must handle a negative concatenation dim (-1)."""
    for device in self.devices:
        def foo(*args):
            v = torch.cat(args, dim=-1)
            return v * v
        M = 16
        Ns = [128, 16, 1]
        values = [torch.randn(M, N, device=device) for N in Ns]
        traced = torch.jit.trace(foo, values)
        x = warmup_and_run_forward(traced, *values)
        self.assertLastGraphAllFused()
        ref = foo(*values)
        np.testing.assert_allclose(ref.cpu().numpy(), x.cpu().numpy())
def test_cat_promote_inputs(self):
    """Fused aten::cat must promote mixed dtypes (half/float/double) like eager."""
    for device in self.devices:
        def foo(*args):
            v = torch.cat(args, dim=1)
            return v * v
        M = 16
        Ns = [128, 16, 1]
        dtypes = [torch.half, torch.float32, torch.double]
        values = [torch.randn(M, N, device=device, dtype=dt) for N, dt in zip(Ns, dtypes)]
        traced = torch.jit.trace(foo, values)
        x = warmup_and_run_forward(traced, *values)
        self.assertLastGraphAllFused()
        ref = foo(*values)
        np.testing.assert_allclose(ref.cpu().numpy(), x.cpu().numpy())
def test_cat_empty_tensors(self):
    """Fused aten::cat with empty-tensor inputs, including all-empty."""
    for device in self.devices:
        def foo(*args):
            v = torch.cat(args, dim=1)
            return v * v
        M = 16
        Ns = [128, 16, 1]
        empty = torch.tensor([], device=device, dtype=torch.double)
        # One empty tensor mixed with non-empty ones.
        values = [empty] + [torch.randn(M, N, device=device) for N in Ns]
        traced = torch.jit.trace(foo, values)
        x = warmup_and_run_forward(traced, *values)
        self.assertLastGraphAllFused()
        ref = foo(*values)
        np.testing.assert_allclose(ref.cpu().numpy(), x.cpu().numpy())
        # now test with only empty tensors
        values = [empty for i in range(3)]
        traced = torch.jit.trace(foo, values)
        x = warmup_and_run_forward(traced, *values)
        self.assertLastGraphAllFused()
        ref = foo(*values)
        np.testing.assert_allclose(ref.cpu().numpy(), x.cpu().numpy())
def test_cat_with_constant_dim(self):
    """Nested cats (the second over a single-element list) with an empty input."""
    for device in self.devices:
        def foo(*args):
            v1 = torch.cat(args, dim=1)
            v2 = torch.cat([v1], dim=1)  # degenerate single-input cat
            return v2 * v2
        empty = torch.tensor([], device=device, dtype=torch.float32)
        inputs = [empty] + [torch.randn(1, 64, device=device), torch.randn(1, 64, device=device)]
        traced = torch.jit.trace(foo, inputs)
        x = warmup_and_run_forward(traced, *inputs)
        self.assertLastGraphAllFused()
        ref = foo(*inputs)
        np.testing.assert_allclose(ref.cpu().numpy(), x.cpu().numpy())
def test_scalar(self):
    """Scripted adds with scalar (float and int) alpha arguments."""
    @torch.jit.script
    def test_float(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, a: float, b: float) -> torch.Tensor:
        return torch.add(torch.add(x, y, alpha=a), z, alpha=b)
    @torch.jit.script
    def test_int(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, a: int, b: int) -> torch.Tensor:
        return torch.add(torch.add(x, y, alpha=a), z, alpha=b)
    for test in (test_float, test_int):
        x, y, z = [torch.rand(4) for i in range(3)]
        a, b = 1, 2
        # First call warms up the profiling executor; second is the real run.
        test(x, y, z, a, b)
        r = test(x, y, z, a, b)
        xn, yn, zn = [t.numpy() for t in (x, y, z)]
        # add(add(x, y, alpha=a), z, alpha=b) == x + a*y + b*z
        np.testing.assert_allclose(r.numpy(), xn + yn * a + zn * b)
def test_loop(self):
    """Scripted function with a data-dependent loop; just checks it executes."""
    @torch.jit.script
    def test(x: torch.Tensor, y: torch.Tensor, z: int) -> torch.Tensor:
        b = y
        for i in range(0, z):
            a = x + y  # intentionally unused accumulation inside the loop
            b = b + y
        return b
    x, y, z = (torch.zeros(32, 32), torch.ones(32, 32), 4)
    # First call profiles; second call runs the optimized plan. No value
    # assertion here — the test passes if execution succeeds.
    test(x, y, z)
    r = test(x, y, z)
def test_slice(self):
    """Strided slicing (step 2) feeding a fused add."""
    def easy(x, y):
        a = x[0:512:2]
        b = y[0:512:2]
        return a + b
    traced = torch.jit.trace(easy, (torch.ones(1024, 1024), torch.zeros(1024, 1024)))
    a = torch.ones(1024, 1024)
    x = traced(a, a)
    npr = a[0:512:2]
    npr = npr + npr
    np.testing.assert_allclose(npr.numpy(), x.numpy())
def test_unsqueeze(self, N=256):
    """torch.unsqueeze feeding a fused add, checked against np.expand_dims."""
    def easy(x, y):
        a = torch.unsqueeze(x, 0)
        b = torch.unsqueeze(y, 0)
        return a + b
    traced = torch.jit.trace(easy, (torch.ones(N, N), torch.zeros(N, N)))
    a = torch.rand(N, N)
    x = traced(a, a)
    npr = np.expand_dims(a, 0)
    npr = npr + npr
    np.testing.assert_allclose(npr, x.numpy())
def _test_softmax(self, device):
    """Shared body for softmax/log_softmax fusion tests on `device`.

    Covers positive dims, negative dims, and log_softmax, with texpr
    reductions temporarily enabled.
    """
    def test_softmax(x, y):
        a = F.softmax(x, dim=0, dtype=torch.float32)
        b = F.softmax(y, dim=0, dtype=torch.float32)
        c = F.softmax(x, dim=1, dtype=torch.float32)
        d = F.softmax(y, dim=1, dtype=torch.float32)
        return a + b + c + d
    def test_softmax_neg_index(x, y):
        a = F.softmax(x, dim=-2, dtype=torch.float32)
        b = F.softmax(y, dim=-2, dtype=torch.float32)
        c = F.softmax(x, dim=-1, dtype=torch.float32)
        d = F.softmax(y, dim=-1, dtype=torch.float32)
        return a + b + c + d
    def test_log_softmax(x, y):
        a = F.log_softmax(x, dim=0, dtype=torch.float32)
        b = F.log_softmax(y, dim=0, dtype=torch.float32)
        c = F.log_softmax(x, dim=1, dtype=torch.float32)
        d = F.log_softmax(y, dim=1, dtype=torch.float32)
        return a + b + c + d
    for test in (test_softmax, test_log_softmax, test_softmax_neg_index):
        # Reductions must be enabled for softmax to fuse; restore afterwards.
        old = torch._C._jit_set_texpr_reductions_enabled(True)
        traced = torch.jit.trace(test, (torch.randn(2, 3, device=device), torch.randn(2, 3, device=device)))
        inp = torch.randn(2, 3, device=device)
        res = traced(inp, inp)
        # Use eager mode as reference.
        ref = test(inp, inp)
        np.testing.assert_allclose(ref, res.cpu().numpy(), rtol=1e-06, atol=1e-06)
        torch._C._jit_set_texpr_reductions_enabled(old)
def test_softmax_cpu(self):
    """Run the shared softmax fusion checks on CPU."""
    self._test_softmax('cpu')
@unittest.skipIf(not torch.cuda.is_available(), "requires CUDA")
@unittest.skip("global allocs are not supported yet.")
def test_softmax_cuda(self):
    """Run the shared softmax fusion checks on CUDA (currently skipped)."""
    self._test_softmax('cuda')
def test_half_gelu(self):
    """Fuse a bias + erf-based GELU in float16 (CUDA only; no-op without CUDA)."""
    devices = ["cuda"] if torch.cuda.is_available() else []
    @torch.jit.script
    def bias_gelu(bias, y):
        x = bias + y
        # erf-based GELU; 1.41421 approximates sqrt(2).
        return x * 0.5 * (1.0 + torch.erf(x / 1.41421))
    for device in devices:
        a = torch.rand(1024, dtype=torch.half, device=device)
        b = torch.rand(1024, dtype=torch.half, device=device)
        traced = torch.jit.trace(bias_gelu, (a, b))
        x = warmup_and_run_forward(traced, a, b)
        self.assertLastGraphAllFused()
def test_half_bn_relu(self):
    """Fuse batch_norm + relu in float16 (CUDA only; no-op without CUDA)."""
    devices = ["cuda"] if torch.cuda.is_available() else []
    def foo(a, b, c):
        # batch_norm(input, running_mean, running_var) in inference form.
        y = torch.nn.functional.batch_norm(a, b, c)
        z = y.relu()
        return z
    for device in devices:
        a = torch.rand(16, 16, dtype=torch.half, device=device)
        b = torch.rand(16, dtype=torch.half, device=device)
        c = torch.rand(16, dtype=torch.half, device=device)
        traced = torch.jit.trace(foo, (a, b, c))
        # Removed stray debug `print(traced.graph)` that spammed test output.
        x = warmup_and_run_forward(traced, a, b, c)
        self.assertLastGraphAllFused()
def test_exp_pow(self):
    """Fuse chained multiplies with a pow intrinsic on float64 inputs."""
    @torch.jit.script
    def do_exp(x, y, z):
        return ((x * y) * 2) * torch.pow(z, 2)
    for device in self.devices:
        x = torch.rand(10, dtype=torch.double, device=device)
        y = torch.rand(10, dtype=torch.double, device=device)
        z = torch.rand(10, dtype=torch.double, device=device)
        traced = torch.jit.trace(do_exp, (x, y, z))
        x = warmup_and_run_forward(traced, x, y, z)
        self.assertLastGraphAllFused()
def test_transpose(self):
    """Fused add with a transposed (non-contiguous) first operand."""
    @torch.jit.script
    def test(x, y, z):
        return x.transpose(0, 1) + y + z
    x = torch.rand(4, 5, 2, 3)
    y = torch.rand(5, 4, 2, 3)
    z = torch.rand(5, 4, 2, 3)
    # First call profiles, second runs the optimized plan; results must match.
    ref = test(x, y, z)
    res = test(x, y, z)
    np.testing.assert_allclose(ref.numpy(), res.numpy())
def test_sliced_stride(self):
    """Fused add where one input is a strided view produced by slicing."""
    @torch.jit.script
    def test(x, y, z):
        return x + y + z
    # x is a step-2 slice, so its batch stride differs from a dense tensor.
    x = torch.rand(16, 4, 2, 3)[::2]
    y = torch.rand(8, 4, 2, 3)
    z = torch.rand(8, 4, 2, 3)
    ref = test(x, y, z)
    res = test(x, y, z)
    np.testing.assert_allclose(ref.numpy(), res.numpy())
@unittest.skip("dynamic shapes are not quite there yet")
@unittest.skipIf(not torch.cuda.is_available(), "requires CUDA")
def test_dynamic_shape(self):
    """Dynamic-shape fusion on CUDA: re-run with new shapes, broadcasts,
    and a shape mismatch that must raise (currently skipped)."""
    with num_profiled_runs(2):
        @torch.jit.script
        def test(x, y, z):
            return x * y * z
        x, y, z = [torch.rand(4, 8).cuda() for _ in range(3)]
        ref = test(x, y, z)
        # Run once at a different shape to exercise shape re-specialization.
        _ = test(*[torch.rand(6, 8).cuda() for _ in range(3)])
        res = test(x, y, z)
        np.testing.assert_allclose(ref.cpu().numpy(), res.cpu().numpy())

        # A wild broadcast appears.
        x = torch.rand(4, 8).cuda()
        y = torch.rand(1, 8).cuda()
        z = torch.rand(4, 1).cuda()
        res = test(x, y, z)
        xn, yn, zn = [t.cpu().numpy() for t in (x, y, z)]
        np.testing.assert_allclose(res.cpu().numpy(), xn * yn * zn)

        # Mismatched shapes shouldn't reach codegen.
        # BUG FIX: the old try/except silently passed when no exception was
        # raised; assertRaisesRegex makes a missing error fail the test.
        x = torch.rand(4, 8).cuda()
        y = torch.rand(4, 8).cuda()
        z = torch.rand(5, 8).cuda()
        with self.assertRaisesRegex(RuntimeError, r"The size of tensor a \(4\) must match"):
            test(x, y, z)

        # Changing a static dimension fails guards.
        # x, y, z = [torch.rand(4, 7).cuda() for _ in range(3)]
        # xn, yn, zn = [t.cpu().numpy() for t in (x, y, z)]
        # res = test(x, y, z)
        # print(test.graph_for(x, y, z))
        # np.testing.assert_allclose(res.cpu().numpy(), xn * yn * zn)
@unittest.skipIf(not torch.cuda.is_available(), "requires CUDA")
def test_guard_fails(self):
    """Run a scripted kernel past its profiling runs, then with a new shape so
    the shape guard fails and the fallback path executes. Passing == no crash."""
    @torch.jit.script
    def test(x, y, z):
        return x * y * z
    r1 = test(*[torch.rand(4).cuda() for _ in range(3)])
    r2 = test(*[torch.rand(4).cuda() for _ in range(3)])
    r3 = test(*[torch.rand(4).cuda() for _ in range(3)])
    r4 = test(*[torch.rand(7).cuda() for _ in range(3)])  # guard failure
def test_bitwise_ops(self):
    """Fuse each bitwise/shift op (composed with & so a fusion group forms)."""
    def run_and(x, y):
        return x & (x & y)
    def run_or(x, y):
        return x & (x | y)
    def run_xor(x, y):
        return x ^ (x ^ y)
    def run_lshift(x, y):
        return x & (x << y)
    def run_rshift(x, y):
        return x & (x >> y)
    fns = {run_and, run_or, run_xor, run_lshift, run_rshift}
    for device in self.devices:
        for fn in fns:
            a = torch.ones(128, dtype=torch.int32, device=device)
            b = torch.zeros(128, dtype=torch.int32, device=device)
            inp = torch.ones(128, dtype=torch.int32, device=device)
            traced = torch.jit.trace(fn, (inp, inp))
            x = warmup_and_run_forward(traced, a, b)
            self.assertLastGraphAllFused()
            # Eager mode is the reference.
            y = fn(a, b)
            np.testing.assert_allclose(x.cpu().numpy(), y.cpu().numpy())
def test_where(self):
    """Fuse torch.where with a fused comparison as its condition."""
    def run_where(x, y):
        return torch.where(torch.gt(x, y), x, y)
    a = torch.rand(1024, dtype=float)
    b = torch.rand(1024, dtype=float)
    traced = torch.jit.trace(run_where, (torch.zeros(1024), torch.zeros(1024)))
    x = warmup_and_run_forward(traced, a, b)
    self.assertLastGraphAllFused()
    y = run_where(a, b)
    np.testing.assert_allclose(x.numpy(), y.numpy())
def test_multi_rand(self):
    """rand_like used twice in one fused expression must produce the SAME
    random tensor both times, so (x + y) - (y - x) == 2x exactly."""
    for device in self.devices:
        def test(x):
            y = torch.rand_like(x)
            return (x + y) - (y - x)
        a = torch.rand(4, device=device)
        scripted = torch.jit.script(test)
        out = warmup_and_run_forward(scripted, a)
        self.assertLastGraphAllFused()
        # y cancels out only if both uses saw identical random values.
        assert torch.allclose(out, 2 * a)
def test_mask(self):
    """Fuse unsqueeze + equality on a boolean input tensor."""
    def test(x):
        return x.unsqueeze(1) == 0
    for d in self.devices:
        x = torch.rand(4, device=d) > 0.5  # boolean mask input
        scripted = torch.jit.script(test)
        out = warmup_and_run_forward(scripted, x)
        self.assertLastGraphAllFused()
        assert torch.equal(out, test(x))
def test_simple_add(self):
    """Exercise the block-codegen path for a simple add, restoring flags after."""
    # Save and flip the global codegen/fallback flags; restored at the end.
    val = torch._C._jit_get_te_generate_block_code()
    torch._C._jit_set_te_generate_block_code(True)
    fall_bk = torch._C._jit_texpr_fallback_allowed()
    torch._C._jit_texpr_set_fallback_allowed(True)
    def simple(a, b):
        return torch.add(a, b)
    a = torch.ones(256, 256)
    b = torch.ones(256, 256)
    traced = torch.jit.trace(simple,
                             (torch.ones(256, 256), torch.ones(256, 256)))
    f = traced(a, b)
    f_test = np.full((256, 256), 2, dtype=float)
    np.testing.assert_allclose(f.numpy(), f_test)
    torch._C._jit_set_te_generate_block_code(val)
    torch._C._jit_texpr_set_fallback_allowed(fall_bk)
def test_strided_output_preserved(self):
    """Fused kernels must preserve the input's (non-default) output strides."""
    def foo(a, b):
        return a + b - a
    # smaller, easier to debug example
    x = torch.arange(6)
    # Column-major-like layout: strides (1, 2) on a (2, 3) view.
    x = torch.as_strided(x, (2, 3), (1, 2))
    total = 0
    for i in range(2):
        for j in range(3):
            x[i, j] = total
            total += 1
    foo_script = torch.jit.script(foo)
    # Two warmup calls for the profiling executor before the measured call.
    foo_script(x, x)
    foo_script(x, x)
    out_s = foo_script(x, x)
    out_eager = foo(x, x)
    self.assertEqual(out_s, out_eager)
    self.assertEqual(out_s.stride(), out_eager.stride())
    self.assertLastGraphAllFused()
    # more dims
    N, C, H, W, = 2, 3, 4, 5
    # channels_last gives NHWC strides, another non-default layout.
    x = torch.rand(N, C, H, W).to(memory_format=torch.channels_last)
    foo_script = torch.jit.script(foo)
    foo_script(x, x)
    foo_script(x, x)
    out_s = foo_script(x, x)
    out_eager = foo(x, x)
    self.assertEqual(out_s, out_eager)
    self.assertEqual(out_s.stride(), out_eager.stride())
    self.assertLastGraphAllFused()
def test_alias_analysis_module(self):
    """Scripted module must match eager after two module attributes are aliased.

    `self.b.add_(y)` mutates an attribute; once `a` is rebound to `b`,
    alias analysis must not reuse stale values of `a`.
    """
    class AliasModule(nn.Module):
        def __init__(self):
            super(AliasModule, self).__init__()
            torch.manual_seed(1337)  # identical weights in both instances
            self.a = torch.randn(128, 128)
            self.b = torch.randn(128, 128)
            self.c = torch.randn(128, 128)
        def forward(self, x, y, z):
            z = z + self.a
            self.b.add_(y)  # in-place mutation of a module attribute
            w = z + self.a
            z = w + x
            return z
    x = torch.randn(128, 128)
    def getModule(script):
        am = AliasModule()
        if script:
            return torch.jit.script(am)
        return am
    am = getModule(False)
    am_s = getModule(True)
    ref = am(x, x, x)
    test = am_s(x, x, x)
    torch.testing.assert_close(ref, test)
    # Now do the aliasing
    am.a = am.b
    ref = am(x, x, x)
    am_s.a = am_s.b
    test = am_s(x, x, x)
    torch.testing.assert_close(ref, test)
def test_alias_analysis_inputs(self):
    """Scripted module must match eager when all three inputs alias each other.

    `x.add_(y)` mutates an input in place while x, y and z are all the same
    tensor; the fuser must respect the mutation ordering.
    """
    class AliasModule(nn.Module):
        def __init__(self):
            super(AliasModule, self).__init__()
            torch.manual_seed(1337)  # identical weights in both instances
            self.a = torch.randn(128, 128)
            self.b = torch.randn(128, 128)
            self.c = torch.randn(128, 128)
        def forward(self, x, y, z):
            x.add_(y)  # in-place mutation of an aliased input
            w = z + self.a
            z = w + x
            return z
    def getModule(script):
        am = AliasModule()
        if script:
            return torch.jit.script(am)
        return am
    am = getModule(False)
    am_s = getModule(True)
    # Re-seed and rebuild x before each run since forward mutates it.
    torch.manual_seed(1337)
    x = torch.randn(128, 128)
    ref = am(x, x, x)
    torch.manual_seed(1337)
    x = torch.randn(128, 128)
    test = am_s(x, x, x)
    torch.testing.assert_close(ref, test)
def test_alias_analysis_input_and_module(self):
    """Scripted and eager must agree when a module attribute aliases the
    (in-place mutated) input tensor."""
    class AliasModule(nn.Module):
        def __init__(self):
            super().__init__()
            torch.manual_seed(1337)
            self.a = torch.randn(128, 128)
            self.b = torch.randn(128, 128)
            self.c = torch.randn(128, 128)

        def forward(self, x, y, z):
            x.add_(y)
            w = z + self.b
            z = w + x
            return z

    def build(script):
        module = AliasModule()
        return torch.jit.script(module) if script else module

    eager = build(False)
    scripted = build(True)
    outputs = []
    for module in (eager, scripted):
        torch.manual_seed(1337)
        arg = torch.randn(128, 128)
        # the attribute read in forward now aliases the call argument
        module.b = arg
        outputs.append(module(arg, arg, arg))
    torch.testing.assert_close(outputs[0], outputs[1])
def test_multiple_outputs(self):
    """Traced graphs with several outputs (one fed by a strided input) must
    match eager results after the fused graph kicks in."""
    for device in self.devices:
        # A bug reported internally similar to the one reported in #48533
        def foo(a, b, c):
            t_next = c + 1
            t5 = t_next * b
            t6 = torch.unsqueeze(t_next, 1)
            t7 = a * t6
            return (t7, t5, t_next)
        a = torch.rand(20, 20, dtype=torch.float32, device=device)
        # deliberately non-contiguous: 20 elements, stride 29
        b = torch.rand(20 * 29, dtype=torch.float32, device=device).as_strided([20], [29])
        c = torch.ones(20, dtype=torch.int64, device=device)
        traced = torch.jit.trace(foo, (a, b, c))
        ref = foo(a, b, c)
        exp = traced(a, b, c)
        # second call runs the optimized (fused) graph; keep both calls
        exp = traced(a, b, c)
        self.assertEqual(ref, exp)
def test_propagated_mem_layout(self):
    """Memory layout (contiguous vs channels-last) must propagate correctly
    through traced/fused graphs, for single- and multi-output functions.

    Bug fix: ``configs`` was a bare ``itertools.product`` iterator, which the
    first ("STATIC") fusion strategy consumed entirely — the "DYNAMIC"
    strategy then iterated an exhausted iterator and tested nothing. It is now
    materialized as a list so both strategies see every configuration.
    """
    def foo(a, b, c):
        t_next = c + 1
        t5 = t_next * b
        t7 = a * t5
        return t7

    def foo_multi_outputs(a, b, c):
        t_next = c + 1
        t5 = b * t_next
        t7 = a * t5
        return (t7, t5, t_next)

    def foo_multi_outputs_i_nhwc_o_nchw(a, b, c):
        t_next = c + 1
        t5 = b * t_next
        t7 = a * t5
        # force one output back to NCHW while inputs are NHWC
        t8 = t7.to(memory_format=torch.contiguous_format)
        return (t8, t7, t5, t_next)

    def run_foo_case(foo, a, b, c):
        traced_contiguous = torch.jit.trace(foo, (a, b, c))
        ref = foo(a, b, c)
        exp = traced_contiguous(a, b, c)
        # second invocation runs the optimized graph
        exp = traced_contiguous(a, b, c)
        self.assertEqual(ref, exp)

    mem_layouts = list(itertools.product([torch.contiguous_format, torch.channels_last], repeat=3))
    shapes = [(2, 3, 4, 5), (2, 1, 1, 5), (1, 1, 1, 1)]
    permutes = [(0, 3, 2, 1), (0, 3, 1, 2)]
    funcs = [foo, foo_multi_outputs, foo_multi_outputs_i_nhwc_o_nchw]
    # list(...) so the product can be iterated once per fusion strategy
    configs = list(itertools.product(funcs, shapes, mem_layouts, permutes))
    for strategy in ["STATIC", "DYNAMIC"]:
        old_strategy = torch.jit.set_fusion_strategy([(strategy, 10)])
        for _func, _shape, _mem_layouts, _permute in configs:
            a = torch.rand(_shape, dtype=torch.float32).to(memory_format=_mem_layouts[0])
            b = torch.rand(_shape, dtype=torch.float32).to(memory_format=_mem_layouts[1])
            c = torch.rand(_shape, dtype=torch.float32).to(memory_format=_mem_layouts[2])
            run_foo_case(_func, a, b, c)
            a = a.permute(dims=_permute)
            b = b.permute(dims=_permute)
            c = c.permute(dims=_permute)
            run_foo_case(_func, a, b, c)
        # restore the caller's fusion strategy
        torch.jit.set_fusion_strategy(old_strategy)
# Allow running this test file directly: `python test/test_tensorexpr.py`.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_tensorexpr.py |
# Owner(s): ["module: type promotion"]
from functools import (partial, wraps)
import itertools
import unittest
import torch
from torch.testing._internal.common_utils import (TestCase, run_tests, load_tests, make_tensor,
TEST_NUMPY, torch_to_numpy_dtype_dict, numpy_to_torch_dtype_dict)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, onlyNativeDeviceTypes,
dtypes, onlyCPU, expectedFailureMeta, skipMeta)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, floating_types, get_all_dtypes
)
from torch.testing._creation import (
float_to_corresponding_complex_type_map
)
import numpy as np
# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# Not thread-safe decorator that runs the decorated test once with
# the default dtype being torch.float and again with the default dtype
# being torch.double.
def float_double_default_dtype(fn):
    """Run *fn* twice: once with ``torch.float`` and once with ``torch.double``
    as the default dtype, restoring the previous default afterwards.

    Not thread-safe: it mutates the process-wide default dtype.
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        previous = torch.get_default_dtype()
        try:
            for default in (torch.float, torch.double):
                torch.set_default_dtype(default)
                fn(*args, **kwargs)
        finally:
            torch.set_default_dtype(previous)
    return wrapper
class TestTypePromotion(TestCase):
# In-place operations don't promote.
# `int+float -> float` but `int.add_(float)` is rejected as an error.
# Promoting inplace would require re-allocating and copying the memory of the
# tensor data, since element size could change.
@float_double_default_dtype
def test_inplace(self, device):
    """In-place ops never promote: the result keeps the destination's dtype,
    and values that cannot be cast safely raise a RuntimeError."""
    int_tensor = torch.ones([4, 4, 4], dtype=torch.int32, device=device)
    # int32 += float scalar would require re-allocating -> rejected
    self.assertRaisesRegex(RuntimeError, "can't be cast to", lambda: int_tensor.add_(1.5))
    expected = torch.ones([4, 4, 4], dtype=torch.int32, device=device)
    long_tensor = torch.ones([4, 4, 4], dtype=torch.int64, device=device)
    # demotion long -> int is allowed for in-place ops
    int_tensor.add_(long_tensor)
    int_tensor.add_(1)
    three = expected + 2
    self.assertEqual(int_tensor, three)
    self.assertEqual(int_tensor.dtype, torch.int32)
    bool_tensor = torch.tensor([1, 1, 1], dtype=torch.bool, device=device)
    uint8_tensor = torch.tensor([1, 1, 1], dtype=torch.uint8, device=device)
    # We treat bool as a separate category, which means uint8 cannot cast to bool.
    self.assertRaisesRegex(RuntimeError, "can't be cast to", lambda: bool_tensor.add_(uint8_tensor))
    # We allow demotion from signed to unsigned, unlike numpy, because:
    # * We don't want the performance penalty of inspecting scalar values.
    # * We don't want 'signed' to be considered a distinct 'category'
    # in promotion rules.
    # We don't want signed to be a separate category because if it was,
    # uint16_tensor + 5 would result in a long_tensor, which is not what we want.
    int16_tensor = torch.tensor([1, 1, 1], dtype=torch.int16, device=device)
    uint8_tensor *= int16_tensor
@float_double_default_dtype
def test_unsigned(self, device):
dont_promote = torch.ones(3, dtype=torch.uint8, device=device) + 5
self.assertEqual(dont_promote.dtype, torch.uint8)
# some basic examples
@float_double_default_dtype
def test_int_promotion(self, device):
    """int32 + int64 promotes to int64."""
    a = torch.ones([4, 4, 4], dtype=torch.int32, device=device)
    b = torch.ones([4, 4, 4], dtype=torch.int64, device=device)
    c = a + b
    self.assertEqual(c, b + b)
    self.assertEqual(c.dtype, torch.int64)
@float_double_default_dtype
def test_float_promotion(self, device):
    """float + double promotes to double, regardless of operand order."""
    def check(narrow_dtype, wide_dtype):
        narrow = torch.ones([4, 4, 4], dtype=narrow_dtype, device=device)
        wide = torch.ones([4, 4, 4], dtype=wide_dtype, device=device)
        # both orders must agree in value and dtype
        for result in (narrow + wide, wide + narrow):
            self.assertEqual(result, wide + wide)
            self.assertEqual(result.dtype, wide_dtype)

    check(torch.float, torch.double)
@float_double_default_dtype
def test_complex_promotion(self, device):
    """Complex promotion: complex + complex widens to the wider value type;
    complex scalars against real tensors follow the rules checked by
    complex_scalar_tensor_test below."""
    def test_promotion(dtype_float, dtype_double):
        a = torch.ones([4, 4, 4], dtype=dtype_float, device=device)
        b = torch.ones([4, 4, 4], dtype=dtype_double, device=device)
        c = a + b
        self.assertEqual(c, b + b)
        self.assertEqual(c.dtype, dtype_double)
        c = b + a
        self.assertEqual(c, b + b)
        self.assertEqual(c.dtype, dtype_double)

    test_promotion(torch.complex64, torch.complex128)
    a = torch.randn(3, dtype=torch.complex64, device=device)
    # Python int scalar does not change the tensor's dtype
    self.assertEqual((a * 5).dtype, torch.complex64)
    # not a "wrapped number"
    other = torch.tensor(5.5, dtype=torch.double, device=device)
    self.assertEqual((a + other).dtype, torch.complex64)

    def make_scalar_tensor(dtype):
        return make_tensor((), dtype=dtype, device=device)

    def make_1d_tensor(dtype):
        return make_tensor((3,), dtype=dtype, device=device)

    def complex_scalar_tensor_test(s, t):
        # As per type promotion rules,
        # Complex Scalar and Float Tensor -> Complex Tensor with Value type of Float Tensor
        # Complex Scalar and Integral Tensor -> Complex Tensor with Value type of Complex Scalar
        if t.dtype.is_floating_point:
            # defaults to return complex64 (for bfloat16)
            expected_dtype = float_to_corresponding_complex_type_map.get(t.dtype, torch.complex64)
        else:  # integral tensor
            if isinstance(s, torch.Tensor):
                expected_dtype = s.dtype
            else:
                expected_dtype = float_to_corresponding_complex_type_map[torch.get_default_dtype()]
        # result dtype must be symmetric and match torch.result_type
        self.assertEqual((s * t).dtype, expected_dtype)
        self.assertEqual((t * s).dtype, expected_dtype)
        self.assertEqual(torch.result_type(s, t), expected_dtype)
        self.assertEqual(torch.result_type(t, s), expected_dtype)

    if torch.device(device).type != 'xla':
        # chalf is not supported on XLA
        s = make_scalar_tensor(dtype=torch.chalf)
        # Same Value type
        t = make_1d_tensor(dtype=torch.half)
        # 0-D Tensor X 1-D Tensor
        complex_scalar_tensor_test(s, t)
        # Python Scalar X 1-D Tensor
        complex_scalar_tensor_test(s.item(), t)
        # Higher Value Type
        t = make_1d_tensor(dtype=torch.float)
        complex_scalar_tensor_test(s, t)
        complex_scalar_tensor_test(s.item(), t)
        # Special Case
        t = make_1d_tensor(dtype=torch.bfloat16)
        complex_scalar_tensor_test(s, t)
        complex_scalar_tensor_test(s.item(), t)
        # Integral Tensor
        t = make_1d_tensor(dtype=torch.long)
        complex_scalar_tensor_test(s, t)
        complex_scalar_tensor_test(s.item(), t)
    # CFloat Scalar
    s = make_scalar_tensor(dtype=torch.cfloat)
    # Lower Value type than CFloat
    t = make_1d_tensor(dtype=torch.half)
    complex_scalar_tensor_test(s, t)
    complex_scalar_tensor_test(s.item(), t)
    # Higher Value type than CFloat
    t = make_1d_tensor(dtype=torch.double)
    complex_scalar_tensor_test(s, t)
    complex_scalar_tensor_test(s.item(), t)
    # Integral Tensor
    t = make_1d_tensor(dtype=torch.long)
    # 0-D Tensor X 1-D Tensor
    complex_scalar_tensor_test(s, t)
    # Python Scalar X 1-D Tensor
    complex_scalar_tensor_test(s.item(), t)
    # CDouble Scalar
    s = make_scalar_tensor(dtype=torch.cdouble)
    # Lower Value type than CDouble
    t = make_1d_tensor(dtype=torch.float)
    complex_scalar_tensor_test(s, t)
    complex_scalar_tensor_test(s.item(), t)
    # Special Case
    t = make_1d_tensor(dtype=torch.bfloat16)
    complex_scalar_tensor_test(s, t)
    complex_scalar_tensor_test(s.item(), t)
@float_double_default_dtype
def test_complex_scalar_mult_tensor_promotion(self, device):
    """Multiplying and adding a Python complex with a real tensor yields a
    complex tensor of the expected value and dtype."""
    product = 1j * torch.ones(2, device=device)
    product = product + 1j
    expected = torch.tensor([2j, 2j], device=device)
    self.assertEqual(product, expected)
    self.assertEqual(product.dtype, expected.dtype)
@float_double_default_dtype
def test_add_wrapped(self, device):
    """Adding a wrapped Python int scalar does not promote an int tensor."""
    a = torch.ones([4, 4, 4], dtype=torch.int, device=device)
    b = 1
    c = a + b
    self.assertEqual(c, a + a)
    self.assertEqual(c.dtype, torch.int)
@float_double_default_dtype
def test_int_to_float(self, device):
a = torch.ones([4, 4, 4], dtype=torch.int32, device=device)
b = torch.ones([4, 4, 4], dtype=torch.float, device=device)
c = a + b
self.assertEqual(c.dtype, torch.float32)
# some examples from:
# https://github.com/pytorch/pytorch/issues/9515
@float_double_default_dtype
def test_from_issue(self, device):
a = torch.rand(3, dtype=torch.float32, device=device)
u = torch.tensor([0, 0, 1], dtype=torch.uint8, device=device)
self.assertEqual((a * 5).dtype, torch.float32)
self.assertEqual((u + 1).dtype, torch.uint8)
self.assertEqual((u + 1000).dtype, torch.uint8) # integer overflow
# not a "wrapped number"
other = torch.tensor(5.5, dtype=torch.double, device=device)
self.assertEqual((u + 5.5).dtype, torch.get_default_dtype())
self.assertEqual((u + other).dtype, torch.double)
# adding a 0-dim tensor to a float doesn't promote to double unless first
# type was integral.
self.assertEqual((a + other).dtype, torch.float32)
@float_double_default_dtype
def test_half(self, device):
half = torch.tensor(5.5, dtype=torch.float16, device=device)
self.assertEqual((half + 2.2).dtype, torch.float16)
self.assertEqual((half + 100000).dtype, torch.float16) # inf
default_tensor = torch.tensor(100000.0, device=device)
self.assertEqual((half + default_tensor).dtype, torch.get_default_dtype())
def test_bfloat16(self, device):
    """bfloat16 promotion: Python scalars never promote; bf16 x fp16 widens to float32."""
    # with scalar
    bf = torch.tensor(5.5, dtype=torch.bfloat16, device=device)
    for scalar in (2.2, 5, 100000):  # bf + 100000 is inf
        self.assertEqual((bf + scalar).dtype, torch.bfloat16)
        self.assertEqual(scalar + bf, bf + scalar)
    for scalar in (complex(1, 1), complex(-2, 0), complex(0, -3)):
        self.assertEqual((bf + scalar).dtype, torch.cfloat)
        self.assertEqual(bf + scalar, scalar + bf)
    # with tensor
    for dtype in all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool):
        t = torch.tensor(1, dtype=dtype, device=device)
        # addition must be commutative regardless of promotion
        self.assertEqual(bf + t, t + bf)
        if dtype in (torch.float16, torch.float32, torch.float64, torch.cfloat, torch.cdouble):
            # Handles bfloat16 x float16 -> float32 promotion
            expected_dtype = dtype if dtype != torch.half else torch.float32
        elif dtype is torch.chalf:
            expected_dtype = torch.cfloat
        elif dtype in (torch.bool, torch.uint8,
                       torch.int8, torch.int16, torch.int32, torch.int64, torch.bfloat16):
            expected_dtype = torch.bfloat16
        else:
            raise AssertionError(f'Missing dtype {dtype} not tested.')
        self.assertEqual(torch.promote_types(dtype, torch.bfloat16), expected_dtype)
        self.assertEqual(torch.promote_types(torch.bfloat16, dtype), expected_dtype)
        self.assertEqual((bf + t).dtype, expected_dtype)

@onlyNativeDeviceTypes
def test_complex_half(self, device):
    """complex32 promotion mirrors float16: scalars keep chalf; real floats
    widen to the matching complex type."""
    # with scalar
    chalf = torch.tensor(5.5, dtype=torch.chalf, device=device)
    for scalar in (2.2, 5, 100000):  # chalf + 100000 is inf
        self.assertEqual((chalf * scalar).dtype, torch.chalf)
        self.assertEqual(scalar * chalf, chalf * scalar)
    for scalar in (complex(1, 1), complex(-2, 0), complex(0, -3)):
        self.assertEqual((chalf * scalar).dtype, torch.chalf)
        self.assertEqual(chalf * scalar, scalar * chalf)
    # with tensor
    dtypes = all_types_and_complex_and(torch.chalf, torch.half, torch.bfloat16, torch.bool)
    for dtype in dtypes:
        t = torch.tensor(1, dtype=dtype, device=device)
        self.assertEqual(chalf * t, t * chalf)
        if dtype in (torch.float16, torch.chalf):
            expected_dtype = torch.chalf
        elif dtype in (torch.float, torch.double, torch.bfloat16):
            expected_dtype = torch.cdouble if dtype is torch.double else torch.cfloat
        elif dtype in (torch.cfloat, torch.cdouble):
            expected_dtype = dtype
        elif dtype in (torch.bool, torch.uint8,
                       torch.int8, torch.int16, torch.int32, torch.int64):
            expected_dtype = torch.chalf
        else:
            raise AssertionError(f'Missing dtype {dtype} not tested.')
        self.assertEqual(torch.promote_types(dtype, torch.chalf), expected_dtype)
        self.assertEqual(torch.promote_types(torch.chalf, dtype), expected_dtype)
        self.assertEqual((chalf * t).dtype, expected_dtype)
@float_double_default_dtype
def test_alternate_result(self, device):
    """``out=`` tensors must be castable from the computation's result type."""
    f = torch.tensor([1, 1, 1, 1], dtype=torch.float, device=device)
    o = torch.tensor([0, 0, 0, 0], dtype=torch.long, device=device)
    # float result cannot be written into an integer out tensor ...
    self.assertRaisesRegex(RuntimeError,
                           "can't be cast to",
                           lambda: torch.add(f, f, out=o))
    d = torch.tensor([1, 1, 1, 1], dtype=torch.double, device=device)
    # ... but widening into a double out tensor is fine
    torch.add(f, f, out=d)
    self.assertEqual(d.dtype, torch.double)
    # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
    self.assertEqualIgnoreType(f + f, d)

@float_double_default_dtype
def test_mixed_type_backward(self, device):
    """Backward through mixed-dtype ops must cast gradients back to each input's dtype."""
    f = torch.ones([3, 3], dtype=torch.float, requires_grad=True, device=device)
    ten = torch.tensor([10.], dtype=torch.double, device=device)
    tens = f * ten
    s = (tens + 2).sum()
    s.backward()
    # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
    self.assertEqualIgnoreType(f.grad, tens)
    # If we don't convert the returned grad_input to the actual input type
    # we get an error like:
    # RuntimeError: Function SubBackward0 returned an invalid gradient at index 0 - expected type \
    # torch.FloatTensor but got torch.DoubleTensor
    f_dtypes = [torch.float, torch.double]
    if self.device_type == 'cuda':
        f_dtypes = f_dtypes + [torch.half]
    i_dtypes = [torch.int, torch.long]
    for func in [torch.add, torch.sub, torch.rsub, torch.mul, torch.div]:
        for dtype1, dtype2 in itertools.product(f_dtypes, f_dtypes + i_dtypes):
            x = torch.ones(10, requires_grad=True, dtype=dtype1, device=device)
            y = torch.ones(10, dtype=dtype2, device=device)
            # must not raise: backward casts the grad to dtype1
            func(x, y).sum().backward()
def _get_test_tensor(self, device, dtype, remove_zeros=False):
shape = [5, 5, 5]
if dtype == torch.bool:
tensor = torch.randint(int(remove_zeros), 2, shape, device=device, dtype=dtype)
elif dtype.is_floating_point or dtype.is_complex:
# "_th_normal_ not supported on CPUType for Half" so simpler create and convert
tensor = torch.randn(shape, device=device)
tensor = tensor.to(dtype)
if remove_zeros:
tensor[torch.abs(tensor) < 0.05] = 5
else:
tensor = torch.randint(-5 if dtype.is_signed else 0, 10, shape, device=device, dtype=dtype)
if remove_zeros:
tensor[tensor == 0] = 5
return tensor
# verifies that torch.<op>(first, second) is the same as
# torch.<op>(first.to(common_dtype), second.to(common_dtype)) in cases where that should hold.
@float_double_default_dtype
def test_many_promotions(self, device):
    """Binary ops must match explicit casting to the common dtype, for
    contiguous and non-contiguous inputs alike."""
    # Can also include half on CPU in cases where it will be promoted to a
    # supported dtype
    dtypes1 = get_all_math_dtypes('cuda')
    dtypes2 = get_all_math_dtypes(device)
    ops = [torch.add, torch.sub, torch.mul, torch.div, torch.rsub]
    for dt1, dt2 in itertools.product(dtypes1, dtypes2):
        for op, non_contiguous in itertools.product(ops, [True, False]):
            common_dtype = torch.promote_types(dt1, dt2)
            if common_dtype == torch.half and self.device_type == 'cpu':
                continue
            # NOTE(review): if get_all_math_dtypes never yields bool, this
            # condition skips torch.sub entirely — confirm that is intended.
            if op == torch.sub and common_dtype != torch.bool:
                # Subtraction, the `-` operator, with a bool tensor is not supported.
                continue
            first = self._get_test_tensor(device, dt1)
            # avoid zero divisors for division
            second = self._get_test_tensor(device, dt2, op == torch.div)
            # test ops with non-contiguous tensors
            if non_contiguous:
                first = first.transpose(0, 2)
                second = second.transpose(2, 1)
                self.assertNotEqual(first.stride(), second.stride(),
                                    msg="some non-contiguous issues could be missed if tensors have same strides")
            self.assertEqual(not first.is_contiguous(), non_contiguous)
            self.assertEqual(not second.is_contiguous(), non_contiguous)
            result = op(first, second)
            expected = op(first.to(common_dtype), second.to(common_dtype))
            self.assertEqual(result.dtype, expected.dtype, msg='{} with {}, {}'.format(op.__name__, dt1, dt2))
            self.assertEqual(result, expected, msg='{} with {}, {}'.format(op.__name__, dt1, dt2))
@float_double_default_dtype
def test_non_promoting_ops(self, device):
x = torch.ones(4, dtype=torch.double, device=device)
with self.assertRaises(RuntimeError):
torch.lerp(x, torch.ones(4, dtype=torch.float, device=device), 1)
@float_double_default_dtype
def test_alpha_mismatch(self, device):
    """add's ``alpha`` must match the tensor's category: no float alpha for
    int or bool tensors, but a boolean alpha works for bool tensors."""
    x = torch.ones(4, dtype=torch.int, device=device)
    err = 'alpha must not be'
    self.assertRaisesRegex(RuntimeError, err,
                           lambda: torch.add(x, x, alpha=1.1))
    x = x.to(torch.bool)
    self.assertRaisesRegex(RuntimeError, err,
                           lambda: torch.add(x, x, alpha=1.1))
    # boolean alpha is accepted for bool tensors
    self.assertEqual(x + x, torch.add(x, x, alpha=True))
@float_double_default_dtype
def test_booleans(self, device):
    """bool + bool behaves like logical OR; boolean alpha is only allowed for bool operands."""
    onedim = torch.tensor([True], device=device)
    self.assertEqual(onedim + onedim, onedim)
    self.assertEqual(onedim + True, onedim)
    self.assertEqual(torch.add(True, True), True)
    self.assertEqual(torch.add(False, False), False)
    self.assertEqual(torch.add(False, True), True)
    # boolean alpha with non-bool operands is rejected
    self.assertRaisesRegex(RuntimeError, "Boolean alpha only supported",
                           lambda: torch.add(1, 1, alpha=True))
    self.assertEqual(torch.add(torch.tensor(True, device=device),
                               torch.tensor(True, device=device), True),
                     torch.tensor(True, device=device))
@float_double_default_dtype
def test_create_bool_tensors(self, device):
    """Factory functions given Python bools must treat them as the ints 0/1.

    Bug fix: the ``*_log`` pair below previously duplicated the ``linspace``
    lines verbatim, so ``torch.logspace``'s bool handling (which the variable
    names announce) was never exercised; it now calls ``torch.logspace``.
    """
    expected = torch.tensor([0], dtype=torch.int64, device=device)
    self.assertEqual(torch.arange(False, True, device=device), expected)
    self.assertEqual(torch.arange(True, device=device), expected)
    expected = torch.tensor([0, 0.5], dtype=torch.get_default_dtype(), device=device)
    # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
    self.assertEqualIgnoreType(torch.arange(False, True, 0.5, device=device), expected)
    expected = torch.ones(0, dtype=torch.int64, device=device)
    self.assertEqual(torch.arange(False, False, device=device), expected)
    bool_tensor_lin = torch.linspace(False, True, steps=100, device=device)
    int_tensor_lin = torch.linspace(0, 1, steps=100, device=device)
    self.assertEqual(bool_tensor_lin, int_tensor_lin)
    bool_tensor_log = torch.logspace(False, True, steps=100, device=device)
    int_tensor_log = torch.logspace(0, 1, steps=100, device=device)
    self.assertEqual(bool_tensor_log, int_tensor_log)
    # this seems like odd behavior but ints also create float tensors, numpy doesn't have this function.
    self.assertEqual(torch.scalar_tensor(False, device=device), torch.tensor(0., device=device))
@dtypes(*itertools.product(all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
                           all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool)))
def test_result_type(self, device, dtypes):
    "Test result_type for tensor vs tensor and scalar vs scalar."

    def _get_dtype(x):
        "Get the dtype of x if x is a tensor. If x is a scalar, get its corresponding dtype if it were a tensor."
        if torch.is_tensor(x):
            return x.dtype
        elif isinstance(x, bool):
            return torch.bool
        elif isinstance(x, int):
            return torch.int64
        elif isinstance(x, float):
            return torch.float32
        elif isinstance(x, complex):
            return torch.complex64
        else:
            raise AssertionError(f"Unkonwn type {x}")

    # tensor against tensor
    a_tensor = torch.tensor((0, 1), device=device, dtype=dtypes[0])
    a_single_tensor = torch.tensor(1, device=device, dtype=dtypes[0])
    a_scalar = a_single_tensor.item()
    b_tensor = torch.tensor((1, 0), device=device, dtype=dtypes[1])
    b_single_tensor = torch.tensor(1, device=device, dtype=dtypes[1])
    b_scalar = b_single_tensor.item()
    combo = ((a_tensor, a_single_tensor, a_scalar), (b_tensor, b_single_tensor, b_scalar))
    for a, b in itertools.product(*combo):
        dtype_a = _get_dtype(a)
        dtype_b = _get_dtype(b)
        try:
            result = a + b
        except RuntimeError:
            # if the addition fails, promotion and result_type must fail consistently
            with self.assertRaises(RuntimeError):
                torch.promote_types(dtype_a, dtype_b)
            with self.assertRaises(RuntimeError):
                torch.result_type(a, b)
        else:
            dtype_res = _get_dtype(result)
            if a is a_scalar and b is b_scalar and dtype_a == torch.bool and dtype_b == torch.bool:
                # special case: in Python, True + True is an integer
                self.assertEqual(dtype_res, torch.int64, f"a == {a}, b == {b}")
            else:
                self.assertEqual(dtype_res, torch.result_type(a, b), f"a == {a}, b == {b}")
            if a is a_scalar and b is b_scalar:  # Python internal type determination is good enough in this case
                continue
            if any(a is a0 and b is b0 for a0, b0 in zip(*combo)):  # a and b belong to the same class
                self.assertEqual(dtype_res, torch.promote_types(dtype_a, dtype_b), f"a == {a}, b == {b}")
# Spot check some result type for tensor against scalar (including single-element tensor).
@float_double_default_dtype
def test_result_type_tensor_vs_scalar(self, device):
def _test_spot(a, b, res_dtype):
self.assertEqual(torch.result_type(a, b), res_dtype)
self.assertEqual(torch.result_type(b, a), res_dtype)
_test_spot(torch.tensor([1, 2], dtype=torch.half, device=device),
torch.tensor(1, dtype=torch.long, device=device), torch.half)
_test_spot(torch.tensor(1, dtype=torch.float, device=device),
torch.tensor([1, 2], dtype=torch.double, device=device), torch.double)
_test_spot(torch.tensor(1, dtype=torch.int, device=device), 1, torch.int)
_test_spot(torch.tensor(1, device=device), 1., torch.get_default_dtype())
_test_spot(torch.tensor(1, dtype=torch.long, device=device),
torch.tensor([1, 1], dtype=torch.int, device=device), torch.int)
_test_spot(torch.tensor([1., 1.], dtype=torch.float, device=device), 1., torch.float)
_test_spot(torch.tensor([1., 1.], dtype=torch.complex64, device=device),
torch.tensor(1., dtype=torch.complex128, device=device), torch.complex64)
_test_spot(torch.tensor([1., 1.], dtype=torch.complex128, device=device),
torch.tensor(1., dtype=torch.complex64, device=device), torch.complex128)
_test_spot(torch.tensor([1, 1], dtype=torch.bool, device=device), 1., torch.get_default_dtype())
@float_double_default_dtype
def test_can_cast(self, device):
self.assertTrue(torch.can_cast(torch.double, torch.float))
self.assertFalse(torch.can_cast(torch.float, torch.int))
@float_double_default_dtype
def test_comparison_ops_with_type_promotion(self, device):
    """Comparison ops promote operands for the comparison but always return
    bool tensors and never mutate the operands' dtypes."""
    # a value comfortably representable in each dtype
    value_for_type = {
        torch.uint8: (1 << 5),
        torch.int8: (1 << 5),
        torch.int16: (1 << 10),
        torch.int32: (1 << 20),
        torch.int64: (1 << 35),
        torch.float16: (1 << 10),
        torch.float32: (1 << 20),
        torch.float64: (1 << 35),
        torch.complex64: (1 << 20),
        torch.complex128: (1 << 35)
    }
    comparison_ops = [
        dict(
            name="lt",
            out_op=lambda x, y, d: torch.lt(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
            ret_op=lambda x, y: torch.lt(x, y),
            compare_op=lambda x, y: x < y,
        ),
        dict(
            name="le",
            out_op=lambda x, y, d: torch.le(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
            ret_op=lambda x, y: torch.le(x, y),
            compare_op=lambda x, y: x <= y,
        ),
        dict(
            name="gt",
            out_op=lambda x, y, d: torch.gt(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
            ret_op=lambda x, y: torch.gt(x, y),
            compare_op=lambda x, y: x > y,
        ),
        dict(
            name="ge",
            out_op=lambda x, y, d: torch.ge(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
            ret_op=lambda x, y: torch.ge(x, y),
            compare_op=lambda x, y: x >= y,
        ),
        dict(
            name="eq",
            out_op=lambda x, y, d: torch.eq(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
            ret_op=lambda x, y: torch.eq(x, y),
            compare_op=lambda x, y: x == y,
        ),
        dict(
            name="ne",
            out_op=lambda x, y, d: torch.ne(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
            ret_op=lambda x, y: torch.ne(x, y),
            compare_op=lambda x, y: x != y,
        ),
    ]
    for op in comparison_ops:
        for dt1 in get_all_math_dtypes(device):
            for dt2 in get_all_math_dtypes(device):
                # complex dtypes support only equality-style comparisons
                if (dt1.is_complex or dt2.is_complex) and not (op["name"] == "eq" or op["name"] == "ne"):
                    continue
                val1 = value_for_type[dt1]
                val2 = value_for_type[dt2]
                t1 = torch.tensor([val1], dtype=dt1, device=device)
                t2 = torch.tensor([val2], dtype=dt2, device=device)
                expected = torch.tensor([op["compare_op"](val1, val2)], dtype=torch.bool)
                out_res = op["out_op"](t1, t2, device)
                self.assertEqual(out_res, expected)
                self.assertTrue(out_res.dtype == torch.bool)
                # operand dtypes must be unchanged by the comparison
                self.assertTrue(t1.dtype == dt1)
                self.assertTrue(t2.dtype == dt2)
                out_res = op["ret_op"](t1, t2)
                self.assertEqual(out_res, expected)
                self.assertTrue(out_res.dtype == torch.bool)
                self.assertTrue(t1.dtype == dt1)
                self.assertTrue(t2.dtype == dt2)
                # test that comparing a zero dim tensor with another zero dim tensor has type promotion behavior
                t1 = torch.tensor(val1, dtype=dt1, device=device)
                t2 = torch.tensor(val2, dtype=dt2, device=device)
                expected = torch.tensor(op["compare_op"](val1, val2), dtype=torch.bool)
                out_res = op["out_op"](t1, t2, device)
                self.assertEqual(out_res, expected)
                self.assertTrue(out_res.dtype == torch.bool)
                self.assertTrue(t1.dtype == dt1)
                self.assertTrue(t2.dtype == dt2)
                out_res = op["ret_op"](t1, t2)
                self.assertEqual(out_res, expected)
                self.assertTrue(out_res.dtype == torch.bool)
                self.assertTrue(t1.dtype == dt1)
                self.assertTrue(t2.dtype == dt2)

# XLA tests fail for self.assertRaises for complex dtypes
@onlyNativeDeviceTypes
def test_complex_assertraises(self, device):
    """Ordering comparisons (<, <=, >, >=) involving complex dtypes must raise."""
    comparison_ops = [
        dict(name="lt", compare_op=lambda x, y: x < y, ),
        dict(name="le", compare_op=lambda x, y: x <= y, ),
        dict(name="gt", compare_op=lambda x, y: x > y, ),
        dict(name="ge", compare_op=lambda x, y: x >= y, ),
        dict(name="eq", compare_op=lambda x, y: x == y, ),
        dict(name="ne", compare_op=lambda x, y: x != y, ),
    ]
    for op in comparison_ops:
        is_cuda = torch.device(device).type == 'cuda'
        dtypes = get_all_dtypes(include_half=is_cuda,
                                include_bfloat16=False, include_bool=False,
                                include_complex32=True)
        for dt1, dt2 in itertools.product(dtypes, dtypes):
            # only the ordering comparisons are expected to raise for complex
            if (dt1.is_complex or dt2.is_complex) and not (op["name"] == "eq" or op["name"] == "ne"):
                u = torch.tensor([1], dtype=dt1, device=device)
                v = torch.tensor([2], dtype=dt2, device=device)
                self.assertRaises(RuntimeError, lambda: torch.tensor([op["compare_op"](u, v)], dtype=torch.bool))
@float_double_default_dtype
def test_lt_with_type_promotion(self, device):
for dt in get_all_math_dtypes(device):
x = torch.tensor([0], dtype=dt, device=device)
expected = torch.tensor([True], dtype=torch.bool, device=device)
if dt.is_complex:
continue
actual = x < 0.5
self.assertTrue(actual, expected)
self.assertTrue(actual.dtype == torch.bool)
actual = x < torch.tensor(0.5, device=device)
self.assertTrue(actual, expected)
self.assertTrue(actual.dtype == torch.bool)
x = torch.tensor(0, dtype=dt, device=device)
expected = torch.tensor(True, dtype=torch.bool, device=device)
actual = x < 0.5
self.assertTrue(actual, expected)
self.assertTrue(actual.dtype == torch.bool)
actual = x < torch.tensor(0.5, device=device)
self.assertTrue(actual, expected)
self.assertTrue(actual.dtype == torch.bool)
@float_double_default_dtype
def test_promote_types(self, device):
self.assertEqual(torch.promote_types(torch.float, torch.int), torch.float)
self.assertEqual(torch.promote_types(torch.float, torch.double), torch.double)
self.assertEqual(torch.promote_types(torch.int, torch.uint8), torch.int)
@float_double_default_dtype
def test_promote_self(self, device):
for dtype in all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf, torch.bool):
self.assertEqual(torch.promote_types(dtype, dtype), dtype)
@expectedFailureMeta
@float_double_default_dtype
def test_indexing_fail(self, device):
    """Index-put must not silently cast: writing int values into a double tensor raises."""
    # https://github.com/pytorch/pytorch/issues/28010
    a = torch.ones(5, 2, dtype=torch.double, device=device)
    b = torch.zeros(5, dtype=torch.int, device=device)
    with self.assertRaises(RuntimeError):
        a[:, [1]] = b.unsqueeze(-1)
@float_double_default_dtype
def test_indexing(self, device):
    """Index-put with a same-dtype source works; adding a uint8 mask view to a
    float tensor promotes element-wise as expected.

    Cleanup: ``torch.full([9, 9], 2., ...)`` already fills with 2., so the
    original's trailing ``.fill_(2.)`` was a dead no-op and has been removed.
    """
    x = torch.ones(5, 2, dtype=torch.double, device=device)
    y = torch.zeros(5, dtype=torch.double, device=device)
    x[:, [1]] = y.unsqueeze(-1)
    expected = torch.tensor([(1, 0), (1, 0), (1, 0), (1, 0), (1, 0)], dtype=torch.double, device=device)
    self.assertEqual(x, expected)
    # https://github.com/pytorch/pytorch/issues/27824
    tmp = torch.ones(9, 9, dtype=torch.float, device=device)
    mask = torch.ones(10, 10, dtype=torch.uint8, device=device)
    result = tmp + mask[1:, 1:]
    expected = torch.full([9, 9], 2., dtype=torch.float, device=device)
    self.assertEqual(result, expected)
@float_double_default_dtype
def test_transpose(self, device):
    """Comparing a transposed bool tensor against 0 and against False must agree."""
    # https://github.com/pytorch/pytorch/issues/28502
    a = torch.tensor([[True, True], [False, True]], device=device)
    self.assertEqual(a.t() == 0, a.t() == False)  # noqa: E712
@dtypes(torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
@float_double_default_dtype
def test_div_promotion(self, device, dtype):
for op in (torch.div, torch.true_divide):
dividend = (torch.randn(5, device=device) * 100).to(dtype)
divisor = torch.arange(1, 6, device=device).to(dtype)
# Tests tensor/tensor division
casting_result = dividend.to(torch.get_default_dtype()) / divisor.to(torch.get_default_dtype())
self.assertEqual(casting_result, op(dividend, divisor))
# Tests tensor/scalar division
casting_result = dividend.to(torch.get_default_dtype()) / 2
self.assertEqual(casting_result, op(dividend, 2.))
@onlyNativeDeviceTypes
@dtypes(torch.float, torch.double,
torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_div_promotion_out(self, device, dtype):
for op in (torch.div, torch.true_divide):
dividend = (torch.randn(5, device=device) * 100).to(dtype)
divisor = torch.arange(1, 6, device=device).to(dtype)
# Tests that requests for an integer quotient fail
if not dtype.is_floating_point:
integral_quotient = torch.empty(5, device=device, dtype=dtype)
with self.assertRaises(RuntimeError):
op(dividend, divisor, out=integral_quotient)
with self.assertRaises(RuntimeError):
op(dividend, 2, out=integral_quotient)
else:
# Tests that requests for a floating quotient succeed
floating_quotient = torch.empty(5, device=device, dtype=dtype)
div_result = dividend / divisor
self.assertEqual(div_result,
op(dividend, divisor, out=floating_quotient))
self.assertEqual(dividend / 2,
op(dividend, 2, out=floating_quotient))
@dtypes(torch.float, torch.double,
        torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_div_promotion_inplace(self, device, dtype):
    """In-place division fails on integral tensors and succeeds on floating ones."""
    for divide_ in (torch.Tensor.div_, torch.Tensor.true_divide_):
        num = (torch.randn(5, device=device) * 100).to(dtype)
        den = torch.arange(1, 6, device=device).to(dtype)
        if dtype.is_floating_point:
            # Floating dtypes can hold the quotient in place.
            self.assertEqual(num.clone().div_(den), divide_(num.clone(), den))
            self.assertEqual(num.clone().div_(2), divide_(num.clone(), 2))
        else:
            # The integral input dtype cannot hold a true-division quotient.
            with self.assertRaises(RuntimeError):
                divide_(num, den)
            with self.assertRaises(RuntimeError):
                divide_(num, 2)
def _test_sparse_op_input_tensors(self, device, dtype, coalesced, zeros=True):
    """Build matching (dense, sparse) operands for the sparse promotion tests.

    When ``zeros`` is True, extra entries are zeroed out so the sparse form is
    genuinely sparse; bool tensors already contain enough zeros. When
    ``coalesced`` is False, every entry is duplicated to guarantee an
    uncoalesced tensor.
    """
    t = self._get_test_tensor(device, dtype, not zeros)
    if zeros and dtype != torch.bool:
        # ensure sparsity. Bool should already have sufficient sparsity.
        t = t * self._get_test_tensor(device, torch.bool)
    s = t.to_sparse()
    if not coalesced:
        # Concatenate the index/value lists with themselves: same tensor,
        # but with duplicated entries, hence uncoalesced.
        indices = torch.cat((s.indices(), s.indices()), 1)
        values = torch.cat((s.values(), s.values()), 0)
        s = torch.sparse_coo_tensor(indices=indices, values=values, size=s.size(), dtype=dtype, device=device)
        t = s.to_dense()
    self.assertEqual(s.is_coalesced(), coalesced)
    self.assertEqual(s.dtype, dtype)
    self.assertEqual(t.dtype, s.dtype)
    return t, s
def _get_precision(self, dtype, coalesced):
if dtype == torch.half and not coalesced:
# very low precision for uncoalesced float16 sparse tensors since
# ops like (s1 + s2).to_dense() will add four low-precision
# floating point values.
return 5e-2
if dtype == torch.half:
return 1e-3
# uses default
return None
def _test_sparse_op(self, op_name, inplace, dtype1, dtype2, device, coalesced):
    """Check type promotion of a sparse binary op against its dense equivalent.

    Exercises op(sparse, sparse), op(dense, sparse), op(sparse, dense) and
    op(sparse, scalar), comparing densified results with the pure-dense op.
    """
    if dtype1.is_complex or dtype2.is_complex:
        return

    suffix = '_' if inplace else ''
    err = "{} {}({}, {})".format(" coalesced" if coalesced else "uncoalesced", op_name + suffix, dtype1, dtype2)

    def op(t1, t2, suf=None):
        suf = suffix if suf is None else suf
        return getattr(t1, op_name + suf)(t2)

    add_sub = op_name == 'add' or op_name == 'sub'

    (dense1, sparse1) = self._test_sparse_op_input_tensors(device, dtype1, coalesced)
    (dense2, sparse2) = self._test_sparse_op_input_tensors(device, dtype2, coalesced, op_name != 'div')

    common_dtype = torch.result_type(dense1, dense2)
    if self.device_type == 'cpu' and common_dtype == torch.half:
        # Half-precision sparse ops are unsupported on CPU.
        # Fix: the original lambda referenced s1/d2, which are only bound
        # further below, so this branch raised NameError instead of checking
        # the RuntimeError; use sparse1/dense2 and stop here, since every
        # later sparse call would hit the same unsupported-dtype error.
        self.assertRaises(RuntimeError, lambda: op(sparse1, dense2))
        return

    # Skip inplace tests that would fail due to inability to cast to the output type.
    # Some of these would also raise errors due to not being a supported op.
    if inplace and not torch.can_cast(common_dtype, dtype1):
        self.assertRaises(RuntimeError, lambda: op(dense1, sparse2))
        self.assertRaises(RuntimeError, lambda: op(sparse1, sparse2))
        self.assertRaises(RuntimeError, lambda: op(sparse1, dense2))
        return

    expected = op(dense1.clone(), dense2)
    precision = self._get_precision(expected.dtype, coalesced)
    rtol = None if precision is None else 0
    test_tensors = [expected, dense1, sparse1, dense2, sparse2]
    # In-place ops mutate their first operand, so work on clones.
    e, d1, s1, d2, s2 = [x.clone() for x in test_tensors] if inplace else test_tensors

    # Test op(sparse, sparse)
    if op_name != 'div':
        sparse = op(s1, s2)
        self.assertEqual(sparse.dtype, e.dtype)
        self.assertEqual(e, sparse.to_dense(), atol=precision, rtol=rtol, msg=err)
    else:
        # sparse division only supports division by a scalar
        self.assertRaises(RuntimeError, lambda: op(s1, s2).to_dense())

    # Test op(dense, sparse)
    if add_sub or op_name == 'mul':
        if inplace:
            # refresh the operands clobbered by the previous in-place call
            e, d1, s1, d2, s2 = [x.clone() for x in test_tensors]
        dense_sparse = op(d1, s2)
        dense_sparse = dense_sparse.to_dense() if dense_sparse.is_sparse else dense_sparse
        self.assertEqual(e, dense_sparse, atol=precision, rtol=rtol, msg=err)
    else:
        # sparse division only supports division by a scalar
        # mul: Didn't find kernel to dispatch to for operator 'aten::_nnz'
        self.assertRaises(RuntimeError, lambda: op(d1, s2))

    # Test op(sparse, dense) not supported for all ops but 'mul'.
    # add(sparse, dense) is not supported. Use add(dense, sparse) instead.
    # sparse division only supports division by a scalar
    if op_name != 'mul':
        self.assertRaises(RuntimeError, lambda: op(s1, d2))
    else:
        # No type promotions for inplace operations, hence suf=''
        op(s1, d2, suf='')

    # Test op(sparse, scalar)
    if not add_sub and not (self.device_type == 'cpu' and dtype1 == torch.half):
        if inplace:
            e, d1, s1, d2, s2 = [x.clone() for x in test_tensors]
        scalar = d2.view(d2.numel())[0].item()
        sparse = op(s1, scalar)
        dense_scalar = op(d1, scalar)
        self.assertEqual(sparse.dtype, dense_scalar.dtype)
        self.assertEqual(dense_scalar, sparse.to_dense(), atol=precision, rtol=rtol, msg=err)
    else:
        # add(sparse, dense) is not supported. Use add(dense, sparse) instead.
        # "mul_cpu" / "div_cpu" not implemented for 'Half'
        self.assertRaises(RuntimeError, lambda: op(s1, d2.view(d2.numel())[0].item()))
def _run_all_tests_for_sparse_op(self, op_name, device, dtypes):
    """Exercise one sparse op over all dtype pairs x {inplace} x {coalesced}."""
    flags = (True, False)
    for dtype1, dtype2 in itertools.product(dtypes, repeat=2):
        for inplace in flags:
            for coalesced in flags:
                self._test_sparse_op(op_name, inplace, dtype1, dtype2, device, coalesced)
@onlyNativeDeviceTypes
def test_sparse_add(self, device):
    """Type promotion of sparse add over all math dtype pairs."""
    self._run_all_tests_for_sparse_op('add', device, dtypes=get_all_math_dtypes(device))
@onlyNativeDeviceTypes
def test_sparse_mul(self, device):
    """Type promotion of sparse mul over all math dtype pairs."""
    self._run_all_tests_for_sparse_op('mul', device, dtypes=get_all_math_dtypes(device))
@onlyNativeDeviceTypes
def test_sparse_div(self, device):
    """Type promotion of sparse div over floating and complex dtype pairs."""
    div_dtypes = (torch.float32, torch.float64, torch.complex64, torch.complex128)
    self._run_all_tests_for_sparse_op('div', device, dtypes=div_dtypes)
@onlyNativeDeviceTypes
def test_sparse_sub(self, device):
    """Type promotion of sparse sub over all math dtype pairs."""
    self._run_all_tests_for_sparse_op('sub', device, dtypes=get_all_math_dtypes(device))
@onlyNativeDeviceTypes
@dtypes(torch.bool, torch.short, torch.uint8, torch.int, torch.long)
@float_double_default_dtype
def test_sparse_div_promotion(self, device, dtype):
    """Dividing an integral/bool sparse tensor by a scalar promotes to the
    default float dtype."""
    for divide in (torch.div, torch.true_divide):
        dense = torch.randn(5, device=device).to(dtype)
        sparse = dense.to_sparse()
        expected = dense.to(torch.get_default_dtype()) / 2
        self.assertEqual(expected, divide(sparse, 2).to_dense())
@onlyNativeDeviceTypes
@dtypes(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)
def test_integer_addcdiv_deprecated(self, device, dtype):
    """Integral addcdiv — out-of-place, out=, and in-place — must all raise."""
    scalar = torch.tensor(1, device=device, dtype=dtype)

    with self.assertRaisesRegex(RuntimeError, '^Integer division.+is no longer supported.+'):
        torch.addcdiv(scalar, scalar, scalar)
    with self.assertRaisesRegex(RuntimeError, '^Integer division.+is no longer supported.+'):
        torch.addcdiv(scalar, scalar, scalar, out=scalar)
    with self.assertRaisesRegex(RuntimeError, '^Integer division.+is no longer supported+'):
        scalar.addcdiv_(scalar, scalar)
def _ternary_promotion_common(self, device, op1, op2):
    """Compare a fused ternary op (op1) against its decomposed form (op2)."""
    make_arg = partial(make_tensor, device=device)
    dtype_triples = (
        (torch.float64, torch.float64, torch.complex128),
        (torch.long, torch.bfloat16, torch.float32),
    )
    for dt_a, dt_b, dt_c in dtype_triples:
        first = make_arg([5, 5], dtype=dt_a)
        second = make_arg([5, 5], dtype=dt_b)
        third = make_arg([1, 5], dtype=dt_c)

        fused = op1(first, second, third)
        decomposed = op2(first, second, third)

        # The two forms agree only because every input here is a tensor
        # with one or more dimensions; they are not guaranteed equal in general.
        self.assertEqual(fused, decomposed)
        self.assertEqual(fused.dtype, decomposed.dtype)
# Fails on XLA:
# https://github.com/pytorch/pytorch/pull/74234#issuecomment-1117169366
# https://github.com/pytorch/xla/issues/3551
@onlyNativeDeviceTypes
def test_addcdiv_promotion(self, device):
    """addcdiv(a, b, c) promotes like the decomposed a + b / c."""
    def fused(arg1, arg2, arg3):
        return torch.addcdiv(arg1, arg2, arg3)

    def decomposed(arg1, arg2, arg3):
        return arg1 + arg2 / arg3

    self._ternary_promotion_common(device, fused, decomposed)
# Fails on XLA:
# https://github.com/pytorch/pytorch/pull/74234#issuecomment-1117169366
# https://github.com/pytorch/xla/issues/3551
@onlyNativeDeviceTypes
def test_addcmul_promotion(self, device):
    """addcmul(a, b, c) promotes like the decomposed a + b * c."""
    def fused(arg1, arg2, arg3):
        return torch.addcmul(arg1, arg2, arg3)

    def decomposed(arg1, arg2, arg3):
        return arg1 + arg2 * arg3

    self._ternary_promotion_common(device, fused, decomposed)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
@float_double_default_dtype
@onlyCPU
@dtypes(*list(itertools.product(set(numpy_to_torch_dtype_dict.values()),
                                set(numpy_to_torch_dtype_dict.values()))))
def test_numpy_array_binary_ufunc_promotion(self, device, dtypes):
    """Mixing a NumPy array into torch binary ufuncs should promote the same
    way as the equivalent torch tensor would; known-broken combinations are
    tracked as "undesired failures" (see gh-36363)."""
    import operator
    np_type = torch_to_numpy_dtype_dict[dtypes[0]]
    torch_type = dtypes[1]

    t = torch.tensor((1,), device=device, dtype=torch_type)
    a = np.array((1,), dtype=np_type)
    # Reference operand: the same NumPy data viewed as a torch tensor.
    a_as_t = torch.from_numpy(a).to(device=device)

    for np_first in (True, False):
        for op in (operator.add, torch.add):
            # Acquires results of binary ufunc type promotion.
            # If the op raises, the exception object itself is recorded and
            # compared by type below.
            try:
                actual = op(a, t) if np_first else op(t, a)
            except Exception as e:
                actual = e

            try:
                expected = op(a_as_t, t) if np_first else op(t, a_as_t)
            except Exception as e:
                expected = e

            same_result = (type(expected) == type(actual)) and expected == actual

            # Note: An "undesired failure," as opposed to an "expected failure"
            # is both expected (we know the test will fail) and
            # undesirable (if PyTorch was working properly the test would
            # not fail). This test is affected by three issues (see below)
            # that will cause undesired failures. It detects when these
            # issues will occur and updates this bool accordingly.
            undesired_failure = False

            # A NumPy array as the first argument to the plus operator
            # or as any argument to torch.add is not working as
            # intended.
            # See https://github.com/pytorch/pytorch/issues/36363.
            if np_first and op is operator.add:
                undesired_failure = True
            if op is torch.add:
                undesired_failure = True

            # Expects the same result if undesired_failure is false
            # and a different result otherwise.
            # Note: These cases prettyprint the failing inputs to make
            # debugging test failures easier.
            if undesired_failure and same_result:
                msg = ("Failure: {0} == {1}. "
                       "torch type was {2}. NumPy type was {3}. np_first is {4} "
                       "default type is {5}.").format(actual, expected,
                                                      torch_type, np_type,
                                                      np_first,
                                                      torch.get_default_dtype())
                self.fail(msg)

            if not undesired_failure and not same_result:
                msg = ("Failure: {0} != {1}. "
                       "torch type was {2}. NumPy type was {3}. np_first is {4} "
                       "default type is {5}.").format(actual, expected,
                                                      torch_type, np_type,
                                                      np_first,
                                                      torch.get_default_dtype())
                self.fail(msg)
@onlyNativeDeviceTypes
def test_cat_different_dtypes(self, device):
    """torch.cat promotes mixed-dtype inputs to their common result type."""
    dtypes = all_types_and_complex_and(torch.half, torch.bool)
    for x_dtype, y_dtype in itertools.product(dtypes, dtypes):
        x_vals, y_vals = [1, 2, 3], [4, 5, 6]

        x = torch.tensor(x_vals, device=device, dtype=x_dtype)
        y = torch.tensor(y_vals, device=device, dtype=y_dtype)

        # Bool inputs collapse all nonzero entries to 1 in the result.
        if x_dtype is torch.bool:
            x_vals = [1, 1, 1]
        if y_dtype is torch.bool:
            y_vals = [1, 1, 1]

        res_dtype = torch.result_type(x, y)
        expected = torch.tensor(x_vals + y_vals, device=device, dtype=res_dtype)
        self.assertEqual(torch.cat([x, y]), expected, exact_dtype=True)

        # cat: full and an empty tensor — promotion still applies.
        y = torch.tensor([], device=device, dtype=y_dtype)
        res_dtype = torch.result_type(x, y)
        expected = torch.tensor(x_vals + [], device=device, dtype=res_dtype)
        self.assertEqual(torch.cat([x, y]), expected, exact_dtype=True)
@onlyNativeDeviceTypes
def test_cat_out_different_dtypes(self, device):
    """torch.cat with out=: float->non-float or complex->non-complex downcasts fail."""
    dtypes = all_types_and_complex_and(torch.half)
    for x_dtype, y_dtype, out_dtype in itertools.product(dtypes, dtypes, dtypes):
        out = torch.zeros(6, device=device, dtype=out_dtype)
        x = torch.tensor([1, 2, 3], device=device, dtype=x_dtype)
        y = torch.tensor([4, 5, 6], device=device, dtype=y_dtype)
        expected_out = torch.tensor([1, 2, 3, 4, 5, 6], device=device, dtype=out_dtype)
        float_into_nonfloat = ((x_dtype.is_floating_point or y_dtype.is_floating_point)
                               and not (out_dtype.is_floating_point or out_dtype.is_complex))
        complex_into_noncomplex = ((x_dtype.is_complex or y_dtype.is_complex)
                                   and not out_dtype.is_complex)
        if float_into_nonfloat or complex_into_noncomplex:
            # These combinations would lose the value class, so cat refuses.
            with self.assertRaises(RuntimeError):
                torch.cat([x, y], out=out)
        else:
            torch.cat([x, y], out=out)
            self.assertEqual(out, expected_out, exact_dtype=True)
# Verifies that unary ops require matching out types
@onlyNativeDeviceTypes
@dtypes(*itertools.product((torch.int64,
                            torch.float32, torch.float64,
                            torch.complex64, torch.complex128),
                           (torch.int64,
                            torch.float32, torch.float64,
                            torch.complex64, torch.complex128)))
def test_unary_op_out_casting(self, device, dtypes):
    """Unary ops with out=: a mismatched out dtype raises; floor/ceil
    additionally reject complex and (off-meta) integral inputs."""
    t = torch.tensor((1), dtype=dtypes[0], device=device)
    out = torch.empty(0, dtype=dtypes[1], device=device)

    ops = (torch.neg, torch.floor, torch.ceil)
    # floor/ceil appear in both sets: they need floating input and real output.
    float_only_ops = {torch.floor, torch.ceil}
    real_only_ops = {torch.floor, torch.ceil}
    for op in ops:
        if dtypes[0] is not dtypes[1]:
            # unary ops never cast into a differently-typed out tensor
            with self.assertRaises(RuntimeError):
                op(t, out=out)
        elif op in real_only_ops and dtypes[0].is_complex:
            with self.assertRaises(RuntimeError):
                op(t, out=out)
        elif (
            op in float_only_ops
            and (not dtypes[0].is_floating_point and not dtypes[0].is_complex)
            and device != "meta"
        ):
            with self.assertRaises(RuntimeError):
                op(t, out=out)
        else:
            # matching dtypes: out= result equals the functional result
            self.assertEqual(op(t, out=out), op(t))
            self.assertEqual(op(t, out=out), out)
# Verifies that the out= argument doesn't affect the computation, that
# is, out = op(...) and op(..., out=out) produce the same result.
@onlyNativeDeviceTypes
@skipMeta
def test_computation_ignores_out(self, device):
    """out= must not change the compute precision: results match the
    native-precision op and differ from the op computed in double."""
    # fp16 add overflows to inf here; computing in out's fp64 would give 66000.
    t = torch.tensor(33000, dtype=torch.float16, device=device)
    out = torch.empty(0, dtype=torch.float64, device=device)
    result = torch.add(t, t, out=out)
    self.assertEqual(result, t + t, exact_dtype=False)
    self.assertNotEqual(result, t.double() + t, exact_dtype=False)

    a = torch.tensor(1.5, dtype=torch.float16, device=device)
    b = torch.tensor(.666, dtype=torch.float16, device=device)
    result = torch.true_divide(a, b, out=out)
    self.assertEqual(result, a / b, exact_dtype=False)
    # Fix: compare against the double-precision quotient a.double() / b.
    # The original used a.double() / a, which equals 1.0, trivially differs
    # from the result, and therefore verified nothing about precision.
    self.assertNotEqual(result, a.double() / b, exact_dtype=False)

    a = torch.tensor(5, dtype=torch.uint8, device=device)
    b = torch.tensor(8, dtype=torch.uint8, device=device)
    result = torch.sub(a, b, out=out)
    # uint8 subtraction wraps around; fp64 subtraction would give -3.
    self.assertEqual(result, a - b, exact_dtype=False)
    self.assertNotEqual(result, a.double() - b, exact_dtype=False)
@onlyNativeDeviceTypes
@dtypes(*itertools.product((torch.bool, torch.int, torch.float, torch.double), repeat=3))
def test_clamp_type_promotion(self, device, dtypes):
    """clamp/clamp_min/clamp_max promote like sequential binary ops on
    (input, min, max); in-place variants only run when the promoted type
    fits the input dtype."""
    dtype0, dtype1, dtype2 = dtypes
    S = 4

    def make_tensor(size, dtype):
        # bool/int need randint; randn covers the floating dtypes
        if dtype == torch.bool:
            return torch.randint(2, size, dtype=dtype, device=device)
        elif dtype == torch.int:
            return torch.randint(10, size, dtype=dtype, device=device)
        else:
            return torch.randn(size, dtype=dtype, device=device)
    min_t = make_tensor((S,), dtype1)
    max_t = make_tensor((S,), dtype2)
    # each bound is tried as a 1-d tensor, a 0-d tensor, and a Python scalar
    mins = (min_t, min_t[0], min_t[0].item())
    maxs = (max_t, max_t[0], max_t[0].item())
    inp = make_tensor((S,), dtype0)
    for min_v, max_v in itertools.product(mins, maxs):
        if type(max_v) != type(min_v):
            continue
        if isinstance(min_v, torch.Tensor) and min_v.ndim == 0 and max_v.ndim == 0:
            continue  # 0d tensors go to scalar overload, and it's tested separately

        def expected_type(inp, max, min):
            # promote against the dimensioned bound first, then the other
            arg1, arg2 = max, min
            if isinstance(max, torch.Tensor) and max.ndim == 0:
                # first do a maybe dimensional boundary
                arg1, arg2 = min, max
            exp_type = torch.result_type(inp, arg1)
            inp_new = torch.empty_like(inp, dtype=exp_type)
            return torch.result_type(inp_new, arg2)
        exp_type = expected_type(inp, min_v, max_v)
        if exp_type != torch.bool:
            actual = torch.clamp(inp, min_v, max_v)
            # reference: cast everything to the expected type, then clamp
            inps = list(map(lambda x: x.to(exp_type) if isinstance(x, torch.Tensor) else x,
                            (inp, min_v, max_v)))
            expected = torch.clamp(inps[0], inps[1], inps[2])
            self.assertEqual(actual, expected)
            if inp.dtype in floating_types() or exp_type == inp.dtype:
                actual = torch.clamp_(inp, min_v, max_v)
                self.assertEqual(actual, expected, exact_dtype=False)
    for val in mins:
        def expected_type(inp, val):
            return torch.result_type(inp, val)
        exp_type = expected_type(inp, val)
        if exp_type != torch.bool:
            actual = torch.clamp_min(inp, val)
            inps = list(map(lambda x: x.to(exp_type) if isinstance(x, torch.Tensor) else x,
                            (inp, val)))
            expected = torch.clamp_min(inps[0], inps[1])
            self.assertEqual(actual.dtype, exp_type)
            self.assertEqual(actual, expected)
            if inp.dtype == exp_type:
                actual = torch.clamp_min_(inp, val)
                self.assertEqual(actual, expected)
            actual = torch.clamp_max(inp, val)
            expected = torch.clamp_max(inps[0], inps[1])
            self.assertEqual(actual, expected)
            if inp.dtype in floating_types() or exp_type == inp.dtype:
                actual = torch.clamp_max_(inp, val)
                self.assertEqual(actual, expected, exact_dtype=False)
# Generate the per-device-type variants (CPU/CUDA/...) of TestTypePromotion.
instantiate_device_type_tests(TestTypePromotion, globals())

if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_type_promotion.py |
# Owner(s): ["module: unknown"]
import torch
import copy
from torch.testing._internal.common_utils import TestCase, run_tests
class TestPerOverloadAPI(TestCase):
def test_basics_opoverloadpacket(self):
# add is ony used as an example here. It is ok to update the test
# if the semantics of add are modified in the future.
add_packet = torch.ops.aten.add
# class attributes
self.assertEqual(add_packet.__name__, 'add')
self.assertEqual(str(add_packet), 'aten.add')
# callable
self.assertEqual(add_packet(torch.tensor(2), torch.tensor(3)), torch.tensor(5))
# correct module
self.assertEqual(add_packet.__module__, add_packet.op.__module__)
# caching
another_add_packet = torch.ops.aten.add
self.assertEqual(id(add_packet), id(another_add_packet))
# deepcopy is a no-op
self.assertEqual(id(add_packet), id(copy.deepcopy(add_packet)))
# pretty print
self.assertEqual(repr(add_packet), "<OpOverloadPacket(op='aten.add')>")
self.assertRaises(AttributeError, lambda: add_packet.foo)
def test_basics_opoverload(self):
add_packet = torch.ops.aten.add
add_tensoroverload = add_packet.Tensor
# class attributes
self.assertEqual(str(add_tensoroverload), 'aten.add.Tensor')
self.assertEqual(add_tensoroverload.__name__, 'add.Tensor')
self.assertEqual(add_tensoroverload.overloadpacket, add_packet)
# deepcopy is a no-op
self.assertEqual(id(add_tensoroverload), id(copy.deepcopy(add_tensoroverload)))
# caching
another_add_tensoroverload = torch.ops.aten.add.Tensor
self.assertEqual(id(add_tensoroverload), id(another_add_tensoroverload))
# pretty print
self.assertEqual(repr(add_tensoroverload), "<OpOverload(op='aten.add', overload='Tensor')>")
# callable
self.assertEqual(add_tensoroverload(torch.tensor(2), torch.tensor(3)), torch.tensor(5))
a = torch.tensor(2)
b = torch.tensor(0)
torch.ops.aten.add.out(a, a, out=b)
self.assertEqual(b, torch.tensor(4))
self.assertRaises(RuntimeError, lambda: add_tensoroverload(a, a, out=b))
def test_decompose(self):
x = torch.randn(2, 3)
y = torch.randn(5, 3)
self.assertEqual(
torch.ops.aten.linear.default.decompose(x, y),
torch.ops.aten.linear.default(x, y)
)
# Standard PyTorch test entry point.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_per_overload_api.py |
# Owner(s): ["module: ci"]
from torch.testing._internal.common_utils import TestCase, run_tests
# these tests could eventually be changed to fail if the import/init
# time is greater than a certain threshold, but for now we just use them
# as a way to track the duration of `import torch`.
class TestImportTime(TestCase):
    """Track the duration of `import torch` and CUDA init via the
    API-usage stderr machinery; no time threshold is enforced yet."""

    def test_time_import_torch(self):
        """Record how long a bare `import torch` takes in a subprocess."""
        TestCase.runWithPytorchAPIUsageStderr("import torch")

    def test_time_cuda_device_count(self):
        """Record import plus CUDA device enumeration time."""
        TestCase.runWithPytorchAPIUsageStderr("import torch; torch.cuda.device_count()",)
# Standard PyTorch test entry point.
if __name__ == "__main__":
    run_tests()
| pytorch-master | test/test_import_stats.py |
# Owner(s): ["module: sparse"]
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard, first_sample, TEST_WITH_CROSSREF
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, CUDA11OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes)
from torch.testing._internal.common_methods_invocations import \
(sparse_unary_ufuncs, sparse_masked_reduction_ops)
from torch.testing._internal.common_dtype import (
all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
floating_and_complex_types_and, integral_types, floating_types_and,
)
from torch.utils._python_dispatch import TorchDispatchMode
if TEST_SCIPY:
import scipy.sparse
# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests

# batched grad doesn't support sparse
gradcheck = functools.partial(gradcheck, check_batched_grad=False)

# complex128 cuSPARSE SpMM needs CUDA > 11.2 on Windows, CUDA >= 11 elsewhere
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
    IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and CUDA11OrLater)
class CrossRefSparseFakeMode(TorchDispatchMode):
    """Dispatch mode that shadows each op with a FakeTensor execution.

    NOTE(review): the fake result (fake_r) is never compared with the real
    one — presumably the check is only that fake execution does not error
    for ops outside the exclusion list; confirm before relying on it.
    """

    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}

        def on_tensor(f):
            # Lift f so it applies only to Tensors and passes everything
            # else through unchanged (used with tree_map below).
            def go(t):
                if isinstance(t, torch.Tensor):
                    return f(t)
                else:
                    return t
            return go

        # empty_like excluded for now due to sparse complex
        # aten._to_dense.default this one is getting called with csc
        if (
            func not in [
                torch.ops.aten.lift_fresh.default,
                torch.ops.aten.empty_like.default,
                torch.ops.aten.set_.source_Storage_storage_offset,
                torch.ops.aten.sspaddmm.out,
                torch.ops.aten._spdiags.default,
                torch.ops.aten._to_dense.default
            ]
            and torch.Tag.dynamic_output_shape not in func.tags
            and torch.Tag.inplace_view not in func.tags
        ):
            from torch._subclasses.fake_tensor import FakeTensorMode, UnsupportedFakeTensorException
            from torch.utils._pytree import tree_map

            try:
                # Re-run the op on fake copies of all tensor args; ops the
                # fake tensor subsystem cannot model are skipped silently.
                with FakeTensorMode(allow_meta=True) as fake_mode:
                    fake_args, fake_kwargs = tree_map(on_tensor(fake_mode.from_tensor), (args, kwargs))
                    fake_r = func(*fake_args, **fake_kwargs)
            except UnsupportedFakeTensorException:
                pass

        # The real execution always happens and its result is returned.
        r = func(*args, **kwargs)
        return r
class TestSparseBase(TestCase):
    """Base class that optionally wraps every test run in the sparse
    fake-tensor cross-ref mode (controlled by TEST_WITH_CROSSREF)."""

    def run(self, result=None):
        if not TEST_WITH_CROSSREF:
            return super().run(result)
        with CrossRefSparseFakeMode():
            return super().run(result)
class TestSparse(TestSparseBase):
def setUp(self):
    # Install the factory helpers used throughout the sparse tests.
    TestCase.setUp(self)

    # int64 index factory (sparse COO indices are int64)
    self.index_tensor = lambda *args, **kwargs: torch.tensor(*args, **kwargs, dtype=torch.int64)

    def sparse_empty_factory(*args, **kwargs):
        # default to the sparse COO layout unless the caller overrides it
        kwargs['layout'] = kwargs.get('layout', torch.sparse_coo)
        return torch.empty(*args, **kwargs)
    self.sparse_empty = sparse_empty_factory

    def sparse_tensor_factory(*args, **kwargs):
        return torch.sparse_coo_tensor(*args, **kwargs)
    self.sparse_tensor = sparse_tensor_factory
    # legacy constructor, used to exercise the pre-factory-function API
    self.legacy_sparse_tensor = torch.sparse.DoubleTensor
def _gen_sparse(self, sparse_dim, nnz, with_size, dtype, device, coalesced):
    """Generate a sparse tensor plus its raw indices/values.

    ``with_size`` may be an int (repeated for every sparse dim) or a full
    shape list. When ``coalesced`` is False the generated tensor is also
    verified to be genuinely uncoalesced.
    """
    if isinstance(with_size, Number):
        with_size = [with_size] * sparse_dim

    x, i, v = self.genSparseTensor(with_size, sparse_dim, nnz, not coalesced, dtype=dtype, device=device)

    if not coalesced:
        self.assert_uncoalesced(x)

    return x, i, v
def assert_uncoalesced(self, x):
    """
    Test if a CPU tensor is uncoalesced. This is used to ensure
    correctness of the uncoalesced tensor generation algorithm.
    """
    assert not x.is_coalesced()
    existing_indices = set()
    for i in range(x._nnz()):
        # stringified index column acts as a hashable key
        index = str(x._indices()[:, i])
        if index in existing_indices:
            # found a duplicated index, i.e. genuinely uncoalesced
            return True
        else:
            existing_indices.add(index)
    # NOTE(review): falls through (returns None) when no duplicate index is
    # found — presumably generation always produces duplicates, but an
    # explicit failure here would be stronger. TODO confirm.
def randn(self, *args, **kwargs):
    """
    Variant of torch.randn that also works in the TEST_CUDA case.
    """
    # TODO: Put this in torch.cuda.randn
    out = torch.empty(*args, **kwargs)
    return out.normal_()
@dtypes(torch.double)
def test_print_coalesced(self, device, dtype):
    """Expect-test printing of coalesced sparse tensors."""
    self._test_print(device, dtype, coalesced=True)
@dtypes(torch.double)
def test_print_uncoalesced(self, device, dtype):
    """Expect-test printing of uncoalesced sparse tensors."""
    self._test_print(device, dtype, coalesced=False)
def _test_print(self, device, dtype, coalesced):
    """Build sparse tensors over a spread of shapes/sparse_dims/nnz and
    compare their printed representations against the expect file."""
    # (dense shape, sparse_dim, nnz) triples, including scalar/empty cases
    shape_sparse_dim_nnz = [
        ((), 0, 2),
        ((0,), 0, 10),
        ((2,), 0, 3),
        ((100, 3), 1, 3),
        ((100, 20, 3), 2, 0),
        ((10, 0, 3), 0, 3),
        ((10, 0, 3), 0, 0),
    ]
    printed = []
    for shape, sparse_dim, nnz in shape_sparse_dim_nnz:
        indices_shape = torch.Size((sparse_dim, nnz))
        values_shape = torch.Size((nnz,) + shape[sparse_dim:])
        printed.append("# shape: {}".format(torch.Size(shape)))
        printed.append("# nnz: {}".format(nnz))
        printed.append("# sparse_dim: {}".format(sparse_dim))
        printed.append("# indices shape: {}".format(indices_shape))
        printed.append("# values shape: {}".format(values_shape))

        # deterministic indices/values via arange (no RNG in an expect test)
        indices = torch.arange(indices_shape.numel(), dtype=self.index_tensor(0).dtype,
                               device=device).view(indices_shape)
        for d in range(sparse_dim):
            indices[d].clamp_(max=(shape[d] - 1))  # make it valid index
        if not coalesced and indices.numel() > 0:
            indices[:, -1] = indices[:, 0]  # make it uncoalesced
        values_numel = values_shape.numel()
        values = torch.arange(values_numel, dtype=dtype,
                              device=device).view(values_shape).div_(values_numel / 2.)
        sp_tensor = self.sparse_tensor(indices, values, shape, dtype=dtype, device=device)

        # also print after casting: int32 plus the "other" float width
        dtypes = [torch.int32]
        if values.dtype == torch.double:
            dtypes.append(torch.float)
        else:
            dtypes.append(torch.double)
        for dtype in dtypes:
            printed.append("########## {} ##########".format(dtype))
            x = sp_tensor.detach().to(dtype)
            printed.append("# sparse tensor")
            printed.append(str(x))
            if x.dtype.is_floating_point:
                # grad-enabled and post-arithmetic reprs only make sense
                # for floating dtypes
                printed.append("# after requires_grad_")
                printed.append(str(x.requires_grad_()))
                printed.append("# after addition")
                printed.append(str(x + x))
            printed.append("# _indices")
            printed.append(str(x._indices()))
            printed.append("# _values")
            printed.append(str(x._values()))
        printed.append('')
    self.assertExpected('\n'.join(printed))
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_basic(self, device, dtype, coalesced):
    """Core construction invariants: indices/values round-trip, sizes,
    nnz after coalesce, and the (un)coalesced accessor contract."""
    def test_shape(sparse_dims, nnz, with_size):
        if isinstance(with_size, Number):
            with_size = [with_size] * sparse_dims
        x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
        self.assertEqual(i, x._indices())
        self.assertEqual(v, x._values())
        self.assertEqual(x.ndimension(), len(with_size))
        # uncoalesced generation duplicates every index, so coalescing halves nnz
        self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
        self.assertEqual(list(x.size()), with_size)

        # Test .indices() and .values()
        if not coalesced:
            with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
                x.indices()
            with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
                x.values()
        else:
            self.assertEqual(x.indices(), x._indices())
            self.assertEqual(x.values(), x._values())

    test_shape(3, 10, 100)
    test_shape(3, 10, [100, 100, 100])
    test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
    test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])

    # Make sure that coalesce handles duplicate indices correctly
    # (16 entries over 10 rows -> 9 unique indices)
    i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
    v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
    x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
    self.assertEqual(x.coalesce()._nnz(), 9)

    # Make sure we can access empty indices / values
    x = self.legacy_sparse_tensor()
    self.assertEqual(x._indices().numel(), 0)
    self.assertEqual(x._values().numel(), 0)
@coalescedonoff
@dtypes(torch.double, torch.cdouble, torch.bfloat16)
@precisionOverride({torch.bfloat16: 1e-2})
def test_coalesce(self, device, dtype, coalesced):
    """coalesce() must match a reference dict-based accumulation of
    duplicate indices, across empty/non-empty sparse and dense dims."""
    def _test_coalesce(t):
        tc = t.coalesce()
        self.assertEqual(tc.to_dense(), t.to_dense())
        self.assertTrue(tc.is_coalesced())

        # Our code below doesn't work when nnz is 0, because
        # then it's a 0D tensor, not a 2D tensor.
        if t._nnz() == 0:
            self.assertEqual(t._indices(), tc._indices())
            self.assertEqual(t._values(), tc._values())
            return tc

        # Reference implementation: accumulate values per unique index tuple.
        value_map: Dict[Any, Any] = {}
        for idx, val in zip(t._indices().t(), t._values()):
            idx_tup = tuple(idx.tolist())
            if idx_tup in value_map:
                value_map[idx_tup] += val
            else:
                value_map[idx_tup] = val.clone() if isinstance(val, torch.Tensor) else val

        # coalesce() also sorts indices, so sort the reference keys too
        new_indices = sorted(list(value_map.keys()))
        _new_values = [value_map[idx] for idx in new_indices]
        if t._values().ndimension() < 2:
            new_values = t._values().new(_new_values)
        else:
            new_values = torch.stack(_new_values)

        new_indices = t._indices().new(new_indices).t()
        tg = t.new(new_indices, new_values, t.size())

        self.assertEqual(tc._indices(), tg._indices())
        self.assertEqual(tc._values(), tg._values())

        # coalescing an already-coalesced tensor is the identity
        if t.is_coalesced():
            self.assertEqual(tc._indices(), t._indices())
            self.assertEqual(tc._values(), t._values())

    for empty_i, empty_v, empty_nnz in itertools.product([True, False], repeat=3):
        sparse_size = [] if empty_i else [2, 1]
        dense_size = [1, 0, 2] if empty_v else [1, 2]
        nnz = 0 if empty_nnz else 5

        t, _, _ = self._gen_sparse(len(sparse_size), nnz, sparse_size + dense_size, dtype, device, coalesced)
        _test_coalesce(t)  # this tests correctness
@dtypes(torch.double)
def test_coalesce_reference_cycle(self, device, dtype):
    # Test coalesce doesn't create autograd graph cycles (gh-52253)

    # Sanity check that the helper class works as expected
    t = torch.rand(2)
    t_ref = torch._C._WeakTensorRef(t)
    self.assertFalse(t_ref.expired())

    del t
    self.assertTrue(t_ref.expired())

    def test_sparse_sum():
        # NOTE(review): despite the name, this builds a coalesce chain
        # (coalesce of an already-coalesced, grad-enabled tensor); the point
        # is only that S2 must not keep itself alive via an autograd cycle.
        i = torch.tensor([[0], [4]], dtype=torch.long, device=device)
        v = torch.tensor([[[-0.4567, -1.8797, 0.0380, 1.4316]]],
                         dtype=dtype, device=device)
        S = torch.sparse_coo_tensor(i, v)
        S = S.coalesce()
        S.requires_grad_(True)
        S2 = S.coalesce()
        self.assertTrue(S2.is_coalesced())
        return torch._C._WeakTensorRef(S2)

    ref = test_sparse_sum()
    # once the locals are gone the tensor must be collectible
    self.assertTrue(ref.expired())
@dtypes(torch.double)
def test_ctor_large_sizes(self, device, dtype):
    """Integer overflow while computing numel must be detected (gh-57416).

    numel is computed internally during construction, so the overflow for a
    (N+1)^4-element size has to surface as a RuntimeError there.
    """
    N = 100000
    indices = torch.tensor([[N, N - 1]] * 4, dtype=torch.int64, device=device)
    values = torch.tensor([1, 2], dtype=dtype, device=device)
    with self.assertRaises(RuntimeError):
        torch.sparse_coo_tensor(indices, values, (N + 1,) * 4, device=device)
@dtypes(torch.double, torch.cdouble)
def test_ctor_size_checks(self, device, dtype):
    """Construction must reject indices or values inconsistent with size."""
    indices = self.index_tensor([
        [0, 0, 0],
        [0, 3, 0],
        [0, 0, 0],
        [0, 0, 0],
    ], device=device)
    values = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)

    # indices inconsistent with size (index 3 out of range for dim of size 1)
    with self.assertRaises(RuntimeError):
        self.sparse_tensor(indices, values, torch.Size([2, 1, 1]))

    # values inconsistent with size
    values = torch.tensor([
        [2, 1, 2, 1],
        [1, 0, 5, 2],
    ], dtype=dtype, device=device)
    with self.assertRaises(RuntimeError):
        self.sparse_tensor(indices, values, torch.Size([2, 4, 2, 1]))
@dtypes(*floating_and_complex_types_and(torch.float16, torch.bfloat16))
def test_to_dense(self, device, dtype):
    """to_dense() round trips, agrees with safeToDense, and passes
    gradcheck for float64 inputs."""
    def test_tensor(x, res):
        x.to_dense()  # Tests triple to_dense for memory corruption
        x.to_dense()
        x.to_dense()
        dense_x = x.to_dense()
        safe_dense_x = self.safeToDense(x)
        dense_x = dense_x.to(res.dtype)
        safe_dense_x = safe_dense_x.to(res.dtype)
        self.assertEqual(res, dense_x)
        self.assertEqual(res, safe_dense_x)

        # Only run autograd test for float64
        if x.dtype != torch.float64:
            return

        def fn(x):
            return x.to_dense()
        x.requires_grad_(True)
        gradcheck(fn, (x,), check_sparse_nnz=True)

    for value_type in [torch.double, torch.cdouble]:
        i = self.index_tensor([
            [0, 1, 2, 2],
            [0, 0, 0, 3],
            [0, 0, 1, 4],
        ], device=device)
        # we don't have to_dense for half types on CPU because it is implemented
        # with a slower add_ operation
        v = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)
        x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]), dtype=value_type, device=device)
        # hand-written dense equivalent of the 4-entry sparse tensor above
        res = torch.tensor([
            [[2, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0]],
            [[1, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0]],
            [[0, 3, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 4]],
        ], dtype=dtype, device=device)

        test_tensor(x, res)
        test_tensor(res, res)

        # same indices, but zero-sized dense dimension in the values
        i = self.index_tensor([
            [0, 1, 2, 2],
            [0, 0, 0, 3],
            [0, 0, 1, 4],
        ], device=device)
        v = torch.empty(4, 0, dtype=dtype, device=device)
        x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]), dtype=value_type, device=device)
        res = torch.empty((3, 4, 5, 0), dtype=dtype, device=device)
        test_tensor(x, res)
    @coalescedonoff
    @dtypes(torch.float16, torch.bfloat16, torch.float64, torch.int, torch.cfloat, torch.cdouble)
    def test_to_sparse(self, device, dtype, coalesced):
        # dense.to_sparse(dim) must round-trip back to the same dense tensor,
        # keep the size, and report the requested number of sparse dims.
        shape = [5, 2, 10, 4]
        max_nnz = 1
        # NOTE(review): max_nnz is never reset, so it keeps accumulating across
        # the value_type iterations too; presumably harmless since it is only
        # an upper bound for randint — confirm intent.
        for value_type in [torch.double, torch.cdouble]:
            for dim, dim_sz in enumerate(shape, 1):
                max_nnz *= dim_sz
                rnnz = torch.randint(2, max_nnz, (1,)).item()
                # Cover empty, single-entry, and random nnz.
                for nnz in [0, 1, rnnz]:
                    expected, _, _ = self._gen_sparse(dim, nnz, shape, dtype=value_type, device=device,
                                                      coalesced=coalesced)
                    expected = expected.to(dtype)

                    d = expected.to_dense()
                    result = d.to_sparse(dim)
                    self.assertEqual(d, result.to_dense())
                    self.assertEqual(expected.size(), result.size())
                    self.assertEqual(dim, result.sparse_dim())

            # Calling to_sparse() on an already-sparse tensor is an error.
            sp, _, _ = self._gen_sparse(2, 10, [3, 3, 3], dtype=value_type, device=device, coalesced=coalesced)
            self.assertRaises(RuntimeError, lambda: sp.to_sparse())
@dtypes(torch.double, torch.cdouble)
def test_sparse_bool(self, device, dtype):
a = torch.tensor([True, False], dtype=dtype, device=device).to(torch.bool)
b = a.to_sparse().to_dense()
self.assertEqual(a, b)
    @dtypes(torch.double, torch.cdouble)
    def test_scalar(self, device, dtype):
        # 0-dim ("scalar") sparse tensors: one value, several values at the
        # same (empty) index, and no values at all.
        # tensor with value
        a = self.sparse_tensor(self.index_tensor([], device=device).unsqueeze(1), 12.3, [], dtype=dtype, device=device)
        self.assertEqual(1, a._values().numel())
        self.assertEqual(a, a.clone())
        a_coalesced = a.coalesce()
        self.assertTrue(a_coalesced.is_coalesced())
        self.assertEqual(torch.tensor(12.3, dtype=dtype, device=device), a.to_dense())
        self.assertEqual(a, a.to_dense().to_sparse())

        # tensor with multiple values: duplicates at the empty index are summed
        # by to_dense() (12.3 * 2).
        a = self.sparse_tensor(self.index_tensor([], device=device).unsqueeze(1).expand(0, 2),
                               [12.3, 12.3], [], dtype=dtype, device=device)
        self.assertEqual(2, a._values().numel())
        self.assertEqual(a, a.clone())
        a_coalesced = a.coalesce()
        self.assertTrue(a_coalesced.is_coalesced())
        self.assertEqual(torch.tensor(12.3 * 2, dtype=dtype, device=device), a.to_dense())
        self.assertEqual(a.coalesce(), a.coalesce().to_dense().to_sparse())

        # tensor without value
        a = self.sparse_empty((), dtype=dtype, device=device)
        self.assertEqual(0, a._values().numel())
        self.assertEqual(a, a.clone())
        a_coalesced = a.coalesce()
        self.assertTrue(a_coalesced.is_coalesced())
        self.assertEqual(torch.tensor(0, dtype=dtype, device=device), a.to_dense())
        self.assertEqual(a, a.to_dense().to_sparse())
@dtypes(torch.double, torch.cdouble)
def test_shared(self, device, dtype):
i = self.index_tensor([[2]], device=device)
v = torch.tensor([5], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3]))
v[0] = 6
self.assertEqual(torch.tensor([0, 0, 6], dtype=dtype, device=device), self.safeToDense(x))
i[0][0] = 0
self.assertEqual(torch.tensor([6, 0, 0], dtype=dtype, device=device), self.safeToDense(x))
i = self.index_tensor([[2]], device=device)
v = torch.empty((1, 0), dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 0]))
i[0][0] = 0
self.assertEqual(torch.empty((3, 0), dtype=dtype, device=device), self.safeToDense(x))
    @dtypes(torch.double, torch.cdouble)
    def test_to_dense_hybrid(self, device, dtype):
        # to_dense() for hybrid tensors (sparse dims + trailing dense dims),
        # including repeated calls and a gradcheck pass.
        def test_tensor(x, res):
            # Repeated conversions must not corrupt memory.
            x.to_dense()  # Tests double to_dense for memory corruption
            x.to_dense()
            x.to_dense()
            self.assertEqual(res, x.to_dense())
            self.assertEqual(res, self.safeToDense(x))

            def fn(x):
                return x.to_dense()
            x.requires_grad_(True)
            gradcheck(fn, (x,), check_sparse_nnz=True)

        # 2 sparse dims + 1 dense dim of size 2
        i = self.index_tensor([
            [0, 1, 2, 2],
            [0, 0, 0, 3],
        ], device=device)
        v = torch.tensor([[2, 3], [1, 2], [3, 4], [4, 5]], dtype=dtype, device=device)
        x = self.sparse_tensor(i, v, torch.Size([3, 4, 2]))
        res = torch.tensor([
            [[2, 3],
             [0, 0],
             [0, 0],
             [0, 0]],
            [[1, 2],
             [0, 0],
             [0, 0],
             [0, 0]],
            [[3, 4],
             [0, 0],
             [0, 0],
             [4, 5]],
        ], dtype=dtype, device=device)
        test_tensor(x, res)

        # Same layout with a zero-sized trailing dense dimension.
        i = self.index_tensor([
            [0, 1, 2, 2],
            [0, 0, 0, 3],
        ], device=device)
        v = torch.empty((4, 2, 0), dtype=dtype, device=device)
        x = self.sparse_tensor(i, v, torch.Size([3, 4, 2, 0]))
        res = torch.empty((3, 4, 2, 0), dtype=dtype, device=device)
        test_tensor(x, res)
    @dtypes(torch.double, torch.cdouble)
    def test_contig(self, device, dtype):
        # coalesce() must sort indices lexicographically and sum values at
        # duplicate indices.
        def test_tensor(x, exp_i, exp_v):
            x = x.coalesce()
            self.assertEqual(exp_i, x._indices())
            self.assertEqual(exp_v, x._values())

        # Unsorted indices are sorted; values are permuted to match.
        i = self.index_tensor([
            [1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
            [92, 31, 62, 50, 22, 65, 89, 74, 56, 34],
        ], device=device)
        v = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype, device=device)
        x = self.sparse_tensor(i, v, torch.Size([100, 100]))
        exp_i = self.index_tensor([
            [0, 1, 6, 14, 27, 35, 39, 40, 66, 71],
            [31, 92, 65, 50, 34, 62, 22, 56, 74, 89],
        ], device=device)
        exp_v = torch.tensor([2, 1, 6, 4, 10, 3, 5, 9, 8, 7], dtype=dtype, device=device)
        test_tensor(x, exp_i, exp_v)

        # 3 sparse dims.
        i = self.index_tensor([
            [2, 0, 2, 1],
            [0, 0, 3, 0],
            [1, 0, 4, 0],
        ], device=device)
        v = torch.tensor([3, 2, 4, 1], dtype=dtype, device=device)
        x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]))
        exp_i = self.index_tensor([
            [0, 1, 2, 2],
            [0, 0, 0, 3],
            [0, 0, 1, 4],
        ], device=device)
        exp_v = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)
        test_tensor(x, exp_i, exp_v)

        # Zero-sized trailing dense dimension.
        i = self.index_tensor([
            [2, 0, 2, 1],
            [0, 0, 3, 0],
            [1, 0, 4, 0],
        ], device=device)
        v = torch.empty([4, 0], dtype=dtype, device=device)
        x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]))
        exp_i = self.index_tensor([
            [0, 1, 2, 2],
            [0, 0, 0, 3],
            [0, 0, 1, 4],
        ], device=device)
        exp_v = torch.empty([4, 0], dtype=dtype, device=device)
        test_tensor(x, exp_i, exp_v)

        # Duplicate indices: values at (0,0,0) are summed (3 + 2 + 1 -> 6).
        i = self.index_tensor([
            [0, 0, 2, 0],
            [0, 0, 3, 0],
            [0, 0, 4, 0],
        ], device=device)
        v = torch.tensor([3, 2, 4, 1], dtype=dtype, device=device)
        x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]))
        exp_i = self.index_tensor([
            [0, 2],
            [0, 3],
            [0, 4],
        ], device=device)
        exp_v = torch.tensor([6, 4], dtype=dtype, device=device)
        test_tensor(x, exp_i, exp_v)

        # Duplicate indices with zero-sized dense dim: nnz shrinks from 4 to 2.
        i = self.index_tensor([
            [0, 0, 2, 0],
            [0, 0, 3, 0],
            [0, 0, 4, 0],
        ], device=device)
        v = torch.empty([4, 0], dtype=dtype, device=device)
        x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]))
        exp_i = self.index_tensor([
            [0, 2],
            [0, 3],
            [0, 4],
        ], device=device)
        exp_v = torch.empty([2, 0], dtype=dtype, device=device)
        test_tensor(x, exp_i, exp_v)
    @dtypes(torch.double, torch.cdouble)
    def test_contig_hybrid(self, device, dtype):
        # Same as test_contig but for hybrid tensors (extra dense dims in the
        # values): coalesce() sorts indices and sums duplicate value rows.
        def test_tensor(x, exp_i, exp_v):
            x = x.coalesce()
            self.assertEqual(exp_i, x._indices())
            self.assertEqual(exp_v, x._values())

        # Unsorted indices are sorted; value rows are permuted to match.
        i = self.index_tensor([
            [1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
            [92, 31, 62, 50, 22, 65, 89, 74, 56, 34],
        ], device=device)
        v = torch.tensor([
            [1, 2], [2, 3], [3, 4], [4, 5], [5, 6],
            [6, 7], [7, 8], [8, 9], [9, 10], [10, 11],
        ], dtype=dtype, device=device)
        x = self.sparse_tensor(i, v, torch.Size([100, 100, 2]))
        exp_i = self.index_tensor([
            [0, 1, 6, 14, 27, 35, 39, 40, 66, 71],
            [31, 92, 65, 50, 34, 62, 22, 56, 74, 89],
        ], device=device)
        exp_v = torch.tensor([
            [2, 3], [1, 2], [6, 7], [4, 5], [10, 11],
            [3, 4], [5, 6], [9, 10], [8, 9], [7, 8],
        ], dtype=dtype, device=device)
        test_tensor(x, exp_i, exp_v)

        # 3 sparse dims + 1 dense dim.
        i = self.index_tensor([
            [2, 0, 2, 1],
            [0, 0, 3, 0],
            [1, 0, 4, 0],
        ], device=device)
        v = torch.tensor([[3, 3, 3], [2, 2, 2], [4, 4, 4], [1, 1, 1]], dtype=dtype, device=device)
        x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 3]))
        exp_i = self.index_tensor([
            [0, 1, 2, 2],
            [0, 0, 0, 3],
            [0, 0, 1, 4],
        ], device=device)
        exp_v = torch.tensor([[2, 2, 2], [1, 1, 1], [3, 3, 3], [4, 4, 4]], dtype=dtype, device=device)
        test_tensor(x, exp_i, exp_v)

        # Zero-sized trailing dense dimension.
        i = self.index_tensor([
            [2, 0, 2, 1],
            [0, 0, 3, 0],
            [1, 0, 4, 0],
        ], device=device)
        v = torch.empty([4, 3, 0], dtype=dtype, device=device)
        x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 3, 0]))
        exp_i = self.index_tensor([
            [0, 1, 2, 2],
            [0, 0, 0, 3],
            [0, 0, 1, 4],
        ], device=device)
        exp_v = torch.empty([4, 3, 0], dtype=dtype, device=device)
        test_tensor(x, exp_i, exp_v)

        # Duplicate indices: value rows at (0,0,0) are summed element-wise.
        i = self.index_tensor([
            [0, 0, 2, 0],
            [0, 0, 3, 0],
            [0, 0, 4, 0],
        ], device=device)
        v = torch.tensor([[3, 2, 3], [2, 1, 1], [4, 3, 4], [1, 1, 1]], dtype=dtype, device=device)
        x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 3]))
        exp_i = self.index_tensor([
            [0, 2],
            [0, 3],
            [0, 4],
        ], device=device)
        exp_v = torch.tensor([[6, 4, 5], [4, 3, 4]], dtype=dtype, device=device)
        test_tensor(x, exp_i, exp_v)

        # Duplicate indices with zero-sized dense dim: nnz shrinks to 2.
        i = self.index_tensor([
            [0, 0, 2, 0],
            [0, 0, 3, 0],
            [0, 0, 4, 0],
        ], device=device)
        v = torch.empty([4, 3, 0], dtype=dtype, device=device)
        x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 3, 0]))
        exp_i = self.index_tensor([
            [0, 2],
            [0, 3],
            [0, 4],
        ], device=device)
        exp_v = torch.empty([2, 3, 0], dtype=dtype, device=device)
        test_tensor(x, exp_i, exp_v)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_clone(self, device, dtype, coalesced):
def test_shape(sparse_dims, nnz, with_size):
x = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
if not coalesced:
self.assertFalse(x.is_coalesced())
y = x.clone()
self.assertFalse(y.is_coalesced())
x = x.coalesce()
self.assertTrue(x.is_coalesced())
y = x.clone()
self.assertTrue(y.is_coalesced())
test_shape(4, 20, 5)
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
    @coalescedonoff
    @dtypes(torch.double, torch.cdouble, torch.bfloat16)
    @precisionOverride({torch.bfloat16: 2e-2})
    def test_Sparse_to_Sparse_copy_(self, device, dtype, coalesced):
        # This is for testing torch.copy_(SparseTensor, SparseTensor):
        # value copy, dtype preservation, shape checks, and autograd flow.
        sparse_dims = 3
        nnz = 10
        sizes = [2, 3, 4, 5]  # hybrid sparse
        x1, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
        x2, _, _ = self._gen_sparse(sparse_dims, nnz + 10, sizes, dtype, device, coalesced)

        # test copy: after copy_, x1 must materialize to the same dense values.
        x2_dense = x2.to_dense()
        x1.copy_(x2)
        self.assertEqual(x2_dense, x1.to_dense())

        # test type conversion (when x1.copy_(x2), x1.dtype should stay the same)
        x1 = x1.to(torch.float32)
        x2 = x2.to(torch.float16)
        x1_dtype = x1.dtype
        x1.copy_(x2)
        self.assertEqual(x1_dtype, x1.dtype)

        x2 = x2.to(torch.float64)
        x1_dtype = x1.dtype
        x1.copy_(x2)
        self.assertEqual(x1_dtype, x1.dtype)

        # test no broadcast: copying from a narrowed (differently-sized) tensor fails.
        self.assertRaises(RuntimeError, lambda: x1.copy_(x2.narrow_copy(0, 0, 1)))

        # test raise error on copy_() between dense and sparse Tensors
        self.assertRaises(RuntimeError, lambda: x1.copy_(torch.randn(5, 5)))

        # test autograd: gradient must flow back to the copy source x2, not x1.
        x1, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
        x2, _, _ = self._gen_sparse(sparse_dims, nnz + 10, sizes, dtype, device, coalesced)
        x2.requires_grad_(True)
        x1.copy_(x2)
        y = x1 * 2
        x2_clone = x2.clone()
        y.backward(x2_clone)
        expected_grad = x2_clone * 2
        self.assertEqual(expected_grad.to_dense(), x2.grad.to_dense())
        self.assertEqual(None, x1.grad)
    @coalescedonoff
    @unittest.skipIf(torch.cuda.device_count() < 2, "no multi-GPU")
    @dtypes(torch.double, torch.cdouble)
    def test_Sparse_to_Sparse_copy_multi_gpu(self, device, dtype, coalesced):
        # This is for testing torch.copy_(SparseTensor, SparseTensor) across GPU devices:
        # the destination keeps its device while taking the source's values.
        sparse_dims = 3
        nnz = 10
        sizes = [2, 3, 4, 5]  # hybrid sparse
        x1, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
        x2, _, _ = self._gen_sparse(sparse_dims, nnz + 10, sizes, dtype, device, coalesced)
        x1 = x1.to('cuda:0')

        def test_cross_device(x1, x2):
            x1_device = x1.device
            x1.copy_(x2)
            self.assertEqual(x2.to('cuda:0').to_dense(), x1.to_dense())
            self.assertEqual(x1_device, x1.device)

        test_cross_device(x1, x2.to('cuda:1'))  # test across gpu devices
        test_cross_device(x1, x2.to('cpu'))  # test between cpu and gpu

        # test autograd: gradient flows back to the source on its own device.
        x2 = x2.to('cuda:1')
        x2.requires_grad_(True)
        x1.copy_(x2)
        y = x1 * 2
        x2_clone = x2.clone().to('cuda:0')
        y.backward(x2_clone)
        expected_grad = x2_clone * 2
        self.assertEqual(expected_grad.to_dense(), x2.grad.to('cuda:0').to_dense())
        self.assertEqual(None, x1.grad)
@onlyCUDA
def test_cuda_empty(self, device):
def test_tensor(x):
y = x.to(device)
self.assertEqual(x.sparse_dim(), y.sparse_dim())
self.assertEqual(x.dense_dim(), y.dense_dim())
x = y.cpu()
self.assertEqual(y.sparse_dim(), x.sparse_dim())
self.assertEqual(y.dense_dim(), x.dense_dim())
x = torch.sparse.FloatTensor(2, 3, 4)
test_tensor(x)
x = torch.sparse.HalfTensor(2, 3, 4)
test_tensor(x)
x = torch.cuda.sparse.HalfTensor(2, 3, 4)
test_tensor(x)
x = torch.sparse.FloatTensor(2, 3, 4, 0)
test_tensor(x)
    @coalescedonoff
    @dtypes(torch.double, torch.cdouble)
    def test_transpose(self, device, dtype, coalesced):
        # transpose/transpose_ on sparse tensors must track dense transpose
        # across every pair of the first four dimensions.
        def test_shape(sparse_dims, nnz, with_size):
            x = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
            y = self.safeToDense(x)

            for i, j in itertools.combinations(range(4), 2):
                # in-place variant (transpose_ returns self, so the
                # reassignment is just an alias)
                x = x.transpose_(i, j)
                y = y.transpose(i, j)
                self.assertEqual(self.safeToDense(x), y)

                # out-of-place variant
                x = x.transpose(i, j)
                y = y.transpose(i, j)
                self.assertEqual(self.safeToDense(x), y)

        test_shape(4, 6, 3)
        test_shape(4, 3, [7, 7, 7, 3, 3, 3, 0])
        test_shape(4, 0, [0, 0, 7, 3, 3, 3, 0])
    @coalescedonoff
    @dtypes(torch.double, torch.cdouble)
    def test_permute(self, device, dtype, coalesced):
        # permute on sparse tensors: valid permutations (those that do not mix
        # sparse and dense dims) must match dense permute; invalid ones raise.
        # trivial checks
        s = torch.rand(3, 3, 3, device=device, dtype=dtype).to_sparse()
        with self.assertRaisesRegex(RuntimeError, "does not match the length"):
            s.permute(dims=(1, 0))
        with self.assertRaisesRegex(RuntimeError, "duplicate dims"):
            s.permute(dims=(1, 1, 1))

        def test_shape(sparse_dims, nnz, with_size):
            ndim = len(with_size)
            # Negative-dim labels for the sparse block and the dense block.
            valid_sparse_dims = torch.arange(-ndim, -ndim + sparse_dims)
            valid_dense_dims = torch.arange(-ndim + sparse_dims, 0)

            for dims in itertools.permutations(range(-ndim, 0)):
                s = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
                d = self.safeToDense(s)

                dims_sparse, _ = torch.tensor(dims[:sparse_dims]).sort()
                dims_dense, _ = torch.tensor(dims[sparse_dims:]).sort()
                if (valid_sparse_dims == dims_sparse).all() and (valid_dense_dims == dims_dense).all():
                    # if valid permutation, test for correctness
                    s_permuted = s.permute(dims)
                    self.assertEqual(s_permuted, d.permute(dims))

                    # if s is coalesced, and perm does not touch 0-dim,
                    # the result has to be coalesced as well
                    if dims[0] == 0:
                        self.assertEqual(s_permuted.is_coalesced(), s.is_coalesced())
                    else:
                        self.assertFalse(s_permuted.is_coalesced())

                    gradcheck(lambda t: t.permute(dims).to_dense(), s.requires_grad_(True), check_sparse_nnz=True)
                else:
                    # otherwise check if exception is thrown
                    fail_message = "transpositions between sparse and dense dimensions are not allowed"
                    with self.assertRaisesRegex(RuntimeError, fail_message):
                        s.permute(dims)

        test_shape(2, 3, [2, 3, 4, 5])
        test_shape(2, 3, [2, 2, 0])
        # if nnz=0, it is not true that t == t.to_dense().to_sparse()
        # unless t.sparse_dim == t.dim (i.e. t is not hybrid)
        test_shape(3, 0, [0, 0, 2])
    @coalescedonoff
    @onlyCPU
    @dtypes(torch.double)
    def test_coalesce_transpose_mm(self, device, dtype, coalesced):
        # mm with a transposed coalesced sparse operand must match dense mm,
        # and transpose must only preserve the coalesced flag for empty indices.
        def test_shape(di, dj, dk, nnz):
            x, _, _ = self._gen_sparse(2, nnz, [dj, di], dtype, device, coalesced)
            y = torch.randn(dj, dk, dtype=dtype, device=device)

            x_coalesced = x.coalesce()
            self.assertTrue(x_coalesced.is_coalesced())

            x_coalesced_t = x_coalesced.t()
            # Transpose is `coalesced`-preserving if the indices tensor is empty.
            self.assertEqual(x_coalesced_t.is_coalesced(), di * nnz == 0)

            res = torch.mm(x_coalesced_t, y)
            expected = torch.mm(self.safeToDense(x_coalesced_t), y)
            self.assertEqual(res, expected)

        test_shape(10, 20, 30, 20)
        test_shape(0, 20, 30, 0)
        test_shape(10, 0, 30, 0)
        test_shape(10, 20, 0, 0)
        test_shape(10, 20, 0, 20)
@dtypes(torch.double, torch.cdouble)
def test_t_empty(self, device, dtype):
def test_in_place(x):
shape_original = x.shape
x.t_()
self.assertEqual(torch.Size([shape_original[1], shape_original[0]]), x.size())
self.assertEqual(0, x._indices().numel())
self.assertEqual(0, x._values().numel())
self.assertEqual(x.sparse_dim(), 2)
self.assertEqual(x.dense_dim(), 0)
def test_not_in_place(x):
shape_original = x.shape
y = x.t()
self.assertEqual(torch.Size([shape_original[1], shape_original[0]]), y.size())
self.assertEqual(0, y._indices().numel())
self.assertEqual(0, y._values().numel())
self.assertEqual(x.sparse_dim(), 2)
self.assertEqual(x.dense_dim(), 0)
x = self.sparse_empty(2, 3, dtype=dtype, device=device)
test_in_place(x)
test_not_in_place(x)
x = self.sparse_empty(2, 0, dtype=dtype, device=device)
test_in_place(x)
test_not_in_place(x)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_add_zeros(self, device, dtype, coalesced):
def test_shape(sparse_dims, nnz, sizes):
x, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
zeros = torch.zeros(sizes, layout=torch.sparse_coo).to(x.device)
r1 = zeros + x
r2 = x + zeros
self.assertEqual(r1, x)
self.assertEqual(r2, x)
test_shape(1, 20, [1])
test_shape(4, 20, [3, 17, 19, 5])
test_shape(2, 20, [3, 17, 19, 5])
test_shape(2, 20, [3, 17, 19, 0])
@dtypes(torch.double, torch.cdouble)
def test_add_sub_nnz(self, device, dtype):
# nnz should not grow unbounded (gh-34964)
x = torch.randn(10, dtype=dtype, device=device).to_sparse()
x.add_(x)
x.add_(x)
self.assertLessEqual(x._nnz(), 10)
x.sub_(2 * x)
x.sub_(2 * x)
self.assertLessEqual(x._nnz(), 10)
    @coalescedonoff
    @dtypes(torch.double, torch.cdouble)
    def test_cat(self, device, dtype, coalesced):
        # torch.cat on sparse tensors must match dense cat, and raise clear
        # errors for mismatched shapes/dims and sparse-dense mixing.
        # shapes: list of tuples (sparse_dims, nnz, sizes)
        def test_shapes(shapes, dim, fail_message=None):
            inputs = [self._gen_sparse(shape[0], shape[1], shape[2], dtype, device, coalesced)[0]
                      for shape in shapes]
            if fail_message:
                with self.assertRaisesRegex(RuntimeError, fail_message):
                    torch.cat(inputs, dim)
            else:
                result = torch.cat(inputs, dim)
                dense_result = torch.cat([t.to_dense() for t in inputs], dim)
                self.assertEqual(dense_result, result.to_dense())

        test_shapes(
            [(3, 10, [2, 3, 4]), (3, 10, [2, 1, 4]), (3, 10, [2, 4, 4])], 1)

        # mismatched sizes
        test_shapes([(3, 10, [2, 3, 4]), (3, 10, [2, 1, 4])], 0,
                    "All tensors must have the same shape: \\[2, 3, 4].*\\[2, 1, 4]")
        # hybrid sparse/dense
        test_shapes(
            [(2, 10, [2, 3, 4]), (2, 10, [2, 1, 4]), (2, 10, [2, 4, 4])], 1)
        # cat along dense dim
        test_shapes([(2, 10, [2, 3, 4]), (2, 10, [2, 3, 7])], 2)
        test_shapes([(1, 10, [2, 3, 4]), (1, 10, [2, 3, 4])], 1)
        test_shapes([(1, 10, [2, 3, 4]), (1, 10, [2, 3, 4])], 2)
        # mismatched dimensions
        test_shapes([(2, 10, [2, 3, 4]), (3, 10, [2, 3, 4])], 0,
                    "All tensors must have the same.*2, 1, but tensor at position 1 has 3, 0.")
        # wrapped dimension
        test_shapes(
            [(3, 10, [2, 3, 4]), (3, 10, [2, 1, 4]), (3, 10, [2, 4, 4])], -2)

        # sparse with dense
        sp = self._gen_sparse(3, 10, [2, 3, 4], dtype, device, coalesced)[0]
        dn = sp.to_dense()
        with self.assertRaisesRegex(RuntimeError,
                                    "Concatenating sparse tensors, but a dense tensor was found at position 1."):
            torch.cat((sp, dn))
    @coalescedonoff
    @dtypes(torch.double, torch.cdouble)
    def test_unsqueeze(self, device, dtype, coalesced):
        # torch.unsqueeze on a sparse tensor must match dense unsqueeze for
        # sparse dims, dense dims, and wrapped (negative) dims; out-of-range
        # dims raise IndexError.
        def test_shape(sparse_dims, nnz, sizes, unsqueeze_dim, fail_message=None):
            x, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
            if fail_message:
                with self.assertRaisesRegex(IndexError, fail_message):
                    torch.unsqueeze(x, unsqueeze_dim)
            else:
                result = torch.unsqueeze(x, unsqueeze_dim)
                dense_result = torch.unsqueeze(x.to_dense(), unsqueeze_dim)
                self.assertEqual(dense_result, result.to_dense())

        # basic case
        test_shape(3, 10, [5, 7, 11], 0)

        # hybrid sparse/dense, unsqueeze along sparse dim
        test_shape(3, 10, [5, 7, 11, 13, 17], 0)
        test_shape(3, 10, [5, 7, 11, 13, 17], 3)

        # unsqueeze along dense dimensions
        test_shape(3, 10, [5, 7, 11, 13, 17], 4)
        test_shape(3, 10, [5, 7, 11, 13, 17], 5)

        # wrapped dimensions
        test_shape(3, 10, [5, 7, 11, 13, 17], -1)
        test_shape(3, 10, [5, 7, 11, 13, 17], -6)

        # bounds
        test_shape(3, 10, [5, 7, 11, 13, 17], -7, "Dimension out of range")
        test_shape(3, 10, [5, 7, 11, 13, 17], 6, "Dimension out of range")
    @coalescedonoff
    @dtypes(torch.double, torch.cdouble)
    def test_select(self, device, dtype, coalesced):
        # torch.select on a sparse tensor must match dense select, whether the
        # result stays sparse or becomes dense; invalid indices raise IndexError.
        def test_shape(sparse_dims, nnz, sizes, select_dim, select_index, fail_message=None):
            x, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
            if fail_message:
                with self.assertRaisesRegex(IndexError, fail_message):
                    torch.select(x, select_dim, select_index)
            else:
                result = torch.select(x, select_dim, select_index)
                if result.is_sparse:
                    result = result.to_dense()
                dense_result = torch.select(x.to_dense(), select_dim, select_index)
                self.assertEqual(dense_result, result)

        sizes = [5, 7, 11, 13, 17]
        # hybrid sparse/dense, select sparse dim, result is dense
        for i in range(sizes[0]):
            test_shape(1, 10, sizes, 0, i)
        test_shape(1, 10, sizes, 0, sizes[0] + 1, r'select[(][)][:] index \d out of range.*')

        # hybrid sparse/dense, select sparse dim, result is sparse
        for d in range(3):
            for i in range(sizes[d]):
                test_shape(3, 10, sizes, d, i)

        # hybrid sparse/dense, select dense dim, result is sparse
        for d in range(1, 3):
            for i in range(sizes[d]):
                test_shape(1, 10, sizes, d, i)
    @dtypes(*integral_types())
    def test_select_no_type_promotion(self, device, dtype):
        # Indexing/select on an integral sparse tensor must keep the dtype
        # instead of promoting it.
        # see https://github.com/pytorch/pytorch/issues/82150
        idx = torch.tensor([[0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1]])
        val = torch.ones(6, dtype=dtype)
        s = torch.sparse_coo_tensor(idx, val, size=(3, 3))

        # Also check an explicitly zeroed copy of the same tensor.
        for t in (s, s * torch.tensor(0, dtype=dtype)):
            # empty checks
            self.assertEqual(t.dtype, t[2].dtype)
            self.assertEqual(t.dtype, t[0, 1].dtype)
            # sum should not promote
            self.assertEqual(t.dtype, t[0, 0].dtype)
            self.assertEqual(t.dtype, t[1, 1].dtype)
    @coalescedonoff
    @dtypes(torch.double, torch.cdouble)
    def test_index_select(self, device, dtype, coalesced):
        # torch.index_select on a sparse tensor must match dense index_select
        # for every dim and for int / list / tensor index arguments.
        def test_shape(sparse_dims, nnz, sizes, select_dim, select_index, fail_message=None):
            # Normalize int and list indices to a long tensor.
            if isinstance(select_index, int):
                select_index = [select_index]
            if isinstance(select_index, list):
                select_index = torch.tensor(select_index, device=device, dtype=torch.long)
            x, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
            if fail_message:
                with self.assertRaisesRegex(IndexError, fail_message):
                    torch.index_select(x, select_dim, select_index)
            else:
                result = torch.index_select(x, select_dim, select_index)
                if result.is_sparse:
                    result = result.to_dense()
                dense_result = torch.index_select(x.to_dense(), select_dim, select_index)
                self.assertEqual(dense_result, result)

        sizes = [5, 7, 11, 13, 17]
        for d in range(len(sizes)):
            # First, last, and a spread of indices along dim d.
            for index in [0, sizes[d] - 1, [0, sizes[d] // 2, sizes[d] - 1]]:
                test_shape(1, 10, sizes, d, index)
                test_shape(len(sizes) // 2, 10, sizes, d, index)
                test_shape(len(sizes), 10, sizes, d, index)
    def _test_index_select_exhaustive_index(self, sizes, dims, device, dtype, coalesced):
        # Helper: exhaustively compare sparse vs dense index_select over every
        # possible (negative) index tuple for each dim in `dims`, for both a
        # dense-ish tensor (nnz > size) and a tiny one (nnz <= size).
        t = make_tensor(sizes, dtype=dtype, device=device)
        t_sparse = t.to_sparse().coalesce() if coalesced else t.to_sparse()
        t_small_sparse, _, _ = self._gen_sparse(len(sizes), 2, sizes, dtype, device, coalesced)
        t_small = t_small_sparse.to_dense()
        for d in dims:
            # NOTE: indices are negative
            idx_dim_d_range = list(range(-sizes[d], 0))
            # NOTE: this range has a single element, so only idx_len == sizes[d]
            # is exercised.
            for idx_len in range(sizes[d], sizes[d] + 1):
                # creates all possible valid indices into dim d of length idx_len
                for idx in itertools.product(*itertools.repeat(idx_dim_d_range, idx_len)):
                    t_idx = torch.tensor(idx, dtype=torch.long, device=device)

                    # NOTE: index_select for dense does not support negative indices,
                    # hence + sizes[d]. See https://github.com/pytorch/pytorch/issues/76347

                    # tests the nnz > sizes[d] branch
                    dense_result = t.index_select(d, t_idx + sizes[d])
                    sparse_result = t_sparse.index_select(d, t_idx)
                    self.assertEqual(dense_result, sparse_result)

                    # tests the nnz <= sizes[d] branch
                    small_dense_result = t_small.index_select(d, t_idx + sizes[d])
                    small_sparse_result = t_small_sparse.index_select(d, t_idx)
                    self.assertEqual(small_dense_result, small_sparse_result)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_index_select_exhaustive_index_small(self, device, dtype, coalesced):
# will trigger brute-force algo
self._test_index_select_exhaustive_index((3, 3, 4), range(3), device, dtype, coalesced)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_index_select_exhaustive_index_large(self, device, dtype, coalesced):
# will trigger more sophisticated algos
self._test_index_select_exhaustive_index((100, 50, 3, 3), (2, 3), device, dtype, coalesced)
    @coalescedonoff
    @dtypes(torch.double, torch.cdouble)
    def test_index_select_empty_and_non_contiguous_index(self, device, dtype, coalesced):
        # Sparse index_select must match dense index_select for an empty index
        # and for a non-contiguous (strided) index tensor.
        # empty index
        idx_empty = torch.tensor([], dtype=torch.long, device=device)
        t = make_tensor((5, 5), dtype=dtype, device=device)
        res_dense = t.index_select(0, idx_empty)
        res_sparse = t.to_sparse().index_select(0, idx_empty)
        self.assertEqual(res_dense, res_sparse)

        # non-contiguous index (every other element of a (10, 2) random tensor)
        idx = torch.randint(low=0, high=5, size=(10, 2), device=device)[:, 0]

        def run_test(sizes):
            # case nnz > size[d]
            t = make_tensor(sizes, dtype=dtype, device=device)
            res_dense = t.index_select(0, idx)
            res_sparse = t.to_sparse().index_select(0, idx)
            self.assertEqual(res_dense, res_sparse)

            # case nnz <= size[d]
            t_small_sparse, _, _ = self._gen_sparse(len(sizes), 2, sizes, dtype, device, coalesced)
            res_sparse = t_small_sparse.index_select(0, idx)
            res_dense = t_small_sparse.to_dense().index_select(0, idx)
            self.assertEqual(res_dense, res_sparse)

        # brute-force
        run_test((10, 10))
        # more sophisticated algos
        run_test((10, 100, 100))
    @onlyCPU
    @coalescedonoff
    @dtypes(torch.double, torch.cdouble)
    def test_index_select_parallelization(self, device, dtype, coalesced):
        """
        Test with sizes that will trigger parallelization (i.e. with sizes
        that are >= at::internal::GRAIN_SIZE)
        """
        def run_test(nnz, size):
            t_sparse, _, _ = self._gen_sparse(1, nnz, (size,), dtype, device, coalesced)
            t_dense = t_sparse.to_dense()

            # idx_small to (sort) and (binary) search into t_sparse
            idx_small = torch.randint(size, (nnz // 2,), device=device)
            # idx_large to (sort) and (binary) search into idx_large
            # NOTE: when coalesced=True, the (binary) search will be
            # done over t_sparse anyway, as it is already sorted.
            idx_large = torch.randint(size, (nnz * 2,), device=device)
            for idx in (idx_small, idx_large):
                res_dense = t_dense.index_select(0, idx)
                res_sparse = t_sparse.index_select(0, idx)
                self.assertEqual(res_dense, res_sparse)

        # NOTE: GRAIN_SIZE = 32768
        # case nnz <= size[d]
        tlen = 70000  # > 2 * GRAIN_SIZE
        run_test(tlen, tlen)

        # case nnz > size[d]
        run_test(tlen, tlen // 2)
    @onlyCPU
    @coalescedonoff
    @dtypes(torch.double, torch.cdouble)
    def test_mm(self, device, dtype, coalesced):
        # addmm/mm with a sparse first operand must agree with the dense
        # equivalents, including zero-sized shapes and nnz == 0.
        def test_shape(di, dj, dk, nnz):
            x, _, _ = self._gen_sparse(2, nnz, [di, dj], dtype, device, coalesced)
            t = torch.randn(di, dk, dtype=dtype, device=device)
            y = torch.randn(dj, dk, dtype=dtype, device=device)
            alpha = random.random()
            beta = random.random()

            # addmm with explicit alpha/beta
            res = torch.addmm(t, x, y, beta=beta, alpha=alpha)
            expected = torch.addmm(t, self.safeToDense(x), y, beta=beta, alpha=alpha)
            self.assertEqual(res, expected)

            # addmm with default alpha/beta
            res = torch.addmm(t, x, y)
            expected = torch.addmm(t, self.safeToDense(x), y)
            self.assertEqual(res, expected)

            # plain mm
            res = torch.mm(x, y)
            expected = torch.mm(self.safeToDense(x), y)
            self.assertEqual(res, expected)

        test_shape(10, 100, 100, 20)
        test_shape(100, 1000, 200, 20)
        test_shape(64, 10000, 300, 20)
        test_shape(0, 100, 100, 0)
        test_shape(10, 0, 100, 0)
        test_shape(10, 100, 0, 0)
        test_shape(10, 100, 0, 20)
    @unittest.skipIf(
        IS_WINDOWS and TEST_CUDA,
        "bmm sparse-dense CUDA is not yet supported in Windows, at least up to CUDA 10.1"
    )
    @unittest.skipIf(
        TEST_CUDA and _get_torch_cuda_version() < (10, 1),
        "bmm sparse-dense requires CUDA 10.1 or greater"
    )
    @coalescedonoff
    @dtypes(torch.double)
    def test_bmm(self, device, dtype, coalesced):
        # Batched sparse-dense bmm must match per-matrix mm, including empty
        # dimensions, nnz == 0 batches, and all-zero matrices.
        def test_shape(num_mats, dim_i, dim_j, dim_k, nnz):
            a_list = []
            b_list = []
            for mat_idx in range(num_mats):
                a_mat = self._gen_sparse(2, nnz, [dim_i, dim_j], dtype, device, coalesced)[0]
                b_mat = torch.randn([dim_j, dim_k], dtype=dtype, device=device)
                a_list.append(a_mat)
                b_list.append(b_mat)

            a = torch.stack(a_list)
            b = torch.stack(b_list)
            ab = a.bmm(b)

            # Compare each matrix against result from mm()
            for mat_idx in range(num_mats):
                a_mat = a_list[mat_idx]
                b_mat = b_list[mat_idx]
                ab_mat_bmm = ab[mat_idx]
                ab_mat_mm = a_mat.mm(b_mat)
                self.assertEqual(ab_mat_bmm, ab_mat_mm)

        test_shape(10, 10, 100, 99, 20)
        test_shape(10, 100, 1000, 200, 20)
        test_shape(10, 64, 10000, 300, 20)
        test_shape(10, 0, 100, 99, 0)
        test_shape(10, 10, 0, 100, 0)
        test_shape(10, 10, 100, 0, 0)
        test_shape(10, 10, 100, 0, 20)
        test_shape(10, 10, 100, 0, 20)

        # Batches with some all-zero matrices on either side.
        a = torch.rand([10, 23, 32], dtype=dtype, device=device)
        a[3] = torch.zeros(23, 32, dtype=dtype, device=device)
        a[6] = torch.zeros(23, 32, dtype=dtype, device=device)
        a = a.to_sparse()
        b = torch.rand([10, 32, 10], dtype=dtype, device=device)
        b[4] = torch.zeros(32, 10, dtype=dtype, device=device)
        b[6] = torch.zeros(32, 10, dtype=dtype, device=device)
        ab = a.bmm(b)
        for mat_idx in range(ab.size(0)):
            ab_mat = ab[mat_idx]
            ab_mat_check = a[mat_idx].mm(b[mat_idx])
            self.assertEqual(ab_mat, ab_mat_check)

        # (A @ B)^T == (B^T @ A^T) cross-check via transposed operands.
        ab_traspose_check = b.transpose(1, 2).to_sparse().bmm(
            a.transpose(1, 2).to_dense()
        ).transpose(1, 2)
        self.assertEqual(ab, ab_traspose_check)
    @onlyCUDA
    @coalescedonoff
    @dtypes(torch.double)
    @unittest.skipIf(
        IS_WINDOWS,
        "bmm sparse-dense CUDA is not yet supported in Windows, at least up to CUDA 10.1"
    )
    @unittest.skipIf(
        _get_torch_cuda_version() < (10, 1),
        "bmm sparse-dense requires CUDA 10.1 or greater"
    )
    def test_bmm_deterministic(self, device, dtype, coalesced):
        # The deterministic and non-deterministic sparse-dense bmm kernels must
        # agree (exactly, or within a small relative tolerance).
        def test_shape(num_mats, dim_i, dim_j, dim_k, nnz):
            a_list = []
            b_list = []
            for mat_idx in range(num_mats):
                a_list.append(self._gen_sparse(2, nnz, [dim_i, dim_j], dtype, device, coalesced)[0])
                b_list.append(torch.randn([dim_j, dim_k], dtype=dtype, device=device))

            a = torch.stack(a_list).cuda()
            b = torch.stack(b_list).cuda()
            # DeterministicGuard restores the previous global setting on exit.
            with DeterministicGuard(torch.are_deterministic_algorithms_enabled()):
                torch.use_deterministic_algorithms(False)
                ab_nondeterministic = torch.bmm(a, b)
                torch.use_deterministic_algorithms(True)
                ab_deterministic = torch.bmm(a, b)
            diff_abs = (ab_deterministic - ab_nondeterministic).abs()
            diff_rel = diff_abs / ab_deterministic.abs()
            # 0/0 entries produce NaN; treat them as equal.
            diff_rel[torch.isnan(diff_rel)] = 0

            # deterministic and non-deterministic results should either be
            # equal or within a small relative difference
            equal_abs_or_rel = diff_abs.eq(0).logical_or(diff_rel.lt(0.001))
            self.assertTrue(equal_abs_or_rel.all())

        test_shape(10, 10, 100, 99, 20)
        test_shape(10, 100, 1000, 200, 20)
        test_shape(10, 64, 10000, 300, 20)
        test_shape(10, 0, 100, 99, 0)
        test_shape(10, 10, 0, 100, 0)
        test_shape(10, 10, 100, 0, 0)
        test_shape(10, 10, 100, 0, 20)
        test_shape(10, 10, 100, 0, 20)
@onlyCUDA
@unittest.skipIf(
not IS_WINDOWS or _get_torch_cuda_version() >= (11, 0),
"this test ensures bmm sparse-dense CUDA gives an error when run on Windows with CUDA < 11.0"
)
@dtypes(torch.double)
def test_bmm_windows_error(self, device, dtype):
a = torch.rand(2, 2, 2, dtype=dtype).to_sparse().cuda()
b = torch.rand(2, 2, 2, dtype=dtype).cuda()
with self.assertRaisesRegex(
RuntimeError,
"bmm sparse-dense CUDA is not supported on Windows with cuda before 11.0"):
ab = a.bmm(b)
@onlyCUDA
@skipIfRocm
@unittest.skipIf(
_get_torch_cuda_version() >= (10, 1),
"this test ensures bmm gives error if CUDA version is less than 10.1"
)
@dtypes(torch.double)
def test_bmm_cuda_version_error(self, device, dtype):
a = torch.rand(2, 2, 2, dtype=dtype).to_sparse().cuda()
b = torch.rand(2, 2, 2, dtype=dtype).cuda()
with self.assertRaisesRegex(
RuntimeError,
"bmm sparse-dense requires CUDA 10.1 or greater"):
ab = a.bmm(b)
    @onlyCPU
    @coalescedonoff
    @dtypes(torch.double, torch.cdouble)
    def test_saddmm(self, device, dtype, coalesced):
        # saddmm/smm (sparse-output addmm/mm) must agree with the dense
        # addmm/mm equivalents after densifying the sparse result.
        def test_shape(di, dj, dk, nnz):
            x = self._gen_sparse(2, nnz, [di, dj], dtype, device, coalesced)[0]
            t = self._gen_sparse(2, nnz, [di, dk], dtype, device, coalesced)[0]
            y = torch.randn(dj, dk, dtype=dtype, device=device)
            alpha = random.random()
            beta = random.random()

            # saddmm with explicit alpha/beta
            res = torch.saddmm(t, x, y, beta=beta, alpha=alpha)
            expected = torch.addmm(self.safeToDense(t), self.safeToDense(x), y, beta=beta, alpha=alpha)
            self.assertEqual(self.safeToDense(res), expected)

            # saddmm with default alpha/beta
            res = torch.saddmm(t, x, y)
            expected = torch.addmm(self.safeToDense(t), self.safeToDense(x), y)
            self.assertEqual(self.safeToDense(res), expected)

            # smm (sparse mm with sparse result)
            res = torch.smm(x, y)
            expected = torch.mm(self.safeToDense(x), y)
            self.assertEqual(self.safeToDense(res), expected)

        test_shape(7, 5, 3, 20)
        test_shape(1000, 100, 100, 20)
        test_shape(3000, 64, 300, 20)
        test_shape(0, 100, 100, 0)
        test_shape(1000, 0, 100, 0)
        test_shape(1000, 100, 0, 0)
@onlyCPU
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_sspaddmm(self, device, dtype, coalesced):
    """Tensor.sspaddmm (sparse + sparse @ dense -> sparse) matches dense addmm."""
    def test_shape(di, dj, dk, nnz):
        x = self._gen_sparse(2, nnz, [di, dj], dtype, device, coalesced)[0]
        t = self._gen_sparse(2, nnz, [di, dk], dtype, device, coalesced)[0]
        y = torch.randn(dj, dk, dtype=dtype, device=device)
        alpha = random.random()
        beta = random.random()

        res = t.sspaddmm(x, y, beta=beta, alpha=alpha)
        expected = torch.addmm(self.safeToDense(t), self.safeToDense(x), y, beta=beta, alpha=alpha)
        self.assertEqual(self.safeToDense(res), expected)

        # Default alpha/beta path.
        res = t.sspaddmm(x, y)
        expected = torch.addmm(self.safeToDense(t), self.safeToDense(x), y)
        self.assertEqual(self.safeToDense(res), expected)

    test_shape(7, 5, 3, 20)
    test_shape(1000, 100, 100, 20)
    test_shape(3000, 64, 300, 20)
    test_shape(0, 100, 100, 0)
    test_shape(1000, 0, 100, 0)
    test_shape(1000, 100, 0, 0)

    # Test code from issue https://github.com/pytorch/pytorch/issues/45113
    batch_size, input_size, hidden_size = 5, 3, 7

    # Create coalesced sparse tensor with non-contiguous indices
    weight = torch.randn(hidden_size, input_size, dtype=dtype, device=device).to_sparse()
    self.assertTrue(weight.is_coalesced())
    # Double transpose leaves values intact but makes the indices tensor
    # non-contiguous, which is the condition the issue was about.
    non_contig_indices = weight.indices().mT.contiguous().mT
    weight = torch.sparse_coo_tensor(
        indices=non_contig_indices, values=weight.values(), size=weight.shape)
    weight._coalesced_(True)
    self.assertFalse(weight._indices().is_contiguous())

    # Create un/coalesced sparse tensor
    bias = torch.randn((hidden_size, 1), dtype=dtype, device=device).to_sparse()
    bias = torch.cat([bias] * batch_size, dim=1)
    if coalesced:
        bias = bias.coalesce()

    x = torch.randn(input_size, batch_size, dtype=dtype, device=device)
    res = bias.sspaddmm(weight, x)

    true_result = (bias.to_dense() + torch.matmul(weight.to_dense(), x)).to_sparse()
    self.assertEqual(self.safeToDense(res), self.safeToDense(true_result))
@coalescedonoff
@unittest.skip("See https://github.com/pytorch/pytorch/issues/73145")
@dtypes(torch.double, torch.cdouble, torch.bfloat16)
def test_sparse_addmm(self, device, dtype, coalesced):
    """torch.sparse.addmm matches dense addmm in value and gradient.

    Currently skipped; see the linked issue.
    """
    def test_shape(m, n, p, nnz, broadcast, alpha_beta=None):
        if alpha_beta is None:
            alpha = random.random()
            beta = random.random()
        else:
            alpha, beta = alpha_beta
        if broadcast:
            # 0-dim addend exercises broadcasting in addmm.
            D1 = make_tensor((), dtype=dtype, device=device, requires_grad=True)
        else:
            D1 = make_tensor([n, p], dtype=dtype, device=device, requires_grad=True)
        D2 = make_tensor([m, p], dtype=dtype, device=device, requires_grad=True)
        S = self._gen_sparse(2, nnz, [n, m], dtype, device, coalesced)[0]
        S_dense = S.to_dense().requires_grad_(True)
        S.requires_grad_(True)
        Y = torch.sparse.addmm(D1, S, D2, beta=beta, alpha=alpha)
        Y_dense = torch.addmm(D1, S_dense, D2, beta=beta, alpha=alpha)
        self.assertEqual(Y, Y_dense)

        # alpha/beta are captured as defaults so gradcheck perturbs only S, D1, D2.
        def fn(S, D1, D2, beta=beta, alpha=alpha):
            return torch.sparse.addmm(D1, S, D2, beta=beta, alpha=alpha)
        gradcheck(fn, (S, D1, D2), check_sparse_nnz=True)

    test_shape(7, 8, 9, 20, False, None)
    test_shape(7, 8, 9, 20, True, None)
    test_shape(7, 8, 9, 20, False, (1, 0))
    test_shape(7, 8, 9, 20, True, (1, 0))
    test_shape(7, 8, 9, 20, False, (1, 1))
    test_shape(7, 8, 9, 20, True, (1, 1))
@coalescedonoff
@dtypes(torch.double)
def test_sparse_mm(self, device, dtype, coalesced):
    """torch.sparse.mm matches dense mm, including gradients (via gradcheck)."""
    def test_shape(d1, d2, d3, nnz, transposed):
        if transposed:
            # Build transposed then transpose in place: dense operand with
            # non-contiguous memory layout.
            D = torch.randn(d3, d2, dtype=dtype,
                            device=device).t_().requires_grad_(True)
        else:
            D = torch.randn(d2, d3, dtype=dtype, device=device).requires_grad_(True)
        S = self._gen_sparse(2, nnz, [d1, d2], dtype, device, coalesced)[0]
        S_dense = S.to_dense().requires_grad_(True)
        S.requires_grad_(True)
        self.assertEqual(torch.sparse.mm(S, D), torch.mm(S_dense, D))

        def fn(S, D):
            return torch.sparse.mm(S, D)
        gradcheck(fn, (S, D), check_sparse_nnz=True)

    test_shape(7, 8, 9, 20, False)
    test_shape(7, 8, 9, 20, True)
@coalescedonoff
@dtypes(torch.double)
def test_sparse_mul(self, device, dtype, coalesced):
    """Elementwise sparse * sparse matches dense multiply, incl. gradients."""
    # https://github.com/pytorch/pytorch/issues/79914
    a = torch.tensor([[0., 1]], dtype=dtype, device=device).to_sparse().requires_grad_(True)
    b = torch.tensor([[0., 1]], dtype=dtype, device=device).to_sparse().requires_grad_(True)
    gradcheck(lambda x, y: torch.sparse.sum(x * y).to_dense(), [a, b], check_sparse_nnz=True)

    def test_shape(sparse_dims, nnz, with_shape):
        a = self._gen_sparse(sparse_dims, nnz, with_shape, dtype, device, coalesced)[0].requires_grad_(True)
        b = self._gen_sparse(sparse_dims, nnz, with_shape, dtype, device, coalesced)[0].requires_grad_(True)

        self.assertEqual((a * b).to_dense(), a.to_dense() * b.to_dense())
        gradcheck(lambda x, y: (x * y).to_dense(), [a, b], check_sparse_nnz=True)
        # Issues with 0-dim indices/values
        gradcheck(lambda x, y: torch.sparse.sum(x * y).to_dense(), [a, b], check_sparse_nnz=True)

    # TODO: Re-enable these
    # test_shape(2, 3, [2, 3, 4, 5])
    # test_shape(2, 3, [2, 2, 0])
@coalescedonoff
@dtypes(torch.double)
def test_dsmm(self, device, dtype, coalesced):
    """torch.dsmm (sparse @ dense -> dense) matches dense torch.mm."""
    def check(rows, inner, cols, nnz):
        sp = self._gen_sparse(2, nnz, [rows, inner], dtype, device, coalesced)[0]
        dense = self.randn(inner, cols, dtype=dtype, device=device)
        result = torch.dsmm(sp, dense)
        reference = torch.mm(self.safeToDense(sp), dense)
        self.assertEqual(result, reference)

    # Regular, large, and zero-sized shapes (with and without nnz).
    for args in [(7, 5, 3, 20), (1000, 100, 100, 20), (3000, 64, 300, 20),
                 (0, 100, 100, 0), (1000, 0, 100, 0), (1000, 100, 0, 0),
                 (1000, 100, 0, 20)]:
        check(*args)
@coalescedonoff
@dtypes(torch.double)
def test_hsmm(self, device, dtype, coalesced):
    """torch.hsmm (sparse @ dense -> hybrid sparse) matches dense torch.mm."""
    def check(rows, inner, cols, nnz):
        sp = self._gen_sparse(2, nnz, [rows, inner], dtype, device, coalesced)[0]
        dense = self.randn(inner, cols, dtype=dtype, device=device)
        result = torch.hsmm(sp, dense)
        reference = torch.mm(self.safeToDense(sp), dense)
        self.assertEqual(result.to_dense(), reference)

    # Regular, large, and zero-sized shapes (with and without nnz).
    for args in [(7, 5, 3, 20), (1000, 100, 100, 20), (3000, 64, 300, 20),
                 (0, 100, 100, 0), (1000, 0, 100, 0), (1000, 100, 0, 0),
                 (1000, 100, 0, 20)]:
        check(*args)
@coalescedonoff
@dtypes(torch.double)
def test_spadd(self, device, dtype, coalesced):
    """torch.add(dense, sparse, alpha=r) == dense + r * sparse.to_dense().

    Covers contiguous and non-contiguous dense operands, sparse tensors whose
    indices/values tensors are non-contiguous, and hybrid layouts.
    """
    def _test_spadd_shape(nnz, shape_i, shape_v=None):
        shape = shape_i + (shape_v or [])
        x, _, _ = self._gen_sparse(len(shape_i), nnz, shape, dtype, device, coalesced)
        y = self.randn(*shape, dtype=dtype, device=device)
        r = random.random()

        res = torch.add(y, x, alpha=r)
        expected = y + r * self.safeToDense(x)

        self.assertEqual(res, expected)

        # Non contiguous dense tensor: allocate with first/last dims swapped,
        # then transpose back so the logical shape matches but strides don't.
        s = list(shape)
        s[0] = shape[-1]
        s[-1] = shape[0]
        y = self.randn(*s, dtype=dtype, device=device)
        y.transpose_(0, len(s) - 1)
        r = random.random()

        res = torch.add(y, x, alpha=r)
        expected = y + r * self.safeToDense(x)

        self.assertEqual(res, expected)

        x, i, v = self._gen_sparse(len(shape_i), nnz, shape, dtype, device, coalesced)
        nnz = i.size(1)

        # NOTE: the following three cases reuse the non-contiguous `y` and the
        # last `r` from above.
        # Non contiguous sparse indices tensor
        x_ = self.sparse_tensor(i[:, ::2], v[:(nnz + 1) // 2], x.shape, dtype=dtype, device=device)
        res = torch.add(y, x_, alpha=r)
        expected = y + r * self.safeToDense(x_)
        self.assertEqual(res, expected)

        # Non contiguous sparse values tensor
        x_ = self.sparse_tensor(i[:, :(nnz + 1) // 2], v[::2], x.shape, dtype=dtype, device=device)
        res = torch.add(y, x_, alpha=r)
        expected = y + r * self.safeToDense(x_)
        self.assertEqual(res, expected)

        # Non contiguous sparse indices and values tensors
        x_ = self.sparse_tensor(i[:, 1::2], v[1::2], x.shape, dtype=dtype, device=device)
        res = torch.add(y, x_, alpha=r)
        expected = y + r * self.safeToDense(x_)
        self.assertEqual(res, expected)

    def _test_spadd():
        _test_spadd_shape(10, [5, 6])
        _test_spadd_shape(10, [10, 10, 10])
        _test_spadd_shape(10, [50, 30, 20])
        _test_spadd_shape(10, [5, 5, 5, 5, 5, 5])
        _test_spadd_shape(0, [0, 30, 20])
        _test_spadd_shape(0, [50, 0, 20])
        _test_spadd_shape(0, [50, 30, 0])

    def _test_spadd_hybrid():
        _test_spadd_shape(10, [5, 6], [2, 3])
        _test_spadd_shape(10, [10, 10, 10], [3])
        _test_spadd_shape(10, [50, 30, 20], [2])
        _test_spadd_shape(10, [5, 5, 5, 5, 5, 5], [2])
        _test_spadd_shape(0, [0, 30, 20], [2, 0])
        _test_spadd_shape(0, [50, 0, 20], [2, 0])
        _test_spadd_shape(0, [50, 30, 0], [2, 0])
        _test_spadd_shape(10, [50, 30, 20], [2, 0])

    _test_spadd()
    _test_spadd_hybrid()
@onlyCUDA
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_sparse_add_out_bfloat16(self, device, dtype, coalesced):
    """Sparse add computed in bfloat16 stays close to the float32 result."""
    lhs = self._gen_sparse(3, 5, 10, dtype, device, coalesced)[0]
    rhs = self._gen_sparse(3, 5, 10, dtype, device, coalesced)[0]
    lhs = lhs.float().cuda()
    rhs = rhs.float().cuda()

    # fp32 reference
    ref_fp32 = torch.add(lhs, rhs)

    # Same addition in bfloat16, converted back for comparison.
    sum_bf16 = torch.add(lhs.bfloat16(), rhs.bfloat16()).float()
    self.assertEqual(ref_fp32, sum_bf16, atol=1e-2, rtol=0)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_norm(self, device, dtype, coalesced):
    """Full-reduction norm of a sparse tensor equals the norm of its coalesced values."""
    def check(sparse_dims, nnz, with_size):
        t, _, _ = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
        self.assertEqual(t.norm(), t.coalesce()._values().norm())

    check(3, 10, 100)
    check(4, 10, [100, 100, 100, 5, 5, 5, 0])
    check(4, 0, [0, 0, 100, 5, 5, 5, 0])

    # Unsupported argument combinations must fail with a clear message.
    unsupported = [
        ({'keepdim': True},
         RuntimeError, r'norm_sparse currently does not support keepdim=True'),
        ({'dim': 0},
         RuntimeError, r'norm_sparse currently only supports full reductions'),
        ({'dtype': torch.double, 'p': 'fro'},
         ValueError, r'dtype argument is not supported in frobenius norm'),
        ({'dtype': torch.double, 'p': 0},
         RuntimeError, r"norm_sparse currently does not support 'dtype' argument"),
    ]
    t = self._gen_sparse(3, 10, 100, dtype, device, coalesced)[0]
    for kwargs, exc_type, message in unsupported:
        with self.assertRaisesRegex(exc_type, message):
            t.norm(**kwargs)
@coalescedonoff
@dtypes(torch.double)
@unittest.skipIf(TEST_WITH_CROSSREF, "fallback triggers cuda device error")
def test_sparse_sum(self, device, dtype, coalesced):
    """torch.sparse.sum (full and per-dim) matches dense sum and is differentiable."""
    def run_tests(S, td=None):
        # Dense reference with its own grad graph.
        D = S.coalesce().to_dense().detach().requires_grad_(True)
        if td is None:
            # Full reduction.
            S_sum = torch.sparse.sum(S)
            D_sum = D.sum()
            self.assertEqual(S_sum.item(), D_sum.item())

            def fn(S):
                res = torch.sparse.sum(S)
                if res.is_sparse:
                    res = res.to_dense()
                return res
            gradcheck(fn, (S,), check_sparse_nnz=True)
        else:
            # Reduction over the dims in `td`; result may be sparse or dense.
            S_sum = torch.sparse.sum(S, td)
            D_sum = D.sum(td)
            self.assertEqual(S_sum.to_dense() if S_sum.is_sparse else S_sum, D_sum)

            def fn(S):
                res = torch.sparse.sum(S, td)
                if res.is_sparse:
                    res = res.to_dense()
                return res
            gradcheck(fn, (S,), check_sparse_nnz=True)

    nnz = 10
    sparse_dims = 2
    with_size = [5, 5, 1, 4]  # use a dense dim = 1 to test for squeeze
    # All non-empty subsets of dims, sizes 1..4.
    test_dims = []
    for i in range(1, 5):
        test_dims += itertools.combinations(range(len(with_size)), i)

    # https://github.com/pytorch/pytorch/issues/16501
    x = torch.tensor([[1., 0., 0., 1.],
                      [0., 1., 0., 0.],
                      [0., 1., 1., 0.],
                      [0., 1., 0., 2.]], dtype=dtype, device=device).to_sparse()
    self.assertEqual(torch.sparse.sum(x, dim=0), torch.sparse.sum(x, dim=-2))
    self.assertEqual(torch.sum(x.to_dense(), dim=0), torch.sparse.sum(x, dim=0).to_dense())

    # not support SparseTensor.sum()
    S = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
    self.assertRaises(RuntimeError, lambda: S.sum())

    # dim out of range
    self.assertRaises(IndexError, lambda: torch.sparse.sum(S, 5))

    # dim 0 appears multiple times in the list of dims
    self.assertRaises(RuntimeError, lambda: torch.sparse.sum(S, [0, 0]))

    # sum an empty tensor
    empty_S = torch.sparse_coo_tensor(size=with_size, dtype=dtype, device=device)
    self.assertEqual(torch.sparse.sum(empty_S, [0]).to_dense(), torch.sum(empty_S.to_dense(), [0]))
    self.assertEqual(torch.sparse.sum(empty_S), torch.tensor(0, dtype=dtype, device=device))
    empty_S.requires_grad_(True)
    empty_S_sum = torch.sparse.sum(empty_S)
    empty_S_sum.backward()
    self.assertEqual(empty_S.grad.to_dense(), empty_S.clone().detach().to_dense())

    # test values().sum()
    S = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
    run_tests(S.requires_grad_(True))

    for test_dim in test_dims:
        S = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
        run_tests(S.requires_grad_(True), test_dim)
def _test_basic_ops_shape(self, nnz_x1, nnz_x2, shape_i, shape_v, dtype, device, coalesced):
    """Check elementwise ops (+, -, *, /, //, **, zero_, coalesce) against dense.

    shape_i are the sparse dims, shape_v the (possibly empty) dense dims.
    Each op is exercised both out-of-place and in-place.
    """
    shape = shape_i + (shape_v)
    x1, _, _ = self._gen_sparse(len(shape_i), nnz_x1, shape, dtype, device, coalesced)
    x2, _, _ = self._gen_sparse(len(shape_i), nnz_x2, shape, dtype, device, coalesced)

    y1 = x1 + x2
    y2 = x1.clone()
    y2.add_(x2)
    expected = self.safeToDense(x1) + self.safeToDense(x2)
    self.assertEqual(self.safeToDense(y1), expected)
    self.assertEqual(self.safeToDense(y2), expected)

    y1 = x1 - x2
    y2 = x1.clone()
    y2.sub_(x2)
    expected = self.safeToDense(x1) - self.safeToDense(x2)
    self.assertEqual(self.safeToDense(y1), expected)
    self.assertEqual(self.safeToDense(y2), expected)

    y1 = x1 * x2
    y2 = x1.clone()
    y2.mul_(x2)
    expected = self.safeToDense(x1) * self.safeToDense(x2)
    self.assertEqual(self.safeToDense(y1), expected)
    self.assertEqual(self.safeToDense(y2), expected)

    # Scalar operands.
    y1 = x1 * 37.5
    y2 = x1.clone()
    y2.mul_(37.5)
    expected = self.safeToDense(x1) * 37.5
    self.assertEqual(self.safeToDense(y1), expected)
    self.assertEqual(self.safeToDense(y2), expected)

    y1 = x1 / 37.5
    y2 = x1.clone()
    y2.div_(37.5)
    expected = self.safeToDense(x1) / 37.5
    self.assertEqual(self.safeToDense(y1), expected)
    self.assertEqual(self.safeToDense(y2), expected)

    y1 = x1 // 37.5
    y2 = x1.clone()
    y2.floor_divide_(37.5)
    expected = self.safeToDense(x1) // 37.5
    self.assertEqual(self.safeToDense(y1), expected)
    self.assertEqual(self.safeToDense(y2), expected)

    # TODO: add back inplace support
    y1 = x1 ** 2
    y2 = x1.clone()
    y2 = y2.pow(2)
    expected = self.safeToDense(x1) ** 2
    self.assertEqual(self.safeToDense(y1), expected)
    self.assertEqual(self.safeToDense(y2), expected)

    y = x1.clone()
    y.zero_()
    expected = torch.zeros(x1.size(), dtype=dtype, device=device)
    self.assertEqual(self.safeToDense(y), expected)

    self.assertEqual(x1.is_coalesced(), coalesced)
    y = x1.coalesce()
    z = x1.coalesce()
    self.assertEqual(x1.is_coalesced(), coalesced)
    self.assertTrue(y.is_coalesced())
    y._values().add_(1)
    if not x1.is_coalesced():
        # check that coalesce is out of place if the original tensor is not
        # coalesced.
        self.assertEqual(z._values() + 1, y._values())
    else:
        # check that coalesce is in-place if the original tensor is
        # coalesced.
        self.assertEqual(z._values(), y._values())
@coalescedonoff
@dtypes(torch.double)
def test_basic_ops(self, device, dtype, coalesced):
    """Elementwise arithmetic on sparse tensors, plain and hybrid layouts."""
    # (nnz_x1, nnz_x2, sparse shape) — dense part empty.
    plain_cases = [
        (9, 12, [5, 6]),
        (9, 12, [10, 10, 10]),
        (9, 12, [50, 30, 20]),
        (9, 12, [5, 5, 5, 5, 5, 5]),
        (0, 12, [10, 10, 10]),
        (9, 0, [10, 10, 10]),
        (0, 0, [10, 10, 10]),
        (0, 0, [10, 10, 0]),
        (0, 0, []),
    ]
    for nnz1, nnz2, shape_i in plain_cases:
        self._test_basic_ops_shape(nnz1, nnz2, shape_i, [], dtype, device, coalesced)

    # (nnz_x1, nnz_x2, sparse shape, dense shape) — hybrid layouts.
    hybrid_cases = [
        (9, 12, [5, 6], [2, 3]),
        (9, 12, [10, 10, 10], [3]),
        (9, 12, [50, 30, 20], [2]),
        (9, 12, [5, 5, 5, 5, 5, 5], [2]),
        (0, 12, [10, 10, 10], [2]),
        (9, 0, [10, 10, 10], [2]),
        (0, 0, [10, 10, 10], [2]),
        (9, 12, [10, 10, 10], [2, 0]),
        (0, 12, [10, 10, 10], [2, 0]),
        (9, 0, [10, 10, 10], [2, 0]),
        (0, 0, [10, 10, 10], [2, 0]),
        (0, 0, [10, 10, 0], [2, 0]),
    ]
    for nnz1, nnz2, shape_i, shape_v in hybrid_cases:
        self._test_basic_ops_shape(nnz1, nnz2, shape_i, shape_v, dtype, device, coalesced)
@dtypes(torch.double, torch.cdouble)
def test_add_dense_sparse_mismatch(self, device, dtype):
    """Adding dense + sparse tensors of different sizes must raise."""
    def expect_size_error(dense_size, sparse_dims_shape, dense_dims_shape, sparse_size):
        dense = torch.zeros(dense_size, dtype=dtype, device=device)
        sparse = self.sparse_tensor(
            torch.zeros(sparse_dims_shape, dtype=torch.int64, device=device),
            torch.randn(dense_dims_shape, dtype=dtype, device=device),
            torch.Size(sparse_size))
        with self.assertRaisesRegex(
                RuntimeError,
                "add: expected 'self' and 'other' to have same size"):
            dense + sparse

    expect_size_error([3, 4], [1, 4], [4, 4, 4], [3, 4, 4])
    # Same mismatch with a zero-sized trailing dimension.
    expect_size_error([3, 4, 0], [1, 4], [4, 4, 4, 0], [3, 4, 4, 0])
@dtypes(torch.double, torch.cdouble)
def test_add_noncontiguous(self, device, dtype):
    """Sparse addition works when the values tensor is non-contiguous."""
    idx = self.index_tensor([[1, 2], [0, 2]], device=device)
    # expand() produces a broadcast (hence non-contiguous) values tensor.
    vals = torch.tensor([1.], dtype=dtype, device=device).expand(2, 3, 4, 5)
    sp = self.sparse_tensor(idx, vals, dtype=dtype, device=device)
    assert not sp._values().is_contiguous()
    total = sp + sp
    reference = self.safeToDense(sp) + self.safeToDense(sp)
    self.assertEqual(self.safeToDense(total), reference)
def _test_sparse_mask_shape(self, nnz_x1, nnz_x2, shape_i, shape_v, dtype, device, coalesced):
    """Sparse + sparse (out-of-place and in-place) agrees with the dense sum."""
    full_shape = shape_i + (shape_v or [])
    a, _, _ = self._gen_sparse(len(shape_i), nnz_x1, full_shape, dtype, device, coalesced)
    b, _, _ = self._gen_sparse(len(shape_i), nnz_x2, full_shape, dtype, device, coalesced)

    dense_sum = self.safeToDense(a) + self.safeToDense(b)
    self.assertEqual(self.safeToDense(a + b), dense_sum)

    inplace = a.clone()
    inplace.add_(b)
    self.assertEqual(self.safeToDense(inplace), dense_sum)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_sparse_mask(self, device, dtype, coalesced):
    """dense.sparse_mask(sparse) selects dense entries at the sparse pattern."""
    def _test_sparse_mask_fixed():
        i = self.index_tensor([
            [1, 3, 0, 4],
            [2, 1, 2, 3],
        ], device=device)
        v = torch.tensor([1, 2, 3, 4], dtype=dtype, device=device)
        x = self.sparse_tensor(i, v, torch.Size([5, 4]), dtype=dtype, device=device).coalesce()
        dense = torch.tensor([
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [9, 10, 11, 12],
            [13, 14, 15, 16],
            [17, 18, 19, 20],
        ], dtype=dtype, device=device)
        # Dense entries at indices (1,2), (3,1), (0,2), (4,3).
        exp_v = torch.tensor([7, 14, 3, 20], dtype=dtype, device=device)
        res = dense.sparse_mask(x)
        expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4]), dtype=dtype, device=device)
        self.assertEqual(res.coalesce(), expected.coalesce())

        # Same pattern with an empty trailing dense dimension.
        i = self.index_tensor([
            [1, 3, 0, 4],
            [2, 1, 2, 3],
        ], device=device)
        v = torch.empty([4, 0], dtype=dtype, device=device)
        x = self.sparse_tensor(i, v, torch.Size([5, 4, 0])).coalesce()
        dense = torch.empty([5, 4, 0], dtype=dtype, device=device)
        exp_v = torch.empty([4, 0], dtype=dtype, device=device)
        res = dense.sparse_mask(x)
        expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4, 0]), dtype=dtype, device=device)
        self.assertEqual(res.coalesce(), expected.coalesce())

    _test_sparse_mask_fixed()

    self._test_sparse_mask_shape(9, 12, [5, 6], [], dtype, device, coalesced)
    self._test_sparse_mask_shape(9, 12, [10, 10, 10], [], dtype, device, coalesced)
    self._test_sparse_mask_shape(9, 12, [50, 30, 20], [], dtype, device, coalesced)
    self._test_sparse_mask_shape(9, 12, [5, 5, 5, 5, 5, 5], [], dtype, device, coalesced)
    self._test_sparse_mask_shape(0, 12, [10, 10, 10], [], dtype, device, coalesced)
    self._test_sparse_mask_shape(9, 0, [10, 10, 10], [], dtype, device, coalesced)
    self._test_sparse_mask_shape(0, 0, [10, 10, 10], [], dtype, device, coalesced)
    self._test_sparse_mask_shape(0, 0, [10, 10, 0], [], dtype, device, coalesced)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_sparse_mask_hybrid(self, device, dtype, coalesced):
    """sparse_mask with hybrid (sparse + dense dims) masks selects dense slices."""
    def _test_sparse_mask_hybrid_fixed():
        i = self.index_tensor([
            [1, 3, 0, 4],
            [2, 1, 2, 3],
        ])
        v = torch.tensor([[1, 2], [2, 3], [3, 4], [4, 5]])
        # TODO: This is also testing that, if coalesce is a no-op,
        # the indices don't get permuted. I don't know if we actually
        # want to give this invariant.
        x = self.sparse_tensor(i, v, torch.Size([5, 4, 2])).coalesce()
        dense = torch.tensor([
            [[1, 3], [2, 2], [3, 3], [4, 2]],
            [[5, 7], [6, 7], [7, 9], [8, 9]],
            [[9, 2], [10, 4], [11, 1], [12, 3]],
            [[13, 5], [14, 1], [15, 1], [16, 6]],
            [[17, 7], [18, 2], [19, 7], [20, 1]],
        ])
        res = dense.sparse_mask(x)
        # Dense 2-element slices at indices (1,2), (3,1), (0,2), (4,3).
        exp_v = torch.tensor([[7, 9], [14, 1], [3, 3], [20, 1]])
        expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4, 2]))
        self.assertEqual(res.coalesce(), expected.coalesce())

        # Same with an empty trailing dense dimension.
        i = self.index_tensor([
            [1, 3, 0, 4],
            [2, 1, 2, 3],
        ])
        v = torch.empty(4, 2, 0)
        x = self.sparse_tensor(i, v, torch.Size([5, 4, 2, 0])).coalesce()
        dense = torch.empty(5, 4, 2, 0)
        res = dense.sparse_mask(x)
        exp_v = torch.empty(4, 2, 0)
        expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4, 2, 0]))
        self.assertEqual(res.coalesce(), expected.coalesce())

    _test_sparse_mask_hybrid_fixed()

    self._test_sparse_mask_shape(9, 12, [5, 6], [2, 3], dtype, device, coalesced)
    self._test_sparse_mask_shape(9, 12, [10, 10, 10], [3], dtype, device, coalesced)
    self._test_sparse_mask_shape(9, 12, [50, 30, 20], [2], dtype, device, coalesced)
    self._test_sparse_mask_shape(9, 12, [5, 5, 5, 5, 5, 5], [2], dtype, device, coalesced)
    self._test_sparse_mask_shape(0, 12, [10, 10, 10], [2], dtype, device, coalesced)
    self._test_sparse_mask_shape(9, 0, [10, 10, 10], [2], dtype, device, coalesced)
    self._test_sparse_mask_shape(0, 0, [10, 10, 10], [2], dtype, device, coalesced)
    self._test_sparse_mask_shape(9, 12, [10, 10, 10], [2, 0], dtype, device, coalesced)
    self._test_sparse_mask_shape(0, 12, [10, 10, 10], [2, 0], dtype, device, coalesced)
    self._test_sparse_mask_shape(9, 0, [10, 10, 10], [2, 0], dtype, device, coalesced)
    self._test_sparse_mask_shape(0, 0, [10, 10, 10], [2, 0], dtype, device, coalesced)
    self._test_sparse_mask_shape(0, 0, [10, 10, 0], [2, 0], dtype, device, coalesced)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_zeros(self, device, dtype, coalesced):
    """torch.zeros(..., out=sparse) resizes `out` to an empty sparse tensor."""
    def _test_zeros(nnzs, shape, out_shape_i, out_shape_v=None):
        out_shape = out_shape_i + (out_shape_v or [])
        for nnz in nnzs:
            out, _, _ = self._gen_sparse(len(out_shape_i), nnz, out_shape, dtype, device, coalesced)
            torch.zeros(*shape, out=out, dtype=dtype, device=device)
            self.assertEqual(tuple(out.size()), tuple(shape))
            self.assertTrue(out._indices().numel() == out._values().numel() == 0)
            self.assertEqual(out._nnz(), 0)
            # The result has all sparse dims and no dense dims.
            self.assertEqual(out.sparse_dim(), len(shape))
            self.assertEqual(out.dense_dim(), 0)

    def test_shape(i_shapes, v_shapes, shape, nnzs):
        # Try every prefix of the sparse/dense shape lists for `out`.
        for i_dim in range(1, len(i_shapes) + 1):
            for v_dim in range(len(v_shapes) + 1):
                _test_zeros(nnzs, shape, i_shapes[:i_dim], v_shapes[:v_dim])

    test_shape([2, 3, 4], [3, 4, 5, 6], [2, 3, 4], [9, 12])
    test_shape([0, 3, 4], [3, 4, 5, 6], [2, 3, 4], [0])
    test_shape([2, 3, 4], [0, 4, 5, 6], [2, 3, 4], [9, 12])
    test_shape([2, 3, 4], [3, 4, 5, 6], [2, 3, 0], [9, 12])
    test_shape([0, 3, 4], [3, 4, 5, 6], [2, 3, 0], [0])
    test_shape([2, 3, 4], [0, 4, 5, 6], [2, 3, 0], [9, 12])
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_zeros_like(self, device, dtype, coalesced):
    """torch.zeros_like on sparse input keeps the shape and sparse/dense split."""
    def _test_zeros_like(nnzs, template_shape_i, template_shape_v=None):
        template_shape_v = template_shape_v or []
        template_shape = template_shape_i + template_shape_v
        for nnz in nnzs:
            t, _, _ = self._gen_sparse(len(template_shape_i), nnz, template_shape, dtype, device, coalesced)
            res = torch.zeros_like(t)
            self.assertEqual(tuple(res.size()), tuple(template_shape))
            self.assertTrue(res._indices().numel() == res._values().numel() == 0)
            self.assertEqual(res._nnz(), 0)
            self.assertEqual(res.sparse_dim(), len(template_shape_i))
            self.assertEqual(res.dense_dim(), len(template_shape_v))

    def test_shape(i_shapes, v_shapes, nnzs):
        # Try every prefix of the sparse/dense shape lists.
        for i_dim in range(1, len(i_shapes) + 1):
            for v_dim in range(len(v_shapes) + 1):
                _test_zeros_like(nnzs, i_shapes[:i_dim], v_shapes[:v_dim])

    test_shape([2, 3, 4], [3, 4, 5, 6], [9, 12])
    test_shape([0, 3, 4], [3, 4, 5, 6], [0])
    test_shape([2, 3, 4], [0, 4, 5, 6], [9, 12])
    test_shape([2, 3, 4], [3, 4, 5, 6], [9, 12])
    test_shape([0, 3, 4], [3, 4, 5, 6], [0])
    test_shape([2, 3, 4], [0, 4, 5, 6], [9, 12])

    # Sparse tensors reject memory_format unless layout is overridden to strided.
    sparse_tensor, _, _ = self._gen_sparse(len([2, 3]), 9, [2, 3] + [5, 6], dtype, device, coalesced)
    data = (sparse_tensor, sparse_tensor, sparse_tensor, sparse_tensor.unsqueeze(0))
    mem_formats = [torch.channels_last, torch.contiguous_format, torch.preserve_format, torch.channels_last_3d]
    for x, mem_format in zip(data, mem_formats):
        with self.assertRaisesRegex(RuntimeError, "memory format option is only supported by strided tensors"):
            result = torch.zeros_like(x, memory_format=mem_format)

        result = torch.zeros_like(x, layout=torch.strided, memory_format=mem_format)
        self.assertTrue(result.layout == torch.strided)

    # Dense input with layout=torch.sparse_coo yields an empty sparse result.
    dense_tensor = sparse_tensor.to_dense()
    result = torch.zeros_like(dense_tensor, layout=torch.sparse_coo)
    self.assertEqual(dense_tensor.shape, result.shape)
    self.assertEqual(result.layout, torch.sparse_coo)

    sparse_zeros = torch.zeros(dense_tensor.shape, layout=torch.sparse_coo)
    self.assertEqual(result._indices().shape, sparse_zeros._indices().shape)
    self.assertEqual(result._values().shape, sparse_zeros._values().shape)
def _assert_sparse_invars(self, t):
    """Assert the structural invariants of a COO sparse tensor `t`."""
    # SparseTensor has the following invariants:
    #   - sparse_dim + dense_dim = len(SparseTensor.shape)
    #   - SparseTensor._indices().shape = (sparse_dim, nnz)
    #   - SparseTensor._values().shape = (nnz, SparseTensor.shape[sparse_dim:])
    self.assertEqual(t.sparse_dim() + t.dense_dim(), len(t.shape))
    self.assertEqual(tuple(t._indices().shape), (t.sparse_dim(), t._nnz()))
    self.assertEqual(tuple(t._values().shape), (t._nnz(), ) + t.shape[t.sparse_dim():])
def _test_empty_like(self, sparse_tensor, dtype, device, coalesced):
    """empty_like on a sparse tensor preserves metadata; layout/memory_format rules hold."""
    result = torch.empty_like(sparse_tensor)
    self.assertTrue(result.is_sparse)
    self._assert_sparse_invars(result)
    self.assertEqual(result.shape, sparse_tensor.shape)
    self.assertEqual(result.dtype, sparse_tensor.dtype)
    self.assertEqual(result.device, sparse_tensor.device)
    self.assertEqual(result.sparse_dim(), sparse_tensor.sparse_dim())
    self.assertEqual(result.dense_dim(), sparse_tensor.dense_dim())

    # Sparse tensors reject memory_format unless layout is overridden to strided.
    sparse_tensor, _, _ = self._gen_sparse(len([2, 3]), 9, [2, 3] + [5, 6], dtype, device, coalesced)
    data = (sparse_tensor, sparse_tensor, sparse_tensor, sparse_tensor.unsqueeze(0))
    mem_formats = [torch.channels_last, torch.contiguous_format, torch.preserve_format, torch.channels_last_3d]
    for x, mem_format in zip(data, mem_formats):
        with self.assertRaisesRegex(RuntimeError, "memory format option is only supported by strided tensors"):
            result = torch.empty_like(x, memory_format=mem_format)

        result = torch.empty_like(x, layout=torch.strided, memory_format=mem_format)
        self.assertTrue(result.layout == torch.strided)

    # Dense input with a sparse layout is not implemented for empty_like.
    with self.assertRaisesRegex(
        RuntimeError, r"Could not run 'aten::empty_strided' with arguments from the 'Sparse(CPU|CUDA)' backend"
    ):
        dense_tensor = sparse_tensor.to_dense()
        result = torch.empty_like(dense_tensor, layout=torch.sparse_coo)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_empty_like(self, device, dtype, coalesced):
    """empty_like over coalesced/uncoalesced, plain/hybrid, and empty sparse inputs."""
    # tests https://github.com/pytorch/pytorch/issues/43699
    if coalesced:
        input_coalesced = torch.sparse_coo_tensor(
            indices=torch.tensor([[0, 1, 2]]),
            values=torch.tensor([3.0, -4.0, 5.0]),
            size=[3, ],
            dtype=dtype,
            device=device
        ).coalesce()
        self._test_empty_like(input_coalesced, dtype, device, coalesced)

        # hybrid sparse input
        input_coalesced = torch.sparse_coo_tensor(
            indices=torch.tensor([[1, 3], [2, 4]]),
            values=torch.tensor([[-1.0, 3.0], [-5.0, 7.0]]),
            size=[4, 5, 2],
            dtype=dtype,
            device=device
        ).coalesce()
        self._test_empty_like(input_coalesced, dtype, device, coalesced)

    if not coalesced:
        # test uncoalesced input (duplicate indices)
        input_uncoalesced = torch.sparse_coo_tensor(
            indices=torch.tensor([[0], [1], [2], [0], [1], [2]]).transpose(1, 0),
            values=torch.tensor([2.0, -3.0, -4.0, 1.0, -1.0, 1.5]),
            size=[3, ],
            dtype=dtype,
            device=device
        )
        self._test_empty_like(input_uncoalesced, dtype, device, coalesced)

        # test on empty sparse tensor
        input_uncoalesced = torch.sparse_coo_tensor(
            indices=torch.zeros([2, 0]),
            values=torch.zeros([0, 5, 5, 5, 5, 5, 5, 0]),
            size=[0, 0, 5, 5, 5, 5, 5, 5, 0],
            dtype=dtype,
            device=device
        )
        self._test_empty_like(input_uncoalesced, dtype, device, coalesced)
def _test_narrow(self, input, narrow_args):
    # narrow_copy on a sparse tensor must match dense narrow on the same args.
    dense_reference = input.to_dense().narrow(*narrow_args)
    sparse_result = input.narrow_copy(*narrow_args)
    self.assertEqual(dense_reference, sparse_result.to_dense())
def _all_narrow_combs(self, shape):
for dim, dim_sz in enumerate(shape):
for start in range(dim_sz):
for length in range(dim_sz - start):
yield [dim, start, length]
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_narrow(self, device, dtype, coalesced):
    """narrow_copy on sparse tensors matches dense narrow and validates args."""
    shape = [3, 3, 4, 2]
    input, _, _ = self._gen_sparse(4, 19, shape, dtype, device, coalesced)
    for narrow_args in self._all_narrow_combs(shape):
        self._test_narrow(input, narrow_args)

    self.assertRaises(RuntimeError, lambda: input.narrow_copy(-1, 0, 3))  # dim < 0
    self.assertRaises(RuntimeError, lambda: input.narrow_copy(10, 0, 3))  # dim > input.dim()
    self.assertRaises(RuntimeError, lambda: input.narrow_copy(0, shape[0] + 1, 3))  # start > size of dim
    self.assertRaises(RuntimeError, lambda: input.narrow_copy(0, 2, shape[0]))  # start+length > size of dim

    # Hybrid tensor: only 2 of the 4 dims are sparse.
    with_dense, _, _ = self._gen_sparse(2, 7, shape, dtype, device, coalesced)
    for narrow_args in self._all_narrow_combs(shape):
        self._test_narrow(with_dense, narrow_args)

    self.assertRaises(RuntimeError, lambda: with_dense.narrow_copy(10, 0, 3))  # dim > sparseDim + denseDim
def _test_log1p_tensor(self, sparse_tensor, coalesced):
    """Compare sparse log1p/log1p_ with the dense result; check error cases."""
    def is_integral(dtype):
        return dtype in integral_types()

    dense_tensor = sparse_tensor.to_dense()
    expected_output = dense_tensor.log1p()
    is_integral_dtype = is_integral(sparse_tensor.dtype)
    self.assertEqual(expected_output, sparse_tensor.log1p().to_dense())
    if is_integral_dtype:
        # In-place log1p on an integral tensor would need an int->float cast.
        with self.assertRaisesRegex(RuntimeError, "result type .* can't be cast to"):
            sparse_tensor.coalesce().log1p_()
    else:
        self.assertEqual(expected_output, sparse_tensor.coalesce().log1p_().to_dense())

    if not coalesced:
        # test in-place op on uncoalesced input
        with self.assertRaisesRegex(RuntimeError, "log1p_ requires coalesced input"):
            sparse_tensor.log1p_()

    if not is_integral_dtype:
        sparse_tensor.requires_grad_()
        self.assertTrue(sparse_tensor.requires_grad)

        # test autograd
        x = sparse_tensor.clone()
        y = sparse_tensor.log1p()
        with self.assertRaisesRegex(RuntimeError, "log1p of a sparse tensor is made to be non-differentiable"):
            y.backward(x)
    else:
        with self.assertRaisesRegex(RuntimeError, "only Tensors of floating point dtype can require gradients"):
            sparse_tensor.requires_grad_()
@coalescedonoff
@dtypes(*all_types())
def test_log1p(self, device, dtype, coalesced):
    """log1p over coalesced/uncoalesced, hybrid, and empty sparse inputs."""
    if coalesced:
        input_coalesced = torch.sparse_coo_tensor(
            indices=torch.tensor([[0], [1], [2]]).transpose(1, 0),
            values=torch.tensor([3.0, 4.0, 5.0]),
            size=[3, ],
            device=device,
            dtype=dtype
        ).coalesce()
        self._test_log1p_tensor(input_coalesced, coalesced)

        # hybrid sparse input
        input_coalesced = torch.sparse_coo_tensor(
            indices=torch.tensor([[1, 3], [2, 4]]),
            values=torch.tensor([[1.0, 3.0], [5.0, 7.0]]),
            size=[4, 5, 2],
            device=device,
            dtype=dtype
        ).coalesce()
        self._test_log1p_tensor(input_coalesced, coalesced)

    if not coalesced:
        # test uncoalesced input (duplicate indices)
        input_uncoalesced = torch.sparse_coo_tensor(
            indices=torch.tensor([[0], [1], [2], [0], [1], [2]]).transpose(1, 0),
            values=torch.tensor([2.0, 3.0, 4.0, 1.0, 1.0, 1.0]),
            size=[3, ],
            device=device,
            dtype=dtype
        )
        self._test_log1p_tensor(input_uncoalesced, coalesced)

        # test on empty sparse tensor
        input_uncoalesced = torch.sparse_coo_tensor(
            indices=torch.zeros([2, 0]),
            values=torch.zeros([0, 5, 5, 5, 5, 5, 5, 0]),
            size=[0, 0, 5, 5, 5, 5, 5, 5, 0],
            device=device,
            dtype=dtype
        )
        self._test_log1p_tensor(input_uncoalesced, coalesced)
def _test_neg_negative(self, sparse_tensor):
    """All negation entry points (op, method, in-place, out=) match dense neg."""
    dense_tensor = sparse_tensor.to_dense()
    expected_output = dense_tensor.neg()

    ops = (
        torch.neg, torch.Tensor.neg, torch.Tensor.neg_,
        torch.negative, torch.Tensor.negative, torch.Tensor.negative_,
        operator.neg
    )
    for op in ops:
        # Clone so the in-place variants don't corrupt the shared input.
        sparse_tensor_copy = sparse_tensor.clone()
        self.assertEqual(expected_output, op(sparse_tensor_copy).to_dense())

        # The functional forms also support out=.
        if op in (torch.neg, torch.negative):
            sparse_tensor_out = torch.zeros_like(sparse_tensor)
            op(sparse_tensor, out=sparse_tensor_out)
            self.assertEqual(expected_output, sparse_tensor_out.to_dense())
    @coalescedonoff
    @dtypes(torch.double, torch.cdouble)
    def test_neg_negative(self, device, dtype, coalesced):
        """Run _test_neg_negative over representative sparse COO inputs.

        Coalesced path: a 1-D tensor and a hybrid (sparse + dense dims)
        tensor, with mixed-sign values. Uncoalesced path: duplicate
        indices and an empty sparse tensor.
        """
        if coalesced:
            input_coalesced = torch.sparse_coo_tensor(
                indices=torch.tensor([[0, 1, 2]]),
                values=torch.tensor([3.0, -4.0, 5.0]),
                size=[3, ],
                dtype=dtype,
                device=device
            ).coalesce()
            self._test_neg_negative(input_coalesced)

            # hybrid sparse input (one sparse dim, one dense dim)
            input_coalesced = torch.sparse_coo_tensor(
                indices=torch.tensor([[1, 3], [2, 4]]),
                values=torch.tensor([[-1.0, 3.0], [-5.0, 7.0]]),
                size=[4, 5, 2],
                dtype=dtype,
                device=device
            ).coalesce()
            self._test_neg_negative(input_coalesced)

        if not coalesced:
            # test uncoalesced input (each index appears twice)
            input_uncoalesced = torch.sparse_coo_tensor(
                indices=torch.tensor([[0], [1], [2], [0], [1], [2]]).transpose(1, 0),
                values=torch.tensor([2.0, -3.0, -4.0, 1.0, -1.0, 1.5]),
                size=[3, ],
                dtype=dtype,
                device=device
            )
            self._test_neg_negative(input_uncoalesced)

            # test on empty sparse tensor (nnz == 0)
            input_uncoalesced = torch.sparse_coo_tensor(
                indices=torch.zeros([2, 0]),
                values=torch.zeros([0, 5, 5, 5, 5, 5, 5, 0]),
                size=[0, 0, 5, 5, 5, 5, 5, 5, 0],
                dtype=dtype,
                device=device
            )
            self._test_neg_negative(input_uncoalesced)
    def _test_asin_arcsin(self, sparse_tensor, coalesced):
        """Check asin/arcsin variants on `sparse_tensor` against the dense result.

        Floating inputs: out-of-place, `out=`, and in-place variants must all
        match `dense.asin()`. Integral inputs: asin promotes to floating, so
        writing into a same-dtype `out=` tensor or in-place must fail the
        result-type cast check. In-place variants additionally require
        coalesced input.
        """
        def is_integral(dtype):
            return dtype in integral_types()
        is_integral_dtype = is_integral(sparse_tensor.dtype)
        dense_tensor = sparse_tensor.to_dense()
        expected_output = dense_tensor.asin()

        ops = (
            torch.asin, torch.Tensor.asin,
            torch.arcsin, torch.Tensor.arcsin,
        )
        for op in ops:
            self.assertEqual(expected_output, op(sparse_tensor).to_dense())
            # Only the namespace-level functions accept `out=`.
            if op in (torch.asin, torch.arcsin):
                sparse_tensor_out = torch.zeros_like(sparse_tensor)
                if not is_integral_dtype:
                    op(sparse_tensor, out=sparse_tensor_out)
                    self.assertEqual(expected_output, sparse_tensor_out.to_dense())
                else:
                    # The promoted floating result cannot be cast back into
                    # the integral `out=` tensor.
                    with self.assertRaisesRegex(RuntimeError, "result type .* can't be cast to"):
                        op(sparse_tensor, out=sparse_tensor_out)

        for op in (torch.Tensor.asin_, torch.Tensor.arcsin_):
            if is_integral_dtype:
                # test coalesce on integral dtype tensor
                with self.assertRaisesRegex(RuntimeError, "result type .* can't be cast to"):
                    op(sparse_tensor.clone().coalesce()).to_dense()
            else:
                self.assertEqual(expected_output, op(sparse_tensor.clone().coalesce()).to_dense())
                if not coalesced:
                    # test in-place op on uncoalesced input
                    with self.assertRaisesRegex(RuntimeError, "asin_ requires coalesced input"):
                        op(sparse_tensor)
    @coalescedonoff
    @dtypes(*all_types())
    def test_asin_arcsin(self, device, dtype, coalesced):
        """Run _test_asin_arcsin over representative sparse COO inputs.

        Values are kept within asin's (-1, 1) domain. Coalesced path: 1-D
        and hybrid tensors; uncoalesced path: duplicate indices and an
        empty sparse tensor.
        """
        if coalesced:
            input_coalesced = torch.sparse_coo_tensor(
                indices=torch.tensor([[0, 1, 2, 3]]),
                values=torch.tensor([0.5, -0.5, 0.7, -0.7]),
                size=[4, ],
                dtype=dtype,
                device=device
            ).coalesce()
            self._test_asin_arcsin(input_coalesced, coalesced)

            # hybrid sparse input (one sparse dim, one dense dim)
            input_coalesced = torch.sparse_coo_tensor(
                indices=torch.tensor([[1, 3], [2, 4]]),
                values=torch.tensor([[-0.1, 0.24], [-0.44, 0.1]]),
                size=[4, 5, 2],
                dtype=dtype,
                device=device
            ).coalesce()
            self._test_asin_arcsin(input_coalesced, coalesced)

        if not coalesced:
            # test uncoalesced input (each index appears twice)
            input_uncoalesced = torch.sparse_coo_tensor(
                indices=torch.tensor([[0], [1], [2], [0], [1], [2]]).transpose(1, 0),
                values=torch.tensor([0.3, -0.3, -0.4, 0.3, -0.5, 0.15]),
                size=[3, ],
                dtype=dtype,
                device=device
            )
            self._test_asin_arcsin(input_uncoalesced, coalesced)

            # test on empty sparse tensor (nnz == 0)
            input_uncoalesced = torch.sparse_coo_tensor(
                indices=torch.zeros([2, 0]),
                values=torch.zeros([0, 5, 5, 5, 5, 5, 5, 0]),
                size=[0, 0, 5, 5, 5, 5, 5, 5, 0],
                dtype=dtype,
                device=device
            )
            self._test_asin_arcsin(input_uncoalesced, coalesced)
    @coalescedonoff
    @dtypes(torch.double)
    def test_mv(self, device, dtype, coalesced):
        """sparse (di, dj) matmul with a dk-element dense vector matches dense.

        When dk == dj the product is well-formed; mismatched dk doubles as
        the error-path check below.
        """
        def test_shape(di, dj, dk, nnz):
            x, _, _ = self._gen_sparse(2, nnz, [di, dj], dtype, device, coalesced)
            t = torch.randn(dk, dtype=dtype, device=device)
            res = x.matmul(t)
            expected = self.safeToDense(x).matmul(t)
            self.assertEqual(res, expected)

        test_shape(10, 100, 100, 20)
        test_shape(100, 1000, 1000, 20)
        test_shape(64, 10000, 10000, 20)
        # zero-sized / zero-nnz boundaries
        test_shape(0, 100, 100, 0)
        test_shape(10, 0, 0, 0)
        test_shape(10, 100, 100, 0)
        test_shape(10, 100, 100, 20)

        # vector length (10) does not match x's last dimension (100)
        with self.assertRaisesRegex(RuntimeError, r"mv: expected self\.size\(-1\) == vec\.size\(-1\)"):
            test_shape(10, 100, 10, 20)

        # mv requires a matrix and a vector, not two matrices
        with self.assertRaisesRegex(RuntimeError, "mv: two tensor dim should be 2 and 1"):
            x, _, _ = self._gen_sparse(2, 20, [10, 100], dtype, device, coalesced)
            y, _, _ = self._gen_sparse(2, 20, [10, 100], dtype, device, coalesced)
            res = x.mv(y)
    @dtypes(*floating_and_complex_types())
    def test_sparse_add_coalesce(self, device, dtype):
        """Adding tensors built from duplicate indices must not produce a
        result that falsely claims to be coalesced."""
        i = self.index_tensor([[1, 2, 1]], device=device)
        v = torch.tensor([3, 4, 5], dtype=dtype, device=device)
        x = self.sparse_tensor(i, v, torch.Size([3]))
        y = self.sparse_tensor(i, v, torch.Size([3]))
        z = x + y
        # NOTE(review): 2 is presumably the coalesced nnz of the [1, 2, 1]
        # index pattern — if z keeps more entries than that, it must not
        # report is_coalesced(). Confirm against coalesce() semantics.
        self.assertFalse(z._indices().numel() != 2 and z.is_coalesced())

        # same check with a zero-sized dense dimension on the values
        i = self.index_tensor([[1, 2, 1]], device=device)
        v = torch.empty([3, 0], dtype=dtype, device=device)
        x = self.sparse_tensor(i, v, torch.Size([3, 0]))
        y = self.sparse_tensor(i, v, torch.Size([3, 0]))
        z = x + y
        self.assertFalse(z._indices().numel() != 2 and z.is_coalesced())
@onlyCUDA
def test_storage_not_null(self):
x = torch.cuda.sparse.FloatTensor(2)
self.assertNotEqual(x.get_device(), -1)
x = torch.cuda.sparse.FloatTensor(2, 0)
self.assertNotEqual(x.get_device(), -1)
    @onlyCUDA
    @deviceCountAtLeast(2)
    def test_same_gpu(self, devices):
        """The tensor, its values, and its indices must all share one GPU."""
        def check_device(x, device_id):
            self.assertEqual(x.get_device(), device_id)
            self.assertEqual(x._values().get_device(), device_id)
            self.assertEqual(x._indices().get_device(), device_id)

        dev1, dev2 = devices[0], devices[1]

        # explicit device=1 with indices/values living on dev2
        i = self.index_tensor([[2]], device=dev2)
        v = torch.tensor([5], device=dev2)
        x = self.sparse_tensor(i, v, torch.Size([3]), device=1)
        check_device(x, 1)

        i = self.index_tensor([[2]], device=dev2)
        v = torch.empty(1, 0, device=dev2)
        x = self.sparse_tensor(i, v, torch.Size([3, 0]), device=1)
        check_device(x, 1)

        x = self.sparse_empty(3, device=1)
        check_device(x, 1)

        x = self.sparse_empty(3, 0, device=1)
        check_device(x, 1)

        # indices and values on different devices: legacy constructor rejects
        i = self.index_tensor([[2]], device=dev2)
        v = torch.tensor([5], device=dev1)
        # NB: non-legacy constructor allows this and moves indices
        self.assertRaises(RuntimeError, lambda: self.legacy_sparse_tensor(i, v, torch.Size([3])))

        i = self.index_tensor([[2]], device=dev2)
        v = torch.empty(1, 0, device=dev1)
        # NB: non-legacy constructor allows this and moves indices
        self.assertRaises(RuntimeError, lambda: self.legacy_sparse_tensor(i, v, torch.Size([3, 0])))
def _test_new_device(self, size, device=torch.cuda):
with torch.cuda.device(device):
x = torch.cuda.sparse.DoubleTensor(*size)
self.assertEqual(x.get_device(), device)
x1 = x.new()
x2 = x.new(2, 3)
self.assertEqual(x1.get_device(), device)
self.assertEqual(x2.get_device(), device)
@onlyCUDA
def test_new_device_single_gpu(self):
self._test_new_device((), 0)
self._test_new_device((30, 20), 0)
self._test_new_device((30, 20, 10), 0)
self._test_new_device((30, 20, 10, 0), 0)
@onlyCUDA
@unittest.skipIf(torch.cuda.device_count() < 2, "only one GPU detected")
def test_new_device_multi_gpu(self):
self._test_new_device((), 1)
self._test_new_device((30, 20), 1)
self._test_new_device((30, 20, 10), 1)
self._test_new_device((30, 20, 10, 0), 1)
    @coalescedonoff
    @dtypes(torch.double, torch.cdouble)
    def test_new(self, device, dtype, coalesced):
        """x.new(indices, values[, size]) reconstructs an equivalent tensor."""
        def test_shape(sparse_dims, nnz, with_size):
            x, indices, values = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
            if not x.is_cuda:
                # CUDA sparse tensors currently requires the size to be
                # specified if nDimV > 0
                out = x.new(indices, values).coalesce()
                x_c = x.coalesce()
                self.assertEqual((out.indices(), out.values()), (x_c.indices(), x_c.values()))
            self.assertEqual(x.new(indices, values, x.size()), x)

        test_shape(3, 10, 100)
        test_shape(3, 0, [100, 100, 0])
@onlyCPU # not really, but we only really want to run this once
@dtypes(torch.float64, torch.float32, torch.float16, torch.cfloat, torch.cdouble)
def test_factory(self, device, dtype):
for test_empty_tensor in [True, False]:
if test_empty_tensor:
default_size = torch.Size([1, 3, 0])
size = torch.Size([3, 3, 0])
else:
default_size = torch.Size([1, 3])
size = torch.Size([3, 3])
for include_size in [True, False]:
for use_tensor_idx in [True, False]:
for use_tensor_val in [True, False]:
for use_cuda in ([False] if not torch.cuda.is_available() else [True, False]):
# have to include size with cuda sparse tensors
include_size = include_size or use_cuda
long_dtype = torch.int64
device = torch.device('cpu') if not use_cuda else \
torch.device(torch.cuda.device_count() - 1)
indices = torch.tensor(([0], [2]), dtype=long_dtype) if use_tensor_idx else ([0], [2])
if test_empty_tensor:
values = torch.empty(1, 0).to(dtype)
else:
if use_tensor_val:
values = torch.tensor([1.], dtype=dtype)
else:
values = 1.
if include_size:
sparse_tensor = torch.sparse_coo_tensor(indices, values, size, dtype=dtype,
device=device, requires_grad=True)
else:
sparse_tensor = torch.sparse_coo_tensor(indices, values, dtype=dtype,
device=device, requires_grad=True)
self.assertEqual(indices, sparse_tensor._indices())
self.assertEqual(values, sparse_tensor._values())
self.assertEqual(size if include_size else default_size, sparse_tensor.size())
self.assertEqual(dtype, sparse_tensor.dtype)
if use_cuda:
self.assertEqual(device, sparse_tensor._values().device)
self.assertEqual(True, sparse_tensor.requires_grad)
    @dtypes(torch.double, torch.cdouble)
    def test_factory_size_check(self, device, dtype):
        """sparse_coo_tensor must reject sizes inconsistent with indices/values."""
        # size too small for the given indices
        indices = self.index_tensor([[1, 2],
                                    [0, 2]], device=device)
        values = torch.tensor([.5, .5], dtype=dtype, device=device)
        sizes = torch.Size([2, 3])
        with self.assertRaisesRegex(RuntimeError, "size is inconsistent with indices"):
            torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)

        # negative indices are rejected regardless of size
        indices.fill_(-1)
        with self.assertRaisesRegex(RuntimeError, "found negative index"):
            torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)

        # same size check with empty (zero-sized) values
        indices = self.index_tensor([[1, 2],
                                    [0, 2]], device=device)
        values = torch.empty([2, 1, 0], dtype=dtype, device=device)
        sizes = torch.Size([2, 3, 1, 0])
        with self.assertRaisesRegex(RuntimeError, "size is inconsistent with indices"):
            torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)

        # zero-sized sparse dims cannot hold nonzero indices
        indices = self.index_tensor([[1, 2],
                                    [0, 2]], device=device)
        values = torch.empty([2, 2, 2], dtype=dtype, device=device)
        sizes = torch.Size([0, 0, 2, 2])
        with self.assertRaisesRegex(RuntimeError, "size is inconsistent with indices"):
            torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)

        # dense-dim shape of values must match the trailing size entries
        indices = self.index_tensor([[1, 2],
                                    [0, 2]], device=device)
        values = torch.tensor([[1, 1, 1], [1, 1, 1]], dtype=dtype, device=device)
        sizes = torch.Size([3, 3, 2])
        with self.assertRaisesRegex(RuntimeError, "values has incorrect size"):
            torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)

        indices = self.index_tensor([[1, 2],
                                    [0, 2]], device=device)
        values = torch.empty([2, 1, 0], dtype=dtype, device=device)
        sizes = torch.Size([3, 3, 2, 0])
        with self.assertRaisesRegex(RuntimeError, "values has incorrect size"):
            torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
def test_factory_default(self, device):
tensor = self.legacy_sparse_tensor()
expected_indices = self.index_tensor([[]], device=device)
expected_size = torch.Size([0])
self.assertEqual(tensor._indices(), expected_indices)
self.assertEqual(tensor.shape, expected_size)
def test_factory_empty_indices(self, device):
tensor = self.legacy_sparse_tensor()
expected_indices = torch.empty((1, 0), dtype=torch.long, device=device)
self.assertEqual(tensor._indices(), expected_indices)
tensor = torch.sparse_coo_tensor(torch.Size([2, 0]), device=device)
expected_indices = torch.empty((2, 0), dtype=torch.long, device=device)
self.assertEqual(tensor._indices(), expected_indices)
tensor = torch.sparse_coo_tensor(torch.Size([2, 2, 0]), device=device)
expected_indices = torch.empty((3, 0), dtype=torch.long, device=device)
self.assertEqual(tensor._indices(), expected_indices)
tensor = torch.sparse_coo_tensor(torch.Size([2, 2, 0, 0]), device=device)
expected_indices = torch.empty((4, 0), dtype=torch.long, device=device)
self.assertEqual(tensor._indices(), expected_indices)
    @dtypes(torch.double, torch.cdouble)
    def test_factory_nnz(self, device, dtype):
        """nnz implied by indices and by values (dim 0) must agree."""
        indices = self.index_tensor([[0]], device=device)  # (sparse_dim, nnz): (1, 1)
        values = torch.tensor([[1, 1], [1, 1]], dtype=dtype, device=device)  # (nnz, ...): (2, 2)
        sizes = torch.Size([2, 2])
        with self.assertRaisesRegex(RuntimeError, "indices and values must have same nnz"):
            torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)

        # same mismatch with a zero-sized dense dimension on the values
        indices = self.index_tensor([[0]], device=device)  # (sparse_dim, nnz): (1, 1)
        values = torch.empty([2, 0], dtype=dtype, device=device)  # (nnz, ...): (2, 0)
        sizes = torch.Size([2, 0])
        with self.assertRaisesRegex(RuntimeError, "indices and values must have same nnz"):
            torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
    @dtypes(torch.double, torch.cdouble)
    def test_factory_nnz_zero(self, device, dtype):
        """With nnz == 0, both inferred and explicit sizes are accepted.

        test_shape(i_shape, v_shape, size, expected_size): i_shape is the
        (sparse_dim, 0) indices shape, v_shape the (0, dense dims...) values
        shape. size=None exercises shape inference; otherwise the explicit
        size must be kept verbatim.
        """
        def test_shape(i_shape, v_shape, size, expected_size):
            if size:
                t = torch.sparse_coo_tensor(torch.empty(i_shape), torch.empty(v_shape), torch.Size(size),
                                            dtype=dtype, device=device)
            else:
                t = torch.sparse_coo_tensor(torch.empty(i_shape), torch.empty(v_shape), dtype=dtype, device=device)
            expected_indices = torch.empty(i_shape, device=device, dtype=torch.int64)
            expected_values = torch.empty(v_shape, device=device, dtype=dtype)
            expected_size = torch.Size(expected_size)
            self.assertEqual(t._indices(), expected_indices)
            self.assertEqual(t._values(), expected_values)
            self.assertEqual(t.size(), expected_size)

        # inferred sizes: each sparse dim contributes a 0 entry
        test_shape([1, 0], [0, 2, 4, 0], None, [0, 2, 4, 0])
        test_shape([3, 0], [0, 2, 4, 0], None, [0, 0, 0, 2, 4, 0])
        # explicit sizes, including nonzero sparse dims with zero nnz
        test_shape([1, 0], [0, 2, 4, 0], [0, 2, 4, 0], [0, 2, 4, 0])
        test_shape([3, 0], [0, 2, 4, 0], [0, 0, 0, 2, 4, 0], [0, 0, 0, 2, 4, 0])
        test_shape([3, 0], [0, 2, 4, 0], [1, 2, 3, 2, 4, 0], [1, 2, 3, 2, 4, 0])
    @dtypes(torch.double, torch.cdouble)
    def test_factory_dense_dim(self, device, dtype):
        """Dense dims of values must match the trailing entries of the size."""
        indices = self.index_tensor([[0]], device=device)
        values = torch.tensor([[[1, 1, 1], [1, 1, 1]]], dtype=dtype, device=device)
        sizes = torch.Size([1, 3, 4])
        with self.assertRaisesRegex(RuntimeError, "values has incorrect size"):
            torch.sparse_coo_tensor(indices, values, sizes)

        # same mismatch with a zero-sized trailing dense dimension
        indices = self.index_tensor([[0]], device=device)
        values = torch.empty([1, 2, 3, 0], dtype=dtype, device=device)
        sizes = torch.Size([1, 3, 4, 0])
        with self.assertRaisesRegex(RuntimeError, "values has incorrect size"):
            torch.sparse_coo_tensor(indices, values, sizes)
@onlyCPU
@dtypes(torch.float16, torch.float32, torch.float64, torch.cfloat, torch.cdouble, torch.int64)
def test_factory_type_inference(self, device, dtype):
t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.tensor([1.], dtype=dtype))
self.assertEqual(dtype, t.dtype)
t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.tensor([1]))
self.assertEqual(torch.int64, t.dtype)
t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.HalfTensor(1, 0))
self.assertEqual(torch.float16, t.dtype)
t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.FloatTensor(1, 0))
self.assertEqual(torch.float32, t.dtype)
t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.DoubleTensor(1, 0))
self.assertEqual(torch.float64, t.dtype)
t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.LongTensor(1, 0))
self.assertEqual(torch.int64, t.dtype)
    @onlyCUDA
    def test_factory_device_type_inference(self, device):
        """device= and the devices of indices/values must interact predictably.

        With device=None and indices/values on different devices the
        constructor must raise; otherwise the result is CUDA iff device=
        says 'cuda', or — when device is None — iff the values are CUDA.
        """
        cpu_cuda = ('cpu', 'cuda')
        cpu_cuda_none = cpu_cuda + (None,)
        for indices_device, values_device, device in itertools.product(cpu_cuda,
                                                                       cpu_cuda,
                                                                       cpu_cuda_none):
            indices = torch.tensor(([0], [2]), device=indices_device)
            values = torch.tensor([1.], device=values_device)
            empty_values = torch.empty(1, 0).to(values_device)
            shape = (1, 3)
            empty_shape = (1, 3, 0)
            if device is None and indices_device != values_device:
                # ambiguous placement: must raise for both value shapes
                with self.assertRaises(RuntimeError):
                    torch.sparse_coo_tensor(indices, values, shape, device=device)
                with self.assertRaises(RuntimeError):
                    torch.sparse_coo_tensor(indices, empty_values, empty_shape, device=device)
            else:
                t = torch.sparse_coo_tensor(indices, values, shape, device=device)
                t_empty = torch.sparse_coo_tensor(indices, empty_values, empty_shape, device=device)
                should_be_cuda = (device == 'cuda' or (device is None and values_device == 'cuda'))
                self.assertEqual(should_be_cuda, t.is_cuda)
                self.assertEqual(t.is_cuda, t_empty.is_cuda)
    @onlyCPU
    def test_factory_copy(self, device):
        """sparse_coo_tensor aliases its inputs only when no conversion is needed.

        Indices must already be int64 and values float64 (the requested
        dtype) to be shared; otherwise the constructor copies. data_ptr()
        equality is the aliasing probe.
        """
        def test_tensor(indices, values, indices_equal, values_equal):
            sparse_tensor = torch.sparse_coo_tensor(indices, values, dtype=torch.float64, device=device)
            if indices_equal:
                self.assertEqual(indices.data_ptr(), sparse_tensor._indices().data_ptr())
            else:
                self.assertNotEqual(indices.data_ptr(), sparse_tensor._indices().data_ptr())
            if values_equal:
                self.assertEqual(values.data_ptr(), sparse_tensor._values().data_ptr())
            else:
                self.assertNotEqual(values.data_ptr(), sparse_tensor._values().data_ptr())

        # both correct
        indices = torch.tensor(([0], [2]), dtype=torch.int64)
        values = torch.tensor([1.], dtype=torch.float64)
        test_tensor(indices, values, True, True)

        indices = torch.tensor(([0], [2]), dtype=torch.int64)
        values = torch.DoubleTensor(1, 0)
        test_tensor(indices, values, True, True)

        # only indices correct
        indices = torch.tensor(([0], [2]), dtype=torch.int64)
        values = torch.tensor([1.], dtype=torch.float32)
        test_tensor(indices, values, True, False)

        indices = torch.tensor(([0], [2]), dtype=torch.int64)
        values = torch.tensor([1.], dtype=torch.float16)
        test_tensor(indices, values, True, False)

        indices = torch.tensor(([0], [2]), dtype=torch.int64)
        values = torch.FloatTensor(1, 0)
        test_tensor(indices, values, True, True)  # An empty tensor's data_ptr is always equal to 0

        # only values correct
        indices = torch.tensor(([0], [2]), dtype=torch.int32)
        values = torch.tensor([1.], dtype=torch.float64)
        test_tensor(indices, values, False, True)

        indices = torch.tensor(([0], [2]), dtype=torch.int32)
        values = torch.DoubleTensor(1, 0)
        test_tensor(indices, values, False, True)

        # neither correct
        indices = torch.tensor(([0], [2]), dtype=torch.int32)
        values = torch.tensor([1.], dtype=torch.float32)
        test_tensor(indices, values, False, False)

        indices = torch.tensor(([0], [2]), dtype=torch.int32)
        values = torch.FloatTensor(1, 0)
        test_tensor(indices, values, False, True)  # An empty tensor's data_ptr is always equal to 0

        # complex support: cdouble values are converted to the requested
        # float64 dtype, so they are never aliased
        indices = torch.tensor(([0], [2]), dtype=torch.int64)
        values = make_tensor([1, ], dtype=torch.cdouble, device=device)
        test_tensor(indices, values, True, False)

        indices = torch.tensor(([0], [2]), dtype=torch.int32)
        values = make_tensor([1, 1], dtype=torch.cdouble, device=device)
        test_tensor(indices, values, False, False)
    @onlyCPU  # just run once, we test both cpu and cuda
    def test_constructor_device_legacy(self, device):
        """Legacy typed constructors and .new() reject cross-backend device=.

        A CPU torch.sparse.FloatTensor cannot be asked for device='cuda',
        and (when CUDA is available) a torch.cuda.sparse.FloatTensor cannot
        be asked for device='cpu'.
        """
        i = torch.tensor([[0, 1, 1], [2, 0, 2]])
        v = torch.tensor([3., 4., 5.])
        size = torch.Size([2, 3])

        self.assertRaises(RuntimeError, lambda: torch.sparse.FloatTensor(device='cuda'))
        self.assertRaises(RuntimeError, lambda: torch.sparse.FloatTensor(i, v, device='cuda'))
        self.assertRaises(RuntimeError, lambda: torch.sparse.FloatTensor(i, v, size, device='cuda'))
        self.assertRaises(RuntimeError, lambda: torch.sparse.FloatTensor(torch.Size([2, 3, 4]), device='cuda'))

        # .new() on a CPU sparse tensor is equally restricted
        x = torch.sparse_coo_tensor(i, v, size, device='cpu')
        self.assertRaises(RuntimeError, lambda: x.new(device='cuda'))
        self.assertRaises(RuntimeError, lambda: x.new(i, v, device='cuda'))
        self.assertRaises(RuntimeError, lambda: x.new(i, v, size, device='cuda'))
        self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cuda'))

        if torch.cuda.is_available():
            self.assertRaises(RuntimeError, lambda: torch.cuda.sparse.FloatTensor(device='cpu'))
            self.assertRaises(RuntimeError, lambda: torch.cuda.sparse.FloatTensor(i, v, device='cpu'))
            self.assertRaises(RuntimeError, lambda: torch.cuda.sparse.FloatTensor(i, v, size, device='cpu'))
            self.assertRaises(RuntimeError, lambda: torch.cuda.sparse.FloatTensor(torch.Size([2, 3, 4]), device='cpu'))

            x = torch.sparse_coo_tensor(i, v, size, device='cuda')
            self.assertRaises(RuntimeError, lambda: x.new(device='cpu'))
            self.assertRaises(RuntimeError, lambda: x.new(i, v, device='cpu'))
            self.assertRaises(RuntimeError, lambda: x.new(i, v, size, device='cpu'))
            self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cpu'))
def test_legacy_constructor(self, device):
i = torch.tensor([[0, 1, 1], [2, 0, 2]])
v = torch.tensor([3., 4., 5.])
size = torch.Size([2, 3])
self.assertRaises(TypeError, lambda: torch.sparse.FloatTensor(v.storage()))
self.assertRaises(TypeError, lambda: torch.sparse.FloatTensor(v))
self.assertEqual(torch.sparse_coo, torch.sparse.FloatTensor(torch.Size([2, 3])).layout)
self.assertRaises(TypeError, lambda: torch.sparse.FloatTensor([6]))
def test_legacy_new(self, device):
i = torch.tensor([[0, 1, 1], [2, 0, 2]])
v = torch.tensor([3., 4., 5.])
size = torch.Size([2, 3])
s = torch.sparse_coo_tensor(i, v, size)
self.assertEqual(torch.sparse_coo, s.new(device='cpu').layout)
self.assertRaises(TypeError, lambda: s.new(v.storage()))
self.assertRaises(TypeError, lambda: s.new(v))
self.assertEqual(torch.sparse_coo, s.new(torch.Size([2, 3])).layout)
self.assertRaises(TypeError, lambda: s.new([6]))
@onlyCPU # not really, but we only really want to run this once
def test_dtypes(self, device):
all_sparse_dtypes = all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)
do_test_dtypes(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cpu'))
if torch.cuda.is_available():
do_test_dtypes(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cuda:0'))
@onlyCPU # not really, but we only really want to run this once
def test_empty_full(self, device):
all_sparse_dtypes = all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)
do_test_empty_full(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cpu'))
if torch.cuda.device_count() > 0:
do_test_empty_full(self, all_sparse_dtypes, torch.sparse_coo, None)
do_test_empty_full(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cuda:0'))
def test_is_sparse(self, device):
x = torch.randn(3, 3)
self.assertFalse(x.is_sparse)
x = torch.randn(3, 3, 0)
self.assertFalse(x.is_sparse)
x = self.legacy_sparse_tensor()
self.assertTrue(x.is_sparse)
x = self.sparse_empty(1, 0, device=device)
self.assertTrue(x.is_sparse)
def test_resize_as(self, device):
def do_test(t):
y = t.new().resize_as_(t).zero_()
self.assertEqual(y.shape, t.shape)
# Check that y can be added to t. Currently, this requires that
# sparse_dim and dense_dim match.
self.assertEqual(t, t + y)
do_test(self.legacy_sparse_tensor())
do_test(self.sparse_empty([3, 0], device=device))
do_test(self.sparse_empty([3, 3], device=device))
    def _test_resize_shape(self, x_i, x_v, x_size, y_i, y_v, y_size, dtype, device):
        """resize_as_ on sparse x to y's geometry must mirror dense resize_as_.

        x_i/y_i: indices shapes (sparse_dim, nnz); x_v/y_v: values shapes;
        x_size/y_size: overall tensor sizes. Also checks that x's original
        values survive the resize.
        """
        x_v_numel = torch.zeros(x_v).numel()
        # NOTE(review): y_v_numel is computed but never used.
        y_v_numel = torch.zeros(y_v).numel()
        # x gets distinguishable values (0..n) so preservation is detectable.
        x = torch.sparse_coo_tensor(torch.zeros(x_i),
                                    torch.arange(x_v_numel).resize_(x_v).to(torch.float),
                                    torch.Size(x_size), dtype=dtype, device=device)
        x_dense = x.to_dense()
        y = torch.sparse_coo_tensor(torch.zeros(y_i),
                                    torch.ones(y_v).to(torch.float),
                                    torch.Size(y_size), dtype=dtype, device=device)
        y_dense = y.to_dense()
        x.resize_as_(y)
        x_dense.resize_as_(y_dense)
        self.assertEqual(x.shape, y.shape)
        self.assertEqual(x.sparse_dim(), y.sparse_dim())
        self.assertEqual(x.dense_dim(), y.dense_dim())
        self.assertEqual(x.shape, x_dense.shape)
        self.assertEqual(y.shape, y_dense.shape)
        # Here we make sure that the original data are preserved after resizing
        self.assertEqual(x.to_dense().view(-1)[0:x_v_numel].view(x_v),
                         x_dense.view(-1)[0:x_v_numel].view(x_v))
    @dtypes(torch.double, torch.cdouble)
    def test_resize(self, device, dtype):
        """Enumerate the supported and unsupported sparse resize_as_ cases.

        Arguments to _test_resize_shape are (x indices shape, x values
        shape, x size) then the same triple for the resize target y.
        """
        # 1. Expand the size of some dense dimensions [Supported]
        self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                [1, 1], [1, 2, 4], [2, 2, 4],
                                dtype=dtype, device=device)

        self._test_resize_shape([1, 1], [1, 2, 0], [2, 2, 0],
                                [1, 1], [1, 2, 4], [2, 2, 4],
                                dtype=dtype, device=device)

        # 2. Expand the size of some sparse dimensions [Supported]
        self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                [1, 1], [1, 2, 3], [4, 2, 3],
                                dtype=dtype, device=device)

        # 3. Change the shapes of both sparse and dense dimensions when nnz is zero [Supported]
        self._test_resize_shape([1, 0], [0, 2, 3], [2, 2, 3],
                                [2, 0], [0, 2, 4, 5], [1, 1, 2, 4, 5],
                                dtype=dtype, device=device)

        self._test_resize_shape([1, 0], [0, 2, 3], [2, 2, 3],
                                [2, 0], [0, 2, 4, 0], [1, 1, 2, 4, 0],
                                dtype=dtype, device=device)

        # 4. Add dims to dense dimensions [Not Supported]
        with self.assertRaisesRegex(RuntimeError, "changing the number of dense dimensions"):
            self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                    [1, 1], [1, 2, 3, 4], [2, 2, 3, 4],
                                    dtype=dtype, device=device)

        with self.assertRaisesRegex(RuntimeError, "changing the number of dense dimensions"):
            self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                    [1, 1], [1, 2, 3, 0], [2, 2, 3, 0],
                                    dtype=dtype, device=device)

        # 5. Remove dims from dense dimensions [Not Supported]
        with self.assertRaisesRegex(RuntimeError, "changing the number of dense dimensions"):
            self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                    [1, 1], [1, 2], [2, 2],
                                    dtype=dtype, device=device)

        # 6. Change the number of sparse dimensions on a non-empty sparse tensor [Not Supported]
        with self.assertRaisesRegex(RuntimeError, "changing the number of sparse dimensions"):
            self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                    [2, 1], [1, 2, 3], [1, 2, 2, 3],
                                    dtype=dtype, device=device)

        # 7. Shrink the size of some sparse dimensions on a non-empty sparse tensor [Not Supported]
        with self.assertRaisesRegex(RuntimeError, "shrinking the size of sparse dimensions"):
            self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                    [1, 1], [1, 2, 3], [1, 2, 3],
                                    dtype=dtype, device=device)

        # 8. Shrink the size of some dense dimensions on a non-empty sparse tensor [Not Supported]
        with self.assertRaisesRegex(RuntimeError, "shrinking the size of dense dimensions"):
            self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                    [1, 1], [1, 2, 2], [2, 2, 2],
                                    dtype=dtype, device=device)

        with self.assertRaisesRegex(RuntimeError, "shrinking the size of dense dimensions"):
            self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                    [1, 1], [1, 2, 0], [2, 2, 0],
                                    dtype=dtype, device=device)
    def test_is_nonzero(self, device):
        """is_nonzero on single-element sparse tensors, incl. duplicates/complex.

        Duplicate indices sum before the check (so -1 + 1 counts as zero);
        a tensor with no values is ambiguous and must raise.
        """
        self.assertTrue(torch.sparse_coo_tensor(([0],), 1., (1,), device=device).is_nonzero())
        self.assertFalse(torch.sparse_coo_tensor(([0],), 0., (1,), device=device).is_nonzero())
        self.assertFalse(torch.sparse_coo_tensor(([0], [0]), 0., (1, 1), device=device).is_nonzero())
        self.assertFalse(torch.sparse_coo_tensor(([0, 0],), (0., 0.), (1,), device=device).is_nonzero())
        # duplicate entries -1 and 1 cancel to zero
        self.assertFalse(torch.sparse_coo_tensor(([0, 0],), (-1., 1.), (1,), device=device).is_nonzero())

        # scalar sparse tensor
        self.assertTrue(torch.sparse_coo_tensor(torch.zeros(0, 1), 12.3, [], device=device).is_nonzero())
        with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
            torch.sparse_coo_tensor(([0, 1],), torch.empty(2, 0), (4, 0), device=device).is_nonzero()
        self.assertTrue(torch.sparse_coo_tensor(([0],), 2.3 - 4.5j, (1,), dtype=torch.cfloat, device=device)
                        .is_nonzero())
        self.assertTrue(torch.sparse_coo_tensor(([0],), 2.3 - 4.5j, (1,), dtype=torch.cdouble, device=device)
                        .is_nonzero())
        self.assertFalse(torch.sparse_coo_tensor(([0],), 0. + 0j, (1,), dtype=torch.cfloat, device=device)
                         .is_nonzero())
        self.assertFalse(torch.sparse_coo_tensor(([0],), 0. + 0j, (1,), dtype=torch.cdouble, device=device)
                         .is_nonzero())
    def test_allow_tensor_metadata_change(self, device):
        """Metadata-mutating ops must be rejected on .data / .detach() views.

        Each op maps to the internal metadata call named in its error
        message. NOTE(review): `a` is (re)bound inside the fifth `with`
        block and then reused by the following two checks — the statement
        order here is load-bearing.
        """
        def do_test(t):
            with self.assertRaisesRegex(
                    RuntimeError,
                    "raw_resize_ is not allowed on a Tensor created from .data or .detach()"):
                t.transpose_(0, 1)
            with self.assertRaisesRegex(
                    RuntimeError,
                    "resize_ is not allowed on a Tensor created from .data or .detach()"):
                t.resize_as_(self.sparse_empty(3, 3))
            with self.assertRaisesRegex(
                    RuntimeError,
                    "resize_and_clear_ is not allowed on a Tensor created from .data or .detach()"):
                t.mul_(t)
            with self.assertRaisesRegex(
                    RuntimeError,
                    "set_coalesced is not allowed on a Tensor created from .data or .detach()"):
                t._coalesced_(True)
            with self.assertRaisesRegex(
                    RuntimeError,
                    "set_indices_and_values_unsafe is not allowed on a Tensor created from .data or .detach()"):
                a = self.sparse_tensor(torch.tensor([[0, 1, 1], [2, 0, 2]]), torch.tensor([3., 4., 5.])).data
                a.add_(a)
            with self.assertRaisesRegex(
                    RuntimeError,
                    "resize_and_clear_ is not allowed on a Tensor created from .data or .detach()"):
                a.zero_()
            with self.assertRaisesRegex(
                    RuntimeError,
                    "resize_ is not allowed on a Tensor created from .data or .detach()"):
                a.copy_(self.sparse_empty(3, 3))

        do_test(self.sparse_empty([3, 0], device=device).data)
        do_test(self.sparse_empty([3, 0], device=device).detach())
    @dtypes(torch.double, torch.cdouble)
    def test_change_tensor_metadata(self, device, dtype):
        """Mutating the source indices/values after construction must not
        change the sparse tensor (the constructor does not alias metadata).

        Each section mutates i/v via a different mechanism (resize_,
        resize_as_, as_strided_, set_, transpose_) and then checks that
        t still reports indices (2, 1) and values (1, 3).
        """
        i = self.index_tensor([[0], [1]], device=device)
        v = torch.tensor([[3, 4, 5]], dtype=dtype, device=device)
        t = torch.sparse_coo_tensor(i, v, torch.Size([1, 2, 3]), dtype=dtype, device=device)
        i.resize_(2, 3)
        v.resize_(4, 5)
        self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
        self.assertEqual(list(t.coalesce().values().size()), [1, 3])

        i = self.index_tensor([[0], [1]], device=device)
        v = torch.tensor([[3, 4, 5]], dtype=dtype, device=device)
        t = torch.sparse_coo_tensor(i, v, torch.Size([1, 2, 3]))
        i.resize_as_(self.index_tensor([0, 1], device=device))
        v.resize_as_(torch.tensor([3, 4, 5], dtype=dtype, device=device))
        self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
        self.assertEqual(list(t.coalesce().values().size()), [1, 3])

        i = self.index_tensor([[0], [1]], device=device)
        v = torch.tensor([[3, 4, 5]], dtype=dtype, device=device)
        t = torch.sparse_coo_tensor(i, v, torch.Size([1, 2, 3]))
        i.as_strided_((2, 1), (1, 1))
        v.as_strided_((1, 3), (1, 1))
        self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
        self.assertEqual(list(t.coalesce().values().size()), [1, 3])

        i = self.index_tensor([[0], [1]], device=device)
        v = torch.tensor([[3, 4, 5]], dtype=dtype, device=device)
        t = torch.sparse_coo_tensor(i, v, torch.Size([1, 2, 3]))
        i.set_(self.index_tensor([0, 1], device=device))
        v.set_(torch.tensor([3, 4, 5], dtype=dtype, device=device))
        self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
        self.assertEqual(list(t.coalesce().values().size()), [1, 3])

        i = self.index_tensor([[0], [1]], device=device)
        v = torch.tensor([[3, 4, 5]], dtype=dtype, device=device)
        t = torch.sparse_coo_tensor(i, v, torch.Size([1, 2, 3]))
        i.transpose_(0, 1)
        v.transpose_(0, 1)
        self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
        self.assertEqual(list(t.coalesce().values().size()), [1, 3])
    @skipIfRocm
    @coalescedonoff
    @dtypes(torch.double)
    def test_pickle(self, device, dtype, coalesced):
        """pickle round-trip preserves sparse tensors of varied shape/nnz.

        Cases cover scalar, zero-sized, 1-D, and hybrid shapes, with nnz
        from 0 upward; in the uncoalesced variant the last index column is
        duplicated so the pickled tensor really is uncoalesced.
        """
        import pickle

        shape_sparse_dim_nnz = [
            ((), 0, 2),
            ((0,), 0, 10),
            ((2,), 0, 3),
            ((100, 3), 1, 3),
            ((100, 20, 3), 2, 0),
            ((10, 0, 3), 0, 3),
            ((10, 0, 3), 0, 0),
        ]

        for shape, sparse_dim, nnz in shape_sparse_dim_nnz:
            indices_shape = torch.Size((sparse_dim, nnz))
            values_shape = torch.Size((nnz,) + shape[sparse_dim:])
            indices = torch.arange(indices_shape.numel(), dtype=self.index_tensor(0).dtype,
                                   device=device).view(indices_shape)
            for d in range(sparse_dim):
                indices[d].clamp_(max=(shape[d] - 1))  # make it valid index
            if not coalesced and indices.numel() > 0:
                indices[:, -1] = indices[:, 0]  # make it uncoalesced
            values_numel = values_shape.numel()
            values = torch.arange(values_numel, dtype=dtype,
                                  device=device).view(values_shape).div_(values_numel / 2.)
            sp_tensor = self.sparse_tensor(indices, values, shape)
            serialized = pickle.dumps(sp_tensor)
            sp_tensor_loaded = pickle.loads(serialized)
            self.assertEqual(sp_tensor, sp_tensor_loaded)
def test_any(self, device):
    """torch.any on a boolean sparse tensor matches dense any() semantics."""
    all_false = torch.sparse_coo_tensor(torch.tensor(([0, 0], [2, 0])),
                                        torch.tensor([False, False]), device=device)
    self.assertEqual(torch.any(all_false), torch.tensor(False))
    some_true = torch.sparse_coo_tensor(torch.tensor(([0, 0], [2, 0])),
                                        torch.tensor([True, False]), device=device)
    self.assertEqual(torch.any(some_true), torch.tensor(True))
def test_isnan(self, device):
    """torch.isnan on a sparse tensor flags exactly the NaN-valued entries."""
    no_nan = torch.sparse_coo_tensor(torch.tensor(([0, 0], [0, 2])),
                                     torch.tensor([1, 4]), device=device)
    expect_none = torch.sparse_coo_tensor(torch.tensor(([0, 0], [0, 2])),
                                          torch.tensor([False, False]), device=device)
    self.assertEqual(torch.isnan(no_nan).int(), expect_none.int())
    with_nan = torch.sparse_coo_tensor(torch.tensor(([0, 0], [0, 2])),
                                       torch.tensor([1, float("nan")]), device=device)
    expect_one = torch.sparse_coo_tensor(torch.tensor(([0, 0], [0, 2])),
                                         torch.tensor([False, True]), device=device)
    self.assertEqual(torch.isnan(with_nan).int(), expect_one.int())
@coalescedonoff
@dtypes(torch.float32, torch.float64)
def test_div_rounding_mode(self, device, dtype, coalesced):
    """Sparse division matches dense division for every rounding mode,
    including the in-place and out= variants."""
    sparse, _, _ = self._gen_sparse(2, 10, (10, 10), dtype,
                                    device, coalesced)
    dense = self.safeToDense(sparse)
    for mode in (None, 'floor', 'trunc'):
        expect = dense.div(-2, rounding_mode=mode)
        # Out-of-place
        actual = sparse.div(-2, rounding_mode=mode)
        self.assertEqual(self.safeToDense(actual), expect)
        # Test inplace
        actual = sparse.clone().div_(-2, rounding_mode=mode)
        self.assertEqual(self.safeToDense(actual), expect)
        # Test out argument
        actual.zero_()
        torch.div(sparse, -2, rounding_mode=mode, out=actual)
        self.assertEqual(self.safeToDense(actual), expect)
def test_div_by_sparse_error(self, device):
    """Dividing by a sparse tensor is unsupported and raises RuntimeError."""
    def divide():
        return torch.tensor(1., device=device).to_sparse() \
            / torch.tensor(1., device=device).to_sparse()
    self.assertRaisesRegex(RuntimeError, 'Sparse division requires', divide)
def test_floor_divide_by_sparse_error(self, device):
    """Floor division by a sparse tensor is unsupported and raises RuntimeError."""
    def floor_divide():
        return torch.tensor(1., device=device).to_sparse() \
            // torch.tensor(1., device=device).to_sparse()
    self.assertRaisesRegex(RuntimeError, 'Sparse floor division requires', floor_divide)
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
@onlyCPU
def test_sparse_to_numpy(self, device):
    """Converting a sparse tensor directly to NumPy must raise TypeError."""
    sp = torch.sparse_coo_tensor(torch.tensor(([0, 0], [2, 0])), torch.tensor([1, 4]))
    with self.assertRaises(TypeError):
        sp.numpy()
@coalescedonoff
@dtypes(torch.double)
def test_softmax(self, device, dtype, coalesced):
    """Validate torch.sparse.softmax/log_softmax forward values and autograd
    Jacobians against pure-Python and dense reference implementations."""
    import torch.nn.functional as F

    def to_dense(sparse, fill_value=None):
        """
        Return dense tensor from a sparse tensor using given fill value.
        """
        if fill_value is None or fill_value == 0:
            return sparse.to_dense()
        sparse = sparse.coalesce()
        dense = torch.full(sparse.shape, fill_value, dtype=sparse.dtype, device=sparse.device)
        for idx, value in zip(sparse._indices().t(), sparse._values()):
            dense[tuple(idx)] = value
        return dense

    def softmax_to_dense(sparse, dim):
        """Dense softmax of a sparse tensor. Useful only for testing softmax
        correctness.

        When computing softmax of a sparse tensor, the value of
        unspecified items is negative infinity rather than zero so
        that

          softmax(sparse.to_dense(fill_value=-inf), dim) == softmax(sparse, dim).to_dense()

        holds for non-empty lines. On empty lines, the softmax
        values are defined as 0 in order to preserve the sparsity
        of result.

        Note that in PyTorch, ``to_dense`` method does not
        implement the ``fill_value`` keyword argument.
        """
        dtype = sparse.dtype
        device = sparse.device
        dense = to_dense(sparse, fill_value=-float('inf'))
        r = F.softmax(dense, dim)
        # softmax on empty lines results nan, replace with zeros to match the definition
        r[r != r] = 0
        return r

    def sparse_softmax(sparse, dim):
        """Pure Python softmax of a sparse tensor. Assuming -inf for
        unspecified sparse tensor data. This is a prototype of
        sparse softmax algorithm in Python.
        """
        dtype = sparse.dtype
        device = sparse.device
        # softmax is non-linear operation, so sparse tensors must
        # be coalesced.
        sparse = sparse.coalesce()
        inf = float('inf')
        indices = sparse._indices()
        values = sparse._values()
        if dim < sparse.sparse_dim():
            nnz = sparse._nnz()
            # compute pool indices: entries sharing a pool id differ
            # only along the softmax dimension (its stride is zeroed).
            size = sparse.size()
            strides = torch.ones((sparse.sparse_dim(), 1), dtype=indices.dtype, device=indices.device)
            for i in reversed(range(sparse.sparse_dim() - 1)):
                strides[i, 0] = strides[i + 1, 0] * size[i + 1]
            strides[dim, 0] = 0
            pool = (indices * strides).sum(dim=0)
            # remap pool ids to a compact 0..K-1 range
            i2p = {}
            for i in range(nnz):
                c = int(pool[i])
                if c not in i2p:
                    i2p[c] = len(i2p)
                pool[i] = i2p[c]
            # compute max per pool (for numerically stable exp)
            dense_size = tuple(size[sparse.sparse_dim():])
            mx = torch.empty((pool.max() + 1,) + dense_size, dtype=dtype, device=device)
            mx[:] = -inf
            for n in range(nnz):
                p = pool[n]
                mx[p] = torch.max(mx[p], values[n])
            # apply exp to (v - mx) and sum the results
            exp_values = torch.empty_like(values)
            exp_sums = torch.zeros_like(mx)
            for n in range(nnz):
                p = pool[n]
                v = exp_values[n] = (values[n] - mx[p]).exp()
                exp_sums[p] = exp_sums[p] + v
            # normalize with the sum of exponents
            for n in range(nnz):
                p = pool[n]
                exp_values[n] = exp_values[n] / exp_sums[p]
            return torch.sparse_coo_tensor(indices,
                                           exp_values,
                                           sparse.size(),
                                           dtype=dtype, device=device)
        elif dim < sparse.sparse_dim() + sparse.dense_dim():
            # softmax along a dense dimension reduces to dense softmax on values
            return torch.sparse_coo_tensor(indices,
                                           F.softmax(values, dim - sparse.sparse_dim() + 1),
                                           sparse.size(),
                                           dtype=dtype, device=device)
        else:
            raise ValueError(
                '`dim(=%s)` must be smaller than `sparse_dim(=%s) + dense_dim(=%s)`'
                % (dim, sparse.sparse_dim(), sparse.dense_dim()))

    def softmax_jacobian_analytic(x, dim):
        """Return Jacobian of softmax using analytic formula

           D_jS_i = S_i * (1[i==j] - S_j).

        where S = softmax(x, dim), x is dense tensor, i,j in
        range(x.shape[dim]).
        """
        y = F.softmax(x, dim)
        y[y != y] = 0  # replace nan-s with zeros
        J = torch.zeros((x.shape[dim],) + tuple(x.shape), dtype=x.dtype, device=x.device)
        si = [slice(None)] * len(y.shape)
        sj = [slice(None)] * len(y.shape)
        s = [slice(None)] * len(J.shape)
        for i in range(y.shape[dim]):
            si[dim] = i
            s[dim + 1] = i
            yi = y[tuple(si)]
            for j in range(y.shape[dim]):
                sj[dim] = j
                s[0] = j
                if i == j:
                    J[tuple(s)] = yi * (1 - yi)
                else:
                    yj = y[tuple(sj)]
                    J[tuple(s)] = - yi * yj
                sj[dim] = slice(None)
            si[dim] = slice(None)
            s[dim + 1] = slice(None)
        return J

    def softmax_jacobian_autograd(x, dim, log=False):
        """Return Jacobian of softmax using PyTorch autograd feature.

        x can be dense or sparse tensor.
        """
        import itertools

        if x.is_sparse:
            x = x.coalesce()

        dtype = x.dtype
        device = x.device
        shape = tuple(x.shape)
        J = torch.zeros((shape[dim],) + shape, dtype=dtype, device=device)
        for i in range(shape[dim]):
            # Build the one-hot-like cotangent v selecting slice i of dim.
            if x.is_sparse:
                sparse_dim = x.sparse_dim()
                dense_dim = x.dense_dim()
                if dim < sparse_dim:
                    ranges = []
                    for j, sz in enumerate(shape[:sparse_dim]):
                        if dim == j:
                            ranges.append([i])
                        else:
                            ranges.append(list(range(sz)))
                    indices = torch.tensor(list(itertools.product(*ranges)), dtype=torch.long, device=device).t()
                    values = torch.ones((indices.shape[1],) + shape[sparse_dim:], dtype=dtype, device=device)
                else:
                    ranges = []
                    for j, sz in enumerate(shape[:sparse_dim]):
                        ranges.append(list(range(sz)))
                    indices = torch.tensor(list(itertools.product(*ranges)), dtype=torch.long, device=device).t()
                    values = torch.zeros((indices.shape[1],) + shape[sparse_dim:], dtype=dtype, device=device)
                    sv = [slice(None)] * (dense_dim + 1)
                    sv[dim - sparse_dim + 1] = i
                    values[tuple(sv)] = 1
                v = torch.sparse_coo_tensor(indices, values, shape, dtype=dtype, device=device)
            else:
                v = torch.zeros_like(x)
                sv = [slice(None)] * len(v.shape)
                sv[dim] = i
                v[tuple(sv)] = 1
            x_ = x.clone()
            x_.requires_grad_(True)
            if log:
                if x_.is_sparse:
                    y = torch.sparse.log_softmax(x_, dim)
                else:
                    y = F.log_softmax(x_, dim)
            else:
                if x_.is_sparse:
                    y = torch.sparse.softmax(x_, dim)
                else:
                    y = F.softmax(x_, dim)
                    # replace nan-s with zeros
                    y.data[y != y] = 0
            y.backward(v)
            g = x_.grad
            if not g.is_sparse:
                # replace nan-s with zeros
                g.data[g != g] = 0
            J[i] = g.to_dense() if g.is_sparse else g
        return J

    def test_op(sparse_dims, nnz, with_size, coalesced):
        if isinstance(with_size, Number):
            with_size = [with_size] * sparse_dims

        x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)

        def sparse_log(x):
            return torch.sparse_coo_tensor(x._indices(), x._values().log(),
                                           x.size(), dtype=x.dtype, device=x.device)

        for dim in range(x.sparse_dim() + x.dense_dim()):
            # Check sparse softmax definition

            # check Python sparse softmax
            y = sparse_softmax(x, dim)
            r1 = softmax_to_dense(x, dim)
            r2 = y.to_dense()
            self.assertEqual(r1, r2)

            # check C++ sparse softmax
            y1 = torch.sparse.softmax(x, dim)
            self.assertEqual(y, y1)

            # check C++ sparse log_softmax
            ly1 = torch.sparse.log_softmax(x, dim)
            self.assertEqual(ly1, sparse_log(y1))

            # Check autograd support on sparse softmax

            # check softmax Jacobian definition for dense input
            x1 = to_dense(x, fill_value=float('-inf'))
            J = softmax_jacobian_analytic(x1, dim)
            assert J.shape[0] == x.shape[dim]
            assert J.shape[dim + 1] == x.shape[dim]

            # check softmax Jacobian from autograd, dense input
            J2 = softmax_jacobian_autograd(x1, dim)
            self.assertEqual(J, J2)

            # check softmax Jacobian from autograd, sparse input
            J3 = softmax_jacobian_autograd(x, dim)
            self.assertEqual(J, J3)

            '''
            y = softmax(x, dim)
            z = log(y) = log_softmax(x, dim)
            Dy/Dx = J
            Dz/Dx = Dz/Dy Dy/Dx = 1/y * J
            => J = J_log * y
            '''
            # log_softmax Jacobian from autograd, dense input
            J2_log = softmax_jacobian_autograd(x1, dim, log=True)
            # log_softmax Jacobian from autograd, sparse input
            J3_log = softmax_jacobian_autograd(x, dim, log=True)

            J = J.transpose(0, dim + 1)
            J2_log = J2_log.transpose(0, dim + 1)
            J3_log = J3_log.transpose(0, dim + 1)
            self.assertEqual(J, J2_log * r1)
            self.assertEqual(J, J3_log * r1)

            if dim == 0:
                # check dtype argument
                other_dtype = torch.float32
                y2 = torch.sparse.softmax(x, dim, dtype=other_dtype)
                self.assertEqual(y2.dtype, other_dtype)
                self.assertEqual(y2, y1.type(other_dtype))

                ly2 = torch.sparse.log_softmax(x, dim, dtype=other_dtype)
                self.assertEqual(ly2.dtype, other_dtype)
                self.assertEqual(ly2, ly1.type(other_dtype))

    test_op(1, 10, [3], coalesced)
    test_op(1, 10, [2, 3], coalesced)
    test_op(1, 10, [3, 2], coalesced)
    test_op(2, 10, [2, 3, 4], coalesced)
    test_op(2, 10, [3, 4], coalesced)
    test_op(2, 5, [5, 4], coalesced)
    test_op(2, 10, [3, 4, 2], coalesced)
    test_op(3, 10, [3, 4, 2], coalesced)
    test_op(3, 100, [3, 4, 2], coalesced)
    test_op(3, 100, [3, 4, 2, 3], coalesced)
    test_op(3, 100, [3, 4, 2, 3, 5, 2], coalesced)
    test_op(4, 100, [3, 4, 2, 3, 5, 2], coalesced)
@dtypes(torch.double)
def test_softmax_zero_nnz(self, device, dtype):
    """Softmax of a sparse tensor with no specified entries is all zeros."""
    empty = torch.sparse_coo_tensor([[]], [], (3,), device=device, dtype=dtype)
    result = torch.sparse.softmax(empty, 0)
    self.assertEqual(result.to_dense(), torch.zeros_like(empty))
# TODO: Check after why ROCm's cusparseXcsrgemm2Nnz function doesn't return the same nnz value as CUDA
@skipIfRocm
@coalescedonoff
@dtypes(*floating_and_complex_types())
@dtypesIfCUDA(*floating_types_and(*[torch.half] if CUDA11OrLater and SM53OrLater else [],
                                  *[torch.bfloat16] if CUDA11OrLater and SM80OrLater else [],
                                  *[torch.complex64] if CUDA11OrLater else [],
                                  *[torch.complex128] if CUSPARSE_SPMM_COMPLEX128_SUPPORTED else []))
@unittest.skipIf(TEST_WITH_CROSSREF, "not working with fake tensor")
@precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2, torch.complex64: 1e-2, torch.float32: 1e-2})
def test_sparse_matmul(self, device, dtype, coalesced):
    """
    This function test `torch.sparse.mm` when both the mat1 and mat2 are sparse tensors.
    """

    def ref_sparse_mm(a, b):
        # Dense reference: densify both operands and use @.
        return a.to_dense() @ b.to_dense()

    def grad_with_custom_sparsity_pattern_test_helper(sparse_dims, nnz, shape_a, shape_b):
        def test_grad_dense(a_s, b_s, g_s):
            # Reference gradients computed through dense matmul, then
            # masked back onto the input sparsity patterns.
            a = a_s.to_dense().detach()
            b = b_s.to_dense().detach()
            g = g_s.to_dense().detach()

            a.requires_grad_(True)
            b.requires_grad_(True)
            c = a @ b
            c.backward(g)
            return a.grad.sparse_mask(a_s.coalesce()), b.grad.sparse_mask(b_s.coalesce())

        a, _, _ = self._gen_sparse(sparse_dims, nnz, shape_a, dtype, device, coalesced)
        b, _, _ = self._gen_sparse(sparse_dims, nnz, shape_b, dtype, device, coalesced)
        a.requires_grad_(True)
        b.requires_grad_(True)

        c = torch.sparse.mm(a, b)
        # random cotangent with the sparsity pattern of c
        c2 = c.to_dense().detach()
        c2 = torch.rand_like(c2)
        g = c2.sparse_mask(c.coalesce())

        c.backward(g)

        a_grad, b_grad = test_grad_dense(a, b, g)
        self.assertEqual(a.grad, a_grad)
        self.assertEqual(b.grad, b_grad)

    def test_sparse_matmul(sparse_dims, nnz, shape_a, shape_b):
        a, i_a, v_a = self._gen_sparse(sparse_dims, nnz, shape_a, dtype, device, coalesced)
        b, i_b, v_b = self._gen_sparse(sparse_dims, nnz, shape_b, dtype, device, coalesced)

        # dense implementation
        r1 = ref_sparse_mm(a, b)

        # cpp implementation
        r2 = torch.sparse.mm(a, b)
        self.assertEqual(r1, r2.to_dense())

        if dtype in [torch.double, torch.cdouble]:
            a.requires_grad_(True)
            b.requires_grad_(True)

            # check autograd support on sparse matmul
            def fn(D1, D2):
                return torch.sparse.mm(D1, D2).to_dense()

            if a.is_cuda:
                # For cuda, `nondet_tol` is set with `1e-5`
                # This is because cuSparse sometimes returns approximate zero values like `~e-323`
                # TODO: Check this cuSparse issue.
                # This happens when you do chain multiplication `torch.sparse.mm` operations
                gradcheck(fn, (a, b), check_sparse_nnz=True, nondet_tol=1e-5)
            else:
                gradcheck(fn, (a, b), check_sparse_nnz=True)
            grad_with_custom_sparsity_pattern_test_helper(sparse_dims, nnz, shape_a, shape_b)

    def test_error_cases():
        def fn(sparse_dims, nnz, shape_a, shape_b):
            a, i_a, v_a = self._gen_sparse(sparse_dims, nnz, shape_a, dtype, device, coalesced)
            b, i_b, v_b = self._gen_sparse(sparse_dims, nnz, shape_b, dtype, device, coalesced)
            r2 = torch.sparse.mm(a, b)

        # This is not a matrix
        self.assertRaises(RuntimeError, lambda: fn(3, 4, [2, 2, 2], [2, 2, 2]))

        # Shapes do not match
        self.assertRaisesRegex(RuntimeError,
                               r"mat1 and mat2 shapes cannot be multiplied \(2x3 and 4x2\)",
                               lambda: fn(2, 10, [2, 3], [4, 2]))

        def different_dtypes():
            a, i_a, v_a = self._gen_sparse(2, 10, [2, 2], dtype, device, coalesced)
            b, i_b, v_b = self._gen_sparse(2, 10, [2, 2], dtype, device, coalesced)
            r2 = torch.sparse.mm(a.to(torch.float64), a.to(torch.float32))

        self.assertRaisesRegex(RuntimeError, 'mat1 dtype Double does not match mat2 dtype Float', different_dtypes)

    for n in range(2, 5):
        for m in range(2, 8):
            for p in range(2, 8):
                test_sparse_matmul(2, 10, [n, m], [m, p])

    test_sparse_matmul(2, 0, [0, 0], [0, 0])
    test_sparse_matmul(2, 0, [0, 10], [10, 0])
    test_error_cases()
@coalescedonoff
@dtypes(torch.double)
def test_assign(self, device, dtype, coalesced):
    """Indexed assignment into a sparse tensor is unsupported and raises TypeError."""
    def assign_to():
        sp, _, _ = self._gen_sparse(2, 5, [2, 3], dtype, device, coalesced)
        sp[0] = 100

    self.assertRaises(TypeError, assign_to)
@dtypes(torch.double, torch.cdouble)
def test_full_broadcast_to(self, device, dtype):
    """Exhaustively check torch._sparse_broadcast_to against dense broadcast_to
    over shape pairs, covering both broadcastable and non-broadcastable cases."""
    def can_broadcast(s0, s1):
        # True iff shape s0 broadcasts to s1 (right-aligned dim comparison).
        s0 = tuple(reversed(s0))
        s1 = tuple(reversed(s1))
        for i in range(len(s0)):
            if s0[i] != 1 and s0[i] != s1[i]:
                return False
        return True

    sizes = (
        (), (1,), (2,), (1, 1), (3, 1), (3, 2), (4, 1, 1), (4, 3, 2)
    )
    for s0, s1 in itertools.combinations(sizes, r=2):
        t = make_tensor(s0, dtype=dtype, device=device, low=-9, high=9)
        for sparse_dims in range(1, len(s0) + 1):
            s = t.to_sparse(sparse_dims)
            if can_broadcast(s0, s1):
                t_res = torch.broadcast_to(t, s1)
                s_res = torch._sparse_broadcast_to(s, s1)
                torch._validate_sparse_coo_tensor_args(s_res._indices(), s_res._values(), s_res.shape)
                if s_res.is_coalesced():
                    # ensure that is_coalesced is estimated correctly
                    self.assertEqual(s_res, torch.sparse_coo_tensor(s_res._indices(), s_res._values(), s_res.shape).coalesce())
                self.assertEqual(s_res.to_dense(), t_res)
            else:
                with self.assertRaisesRegex(RuntimeError,
                                            r"The expanded size of the tensor \(\d\) "
                                            r"must match the existing size \(\d\)"):
                    torch._sparse_broadcast_to(s, s1)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_sparse_broadcast_to(self, device, dtype, coalesced):
    """_sparse_broadcast_to agrees with dense broadcast_to on selected shapes."""
    def run_case(sparse_dims, nnz, with_size, new_size):
        sp = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
        dense = self.safeToDense(sp)
        broadcast_sparse = torch._sparse_broadcast_to(sp, new_size)
        broadcast_dense = dense.broadcast_to(new_size)
        self.assertEqual(self.safeToDense(broadcast_sparse), broadcast_dense)

    run_case(4, 6, [7, 3, 1, 3, 0], [7, 3, 4, 3, 0])
    run_case(4, 6, [7, 3, 1, 3, 0], [2, 7, 3, 1, 3, 0])
    run_case(4, 6, [7, 3, 1, 3, 1, 3], [7, 3, 1, 3, 2, 3])
    run_case(4, 6, [7, 3, 1, 3, 2, 1], [7, 3, 1, 3, 2, 3])
@coalescedonoff
@dtypes(*all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16))
@precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2})
def test_sparse_dense_mul(self, device, dtype, coalesced):
    """Check sparse*dense elementwise multiplication (broadcasting both ways),
    scalar multiplication, and 0-dim sparse scalars."""
    skipTestIfUncoalesced = False
    # This case always coalesce inputs and that could lead to loss of precision,
    # hence it is inhibited for float16/bfloat16 by providing already coalesced tensors.
    if not coalesced and dtype in {torch.float16, torch.bfloat16}:
        skipTestIfUncoalesced = True
    # to_dense is problematic for boolean non-coalesced CUDA tensors
    # see https://github.com/pytorch/pytorch/issues/81648
    if not coalesced and dtype == torch.bool and torch.device(device).type == "cuda":
        skipTestIfUncoalesced = True

    if skipTestIfUncoalesced:
        self.skipTest(f"Test with dtype={dtype}, device={device} runs only with coalesced inputs")

    shape = (2, 3, 4, 10)
    nnz = 10

    def check(self, s, d):
        res = d * s

        # check commutativity
        self.assertEqual(res, s * d)

        # check correctness
        self.assertEqual(res.to_dense(), s.to_dense() * d)

        # check in-placeness for dense
        if d.dim() >= s.dim():
            dc = d.clone()
            self.assertEqual(d.mul_(s), dc.mul_(s.to_dense()))

        # check in-placeness for sparse
        if s.dim() >= d.dim():
            # for sparse
            sc = s.clone()
            self.assertEqual(s.mul_(d).to_dense(), sc.to_dense().mul_(d))

    for dim in range(len(shape) + 1):
        sub_shape = shape[dim:]
        sparse_dim = len(sub_shape) // 2

        def check_empty(sparse_shape, nnz, dense_shape, coalesce):
            # also cover zero-nnz and zero-sized trailing dimension variants
            from itertools import product
            for nnz_val, shape_suffix in product((nnz, 0), ((), (0,))):
                empty_sparse_shape = sparse_shape + shape_suffix
                empty_dense_shape = dense_shape + shape_suffix
                s = self._gen_sparse(sparse_dim, nnz_val, empty_sparse_shape, dtype, device, coalesce)[0]
                d = make_tensor(empty_dense_shape, dtype=dtype, device=device)
                check(self, s, d)

        # check scalar multiplication
        s = self._gen_sparse(sparse_dim, nnz, sub_shape, dtype, device, coalesced)[0]
        for scalar in (True, 1, 1.0):
            res_sparse_right = s * scalar
            res_sparse_left = scalar * s
            res_dense = s.to_dense() * scalar
            # check correctness and dtype
            self.assertEqual(s.to(res_sparse_right.dtype), res_sparse_right)
            self.assertEqual(res_sparse_right, res_sparse_left)
            self.assertEqual(res_sparse_right.dtype, res_dense.dtype)
            self.assertEqual(res_sparse_left.dtype, res_dense.dtype)

            # check scalar as 0-dim sparse tensor
            tscalar = torch.tensor(scalar, device=device)
            sscalar = tscalar.to_sparse()
            res_sparse_right = s * sscalar
            res_sparse_left = sscalar * s
            self.assertEqual(res_sparse_right, res_sparse_left)
            self.assertEqual(s.to(res_sparse_right.dtype), res_sparse_right)

        # check non-coalesced 0-dim scalar
        # we skip torch.bool because for such tensors
        # coalesce.to_dense != to_dense
        if dtype == torch.bool:
            return

        for scalar_dtype in (int, float):
            scalar = scalar_dtype(1)
            # 0-dim sparse tensor with nnz == 2 (duplicate entries, uncoalesced)
            idx = torch.tensor([], device=device).reshape(0, 2)
            val = torch.tensor([scalar, scalar], device=device)
            sscalar = torch.sparse_coo_tensor(idx, val, ())
            res_dense = s.to_dense() * sscalar.to_dense()
            self.assertEqual((s * sscalar).to_dense(), res_dense)
            self.assertEqual((sscalar * s).to_dense(), res_dense)

        # Case 1: sparse broadcasts over dense
        s = self._gen_sparse(sparse_dim, nnz, sub_shape, dtype, device, coalesced)[0]
        d = make_tensor(shape, dtype=dtype, device=device)
        check(self, s, d)
        check_empty(sub_shape, nnz, shape, coalesced)

        # Case 2: dense broadcasts over sparse
        s = self._gen_sparse(3, nnz, shape, dtype, device, coalesced)[0]
        d = make_tensor(sub_shape, dtype=dtype, device=device)
        check(self, s, d)
        check_empty(shape, nnz, sub_shape, coalesced)
@unittest.skipIf(not TEST_NUMPY, "NumPy is not availible")
@onlyCPU
@dtypes(*all_types_and_complex_and(torch.bool))
def test_sparse_spdiags(self, device, dtype):
    """Check torch.sparse.spdiags against scipy (or a hand-rolled fallback)
    for valid inputs, and check the error messages for invalid inputs."""
    make_diags = functools.partial(make_tensor, dtype=dtype, device=device)
    make_offsets = functools.partial(torch.tensor, dtype=torch.long, device=device)

    if TEST_SCIPY:
        def reference(diags, offsets, shape):
            return scipy.sparse.spdiags(diags, offsets, *shape).toarray()
    else:
        def reference(diags, offsets, shape):
            # Fallback: place each diagonal row by hand; positive offsets
            # drop the first `off` elements (scipy/spdiags convention).
            result = torch.zeros(shape, dtype=dtype, device=device)
            for i, off in enumerate(offsets):
                res_view = result.diagonal(off)
                data = diags[i]
                if off > 0:
                    data = data[off:]
                m = min(res_view.shape[0], data.shape[0])
                res_view[:m] = data[:m]
            return result

    def check_valid(diags, offsets, shape, layout=None):
        ref_out = reference(diags, offsets, shape)
        out = torch.sparse.spdiags(diags, offsets, shape, layout=layout)
        if layout is None:
            ex_layout = torch.sparse_coo
        else:
            ex_layout = layout
        out_dense = out.to_dense()
        self.assertTrue(out.layout == ex_layout, f"Output layout {out.layout} expected {ex_layout}")
        self.assertEqual(out_dense, ref_out, f"Result:\n{out_dense} does not match reference:\n{ref_out}")

    def check_invalid(args, error):
        with self.assertRaisesRegex(RuntimeError, error):
            torch.sparse.spdiags(*args)

    def valid_cases():
        # some normal cases
        yield (make_diags((1, 5)), make_offsets([0]), (5, 5))
        yield (make_diags((3, 3)), make_offsets([-1, 0, 1]), (4, 4))
        # noncontiguous diags
        yield (make_diags((5, 4), noncontiguous=True), make_offsets([-1, 1, 0, 2, -2]), (5, 5))
        # noncontiguous offsets
        yield (make_diags((3, 4)), make_offsets([1, -1, 0, -2, 2])[::2], (5, 5))
        # noncontiguous diags + offsets
        yield (make_diags((3, 4), noncontiguous=True), make_offsets([1, -1, 0, -2, 2])[::2], (5, 5))
        # correct dimensionality, 2d, 2d , and shapes match, but the number of diagonals is zero
        yield (make_diags((0, 3)), make_offsets([]), (3, 3))
        # forward rotation of upper diagonals
        yield (make_diags((3, 8)), make_offsets([1, 2, 3]), (4, 4))
        # rotation exhausts input space to read from
        yield (make_diags((2, 3)), make_offsets([2, 1]), (3, 3))
        # Simple cases repeated with special output format
        yield (make_diags((1, 5)), make_offsets([0]), (5, 5), torch.sparse_csc)
        yield (make_diags((3, 3)), make_offsets([-1, 0, 1]), (4, 4), torch.sparse_csr)
        # vector diags
        yield (make_diags((3, )), make_offsets([1]), (4, 4))
        # Scalar offset
        yield (make_diags((1, 3)), make_offsets(2), (4, 4))
        # offsets out of range
        yield (make_diags((1, 3)), make_offsets([3]), (3, 3))
        yield (make_diags((1, 3)), make_offsets([-3]), (3, 3))

    for case in valid_cases():
        check_valid(*case)

    def invalid_cases():
        yield (make_diags((1, 3)), make_offsets([0]), (3, 2, 3)), "Output shape must be 2d"
        yield (make_diags((2, 3)), make_offsets([[1, 2], [0, 3]]), (3, 3)), "Offsets must be scalar or vector"
        yield (make_diags((3, 2, 3)), make_offsets([0, 1, 2]), (4, 4)), "Diagonals must be vector or matrix"
        yield (make_diags((3, 3)), make_offsets([-1, 0]), (3, 3)),\
            r"Number of diagonals \(\d\) does not match the number of offsets \(\d\)"
        yield (make_diags((5,)), make_offsets([0, 1, 2, 3, 4]), (3, 3)),\
            r"Number of diagonals \(\d\) does not match the number of offsets \(\d\)"
        yield (make_diags((2, 2)), make_offsets([-1, 0]), (2, 3), torch.strided),\
            r"Only output layouts \(\w+, \w+, \w+\) are supported, got \w+"
        yield (make_diags((2, 5)), make_offsets([0, 0]), (5, 5)), "Offset tensor contains duplicate values"
        yield (make_diags((1, 5)), make_offsets([0]).to(torch.int32), (5, 5)), r"Offset Tensor must have dtype Long but got \w+"

    for case, error_regex in invalid_cases():
        check_invalid(case, error_regex)
class TestSparseOneOff(TestCase):
    """One-off regression tests for sparse tensors mixing CUDA and CPU devices."""

    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    def test_cuda_from_cpu(self):
        # Constructing from CUDA indices and CPU values must fail loudly.
        with self.assertRaisesRegex(
                RuntimeError,
                "Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!"):
            torch.sparse.FloatTensor(torch.zeros(1, 4).long().cuda(),
                                     torch.randn(4, 4, 4),
                                     [3, 4, 4])

        # Same, with a zero-sized trailing dimension.
        with self.assertRaisesRegex(
                RuntimeError,
                "Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!"):
            torch.sparse.FloatTensor(torch.zeros(1, 4).long().cuda(),
                                     torch.randn(4, 4, 4, 0),
                                     [3, 4, 4, 0])

        # Same, with zero nnz.
        with self.assertRaisesRegex(
                RuntimeError,
                "Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!"):
            torch.sparse.FloatTensor(torch.LongTensor(1, 0).cuda(),
                                     torch.randn(0, 4, 4, 0),
                                     [0, 4, 4, 0])

    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    def test_cuda_sparse_cpu_dense_add(self):
        # Adding a CUDA sparse tensor to a CPU dense tensor must fail loudly.
        x = torch.zeros(3, 4, 4)
        sparse_y = torch.cuda.sparse.FloatTensor(torch.zeros(1, 4).long().cuda(),
                                                 torch.randn(4, 4, 4).cuda(),
                                                 [3, 4, 4])
        with self.assertRaisesRegex(RuntimeError, "add: expected 'self' to be a CUDA tensor, but got a CPU tensor"):
            x + sparse_y

        # Same, with a zero-sized trailing dimension.
        x = torch.zeros(3, 4, 4, 0)
        sparse_y = torch.cuda.sparse.FloatTensor(torch.zeros(1, 4).long().cuda(),
                                                 torch.randn(4, 4, 4, 0).cuda(),
                                                 [3, 4, 4, 0])
        with self.assertRaisesRegex(RuntimeError, "add: expected 'self' to be a CUDA tensor, but got a CPU tensor"):
            x + sparse_y

        # Same, with zero nnz.
        x = torch.zeros(0, 4, 4, 0)
        sparse_y = torch.cuda.sparse.FloatTensor(torch.LongTensor(1, 0).cuda(),
                                                 torch.randn(0, 4, 4, 0).cuda(),
                                                 [0, 4, 4, 0])
        with self.assertRaisesRegex(RuntimeError, "add: expected 'self' to be a CUDA tensor, but got a CPU tensor"):
            x + sparse_y
def _sparse_to_dense(tensor):
if tensor.dtype != torch.bool:
return tensor.to_dense()
# to_dense uses coalesce which isn't implemented for bool
return tensor.to(torch.int8).to_dense().to(torch.bool)
# Decorator bundle: instantiates one test per sparse unary ufunc OpInfo,
# restricted to the standard numeric (and complex) dtypes.
_sparse_unary_ops = ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
                        allowed_dtypes=all_types_and_complex())
class TestSparseUnaryUfuncs(TestCase):
    """OpInfo-driven checks for unary ufuncs applied to sparse COO tensors."""
    exact_dtype = True

    @_sparse_unary_ops
    def test_sparse_consistency(self, device, dtype, op):
        # A unary op on a sparse tensor must match the same op on its dense form.
        sample = first_sample(self, op.sample_inputs(device, dtype))
        assert isinstance(sample.input, torch.Tensor)
        expected = op(sample.input, *sample.args, **sample.kwargs)
        assert torch.is_tensor(expected)
        output = op(sample.input.to_sparse(), *sample.args, **sample.kwargs)
        assert torch.is_tensor(output)
        self.assertEqual(_sparse_to_dense(output), expected)

    @_sparse_unary_ops
    def test_out(self, device, dtype, op):
        # The out= variant must produce the same result as the functional form.
        if not op.supports_out:
            self.skipTest("Skipped! Out not supported")

        sample = first_sample(self, op.sample_inputs(device, dtype))
        sample.input = sample.input.to_sparse()
        expect = op(sample.input, *sample.args, **sample.kwargs)

        out = torch.zeros(sample.input.shape, device=device,
                          dtype=expect.dtype, layout=torch.sparse_coo)
        op(sample.input, *sample.args, **sample.kwargs, out=out)
        self.assertEqual(out, expect)

    @_sparse_unary_ops
    def test_inplace(self, device, dtype, op):
        # The in-place variant must mutate its input and match the functional form,
        # or raise when the result dtype cannot be cast back.
        if op.inplace_variant is None:
            self.skipTest("Skipped! Out not supported")

        sample = first_sample(self, op.sample_inputs(device, dtype))
        sample.input = sample.input.to_sparse().coalesce()
        expect = op(sample.input, *sample.args, **sample.kwargs)

        if not torch.can_cast(expect.dtype, dtype):
            with self.assertRaisesRegex(RuntimeError, "result type .* can't be cast to"):
                op.inplace_variant(sample.input, *sample.args, **sample.kwargs)
            return

        actual = op.inplace_variant(sample.input, *sample.args, **sample.kwargs)
        self.assertIs(actual, sample.input)
        self.assertEqual(actual, expect)

    @_sparse_unary_ops
    def test_sparse_zero_dims(self, device, dtype, op):
        # test 0x0 sparse_coo_tensor
        indices = torch.empty(2, 0, dtype=torch.int64)
        values = torch.empty(0, dtype=dtype)
        sparse_0x0 = torch.sparse_coo_tensor(indices, values, (0, 0))
        expected = torch.sparse_coo_tensor(indices, op(values), (0, 0))
        actual = op(sparse_0x0)
        self.assertEqual(expected, actual)

    @_sparse_unary_ops
    def test_sparse_zeros(self, device, dtype, op):
        # A 0-dim all-zero sparse input must agree with the dense zero scalar.
        samples = op.sample_inputs(device, dtype)

        zero_input = torch.zeros((), device=device, dtype=dtype)
        sparse_input = torch.zeros((), dtype=dtype, device=device,
                                   layout=torch.sparse_coo)

        expect = op(zero_input)
        actual = op(sparse_input)
        self.assertEqual(expect, _sparse_to_dense(actual))

    @ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
         allowed_dtypes=[torch.double, torch.cdouble])
    def test_sparse_fn_grad(self, device, dtype, op):
        # gradcheck through the sparse input for ops that support autograd.
        if not op.supports_autograd:
            self.skipTest("Skipped! Op doesn't support autograd")

        for sample in op.sample_inputs(device, dtype):
            sparse_input = sample.input.to_sparse().detach().requires_grad_(True)

            def fn(x):
                return _sparse_to_dense(
                    op(x, *sample.args, **sample.kwargs))

            self.assertTrue(gradcheck(
                fn,
                (sparse_input,),
                check_batched_grad=False,
                check_grad_dtypes=True,
                check_sparse_nnz=True,
                nondet_tol=op.gradcheck_nondet_tol,
                fast_mode=op.gradcheck_fast_mode))
class TestSparseMaskedReductions(TestCase):
    """Checks for masked reductions on sparse COO inputs."""
    exact_dtype = True

    @ops(sparse_masked_reduction_ops)
    def test_future_empty_dim(self, device, dtype, op):
        """Currently, `dim=()` in reductions operations means "reduce over
        all dimensions" while in future, it will read "no reduce". See
        https://github.com/pytorch/pytorch/issues/29137

        For sparse masked reductions, we'll implement the current behavior.

        For testing, we'll use samples with `dim=0` and map it to
        `dim=()` until
        torch.testing._internal.common_methods_invocations._generate_reduction_kwargs
        is made to generate samples with `dim=()` for non-scalar
        inputs. With this and after gh-29137 is resolved, this test
        can be deleted. See also `torch._masked._canonical_dim`
        implementation about changing the `dim=()` behavior.
        """
        samples = op.sample_inputs_func(op, device, dtype, requires_grad=False)
        op_name = op.name.replace('_masked.', '')
        for sample_input in samples:
            if sample_input.kwargs.get('dim') != 0:
                continue
            sample_input_kwargs = dict(sample_input.kwargs)
            sample_input_kwargs['dim'] = ()    # reduce over all dimensions

            t = sample_input.input
            mask = sample_input_kwargs.get('mask')
            if mask is None and op_name in {'prod', 'amax', 'amin'}:
                # FIXME: for now reductions with non-zero reduction identity and
                # unspecified mask are not supported for sparse COO
                # tensors, see torch._masked.prod implementation
                # for details.
                continue
            sparse_op_kwargs = dict(sample_input_kwargs)
            actual = op(t.to_sparse(), *sample_input.args, **sample_input_kwargs)
            self.assertEqual(actual.layout, torch.sparse_coo)

            expected = op(t, *sample_input.args, **sample_input_kwargs).to_sparse()
            self.assertEqual(actual, expected)
class TestSparseMeta(TestCase):
    """Basic behavior of sparse tensors on the 'meta' device."""
    exact_dtype = True

    def test_basic(self):
        r = torch.empty(4, 4, layout=torch.sparse_coo, device='meta')
        self.assertTrue(r.is_meta)
        self.assertEqual(r.device.type, "meta")
        r2 = torch.empty_like(r)
        self.assertTrue(r2.is_meta)
        self.assertEqual(r, r2)
        r3 = torch.sparse_coo_tensor(size=(4, 4), device='meta')
        self.assertTrue(r3.is_meta)
        self.assertEqual(r, r3)
        # Resizing a meta sparse tensor updates its metadata only.
        r.sparse_resize_((4, 4), 1, 1)
        r.sparse_resize_and_clear_((4, 4, 4), 2, 1)
        self.assertEqual(r.sparse_dim(), 2)
        self.assertEqual(r.dense_dim(), 1)
        self.assertEqual(r._dimV(), 1)
        self.assertEqual(r._nnz(), 0)
        # TODO: nnz zero sparse tensors should always be coalesced...
        self.assertEqual(r.is_coalesced(), False)
        r._coalesced_(True)
        self.assertEqual(r.is_coalesced(), True)
        # TODO: this sort of aliasing will need to be handled by
        # functionalization
        self.assertEqual(r._indices(), torch.empty(2, 0, device='meta', dtype=torch.int64))
        self.assertEqual(r._values(), torch.empty(0, 4, device='meta'))
        self.assertEqual(r.indices(), torch.empty(2, 0, device='meta', dtype=torch.int64))
        self.assertEqual(r.values(), torch.empty(0, 4, device='meta'))
# Generate per-device variants of the test classes,
# e.g., TestSparseUnaryUfuncsCPU and TestSparseUnaryUfuncsCUDA
instantiate_device_type_tests(TestSparseUnaryUfuncs, globals(), except_for='meta')

instantiate_device_type_tests(TestSparseMaskedReductions, globals(), except_for='meta')

# e.g., TestSparseCPU and TestSparseCUDA
instantiate_device_type_tests(TestSparse, globals(), except_for='meta')

if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_sparse.py |
#!/usr/bin/env python3
# Owner(s): ["oncall: mobile"]
import sys
import os
import io
import functools
import tempfile
import urllib
import unittest
import torch
import torch.backends.xnnpack
import torch.utils.model_dump
import torch.utils.mobile_optimizer
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, skipIfNoXNNPACK
from torch.testing._internal.common_quantized import supported_qengines
class SimpleModel(torch.nn.Module):
    """Tiny two-layer MLP (16 -> 64 -> 8) with ReLU activations."""

    def __init__(self):
        super().__init__()
        self.layer1 = torch.nn.Linear(16, 64)
        self.relu1 = torch.nn.ReLU()
        self.layer2 = torch.nn.Linear(64, 8)
        self.relu2 = torch.nn.ReLU()

    def forward(self, features):
        hidden = self.relu1(self.layer1(features))
        return self.relu2(self.layer2(hidden))
class QuantModel(torch.nn.Module):
    """SimpleModel sandwiched between quant/dequant stubs for eager-mode
    quantization testing."""

    def __init__(self):
        super().__init__()
        self.quant = torch.ao.quantization.QuantStub()
        self.dequant = torch.ao.quantization.DeQuantStub()
        self.core = SimpleModel()

    def forward(self, x):
        out = self.quant(x)
        out = self.core(out)
        out = self.dequant(out)
        return out
class ModelWithLists(torch.nn.Module):
    """Module holding tensors inside plain Python lists, one of which is an
    Optional (contains None) — exercises list/Optional handling in dump."""

    def __init__(self):
        super().__init__()
        self.rt = [torch.zeros(1)]
        self.ot = [torch.zeros(1), None]

    def forward(self, arg):
        result = arg + self.rt[0]
        maybe = self.ot[0]
        if maybe is not None:
            result = result + maybe
        return result
def webdriver_test(testfunc):
    """Decorator: run *testfunc* once per Selenium webdriver (Firefox, Chrome).

    The wrapped test is skipped unless the RUN_WEBDRIVER environment variable
    is set to "1"; selenium is imported lazily so it is only required when
    the tests actually run.
    """
    @functools.wraps(testfunc)
    def wrapper(self, *args, **kwds):
        self.needs_resources()
        if os.environ.get("RUN_WEBDRIVER") != "1":
            self.skipTest("Webdriver not requested")
        from selenium import webdriver
        for driver_name in ("Firefox", "Chrome"):
            with self.subTest(driver=driver_name):
                wd = getattr(webdriver, driver_name)()
                testfunc(self, wd, *args, **kwds)
                wd.close()
    return wrapper
class TestModelDump(TestCase):
    """Tests for torch.utils.model_dump, the model-archive inspection tool."""

    def needs_resources(self):
        # model_dump loads its HTML skeleton via importlib.resources.
        if sys.version_info < (3, 7):
            self.skipTest("importlib.resources was new in 3.7")

    def test_inline_skeleton(self):
        self.needs_resources()
        skel = torch.utils.model_dump.get_inline_skeleton()
        # The inline skeleton must be self-contained: no CDN references,
        # no external script/img sources.
        assert "unpkg.org" not in skel
        assert "src=" not in skel

    def do_dump_model(self, model, extra_files=None):
        # Just check that we're able to run successfully.
        buf = io.BytesIO()
        torch.jit.save(model, buf, _extra_files=extra_files)
        info = torch.utils.model_dump.get_model_info(buf)
        assert info is not None

    def open_html_model(self, wd, model, extra_files=None):
        # Serialize the model, render the dump page, and load it into the
        # webdriver via a data: URL (no temp files needed).
        buf = io.BytesIO()
        torch.jit.save(model, buf, _extra_files=extra_files)
        page = torch.utils.model_dump.get_info_and_burn_skeleton(buf)
        wd.get("data:text/html;charset=utf-8," + urllib.parse.quote(page))

    def open_section_and_get_body(self, wd, name):
        # Expand the named collapsible section (if collapsed) and return
        # its content element.
        container = wd.find_element_by_xpath(f"//div[@data-hider-title='{name}']")
        caret = container.find_element_by_class_name("caret")
        if container.get_attribute("data-shown") != "true":
            caret.click()
        content = container.find_element_by_tag_name("div")
        return content

    def test_scripted_model(self):
        model = torch.jit.script(SimpleModel())
        self.do_dump_model(model)

    def test_traced_model(self):
        model = torch.jit.trace(SimpleModel(), torch.zeros(2, 16))
        self.do_dump_model(model)

    def test_main(self):
        # Exercise the command-line entry point in both output styles.
        self.needs_resources()
        if IS_WINDOWS:
            # I was getting tempfile errors in CI. Just skip it.
            self.skipTest("Disabled on Windows.")
        with tempfile.NamedTemporaryFile() as tf:
            torch.jit.save(torch.jit.script(SimpleModel()), tf)
            stdout = io.StringIO()
            torch.utils.model_dump.main(
                [
                    None,
                    "--style=json",
                    tf.name,
                ],
                stdout=stdout)
            self.assertRegex(stdout.getvalue(), r'\A{.*SimpleModel')
            stdout = io.StringIO()
            torch.utils.model_dump.main(
                [
                    None,
                    "--style=html",
                    tf.name,
                ],
                stdout=stdout)
            self.assertRegex(
                stdout.getvalue().replace("\n", " "),
                r'\A<!DOCTYPE.*SimpleModel.*componentDidMount')

    def get_quant_model(self):
        # Build a quantized QuantModel via the eager-mode fuse/prepare/convert
        # workflow (qnnpack backend).
        fmodel = QuantModel().eval()
        fmodel = torch.ao.quantization.fuse_modules(fmodel, [
            ["core.layer1", "core.relu1"],
            ["core.layer2", "core.relu2"],
        ])
        fmodel.qconfig = torch.ao.quantization.get_default_qconfig("qnnpack")
        prepped = torch.ao.quantization.prepare(fmodel)
        prepped(torch.randn(2, 16))
        qmodel = torch.ao.quantization.convert(prepped)
        return qmodel

    @unittest.skipUnless("qnnpack" in supported_qengines, "QNNPACK not available")
    def test_quantized_model(self):
        qmodel = self.get_quant_model()
        self.do_dump_model(torch.jit.script(qmodel))

    @skipIfNoXNNPACK
    @unittest.skipUnless("qnnpack" in supported_qengines, "QNNPACK not available")
    def test_optimized_quantized_model(self):
        qmodel = self.get_quant_model()
        smodel = torch.jit.trace(qmodel, torch.zeros(2, 16))
        omodel = torch.utils.mobile_optimizer.optimize_for_mobile(smodel)
        self.do_dump_model(omodel)

    def test_model_with_lists(self):
        model = torch.jit.script(ModelWithLists())
        self.do_dump_model(model)

    def test_invalid_json(self):
        # Malformed extra-file JSON should not crash the dump.
        model = torch.jit.script(SimpleModel())
        self.do_dump_model(model, extra_files={"foo.json": "{"})

    @webdriver_test
    def test_memory_computation(self, wd):
        def check_memory(model, expected):
            self.open_html_model(wd, model)
            memory_table = self.open_section_and_get_body(wd, "Tensor Memory")
            device = memory_table.find_element_by_xpath("//table/tbody/tr[1]/td[1]").text
            self.assertEqual("cpu", device)
            memory_usage_str = memory_table.find_element_by_xpath("//table/tbody/tr[1]/td[2]").text
            self.assertEqual(expected, int(memory_usage_str))

        simple_model_memory = (
            # First layer, including bias.
            64 * (16 + 1) +
            # Second layer, including bias.
            8 * (64 + 1)
            # 32-bit float
        ) * 4

        check_memory(torch.jit.script(SimpleModel()), simple_model_memory)

        # The same SimpleModel instance appears twice in this model.
        # The tensors will be shared, so ensure no double-counting.
        a_simple_model = SimpleModel()
        check_memory(
            torch.jit.script(
                torch.nn.Sequential(a_simple_model, a_simple_model)),
            simple_model_memory)

        # The freezing process will move the weight and bias
        # from data to constants. Ensure they are still counted.
        check_memory(
            torch.jit.freeze(torch.jit.script(SimpleModel()).eval()),
            simple_model_memory)

        # Make sure we can handle a model with both constants and data tensors.
        class ComposedModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.w1 = torch.zeros(1, 2)
                self.w2 = torch.ones(2, 2)

            def forward(self, arg):
                return arg * self.w2 + self.w1

        check_memory(
            torch.jit.freeze(
                torch.jit.script(ComposedModule()).eval(),
                preserved_attrs=["w1"]),
            4 * (2 + 4))
# Standard PyTorch test entry point.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_model_dump.py |
# Owner(s): ["module: ProxyTensor"]
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import unittest
import warnings
import torch.nn.utils._stateless as stateless
from collections.abc import Iterable
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_methods_invocations import DecorateInfo
from torch.testing._internal.common_methods_invocations import op_db, wrapper_set_seed
from torch._subclasses.fake_tensor import DynamicOutputShapeException
from torch._decomp import decomposition_table
from torch.testing._internal.common_device_type import ops
from torch._C import _disabled_torch_function_impl
from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule
from torch.utils._pytree import tree_map
from torch import nn
import re
import types
import functools
# Short alias used throughout the symbolic-tracing tests.
aten = torch.ops.aten

# sympy is an optional dependency of the symbolic-shape tests; probe for it
# once here so individual tests can be skipped cleanly.
try:
    import sympy  # noqa: F401
except ImportError:
    HAS_SYMPY = False
else:
    HAS_SYMPY = True
skipIfNoSympy = unittest.skipIf(not HAS_SYMPY, "no sympy")
def process_failures():
    """
    Takes file containing failures like

    FAILED test/test_proxy_tensor.py::TestProxyTensorOpInfoCPU::test_make_fx_symbolic_exhaustive___getitem___cpu_float32 - RuntimeError: aten.size.default - couldn't find symbolic meta function/decomposition  # noqa: B950

    and processes them into a list of opinfo xfails, printed to stdout in a
    form that can be pasted into `symbolic_tensor_failures` below.
    """
    # Fix: use a context manager so the file handle is always closed
    # (the original opened 'pytest_failures' and never closed it).
    with open('pytest_failures') as f:
        failures = f.readlines()
    failures = [i.strip() for i in failures]

    def process_failure_string(s, matcher):
        # Returns the (normalized_test_name, failure_reason) groups.
        out = re.search(matcher, s)
        return out.groups()

    SYMBOLIC_TRACE_MATCH = r'exhaustive_(.*)_cpu.*: (.*)'
    failures = [process_failure_string(s, SYMBOLIC_TRACE_MATCH) for s in failures]

    def create_normalized_name(op):
        # Mirror the name-mangling the exhaustive tests apply to OpInfo names.
        if op.variant_test_name == '':
            s = op.name
        else:
            s = f"{op.name}.{op.variant_test_name}"
        return s.replace('.', '_')

    remap_opinfo = {create_normalized_name(op): (op.name, op.variant_test_name) for op in op_db}

    print("symbolic_tensor_failures = {")
    for failure, reason in failures:
        print(f"    xfail{remap_opinfo[failure]}, # {reason}")
    print("}")
def copy_func(f):
    """Return an independent copy of function *f* (same code, globals,
    defaults, and closure).

    Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard).
    """
    clone = types.FunctionType(
        f.__code__,
        f.__globals__,
        name=f.__name__,
        argdefs=f.__defaults__,
        closure=f.__closure__,
    )
    clone = functools.update_wrapper(clone, f)
    clone.__kwdefaults__ = f.__kwdefaults__
    return clone
# Copied from functorch
def xfail(op_name, variant_name='', *, device_type=None, dtypes=None):
    """Build a skip-list entry marking the op as an expected failure."""
    entry = (op_name, variant_name, device_type, dtypes, True)
    return entry
def skip(op_name, variant_name='', *, device_type=None, dtypes=None):
    """Build a skip-list entry marking the op as skipped (not xfailed)."""
    return op_name, variant_name, device_type, dtypes, False
def skipOps(test_case_name, base_test_name, to_skip):
    """Attach skip/xfail DecorateInfos to the OpInfos named in *to_skip*.

    Each entry is a tuple produced by xfail()/skip() above. Returns a
    decorator that leaves the decorated test class untouched (the real work
    happens by mutating the OpInfo decorators as a side effect).
    """
    for entry in to_skip:
        op_name, variant_name, device_type, dtypes, expected_failure = entry
        matching_opinfos = [
            o for o in op_db
            if o.name == op_name and o.variant_test_name == variant_name
        ]
        assert len(matching_opinfos) >= 1, f"Couldn't find OpInfo for {entry}"
        marker = (
            unittest.expectedFailure
            if expected_failure
            else unittest.skip("Skipped!")
        )
        for opinfo in matching_opinfos:
            info = DecorateInfo(
                marker, test_case_name, base_test_name,
                device_type=device_type, dtypes=dtypes,
            )
            opinfo.decorators = opinfo.decorators + (info,)

    # This decorator doesn't modify fn in any way
    def wrapped(fn):
        return fn
    return wrapped
# Probe for torchvision; some tests (e.g. resnet18 backward trace) need it.
try:
    import torchvision
except ImportError:
    USE_TORCHVISION = False
    warnings.warn("Couldn't import torchvision. Some of our tests use it, try "
                  "to install it with commands from pytorch.org, post-fixed with "
                  "`--no-deps` to avoid overwriting the pytorch installation",
                  UserWarning)
else:
    USE_TORCHVISION = True
def _create_new_input(x):
if not isinstance(x, torch.Tensor):
return x
if x.dtype != torch.float:
return x + 1
if x.is_leaf:
return torch.rand_like(x, requires_grad=x.requires_grad)
else:
return torch.rand_like(x)
"""
Delays a cos being executed on the unwraptensor until its used. Simulates a CommTensor used
"""
class UnwrapTensor(torch.Tensor):
    # Wrapper tensor subclass used by test_trace_subclasses: any op dispatched
    # on it first unwraps the payload and applies cos(), simulating work that
    # is deferred until the tensor is consumed.

    @staticmethod
    def __new__(cls, tensor: torch.Tensor):
        # Build a wrapper subclass mirroring the payload's metadata; the
        # actual data lives in r._tensor, not in the wrapper itself.
        r = torch.Tensor._make_wrapper_subclass(
            cls,
            tensor.size(),
            dtype=tensor.dtype,
            device=tensor.device,
            layout=tensor.layout,
            requires_grad=tensor.requires_grad,
        )
        r._tensor = tensor
        return r

    def __repr__(self):
        # TODO: consider all_gather the local tensors for better debugging
        return f"UnwrapTensor({self._tensor})"

    # Disable __torch_function__ so only __torch_dispatch__ intercepts ops.
    __torch_function__ = _disabled_torch_function_impl

    @classmethod
    def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
        def unwrap(e):
            ret = e
            if isinstance(e, UnwrapTensor):
                # The deferred cos() happens here, at the point of use.
                ret = e._tensor.cos()
            return ret

        args = tree_map(unwrap, args)
        kwargs = tree_map(unwrap, kwargs)
        return func(*args, **kwargs)
class TestGenericProxyTensor(TestCase):
    """make_fx tests shared across tracing modes; subclasses set
    `tracing_mode` to "real", "fake", or "symbolic"."""

    # WARNING: if any of your inputs are index tensors, DO NOT use this
    # function
    def _test(self, f, inps):
        # Trace f, then check the traced graph matches eager on *fresh*
        # inputs (so the trace can't just bake in the original values).
        fx_f = make_fx(f, tracing_mode=self.tracing_mode)(*inps)
        new_inps = tree_map(_create_new_input, inps)
        self.assertEqual(fx_f(*new_inps), f(*new_inps))

    def test_make_fx_simple(self):
        def f(x):
            return torch.sin(x)
        self._test(f, (torch.randn(3),))

    def test_scalar_device(self, device='cpu'):
        # Mixing a device tensor with a 0-dim CPU scalar tensor.
        def f(a, b):
            return a + b
        self._test(f, [torch.randn(3, device=device), torch.tensor(5)])

    def test_isolated_graphmodule(self):
        # Helpers that check whether a given op appears in a traced graph.
        def is_any_sum(gm):
            return any(node.target == torch.ops.aten.sum.default for node in gm.graph.nodes)

        def is_any_digamma(gm):
            return any(node.target == torch.ops.aten.digamma.default for node in gm.graph.nodes)

        def is_any_sigmoid(gm):
            return any(node.target == torch.ops.aten.sigmoid.default for node in gm.graph.nodes)

        def inner(x):
            return torch.sum(x)

        def f(x):
            gm = get_isolated_graphmodule(inner, (x,), {})
            self.assertTrue(is_any_sum(gm))
            return x + torch.randn(x.shape)

        # get_isolated_graphmodule uses make_fx internally that shouldn't be traced
        # by the outer make_fx call
        traced = make_fx(f)(torch.randn(3))
        self.assertFalse(is_any_sum(traced))

        # When factory functions are used, they should not be traced
        # by the outer make_fx call
        def inner_with_factory():
            val = torch.tensor(float(1))
            val.add_(2)
            return torch.full((10, 10), val).sum()

        def f1(x):
            gm = get_isolated_graphmodule(inner_with_factory, (), {})
            self.assertTrue(is_any_sum(gm))
            return torch.sigmoid(x)

        def f2(x):
            gm = get_isolated_graphmodule(f1, (x,), {})
            self.assertFalse(is_any_sum(gm))
            self.assertTrue(is_any_sigmoid(gm))
            return torch.digamma(x)

        traced = make_fx(f2)(torch.randn(3))
        self.assertFalse(is_any_sum(traced))
        self.assertFalse(is_any_sigmoid(traced))
        self.assertTrue(is_any_digamma(traced))

        # Verify nested make_fx calls don't make factory functions to be leaked
        # into the outer graph
        def f2(x):
            gm = make_fx(f1)(x)
            self.assertFalse(is_any_sum(gm))
            self.assertTrue(is_any_sigmoid(gm))
            return torch.digamma(x)

        traced = make_fx(f2)(torch.randn(3))
        self.assertFalse(is_any_sum(traced))
        self.assertTrue(is_any_sigmoid(traced))
        self.assertTrue(is_any_digamma(traced))

        # Verify interaction with non-ProxyTensor modes
        from torch.testing._internal.logging_tensor import LoggingTensorMode

        def f1_logging(x):
            with LoggingTensorMode():
                gm = get_isolated_graphmodule(inner_with_factory, (), {})
                self.assertTrue(is_any_sum(gm))
            return torch.sigmoid(x)

        def f2_logging(x):
            with LoggingTensorMode(), LoggingTensorMode():
                gm = get_isolated_graphmodule(f1_logging, (x,), {})
                self.assertFalse(is_any_sum(gm))
                self.assertTrue(is_any_sigmoid(gm))
            return torch.digamma(x)

        traced = make_fx(f2_logging)(torch.randn(3))
        self.assertFalse(is_any_sum(traced))
        self.assertFalse(is_any_sigmoid(traced))
        self.assertTrue(is_any_digamma(traced))

        # Verify interaction with another tensor subclass
        # This case currently doesn't work and should raise an error
        # See: https://github.com/pytorch/pytorch/pull/81764#issuecomment-1200472068
        from torch.testing._internal.logging_tensor import LoggingTensor

        def f1_logging_tensor(x):
            gm = get_isolated_graphmodule(inner_with_factory, (), {})
            self.assertTrue(is_any_sum(gm))
            return torch.sigmoid(x)

        def f2_logging_tensor(x):
            x = LoggingTensor(x)
            gm = get_isolated_graphmodule(f1_logging_tensor, (x,), {})
            self.assertFalse(is_any_sum(gm))
            self.assertTrue(is_any_sigmoid(gm))
            return torch.digamma(x)

        with self.assertRaisesRegex(AssertionError, "ProxyTensor is wrapped with another Tensor subclass"):
            traced = make_fx(f2_logging_tensor)(torch.randn(3))
            self.assertFalse(is_any_sum(traced))
            self.assertFalse(is_any_sigmoid(traced))  # this fails, sigmoid is traced with LoggingTensor
            self.assertTrue(is_any_digamma(traced))

    def test_proxy_tensor_mode_with_decomp_table_preserves_proxy(self):
        def f(x):
            y = x.new_zeros(x.size())
            y.copy_(x)
            return y

        def _new_zeros_decomp(inp, size, dtype=None, layout=None, device=None, pin_memory=None):
            return torch.zeros(size, dtype=inp.dtype, device=inp.device)

        factory_func_decomp = {torch.ops.aten.new_zeros.default: _new_zeros_decomp}

        # When new_zeros() decomposes into torch.zero(), we expect ProxyTensorMode
        # to still be (re-entrantly) enabled, so that the `torch.zero()` call
        # returns a ProxyTensor.
        out = make_fx(f, decomposition_table=factory_func_decomp)(torch.ones(2))
        self.assertExpectedInline(out.code, """\
def forward(self, x_1):
    zeros = torch.ops.aten.zeros.default([2], dtype = torch.float32, device = device(type='cpu'), pin_memory = False)
    copy__default = torch.ops.aten.copy_.default(zeros, x_1); zeros = x_1 = None
    return copy__default
    """)

    def test_make_fx_reentrant_dispatch(self):
        # A decomposition that itself dispatches ops: the decomposed ops
        # (sqrt/sum/square) should be traced, not the original norm/square.
        def f(x):
            return torch.ops.aten.norm.Scalar(x, 2.0)

        def norm_decomp(x, p=2.0):
            if p != 2.0:
                raise RuntimeError("can't handle with p != 2")
            return torch.sqrt(torch.sum(torch.square(x)))

        decomp = {torch.ops.aten.norm.Scalar: norm_decomp}
        traced = make_fx(f, decomposition_table=decomp, tracing_mode=self.tracing_mode)(torch.rand(3))
        for n in traced.graph.nodes:
            self.assertTrue("square" not in str(n.target))
            self.assertTrue("norm" not in str(n.target))

    @unittest.skipIf(not USE_TORCHVISION, "test requires torchvision")
    def test_resnet18_backward_trace(self):
        mod = torchvision.models.resnet18()

        # An old version of this test called the module directly.  This works
        # for tracing_mode == "real", but for fake tensors, we also have to
        # ensure that the parameters and buffers get wrapped in fake tensors
        # because free fake tensors are not supported.  Fortunately stateless
        # does precisely this for us.
        def f(x, params, buffers):
            for p in params.values():
                p.grad = None
            loss = stateless.functional_call(mod, {**params, **buffers}, (x,)).sum()
            # I could have done this with the functional API, but there is
            # plenty of exercising this; I want to show mutating API still
            # works
            loss.backward()
            return [p.grad for p in params.values()]

        inp = torch.randn(3, 3, 250, 250)
        self._test(f, [inp, dict(mod.named_parameters()), dict(mod.named_buffers())])

    def test_varargs(self):
        def f(*args):
            return sum(args)
        self._test(f, [torch.randn(2), torch.randn(2)])

    def test_proxy_tensor(self):
        # Exercise both the functional grad API and the mutating backward API.
        def f_grad(x):
            val = x.cos().cos().sum()
            return torch.autograd.grad(val, x)

        def f_backward(x):
            val = x.cos().cos().sum()
            val.backward()
            return x.grad

        for f in [f_grad, f_backward]:
            self._test(f, [torch.randn(3, requires_grad=True)])

    def test_inplace_metadata(self):
        # In-place metadata mutation (unsqueeze_) must be visible during tracing.
        def f(x):
            x = x.clone()
            x.unsqueeze_(-1)
            assert x.shape[-1] == 1
            return x
        self._test(f, [torch.randn(5)])

    def test_mode_tracing_factory_function(self):
        def f(x):
            return x + torch.randn(x.shape)

        # default behavior should trace factory functions
        traced = make_fx(f, tracing_mode=self.tracing_mode)(torch.randn(3))
        self.assertTrue(
            any(
                node.target == aten.randn.default
                for node in traced.graph.nodes
            )
        )

    def test_make_fx_overloads(self):
        # All call_function nodes should be resolved OpOverloads, not packets.
        def f(x):
            return x.cos() + torch.randn(x.shape)

        traced = make_fx(f, tracing_mode=self.tracing_mode)(torch.randn(3))
        self.assertTrue(all([isinstance(node.target, torch._ops.OpOverload)
                             for node in traced.graph.nodes if node.op == 'call_function']))

    def test_tensor_constants(self):
        def f():
            val = torch.tensor(float('inf'))
            return torch.full((100, 100), val)
        self._test(f, [])

    def test_allclose(self):
        # allclose returns a Python bool, which is data-dependent and
        # therefore untraceable.
        def f(a, b):
            return torch.allclose(a, b)

        self.assertRaisesRegex(
            RuntimeError, "data-dependent",
            lambda: make_fx(f, tracing_mode=self.tracing_mode)(
                torch.zeros(3), torch.zeros(3)
            )
        )

    def test_constant_proxy_tensor_mut(self):
        def f():
            val = torch.tensor(float(1))
            val.add_(2)
            return torch.full((100, 100), val)

        g = make_fx(f, tracing_mode=self.tracing_mode)()
        self.assertEqual(g(), f())
        # In case we mutated shared state in the g graph!
        self.assertEqual(g(), f())

    def test_constant_unbind(self):
        def f():
            val = torch.tensor([2])
            r, = torch.unbind(val, 0)
            return r.item()

        g = make_fx(f, tracing_mode=self.tracing_mode)()
        self.assertEqual(g(), f())

    def test_decomposition_interpreter(self):
        # Re-interpret a traced graph through a decomposition table and check
        # the decomposed graph no longer contains the original op.
        def fn(x):
            return torch.nn.functional.silu(x)

        x = torch.rand((4, 4))
        fx_module = make_fx(fn, tracing_mode=self.tracing_mode, decomposition_table=None)(x)

        found_silu = False
        for n in fx_module.graph.nodes:
            if n.target == torch.ops.aten.silu or n.target == torch.ops.aten.silu.default:
                found_silu = True

        self.assertTrue(found_silu)

        new_graph = torch.fx.Graph()
        silu_decomp_table = {torch.ops.aten.silu.default: decomposition_table[torch.ops.aten.silu.default]}
        DecompositionInterpreter(
            fx_module,
            new_graph=new_graph,
            decomposition_table=silu_decomp_table,
        ).run(x)

        decomposed_module = torch.fx.GraphModule(fx_module, new_graph)

        for n in decomposed_module.graph.nodes:
            self.assertTrue(n.target != torch.ops.aten.silu)
            self.assertTrue(n.target != torch.ops.aten.silu.default)

        self.assertEqual(fx_module(x), decomposed_module(x))

    def test_make_fx_model_fwd_bwd(self):
        class Foo(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.linear = torch.nn.Linear(5, 5)

            def forward(self, x):
                return self.linear(x).relu()

        model = Foo()

        def f(x, params):
            out = stateless.functional_call(model, params, x).sum()
            out.backward()
            return list(params.values())

        input = torch.randn(3, 5, requires_grad=True)
        params = dict(model.named_parameters())
        fx_f = make_fx(f, tracing_mode=self.tracing_mode)(input, params)
        # fx may change the order of parameters in list, so using set() to compare
        self.assertTrue(
            torch.allclose(fx_f(input, params)[0], f(input, params)[0])
            or
            torch.allclose(fx_f(input, params)[0], f(input, params)[1])
        )
        self.assertTrue(
            torch.allclose(fx_f(input, params)[1], f(input, params)[0])
            or
            torch.allclose(fx_f(input, params)[1], f(input, params)[1])
        )

    def test_make_fx_model_fwd_bwd_wgtupdate(self):
        class Foo(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.linear = torch.nn.Linear(5, 5)

            def forward(self, x):
                return self.linear(x).relu()

        model = Foo()

        def f(args, params, buffers):
            if not isinstance(args, Iterable):
                args = [args]
            params_and_buffers = {**params, **buffers}
            out = stateless.functional_call(model, params_and_buffers, args)
            out.sum().backward()
            # SGD-style weight update, traced end to end.
            return [p - 1e-4 * p.grad for p in params.values()]

        input = torch.randn(3, 5, requires_grad=True)
        params = dict(model.named_parameters())
        buffers = dict(model.named_buffers())
        fx_f = make_fx(f, tracing_mode=self.tracing_mode)(input, params, buffers)
        # fx may change the order of parameters in list, so using set() to compare
        # also there is a numerical difference in results so changing atol from 1e-08 to 1e-03
        self.assertTrue(
            torch.allclose(fx_f(input, params, buffers)[0], f(input, params, buffers)[0], atol=1e-03)
            or
            torch.allclose(fx_f(input, params, buffers)[0], f(input, params, buffers)[1], atol=1e-03)
        )
        self.assertTrue(
            torch.allclose(fx_f(input, params, buffers)[1], f(input, params, buffers)[0], atol=1e-03)
            or
            torch.allclose(fx_f(input, params, buffers)[1], f(input, params, buffers)[1], atol=1e-03)
        )

    def test_trace_subclasses(self):
        # UnwrapTensor applies cos() on unwrap; the trace must reflect that.
        def f(x):
            x = UnwrapTensor(x)
            y = x * 2
            return y

        inp = [torch.randn(5)]
        self._test(f, [torch.randn(5)])
class TestGenericProxyTensorReal(TestGenericProxyTensor):
    # Run the shared suite with real (eager) tensors.
    tracing_mode = "real"
class TestGenericProxyTensorFake(TestGenericProxyTensor):
    # Run the shared suite with fake tensors (no real data, meta-only compute).
    tracing_mode = "fake"
def xfail_inherited_tests(tests):
    """
    Given a list of test names which are defined by a superclass of the
    class this decorates, mark them as expected failure. This is useful
    if you are doing poor man's parameterized tests by subclassing a generic
    test class.
    """
    def deco(cls):
        # NB: expectedFailure operates by mutating the method in question,
        # which is why you have to copy the function first
        for name in tests:
            method_copy = copy_func(getattr(cls, name))
            setattr(cls, name, unittest.expectedFailure(method_copy))
        return cls
    return deco
@skipIfNoSympy
@xfail_inherited_tests([
    # Inherited tests that symbolic tracing cannot handle yet; they are
    # expected to fail until symbolic meta support lands for the ops involved.
    "test_inplace_metadata",
    "test_mode_tracing_factory_function",
    "test_make_fx_overloads",
    "test_make_fx_model_fwd_bwd_wgtupdate",
    "test_make_fx_model_fwd_bwd",
    "test_proxy_tensor",
    "test_resnet18_backward_trace",
    "test_trace_subclasses",
])
class TestGenericProxyTensorSymbolic(TestGenericProxyTensor):
    # Run the shared suite with symbolic shapes (requires sympy).
    tracing_mode = "symbolic"
# Remove the abstract base so unittest only discovers and runs the
# Real/Fake/Symbolic subclasses, not the un-parameterized template.
del TestGenericProxyTensor
class TestRealProxyTensor(TestCase):
    # Placeholder for real-tracing-mode-specific tests (none yet).
    pass
class TestFakeProxyTensor(TestCase):
    """Tests specific to make_fx with tracing_mode='fake'."""

    def test_issue82547(self):
        # Free (non-fake) tensors captured from the closure must be rejected
        # under fake tracing.
        x = nn.Parameter(torch.randn(3, 3))

        def f():
            return torch.ops.aten.t.default(x)
        self.assertRaisesRegex(Exception, "non-Fake Tensor", lambda: make_fx(f, tracing_mode="fake")())

        # Same story for a captured plain tensor subclass.
        class A(torch.Tensor):
            pass

        x = A(torch.randn(3, 3))
        self.assertRaisesRegex(TypeError, "no implementation found", lambda: make_fx(f, tracing_mode="fake")())

    def test_use_fake_and_tensor(self):
        # Constant tensors created inside the traced function should still
        # work alongside fake inputs.
        def f(x, y):
            z = torch.tensor([2.0, 3.0])
            return x + y + z

        g = make_fx(f, tracing_mode="fake")(torch.randn(2), torch.randn(2))
        x, y = torch.randn(2), torch.randn(2)
        self.assertEqual(g(x, y), f(x, y))
# TODO: Need to test the guards themselves specifically as well
@skipIfNoSympy
class TestSymbolicTracing(TestCase):
    """Tests for make_fx(tracing_mode='symbolic'): dynamic shapes + guards."""

    def _test_dynamic(self, fn, trace_inputs, test_inputs, assert_eq=True):
        """
        Tests fn traced with trace_inputs against test_inputs
        Also returns shape env
        """
        trace_inputs = [torch.randn(shape) for shape in trace_inputs]
        traced_f = make_fx(fn, tracing_mode="symbolic")(*trace_inputs)
        # The symbolic trace should generalize to differently-shaped inputs.
        for input in test_inputs:
            input = [torch.randn(shape) for shape in input]
            rx, ry = traced_f(*input), fn(*input)
            if assert_eq:
                self.assertEqual(rx, ry)
        return traced_f.shape_env

    def test_unary(self):
        def f(x):
            # The assert produces a shape guard (x.shape[0] < 20).
            assert x.shape[0] < 20
            return x.cos()
        test_inputs = []
        test_inputs.append([(2, 5)])
        test_inputs.append([(6, 8)])
        shape_env = self._test_dynamic(f, [(3, 4)], test_inputs)
        self.assertTrue(shape_env.evaluate_guards_for_args(torch.randn(4, 5)))
        self.assertFalse(shape_env.evaluate_guards_for_args(torch.randn(25, 5)))
        assert len(shape_env.guards) == 1

    def test_binary_broadcast(self):
        # Broadcasting against size-1 dims should not introduce guards.
        def f(a, b):
            c = a * b
            return c

        test_inputs = []
        test_inputs.append([(1, 5), (3, 1)])
        test_inputs.append([(1, 4), (4, 1)])
        shape_env = self._test_dynamic(f, [(1, 2), (3, 1)], test_inputs)
        assert len(shape_env.guards) == 0

    def test_multiply_shape(self):
        # Symbolic sizes should appear as size()/mul nodes in the graph.
        def f(a):
            return torch.empty(a.shape[0] * 2)

        r = str(make_fx(f, tracing_mode="symbolic")(torch.empty(4)).code).strip()
        self.assertExpectedInline(r, """\
def forward(self, a_1):
    size = a_1.size(0); a_1 = None
    mul = size * 2; size = None
    empty = torch.ops.aten.empty.SymInt([mul], device = device(type='cpu'), pin_memory = False); mul = None
    size_1 = empty.size(0)
    return empty""")

    def test_cat(self):
        def f(a, b):
            val = torch.mul(a, b)
            out = torch.cat([val, val])
            # Data-independent shape branch: produces one guard.
            if out.shape[0] * out.shape[1] > 20:
                out = out.cos()
            return out

        test_inputs = []
        test_inputs.append([(1, 5), (6, 1)])
        test_inputs.append([(1, 4), (3, 1)])
        shape_env = self._test_dynamic(f, [(1, 6), (8, 1)], test_inputs)
        self.assertTrue(shape_env.evaluate_guards_for_args(torch.randn(1, 10), torch.randn(6, 1)))
        self.assertFalse(shape_env.evaluate_guards_for_args(torch.randn(1, 2), torch.randn(4, 1)))
        assert len(shape_env.guards) == 1

    def test_new_empty(self):
        # new_empty yields uninitialized values, so don't compare outputs.
        def f(a, b):
            return a.new_empty(b.shape[0], b.shape[1] * 2)

        self._test_dynamic(f, [(2, 4), (4, 5)], [[(2, 3), (5, 7)], [(3, 7), (9, 3)]], assert_eq=False)

    def test_expand(self):
        def f(a):
            b = torch.mul(a, a)
            c = b.expand(a.shape)
            return c

        self._test_dynamic(f, [(3,)], [[(3,)], [(4,)], [(2,)]])
        self._test_dynamic(f, [(5, 1)], [[(4, 1)], [(3, 1)], [(6, 1)]])
# OpInfo entries expected to fail (xfail) or be skipped entirely (skip) when
# traced through make_fx, regardless of tracing mode.  Each entry carries a
# category comment; `xfail` entries are expected failures that should start
# passing once fixed, while `skip` entries are not run at all (flaky,
# segfaulting, or otherwise unsafe to execute).
# NOTE(review): `xfail`/`skip` presumably come from the test-framework
# decorators defined earlier in this file — verify against the imports.
make_fx_failures = {
    # unknown
    xfail('allclose'),
    xfail('equal'),
    xfail('linalg.eigvals'),
    xfail('nn.functional.max_pool1d', device_type='cpu'),
    # empty
    skip('new_empty'),
    skip('empty_like'),
    skip('empty'),
    # flaky
    skip('linalg.lstsq', 'grad_oriented'),
    skip('nn.functional.max_unpool1d', '', device_type='cpu'),
    skip('nn.functional.max_unpool2d', '', device_type='cpu'),
    skip('nn.functional.max_unpool3d', '', device_type='cpu'),
    skip('linalg.lstsq'),  # flaky, probably just a precision issue
    # data-dependent control flow
    xfail('cov'),
    xfail('istft'),
    xfail('nn.functional.gaussian_nll_loss'),
    xfail('tensor_split'),
    xfail('corrcoef'),
    xfail('quantile'),
    xfail('nanquantile'),
    # Seems like it's creating a sparse tensor that isn't captured by tensor.is_sparse
    xfail('sparse.sampled_addmm'),
    # ???
    xfail('nn.functional.ctc_loss'),
    # proxy tensor doesn't support sparse correctly right now
    skip('to_sparse'),
    # segfaults
    skip('block_diag'),
}
# OpInfo entries that additionally fail when tracing with FakeTensor inputs
# (i.e. tracing_mode="fake"), on top of `make_fx_failures` above.
fake_tensor_failures = {
    # FakeTensor fallback doesn't work
    xfail('segment_reduce', 'lengths'),
    xfail('multinomial'),
    xfail('mvlgamma', 'mvlgamma_p_1'),
    xfail('mvlgamma', 'mvlgamma_p_3'),
    xfail('mvlgamma', 'mvlgamma_p_5'),
    xfail('cholesky'),
    xfail('cholesky_inverse'),
    # ASAN failures due to divide by 0
    skip('nn.functional.nll_loss'),
}
symbolic_tensor_failures = {
# Needs complex-value support
xfail('polar'),
xfail('complex'),
xfail('linalg.eig'),
xfail('__getitem__', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('__rmatmul__', ''), # aten.new_empty.default - couldn't find symbolic meta function/decomposition
xfail('__rpow__', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('_masked.amax', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('_masked.amin', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('_masked.argmax', ''), # aten.argmax.default - couldn't find symbolic meta function/decomposition
xfail('_masked.argmin', ''), # aten.argmin.default - couldn't find symbolic meta function/decomposition
xfail('_masked.cumprod', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('_masked.cumsum', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('_masked.log_softmax', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('_masked.logaddexp', ''), # aten.logaddexp.default - couldn't find symbolic meta function/decomposition
xfail('_masked.logsumexp', ''), # Tensors of type TensorImpl do not have numel
xfail('_masked.mean', ''), # ones() received an invalid combination of arguments - got (torch.Size, device=torch.device, ...
xfail('_masked.median', ''), # aten.nanmedian.dim - couldn't find symbolic meta function/decomposition
xfail('_masked.norm', ''), # aten.linalg_vector_norm.default - couldn't find symbolic meta function/decomposition
xfail('_masked.normalize', ''), # aten.linalg_vector_norm.default - couldn't find symbolic meta function/decomposition
xfail('_masked.prod', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('_masked.softmax', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('_masked.softmin', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('_masked.std', ''), # ones() received an invalid combination of arguments - got (torch.Size, device=torch.device, d...
xfail('_masked.sum', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('_masked.var', ''), # ones() received an invalid combination of arguments - got (torch.Size, device=torch.device, d...
xfail('addbmm', ''), # aten.addbmm.default - couldn't find symbolic meta function/decomposition
xfail('addmm', ''), # aten.mm.default - couldn't find symbolic meta function/decomposition
xfail('addmm', 'decomposed'), # aten.mm.default - couldn't find symbolic meta function/decomposition
xfail('addmv', ''), # aten.addmv.default - couldn't find symbolic meta function/decomposition
xfail('addr', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('all', ''), # Unexpected type <class 'torch.SymIntNode'> when computing elementwise type promotion!
xfail('aminmax', ''), # aten.aminmax.default - couldn't find symbolic meta function/decomposition
xfail('argmax', ''), # aten.argmax.default - couldn't find symbolic meta function/decomposition
xfail('argmin', ''), # aten.argmin.default - couldn't find symbolic meta function/decomposition
xfail('argsort', ''), # aten.sort.default - couldn't find symbolic meta function/decomposition
xfail('argwhere', ''), # aten.nonzero.default - couldn't find symbolic meta function/decomposition
xfail('as_strided', ''), # aten.as_strided.default - couldn't find symbolic meta function/decomposition
xfail('as_strided_scatter', ''), # aten.as_strided_scatter.default - couldn't find symbolic meta function/decomposition
xfail('baddbmm', ''), # aten.baddbmm.default - couldn't find symbolic meta function/decomposition
xfail('bernoulli', ''), # aten.bernoulli.default - couldn't find symbolic meta function/decomposition
xfail('bfloat16', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('bmm', ''), # aten.bmm.default - couldn't find symbolic meta function/decomposition
xfail('bool', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('broadcast_tensors', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('bucketize', ''), # aten.bucketize.Tensor - couldn't find symbolic meta function/decomposition
xfail('byte', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('cartesian_prod', ''), # Tensors of type TensorImpl do not have numel
xfail('cdist', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('chalf', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('char', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('cholesky_solve', ''), # Could not run 'aten::_cholesky_solve_helper' with arguments from the 'Meta' back...
xfail('chunk', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('clamp_max', ''), # Received type <class 'NoneType'> that is neither a tensor or a number!
xfail('clone', ''), # aten.clone.default - couldn't find symbolic meta function/decomposition
xfail('column_stack', ''), # Tensors of type TensorImpl do not have numel
xfail('constant_pad_nd', ''), # aten.fill.Scalar - couldn't find symbolic meta function/decomposition
xfail('count_nonzero', ''), # Could not run 'aten::count_nonzero.dim_IntList' with arguments from the 'Meta' ba...
xfail('cross', ''), # aten.linalg_cross.default - couldn't find symbolic meta function/decomposition
xfail('cummax', ''), # aten.cummax.default - couldn't find symbolic meta function/decomposition
xfail('cummin', ''), # aten.cummin.default - couldn't find symbolic meta function/decomposition
xfail('cumprod', ''), # aten.cumprod.default - couldn't find symbolic meta function/decomposition
xfail('cumsum', ''), # aten.cumsum.default - couldn't find symbolic meta function/decomposition
xfail('cumulative_trapezoid', ''), # aten.slice.Tensor - couldn't find symbolic meta function/decomposition
xfail('deg2rad', ''), # aten.deg2rad.default - couldn't find symbolic meta function/decomposition
xfail('diag_embed', ''), # aten.diag_embed.default - couldn't find symbolic meta function/decomposition
xfail('diagflat', ''), # Tensors of type TensorImpl do not have numel
xfail('diagonal', ''), # aten.diagonal.default - couldn't find symbolic meta function/decomposition
xfail('diagonal_scatter', ''), # aten.diagonal_scatter.default - couldn't find symbolic meta function/decomposition
xfail('diff', ''), # aten.empty_like.default - couldn't find symbolic meta function/decomposition
xfail('dist', ''), # aten.dist.default - couldn't find symbolic meta function/decomposition
xfail('double', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('dsplit', ''), # aten.slice.Tensor - couldn't find symbolic meta function/decomposition
xfail('eig', ''), # aten.eig.default - couldn't find symbolic meta function/decomposition
xfail('einsum', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('expand_as', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('fft.fft2', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('fft.fft', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('fft.fftn', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('fft.fftshift', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('fft.hfft2', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('fft.hfft', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('fft.hfftn', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('fft.ifft2', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('fft.ifft', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('fft.ifftn', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('fft.ifftshift', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('fft.ihfft2', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('fft.ihfft', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('fft.ihfftn', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('fft.irfft2', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('fft.irfft', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('fft.irfftn', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('fft.rfft2', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('fft.rfft', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('fft.rfftn', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('fill', ''), # The underlying op of 'aten.stride' has no overload name '_schema'
xfail('flatten', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('unflatten', ''), # RuntimeError: Trying to call aten.size on a tensor with symbolic shapes...
xfail('float', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('float_power', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('frexp', ''), # aten.frexp.Tensor - couldn't find symbolic meta function/decomposition
xfail('full_like', ''), # aten.full_like.default - couldn't find symbolic meta function/decomposition
xfail('gather', ''), # aten.gather.default - couldn't find symbolic meta function/decomposition
xfail('geqrf', ''), # aten.geqrf.default - couldn't find symbolic meta function/decomposition
xfail('gradient', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('half', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('histc', ''), # Could not run 'aten::histc' with arguments from the 'Meta' backend. This could be because...
xfail('histogram', ''), # Could not run 'aten::histogram.bin_ct' with arguments from the 'Meta' backend. This c...
xfail('histogramdd', ''), # aten._histogramdd_bin_edges.default - couldn't find symbolic meta function/decomposition
xfail('hsplit', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('i0', ''), # aten.i0.default - couldn't find symbolic meta function/decomposition
xfail('index_add', ''), # Float
xfail('index_copy', ''), # Expected a long tensor for index, but got Float
xfail('index_fill', ''), # aten.index_fill.int_Scalar - couldn't find symbolic meta function/decomposition
xfail('index_put', ''), # aten.index_put.default - couldn't find symbolic meta function/decomposition
xfail('index_reduce', ''), # Float
xfail('inner', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('int', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('inverse', ''), # Tensors of type TensorImpl do not have numel
xfail('isclose', ''), # The underlying op of 'aten.stride' has no overload name '_schema'
xfail('isin', ''), # aten.isin.Tensor_Tensor - couldn't find symbolic meta function/decomposition
xfail('isreal', ''), # aten.empty_like.default - couldn't find symbolic meta function/decomposition
xfail('kron', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('kthvalue', ''), # aten.kthvalue.default - couldn't find symbolic meta function/decomposition
xfail('lerp', ''), # aten.lerp.Scalar - couldn't find symbolic meta function/decomposition
xfail('linalg.cholesky', ''), # aten.linalg_cholesky_ex.default - couldn't find symbolic meta function/decomposition
xfail('linalg.cholesky_ex', ''), # aten.linalg_cholesky_ex.default - couldn't find symbolic meta function/decomposition
xfail('linalg.cond', ''), # Tensors of type TensorImpl do not have numel
xfail('linalg.cross', ''), # aten.linalg_cross.default - couldn't find symbolic meta function/decomposition
xfail('linalg.det', ''), # aten._linalg_det.default - couldn't find symbolic meta function/decomposition
xfail('linalg.eigh', ''), # aten._linalg_eigh.default - couldn't find symbolic meta function/decomposition
xfail('linalg.eigvalsh', ''), # aten._linalg_eigh.default - couldn't find symbolic meta function/decomposition
xfail('linalg.householder_product', ''), # aten.linalg_householder_product.default - couldn't find symbolic meta funct...
xfail('linalg.inv', ''), # aten.linalg_inv_ex.default - couldn't find symbolic meta function/decomposition
xfail('linalg.inv_ex', ''), # aten.linalg_inv_ex.default - couldn't find symbolic meta function/decomposition
xfail('linalg.ldl_factor', ''), # aten.linalg_ldl_factor_ex.default - couldn't find symbolic meta function/decomposition
xfail('linalg.ldl_factor_ex', ''), # aten.linalg_ldl_factor_ex.default - couldn't find symbolic meta function/decompos...
xfail('linalg.ldl_solve', ''), # aten.linalg_ldl_solve.default - couldn't find symbolic meta function/decomposition
xfail('linalg.lu', ''), # aten.linalg_lu.default - couldn't find symbolic meta function/decomposition
xfail('linalg.lu_factor', ''), # aten.linalg_lu_factor_ex.default - couldn't find symbolic meta function/decomposition
xfail('linalg.lu_factor_ex', ''), # aten.linalg_lu_factor_ex.default - couldn't find symbolic meta function/decomposition
xfail('linalg.lu_solve', ''), # aten.linalg_lu_solve.default - couldn't find symbolic meta function/decomposition
xfail('linalg.matrix_power'), # RuntimeError: Trying to call aten.size on a tensor with symbolic shape
xfail('linalg.matrix_norm', ''), # aten.linalg_vector_norm.default - couldn't find symbolic meta function/decomposition
xfail('linalg.matrix_rank', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('linalg.matrix_rank', 'hermitian'), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('linalg.multi_dot', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('linalg.norm', ''), # TensorImpl do not have numel
xfail('linalg.norm', 'subgradients_at_zero'), # TensorImpl do not have numel
xfail('linalg.pinv', ''), # aten.linalg_pinv.atol_rtol_tensor - couldn't find symbolic meta function/decomposition
xfail('linalg.pinv', 'singular'), # aten.linalg_cholesky_ex.default - couldn't find symbolic meta function/decomposition
xfail('linalg.pinv', 'hermitian'), # aten.linalg_pinv.atol_rtol_tensor - couldn't find symbolic meta function/decompo...
xfail('linalg.qr', ''), # aten.linalg_qr.default - couldn't find symbolic meta function/decomposition
xfail('linalg.slogdet', ''), # aten._linalg_slogdet.default - couldn't find symbolic meta function/decomposition
xfail('linalg.solve', ''), # aten._linalg_solve_ex.default - couldn't find symbolic meta function/decomposition
xfail('linalg.solve_ex', ''), # aten._linalg_solve_ex.default - couldn't find symbolic meta function/decomposition
xfail('linalg.solve_triangular', ''), # aten.linalg_solve_triangular.default - couldn't find symbolic meta function/de...
xfail('linalg.svd', ''), # aten._linalg_svd.default - couldn't find symbolic meta function/decomposition
xfail('linalg.svdvals', ''), # aten._linalg_svd.default - couldn't find symbolic meta function/decomposition
xfail('linalg.tensorinv', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('linalg.tensorsolve', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('linalg.vander', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('linalg.vecdot', ''), # Could not run 'aten::vdot' with arguments from the 'Meta' backend. This could be ...
xfail('linalg.vector_norm', ''), # TensorImpl do not have numel
xfail('log_softmax', 'dtype'), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('logaddexp2', ''), # aten.logaddexp2.default - couldn't find symbolic meta function/decomposition
xfail('logaddexp', ''), # aten.logaddexp.default - couldn't find symbolic meta function/decomposition
xfail('logcumsumexp', ''), # aten.logcumsumexp.default - couldn't find symbolic meta function/decomposition
xfail('logdet', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('long', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('lu', ''), # aten.linalg_lu_factor_ex.default - couldn't find symbolic meta function/decomposition
xfail('lu_solve', ''), # aten.linalg_lu_solve.default - couldn't find symbolic meta function/decomposition
xfail('lu_unpack', ''), # aten.lu_unpack.default - couldn't find symbolic meta function/decomposition
xfail('masked_fill', ''), # expected predicate to be bool, got torch.float32
xfail('masked_scatter', ''), # aten.masked_scatter.default - couldn't find symbolic meta function/decomposition
xfail('masked_select', ''), # aten.masked_select.default - couldn't find symbolic meta function/decomposition
xfail('matmul', ''), # aten.new_empty.default - couldn't find symbolic meta function/decomposition
xfail('matrix_exp', ''), # aten.linalg_matrix_exp.default - couldn't find symbolic meta function/decomposition
xfail('max', 'reduction_with_dim'), # aten.max.dim - couldn't find symbolic meta function/decomposition
xfail('mean', ''), # Unexpected type <class 'torch.SymIntNode'> when computing elementwise type promotion!
xfail('median', ''), # Could not run 'aten::median' with arguments from the 'Meta' backend. This could be becau...
xfail('meshgrid', 'list_of_tensors'), # Tensors of type TensorImpl do not have numel
xfail('meshgrid', 'variadic_tensors'), # Tensors of type TensorImpl do not have numel
xfail('min', 'reduction_with_dim'), # aten.min.dim - couldn't find symbolic meta function/decomposition
xfail('mm', ''), # aten.mm.default - couldn't find symbolic meta function/decomposition
xfail('mode', ''), # aten.mode.default - couldn't find symbolic meta function/decomposition
xfail('msort', ''), # aten.sort.default - couldn't find symbolic meta function/decomposition
xfail('mv', ''), # aten.mv.default - couldn't find symbolic meta function/decomposition
xfail('nanmean', ''), # The underlying op of 'aten.stride' has no overload name '_schema'
xfail('nanquantile', ''), # Could not run 'aten::equal' with arguments from the 'Meta' backend.
xfail('narrow', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('native_layer_norm', ''), # Unexpected type <class 'torch.SymIntNode'> when computing elementwise type promot...
xfail('nn.functional.adaptive_avg_pool1d', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.adaptive_avg_pool2d', ''), # argument 'size' must be tuple of ints, but found element o...
xfail('nn.functional.adaptive_avg_pool3d', ''), # aten._adaptive_avg_pool3d.default - couldn't find symbolic meta func...
xfail('nn.functional.adaptive_max_pool1d', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.adaptive_max_pool2d', ''), # aten.adaptive_max_pool2d.default - couldn't find symbolic meta funct...
xfail('nn.functional.adaptive_max_pool3d', ''), # argument 'output_size' (position 2) must be tupl...
xfail('nn.functional.avg_pool1d', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.avg_pool2d', ''), # aten.avg_pool2d.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.avg_pool3d', ''), # aten.avg_pool3d.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.batch_norm', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.bilinear', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.binary_cross_entropy', ''), # aten.new_empty.default - couldn't find symbolic meta function/decom...
xfail('nn.functional.binary_cross_entropy_with_logits', ''), # aten.binary_cross_entropy_with_logits.default - couldn'...
xfail('nn.functional.conv1d', ''), # aten.convolution.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.conv2d', ''), # aten.convolution.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.conv_transpose1d', ''), # aten.convolution.default - couldn't find symbolic meta function/decompo...
xfail('nn.functional.conv_transpose2d', ''), # aten.convolution.default - couldn't find symbolic meta function/decompo...
xfail('nn.functional.conv_transpose3d', ''), # aten.convolution.default - couldn't find symbolic meta function/decompo...
xfail('nn.functional.cosine_embedding_loss', ''), # The underlying op of 'aten.stride' has no overload name '_schema'
xfail('nn.functional.cosine_similarity', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.cross_entropy', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.dropout2d', ''), # Tensors of type TensorImpl do not have numel
xfail('nn.functional.dropout3d', ''), # Tensors of type TensorImpl do not have numel
xfail('nn.functional.dropout', ''), # Tensors of type TensorImpl do not have numel
xfail('nn.functional.embedding_bag', ''), # aten._embedding_bag_forward_only.default - couldn't find symbolic meta fun...
xfail('nn.functional.embedding', ''), # argument 'size' must be tuple of ints, but found element of type tor...
xfail('nn.functional.feature_alpha_dropout', 'with_train'), # Tensors of type TensorImpl do not have numel
xfail('nn.functional.fractional_max_pool2d', ''), # argument 'size' must be tuple of ints, but found element of t...
xfail('nn.functional.fractional_max_pool3d', ''), # argument 'size' must be tuple of ints, but found element of t...
xfail('nn.functional.glu', ''), # aten.glu.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.grid_sample', ''), # aten.grid_sampler_2d.default - couldn't find symbolic meta function/decompos...
xfail('nn.functional.group_norm', ''), # 'torch._C.SymIntNode' and 'int'
xfail('nn.functional.hardsigmoid', ''), # Received type <class 'NoneType'> that is neither a tensor or a number!
xfail('nn.functional.hardswish', ''), # Received type <class 'NoneType'> that is neither a tensor or a number!
xfail('nn.functional.hinge_embedding_loss', ''), # aten.empty_like.default - couldn't find symbolic meta function/deco...
xfail('nn.functional.huber_loss', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.instance_norm', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.interpolate', 'area'), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.interpolate', 'bicubic'), # aten.upsample_bicubic2d.vec - couldn't find symbolic meta function/d...
xfail('nn.functional.interpolate', 'bilinear'), # aten.upsample_bilinear2d.vec - couldn't find symbolic meta function...
xfail('nn.functional.interpolate', 'linear'), # aten.upsample_linear1d.vec - couldn't find symbolic meta function/dec...
xfail('nn.functional.interpolate', 'nearest'), # aten.upsample_nearest1d.vec - couldn't find symbolic meta function/d...
xfail('nn.functional.interpolate', 'trilinear'), # aten.upsample_trilinear3d.vec - couldn't find symbolic meta functi...
xfail('nn.functional.kl_div', ''), # Unexpected type <class 'torch.SymIntNode'> when computing elementwise type pro...
xfail('nn.functional.l1_loss', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.layer_norm', ''), # Unexpected type <class 'torch.SymIntNode'> when computing elementwise type...
xfail('nn.functional.linear', ''), # aten.mv.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.local_response_norm', ''), # Tensors of type TensorImpl do not have numel
xfail('nn.functional.margin_ranking_loss', ''), # The underlying op of 'aten.stride' has no overload name '_schema'
xfail('nn.functional.max_pool2d', ''), # aten.max_pool2d_with_indices.default - couldn't find symbolic meta function/d...
xfail('nn.functional.max_pool3d', ''), # aten.max_pool3d_with_indices.default - couldn't find symbolic meta function/d...
xfail('nn.functional.max_unpool1d', 'grad'), # aten.max_unpool2d.default - couldn't find symbolic meta function/decom...
xfail('nn.functional.max_unpool2d', 'grad'), # aten.max_unpool2d.default - couldn't find symbolic meta function/decom...
xfail('nn.functional.max_unpool3d', 'grad'), # aten.max_unpool3d.default - couldn't find symbolic meta function/decom...
xfail('nn.functional.mse_loss', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.multi_margin_loss', ''), # Could not run 'aten::multi_margin_loss' with arguments from the...
xfail('nn.functional.multilabel_margin_loss', ''), # Could not run 'aten::multilabel_margin_loss_forward' with ...
xfail('nn.functional.multilabel_soft_margin_loss', ''), # aten.new_empty.default - couldn't find symbolic meta functio...
xfail('nn.functional.normalize', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.pad', 'circular'), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.pad', 'constant'), # aten.fill.Scalar - couldn't find symbolic meta function/decomposition
xfail('nn.functional.pad', 'reflect'), # aten.reflection_pad1d.default - couldn't find symbolic meta function/decompo...
xfail('nn.functional.pad', 'replicate'), # aten.replication_pad1d.default - couldn't find symbolic meta function/deco...
xfail('nn.functional.pdist', ''), # Could not run 'aten::_pdist_forward' with arguments from the 'Meta' backend...
xfail('nn.functional.pixel_shuffle', ''), # aten.pixel_shuffle.default - couldn't find symbolic meta function/decompos...
xfail('nn.functional.pixel_unshuffle', ''), # aten.pixel_unshuffle.default - couldn't find symbolic meta function/deco...
xfail('nn.functional.poisson_nll_loss', ''), # The underlying op of 'aten.stride' has no overload name '_schema'
xfail('nn.functional.rrelu', ''), # aten.empty_like.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.smooth_l1_loss', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.soft_margin_loss', ''), # aten.soft_margin_loss.default - couldn't find symbolic meta function/de...
xfail('nn.functional.softmin', 'with_dtype'), # aten._to_copy.default - couldn't find symbolic meta function/decompos...
xfail('nn.functional.triplet_margin_loss', ''), # Unexpected type <class 'torch.SymIntNode'> when computing element...
xfail('nn.functional.triplet_margin_with_distance_loss', ''), # Unexpected type <class 'torch.SymIntNode'> when com...
xfail('nn.functional.unfold', ''), # aten.im2col.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.upsample_bilinear', ''), # aten.upsample_bilinear2d.vec - couldn't find symbolic meta function/de...
xfail('nn.functional.upsample_nearest', ''), # aten.upsample_nearest1d.vec - couldn't find symbolic meta function/deco...
xfail('norm', ''), # TensorImpl does not have numel
xfail('norm', 'nuc'), # aten._linalg_svd.default - couldn't find symbolic meta function/decomposition
xfail('normal', ''), # aten.normal.Tensor_Tensor - couldn't find symbolic meta function/decomposition
xfail('normal', 'number_mean'), # aten.normal.float_Tensor - couldn't find symbolic meta function/decomposition
xfail('ones_like', ''), # aten.ones_like.default - couldn't find symbolic meta function/decomposition
xfail('ormqr', ''), # aten.ormqr.default - couldn't find symbolic meta function/decomposition
xfail('outer', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('pca_lowrank', ''), # aten.mm.default - couldn't find symbolic meta function/decomposition
xfail('pinverse', ''), # aten.linalg_pinv.atol_rtol_tensor - couldn't find symbolic meta function/decomposition
xfail('polygamma', 'polygamma_n_0'), # aten.polygamma.default - couldn't find symbolic meta function/decomposition
xfail('polygamma', 'polygamma_n_1'), # aten.polygamma.default - couldn't find symbolic meta function/decomposition
xfail('polygamma', 'polygamma_n_2'), # aten.polygamma.default - couldn't find symbolic meta function/decomposition
xfail('polygamma', 'polygamma_n_3'), # aten.polygamma.default - couldn't find symbolic meta function/decomposition
xfail('polygamma', 'polygamma_n_4'), # aten.polygamma.default - couldn't find symbolic meta function/decomposition
xfail('put', ''), # aten.clone.default - couldn't find symbolic meta function/decomposition
xfail('quantile', ''), # Could not run 'aten::equal' with arguments from the 'Meta' backend.
xfail('qr', ''), # aten.linalg_qr.default - couldn't find symbolic meta function/decomposition
xfail('rad2deg', ''), # aten.rad2deg.default - couldn't find symbolic meta function/decomposition
xfail('rand_like', ''), # aten.randn_like.default - couldn't find symbolic meta function/decomposition
xfail('randint_like', ''), # aten.randint_like.default - couldn't find symbolic meta function/decomposition
xfail('randn_like', ''), # aten.randn_like.default - couldn't find symbolic meta function/decomposition
xfail('ravel', ''), # Tensors of type TensorImpl do not have numel
xfail('renorm', ''), # aten.renorm.default - couldn't find symbolic meta function/decomposition
xfail('repeat', ''), # aten.repeat.default - couldn't find symbolic meta function/decomposition
xfail('reshape_as', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('reshape', ''), # Tensors of type TensorImpl do not have numel
xfail('resize_', ''), # aten.clone.default - couldn't find symbolic meta function/decomposition
xfail('resize_as_', ''), # aten.clone.default - couldn't find symbolic meta function/decomposition
xfail('roll', ''), # Tensors of type TensorImpl do not have numel
xfail('rot90', ''), # aten.empty_like.default - couldn't find symbolic meta function/decomposition
xfail('round', ''), # aten.round.default - couldn't find symbolic meta function/decomposition
xfail('round', 'decimals_0'), # aten.round.decimals - couldn't find symbolic meta function/decomposition
xfail('round', 'decimals_3'), # aten.round.decimals - couldn't find symbolic meta function/decomposition
xfail('round', 'decimals_neg_3'), # aten.round.decimals - couldn't find symbolic meta function/decomposition
xfail('scatter_add', ''), # aten.scatter_add.default - couldn't find symbolic meta function/decomposition
xfail('scatter', ''), # aten.scatter.src - couldn't find symbolic meta function/decomposition
xfail('scatter_reduce', 'amax'), # aten.scatter_reduce.two - couldn't find symbolic meta function/decomposition
xfail('scatter_reduce', 'amin'), # aten.scatter_reduce.two - couldn't find symbolic meta function/decomposition
xfail('scatter_reduce', 'mean'), # aten.scatter_reduce.two - couldn't find symbolic meta function/decomposition
xfail('scatter_reduce', 'prod'), # aten.scatter_reduce.two - couldn't find symbolic meta function/decomposition
xfail('scatter_reduce', 'sum'), # aten.scatter_reduce.two - couldn't find symbolic meta function/decomposition
xfail('searchsorted', ''), # Could not run 'aten::searchsorted.Tensor' with arguments from the 'Meta' backend. ...
xfail('segment_reduce', 'offsets'), # aten.segment_reduce.default - couldn't find symbolic meta function/decomposition
xfail('select', ''), # aten.select.int - couldn't find symbolic meta function/decomposition
xfail('select_scatter', ''), # aten.select_scatter.default - couldn't find symbolic meta function/decomposition
xfail('sgn', ''), # aten.sgn.default - couldn't find symbolic meta function/decomposition
xfail('short', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('sinc', ''), # aten.sinc.default - couldn't find symbolic meta function/decomposition
xfail('slice_scatter', ''), # aten.slice_scatter.default - couldn't find symbolic meta function/decomposition
xfail('softmax', 'with_dtype'), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
xfail('sort', ''), # aten.sort.default - couldn't find symbolic meta function/decomposition
xfail('special.airy_ai', ''), # aten.special_airy_ai.default - couldn't find symbolic meta function/decomposition
xfail('special.bessel_j0', ''), # aten.special_bessel_j0.default - couldn't find symbolic meta function/decomposition
xfail('special.bessel_j1', ''), # aten.special_bessel_j1.default - couldn't find symbolic meta function/decomposition
xfail('special.bessel_y0', ''), # aten.special_bessel_y0.default - couldn't find symbolic meta function/decomposition
xfail('special.bessel_y1', ''), # aten.special_bessel_y1.default - couldn't find symbolic meta function/decomposition
xfail('special.chebyshev_polynomial_t', ''), # aten.special_chebyshev_polynomial_t.default - couldn't find symbolic me...
xfail('special.chebyshev_polynomial_u', ''), # aten.special_chebyshev_polynomial_u.default - couldn't find symbolic me...
xfail('special.entr', ''), # aten.special_entr.default - couldn't find symbolic meta function/decomposition
xfail('special.erfcx', ''), # aten.special_erfcx.default - couldn't find symbolic meta function/decomposition
xfail('special.hermite_polynomial_h', ''), # aten.special_hermite_polynomial_h.default - couldn't find symbolic meta f...
xfail('special.hermite_polynomial_he', ''), # aten.special_hermite_polynomial_he.default - couldn't find symbolic meta...
xfail('special.laguerre_polynomial_l', ''), # aten.special_laguerre_polynomial_l.default - couldn't find symbolic meta...
xfail('special.log_ndtr', ''), # aten.special_log_ndtr.default - couldn't find symbolic meta function/decomposition
xfail('special.modified_bessel_i0', ''), # aten.special_modified_bessel_i0.default - couldn't find symbolic meta funct...
xfail('special.modified_bessel_i1', ''), # aten.special_modified_bessel_i1.default - couldn't find symbolic meta funct...
xfail('special.modified_bessel_k0', ''), # aten.special_modified_bessel_k0.default - couldn't find symbolic meta funct...
xfail('special.modified_bessel_k1', ''), # aten.special_modified_bessel_k1.default - couldn't find symbolic meta funct...
xfail('special.ndtri', ''), # aten.special_ndtri.default - couldn't find symbolic meta function/decomposition
xfail('special.polygamma', 'special_polygamma_n_0'), # aten.polygamma.default - couldn't find symbolic meta function/...
xfail('special.scaled_modified_bessel_k0', ''), # aten.special_scaled_modified_bessel_k0.default - couldn't find symbo...
xfail('special.scaled_modified_bessel_k1', ''), # aten.special_scaled_modified_bessel_k1.default - couldn't find symbo...
xfail('special.spherical_bessel_j0', ''), # aten.special_spherical_bessel_j0.default - couldn't find symbolic meta fun...
xfail('special.xlog1py', ''), # aten.special_xlog1py.default - couldn't find symbolic meta function/decomposition
xfail('split', ''), # 'torch._C.SymIntNode' and 'int'
xfail('split', 'list_args'), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('split_with_sizes', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('stack', ''), # argument 'size' must be tuple of ints, but found element of type torch._C.SymIntNode a...
xfail('std', ''), # Unexpected type <class 'torch.SymIntNode'> when computing elementwise type promotion!
xfail('std_mean', ''), # Unexpected type <class 'torch.SymIntNode'> when computing elementwise type promotion!
xfail('stft', ''), # argument 'size' must be tuple of ints, but found element of type torch._C.SymIntNode at...
xfail('sum_to_size', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('svd', ''), # aten._linalg_svd.default - couldn't find symbolic meta function/decomposition
xfail('svd_lowrank', ''), # aten.mm.default - couldn't find symbolic meta function/decomposition
xfail('symeig', ''), # aten.symeig.default - couldn't find symbolic meta function/decomposition
xfail('take_along_dim', ''), # dtype of indices should be Long but got Float
xfail('take', ''), # aten.take.default - couldn't find symbolic meta function/decomposition
xfail('tensordot', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('tile', ''), # aten.repeat.default - couldn't find symbolic meta function/decomposition
xfail('topk', ''), # aten.topk.default - couldn't find symbolic meta function/decomposition
xfail('trapz', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('trapezoid', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('triangular_solve', ''), # aten.triangular_solve.default - couldn't find symbolic meta function/decomposition
xfail('tril', ''), # aten.tril.default - couldn't find symbolic meta function/decomposition
xfail('triu', ''), # aten.triu.default - couldn't find symbolic meta function/decomposition
xfail('unfold', ''), # aten.unfold.default - couldn't find symbolic meta function/decomposition
xfail('var_mean', ''), # Unexpected type <class 'torch.SymIntNode'> when computing elementwise type promotion!
xfail('var', ''), # Unexpected type <class 'torch.SymIntNode'> when computing elementwise type promotion!
xfail('vdot', ''), # aten.vdot.default - couldn't find symbolic meta function/decomposition
xfail('view_as_complex', ''), # aten.view_as_complex.default - couldn't find symbolic meta function/decomposition
xfail('view_as', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('view', ''), # Tensors of type TensorImpl do not have numel
xfail('vsplit', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('where', ''), # expected predicate to be bool, got torch.float32
xfail('zero_', ''), # aten.clone.default - couldn't find symbolic meta function/decomposition
xfail('zeros_like', ''), # aten.zeros_like.default - couldn't find symbolic meta function/decomposition
xfail('unbind', ''), # aten.unbind.int - couldn't find symbolic meta function/decomposition
}
def _test_make_fx_helper(self, device, dtype, op, tracing_mode):
    """Trace `op` with make_fx over each OpInfo sample and check the traced
    graph computes the same result as eager execution.

    For each sample we (1) trace `f`, (2) randomize the float tensor args
    in-place so a correct trace must recompute from its inputs rather than
    return values baked in at trace time, then (3) compare traced vs. eager
    outputs.

    Args:
        device/dtype: forwarded to op.sample_inputs.
        op: an OpInfo entry.
        tracing_mode: "real", "fake", or "symbolic" (passed to make_fx).
    """
    def f(args, kwargs):
        return op.op(*args, **kwargs)

    sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
    for sample_input in sample_inputs_itr:
        args = [sample_input.input] + list(sample_input.args)
        kwargs = sample_input.kwargs

        try:
            new_f = make_fx(f, tracing_mode=tracing_mode)(args, kwargs)
        except DynamicOutputShapeException:
            # skipTest raises, so `new_f` is always bound past this point.
            self.skipTest("Dynamic output shape operation in trace")

        # Perturb float inputs so the trace cannot simply replay captured
        # constants.
        for arg in args:
            if isinstance(arg, torch.Tensor) and arg.dtype == torch.float:
                arg.uniform_(0, 1)

        # Some samples become invalid after randomization; skip those.
        try:
            old_out = f(args, kwargs)
        except Exception:
            continue
        new_out = wrapper_set_seed(new_f, args, kwargs)
        self.assertEqual(new_out, old_out)
class TestProxyTensorOpInfo(TestCase):
    """OpInfo-driven exhaustive tests for make_fx in its three tracing modes."""

    @ops(op_db, allowed_dtypes=(torch.float,))
    @skipOps('TestProxyTensorOpInfo', 'test_make_fx_exhaustive', make_fx_failures)
    def test_make_fx_exhaustive(self, device, dtype, op):
        # "real" mode: trace with real tensors.
        _test_make_fx_helper(self, device, dtype, op, "real")

    @ops(op_db, allowed_dtypes=(torch.float,))
    @skipOps('TestProxyTensorOpInfo', 'test_make_fx_fake_exhaustive', make_fx_failures.union(fake_tensor_failures))
    def test_make_fx_fake_exhaustive(self, device, dtype, op):
        # "fake" mode: additional fake-tensor expected failures apply.
        _test_make_fx_helper(self, device, dtype, op, "fake")

    @skipIfNoSympy
    @ops(op_db, allowed_dtypes=(torch.float,))
    @skipOps('TestProxyTensorOpInfo', 'test_make_fx_symbolic_exhaustive',
             make_fx_failures | fake_tensor_failures | symbolic_tensor_failures)
    def test_make_fx_symbolic_exhaustive(self, device, dtype, op):
        # "symbolic" mode: symbolic shapes; requires sympy.
        _test_make_fx_helper(self, device, dtype, op, "symbolic")
# Instantiate the device-type tests for CPU only.
# NOTE: the original `("cpu")` was just the string "cpu" (parentheses alone do
# not make a tuple); `instantiate_device_type_tests` accepts either form, but a
# real one-element tuple states the intent unambiguously.
only_for = ("cpu",)
instantiate_device_type_tests(TestProxyTensorOpInfo, globals(), only_for=only_for)

if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_proxy_tensor.py |
# Owner(s): ["module: hub"]
import unittest
from unittest.mock import patch
import os
import tempfile
import warnings
import torch
import torch.hub as hub
from torch.testing._internal.common_utils import retry, IS_SANDCASTLE, TestCase
def sum_of_state_dict(state_dict):
    """Return the total of ``value.sum()`` over every entry in `state_dict`.

    Used as a cheap fingerprint to check that a downloaded checkpoint has the
    expected contents.
    """
    return sum(value.sum() for value in state_dict.values())
SUM_OF_HUB_EXAMPLE = 431080
TORCHHUB_EXAMPLE_RELEASE_URL = 'https://github.com/ailzhang/torchhub_example/releases/download/0.1/mnist_init_ones'
@unittest.skipIf(IS_SANDCASTLE, 'Sandcastle cannot ping external')
class TestHub(TestCase):
    """Network-backed tests for torch.hub (loading, caching, trust_repo flows).

    Every test runs against GitHub (hence the @retry decorators for flaky
    network access) and uses a temporary hub directory so downloads never
    touch the user's real cache.
    """

    def setUp(self):
        super().setUp()
        # Save the real hub dir and point torch.hub at a throwaway directory
        # for the duration of this test.
        self.previous_hub_dir = torch.hub.get_dir()
        self.tmpdir = tempfile.TemporaryDirectory('hub_dir')
        torch.hub.set_dir(self.tmpdir.name)
        # Path of the allowlist file that the trust_repo flows write to.
        self.trusted_list_path = os.path.join(torch.hub.get_dir(), "trusted_list")

    def tearDown(self):
        super().tearDown()
        torch.hub.set_dir(self.previous_hub_dir)  # probably not needed, but can't hurt
        self.tmpdir.cleanup()

    def _assert_trusted_list_is_empty(self):
        # The allowlist file exists but contains no entries.
        with open(self.trusted_list_path) as f:
            assert not f.readlines()

    def _assert_in_trusted_list(self, line):
        # `line` must appear (stripped) somewhere in the allowlist file.
        with open(self.trusted_list_path) as f:
            assert line in (l.strip() for l in f.readlines())

    @retry(Exception, tries=3)
    def test_load_from_github(self):
        hub_model = hub.load('ailzhang/torchhub_example', 'mnist', source='github', pretrained=True, verbose=False)
        self.assertEqual(sum_of_state_dict(hub_model.state_dict()), SUM_OF_HUB_EXAMPLE)

    @retry(Exception, tries=3)
    def test_load_from_local_dir(self):
        # Download (or reuse) the repo into the cache, then load from disk.
        local_dir = hub._get_cache_or_reload(
            'ailzhang/torchhub_example',
            force_reload=False,
            trust_repo=True,
            calling_fn=None
        )
        hub_model = hub.load(local_dir, 'mnist', source='local', pretrained=True, verbose=False)
        self.assertEqual(sum_of_state_dict(hub_model.state_dict()), SUM_OF_HUB_EXAMPLE)

    @retry(Exception, tries=3)
    def test_load_from_branch(self):
        # Branch names may contain slashes; 'ci/test_slash' exercises that.
        hub_model = hub.load('ailzhang/torchhub_example:ci/test_slash', 'mnist', pretrained=True, verbose=False)
        self.assertEqual(sum_of_state_dict(hub_model.state_dict()), SUM_OF_HUB_EXAMPLE)

    @retry(Exception, tries=3)
    def test_get_set_dir(self):
        previous_hub_dir = torch.hub.get_dir()
        with tempfile.TemporaryDirectory('hub_dir') as tmpdir:
            torch.hub.set_dir(tmpdir)
            self.assertEqual(torch.hub.get_dir(), tmpdir)
            self.assertNotEqual(previous_hub_dir, tmpdir)
            hub_model = hub.load('ailzhang/torchhub_example', 'mnist', pretrained=True, verbose=False)
            self.assertEqual(sum_of_state_dict(hub_model.state_dict()), SUM_OF_HUB_EXAMPLE)
            assert os.path.exists(os.path.join(tmpdir, 'ailzhang_torchhub_example_master'))
        # Test that set_dir properly calls expanduser()
        # non-regression test for https://github.com/pytorch/pytorch/issues/69761
        new_dir = os.path.join("~", "hub")
        torch.hub.set_dir(new_dir)
        self.assertEqual(torch.hub.get_dir(), os.path.expanduser(new_dir))

    @retry(Exception, tries=3)
    def test_list_entrypoints(self):
        entry_lists = hub.list('ailzhang/torchhub_example', trust_repo=True)
        self.assertObjectIn('mnist', entry_lists)

    @retry(Exception, tries=3)
    def test_download_url_to_file(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            f = os.path.join(tmpdir, 'temp')
            hub.download_url_to_file(TORCHHUB_EXAMPLE_RELEASE_URL, f, progress=False)
            loaded_state = torch.load(f)
            self.assertEqual(sum_of_state_dict(loaded_state), SUM_OF_HUB_EXAMPLE)

    @retry(Exception, tries=3)
    def test_load_state_dict_from_url(self):
        loaded_state = hub.load_state_dict_from_url(TORCHHUB_EXAMPLE_RELEASE_URL)
        self.assertEqual(sum_of_state_dict(loaded_state), SUM_OF_HUB_EXAMPLE)
        # with name
        file_name = "the_file_name"
        loaded_state = hub.load_state_dict_from_url(TORCHHUB_EXAMPLE_RELEASE_URL, file_name=file_name)
        expected_file_path = os.path.join(torch.hub.get_dir(), 'checkpoints', file_name)
        self.assertTrue(os.path.exists(expected_file_path))
        self.assertEqual(sum_of_state_dict(loaded_state), SUM_OF_HUB_EXAMPLE)

    @retry(Exception, tries=3)
    def test_load_legacy_zip_checkpoint(self):
        # Legacy (pre-1.6) zip checkpoints still load, but emit a deprecation
        # warning.
        with warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter("always")
            hub_model = hub.load('ailzhang/torchhub_example', 'mnist_zip', pretrained=True, verbose=False)
            self.assertEqual(sum_of_state_dict(hub_model.state_dict()), SUM_OF_HUB_EXAMPLE)
            assert any("will be deprecated in favor of default zipfile" in str(w) for w in ws)

    # Test the default zipfile serialization format produced by >=1.6 release.
    @retry(Exception, tries=3)
    def test_load_zip_1_6_checkpoint(self):
        hub_model = hub.load(
            'ailzhang/torchhub_example',
            'mnist_zip_1_6',
            pretrained=True,
            verbose=False,
            trust_repo=True
        )
        self.assertEqual(sum_of_state_dict(hub_model.state_dict()), SUM_OF_HUB_EXAMPLE)

    @retry(Exception, tries=3)
    def test_hub_parse_repo_info(self):
        # If the branch is specified we just parse the input and return
        self.assertEqual(
            torch.hub._parse_repo_info('a/b:c'),
            ('a', 'b', 'c')
        )
        # For torchvision, the default branch is main
        self.assertEqual(
            torch.hub._parse_repo_info('pytorch/vision'),
            ('pytorch', 'vision', 'main')
        )
        # For the torchhub_example repo, the default branch is still master
        self.assertEqual(
            torch.hub._parse_repo_info('ailzhang/torchhub_example'),
            ('ailzhang', 'torchhub_example', 'master')
        )

    @retry(Exception, tries=3)
    def test_load_commit_from_forked_repo(self):
        with self.assertRaisesRegex(ValueError, 'If it\'s a commit from a forked repo'):
            torch.hub.load('pytorch/vision:4e2c216', 'resnet18')

    @retry(Exception, tries=3)
    @patch('builtins.input', return_value='')
    def test_trust_repo_false_emptystring(self, patched_input):
        # Empty input at the prompt means "don't trust": the load fails, the
        # repo is not allowlisted, and a second attempt prompts again.
        with self.assertRaisesRegex(Exception, 'Untrusted repository.'):
            torch.hub.load('ailzhang/torchhub_example', 'mnist_zip_1_6', trust_repo=False)
        self._assert_trusted_list_is_empty()
        patched_input.assert_called_once()

        patched_input.reset_mock()
        with self.assertRaisesRegex(Exception, 'Untrusted repository.'):
            torch.hub.load('ailzhang/torchhub_example', 'mnist_zip_1_6', trust_repo=False)
        self._assert_trusted_list_is_empty()
        patched_input.assert_called_once()

    @retry(Exception, tries=3)
    @patch('builtins.input', return_value='no')
    def test_trust_repo_false_no(self, patched_input):
        # Answering "no" behaves like the empty-string case above.
        with self.assertRaisesRegex(Exception, 'Untrusted repository.'):
            torch.hub.load('ailzhang/torchhub_example', 'mnist_zip_1_6', trust_repo=False)
        self._assert_trusted_list_is_empty()
        patched_input.assert_called_once()

        patched_input.reset_mock()
        with self.assertRaisesRegex(Exception, 'Untrusted repository.'):
            torch.hub.load('ailzhang/torchhub_example', 'mnist_zip_1_6', trust_repo=False)
        self._assert_trusted_list_is_empty()
        patched_input.assert_called_once()

    @retry(Exception, tries=3)
    @patch('builtins.input', return_value='y')
    def test_trusted_repo_false_yes(self, patched_input):
        # Answering "y" trusts the repo and records it in the allowlist.
        torch.hub.load('ailzhang/torchhub_example', 'mnist_zip_1_6', trust_repo=False)
        self._assert_in_trusted_list("ailzhang_torchhub_example")
        patched_input.assert_called_once()

        # Loading a second time with "check", we don't ask for user input
        patched_input.reset_mock()
        torch.hub.load('ailzhang/torchhub_example', 'mnist_zip_1_6', trust_repo="check")
        patched_input.assert_not_called()

        # Loading again with False, we still ask for user input
        patched_input.reset_mock()
        torch.hub.load('ailzhang/torchhub_example', 'mnist_zip_1_6', trust_repo=False)
        patched_input.assert_called_once()

    @retry(Exception, tries=3)
    @patch('builtins.input', return_value='no')
    def test_trust_repo_check_no(self, patched_input):
        # With trust_repo="check", refusal still prompts again on retry.
        with self.assertRaisesRegex(Exception, 'Untrusted repository.'):
            torch.hub.load('ailzhang/torchhub_example', 'mnist_zip_1_6', trust_repo="check")
        self._assert_trusted_list_is_empty()
        patched_input.assert_called_once()

        patched_input.reset_mock()
        with self.assertRaisesRegex(Exception, 'Untrusted repository.'):
            torch.hub.load('ailzhang/torchhub_example', 'mnist_zip_1_6', trust_repo="check")
        patched_input.assert_called_once()

    @retry(Exception, tries=3)
    @patch('builtins.input', return_value='y')
    def test_trust_repo_check_yes(self, patched_input):
        torch.hub.load('ailzhang/torchhub_example', 'mnist_zip_1_6', trust_repo="check")
        self._assert_in_trusted_list("ailzhang_torchhub_example")
        patched_input.assert_called_once()

        # Loading a second time with "check", we don't ask for user input
        patched_input.reset_mock()
        torch.hub.load('ailzhang/torchhub_example', 'mnist_zip_1_6', trust_repo="check")
        patched_input.assert_not_called()

    @retry(Exception, tries=3)
    def test_trust_repo_true(self):
        # trust_repo=True allowlists without ever prompting.
        torch.hub.load('ailzhang/torchhub_example', 'mnist_zip_1_6', trust_repo=True)
        self._assert_in_trusted_list("ailzhang_torchhub_example")

    @retry(Exception, tries=3)
    def test_trust_repo_builtin_trusted_owners(self):
        # Repos from built-in trusted owners (e.g. pytorch) are not written to
        # the user allowlist.
        torch.hub.load('pytorch/vision', 'resnet18', trust_repo="check")
        self._assert_trusted_list_is_empty()

    @retry(Exception, tries=3)
    def test_trust_repo_none(self):
        # trust_repo=None only warns (legacy behavior) and does not allowlist.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            torch.hub.load('ailzhang/torchhub_example', 'mnist_zip_1_6', trust_repo=None)
            assert len(w) == 1
            assert issubclass(w[-1].category, UserWarning)
            assert "You are about to download and run code from an untrusted repository" in str(w[-1].message)

        self._assert_trusted_list_is_empty()

    @retry(Exception, tries=3)
    def test_trust_repo_legacy(self):
        # We first download a repo and then delete the allowlist file
        # Then we check that the repo is indeed trusted without a prompt,
        # because it was already downloaded in the past.
        torch.hub.load('ailzhang/torchhub_example', 'mnist_zip_1_6', trust_repo=True)
        os.remove(self.trusted_list_path)

        torch.hub.load('ailzhang/torchhub_example', 'mnist_zip_1_6', trust_repo="check")

        self._assert_trusted_list_is_empty()
| pytorch-master | test/test_hub.py |
# Owner(s): ["module: named tensor"]
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_NUMPY
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_device_type import get_all_device_types
from collections import namedtuple, OrderedDict
import itertools
import functools
import torch
from torch import Tensor
import torch.nn.functional as F
from multiprocessing.reduction import ForkingPickler
import pickle
import io
import sys
import warnings
def pass_name_to_python_arg_parser(name):
    """Route `name` through the python arg parser by using it as a dim name."""
    torch.empty(2, names=(name,))
def flatten(lst):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    flattened = []
    for sublist in lst:
        flattened.extend(sublist)
    return flattened
Function = namedtuple('TestCase', ['name', 'lambd'])
def parse_compressed_namedshape(string):
# This is a metalanguage for describing a shape of a tensor compactly.
# 'N:3,C:2' -> size = [3, 2], names: ['N', 'C']
# 'None:3,None:2' -> size = [3, 2], names: ['None', 'None']
# '3,2' -> size = [3, 2], names=None passed to ctor.
def parse_name(maybe_name):
maybe_name = maybe_name.strip()
if maybe_name == 'None':
return None
return maybe_name
string = string.strip()
# '' -> size: [], names:None
if len(string) == 0:
return None, []
# '3, 2' -> size = [3, 2], None names.
if ':' not in string:
return None, [int(size) for size in string.split(',')]
dims = string.split(',')
tuples = [dim.split(':') for dim in dims]
return zip(*[(parse_name(name), int(size)) for name, size in tuples])
def create(namedshape, factory=torch.randn):
    """Build a tensor described by a compressed namedshape string.

    `namedshape` is parsed by parse_compressed_namedshape; `factory` is any
    tensor constructor accepting (shape, names=...).
    """
    parsed_names, parsed_shape = parse_compressed_namedshape(namedshape)
    return factory(parsed_shape, names=parsed_names)
def out_fn(operator):
    """Wrap `operator` so its out= tensor is supplied as the first positional
    argument of the returned function; remaining args are forwarded as-is.
    """
    @functools.wraps(operator)
    def fn(out_tensor, *rest):
        return operator(*rest, out=out_tensor)
    return fn
class TestNamedTensor(TestCase):
    def test_aaa_must_run_first_check_experimental_warning(self):
        # Named "aaa..." so it sorts (and therefore runs) first: the
        # experimental-API warning below is only emitted once per process.
        # TODO(rzou): It would be nice for this to be a "real" python warning.
        # Right now this error message only prints once and doesn't respect
        # warnings.simplefilter behavior (where python users can control whether
        # or not to display warnings once, all the time, or never).
        with warnings.catch_warnings(record=True) as warns:
            x = torch.randn(3, 3, names=('N', 'C'))
        self.assertEqual(len(warns), 1)
        self.assertTrue(str(warns[0].message).startswith(
            'Named tensors and all their associated APIs are an experimental feature'))
    def test_trivial(self):
        # Smoke test: collection/setup of this class must succeed.
        pass
def _test_name_inference(self, op, args=(), expected_names=(), device='cpu',
maybe_raises_regex=None):
casted_args = [arg.to(device) if isinstance(arg, torch.Tensor) else arg
for arg in args]
if maybe_raises_regex is not None:
with self.assertRaisesRegex(RuntimeError, maybe_raises_regex):
result = op(*args)
return
result = op(*args)
self.assertEqual(result.names, expected_names,
msg='Name inference for {} on device {} failed'.format(
op.__name__, device))
    # TODO(rzou): Some form of this check should be added to self.assertEqual.
    # Right now I don't know what it should look like.
    def assertTensorDataAndNamesEqual(self, x, y):
        """Assert that x and y agree on both their names and their values."""
        self.assertEqual(x.names, y.names)
        # Strip names before comparing values so name mismatches can't mask
        # (or be masked by) data mismatches.
        unnamed_x = x.rename(None)
        unnamed_y = y.rename(None)
        self.assertEqual(unnamed_x, unnamed_y)
def _test_factory(self, factory, device):
x = factory([], device=device)
self.assertEqual(x.names, ())
x = factory(1, 2, 3, device=device)
self.assertEqual(x.names, (None, None, None))
x = factory(1, 2, 3, names=None, device=device)
self.assertEqual(x.names, (None, None, None))
x = factory(1, 2, 3, names=('N', 'T', 'D'), device=device)
self.assertEqual(x.names, ('N', 'T', 'D'))
x = factory(1, 2, 3, names=('N', None, 'D'), device=device)
self.assertEqual(x.names, ('N', None, 'D'))
x = factory(1, 2, 3, names=('_1', 'batch9', 'BATCH_5'), device=device)
self.assertEqual(x.names, ('_1', 'batch9', 'BATCH_5'))
with self.assertRaisesRegex(RuntimeError,
'a valid identifier contains only'):
x = factory(2, names=('1',), device=device)
with self.assertRaisesRegex(RuntimeError,
'a valid identifier contains only'):
x = factory(2, names=('?',), device=device)
with self.assertRaisesRegex(RuntimeError, 'Number of names'):
x = factory(2, 1, names=('N',), device=device)
with self.assertRaisesRegex(TypeError, 'invalid combination of arguments'):
x = factory(2, 1, names='N', device=device)
with self.assertRaisesRegex(RuntimeError, 'construct a tensor with duplicate names'):
x = factory(2, 1, 1, names=('N', 'C', 'N'), device=device)
names64 = ['A' * i for i in range(1, 65)]
x = factory([1] * 64, names=names64, device=device)
self.assertEqual(x.names, names64)
with self.assertRaisesRegex(
RuntimeError,
'only support up to 64 dims'):
names65 = ['A' * i for i in range(1, 66)]
x = factory([1] * 65, names=names64, device=device)
    def test_none_names_refcount(self, N=10):
        """Accessing .names on an unnamed tensor must not leak Py_None refs."""
        def scope():
            unnamed = torch.empty(2, 3)
            unnamed.names  # materialize [None, None]

        prev_none_refcnt = sys.getrefcount(None)
        # Ran it N times to reduce flakiness
        [scope() for i in range(N)]
        after_none_refcnt = sys.getrefcount(None)
        # Allow a little slack (< N / 2) since other code may touch None's
        # refcount, but a per-call leak would grow it by at least N.
        self.assertTrue(after_none_refcnt - prev_none_refcnt < N / 2,
                        msg='Using tensor.names should not change '
                            'the refcount of Py_None')
    def test_has_names(self):
        """has_names() is True iff at least one dim has a non-None name."""
        unnamed = torch.empty(2, 3)
        none_named = torch.empty(2, 3, names=(None, None))
        partially_named = torch.empty(2, 3, names=('N', None))
        fully_named = torch.empty(2, 3, names=('N', 'C'))

        self.assertFalse(unnamed.has_names())
        # All-None names count as "no names".
        self.assertFalse(none_named.has_names())
        self.assertTrue(partially_named.has_names())
        self.assertTrue(fully_named.has_names())
    def test_py3_ellipsis(self):
        """The literal `...` works as the glob in refine_names."""
        tensor = torch.randn(2, 3, 5, 7)
        output = tensor.refine_names('N', ..., 'C')
        self.assertEqual(output.names, ['N', None, None, 'C'])
    def test_refine_names(self):
        """refine_names can only make names more specific, never change them."""
        # Unnamed tensor -> Unnamed tensor
        self._test_name_inference(Tensor.refine_names,
                                  [create('None:1,None:2,None:3'), 'N', 'C', 'H'],
                                  ['N', 'C', 'H'])

        # Named tensor -> Named tensor
        self._test_name_inference(Tensor.refine_names,
                                  [create('N:1,C:2,H:3'), 'N', 'C', 'H'],
                                  ['N', 'C', 'H'])

        # Partially named tensor -> named tensor
        self._test_name_inference(Tensor.refine_names,
                                  [create('None:1,C:2,None:3'), None, 'C', 'H'],
                                  [None, 'C', 'H'])

        # Too few names
        self._test_name_inference(Tensor.refine_names,
                                  [create('None:2,None:3'), 'N', 'C', 'H'],
                                  maybe_raises_regex="different number of dims")

        # Cannot change Tensor[D] to Tensor[N]
        self._test_name_inference(Tensor.refine_names,
                                  [create('D:3'), 'N'],
                                  maybe_raises_regex="is different from")

        # Cannot change Tensor[D] to Tensor[None]
        self._test_name_inference(Tensor.refine_names,
                                  [create('D:3'), None],
                                  maybe_raises_regex="'D' is more specific than None")

        # globbing behavior exists
        self._test_name_inference(Tensor.refine_names,
                                  [create('None:1,None:1,None:2,None:3'), '...', 'C', 'H'],
                                  [None, None, 'C', 'H'])
    def test_detach(self):
        """Both detach() and in-place detach_() preserve dim names."""
        names = ['N']
        self._test_name_inference(
            Tensor.detach_,
            [torch.randn(3, requires_grad=True, names=names)],
            names)
        self._test_name_inference(
            Tensor.detach,
            [torch.randn(3, requires_grad=True, names=names)],
            names)
    def test_index_fill(self):
        """index_fill/index_fill_ accept a dim name and propagate names."""
        for device in get_all_device_types():
            expected_names = ('N', 'C')
            x = torch.randn(3, 5, device=device, names=expected_names)

            # Scalar and 0-dim-tensor fill values, in-place and out-of-place.
            output = x.index_fill_('C', torch.tensor([0, 1], device=device), 5)
            self.assertEqual(output.names, expected_names)

            output = x.index_fill_('C', torch.tensor([0, 1], device=device), torch.tensor(4.))
            self.assertEqual(output.names, expected_names)

            output = x.index_fill('C', torch.tensor([0, 1], device=device), 5)
            self.assertEqual(output.names, expected_names)

            output = x.index_fill('C', torch.tensor([0, 1], device=device), torch.tensor(4.))
            self.assertEqual(output.names, expected_names)
    def test_equal(self):
        """torch.equal takes names into account: any name mismatch -> False."""
        for device in get_all_device_types():
            tensor = torch.randn(2, 3, device=device)
            other = tensor.clone()

            self.assertTrue(torch.equal(tensor.rename('N', 'C'), other.rename('N', 'C')))
            self.assertFalse(torch.equal(tensor.rename('M', 'C'), other.rename('N', 'C')))
            self.assertFalse(torch.equal(tensor.rename(None, 'C'), other.rename('N', 'C')))
    def test_squeeze(self):
        """squeeze drops the names of the removed size-1 dims."""
        x = create('N:3,C:1,H:1,W:1')
        output = x.squeeze('C')
        self.assertEqual(output.names, ['N', 'H', 'W'])

        output = x.squeeze()
        self.assertEqual(output.names, ['N'])
    def test_repr(self):
        """repr() shows a names=... suffix only when some dim is named."""
        named_tensor = torch.zeros(2, 3).rename_('N', 'C')
        expected = "tensor([[0., 0., 0.],\n        [0., 0., 0.]], names=('N', 'C'))"
        self.assertEqual(repr(named_tensor), expected)

        unnamed_tensor = torch.zeros(2, 3)
        expected = "tensor([[0., 0., 0.],\n        [0., 0., 0.]])"
        self.assertEqual(repr(unnamed_tensor), expected)

        # All-None names print like an unnamed tensor.
        none_named_tensor = torch.zeros(2, 3).rename_(None, None)
        self.assertEqual(repr(none_named_tensor), expected)
    def test_diagonal(self):
        """diagonal drops the two reduced dim names; outdim= names the new dim."""
        named_tensor = torch.zeros(2, 3, 5, 7, names=list('ABCD'))
        self.assertEqual(named_tensor.diagonal().names, ['C', 'D', None])
        self.assertEqual(named_tensor.diagonal(1, 3).names, ['A', 'C', None])
        self.assertEqual(named_tensor.diagonal(outdim='E', dim1='B', dim2='D').names,
                         ['A', 'C', 'E'])
    def test_max_pooling(self):
        """max_pool{1,2,3}d (and the *_with_indices variants) preserve names."""
        def check_tuple_return(op, inputs, expected_names):
            # The *_with_indices ops return (values, indices); both carry names.
            values, indices = op(*inputs)
            self.assertEqual(values.names, expected_names)
            self.assertEqual(indices.names, expected_names)

        for device in get_all_device_types():
            named_tensor_1d = torch.zeros(2, 3, 5, device=device, names=list('ABC'))
            named_tensor_2d = torch.zeros(2, 3, 5, 7, device=device, names=list('ABCD'))
            named_tensor_3d = torch.zeros(2, 3, 5, 7, 9, device=device, names=list('ABCDE'))

            self.assertEqual(F.max_pool1d(named_tensor_1d, 2).names, named_tensor_1d.names)
            self.assertEqual(F.max_pool2d(named_tensor_2d, [2, 2]).names, named_tensor_2d.names)
            self.assertEqual(F.max_pool3d(named_tensor_3d, [2, 2, 2]).names, named_tensor_3d.names)

            check_tuple_return(F.max_pool1d_with_indices, [named_tensor_1d, 2], named_tensor_1d.names)
            check_tuple_return(F.max_pool2d_with_indices, [named_tensor_2d, [2, 2]], named_tensor_2d.names)
            check_tuple_return(F.max_pool3d_with_indices, [named_tensor_3d, [2, 2, 2]], named_tensor_3d.names)
    def test_max_pooling_without_names_does_not_warn(self):
        """Pooling an unnamed tensor (forward and backward) emits no warnings."""
        for device in get_all_device_types():
            tensor_2d = torch.zeros(2, 3, 5, 7, device=device, requires_grad=True)
            with warnings.catch_warnings(record=True) as warns:
                warnings.simplefilter("always")
                result = F.max_pool2d(tensor_2d, [2, 2])
                result.sum().backward()
                self.assertEqual(len(warns), 0)
    def test_no_save_support(self):
        """torch.save on a named tensor is not yet implemented (raises NYI)."""
        named_tensor = torch.zeros(2, 3, names=('N', 'C'))
        buf = io.BytesIO()
        with self.assertRaisesRegex(RuntimeError, "NYI"):
            torch.save(named_tensor, buf)
    def test_no_pickle_support(self):
        """pickling a named tensor is not yet implemented (raises NYI)."""
        named_tensor = torch.zeros(2, 3, names=('N', 'C'))
        with self.assertRaisesRegex(RuntimeError, "NYI"):
            serialized = pickle.dumps(named_tensor)
    def test_no_multiprocessing_support(self):
        """Sharing a named tensor via ForkingPickler is not yet implemented."""
        named_tensor = torch.zeros(2, 3, names=('N', 'C'))
        buf = io.BytesIO()
        with self.assertRaisesRegex(RuntimeError, "NYI"):
            ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(named_tensor)
    def test_big_tensor_repr_has_names(self):
        """Even summarized (truncated) reprs of large tensors keep names=...."""
        def check_repr(named_tensor):
            unnamed_tensor = named_tensor.rename(None)
            names_tag = 'names={}'.format(named_tensor.names)
            self.assertIn(names_tag, repr(named_tensor))

        check_repr(torch.randn(128, 3, 64, 64, names=('N', 'C', 'H', 'W')))
    def test_noncontig_contiguous(self):
        # This type of contiguous is special-cased and therefore needs its own test
        for device in get_all_device_types():
            x = torch.randn(2, 3, device=device).t().rename_('N', 'C')
            self.assertEqual(x.contiguous().names, ('N', 'C'))
    def test_copy_transpose(self):
        # This type of copy is special-cased and therefore needs its own test
        def _test(self_names, other_names, expected_names):
            x = torch.empty(2, 5, names=self_names)
            # Transposed (non-contiguous) source triggers the special path.
            y = torch.empty(5, 2).t().rename_(*other_names)
            x.copy_(y)
            self.assertEqual(x.names, expected_names)

        _test(('N', 'C'), ('N', 'C'), ('N', 'C'))
        _test(None, ('N', 'C'), ('N', 'C'))
    def test_rename_(self):
        """In-place rename_: sets names, validates count and uniqueness."""
        tensor = torch.empty(1, 1, names=('N', 'C'))
        self.assertEqual(tensor.rename_(None).names, (None, None))
        self.assertEqual(tensor.rename_('H', 'W').names, ('H', 'W'))
        with self.assertRaisesRegex(RuntimeError, 'Number of names'):
            tensor.rename_('N', 'C', 'W')
        with self.assertRaisesRegex(RuntimeError, 'duplicate names'):
            tensor.rename_('N', 'N')
    def test_rename(self):
        """Out-of-place rename: returns a renamed view, leaves self untouched."""
        tensor = torch.empty(1, 1, names=('N', 'C'))

        self.assertEqual(tensor.rename(None).names, (None, None))
        self.assertEqual(tensor.rename('H', 'W').names, ('H', 'W'))

        # Check that we didn't modify tensor.names
        self.assertEqual(tensor.names, ('N', 'C'))

        with self.assertRaisesRegex(RuntimeError, 'Number of names'):
            tensor.rename('N', 'C', 'W')
        with self.assertRaisesRegex(RuntimeError, 'duplicate names'):
            tensor.rename('N', 'N')

        # Mixing positional names and keyword renames is disallowed.
        with self.assertRaisesRegex(RuntimeError, 'either positional args or keyword args'):
            tensor.rename(None, N='batch')

        # rename returns a view on the tensor
        self.assertEqual(tensor.rename('H', 'W').data_ptr(), tensor.data_ptr())
        self.assertEqual(tensor.rename(None).data_ptr(), tensor.data_ptr())
def test_rename_globber(self):
    """rename() with the '...' glob expands to cover the unmentioned dims."""
    scalar = torch.randn([])
    unnamed_tensor = torch.empty(1, 1, 1, 1)
    named_tensor = torch.empty(1, 1, 1, 1, names=('N', 'C', 'H', 'W'))

    # On a 0-dim tensor the glob matches zero dimensions.
    self.assertEqual(scalar.rename(None).names, [])
    self.assertEqual(scalar.rename('...').names, [])

    # Check that it works with unnamed tensors
    self.assertEqual(unnamed_tensor.rename('...').names, unnamed_tensor.names)
    self.assertEqual(unnamed_tensor.rename('...', 'H', 'W').names,
                     [None, None, 'H', 'W'])
    self.assertEqual(unnamed_tensor.rename('N', '...', 'W').names,
                     ['N', None, None, 'W'])
    self.assertEqual(unnamed_tensor.rename('N', 'C', '...').names,
                     ['N', 'C', None, None])

    # Check that it works with named tensors
    self.assertEqual(named_tensor.rename('...').names, named_tensor.names)
    self.assertEqual(named_tensor.rename('...', 'width').names,
                     ['N', 'C', 'H', 'width'])
    self.assertEqual(named_tensor.rename('batch', 'channels', '...', 'width').names,
                     ['batch', 'channels', 'H', 'width'])
    self.assertEqual(named_tensor.rename('batch', '...').names,
                     ['batch', 'C', 'H', 'W'])

    # Test empty glob
    self.assertEqual(unnamed_tensor.rename('...', None, None, None, None).names,
                     [None, None, None, None])
    self.assertEqual(named_tensor.rename('N', 'C', 'H', '...', 'W').names,
                     ['N', 'C', 'H', 'W'])

    # Multiple globs throw
    with self.assertRaisesRegex(RuntimeError, 'More than one '):
        named_tensor.rename('...', 'channels', '...')
def test_rename_rename_map(self):
    """rename(**kwargs) renames existing dims by name; unknown names raise."""
    scalar = torch.randn([])
    unnamed_tensor = torch.empty(1, 1, 1, 1)
    named_tensor = torch.empty(1, 1, 1, 1, names=('N', 'C', 'H', 'W'))

    # The keyword form requires the source name to already exist.
    with self.assertRaisesRegex(RuntimeError, "dim 'N' does not exist"):
        scalar.rename(N='batch')
    with self.assertRaisesRegex(RuntimeError, "dim 'N' does not exist"):
        unnamed_tensor.rename(N='batch')
    with self.assertRaisesRegex(RuntimeError, "dim 'B' does not exist"):
        named_tensor.rename(B='batch')
    with self.assertRaisesRegex(RuntimeError, "dim 'B' does not exist"):
        named_tensor.rename(H='height', B='batch')

    # Keyword rename also returns a view (same storage pointer).
    self.assertEqual(named_tensor.rename(N='batch').data_ptr(),
                     named_tensor.data_ptr())
    self.assertEqual(named_tensor.rename(N='batch').names,
                     ['batch', 'C', 'H', 'W'])
    self.assertEqual(named_tensor.rename(N='batch', H='height').names,
                     ['batch', 'C', 'height', 'W'])
def test_set_names_property(self):
    """Assigning to .names renames in place; validation mirrors rename_."""
    t = torch.empty(1, 1, names=('N', 'C'))

    t.names = None
    self.assertEqual(t.names, (None, None))

    t.names = ('N', 'W')
    self.assertEqual(t.names, ('N', 'W'))

    # Wrong arity and duplicates are both rejected by the setter.
    with self.assertRaisesRegex(RuntimeError, 'Number of names'):
        t.names = ['N', 'C', 'W']
    with self.assertRaisesRegex(RuntimeError, 'duplicate names'):
        t.names = ['N', 'N']
def test_factory_edge_cases(self):
    """Run the shared factory checker with torch.empty on every device type."""
    for dev in get_all_device_types():
        self._test_factory(torch.empty, dev)
def test_factory_coverage(self):
    """Factory functions accept names= and match rename_ on unnamed output."""
    def check(factory, device):
        dim_names = ('N', 'T', 'D')
        torch.manual_seed(0)
        with_names = factory(1, 2, 3, names=dim_names, device=device)
        # Reseed so random factories produce bit-identical data.
        torch.manual_seed(0)
        reference = factory(1, 2, 3, device=device).rename_(*dim_names)
        self.assertTensorDataAndNamesEqual(with_names, reference)

    supported = [
        torch.ones,
        torch.rand,
        torch.randn,
        torch.zeros,
    ]
    for factory, device in itertools.product(supported, get_all_device_types()):
        check(factory, device)

    # torch.full takes an extra fill value, so it is exercised separately.
    for device in get_all_device_types():
        dim_names = ('N', 'T', 'D')
        with_names = torch.full([1, 2, 3], 2., names=dim_names, device=device)
        reference = torch.full([1, 2, 3], 2., device=device).rename_(*dim_names)
        self.assertTensorDataAndNamesEqual(with_names, reference)
def test_tensor_from_lists(self):
    """torch.tensor(list, names=...) names the dims; arity must match."""
    self.assertEqual(torch.tensor([[1]], names=('N', 'C')).names, ('N', 'C'))
    self.assertEqual(torch.tensor([1], names=('N',)).names, ('N',))
    # One-dimensional data with two names must be rejected.
    with self.assertRaisesRegex(RuntimeError, 'Number of names'):
        torch.tensor([1], names=('N', 'C'))
@unittest.skipIf(not TEST_NUMPY, "no numpy")
def test_tensor_from_numpy(self):
    """torch.tensor on a numpy array accepts and applies names.

    Bug fix: the original built ``arr`` but then passed a plain nested
    list to torch.tensor, so the numpy conversion path was never
    exercised at all.
    """
    import numpy as np
    arr = np.array([[1]])
    names = ('N', 'C')
    tensor = torch.tensor(arr, names=names)
    self.assertEqual(tensor.names, names)
def test_tensor_from_tensor(self):
    """torch.tensor(tensor, names=...) attaches the requested names."""
    source = torch.randn(1, 1)
    wanted = ('N', 'C')
    self.assertEqual(torch.tensor(source, names=wanted).names, wanted)
def test_tensor_from_named_tensor(self):
    """torch.tensor on a named tensor inherits the names; mismatches raise."""
    src = torch.randn(1, 1, names=('N', 'D'))
    self.assertEqual(torch.tensor(src).names, ('N', 'D'))

    # there's no way to distinguish between names=None and not passing in names.
    # If the user passes in names=None they are asking for trouble.
    src = torch.randn(1, 1, names=('N', 'D'))
    self.assertEqual(torch.tensor(src, names=None).names, ('N', 'D'))

    # Conflicting explicit names must be rejected.
    src = torch.randn(1, 1, names=('N', 'D'))
    with self.assertRaisesRegex(RuntimeError, "Name mismatch"):
        torch.tensor(src, names=('N', 'C'))
def test_size(self):
    """size(name) looks up the dimension by name; bad lookups raise."""
    named = torch.empty(2, 3, 5, names=('N', None, 'C'))
    self.assertEqual(named.size('N'), 2)
    self.assertEqual(named.size('C'), 5)
    # None is not a valid lookup key.
    with self.assertRaisesRegex(RuntimeError, 'Please look up dimensions by name*'):
        named.size(None)
    with self.assertRaisesRegex(RuntimeError, 'Name \'channels\' not found in '):
        named.size('channels')
    # A fully-unnamed tensor has no names to look up.
    with self.assertRaisesRegex(RuntimeError, 'Name \'N\' not found in '):
        torch.empty(2, 3, 4).size('N')
def test_stride(self):
    """stride(name) looks up the dimension by name; bad lookups raise."""
    named = torch.empty(2, 3, 5, names=('N', None, 'C'))
    self.assertEqual(named.stride('N'), 3 * 5)
    self.assertEqual(named.stride('C'), 1)
    # None is not a valid lookup key.
    with self.assertRaisesRegex(RuntimeError, 'Please look up dimensions by name'):
        named.stride(None)
    with self.assertRaisesRegex(RuntimeError, 'Name \'channels\' not found in '):
        named.stride('channels')
    # A fully-unnamed tensor has no names to look up.
    with self.assertRaisesRegex(RuntimeError, 'Name \'N\' not found in '):
        torch.empty(2, 3, 4).stride('N')
def test_transpose_variants(self):
    """transpose accepts names or ints; t() swaps a 2-dim tensor's names."""
    x = torch.randn(2, 3, 5, 7, names=('N', 'C', 'H', 'W'))
    self.assertEqual(x.transpose('N', 'C').names, ['C', 'N', 'H', 'W'])
    self.assertEqual(x.transpose(1, 3).names, ['N', 'W', 'H', 'C'])

    y = torch.randn(2, 3, names=('N', 'C'))
    self.assertEqual(y.t().names, ['C', 'N'])
def test_resize(self):
    """Named tensors may only be resized to their current shape;
    resize_as_ additionally propagates names between the tensors."""
    for device in get_all_device_types():
        named = torch.randn(2, names=('N',), device=device)
        # Resizing to the current shape is a no-op and is allowed.
        named.resize_([2])
        self.assertEqual(named.names, ['N'])

        # Any actual shape change on a named tensor is forbidden.
        with self.assertRaisesRegex(RuntimeError, "Cannot resize named tensor"):
            named.resize_([3])

        other_named = torch.randn(2, names=('N',), device=device)
        named.resize_as_(other_named)
        self.assertEqual(other_named.names, ['N'])

        # resize_as_ with a name mismatch (named target, unnamed source) raises.
        unnamed = torch.randn(2, device=device)
        with self.assertRaisesRegex(
                RuntimeError, r'names .* are not the same as the computed output names'):
            named.resize_as_(unnamed)

        # Unnamed target resized as a named source picks up the names.
        unnamed = torch.randn(1, device=device)
        unnamed.resize_as_(named)
        self.assertEqual(unnamed.names, ['N'])
def test_cdist(self):
    """cdist unifies batch names and pairs the two inputs' group dims."""
    for device in get_all_device_types():
        lhs = torch.randn(3, 1, 2, 7, names=('M', 'N', 'first_group', 'features'),
                          device=device)
        rhs = torch.randn(5, 11, 7, names=('N', 'second_group', 'features'),
                          device=device)
        distances = torch.cdist(lhs, rhs)
        self.assertEqual(distances.names, ['M', 'N', 'first_group', 'second_group'])
def test_info_smoke(self):
    # Smoke test for info functions / methods / attributes on named tensors.
    # Each access below must simply not raise; no return values are checked.
    tensor = torch.empty(1, 1, names=('N', 'D'))
    tensor.device
    tensor.dtype
    tensor.get_device()
    tensor.is_complex()
    tensor.is_floating_point()
    tensor.is_nonzero()
    torch.is_same_size(tensor, tensor)
    torch.is_signed(tensor)
    tensor.layout
    tensor.numel()
    tensor.dim()
    tensor.element_size()
    tensor.is_contiguous()
    tensor.is_cuda
    tensor.is_leaf
    tensor.is_pinned()
    tensor.is_shared()
    tensor.is_sparse
    tensor.ndimension()
    tensor.nelement()
    tensor.shape
    tensor.size()
    tensor.size(1)
    tensor.storage()
    tensor.storage_offset()
    tensor.storage_type()
    tensor.stride()
    tensor.stride(1)
    tensor.data
    tensor.data_ptr()
    tensor.ndim
    tensor.item()
    tensor.type()
    tensor.is_shared()
    tensor.is_signed()
def test_autograd_smoke(self):
    """Smoke test: backward, hooks, and autograd attributes must not
    choke on named tensors."""
    x = torch.randn(3, 3, names=('N', 'D'), requires_grad=True)
    y = x.clone()
    y.retain_grad()
    y.register_hook(lambda x: x)
    y.sum().backward()

    # autograd related attributes
    tensor = torch.empty(1, 1, names=('N', 'D'), requires_grad=True)
    tensor = tensor.relu()
    tensor.output_nr
    tensor.grad_fn
    tensor.requires_grad
def test_split_fns_propagates_names(self):
    """Every output of split/chunk keeps the input tensor's names."""
    splitters = [
        lambda x: x.split(1, 0),
        lambda x: x.split([1, 1], 1),
        lambda x: x.chunk(2, 0),
    ]
    for device in get_all_device_types():
        source = torch.empty(2, 2, names=('N', 'D'), device=device)
        for splitter in splitters:
            for piece in splitter(source):
                self.assertEqual(piece.names, source.names)
def test_any_all(self):
    """Full reductions any()/all() return 0-dim tensors with no names."""
    for device in get_all_device_types():
        flags = torch.zeros(3, dtype=torch.bool, device=device, names=('C',))
        self.assertEqual(flags.any().names, [])
        self.assertEqual(flags.all().names, [])
def test_addcmul_addcdiv(self):
    """addcmul/addcdiv propagate names for fn, out=, and in-place forms."""
    for device in get_all_device_types():
        names = ['N']
        t1 = torch.rand(3, device=device, names=names)
        t2 = torch.rand(3, device=device, names=names)
        # Clamping keeps the divisor away from zero for addcdiv.
        t3 = torch.rand(3, device=device, names=names).clamp_min_(0.1)
        dest = torch.randn(3, device=device, names=names)

        self.assertEqual(torch.addcmul(t1, t2, t3).names, names)
        self.assertEqual(torch.addcmul(t1, t2, t3, out=dest).names, names)
        self.assertEqual(t1.addcmul_(t2, t3).names, names)

        self.assertEqual(torch.addcdiv(t1, t2, t3).names, names)
        self.assertEqual(torch.addcdiv(t1, t2, t3, out=dest).names, names)
        self.assertEqual(t1.addcdiv_(t2, t3).names, names)
def test_binary_ops(self):
    """Table-driven check that binary pointwise ops (fn/method/in-place/out=)
    unify and propagate names correctly."""
    def test_basic(op):
        # Matching, broadcastable, and mismatched name combinations.
        a = torch.empty(2, 3, names=('N', 'C'))
        b = torch.empty(3, 2, names=('C', 'N'))
        c = torch.empty(3, names=('C',))
        d = torch.empty(5, names=('W',))

        self.assertEqual(op(a, a).names, ('N', 'C'))
        self.assertEqual(op(a, c).names, ('N', 'C'))

        with self.assertRaisesRegex(RuntimeError, "do not match"):
            op(a, d)
        with self.assertRaisesRegex(RuntimeError, "do not match"):
            op(a, b)

    def test_wildcard(op):
        # None names unify with real names; crossed names are "Misaligned".
        a = torch.empty(2, 3, names=('N', 'C'))
        c = torch.empty(2, 3, names=(None, 'C'))
        self.assertEqual(op(a, c).names, ('N', 'C'))

        b = torch.empty(2, 3)
        self.assertEqual(op(a, b).names, ('N', 'C'))

        d = torch.empty(2, 3, names=('C', None))
        with self.assertRaisesRegex(RuntimeError, "Misaligned"):
            op(d, c)

    def test_mixed_unnamed_named(op, is_inplace):
        named2 = torch.randn(1, 1, names=('N', 'C'))
        unnamed1 = torch.randn(1)
        unnamed2 = torch.randn(1, 1)
        unnamed3 = torch.randn(1, 1, 1)

        def compute_expected_names(tensor, other):
            # Exactly one side is named; pad the named side's names with
            # None on the left if the unnamed side has more dims.
            assert tensor.has_names() ^ other.has_names()
            named = tensor if tensor.has_names() else other
            unnamed = other if tensor.has_names() else tensor
            unnamed_dim = unnamed.dim()
            if unnamed_dim > named.dim():
                return [None] * (unnamed_dim - named.dim()) + list(named.names)
            else:
                return named.names

        inputs = itertools.chain(
            itertools.product([named2], [unnamed1, unnamed2, unnamed3]),
            itertools.product([unnamed1, unnamed2, unnamed3], [named2]),
        )
        if is_inplace:
            # In-place ops have the constraint that they must not change shape.
            inputs = [(a, b) for (a, b) in inputs if a.dim() >= b.dim()]

        for tensor, other in inputs:
            expected_names = compute_expected_names(tensor, other)
            self.assertEqual(op(tensor, other).names, expected_names)

    # Builders that wrap one op into (name, callable) Function records.
    def method(name, *args, **kwargs):
        return [Function(name, lambda a, b: getattr(a, name)(b, *args, **kwargs))]

    def function(name, *args, **kwargs):
        return [Function(name, lambda a, b: getattr(torch, name)(a, b, *args, **kwargs))]

    def out_function(name, *args, **kwargs):
        out_fn = getattr(torch, name)

        def fn(a, b):
            result = torch.empty([0], dtype=a.dtype, device=a.device)
            out_fn(a, b, *args, out=result, **kwargs)
            return result

        return [Function(name, fn)]

    def fn_method_and_inplace(name, *args, **kwargs):
        return (
            method(name, *args, **kwargs) +
            method(name + '_', *args, **kwargs) +
            out_function(name, *args, **kwargs)
        )

    tests = [
        fn_method_and_inplace('add'),
        fn_method_and_inplace('div'),
        fn_method_and_inplace('mul'),
        fn_method_and_inplace('sub'),
        fn_method_and_inplace('pow'),
        fn_method_and_inplace('atan2'),
        method('copy_'),
        function('floor_divide'),
        function('true_divide'),
    ]
    tests = flatten(tests)

    for name, op in tests:
        test_basic(op)
        test_wildcard(op)
        # Trailing underscore marks the in-place variant.
        test_mixed_unnamed_named(op, is_inplace=name.endswith('_'))
def test_logical_ops(self):
    # Implemented via TensorIterator, so just check that each version
    # (out-of-place, inplace, out=) propagates names.
    def zeros(*args, **kwargs):
        # bool-dtype factory so the logical ops accept the inputs
        return torch.zeros(*args, dtype=torch.bool, **kwargs)

    for op in ('logical_xor', 'logical_and', 'logical_or'):
        self._test_name_inference(
            getattr(torch, op),
            (create('N:2,C:3', zeros), create('N:2,C:3', zeros)),
            expected_names=['N', 'C'])

        self._test_name_inference(
            getattr(Tensor, op + '_'),
            (create('N:2,C:3', zeros), create('N:2,C:3', zeros)),
            expected_names=['N', 'C'])

        self._test_name_inference(
            lambda out, x, y: getattr(torch, op)(x, y, out=out),
            (create('0', zeros), create('N:2,C:3', zeros), create('N:2,C:3', zeros)),
            expected_names=['N', 'C'])
def test_pow_special(self):
    """pow's special-cased paths (exponent 0, exponent 1, scalar base)
    bypass TensorIterator, so check name propagation for each here."""
    for device in get_all_device_types():
        named = torch.randn(2, 3, names=('N', 'C'), device=device)
        scratch = torch.randn([0], device=device)

        for call_args in ((named, 0), (named, 1), (1, named)):
            out = torch.pow(*call_args, out=scratch.clone())
            self.assertEqual(out.names, named.names)
def test_out_fn_semantics(self):
    """out= overwrites the output's names with the computed names, except
    that a *partially/none*-named out tensor may not silently conflict."""
    out_fn = torch.abs
    unnamed_tensor = torch.randn(3, 2)
    none_named_tensor = torch.randn(3, 2, names=(None, None))
    named_tensor = torch.randn(3, 2, names=('N', 'C'))
    partially_named_tensor = torch.randn(3, 2, names=('N', None))

    # Conflicting name combinations between input and out= raise.
    with self.assertRaisesRegex(RuntimeError, "Name mismatch"):
        out_fn(partially_named_tensor, out=named_tensor)
    with self.assertRaisesRegex(RuntimeError, "Name mismatch"):
        out_fn(named_tensor, out=partially_named_tensor)
    with self.assertRaisesRegex(RuntimeError, "Name mismatch"):
        out_fn(none_named_tensor, out=named_tensor)
    with self.assertRaisesRegex(RuntimeError, "Name mismatch"):
        out_fn(unnamed_tensor, out=named_tensor)

    # unnamed input -> unnamed out stays unnamed.
    output = torch.randn(3, 2)
    out_fn(unnamed_tensor, out=output)
    self.assertFalse(output.has_names())

    # named input -> none-named out picks up the input's names.
    output = torch.randn(3, 2, names=(None, None))
    out_fn(named_tensor, out=output)
    self.assertEqual(output.names, named_tensor.names)

    # named input -> unnamed out picks up the input's names.
    output = torch.randn(3, 2)
    out_fn(named_tensor, out=output)
    self.assertEqual(output.names, named_tensor.names)

    # unnamed input -> none-named out stays unnamed.
    output = torch.randn(3, 2, names=(None, None))
    out_fn(unnamed_tensor, out=output)
    self.assertFalse(output.has_names())
def test_unary_propagate_names_fns(self):
    """Table-driven check that unary ops (fn/method/in-place/out= forms)
    propagate the input's names to the result."""
    def _test(testcase, names=('N', 'D'), device='cpu'):
        sizes = [2] * len(names)
        tensor = torch.empty(sizes, names=names, device=device)
        try:
            out = testcase.lambd(tensor)
        except RuntimeError as err:
            # Get a better error message by catching the error and asserting.
            raise RuntimeError('{}: {}'.format(testcase.name, err)) from err
        self.assertEqual(out.names, tensor.names,
                         msg=testcase.name)

    # Builders that wrap one op into (name, callable) Function records.
    def fn(name, *args, **kwargs):
        return [Function(name, lambda t: getattr(torch, name)(t, *args, **kwargs))]

    def method(name, *args, **kwargs):
        return [Function(name, lambda t: getattr(t, name)(*args, **kwargs))]

    def out_function(name, *args, **kwargs):
        out_fn = getattr(torch, name)

        def fn(tensor):
            result = torch.empty([0], dtype=tensor.dtype, device=tensor.device)
            out_fn(tensor, *args, out=result, **kwargs)
            return result

        return [Function(name + '_out', fn)]

    def fn_method_and_inplace(name, *args, **kwargs):
        return (
            method(name, *args, **kwargs) +
            method(name + '_', *args, **kwargs) +
            out_function(name, *args, **kwargs)
        )

    # All of these operate on 2x2 tensors.
    tests = [
        # unary pointwise
        fn_method_and_inplace('abs'),
        fn_method_and_inplace('acos'),
        fn_method_and_inplace('asin'),
        fn_method_and_inplace('atan'),
        fn_method_and_inplace('ceil'),
        fn_method_and_inplace('clamp', -1, 1),
        fn_method_and_inplace('clamp_min', -2),
        fn_method_and_inplace('clamp_max', 2),
        method('cauchy_'),
        method('clone'),
        method('contiguous'),
        fn_method_and_inplace('cos'),
        fn_method_and_inplace('cosh'),
        fn_method_and_inplace('digamma'),
        fn_method_and_inplace('erf'),
        fn_method_and_inplace('erfc'),
        fn_method_and_inplace('erfinv'),
        fn_method_and_inplace('exp'),
        fn_method_and_inplace('expm1'),
        method('exponential_'),
        fn_method_and_inplace('floor'),
        fn_method_and_inplace('frac'),
        method('geometric_', p=0.5),
        fn_method_and_inplace('lgamma'),
        fn_method_and_inplace('log'),
        fn_method_and_inplace('log10'),
        fn_method_and_inplace('log1p'),
        fn_method_and_inplace('log2'),
        method('log_normal_'),
        fn_method_and_inplace('neg'),
        method('normal_'),
        [Function('polygamma', lambda t: torch.polygamma(1, t))],
        method('polygamma_', 1),
        fn_method_and_inplace('reciprocal'),
        method('random_', 0, 1),
        method('random_', 1),
        method('random_'),
        method('relu_'),
        method('requires_grad_'),
        method('relu'),
        fn_method_and_inplace('round'),
        fn_method_and_inplace('rsqrt'),
        fn_method_and_inplace('sigmoid'),
        fn_method_and_inplace('sign'),
        fn_method_and_inplace('sin'),
        fn_method_and_inplace('sinh'),
        fn_method_and_inplace('sqrt'),
        fn_method_and_inplace('tan'),
        fn_method_and_inplace('tanh'),
        fn('threshold', 0, 1),
        fn('threshold_', 0, 1),
        out_function('threshold', 0, 1),
        fn_method_and_inplace('trunc'),
        method('uniform_'),
        method('zero_'),
        method('fill_', 1),
        method('fill_', torch.tensor(3.14)),
        # conversions
        method('to', dtype=torch.long),
        method('to', device='cpu'),
        method('to', torch.empty([])),
        method('bool'),
        method('byte'),
        method('char'),
        method('cpu'),
        method('double'),
        method('float'),
        method('long'),
        method('half'),
        method('int'),
        method('short'),
        method('type', dtype=torch.long),
        # cumsum and cumprod
        fn('cumsum', 0),
        fn('cumsum', 'D'),
        out_function('cumsum', 'D'),
        fn('cumprod', 0),
        fn('cumprod', 'D'),
        out_function('cumprod', 'D'),
        # views
        method('narrow', 0, 0, 1),
        # creation functions
        fn('empty_like'),
        fn('zeros_like'),
        fn('ones_like'),
        fn('full_like', 3.14),
        fn('rand_like'),
        fn('randn_like'),
        # bernoulli variants
        method('bernoulli_', 0.5),
        method('bernoulli_', torch.tensor(0.5)),
        method('softmax', dim=1),
        method('softmax', dim='D'),
        method('log_softmax', dim=1),
        method('log_softmax', dim='D'),
        [Function('F.dropout(inplace)', lambda t: F.dropout(t, p=0.5, inplace=True))],
        [Function('F.dropout(outplace)', lambda t: F.dropout(t, p=0.5, inplace=False))],
    ]
    tests = flatten(tests)

    for testcase, device in itertools.product(tests, get_all_device_types()):
        _test(testcase, device=device)
def test_cummax_cummin(self):
    """cummax/cummin return (values, indices); both carry the input names.

    Fix: the device loop previously never passed ``device`` to the
    factory, so every iteration tested CPU tensors only.
    """
    def test_ops(op):
        for device in get_all_device_types():
            names = ('N', 'D')
            tensor = torch.rand(2, 3, names=names, device=device)
            result = op(tensor, 0)
            self.assertEqual(result[0].names, names)
            self.assertEqual(result[1].names, names)

    test_ops(torch.cummax)
    test_ops(torch.cummin)
def test_logcumsumexp(self):
    """logcumsumexp with a named dim propagates names.

    Fix: the device loop previously never passed ``device`` to the
    factory, so every iteration tested CPU tensors only.
    """
    for device in get_all_device_types():
        names = ('N', 'D')
        tensor = torch.rand(2, 3, names=names, device=device)
        result = torch.logcumsumexp(tensor, 'D')
        self.assertEqual(result.names, names)
def test_bitwise_not(self):
    """bitwise_not propagates names in fn, out=, and in-place forms.

    Fix: the device loop previously never passed ``device`` to the
    factories, so every iteration tested CPU tensors only (and an out=
    tensor on the wrong device would have errored once fixed, so the
    out buffer is placed on the same device too).
    """
    for device in get_all_device_types():
        names = ('N', 'D')
        tensor = torch.zeros(2, 3, names=names, dtype=torch.bool, device=device)
        result = torch.empty(0, dtype=torch.bool, device=device)

        self.assertEqual(tensor.bitwise_not().names, names)
        self.assertEqual(torch.bitwise_not(tensor, out=result).names, names)
        self.assertEqual(tensor.bitwise_not_().names, names)
def test_logical_not(self):
    """logical_not propagates names in fn, out=, and in-place forms.

    Fix: the device loop previously never passed ``device`` to the
    factories, so every iteration tested CPU tensors only; the out=
    buffer is placed on the same device to match.
    """
    for device in get_all_device_types():
        names = ('N', 'D')
        tensor = torch.zeros(2, 3, names=names, dtype=torch.bool, device=device)
        result = torch.empty(0, dtype=torch.bool, device=device)

        self.assertEqual(tensor.logical_not().names, names)
        self.assertEqual(torch.logical_not(tensor, out=result).names, names)
        self.assertEqual(tensor.logical_not_().names, names)
def test_bernoulli(self):
    """bernoulli (fn and out= forms) propagates names.

    Fix: the device loop previously never passed ``device`` to the
    factories, so every iteration tested CPU tensors only; the out=
    buffer is placed on the same device to match.
    """
    for device in get_all_device_types():
        names = ('N', 'D')
        tensor = torch.rand(2, 3, names=names, device=device)
        result = torch.empty(0, device=device)

        self.assertEqual(tensor.bernoulli().names, names)

        torch.bernoulli(tensor, out=result)
        self.assertEqual(result.names, names)
def test_flatten(self):
    """flatten merges consecutive named dims into one new named dim; the
    name/int/list overloads must agree, and non-consecutive dims raise."""
    tensor = torch.randn(2, 3, 5, 7, 11, names=('N', 'C', 'D', 'H', 'W'))

    # basic
    out = tensor.flatten('D', 'W', 'features')
    self.assertEqual(out.names, ['N', 'C', 'features'])
    self.assertEqual(out.rename(None), tensor.rename(None).view(2, 3, -1))

    # int overload
    out = tensor.flatten(2, 4, 'features')
    self.assertEqual(out.names, ['N', 'C', 'features'])
    self.assertEqual(out.rename(None), tensor.rename(None).view(2, 3, -1))

    # list overload
    out = tensor.flatten(['D', 'H', 'W'], 'features')
    self.assertEqual(out.names, ['N', 'C', 'features'])
    self.assertEqual(out.rename(None), tensor.rename(None).view(2, 3, -1))

    # Non-contiguous flatten: N and H are not "adjacent" in memory.
    sentences = torch.randn(2, 3, 5, 7, names=('N', 'T', 'H', 'D'))
    sentences = sentences.transpose('T', 'H')
    out = sentences.flatten('N', 'H', 'N_H')
    self.assertEqual(out.names, ['N_H', 'T', 'D'])

    # Unknown names and non-consecutive spans are rejected.
    with self.assertRaisesRegex(RuntimeError, "Name 'L' not found in"):
        tensor.flatten(['D', 'L'], 'features')
    with self.assertRaisesRegex(RuntimeError, "must be consecutive in"):
        tensor.flatten(['D', 'W'], 'features')
    with self.assertRaisesRegex(RuntimeError, "must be consecutive in"):
        tensor.flatten(['H', 'D', 'W'], 'features')
def test_flatten_nodims(self):
    """Flattening an empty dim list is rejected with a clear error."""
    t = torch.empty((2, 3))
    with self.assertRaisesRegex(RuntimeError, "cannot be empty"):
        t.flatten((), 'abcd')
def test_unflatten(self):
    """unflatten splits one named dim into several, given a named shape;
    a single -1 size is inferred, and invalid shapes/arguments raise."""
    # test args: tensor, int, namedshape
    self.assertTrue(torch.equal(
        torch.ones(4, names=('A',)).unflatten('A', (('A', 2), ('B', 2))),
        torch.ones(2, 2, names=('A', 'B'))))
    self.assertTrue(torch.equal(
        torch.ones(4, names=('A',)).unflatten('A', [('A', 2), ('B', 2)]),
        torch.ones(2, 2, names=('A', 'B'))))
    self.assertTrue(torch.equal(
        torch.ones(4, names=('A',)).unflatten('A', (['A', 2], ['B', 2])),
        torch.ones(2, 2, names=('A', 'B'))))
    # -1 is inferred from the remaining sizes.
    self.assertTrue(torch.equal(
        torch.ones(2, 10, names=('A', 'B')).unflatten('B', (['B1', -1],)),
        torch.ones(2, 10, names=('A', 'B1'))))
    self.assertTrue(torch.equal(
        torch.ones(2, 3 * 4 * 5 * 6, names=('A', 'B'))
        .unflatten('B', (['B1', 3], ['B2', 4], ['B3', -1], ['B4', 6])),
        torch.ones(2, 3, 4, 5, 6, names=('A', 'B1', 'B2', 'B3', 'B4'))))
    # -1 works with a zero-sized dim as long as it is unambiguous.
    self.assertTrue(torch.equal(
        torch.ones(2, 0, names=('A', 'B'))
        .unflatten('B', (['B1', 3], ['B2', -1], ['B3', 4])),
        torch.ones(2, 3, 0, 4, names=('A', 'B1', 'B2', 'B3'))))
    # test args: namedtensor, str, namedshape
    self.assertTrue(torch.equal(
        torch.ones(2, 4, names=('A', 'B')).unflatten('B', (('B1', 2), ('B2', 2))),
        torch.ones(2, 2, 2, names=('A', 'B1', 'B2'))))

    # test invalid args: namedtensor, str, sizes
    with self.assertRaisesRegex(TypeError, r"unflatten\(\): argument 'dim' \(position 1\) must be int, not str"):
        torch.tensor([1], names=('A',)).unflatten('A', (1, 1))

    # test invalid args: namedtensor, int, sizes
    with self.assertRaisesRegex(RuntimeError, r"input is a named tensor but no names were given for unflattened sizes"):
        torch.tensor([1], names=("A",)).unflatten(0, (1, 1))

    with self.assertRaisesRegex(RuntimeError,
                                r"Provided sizes \[3, -1\] don't multiply up to the "
                                r"size of dim 1 \('B': 4\) in Tensor\['A', 'B'\]"):
        torch.ones(2, 4, names=('A', 'B')).unflatten('B', (('B1', 3), ('B2', -1)))

    with self.assertRaisesRegex(RuntimeError,
                                r"the unspecified dimension size -1 can be any value and is ambiguous"):
        torch.ones(2, 0, names=('A', 'B')).unflatten('B', (('B1', 0), ('B2', -1)))

    tensor = torch.randn(7, 2 * 3 * 5, 11, names=('N', 'D', 'K'))

    # accepts OrderedDict
    out = tensor.unflatten('D', OrderedDict((('C', 2), ('H', 3), ('W', 5))))
    self.assertEqual(out.names, ('N', 'C', 'H', 'W', 'K'))
    self.assertEqual(out.shape, (7, 2, 3, 5, 11))

    # Unflatten left-most
    out = tensor.unflatten('N', (('N', 7), ('H', 1)))
    self.assertEqual(out.names, ('N', 'H', 'D', 'K'))
    self.assertEqual(out.shape, (7, 1, 2 * 3 * 5, 11))

    # Unflatten right-most
    out = tensor.unflatten('K', (('K', 11), ('H', 1)))
    self.assertEqual(out.names, ('N', 'D', 'K', 'H'))
    self.assertEqual(out.shape, (7, 2 * 3 * 5, 11, 1))

    with self.assertRaisesRegex(RuntimeError, "don't multiply up to"):
        tensor.unflatten('D', (('H', 3), ('W', 5)))

    with self.assertRaisesRegex(RuntimeError, 'sizes must be non-empty'):
        tensor.unflatten('D', None)

    with self.assertRaisesRegex(RuntimeError, 'non-empty'):
        tensor.unflatten('D', OrderedDict())
def test_unsupported_op_error_msg(self):
    """Ops without named-tensor support must say so explicitly."""
    named = torch.randn(3, 3, names=('N', 'C'))

    with self.assertRaisesRegex(
            RuntimeError, r"pdist.+is not yet supported with named tensors"):
        torch.pdist(named)

    with self.assertRaisesRegex(
            RuntimeError, r"as_strided_.+is not yet supported with named tensors"):
        named.as_strided_((3, 3), (3, 1))
def test_reduction_fns(self):
    """Table-driven check that reductions accept named dims and remove the
    reduced names (or keep them with keepdim=True)."""
    def check_output(output, expected_names):
        # Reductions may return a Tensor or a tuple of Tensors.
        if isinstance(output, torch.Tensor):
            self.assertEqual(output.names, expected_names)
            return
        for out in output:
            self.assertEqual(out.names, expected_names)

    def sum_all_outputs(output):
        # Collapse tensor-or-tuple output to a scalar for backward().
        if isinstance(output, torch.Tensor):
            return output.sum()
        result = 0
        for out in output:
            result = out + result
        return result.sum()

    def test_simple_reduce(op, device):
        t = torch.empty(2, 3, 5, names=('N', 'C', 'L'), device=device)
        check_output(op(t, 1), ['N', 'L'])
        check_output(op(t, -1), ['N', 'C'])
        check_output(op(t, 'C'), ['N', 'L'])
        # Only some reductions allow dim=None (full reduce).
        ops_support_dim_none = [
            'sum',
            'mean',
            'std',
            'var',
            'std_mean',
            'var_mean',
            'nanmean',
            'nansum',
        ]
        if op.__name__ in ops_support_dim_none:
            check_output(op(t, None), [])
        else:
            with self.assertRaisesRegex(RuntimeError, 'Please look up dimensions by name'):
                op(t, None)
        with self.assertRaisesRegex(RuntimeError, 'Name \'H\' not found'):
            op(t, 'H')

    def test_autograd_supports_dimname_overload(op, device):
        t = torch.empty(2, 3, 5, names=('N', 'C', 'L'), device=device, requires_grad=True)
        sum_all_outputs(op(t, 'C')).backward()
        self.assertIsNotNone(t.grad)

    def test_complete_reduce(op, device):
        t = torch.empty(2, 3, 5, names=('N', 'C', 'L'), device=device)
        check_output(op(t), [])

    def test_multidim_reduce(op, device):
        t = torch.empty(2, 3, 5, names=('N', 'C', 'L'), device=device)
        check_output(op(t, [1, 2]), ['N'])
        check_output(op(t, [0, -1]), ['C'])
        check_output(op(t, ['C', 'L']), ['N'])
        with self.assertRaisesRegex(RuntimeError, 'Please look up dimensions by name'):
            op(t, [None, 'C'])

    def test_out_variant(op, output_lambda, device):
        t = torch.empty(2, 3, 5, names=('N', 'C', 'L'), device=device)
        # Some ops need a (values, indices) pair as the out argument.
        if output_lambda:
            out = output_lambda(t)
        else:
            out = torch.empty([0], device=device)
        op(t, 'C', out=out)
        check_output(out, ['N', 'L'])

    def test_keepdim(op, device):
        t = torch.empty(2, 3, 5, names=('N', 'C', 'L'), device=device)
        check_output(op(t, 'C', keepdim=True), ['N', 'C', 'L'])

    def values_and_indices(t):
        return (torch.empty([0], device=t.device),
                torch.empty([0], device=t.device, dtype=torch.long))

    def kthvalue_wrapper(tensor, *args, **kwargs):
        # Return the 0-th value
        return torch.kthvalue(tensor, 1, *args, **kwargs)

    # Per-op capability flags select which sub-tests run below.
    Case = namedtuple('Case', [
        'op',
        'supports_complete_reduce',
        'supports_multidim_reduce',
        'supports_out_variant',
        'supports_keepdim',
        'output_lambda',
    ])

    tests = [
        Case(torch.sum, True, True, True, True, None),
        Case(torch.prod, True, False, True, True, None),
        Case(torch.mean, True, True, True, True, None),
        Case(torch.var, True, True, True, True, None),
        Case(torch.std, True, True, True, True, None),
        Case(torch.std_mean, True, True, False, True, None),
        Case(torch.var_mean, True, True, False, True, None),
        Case(torch.min, True, False, True, True, values_and_indices),
        Case(torch.max, True, False, True, True, values_and_indices),
        Case(torch.unbind, False, False, False, False, None),
        Case(torch.logsumexp, False, True, True, True, None),
        Case(torch.mode, False, False, True, True, values_and_indices),
        Case(kthvalue_wrapper, False, False, True, True, values_and_indices),
        Case(torch.median, True, False, True, True, values_and_indices),
        Case(torch.nanmedian, True, False, True, True, values_and_indices),
    ]

    for testcase, device in itertools.product(tests, get_all_device_types()):
        op = testcase.op
        test_simple_reduce(op, device)
        test_autograd_supports_dimname_overload(op, device)

        if testcase.supports_keepdim:
            test_keepdim(op, device)
        if testcase.supports_out_variant:
            test_out_variant(op, testcase.output_lambda, device)
        if testcase.supports_complete_reduce:
            test_complete_reduce(op, device)
        if testcase.supports_multidim_reduce:
            test_multidim_reduce(op, device)
def test_masked_select(self):
    """masked_select unifies input/mask names but always returns a 1-dim
    unnamed result (names=[None])."""
    # simple
    self._test_name_inference(
        torch.masked_select,
        (create('N:2,C:3'), (create('2,3') > 0).rename('N', 'C')),
        expected_names=[None])

    # left broadcast
    self._test_name_inference(
        torch.masked_select,
        (create('C:3'), (create('2,3') > 0).rename('N', 'C')),
        expected_names=[None])

    # right broadcast
    self._test_name_inference(
        torch.masked_select,
        (create('N:2,C:3'), (create('3') > 0).rename('C')),
        expected_names=[None])

    # error
    self._test_name_inference(
        torch.masked_select,
        (create('N:2,C:3'), (create('3') > 0).rename('D')),
        maybe_raises_regex='do not match')

    # out=
    self._test_name_inference(
        out_fn(torch.masked_select),
        (create('0'), create('N:2,C:3'), (create('2,3') > 0).rename('N', 'C')),
        expected_names=[None])
def test_cat(self):
    """cat requires all inputs to have matching names and dimensionality,
    and propagates the common names (also for the out= variant)."""
    # simple
    self._test_name_inference(
        torch.cat,
        [[create('N:2,C:3'), create('N:2,C:3')]],
        expected_names=['N', 'C'])

    # error: zero dim
    self._test_name_inference(
        torch.cat,
        [[create(''), create('')]],
        maybe_raises_regex='zero-dim')

    # error: names don't match
    self._test_name_inference(
        torch.cat,
        [[create('N:2,C:3'), create('C:3,N:2')]],
        maybe_raises_regex='do not match')

    # error: different number of dims
    self._test_name_inference(
        torch.cat,
        [[create('N:2,C:3'), create('C:3')]],
        maybe_raises_regex='must have same number of dimensions')

    # out=
    self._test_name_inference(
        out_fn(torch.cat),
        [create('0'), [create('N:2,C:3'), create('N:2,C:3')]],
        expected_names=['N', 'C'])
def test_masked_fill(self):
    """masked_fill unifies input/mask names; the mask may broadcast on the
    right but not expand the input on the left."""
    # simple
    self._test_name_inference(
        Tensor.masked_fill,
        (create('N:2,C:3'), (create('2,3') > 0).rename('N', 'C'), 3.14),
        expected_names=['N', 'C'])

    # left broadcast
    self._test_name_inference(
        Tensor.masked_fill,
        (create('C:3'), (create('2,3') > 0).rename('N', 'C'), 3.14),
        maybe_raises_regex="must be less than or equal to")

    # right broadcast
    self._test_name_inference(
        Tensor.masked_fill,
        (create('N:2,C:3'), (create('3') > 0).rename('C'), 3.14),
        expected_names=['N', 'C'])

    # error
    self._test_name_inference(
        Tensor.masked_fill,
        (create('N:2,C:3'), (create('3') > 0).rename('D'), 3.14),
        maybe_raises_regex='do not match')

    # inplace
    self._test_name_inference(
        Tensor.masked_fill_,
        (create('N:2,C:3'), (create('2,3') > 0).rename('N', 'C'), 3.14),
        expected_names=['N', 'C'])

    # inplace, computed names don't match output tensor names
    self._test_name_inference(
        Tensor.masked_fill_,
        (create('N:2,None:3'), (create('2,3') > 0).rename('N', 'C'), 3.14),
        maybe_raises_regex="not the same as the computed output names")
def test_using_seen_interned_string_doesnt_bump_refcount(self):
    """Passing an already-seen interned name must not leak a reference."""
    def see_name():
        # Mention 'N' once so the arg parser has already cached it.
        seen_name = 'N'
        pass_name_to_python_arg_parser(seen_name)

    see_name()
    name = 'N'
    before = sys.getrefcount(name)
    pass_name_to_python_arg_parser(name)
    after = sys.getrefcount(name)
    self.assertEqual(after, before)
def test_using_unseen_interned_string_bumps_refcount_permanently(self):
    """First use of an interned name permanently caches one reference."""
    # Please don't use this as a name in a different test.
    fresh = 'abcdefghi'
    before = sys.getrefcount(fresh)
    pass_name_to_python_arg_parser(fresh)
    self.assertEqual(sys.getrefcount(fresh), before + 1)
def test_using_unseen_uninterned_string_refcounts(self):
    """The parser caches the *interned* twin of an uninterned name, not
    the uninterned object itself."""
    # Please don't use this as a name in a different test.
    # non-compile-time constants are not interned
    uninterned = ''.join(['abc', 'def', 'ghi', 'jkl'])
    interned = 'abcdefghijkl'
    self.assertFalse(uninterned is interned)

    before_uninterned = sys.getrefcount(uninterned)
    before_interned = sys.getrefcount(interned)

    pass_name_to_python_arg_parser(uninterned)

    # Internally, PyTorch should not hold a reference to the uninterned string
    self.assertEqual(sys.getrefcount(uninterned), before_uninterned)
    # Instead, we should hold a new reference to the interned version.
    self.assertEqual(sys.getrefcount(interned), before_interned + 1)
def _test_select(self, device):
x = torch.empty(2, 3, 4, 5, names=('N', 'C', 'H', 'W'), device=device)
y = x.select(1, 1)
self.assertEqual(y.names, ('N', 'H', 'W'))
y = x.select('C', 1)
self.assertEqual(y.names, ('N', 'H', 'W'))
with self.assertRaisesRegex(
RuntimeError, 'Please look up dimensions by name'):
y = x.select(None, 1)
def test_select(self):
    """Run the select name-propagation checks on CPU."""
    self._test_select('cpu')
@unittest.skipIf(not TEST_CUDA, 'no CUDA')
def test_select_cuda(self):
    """Run the select name-propagation checks on a CUDA device."""
    self._test_select('cuda')
def _test_as_strided(self, device):
x = torch.empty(2, 3, 4, 5, names=('N', 'C', 'H', 'W'), device=device)
y = x.as_strided([2 * 3 * 4 * 5], [1])
self.assertEqual(y.names, (None,))
def test_as_strided(self):
    """Run the as_strided name-dropping check on CPU."""
    self._test_as_strided('cpu')
@unittest.skipIf(not TEST_CUDA, 'no CUDA')
def test_as_strided_cuda(self):
    """Run the as_strided name-dropping check on a CUDA device."""
    self._test_as_strided('cuda')
def test_no_jit_tracer_support(self):
def foo(x):
return torch.full(x.shape, 2., names=('N',))
with self.assertRaisesRegex(RuntimeError, 'not supported with the tracer'):
x = torch.randn(3)
torch.jit.trace(foo, example_inputs=x)
def bar(x):
return x.select('N', 1)
with self.assertRaisesRegex(RuntimeError, 'not supported with the tracer'):
x = torch.randn(3)
torch.jit.trace(bar, example_inputs=x)
def test_no_jit_script_support(self):
    """TorchScript support for named tensors is NYI; calls must raise."""
    @torch.jit.script
    def foo(x):
        return x + 1
    # Running a scripted fn on a named-tensor input raises NYI.
    with self.assertRaisesRegex(RuntimeError, 'NYI'):
        foo(torch.randn(2, 3, names=('N', 'C')))

    @torch.jit.ignore
    def add_names(x):
        x.names = ('N', 'C')

    @torch.jit.script
    def return_named_tensor(input):
        add_names(input)
        return input

    # Names attached inside an @torch.jit.ignore'd helper also trip the
    # NYI check when the named tensor crosses back into scripted code.
    with self.assertRaisesRegex(RuntimeError, "NYI"):
        return_named_tensor(torch.randn(1, 1))
def test_align_to(self):
    """align_to reorders dims by name, adding size-1 dims for new names."""
    # trivial
    tensor = create('N:3')
    output = tensor.align_to('N')
    self.assertEqual(output.names, ['N'])
    self.assertEqual(output.shape, [3])
    # unsqueeze behavior: a requested name not on the input adds a size-1 dim
    tensor = create('N:3')
    output = tensor.align_to('N', 'D')
    self.assertEqual(output.names, ['N', 'D'])
    self.assertEqual(output.shape, [3, 1])
    # transpose behavior: existing dims are permuted to the requested order
    tensor = create('N:3,C:2')
    output = tensor.align_to('C', 'N')
    self.assertEqual(output.names, ['C', 'N'])
    self.assertEqual(output.shape, [2, 3])
    # unsqueeze / transpose combined
    tensor = create('C:2,N:3,H:5')
    output = tensor.align_to('N', 'H', 'W', 'C')
    self.assertEqual(output.names, ['N', 'H', 'W', 'C'])
    self.assertEqual(output.shape, [3, 5, 1, 2])
    # All input dimensions must be named
    with self.assertRaisesRegex(RuntimeError, "All input dims must be named. Found unnamed dim at index 0"):
        create('None:2,C:3').align_to('N', 'C')
    # not enough names: every input dim must appear in the requested order
    with self.assertRaisesRegex(RuntimeError, "Cannot find dim 'N'"):
        create('N:2,C:3').align_to('C')
    # names not found
    with self.assertRaisesRegex(RuntimeError, "Cannot find dim 'C'"):
        create('N:2,C:3').align_to('D', 'N')
def test_align_to_ellipsis(self):
    """'...' in align_to expands to the input's remaining dims, in input order."""
    tensor = create('N:7,H:3,W:5,C:2')
    # ... = ['N', 'H', 'W', 'C']
    output = tensor.align_to('...')
    self.assertEqual(output.names, ['N', 'H', 'W', 'C'])
    self.assertEqual(output.shape, [7, 3, 5, 2])
    # ... = ['H', 'C']
    output = tensor.align_to('...', 'W', 'N')
    self.assertEqual(output.names, ['H', 'C', 'W', 'N'])
    self.assertEqual(output.shape, [3, 2, 5, 7])
    # ... = ['N', 'W']
    output = tensor.align_to('H', 'C', '...')
    self.assertEqual(output.names, ['H', 'C', 'N', 'W'])
    self.assertEqual(output.shape, [3, 2, 7, 5])
    # ... = ['H', 'C']
    output = tensor.align_to('W', '...', 'N')
    self.assertEqual(output.names, ['W', 'H', 'C', 'N'])
    self.assertEqual(output.shape, [5, 3, 2, 7])
    # ... = [] (all names listed explicitly; 'D' is new so it unsqueezes)
    output = tensor.align_to('N', '...', 'C', 'D', 'H', 'W')
    self.assertEqual(output.names, ['N', 'C', 'D', 'H', 'W'])
    self.assertEqual(output.shape, [7, 2, 1, 3, 5])
    # Input tensor partially named: unnamed dims are swept up by '...'
    partially_named = create('None:2,None:3,None:5,C:7')
    output = partially_named.align_to('C', '...')
    self.assertEqual(output.names, ['C', None, None, None])
    self.assertEqual(output.shape, [7, 2, 3, 5])
    with self.assertRaisesRegex(RuntimeError, "order of dimensions cannot contain a None"):
        partially_named.align_to('C', None, '...')
    # Input order partially named
    with self.assertRaisesRegex(RuntimeError, "cannot contain a None name"):
        tensor.align_to('...', 'N', None)
    # Input order duplicate names
    with self.assertRaisesRegex(RuntimeError, "duplicate names"):
        tensor.align_to('...', 'N', 'N')
def test_align_as(self):
    """align_as permutes/unsqueezes the input to match the other tensor's names."""
    # align_as calls align_to internally. align_to has pretty substantial
    # tests, so only the basics are checked here.
    src = create('C:2,N:3,H:5')
    target = create('N:1,H:1,W:1,C:1')
    aligned = src.align_as(target)
    self.assertEqual(aligned.names, ['N', 'H', 'W', 'C'])
    self.assertEqual(aligned.shape, [3, 5, 1, 2])
@unittest.skip("Not implemented yet")
def test_align_tensors_two_inputs(self):
    """Table-driven checks for torch.align_tensors with exactly two inputs."""
    def _test(tensor_namedshape, align_names, expected_sizes, expected_error):
        # Build the named tensor under test and a broadcast target whose
        # dims are all size 1 with the requested alignment names.
        tensor_names, tensor_sizes = tensor_namedshape
        tensor = torch.empty(*tensor_sizes, names=tensor_names)
        other = torch.empty([1] * len(align_names), names=align_names)
        if expected_error is not None:
            with self.assertRaisesRegex(RuntimeError, expected_error):
                torch.align_tensors(tensor, other)
            return
        output, _ = torch.align_tensors(tensor, other)
        self.assertEqual(output.shape, expected_sizes)
        self.assertEqual(output.names, align_names)

    Case = namedtuple('Case', [
        'tensor_namedshape',
        'align_names',
        'expected_sizes',
        'expected_error',
    ])
    tests = [
        # basic tests
        Case(tensor_namedshape=(['C'], [2]),
             align_names=['C'],
             expected_sizes=[2],
             expected_error=None),
        Case(tensor_namedshape=(['C'], [2]),
             align_names=['D'],
             expected_sizes=None,
             expected_error='not a subsequence'),
        # single-dim alignment test
        Case(tensor_namedshape=(['C'], [2]),
             align_names=['N', 'C'],
             expected_sizes=[1, 2],
             expected_error=None),
        Case(tensor_namedshape=[['N'], [2]],
             align_names=['N', 'C'],
             expected_sizes=[2, 1],
             expected_error=None),
        # multiple dim alignment test
        Case(tensor_namedshape=[['N', 'C'], [2, 3]],
             align_names=['N', 'H', 'C', 'W'],
             expected_sizes=[2, 1, 3, 1],
             expected_error=None),
        Case(tensor_namedshape=[['N', 'C'], [2, 3]],
             align_names=['C', 'H', 'N', 'W'],
             expected_sizes=None,
             expected_error='not a subsequence'),
        # scalar tensor tests
        Case(tensor_namedshape=[None, [[]]],
             align_names=['N', 'C'],
             expected_sizes=[1, 1],
             expected_error=None),
        Case(tensor_namedshape=[[], [[]]],
             align_names=[None, None],
             expected_sizes=[1, 1],
             expected_error=None),
        # unnamed tensor tests
        Case(tensor_namedshape=[None, [2, 3]],
             align_names=[None, None],
             expected_sizes=[2, 3],
             expected_error=None),
        Case(tensor_namedshape=[None, [2, 3]],
             align_names=[None, None, None],
             expected_sizes=[1, 2, 3],
             expected_error=None),
        Case(tensor_namedshape=[None, [2]],
             align_names=['N'],
             expected_sizes=None,
             expected_error='not a subsequence'),
        # unnamed dim alignment tests
        Case(tensor_namedshape=[[None], [2]],
             align_names=['N', None],
             expected_sizes=[1, 2],
             expected_error=None),
        Case(tensor_namedshape=[[None], [2]],
             align_names=['N', None, None, None],
             expected_sizes=[1, 1, 1, 2],
             expected_error=None),
        Case(tensor_namedshape=[['N'], [2]],
             align_names=['N', None, None, None],
             expected_sizes=[2, 1, 1, 1],
             expected_error=None),
        Case(tensor_namedshape=[[None, 'N', None], [2, 3, 5]],
             align_names=[None, None, 'N', None],
             expected_sizes=[1, 2, 3, 5],
             expected_error=None),
        # unnamed dims must keep their absolute position from the right
        Case(tensor_namedshape=[[None], [2]],
             align_names=[None, 'N'],
             expected_sizes=None,
             expected_error='absolute position from the right'),
        Case(tensor_namedshape=[None, [2]],
             align_names=[None, 'N'],
             expected_sizes=None,
             expected_error='absolute position from the right'),
        Case(tensor_namedshape=[[None, 'N'], [2, 3]],
             align_names=[None, 'C', 'N'],
             expected_sizes=None,
             expected_error='absolute position from the right'),
    ]
    for test in tests:
        _test(*test)
@unittest.skip("Not implemented yet")
def test_align_tensors(self):
    """torch.align_tensors should align every input to the longest name list."""
    def reference_fn(*tensors):
        # Pick the name list of the tensor with the most dims...
        longest_names = tensors[0].names
        for tensor in tensors:
            if len(tensor.names) > len(longest_names):
                longest_names = tensor.names
        # ...and align everything to it.
        return [tensor.align_to(*longest_names) for tensor in tensors]

    x = torch.empty(1, 1, names=('N', 'H'))
    y = torch.empty(2, 3, 5, names=('N', 'C', 'H'))
    z = torch.empty(2, names=('N',))
    output = torch.align_tensors(x, y, z)
    expected_tensors = reference_fn(x, y, z)
    for tensor, expected in zip(output, expected_tensors):
        self.assertTensorDataAndNamesEqual(tensor, expected)
def test_mm(self):
    """torch.mm output names: (row dim of left, col dim of right)."""
    for device in get_all_device_types():
        self._test_name_inference(
            torch.mm, device=device,
            args=(create('N:3,C:2'), create('W:2,H:5')),
            expected_names=('N', 'H'))
        # left arg is unnamed
        self._test_name_inference(
            torch.mm, device=device,
            args=(create('3,2'), create('W:2,H:5')),
            expected_names=(None, 'H'))
        # right arg is unnamed
        self._test_name_inference(
            torch.mm, device=device,
            args=(create('N:3,C:2'), create('2,5')),
            expected_names=('N', None))
        # out= variant propagates names onto the out tensor
        self._test_name_inference(
            out_fn(torch.mm), device=device,
            args=(create('0'), create('N:3,C:2'), create('W:2,H:5')),
            expected_names=('N', 'H'))
        # output would be ('N', 'N') -> rejected
        self._test_name_inference(
            torch.mm, device=device,
            args=(create('N:3,C:2'), create('W:2,N:5')),
            maybe_raises_regex='with duplicate names')
def test_expand(self):
    """expand keeps existing names; newly added leading dims are unnamed."""
    for device in get_all_device_types():
        self._test_name_inference(
            Tensor.expand, device=device,
            args=(create('D:1'), [3]), expected_names=('D',))
        self._test_name_inference(
            Tensor.expand, device=device,
            args=(create('H:3,W:2'), [10, 3, 3, 2]),
            expected_names=(None, None, 'H', 'W'))
        # fully unnamed input stays fully unnamed
        self._test_name_inference(
            Tensor.expand, device=device,
            args=(create('3, 2'), [10, 3, 3, 2]),
            expected_names=(None, None, None, None))
def test_addmm(self):
    """addmm unifies the bias names with the mm result names."""
    for device in get_all_device_types():
        # full names
        self._test_name_inference(
            torch.addmm, device=device,
            args=(create('N:3,H:5'), create('N:3,C:2'), create('W:2,H:5')),
            expected_names=('N', 'H'))
        # no name on bias
        self._test_name_inference(
            torch.addmm, device=device,
            args=(create('3,5'), create('N:3,C:2'), create('W:2,H:5')),
            expected_names=('N', 'H'))
        # partially named bias
        self._test_name_inference(
            torch.addmm, device=device,
            args=(create('N:3,None:5'), create('N:3,C:2'), create('W:2,H:5')),
            expected_names=('N', 'H'))
        # out=
        self._test_name_inference(
            out_fn(torch.addmm), device=device,
            args=(create('0'), create('N:3,None:5'), create('N:3,C:2'), create('W:2,H:5')),
            expected_names=('N', 'H'))
        # inplace
        self._test_name_inference(
            torch.Tensor.addmm_, device=device,
            args=(create('N:3,H:5'), create('N:3,C:2'), create('W:2,H:5')),
            expected_names=('N', 'H'))
        # output would be ('N', 'N') -> rejected
        self._test_name_inference(
            torch.addmm, device=device,
            args=(create('N:3,H:5'), create('N:3,C:2'), create('W:2,N:5')),
            maybe_raises_regex='with duplicate names')
def test_bmm(self):
    """bmm: batch dim name unifies across inputs; mm rules for the rest."""
    for device in get_all_device_types():
        # full names
        self._test_name_inference(
            torch.bmm, device=device,
            args=(create('N:7,A:3,B:2'), create('N:7,A:2,B:5')),
            expected_names=('N', 'A', 'B'))
        # no name on left tensor
        self._test_name_inference(
            torch.bmm, device=device,
            args=(create('7,3,2'), create('N:7,A:2,B:5')),
            expected_names=('N', None, 'B'))
        # no name on right tensor
        self._test_name_inference(
            torch.bmm, device=device,
            args=(create('N:7,A:3,B:2'), create('7,2,5')),
            expected_names=('N', 'A', None))
        # out=
        self._test_name_inference(
            out_fn(torch.bmm), device=device,
            args=(create('0'), create('N:7,A:3,B:2'), create('N:7,A:2,B:5')),
            expected_names=('N', 'A', 'B'))
        # duplicate names after mm
        self._test_name_inference(
            torch.bmm, device=device,
            args=(create('N:7,A:3,B:2'), create('N:7,B:2,A:5')),
            maybe_raises_regex='with duplicate names')
        # matching error (batch dimensions must be alignable)
        self._test_name_inference(
            torch.bmm, device=device,
            args=(create('N:3,A:3,B:3'), create('M:3,A:3,B:3')),
            maybe_raises_regex='do not match')
        # misalignment (batch dimension is getting contracted)
        self._test_name_inference(
            torch.bmm, device=device,
            args=(create('N:3,A:3,B:3'), create('None:3,N:3,B:3')),
            maybe_raises_regex='misaligned')
def test_matmul(self):
    """matmul name inference across the 1D/2D/ND dispatch paths."""
    for device in get_all_device_types():
        # input tensors must be at least 1D
        self._test_name_inference(
            torch.matmul, device=device,
            args=(create(''), create('A:2')),
            maybe_raises_regex='at least 1D')
        self._test_name_inference(
            torch.matmul, device=device,
            args=(create('A:2'), create('')),
            maybe_raises_regex='at least 1D')
        # 1D @ 1D -> scalar, no names
        self._test_name_inference(
            torch.matmul, device=device,
            args=(create('A:2'), create('B:2')),
            expected_names=[])
        # ND @ 1D: the contracted dim's name is dropped
        self._test_name_inference(
            torch.matmul, device=device,
            args=(create('A:3,C:2'), create('B:2')),
            expected_names=['A'])
        self._test_name_inference(
            torch.matmul, device=device,
            args=(create('A:5,C:3,D:2'), create('B:2')),
            expected_names=['A', 'C'])
        # 1D @ ND
        self._test_name_inference(
            torch.matmul, device=device,
            args=(create('C:2'), create('A:2,B:3')),
            expected_names=['B'])
        self._test_name_inference(
            torch.matmul, device=device,
            args=(create('C:2'), create('A:3,B:2,D:5')),
            expected_names=['A', 'D'])
        # 2D @ 2D
        self._test_name_inference(
            torch.matmul, device=device,
            args=(create('A:3,B:2'), create('A:2,B:3')),
            expected_names=['A', 'B'])
        self._test_name_inference(
            torch.matmul, device=device,
            args=(create('A:3,B:2'), create('B:2,A:5')),
            maybe_raises_regex='with duplicate names')
        # ND @ ND where N >= 2: batch dims broadcast and unify
        self._test_name_inference(
            torch.matmul, device=device,
            args=(create('C:5,A:3,B:2'), create('A:2,B:3')),
            expected_names=['C', 'A', 'B'])
        self._test_name_inference(
            torch.matmul, device=device,
            args=(create('C:5,A:3,B:2'), create('None:1,A:2,B:3')),
            expected_names=['C', 'A', 'B'])
        self._test_name_inference(
            torch.matmul, device=device,
            args=(create('C:5,A:3,B:2'), create('None:2,None:1,A:2,B:3')),
            expected_names=[None, 'C', 'A', 'B'])
        # out=
        self._test_name_inference(
            out_fn(torch.matmul), device=device,
            args=(create('0'), create('N:7,A:3,B:2'), create('N:7,A:2,B:5')),
            expected_names=('N', 'A', 'B'))
        # duplicate names after mm
        # BUGFIX: this case previously called torch.bmm (copy-paste from
        # test_bmm), so matmul's own duplicate-name path was never exercised.
        self._test_name_inference(
            torch.matmul, device=device,
            args=(create('N:7,A:3,B:2'), create('N:7,B:2,A:5')),
            maybe_raises_regex='with duplicate names')
        # misalignment (batch dimension is getting contracted)
        self._test_name_inference(
            torch.matmul, device=device,
            args=(create('N:3,A:3,B:3'), create('A:3,N:3,B:3')),
            maybe_raises_regex='do not match')
def test_mv(self):
    """mv keeps the matrix's row-dim name; the vector's name is contracted away."""
    for device in get_all_device_types():
        self._test_name_inference(
            torch.mv, device=device,
            args=(create('N:3,C:2'), create('W:2')),
            expected_names=('N',))
        # left arg is unnamed
        self._test_name_inference(
            torch.mv, device=device,
            args=(create('3,2'), create('W:2')),
            expected_names=(None,))
        # right arg is unnamed
        self._test_name_inference(
            torch.mv, device=device,
            args=(create('N:3,C:2'), create('2')),
            expected_names=('N',))
        # out=
        self._test_name_inference(
            out_fn(torch.mv), device=device,
            args=(create('0'), create('N:3,C:2'), create('W:2')),
            expected_names=('N',))
def test_addmv(self):
    """addmv unifies the bias name with the mv result name."""
    for device in get_all_device_types():
        # full names
        self._test_name_inference(
            torch.addmv, device=device,
            args=(create('N:3'), create('N:3,C:2'), create('H:2')),
            expected_names=['N'])
        # no name on bias
        self._test_name_inference(
            torch.addmv, device=device,
            args=(create('3'), create('N:3,C:2'), create('H:2')),
            expected_names=('N',))
        # out=
        self._test_name_inference(
            out_fn(torch.addmv), device=device,
            args=(create('0'), create('N:3'), create('N:3,C:2'), create('H:2')),
            expected_names=('N',))
        # inplace
        self._test_name_inference(
            torch.Tensor.addmv_, device=device,
            args=(create('N:3'), create('N:3,C:2'), create('H:2')),
            expected_names=('N',))
def test_autograd_ignores_names(self):
# sigmoid forward is supported by named tensors, but sigmoid_backward
# is not (see native_functions.yaml). Test that autograd ignores names
# and that the sigmoid_backward succeeds.
x = torch.randn(3, 3, names=('N', 'C'), requires_grad=True)
x.sigmoid().sum().backward()
def test_tensor_grad_is_unnamed(self):
x = torch.randn(3, 3, names=(None, None), requires_grad=True)
y = torch.randn(3, 3, names=('N', 'C'), requires_grad=True)
(x * y).sum().backward()
# Check that names weren't propagated
self.assertEqual(y.grad.names, [None, None])
self.assertEqual(x.grad.names, [None, None])
def test_autograd_warns_named_grad(self):
    """Passing a named tensor as the explicit grad argument warns exactly once."""
    base = torch.randn(3, 3, names=('N', 'C'))
    # Clone BEFORE requires_grad_ so the grad tensor itself is a plain leaf.
    named_grad = base.clone()
    base.requires_grad_()
    with warnings.catch_warnings(record=True) as warns:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        base.clone().backward(named_grad)
    self.assertEqual(len(warns), 1)
    self.assertTrue(
        str(warns[0].message).startswith('Autograd was passed a named grad tensor'))
def test_nyi_dimname_overload_msg(self):
x = torch.randn(3, 3)
with self.assertRaisesRegex(RuntimeError, "squeeze: You passed a dimname"):
x.squeeze_("N")
def test_dot(self):
    """torch.dot returns a 0-dim tensor, so names of both inputs are ignored."""
    for device in get_all_device_types():
        # torch.dot ignores the names of both tensors
        self._test_name_inference(
            torch.dot, device=device,
            args=(create('C:2'), create('W:2')),
            expected_names=[])
def test_comparison_ops(self):
    """Comparison ops propagate names for tensor/scalar operands, out=, and
    unary predicates."""
    for device in get_all_device_types():
        a = torch.randn(3, 3, names=('N', 'C'), device=device)
        b = torch.randn(3, 3, names=('N', 'C'), device=device)
        scalar = torch.randn([], device=device)
        # Comparing against a named tensor, a python scalar, and a 0-dim
        # tensor all propagate the names of the named operand.
        for rhs in (b, 1, scalar):
            self.assertEqual((a == rhs).names, ['N', 'C'])
            self.assertEqual((a != rhs).names, ['N', 'C'])
            self.assertEqual((a > rhs).names, ['N', 'C'])
            self.assertEqual((a < rhs).names, ['N', 'C'])
            self.assertEqual((a >= rhs).names, ['N', 'C'])
            self.assertEqual((a <= rhs).names, ['N', 'C'])
        # out= variants propagate names onto the result tensor.
        res = torch.empty(3, 3, dtype=torch.bool, device=device)
        for op in (torch.eq, torch.ne, torch.lt, torch.gt, torch.le, torch.ge):
            op(a, b, out=res)
            self.assertEqual(res.names, ['N', 'C'])
        # Unary predicates keep names too.
        for op in (torch.isnan, torch.isinf):
            self.assertEqual(op(a).names, ['N', 'C'])
# Standard PyTorch test-suite entry point.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_namedtensor.py |
# Owner(s): ["oncall: mobile"]
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile)
from torch.nn import functional as F
from torch._C import MobileOptimizerType
from torch.testing._internal.common_quantized import override_quantized_engine
# torchvision is an optional dependency; tests that need it gate on this flag.
try:
    import torchvision
    HAS_TORCHVISION = True
except ImportError:
    HAS_TORCHVISION = False

# Shorthand for the JIT IR pattern-matching helper.
FileCheck = torch._C.FileCheck
class TestOptimizer(TestCase):
@skipIfNoXNNPACK
def test_optimize_for_mobile(self):
    """End-to-end checks for optimize_for_mobile:

    conv/linear prepacking, add+relu fusion, preserved methods,
    optimization blocklists, conv-bn folding, and the mobile_optimized tag.
    Graph structure is checked with FileCheck; numerics with assert_close.
    """
    # Convolution / linear hyperparameters shared by the test modules below.
    batch_size = 2
    input_channels_per_group = 6
    height = 16
    width = 16
    output_channels_per_group = 6
    groups = 4
    kernel_h = kernel_w = 3
    stride_h = stride_w = 1
    pad_h = pad_w = 1
    dilation = 1
    input_channels = input_channels_per_group * groups
    output_channels = output_channels_per_group * groups
    kernels = (kernel_h, kernel_w)
    strides = (stride_h, stride_w)
    paddings = (pad_h, pad_w)
    dilations = (dilation, dilation)
    conv_weight_shape = (output_channels, input_channels_per_group, kernel_h, kernel_w)
    conv_bias_shape = (output_channels)
    # Run conv2d once eagerly to derive the linear layer's input width
    # (channels-last after the permute in forward()).
    input_data = torch.rand((batch_size, input_channels, height, width))
    conv_weight = torch.rand((output_channels, input_channels_per_group, kernel_h, kernel_w))
    conv_bias = torch.rand((output_channels))
    result = F.conv2d(input_data, conv_weight, conv_bias, strides, paddings, dilations, groups)
    weight_output_dim = 24
    linear_input_shape = result.shape[1]
    linear_weight_shape = (weight_output_dim, linear_input_shape)

    class MyTestModule(torch.nn.Module):
        """conv -> relu -> permute -> linear -> add -> relu, in both
        forward() and an exported foo() method."""
        def __init__(self):
            super(MyTestModule, self).__init__()
            self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
            self.conv_bias = torch.nn.Parameter(torch.rand((conv_bias_shape)))
            self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
            self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)))
            self.strides = strides
            self.paddings = paddings
            self.dilations = dilations
            self.groups = groups

        def forward(self, x):
            o = F.conv2d(x, self.conv_weight, self.conv_bias,
                         self.strides, self.paddings, self.dilations, self.groups)
            o = F.relu(o)
            x = o.permute([0, 2, 3, 1])
            o = F.linear(x, self.linear_weight, self.linear_bias)
            # add followed by relu: should be fused to aten::_add_relu.
            o = o + x
            return F.relu(o)

        @torch.jit.export
        def foo(self, x):
            # Same computation as forward(); used to verify that preserved
            # methods are optimized too.
            o = F.conv2d(x, self.conv_weight, self.conv_bias,
                         self.strides, self.paddings, self.dilations, self.groups)
            o = F.relu(o)
            x = o.permute([0, 2, 3, 1])
            o = F.linear(x, self.linear_weight, self.linear_bias)
            o = o + x
            return F.relu(o)

    class BNTestModule(torch.nn.Module):
        """conv followed by batchnorm, for conv-bn folding checks."""
        def __init__(self):
            super(BNTestModule, self).__init__()
            self.conv = torch.nn.Conv2d(1, 20, 5, 1)
            self.bn = torch.nn.BatchNorm2d(num_features=20)
            # Non-default eps so folding must actually read it.
            self.bn.eps = 0.0023

        def forward(self, x):
            x = self.conv(x)
            x = self.bn(x)
            return x

    data_shape = (batch_size, input_channels, height, width)
    input_data = torch.normal(1, 20, size=data_shape)
    scripted_model = torch.jit.script(MyTestModule())
    scripted_model.eval()
    initial_result = scripted_model(input_data)
    initial_foo_result = scripted_model.foo(input_data)

    optimized_scripted_model = optimize_for_mobile(scripted_model, preserved_methods=['foo'])
    optimized_result = optimized_scripted_model(input_data)
    optimized_foo_result = optimized_scripted_model.foo(input_data)

    # forward(): conv/linear replaced by prepacked XNNPACK ops (prepack
    # itself folded away), add+relu fused.
    FileCheck().check_not("Tensor = aten::conv2d") \
               .check_not("Tensor = prim::CallFunction") \
               .check_not("prepacked::conv2d_clamp_prepack") \
               .check_count("prepacked::conv2d_clamp_run", 1, exactly=True) \
               .check_not("prepacked::linear_clamp_prepack") \
               .check_count("prepacked::linear_clamp_run", 1, exactly=True) \
               .check_not("aten::add(") \
               .check_not("aten::relu(") \
               .check_count("aten::_add_relu(", 1, exactly=True) \
               .run(optimized_scripted_model.graph)
    torch.testing.assert_close(initial_result, optimized_result, rtol=1e-2, atol=1e-3)

    # The preserved foo() method gets the same optimizations.
    FileCheck().check_not("Tensor = aten::conv2d") \
               .check_not("Tensor = prim::CallFunction") \
               .check_not("prepacked::conv2d_clamp_prepack") \
               .check_count("prepacked::conv2d_clamp_run", 1, exactly=True) \
               .check_not("prepacked::linear_clamp_prepack") \
               .check_count("prepacked::linear_clamp_run", 1, exactly=True) \
               .check_not("aten::add(") \
               .check_not("aten::relu(") \
               .check_count("aten::_add_relu(", 1, exactly=True) \
               .run(optimized_scripted_model.foo.graph)
    torch.testing.assert_close(initial_foo_result, optimized_foo_result, rtol=1e-2, atol=1e-3)

    # Blocklisting INSERT_FOLD_PREPACK_OPS keeps the plain aten ops.
    optimization_blocklist_no_prepack = {MobileOptimizerType.INSERT_FOLD_PREPACK_OPS}
    optimized_scripted_model_no_prepack = optimize_for_mobile(scripted_model, optimization_blocklist_no_prepack)
    optimized_result_no_prepack = optimized_scripted_model_no_prepack(input_data)
    FileCheck().check_count("Tensor = aten::conv2d", 1, exactly=True) \
               .check_not("prepacked::linear_clamp_run") \
               .check_not("prepacked::conv2d_clamp_run") \
               .run(optimized_scripted_model_no_prepack.graph)
    torch.testing.assert_close(initial_result, optimized_result_no_prepack, rtol=1e-2, atol=1e-3)

    # Conv-BN folding: op count collapses and numerics are preserved.
    bn_test_module = BNTestModule()
    bn_scripted_module = torch.jit.script(bn_test_module)
    bn_scripted_module.eval()
    self.assertEqual(len(torch.jit.export_opnames(bn_scripted_module)), 11)
    FileCheck().check_count("prim::CallMethod[name=\"forward\"]", 2, exactly=True) \
               .run(str(get_forward(bn_scripted_module._c).graph))
    optimization_blocklist_no_prepack = {MobileOptimizerType.INSERT_FOLD_PREPACK_OPS}
    bn_fold_scripted_module = optimize_for_mobile(bn_scripted_module, optimization_blocklist_no_prepack)
    self.assertEqual(len(torch.jit.export_opnames(bn_fold_scripted_module)), 1)
    bn_input = torch.rand(1, 1, 6, 6)
    torch.testing.assert_close(bn_scripted_module(bn_input), bn_fold_scripted_module(bn_input), rtol=1e-2, atol=1e-3)

    # Blocklisting CONV_BN_FUSION keeps the explicit batch_norm op.
    optimization_blocklist_no_fold_bn = {MobileOptimizerType.CONV_BN_FUSION}
    no_bn_fold_scripted_module = optimize_for_mobile(bn_scripted_module, optimization_blocklist_no_fold_bn)
    FileCheck().check_count("aten::batch_norm", 1, exactly=True) \
               .run(str(get_forward_graph(no_bn_fold_scripted_module._c)))
    bn_input = torch.rand(1, 1, 6, 6)
    torch.testing.assert_close(bn_scripted_module(bn_input), no_bn_fold_scripted_module(bn_input), rtol=1e-2, atol=1e-3)

    class MyMobileOptimizedTagTest(torch.nn.Module):
        """Minimal module to verify the mobile_optimized tag is attached."""
        def __init__(self):
            super(MyMobileOptimizedTagTest, self).__init__()
            self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
            self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)))

        def forward(self, x):
            o = F.linear(x, self.linear_weight, self.linear_bias)
            return F.relu(o)

    mobile_optimized_tag_module = MyMobileOptimizedTagTest()
    m = torch.jit.script(mobile_optimized_tag_module)
    m.eval()
    opt_m = optimize_for_mobile(m)
    tag = getattr(opt_m, "mobile_optimized", None)
    self.assertTrue(tag)

    class MyPreserveMethodsTest(torch.nn.Module):
        """Module with an exported extra method to test preserved_methods."""
        def __init__(self):
            super(MyPreserveMethodsTest, self).__init__()
            self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
            self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)))

        def forward(self, x):
            o = F.linear(x, self.linear_weight, self.linear_bias)
            return F.relu(o)

        @torch.jit.export
        def preserveThis(self):
            pass

    preserve_method_module = MyPreserveMethodsTest()
    m = torch.jit.script(preserve_method_module)
    m.eval()
    # Without preserved_methods the extra method is dropped...
    opt_m = optimize_for_mobile(m)
    no_preserveThis = getattr(opt_m, "preserveThis", None)
    self.assertEqual(no_preserveThis, None)
    # ...and with it, it is kept.
    opt_m = optimize_for_mobile(m, preserved_methods=["preserveThis"])
    preserveThis = getattr(opt_m, "preserveThis", None)
    self.assertNotEqual(preserveThis, None)

    class OptimizeNoForwardTest(torch.nn.Module):
        """Module whose only entry point is an exported foo(), no forward()."""
        def __init__(self):
            super(OptimizeNoForwardTest, self).__init__()
            self.l = nn.Linear(10, 100)
            self.l2 = nn.Linear(100, 1)
            self.d = nn.Dropout(p=0.2)

        @torch.jit.export
        def foo(self, x):
            x = self.d(F.relu(self.l(x)))
            x = self.l2(x)
            x = x + torch.ones(1, 100)
            return F.relu(x)

    input_data = torch.ones(1, 10)
    m = torch.jit.script(OptimizeNoForwardTest())
    m.eval()
    initial_result = m.foo(input_data)
    optimized_scripted_model = optimize_for_mobile(m, preserved_methods=['foo'])
    optimized_result = optimized_scripted_model.foo(input_data)
    # Dropout removed (eval mode) and add+relu fused in the preserved method.
    FileCheck().check_not("dropout.__") \
               .check_count("aten::_add_relu(", 1, exactly=True) \
               .run(optimized_scripted_model.foo.graph)
    torch.testing.assert_close(initial_result, optimized_result, rtol=1e-2, atol=1e-3)

    class BNTestNoForwardModule(torch.nn.Module):
        """conv+bn module whose only entry point is an exported foo()."""
        def __init__(self):
            super(BNTestNoForwardModule, self).__init__()
            self.conv = torch.nn.Conv2d(1, 20, 5, 1)
            self.bn = torch.nn.BatchNorm2d(num_features=20)
            self.bn.eps = 0.0023

        @torch.jit.export
        def foo(self, x):
            x = self.conv(x)
            x = self.bn(x)
            return x

    bn_test_no_forward_module = BNTestNoForwardModule()
    bn_no_forward_scripted_module = torch.jit.script(bn_test_no_forward_module)
    bn_no_forward_scripted_module.eval()
    self.assertEqual(len(torch.jit.export_opnames(bn_no_forward_scripted_module)), 11)
    FileCheck().check_count("prim::CallMethod[name=\"forward\"]", 2, exactly=True) \
               .run(bn_no_forward_scripted_module.foo.graph)
    # Conv-BN folding must also apply to a preserved non-forward method.
    bn_fold_no_forward_scripted_module = optimize_for_mobile(bn_no_forward_scripted_module, preserved_methods=['foo'])
    self.assertEqual(len(torch.jit.export_opnames(bn_fold_no_forward_scripted_module)), 1)
    bn_input = torch.rand(1, 1, 6, 6)
    torch.testing.assert_close(
        bn_no_forward_scripted_module.foo(bn_input),
        bn_fold_no_forward_scripted_module.foo(bn_input),
        rtol=1e-2,
        atol=1e-3)
@skipIfNoXNNPACK
def test_quantized_conv_no_asan_failures(self):
    # There were ASAN failures when fold_conv_bn was run on
    # already quantized conv modules. Verifying that this does
    # not happen again.
    if 'qnnpack' not in torch.backends.quantized.supported_engines:
        return

    class Child(nn.Module):
        """Nested submodule so folding also visits non-top-level convs."""
        def __init__(self):
            super(Child, self).__init__()
            self.conv2 = nn.Conv2d(1, 1, 1)

        def forward(self, x):
            x = self.conv2(x)
            return x

    class Parent(nn.Module):
        """quant -> conv -> child conv -> dequant quantization sandwich."""
        def __init__(self):
            super(Parent, self).__init__()
            self.quant = torch.ao.quantization.QuantStub()
            self.conv1 = nn.Conv2d(1, 1, 1)
            self.child = Child()
            self.dequant = torch.ao.quantization.DeQuantStub()

        def forward(self, x):
            x = self.quant(x)
            x = self.conv1(x)
            x = self.child(x)
            x = self.dequant(x)
            return x

    with override_quantized_engine('qnnpack'):
        # Standard eager-mode post-training quantization flow:
        # prepare -> calibrate -> convert -> script.
        model = Parent()
        model.qconfig = torch.ao.quantization.get_default_qconfig('qnnpack')
        torch.ao.quantization.prepare(model, inplace=True)
        model(torch.randn(4, 1, 4, 4))
        torch.ao.quantization.convert(model, inplace=True)
        model = torch.jit.script(model)
        # this line should not have ASAN failures
        model_optim = optimize_for_mobile(model)
def test_generate_mobile_module_lints(self):
    """generate_mobile_module_lints flags dropout, batchnorm, missing
    bundled inputs, and requires_grad parameters."""
    class MyTestModule(torch.nn.Module):
        # Linear + Dropout: triggers DROPOUT and REQUIRES_GRAD lints.
        def __init__(self):
            super(MyTestModule, self).__init__()
            self.fc = torch.nn.Linear(4, 4)
            self.dropout = torch.nn.Dropout(p=0.5)

        def forward(self, inputs):
            out = self.fc(inputs)
            out = self.dropout(out)
            return out

    class MyBNModule(torch.nn.Module):
        # BatchNorm2d: triggers BATCHNORM and REQUIRES_GRAD lints.
        def __init__(self):
            super(MyBNModule, self).__init__()
            self.bn = torch.nn.BatchNorm2d(4, affine=True)

        def forward(self, inputs):
            bn = self.bn(inputs)
            return bn

    class MyBundledInputModule(torch.nn.Module):
        # Parameter-free passthrough: lint-clean once bundled inputs exist.
        def __init__(self):
            super(MyBundledInputModule, self).__init__()

        def forward(self, inputs):
            return inputs

    def get_lint_count_by_type(lint_type, module_lint_List):
        # Count lints in the result list matching the given LintCode.
        return len([lint_dict for lint_dict in module_lint_List if lint_dict['name'] == lint_type.name])

    test_module = torch.jit.script(MyTestModule())
    test_module_lint_list = generate_mobile_module_lints(test_module)
    self.assertEqual(len(test_module_lint_list), 4)
    self.assertEqual(get_lint_count_by_type(LintCode.BUNDLED_INPUT, test_module_lint_list), 1)
    self.assertEqual(get_lint_count_by_type(LintCode.DROPOUT, test_module_lint_list), 1)
    self.assertEqual(get_lint_count_by_type(LintCode.REQUIRES_GRAD, test_module_lint_list), 2)

    bn_module = torch.jit.script(MyBNModule())
    bn_module_lint_list = generate_mobile_module_lints(bn_module)
    self.assertEqual(len(bn_module_lint_list), 4)
    self.assertEqual(get_lint_count_by_type(LintCode.BUNDLED_INPUT, bn_module_lint_list), 1)
    self.assertEqual(get_lint_count_by_type(LintCode.BATCHNORM, bn_module_lint_list), 1)
    self.assertEqual(get_lint_count_by_type(LintCode.REQUIRES_GRAD, bn_module_lint_list), 2)

    # With bundled inputs attached and no parameters, no lints remain.
    bi_module = torch.jit.script(MyBundledInputModule())
    torch.utils.bundled_inputs.augment_model_with_bundled_inputs(
        bi_module, [(torch.tensor([1]),)], [])
    bi_module_lint_list = generate_mobile_module_lints(bi_module)
    self.assertEqual(len(bi_module_lint_list), 0)
@skipIfNoXNNPACK
def test_preserve_bundled_inputs_methods(self):
    """optimize_for_mobile keeps bundled-input methods only when the full
    set is present, unless the user preserves them explicitly."""
    class MyBundledInputModule(torch.nn.Module):
        def __init__(self):
            super(MyBundledInputModule, self).__init__()

        def forward(self, inputs):
            return inputs

    class MyIncompleteBundledInputModule(torch.nn.Module):
        # Defines only one of the bundled-input methods, on purpose.
        def __init__(self):
            super(MyIncompleteBundledInputModule, self).__init__()

        def forward(self, inputs):
            return inputs

        @torch.jit.export
        def get_all_bundled_inputs(self):
            pass

    bi_module = torch.jit.script(MyBundledInputModule())
    module_optim_bi_not_preserved = optimize_for_mobile(bi_module)

    # Expected to be False since no bundled inputs methods were added
    self.assertFalse(
        hasattr(module_optim_bi_not_preserved, 'get_all_bundled_inputs') or
        hasattr(module_optim_bi_not_preserved, 'get_num_bundled_inputs')
    )

    # Add bundled inputs methods to the module
    torch.utils.bundled_inputs.augment_model_with_bundled_inputs(
        bi_module, [(torch.tensor([1]),)], [])
    # Now they should be preserved
    module_optim_bi_preserved = optimize_for_mobile(bi_module)

    # All of the bundled inputs methods were preserved
    self.assertTrue(
        hasattr(module_optim_bi_preserved, 'get_all_bundled_inputs') and
        hasattr(module_optim_bi_preserved, 'get_num_bundled_inputs')
    )

    # The preserved methods still work end to end.
    bundled_input = module_optim_bi_preserved.get_all_bundled_inputs()[0]
    module_optim_bi_preserved(*bundled_input)

    # If not all 3 bundled inputs methods are present in the module,
    # we will not try to preserve them unless specified by the user.
    incomplete_bi_module = torch.jit.script(MyIncompleteBundledInputModule())
    incomplete_bi_module_optim = optimize_for_mobile(incomplete_bi_module)
    self.assertFalse(hasattr(incomplete_bi_module_optim, 'get_all_bundled_inputs'))

    # Specifically preserve get_all_bundled_inputs even if it's the only one
    # bundled inputs method available.
    incomplete_bi_module_optim = optimize_for_mobile(incomplete_bi_module, preserved_methods=['get_all_bundled_inputs'])
    self.assertTrue(hasattr(incomplete_bi_module_optim, 'get_all_bundled_inputs'))
@skipIfNoXNNPACK
def test_hoist_conv_packed_params(self):
    """optimize_for_mobile should hoist quantized-conv packed params into
    top-level graph constants and drop the original conv submodules, while
    preserving numerical output (within quantization tolerance)."""
    # qnnpack is required for the quantized path exercised below.
    if 'qnnpack' not in torch.backends.quantized.supported_engines:
        return

    class Standalone(nn.Module):
        def __init__(self):
            super(Standalone, self).__init__()
            self.quant = torch.ao.quantization.QuantStub()
            self.conv1 = nn.Conv2d(1, 1, 1)
            self.conv2 = nn.Conv2d(1, 1, 1)
            self.relu = nn.ReLU()
            self.dequant = torch.ao.quantization.DeQuantStub()

        def forward(self, x):
            x = self.quant(x)
            x = self.conv1(x)
            x = self.conv2(x)
            x = self.relu(x)
            x = self.dequant(x)
            return x

        def fuse_model(self):
            # conv2 + relu are fused; conv1 is left standalone on purpose.
            torch.ao.quantization.fuse_modules(self, [['conv2', 'relu']], inplace=True)
            pass

    class Child(nn.Module):
        def __init__(self):
            super(Child, self).__init__()
            self.conv1 = nn.Conv2d(1, 1, 1)

        def forward(self, x):
            x = self.conv1(x)
            return x

    class Parent(nn.Module):
        # Nested-module case: conv lives inside a child submodule.
        def __init__(self):
            super(Parent, self).__init__()
            self.quant = torch.ao.quantization.QuantStub()
            self.conv1 = nn.Conv2d(1, 1, 1)
            self.child = Child()
            # TODO: test nn.Sequential after #42039 is fixed
            self.dequant = torch.ao.quantization.DeQuantStub()

        def forward(self, x):
            x = self.quant(x)
            x = self.conv1(x)
            x = self.child(x)
            x = self.dequant(x)
            return x

        def fuse_model(self):
            pass

    with override_quantized_engine('qnnpack'):
        def _quant_script_and_optimize(model):
            # Quantize (prepare -> calibrate -> convert), script, then
            # mobile-optimize; returns both scripted and optimized models.
            model.qconfig = torch.ao.quantization.get_default_qconfig('qnnpack')
            model.fuse_model()
            torch.ao.quantization.prepare(model, inplace=True)
            model(torch.randn(4, 1, 4, 4))
            torch.ao.quantization.convert(model, inplace=True)
            model = torch.jit.script(model)
            model_optim = optimize_for_mobile(model)
            return model, model_optim

        # basic case
        m, m_optim = _quant_script_and_optimize(Standalone())
        # No GetAttr on conv modules; exactly 2 hoisted packed-param constants.
        FileCheck().check_not("Conv2d = prim::GetAttr[name=\"conv1\"]") \
                   .check_count("__torch__.torch.classes.quantized.Conv2dPackedParamsBase = prim::Constant", 2, exactly=True) \
                   .run(m_optim.graph)
        self.assertFalse(hasattr(m_optim, "conv1"))
        self.assertFalse(hasattr(m_optim, "conv2"))

        data = torch.randn(4, 1, 4, 4)
        m_res = m(data)
        m_optim_res = m_optim(data)
        torch.testing.assert_close(m_res, m_optim_res, rtol=1e-2, atol=1e-3)

        # generic case
        m, m_optim = _quant_script_and_optimize(Parent())
        FileCheck().check_not("Conv2d = prim::GetAttr[name=\"conv1\"]") \
                   .check_count("__torch__.torch.classes.quantized.Conv2dPackedParamsBase = prim::Constant", 2, exactly=True) \
                   .run(m_optim.graph)
        self.assertFalse(hasattr(m_optim, "conv1"))
        self.assertFalse(hasattr(m_optim, "child"))

        data = torch.randn(4, 1, 4, 4)
        m_res = m(data)
        m_optim_res = m_optim(data)
        torch.testing.assert_close(m_res, m_optim_res, rtol=1e-2, atol=1e-3)
@skipIfNoXNNPACK
@unittest.skipUnless(HAS_TORCHVISION, "Needs torchvision")
def test_mobilenet_optimize_for_mobile(self):
    """Script and mobile-optimize MobileNetV3-small, then run forward
    repeatedly to make sure repeated invocations stay stable."""
    model = torchvision.models.mobilenet_v3_small()
    model = torch.jit.script(model)
    model = optimize_for_mobile(model)

    # run forward 3 times until segfault, see https://github.com/pytorch/pytorch/issues/52463
    x = torch.zeros(1, 3, 56, 56)
    for _ in range(3):
        self.assertEqual(model(x).numel(), 1000)
def test_clone_module_with_class(self):
    """Exercise torch._C._hack_do_not_use_clone_module_with_class: cloning a
    scripted module while selectively dropping methods and attributes, and
    erroring out when a kept method references a dropped one."""
    class MyInnerTestModule(torch.nn.Module):
        def __init__(self):
            super(MyInnerTestModule, self).__init__()
            self.pqr = torch.Tensor([10., 20., 30.])

        def forward(self, inputs):
            return inputs

        @torch.jit.export
        def dummy_method_not_cloned(self):
            return 20

    class MyTestModule(torch.nn.Module):
        def __init__(self):
            super(MyTestModule, self).__init__()
            self.abc = 23
            self.pqr = torch.Tensor([1., 2., 3.])
            self.inner = MyInnerTestModule()

        def forward(self, inputs):
            x = self.dummy_method_cloned()
            # The call to self.inner.dummy_method_not_cloned should not raise an error
            y = self.inner.dummy_method_not_cloned()
            # The call to self.inner.pqr should not raise an error
            z = self.inner.pqr
            return (inputs, x, y, z)

        @torch.jit.export
        def dummy_method_not_cloned2(self):
            # The call to self.inner.dummy_method_not_cloned should not raise an error
            y = self.inner.dummy_method_not_cloned()
            # The call to self.inner.pqr should not raise an error
            z = self.inner.pqr
            return self.pqr, self.dummy_method_not_cloned(), y, z

        @torch.jit.export
        def dummy_method_not_cloned(self):
            return None

        @torch.jit.export
        def dummy_method_cloned(self):
            return None

        @torch.jit.export
        def dummy_method_ref_attr_pqr(self):
            return self.pqr, self.inner.pqr

    m = torch.jit.script(MyTestModule())

    # Check that the methods exist on the original model.
    self.assertEqual(hasattr(m, "dummy_method_not_cloned"), True)
    self.assertEqual(hasattr(m, "dummy_method_cloned"), True)
    self.assertEqual(hasattr(m, "dummy_method_not_cloned2"), True)
    self.assertEqual(hasattr(m, "pqr"), True)

    # Case-1: Successfully clone, ignoring 2 methods, keeping all attributes.
    cloned = torch._C._hack_do_not_use_clone_module_with_class(
        m._c,
        ["dummy_method_not_cloned", "dummy_method_not_cloned2"],  # ignored_methods
        [],  # ignored_attributes
    )

    # Check that the ignored methods don't exist on the cloned model.
    self.assertEqual(hasattr(cloned, "dummy_method_not_cloned"), False)
    self.assertEqual(hasattr(cloned, "dummy_method_cloned"), True)
    self.assertEqual(hasattr(cloned, "dummy_method_not_cloned2"), False)
    self.assertEqual(hasattr(cloned, "pqr"), True)

    # Check that the cloned class has a classname that starts with __torch__.
    self.assertTrue(
        cloned.qualified_name.startswith('__torch__.'),
        ("Expected the cloned module's name to start with the string "
         "'__torch__.', but got: {0}").format(cloned.qualified_name),
    )

    # Case-2: Successfully clone the module, ignoring the attribute pqr, and the method that references it.
    cloned = torch._C._hack_do_not_use_clone_module_with_class(
        m._c,
        ["dummy_method_not_cloned", "dummy_method_not_cloned2", "dummy_method_ref_attr_pqr"],
        ["pqr"],
    )

    # Check that the ignored methods don't exist on the cloned model.
    self.assertEqual(hasattr(cloned, "dummy_method_not_cloned"), False)
    self.assertEqual(hasattr(cloned, "dummy_method_cloned"), True)
    self.assertEqual(hasattr(cloned, "dummy_method_not_cloned2"), False)
    self.assertEqual(hasattr(cloned, "dummy_method_ref_attr_pqr"), False)
    self.assertEqual(hasattr(cloned, "pqr"), False)

    # Case-3: The statement below will throw since dummy_method_cloned2 is preserved,
    # and references dummy_method_not_cloned, which is not cloned.
    with self.assertRaises(RuntimeError):
        cloned = torch._C._hack_do_not_use_clone_module_with_class(m._c, ["dummy_method_not_cloned"], [])

    # Case-4: The statement below will throw since dummy_method_ref_attr_pqr
    # is preserved, and references "pqr", which is not cloned.
    with self.assertRaises(RuntimeError):
        cloned = torch._C._hack_do_not_use_clone_module_with_class(
            m._c,
            ["dummy_method_not_cloned", "dummy_method_not_cloned2"],
            ["pqr"],
        )
# Standard PyTorch test-suite entry point.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_mobile_optimizer.py |
# Owner(s): ["module: nn"]
import inspect
import torch
from unittest import mock
from unittest.mock import MagicMock, patch
from torch.testing import floating_types
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.common_utils import TestCase, run_tests
# Returns a database of args & kwargs that can be used to construct each module.
# Each entry is in class -> (args, kwargs) format.
# Example: torch.nn.Linear -> ([10, 5], {})
# TODO: Merge this in with the initial ModuleInfo implementation.
def build_constructor_arg_db():
    """Return a database of args & kwargs that can be used to construct each
    torch.nn / qat / quantizable / quantized module.

    Each entry is in ``class -> (args, kwargs)`` format, e.g.
    ``torch.nn.Linear -> ([10, 5], {})``. Class objects appearing as an arg
    value are instantiated recursively by get_example_args().
    """
    return {
        torch.nn.AdaptiveAvgPool1d: ((5,), {}),
        torch.nn.AdaptiveAvgPool2d: ((5,), {}),
        torch.nn.AdaptiveAvgPool3d: ((5,), {}),
        torch.nn.AdaptiveLogSoftmaxWithLoss: ((100, 20, [5, 10, 15]), {}),
        torch.nn.AdaptiveMaxPool1d: ((5,), {}),
        torch.nn.AdaptiveMaxPool2d: ((5,), {}),
        torch.nn.AdaptiveMaxPool3d: ((5,), {}),
        torch.nn.AlphaDropout: ((), {}),
        torch.nn.AvgPool1d: ((3,), {}),
        torch.nn.AvgPool2d: ((3,), {}),
        torch.nn.AvgPool3d: ((3,), {}),
        torch.nn.BCELoss: ((), {}),
        torch.nn.BCEWithLogitsLoss: ((), {}),
        torch.nn.BatchNorm1d: ((5,), {}),
        torch.nn.BatchNorm2d: ((5,), {}),
        torch.nn.BatchNorm3d: ((5,), {}),
        torch.nn.Bilinear: ((2, 3, 4), {}),
        torch.nn.CELU: ((), {}),
        torch.nn.CTCLoss: ((), {}),
        torch.nn.ChannelShuffle: ((4,), {}),
        torch.nn.ConstantPad1d: ((2, 3.5), {}),
        torch.nn.ConstantPad2d: ((2, 3.5), {}),
        torch.nn.ConstantPad3d: ((2, 3.5), {}),
        torch.nn.Conv1d: ((3, 3, 3), {}),
        torch.nn.Conv2d: ((3, 3, 3), {}),
        torch.nn.Conv3d: ((3, 3, 3), {}),
        torch.nn.ConvTranspose1d: ((3, 3, 3), {}),
        torch.nn.ConvTranspose2d: ((3, 3, 3), {}),
        torch.nn.ConvTranspose3d: ((3, 3, 3), {}),
        torch.nn.CosineEmbeddingLoss: ((), {}),
        torch.nn.CosineSimilarity: ((), {}),
        torch.nn.CrossEntropyLoss: ((), {}),
        torch.nn.CrossMapLRN2d: ((5,), {}),
        torch.nn.Dropout1d: ((), {}),
        torch.nn.Dropout2d: ((), {}),
        torch.nn.Dropout3d: ((), {}),
        torch.nn.Dropout: ((), {}),
        torch.nn.ELU: ((), {}),
        torch.nn.Embedding: ((10, 5), {}),
        torch.nn.EmbeddingBag: ((10, 5), {}),
        torch.nn.FeatureAlphaDropout: ((), {}),
        torch.nn.Flatten: ((), {}),
        torch.nn.Fold: ((5, 2), {}),
        torch.nn.FractionalMaxPool2d: ((5, 2), {}),
        torch.nn.FractionalMaxPool3d: ((5, 2), {}),
        torch.nn.GELU: ((), {}),
        torch.nn.GLU: ((), {}),
        torch.nn.GRU: ((5, 10), {}),
        torch.nn.GRUCell: ((5, 10), {}),
        torch.nn.GaussianNLLLoss: ((), {}),
        torch.nn.GroupNorm: ((3, 6, 1e-5, True), {}),
        torch.nn.Hardshrink: ((), {}),
        torch.nn.Hardsigmoid: ((), {}),
        torch.nn.Hardswish: ((), {}),
        torch.nn.Hardtanh: ((), {}),
        torch.nn.HingeEmbeddingLoss: ((), {}),
        torch.nn.HuberLoss: ((), {}),
        torch.nn.Identity: ((), {}),
        torch.nn.InstanceNorm1d: ((5, 1e-5, 0.1, True), {}),
        torch.nn.InstanceNorm2d: ((5, 1e-5, 0.1, True), {}),
        torch.nn.InstanceNorm3d: ((5, 1e-5, 0.1, True), {}),
        torch.nn.KLDivLoss: ((), {}),
        torch.nn.L1Loss: ((), {}),
        torch.nn.LPPool1d: ((2, 3), {}),
        torch.nn.LPPool2d: ((2, 3), {}),
        torch.nn.LSTM: ((5, 10), {}),
        torch.nn.LSTMCell: ((5, 10), {}),
        torch.nn.LayerNorm: ((2,), {}),
        torch.nn.LazyBatchNorm1d: ((), {}),
        torch.nn.LazyBatchNorm2d: ((), {}),
        torch.nn.LazyBatchNorm3d: ((), {}),
        torch.nn.LazyConv1d: ((5, 2), {}),
        torch.nn.LazyConv2d: ((5, 2), {}),
        torch.nn.LazyConv3d: ((5, 2), {}),
        torch.nn.LazyConvTranspose1d: ((5, 2), {}),
        torch.nn.LazyConvTranspose2d: ((5, 2), {}),
        torch.nn.LazyConvTranspose3d: ((5, 2), {}),
        torch.nn.LazyInstanceNorm1d: ((), {}),
        torch.nn.LazyInstanceNorm2d: ((), {}),
        torch.nn.LazyInstanceNorm3d: ((), {}),
        torch.nn.LazyLinear: ((5,), {}),
        torch.nn.LeakyReLU: ((), {}),
        torch.nn.Linear: ((10, 5), {}),
        torch.nn.LocalResponseNorm: ((2,), {}),
        torch.nn.LogSigmoid: ((), {}),
        torch.nn.LogSoftmax: ((), {}),
        torch.nn.MSELoss: ((), {}),
        torch.nn.MarginRankingLoss: ((), {}),
        torch.nn.MaxPool1d: ((3,), {}),
        torch.nn.MaxPool2d: ((3,), {}),
        torch.nn.MaxPool3d: ((3,), {}),
        torch.nn.MaxUnpool1d: ((5,), {}),
        torch.nn.MaxUnpool2d: ((5,), {}),
        torch.nn.MaxUnpool3d: ((5,), {}),
        torch.nn.Mish: ((), {}),
        torch.nn.ModuleDict: ((), {}),
        torch.nn.ModuleList: ((), {}),
        torch.nn.MultiLabelMarginLoss: ((), {}),
        torch.nn.MultiLabelSoftMarginLoss: ((), {}),
        torch.nn.MultiMarginLoss: ((), {}),
        torch.nn.MultiheadAttention: ((100, 2), {}),
        torch.nn.NLLLoss2d: ((), {}),
        torch.nn.NLLLoss: ((), {}),
        torch.nn.PReLU: ((), {}),
        torch.nn.PairwiseDistance: ((), {}),
        torch.nn.ParameterDict: ((), {}),
        torch.nn.ParameterList: ((), {}),
        torch.nn.PixelShuffle: ((2,), {}),
        torch.nn.PixelUnshuffle: ((2,), {}),
        torch.nn.PoissonNLLLoss: ((), {}),
        torch.nn.RNN: ((5, 10), {}),
        torch.nn.RNNBase: (('LSTM', 5, 10), {}),
        torch.nn.RNNCell: ((5, 10), {}),
        torch.nn.RNNCellBase: ((5, 10, True, 2), {}),
        torch.nn.RReLU: ((), {}),
        torch.nn.ReLU6: ((), {}),
        torch.nn.ReLU: ((), {}),
        torch.nn.ReflectionPad1d: ((2,), {}),
        torch.nn.ReflectionPad2d: ((2,), {}),
        torch.nn.ReflectionPad3d: ((2,), {}),
        torch.nn.ReplicationPad1d: ((2,), {}),
        torch.nn.ReplicationPad2d: ((2,), {}),
        torch.nn.ReplicationPad3d: ((2,), {}),
        torch.nn.SELU: ((), {}),
        torch.nn.Sequential: ((), {}),
        torch.nn.SiLU: ((), {}),
        torch.nn.Sigmoid: ((), {}),
        torch.nn.SmoothL1Loss: ((), {}),
        torch.nn.SoftMarginLoss: ((), {}),
        torch.nn.Softmax2d: ((), {}),
        torch.nn.Softmax: ((), {}),
        torch.nn.Softmin: ((), {}),
        torch.nn.Softplus: ((), {}),
        torch.nn.Softshrink: ((), {}),
        torch.nn.Softsign: ((), {}),
        torch.nn.SyncBatchNorm: ((5,), {}),
        torch.nn.Tanh: ((), {}),
        torch.nn.Tanhshrink: ((), {}),
        torch.nn.Threshold: ((0.1, 20), {}),
        torch.nn.Transformer: ((), {}),
        torch.nn.TransformerDecoder: ((torch.nn.TransformerDecoderLayer, 3), {}),
        torch.nn.TransformerDecoderLayer: ((10, 2), {}),
        torch.nn.TransformerEncoder: ((torch.nn.TransformerEncoderLayer, 3), {}),
        torch.nn.TransformerEncoderLayer: ((10, 2), {}),
        torch.nn.TripletMarginLoss: ((), {}),
        torch.nn.TripletMarginWithDistanceLoss: ((), {}),
        torch.nn.Unflatten: ((1, (2, 5, 5)), {}),
        torch.nn.Unfold: ((3,), {}),
        torch.nn.Upsample: ((), {}),
        torch.nn.UpsamplingBilinear2d: ((), {}),
        torch.nn.UpsamplingNearest2d: ((), {}),
        torch.nn.ZeroPad2d: ((0,), {}),
        # QAT modules need an explicit qconfig.
        torch.nn.qat.Conv1d: ((3, 3, 3), {
            'qconfig': torch.ao.quantization.default_qconfig,
        }),
        torch.nn.qat.Conv2d: ((3, 3, 3), {
            'qconfig': torch.ao.quantization.default_qconfig,
        }),
        torch.nn.qat.Conv3d: ((3, 3, 3), {
            'qconfig': torch.ao.quantization.default_qconfig,
        }),
        torch.nn.qat.Linear: ((5, 2), {
            'qconfig': torch.ao.quantization.default_qconfig,
        }),
        torch.nn.qat.Embedding: ((10, 12), {
            'qconfig': torch.ao.quantization.float_qparams_weight_only_qconfig,
        }),
        torch.nn.qat.EmbeddingBag: ((10, 12), {
            'qconfig': torch.ao.quantization.float_qparams_weight_only_qconfig,
        }),
        torch.nn.quantizable.LSTM: ((5, 6), {}),
        torch.nn.quantizable.LSTMCell: ((5, 6), {}),
        torch.nn.quantizable.MultiheadAttention: ((10, 2), {}),
        torch.nn.quantized.BatchNorm2d: ((2,), {}),
        torch.nn.quantized.BatchNorm3d: ((2,), {}),
        torch.nn.quantized.Dropout: ((), {}),
        torch.nn.quantized.Conv1d: ((3, 3, 3), {}),
        torch.nn.quantized.Conv2d: ((3, 3, 3), {}),
        torch.nn.quantized.Conv3d: ((3, 3, 3), {}),
        torch.nn.quantized.ConvTranspose1d: ((3, 3, 3), {}),
        torch.nn.quantized.ConvTranspose2d: ((3, 3, 3), {}),
        torch.nn.quantized.ConvTranspose3d: ((16, 33, (3, 3, 5)), {
            'stride': (2, 1, 1),
            'padding': (4, 2, 2),
            'output_padding': (2, 2, 2),
            'dilation': (1, 1, 1),
        }),
        torch.nn.quantized.DeQuantize: ((), {}),
        torch.nn.quantized.ELU: ((0.01, 0), {}),
        # 'factory_kwargs': {} entries mark modules that take factory_kwargs
        # explicitly (see generate_test_func).
        torch.nn.quantized.Embedding: ((10, 3), {
            'factory_kwargs': {},
        }),
        torch.nn.quantized.EmbeddingBag: ((10, 3), {
            'factory_kwargs': {},
        }),
        torch.nn.quantized.GroupNorm: ((2, 4, torch.nn.Parameter(torch.tensor(2.)),
                                        torch.nn.Parameter(torch.tensor(2.)), 0.1, 0), {}),
        torch.nn.quantized.Hardswish: ((0.1, 0,), {}),
        torch.nn.quantized.InstanceNorm1d: ((2, torch.nn.Parameter(torch.tensor(2.)),
                                             torch.nn.Parameter(torch.tensor(2.)), 0.1, 0), {}),
        torch.nn.quantized.InstanceNorm2d: ((2, torch.nn.Parameter(torch.tensor(2.)),
                                             torch.nn.Parameter(torch.tensor(2.)), 0.1, 0), {}),
        torch.nn.quantized.InstanceNorm3d: ((2, torch.nn.Parameter(torch.tensor(2.)),
                                             torch.nn.Parameter(torch.tensor(2.)), 0.1, 0), {}),
        torch.nn.quantized.LayerNorm: ((2, torch.nn.Parameter(torch.tensor(2.)),
                                        torch.nn.Parameter(torch.tensor(2.)), 0.1, 0), {}),
        torch.nn.quantized.LeakyReLU: ((0.01, 0), {}),
        torch.nn.quantized.Linear: ((5, 2), {
            'factory_kwargs': {},
        }),
        torch.nn.quantized.MaxPool2d: ((3,), {}),
        torch.nn.quantized.PReLU: ((0.01, 0), {}),
        torch.nn.quantized.Quantize: ((0.1, 0), {
            'dtype': torch.int16,
            'factory_kwargs': {},
        }),
        torch.nn.quantized.ReLU6: ((), {}),
        torch.nn.quantized.Sigmoid: ((0.1, 0), {}),
        torch.nn.quantized.Softmax: ((), {}),
        torch.nn.quantized.FloatFunctional: ((), {}),
        torch.nn.quantized.FXFloatFunctional: ((), {}),
        torch.nn.quantized.QFunctional: ((), {}),
    }
def instantiate_class(cls, args, kwargs, extra_kwargs):
    """Instantiate ``cls`` with the given args/kwargs, merging in
    ``extra_kwargs`` (e.g. device/dtype) when provided."""
    if extra_kwargs is None:
        return cls(*args, **kwargs)
    return cls(*args, **kwargs, **extra_kwargs)
def mock_wrapper(method):
    """Wrap *method* so that every call is recorded on ``wrapper.mock``
    (a MagicMock) and then forwarded to the real implementation."""
    recorder = MagicMock()

    def wrapper(self, *args, **kwargs):
        recorder(*args, **kwargs)
        return method(self, *args, **kwargs)

    wrapper.mock = recorder
    return wrapper
# Returns a set of args / kwargs that can be used to construct the module.
def get_example_args(module_cls, constructor_arg_db, extra_kwargs=None):
    """Look up example constructor args for ``module_cls`` from the DB.

    Class objects among the args/kwargs are instantiated recursively;
    ``torch.nn.Parameter`` values are moved via ``.to(**extra_kwargs)``.
    ``extra_kwargs`` (e.g. device/dtype) is also merged into the returned
    kwargs.

    Bug fix: the kwargs branch previously called ``v.to(*extra_kwargs)``,
    which unpacks the dict's *keys* positionally (e.g. ``v.to('dtype')``)
    instead of passing them as keyword arguments like the args branch does.
    """
    assert module_cls in constructor_arg_db, \
        f"No entry for {module_cls} in the constructor arg DB. Please add it to pass these tests."
    args, kwargs = constructor_arg_db[module_cls]
    extra_kwargs = {} if extra_kwargs is None else extra_kwargs

    def _convert(value):
        # Recursively instantiate class objects; re-wrap Parameters after
        # applying extra_kwargs; everything else passes through unchanged.
        if inspect.isclass(value):
            return instantiate_class(value, *get_example_args(value, constructor_arg_db),
                                     extra_kwargs=extra_kwargs)
        if isinstance(value, torch.nn.Parameter):
            return torch.nn.Parameter(value.to(**extra_kwargs))
        return value

    args = [_convert(arg) for arg in args]
    kwargs = {k: _convert(v) for k, v in kwargs.items()}
    kwargs.update(extra_kwargs)
    return args, kwargs
def generate_test_func(test_cls, module_cls, constructor_arg_db,
                       verify_kwargs=True, module_is_lazy=False, check_nonexistent_arg=True):
    """Generate a per-module test verifying device/dtype kwarg support.

    The returned ``run_test`` is decorated with @dtypes so the device-type
    test framework parameterizes it over floating dtypes.
    """
    # Generate a function for testing the given module.
    @dtypes(*floating_types())
    def run_test(test_cls, device, dtype, module_cls=module_cls):
        # Check if this module creates parameters or registers buffers.
        # The mock magic here passes through to the real Parameter / register_buffer
        # logic and is only used to check for calls.
        args, kwargs = get_example_args(module_cls, constructor_arg_db)

        # Some modules need to pass factory_kwargs so as not to conflict with existing args such as dtype.
        module_needs_factory_kwargs = 'factory_kwargs' in kwargs
        if module_needs_factory_kwargs:
            del kwargs['factory_kwargs']
            extra_kwargs = {
                'factory_kwargs': {
                    'device': device,
                    'dtype': dtype,
                }
            }
        else:
            extra_kwargs = {
                'device': device,
                'dtype': dtype,
            }

        parameter_new = mock_wrapper(torch.nn.Parameter.__new__)
        with patch.object(torch.nn.Parameter, '__new__', parameter_new):
            register_buffer = mock_wrapper(torch.nn.Module.register_buffer)
            with patch.object(torch.nn.Module, 'register_buffer', register_buffer):
                m = module_cls(*args, **kwargs)
                module_creates_params_or_buffers = parameter_new.mock.called or register_buffer.mock.called

        # == Verify factory kwargs are supported. ==
        if verify_kwargs and module_creates_params_or_buffers:
            args, kwargs = get_example_args(module_cls, constructor_arg_db,
                                            extra_kwargs=extra_kwargs)
            if module_is_lazy:
                # Ensure device and dtype are passed to all UninitializedParameters and UninitializedBuffers.
                uninit_param_new = mock_wrapper(torch.nn.UninitializedParameter.__new__)
                with patch.object(torch.nn.UninitializedParameter, '__new__', uninit_param_new):
                    uninit_buffer_new = mock_wrapper(torch.nn.UninitializedBuffer.__new__)
                    with patch.object(torch.nn.UninitializedBuffer, '__new__', uninit_buffer_new):
                        m = module_cls(*args, **kwargs)
                        uninit_param_new.mock.assert_has_calls(
                            [mock.call(device=device, dtype=dtype) for _ in uninit_param_new.mock.mock_calls])
                        uninit_buffer_new.mock.assert_has_calls(
                            [mock.call(device=device, dtype=dtype) for _ in uninit_buffer_new.mock.mock_calls])
            else:
                # Check device placement and dtype for parameters and buffers.
                # Only verify floating point dtypes since that's what the kwarg applies to.
                # Note that dtype verification is also skipped if the module requires factory_kwargs.
                m = module_cls(*args, **kwargs)
                for name, param in m.named_parameters():
                    test_cls.assertEqual(
                        str(param.device), device,
                        f'Parameter {name} is on {param.device.type} instead of the expected device {device}')
                    if param.dtype.is_floating_point and not module_needs_factory_kwargs:
                        test_cls.assertEqual(
                            param.dtype, dtype,
                            f'Parameter {name} is of dtype {param.dtype} instead of the expected dtype {dtype}')
                for name, buffer in m.named_buffers():
                    test_cls.assertEqual(
                        str(buffer.device), device,
                        f'Buffer {name} is on {buffer.device.type} instead of the expected device {device}')
                    if buffer.dtype.is_floating_point and not module_needs_factory_kwargs:
                        test_cls.assertEqual(
                            buffer.dtype, dtype,
                            f'Buffer {name} is of dtype {buffer.dtype} instead of the expected dtype {dtype}')

        # == Verify passing a nonexistent arg errors out. ==
        if check_nonexistent_arg:
            with test_cls.assertRaises(TypeError):
                m = module_cls(*args, **kwargs, nonexistent_arg='foo')
    return run_test
def generate_tests(test_cls, constructor_arg_db):
    """Attach a generated device/dtype constructor test to ``test_cls`` for
    every module class found under the covered namespaces.

    Fixes: ``setattr`` previously hard-coded ``TestModuleInit`` instead of
    using the ``test_cls`` parameter (inconsistent with the rest of the
    function); a duplicated ``LazyConvTranspose3d`` entry in ``LAZY_MODULES``
    was removed (redundant in a set literal).
    """
    # test all modules underneath these namespaces...
    NAMESPACES = [
        torch.nn,
        torch.nn.qat,
        torch.nn.quantizable,
        torch.nn.quantized,
    ]
    # ...except these
    MODULES_TO_SKIP = {
        torch.nn.Module,
        torch.nn.Container,  # deprecated
        torch.nn.NLLLoss2d,  # deprecated
        # TODO: Remove these 2 from this list once the ASan issue is fixed.
        # See https://github.com/pytorch/pytorch/issues/55396
        torch.nn.quantized.Embedding,
        torch.nn.quantized.EmbeddingBag,
        torch.nn.quantized.LSTM,
        torch.nn.quantized.MultiheadAttention,
    }
    # no need to support kwargs for these modules even though
    # they have parameters / buffers because they are passed in
    # already instantiated
    MODULES_WITHOUT_KWARGS_SUPPORT = {
        torch.nn.BCELoss,
        torch.nn.BCEWithLogitsLoss,
        torch.nn.CrossEntropyLoss,
        torch.nn.FractionalMaxPool2d,
        torch.nn.FractionalMaxPool3d,
        torch.nn.MultiLabelSoftMarginLoss,
        torch.nn.MultiMarginLoss,
        torch.nn.NLLLoss,
        torch.nn.TransformerDecoder,
        torch.nn.TransformerEncoder,
    }
    # modules that supported kwargs before
    MODULES_WITH_PREVIOUS_KWARGS = {
        torch.nn.Identity,
    }
    # lazy modules don't instantiate parameters right away
    LAZY_MODULES = {
        torch.nn.LazyBatchNorm1d,
        torch.nn.LazyBatchNorm2d,
        torch.nn.LazyBatchNorm3d,
        torch.nn.LazyConv1d,
        torch.nn.LazyConv2d,
        torch.nn.LazyConv3d,
        torch.nn.LazyConvTranspose1d,
        torch.nn.LazyConvTranspose2d,
        torch.nn.LazyConvTranspose3d,
        torch.nn.LazyInstanceNorm1d,
        torch.nn.LazyInstanceNorm2d,
        torch.nn.LazyInstanceNorm3d,
        torch.nn.LazyLinear,
    }
    # these modules requires FBGEMM backend to instantiate
    MODULES_THAT_REQUIRE_FBGEMM = {
        torch.nn.quantized.Conv1d,
        torch.nn.quantized.Conv2d,
        torch.nn.quantized.Conv3d,
        torch.nn.quantized.ConvTranspose1d,
        torch.nn.quantized.ConvTranspose2d,
        torch.nn.quantized.ConvTranspose3d,
        torch.nn.quantized.Linear,
    }
    for namespace in NAMESPACES:
        # the "nn" in "torch.nn"
        namespace_basename = namespace.__name__.split('.')[-1]
        for module_name in namespace.modules.__all__:
            # class object for this module (e.g. torch.nn.Linear)
            module_cls = getattr(namespace.modules, module_name)
            if module_cls in MODULES_TO_SKIP:
                continue
            verify_kwargs = module_cls not in MODULES_WITHOUT_KWARGS_SUPPORT
            module_is_lazy = module_cls in LAZY_MODULES
            check_nonexistent_arg = module_cls not in MODULES_WITH_PREVIOUS_KWARGS
            # Generate a function for testing this module and setattr it onto the test class.
            run_test = generate_test_func(test_cls, module_cls, constructor_arg_db,
                                          verify_kwargs=verify_kwargs,
                                          module_is_lazy=module_is_lazy,
                                          check_nonexistent_arg=check_nonexistent_arg)
            test_name = f'test_{namespace_basename}_{module_name}'
            if module_cls in MODULES_THAT_REQUIRE_FBGEMM:
                run_test = skipIfNoFBGEMM(run_test)
            # Attach to the caller-provided class, not a hard-coded one.
            setattr(test_cls, test_name, run_test)
class TestModuleInit(TestCase):
    # NOTE(review): presumably consumed by common_utils.TestCase so that
    # NotImplementedError is not silently ignored — confirm against TestCase.
    _ignore_not_implemented_error = False
# Attach the generated per-module tests, then create device-specific variants.
generate_tests(TestModuleInit, build_constructor_arg_db())
instantiate_device_type_tests(TestModuleInit, globals())

if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_module_init.py |
# Owner(s): ["oncall: mobile"]
import torch
from torch.nn import functional as F
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import FileCheck
import io
class TestMetalRewritePass(TestCase):
    """Tests for the JIT rewrite passes that lower conv ops to Metal
    prepacked ops (insertion, prepack folding, and clamp fusion)."""

    @staticmethod
    def validate_transformed_module(
            # To please flake
            self,
            pattern_count_map,
            data_shape,
            prepack_removal=False,
            fuse_clamping_ops=False):
        """Script the given module instance, apply the Metal rewrite passes,
        round-trip through save/load, then FileCheck the resulting graph.

        ``pattern_count_map`` values: 0 -> pattern must appear at least once,
        -1 -> pattern must not appear, n > 0 -> exactly n occurrences.
        """
        module_instance = self
        scripted_model = torch.jit.script(module_instance)
        scripted_model.eval()
        input_data = torch.normal(1, 20, size=data_shape)
        # Run once before transformation; the value itself is unused, this
        # just exercises the original graph.
        ref_result = scripted_model(input_data)
        torch._C._jit_pass_metal_insert_prepacked_ops(scripted_model._c)
        if fuse_clamping_ops or prepack_removal:
            # Both follow-up passes operate on a frozen module.
            scripted_model._c = torch._C._freeze_module(scripted_model._c)
        if fuse_clamping_ops:
            torch._C._jit_pass_metal_fuse_clamp_w_prepacked_conv(scripted_model._c)
        if prepack_removal:
            torch._C._jit_pass_metal_fold_prepacking_ops(scripted_model._c)

        # Round-trip through serialization so we validate the saved graph.
        buffer = io.BytesIO()
        torch.jit.save(scripted_model, buffer)
        buffer.seek(0)
        deserialized_scripted_model = torch.jit.load(buffer)
        for pattern, v in pattern_count_map.items():
            if v == 0:
                FileCheck().check(pattern).run(deserialized_scripted_model.graph)
            elif v == -1:
                FileCheck().check_not(pattern).run(deserialized_scripted_model.graph)
            else:
                FileCheck().check_count(pattern, v, exactly=True).run(deserialized_scripted_model.graph)

    def test_conv(self):
        """Conv, conv+relu, and conv+hardtanh should all be rewritten to
        metal_prepack ops, with prepack folding and clamp fusion applied."""
        # Conv params
        batch_size = 2
        input_channels_per_group = 6
        height = 16
        width = 16
        output_channels_per_group = 6
        groups = 4
        kernel_h = kernel_w = 3
        stride_h = stride_w = 1
        pad_h = pad_w = 1
        dilation = 1
        input_channels = input_channels_per_group * groups
        output_channels = output_channels_per_group * groups
        kernels = (kernel_h, kernel_w)
        strides = (stride_h, stride_w)
        paddings = (pad_h, pad_w)
        dilations = (dilation, dilation)
        conv_weight_shape = (output_channels, input_channels_per_group, kernel_h, kernel_w)
        conv_bias_shape = (output_channels)

        class Conv2D(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
                self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
                self.strides = strides
                self.paddings = paddings
                self.dilations = dilations
                self.groups = groups

            def forward(self, x):
                return F.conv2d(x, self.weight, self.bias,
                                self.strides, self.paddings, self.dilations, self.groups)

        data_shape = (batch_size, input_channels, height, width)
        pattern_count_map = {"Tensor = aten::conv2d": -1,
                             "metal_prepack::conv2d_prepack": 1,
                             "metal_prepack::conv2d_run": 1}
        TestMetalRewritePass.validate_transformed_module(Conv2D(), pattern_count_map, data_shape)

        class Conv2DRelu(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
                self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
                self.strides = strides
                self.paddings = paddings
                self.dilations = dilations
                self.groups = groups

            def forward(self, x):
                o = F.conv2d(x, self.weight, self.bias,
                             self.strides, self.paddings, self.dilations, self.groups)
                o = F.relu(o)
                return o

        data_shape = (batch_size, input_channels, height, width)
        pattern_count_map = {"Tensor = aten::conv2d": -1,
                             "metal_prepack::conv2d_prepack": 1,
                             "metal_prepack::conv2d_run": 1}
        TestMetalRewritePass.validate_transformed_module(
            Conv2DRelu(), pattern_count_map, data_shape)

        # After prepack folding the relu remains as a separate op...
        pattern_count_map["aten::relu"] = 1
        pattern_count_map["metal_prepack::conv2d_prepack"] = -1
        TestMetalRewritePass.validate_transformed_module(
            Conv2DRelu(),
            pattern_count_map,
            data_shape,
            prepack_removal=True)
        # ...and disappears entirely once clamp fusion runs.
        pattern_count_map["aten::relu"] = -1
        TestMetalRewritePass.validate_transformed_module(
            Conv2DRelu(),
            pattern_count_map,
            data_shape,
            prepack_removal=True,
            fuse_clamping_ops=True)

        class Conv2DHardtanh(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
                self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
                self.strides = strides
                self.paddings = paddings
                self.dilations = dilations
                self.groups = groups

            def forward(self, x):
                o = F.conv2d(x, self.weight, self.bias,
                             self.strides, self.paddings, self.dilations, self.groups)
                o = F.hardtanh(o)
                return o

        data_shape = (batch_size, input_channels, height, width)
        pattern_count_map = {"Tensor = aten::conv2d": -1,
                             "metal_prepack::conv2d_prepack": 1,
                             "metal_prepack::conv2d_run": 1}
        TestMetalRewritePass.validate_transformed_module(Conv2DHardtanh(), pattern_count_map, data_shape)

        pattern_count_map["aten::hardtanh"] = 1
        pattern_count_map["metal_prepack::conv2d_prepack"] = -1
        TestMetalRewritePass.validate_transformed_module(
            Conv2DHardtanh(),
            pattern_count_map,
            data_shape,
            prepack_removal=True)

        pattern_count_map["aten::hardtanh"] = -1
        # BUG FIX: this previously validated Conv2DRelu(), whose graph has no
        # aten::hardtanh, so check_not("aten::hardtanh") passed vacuously and
        # hardtanh clamp-fusion was never actually verified.
        TestMetalRewritePass.validate_transformed_module(
            Conv2DHardtanh(),
            pattern_count_map,
            data_shape,
            prepack_removal=True,
            fuse_clamping_ops=True)
# Standard PyTorch test-suite entry point.
if __name__ == "__main__":
    run_tests()
| pytorch-master | test/test_metal.py |
# Owner(s): ["module: unknown"]
import os
import re
import yaml
import textwrap
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from collections import namedtuple
# Locate native_functions.yaml relative to this test file's directory.
path = os.path.dirname(os.path.realpath(__file__))
aten_native_yaml = os.path.join(path, '../aten/src/ATen/native/native_functions.yaml')
# Allowlist of operators permitted to declare named (namedtuple) return
# fields in native_functions.yaml; kept in sync by test_native_functions_yaml.
all_operators_with_namedtuple_return = {
    'max', 'min', 'aminmax', 'median', 'nanmedian', 'mode', 'kthvalue', 'svd', 'symeig', 'eig',
    'qr', 'geqrf', 'slogdet', 'sort', 'topk', 'lstsq', 'linalg_inv_ex',
    'triangular_solve', 'cummax', 'cummin', 'linalg_eigh', "_linalg_eigh", "_unpack_dual", 'linalg_qr',
    'linalg_svd', '_linalg_svd', 'linalg_slogdet', '_linalg_slogdet', 'fake_quantize_per_tensor_affine_cachemask',
    'fake_quantize_per_channel_affine_cachemask', 'linalg_lstsq', 'linalg_eig', 'linalg_cholesky_ex',
    'frexp', 'lu_unpack', 'histogram', 'histogramdd',
    '_fake_quantize_per_tensor_affine_cachemask_tensor_qparams',
    '_fused_moving_avg_obs_fq_helper', 'linalg_lu_factor', 'linalg_lu_factor_ex', 'linalg_lu',
    '_linalg_det', '_lu_with_info', 'linalg_ldl_factor_ex', 'linalg_ldl_factor', 'linalg_solve_ex', '_linalg_solve_ex'
}
class TestNamedTupleAPI(TestCase):
def test_import_return_types(self):
    # Both a plain import and a star-import of torch.return_types must work.
    import torch.return_types  # noqa: F401
    exec('from torch.return_types import *')
def test_native_functions_yaml(self):
    """Cross-check the allowlist against native_functions.yaml: every op with
    named return fields must be allowlisted, and every allowlisted op must
    still exist in the yaml."""
    operators_found = set()
    # Captures the operator base name, e.g. 'max' from 'max.dim(...)'.
    regex = re.compile(r"^(\w*)(\(|\.)")
    with open(aten_native_yaml, 'r') as file:
        for f in yaml.safe_load(file.read()):
            f = f['func']
            ret = f.split('->')[1].strip()
            name = regex.findall(f)[0][0]
            if name in all_operators_with_namedtuple_return:
                operators_found.add(name)
                continue
            # Backward/forward helpers are exempt from the naming check.
            if '_backward' in name or name.endswith('_forward'):
                continue
            # Only tuple returns can carry field names.
            if not ret.startswith('('):
                continue
            if ret == '()':
                continue
            ret = ret[1:-1].split(',')
            for r in ret:
                r = r.strip()
                # A named return looks like 'Tensor values' (two tokens).
                self.assertEqual(len(r.split()), 1, 'only allowlisted '
                                 'operators are allowed to have named '
                                 'return type, got ' + name)
    self.assertEqual(all_operators_with_namedtuple_return, operators_found, textwrap.dedent("""
    Some elements in the `all_operators_with_namedtuple_return` of test_namedtuple_return_api.py
    could not be found. Do you forget to update test_namedtuple_return_api.py after renaming some
    operator?
    """))
def test_namedtuple_return(self):
a = torch.randn(5, 5)
per_channel_scale = torch.randn(5)
per_channel_zp = torch.zeros(5, dtype=torch.int64)
op = namedtuple('op', ['operators', 'input', 'names', 'hasout'])
operators = [
op(operators=['max', 'min', 'median', 'nanmedian', 'mode', 'sort', 'topk', 'cummax', 'cummin'], input=(0,),
names=('values', 'indices'), hasout=True),
op(operators=['kthvalue'], input=(1, 0),
names=('values', 'indices'), hasout=True),
op(operators=['svd'], input=(), names=('U', 'S', 'V'), hasout=True),
op(operators=['linalg_svd', '_linalg_svd'], input=(), names=('U', 'S', 'Vh'), hasout=True),
op(operators=['slogdet', 'linalg_slogdet'], input=(), names=('sign', 'logabsdet'), hasout=True),
op(operators=['_linalg_slogdet'], input=(), names=('sign', 'logabsdet', 'LU', 'pivots'), hasout=True),
op(operators=['qr', 'linalg_qr'], input=(), names=('Q', 'R'), hasout=True),
op(operators=['geqrf'], input=(), names=('a', 'tau'), hasout=True),
op(operators=['symeig', 'eig'], input=(True,), names=('eigenvalues', 'eigenvectors'), hasout=True),
op(operators=['triangular_solve'], input=(a,), names=('solution', 'cloned_coefficient'), hasout=True),
op(operators=['lstsq'], input=(a,), names=('solution', 'QR'), hasout=True),
op(operators=['linalg_eig'], input=(), names=('eigenvalues', 'eigenvectors'), hasout=True),
op(operators=['linalg_eigh'], input=("L",), names=('eigenvalues', 'eigenvectors'), hasout=True),
op(operators=['_linalg_eigh'], input=("L",), names=('eigenvalues', 'eigenvectors'), hasout=True),
op(operators=['linalg_cholesky_ex'], input=(), names=('L', 'info'), hasout=True),
op(operators=['linalg_inv_ex'], input=(), names=('inverse', 'info'), hasout=True),
op(operators=['linalg_solve_ex'], input=(a,), names=('result', 'info'), hasout=True),
op(operators=['_linalg_solve_ex'], input=(a,), names=('result', 'LU', 'pivots', 'info'), hasout=True),
op(operators=['linalg_lu_factor'], input=(), names=('LU', 'pivots'), hasout=True),
op(operators=['linalg_lu_factor_ex'], input=(), names=('LU', 'pivots', 'info'), hasout=True),
op(operators=['linalg_ldl_factor'], input=(), names=('LD', 'pivots'), hasout=True),
op(operators=['linalg_ldl_factor_ex'], input=(), names=('LD', 'pivots', 'info'), hasout=True),
op(operators=['linalg_lu'], input=(), names=('P', 'L', 'U'), hasout=True),
op(operators=['fake_quantize_per_tensor_affine_cachemask'],
input=(0.1, 0, 0, 255), names=('output', 'mask',), hasout=False),
op(operators=['fake_quantize_per_channel_affine_cachemask'],
input=(per_channel_scale, per_channel_zp, 1, 0, 255),
names=('output', 'mask',), hasout=False),
op(operators=['_unpack_dual'], input=(0,), names=('primal', 'tangent'), hasout=False),
op(operators=['linalg_lstsq'], input=(a,), names=('solution', 'residuals', 'rank', 'singular_values'), hasout=False),
op(operators=['frexp'], input=(), names=('mantissa', 'exponent'), hasout=True),
op(operators=['lu_unpack'],
input=(torch.tensor([3, 2, 1, 4, 5], dtype=torch.int32), True, True),
names=('P', 'L', 'U'), hasout=True),
op(operators=['histogram'], input=(1,), names=('hist', 'bin_edges'), hasout=True),
op(operators=['histogramdd'], input=(1,), names=('hist', 'bin_edges'), hasout=False),
op(operators=['_fake_quantize_per_tensor_affine_cachemask_tensor_qparams'],
input=(torch.tensor([1.0]), torch.tensor([0], dtype=torch.int), torch.tensor([1]), 0, 255),
names=('output', 'mask',), hasout=False),
op(operators=['_fused_moving_avg_obs_fq_helper'],
input=(torch.tensor([1]), torch.tensor([1]), torch.tensor([0.1]), torch.tensor([0.1]),
torch.tensor([0.1]), torch.tensor([1]), 0.01, 0, 255, 0), names=('output', 'mask',), hasout=False),
op(operators=['_linalg_det'],
input=(), names=('result', 'LU', 'pivots'), hasout=True),
op(operators=['aminmax'], input=(), names=('min', 'max'), hasout=True),
op(operators=['_lu_with_info'],
input=(), names=('LU', 'pivots', 'info'), hasout=False),
]
def get_func(f):
"Return either torch.f or torch.linalg.f, where 'f' is a string"
mod = torch
if f.startswith('linalg_'):
mod = torch.linalg
f = f[7:]
if f.startswith('_'):
mod = torch._VF
return getattr(mod, f, None)
def check_namedtuple(tup, names):
"Check that the namedtuple 'tup' has the given names"
for i, name in enumerate(names):
self.assertIs(getattr(tup, name), tup[i])
def check_torch_return_type(f, names):
"""
Check that the return_type exists in torch.return_types
and they can constructed.
"""
return_type = getattr(torch.return_types, f)
inputs = [torch.randn(()) for _ in names]
self.assertEqual(type(return_type(inputs)), return_type)
for op in operators:
for f in op.operators:
# 1. check the namedtuple returned by calling torch.f
func = get_func(f)
if func:
ret1 = func(a, *op.input)
check_namedtuple(ret1, op.names)
check_torch_return_type(f, op.names)
#
# 2. check the out= variant, if it exists
if func and op.hasout:
ret2 = func(a, *op.input, out=tuple(ret1))
check_namedtuple(ret2, op.names)
check_torch_return_type(f + "_out", op.names)
#
# 3. check the Tensor.f method, if it exists
meth = getattr(a, f, None)
if meth:
ret3 = meth(*op.input)
check_namedtuple(ret3, op.names)
all_covered_operators = set([x for y in operators for x in y.operators])
self.assertEqual(all_operators_with_namedtuple_return, all_covered_operators, textwrap.dedent('''
The set of covered operators does not match the `all_operators_with_namedtuple_return` of
test_namedtuple_return_api.py. Do you forget to add test for that operator?
'''))
# Standard PyTorch test entry point.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_namedtuple_return_api.py |
# Owner(s): ["module: nn"]
from dataclasses import dataclass
from functools import partial
from itertools import product, chain
import unittest
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from torch.nn.utils._per_sample_grad import call_for_per_sample_grads
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_device_type import OpDTypes, instantiate_device_type_tests, ops
from torch.testing._internal.common_nn import TestBase, module_tests, new_module_tests
from torch.testing._internal.common_utils import TestCase, freeze_rng_state, make_tensor, run_tests, parametrize
from torch.testing._internal.common_methods_invocations import SampleInput, op_db
from torch.nn.utils._expanded_weights import ExpandedWeight
from torch.nn.utils._expanded_weights.expanded_weights_utils import forward_helper, set_grad_sample_if_exists, \
unpack_expanded_weight_or_tensor, sum_over_all_but_batch_and_last_n, standard_kwargs
class TestContext:
    """Empty placeholder class used by the expanded-weights tests."""
class TestExpandedWeightHelperFunction(TestCase):
    """Unit tests for the helper functions in
    torch.nn.utils._expanded_weights.expanded_weights_utils.
    The `device` parameter is supplied by instantiate_device_type_tests
    (presumably invoked later in this file — not visible here)."""

    def test_forward_helper(self, device):
        """forward_helper must produce the same result as calling the op on
        plain tensors, regardless of which args are ExpandedWeights."""
        input = torch.randn(3, 4, device=device)
        weight = torch.randn(5, 4, device=device)
        bias = torch.randn(5, device=device)
        for (weight_batched, bias_batched) in product([True, False], [True, False]):
            maybe_batched_weight = weight
            maybe_batched_bias = bias
            if weight_batched:
                maybe_batched_weight = ExpandedWeight(weight.clone().requires_grad_(), 3, loss_reduction="sum")
            if bias_batched:
                maybe_batched_bias = ExpandedWeight(bias.clone().requires_grad_(), 3, loss_reduction="sum")
            args = (input, maybe_batched_weight, maybe_batched_bias)
            expanded_args, expanded_kwargs = standard_kwargs(('bias',), args)
            res = forward_helper(nn.functional.linear, expanded_args, expanded_kwargs)
            expected = nn.functional.linear(input, weight, bias)
            self.assertEqual(res, expected)
            # standard_kwargs must split args/kwargs without copying them.
            self.assertEqual(len(expanded_args), 2)
            assert expanded_args[0] is args[0]  # avoids property checks in assertEquals
            assert expanded_args[1] is args[1]  # avoids property checks in assertEquals
            self.assertEqual(len(expanded_kwargs), 1)
            assert expanded_kwargs['bias'] is args[2]  # avoids property checks in assertEquals

    def test_forward_helper_failure_args(self, device):
        """forward_helper must reject invalid inputs with clear errors."""
        weight = torch.randn(5, 4, device=device)
        bias = torch.randn(5, device=device)
        # The first (input) argument may not itself be an ExpandedWeight.
        with self.assertRaisesRegex(RuntimeError, r"do not support inputs that are also ExpandedWeights."):
            input = ExpandedWeight(torch.randn(3, 4, requires_grad=True), 3, loss_reduction="sum")
            expanded_args, expanded_kwargs = standard_kwargs(('bias',), (input, weight, bias))
            forward_helper(nn.functional.linear, expanded_args, expanded_kwargs)
        with self.assertRaisesRegex(RuntimeError, r"requires a Tensor as the first input"):
            expanded_args, expanded_kwargs = standard_kwargs(('bias',), (3, weight, bias))
            forward_helper(nn.functional.linear, expanded_args, expanded_kwargs)
        with self.assertRaisesRegex(RuntimeError, r"requires a batch dimension but got an input of size 0"):
            expanded_args, expanded_kwargs = standard_kwargs(('bias',), (torch.tensor(3), weight, bias))
            forward_helper(nn.functional.linear, expanded_args, expanded_kwargs)
        with self.assertRaisesRegex(RuntimeError, r"0 is not a valid batch size for Expanded Weights"):
            expanded_args, expanded_kwargs = standard_kwargs(('bias',), (torch.randn(0, 1, 2), weight, bias))
            forward_helper(nn.functional.linear, expanded_args, expanded_kwargs)
        input = torch.randn(3, 4)
        for (weight_batched, bias_batched) in product([True, False], [True, False]):
            if not weight_batched and not bias_batched:
                continue
            maybe_batched_weight = weight
            maybe_batched_bias = bias
            # Batch size 4 deliberately mismatches the input's batch size 3.
            if weight_batched:
                maybe_batched_weight = ExpandedWeight(weight.clone().requires_grad_(), 4, loss_reduction="sum")
            if bias_batched:
                maybe_batched_bias = ExpandedWeight(bias.clone().requires_grad_(), 4, loss_reduction="sum")
            with self.assertRaisesRegex(RuntimeError, r"Expected ExpandedWeights to have batch size matching input"):
                expanded_args, expanded_kwargs = standard_kwargs(('bias',), (input, maybe_batched_weight, maybe_batched_bias))
                forward_helper(nn.functional.linear, expanded_args, expanded_kwargs)

    def test_set_grad_sample_if_exists(self, device):
        """set_grad_sample_if_exists must only attach `grad_sample` to
        ExpandedWeight-wrapped parameters."""
        def test_fn(a):
            return True
        orig_weight = torch.randn(4, device=device, requires_grad=True)
        expanded_weight = ExpandedWeight(orig_weight, 3, loss_reduction="sum")
        set_grad_sample_if_exists(expanded_weight, test_fn)
        self.assertTrue(hasattr(orig_weight, 'grad_sample'))
        self.assertTrue(orig_weight.grad_sample)
        # Plain tensors and non-tensors must be left untouched.
        basic_tensor = torch.randn(4, device=device)
        set_grad_sample_if_exists(basic_tensor, test_fn)
        self.assertFalse(hasattr(basic_tensor, 'grad_sample'))
        non_tensor = 3
        set_grad_sample_if_exists(non_tensor, test_fn)
        self.assertFalse(hasattr(non_tensor, 'grad_sample'))

    def test_set_grad_sample_if_exists_failure(self, device):
        """A requires-grad tensor that is not an ExpandedWeight is an error."""
        def test_fn(a):
            return True
        grad_tensor = torch.randn(4, requires_grad=True, device=device)
        with self.assertRaisesRegex(RuntimeError, r"does not support a mixture of ExpandedWeight parameters and normal Parameters"):
            set_grad_sample_if_exists(grad_tensor, test_fn)

    def test_unpack_expanded_weight_or_tensor(self, device):
        """unpack must return the wrapped tensor, pass plain tensors through,
        and return None for non-tensors."""
        input = torch.randn(3, requires_grad=True, device=device)
        self.assertEqual(input, unpack_expanded_weight_or_tensor(ExpandedWeight(input, 3, loss_reduction="sum")))
        input.requires_grad_(False)
        self.assertEqual(input, unpack_expanded_weight_or_tensor(input))
        self.assertTrue(unpack_expanded_weight_or_tensor(4) is None)

    def test_unpack_expanded_weight_or_tensor_with_custom_function(self, device):
        """Same as above but applying a user-supplied function to the result."""
        input = torch.randn(3, requires_grad=True, device=device)
        self.assertTrue(unpack_expanded_weight_or_tensor(ExpandedWeight(input, 3, loss_reduction="sum"), lambda x: x is input))
        input.requires_grad_(False)
        self.assertTrue(unpack_expanded_weight_or_tensor(input, lambda x: x is input))
        self.assertTrue(unpack_expanded_weight_or_tensor(4, lambda x: x is input) is None)

    def test_unpack_expanded_weight_or_tensor_failure(self, device):
        """A bare requires-grad tensor is rejected (it should have been
        wrapped as an ExpandedWeight)."""
        input = torch.randn(3, requires_grad=True, device=device)
        with self.assertRaisesRegex(RuntimeError, r"does not support a mixture of ExpandedWeight parameters and normal Parameters"):
            unpack_expanded_weight_or_tensor(input)
        with self.assertRaisesRegex(RuntimeError, r"does not support a mixture of ExpandedWeight parameters and normal Parameters"):
            unpack_expanded_weight_or_tensor(input, lambda x: x is input)

    def test_sum_over_all_but_batch_and_last_n(self, device):
        """sum_over_all_but_batch_and_last_n(input, n) must sum every dim
        except dim 0 and the trailing n dims (n == input.dim() - 1 is a no-op)."""
        input = torch.randn(1, 2, 3, 4, 5, device=device)
        res = sum_over_all_but_batch_and_last_n(input, 2)
        expected = input.sum((1, 2))
        self.assertEqual(res, expected)
        res = sum_over_all_but_batch_and_last_n(input, 0)
        expected = input.sum((1, 2, 3, 4))
        self.assertEqual(res, expected)
        res = sum_over_all_but_batch_and_last_n(input, 4)
        self.assertEqual(res, input)
class TestExpandedWeightFunctional(TestCase):
    """Functional tests: per-sample gradients computed via ExpandedWeight must
    match a reference computed with a per-sample for loop, for every OpInfo
    that declares `supports_expanded_weight`."""

    def _compare_ew_and_for_loop_per_sample_grads(self, op, sample_input, reduction):
        """Run `op` once with ExpandedWeight args and once per sample in a
        loop, then compare the resulting per-sample gradients.
        `reduction` is torch.sum or torch.mean and selects loss_reduction."""
        input = sample_input.input
        args = sample_input.args
        kwargs = sample_input.kwargs
        batch_size = input.shape[0] if len(input.shape) > 1 else 1

        # get per sample grads with ExpandedWeights objects
        loss_reduction = "sum" if reduction == torch.sum else "mean"
        (ew_input, ew_args, ew_kwargs) = make_expanded_weight(sample_input, batch_size, loss_reduction)
        diff_input_list = (ew_input,) + tuple(ew_args) + tuple(ew_kwargs.values())
        diff_input_list = [i for i in diff_input_list if is_diff_tensor(i)]
        diff_input_list = [i.orig_weight if isinstance(i, ExpandedWeight) else i for i in diff_input_list]
        if not diff_input_list:
            # Nothing differentiable — nothing to compare.
            return
        result = run_op(op, ew_input, *ew_args, **ew_kwargs)
        reduction(result).backward()  # grad doesn't work with ExpandedWeight because it calls __torch_function__
        expanded_weight_grad = tuple(i.grad_sample if hasattr(i, "grad_sample") else i.grad for i in diff_input_list)

        # get per sample grads with for loop
        func = partial(run_op, op)
        per_sample_grad = for_loop_per_sample_grad(batch_size, reduction, input, func, *args, **kwargs)

        # check equality
        self.assertEqual(len(per_sample_grad), len(expanded_weight_grad))
        if loss_reduction == "mean":
            # don't check equality of `input.grad`s since these vanilla tensors won't be scaled
            expanded_weight_grad = expanded_weight_grad[1:]
            per_sample_grad = per_sample_grad[1:]
        for (result_grad, expected_grad) in zip(expanded_weight_grad, per_sample_grad):
            self.assertEqual(result_grad, expected_grad)

    @ops(filter(lambda op: op.supports_expanded_weight, op_db), dtypes=OpDTypes.supported, allowed_dtypes=(torch.double,))
    def test_expanded_weight_per_sample_grad_sum(self, device, dtype, op):
        """Per-sample grads with loss_reduction="sum" match the loop reference."""
        sample_inputs = op.sample_inputs(device, dtype, requires_grad=True)
        for sample_input in supported_inputs(op, sample_inputs):
            if op.name == "nn.functional.embedding":  # embedding flips its argument order for autograd tests
                sample_input = SampleInput(sample_input.args[0], args=(sample_input.input,), kwargs=sample_input.kwargs)

            # NOTE(review): this local `reduction` is never used — the call
            # below passes torch.sum directly.  Candidate for removal.
            def reduction(x):
                return x.sum()
            self._compare_ew_and_for_loop_per_sample_grads(op, sample_input, torch.sum)

    @ops(filter(lambda op: op.supports_expanded_weight, op_db), dtypes=OpDTypes.supported, allowed_dtypes=(torch.double,))
    def test_expanded_weight_per_sample_grad_mean(self, device, dtype, op):
        """Per-sample grads with loss_reduction="mean" match the loop reference."""
        sample_inputs = op.sample_inputs(device, dtype, requires_grad=True)
        for sample_input in supported_inputs(op, sample_inputs):
            if op.name == "nn.functional.embedding":  # embedding flips its argument order for autograd tests
                sample_input = SampleInput(sample_input.args[0], args=(sample_input.input,), kwargs=sample_input.kwargs)
            self._compare_ew_and_for_loop_per_sample_grads(op, sample_input, torch.mean)

    @ops(filter(lambda op: op.supports_expanded_weight, op_db), dtypes=OpDTypes.supported, allowed_dtypes=(torch.double,))
    def test_expanded_weights_per_sample_grad_input_no_grad(self, device, dtype, op):
        """Per-sample grads still work when the input does not require grad."""
        sample_inputs = op.sample_inputs(device, dtype, requires_grad=True)
        for sample_input in supported_inputs(op, sample_inputs):
            if op.name == "nn.functional.embedding":  # embedding flips its argument order for autograd tests
                sample_input = SampleInput(sample_input.args[0], args=(sample_input.input,), kwargs=sample_input.kwargs)
            sample_input.input.requires_grad_(False)
            self._compare_ew_and_for_loop_per_sample_grads(op, sample_input, torch.mean)

    @ops(filter(lambda op: op.supports_expanded_weight, op_db), dtypes=OpDTypes.supported, allowed_dtypes=(torch.double,))
    def test_unsupported_expand_weights(self, device, dtype, op):
        """Inputs flagged unsupported must raise an Expanded Weights error."""
        sample_inputs = op.sample_inputs(device, dtype, requires_grad=True)
        unsupported_inputs = supported_inputs(op, sample_inputs, supported_inputs=False)
        for sample_input in unsupported_inputs:
            with self.assertRaisesRegex(RuntimeError, r"Expanded Weights"):
                if op.name == "nn.functional.embedding":  # embedding flips its argument order for autograd tests
                    sample_input = SampleInput(sample_input.args[0], args=(sample_input.input,), kwargs=sample_input.kwargs)
                input = sample_input.input

                batch_size = input.shape[0] if len(input.shape) > 1 else 1

                # get per sample grads with ExpandedWeights objects
                (ew_input, ew_args, ew_kwargs) = make_expanded_weight(sample_input, batch_size)
                result = run_op(op, ew_input, *ew_args, **ew_kwargs)

                diff_input_list = (ew_input,) + tuple(ew_args) + tuple(ew_kwargs.values())
                diff_input_list = [i for i in diff_input_list if is_diff_tensor(i)]
                diff_input_list = [i.orig_weight if isinstance(i, ExpandedWeight) else i for i in diff_input_list]
                result.sum().backward()  # grad doesn't work with ExpandedWeight because it calls __torch_function__

    @ops(filter(lambda op: op.supports_expanded_weight, op_db), dtypes=OpDTypes.supported)
    def test_expanded_weight_forward(self, device, dtype, op):
        """The forward pass with ExpandedWeight args matches the plain op."""
        sample_inputs = op.sample_inputs(device, dtype)
        for sample_input in supported_inputs(op, sample_inputs):
            if op.name == "nn.functional.embedding":  # embedding flips its argument order for autograd tests
                sample_input = SampleInput(sample_input.args[0].clone(),
                                           args=(sample_input.input.clone(),),
                                           kwargs=sample_input.kwargs)
                if "cuda" in device and "max_norm" in sample_input.kwargs and "padding_idx" in sample_input.kwargs:
                    self.skipTest("embedding is non-determinstic in this case, see issue #74679")
            batch_size = sample_input.input.shape[0] if len(sample_input.input.shape) > 1 else 1
            for loss_reduction in ["sum", "mean"]:
                (ew_input, ew_args, ew_kwargs) = make_expanded_weight(sample_input, batch_size, loss_reduction)
                expanded_weight_result = run_op(op, ew_input, *ew_args, **ew_kwargs)
                normal_result = run_op(op, sample_input.input, *sample_input.args, **sample_input.kwargs)
                self.assertEqual(expanded_weight_result, normal_result)

    def test_expanded_weight_error(self, device):
        """An op with no expanded-weights implementation must raise."""
        batch_size = 3
        sample_input = make_tensor((batch_size, 4), dtype=torch.float32, device=device, requires_grad=True)
        sample_weight = make_tensor((4), dtype=torch.float32, device=device, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, r"Expanded Weights encountered but cannot handle function"):
            torch.add(sample_input, ExpandedWeight(sample_weight, batch_size, loss_reduction="sum"))

    def _test_embedding_model(self, model, num_embedding, device):
        # Integer indices input for embedding-based models.
        batch_size = 32
        input = torch.randint(0, num_embedding, (batch_size, 5, 5), device=device)
        return self._test_model(partial(model, num_embedding=num_embedding), batch_size, input, device)

    def _test_conv_model(self, model, input_size, num_dim, device, loss_reduction="sum"):
        # Float image-like input with 3 channels and `num_dim` spatial dims.
        batch_size = 32
        input_ending = [input_size] * num_dim
        input = torch.randn([batch_size, 3] + input_ending, device=device)
        return self._test_model(partial(model, num_dim=num_dim), batch_size, input, device, loss_reduction)

    def _test_model(self, model, batch_size, input, device, loss_reduction="sum"):
        """Train-step a model via call_for_per_sample_grads and compare each
        parameter's grad_sample with per-sample grads computed one sample at
        a time."""
        model = model(10).to(device)
        targets = torch.randint(0, 10, (batch_size,), device=device)
        criterion = CrossEntropyLoss(reduction=loss_reduction)
        result = call_for_per_sample_grads(model, loss_reduction=loss_reduction)(input)
        loss = criterion(result, targets)
        loss.backward()
        result = []
        for weight in model.parameters():
            result.append(weight.grad_sample)
            # Clean up so later iterations/tests don't see stale samples.
            del weight.grad_sample
        expected = []
        for i in range(batch_size):
            loss = criterion(model(input[i].unsqueeze(0)), targets[i].unsqueeze(0))
            expected.append(torch.autograd.grad(loss, model.parameters(), torch.ones_like(loss)))
        expected = [torch.stack(grad) for grad in zip(*expected)]
        for (res, exp) in zip(result, expected):
            self.assertEqual(res, exp, atol=1e-4, rtol=5e-5)

    def test_cnn_model_sum(self, device):
        """End-to-end CNN with loss_reduction="sum"."""
        def convnet(num_classes, num_dim):
            return nn.Sequential(
                nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1),
                nn.ReLU(),
                nn.AvgPool2d(kernel_size=2, stride=2),
                nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
                nn.ReLU(),
                nn.AvgPool2d(kernel_size=2, stride=2),
                nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
                nn.ReLU(),
                nn.AvgPool2d(kernel_size=2, stride=2),
                nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
                nn.ReLU(),
                nn.AdaptiveAvgPool2d((1, 1)),
                nn.Flatten(start_dim=1, end_dim=-1),
                nn.Linear(128, num_classes, bias=True),
            )

        return self._test_conv_model(convnet, 28, 2, device)

    def test_cnn_model_mean(self, device):
        """End-to-end CNN with loss_reduction="mean"."""
        def convnet(num_classes, num_dim):
            return nn.Sequential(
                nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1),
                nn.ReLU(),
                nn.AvgPool2d(kernel_size=2, stride=2),
                nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
                nn.ReLU(),
                nn.AvgPool2d(kernel_size=2, stride=2),
                nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
                nn.ReLU(),
                nn.AvgPool2d(kernel_size=2, stride=2),
                nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
                nn.ReLU(),
                nn.AdaptiveAvgPool2d((1, 1)),
                nn.Flatten(start_dim=1, end_dim=-1),
                nn.Linear(128, num_classes, bias=True),
            )

        return self._test_conv_model(convnet, 28, 2, device, loss_reduction="mean")

    @parametrize('num_dim', [1, 2, 3])
    def test_instance_norm_model(self, num_dim, device):
        """Conv + InstanceNorm model across 1/2/3 spatial dims."""
        def instance_norm_model(num_classes, num_dim):
            conv_layer = nn.Conv1d if num_dim == 1 else nn.Conv2d if num_dim == 2 else nn.Conv3d
            norm_layer = nn.InstanceNorm1d if num_dim == 1 else nn.InstanceNorm2d if num_dim == 2 else nn.InstanceNorm3d
            return nn.Sequential(
                conv_layer(3, 32, kernel_size=3, stride=1, padding=1),
                norm_layer(32, affine=True),
                nn.Flatten(start_dim=1, end_dim=-1),
                nn.Linear(32 * (7 ** num_dim), num_classes, bias=True),
            )
        return self._test_conv_model(instance_norm_model, 7, num_dim, device)

    @parametrize('num_dim', [1, 2, 3])
    def test_group_norm_model(self, num_dim, device):
        """Conv + GroupNorm model across 1/2/3 spatial dims."""
        def group_norm_model(num_classes, num_dim):
            conv_layer = nn.Conv1d if num_dim == 1 else nn.Conv2d if num_dim == 2 else nn.Conv3d
            return nn.Sequential(
                conv_layer(3, 32, kernel_size=3, stride=1, padding=1),
                nn.GroupNorm(8, 32, affine=True),
                nn.Flatten(start_dim=1, end_dim=-1),
                nn.Linear(32 * (7 ** num_dim), num_classes, bias=True),
            )
        return self._test_conv_model(group_norm_model, 7, num_dim, device)

    @parametrize('num_dim', [1, 2, 3])
    def test_layer_norm_model(self, num_dim, device):
        """Conv + LayerNorm model across 1/2/3 spatial dims."""
        def layer_norm_model(num_classes, num_dim):
            conv_layer = nn.Conv1d if num_dim == 1 else nn.Conv2d if num_dim == 2 else nn.Conv3d
            normalized_shape = [7] * num_dim
            return nn.Sequential(
                conv_layer(3, 32, kernel_size=3, stride=1, padding=1),
                nn.LayerNorm(normalized_shape, elementwise_affine=True),
                nn.Flatten(start_dim=1, end_dim=-1),
                nn.Linear(32 * (7 ** num_dim), num_classes, bias=True),
            )
        return self._test_conv_model(layer_norm_model, 7, num_dim, device)

    def test_embedding_model(self, device):
        """Embedding + Linear model end-to-end."""
        def embedding_model(num_classes, num_embedding):
            return nn.Sequential(
                nn.Embedding(num_embedding, 15),
                nn.Flatten(start_dim=1, end_dim=-1),
                nn.Linear(375, num_classes, bias=True)
            )
        return self._test_embedding_model(embedding_model, 16, device)

    def test_group_norm_error(self, device):
        # group norm has to call native_group_norm. This checks that it hits the same errors
        # that normal group norm would
        N = 3
        C = 5
        inp = torch.randn(N, C)
        with self.assertRaisesRegex(RuntimeError, r"Expected number of channels in input to be divisible"):
            F.group_norm(inp, 2)  # 5 is not divisible by 2
class TestExpandedWeightModule(TestCase):
    """Module-level tests for per-sample gradients.  Concrete test methods are
    attached to this class dynamically from the legacy nn test descriptions
    (see the generation loop after ContextManagerTests)."""

    def _do_test(self, module, input):
        """Compare per-sample grads from call_for_per_sample_grads against a
        reference computed one sample at a time."""
        batch_size = input.shape[0]
        # Only float inputs can themselves require grad (Embedding gets long).
        diff_input = input.dtype == torch.float or input.dtype == torch.double
        if diff_input:
            input.requires_grad_()
        with freeze_rng_state():
            # get per sample grads with ExpandedWeights context manager
            actual_res = call_for_per_sample_grads(module, loss_reduction="sum")(input).sum()
            actual_res.backward()
            actual_grads = []
            for param in module.parameters():
                actual_grads.append(param.grad_sample)
                del param.grad_sample
            if diff_input:
                actual_grads.append(input.grad.clone())
                input.grad = torch.zeros_like(input.grad)

            # get per sample grads with a for loop
            expected_res = torch.tensor(0., device=input.device, dtype=torch.double)
            expected_grads = []
            for i in range(batch_size):
                input_slice = input[i]
                diff_params = module.parameters()
                if diff_input:
                    diff_params = chain(diff_params, (input_slice,))
                res = module(input_slice.unsqueeze(0)).sum()
                out_grads = torch.autograd.grad(res, diff_params, torch.ones_like(res), allow_unused=True)
                expected_grads.append(out_grads)
                expected_res += res
            expected_grads = tuple(torch.stack(grad) for grad in zip(*expected_grads))
        self.assertEqual(actual_res, expected_res)
        [self.assertEqual(actual, expected) for (actual, expected) in zip(actual_grads, expected_grads)]

    def _do_test_multi_input(self, module, input):
        """Like _do_test, but the module is invoked twice per forward, so each
        grad_sample should be exactly twice the single-pass reference."""
        class TestModule(nn.Module):
            def __init__(self, module):
                super().__init__()
                self.module = module

            def forward(self, input):
                # Two calls on the same input — grads accumulate 2x.
                return self.module(input) + self.module(input)

        batch_size = input.shape[0]
        diff_input = input.dtype == torch.float or input.dtype == torch.double
        if diff_input:
            input.requires_grad_()
        with freeze_rng_state():
            # get per sample grads with ExpandedWeights context manager, calling .backward() twice
            test_module = TestModule(module)
            actual_res = call_for_per_sample_grads(test_module, loss_reduction="sum")(input).sum()
            actual_res.backward()
            actual_grads = []
            for param in module.parameters():
                actual_grads.append(param.grad_sample)
                del param.grad_sample
            if diff_input:
                actual_grads.append(input.grad.clone())
                input.grad = torch.zeros_like(input.grad)

            # get per sample grads with a for loop, running over the input twice
            expected_grads = []
            for i in range(batch_size):
                input_slice = input[i]
                diff_params = module.parameters()
                if diff_input:
                    diff_params = chain(diff_params, (input_slice,))
                res = module(input_slice.unsqueeze(0)).sum()
                out_grads = torch.autograd.grad(res, diff_params, torch.ones_like(res), allow_unused=True)
                expected_grads.append(out_grads)
        expected_grads = tuple(torch.stack(grad) for grad in zip(*expected_grads))
        expected_grads = tuple(expected_grad for expected_grad in expected_grads if expected_grad is not None)
        # NOTE(review): the outer `assert` on a list comprehension only checks
        # the list is non-empty; the real checks are the assertEquals inside.
        assert [self.assertEqual(actual, 2 * expected) for (actual, expected) in zip(actual_grads, expected_grads)]

    def test_per_sample_api_failing(self):
        """Invalid arguments to call_for_per_sample_grads must raise clearly."""
        module = nn.Linear(10, 10)
        input = torch.randn(64, 10)
        with self.assertRaisesRegex(RuntimeError, r"Module passed must be nn.Module"):
            call_for_per_sample_grads("fail")(input)
        with self.assertRaisesRegex(RuntimeError, r"Batch size passed must be None or an integer"):
            call_for_per_sample_grads(module, batch_size=6.4)(input)
        with self.assertRaisesRegex(RuntimeError, r"Batch size must be positive"):
            call_for_per_sample_grads(module, batch_size=-64)(input)
        with self.assertRaisesRegex(RuntimeError, r"incorrect for multiple calls"):
            loss = call_for_per_sample_grads(module)(input).sum()
            loss.backward()  # populate grad_sample fields
            call_for_per_sample_grads(module)(input)

        module = nn.Linear(10, 10)  # reset to not have grad_sample fields
        with self.assertRaisesRegex(RuntimeError, r"Expected loss_reduction argument to be sum or mean"):
            call_for_per_sample_grads(module, loss_reduction="")(input)

    def test_per_sample_api_compute_batch_size(self):
        """Batch size is inferred from the inputs and must be consistent
        across all of them (positional or keyword)."""
        class CustomModule(nn.Module):
            def __init__(self):
                super().__init__()
                self.linear = nn.Linear(5, 5)

            def forward(self, input1, input2):
                return self.linear(input1) + self.linear(input2)

        module = CustomModule()
        input1 = torch.randn(4, 5)
        input2 = torch.randn(5, 5)

        with self.assertRaisesRegex(RuntimeError, "found at least one input with batch size 4 and one with batch size 5"):
            call_for_per_sample_grads(module)(input1, input2)

        input2 = torch.randn(4, 5)
        call_for_per_sample_grads(module)(input1, input2)

        module = CustomModule()
        call_for_per_sample_grads(module)(input1, input2=input2)

        module = CustomModule()
        call_for_per_sample_grads(module)(input1=input1, input2=input2)

    def test_per_sample_api_compute_batch_size_not_pytreeable(self):
        """When inputs aren't pytree-able, batch size can't be inferred and
        must be passed explicitly."""
        @dataclass
        class NonPytreeableTuple:
            elem1: torch.Tensor
            elem2: torch.Tensor

        class CustomModule(nn.Module):
            def __init__(self):
                super().__init__()
                self.linear = nn.Linear(5, 5)

            def forward(self, input1, input2):
                return self.linear(input1.elem1) + self.linear(input1.elem2)

        input = NonPytreeableTuple(torch.randn(4, 5), torch.randn(4, 5))
        model = CustomModule()
        with self.assertRaisesRegex(RuntimeError, "ExpandedWeights cannot compute the batch size from the inputs"):
            call_for_per_sample_grads(model)(input, "")

        # would prefer for it to error because input is not pytree-able but that's hard to detect
        with self.assertRaisesRegex(RuntimeError, "Expected ExpandedWeights to have batch size matching input"):
            call_for_per_sample_grads(model)(input, torch.randn(5))

        model = CustomModule()  # TODO: functional call bug, sam will fix
        call_for_per_sample_grads(model)(input, torch.randn(4, 5))
        model = CustomModule()
        call_for_per_sample_grads(model, batch_size=4)(input, torch.randn(5))
class ContextManagerTests(TestBase):
    """Adapter turning a legacy nn-module test description (module_tests /
    new_module_tests) into expanded-weights per-sample-grad tests; instances
    are bound to TestExpandedWeightModule by the generation loop below."""

    def __init__(self, *args, **kwargs):
        # Which device flavors this particular module test should run on.
        self.test_cpu = kwargs.get('test_cpu', True)
        self.test_cuda = kwargs.get('test_cuda', True)
        super().__init__(*args, **kwargs)

    @property
    def constructor_args(self):
        return self._get_arg('constructor_args', False)

    def test_context_manager(self, test_case, device):
        """Single-forward per-sample-grad check for this module description."""
        kwargs = {'device': device, 'dtype': torch.double}
        module = self.constructor(*self.constructor_args).to(**kwargs)
        if 'Embedding' in self.get_name():
            # Embedding inputs are indices, not floats.
            kwargs['dtype'] = torch.long
        input = self._get_input().to(**kwargs)
        if len(input.shape) == 0 or input.shape[0] == 0:
            raise unittest.SkipTest("Can't get per sample gradients when no batch dim or batch dim is 0")
        if self.constructor == torch.nn.Linear and len(input.shape) == 1:
            raise unittest.SkipTest("Can't get per sample gradients for input of rank 1")
        test_case._do_test(module, input)

    def test_context_manager_multiple_inputs(self, test_case, device):
        """Double-forward (shared-module) per-sample-grad check."""
        module = self.constructor(*self.constructor_args).to(device)
        input = self._get_input()
        if len(input.shape) == 0 or input.shape[0] == 0:
            raise unittest.SkipTest("Can't get per sample gradients when no batch dim or batch dim is 0")
        if self.constructor == torch.nn.Linear and len(input.shape) == 1:
            raise unittest.SkipTest("Can't get per sample gradients for input of rank 1")
        test_case._do_test_multi_input(module, input)
def filter_supported_tests(t):
    """Return True iff legacy-nn test description `t` targets a module that
    ExpandedWeights supports.

    `t` is a dict that identifies its module either by 'module_name' (exact
    match) or by 'fullname' (prefix match, e.g. 'Linear_no_bias').  Padded
    convolution variants are excluded because ExpandedWeights cannot handle
    string padding.

    Fixes vs. original: the fall-through case now returns False explicitly
    (it previously returned None implicitly), and `any` consumes a generator
    instead of materializing a throwaway list.
    """
    supported_modules = ['Linear', 'Conv1d', 'Conv2d', 'Conv3d', 'Embedding', 'LayerNorm', 'GroupNorm', 'InstanceNorm']
    if 'module_name' in t and t['module_name'] in supported_modules:
        return True
    if 'fullname' in t and any(module + "_" in t['fullname'] for module in supported_modules):
        # Conv tests with padding in the name use string padding — unsupported.
        return not ('Conv' in t['fullname'] and 'pad' in t['fullname'])
    return False
# TODO: Once all of these use ModuleInfo, replace with ModuleInfo tests
# These currently use the legacy nn tests
supported_modules = ['Linear', 'Conv1d', 'Conv2d', 'Conv3d', 'Embedding', 'LayerNorm', 'GroupNorm', 'InstanceNorm']
supported_tests = [t for t in module_tests + new_module_tests if filter_supported_tests(t)]
for test_param in supported_tests:
    if 'constructor' not in test_param:
        name = test_param.pop('module_name')
        test_param['constructor'] = getattr(nn, name)
    decorator = test_param.pop('decorator', None)
    test = ContextManagerTests(**test_param)
    test_name = test.get_name()
    if hasattr(TestExpandedWeightModule, test_name):
        raise RuntimeError('Found two tests with the same name: ' + test_name)
    test_name_multi_input = test.get_name() + "_multiple_inputs"
    if hasattr(TestExpandedWeightModule, test_name_multi_input):
        # Bug fix: report the multi-input name that actually collided.
        raise RuntimeError('Found two tests with the same name: ' + test_name_multi_input)

    def _maybe_decorate(fn, decorator=decorator):
        # Bug fix: the original did `fn = decorator(fn)` on an undefined name
        # `fn` (NameError whenever a test description carried a decorator,
        # and the decorator was never applied otherwise).  Apply it to each
        # generated test function instead.  `decorator=decorator` binds the
        # current loop value, avoiding late-binding surprises.
        return fn if decorator is None else decorator(fn)

    if test.test_cpu:
        setattr(TestExpandedWeightModule, test_name,
                _maybe_decorate(lambda self, test=test: test.test_context_manager(self, 'cpu')))
        setattr(TestExpandedWeightModule, test_name_multi_input,
                _maybe_decorate(lambda self, test=test: test.test_context_manager_multiple_inputs(self, 'cpu')))
    if TEST_CUDA and test.test_cuda:
        # since this checks derivatives, only use double for precision
        setattr(TestExpandedWeightModule, test_name + '_cuda_double',
                _maybe_decorate(lambda self, test=test: test.test_context_manager(self, 'cuda')))
# ------------- HELPER FUNCTIONS -----------------
def run_op(op, input, *args, **kwargs):
    r"""
    Call ``op`` with the argument order its OpInfo expects.

    Embedding's OpInfo swaps input and weight so that autograd checks only the
    weight's derivative (the input is an integer tensor and therefore not
    differentiable); mirror that swapped ordering for the embedding op.
    """
    embedding_style = op.name == "nn.functional.embedding"
    return op(args[0], input, **kwargs) if embedding_style else op(input, *args, **kwargs)
def make_expanded_weight(sample_input, batch_size, loss_reduction="sum"):
    """Build (input, args, kwargs) from a SampleInput, wrapping differentiable tensors.

    Every tensor that requires grad (or is already an ExpandedWeight) is wrapped
    in a fresh ExpandedWeight; other tensors are cloned; non-tensors pass
    through unchanged.
    """
    def wrap_or_clone(candidate):
        if is_diff_tensor(candidate):
            return ExpandedWeight(torch.clone(candidate), batch_size, loss_reduction)
        return clone_if_tensor(candidate)

    expanded_input = clone_if_tensor(sample_input.input)
    expanded_args = tuple(wrap_or_clone(a) for a in sample_input.args)
    expanded_kwargs = {key: wrap_or_clone(value) for key, value in sample_input.kwargs.items()}
    return expanded_input, expanded_args, expanded_kwargs
def supported_inputs(op, sample_inputs, supported_inputs=True):
    r"""
    Partition ``sample_inputs`` by ExpandedWeights support.

    ExpandedWeights cannot handle samples without a batch dimension or samples
    whose computation would mix data across the batch. With the default
    ``supported_inputs=True`` return only the usable samples; with ``False``
    return the rejected ones instead.
    """
    convolutions = ["nn.functional.conv1d", "nn.functional.conv2d", "nn.functional.conv3d"]
    batched_input_size = dict(zip(convolutions, [3, 4, 5]))

    def accepts(sample):
        if op.name == "nn.functional.linear":
            ok = sample.input.dim() > 1  # rank-1 input means there is no batch dim
        elif op.name == "nn.functional.layer_norm":
            # normalizing over the whole input shape would mix samples across the batch
            ok = sample.input.shape != sample.args[0]
        elif op.name in convolutions:
            # string padding needs padding computation on the Python level (unsupported)
            ok = 'padding' not in sample.kwargs or not isinstance(sample.kwargs['padding'], str)
            ok = ok and sample.input.dim() == batched_input_size[op.name]
        elif op.name == "nn.functional.embedding":
            ok = sample.args[0].dim() > 1  # rank-1 indices mean there is no batch size
        else:
            ok = True
        ok = ok and sample.input.shape[0] > 0  # 0 is not a valid batch size
        return ok if supported_inputs else not ok

    return [sample for sample in sample_inputs if accepts(sample)]
def for_loop_per_sample_grad(batch_size, reduction, input, func, *args, **kwargs):
    """Reference implementation: compute per-sample gradients one sample at a time.

    For each sample, runs ``reduction(func(sample.unsqueeze(0), ...))`` and
    differentiates with respect to the sample plus every tensor in
    ``args``/``kwargs`` that requires grad. Returns a tuple with one stacked
    gradient tensor per differentiable input.
    """
    grads_per_sample = []
    for sample_idx in range(batch_size):
        sample = input[sample_idx]
        loss = reduction(func(sample.unsqueeze(0), *args, **kwargs))
        candidates = (sample,) + tuple(args) + tuple(kwargs.values())
        diff_inputs = [c for c in candidates if isinstance(c, torch.Tensor) and c.requires_grad]
        grads_per_sample.append(
            torch.autograd.grad(loss, diff_inputs, torch.ones_like(loss), allow_unused=True))
    if len(grads_per_sample) == batch_size:
        grads_per_sample = tuple(torch.stack(g) for g in zip(*grads_per_sample))
    return grads_per_sample
def is_diff_tensor(t):
    """True for ExpandedWeights and for plain tensors that require grad."""
    if isinstance(t, ExpandedWeight):
        return True
    return isinstance(t, torch.Tensor) and t.requires_grad
def clone_if_tensor(t):
    """Return a detached clone of ``t`` preserving requires_grad if it is a tensor, else ``t`` itself."""
    if not isinstance(t, torch.Tensor):
        return t
    cloned = torch.clone(t).detach()
    cloned.requires_grad_(t.requires_grad)
    return cloned
# Register device-generic test classes (creates per-device variants, e.g. CPU/CUDA)
instantiate_device_type_tests(TestExpandedWeightHelperFunction, globals())
instantiate_device_type_tests(TestExpandedWeightFunctional, globals())
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_expanded_weights.py |
# Owner(s): ["oncall: jit"]
import contextlib
import unittest
import os
import random
import enum
import copy
from functools import reduce
import operator
import warnings
import torch
from torch.nn import functional
from torch.profiler import profile, ProfilerActivity
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db, SampleInput
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \
is_iterable_of_tensors, freeze_rng_state
from torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing import FileCheck
from jit.test_fuser_common import TestFuserCommon # noqa: F401
import itertools
import numpy as np
import math
from torch.autograd.gradcheck import gradcheck
from typing import List
# nvfuser only runs on CUDA builds that are not ROCm
RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
CUDA_MAJOR, CUDA_MINOR = 0, 0
if RUN_NVFUSER and torch.version.cuda is not None:
    # e.g. "11.6" -> (11, 6)
    CUDA_MAJOR, CUDA_MINOR = (int(x) for x in torch.version.cuda.split('.')[:2])
# Environment toggles read by the nvfuser runtime (must be set before use)
os.environ['PYTORCH_NVFUSER_ENABLE'] = 'linear_decomposition,conv_decomposition'
os.environ['PYTORCH_NVFUSER_DISABLE'] = 'fallback,fma,unroll_with_rng'
os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'
# TODO: enable complex once the extremal cases in OpInfo are fixed
# see issue https://github.com/csarofeen/pytorch/issues/1730
# os.environ['PYTORCH_NVFUSER_ENABLE'] = 'complex'
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
    # route fusion through the profiling executor instead of the texpr fuser
    torch._C._jit_set_texpr_fuser_enabled(False)
    torch._C._jit_set_profiling_executor(True)
    torch._C._jit_set_profiling_mode(True)
# Node kinds emitted by the nvfuser pass, matched against graphs in the tests
FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'
# TODO: revert disabled alias ops
ALIAS_TEST_DISABLED = True
@contextlib.contextmanager
def nvfuser_singleton_fusion(flag):
    """Temporarily set nvfuser single-node mode to ``flag``; restore the prior value on exit."""
    previous = torch._C._jit_set_nvfuser_single_node_mode(flag)
    try:
        yield
    finally:
        torch._C._jit_set_nvfuser_single_node_mode(previous)
@contextlib.contextmanager
def nvfuser_horizontal_fusion(flag):
    """Temporarily set nvfuser horizontal-fusion mode to ``flag``; restore the prior value on exit."""
    previous = torch._C._jit_set_nvfuser_horizontal_mode(flag)
    try:
        yield
    finally:
        torch._C._jit_set_nvfuser_horizontal_mode(previous)
def is_pre_volta():
    """True when the active CUDA device predates Volta (compute capability major < 7)."""
    if not RUN_NVFUSER:
        return False
    capability_major = torch.cuda.get_device_properties(torch.cuda.current_device()).major
    return capability_major < 7
# Capability flags derived from the active device
TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
TEST_LARGE_TENSOR = RUN_NVFUSER
if RUN_NVFUSER:
    torch.ones(1).cuda()  # initialize cuda context
    # large-tensor tests require at least ~12 GB of device memory
    TEST_LARGE_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 12e9
class CudaFuserTestOptions():
    """Save, override, and restore the global JIT fusion flags used by the nvfuser tests.

    Constructing an instance disables the CPU/GPU texpr fusers, nvfuser guard
    mode, and autodiff subgraph inlining, enables autocast, and (when CUDA is
    available) turns nvfuser on. ``restore()`` reverts every change.
    """
    def __init__(self):
        # remember the previous fuser settings so restore() can put them back
        self.old_cpu_fuse = torch._C._jit_can_fuse_on_cpu()
        self.old_gpu_fuse = torch._C._jit_can_fuse_on_gpu()
        # disable the other fusers so nvfuser handles fusion exclusively
        torch._C._jit_override_can_fuse_on_cpu(False)
        torch._C._jit_override_can_fuse_on_gpu(False)
        self.old_guard = torch._C._jit_set_nvfuser_guard_mode(False)
        torch._C._debug_set_autodiff_subgraph_inlining(False)
        self.old_value = torch._C._jit_set_autocast_mode(True)
        if(RUN_CUDA):
            self.old_nvfuser = torch._C._jit_set_nvfuser_enabled(True)
    def restore(self):
        # undo every flag change made in __init__ (reverse of the setup above)
        if(RUN_CUDA):
            torch._C._jit_set_nvfuser_enabled(self.old_nvfuser)
        torch._C._jit_override_can_fuse_on_cpu(self.old_cpu_fuse)
        torch._C._jit_override_can_fuse_on_gpu(self.old_gpu_fuse)
        torch._C._jit_set_nvfuser_guard_mode(self.old_guard)
        torch._C._debug_set_autodiff_subgraph_inlining(True)
        torch._C._jit_set_autocast_mode(self.old_value)
class TestCudaFuser(JitTestCase):
    def assertEqual(self, *args, **kwargs):
        # Force exact layout comparison for every assertEqual in this suite.
        # NOTE(review): super(JitTestCase, self) deliberately skips JitTestCase's
        # own assertEqual override and dispatches to its base class -- confirm.
        kwargs["exact_layout"] = True
        super(JitTestCase, self).assertEqual(*args, **kwargs)
def _getSubgraphInFusion(self, graph):
num_node = 0
subgraph = None
def count(block, ret):
for n in block.nodes():
if n.kind() == FUSION_GROUP:
ret[0] = ret[0] + 1
self.assertTrue(n.hasAttribute('Subgraph'))
ret[1] = n.g('Subgraph')
for block in n.blocks():
count(block, ret)
ret = [num_node, subgraph]
count(graph, ret)
self.assertEqual(ret[0], 1)
return ret[1]
    def setUp(self):
        """Prepare per-test state: re-enable batch-norm fusion, build fixtures, flip global JIT flags."""
        super(TestCudaFuser, self).setUp()
        # batch-norm ops may be globally skipped by nvfuser; temporarily
        # un-skip them here and remember which ones to re-skip in tearDown
        self.skip_node_list = []
        disabled_ops = ("aten::batch_norm",
                        "aten::_batch_norm_impl_index",
                        "aten::_batch_norm_impl_index_backward",
                        "aten::native_batch_norm_backward")
        for op in disabled_ops:
            disabled_flag = torch._C._jit_set_nvfuser_skip_node_kind(op, False)
            if disabled_flag:
                torch._C._jit_set_nvfuser_skip_node_kind(op, True)
                self.skip_node_list.append(op)
        # cpu backup to avoid errors in case this is run on a CPU-only machine
        dev = 'cuda' if RUN_NVFUSER else 'cpu'
        # edge-case inputs shared by the unary/binary helpers
        self.special_values = torch.tensor(
            [float("-inf"), -10, -math.pi,
             -1, -0.5, 0, 1, 0.5,
             math.pi, 10, float("inf"),
             float("nan")], dtype=torch.float, device=dev)
        self.int_types = [
            torch.int8,
            torch.uint8,
            torch.int16,
            torch.int32,
            torch.int64
        ]
        # tensor dtypes nvfuser is expected to fuse
        self.support_tensor_dtypes = [
            torch.int32,
            torch.int64,
            torch.float16,
            torch.float32,
            torch.float64,
            torch.bool,
            torch.complex64,
            torch.complex128,
        ]
        if TEST_BF16:
            self.support_tensor_dtypes.append(torch.bfloat16)
        if(RUN_NVFUSER):
            # flips global fusion flags; restored in tearDown
            self.cuda_fuser_options = CudaFuserTestOptions()
    def tearDown(self):
        """Revert the skip-node and global fusion flags changed by setUp."""
        # restoring skip node to the configuration before tests
        for op in self.skip_node_list:
            disabled_flag = torch._C._jit_set_nvfuser_skip_node_kind(op, False)
            if not disabled_flag:
                torch._C._jit_set_nvfuser_skip_node_kind(op, True)
        if(RUN_NVFUSER):
            self.cuda_fuser_options.restore()
        super(TestCudaFuser, self).tearDown()
def _run_helper(self, jit_op, op, *args, check_stride=False, num_fusion=1, check_runs=1):
seed = 123
torch.cuda.manual_seed_all(seed)
jit_o = jit_op(*args)
for i in range(check_runs):
torch.cuda.manual_seed_all(seed + i)
jit_o = jit_op(*args)
torch.cuda.manual_seed_all(seed + i)
o = op(*args)
if type(jit_o) is torch.Tensor:
jit_o = [jit_o, ]
o = [o, ]
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
if check_stride:
self.assertEqual(oo.stride(), jit_oo.stride())
self.assertGraphContainsExactly(jit_op.graph_for(*args), FUSION_GUARD, num_fusion, consider_subgraphs=True)
def _run_training_helper(self, jit_op, op, grads, *args):
torch.cuda.manual_seed_all(123)
jit_o = jit_op(*args)
jit_g = jit_o.backward(grads)
torch.cuda.manual_seed_all(123)
jit_o = jit_op(*args)
jit_g = jit_o.backward(grads)
torch.cuda.manual_seed_all(123)
jit_o = jit_op(*args)
jit_g = jit_o.backward(grads)
torch.cuda.manual_seed_all(123)
o = op(*args)
g = o.backward(grads)
self.assertEqual(o, jit_o)
self.assertEqual(g, jit_g)
self.assertGraphContainsExactly(jit_op.graph_for(*args), FUSION_GUARD, 1, consider_subgraphs=True)
bwd_graph = list(
list(jit_op.get_debug_state().execution_plans.values())[
0].code.grad_executor_states()[0].execution_plans.values()
)[0].graph
self.assertGraphContainsExactly(bwd_graph, FUSION_GUARD, 1, consider_subgraphs=True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_half(self):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: float):
o_16 = torch.add(x, y)
o_32_a = torch.add(y, z, alpha=alpha)
o_32_b = torch.add(o_16, z)
return (o_16, o_32_a, o_32_b)
t_jit = torch.jit.script(t)
alpha = 0.5
# stick to integers, this avoid the numerical difference due to our
# promotion
x = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda")
y = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda")
z = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda")
jit_o = t_jit(x, y, z, alpha)
jit_o = t_jit(x, y, z, alpha)
o = t(x, y, z, alpha)
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
self.assertGraphContains(t_jit.graph_for(x, y, z, alpha), FUSION_GUARD)
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_bfloat(self):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: float):
o_16 = torch.add(x, y)
o_32_a = torch.add(y, z, alpha=alpha)
o_32_b = torch.add(o_16, z)
return (o_16, o_32_a, o_32_b)
t_jit = torch.jit.script(t)
alpha = 0.5
# stick to integers, this avoid the numerical difference due to our
# promotion
x = torch.randint(0, 256, (4, 8)).to(dtype=torch.bfloat16, device="cuda")
y = torch.randint(0, 256, (4, 8)).to(dtype=torch.bfloat16, device="cuda")
z = torch.randint(0, 256, (4, 8)).to(dtype=torch.bfloat16, device="cuda")
jit_o = t_jit(x, y, z, alpha)
jit_o = t_jit(x, y, z, alpha)
o = t(x, y, z, alpha)
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
self.assertGraphContains(t_jit.graph_for(x, y, z, alpha), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_const(self):
def t(x, y):
o = x + y
o = o + 2.0
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, dtype=torch.float, device="cuda")
y = torch.randn(4, 8, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_chunk(self):
def t(x, y, z, q):
o = x + q
x0, x1 = torch.chunk(o, 2)
o = x0 + x1
o = o + y
o = o * z
o = torch.relu(o)
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, dtype=torch.float, device="cuda")
y = torch.randn(2, 8, dtype=torch.float, device="cuda")
z = torch.randn(2, 8, dtype=torch.float, device="cuda")
q = torch.randn(4, 8, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, z, q)
jit_o = t_jit(x, y, z, q)
o = t(x, y, z, q)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, z, q), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_reduction_dtypes_axis(self):
for op in [torch.sum, torch.mean, torch.amax, torch.var, torch.std]:
for dtype in [torch.float16, torch.float32, torch.double]:
for axis in [-1, 2, 0]:
def make_func(op):
def func(x: torch.Tensor):
o = torch.mul(x, 2.0)
o = op(o, dim=[axis])
return o
return func
x = torch.randn(8, 4, 16, dtype=dtype, device="cuda")
t = make_func(op)
t_jit = torch.jit.trace(t, x)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-4))
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_variance(self):
for op in [torch.var, torch.std]:
for dtype in [torch.float16, torch.float32, torch.double]:
for axis in [-2, -1, 2, 1]:
for unbiased in [False, True]:
def make_func(op):
def func(x: torch.Tensor):
o = torch.mul(x, 2.0)
o = op(o, dim=[axis])
return o
return func
x = torch.randn(8, 4, 16, dtype=dtype, device="cuda")
t = make_func(op)
t_jit = torch.jit.trace(t, x)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-4))
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_scalar_input(self):
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = x + y
o = o + z
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(4, 8, 1, 32, dtype=torch.float, device="cuda")
y = y.expand(4, 8, 32, 32)
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, 2.0), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_0(self):
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = x + y
o = o + z
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))
self.assertGraphContainsExactly(subgraph, 'aten::add', 2, consider_subgraphs=False)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_1(self):
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = x + y
o = o + z
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(1, 32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))
self.assertGraphContainsExactly(subgraph, 'aten::add', 2, consider_subgraphs=False)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_2(self):
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = x + y
o = o + z
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 1, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(8, 32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))
self.assertGraphContainsExactly(subgraph, 'aten::add', 2, consider_subgraphs=False)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_3(self):
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = x + y
o = o + z
return o
t_jit = torch.jit.script(t)
x = torch.randn(8, 17, 8, dtype=torch.float, device="cuda")
y = torch.randn(8, 17, 1, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))
self.assertGraphContainsExactly(subgraph, 'aten::add', 2, consider_subgraphs=False)
# test_broadcasting_partition_logic_X
# Testing partition logic that is capable to avoid creating unsupported
# broadcasting semantics in CudaFusionGroup
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_partition_logic_0(self):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
x = x + 12.0
o1 = x + y
o2 = x + z
o = o1 + o2
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, 6, 8, dtype=torch.float32, device="cuda")
y = torch.randn(8, 6, 8, dtype=torch.float32, device="cuda")
z = torch.randn(6, 8, dtype=torch.float32, device="cuda")
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, z))
self.assertGraphContainsExactly(subgraph, 'aten::add', 4, consider_subgraphs=False)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_partition_logic_1(self):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
x = x + 12.0
o1 = x + y
o2 = x + z
o = o1 + o2
return o
t_jit = torch.jit.script(t)
x = torch.randn(8, 6, 8, dtype=torch.float32, device="cuda")
y = torch.randn(4, 8, 6, 8, dtype=torch.float32, device="cuda")
z = torch.randn(4, 1, 6, 8, dtype=torch.float32, device="cuda")
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, z))
self.assertGraphContainsExactly(subgraph, 'aten::add', 4, consider_subgraphs=False)
@unittest.skipIf(True, "Broadcast with different output not supported yet")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_multiple_output_shape(self):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = x + 12
o1 = o + y
o2 = o + z
oo = o1.sum() + o2.sum()
return oo
t_jit = torch.jit.script(t)
x = torch.randn(32, 32, dtype=torch.float, device="cuda")
y = torch.randn(2, 32, 32, dtype=torch.float, device="cuda")
z = torch.randn(4, 32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o, jit_o)
# Currently cannot fuse this
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
@unittest.skipIf(True, "broadcast on branches can't be resolved yet")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_multiple_output(self):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = x + 12
o1 = o + y
o2 = o + z
oo = o1.sum() + o2.sum()
return oo
t_jit = torch.jit.script(t)
x = torch.randn(32, 32, dtype=torch.float, device="cuda")
y = torch.randn(4, 32, 32, dtype=torch.float, device="cuda")
z = torch.randn(4, 32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o, jit_o)
# Currently cannot fuse this
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
    def _unary_test_helper(self, operation, dtype, random_data):
        """Compare scripted vs eager output of a fused unary op for one dtype.

        ``random_data=True`` uses random inputs (and gradchecks float64);
        ``random_data=False`` uses the special-values fixture (inf/nan/etc.).
        If eager itself raises for this op/dtype the case is skipped.
        """
        # gradcheck only for double inputs with random data (special values would break it)
        gradient_check = (dtype == torch.float64) and random_data
        shape = self.special_values.shape
        torch.cuda.manual_seed_all(211)
        # need additional def of t for boolean ops
        def t(x: torch.Tensor, y: torch.Tensor):
            o = x * y
            o = o + 5e-3  # small shift to dodge singularities at exactly zero
            o = operation(o)
            return o
        y = torch.rand(shape, dtype=torch.float32, device="cuda", requires_grad=gradient_check)
        y = y.to(dtype=dtype)
        if random_data:
            x = torch.rand(shape, dtype=torch.float32, device="cuda", requires_grad=gradient_check)
            if dtype in self.int_types:
                # prefer a larger variance for integer types
                x = x * 5
            x = x.to(dtype=dtype)
        else:
            x = self.special_values.to(dtype=dtype)
        try:
            ref = t(x, y)
        except Exception:
            # same way as TE checker, if eager mode throws, ignore this test
            return
        t_jit = torch.jit.script(t)
        # three runs: profiling, then optimized execution
        jit_o = t_jit(x, y)
        jit_o = t_jit(x, y)
        jit_o = t_jit(x, y)
        if gradient_check:
            if jit_o.dtype != torch.bool:
                # bool dtype has no `-`
                gradcheck(t_jit, [x, y], nondet_tol=1e-5)
        elif dtype in self.support_tensor_dtypes:
            self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
        o = t(x, y)
        self.assertEqual(o.dtype, jit_o.dtype)
        if dtype == torch.bfloat16:
            # compare with the actual ground truth for
            # bfloat16 kernels instead of eager mode
            # implementation, since mismatch in cast
            # adds excessive noise.
            o = t(x.to(torch.float64), y.to(torch.float64))
            if o.dtype.is_floating_point:
                o = o.to(torch.bfloat16)
        else:
            o = t(x, y)
        self.assertTrue(self._compare("failing case {}\n{}\n{}\n{}".format(dtype, operation, x, y), o, jit_o, 1e-2))
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_unary_ops(self):
data_types = [
*self.int_types,
torch.float16,
torch.float32,
torch.float64,
# TODO: revert this
# see issue https://github.com/csarofeen/pytorch/issues/1730"
# torch.cfloat,
# torch.cdouble,
]
if TEST_BF16:
data_types.append(torch.bfloat16)
operations = [torch.neg,
torch.abs,
torch.log,
torch.log10,
torch.log1p,
torch.log2,
torch.lgamma,
torch.exp,
torch.expm1,
torch.erf,
torch.erfc,
torch.cos,
torch.acos,
torch.cosh,
torch.sin,
torch.asin,
torch.sinh,
torch.tan,
torch.atan,
torch.sqrt,
torch.rsqrt,
torch.ceil,
torch.floor,
torch.round,
torch.trunc,
torch.frac,
torch.reciprocal,
torch.isfinite,
torch.isinf,
torch.isnan,
torch.isneginf,
torch.isposinf,
torch.isreal,
torch.nn.functional.softplus,
torch.nn.functional.gelu,
torch.nn.functional.leaky_relu,
torch.nn.functional.silu,
torch.relu,
torch.sigmoid,
torch.bitwise_not,
torch.tan,
torch.tanh]
skip_complex = {torch.rsqrt, torch.reciprocal}
for op, dtype in itertools.product(operations, data_types):
if dtype.is_complex and op in skip_complex:
continue
self._unary_test_helper(op, dtype, False) # test special numbers
self._unary_test_helper(op, dtype, True) # test random data
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_category_rule(self):
def run_tensor(x, z):
def t(x: torch.Tensor, z: torch.Tensor):
o = x + z
o = torch.abs(o)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, z)
jit_o = t_jit(x, z)
o = t(x, z)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, z), FUSION_GUARD)
def run_scalar(x, z):
def t(x: torch.Tensor, z: float):
o = x + z
o = torch.abs(o)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, z)
jit_o = t_jit(x, z)
o = t(x, z)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, z), FUSION_GUARD)
# n-dim with 0-dim (no type-promote)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
z = torch.tensor(2.0, dtype=torch.double, device="cuda")
run_tensor(x, z)
# n-dim with 0-dim (type-promote)
x = torch.randn(4, 8, 32, 32, device="cuda").to(dtype=torch.long)
z = torch.tensor(2.0, dtype=torch.double, device="cuda")
run_tensor(x, z)
# n-dim with n-dim (type-promote)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
z = torch.randn(4, 8, 32, 32, dtype=torch.double, device="cuda")
run_tensor(x, z)
# n-dim with scalar (no type-promote)
x = torch.randn(4, 8, 32, 32, dtype=torch.float16, device="cuda")
z = torch.tensor(3., dtype=torch.double)
run_scalar(x, z)
if TEST_BF16:
# n-dim with scalar (no type-promote)
x = torch.randn(4, 8, 32, 32, dtype=torch.bfloat16, device="cuda")
z = torch.tensor(3., dtype=torch.double)
run_scalar(x, z)
# n-dim with scalar (type-promote)
x = torch.randn(4, 8, 32, 32, device="cuda").to(dtype=torch.long)
z = torch.tensor(3., dtype=torch.double)
run_scalar(x, z)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_unary_bitwise(self):
def bit_not(x: torch.Tensor):
return ~(x + 1)
jitted = torch.jit.script(bit_not)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda").mul(5).to(torch.long)
jit_o = jitted(x)
jit_o = jitted(x)
o = bit_not(x)
self.assertEqual(o, jit_o)
jitted.graph_for(x) # Shows up in second instance, not first
self.assertGraphContains(jitted.graph_for(x), FUSION_GUARD)
def bool_not(x: torch.Tensor, y: torch.Tensor):
return ~(x & y)
jitted = torch.jit.script(bool_not)
x = torch.rand(4, 8, 32, 32, dtype=torch.float, device="cuda").round().to(torch.bool)
y = torch.rand(4, 8, 32, 32, dtype=torch.float, device="cuda").round().to(torch.bool)
jit_o = jitted(x, y)
jit_o = jitted(x, y)
o = bool_not(x, y)
self.assertEqual(o, jit_o)
jitted.graph_for(x, y) # Shows up in second instance, not first
self.assertGraphContains(jitted.graph_for(x, y), FUSION_GUARD)
def _get_scalar_binary_test_fn(self, category_and_type1, category_and_type2, operation):
category1, dtype_arg1 = category_and_type1
category2, dtype_arg2 = category_and_type2
def t_intx_tensory(x: int, y: torch.Tensor):
o = operation(x, y)
o = 2 + o
return o
def t_doublex_tensory(x: float, y: torch.Tensor):
o = operation(x, y)
o = 2 + o
return o
def t_cdoublex_tensory(x: complex, y: torch.Tensor):
o = operation(x, y)
o = 2 + o
return o
# Omit both scalar cases and swap cases
assert category1 == "scalar" and category2 != "scalar"
if dtype_arg1.is_floating_point:
return t_doublex_tensory
if dtype_arg1 == torch.int64 or dtype_arg1 == torch.int32:
return t_intx_tensory
if dtype_arg1.is_complex or dtype_arg1 == torch.int32:
return t_cdoublex_tensory
raise NotImplementedError
    def _binary_test_helper(self, operation, dtypes, random_data, categories="ndim"):
        """Compare scripted vs eager for one binary op over a dtype/category combination.

        ``dtypes`` is a single dtype or an (lhs, rhs) pair; ``categories`` selects
        operand kinds ("scalar", "0dim", "0dimcpu", "ndim"). Cases the fuser or
        eager mode cannot handle are filtered out up front. On failure, the
        offending inputs are printed before re-raising.
        """
        if isinstance(dtypes, tuple):
            dtype_arg1, dtype_arg2 = dtypes
        else:
            dtype_arg1 = dtype_arg2 = dtypes
        if isinstance(categories, tuple) and random_data:
            category1, category2 = categories
        elif not random_data:
            # special-value inputs only make sense for full-rank tensors
            category1 = category2 = "ndim"
        else:
            category1 = category2 = categories
        def is_cpu_category(x):
            return x == "0dimcpu" or x == "scalar"
        # skip unsupported cases
        if is_cpu_category(category1) and is_cpu_category(category2):
            return
        # only test cases with first operand as scalar
        if category2 == "scalar":
            return
        # skip ops that doesn't support scalar inputs in eager
        if operation in [
            torch.atan2,
            torch.max,
            torch.min,
            torch.remainder,  # unsupported in nvfuser
        ]:
            if category1 == "scalar" or category2 == "scalar":
                return
        if operation in [
            torch.fmod,
            torch.eq,
            torch.ne,
            torch.ge,
            torch.gt,
            torch.le,
            torch.lt
        ]:
            if category1 == "scalar":
                return
        # operators that does not support bfloat16
        if operation in [torch.fmod]:
            if dtype_arg1 == torch.bfloat16 or dtype_arg2 == torch.bfloat16:
                return
        def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
            o = operation(x, y)
            o = o + z
            return o
        shape = (4, 32, 32)
        shapex = shape if category1 == "ndim" else ()
        shapey = shape if category2 == "ndim" else ()
        if random_data:
            x = (torch.randn(shapex, dtype=torch.float, device="cuda") * 5).to(dtype_arg1)
            y = (torch.randn(shapey, dtype=torch.float, device="cuda") * 5).to(dtype_arg2)
        else:
            x = self.special_values.to(dtype=dtype_arg1)
            y = (torch.rand_like(self.special_values) * 5).to(dtype_arg2)
        r"""
        Category conversion
        """
        has_scalar = False
        if category1 == "scalar":
            has_scalar = True
            x = x.item()
        if category1 == "0dimcpu":
            x = x.to(device="cpu")
        if category2 == "scalar":
            has_scalar = True
            y = y.item()
        if category2 == "0dimcpu":
            y = y.to(device="cpu")
        z = torch.tensor([2], device="cuda").to(dtype_arg1)
        is_dtype_arg1_int = dtype_arg1 == torch.int32 or dtype_arg1 == torch.int64
        is_dtype_arg2_int = dtype_arg2 == torch.int32 or dtype_arg2 == torch.int64
        if operation in [torch.pow]:
            if is_dtype_arg1_int and is_dtype_arg2_int:
                if category2 == "scalar":
                    # RuntimeError: Integers to negative integer powers are not allowed
                    y = abs(y)
                if category2 == "0dimcpu" and y == -1:
                    # https://github.com/pytorch/pytorch/issues/73196
                    y = y - 1
                if category2 == "0dimcpu" and y == -2:
                    # avoid pow(0, -2), which gives inconsistent results on integer tensor
                    y = y - 1
        # Avoid division by zero for integer tensors
        div_like = [torch.div, torch.fmod, torch.remainder]
        if operation in div_like and (dtype_arg2 == torch.int32 or dtype_arg2 == torch.int64):
            y[y == 0] = 1
        # half/bfloat16 accumulate too much noise to compare values exactly;
        # only dtypes are checked for them
        test_value = True
        if dtype_arg1 == torch.half or dtype_arg2 == torch.half:
            test_value = False
        if dtype_arg1 == torch.bfloat16 or dtype_arg2 == torch.bfloat16:
            test_value = False
        try:
            if not has_scalar:
                o = t(x, y, z)
                t_jit = torch.jit.script(t)
                jit_o = t_jit(x, y, z)
                jit_o = t_jit(x, y, z)
                jit_o = t_jit(x, y, z)
                self.assertEqual(o.dtype, jit_o.dtype)
                if test_value:
                    self.assertEqual(o, jit_o)
                self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
            elif category2 != "scalar":  # only test the case where first is scalar
                test_fn = self._get_scalar_binary_test_fn((category1, dtype_arg1), (category2, dtype_arg2), operation)
                o = test_fn(x, y)
                t_jit = torch.jit.script(test_fn)
                jit_o = t_jit(x, y)
                jit_o = t_jit(x, y)
                jit_o = t_jit(x, y)
                self.assertEqual(o.dtype, jit_o.dtype)
                if test_value:
                    self.assertEqual(o, jit_o)
                self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
        except Exception as e:
            print("failing test for op: ", operation.__name__)
            print("with input\n\tx: ", x)
            print("\ty: ", y)
            print("\tz: ", z)
            raise e
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_binary_ops(self):
    """Exercise fused binary ops across dtype pairs and operand-category pairs."""
    dtypes = [torch.int32, torch.int64, torch.float16, torch.float32, torch.float64]
    if TEST_BF16:
        dtypes.append(torch.bfloat16)
    ops = [
        torch.mul, torch.div, torch.atan2, torch.max, torch.min,
        torch.pow, torch.remainder, torch.fmod, torch.eq, torch.ne,
        torch.ge, torch.gt, torch.le, torch.lt,
    ]
    categories = ["scalar", "0dim", "0dimcpu", "ndim"]
    dtype_pairs = list(itertools.combinations(dtypes, 2))
    category_pairs = list(itertools.combinations(categories, 2))
    # random data over every (op, dtype pair, category pair) combination
    for op, dt, cat in itertools.product(ops, dtype_pairs, category_pairs):
        self._binary_test_helper(op, dt, True, cat)
    # special numbers (inf/nan/etc.), no category variation
    for op, dt in itertools.product(ops, dtype_pairs):
        self._binary_test_helper(op, dt, False)
# TODO: revert this
@unittest.skipIf(True, "see issue https://github.com/csarofeen/pytorch/issues/1730")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_binary_ops_complex(self):
    """Same sweep as test_binary_ops, restricted to complex dtypes/ops."""
    dtypes = [torch.cfloat, torch.cdouble]
    ops = [torch.mul, torch.div, torch.pow, torch.eq, torch.ne]
    categories = ["scalar", "0dim", "0dimcpu", "ndim"]
    dtype_pairs = list(itertools.combinations(dtypes, 2))
    category_pairs = list(itertools.combinations(categories, 2))
    for op, dt, cat in itertools.product(ops, dtype_pairs, category_pairs):
        self._binary_test_helper(op, dt, True, cat)   # random data
    for op, dt in itertools.product(ops, dtype_pairs):
        self._binary_test_helper(op, dt, False)       # special numbers
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_binary_bitwise(self):
    """Chained bitwise/shift ops over all bool/int dtype triples should fuse."""
    def jit_and(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
        return torch.bitwise_and(x, y) & z

    def jit_or(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
        return torch.bitwise_or(x, y) | z

    def jit_xor(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
        return torch.bitwise_xor(x, y) ^ z

    def jit_lshift(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
        return torch.bitwise_left_shift(x, y) << z

    def jit_rshift(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
        return torch.bitwise_right_shift(x, y) >> z

    shift_fns = (jit_lshift, jit_rshift)
    for dt1, dt2, dt3 in itertools.product([torch.bool, torch.int32, torch.int64], repeat=3):
        for fn in (jit_and, jit_or, jit_xor, jit_lshift, jit_rshift):
            # shifts are not defined for bool operands; skip those combos
            if fn in shift_fns and torch.bool in (dt1, dt2, dt3):
                continue
            a = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda").mul(5).to(dt1)
            b = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda").mul(5).to(dt2)
            c = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda").mul(2).to(dt3)
            scripted = torch.jit.script(fn)
            jit_o = scripted(a, b, c)
            jit_o = scripted(a, b, c)  # second run picks up the profiled graph
            self.assertEqual(fn(a, b, c), jit_o)
            self.assertGraphContains(scripted.graph_for(a, b, c), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_type_as_op(self):
    """`lt` followed by `type_as` should fuse and match the eager result."""
    def fn(x: torch.Tensor, y: torch.Tensor, z: float):
        o = torch.lt(x, z)
        return o.type_as(y)

    scripted = torch.jit.script(fn)
    a = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
    b = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
    jit_o = scripted(a, b, 0.5)
    jit_o = scripted(a, b, 0.5)
    self.assertEqual(fn(a, b, 0.5), jit_o)
    self.assertGraphContains(scripted.graph_for(a, b, 0.5), FUSION_GUARD)
def _ternary_integer_test_helper(self, dtype_arg1):
    """Check clamp/threshold fusion when scalar bounds are integers."""
    shape = (4, 8, 32, 32)
    magnitude = 100
    # integer dtypes need randint; float dtypes use scaled randn
    if dtype_arg1 in self.int_types:
        x = torch.randint(-magnitude, magnitude, shape, dtype=dtype_arg1, device="cuda")
    else:
        x = torch.randn(shape, dtype=dtype_arg1, device="cuda") * magnitude
    lo = int(0)
    hi = int(magnitude * 0.1)

    def clamp0(x: torch.Tensor, f: int):
        o = 2. * torch.clamp(x, min=f)
        return o
    self._run_helper(torch.jit.script(clamp0), clamp0, x, lo)

    def clamp1(x: torch.Tensor, f: int, ff: int):
        o = 2. * torch.clamp(x, min=f, max=ff)
        return o
    self._run_helper(torch.jit.script(clamp1), clamp1, x, lo, hi)

    # mixed int/float bound signatures below exercise scalar type promotion
    def clamp2(x: torch.Tensor, f: float, ff: int):
        o = 2. * torch.clamp(x, min=f, max=ff)
        return o
    self._run_helper(torch.jit.script(clamp2), clamp2, x, float(lo), hi)

    def clamp3(x: torch.Tensor, f: int, ff: float):
        o = 2. * torch.clamp(x, min=f, max=ff)
        return o
    self._run_helper(torch.jit.script(clamp3), clamp3, x, lo, float(hi))

    def threshold(x: torch.Tensor, th: int, val: int):
        o = 2. * torch.threshold(x, th, val)
        return o
    self._run_helper(torch.jit.script(threshold), threshold, x, lo, hi)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_ternary_ops_integer_compatibility(self):
    """Integer scalar arguments against floating tensors (clamp/threshold)."""
    for dtype in (torch.float16, torch.float32, torch.float64):
        self._ternary_integer_test_helper(dtype)
def _ternary_test_helper(self, operation, dtypes, random_data):
    """Run a ternary `operation` plus an add epilogue under JIT and compare
    dtype and values against eager execution.

    operation: ternary torch op (e.g. torch.clamp, torch.where)
    dtypes: a single dtype or a (dtype_arg1, dtype_arg2, dtype_arg3) tuple
    random_data: random tensors when True; self.special_values otherwise
    """
    if isinstance(dtypes, tuple):
        dtype_arg1, dtype_arg2, dtype_arg3 = dtypes
    else:
        dtype_arg1 = dtype_arg2 = dtype_arg3 = dtypes

    def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: torch.Tensor):
        o = operation(x, y, z)
        o = o + alpha
        return o

    shape = (4, 32, 32)
    if operation is torch.where:
        # torch.where requires a boolean predicate as its first operand
        dtype_arg1 = torch.bool
        if random_data:
            x = torch.randint(0, 2, shape).to(dtype=torch.bool, device="cuda")
            y = (torch.randn(shape, dtype=torch.float, device="cuda") * 5).to(dtype_arg2)
            z = (torch.randn(shape, dtype=torch.float, device="cuda") * 5).to(dtype_arg3)
        else:
            x = torch.randint(0, 2, self.special_values.size()).to(dtype=torch.bool, device="cuda")
            y = self.special_values.to(dtype=dtype_arg2)
            z = (torch.rand_like(self.special_values) * 5).to(dtype_arg3)
    elif random_data:
        x = (torch.randn(shape, dtype=torch.float, device="cuda") * 5).to(dtype_arg1)
        y = (torch.randn(shape, dtype=torch.float, device="cuda") * 5).to(dtype_arg2)
        z = (torch.randn(shape, dtype=torch.float, device="cuda") * 5).to(dtype_arg3)
    else:
        x = self.special_values.to(dtype=dtype_arg1)
        y = (torch.rand_like(self.special_values) * 5).to(dtype_arg2)
        z = (torch.rand_like(self.special_values) * 5).to(dtype_arg3)
    alpha = torch.tensor([2], device="cuda").to(dtype_arg1)
    o = t(x, y, z, alpha)
    t_jit = torch.jit.script(t)
    jit_o = t_jit(x, y, z, alpha)
    jit_o = t_jit(x, y, z, alpha)
    self.assertEqual(o.dtype, jit_o.dtype)
    self.assertEqual(o, jit_o)
    # FIX: graph_for must be queried with the same argument list used in the
    # profiled executions above; the original dropped `alpha`, asking for a
    # graph specialized on an input signature that was never run.
    self.assertGraphContains(t_jit.graph_for(x, y, z, alpha), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_ternary_ops_type_promotion(self):
    """Ternary-op fusion across mixed floating-point dtype triples."""
    # TODO: update accuracy tolerance for bf16 / fp16 data types
    data_types = [
        # torch.float16,
        torch.float32,
        torch.float64
    ]
    '''
    if TEST_BF16:
        data_types.append(torch.bfloat16)
    '''
    # TODO: Add Tensor support for clamp
    operations = [torch.clamp]
    dtype_triples = itertools.combinations(data_types, 3)
    for op, dtypes in itertools.product(operations, dtype_triples):
        self._ternary_test_helper(op, dtypes, True)   # random data
        self._ternary_test_helper(op, dtypes, False)  # special numbers
# We can't test the scalar version of rsub from python
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, "Requires fusion optimization pass to be effective")
def test_rsub(self):
    """torch.rsub followed by a scale should fuse."""
    a = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
    b = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")

    def rsub(x: torch.Tensor, y: torch.Tensor):
        o = torch.rsub(x, y)
        return o * 2.

    self._run_helper(torch.jit.script(rsub), rsub, a, b)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
# legacy fuser does not work for rand_like, see issue #34361
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, "Requires fusion optimization pass to be effective")
def test_ternary_ops(self):
    """Fusion coverage for add/clamp/threshold/where/lerp variants."""
    a = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
    b = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
    c = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
    mask = torch.randint(0, 2, (4, 8, 32, 32)).to(dtype=torch.bool, device="cuda")

    def add(x: torch.Tensor, other: torch.Tensor, alpha: float):
        o = torch.relu(x)
        o = torch.add(o, other=other, alpha=alpha)
        return o
    self._run_helper(torch.jit.script(add), add, a, b, 2.0)

    def clamp0(x: torch.Tensor, f: float):
        o = 2. * torch.clamp(x, min=f)
        return o
    self._run_helper(torch.jit.script(clamp0), clamp0, a, 0.5)

    def clamp1(x: torch.Tensor, f: float, ff: float):
        o = 2. * torch.clamp(x, min=f, max=ff)
        return o
    self._run_helper(torch.jit.script(clamp1), clamp1, a, -0.2, 0.7)

    def threshold(x: torch.Tensor, th: float, val: float):
        o = 2. * torch.threshold(x, th, val)
        return o
    self._run_helper(torch.jit.script(threshold), threshold, a, 0.2, 0.9)

    def where(x: torch.Tensor, y: torch.Tensor, cond: torch.Tensor):
        o = 2. * torch.where(cond, x, y)
        return o
    self._run_helper(torch.jit.script(where), where, a, b, mask)

    def lerp(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
        o = 2. * torch.lerp(x, y, z)
        return o
    self._run_helper(torch.jit.script(lerp), lerp, a, b, c)

    def lerp_scale(x: torch.Tensor, y: torch.Tensor, z: float):
        o = 2. * torch.lerp(x, y, z)
        return o
    self._run_helper(torch.jit.script(lerp_scale), lerp_scale, a, b, 0.5)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, "Requires profiling node to run cuda fuser")
def test_addcmul_ops(self):
    """addcmul with runtime, default and constant `value` should all fuse."""
    a = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
    b = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
    c = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")

    def addcmul(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, value: float):
        o = torch.add(x, 0.5)
        return torch.addcmul(o, y, z, value=value)
    self._run_helper(torch.jit.script(addcmul), addcmul, a, b, c, 2.0)

    def addcmul_no_alpha(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
        o = torch.add(x, 0.5)
        return torch.addcmul(o, y, z)
    self._run_helper(torch.jit.script(addcmul_no_alpha), addcmul_no_alpha, a, b, c)

    def addcmul_const_alpha(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
        o = torch.add(x, 0.5)
        return torch.addcmul(o, y, z, value=0.75)
    self._run_helper(torch.jit.script(addcmul_const_alpha), addcmul_const_alpha, a, b, c)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_dynamic_size(self):
    """Broadcasting with changing input shapes should still fuse.

    FIX: restore the nvfuser guard mode and bailout depth in a `finally`
    block so a failing assertion cannot leak the modified global JIT
    settings into subsequent tests (the original restored the guard mode
    only on success and never restored the bailout depth).
    """
    old_guard = torch._C._jit_set_nvfuser_guard_mode(True)
    old_depth = torch._C._jit_set_bailout_depth(20)
    try:
        def t(x: torch.Tensor, y: torch.Tensor, z: float):
            o = x + y
            o = o + z
            return o
        t_jit = torch.jit.script(t)
        x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
        y = torch.randn(32, 32, dtype=torch.float, device="cuda")
        jit_o = t_jit(x, y, 2.0)
        jit_o = t_jit(x, y, 2.0)
        o = t(x, y, 2.0)
        self.assertEqual(o, jit_o)
        subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))
        self.assertGraphContainsExactly(subgraph, 'aten::add', 2, consider_subgraphs=False)
        # this test is not ideal, as we rely on the bailout to test it and we
        # don't know a way to verify the bailout graph to validate the proper
        # fusion.
        x = torch.randn(8, 32, 16, 8, dtype=torch.float, device="cuda")
        y = torch.randn(16, 8, dtype=torch.float, device="cuda")
        jit_o = t_jit(x, y, 2.0)
        jit_o = t_jit(x, y, 2.0)
        o = t(x, y, 2.0)
        self.assertEqual(o, jit_o)
        self.assertGraphContains(t_jit.graph_for(x, y, 2.0), FUSION_GUARD)
        x = torch.randn(8, 17, 8, dtype=torch.float, device="cuda")
        y = torch.randn(8, 17, 1, dtype=torch.float, device="cuda")
        jit_o = t_jit(x, y, 2.0)
        jit_o = t_jit(x, y, 2.0)
        o = t(x, y, 2.0)
        self.assertEqual(o, jit_o)
        self.assertGraphContains(t_jit.graph_for(x, y, 2.0), FUSION_GUARD)
    finally:
        torch._C._jit_set_nvfuser_guard_mode(old_guard)
        torch._C._jit_set_bailout_depth(old_depth)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_random_topo(self):
    """Run a randomly generated topology with fuser fallback disabled."""
    # Disable the eager fallback so a fuser failure surfaces as an error
    # instead of being silently executed by aten.
    os.environ["PYTORCH_NVFUSER_DISABLE_FALLBACK"] = "1"
    # Fixed seed keeps the randomly generated topology reproducible.
    self.assertTrue(runDefaultTestWithSeed(28449))
def _compare(self, desc, inp1, inp2, error):
a = inp1.clone()
b = inp2.clone()
close = torch.allclose(a, b, rtol=error, atol=error, equal_nan=True)
if not close:
print(desc, close)
z = a - b
index = (torch.abs(z) >= error + error * torch.abs(b)).nonzero()
print("dif : ", z[index])
print("inp1 : ", a[index])
print("inp2 : ", b[index])
print("maximum difference", z[index].max())
return close
# Permutation helper that applies binary operation between two tensors:
# 1. applies separate permutation `perm0` & `perm1` to two inputs
# 2. reduce dimension `broadcast_axis` of operand two to size 1
# The purpose of this test is to ensure permutation works well in
# complicated cases with arbitrary stride order and broadcasting dimensions
def _permutation_helper(self, sizes, broadcast_axis, dtype, device, perm0, perm1):
    """Fuse add+relu on permuted inputs; compare value, dtype and stride
    against eager.

    FIX: work on a local copy of `sizes` instead of mutating the caller's
    list in place when a broadcast axis is requested (callers currently
    have to rebuild the list on every call to work around this).
    """
    def t(x: torch.Tensor, y: torch.Tensor):
        o = torch.add(x, y)
        o = torch.relu(o)
        return o

    x = torch.randn([sizes[i] for i in perm0], dtype=dtype, device=device).permute(
        [perm0.index(i) for i in range(len(sizes))])
    sizes = list(sizes)  # local copy: don't mutate the caller's list
    if broadcast_axis >= 0:
        sizes[broadcast_axis] = 1
    y = torch.randn([sizes[i] for i in perm1], dtype=dtype, device=device).permute(
        [perm1.index(i) for i in range(len(sizes))])
    t_jit = torch.jit.script(t)
    jit_o = t_jit(x, y)
    jit_o = t_jit(x, y)
    o = t(x, y)
    self.assertEqual(o.dtype, jit_o.dtype)
    self.assertEqual(o, jit_o)
    self.assertEqual(o.stride(), jit_o.stride())
    self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
# end-2-end test of permutation & contiguity handling in integration.
# we are testing inputs with all combination of permutation order, just to
# ensure that integration would be able to generate functionally correct
# kernels
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_binary_ops_permutation(self):
    # note that num_dim is exclusive from len(x), so we are not reducing
    # to single element (codegen limitation at this moment)
    ndims = 3
    perms = list(itertools.permutations(range(ndims)))
    for b_axis in range(-1, ndims):
        for perm0, perm1 in itertools.product(perms, perms):
            # fresh list each call: the helper may write a broadcast size
            self._permutation_helper([7, 8, 12], b_axis, torch.float32, "cuda", perm0, perm1)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_binary_ops_channels_last_with_bcast(self):
    """Broadcasted add+relu on a channels-last input should fuse."""
    device = "cuda"
    x = torch.randn([4, 3, 2, 5], device=device).to(memory_format=torch.channels_last)
    w = torch.randn([2, 5], device=device)

    def t(x: torch.Tensor, b: torch.Tensor):
        o = x + b
        return torch.relu(o)

    t_jit = torch.jit.script(t)
    for _ in range(3):  # profiling / optimization runs
        jit_o = t_jit(x, w)
    o = t(x, w)
    self.assertEqual(o.dtype, jit_o.dtype)
    self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-4))
    self.assertGraphContains(t_jit.graph_for(x, w), FUSION_GUARD)
def _reduction_helper(self, sizes, reduction_axis, dtype, device, perm0, perm1, keepdim=False):
    """Fuse add+sum over permuted inputs and compare to the eager output."""
    class MyReduction(torch.nn.Module):
        __constants__ = ['reduction_axis', 'keepdim']

        def __init__(self):
            super(MyReduction, self).__init__()
            self.reduction_axis = reduction_axis
            self.keepdim = keepdim

        def forward(self, x: torch.Tensor, y: torch.Tensor):
            o = torch.add(x, y)
            o = torch.sum(o, dim=self.reduction_axis, keepdim=self.keepdim)
            return o

    model = MyReduction()
    # allocate in permuted order, then permute back so logical sizes match
    # `sizes` but the memory layout follows perm0/perm1
    x = torch.randn([sizes[i] for i in perm0], dtype=dtype, device=device).permute(
        [perm0.index(i) for i in range(len(sizes))])
    y = torch.randn([sizes[i] for i in perm1], dtype=dtype, device=device).permute(
        [perm1.index(i) for i in range(len(sizes))])
    scripted = torch.jit.script(model)
    jit_o = scripted(x, y)
    jit_o = scripted(x, y)
    o = model(x, y)
    self.assertEqual(o.dtype, jit_o.dtype)
    # numerical issues here due to our scheduling.
    # can't use `self.assertEqual(o, jit_o)`
    self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-4))
    self.assertGraphContains(scripted.graph_for(x, y), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_reduction(self):
    """Sum-reduction fusion over assorted shapes, axis sets and keepdim."""
    for shape in ([7, 8, 12], [12, 8, 7, 9, 15], [128, 16, 8, 32]):
        # note that num_dim is exclusive from len(x), so we are not reducing
        # to single element (codegen limitation at this moment)
        rank = len(shape)
        for num_reduce_dim in range(1, rank):
            for axes in itertools.combinations(range(rank), num_reduce_dim):
                for keepdim in (True, False):
                    self._reduction_helper(shape, axes, torch.float32, "cuda",
                                           range(rank), range(rank), keepdim)
def _layer_norm_autodiff_helper(self, model, grad, shapes, args):
    """Script `model`, run forward+backward, and check outputs/gradients
    match eager; verify fusion guards appear in both forward and backward
    graphs.

    FIXES: zero the gradients with a plain for-loop instead of a throwaway
    list comprehension used only for its side effect, and drop a dead
    `for node in g.nodes(): n = node` loop whose result was never used.
    """
    jit_model = torch.jit.script(model)
    eps = np.random.random() * 1e-4
    use_cudnn = bool(np.random.randint(0, 2))
    # profile/optimization runs
    for i in range(3):
        jit_o = jit_model(shapes, *args, eps, use_cudnn)
        jit_o.backward(grad)
    # independent leaves for the eager reference run
    ref_args = [t.detach().clone().requires_grad_() for t in args]
    for t in args:
        t.grad.zero_()
    jit_o = jit_model(shapes, *args, eps, use_cudnn)
    jit_o.backward(grad)
    o = model(shapes, *ref_args, eps, use_cudnn)
    o.backward(grad)
    self.assertEqual(jit_o, o)
    for arg, ref_arg in zip(args, ref_args):
        self.assertEqual(arg.grad, ref_arg.grad)
    # check fusion in fw & bw
    g = jit_model.graph_for(shapes, *args, eps, use_cudnn)
    # walk to the last execution plan and its backward graph
    dbg_state = jit_model.get_debug_state()
    for val in dbg_state.execution_plans.values():
        v = val
    state2 = v.code.grad_executor_states()
    for val in state2[0].execution_plans.values():
        v2 = val
    FileCheck().check(FUSION_GUARD).run(g)
    FileCheck().check(FUSION_GUARD).run(v2.graph)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_layer_norm_autodiff(self):
    """layer_norm autodiff fusion for every weight/bias presence combination."""
    def t_wb(shapes: List[int], x, w, b, eps: float, cudnn: bool):
        o = torch.layer_norm(x, shapes, w, b, eps, cudnn)
        o = torch.relu(o)
        return o

    def t_w(shapes: List[int], x, w, eps: float, cudnn: bool):
        o = torch.layer_norm(x, shapes, w, None, eps, cudnn)
        o = torch.relu(o)
        return o

    def t_b(shapes: List[int], x, b, eps: float, cudnn: bool):
        o = torch.layer_norm(x, shapes, None, b, eps, cudnn)
        o = torch.relu(o)
        return o

    def t(shapes: List[int], x, eps: float, cudnn: bool):
        o = torch.layer_norm(x, shapes, None, None, eps, cudnn)
        o = torch.relu(o)
        return o

    # keyed by (has_weight, has_bias) packed into two bits
    variants = {3: t_wb, 2: t_w, 1: t_b, 0: t}
    for w, b in itertools.product([True, False], repeat=2):
        batch = [2]
        # note: awkward shape here to avoid vectorized fast kernel, which is
        # buggy in aten
        shapes = [2, 7, 3]
        fn = variants[w * 2 + b]
        grad = torch.randn(batch + shapes, dtype=torch.float32, device="cuda")
        args = [torch.randn(batch + shapes, dtype=torch.float32, device="cuda").requires_grad_()]
        for wanted in (w, b):
            if wanted:
                args.append(torch.randn(shapes, dtype=torch.float32, device="cuda").requires_grad_())
        self._layer_norm_autodiff_helper(fn, grad, shapes, args)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_layer_norm_parser(self):
    """relu + layer_norm should be parsed into a fusion group.

    FIX: removed a dead eager call whose result was immediately
    overwritten without being used (this is a parser test; it only
    checks the fused graph, not values).
    """
    dtype = torch.float32
    device = "cuda"
    x = torch.randn([4, 4, 2], dtype=dtype, device=device)
    w = torch.randn([4, 2], dtype=dtype, device=device)
    b = torch.randn([4, 2], dtype=dtype, device=device)

    def t(x: torch.Tensor, w: torch.Tensor, b: torch.Tensor):
        o = torch.relu(x)
        o = torch.layer_norm(o, [4, 2], w, b, 1e-5)
        return o

    t_jit = torch.jit.script(t)
    jit_o = t_jit(x, w, b)
    jit_o = t_jit(x, w, b)
    o = t(x, w, b)  # eager run kept for parity; no value compare here
    self.assertGraphContains(t_jit.graph_for(x, w, b), FUSION_GUARD)
def _native_layer_norm_helper(self, shape, norm_shape, dtype, device, error, affine=True):
    """Fuse relu + native_layer_norm; compare output/mean/rstd to eager."""
    class MyLayerNorm(torch.nn.Module):
        __constants__ = ['norm_shape']

        def __init__(self, elementwise_affine=True):
            super(MyLayerNorm, self).__init__()
            self.norm_shape = norm_shape
            if not elementwise_affine:
                self.weight = None
                self.bias = None
            else:
                self.weight = torch.randn(norm_shape, dtype=dtype, device=device)
                self.bias = torch.randn(norm_shape, dtype=dtype, device=device)
                # deterministic affine params: identity scale, zero shift
                with torch.no_grad():
                    self.weight.fill_(1)
                    self.bias.fill_(0)

        def forward(self, x: torch.Tensor):
            o = torch.relu(x)
            o = torch.native_layer_norm(o, self.norm_shape, self.weight, self.bias, 1e-5)
            return o

    model = MyLayerNorm(affine)
    x = torch.randn(shape, dtype=dtype, device=device)
    scripted = torch.jit.script(model)
    jit_o, jit_mean, jit_rstd = scripted(x)
    jit_o, jit_mean, jit_rstd = scripted(x)
    o, mean, rstd = model(x)
    self.assertEqual(o.dtype, jit_o.dtype)
    # numerical issues here due to our scheduling.
    # can't use `self.assertEqual(o, jit_o)`
    self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
    self.assertTrue(self._compare("comparing mean failed", mean, jit_mean, error))
    self.assertTrue(self._compare("comparing rstd failed", rstd, jit_rstd, error))
    self.assertGraphContains(scripted.graph_for(x), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_native_layer_norm(self):
    """native_layer_norm fusion with random shapes, suffix norm dims, +/- affine."""
    dims = 4
    for _ in range(3):  # repeat with fresh random shapes
        for offset in range(1, dims):
            for affine in (True, False):
                input_shape = [random.randint(10, 30) for _ in range(dims)]
                norm_shape = input_shape[dims - offset:]  # normalize trailing dims
                self._native_layer_norm_helper(input_shape, norm_shape, torch.float32, "cuda", 1e-4, affine)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_native_layer_norm_half(self):
    """fp16 variant of test_native_layer_norm (looser tolerance)."""
    dims = 4
    for _ in range(3):  # repeat with fresh random shapes
        for offset in range(1, dims):
            input_shape = [random.randint(10, 30) for _ in range(dims)]
            norm_shape = input_shape[dims - offset:]  # normalize trailing dims
            self._native_layer_norm_helper(input_shape, norm_shape, torch.float16, "cuda", 5e-3)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_native_layer_norm_bfloat(self):
    """bf16 variant of test_native_layer_norm (loosest tolerance)."""
    dims = 4
    for _ in range(3):  # repeat with fresh random shapes
        for offset in range(1, dims):
            input_shape = [random.randint(10, 30) for _ in range(dims)]
            norm_shape = input_shape[dims - offset:]  # normalize trailing dims
            self._native_layer_norm_helper(input_shape, norm_shape, torch.bfloat16, "cuda", 1e-1)
def _norm_helper(self,
                 shape,
                 dtype,
                 device,
                 error,
                 is_batch_norm_else_instance_norm,
                 memory_format=torch.contiguous_format,
                 *,
                 layer_dtype=torch.float32):
    """Compare fused batch_norm/instance_norm (+relu) against eager.

    Checks output value, dtype and stride, and that the running
    statistics updated in place by the jit path match those updated by
    the eager path.

    shape: input tensor shape; shape[1] is treated as the channel dim
           (running stats are sized shape[1]).
    error: tolerance passed to self._compare for all value checks.
    is_batch_norm_else_instance_norm: batch_norm when True, instance_norm
           otherwise; both run with live stat updates.
    layer_dtype: dtype of the running-stat tensors.
    """
    class MyBatchNorm(torch.nn.Module):
        def __init__(self):
            super(MyBatchNorm, self).__init__()

        def forward(self, x: torch.Tensor, r_mean: torch.Tensor, r_var: torch.Tensor):
            # training=True so r_mean/r_var are updated in place
            o = torch.nn.functional.batch_norm(x, r_mean, r_var, training=True)
            o = torch.relu(o)
            return o

    class MyInstanceNorm(torch.nn.Module):
        def __init__(self):
            super(MyInstanceNorm, self).__init__()

        def forward(self, x: torch.Tensor, r_mean: torch.Tensor, r_var: torch.Tensor):
            # use_input_stats=True likewise updates r_mean/r_var in place
            o = torch.nn.functional.instance_norm(x, r_mean, r_var, use_input_stats=True)
            o = torch.relu(o)
            return o

    t = MyBatchNorm() if is_batch_norm_else_instance_norm else MyInstanceNorm()
    x = torch.randn(shape, dtype=dtype, device=device).to(memory_format=memory_format)
    running_mean = torch.zeros(shape[1], dtype=layer_dtype, device=device)
    running_var = torch.ones(shape[1], dtype=layer_dtype, device=device)
    t_jit = torch.jit.script(t)
    # separate copies of the running stats for the eager and jit runs, so
    # the in-place updates can be compared against each other afterwards
    eager_running_mean = running_mean.clone()
    eager_running_var = running_var.clone()
    jit_running_mean = running_mean.clone()
    jit_running_var = running_var.clone()
    # profiling run uses throwaway clones so jit_running_mean/var stay
    # pristine for the measured run below
    jit_o = t_jit(x, running_mean.clone(), running_var.clone())
    # sanity check: neither stat set has been touched yet at this point
    self.assertTrue(self._compare("prerun comparing running_mean failed", eager_running_mean, jit_running_mean, error))
    self.assertTrue(self._compare("prerun comparing running_var failed", eager_running_var, jit_running_var, error))
    jit_o = t_jit(x, jit_running_mean, jit_running_var)
    o = t(x, eager_running_mean, eager_running_var)
    self.assertEqual(o.dtype, jit_o.dtype)
    self.assertEqual(o.stride(), jit_o.stride())
    # numerical issues here due to our scheduling.
    # can't use `self.assertEqual(o, jit_o)`
    self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
    self.assertTrue(self._compare("comparing running_mean failed", eager_running_mean, jit_running_mean, error))
    self.assertTrue(self._compare("comparing running_var failed", eager_running_var, jit_running_var, error))
    self.assertGraphContains(t_jit.graph_for(x, running_mean, running_var), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_layer_norm_trivial_reduce_dim(self):
    """layer_norm autodiff with a size-1 batch (trivial reduction dim)."""
    def t_wb(shapes: List[int], x, w, b, eps: float, cudnn: bool):
        o = torch.layer_norm(x, shapes, w, b, eps, cudnn)
        o = torch.relu(o)
        return o

    shapes = [2, 7, 3]
    full_shape = [1] + shapes  # batch of one -> trivial reduction dimension
    grad = torch.randn(full_shape, dtype=torch.float32, device="cuda")
    args = [
        torch.randn(full_shape, dtype=torch.float32, device="cuda").requires_grad_(),
        torch.randn(shapes, dtype=torch.float32, device="cuda").requires_grad_(),
        torch.randn(shapes, dtype=torch.float32, device="cuda").requires_grad_(),
    ]
    self._layer_norm_autodiff_helper(t_wb, grad, shapes, args)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_norm_half_layer(self):
    """batch/instance norm with fp16 input AND fp16 running statistics."""
    size = [2, 4, 2, 2]
    for use_batch_norm, mf in itertools.product(
            [False, True], [torch.channels_last, torch.contiguous_format]):
        self._norm_helper(size, torch.float16, "cuda", 1e-3, use_batch_norm,
                          memory_format=mf, layer_dtype=torch.float16)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_norm_channels_last(self):
    """batch/instance norm fusion on channels-last vs contiguous inputs."""
    size = [3, 4, 5, 6]
    # cudnn disabled so the fuser (not cudnn) handles the norm
    with torch.backends.cudnn.flags(enabled=False):
        for use_batch_norm, mf in itertools.product(
                [False, True], [torch.channels_last, torch.contiguous_format]):
            self._norm_helper(size, torch.float32, "cuda", 1e-4, use_batch_norm, memory_format=mf)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_norm(self):
    """batch/instance norm fusion over a sweep of ranks and channel sizes."""
    output_elements = 10000
    channel_sizes = [67, 457, 1024, 4096]
    with torch.backends.cudnn.flags(enabled=False):
        for use_batch_norm in (False, True):
            for dims in range(3, 6):
                # keep total element count roughly constant across ranks
                output_size = int(pow(output_elements, 1. / (dims - 1)))
                for C in channel_sizes:
                    shape = [output_size] * dims
                    shape[1] = C
                    self._norm_helper(shape, torch.float32, "cuda", 1e-4, use_batch_norm)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_norm_large(self):
    """Same sweep as test_norm but with a much larger element budget."""
    output_elements = 262144
    channel_sizes = 67, 457, 1024
    for use_batch_norm in (True, False):
        for dims in range(3, 6):
            # keep total element count roughly constant across ranks
            output_size = int(pow(output_elements, 1. / (dims - 1)))
            for C in channel_sizes:
                shape = [output_size] * dims
                shape[1] = C
                self._norm_helper(shape, torch.float32, "cuda", 1e-4, use_batch_norm)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_norm_half(self):
    """fp16 variant of test_norm (looser tolerance)."""
    output_elements = 10000
    channel_sizes = [67, 457, 1024, 4096]
    with torch.backends.cudnn.flags(enabled=False):
        for use_batch_norm in (False, True):
            for dims in range(3, 6):
                # keep total element count roughly constant across ranks
                output_size = int(pow(output_elements, 1. / (dims - 1)))
                for C in channel_sizes:
                    shape = [output_size] * dims
                    shape[1] = C
                    self._norm_helper(shape, torch.float16, "cuda", 5e-3, use_batch_norm)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_norm_bfloat(self):
    """bf16 variant of test_norm (loosest tolerance)."""
    output_elements = 10000
    channel_sizes = [67, 457, 1024, 4096]
    with torch.backends.cudnn.flags(enabled=False):
        for use_batch_norm in (False, True):
            for dims in range(3, 6):
                # keep total element count roughly constant across ranks
                output_size = int(pow(output_elements, 1. / (dims - 1)))
                for C in channel_sizes:
                    shape = [output_size] * dims
                    shape[1] = C
                    self._norm_helper(shape, torch.bfloat16, "cuda", 1e-1, use_batch_norm)
def _softmax_helper(self, shape, reduction_axis, is_log_softmax, dtype, device, error):
    """Fuse add + (log_)softmax; gradcheck for float64, value compare otherwise."""
    class MySoftmax(torch.nn.Module):
        __constants__ = ['reduction_axis']

        def __init__(self):
            super(MySoftmax, self).__init__()
            self.reduction_axis = reduction_axis

        def forward(self, x: torch.Tensor, y: torch.Tensor):
            o = torch.add(x, y)
            o = torch.nn.functional.softmax(o, dim=self.reduction_axis)
            return o

    class MyLogSoftmax(torch.nn.Module):
        __constants__ = ['reduction_axis']

        def __init__(self):
            super(MyLogSoftmax, self).__init__()
            self.reduction_axis = reduction_axis

        def forward(self, x: torch.Tensor, y: torch.Tensor):
            o = torch.add(x, y)
            o = torch.nn.functional.log_softmax(o, dim=self.reduction_axis)
            return o

    # gradcheck needs double precision; only run it for float64 inputs
    gradient_check = (dtype == torch.float64)
    model = MyLogSoftmax() if is_log_softmax else MySoftmax()
    x = torch.randn(shape, dtype=dtype, device=device, requires_grad=gradient_check)
    y = torch.randn(shape, dtype=dtype, device=device, requires_grad=gradient_check)
    scripted = torch.jit.script(model)
    for _ in range(3):  # profiling / optimization runs
        jit_o = scripted(x, y)
    if gradient_check:
        gradcheck(scripted.forward, [x, y], nondet_tol=1e-5)
    else:
        o = model(x, y)
        self.assertEqual(o.dtype, jit_o.dtype)
        # numerical issues here due to our scheduling.
        # can't use `self.assertEqual(o, jit_o)`
        self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
        self.assertGraphContains(scripted.graph_for(x, y), FUSION_GUARD)
    @unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
    @unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                     "Requires fusion optimization pass to be effective")
    def test_softmax_dtype(self):
        """Softmax with an explicit output dtype (fp16 input -> fp32 output)
        should fuse in both forward and backward, with gradients matching eager."""
        def t(x: torch.Tensor, y: torch.Tensor):
            o = torch.mul(x, y)
            o = torch.nn.functional.softmax(o, dim=0, dtype=torch.float32)
            return o
        x = torch.randn([4, 4], dtype=torch.float16, device="cuda").requires_grad_()
        y = torch.randn_like(x).requires_grad_()
        grad = torch.randn_like(x).float()
        # detached leaf copies used for the eager-mode reference run
        ref_x = x.detach().requires_grad_()
        ref_y = y.detach().requires_grad_()
        o = t(ref_x, ref_y)
        o.backward(grad)
        t_jit = torch.jit.script(t)
        # warm-up: the profiling executor needs several fwd/bwd runs before it
        # emits the optimized (fused) plan
        jit_o = t_jit(x, y)
        jit_o.backward(grad)
        jit_o = t_jit(x, y)
        jit_o.backward(grad)
        jit_o = t_jit(x, y)
        jit_o.backward(grad)
        # reset accumulated grads so the final run compares 1:1 against eager
        x.grad.zero_()
        y.grad.zero_()
        jit_o = t_jit(x, y)
        jit_o.backward(grad)
        self.assertEqual(o.dtype, jit_o.dtype)
        self.assertEqual(ref_x.grad, x.grad)
        self.assertEqual(ref_y.grad, y.grad)
        self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-3))
        self.assertGraphContainsExactly(t_jit.graph_for(x, y), FUSION_GUARD, 1, consider_subgraphs=True)
        # dig the backward graph out of the executor's debug state and verify
        # it contains a fusion guard too
        bwd_graph = list(
            list(t_jit.get_debug_state().execution_plans.values())[
                0].code.grad_executor_states()[0].execution_plans.values()
        )[0].graph
        FileCheck().check(FUSION_GUARD).run(bwd_graph)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test__softmax_function(self):
def t(x: torch.Tensor, y: torch.Tensor):
o = torch.mul(x, y)
o = torch._softmax(o, dim=-1, half_to_float=False)
return o
x = torch.randn([4, 4], dtype=torch.float16, device="cuda")
y = torch.randn_like(x)
o = t(x, y)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-3))
self.assertGraphContainsExactly(t_jit.graph_for(x, y), FUSION_GUARD, 1, consider_subgraphs=True)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test__softmax_function_half_to_float(self):
def t(x: torch.Tensor, y: torch.Tensor):
o = torch.mul(x, y)
o = torch._softmax(o, dim=-1, half_to_float=True)
return o
x = torch.randn([4, 4], dtype=torch.float16, device="cuda")
y = torch.randn_like(x)
o = t(x, y)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-3))
self.assertGraphContainsExactly(t_jit.graph_for(x, y), FUSION_GUARD, 1, consider_subgraphs=True)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_softmax(self):
output_size = 10000
dims = 4
output_size = int(pow(output_size, 1. / dims))
reduction_sizes = [67, 256, 1024, 4096]
# gradient check
for reduction_dim in range(dims):
for is_log_softmax in [False, True]:
shape = [output_size for idx in range(dims)]
self._softmax_helper(shape, reduction_dim, is_log_softmax, torch.float64, "cuda", 1e-4)
for reduction_dim in range(dims):
for reduction_size in reduction_sizes:
x = [output_size for idx in range(dims)]
x[reduction_dim] = reduction_size
for is_log_softmax in [False, True]:
self._softmax_helper(x, reduction_dim, is_log_softmax, torch.float32, "cuda", 1e-4)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_softmax_half(self):
output_size = 10000
dims = 4
output_size = int(pow(output_size, 1. / dims))
reduction_sizes = [67, 256, 1024, 4096]
for reduction_dim in range(dims):
for reduction_size in reduction_sizes:
x = [output_size for idx in range(dims)]
x[reduction_dim] = reduction_size
for is_log_softmax in [False, True]:
self._softmax_helper(x, reduction_dim, is_log_softmax, torch.float16, "cuda", 5e-3)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_softmax_bfloat(self):
output_size = 10000
dims = 4
output_size = int(pow(output_size, 1. / dims))
reduction_sizes = [67, 256, 1024, 4096]
for reduction_dim in range(dims):
for reduction_size in reduction_sizes:
x = [output_size for idx in range(dims)]
x[reduction_dim] = reduction_size
for is_log_softmax in [False, True]:
self._softmax_helper(x, reduction_dim, is_log_softmax, torch.bfloat16, "cuda", 1e-1)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_reduction_permutation(self):
x = [7, 8, 12]
# note that num_dim is exclusive from len(x), so we are not reducing
# to single element (codegen limitation at this moment)
for num_reduce_dim in range(1, len(x)):
for axes in itertools.combinations(range(len(x)), num_reduce_dim):
for perm0 in itertools.permutations(range(len(x))):
for perm1 in itertools.permutations(range(len(x))):
self._reduction_helper(x, axes, torch.float32, "cuda", perm0, perm1)
    @unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
    @unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                     "Requires fusion optimization pass to be effective")
    def test_reduction_multiple_output(self):
        """A single fusion producing both a pointwise output and a reduction
        output should match eager for contiguous and channels_last inputs."""
        # guard mode is saved and restored at the end of the test
        # NOTE(review): bailout depth is changed but not restored — confirm intended
        old_guard = torch._C._jit_set_nvfuser_guard_mode(True)
        torch._C._jit_set_bailout_depth(20)
        def t(x: torch.Tensor, y: torch.Tensor, scale: float, z: torch.Tensor):
            o = torch.mul(x, y)
            o = torch.mul(o, scale)
            out1 = torch.mul(o, z)
            out2 = torch.sum(out1, dim=[2])
            return out1, out2
        t_jit = torch.jit.script(t)
        x = torch.randn(8, 4, 10, 16, dtype=torch.float, device="cuda")
        y = torch.randn(8, 4, 10, 16, dtype=torch.float, device="cuda")
        z = torch.randn(8, 4, 10, 16, dtype=torch.float, device="cuda")
        scale = 0.5
        # two runs: profile first, then execute the optimized graph
        jit_o = t_jit(x, y, scale, z)
        jit_o = t_jit(x, y, scale, z)
        o = t(x, y, scale, z)
        for oo, jit_oo in zip(o, jit_o):
            self.assertEqual(oo.dtype, jit_oo.dtype)
            self.assertEqual(oo, jit_oo)
        self.assertGraphContains(t_jit.graph_for(x, y, scale, z), FUSION_GUARD)
        # repeat with channels_last inputs to exercise permuted layouts
        x = x.to(memory_format=torch.channels_last)
        y = y.to(memory_format=torch.channels_last)
        z = z.to(memory_format=torch.channels_last)
        jit_o = t_jit(x, y, scale, z)
        jit_o = t_jit(x, y, scale, z)
        o = t(x, y, scale, z)
        for oo, jit_oo in zip(o, jit_o):
            self.assertEqual(oo.dtype, jit_oo.dtype)
            self.assertEqual(oo, jit_oo)
        self.assertGraphContains(t_jit.graph_for(x, y, scale, z), FUSION_GUARD)
        torch._C._jit_set_nvfuser_guard_mode(old_guard)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_channels_last_with_broadcast(self):
# setting this true forces a new graph to be generated with a new
# input a different broadcast shape
torch._C._jit_set_nvfuser_guard_mode(True)
def t(x: torch.Tensor, y: torch.Tensor):
o = torch.mul(x, y)
o = o + 2.0
return o
t_jit = torch.jit.script(t)
# Single Channel broadcasts
# Test 1
x = torch.randn(8, 4, 10, 16, dtype=torch.float, device="cuda")
x = x.to(memory_format=torch.channels_last)
y = torch.randn(8, 4, 10, 1, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
# Test 2
y = torch.randn(8, 4, 1, 16, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
# Test 3
y = torch.randn(8, 1, 10, 16, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
# Test 3
y = torch.randn(1, 4, 10, 16, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
'''
Currently, the JIT doesn't have tensor merge logic to handle adding
a broadcast tensor with more than one broadcast into a non-broadcast
tensor. Therefore, either of these tests can fail depending on the
sort implementation. The second test is known to fail.
# Two Channel broadcasts
# Test 1
y = torch.randn(8, 4, 1, 1, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
# Test 2
y = torch.randn(8, 4, 1, 1, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last).transpose(2,3)
x = x.transpose(2,3)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
'''
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_pw_single_reduction_partition(self):
sizes = [2, 2, 2]
dtype = torch.float
device = "cuda"
x = torch.randn(sizes, dtype=dtype, device=device)
y = torch.randn(sizes, dtype=dtype, device=device)
z = torch.randn(sizes, dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = torch.add(x, y)
o = torch.sum(o, dim=[0])
o = torch.add(o, z)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_permutation_preservation(self):
sizes = [2, 3, 4, 5]
dtype = torch.float
device = "cuda"
with nvfuser_singleton_fusion(True):
def t(x: torch.Tensor):
return torch.relu(x)
t_jit = torch.jit.script(t)
x = torch.randn(sizes, dtype=dtype, device=device).to(memory_format=torch.channels_last)
self._run_helper(t_jit, t, x, check_stride=True)
def t(x: torch.Tensor, y: torch.Tensor):
return torch.add(x, y)
t_jit = torch.jit.script(t)
x = torch.randn(sizes, dtype=dtype, device=device).to(memory_format=torch.channels_last)
y = torch.randn(sizes[1:], dtype=dtype, device=device)
self._run_helper(t_jit, t, x, y, check_stride=True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_permutation_preservation_edge_case_0(self):
sizes = [2, 3, 4, 5]
dtype = torch.float
device = "cuda"
x = torch.randn(sizes, dtype=dtype, device=device).to(memory_format=torch.channels_last)
# mismatch rank with *note* different permutation recognized by PE
bias = torch.randn(3, dtype=dtype, device=device).unsqueeze(-1).unsqueeze(-1)
def t(x, y):
return x + y
t_jit = torch.jit.script(t)
with nvfuser_singleton_fusion(True):
self._run_helper(t_jit, t, x, bias, check_stride=True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_permutation_preservation_edge_case_1_broken(self):
sizes = [2, 3, 4, 5]
dtype = torch.float
device = "cuda"
x = torch.randn(sizes, dtype=dtype, device=device).to(memory_format=torch.channels_last)
# in-compatible permutation, this will cause format propagation to break
bias = torch.randn(4, 5, dtype=dtype, device=device)
def t(x, y):
return x + y
t_jit = torch.jit.script(t)
with nvfuser_singleton_fusion(True):
for _ in range(5):
jit_o = t_jit(x, bias)
o = t(x, bias)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
try:
# nvfuser does not support in-compatible permutation, this will throw
self.assertEqual(o.stride(), jit_o.stride())
except Exception as e:
warnings.warn(
"permutation propagation is broken, proper support should come after nvfuser permutation scheduler update")
self.assertGraphContains(t_jit.graph_for(x, bias), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_permutation_preservation_edge_case_2(self):
sizes = [2, 3, 4, 5]
dtype = torch.float
device = "cuda"
x = torch.randn(sizes, dtype=dtype, device=device).to(memory_format=torch.channels_last)
y = torch.randn(sizes, dtype=dtype, device=device).to(memory_format=torch.channels_last)
z = torch.randn(sizes, dtype=dtype, device=device).to(memory_format=torch.channels_last)
def t(x, y, w):
tmp = torch.lerp(x, y, w)
tmp = torch.clamp(tmp, -1.0, 0.5)
tmp = torch.nn.functional.softplus(tmp)
return torch.threshold(tmp, -2.0, 0.5)
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, y, z, check_stride=True)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_normalization_partition(self):
sizes = [3, 8, 5]
dtype = torch.float
device = "cuda"
x = torch.randn(sizes, dtype=dtype, device=device)
y = torch.randn(sizes, dtype=dtype, device=device)
z = torch.randn(sizes, dtype=dtype, device=device)
r_m = torch.randn(8, dtype=dtype, device=device)
r_v = torch.randn(8, dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, r_mean: torch.Tensor, r_var: torch.Tensor):
o = torch.add(x, y)
o = torch.nn.functional.softmax(o, dim=0)
o = torch.add(o, z)
o = torch.nn.functional.batch_norm(o, r_mean, r_var, training=True)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, z, r_m, r_v)
jit_o = t_jit(x, y, z, r_m, r_v)
o = t(x, y, z, r_m, r_v)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, z, r_m, r_v), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_sum_to_one(self):
dtype = torch.float
device = "cuda"
x = torch.randn([4, 5, 6], dtype=dtype, device=device)
def t(x: torch.Tensor):
o = torch.add(x, 1)
o = torch.sum(o, dim=[0, 1, 2])
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_single_reduction_broadcast(self):
dtype = torch.float
device = "cuda"
x = torch.randn([7, 4, 8], dtype=dtype, device=device)
y = torch.randn([4, 8], dtype=dtype, device=device)
z = torch.randn([1, 4, 8], dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = torch.add(x, y)
o = torch.add(o, z)
o = torch.sum(o, dim=[0])
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_trivial_reduction(self):
dtype = torch.float
device = "cuda"
x = torch.randn([1, 4, 8], dtype=dtype, device=device)
def t(x: torch.Tensor):
o = torch.add(x, 1)
o = torch.sum(o, dim=[0])
o = torch.sum(o, dim=[0])
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_profiling_node(self):
dtype = torch.float
device = "cuda"
x = torch.randn(4, 8, 8, 8, dtype=dtype, device=device)
def repro(x: torch.Tensor, alpha: float):
o = torch.rand_like(x)
o = torch.add(o, alpha)
return o
repro_jit = torch.jit.script(repro)
self._run_helper(repro_jit, repro, x, 0.6)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_rand_like(self):
dtype = torch.float
device = "cuda"
def t(x: torch.Tensor, alpha: float):
o = torch.rand_like(x)
o = torch.add(o, alpha)
return o
# disabling cache so new inputs would generate new graph
t.__disable_jit_function_caching__ = True
for m_format in [torch.contiguous_format, torch.channels_last]:
x = torch.randn(4, 5, 6, 7, dtype=dtype, device=device).to(memory_format=m_format)
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, 0.6, check_stride=True)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_reduction_sizes_op(self):
dtype = torch.float
device = "cuda"
x = torch.randn(2, 3, 4, 5, dtype=dtype, device=device)
y = torch.randn(2, 3, 4, 5, dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor):
o = x + y
o = torch.relu(o)
o = o.sum((1, 3))
return o.size()
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o, jit_o)
# since the output value is not used at all, the fusion operator should
# have been optimized away
self.assertGraphContainsExactly(t_jit.graph_for(x, y), FUSION_GUARD, 0)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_profile_ivalue(self):
dtype = torch.float
device = "cuda"
x = torch.randn([7, 4, 7], dtype=dtype, device=device)
y = torch.randn([7, 4, 7], dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor, dim: List[int], keepdim: bool):
o = torch.add(x, y)
o = o.sum(dim, keepdim=keepdim)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, (0, 1), False)
jit_o = t_jit(x, y, (0, 1), False)
o = t(x, y, (0, 1), False)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, (0, 1), False), FUSION_GUARD)
    @unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
    @unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                     "Requires fusion optimization pass to be effective")
    def test_profile_ivalue_multiple_profiles(self):
        """An ivalue (reduction axis) that changes every iteration, combined with
        an in-place update of the input, should disable fusion (num_fusion=0)."""
        dtype = torch.float
        device = "cuda"
        x = torch.randn([7, 4, 7], dtype=dtype, device=device)
        def t(x, num: int):
            for i in range(num):
                # varying reduction axes should break profile_ivalue
                tmp = x.sum(i, keepdim=True)
                # inplace add on input/output, can't be functionalized/fused
                x += tmp
            return x
        with nvfuser_singleton_fusion(True):
            t_jit = torch.jit.script(t)
            # expect zero fusion groups for this graph
            self._run_helper(t_jit, t, x, 3, num_fusion=0)
    @unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
    @unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                     "Requires fusion optimization pass to be effective")
    def test_sum_to_size(self):
        """sum_to_size should fuse, handle changed input shapes without
        recompilation, and work as a no-op reduction when the target size
        matches the input size."""
        dtype = torch.float
        device = "cuda"
        x = torch.randn([2, 4, 4], dtype=dtype, device=device)
        y = torch.randn([2, 4, 4], dtype=dtype, device=device)
        def t(x: torch.Tensor, y: torch.Tensor, new_size: List[int]):
            o = torch.add(x, y)
            o = o.sum_to_size(new_size)
            return o
        t_jit = torch.jit.script(t)
        self._run_helper(t_jit, t, x, y, (4, 1))
        # update shape: old kernel should handle dynamic shape well without
        # recompilation
        x = torch.randn([2, 5, 8], dtype=dtype, device=device)
        y = torch.randn([2, 5, 8], dtype=dtype, device=device)
        # (TODO) check executed kernel, should extend autograd.profiler to fused
        # kernels
        self._run_helper(t_jit, t, x, y, (5, 1))
        # `t` is deliberately redefined here: a no-op sum_to_size whose target
        # equals the input size
        with nvfuser_singleton_fusion(True):
            x = torch.randn([2, 5, 8], dtype=dtype, device=device)
            def t(x: torch.Tensor):
                # no-op reduction
                return x.sum_to_size((2, 5, 8))
            t_jit = torch.jit.script(t)
            self._run_helper(t_jit, t, x)
    @unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
    @unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                     "Requires fusion optimization pass to be effective")
    def test_grad_sum_to_size(self):
        """Backward of a broadcasting add (implicit grad sum-to-size) should
        fuse, match eager grads, and tolerate a later input-shape change."""
        dtype = torch.float
        device = "cuda"
        x = torch.randn([2, 4, 4], dtype=dtype, device=device).requires_grad_()
        y = torch.randn([4], dtype=dtype, device=device).requires_grad_()
        grad = torch.randn([2, 4, 4], dtype=dtype, device=device)
        # detached clones for the eager-mode reference run
        ref_x = x.detach().clone().requires_grad_()
        ref_y = y.detach().clone().requires_grad_()
        def t(x: torch.Tensor, y: torch.Tensor):
            o = torch.add(x, y)
            o = torch.relu(o)
            return o
        # profiling runs for forward & backward
        t_jit = torch.jit.script(t)
        jit_o = t_jit(x, y)
        jit_o.backward(grad)
        jit_o = t_jit(x, y)
        jit_o.backward(grad)
        # clear accumulated grads so the final run compares 1:1 with eager
        x.grad = None
        y.grad = None
        jit_o = t_jit(x, y)
        jit_o.backward(grad)
        o = t(ref_x, ref_y)
        o.backward(grad)
        self.assertEqual(o.dtype, jit_o.dtype)
        self.assertEqual(o, jit_o)
        self.assertEqual(x.grad, ref_x.grad)
        self.assertEqual(y.grad, ref_y.grad)
        # dig the backward graph out of the executor state and verify fusion
        bwd_graph = list(
            list(t_jit.get_debug_state().execution_plans.values())[
                0].code.grad_executor_states()[0].execution_plans.values()
        )[0].graph
        FileCheck().check(FUSION_GUARD).run(bwd_graph)
        # update shape: old kernel should handle dynamic shape well without
        # recompilation
        x = torch.randn([2, 5, 8], dtype=dtype, device=device).requires_grad_()
        y = torch.randn([8], dtype=dtype, device=device).requires_grad_()
        ref_x = x.detach().clone().requires_grad_()
        ref_y = y.detach().clone().requires_grad_()
        grad = torch.randn([2, 5, 8], dtype=dtype, device=device)
        jit_o = t_jit(x, y)
        # (TODO) check executed kernel, should extend autograd.profiler to fused
        # kernels
        jit_o.backward(grad)
        o = t(ref_x, ref_y)
        o.backward(grad)
        self.assertEqual(o.dtype, jit_o.dtype)
        self.assertEqual(o, jit_o)
        self.assertEqual(x.grad, ref_x.grad)
        self.assertEqual(y.grad, ref_y.grad)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_dropout_inference_fusion(self):
dtype = torch.float
device = "cuda"
x = torch.randn([10, 4, 8], dtype=dtype, device=device)
def t(x: torch.Tensor, p: float, train: bool):
o = torch.nn.functional.dropout(x, p, training=train)
o = o + 1.0
return o
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, 0.15, False)
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_dropout_train_nograd_fusion(self):
dtype = torch.float
device = "cuda"
x = torch.randn([64, 128, 1024], dtype=dtype, device=device)
def t(x: torch.Tensor, p: float, train: bool):
o = torch.nn.functional.dropout(x, p, training=train)
o = o + 1.0
return o
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, 0.0, True, check_runs=20)
self._run_helper(t_jit, t, x, 1.0, True, check_runs=20)
    @unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                     "Requires fusion optimization pass to be effective")
    def test_dropout_train_nograd_prob_check(self):
        """Fused training-mode dropout (no grad) should zero elements at a rate
        within 1% of the requested probability."""
        dtype = torch.float
        device = "cuda"
        x = torch.randn([1024, 1024], dtype=dtype, device=device)
        def t(x: torch.Tensor, p: float, train: bool):
            o = torch.nn.functional.dropout(x, p, training=train)
            o = o * 2.0
            return o
        t_jit = torch.jit.script(t)
        for prob in [0.0, 0.15, 0.5, 0.85, 1.]:
            # reseed before each run so profiling and optimized executions draw
            # the same random mask
            torch.cuda.manual_seed_all(123)
            jit_o = t_jit(x, prob, True)
            torch.cuda.manual_seed_all(123)
            jit_o = t_jit(x, prob, True)
            self.assertTrue(jit_o.detach().isfinite().all().item())
            # empirical drop rate must sit within +/-1% of the requested prob
            num_elems = x.numel()
            num_zeros = num_elems - jit_o.detach().count_nonzero().item()
            percent_zeros = num_zeros / num_elems
            self.assertTrue((percent_zeros >= (prob - 0.01)) and (percent_zeros <= (prob + 0.01)))
        self.assertGraphContainsExactly(t_jit.graph_for(x, prob, True), FUSION_GUARD, 1, consider_subgraphs=True)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_dropout_training_fusion(self):
dtype = torch.float
device = "cuda"
sizes = [2, 3, 4, 5]
def t(x: torch.Tensor, p: float, train: bool):
o = torch.nn.functional.dropout(x, p, training=train)
o = o * 2.0
return o
def t2(x: torch.Tensor, p: float, train: bool):
o = torch.nn.functional.softmax(x, dim=-1)
o = torch.nn.functional.dropout(o, p, training=train)
return o
# disabling cache so new inputs would generate new graph
t.__disable_jit_function_caching__ = True
t2.__disable_jit_function_caching__ = True
for fn in [t, t2]:
for m_format in [torch.contiguous_format, torch.channels_last]:
fn_jit = torch.jit.script(fn)
x = torch.randn(sizes, dtype=dtype, device=device, requires_grad=True).to(memory_format=m_format)
grads = torch.randn(sizes, dtype=dtype, device=device).to(memory_format=m_format)
# The drop probability needs to be set to zero given that the order of picking random
# numbers between eager mode and the jit is different
self._run_training_helper(fn_jit, fn, grads, x, 0.0, True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_gelu(self):
old_guard = torch._C._jit_set_nvfuser_guard_mode(True)
dtype = torch.float
device = "cuda"
x = torch.randn([1024, 1024], dtype=dtype, device=device, requires_grad=True)
grads = torch.randn([1024, 1024], dtype=dtype, device=device, requires_grad=False)
def t(x: torch.Tensor, mode: str):
o = torch.nn.functional.gelu(x, approximate=mode)
o = o * 2.0
return o
t_jit = torch.jit.script(t)
self._run_training_helper(t_jit, t, grads, x, 'none')
self._run_training_helper(t_jit, t, grads, x, 'tanh')
torch._C._jit_set_nvfuser_guard_mode(old_guard)
    @unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                     "Requires fusion optimization pass to be effective")
    def test_dropout_training_prob_check(self):
        """Fused training-mode dropout (autograd-enabled input) should zero
        elements at a rate within 1% of the requested probability."""
        dtype = torch.float
        device = "cuda"
        x = torch.randn([1024, 1024], dtype=dtype, device=device, requires_grad=True)
        # NOTE(review): x_nograd appears unused here — confirm against full file
        x_nograd = torch.randn([1024, 1024], dtype=dtype, device=device)
        def t(x: torch.Tensor, p: float, train: bool):
            o = torch.nn.functional.dropout(x, p, training=train)
            o = o * 2.0
            return o
        t_jit = torch.jit.script(t)
        for prob in [0.0, 0.15, 0.5, 0.85, 1.]:
            # reseed before every run so profiling and optimized executions all
            # draw the identical random mask
            torch.cuda.manual_seed_all(123)
            jit_o = t_jit(x, prob, True)
            torch.cuda.manual_seed_all(123)
            jit_o = t_jit(x, prob, True)
            torch.cuda.manual_seed_all(123)
            jit_o = t_jit(x, prob, True)
            self.assertTrue(jit_o.detach().isfinite().all().item())
            # empirical drop rate must sit within +/-1% of the requested prob
            num_elems = x.numel()
            num_zeros = num_elems - jit_o.detach().count_nonzero().item()
            percent_zeros = num_zeros / num_elems
            self.assertTrue((percent_zeros >= (prob - 0.01)) and (percent_zeros <= (prob + 0.01)))
        self.assertGraphContainsExactly(t_jit.graph_for(x, prob, True), FUSION_GUARD, 1, consider_subgraphs=True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_linear(self):
in_feature = 2
out_feature = 8
# Changing the input dims to be 3-D to avoid eager mode bias fusion
# The bias fusion causes some precision issues with TF-32
weight = torch.randn(out_feature, in_feature, dtype=torch.float32, device='cuda')
bias = torch.randn(out_feature, dtype=torch.float32, device='cuda')
def t(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor):
o = torch.nn.functional.linear(x, weight, bias)
o = torch.relu(o)
return o
# disabling cache so new inputs would generate new graph
t.__disable_jit_function_caching__ = True
sizes = [in_feature, ]
for i in range(4):
# increase input rank in each iteration
sizes.insert(0, i + 2)
x = torch.randn(*sizes, dtype=torch.float32, device='cuda')
t_jit = torch.jit.script(t)
# fusion only happens for input rank >= 4
has_fusion = 0 if len(sizes) < 4 else 1
self._run_helper(t_jit, t, x, weight, bias, check_stride=True, num_fusion=has_fusion)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_linear_symbolic_shapes(self):
    """Smoke test: a scripted linear whose shapes vary across loop iterations
    must execute without error (no output/fusion assertions here)."""
    def fn(x: int):
        y = torch.zeros((3, 4, x, x + 2)).cuda()
        for i in range(2):
            inp = torch.rand((3, 4, x, x + i)).cuda()
            weight = torch.rand((x + 2, x + i)).cuda()
            bias = torch.rand((x, x + 2)).cuda()
            y += torch.sin(torch.nn.functional.linear(inp, weight, bias))
        return y

    fn_s = torch.jit.script(fn)
    # run twice: first call profiles, second runs the optimized plan
    fn_s(5)
    fn_s(5)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_conv2d_symbolic_shapes(self):
    """Smoke test: scripted conv2d with loop-varying channel counts must
    execute without error (no output/fusion assertions here)."""
    def fn(x: int):
        responses = []
        for i in range(2):
            inp = torch.rand((3, 3, 32, 32)).cuda()
            weight = torch.rand((x + i, 3, 7, 7)).cuda()
            bias = torch.rand((x + i)).cuda()
            res = torch.nn.functional.conv2d(inp, weight, bias, padding=3)
            responses.append(res)
        return responses

    fn_s = torch.jit.script(fn)
    # run twice: first call profiles, second runs the optimized plan
    fn_s(5)
    fn_s(5)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_backward_type(self):
    """Gradients flowing through a fused backward graph must keep the dtype
    of their corresponding inputs, for mixed-precision input pairs."""
    # not super useful to check gradient of integer/bool, so skipping here
    type_pairs = [
        (torch.float, torch.half),
        (torch.double, torch.half),
        (torch.float, torch.double),
    ]
    if TEST_BF16:
        type_pairs += [
            (torch.float, torch.bfloat16),
            (torch.double, torch.bfloat16),
        ]
    for x_type, y_type in type_pairs:
        x = torch.randn(4, 2, dtype=x_type, device='cuda', requires_grad=True)
        y = torch.randn(4, 2, dtype=y_type, device='cuda', requires_grad=True)
        grad = torch.randn(4, 2, dtype=torch.float, device='cuda')

        def test1(x: torch.Tensor, y: torch.Tensor):
            o = torch.add(x, y)
            o = torch.add(o, y)
            o = torch.add(o, y)
            o = torch.add(o, y)
            o = o + 1.0
            return o

        test1_jit = torch.jit.script(test1)
        # three runs so profiling completes and the optimized plan exists
        for i in range(3):
            jit_o = test1_jit(x, y)
            jit_o.backward(grad)
        # dig the backward graph out of the grad executor of the optimized plan
        bwd_graph = list(
            list(test1_jit.get_debug_state().execution_plans.values())[
                0].code.grad_executor_states()[0].execution_plans.values()
        )[0].graph
        FileCheck().check(FUSION_GROUP).run(bwd_graph)
        self.assertEqual(x.grad.dtype, x.dtype)
        self.assertEqual(y.grad.dtype, y.dtype)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_autocast_1(self):
    """Under fp16 autocast, a softmax + linear script must produce a half
    output, fuse forward and backward, and keep grad dtypes matching inputs."""
    def t(x: torch.Tensor, y: torch.Tensor):
        o = x * 2.0
        o = torch.softmax(o, dim=-1)
        o = o * 3.0
        o = torch._C._nn.linear(o, y)
        return o

    x = torch.randn(8, 4, dtype=torch.half, device='cuda', requires_grad=True)
    y = torch.randn(4, 4, dtype=torch.float, device='cuda', requires_grad=True)
    grad = torch.randn(8, 4, dtype=torch.half, device='cuda', requires_grad=False)
    t_jit = torch.jit.script(t)
    for i in range(3):
        with torch.cuda.amp.autocast():
            jit_o = t_jit(x, y)
            # capture the forward graph on the last (optimized) iteration
            if i == 2:
                fwd_graph = t_jit.graph_for(x, y)
        jit_o.backward(grad)
    self.assertGraphContainsExactly(fwd_graph, FUSION_GUARD, 1, consider_subgraphs=True)
    with torch.cuda.amp.autocast():
        # backward graph lives in the grad executor of the optimized plan
        bwd_graph = list(
            list(t_jit.get_debug_state().execution_plans.values())[
                0].code.grad_executor_states()[0].execution_plans.values()
        )[0].graph
        FileCheck().check(FUSION_GROUP).run(bwd_graph)
    self.assertEqual(jit_o.dtype, torch.half)
    self.assertEqual(x.grad.dtype, x.dtype)
    self.assertEqual(y.grad.dtype, y.dtype)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_autocast_2(self):
    """Under fp16 autocast, a softmax-only chain (half input) must produce a
    float output, fuse forward and backward, and keep x.grad at x's dtype."""
    def t(x: torch.Tensor):
        o = x * 2.0
        o = torch.softmax(o, dim=-1)
        o = o * 3.0
        o = torch.softmax(o, dim=-1)
        o = o * 4.0
        return o

    x = torch.randn(8, 4, dtype=torch.half, device='cuda', requires_grad=True)
    grad = torch.randn(8, 4, dtype=torch.float, device='cuda', requires_grad=False)
    t_jit = torch.jit.script(t)
    for i in range(3):
        with torch.cuda.amp.autocast():
            jit_o = t_jit(x)
            # capture the forward graph on the last (optimized) iteration
            if i == 2:
                fwd_graph = t_jit.graph_for(x)
        jit_o.backward(grad)
    self.assertGraphContainsExactly(fwd_graph, FUSION_GUARD, 1, consider_subgraphs=True)
    with torch.cuda.amp.autocast():
        # backward graph lives in the grad executor of the optimized plan
        bwd_graph = list(
            list(t_jit.get_debug_state().execution_plans.values())[
                0].code.grad_executor_states()[0].execution_plans.values()
        )[0].graph
        FileCheck().check(FUSION_GROUP).run(bwd_graph)
    self.assertEqual(jit_o.dtype, torch.float)
    self.assertEqual(x.grad.dtype, x.dtype)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_autocast_1_bfloat(self):
    """bfloat16 variant of test_autocast_1: softmax + linear under bf16
    autocast must yield a bf16 output, fuse fwd/bwd, and preserve grad dtypes."""
    def t(x: torch.Tensor, y: torch.Tensor):
        o = x * 2.0
        o = torch.softmax(o, dim=-1)
        o = o * 3.0
        o = torch._C._nn.linear(o, y)
        return o

    x = torch.randn(8, 4, dtype=torch.bfloat16, device='cuda', requires_grad=True)
    y = torch.randn(4, 4, dtype=torch.float, device='cuda', requires_grad=True)
    grad = torch.randn(8, 4, dtype=torch.bfloat16, device='cuda', requires_grad=False)
    t_jit = torch.jit.script(t)
    for i in range(3):
        with torch.cuda.amp.autocast(dtype=torch.bfloat16):
            jit_o = t_jit(x, y)
            # capture the forward graph on the last (optimized) iteration
            if i == 2:
                fwd_graph = t_jit.graph_for(x, y)
        jit_o.backward(grad)
    self.assertGraphContainsExactly(fwd_graph, FUSION_GUARD, 1, consider_subgraphs=True)
    with torch.cuda.amp.autocast(dtype=torch.bfloat16):
        # backward graph lives in the grad executor of the optimized plan
        bwd_graph = list(
            list(t_jit.get_debug_state().execution_plans.values())[
                0].code.grad_executor_states()[0].execution_plans.values()
        )[0].graph
        FileCheck().check(FUSION_GROUP).run(bwd_graph)
    self.assertEqual(jit_o.dtype, torch.bfloat16)
    self.assertEqual(x.grad.dtype, x.dtype)
    self.assertEqual(y.grad.dtype, y.dtype)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_autocast_2_bfloat(self):
    """bfloat16 variant of test_autocast_2: double-softmax chain under bf16
    autocast must yield a float output, fuse fwd/bwd, and preserve x.grad dtype."""
    def t(x: torch.Tensor):
        o = x * 2.0
        o = torch.softmax(o, dim=-1)
        o = o * 3.0
        o = torch.softmax(o, dim=-1)
        o = o * 4.0
        return o

    x = torch.randn(8, 4, dtype=torch.bfloat16, device='cuda', requires_grad=True)
    grad = torch.randn(8, 4, dtype=torch.float, device='cuda', requires_grad=False)
    t_jit = torch.jit.script(t)
    for i in range(3):
        with torch.cuda.amp.autocast(dtype=torch.bfloat16):
            jit_o = t_jit(x)
            # capture the forward graph on the last (optimized) iteration
            if i == 2:
                fwd_graph = t_jit.graph_for(x)
        jit_o.backward(grad)
    self.assertGraphContainsExactly(fwd_graph, FUSION_GUARD, 1, consider_subgraphs=True)
    with torch.cuda.amp.autocast(dtype=torch.bfloat16):
        # backward graph lives in the grad executor of the optimized plan
        bwd_graph = list(
            list(t_jit.get_debug_state().execution_plans.values())[
                0].code.grad_executor_states()[0].execution_plans.values()
        )[0].graph
        FileCheck().check(FUSION_GROUP).run(bwd_graph)
    self.assertEqual(jit_o.dtype, torch.float)
    self.assertEqual(x.grad.dtype, x.dtype)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_to_dtype_fp32_to_fp16(self):
    """A fp32 -> fp16 cast sandwiched between pointwise ops must fuse into a
    single guarded group and produce a half-precision output."""
    def t(x: torch.Tensor):
        o = x * 2.0
        o = o.to(dtype=torch.half)
        o = o * 3.0
        return o

    x = torch.randn(8, 4, dtype=torch.float, device='cuda')
    t_jit = torch.jit.script(t)
    # three runs so profiling completes before checking the graph
    for i in range(3):
        jit_o = t_jit(x)
    self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
    self.assertEqual(jit_o.dtype, torch.half)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_to_dtype_fp16_to_fp32(self):
    """A fp16 -> fp32 cast sandwiched between pointwise ops must fuse into a
    single guarded group and produce a float output."""
    def t(x: torch.Tensor):
        o = x * 2.0
        o = o.to(dtype=torch.float)
        o = o * 3.0
        return o

    x = torch.randn(8, 4, dtype=torch.half, device='cuda')
    t_jit = torch.jit.script(t)
    # three runs so profiling completes before checking the graph
    for i in range(3):
        jit_o = t_jit(x)
    self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
    self.assertEqual(jit_o.dtype, torch.float)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_to_dtype_fp16_to_fp16(self):
    """A no-op fp16 -> fp16 cast between pointwise ops must still fuse and
    keep the half output dtype."""
    def t(x: torch.Tensor):
        o = x * 2.0
        o = o.to(dtype=torch.half)
        o = o * 3.0
        return o

    x = torch.randn(8, 4, dtype=torch.half, device='cuda')
    t_jit = torch.jit.script(t)
    # three runs so profiling completes before checking the graph
    for i in range(3):
        jit_o = t_jit(x)
    self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
    self.assertEqual(jit_o.dtype, torch.half)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_to_dtype_fp32_to_bf16(self):
    """A fp32 -> bf16 cast sandwiched between pointwise ops must fuse into a
    single guarded group and produce a bfloat16 output."""
    def t(x: torch.Tensor):
        o = x * 2.0
        o = o.to(dtype=torch.bfloat16)
        o = o * 3.0
        return o

    x = torch.randn(8, 4, dtype=torch.float, device='cuda')
    t_jit = torch.jit.script(t)
    # three runs so profiling completes before checking the graph
    for i in range(3):
        jit_o = t_jit(x)
    self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
    self.assertEqual(jit_o.dtype, torch.bfloat16)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_to_dtype_bf16_to_fp32(self):
    """A bf16 -> fp32 cast sandwiched between pointwise ops must fuse into a
    single guarded group and produce a float output."""
    def t(x: torch.Tensor):
        o = x * 2.0
        o = o.to(dtype=torch.float)
        o = o * 3.0
        return o

    x = torch.randn(8, 4, dtype=torch.bfloat16, device='cuda')
    t_jit = torch.jit.script(t)
    # three runs so profiling completes before checking the graph
    for i in range(3):
        jit_o = t_jit(x)
    self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
    self.assertEqual(jit_o.dtype, torch.float)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_to_dtype_bf16_to_bf16(self):
    """A no-op bf16 -> bf16 cast between pointwise ops must still fuse and
    keep the bfloat16 output dtype."""
    def t(x: torch.Tensor):
        o = x * 2.0
        o = o.to(dtype=torch.bfloat16)
        o = o * 3.0
        return o

    x = torch.randn(8, 4, dtype=torch.bfloat16, device='cuda')
    t_jit = torch.jit.script(t)
    # three runs so profiling completes before checking the graph
    for i in range(3):
        jit_o = t_jit(x)
    self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
    self.assertEqual(jit_o.dtype, torch.bfloat16)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(not TEST_MULTIGPU, "requires multiple CUDA device")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_multiple_device_pw(self):
    """A scripted pointwise chain fused on device 0 must still run when the
    same input is moved to device 1 (recompilation/dispatch smoke test)."""
    def t(x):
        o = x + 1.0
        o = torch.relu(o)
        return o

    x = torch.randn(2, dtype=torch.float32, device="cuda")
    t_jit = torch.jit.script(t)
    # three runs so profiling completes before checking the graph
    for i in range(3):
        jit_o = t_jit(x)
    self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
    # BUGFIX: the original called `torch.cuda.device(1)` bare, which only
    # constructs a context manager and discards it (a no-op). Actually enter
    # the context so device 1 is current while the second-device run happens.
    with torch.cuda.device(1):
        x = x.to("cuda:1")
        jit_o = t_jit(x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_graph_for_with_missing_optimized_engine(self):
    """graph_for() must behave sanely when queried with an input pattern
    (flag=True) that was only executed once and may lack an optimized plan."""
    x = torch.randn(8, 4, 2, dtype=torch.float, device="cuda").requires_grad_()

    def t(x: torch.Tensor, flag: bool):
        x = x + 1.0
        x = torch.relu(x)
        if flag:
            o = x + 1.0
            o = torch.relu(o)
        else:
            o = x + 2.0
            o = torch.relu(o)
        return o

    t_jit = torch.jit.script(t)
    # profile twice on the False branch, then hit the True branch once
    jit_o = t_jit(x, False)
    jit_o = t_jit(x, False)
    jit_o = t_jit(x, True)
    o = t(x, True)
    self.assertEqual(o, jit_o)
    # expect exactly one fusion guard in the graph for the True-branch inputs
    # (counting subgraphs)
    self.assertGraphContainsExactly(t_jit.graph_for(x, True), FUSION_GUARD, 1, True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_branches(self):
    """A function with an if/else must match eager output on the taken branch
    and produce exactly one fusion guard for that branch's graph."""
    in_feature = 2
    out_feature = 4
    x = torch.randn(4, in_feature, dtype=torch.float32, device='cuda')
    weight = torch.randn(out_feature, in_feature, dtype=torch.float32, device='cuda')
    bias = torch.randn(out_feature, dtype=torch.float32, device='cuda')

    def t(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, flag: bool):
        if flag:
            o = torch.nn.functional.linear(x, weight, bias)
            o = o + 1.0
            o = torch.relu(o)
        else:
            o = x.sum()
            o = o + 2.0
            o = torch.relu(o)
        return o

    t_jit = torch.jit.script(t)
    # two runs so profiling completes on the True branch
    jit_o = t_jit(x, weight, bias, True)
    jit_o = t_jit(x, weight, bias, True)
    o = t(x, weight, bias, True)
    self.assertEqual(o, jit_o)
    # expect exactly one fusion guard for the profiled (True) branch
    self.assertGraphContainsExactly(t_jit.graph_for(x, weight, bias, True), FUSION_GUARD, 1)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_scalar_tensor(self):
    """A 0-dim (scalar) tensor through add+relu must match eager output and
    still produce exactly one fusion guard."""
    x = torch.empty([], device="cuda", dtype=torch.float32)

    def t(x: torch.Tensor):
        o = x + 1.0
        o = torch.nn.functional.relu(o)
        return o

    t_jit = torch.jit.script(t)
    # two runs so profiling completes before comparing with eager
    jit_o = t_jit(x)
    jit_o = t_jit(x)
    o = t(x)
    self.assertEqual(o, jit_o)
    # expect exactly one fusion guard even for the scalar-tensor shape
    self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
@unittest.skipIf(os.environ.get('PYTORCH_NO_CUDA_MEMORY_CACHING') is not None,
                 "skipping graph_rng when caching allocator is disabled")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(CUDA_MAJOR < 11, "requires CUDA11 or above")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_graph_rng(self):
    """CUDA-graph capture of a fused dropout kernel must advance RNG state on
    replay exactly like ungraphed execution, so interleaved graphed/ungraphed
    runs reproduce the ungraphed sequence."""
    self.assertTrue(torch._C._jit_nvfuser_enabled())
    size = 10000
    a = torch.randn((size,), device="cuda", dtype=torch.float)

    def t(x):
        o = x + 1.0
        o = torch.nn.functional.dropout(o, p=0.1)
        o = o + 1.0
        o = torch.nn.functional.dropout(o, p=0.1)
        return o

    t_jit = torch.jit.script(t)
    # warm up so the fused kernel is what gets captured below
    for _ in range(3):
        t_jit(a)
    self.assertGraphContainsExactly(t_jit.graph_for(a), FUSION_GUARD, 1)
    # Control (jitted, ungraphed)
    torch.cuda.manual_seed(5)
    eager_out = a.clone()
    for _ in range(3):
        eager_out = t_jit(eager_out)
    graph_in = a.clone()
    g = torch.cuda.CUDAGraph()
    s = torch.cuda.Stream()
    s.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s):
        # capture on a side stream with the same seed as the control run
        torch.cuda.manual_seed(5)
        g.capture_begin()
        graph_out = t_jit(graph_in)
        g.capture_end()
    torch.cuda.current_stream().wait_stream(s)
    # g is now a jitted, graphed version of t.
    # Runs a (jitted, graphed) -> (jitted, ungraphed) -> (jitted, graphed) sequence.
    # The ops in the overall sequence should be the same as Control.
    g.replay()
    # graph_out is now filled with g's result. Use it as ungraphed input.
    out = t_jit(graph_out)
    graph_in.copy_(out)
    g.replay()
    # If replay() updated RNG state correctly, graph_out should now equal eager_out
    self.assertEqual(graph_out, eager_out)
def _test_batch_norm_impl_index_helper(self, batch, c, hw, affine=True,
                                       track_running_stats=True, train=True,
                                       dtype=torch.float32):
    """Compare a scripted BatchNorm2d module against an eager reference.

    Runs a (batch, c, hw, hw) input through fused forward/backward, then
    checks output, input grad, affine param grads, and running stats against
    the eager module within dtype-dependent tolerances. Asserts exactly one
    fusion group in both forward and backward graphs.
    """
    # enabling inlining to avoid counter increment in BN forward
    torch._C._debug_set_autodiff_subgraph_inlining(True)

    class MyModule(torch.nn.Module):
        def __init__(self, num_features=10, affine=True, track_running_stats=True):
            super(MyModule, self).__init__()
            self.bn = torch.nn.BatchNorm2d(num_features,
                                           1e-5,
                                           affine=affine,
                                           track_running_stats=track_running_stats).to(dtype=dtype)

        def forward(self, x):
            o = self.bn(x)
            o = o * 2.0
            return o

    x = torch.randn(batch, c, hw, hw, dtype=torch.float, device="cuda").to(dtype=dtype).requires_grad_()
    # integer-valued grad scaled to [-2, 2] keeps low-precision comparison sane
    grad = torch.randint(-20, 20, (batch, c, hw, hw), device="cuda").to(dtype=dtype).div(-10)

    my_module = MyModule(c, affine, track_running_stats).cuda()
    ref_module = MyModule(c, affine, track_running_stats).cuda()
    if not train:
        my_module.eval()
        ref_module.eval()
    t_jit = torch.jit.script(my_module)
    ref_module.load_state_dict(my_module.state_dict())
    ref_x = x.detach().requires_grad_()
    # warm-up runs so profiling completes before the measured run below
    for i in range(0, 3):
        jit_o = t_jit(x)
        jit_o.backward(grad)
    # TODO: remove this run?
    o = ref_module(ref_x)
    o.backward(grad)
    has_affine = ref_module.bn.weight is not None
    has_running_stats = ref_module.bn.running_mean is not None
    # reset state accumulated during warm-up so both modules start identical
    if has_running_stats:
        my_module.bn.running_mean.zero_()
        my_module.bn.running_var.fill_(1.0)
        ref_module.bn.running_mean.zero_()
        ref_module.bn.running_var.fill_(1.0)
    # Verify that when train is False, we don't have grad for weight/bias.
    if has_affine and train:
        my_module.bn.weight.grad.zero_()
        my_module.bn.bias.grad.zero_()
        ref_module.bn.weight.grad.zero_()
        ref_module.bn.bias.grad.zero_()
    x.grad.zero_()
    ref_x.grad.zero_()
    # real runs
    jit_o = t_jit(x)
    jit_o.backward(grad)
    o = ref_module(ref_x)
    o.backward(grad)
    # assert forward graph fusion
    self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1, consider_subgraphs=True)
    # assert backward graph fusion
    bwd_graph = list(
        list(t_jit.get_debug_state().execution_plans.values())[0].code.grad_executor_states()[0]
        .execution_plans.values())[0].graph
    self.assertGraphContainsExactly(bwd_graph, FUSION_GUARD, 1, consider_subgraphs=True)
    # looser tolerances for half precision
    e0 = 1e-5 if dtype is not torch.half else 1e-3
    e1 = 1e-4 if dtype is not torch.half else 1e-3
    e2 = 1e-3 if dtype is not torch.half else 1e-2
    self.assertTrue(self._compare("comparing output failed", jit_o, o, e0))
    self.assertTrue(self._compare("comparing input grad failed", x.grad, ref_x.grad, e1))
    # TODO: switch to welford and reduce this to 1e-5
    # The 1e-3 looks bad, but we don't have welford in codegen, so numeric
    # is very different between reference and codegen.
    if has_affine and train:
        self.assertTrue(self._compare("comparing weight grad failed",
                                      my_module.bn.weight.grad,
                                      ref_module.bn.weight.grad,
                                      e2))
        self.assertTrue(self._compare("comparing bias grad failed",
                                      my_module.bn.bias.grad,
                                      ref_module.bn.bias.grad,
                                      e1))
    if has_running_stats:
        self.assertTrue(self._compare("comparing running_mean failed",
                                      my_module.bn.running_mean,
                                      ref_module.bn.running_mean,
                                      e0))
        self.assertTrue(self._compare("comparing running_var failed",
                                      my_module.bn.running_var,
                                      ref_module.bn.running_var,
                                      e0))
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_batch_norm_half(self):
    """Run the BN correctness helper in half precision over all combinations
    of (training, track_running_stats) x affine."""
    with torch.backends.cudnn.flags(enabled=True):
        # [training, track_running_stats] pairs
        setups = [
            [True, True],
            [False, False],
            [True, False],
            [False, True]]
        for training_and_track, affine in itertools.product(setups, [True, False]):
            training, track_running_stats = training_and_track
            self._test_batch_norm_impl_index_helper(4, 8, 5, affine, track_running_stats, training, torch.half)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_batch_norm_impl_index_inner_bcast(self):
    """Regression test for BN with inner-dim broadcast: 1x1 spatial size."""
    # the repro
    self._test_batch_norm_impl_index_helper(2, 1, 1, False, True, True)
    # running the full set
    # [training, track_running_stats] pairs
    setups = [
        [True, True],
        [False, False],
        [True, False],
        [False, True]]
    for training_and_track, affine in itertools.product(setups, [True, False]):
        training, track_running_stats = training_and_track
        self._test_batch_norm_impl_index_helper(2, 1, 1, affine, track_running_stats, training)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_batch_norm_impl_index_correctness(self):
    """Sweep the BN correctness helper over batch/channel/spatial sizes and
    all (training, track_running_stats, affine) combinations."""
    with torch.backends.cudnn.flags(enabled=True):
        batch = [2, 7, 16]
        channels = [4, 89, 19, 32]
        hw = [1, 8, 17, 32]
        # avoid tolerance failure in CI
        torch.cuda.manual_seed_all(211)
        # failing sizes (2, 1, 1, 1)
        # failing sizes (2, 89, 8, 8) training False, track True, affine: False
        # FIX: the original loop variable was also named `hw`, shadowing and
        # rebinding the size list it iterates over; use a distinct name.
        for b, c, spatial in itertools.product(batch, channels, hw):
            # [training, track_running_stats] pairs
            setups = [
                [True, True],
                [False, False],
                [True, False],
                [False, True]]
            for training_and_track, affine in itertools.product(setups, [True, False]):
                training, track_running_stats = training_and_track
                self._test_batch_norm_impl_index_helper(b, c, spatial, affine, track_running_stats, training)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_softplus_fuser(self):
    """Shifted-softplus forward and backward must match eager and end up in a
    fusion group."""
    def shifted_softplus(x: torch.Tensor, shift: float):
        return functional.softplus(x) - shift

    jitted = torch.jit.script(shifted_softplus)
    inp = torch.randn(4, 2, dtype=torch.float32, device="cuda").requires_grad_()
    inp_ref = inp.detach().clone().requires_grad_()
    grad = torch.randn(4, 2, dtype=torch.float32, device="cuda")
    # eager baseline (0.693147 ~= ln 2, the conventional softplus shift)
    aten_o = shifted_softplus(inp_ref, 0.693147)
    aten_o.backward(grad)
    aten_grad = inp_ref.grad
    for i in range(3):
        jit_o = jitted(inp, 0.693147)
        inp.grad = None  # avoid accumulation on grad
        jit_o.backward(grad)
        jit_grad = inp.grad
    assert torch.allclose(jit_o, aten_o)
    assert torch.allclose(jit_grad, aten_grad)
    self.assertGraphContains(jitted.graph_for(inp, 0.693147), FUSION_GROUP, True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_inplace_removal(self):
    """In-place ops (+=, relu_) must be rewritten to out-of-place forms; the
    optimized graph should contain a fusion group plus aten::add/aten::relu."""
    def t(x: torch.Tensor):
        o = torch.nn.functional.softmax(x, dim=0)
        o += x
        return o.relu_()

    jitted = torch.jit.script(t)
    inp = torch.randn(4, 2, dtype=torch.float32, device="cuda")
    # three runs so profiling completes before inspecting the graph
    for i in range(3):
        jit_o = jitted(inp)
    graph = jitted.graph_for(inp)
    self.assertGraphContains(graph, FUSION_GROUP, True)
    self.assertGraphContains(graph, 'aten::add', True)
    self.assertGraphContains(graph, 'aten::relu', True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_conv2d_bias(self):
    """conv2d with bias followed by relu should produce a fusion group; the
    bias-less variant should not fuse, and the bias path should use the
    prim::add_optional node."""
    def t(x: torch.Tensor, w: torch.Tensor, bias: torch.Tensor):
        o = torch.nn.functional.conv2d(x, w, bias)
        return o.relu()

    jitted = torch.jit.script(t)
    inp = torch.randn(4, 5, 3, 3, dtype=torch.float32, device="cuda")
    weight = torch.randn(2, 5, 2, 2, dtype=torch.float32, device="cuda")
    bias = torch.randn(2, dtype=torch.float32, device="cuda")
    for i in range(3):
        jit_o = jitted(inp, weight, bias)
    # NOTE(review): graph_for is queried with only `inp` although the scripted
    # fn takes three args — presumably sufficient for plan lookup; verify.
    graph = jitted.graph_for(inp)
    self.assertGraphContains(graph, FUSION_GROUP, True)

    def t_not_fused(x: torch.Tensor, w: torch.Tensor):
        o = torch.nn.functional.conv2d(x, w)
        return o.relu()

    jitted_not_fused = torch.jit.script(t_not_fused)
    for i in range(3):
        jit_o = jitted_not_fused(inp, weight)
    graph = jitted_not_fused.graph_for(inp)
    # without bias there should be no fusion group at all
    self.assertGraphContainsExactly(graph, FUSION_GROUP, 0)
    self.assertGraphContains(graph, 'aten::relu', True)

    def t_bias(x: torch.Tensor, w: torch.Tensor, bias: torch.Tensor):
        o = torch.nn.functional.conv2d(x, w, bias)
        return o.relu()

    jitted_bias = torch.jit.script(t_bias)
    for i in range(3):
        jit_o = jitted_bias(inp, weight, bias)
    graph = jitted_bias.graph_for(inp)
    self.assertGraphContains(graph, FUSION_GROUP, True)
    # biased conv is expected to lower through prim::add_optional
    self.assertGraphContains(graph, 'prim::add_optional', True)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_remove_output_used_only_in_dtype(self):
    """Two BatchNorms whose outputs merge must still fuse and match eager
    under autocast (exercises removal of outputs used only for their dtype)."""
    class MyModule(torch.nn.Module):
        def __init__(self, num_features=4):
            super(MyModule, self).__init__()
            self.bn0 = torch.nn.BatchNorm2d(num_features)
            self.bn1 = torch.nn.BatchNorm2d(num_features)

        def forward(self, x, y):
            o1 = self.bn0(x)
            o2 = self.bn1(y)
            return torch.relu(o1 + o2)

    t = MyModule(4).float().cuda()
    jitted = torch.jit.script(t)
    x = torch.randn(3, 4, 2, 5, dtype=torch.float32, device="cuda")
    y = torch.randn(3, 4, 2, 5, dtype=torch.float32, device="cuda")
    with torch.cuda.amp.autocast(True):
        # several runs so profiling completes before the compared run
        for i in range(5):
            jit_o = jitted(x, y)
        jit_o = jitted(x, y)
        o = t(x, y)
        self.assertTrue(torch.allclose(jit_o, o))
        graph = jitted.graph_for(x, y)
    self.assertGraphContains(graph, FUSION_GROUP, True)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_fix_shape_expression_bn(self):
    """BN output added to a second input then relu'd must fuse and match eager
    under autocast (exercises shape-expression handling for BN outputs)."""
    class MyModule(torch.nn.Module):
        def __init__(self, num_features=4):
            super(MyModule, self).__init__()
            self.bn = torch.nn.BatchNorm2d(num_features)

        def forward(self, x, y):
            out1 = self.bn(x)
            out2 = out1 + y
            out3 = torch.relu(out2)
            return out3

    t = MyModule(4).float().cuda()
    jitted = torch.jit.script(t)
    x = torch.randn(3, 4, 2, 5, dtype=torch.float32, device="cuda")
    y = torch.randn(3, 4, 2, 5, dtype=torch.float32, device="cuda")
    with torch.cuda.amp.autocast(True):
        # several runs so profiling completes before the compared run
        for i in range(5):
            jit_o = jitted(x, y)
        jit_o = jitted(x, y)
        o = t(x, y)
        self.assertTrue(torch.allclose(jit_o, o))
        graph = jitted.graph_for(x, y)
    self.assertGraphContains(graph, FUSION_GROUP, True)
def _run_fwd_helper(self, func, ops, *args):
    """Script ``func``, warm it up, compare every output (value and dtype)
    against eager, then assert a fusion group exists and that each op name in
    ``ops`` has been fully absorbed into it (zero standalone occurrences)."""
    jitted = torch.jit.script(func)
    # warm-up runs so profiling completes before the compared run
    for i in range(3):
        jit_o = jitted(*args)
    jit_o = jitted(*args)
    o = func(*args)
    # func returns a tuple of tensors; compare element-wise
    for oo, jit_oo in zip(o, jit_o):
        self.assertEqual(oo.dtype, jit_oo.dtype)
        self.assertEqual(oo, jit_oo)
    graph = jitted.graph_for(*args)
    self.assertGraphContains(graph, FUSION_GROUP, True)
    for op in ops:
        self.assertGraphContainsExactly(graph, op, 0)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_sibling_fusion(self):
    """Sibling ops sharing an input (no data dependency between outputs)
    should fuse together into a single group."""
    device = "cuda"
    dtype = torch.float
    x = torch.randn(2, 5, dtype=dtype, device=device)
    y = torch.randn(2, 5, dtype=dtype, device=device)

    def t(x: torch.Tensor):
        o1 = x + 1.0
        o2 = x * 0.5
        return o1, o2

    self._run_fwd_helper(t, ['aten::add', 'aten::mul'], x)

    def t2(x: torch.Tensor, y: torch.Tensor):
        o1 = x.sum(0)
        o2 = (x * y).sum(0)
        return o1, o2

    self._run_fwd_helper(t2, ['aten::sum', 'aten::mul'], x, y)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_clean_profile_ivalue(self):
    """Crash test: flipping the profiled bool flag after optimization must not
    break execution (profile_ivalue nodes cleaned outside the fusion path)."""
    device = "cuda"
    dtype = torch.float
    x = torch.randn(2, 5, dtype=dtype, device=device, requires_grad=True)
    # turn on autodiff subgraph inlining
    # this is to verify that we clean up profile_ivalue node out side of
    # fusion code path.
    torch._C._debug_set_autodiff_subgraph_inlining(True)

    def t(x: torch.Tensor, flag: bool):
        return torch.dropout(x, 0.5, flag)

    jit_t = torch.jit.script(t)
    # profile with flag=True; results are intentionally unused
    for idx in range(5):
        out = jit_t(x, True)
    graph = jit_t.graph_for(x, True)
    # now run with flag=False — must not raise despite the profiled True value
    out = jit_t(x, False)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_sibling_fusion_no_scalar_inputs(self):
    """Sibling ops on unrelated tensors must NOT be fused together."""
    device = "cuda"
    dtype = torch.float
    x = torch.randn(2, 5, dtype=dtype, device=device)
    y = torch.randn(3, dtype=dtype, device=device)

    # no tensor dependency between o1/o2, we shouldn't be fusing them
    def t(x: torch.Tensor, y: torch.Tensor):
        o1 = x + 1
        o2 = y - 1
        return o1, o2

    jitted = torch.jit.script(t)
    # three runs so profiling completes before inspecting the graph
    for i in range(3):
        jit_o = jitted(x, y)
    graph = jitted.graph_for(x, y)
    self.assertGraphContainsExactly(graph, FUSION_GROUP, 0)
def _bias_view_relu_helper(self, shape, output_shape, dtype, device, error):
    """Check fusion of a bias-add -> view -> relu module.

    Compares scripted output against eager within ``error`` tolerance, then
    asserts the view was fused (as prim::view_copy) — unless ``output_shape``
    contains a -1 inferred dimension, in which case fusion must NOT happen.
    """
    class BiasViewRelu(torch.nn.Module):
        def __init__(self):
            super(BiasViewRelu, self).__init__()
            self.bias = torch.nn.Parameter(torch.randn(shape, dtype=dtype, device=device), requires_grad=False)
            with torch.no_grad():
                self.bias.fill_(10)

        def forward(self, inputs: torch.Tensor, view_shape: List[int]):
            o = inputs + self.bias
            o = o.view(view_shape)
            return torch.relu(o)

    t = BiasViewRelu()
    x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
    t_jit = torch.jit.script(t)
    # profiling
    jit_o = t_jit(x, output_shape)
    # optimization
    jit_o = t_jit(x, output_shape)
    # final
    jit_o = t_jit(x, output_shape)
    # eager - baseline
    o = t(x, output_shape)
    self.assertEqual(o.dtype, jit_o.dtype)
    self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
    graph = t_jit.graph_for(x, output_shape)
    # idiom fix: generator instead of a throwaway list inside any()
    has_inferred_dimension = any(dim == -1 for dim in output_shape)
    if has_inferred_dimension:
        # prohibit fusing when view_shape contains an inferred dimension
        self.assertGraphContainsExactly(graph, FUSION_GROUP, 0)
        self.assertGraphContainsExactly(graph, 'prim::view_copy', 0)
    else:
        self.assertGraphContains(graph, FUSION_GUARD)
        self.assertGraphContains(graph, 'prim::view_copy', True)
def _alias_bias_view_relu_helper(self, shape, output_shape, dtype, device, error):
    """Check that a view whose *source* is mutated in-place (aliasing hazard)
    is NOT fused: output must match eager, and the graph must contain neither
    a fusion guard nor prim::view_copy."""
    class BiasViewRelu(torch.nn.Module):
        def __init__(self):
            super(BiasViewRelu, self).__init__()
            self.bias = torch.nn.Parameter(torch.randn(shape, dtype=dtype, device=device), requires_grad=False)
            with torch.no_grad():
                self.bias.fill_(10)

        def forward(self, inputs : torch.Tensor, bias : torch.Tensor, view_shape : List[int]):
            # in-place add on the viewed tensor's source aliases `o`
            o = inputs.view(view_shape)
            inputs.add_(bias)
            return torch.relu(o)

    t = BiasViewRelu()
    x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
    bias = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
    t_jit = torch.jit.script(t)
    # clone per call because forward mutates its input
    # profiling
    jit_o = t_jit(x.clone(), bias, output_shape)
    # optimization
    jit_o = t_jit(x.clone(), bias, output_shape)
    # final
    jit_o = t_jit(x.clone(), bias, output_shape)
    # eager - baseline
    o = t(x.clone(), bias, output_shape)
    self.assertEqual(o.dtype, jit_o.dtype)
    self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
    graph = t_jit.graph_for(x, bias, output_shape)
    self.assertGraphContainsExactly(graph, FUSION_GUARD, 0)
    self.assertGraphContainsExactly(graph, 'prim::view_copy', 0)
# generate random view given original view
def _random_view(self, original_view, max_len=8, max_views=10000):
class Moves(enum.Enum):
Merge = 0
Split = 1
Broadcast = 2
ImplicitBroadcast = 3
Keep = 4
def valid(old_view, new_view):
old_view_size = reduce(operator.mul, old_view)
new_view_size = reduce(operator.mul, new_view)
return old_view_size == new_view_size
# given a random starting number, find the nearest divisor
def find_nearest_divisor(N):
if 2 >= (N - 1):
return -1
result = random.randint(2, N - 1)
while (N % result) != 0:
result += 1
return result
complete_views = set([tuple(original_view)])
to_visit = []
# empty new view, curent originaal view, start pos=0, move count = 0, last_move
to_visit.append(([], original_view, 0, [], Moves.Keep))
# depth-first search of view shapes, starting from the original view
while len(to_visit) > 0 and len(complete_views) < max_views:
new_view, old_view, odx, move_list, last_move = to_visit[-1]
to_visit.pop()
# iterate over each move type
for idx in range(len(Moves)):
state = Moves(idx)
new_view_clone = copy.deepcopy(new_view)
old_view_clone = copy.deepcopy(old_view)
new_move_list = move_list + [state]
new_odx = odx
# Update state using Move state
if state == Moves.Keep:
new_size = old_view_clone[odx]
new_view_clone.append(new_size)
new_odx += 1
elif state == Moves.Merge:
if odx + 1 < len(old_view_clone):
new_size = old_view_clone[odx] * old_view_clone[odx + 1]
new_view_clone.append(new_size)
new_odx += 2
else:
continue
elif state == Moves.Broadcast and last_move != Moves.Broadcast:
new_view_clone.append(1)
elif state == Moves.Split:
new_size = find_nearest_divisor(old_view_clone[odx])
if new_size == -1:
continue
new_view_clone.append(new_size)
old_view_clone[odx] = int(old_view[odx] / new_size)
if old_view_clone[odx] == 1:
new_odx += 1
elif state == Moves.ImplicitBroadcast:
old_view_clone.insert(odx + 1, 1)
new_size = old_view[odx] * 1
new_view_clone.append(new_size)
new_odx += 2
if new_odx < len(old_view_clone) and len(new_move_list) < max_len:
to_visit.append((new_view_clone, old_view_clone, new_odx, new_move_list, state))
elif (valid(original_view, new_view_clone)):
final_new_view = tuple(new_view_clone)
complete_views.add(final_new_view)
return list(complete_views)
# ndims - number of dimensions
# test_fn - view test function
def _view_test_generator(self, ndims, test_fn):
    """Run ``test_fn`` on random pairs of compatible view shapes.

    Builds a random tensor whose dimension sizes keep the total element
    count under ``max_size``, derives a pool of compatible view shapes
    from it via ``self._random_view``, and exercises ``test_fn`` on
    randomly-ordered shape pairs drawn from that pool.

    Args:
        ndims: number of dimensions of the random base tensor.
        test_fn: callable ``(old_view, new_view, dtype, device, error)``
            performing the actual view test (e.g. ``_bias_view_relu_helper``).
    """
    # Cap each dimension so the tensor holds at most ~max_size elements.
    max_size = 10e7
    max_value = max(int(pow(max_size, 1. / ndims)), 1)
    sizes = [random.randint(1, max_value) for _ in range(ndims)]
    x = torch.randn(sizes)

    original_sizes = list(x.size())
    all_views = self._random_view(original_sizes)
    random.shuffle(all_views)

    # Test random combinations of compatible views.
    # (Dead ``total``/``correct`` counters removed: they were written but
    # never read.)
    max_samples = 20
    max_views = min(len(all_views), max_samples)
    for idx in range(max_views):
        for jdx in range(idx + 1, max_views):
            test_fn(all_views[idx], all_views[jdx], torch.float, 'cuda', 1e-6)
@unittest.skipIf(ALIAS_TEST_DISABLED, "skipping this test since view is disabled now")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_view(self):
    """Exercise view fusion on a fixed reshape plus random shapes per rank."""
    torch._C._jit_set_nvfuser_guard_mode(True)
    dtype, device, tol = torch.float, 'cuda', 1e-6
    # Fixed reshape with an inferred (-1) dimension.
    self._bias_view_relu_helper([2, 3, 4, 5], [-1, 4, 5], dtype, device, tol)
    # Randomized compatible view shapes for base ranks 1 through 4.
    for rank in range(1, 5):
        self._view_test_generator(rank, self._bias_view_relu_helper)
    # Aliasing variant: in-place mutation of the viewed input.
    self._alias_bias_view_relu_helper([2, 3, 4, 5], [1, 6, 1, 2, 2, 5, 1], dtype, device, tol)
def _bias_flatten_relu_helper(self, shape, start_dim, end_dim, dtype, device, error):
    """Check that bias-add + flatten + relu scripts into a flatten_copy graph."""
    class BiasFlattenRelu(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.bias = torch.nn.Parameter(torch.randn(shape, dtype=dtype, device=device), requires_grad=False)
            with torch.no_grad():
                self.bias.fill_(10)

        def forward(self, inputs: torch.Tensor, start_dim: int, end_dim: int):
            biased = inputs + self.bias
            flat = biased.flatten(start_dim, end_dim)
            return torch.relu(flat)

    model = BiasFlattenRelu()
    sample = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
    scripted = torch.jit.script(model)
    self._run_helper(scripted, model, sample, start_dim, end_dim)
    # The optimized graph is expected to contain a flatten_copy node.
    self.assertGraphContains(scripted.graph_for(sample, start_dim, end_dim), 'prim::flatten_copy', True)
def _alias_bias_flatten_relu_helper(self, shape, start_dim, end_dim, dtype, device, error):
    """Check that an in-place mutation aliasing the flattened input blocks fusion."""
    class BiasFlattenRelu(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.bias = torch.nn.Parameter(torch.randn(shape, dtype=dtype, device=device), requires_grad=False)
            with torch.no_grad():
                self.bias.fill_(10)

        def forward(self, inputs: torch.Tensor, bias: torch.Tensor, start_dim: int, end_dim: int):
            flat = inputs.flatten(start_dim, end_dim)
            inputs.add_(bias)  # in-place write aliases the flattened view
            return torch.relu(flat)

    model = BiasFlattenRelu()
    sample = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
    bias = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
    scripted = torch.jit.script(model)
    # Three runs: profiling, optimization, then the final optimized execution.
    for _ in range(3):
        jit_o = scripted(sample.clone(), bias, start_dim, end_dim)
    # Eager baseline on an identical clone.
    o = model(sample.clone(), bias, start_dim, end_dim)
    self.assertEqual(o.dtype, jit_o.dtype)
    self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
    graph = scripted.graph_for(sample, bias, start_dim, end_dim)
    # Aliasing must prevent both fusion and the flatten_copy rewrite.
    self.assertGraphContainsExactly(graph, FUSION_GUARD, 0)
    self.assertGraphContainsExactly(graph, 'prim::flatten_copy', 0)
@unittest.skipIf(ALIAS_TEST_DISABLED, "skipping this test since flatten is disabled now")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_flatten(self):
    """Run the flatten helpers over a grid of (start_dim, end_dim) pairs."""
    torch._C._jit_set_nvfuser_guard_mode(True)
    dim_pairs = [(0, -1), (1, -1), (2, -1), (0, 3), (1, 2), (2, 2)]
    # Fusible case first, then the aliasing case, for every dim pair.
    for start_dim, end_dim in dim_pairs:
        self._bias_flatten_relu_helper([2, 3, 4, 5], start_dim, end_dim, torch.float, 'cuda', 1e-6)
    for start_dim, end_dim in dim_pairs:
        self._alias_bias_flatten_relu_helper([2, 3, 4, 5], start_dim, end_dim, torch.float, 'cuda', 1e-6)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_strict_fusion(self):
    """torch.jit.strict_fusion: fully-fusible graphs pass; unfusible ops raise."""
    # Everything inside strict_fusion is fusible -> scripting succeeds and the
    # optimized graph holds a CudaFusionGroup with no leftover aten::add.
    def success(x):
        with torch.jit.strict_fusion():
            return x + x + x

    scripted = self.checkScript(success, (torch.rand([4], device='cuda'),))
    g = torch.jit.last_executed_optimized_graph()
    FileCheck().check_not("aten::add").check("prim::CudaFusionGroup").run(g)

    # torch.mm is not fusible by nvfuser, so strict_fusion must raise and the
    # error message must name the offending operator.
    def failure(x):
        with torch.jit.strict_fusion():
            return x + torch.mm(x, x) + x

    with self.assertRaises(Exception) as error_out:
        foo_s = torch.jit.script(failure)
        # two calls: a profiling run, then the optimizing run that raises
        foo_s(torch.rand([4, 4]))
        foo_s(torch.rand([4, 4]))
    fc = FileCheck().check("Found unfused operators")
    fc.check("aten::mm").run(str(error_out.exception))
def _ltc_helper(self, shape, dtype, device, error, approximate=True):
    """Modeled after an LTC linear layer: mm + bias + gelu with nested views."""
    class LTC(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.weight = torch.nn.Parameter(torch.randn([1024, 1024], dtype=dtype, device=device), requires_grad=False)
            self.bias = torch.nn.Parameter(torch.randn([1, 1024], dtype=dtype, device=device), requires_grad=False)

        def forward(self, inputs: torch.Tensor):
            flat = inputs.view([32768, 1024])
            prod = torch.mm(flat, self.weight)
            shifted = prod.view([256, 128, 1024]) + self.bias
            # round-trip view exercises nested view handling
            out = shifted.view([32768, 1024]).view([256, 128, 1024])
            return torch.nn.functional.gelu(out)

    model = LTC()
    sample = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
    scripted = torch.jit.script(model)
    # profile/optimization runs followed by the measured run
    for _ in range(3):
        jit_o = scripted(sample)
    o = model(sample)
    self.assertEqual(o.dtype, jit_o.dtype)
    self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
    graph = scripted.graph_for(sample)
    self.assertGraphContains(graph, FUSION_GUARD)
    self.assertGraphContains(graph, 'prim::view_copy', True)
@unittest.skipIf(ALIAS_TEST_DISABLED, "skipping this test since view is disabled now")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_nested_view(self):
    """Chained/nested views inside an LTC-style linear block should still fuse."""
    shape = [256, 128, 1024]
    self._ltc_helper(shape, torch.float, 'cuda', 1e-6)
def _bias_squeeze_relu_helper(self, shape, dtype, device, error):
    """bias-add + squeeze + relu should fuse and lower to squeeze_copy."""
    class BiasSqueezeRelu(torch.nn.Module):
        def forward(self, inputs: torch.Tensor, bias: torch.Tensor):
            summed = inputs + bias
            squeezed = torch.squeeze(summed)
            return torch.relu(squeezed)

    model = BiasSqueezeRelu()
    x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
    bias = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
    scripted = torch.jit.script(model)
    # profiling / optimization / fused runs
    for _ in range(3):
        jit_o = scripted(x, bias)
    o = model(x, bias)
    self.assertEqual(o.dtype, jit_o.dtype)
    self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
    graph = scripted.graph_for(x, bias)
    self.assertGraphContains(graph, FUSION_GUARD)
    self.assertGraphContains(graph, 'prim::squeeze_copy', True)
def _alias_bias_squeeze_relu_helper(self, shape, dtype, device, error):
    """In-place mutation aliasing the squeezed input must block fusion."""
    class BiasSqueezeRelu(torch.nn.Module):
        def forward(self, inputs: torch.Tensor, bias: torch.Tensor):
            squeezed = torch.squeeze(inputs)
            inputs.add_(bias)  # aliases the squeeze output
            return torch.relu(squeezed)

    model = BiasSqueezeRelu()
    x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
    bias = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
    scripted = torch.jit.script(model)
    # profiling / optimization / final runs on fresh clones
    for _ in range(3):
        jit_o = scripted(x.clone(), bias)
    o = model(x.clone(), bias)
    self.assertEqual(o.dtype, jit_o.dtype)
    self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
    graph = scripted.graph_for(x, bias)
    # no fusion group and no squeeze_copy rewrite are allowed here
    self.assertGraphContainsExactly(graph, FUSION_GUARD, 0)
    self.assertGraphContainsExactly(graph, 'prim::squeeze_copy', 0)
@unittest.skipIf(ALIAS_TEST_DISABLED, "skipping this test since squeeze/unsqueeze is disabled now")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_squeeze(self):
    """Squeeze of multiple size-1 dims: fusible and aliasing variants."""
    shape = [1, 6, 1, 2, 2, 5, 1]
    self._bias_squeeze_relu_helper(shape, torch.float, 'cuda', 1e-6)
    self._alias_bias_squeeze_relu_helper(shape, torch.float, 'cuda', 1e-6)
@unittest.skipIf(ALIAS_TEST_DISABLED, "skipping this test since squeeze/unsqueeze is disabled now")
# remove this after opinfo tests are enabled
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_squeeze_zero(self):
    """squeeze on a 0-dim tensor (dim 0 and dim -1) must run through the fuser."""
    x = torch.tensor(1.0, dtype=torch.float, device="cuda")

    def squeeze_0(x: torch.Tensor):
        o = x + 1.
        o = torch.squeeze(o, 0)
        o = o * 2.
        return o

    def squeeze_1(x: torch.Tensor):
        o = x + 1.
        o = torch.squeeze(o, -1)
        o = o + .5
        return o

    # script and verify each variant against its eager counterpart
    for fn in (squeeze_0, squeeze_1):
        self._run_helper(torch.jit.script(fn), fn, x)
def _bias_unsqueeze_relu_helper(self, shape, dtype, device, error):
    """bias-add + unsqueeze + relu should fuse and lower to unsqueeze_copy."""
    class BiasUnsqueezeRelu(torch.nn.Module):
        def forward(self, inputs: torch.Tensor, bias: torch.Tensor):
            summed = inputs + bias
            expanded = torch.unsqueeze(summed, 0)
            return torch.relu(expanded)

    model = BiasUnsqueezeRelu()
    x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
    bias = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
    scripted = torch.jit.script(model)
    # profiling / optimization / fused runs
    for _ in range(3):
        jit_o = scripted(x, bias)
    o = model(x, bias)
    self.assertEqual(o.dtype, jit_o.dtype)
    self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
    graph = scripted.graph_for(x, bias)
    self.assertGraphContains(graph, FUSION_GUARD)
    self.assertGraphContains(graph, 'prim::unsqueeze_copy', True)
def _alias_bias_unsqueeze_relu_helper(self, shape, dtype, device, error):
    """In-place mutation aliasing the unsqueezed input must block fusion."""
    class BiasUnsqueezeRelu(torch.nn.Module):
        def forward(self, inputs: torch.Tensor, bias: torch.Tensor):
            expanded = torch.unsqueeze(inputs, 0)
            inputs.add_(bias)  # aliases the unsqueeze output
            return torch.relu(expanded)

    model = BiasUnsqueezeRelu()
    x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
    bias = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
    scripted = torch.jit.script(model)
    # profiling / optimization / final runs on fresh clones
    for _ in range(3):
        jit_o = scripted(x.clone(), bias)
    o = model(x.clone(), bias)
    self.assertEqual(o.dtype, jit_o.dtype)
    self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
    graph = scripted.graph_for(x, bias)
    # no fusion group and no unsqueeze_copy rewrite are allowed here
    self.assertGraphContainsExactly(graph, FUSION_GUARD, 0)
    self.assertGraphContainsExactly(graph, 'prim::unsqueeze_copy', 0)
@unittest.skipIf(ALIAS_TEST_DISABLED, "skipping this test since squeeze/unsqueeze is disabled now")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_unsqueeze(self):
    """Unsqueeze fusion: fusible and aliasing variants."""
    shape = [2, 3, 4, 5]
    self._bias_unsqueeze_relu_helper(shape, torch.float, 'cuda', 1e-6)
    self._alias_bias_unsqueeze_relu_helper(shape, torch.float, 'cuda', 1e-6)
@unittest.skipIf(ALIAS_TEST_DISABLED, "skipping this test since unsqueeze is disabled now")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_alias_pass_fix(self):
    """Bias arithmetic feeding conv2d must survive the alias-analysis pass."""
    x = torch.randn(4, 24, 2, 2, dtype=torch.float, device="cuda")
    w = torch.randn(24, 24, 1, 1, dtype=torch.float, device="cuda")
    b = torch.randn(24, dtype=torch.float, device="cuda")

    def t(x, w, b):
        shifted_bias = b + 1.0
        return torch.conv2d(x, w, shifted_bias)

    self._run_helper(torch.jit.script(t), t, x, w, b)
@unittest.skipIf(ALIAS_TEST_DISABLED, "skipping this test since squeeze/unsqueeze is disabled now")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_squeeze_negative_dim(self):
    """squeeze with a negative dim index inside a fused region."""
    x = torch.randn(4, 24, 1, 2, dtype=torch.float, device="cuda")

    def t(x):
        out = x + 1.0
        out = out.squeeze(-2)
        return out * 2.0

    self._run_helper(torch.jit.script(t), t, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_singleton_fusion(self):
    """A single-op graph should fuse when singleton fusion is enabled."""
    x = torch.randn(4, 2, device="cuda")
    with nvfuser_singleton_fusion(True):
        def t(x):
            return x.relu()

        self._run_helper(torch.jit.script(t), t, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_issue1445_fusion(self):
    """Regression test: masked full reduction + 0-dim reshape + dtype cast."""
    def f(t0, t1, t2, t3):
        masked_input = torch.where(t1, t2, t3)
        total = masked_input.sum([0, 1, 2, 3])
        sizes: List[int] = []
        reshaped = torch.reshape(t0, sizes)  # reshape to a 0-dim tensor
        ratio = total / reshaped
        return ratio.to(dtype=torch.float)

    x = torch.randn(1, 1, 1, 1, device='cuda').to(dtype=torch.long)
    y = torch.randn(3, 2, 1, 1, device='cuda').to(dtype=torch.bool).expand([3, 2, 1, 2])
    z = torch.randn(3, 2, 1, 2, device='cuda')
    w = torch.tensor(1.5, device='cuda')
    f_jit = torch.jit.script(f)
    # profiling + optimization + fused executions
    for _ in range(5):
        out_jit = f_jit(x, y, z, w)
    out = f(x, y, z, w)
    self.assertEqual(out, out_jit)
    self.assertGraphContainsExactly(f_jit.graph_for(x, y, z, w), FUSION_GROUP, 1)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_disable_sibling_fuse(self):
    """With horizontal (sibling) fusion off, no fusion group should form."""
    x = torch.randn(4, 2, device="cuda")
    y = torch.randn(8, device="cuda")
    s = torch.tensor(1.5, device="cuda")

    with nvfuser_horizontal_fusion(False):
        def t(x, y, s):
            o1 = x + s
            o2 = y + s
            return o1, o2

        t_jit = torch.jit.script(t)
        for _ in range(5):
            t_jit(x, y, s)
        # sibling fusion is disabled by the flag, so no fusion guard is expected
        self.assertGraphContainsExactly(t_jit.graph_for(x, y, s), FUSION_GUARD, 0)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_build_shape_expression_native_dropout(self):
    """Both outputs of native_dropout need shape expressions inside the fusion."""
    x = torch.randn(4, 2, device="cuda")

    def t(x):
        o, mask = torch.native_dropout(x, 0.0, True)
        o1 = o.sigmoid()
        o2 = mask.float().sigmoid()
        return (o1, o2)

    t_jit = torch.jit.script(t)
    # profiling run, then the optimized run whose outputs we keep
    for _ in range(2):
        jit_o = t_jit(x)
    o = t(x)
    for oo, jit_oo in zip(o, jit_o):
        self.assertEqual(oo.dtype, jit_oo.dtype)
        self.assertEqual(oo, jit_oo)
    self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_scalar_tensor_permuted(self):
    """Adding a 0-dim tensor to a permuted input should fuse."""
    x = torch.randn(4, 2, 3, device="cuda").permute([1, 2, 0])
    y = torch.tensor(1.0, device="cuda")
    with nvfuser_singleton_fusion(True):
        def t(x, y):
            return x + y

        self._run_helper(torch.jit.script(t), t, x, y)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_cpu_scalar(self):
    """0-dim CPU tensors are fusible only when mixed with CUDA tensors."""
    x = torch.randn(4, 2, 3, device="cuda")
    y = torch.tensor(1.0, device="cpu")
    z = torch.tensor(2.0, device="cpu")
    with nvfuser_singleton_fusion(True):
        # testing cpu scalar tensor promotion into a CUDA fusion
        def t(x, y):
            return x + y

        t_jit = torch.jit.script(t)
        self._run_helper(t_jit, t, x, y)

        # a pure cpu-scalar x cpu-scalar add should NOT be fused
        @torch.jit.script
        def t1(y, z):
            return y * z
        for _ in range(5):
            t1(y, z)
        self.assertGraphContainsExactly(t1.graph_for(y, z), FUSION_GUARD, 0)

        # everything, including the cpu scalar tensor add, should be fused
        # (one fusion group, zero leftover aten::add nodes)
        @torch.jit.script
        def t2(x, y, z):
            tmp = y + z
            return tmp + x
        for _ in range(5):
            t2(x, y, z)
        self.assertGraphContainsExactly(t2.graph_for(x, y, z), 'aten::add', 0)
        self.assertGraphContainsExactly(t2.graph_for(x, y, z), FUSION_GUARD, 1)

        # 'cpu_tmp = y + z' shouldn't be fused: its result is returned
        # directly, so the cpu add stays outside the single fusion group.
        @torch.jit.script
        def t3(x, y, z):
            cpu_tmp = y + z
            out = x + y
            return cpu_tmp, out
        for _ in range(5):
            t3(x, y, z)
        self.assertGraphContainsExactly(t3.graph_for(x, y, z), FUSION_GUARD, 1)
        self.assertGraphContainsExactly(t3.graph_for(x, y, z), 'aten::add', 1)
@unittest.skipIf(ALIAS_TEST_DISABLED, "skipping this test since squeeze/unsqueeze is disabled now")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_shape_expression(self):
    """Shape queries on (un)squeezed intermediates must survive fusion."""
    x = torch.randn(4, 2, 1, 3, device="cuda")

    def t_unsqueeze(x):
        t0 = x.relu()
        t1 = t0.unsqueeze(1)
        t2 = t1 + 1.0
        t3 = t1.size()
        return t2, t3

    def t_squeeze(x):
        t0 = x.relu()
        t1 = t0.squeeze()
        t2 = t1 + 1.0
        t3 = t1.size()
        return t2, t3

    def t_squeeze_dim(x):
        t0 = x.relu()
        t1 = t0.squeeze(-2)
        t2 = t1 + 1.0
        t3 = t1.size()
        return t2, t3

    # squeezing a non-size-1 dimension is a no-op
    def t_squeeze_dim_no_op(x):
        t0 = x.relu()
        t1 = t0.squeeze(1)
        t2 = t1 + 1.0
        t3 = t1.size()
        return t2, t3

    def run(fn):
        jit_fn = torch.jit.script(fn)
        # profiling / optimization / fused runs
        for _ in range(3):
            jit_o = jit_fn(x)
        eager = fn(x)
        # output 0 is a tensor: check dtype and values
        self.assertEqual(eager[0].dtype, jit_o[0].dtype)
        self.assertEqual(eager[0], jit_o[0])
        # output 1 is the queried shape
        self.assertEqual(eager[1], jit_o[1])
        self.assertGraphContainsExactly(jit_fn.graph_for(x), FUSION_GUARD, 1)

    for fn in [t_unsqueeze, t_squeeze, t_squeeze_dim, t_squeeze_dim_no_op]:
        run(fn)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_scalar_cuda_tensor(self):
    """0-dim CUDA tensor: pointwise op fuses, full reduction does not."""
    x = torch.tensor(2.0, device="cuda")
    with nvfuser_singleton_fusion(True):
        def t(x):
            return x + 1.0

        self._run_helper(torch.jit.script(t), t, x)

        # reducing a 0-dim tensor must stay out of the fuser
        @torch.jit.script
        def t_jitted(x):
            return x.sum(0)

        for _ in range(5):
            t_jitted(x)
        self.assertGraphContainsExactly(t_jitted.graph_for(x), FUSION_GUARD, 0)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_overlapped_input(self):
    """Input with overlapping strides (as_strided) must still run correctly."""
    x = torch.randn(8, device="cuda").as_strided((2, 4), (1, 1))
    with nvfuser_singleton_fusion(True):
        def t(x):
            return x + 1.0

        self._run_helper(torch.jit.script(t), t, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
def test_reduction_empty_axes(self):
    """sum with an empty axis list reduces over all dims; must fuse correctly."""
    x = torch.randn(4, 2, 3, device="cuda").permute([1, 2, 0])
    with nvfuser_singleton_fusion(True):
        def t(x):
            sizes: List[int] = []
            return x.sum(sizes)

        self._run_helper(torch.jit.script(t), t, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
def test_int_tensor_input(self):
    """Integer-dtype input through a reduction (amax) should work in the fuser."""
    x = torch.randn(4, 2, device="cuda").to(dtype=torch.int)
    with nvfuser_singleton_fusion(True):
        def t(x):
            return x.amax(dim=0)

        self._run_helper(torch.jit.script(t), t, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_to_boolean(self):
    """Casting to bool via Tensor.to should be handled by the fuser."""
    x = torch.randn(4, 2, device="cuda")
    with nvfuser_singleton_fusion(True):
        def t(x):
            return x.to(dtype=torch.bool)

        self._run_helper(torch.jit.script(t), t, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_to_copy(self):
    """aten::_to_copy with several target dtypes (and dtype=None) in the fuser."""
    x = torch.randn(4, 2, device="cuda")
    with nvfuser_singleton_fusion(True):
        def t(x, dtype: torch.dtype):
            o = torch.ops.aten._to_copy(x, dtype=dtype)
            return o

        # disable JIT function caching so each dtype gets its own
        # compiled specialization instead of reusing the first plan
        t.__disable_jit_function_caching__ = True
        t_jit = torch.jit.script(t)
        for dtype in [torch.float16, torch.bool, torch.float64]:
            self._run_helper(t_jit, t, x, dtype)

        # dtype=None must also be accepted (plain copy, no cast) and,
        # under strict_fusion, be fully fused
        def t_none(x):
            with torch.jit.strict_fusion():
                o = torch.ops.aten._to_copy(x, dtype=None)
            return o

        t_jit_none = torch.jit.script(t_none)
        self._run_helper(t_jit_none, t_none, x)
@unittest.skipIf(ALIAS_TEST_DISABLED, "skipping this test since reshape is disabled now")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_view_copy_graph_guard(self):
    """reshape of a permuted input should go through guarded view-copy fusion."""
    x = torch.randn(4, 2, 3, device="cuda").permute([1, 2, 0])
    target_shape = [4, 6]
    with nvfuser_singleton_fusion(True):
        def t(x, y: List[int]):
            t1 = x + 1.0
            t2 = t1 * 1.0
            out = t2.reshape(y)
            return out.relu()

        self._run_helper(torch.jit.script(t), t, x, target_shape)
@unittest.skipIf(ALIAS_TEST_DISABLED, "skipping this test since view is disabled now")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_view_copy_graph_guard_double_fusion(self):
    """view + matmul + view should yield two separate guarded fusion groups."""
    x = torch.randn(2, 2, 5, device="cuda")
    w = torch.randn(5, 5, device="cuda")
    with nvfuser_singleton_fusion(True):
        def t(x, w):
            o = x.view([4, x.size()[-1]])
            o = torch.matmul(o, w)
            o = o.view([2, 2, o.size()[1]])
            return o

        t_jit = torch.jit.script(t)
        # profiling / optimization / fused runs
        for _ in range(3):
            jit_o = t_jit(x, w)
        o = t(x, w)
        self.assertEqual(jit_o, o)
        # matmul splits the fusion: expect two guarded subgraphs
        self.assertGraphContainsExactly(t_jit.graph_for(x, w), FUSION_GUARD, 2, consider_subgraphs=True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_input_output_passthrough(self):
    """An intermediate that is also returned must pass through the fusion."""
    def t(t0, t1, t2):
        mask = t1.to(dtype=torch.bool)
        masked_input = torch.where(t0, mask, t2)
        return masked_input, mask

    t_jit = torch.jit.script(t)
    # boolean tensors sidestep numerical differences from type promotion
    x = torch.randn(4, 4, device='cuda').to(dtype=torch.bool)
    y = torch.randn(4, 4, device='cuda').to(dtype=torch.bool)
    z = torch.tensor(1.0, device='cuda').to(dtype=torch.bool)
    # profiling run, then the optimized run whose outputs we keep
    for _ in range(2):
        jit_o = t_jit(x, y, z)
    o = t(x, y, z)
    for oo, jit_oo in zip(o, jit_o):
        self.assertEqual(oo.dtype, jit_oo.dtype)
        self.assertEqual(oo, jit_oo)
    self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_pointwise_reference_tensor(self):
    """Pointwise fusion with an in-place add feeding multiple view consumers."""
    def t(input1, input2, scalar):
        _unsafe_view = torch.ops.aten._unsafe_view(input1, [2, 4, 16])
        add_ = torch.ops.aten.add_(_unsafe_view, input2)
        gelu_ = torch.ops.aten.gelu(add_)
        view_ = torch.ops.aten.view(gelu_, [8, 16])
        mul_ = torch.ops.aten.mul(add_, scalar)
        return [view_, mul_]

    x = torch.randn(8, 16, device="cuda")
    bias = torch.randn(16, device="cuda")
    scalar = torch.ones(torch.Size([]), device="cuda")
    t_jit = torch.jit.script(t)
    # NOTE: t mutates x in place via add_, so call order matters here;
    # three scripted runs cover profile / optimize / fused execution.
    for _ in range(3):
        jit_o = t_jit(x, bias, scalar)
    o = t(x, bias, scalar)
    self.assertEqual(jit_o, o)
    self.assertGraphContains(t_jit.graph_for(x, bias, scalar), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
def test_native_batch_norm_backward(self):
    """native_batch_norm_backward must fuse, match eager outputs, and leave
    the running-stat buffers (r_m, r_v) untouched by the fused kernel."""
    grad_output = torch.randn(4, 2, 3, device="cuda")
    input = torch.randn(4, 2, 3, device="cuda")
    weight = torch.randn(2, device="cuda")
    r_m = torch.randn(2, device="cuda")
    r_v = torch.randn(2, device="cuda").abs()
    save_mean = torch.randn(2, device="cuda")
    save_invstd = torch.randn(2, device="cuda").abs()

    with nvfuser_singleton_fusion(True):
        def t(grad_out, input, weight, r_m, r_v, save_mean, save_invstd, train: bool, eps: float, mask: List[bool]):
            return torch.ops.aten.native_batch_norm_backward(grad_out, input, weight, r_m, r_v, save_mean,
                                                             save_invstd, train, eps, mask)

        t_jit = torch.jit.script(t)
        # warm-up / profiling runs on cloned stats so the real buffers stay pristine
        for i in range(4):
            jit_o = t_jit(grad_output, input, weight, r_m.clone(), r_v.clone(),
                          save_mean, save_invstd, True, 1e-5, [True, True, True])

        ref_m = r_m.clone()
        ref_v = r_v.clone()
        jit_o = t_jit(grad_output, input, weight, r_m, r_v, save_mean, save_invstd, True, 1e-5, [True, True, True])
        o = t(grad_output, input, weight, ref_m, ref_v, save_mean, save_invstd, True, 1e-5, [True, True, True])
        for oo, jit_oo in zip(o, jit_o):
            self.assertEqual(oo.dtype, jit_oo.dtype)
            self.assertEqual(oo, jit_oo)
        # the backward op must not have modified the running stats
        self.assertEqual(ref_m.dtype, r_m.dtype)
        self.assertEqual(ref_m, r_m)
        self.assertEqual(ref_v.dtype, r_v.dtype)
        self.assertEqual(ref_v, r_v)
        # BUG FIX: `r_v.clone` was previously passed without parentheses,
        # handing graph_for a bound method instead of a tensor.
        self.assertGraphContains(t_jit.graph_for(grad_output, input, weight, r_m.clone(), r_v.clone(), save_mean,
                                                 save_invstd, True, 1e-5, [True, True, True]), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_contiguous_on_broadcasted(self):
    """Expanding a size-1 dim then multiplying should fuse correctly."""
    lhs = torch.randn(4, 1, device="cuda")
    rhs = torch.randn(4, 128, device="cuda")
    with nvfuser_singleton_fusion(True):
        def t(x, y):
            t1 = x.expand([4, 128])
            t2 = t1 * y
            return t2

        self._run_helper(torch.jit.script(t), t, lhs, rhs)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_skip_parser(self):
    """_jit_set_nvfuser_skip_node_kind toggles per-op participation in fusion."""
    x = torch.randn(4, 12, device="cuda")
    with nvfuser_singleton_fusion(True):
        def fn(x):
            t1 = x + 1.0
            return t1.relu()

        fn_jit = torch.jit.script(fn)
        self._run_helper(fn_jit, fn, x)
        # add node should have been merged into fusion
        self.assertGraphContains(fn_jit.graph_for(x), FUSION_GUARD)
        self.assertGraphContainsExactly(fn_jit.graph_for(x), 'aten::add', 0)

        # flips skip parse for `aten::add`, following fusion should skip the
        # add node (the call returns the previous flag value, hence False)
        self.assertFalse(torch._C._jit_set_nvfuser_skip_node_kind("aten::add", True))

        def fn_1(x):
            t1 = x + 2.0  # change const value so we'll not reuse plan
            return t1.relu()

        fn_1_jit = torch.jit.script(fn_1)
        self._run_helper(fn_1_jit, fn_1, x)
        # add is now skipped by the parser: a fusion group still forms for
        # relu, but exactly one aten::add is left outside it
        self.assertGraphContains(fn_1_jit.graph_for(x), FUSION_GUARD)
        self.assertGraphContainsExactly(fn_1_jit.graph_for(x), 'aten::add', 1)

        # flips skip parse for `aten::add` back, next fusion should fuse add node
        self.assertTrue(torch._C._jit_set_nvfuser_skip_node_kind("aten::add", True))

        def fn_2(x):
            t1 = x + 2.0  # change const value so we'll not reuse plan
            return t1.relu()

        fn_2_jit = torch.jit.script(fn_2)
        self._run_helper(fn_2_jit, fn_2, x)
        # add node should have been merged into fusion again
        self.assertGraphContains(fn_2_jit.graph_for(x), FUSION_GUARD)
        self.assertGraphContainsExactly(fn_2_jit.graph_for(x), 'aten::add', 0)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_cuda_fusion_guard(self):
    """Guarded fusion must run forward and backward without error."""
    old_guard = torch._C._jit_set_nvfuser_guard_mode(True)

    class ConvModule(torch.nn.Module):
        def forward(self, x):
            return x.sin().sigmoid()

    mod = ConvModule().to(device="cuda")
    inputs = [torch.randn(20, 16, 50, 100, device="cuda", requires_grad=True)]

    def reduce_scalar(temp):
        return temp.sum()

    scripted = torch.jit.script(mod)
    # inference pass first, then a grad-enabled pass with backward
    with torch.no_grad():
        scripted(*inputs)
    res = scripted(*inputs)
    reduce_scalar(res).backward()
    # restore the previous guard mode
    torch._C._jit_set_nvfuser_guard_mode(old_guard)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_nvfuser_comparison_callbacks_with_fallback(self):
    """With fallback enabled, the callback receives fused AND unfused outputs."""
    try:
        fused_result = None
        unfused_result = None
        graph_ir = None

        def callback(fused_outputs, unfused_outputs, graph_str):
            nonlocal unfused_result
            nonlocal fused_result
            nonlocal graph_ir
            unfused_result = unfused_outputs[-1]
            fused_result = fused_outputs[-1]
            graph_ir = graph_str

        torch._C._jit_nvfuser_set_comparison_callback(True, callback)

        def fn(x, y):
            z = torch.add(x, y)
            return torch.relu(z)

        x = torch.rand((4, 4)).cuda() - 0.5
        y = torch.rand((4, 4)).cuda() - 0.5
        fn_s = torch.jit.script(fn)
        # profiling + optimization + fused execution
        for _ in range(3):
            fn_s(x, y)
        expected = fn(x, y)
        # both the fused result and the fallback result must match eager
        self.assertEqual(expected, fused_result)
        self.assertEqual(expected, unfused_result)
        FileCheck().check("aten::add").run(graph_ir)
    finally:
        torch._C._jit_nvfuser_clear_comparison_callback()
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_nvfuser_comparison_callbacks_without_fallback(self):
    """Without fallback, the callback sees only fused outputs; unfused stays None."""
    try:
        fused_result = None
        unfused_result = None
        graph_ir = None

        def callback(fused_outputs, unfused_outputs, graph_str):
            nonlocal unfused_result
            nonlocal fused_result
            nonlocal graph_ir
            if len(unfused_outputs) > 0:
                unfused_result = unfused_outputs[-1]
            fused_result = fused_outputs[-1]
            graph_ir = graph_str

        torch._C._jit_nvfuser_set_comparison_callback(False, callback)

        def fn(x, y):
            z = torch.add(x, y)
            return torch.relu(z)

        x = torch.rand((4, 4)).cuda() - 0.5
        y = torch.rand((4, 4)).cuda() - 0.5
        fn_s = torch.jit.script(fn)
        # profiling + optimization + fused execution
        for _ in range(3):
            fn_s(x, y)
        expected = fn(x, y)
        self.assertEqual(expected, fused_result)
        # the fallback path never ran, so no unfused result was recorded
        self.assertEqual(None, unfused_result)
        FileCheck().check("aten::add").run(graph_ir)
    finally:
        torch._C._jit_nvfuser_clear_comparison_callback()
@unittest.skipIf(not RUN_NVFUSER, "requires NVFuser")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_cuda_fusion_guard_backward(self):
    """Backward through a fused cos().cos() must never hit the fallback path."""
    old_guard = torch._C._jit_set_nvfuser_guard_mode(True)
    inp = torch.randn(10, device="cuda", requires_grad=True)
    grad = torch.randn(10, device="cuda")

    def f(x):
        a = x.cos().cos()
        return a

    scripted = torch.jit.script(f)
    with profile(activities=[ProfilerActivity.CPU]) as prof:
        for _ in range(5):
            inp.grad = None
            out = scripted(inp)
            out.backward(grad)
    # any "fallback" kernel in the profiler trace means the guard failed
    self.assertEqual(prof.events().table().find("fallback"), -1)
    torch._C._jit_set_nvfuser_guard_mode(old_guard)
# TODO: generalize this
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
def test_inf_quick_patch(self):
    """inf/nan inputs through reductions and pointwise ops must match eager."""
    inf = float('inf')
    nan = float('nan')
    inputs = [torch.tensor(vals, device="cuda") for vals in
              [[-inf, inf, 4.0],
               [1.0, inf, 4.0],
               [-inf, -1.5, 4.0],
               [1.0, -3.0, nan],
               [-inf, -inf, -inf],
               [inf, inf, inf],
               [nan, nan, nan]]]

    def fn_amax(x):
        return x.amax(dim=0)

    def fn_amin(x):
        return x.amin(dim=0)

    def fn_add_nan(x):
        return x.relu() + float('nan')

    def fn_add(x):
        return x + 1.0

    with nvfuser_singleton_fusion(True):
        for fn in [fn_amax, fn_amin, fn_add, fn_add_nan]:
            for sample in inputs:
                fn_jit = torch.jit.script(fn)
                self._run_helper(fn_jit, fn, sample)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_clamp_reversed_bound(self):
    """clamp with reversed bounds (min > max) and inf/nan inputs must match eager."""
    x = torch.tensor([1., -float('inf'), 2., float('inf'), float('nan')], device="cuda")

    def t(x):
        # reversed on purpose: min=1.0 is greater than max=0.5
        return x.clamp(min=1., max=0.5)

    with nvfuser_singleton_fusion(True):
        jit_t = torch.jit.script(t)
        self._run_helper(jit_t, t, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_issue_1785(self):
    """Regression test: broadcast mul + add must fuse and match eager output."""
    class Fusion(torch.nn.Module):
        # No explicit __init__: the previous override only forwarded to
        # super() and added nothing.
        def forward(self, x, a, b):
            out = torch.mul(x.unsqueeze(-1), a)
            out = out + b
            return out

    x = torch.randn(1024, 192, 3, device='cuda')
    a = torch.randn(3, 128, device='cuda')
    b = torch.randn(3, 128, device='cuda')

    model = Fusion()
    jit_model = torch.jit.script(model)

    with torch.jit.fuser('fuser2'):
        # Warm-up runs so the profiling executor can optimize the graph.
        for _ in range(4):
            out_ref = model(x, a, b)
            out_jit = jit_model(x, a, b)

        out_ref = model(x, a, b)
        out_jit = jit_model(x, a, b)
        self.assertTrue(self._compare("comparing output failed", out_ref, out_jit, 1e-5))
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_high_rank_fusion(self):
    """An input whose rank exceeds the fusion limit must not produce a fusion group."""
    # currently we want to limit fusion to node with input where rank <= 8
    rank_limit = 8
    shape = [4] * (rank_limit + 1)
    x = torch.randn(shape, device="cuda")

    with nvfuser_singleton_fusion(True):
        def t(x):
            return x.relu()

        jit_t = torch.jit.script(t)
        for _ in range(5):
            jit_t(x)

        # Rank is above the limit, so exactly zero fusion guards are expected.
        self.assertGraphContainsExactly(jit_t.graph_for(x), FUSION_GUARD, 0)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_clamp(self):
    """clamp with max-only, min-only and both bounds must match eager on inf/nan.

    Fix: the helper names were swapped — the min-only variant was called
    ``clamp_min_max`` and the two-bound variant ``clamp_min``.
    """
    x = torch.tensor([1., float('inf'), 2., float('nan'), float('-inf')], device="cuda")

    def clamp_max(x):
        return x.clamp(max=1.5)

    def clamp_min(x):
        return x.clamp(min=1.5)

    def clamp_min_max(x):
        return x.clamp(min=1., max=3.)

    with nvfuser_singleton_fusion(True):
        for t in [clamp_max, clamp_min, clamp_min_max]:
            t_jit = torch.jit.script(t)
            self._run_helper(t_jit, t, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_device_constant(self):
    """rand_like with an explicit device constant fuses for cuda but not for cpu."""
    x = torch.randn(4, 2, device="cuda")

    def t(x):
        return torch.rand_like(x, device=torch.device(type='cuda'))

    # cpu tensor shouldn't be fused
    def t_cpu(x):
        return torch.rand_like(x, device=torch.device(type='cpu'))

    with nvfuser_singleton_fusion(True):
        t_jit = torch.jit.script(t)
        self._run_helper(t_jit, t, x)

        t_cpu_jit = torch.jit.script(t_cpu)
        for i in range(5):
            t_cpu_jit(x)

        # cpu-producing function: exactly zero fusion guards expected
        self.assertGraphContainsExactly(t_cpu_jit.graph_for(x), FUSION_GUARD, 0)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_expand(self):
    """expand/expand_as fuse under strict_fusion, but not when the source is mutated."""
    device = "cuda"
    x = torch.randn(3, 5, device=device)
    y = torch.randn(4, 2, 3, 5, device=device)

    def t(x, y):
        with torch.jit.strict_fusion():
            x = x.relu()
            o0 = x.expand(2, 3, 5)
            o1 = x.expand_as(y)
        return o0, o1

    t_jit = torch.jit.script(t)
    self._run_helper(t_jit, t, x, y, check_stride=True)

    # x.add_(1) mutates the tensor the expanded views alias, so no fusion is
    # expected (num_fusion=0 below).
    def t2(x, y):
        o0 = x.expand(2, 3, 5)
        o1 = x.expand_as(y)
        x.add_(1)
        return o0, o1

    t2_jit = torch.jit.script(t2)
    self._run_helper(t2_jit, t2, x, y, check_stride=True, num_fusion=0)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_scheduler_with_polymorphic_broadcast(self):
    """Broadcast along either axis combined with a reduction must match eager."""
    device = "cuda"
    x0 = torch.randn(10, 128, device=device)
    x1 = torch.rand_like(x0)
    x2 = torch.randn(10, device=device)

    # x2 broadcast across the last dimension, reduction over dim 0.
    def t(x0, x1, x2):
        x3 = x2.unsqueeze(-1)
        x4 = x3 + x0
        x5 = x3 + x1
        x6 = x5.sum(0)
        return x4, x6

    t_jit = torch.jit.script(t)
    self._run_helper(t_jit, t, x0, x1, x2, check_stride=True)

    # Same pattern but broadcasting across the first dimension instead.
    x2 = torch.randn(128, device=device)

    def t2(x0, x1, x2):
        x3 = x2.unsqueeze(0)
        x4 = x3 + x0
        x5 = x3 + x1
        x6 = x5.sum(1)
        return x4, x6

    t2_jit = torch.jit.script(t2)
    self._run_helper(t2_jit, t2, x0, x1, x2, check_stride=True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_type_inference(self):
    """Exercise a code path without profiling data so output types are inferred."""
    device = "cuda"
    x0 = torch.randn(10, 128, device=device)
    x1 = torch.rand_like(x0)
    x2 = torch.rand_like(x0)

    def t(x0, x1, x2, flag : bool = True):
        x3 = 2.0 * x0
        x4 = 2.0 * x1
        x5 = 2.0 * x2
        if flag:
            return torch.stack([x3, x4, x5], dim=-1)
        # second code path doesn't run through profiling
        # hence would utilize type inference with profiling information
        return x0 + x1 + x2

    t_jit = torch.jit.script(t)
    self._run_helper(t_jit, t, x0, x1, x2, check_stride=True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                 "Requires fusion optimization pass to be effective")
def test_disable_const_chunk_propagation_for_normalization(self):
    """A GRU-style cell mixing layer_norm with chunk must compile and match eager."""
    device = "cuda"
    x0 = torch.randn(10, 12, device=device)
    x1 = torch.randn(10, 4, device=device)
    w0 = torch.randn(12, device=device)
    w1 = torch.randn(4, device=device)

    def t(x, y, w0, w1):
        ih = torch.layer_norm(x, (12,), w0)
        # chunk into reset / update / new gates (GRU naming)
        i_r, i_z, i_n = ih.chunk(3, dim=1)
        i_n = torch.layer_norm(i_n, (4,), w1)
        r = torch.sigmoid(i_r)
        n = torch.tanh(i_n + r * i_z)
        h = n + r * y
        return h

    t_jit = torch.jit.script(t)
    self._run_helper(t_jit, t, x0, x1, w0, w1, check_stride=True)
class TestEnableDisableCudaFuser(JitTestCase):
    """Tests for toggling the nvfuser backend on and off at runtime."""

    def setUp(self):
        super().setUp()
        if RUN_NVFUSER:
            # Start each test with nvfuser disabled; remember the prior state
            # (the setter returns the previous value).
            self.is_enabled = torch._C._jit_set_nvfuser_enabled(False)

    def tearDown(self):
        if RUN_NVFUSER:
            # Restore whatever state the suite started with.
            torch._C._jit_set_nvfuser_enabled(self.is_enabled)
        super().tearDown()

    @unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                     "Requires fusion optimization pass to be effective")
    def test_context_manager_test(self):
        """Functions compiled inside torch.jit.fuser('fuser2') fuse; outside they don't."""
        x = torch.randn(4, 8, dtype=torch.float, device="cuda")
        y = torch.randn(4, 8, dtype=torch.float, device="cuda")
        with torch.jit.fuser('fuser2'):
            with torch.jit.fuser('fuser2'):

                def t1(x, y):
                    o = x + y
                    o = o + 2.0
                    return o
                t_jit = torch.jit.script(t1)
                t_jit(x, y)
                t_jit(x, y)
                self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)

            # Inner context exited; fusion still enabled by the outer one.
            def t2(x, y):
                o = x + y
                o = o + 3.0
                return o
            t_jit_2 = torch.jit.script(t2)
            t_jit_2(x, y)
            t_jit_2(x, y)
            self.assertGraphContains(t_jit_2.graph_for(x, y), FUSION_GUARD)

        # Outside both contexts: no fusion guard expected at all.
        def t3(x, y):
            o = x + y
            o = o + 4.0
            return o
        t_jit_3 = torch.jit.script(t3)
        t_jit_3(x, y)
        t_jit_3(x, y)
        self.assertGraphContainsExactly(t_jit_3.graph_for(x, y), FUSION_GUARD, 0)

    @unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
    def test_register_fuser(self):
        """The enable setter returns the previous state and toggles the flag."""
        self.assertFalse(torch._C._jit_set_nvfuser_enabled(True))
        self.assertTrue(torch._C._jit_nvfuser_enabled())
        self.assertTrue(torch._C._jit_set_nvfuser_enabled(True))
        self.assertTrue(torch._C._jit_nvfuser_enabled())
        self.assertTrue(torch._C._jit_set_nvfuser_enabled(False))
        self.assertFalse(torch._C._jit_nvfuser_enabled())

    @unittest.skipIf(RUN_CUDA, "Testing on CPU only")
    def test_register_fuser_cpu(self):
        """Enabling nvfuser on a CPU-only build must raise RuntimeError."""
        with self.assertRaises(RuntimeError):
            torch._C._jit_set_nvfuser_enabled(True)
            # NOTE(review): this second call is unreachable when the first
            # raises as expected — confirm whether it was meant to live in
            # its own assertRaises block.
            torch._C._jit_set_nvfuser_enabled(False)

    @unittest.skipIf(not RUN_CUDA, "requires CUDA")
    @unittest.skipIf(not TEST_WITH_ROCM, "ROCM test only")
    def test_register_fuser_rocm(self):
        """Enabling nvfuser on ROCm must raise RuntimeError."""
        with self.assertRaises(RuntimeError):
            torch._C._jit_set_nvfuser_enabled(True)
            # NOTE(review): unreachable when the first call raises — see
            # test_register_fuser_cpu.
            torch._C._jit_set_nvfuser_enabled(False)

    def test_can_be_enabled_nvfuser(self):
        """nvfuser is enable-able exactly on CUDA (non-ROCm) builds."""
        if TEST_WITH_ROCM:
            expected = False
        else:
            expected = RUN_CUDA

        self.assertEqual(expected, torch._C._jit_nvfuser_can_be_enabled())
# See TestNNCOpInfoParent
class TestCudaFuserOpInfoParent(JitCommonTestCase):
    # Intermediate base class; the subclass below calls
    # super(TestCudaFuserOpInfoParent, self) to skip over it.
    pass
class TestCudaFuserOpInfo(TestCudaFuserOpInfoParent):
    """OpInfo-driven correctness tests for nvfuser in single-node fusion mode."""

    def setUp(self):
        # Deliberately skip TestCudaFuserOpInfoParent's setUp (see the
        # "See TestNNCOpInfoParent" note above the parent class).
        super(TestCudaFuserOpInfoParent, self).setUp()
        if RUN_NVFUSER:
            self.cuda_fuser_options = CudaFuserTestOptions()
            # enables guard mode since tracing could change graph to violate guard.
            torch._C._jit_set_nvfuser_guard_mode(True)
        self.nvfuser_single_node_mode = torch._C._jit_set_nvfuser_single_node_mode(True)

    def tearDown(self):
        if RUN_NVFUSER:
            self.cuda_fuser_options.restore()
        # Restore single-node mode to its pre-test value.
        torch._C._jit_set_nvfuser_single_node_mode(self.nvfuser_single_node_mode)
        super(TestCudaFuserOpInfoParent, self).tearDown()

    @slowTest
    @unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
    @ops(op_db, dtypes=OpDTypes.supported)
    def test_nvfuser_correctness(self, device, dtype, op):
        """Traced variants of every supported op must match eager under nvfuser."""
        if not op.supports_tracing:
            self.skipTest("nvfuser requires tracing support")

        variant_sample_pairs = get_traced_sample_variant_pairs(device, dtype, op)

        for variant, sample in variant_sample_pairs:
            trace = create_traced_fn(self, variant, cache_traced_fn=True)
            ref = variant(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)

            # Warm-up invocation so optimization can kick in before the run
            # whose output is compared.
            trace(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)
            val = trace(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)

            self.assertEqual(ref, val, exact_layout=True)

        # Note: Clearing CU after NVFuser tests
        # https://github.com/pytorch/pytorch/issues/35600
        # each torch.jit.trace adds state to the _python_cu compilation unit
        # since this test traces a lot of functions, out-of-memory can occur
        # if the CU is not cleared.
        torch.jit._state._python_cu.drop_all_functions()

    @slowTest
    @unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
                     "Requires fusion optimization pass to be effective")
    @ops(op_db, allowed_dtypes=(torch.float16, torch.bfloat16, torch.float32,
                                torch.float64, torch.complex64, torch.complex128))
    def test_nvfuser_extremal_values(self, device, dtype, op):
        """nvfuser must agree with eager when inputs are filled with inf/nan."""
        if not op.supports_tracing:
            self.skipTest("nvfuser requires tracing support")

        def _get_extremal_tensor(x, val, dtype):
            # Only replace tensors of the dtype under test.
            if x.dtype != dtype:
                return x
            return torch.full_like(x, val)

        def _get_extremal_input(x, val, dtype):
            if isinstance(x, torch.Tensor):
                return _get_extremal_tensor(x, val, dtype)
            elif is_iterable_of_tensors(x):
                return [_get_extremal_tensor(y, val, dtype) for y in x]
            return x

        def _get_extremal_sample(sample: SampleInput, val, dtype):
            extremal_sample = SampleInput(
                input=_get_extremal_input(sample.input, val, dtype),
                args=tuple(_get_extremal_input(x, val, dtype) for x in sample.args),
                kwargs={k: _get_extremal_input(v, val, dtype) for k, v in sample.kwargs.items()},
            )
            return extremal_sample

        def _get_extremal_samples(sample: SampleInput, dtype):
            vals = [float('inf'), float('-inf'), float('nan')]
            if dtype.is_complex:
                complex_vals = itertools.product(vals, vals)
                vals = tuple(map(lambda x: complex(*x), complex_vals))
            for val in vals:
                yield _get_extremal_sample(sample, val, dtype)

        # Fix: compute the variant/sample pairs exactly once — a duplicate,
        # identical call used to precede the helper definitions above.
        variant_sample_pairs = get_traced_sample_variant_pairs(device, dtype, op)

        for variant, sample in variant_sample_pairs:
            trace = create_traced_fn(self, variant, cache_traced_fn=True)
            # Warm up the traced function on well-behaved inputs first.
            trace(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)
            trace(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)

            for extremal_sample in _get_extremal_samples(sample, dtype):
                try:
                    with freeze_rng_state():
                        ref = variant(*clone_inputs((extremal_sample.input, *extremal_sample.args)),
                                      **extremal_sample.kwargs)
                except (torch._C._LinAlgError, RuntimeError, ValueError):
                    # if eager errors out, then don't expect NVFuser to pass
                    continue

                with freeze_rng_state():
                    val = trace(*clone_inputs((extremal_sample.input, *extremal_sample.args)),
                                **extremal_sample.kwargs)

                self.assertEqual(val, ref, equal_nan=True, exact_device=True)

        # See [Note: Clearing CU after NVFuser tests]
        torch.jit._state._python_cu.drop_all_functions()
# Register cuda-only instantiations of the OpInfo tests above.
# NOTE(review): ("cuda") is a plain string, not a one-element tuple;
# confirm instantiate_device_type_tests accepts both forms.
instantiate_device_type_tests(TestCudaFuserOpInfo, globals(), only_for=("cuda"))

if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_jit_cuda_fuser.py |
# Owner(s): ["module: unknown"]
import unittest
import torch.testing._internal.common_utils as common
from torch.testing._internal.common_utils import TEST_NUMPY
from torch.testing._internal.common_cuda import TEST_NUMBA_CUDA, TEST_CUDA, TEST_MULTIGPU
import torch
if TEST_NUMPY:
import numpy
if TEST_NUMBA_CUDA:
import numba.cuda
class TestNumbaIntegration(common.TestCase):
    """Interop tests between torch CUDA tensors and numba via __cuda_array_interface__."""

    @unittest.skipIf(not TEST_NUMPY, "No numpy")
    @unittest.skipIf(not TEST_CUDA, "No cuda")
    def test_cuda_array_interface(self):
        """torch.Tensor exposes __cuda_array_interface__ for cuda tensors.
        An object t is considered a cuda-tensor if:
            hasattr(t, '__cuda_array_interface__')
        A cuda-tensor provides a tensor description dict:
            shape: (integer, ...) Tensor shape.
            strides: (integer, ...) Tensor strides, in bytes.
            typestr: (str) A numpy-style typestr.
            data: (int, boolean) A (data_ptr, read-only) tuple.
            version: (int) Version 0
        See:
        https://numba.pydata.org/numba-doc/latest/cuda/cuda_array_interface.html
        """
        types = [
            torch.DoubleTensor,
            torch.FloatTensor,
            torch.HalfTensor,
            torch.LongTensor,
            torch.IntTensor,
            torch.ShortTensor,
            torch.CharTensor,
            torch.ByteTensor,
        ]
        dtypes = [
            numpy.float64,
            numpy.float32,
            numpy.float16,
            numpy.int64,
            numpy.int32,
            numpy.int16,
            numpy.int8,
            numpy.uint8,
        ]
        for tp, npt in zip(types, dtypes):
            # CPU tensors do not implement the interface.
            cput = tp(10)
            self.assertFalse(hasattr(cput, "__cuda_array_interface__"))
            self.assertRaises(AttributeError, lambda: cput.__cuda_array_interface__)

            # Sparse CPU/CUDA tensors do not implement the interface
            if tp not in (torch.HalfTensor,):
                indices_t = torch.empty(1, cput.size(0), dtype=torch.long).clamp_(min=0)
                sparse_t = torch.sparse_coo_tensor(indices_t, cput)

                self.assertFalse(hasattr(sparse_t, "__cuda_array_interface__"))
                self.assertRaises(
                    AttributeError, lambda: sparse_t.__cuda_array_interface__
                )

                sparse_cuda_t = torch.sparse_coo_tensor(indices_t, cput).cuda()

                self.assertFalse(hasattr(sparse_cuda_t, "__cuda_array_interface__"))
                self.assertRaises(
                    AttributeError, lambda: sparse_cuda_t.__cuda_array_interface__
                )

            # CUDA tensors have the attribute and v2 interface
            cudat = tp(10).cuda()

            self.assertTrue(hasattr(cudat, "__cuda_array_interface__"))

            ar_dict = cudat.__cuda_array_interface__

            self.assertEqual(
                set(ar_dict.keys()), {"shape", "strides", "typestr", "data", "version"}
            )

            self.assertEqual(ar_dict["shape"], (10,))
            # Contiguous tensors report strides=None per the interface spec.
            self.assertIs(ar_dict["strides"], None)
            # typestr from numpy, cuda-native little-endian
            self.assertEqual(ar_dict["typestr"], numpy.dtype(npt).newbyteorder("<").str)
            self.assertEqual(ar_dict["data"], (cudat.data_ptr(), False))
            self.assertEqual(ar_dict["version"], 2)

    @unittest.skipIf(not TEST_CUDA, "No cuda")
    @unittest.skipIf(not TEST_NUMBA_CUDA, "No numba.cuda")
    def test_array_adaptor(self):
        """Torch __cuda_array_adaptor__ exposes tensor data to numba.cuda."""
        torch_dtypes = [
            torch.complex64,
            torch.complex128,
            torch.float16,
            torch.float32,
            torch.float64,
            torch.uint8,
            torch.int8,
            torch.int16,
            torch.int32,
            torch.int64,
        ]

        for dt in torch_dtypes:

            # CPU tensors of all types do not register as cuda arrays,
            # attempts to convert raise a type error.
            cput = torch.arange(10).to(dt)
            npt = cput.numpy()

            self.assertTrue(not numba.cuda.is_cuda_array(cput))
            with self.assertRaises(TypeError):
                numba.cuda.as_cuda_array(cput)

            # Any cuda tensor is a cuda array.
            cudat = cput.to(device="cuda")
            self.assertTrue(numba.cuda.is_cuda_array(cudat))

            numba_view = numba.cuda.as_cuda_array(cudat)
            self.assertIsInstance(numba_view, numba.cuda.devicearray.DeviceNDArray)

            # The reported type of the cuda array matches the numpy type of the cpu tensor.
            self.assertEqual(numba_view.dtype, npt.dtype)
            self.assertEqual(numba_view.strides, npt.strides)
            self.assertEqual(numba_view.shape, cudat.shape)

            # Pass back to cuda from host for all equality checks below, needed for
            # float16 comparisons, which aren't supported cpu-side.

            # The data is identical in the view.
            self.assertEqual(cudat, torch.tensor(numba_view.copy_to_host()).to("cuda"))

            # Writes to the torch.Tensor are reflected in the numba array.
            cudat[:5] = 11
            self.assertEqual(cudat, torch.tensor(numba_view.copy_to_host()).to("cuda"))

            # Strided tensors are supported.
            strided_cudat = cudat[::2]
            strided_npt = cput[::2].numpy()
            strided_numba_view = numba.cuda.as_cuda_array(strided_cudat)

            self.assertEqual(strided_numba_view.dtype, strided_npt.dtype)
            self.assertEqual(strided_numba_view.strides, strided_npt.strides)
            self.assertEqual(strided_numba_view.shape, strided_cudat.shape)

            # As of numba 0.40.0 support for strided views is ...limited...
            # Cannot verify correctness of strided view operations.

    @unittest.skipIf(not TEST_CUDA, "No cuda")
    @unittest.skipIf(not TEST_NUMBA_CUDA, "No numba.cuda")
    def test_conversion_errors(self):
        """Numba properly detects array interface for tensor.Tensor variants."""
        # CPU tensors are not cuda arrays.
        cput = torch.arange(100)
        self.assertFalse(numba.cuda.is_cuda_array(cput))
        with self.assertRaises(TypeError):
            numba.cuda.as_cuda_array(cput)

        # Sparse tensors are not cuda arrays, regardless of device.
        sparset = torch.sparse_coo_tensor(cput[None, :], cput)
        self.assertFalse(numba.cuda.is_cuda_array(sparset))
        with self.assertRaises(TypeError):
            numba.cuda.as_cuda_array(sparset)

        # Fix: the assertions below previously re-tested the CPU ``sparset``
        # instead of the CUDA sparse tensor that was just created.
        sparse_cuda_t = sparset.cuda()
        self.assertFalse(numba.cuda.is_cuda_array(sparse_cuda_t))
        with self.assertRaises(TypeError):
            numba.cuda.as_cuda_array(sparse_cuda_t)

        # Device-status overrides gradient status.
        # CPU+gradient isn't a cuda array.
        cpu_gradt = torch.zeros(100).requires_grad_(True)
        self.assertFalse(numba.cuda.is_cuda_array(cpu_gradt))
        with self.assertRaises(TypeError):
            numba.cuda.as_cuda_array(cpu_gradt)

        # CUDA+gradient raises a RuntimeError on check or conversion.
        #
        # Use of hasattr for interface detection causes interface change in
        # python2; it swallows all exceptions not just AttributeError.
        cuda_gradt = torch.zeros(100).requires_grad_(True).cuda()

        # conversion raises RuntimeError
        with self.assertRaises(RuntimeError):
            numba.cuda.is_cuda_array(cuda_gradt)
        with self.assertRaises(RuntimeError):
            numba.cuda.as_cuda_array(cuda_gradt)

    @unittest.skipIf(not TEST_CUDA, "No cuda")
    @unittest.skipIf(not TEST_NUMBA_CUDA, "No numba.cuda")
    @unittest.skipIf(not TEST_MULTIGPU, "No multigpu")
    def test_active_device(self):
        """'as_cuda_array' tensor device must match active numba context."""
        # Both torch/numba default to device 0 and can interop freely
        cudat = torch.arange(10, device="cuda")
        self.assertEqual(cudat.device.index, 0)
        self.assertIsInstance(
            numba.cuda.as_cuda_array(cudat), numba.cuda.devicearray.DeviceNDArray
        )

        # Tensors on non-default device raise api error if converted
        cudat = torch.arange(10, device=torch.device("cuda", 1))
        with self.assertRaises(numba.cuda.driver.CudaAPIError):
            numba.cuda.as_cuda_array(cudat)

        # but can be converted when switching to the device's context
        with numba.cuda.devices.gpus[cudat.device.index]:
            self.assertIsInstance(
                numba.cuda.as_cuda_array(cudat), numba.cuda.devicearray.DeviceNDArray
            )

    @unittest.skip("Test is temporary disabled, see https://github.com/pytorch/pytorch/issues/54418")
    @unittest.skipIf(not TEST_NUMPY, "No numpy")
    @unittest.skipIf(not TEST_CUDA, "No cuda")
    @unittest.skipIf(not TEST_NUMBA_CUDA, "No numba.cuda")
    def test_from_cuda_array_interface(self):
        """torch.as_tensor() and torch.tensor() supports the __cuda_array_interface__ protocol.
        If an object exposes the __cuda_array_interface__, .as_tensor() and .tensor()
        will use the exposed device memory.
        See:
        https://numba.pydata.org/numba-doc/latest/cuda/cuda_array_interface.html
        """
        dtypes = [
            numpy.complex64,
            numpy.complex128,
            numpy.float64,
            numpy.float32,
            numpy.int64,
            numpy.int32,
            numpy.int16,
            numpy.int8,
            numpy.uint8,
        ]
        for dtype in dtypes:
            numpy_arys = [
                numpy.arange(6).reshape(2, 3).astype(dtype),
                numpy.arange(6).reshape(2, 3).astype(dtype)[1:],  # View offset should be ignored
                numpy.arange(6).reshape(2, 3).astype(dtype)[:, None],  # change the strides but still contiguous
            ]
            # Zero-copy when using `torch.as_tensor()`
            for numpy_ary in numpy_arys:
                numba_ary = numba.cuda.to_device(numpy_ary)
                torch_ary = torch.as_tensor(numba_ary, device="cuda")
                self.assertEqual(numba_ary.__cuda_array_interface__, torch_ary.__cuda_array_interface__)
                self.assertEqual(torch_ary.cpu().data.numpy(), numpy.asarray(numba_ary, dtype=dtype))

                # Check that `torch_ary` and `numba_ary` points to the same device memory
                torch_ary += 42
                self.assertEqual(torch_ary.cpu().data.numpy(), numpy.asarray(numba_ary, dtype=dtype))

            # Implicit-copy because `torch_ary` is a CPU array
            for numpy_ary in numpy_arys:
                numba_ary = numba.cuda.to_device(numpy_ary)
                torch_ary = torch.as_tensor(numba_ary, device="cpu")
                self.assertEqual(torch_ary.data.numpy(), numpy.asarray(numba_ary, dtype=dtype))

                # Check that `torch_ary` and `numba_ary` points to different memory
                torch_ary += 42
                self.assertEqual(torch_ary.data.numpy(), numpy.asarray(numba_ary, dtype=dtype) + 42)

            # Explicit-copy when using `torch.tensor()`
            for numpy_ary in numpy_arys:
                numba_ary = numba.cuda.to_device(numpy_ary)
                torch_ary = torch.tensor(numba_ary, device="cuda")
                self.assertEqual(torch_ary.cpu().data.numpy(), numpy.asarray(numba_ary, dtype=dtype))

                # Check that `torch_ary` and `numba_ary` points to different memory
                torch_ary += 42
                self.assertEqual(torch_ary.cpu().data.numpy(), numpy.asarray(numba_ary, dtype=dtype) + 42)

    @unittest.skipIf(not TEST_NUMPY, "No numpy")
    @unittest.skipIf(not TEST_CUDA, "No cuda")
    @unittest.skipIf(not TEST_NUMBA_CUDA, "No numba.cuda")
    def test_from_cuda_array_interface_inferred_strides(self):
        """torch.as_tensor(numba_ary) should have correct inferred (contiguous) strides"""
        # This could, in theory, be combined with test_from_cuda_array_interface but that test
        # is overly strict: it checks that the exported protocols are exactly the same, which
        # cannot handle differing exported protocol versions.
        dtypes = [
            numpy.float64,
            numpy.float32,
            numpy.int64,
            numpy.int32,
            numpy.int16,
            numpy.int8,
            numpy.uint8,
        ]
        for dtype in dtypes:
            numpy_ary = numpy.arange(6).reshape(2, 3).astype(dtype)
            numba_ary = numba.cuda.to_device(numpy_ary)
            self.assertTrue(numba_ary.is_c_contiguous())
            torch_ary = torch.as_tensor(numba_ary, device="cuda")
            self.assertTrue(torch_ary.is_contiguous())

    @unittest.skip("Test is temporary disabled, see https://github.com/pytorch/pytorch/issues/54418")
    @unittest.skipIf(not TEST_NUMPY, "No numpy")
    @unittest.skipIf(not TEST_CUDA, "No cuda")
    @unittest.skipIf(not TEST_NUMBA_CUDA, "No numba.cuda")
    def test_from_cuda_array_interface_lifetime(self):
        """torch.as_tensor(obj) tensor grabs a reference to obj so that the lifetime of obj exceeds the tensor"""
        numba_ary = numba.cuda.to_device(numpy.arange(6))
        torch_ary = torch.as_tensor(numba_ary, device="cuda")
        self.assertEqual(torch_ary.__cuda_array_interface__, numba_ary.__cuda_array_interface__)  # No copy
        del numba_ary
        self.assertEqual(torch_ary.cpu().data.numpy(), numpy.arange(6))  # `torch_ary` is still alive

    @unittest.skip("Test is temporary disabled, see https://github.com/pytorch/pytorch/issues/54418")
    @unittest.skipIf(not TEST_NUMPY, "No numpy")
    @unittest.skipIf(not TEST_CUDA, "No cuda")
    @unittest.skipIf(not TEST_NUMBA_CUDA, "No numba.cuda")
    @unittest.skipIf(not TEST_MULTIGPU, "No multigpu")
    def test_from_cuda_array_interface_active_device(self):
        """torch.as_tensor() tensor device must match active numba context."""
        # Zero-copy: both torch/numba default to device 0 and can interop freely
        numba_ary = numba.cuda.to_device(numpy.arange(6))
        torch_ary = torch.as_tensor(numba_ary, device="cuda")
        self.assertEqual(torch_ary.cpu().data.numpy(), numpy.asarray(numba_ary))
        self.assertEqual(torch_ary.__cuda_array_interface__, numba_ary.__cuda_array_interface__)

        # Implicit-copy: when the Numba and Torch device differ
        numba_ary = numba.cuda.to_device(numpy.arange(6))
        torch_ary = torch.as_tensor(numba_ary, device=torch.device("cuda", 1))
        self.assertEqual(torch_ary.get_device(), 1)
        self.assertEqual(torch_ary.cpu().data.numpy(), numpy.asarray(numba_ary))

        # The data pointers must differ (a copy happened); everything else in
        # the exported protocol should agree.
        if1 = torch_ary.__cuda_array_interface__
        if2 = numba_ary.__cuda_array_interface__
        self.assertNotEqual(if1["data"], if2["data"])
        del if1["data"]
        del if2["data"]
        self.assertEqual(if1, if2)
# Standard PyTorch test entry point.
if __name__ == "__main__":
    common.run_tests()
| pytorch-master | test/test_numba_integration.py |
# Owner(s): ["module: cpp"]
import torch
# NN tests use double as the default dtype
# (set before any test tensors are created below).
torch.set_default_dtype(torch.double)
import os
import torch.testing._internal.common_utils as common
import torch.testing._internal.common_nn as common_nn
from cpp_api_parity.parity_table_parser import parse_parity_tracker_table
from cpp_api_parity.utils import is_torch_nn_functional_test
from cpp_api_parity import module_impl_check, functional_impl_check, sample_module, sample_functional
# NOTE: turn this on if you want to print source code of all C++ tests (e.g. for debugging purpose)
PRINT_CPP_SOURCE = False

# Devices every generated parity test is instantiated for.
devices = ['cpu', 'cuda']

# Parity expectations are read from the markdown tracker next to this file.
PARITY_TABLE_PATH = os.path.join(os.path.dirname(__file__), 'cpp_api_parity', 'parity-tracker.md')

parity_table = parse_parity_tracker_table(PARITY_TABLE_PATH)
class TestCppApiParity(common.TestCase):
    # Populated below by write_test_to_test_class for each generated test.
    module_test_params_map = {}
    functional_test_params_map = {}

# Every test dict that received a generated parity test; used by the count
# assertions below.
expected_test_params_dicts = []

if not common.IS_ARM64:
    # Generate one Python test per (test dict, device) pair, dispatching to
    # the functional or module checker based on the dict's contents.
    for test_params_dicts, test_instance_class in [
        (sample_module.module_tests, common_nn.NewModuleTest),
        (sample_functional.functional_tests, common_nn.NewModuleTest),
        (common_nn.module_tests, common_nn.NewModuleTest),
        (common_nn.new_module_tests, common_nn.NewModuleTest),
        (common_nn.criterion_tests, common_nn.CriterionTest),
    ]:
        for test_params_dict in test_params_dicts:
            if test_params_dict.get('test_cpp_api_parity', True):
                if is_torch_nn_functional_test(test_params_dict):
                    functional_impl_check.write_test_to_test_class(
                        TestCppApiParity, test_params_dict, test_instance_class, parity_table, devices)
                else:
                    module_impl_check.write_test_to_test_class(
                        TestCppApiParity, test_params_dict, test_instance_class, parity_table, devices)
                expected_test_params_dicts.append(test_params_dict)

    # Assert that all NN module/functional test dicts appear in the parity test
    assert len([name for name in TestCppApiParity.__dict__ if 'test_torch_nn_' in name]) == \
        len(expected_test_params_dicts) * len(devices)

    # Assert that there exists auto-generated tests for `SampleModule` and `sample_functional`.
    # 4 == 2 (number of test dicts that are not skipped) * 2 (number of devices)
    assert len([name for name in TestCppApiParity.__dict__ if 'SampleModule' in name]) == 4
    # 4 == 2 (number of test dicts that are not skipped) * 2 (number of devices)
    assert len([name for name in TestCppApiParity.__dict__ if 'sample_functional' in name]) == 4

    # Compile and attach the C++ side of every generated test.
    module_impl_check.build_cpp_tests(TestCppApiParity, print_cpp_source=PRINT_CPP_SOURCE)
    functional_impl_check.build_cpp_tests(TestCppApiParity, print_cpp_source=PRINT_CPP_SOURCE)
# Standard PyTorch test entry point.
if __name__ == "__main__":
    common.run_tests()
| pytorch-master | test/test_cpp_api_parity.py |
import torch
class LinearMod(torch.nn.Linear):
    """``nn.Linear`` variant whose forward calls the aten linear op directly.

    Behaves exactly like ``torch.nn.Linear``; the pass-through ``__init__``
    override was removed because it only forwarded its arguments to super().
    """

    def forward(self, input):
        # Bypass torch.nn.functional.linear and call the C++ kernel directly.
        return torch._C._nn.linear(input, self.weight, self.bias)
print(torch.jit.trace(LinearMod(20, 20), torch.rand([20, 20])).graph)
| pytorch-master | test/linear.py |
# Owner(s): ["module: multiprocessing"]
import os
import pickle
import random
import signal
import sys
import time
import unittest
from torch.testing._internal.common_utils import (TestCase, run_tests, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN)
import torch.multiprocessing as mp
def _test_success_func(i):
    """Worker that does nothing and exits successfully (happy-path fixture)."""
    return None
def _test_success_single_arg_func(i, arg):
    """Worker that reports its index through ``arg`` (a queue) when one is given."""
    if not arg:
        return
    arg.put(i)
def _test_exception_single_func(i, arg):
    """Raise from the worker whose index equals ``arg``; other workers idle."""
    if i != arg:
        time.sleep(1.0)
        return
    raise ValueError("legitimate exception from process %d" % i)
def _test_exception_all_func(i):
    """Every worker sleeps a random sliver (<0.1s) and then raises."""
    delay = random.random() / 10
    time.sleep(delay)
    raise ValueError("legitimate exception from process %d" % i)
def _test_terminate_signal_func(i):
    """Worker 0 aborts itself with SIGABRT; others sleep so the abort surfaces first."""
    if i == 0:
        os.kill(os.getpid(), signal.SIGABRT)
    time.sleep(1.0)
def _test_terminate_exit_func(i, arg):
    """Worker 0 exits with status ``arg``; other workers sleep so it exits first."""
    if i != 0:
        time.sleep(1.0)
        return
    sys.exit(arg)
def _test_success_first_then_exception_func(i, arg):
    """Worker 0 returns cleanly; every other worker fails shortly afterwards."""
    if i != 0:
        time.sleep(0.1)
        raise ValueError("legitimate exception")
def _test_nested_child_body(i, ready_queue, nested_child_sleep):
    """Grandchild body: announce readiness on the queue, then stay alive a while."""
    ready_queue.put(None)
    time.sleep(nested_child_sleep)
def _test_infinite_task(i):
    """Loop forever; the parent test tears this worker down with a signal."""
    while True:
        time.sleep(1)
def _test_process_exit(idx):
    """Exit immediately with a fixed, recognizable status code (12)."""
    sys.exit(12)
def _test_nested(i, pids_queue, nested_child_sleep, start_method):
    """Spawn grandchildren, report their pids, then kill self.

    Used to verify that nested child processes are torn down when their
    parent dies (relies on the parent-death signal registered via prctl(2)
    on Linux — see the caller's skip condition).
    """
    context = mp.get_context(start_method)
    nested_child_ready_queue = context.Queue()
    nprocs = 2
    mp_context = mp.start_processes(
        fn=_test_nested_child_body,
        args=(nested_child_ready_queue, nested_child_sleep),
        nprocs=nprocs,
        join=False,
        daemon=False,
        start_method=start_method,
    )
    # Tell the parent test which pids to poll for termination.
    pids_queue.put(mp_context.pids())

    # Wait for both children to have started, to ensure that they
    # have called prctl(2) to register a parent death signal.
    for _ in range(nprocs):
        nested_child_ready_queue.get()

    # Kill self. This should take down the child processes as well.
    os.kill(os.getpid(), signal.SIGTERM)
class _TestMultiProcessing(object):
    """Shared test bodies; concrete subclasses pick the start method."""

    # Set by subclasses to the multiprocessing start method under test.
    start_method = None

    def test_success(self):
        mp.start_processes(_test_success_func, nprocs=2, start_method=self.start_method)

    def test_success_non_blocking(self):
        mp_context = mp.start_processes(_test_success_func, nprocs=2, join=False, start_method=self.start_method)

        # After all processes (nproc=2) have joined it must return True
        mp_context.join(timeout=None)
        mp_context.join(timeout=None)
        self.assertTrue(mp_context.join(timeout=None))

    def test_first_argument_index(self):
        # Each worker receives its index as the first argument and reports it.
        context = mp.get_context(self.start_method)
        queue = context.SimpleQueue()
        mp.start_processes(_test_success_single_arg_func, args=(queue,), nprocs=2, start_method=self.start_method)
        self.assertEqual([0, 1], sorted([queue.get(), queue.get()]))

    def test_exception_single(self):
        nprocs = 2
        for i in range(nprocs):
            with self.assertRaisesRegex(
                Exception,
                "\nValueError: legitimate exception from process %d$" % i,
            ):
                mp.start_processes(_test_exception_single_func, args=(i,), nprocs=nprocs, start_method=self.start_method)

    def test_exception_all(self):
        # Either worker may fail first; the message pins whichever one did.
        with self.assertRaisesRegex(
            Exception,
            "\nValueError: legitimate exception from process (0|1)$",
        ):
            mp.start_processes(_test_exception_all_func, nprocs=2, start_method=self.start_method)

    def test_terminate_signal(self):
        # SIGABRT is aliased with SIGIOT
        message = "process 0 terminated with signal (SIGABRT|SIGIOT)"

        # Termination through with signal is expressed as a negative exit code
        # in multiprocessing, so we know it was a signal that caused the exit.
        # This doesn't appear to exist on Windows, where the exit code is always
        # positive, and therefore results in a different exception message.
        # Exit code 22 means "ERROR_BAD_COMMAND".
        if IS_WINDOWS:
            message = "process 0 terminated with exit code 22"

        with self.assertRaisesRegex(Exception, message):
            mp.start_processes(_test_terminate_signal_func, nprocs=2, start_method=self.start_method)

    def test_terminate_exit(self):
        exitcode = 123
        with self.assertRaisesRegex(
            Exception,
            "process 0 terminated with exit code %d" % exitcode,
        ):
            mp.start_processes(_test_terminate_exit_func, args=(exitcode,), nprocs=2, start_method=self.start_method)

    def test_success_first_then_exception(self):
        exitcode = 123
        with self.assertRaisesRegex(
            Exception,
            "ValueError: legitimate exception",
        ):
            mp.start_processes(_test_success_first_then_exception_func, args=(exitcode,), nprocs=2, start_method=self.start_method)

    @unittest.skipIf(
        sys.platform != "linux",
        "Only runs on Linux; requires prctl(2)",
    )
    def _test_nested(self):
        # NOTE(review): the leading underscore keeps this from being collected
        # as a test — confirm whether the disablement is intentional.
        context = mp.get_context(self.start_method)
        pids_queue = context.Queue()
        nested_child_sleep = 20.0
        # ``_test_nested`` here resolves to the module-level worker function.
        mp_context = mp.start_processes(
            fn=_test_nested,
            args=(pids_queue, nested_child_sleep, self.start_method),
            nprocs=1,
            join=False,
            daemon=False,
            start_method=self.start_method,
        )

        # Wait for nested children to terminate in time
        pids = pids_queue.get()
        start = time.time()
        while len(pids) > 0:
            for pid in pids:
                try:
                    # Signal 0 only checks for process existence.
                    os.kill(pid, 0)
                except ProcessLookupError:
                    pids.remove(pid)
                    break

            # This assert fails if any nested child process is still
            # alive after (nested_child_sleep / 2) seconds. By
            # extension, this test times out with an assertion error
            # after (nested_child_sleep / 2) seconds.
            self.assertLess(time.time() - start, nested_child_sleep / 2)
            time.sleep(0.1)
@unittest.skipIf(
    NO_MULTIPROCESSING_SPAWN,
    "Disabled for environments that don't support the spawn start method")
class SpawnTest(TestCase, _TestMultiprocessing):
    """Run the shared _TestMultiprocessing suite with the 'spawn' start method."""
    start_method = 'spawn'

    def test_exception_raises(self):
        """An exception in a spawned worker surfaces as ProcessRaisedException."""
        with self.assertRaises(mp.ProcessRaisedException):
            mp.spawn(_test_success_first_then_exception_func, args=(), nprocs=1)

    def test_signal_raises(self):
        """Killing a spawned worker with SIGTERM surfaces as ProcessExitedException."""
        context = mp.spawn(_test_infinite_task, args=(), nprocs=1, join=False)
        for pid in context.pids():
            os.kill(pid, signal.SIGTERM)
        with self.assertRaises(mp.ProcessExitedException):
            context.join()

    def _test_process_exited(self):
        # NOTE(review): disabled (leading underscore); presumably flaky — verify
        # before re-enabling.
        with self.assertRaises(mp.ProcessExitedException) as e:
            mp.spawn(_test_process_exit, args=(), nprocs=1)
        # Bug fix: assertRaises stores the raised exception on the context
        # manager's `.exception` attribute; `e.exit_code` raises AttributeError.
        self.assertEqual(12, e.exception.exit_code)
@unittest.skipIf(
    IS_WINDOWS,
    "Fork is only available on Unix",
)
class ForkTest(TestCase, _TestMultiprocessing):
    """Run the shared _TestMultiprocessing suite with the 'fork' start method."""
    start_method = 'fork'
class ErrorTest(TestCase):
    """Checks that multiprocessing error types survive pickling."""

    def test_errors_pickleable(self):
        errors = [
            mp.ProcessRaisedException("Oh no!", 1, 1),
            mp.ProcessExitedException("Oh no!", 1, 1, 1),
        ]
        for error in errors:
            # A round-trip through pickle must not raise.
            pickle.loads(pickle.dumps(error))
if __name__ == '__main__':
run_tests()
| pytorch-master | test/test_multiprocessing_spawn.py |
# Owner(s): ["module: multiprocessing"]
import contextlib
import gc
import os
import sys
import time
import unittest
import copy
from sys import platform
import torch
import torch.cuda
import torch.multiprocessing as mp
import torch.utils.hooks
from torch.nn import Parameter
from torch.testing._internal.common_utils import (TestCase, run_tests, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, TEST_WITH_ASAN,
load_tests, slowTest, TEST_WITH_TSAN, TEST_WITH_ROCM)
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
TEST_REPEATS = 30
HAS_SHM_FILES = os.path.isdir('/dev/shm')
TEST_CUDA_IPC = torch.cuda.is_available() and \
sys.platform != 'darwin' and \
sys.platform != 'win32'
TEST_MULTIGPU = TEST_CUDA_IPC and torch.cuda.device_count() > 1
class SubProcess(mp.Process):
    """Daemon child process that adds 3 in-place to an inherited tensor."""

    def __init__(self, tensor):
        super().__init__()
        self.tensor = tensor
        self.daemon = True

    def run(self):
        self.tensor.add_(3)
def _test_cuda_ipc_deadlock_actor(queue, iterations):
for i in range(iterations):
if not queue.empty():
queue.get()
time.sleep(.01)
def _test_cuda_ipc_deadlock_learner(queue, iterations):
    """Repeatedly publish deep copies of a CUDA LSTM state dict onto ``queue``."""
    model = torch.nn.LSTM(1, 1).cuda()
    for _ in range(iterations):
        if not queue.full():
            queue.put(copy.deepcopy(model.state_dict()))
        time.sleep(.01)
def simple_fill(queue, event):
    """Fill the first tensor received over ``queue`` with 4, then signal ``event``."""
    tensors = queue.get()
    tensors[0][:] = 4
    event.set()
def simple_pool_fill(tensor):
    """Fill ``tensor`` with 4 in-place; return an out-of-place copy plus one."""
    filled = tensor.fill_(4)
    return filled.add(1)
def send_tensor(queue, event, device, dtype):
    """Put the same ones-tensor on ``queue`` twice, then block until ``event`` fires."""
    tensor = torch.ones(5, 5, device=device, dtype=dtype)
    for _ in range(2):
        queue.put(tensor)
    event.wait()
def send_and_delete_tensors(queue, event, device, dtype, count, size=5):
    """Send ``count`` tensors (tensor i filled with value i), dropping each local
    reference immediately after the put, then block until ``event`` fires."""
    for value in range(count):
        queue.put(torch.full([size], value, device=device, dtype=dtype))
    event.wait()
def receive_and_send_sum(queue, out_queue, event, device, dtype, count, size=5):
    """Accumulate ``count`` tensors from ``queue`` into a running total, publish
    the total on ``out_queue``, then block until ``event`` fires."""
    total = torch.full([size], 0, device=device, dtype=dtype)
    for _ in range(count):
        total += queue.get()
    out_queue.put(total)
    event.wait()
def receive_and_send(queue, out_queue, event, count):
    """Relay ``count`` items from ``queue`` to ``out_queue`` as clones, then block
    until ``event`` fires."""
    for _ in range(count):
        out_queue.put(queue.get().clone())
    event.wait()
def sum_tensors(inq, outq):
    """On CUDA device 1, report (sum, device, numel, storage size) for each
    tensor received over ``inq``."""
    with torch.cuda.device(1):
        for tensor in inq.get():
            outq.put((tensor.sum().item(), tensor.get_device(),
                      tensor.numel(), tensor.storage().size()))
def queue_get_exception(inqueue, outqueue):
    """Attempt a CUDA allocation and report the outcome via ``outqueue``.

    Puts the caught exception object, or the string 'no exception', on
    ``outqueue``.  ``inqueue`` is never read here; the parent uses it to ship a
    tensor into this (forked) process — presumably to force CUDA IPC through
    the queue; TODO confirm against test_cuda_bad_call.
    """
    os.close(2)  # hide expected error message
    try:
        torch.zeros(5, 5).cuda()
    except Exception as e:
        outqueue.put(e)
    else:
        outqueue.put('no exception')
# Multiply by two in a separate stream
def cuda_multiply_two(queue, ready, done):
    """Consumer half of test_event: on a fresh stream, receive (event, tensor)
    over ``queue``, wait on the event before doubling the tensor in-place,
    re-record the event, and signal ``done``."""
    ready.set()
    with torch.cuda.stream(torch.cuda.Stream()):
        cuda_event, tensor = queue.get()
        # Order matters: the wait ensures the parent's sleep+add finished
        # before the multiply runs.
        cuda_event.wait()
        tensor.mul_(2)
        cuda_event.record()
        done.set()
        # Drop the IPC event before leaving the stream context.
        del cuda_event
def requires_grad_variable_sharing(queue, ready):
    """Echo back whether the tensor received over ``queue`` requires grad."""
    tensor = queue.get()
    ready.set()
    queue.put(tensor.requires_grad)
def integer_parameter_serialization(iparam):
    """Touch the deserialized integer parameter; the add must not raise."""
    _ = iparam + 1
def autograd_sharing(queue, ready, master_modified, device, is_parameter):
    """Worker for _test_autograd_sharing: validate a shared autograd tensor.

    Receives a 5x5 tensor, waits until the parent mutates it, then checks that
    the mutation is visible, that grad and backward hooks did not cross the
    process boundary, and that the Parameter/Tensor type survived transport.
    Writes its own modifications back so the parent can verify sharing is
    bidirectional, and reports a single boolean verdict on ``queue``.
    """
    var = queue.get()
    ready.set()
    master_modified.wait()
    # The parent wrote 1000 into var[0, 0] after master_modified was set.
    expected_var = torch.arange(1., 26, device=device).view(5, 5)
    expected_var[0, 0] = 1000
    is_ok = var.data.equal(expected_var)
    var.data[:] = torch.ones(5, 5, device=device)
    # Gradients and backward hooks must not be shared across processes.
    is_ok &= var.grad is None
    is_ok &= not var._backward_hooks
    if is_parameter:
        is_ok &= type(var) == Parameter
    else:
        is_ok &= type(var) == torch.Tensor
    var._grad = torch.ones(5, 5, device=device)
    queue.put(is_ok)
def mixed_type_producer(queue, event):
    """Send ten (float, byte) CUDA tensor pairs, pacing each round on ``event``."""
    for _ in range(10):
        queue.put(torch.ones(2, 2).float().cuda())
        queue.put(torch.zeros(2, 2).byte().cuda())
        event.wait()
        event.clear()
def simple_autograd_function(a=1):
    """Run a tiny backward pass and return ``a`` squared."""
    loss = torch.rand(3).requires_grad_(True).mean()
    loss.backward()
    return a ** 2
@contextlib.contextmanager
def fs_sharing():
    """Temporarily switch torch.multiprocessing to the file_system strategy."""
    previous = mp.get_sharing_strategy()
    mp.set_sharing_strategy('file_system')
    try:
        yield
    finally:
        mp.set_sharing_strategy(previous)
class leak_checker:
    """Context manager that watches a test for leaked fds and /dev/shm files."""

    def __init__(self, test_case):
        self.checked_pids = [os.getpid()]
        self.test_case = test_case

    def __enter__(self):
        self.next_fds = self._get_next_fds(10)
        return self

    def __exit__(self, *args):
        if torch.cuda.is_available():
            torch.cuda.ipc_collect()
        if args[0] is None:
            # Check that the 10th available file-descriptor at the end of the
            # test is no more than 4 higher than the 10th available at the
            # start. This attempts to catch file descriptor leaks, but allows
            # one-off initialization that may use up a file descriptor
            # TODO: Disabled because this check is too flaky
            # available_fds = self._get_next_fds(10)
            # self.test_case.assertLessEqual(
            #     available_fds[-1] - self.next_fds[-1], 5)
            self.test_case.assertFalse(self.has_shm_files())
        return False

    def check_pid(self, pid):
        """Register a child pid whose shm files should be tracked."""
        self.checked_pids.append(pid)

    def _get_next_fds(self, n=1):
        # dup uses the lowest-numbered unused descriptor for the new descriptor
        duplicated = [os.dup(0) for _ in range(n)]
        for fd in duplicated:
            os.close(fd)
        return duplicated

    def has_shm_files(self, wait=True):
        """Return True if any tracked pid still owns a file in /dev/shm."""
        if not HAS_SHM_FILES:
            return False
        result = self._has_shm_files()
        if result and mp.get_sharing_strategy() == 'file_system' and wait:
            # Give the producer a moment to clean up before re-checking.
            time.sleep(0.5)
            return self._has_shm_files()
        return result

    def _has_shm_files(self):
        gc.collect()
        prefixes = ['torch_' + str(pid) for pid in self.checked_pids]
        return any(
            filename.startswith(prefix)
            for filename in os.listdir('/dev/shm')
            for prefix in prefixes
        )
@unittest.skipIf(TEST_WITH_TSAN, "TSAN is not fork-safe since we're forking in a multi-threaded environment")
class TestMultiprocessing(TestCase):
def tearDown(self):
# This will keep tests isolated from each-other
if torch.cuda.is_available():
torch.cuda.ipc_collect()
def _test_sharing(self, ctx=mp, device='cpu', dtype=torch.float, repeat=1):
def test_fill():
x = torch.zeros(5, 5).to(device, dtype)
q = ctx.Queue()
e = ctx.Event()
data = [x, x[:, 1]]
q.put(data)
p = ctx.Process(target=simple_fill, args=(q, e))
p.daemon = True
lc.check_pid(p.pid)
p.start()
e.wait(10)
self.assertTrue(e.is_set())
self.assertTrue(data[0].eq(4).all())
self.assertTrue(data[1].eq(4).all())
p.join(100)
self.assertFalse(p.is_alive())
def test_receive():
q = ctx.Queue()
e = ctx.Event()
p = ctx.Process(target=send_tensor, args=(q, e, device, dtype))
p.daemon = True
lc.check_pid(p.pid)
p.start()
t1 = q.get()
t2 = q.get()
self.assertTrue(t1.eq(1).all())
s1 = t1.storage()
s2 = t2.storage()
self.assertEqual(type(s1), type(s2))
self.assertEqual(s1.data_ptr(), s1.data_ptr())
self.assertEqual(s1, s2)
# We need to delete this tensors to allow producer (child process)
# collect them properly
del t1, t2
e.set()
p.join(100)
self.assertFalse(p.is_alive())
with leak_checker(self) as lc:
for _ in range(repeat):
test_fill()
test_receive()
def _test_preserve_sharing(self, ctx=mp, repeat=1):
def do_test():
x = torch.randn(5, 5)
data = [x.storage(), x, x[2], x[:, 1]]
q = ctx.Queue()
q.put(data)
new_data = q.get(timeout=1)
self.assertEqual(new_data, data, atol=0, rtol=0)
storage_cdata = data[0]._cdata
self.assertEqual(new_data[0]._cdata, storage_cdata)
for t in new_data[1:]:
self.assertEqual(t.storage()._cdata, storage_cdata)
with leak_checker(self):
for _ in range(repeat):
do_test()
def _test_pool(self, ctx=mp, repeat=1):
def do_test():
p = ctx.Pool(2)
for proc in p._pool:
lc.check_pid(proc.pid)
buffers = [torch.zeros(2, 2) for i in range(4)]
results = p.map(simple_pool_fill, buffers, 1)
self.assertEqual(len(results), len(buffers))
for r in results:
self.assertEqual(r, torch.ones(2, 2) * 5, atol=0, rtol=0)
for b in buffers:
self.assertEqual(b, torch.ones(2, 2) * 4, atol=0, rtol=0)
p.close()
p.join()
with leak_checker(self) as lc:
for _ in range(repeat):
do_test()
@unittest.skipIf(platform == 'darwin', "file descriptor strategy is not supported on macOS")
@unittest.skipIf(TEST_WITH_ASAN,
"seems to hang with ASAN, see https://github.com/pytorch/pytorch/issues/5326")
def test_fd_sharing(self):
self._test_sharing(repeat=TEST_REPEATS)
@unittest.skipIf(platform == 'darwin', "file descriptor strategy is not supported on macOS")
def test_fd_preserve_sharing(self):
self._test_preserve_sharing(repeat=TEST_REPEATS)
@unittest.skipIf(platform == 'darwin', "file descriptor strategy is not supported on macOS")
def test_fd_pool(self):
self._test_pool(repeat=TEST_REPEATS)
@unittest.skipIf(TEST_WITH_ASAN,
"seems to hang with ASAN, see https://github.com/pytorch/pytorch/issues/5326")
def test_fs_sharing(self):
with fs_sharing():
self._test_sharing(repeat=TEST_REPEATS)
def test_fs_preserve_sharing(self):
with fs_sharing():
self._test_preserve_sharing(repeat=TEST_REPEATS)
def test_fs_pool(self):
with fs_sharing():
self._test_pool(repeat=TEST_REPEATS)
@unittest.skipIf(not HAS_SHM_FILES, "don't not how to check if shm files exist")
def test_fs(self):
def queue_put():
x = torch.DoubleStorage(4)
q = mp.Queue()
self.assertFalse(lc.has_shm_files())
q.put(x)
time.sleep(0.05) # queue serializes asynchronously
self.assertTrue(lc.has_shm_files(wait=False))
q.get()
with fs_sharing(), leak_checker(self) as lc:
for _ in range(TEST_REPEATS):
queue_put()
def test_inherit_tensor(self):
t = torch.zeros(5, 5)
p = SubProcess(t.share_memory_())
p.start()
p.join(2)
if p.exitcode is None:
print("test_inherit_tensor: SubProcess too slow")
else:
self.assertEqual(t, torch.ones(5, 5) * 3, atol=0, rtol=0)
@unittest.skipIf(IS_WINDOWS, "Test needs to use fork multiprocessing")
def test_autograd_errors(self):
ctx = mp.get_context('fork')
simple_autograd_function()
# Autograd only uses thread when GPUs are involved
if torch.cuda.is_available() or torch.backends.mps.is_available():
with self.assertRaisesRegex(RuntimeError, r'Unable to handle autograd'):
with ctx.Pool(3) as pool:
pool.map(simple_autograd_function, [1, 2, 3])
else:
with ctx.Pool(3) as pool:
pool.map(simple_autograd_function, [1, 2, 3])
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Test needs to use spawn multiprocessing")
def test_autograd_fine_with_spawn(self):
ctx = mp.get_context('spawn')
simple_autograd_function()
with ctx.Pool(3) as pool:
pool.map(simple_autograd_function, [1, 2, 3])
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_cuda_simple(self):
torch.cuda.FloatTensor([1]) # initialize CUDA outside of leak checker
self._test_sharing(mp.get_context('spawn'), 'cuda', torch.float)
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_cuda_memory_allocation(self):
ctx = mp.get_context('spawn')
q = ctx.Queue()
e = ctx.Event()
p = ctx.Process(target=send_and_delete_tensors, args=(q, e, 'cuda', torch.int, 5))
p.start()
t = []
for _ in range(5):
t.append(q.get())
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(t[0], torch.full([5], 0.))
del t
e.set()
p.join(1)
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_cuda_ipc_deadlock(self):
ctx = mp.get_context('spawn')
queue = ctx.Queue(1)
processes = dict(
a=ctx.Process(target=_test_cuda_ipc_deadlock_actor, args=(queue, 100)),
l=ctx.Process(target=_test_cuda_ipc_deadlock_learner, args=(queue, 100)))
for p in processes.values():
p.start()
for p in processes.values():
p.join(10)
for p in processes.values():
self.assertFalse(p.is_alive())
@slowTest
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_cuda_send_many(self, name=None, size=5, count=100000):
ctx = mp.get_context('spawn')
q1 = ctx.Queue()
q2 = ctx.Queue()
q3 = ctx.Queue()
e1 = ctx.Event()
e2 = ctx.Event()
e3 = ctx.Event()
p1 = ctx.Process(target=send_and_delete_tensors, args=(q1, e1, 'cuda', torch.long, count, size))
p2 = ctx.Process(target=receive_and_send, args=(q1, q2, e2, count))
p3 = ctx.Process(target=receive_and_send_sum, args=(q2, q3, e3, 'cuda', torch.long, count, size))
p1.start()
p2.start()
p3.start()
result = q3.get()
self.assertEqual(result[0], int(count * (count - 1) / 2))
del result
e1.set()
e2.set()
e3.set()
p1.join(1)
p2.join(1)
p3.join(1)
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
@unittest.skipIf(not TEST_MULTIGPU, 'found only 1 GPU')
def test_cuda_small_tensors(self):
# Check multiple small tensors which will likely use the same
# underlying cached allocation
ctx = mp.get_context('spawn')
tensors = []
for i in range(5):
device = i % 2
tensors += [torch.arange(i * 5., (i + 1) * 5).cuda(device)]
inq = ctx.Queue()
outq = ctx.Queue()
inq.put(tensors)
p = ctx.Process(target=sum_tensors, args=(inq, outq))
p.start()
results = []
for _ in range(5):
results.append(outq.get())
p.join()
for i, _tensor in enumerate(tensors):
v, device, tensor_size, storage_size = results[i]
self.assertEqual(v, torch.arange(i * 5., (i + 1) * 5).sum())
self.assertEqual(device, i % 2)
self.assertEqual(tensor_size, 5)
# You might think this should be the case, but it's not! After
# data from the CUDA caching allocator goes through IPC, the
# size of the storage is the size of the *cached cudaMalloc for
# the entire memory block* of the storage, not just the storage.
# See Note [CUDA IPC and the caching allocator] for more info
#
# self.assertEqual(storage_size, 5)
# Collect current process (producer) files, make sure nothing holds
# ref to the sent tensors
del _tensor
del tensors
# We need to collect, as CUDA MP implementation holds one shared
# memory 'file' for performance reason
torch.cuda.ipc_collect()
@unittest.skipIf(IS_WINDOWS, 'not applicable to Windows (only fails with fork)')
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA not available')
def test_cuda_bad_call(self):
# Initialize CUDA
t = torch.zeros(5, 5).cuda().cpu()
inq = mp.Queue()
outq = mp.Queue()
p = mp.Process(target=queue_get_exception, args=(inq, outq))
p.start()
inq.put(t)
p.join()
self.assertIsInstance(outq.get(), RuntimeError)
@unittest.skipIf(IS_WINDOWS, 'not applicable to Windows (only fails with fork)')
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA not available')
def test_wrong_cuda_fork(self):
stderr = TestCase.runWithPytorchAPIUsageStderr("""\
import torch
from torch.multiprocessing import Process
def run(rank):
torch.cuda.set_device(rank)
if __name__ == "__main__":
size = 2
processes = []
for rank in range(size):
# it would work fine without the line below
x = torch.rand(20, 2).cuda()
p = Process(target=run, args=(rank,))
p.start()
processes.append(p)
for p in processes:
p.join()
""")
self.assertRegex(stderr, "Cannot re-initialize CUDA in forked subprocess.")
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_event(self):
ctx = mp.get_context('spawn')
queue = ctx.Queue()
ready = ctx.Event()
done = ctx.Event()
p = ctx.Process(target=cuda_multiply_two, args=(queue, ready, done))
p.start()
ready.wait()
with torch.cuda.stream(torch.cuda.Stream()):
tensor = torch.cuda.FloatTensor([1, 1, 1, 1])
# Use a sleep kernel to test events. Without the event, the
# multiply happens before the add.
event = torch.cuda.Event(interprocess=True)
torch.cuda._sleep(20000000) # about 30 ms
tensor.add_(1)
event.record()
queue.put((event, tensor))
done.wait() # must wait until subprocess records event
event.synchronize()
self.assertEqual(list(tensor), [4, 4, 4, 4])
p.join()
@staticmethod
def _test_event_multiprocess_child(event, p2c, c2p):
c2p.put(0) # notify parent child is ready
p2c.get() # wait for record in parent
event.synchronize()
c2p.put(1) # notify parent synchronization is done
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
@unittest.skipIf(TEST_WITH_ROCM, 'Skip the test for ROCm')
def test_event_multiprocess(self):
event = torch.cuda.Event(enable_timing=False, interprocess=True)
self.assertTrue(event.query())
ctx = mp.get_context('spawn')
p2c = ctx.SimpleQueue()
c2p = ctx.SimpleQueue()
p = ctx.Process(
target=TestMultiprocessing._test_event_multiprocess_child,
args=(event, p2c, c2p))
p.start()
c2p.get() # wait for until child process is ready
torch.cuda._sleep(50000000) # spin for about 50 ms
event.record()
p2c.put(0) # notify child event is recorded
self.assertFalse(event.query())
c2p.get() # wait for synchronization in child
self.assertTrue(event.query())
p.join()
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
@unittest.skipIf(not TEST_MULTIGPU, 'found only 1 GPU')
def test_event_handle_multi_gpu(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
e0 = torch.cuda.Event(enable_timing=False, interprocess=True)
with torch.cuda.device(d1):
# create handle on different device from un-recorded event
e0.ipc_handle()
with torch.cuda.device(d0):
e1 = torch.cuda.Event(enable_timing=False, interprocess=True)
stream = torch.cuda.Stream()
torch.cuda._sleep(50000000) # spin for about 50 ms
e1.record(stream)
with torch.cuda.device(d1):
# create handle on different device from recorded event
e1.ipc_handle()
    @staticmethod
    def _test_event_handle_importer_consumer(handle, p2c, c2p):
        """Child side of test_event_handle_importer: rebuild the parent's CUDA
        event from its IPC handle and synchronize on it after the parent
        records."""
        e1 = torch.cuda.Event.from_ipc_handle(0, handle)
        c2p.put(0)  # notify parent child is ready
        p2c.get()  # wait for record in parent
        e1.synchronize()
        c2p.put(1)  # notify synchronization is done in child
        p2c.get()  # wait for parent to finish before destructing child event
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
@unittest.skipIf(TEST_WITH_ROCM, 'Skip the test for ROCm')
def test_event_handle_importer(self):
e0 = torch.cuda.Event(enable_timing=False, interprocess=True)
self.assertTrue(e0.query())
ctx = mp.get_context('spawn')
p2c = ctx.SimpleQueue()
c2p = ctx.SimpleQueue()
p = ctx.Process(
target=TestMultiprocessing._test_event_handle_importer_consumer,
args=(e0.ipc_handle(), p2c, c2p))
p.start()
c2p.get() # wait for child to become ready
torch.cuda._sleep(50000000) # spin for about 50 ms
e0.record()
p2c.put(0) # notify child event is recorded
self.assertFalse(e0.query())
c2p.get() # wait for synchronization in child
self.assertTrue(e0.query())
p2c.put(1) # notify child that parent is done
p.join()
@staticmethod
def _test_event_handle_exporter_consumer(handle, p2c, c2p):
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
e1 = torch.cuda.Event.from_ipc_handle(
torch.cuda.current_device(), handle)
torch.cuda._sleep(50000000) # spin for about 50 ms
e1.record()
c2p.put(0)
# wait for parent process finished synchronization before
# destructing e1
p2c.get()
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
@unittest.skipIf(TEST_WITH_ROCM, 'Skip the test for ROCm')
def test_event_handle_exporter(self):
e0 = torch.cuda.Event(enable_timing=False, interprocess=True)
ctx = mp.get_context('spawn')
p2c = ctx.SimpleQueue()
c2p = ctx.SimpleQueue()
p = ctx.Process(
target=TestMultiprocessing._test_event_handle_exporter_consumer,
args=(e0.ipc_handle(), p2c, c2p))
p.start()
# wait for event in child process is recorded
c2p.get()
self.assertFalse(e0.query())
e0.synchronize()
self.assertTrue(e0.query())
p2c.put(0)
p.join()
def _test_empty_tensor_sharing(self, dtype, device):
q = mp.Queue()
empty = torch.tensor([], dtype=dtype, device=device)
q.put(empty)
out = q.get(timeout=1)
self.assertEqual(out, empty)
def test_empty_tensor_sharing(self):
self._test_empty_tensor_sharing(torch.float32, torch.device('cpu'))
self._test_empty_tensor_sharing(torch.int64, torch.device('cpu'))
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA not available')
def test_empty_tensor_sharing_cuda(self):
self._test_empty_tensor_sharing(torch.float32, torch.device('cuda'))
self._test_empty_tensor_sharing(torch.int64, torch.device('cuda'))
def _test_autograd_sharing(self, var, ctx=mp, is_parameter=False):
device = 'cuda' if var.is_cuda else 'cpu'
ready = ctx.Event()
master_modified = ctx.Event()
queue = ctx.Queue()
p = ctx.Process(target=autograd_sharing, args=(queue, ready, master_modified, device, is_parameter))
p.daemon = True
p.start()
# This would cause an error if we tried to serialize the hooks,
# because it's a closure and pickle doesn't support closures.
@torch.utils.hooks.unserializable_hook
def hook(*unused):
pass
if var.requires_grad:
var.register_hook(hook)
var._grad = torch.zeros(5, 5, device=device)
queue.put(var)
ready.wait()
var.data[0, 0] = 1000
var.grad.data[:] = torch.ones(5, 5, device=device) * 4
master_modified.set()
worker_ok = queue.get()
self.assertTrue(worker_ok)
self.assertEqual(var.data, torch.ones(5, 5, device=device))
self.assertEqual(var.grad.data, torch.ones(5, 5, device=device) * 4)
p.join(100)
self.assertFalse(p.is_alive())
# Check sharing a cudaMalloc allocation with different types of storage.
# (Issue #11422)
def _test_mixed_types_cuda_sharing(self, ctx=mp):
all_ones = torch.ones(2, 2).float()
all_zeros = torch.zeros(2, 2).byte()
queue = ctx.Queue()
event = ctx.Event()
p = ctx.Process(target=mixed_type_producer, args=(queue, event))
p.start()
for _ in range(10):
float_tensor = queue.get()
byte_tensor = queue.get()
self.assertEqual(float_tensor, all_ones)
self.assertEqual(byte_tensor, all_zeros)
del float_tensor, byte_tensor
event.set()
time.sleep(5)
p.join()
def test_variable_sharing(self):
for requires_grad in [True, False]:
var = torch.arange(1., 26).view(5, 5).requires_grad_(requires_grad)
self._test_autograd_sharing(var)
# See https://github.com/pytorch/pytorch/issues/14997
@unittest.skipIf(TEST_WITH_ASAN,
"non-deterministically hangs with ASAN")
def test_leaf_variable_sharing(self):
devices = ['cpu']
if torch.cuda.is_available() and not NO_MULTIPROCESSING_SPAWN and TEST_CUDA_IPC:
devices.append('cuda')
for device in devices:
for requires_grad in [True, False]:
var = torch.arange(1., 26, device=device).view(5, 5).requires_grad_(requires_grad)
self.assertTrue(var.is_leaf)
ctx = mp.get_context('spawn') if device == 'cuda' else mp
ready = ctx.Event()
queue = ctx.Queue()
p = ctx.Process(target=requires_grad_variable_sharing, args=(queue, ready))
p.daemon = True
p.start()
queue.put(var)
ready.wait()
worker_requires_grad = queue.get()
self.assertTrue(worker_requires_grad == requires_grad)
def test_non_leaf_variable_sharing(self):
devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
for device in devices:
var0 = torch.arange(1., 26, device=device).view(5, 5).requires_grad_(True)
var = var0 * 2
# Don't use a regular Queue; it uses a background thread (which
# means we can't catch the exceptions)
queue = mp.SimpleQueue()
self.assertRaisesRegex(RuntimeError, r'requires_grad', lambda: queue.put(var))
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_cuda_variable_sharing(self):
for requires_grad in [True, False]:
var = torch.arange(1., 26, device='cuda').view(5, 5).requires_grad_(requires_grad)
self._test_autograd_sharing(var, mp.get_context('spawn'))
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_mixed_types_cuda_sharing(self):
self._test_mixed_types_cuda_sharing(mp.get_context('spawn'))
def test_parameter_sharing(self):
param = Parameter(torch.arange(1., 26).view(5, 5))
self._test_autograd_sharing(param, is_parameter=True)
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_cuda_parameter_sharing(self):
param = Parameter(torch.arange(1., 26, device='cuda').view(5, 5))
self._test_autograd_sharing(param, mp.get_context('spawn'), is_parameter=True)
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
def test_integer_parameter_serialization_cpu(self):
self._test_integer_parameter_serialization(device='cpu')
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_integer_parameter_serialization_cuda(self):
self._test_integer_parameter_serialization(device='cuda')
def _test_integer_parameter_serialization(self, device):
param = torch.nn.Parameter(
torch.tensor(0, dtype=torch.int64, device=device),
requires_grad=False
)
ctx = mp.get_context('spawn')
p = ctx.Process(target=integer_parameter_serialization, args=(param,))
p.start()
p.join()
self.assertEqual(
0, p.exitcode,
msg=f'Failed to serialize successfully for "{device}" device!'
)
def test_empty_shared(self):
t = torch.tensor([])
t.share_memory_()
def _test_is_shared(self):
t = torch.randn(5, 5)
self.assertFalse(t.is_shared())
t.share_memory_()
self.assertTrue(t.is_shared())
@unittest.skipIf(platform == 'darwin', "file descriptor strategy is not supported on macOS")
def test_is_shared(self):
self._test_is_shared()
def test_fs_is_shared(self):
with fs_sharing():
self._test_is_shared()
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA not available')
def test_is_shared_cuda(self):
t = torch.randn(5, 5).cuda()
self.assertTrue(t.is_shared())
if __name__ == '__main__':
run_tests()
| pytorch-master | test/test_multiprocessing.py |
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
from torch.utils._pytree import tree_map
from torch.testing._internal.common_utils import run_tests
from torch.fx.operator_schemas import normalize_function
from torch.testing._internal.schema_check_mode import SchemaCheckMode
from torch.utils._python_dispatch import enable_torch_dispatch_mode, TorchDispatchMode
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_device_type import ops, OpDTypes, instantiate_device_type_tests
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
# This TorchDispatchTensor Subclass is used to simulate an incorrect schema
# which is then used to test that SchemaCheckMode behaves as expected
class IncorrectAliasTensor(torch.Tensor):
    # Ops for which this subclass fakes an input-to-output alias.
    ALIAS_ARG_OUT = {"aten::add"}
    # Ops for which this subclass makes the outputs alias each other.
    ALIAS_OUT_OUT = {"aten::aminmax"}
    # Ops for which this subclass fakes a mutation of the first argument.
    MUTATE_ARGS_OUT = {"aten::sub"}
    elem: torch.Tensor  # the real tensor wrapped by this subclass
    __slots__ = ['elem']
    __torch_function__ = torch._C._disabled_torch_function_impl
    @staticmethod
    def __new__(cls, elem, *args, **kwargs):
        """Build a wrapper subclass instance around ``elem`` (no storage of its own)."""
        # The wrapping tensor (IncorrectAliasTensor) shouldn't hold any
        # memory for the class in question, but it should still
        # advertise the same device as before
        r = torch.Tensor._make_wrapper_subclass(  # type: ignore[attr-defined]
            cls, elem.size(),
            strides=elem.stride(), storage_offset=elem.storage_offset(),
            # TODO: clone storage aliasing
            dtype=elem.dtype, layout=elem.layout,
            device=elem.device, requires_grad=kwargs.get("requires_grad", False)
        )
        # ...the real tensor is held as an element on the tensor.
        r.elem = elem.detach() if r.requires_grad else elem
        return r
    def __repr__(self):
        return super().__repr__(tensor_contents=f"{self.elem}")
    @classmethod
    def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
        """Dispatch ``func`` on the unwrapped tensors, then deliberately violate
        the op's declared aliasing/mutation behavior for the ops listed above."""
        def unwrap(e):
            return e.elem if isinstance(e, cls) else e
        def wrap(e):
            return cls(e) if isinstance(e, torch.Tensor) else e
        unwrapped_args = tree_map(unwrap, args)
        out = func(*unwrapped_args, **tree_map(unwrap, kwargs))
        if func._schema.name in IncorrectAliasTensor.ALIAS_ARG_OUT:
            # Fake alias: point the first input's element at the output.
            args[0].elem = out
        if func._schema.name in IncorrectAliasTensor.MUTATE_ARGS_OUT:
            # Fake mutation: clobber the first input's element with random data.
            args[0].elem = torch.rand(args[0].elem.shape)
        if func._schema.name in IncorrectAliasTensor.ALIAS_OUT_OUT:
            # Fake output aliasing: make output 0 the same object as output 1.
            incorrect_out = list(out)
            incorrect_out[0] = incorrect_out[1]
            return tree_map(wrap, tuple(incorrect_out))
        return tree_map(wrap, out)
# Tests various schema checking functionalities.
class TestSchemaCheck(JitTestCase):
    # Tests that SchemaCheckMode records operator order with grad
    def test_schema_check_mode_operator_order(self):
        schema_check = SchemaCheckMode()
        with enable_torch_dispatch_mode(schema_check):
            x = torch.rand((3, 3), requires_grad=True)
            x.relu().sin()
        # relu on a requires-grad tensor saves its result for backward, which
        # shows up as an extra aten::detach (compare the no-grad variant below,
        # which records no detach).
        self.assertEqual(["aten::rand", "aten::relu", "aten::detach", "aten::sin"], schema_check.ops)
# Tests that SchemaCheckMode records operator order without grad
def test_schema_check_mode_operator_order_without_grad(self):
schema_check = SchemaCheckMode()
with enable_torch_dispatch_mode(schema_check):
x = torch.rand((3, 3), requires_grad=False)
x.relu().sin()
self.assertEqual(["aten::rand", "aten::relu", "aten::sin"], schema_check.ops)
# Tests that SchemaCheckMode records mutations and aliases with none expected
def test_schema_check_mode_mutated_aliasing_none(self):
# NB: previously requires_grad=True, but this induces a detach for
# saved variable
x = torch.rand((3, 3))
schema_check = SchemaCheckMode()
with enable_torch_dispatch_mode(schema_check):
actual = x.relu().sin()
self.assertEqual([], schema_check.mutated)
self.assertEqual([], schema_check.aliasing)
# Tests that SchemaCheckMode records mutations and aliases with mutation expected
def test_schema_check_mode_mutated_aliasing_mutation(self):
actual = torch.rand((3, 3), requires_grad=False)
schema_check = SchemaCheckMode()
with enable_torch_dispatch_mode(schema_check):
actual.sinh_()
self.assertEqual([('aten::sinh_', 'input')], schema_check.mutated)
self.assertEqual([('aten::sinh_', 'input', 'output_0')], schema_check.aliasing)
# Tests that SchemaCheckMode records mutations and aliases with resize_
def test_schema_check_mode_mutated_aliasing_resize_(self):
actual = torch.rand((3, 3), requires_grad=False)
schema_check = SchemaCheckMode()
with enable_torch_dispatch_mode(schema_check):
actual.resize_(9)
self.assertEqual([('aten::resize_', 'input')], schema_check.mutated)
self.assertEqual([('aten::resize_', 'input', 'output_0')], schema_check.aliasing)
# Tests that SchemaCheckMode records mutations and aliases with aliasing inputs
    def test_schema_check_mode_mutated_aliasing_aliasing_inputs(self):
        """add_ with a self-aliased operand records mutation/aliasing for both args."""
        actual = torch.rand((3, 3))
        y = actual
        schema_check = SchemaCheckMode()
        with enable_torch_dispatch_mode(schema_check):
            actual.add_(y)
        self.assertEqual(
            [
                ('aten::add_', 'input'),
                ('aten::add_', 'other')
            ],
            schema_check.mutated
        )
        self.assertEqual(
            [
                ('aten::add_', 'input', 'output_0'),
                ('aten::add_', 'other', 'output_0')
            ],
            schema_check.aliasing
        )
# Tests that SchemaCheckMode records mutations and alias with as_strided
    def test_schema_check_mode_mutated_aliasing_as_strided(self):
        """as_strided_ is recorded as mutating and aliasing its input."""
        x = torch.rand((3, 6, 4))
        schema_check = SchemaCheckMode()
        with enable_torch_dispatch_mode(schema_check):
            x.as_strided_([3, 6, 4], [9, 1, 1])
        self.assertEqual(
            [
                ('aten::as_strided_', 'input')
            ],
            schema_check.mutated
        )
        self.assertEqual(
            [
                ('aten::as_strided_', 'input', 'output_0')
            ],
            schema_check.aliasing
        )
# Tests that SchemaCheckMode records mutations and aliases with multiple outputs
    def test_schema_check_mode_mutated_aliasing_multiple_outputs(self):
        """frexp with out= records both out tensors as mutated and aliased."""
        x = torch.arange(9.)
        m_actual = torch.arange(9.)
        e_actual = torch.zeros([9], dtype=torch.int32)
        schema_check = SchemaCheckMode()
        with enable_torch_dispatch_mode(schema_check):
            torch.frexp(x, out=(m_actual, e_actual))
        self.assertEqual(
            [
                ('aten::frexp', 'mantissa'),
                ('aten::frexp', 'exponent')
            ],
            schema_check.mutated
        )
        self.assertEqual(
            [
                ('aten::frexp', 'mantissa', 'output_0'),
                ('aten::frexp', 'exponent', 'output_1')
            ],
            schema_check.aliasing
        )
# Tests that SchemaCheckMode records mutations and aliases with aliasing outputs
    def test_schema_check_mode_mutated_aliasing_aliasing_outputs(self):
        """aminmax with the same tensor for both outs records cross aliasing."""
        x = torch.rand((3, 3))
        actual = torch.zeros(3)
        schema_check = SchemaCheckMode()
        with enable_torch_dispatch_mode(schema_check):
            torch.aminmax(x, dim=0, out=[actual, actual])
        self.assertEqual(
            [
                ('aten::aminmax', 'min'),
                ('aten::aminmax', 'max')
            ],
            schema_check.mutated
        )
        # Both outs are the same tensor, so each aliases each output slot.
        self.assertEqual(
            [
                ('aten::aminmax', 'min', 'output_0'),
                ('aten::aminmax', 'min', 'output_1'),
                ('aten::aminmax', 'max', 'output_0'),
                ('aten::aminmax', 'max', 'output_1')
            ],
            schema_check.aliasing
        )
# Tests that SchemaCheckMode wraps torch.Tensor
def test_schema_check_mode_functionality(self):
x = torch.rand((3, 3), requires_grad=True)
expected = x.relu().sin()
with enable_torch_dispatch_mode(SchemaCheckMode()):
actual = x.relu().sin()
self.assertEqual(expected, actual)
# Tests that SchemaCheckMode wraps torch.Tensor when an argument's default is overridden
    def test_schema_check_mode_functionality_default_replaced(self):
        """Overriding a defaulted kwarg (alpha) yields the same result under the mode."""
        x = torch.rand((3, 3), requires_grad=True)
        expected = x.add(x, alpha=2)
        with enable_torch_dispatch_mode(SchemaCheckMode()):
            actual = x.add(x, alpha=2)
        self.assertEqual(expected, actual)
# Tests that SchemaCheckMode wraps torch.Tensor when there is a Tensor[] argument
    def test_schema_check_mode_functionality_list_input(self):
        """An op taking a Tensor[] argument (multi_dot) works under the mode."""
        a = torch.rand((3, 3))
        b = torch.rand((3, 3))
        c = torch.rand((3, 3))
        expected = torch.linalg.multi_dot([a, b, c])
        with enable_torch_dispatch_mode(SchemaCheckMode()):
            actual = torch.linalg.multi_dot([a, b, c])
        self.assertEqual(expected, actual)
# Tests that SchemaCheckMode wraps torch.Tensor with an op that has the (a -> *) notation
def test_schema_check_mode_functionality_wildcard_after(self):
x = torch.rand((3, 3))
expected = x.chunk(6)
with enable_torch_dispatch_mode(SchemaCheckMode()):
actual = x.chunk(6)
self.assertEqual(expected, actual)
# Tests that SchemaCheckMode wraps torch.Tensor when there is a kwarg tensor input
    def test_schema_check_mode_functionality_kwarg_tensor(self):
        """An op taking a tensor as a kwarg (stft's window) works under the mode."""
        x = torch.rand((3, 5))
        w = torch.rand((4))
        expected = torch.stft(x, 4, win_length=4, window=w, return_complex=True)
        with enable_torch_dispatch_mode(SchemaCheckMode()):
            actual = torch.stft(x, 4, win_length=4, window=w, return_complex=True)
        self.assertEqual(expected, actual)
# Tests that SchemaCheckMode wraps torch.Tensor with a mutable op
    def test_schema_check_mode_functionality_mutable_inputs(self):
        """An in-place op (sinh_) produces identical results under the mode."""
        expected = torch.rand((3, 3), requires_grad=False)
        actual = torch.clone(expected)
        expected.sinh_()
        with enable_torch_dispatch_mode(SchemaCheckMode()):
            actual.sinh_()
        self.assertEqual(expected, actual)
# Tests that SchemaCheckMode wraps Torch.tensor when inputs alias
    def test_schema_check_mode_functionality_aliasing_inputs(self):
        """add_ with aliased inputs produces identical results under the mode."""
        expected = torch.rand((3, 3))
        x = expected
        actual = torch.clone(expected)
        y = actual
        expected.add_(x)
        with enable_torch_dispatch_mode(SchemaCheckMode()):
            actual.add_(y)
        self.assertEqual(expected, actual)
# Tests that SchemaCheckMode wraps Torch.tensor with multiple tensor outputs
    def test_schema_check_mode_functionality_with_multiple_outputs(self):
        """frexp's two out tensors match the functional results under the mode."""
        x = torch.arange(9.)
        m_expected, e_expected = torch.frexp(x)
        m_actual = torch.arange(9.)
        e_actual = torch.zeros([9], dtype=torch.int32)
        with enable_torch_dispatch_mode(SchemaCheckMode()):
            torch.frexp(x, out=(m_actual, e_actual))
        self.assertEqual(m_expected, m_actual)
        self.assertEqual(e_expected, e_actual)
# Tests that SchemaCheckMode wraps torch.Tensor with aliasing outputs due to aliasing inputs
    def test_schema_check_mode_functionality_with_multiple_outputs_aliasing(self):
        """aminmax writing both outs into one tensor behaves the same under the mode."""
        x = torch.rand((3, 3))
        actual = torch.zeros(3)
        with enable_torch_dispatch_mode(SchemaCheckMode()):
            torch.aminmax(x, dim=0, out=[actual, actual])
        # The shared out tensor ends up holding amax (presumably max is
        # written last — the assertion below encodes that expectation).
        self.assertEqual(torch.amax(x, dim=0), actual)
# Tests that SchemaCheckMode wraps Torch.tensor in ops with real Device input
def test_schema_check_mode_functionality_device_input(self):
with enable_torch_dispatch_mode(SchemaCheckMode()):
x = torch.rand((3, 3), device="cpu", dtype=torch.double)
y = x + x
self.assertEqual(x + x, y)
# Tests that SchemaCheckMode wraps Torch.tensor in special training op edge case
    def test_schema_check_mode_functionality_training_op(self):
        """BatchNorm (mutates running stats while training) works under the mode."""
        x = torch.rand((3, 3), requires_grad=True)
        batch = torch.nn.BatchNorm1d(3, track_running_stats=True)
        expected = batch(x)
        with enable_torch_dispatch_mode(SchemaCheckMode()):
            actual = batch(x)
        self.assertEqual(expected, actual)
# Tests that SchemaCheckMode wraps Torch.tensor with nested training op edge case
    def test_schema_check_mode_functionality_nested_training_op(self):
        """BatchNorm after several in-place ops matches the unmodded computation."""
        actual = torch.rand((3, 3))
        batch = torch.nn.BatchNorm1d(3, track_running_stats=True)
        expected = torch.clone(actual)
        expected.sinh_()
        expected.tanh_()
        expected.relu_()
        expected = batch(expected)
        with enable_torch_dispatch_mode(SchemaCheckMode()):
            actual.sinh_()
            actual.tanh_()
            actual.relu_()
            actual = batch(actual)
        self.assertEqual(expected, actual)
# Tests that SchemaCheckMode wraps Torch.tensor with empty list input
def test_schema_check_mode_empty_list_input(self):
expected = torch.atleast_1d([])
with enable_torch_dispatch_mode(SchemaCheckMode()):
actual = torch.atleast_1d([])
self.assertEqual(expected, actual)
# Tests that an exception is raised for a mismatching mutation
    def test_mutation_check_fail(self):
        """An op that mutates without declaring it in its schema must raise."""
        with self.assertRaisesRegex(RuntimeError, "Argument input is not defined as mutable but was mutated"):
            x = torch.rand((3, 3))
            y = torch.rand((3, 3))
            with enable_torch_dispatch_mode(SchemaCheckMode()):
                IncorrectAliasTensor(x).sub(IncorrectAliasTensor(y))
# Tests that an exception is raised for a mismatching mutation over multiple ops
    def test_mutation_check_fail_multiple_operators(self):
        """The undeclared mutation is still detected at the end of an op chain."""
        with self.assertRaisesRegex(RuntimeError, "Argument input is not defined as mutable but was mutated"):
            x = torch.rand((3, 3))
            y = torch.rand((3, 3))
            with enable_torch_dispatch_mode(SchemaCheckMode()):
                IncorrectAliasTensor(x).sin().cos().sub(IncorrectAliasTensor(y))
# Tests that an exception is raised for a mismatching alias
    def test_alias_check_fail_simple(self):
        """Undeclared input/output aliasing must raise."""
        with self.assertRaisesRegex(RuntimeError, "Argument input is not defined to alias output but was aliasing"):
            x = torch.rand((3, 3), requires_grad=True)
            y = torch.rand((3, 3))
            with enable_torch_dispatch_mode(SchemaCheckMode()):
                IncorrectAliasTensor(x).add(IncorrectAliasTensor(y), alpha=2)
# Tests that an exception is raised for a mismatching alias over multiple ops
    def test_alias_check_fail_multiple_operators(self):
        """Undeclared aliasing at the end of an op chain must raise."""
        with self.assertRaisesRegex(RuntimeError, "Argument input is not defined to alias output but was aliasing"):
            x = torch.rand((3, 3), requires_grad=True)
            y = torch.zeros((3, 3), requires_grad=True)
            with enable_torch_dispatch_mode(SchemaCheckMode()):
                IncorrectAliasTensor(x).sin().relu().add(IncorrectAliasTensor(y), alpha=2)
# Tests that an exception is raised for a centered mismatching alias over multiple ops
    def test_alias_check_fail_multiple_operators_centered(self):
        """Undeclared aliasing in the middle of an op chain must still raise."""
        with self.assertRaisesRegex(RuntimeError, "Argument input is not defined to alias output but was aliasing"):
            x = torch.rand((3, 3), requires_grad=True)
            y = torch.zeros((3, 3), requires_grad=True)
            with enable_torch_dispatch_mode(SchemaCheckMode()):
                IncorrectAliasTensor(x).sin().add(IncorrectAliasTensor(y), alpha=2).relu()
# Tests that an exception is raised when two outputs alias unexpectedly
    def test_alias_check_fail_outputs_unexpectedly_aliasing(self):
        """Unexpected aliasing between two outputs must raise."""
        with self.assertRaisesRegex(RuntimeError, "Outputs 0 and 1 alias unexpectedly"):
            x = torch.rand((3, 3))
            s = SchemaCheckMode()
            with enable_torch_dispatch_mode(s):
                IncorrectAliasTensor(x).aminmax(dim=0)
# Tests that is_alias_of returns as expected
def test_is_alias_of_basic(self):
x = torch.rand((3, 3), requires_grad=True)
y = torch.rand((3, 3), requires_grad=True)
y = x.add(x, alpha=2)
self.assertTrue(torch._C._is_alias_of(x, x))
self.assertFalse(torch._C._is_alias_of(x, y))
# Tests that is_alias_of returns as expected with empty containers
def test_is_alias_of_empty_container(self):
x = []
y = torch.rand((3, 3), requires_grad=True)
self.assertFalse(torch._C._is_alias_of(x, x))
self.assertFalse(torch._C._is_alias_of(x, y))
# Tests that overlaps returns as expected
    def test_overlaps_basic(self):
        """_overlaps detects identity and container membership, not distinct tensors."""
        x = torch.rand((3, 3), requires_grad=True)
        y = torch.rand((3, 3), requires_grad=True)
        z = [x, y]
        self.assertTrue(torch._C._overlaps(x, x))
        self.assertFalse(torch._C._overlaps(x, y))
        self.assertTrue(torch._C._overlaps(z, x))
        self.assertTrue(torch._C._overlaps(z, y))
# Tests that overlaps returns correctly with empty containers
    def test_overlaps_empty_container(self):
        """_overlaps with an empty container is False; a list overlaps itself."""
        x = []
        y = [torch.rand((3, 3), requires_grad=True)]
        # Empty containers return false
        self.assertFalse(torch._C._overlaps(y, x))
        self.assertTrue(torch._C._overlaps(y, y))
# Tests that SchemaInfo Bindings work as expected
    def test_schema_info_bind_basic(self):
        """Binding argument values should flip may_alias to True for aliased inputs."""
        class SchemaInfoBindTestMode(TorchDispatchMode):
            # NOTE(review): __init__ does not call super().__init__() —
            # confirm TorchDispatchMode tolerates this.
            def __init__(self, test_self):
                self.test_self = test_self
            def __torch_dispatch__(self, func, types, args=(), kwargs=None):
                named_arg_list = normalize_function(
                    func,
                    args,
                    kwargs,
                    normalize_to_only_use_kwargs=True
                ).kwargs
                schema_info_value_test = torch._C._SchemaInfo(func._schema)
                schema_info_values_test = torch._C._SchemaInfo(func._schema)
                # Before any values are bound, inputs 0 and 1 are not known to alias.
                self.test_self.assertFalse(schema_info_value_test.may_alias(
                    torch._C._SchemaArgument(torch._C._SchemaArgType.input, 0),
                    torch._C._SchemaArgument(torch._C._SchemaArgType.input, 1)))
                self.test_self.assertFalse(schema_info_values_test.may_alias(
                    torch._C._SchemaArgument(torch._C._SchemaArgType.input, 0),
                    torch._C._SchemaArgument(torch._C._SchemaArgType.input, 1)))
                # Bind one-by-one and in bulk; both binding paths must agree.
                for i in named_arg_list:
                    schema_info_value_test.add_argument_value(i, named_arg_list[i])
                schema_info_values_test.add_argument_values(named_arg_list)
                self.test_self.assertTrue(schema_info_value_test.may_alias(
                    torch._C._SchemaArgument(torch._C._SchemaArgType.input, 0),
                    torch._C._SchemaArgument(torch._C._SchemaArgType.input, 1)))
                self.test_self.assertTrue(schema_info_values_test.may_alias(
                    torch._C._SchemaArgument(torch._C._SchemaArgType.input, 0),
                    torch._C._SchemaArgument(torch._C._SchemaArgType.input, 1)))
                return func(*args, **kwargs)
        x = torch.rand((3, 3))
        # x.add(x) passes the same tensor as both inputs, so they alias.
        schemaInfoCheck = SchemaInfoBindTestMode(self)
        with enable_torch_dispatch_mode(schemaInfoCheck):
            x.add(x)
class TestSchemaCheckModeOpInfo(JitTestCase):
    """Runs every supported OpInfo sample under SchemaCheckMode to validate schemas."""
    @ops(op_db, dtypes=OpDTypes.supported)
    def test_schema_correctness(self, device, dtype, op):
        """Execute each sample input under SchemaCheckMode; schema violations raise."""
        # Currently torch.equal isn't supported with torch.complex32
        # There's also errors with complex64 and complex128
        if (dtype == torch.complex32):
            return
        for sample in op.sample_inputs(device, dtype, requires_grad=False):
            with enable_torch_dispatch_mode(SchemaCheckMode()):
                op(sample.input, *sample.args, **sample.kwargs)
# Generate per-device (CPU/CUDA only) variants of the OpInfo schema tests.
instantiate_device_type_tests(TestSchemaCheckModeOpInfo, globals(), only_for=("cpu", "cuda"))
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_schema_check.py |
# Owner(s): ["module: typing"]
# based on NumPy numpy/typing/tests/test_typing.py
import itertools
import os
import re
import shutil
from collections import defaultdict
from typing import IO, Dict, List, Optional
import pytest
# mypy is an optional dependency: when it cannot be imported, the NO_MYPY
# flag makes every test in this module skip itself.
try:
    from mypy import api
except ImportError:
    NO_MYPY = True
else:
    NO_MYPY = False
# Directory layout: the `typing` data dir sits next to this file and holds
# three suites of mypy inputs (reveal/pass/fail); mypy.ini lives two levels up.
DATA_DIR = os.path.join(os.path.dirname(__file__), "typing")
REVEAL_DIR = os.path.join(DATA_DIR, "reveal")
PASS_DIR = os.path.join(DATA_DIR, "pass")
FAIL_DIR = os.path.join(DATA_DIR, "fail")
MYPY_INI = os.path.join(DATA_DIR, os.pardir, os.pardir, "mypy.ini")
CACHE_DIR = os.path.join(DATA_DIR, ".mypy_cache")
#: A dictionary with file names as keys and lists of the mypy stdout as values.
#: To-be populated by `run_mypy`.
OUTPUT_MYPY: Dict[str, List[str]] = {}
def _key_func(key: str) -> str:
"""Split at the first occurance of the ``:`` character.
Windows drive-letters (*e.g.* ``C:``) are ignored herein.
"""
drive, tail = os.path.splitdrive(key)
return os.path.join(drive, tail.split(":", 1)[0])
def _strip_filename(msg: str) -> str:
"""Strip the filename from a mypy message."""
_, tail = os.path.splitdrive(msg)
return tail.split(":", 1)[-1]
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.fixture(scope="module", autouse=True)
def run_mypy() -> None:
    """Clears the cache and run mypy before running any of the typing tests.
    The mypy results are cached in `OUTPUT_MYPY` for further use.
    """
    # NOTE(review): pytest marks on fixtures are deprecated/ignored by pytest;
    # confirm the NO_MYPY skipif on the individual tests is what actually skips.
    if os.path.isdir(CACHE_DIR):
        shutil.rmtree(CACHE_DIR)
    for directory in (REVEAL_DIR, PASS_DIR, FAIL_DIR):
        # Run mypy
        stdout, stderr, _ = api.run(
            [
                "--show-absolute-path",
                "--config-file",
                MYPY_INI,
                "--cache-dir",
                CACHE_DIR,
                directory,
            ]
        )
        assert not stderr, directory
        # `*` characters are stripped from mypy's output before parsing.
        stdout = stdout.replace("*", "")
        # Parse the output, grouping message lines by source file name.
        iterator = itertools.groupby(stdout.split("\n"), key=_key_func)
        OUTPUT_MYPY.update((k, list(v)) for k, v in iterator if k)
def get_test_cases(directory):
    """Yield a ``pytest.param`` for every ``.py`` file under *directory* (recursive)."""
    for root, _, files in os.walk(directory):
        for fname in files:
            if os.path.splitext(fname)[-1] == ".py":
                fullpath = os.path.join(root, fname)
                # Use relative path for nice py.test name
                relpath = os.path.relpath(fullpath, start=directory)
                yield pytest.param(
                    fullpath,
                    # Manually specify a name for the test
                    id=relpath,
                )
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(PASS_DIR))
def test_success(path):
    """Files in the `pass` suite must produce no mypy output at all."""
    # Alias `OUTPUT_MYPY` so that it appears in the local namespace
    output_mypy = OUTPUT_MYPY
    if path in output_mypy:
        msg = "Unexpected mypy output\n\n"
        msg += "\n".join(_strip_filename(v) for v in output_mypy[path])
        raise AssertionError(msg)
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(FAIL_DIR))
def test_fail(path):
    """Validate the ``# E:`` comments in a `fail` file against mypy's errors.

    Mypy's messages are grouped by line number; every source line carrying an
    ``# E:`` marker is then checked against the errors reported for that line,
    and an error on a marker-less line fails the test.
    """
    __tracebackhide__ = True
    with open(path) as fin:
        lines = fin.readlines()
    errors = defaultdict(lambda: "")
    output_mypy = OUTPUT_MYPY
    assert path in output_mypy
    for error_line in output_mypy[path]:
        error_line = _strip_filename(error_line)
        match = re.match(
            r"(?P<lineno>\d+): (error|note): .+$",
            error_line,
        )
        if match is None:
            raise ValueError(f"Unexpected error line format: {error_line}")
        lineno = int(match.group('lineno'))
        errors[lineno] += f'{error_line}\n'
    # `line` already is lines[lineno - 1]; the original re-indexed it
    # redundantly as `target_line`, which is removed here.
    for lineno, line in enumerate(lines, start=1):
        if line.startswith('#') or (" E:" not in line and lineno not in errors):
            continue
        if "# E:" in line:
            marker = line.split("# E:")[-1].strip()
            _test_fail(path, marker, errors.get(lineno), lineno)
        else:
            pytest.fail(f"Unexpected mypy output\n\n{errors[lineno]}")
# Failure message templates used by `_test_fail`.
_FAIL_MSG1 = """Extra error at line {}
Extra error: {!r}
"""
_FAIL_MSG2 = """Error mismatch at line {}
Expected error: {!r}
Observed error: {!r}
"""
def _test_fail(path: str, error: str, expected_error: Optional[str], lineno: int) -> None:
if expected_error is None:
raise AssertionError(_FAIL_MSG1.format(lineno, error))
elif error not in expected_error:
raise AssertionError(_FAIL_MSG2.format(lineno, expected_error, error))
def _construct_format_dict():
dct = {
'ModuleList': 'torch.nn.modules.container.ModuleList',
'AdaptiveAvgPool2d': 'torch.nn.modules.pooling.AdaptiveAvgPool2d',
'AdaptiveMaxPool2d': 'torch.nn.modules.pooling.AdaptiveMaxPool2d',
'Tensor': 'torch._tensor.Tensor',
'Adagrad': 'torch.optim.adagrad.Adagrad',
'Adam': 'torch.optim.adam.Adam',
}
return dct
#: A dictionary with all supported format keys (as keys)
#: and matching values
#: Keys appear as ``{Key}`` placeholders inside reveal comments.
FORMAT_DICT: Dict[str, str] = _construct_format_dict()
def _parse_reveals(file: IO[str]) -> List[str]:
"""Extract and parse all ``" # E: "`` comments from the passed file-like object.
All format keys will be substituted for their respective value from `FORMAT_DICT`,
*e.g.* ``"{Tensor}"`` becomes ``"torch.tensor.Tensor"``.
"""
string = file.read().replace("*", "")
# Grab all `# E:`-based comments
comments_array = list(map(lambda str: str.partition(" # E: ")[2], string.split("\n")))
comments = "/n".join(comments_array)
# Only search for the `{*}` pattern within comments,
# otherwise there is the risk of accidently grabbing dictionaries and sets
key_set = set(re.findall(r"\{(.*?)\}", comments))
kwargs = {
k: FORMAT_DICT.get(k, f"<UNRECOGNIZED FORMAT KEY {k!r}>") for k in key_set
}
fmt_str = comments.format(**kwargs)
return fmt_str.split("/n")
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(REVEAL_DIR))
def test_reveal(path):
    """Compare each `# E:` reveal comment against mypy's `Revealed type` notes."""
    __tracebackhide__ = True
    with open(path) as fin:
        lines = _parse_reveals(fin)
    output_mypy = OUTPUT_MYPY
    assert path in output_mypy
    for error_line in output_mypy[path]:
        match = re.match(
            r"^.+\.py:(?P<lineno>\d+): note: .+$",
            error_line,
        )
        if match is None:
            raise ValueError(f"Unexpected reveal line format: {error_line}")
        # Mypy reports 1-based line numbers; `lines` is indexed from 0.
        lineno = int(match.group("lineno")) - 1
        assert "Revealed type is" in error_line
        marker = lines[lineno]
        _test_reveal(path, marker, error_line, 1 + lineno)
# Failure message template used by `_test_reveal`.
_REVEAL_MSG = """Reveal mismatch at line {}
Expected reveal: {!r}
Observed reveal: {!r}
"""
def _test_reveal(path: str, reveal: str, expected_reveal: str, lineno: int) -> None:
if reveal not in expected_reveal:
raise AssertionError(_REVEAL_MSG.format(lineno, expected_reveal, reveal))
# Allow running this module directly via `python test_typing.py`.
if __name__ == '__main__':
    pytest.main([__file__])
| pytorch-master | test/test_typing.py |
# Owner(s): ["oncall: mobile"]
import unittest
import torch
import torch.backends.xnnpack
from torch.nn import functional as F
from torch.utils.mobile_optimizer import optimize_for_mobile
from torch.testing import FileCheck
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import TestCase, run_tests, slowTest
from hypothesis import given, assume
from hypothesis import strategies as st
import io
import itertools
from torch.testing._internal.common_utils import TEST_WITH_TSAN
@unittest.skipUnless(torch.backends.xnnpack.enabled,
                     " XNNPACK must be enabled for these tests."
                     " Please build with USE_XNNPACK=1.")
@unittest.skipIf(TEST_WITH_TSAN, "TSAN fails with XNNPACK. Does not seem to have a good reason for failures.")
class TestXNNPACKOps(TestCase):
    """Compares XNNPACK prepacked linear/conv ops against their ATen references."""
    @unittest.skip("Fails on some platforms, see https://github.com/pytorch/pytorch/issues/73488")
    @given(batch_size=st.integers(0, 3),
           data_shape=hu.array_shapes(1, 3, 2, 64),
           weight_output_dim=st.integers(2, 64),
           use_bias=st.booleans())
    def test_linear(self, batch_size, data_shape, weight_output_dim, use_bias):
        """linear_clamp_prepack/run must match F.linear on batched input."""
        data_shape = [batch_size] + list(data_shape)
        input_data = torch.rand(data_shape)
        weight = torch.rand((weight_output_dim, data_shape[-1]))
        if use_bias:
            bias = torch.rand((weight_output_dim))
        else:
            bias = None
        ref_result = F.linear(input_data, weight, bias)
        packed_weight_bias = torch.ops.prepacked.linear_clamp_prepack(weight, bias)
        output_linearprepacked = torch.ops.prepacked.linear_clamp_run(input_data, packed_weight_bias)
        torch.testing.assert_close(ref_result, output_linearprepacked, rtol=1e-2, atol=1e-3)

    @given(input_size=st.integers(2, 32),
           weight_output_dim=st.integers(2, 64),
           use_bias=st.booleans())
    def test_linear_1d_input(self, input_size, weight_output_dim, use_bias):
        """linear_clamp_prepack/run must also accept 1-D input."""
        input_data = torch.rand(input_size)
        weight = torch.rand((weight_output_dim, input_data.shape[-1]))
        if use_bias:
            bias = torch.rand((weight_output_dim))
        else:
            bias = None
        ref_result = F.linear(input_data, weight, bias)
        packed_weight_bias = torch.ops.prepacked.linear_clamp_prepack(weight, bias)
        output_linearprepacked = torch.ops.prepacked.linear_clamp_run(input_data, packed_weight_bias)
        torch.testing.assert_close(ref_result, output_linearprepacked, rtol=1e-2, atol=1e-3)

    @given(batch_size=st.integers(0, 3),
           input_channels_per_group=st.integers(1, 32),
           height=st.integers(5, 64),
           width=st.integers(5, 64),
           output_channels_per_group=st.integers(1, 32),
           groups=st.integers(1, 16),
           kernel_h=st.integers(1, 7),
           kernel_w=st.integers(1, 7),
           stride_h=st.integers(1, 2),
           stride_w=st.integers(1, 2),
           pad_h=st.integers(0, 2),
           pad_w=st.integers(0, 2),
           dilation=st.integers(1, 2),
           use_bias=st.booleans(),
           format=st.sampled_from([None, torch.preserve_format, torch.contiguous_format, torch.channels_last]))
    def test_conv2d(self,
                    batch_size,
                    input_channels_per_group,
                    height,
                    width,
                    output_channels_per_group,
                    groups,
                    kernel_h,
                    kernel_w,
                    stride_h,
                    stride_w,
                    pad_h,
                    pad_w,
                    dilation,
                    use_bias,
                    format):
        """conv2d_clamp_prepack/run must match F.conv2d for random configurations."""
        input_channels = input_channels_per_group * groups
        output_channels = output_channels_per_group * groups
        kernels = (kernel_h, kernel_w)
        strides = (stride_h, stride_w)
        paddings = (pad_h, pad_w)
        dilations = (dilation, dilation)
        # Discard configs where the dilated kernel would not fit the padded input.
        assume(height + 2 * paddings[0]
               >= dilations[0] * (kernels[0] - 1) + 1)
        assume(width + 2 * paddings[1]
               >= dilations[1] * (kernels[1] - 1) + 1)
        input_data = torch.rand((batch_size, input_channels, height, width))
        if (format is not None):
            input_data = input_data.contiguous(memory_format=format)
        weight = torch.rand((output_channels, input_channels_per_group, kernel_h, kernel_w))
        bias = None
        if use_bias:
            bias = torch.rand((output_channels))
        ref_result = F.conv2d(input_data, weight, bias,
                              strides, paddings, dilations, groups)
        packed_weight_bias = torch.ops.prepacked.conv2d_clamp_prepack(weight, bias,
                                                                      strides, paddings, dilations, groups)
        xnnpack_result = torch.ops.prepacked.conv2d_clamp_run(input_data, packed_weight_bias)
        torch.testing.assert_close(ref_result, xnnpack_result, rtol=1e-2, atol=1e-3)

    @given(batch_size=st.integers(1, 3),
           input_channels_per_group=st.integers(1, 32),
           height=st.integers(5, 64),
           width=st.integers(5, 64),
           output_channels_per_group=st.integers(1, 32),
           groups=st.integers(1, 16),
           kernel_h=st.integers(1, 7),
           kernel_w=st.integers(1, 7),
           stride_h=st.integers(1, 2),
           stride_w=st.integers(1, 2),
           pad_h=st.integers(0, 2),
           pad_w=st.integers(0, 2),
           output_pad_h=st.integers(0, 2),
           output_pad_w=st.integers(0, 2),
           dilation=st.integers(1, 2),
           use_bias=st.booleans(),
           format=st.sampled_from([None, torch.preserve_format, torch.contiguous_format, torch.channels_last]))
    def test_conv2d_transpose(self,
                              batch_size,
                              input_channels_per_group,
                              height,
                              width,
                              output_channels_per_group,
                              groups,
                              kernel_h,
                              kernel_w,
                              stride_h,
                              stride_w,
                              pad_h,
                              pad_w,
                              output_pad_h,
                              output_pad_w,
                              dilation,
                              use_bias,
                              format):
        """conv2d_transpose_clamp_prepack/run must match F.conv_transpose2d."""
        input_channels = input_channels_per_group * groups
        output_channels = output_channels_per_group * groups
        kernels = (kernel_h, kernel_w)
        strides = (stride_h, stride_w)
        paddings = (pad_h, pad_w)
        output_paddings = (output_pad_h, output_pad_w)
        dilations = (dilation, dilation)
        # Discard configs where the dilated kernel would not fit the padded
        # input, or where the output padding exceeds stride/dilation limits.
        assume(height + 2 * paddings[0]
               >= dilations[0] * (kernels[0] - 1) + 1)
        assume(width + 2 * paddings[1]
               >= dilations[1] * (kernels[1] - 1) + 1)
        assume((output_pad_h < stride_h) and (output_pad_h < dilation))
        assume((output_pad_w < stride_w) and (output_pad_w < dilation))
        input_data = torch.rand((batch_size, input_channels, height, width))
        if (format is not None):
            input_data = input_data.contiguous(memory_format=format)
        weight = torch.rand((input_channels, output_channels_per_group, kernel_h, kernel_w))
        bias = None
        if use_bias:
            bias = torch.rand((output_channels))
        # Note that groups/dilation is in reverse order from conv2d
        ref_result = F.conv_transpose2d(input_data, weight, bias,
                                        strides, paddings, output_paddings, groups, dilation)
        packed_weight_bias = torch.ops.prepacked.conv2d_transpose_clamp_prepack(weight, bias,
                                                                                strides, paddings,
                                                                                output_paddings, dilations,
                                                                                groups)
        xnnpack_result = torch.ops.prepacked.conv2d_transpose_clamp_run(input_data, packed_weight_bias)
        torch.testing.assert_close(ref_result.contiguous(), xnnpack_result.contiguous(), rtol=1e-2, atol=1e-3)
@unittest.skipUnless(torch.backends.xnnpack.enabled,
" XNNPACK must be enabled for these tests."
" Please build with USE_XNNPACK=1.")
@unittest.skipIf(TEST_WITH_TSAN, "TSAN fails with XNNPACK. Does not seem to have a good reason for failures.")
class TestXNNPACKSerDes(TestCase):
    @unittest.skip("Fails on some platforms, see https://github.com/pytorch/pytorch/issues/73488")
    @given(batch_size=st.integers(0, 3),
           data_shape=hu.array_shapes(1, 3, 2, 64),
           weight_output_dim=st.integers(2, 64),
           use_bias=st.booleans())
    def test_linear(self, batch_size, data_shape, weight_output_dim, use_bias):
        """Prepacked linear must match F.linear before and after jit save/load."""
        class Linear(torch.nn.Module):
            """Reference module wrapping F.linear."""
            def __init__(self, weight, bias=None):
                super(Linear, self).__init__()
                self.weight = weight
                self.bias = bias
            def forward(self, x):
                return F.linear(x, self.weight, self.bias)
        class LinearPrePacked(torch.nn.Module):
            """Module running the XNNPACK prepacked linear."""
            def __init__(self, weight, bias=None):
                super(LinearPrePacked, self).__init__()
                self.packed_weight_bias = torch.ops.prepacked.linear_clamp_prepack(weight, bias)
            def forward(self, x):
                return torch.ops.prepacked.linear_clamp_run(x, self.packed_weight_bias)
        data_shape = [batch_size] + list(data_shape)
        weight = torch.rand((weight_output_dim, data_shape[-1]))
        if use_bias:
            bias = torch.rand((weight_output_dim))
        else:
            bias = None
        scripted_linear = torch.jit.script(Linear(weight, bias))
        scripted_linear_clamp_prepacked = torch.jit.script(LinearPrePacked(weight, bias))
        input_data = torch.rand(data_shape)
        ref_result = scripted_linear(input_data)
        output_linearprepacked = scripted_linear_clamp_prepacked(input_data)
        torch.testing.assert_close(ref_result, output_linearprepacked, rtol=1e-2, atol=1e-3)
        # Serialize the modules and then deserialize
        input_data = torch.rand(data_shape)
        buffer = io.BytesIO()
        torch.jit.save(scripted_linear, buffer)
        buffer.seek(0)
        deserialized_linear = torch.jit.load(buffer)
        buffer = io.BytesIO()
        torch.jit.save(scripted_linear_clamp_prepacked, buffer)
        buffer.seek(0)
        deserialized_linear_clamp_prepacked = torch.jit.load(buffer)
        ref_result = deserialized_linear(input_data)
        output_linearprepacked = deserialized_linear_clamp_prepacked(input_data)
        torch.testing.assert_close(ref_result, output_linearprepacked, rtol=1e-2, atol=1e-3)
    @given(batch_size=st.integers(0, 3),
           input_channels_per_group=st.integers(1, 32),
           height=st.integers(5, 64),
           width=st.integers(5, 64),
           output_channels_per_group=st.integers(1, 32),
           groups=st.integers(1, 16),
           kernel_h=st.integers(1, 7),
           kernel_w=st.integers(1, 7),
           stride_h=st.integers(1, 2),
           stride_w=st.integers(1, 2),
           pad_h=st.integers(0, 2),
           pad_w=st.integers(0, 2),
           dilation=st.integers(1, 2),
           use_bias=st.booleans(),
           format=st.sampled_from([None, torch.preserve_format, torch.contiguous_format, torch.channels_last]))
    def test_conv2d(self,
                    batch_size,
                    input_channels_per_group,
                    height,
                    width,
                    output_channels_per_group,
                    groups,
                    kernel_h,
                    kernel_w,
                    stride_h,
                    stride_w,
                    pad_h,
                    pad_w,
                    dilation,
                    use_bias,
                    format):
        """Prepacked conv2d must match F.conv2d before and after jit save/load."""
        class Conv2D(torch.nn.Module):
            """Reference module wrapping F.conv2d."""
            def __init__(self, weight, bias, strides, paddings, dilations, groups):
                super(Conv2D, self).__init__()
                self.weight = weight
                self.bias = bias
                self.strides = strides
                self.paddings = paddings
                self.dilations = dilations
                self.groups = groups
            def forward(self, x):
                return F.conv2d(x, self.weight, self.bias,
                                self.strides, self.paddings, self.dilations, self.groups)
        class Conv2DPrePacked(torch.nn.Module):
            """Module running the XNNPACK prepacked conv2d."""
            def __init__(self, weight, bias, strides, paddings, dilations, groups):
                super(Conv2DPrePacked, self).__init__()
                self.packed_weight_bias = torch.ops.prepacked.conv2d_clamp_prepack(weight, bias,
                                                                                   strides, paddings, dilations, groups)
            def forward(self, x):
                return torch.ops.prepacked.conv2d_clamp_run(x, self.packed_weight_bias)
        input_channels = input_channels_per_group * groups
        output_channels = output_channels_per_group * groups
        kernels = (kernel_h, kernel_w)
        strides = (stride_h, stride_w)
        paddings = (pad_h, pad_w)
        dilations = (dilation, dilation)
        # Discard configs where the dilated kernel would not fit the padded input.
        assume(height + 2 * paddings[0] >=
               dilations[0] * (kernels[0] - 1) + 1)
        assume(width + 2 * paddings[1] >=
               dilations[1] * (kernels[1] - 1) + 1)
        input_data = torch.rand((batch_size, input_channels, height, width))
        if (format is not None):
            input_data = input_data.contiguous(memory_format=format)
        weight = torch.rand((output_channels, input_channels_per_group, kernel_h, kernel_w))
        bias = None
        if use_bias:
            bias = torch.rand((output_channels))
        scripted_conv2d = torch.jit.script(Conv2D(weight, bias,
                                                  strides, paddings, dilations, groups))
        scripted_conv2d_clamp_prepacked = torch.jit.script(Conv2DPrePacked(
            weight, bias, strides, paddings, dilations, groups))
        ref_result = scripted_conv2d(input_data)
        xnnpack_result = scripted_conv2d_clamp_prepacked(input_data)
        torch.testing.assert_close(ref_result, xnnpack_result, rtol=1e-2, atol=1e-3)
        # Serialize the modules and then deserialize
        input_data = torch.rand((batch_size, input_channels, height, width))
        if (format is not None):
            input_data = input_data.contiguous(memory_format=format)
        buffer = io.BytesIO()
        torch.jit.save(scripted_conv2d, buffer)
        buffer.seek(0)
        deserialized_conv2d = torch.jit.load(buffer)
        buffer = io.BytesIO()
        torch.jit.save(scripted_conv2d_clamp_prepacked, buffer)
        buffer.seek(0)
        deserialized_conv2d_clamp_prepacked = torch.jit.load(buffer)
        ref_result = deserialized_conv2d(input_data)
        xnnpack_result = deserialized_conv2d_clamp_prepacked(input_data)
        torch.testing.assert_close(ref_result, xnnpack_result, rtol=1e-2, atol=1e-3)
@given(batch_size=st.integers(0, 3),
       input_channels_per_group=st.integers(1, 32),
       height=st.integers(5, 64),
       width=st.integers(5, 64),
       output_channels_per_group=st.integers(1, 32),
       groups=st.integers(1, 16),
       kernel_h=st.integers(1, 7),
       kernel_w=st.integers(1, 7),
       stride_h=st.integers(1, 2),
       stride_w=st.integers(1, 2),
       pad_h=st.integers(0, 2),
       pad_w=st.integers(0, 2),
       output_pad_h=st.integers(0, 2),
       output_pad_w=st.integers(0, 2),
       dilation=st.integers(1, 2),
       use_bias=st.booleans(),
       format=st.sampled_from([None, torch.preserve_format, torch.contiguous_format, torch.channels_last]))
def test_conv2d_transpose(self,
                          batch_size,
                          input_channels_per_group,
                          height,
                          width,
                          output_channels_per_group,
                          groups,
                          kernel_h,
                          kernel_w,
                          stride_h,
                          stride_w,
                          pad_h,
                          pad_w,
                          output_pad_h,
                          output_pad_w,
                          dilation,
                          use_bias,
                          format):
    """Hypothesis property test: the XNNPACK prepacked conv_transpose2d ops
    must agree with F.conv_transpose2d, both directly and after a
    torch.jit.save/torch.jit.load round trip."""

    # Reference module: plain functional conv_transpose2d.
    class Conv2DT(torch.nn.Module):
        def __init__(self, weight, bias, strides, paddings, output_paddings, dilations, groups):
            super(Conv2DT, self).__init__()
            self.weight = weight
            self.bias = bias
            self.strides = strides
            self.paddings = paddings
            self.output_paddings = output_paddings
            self.dilations = dilations
            self.groups = groups

        def forward(self, x):
            return F.conv_transpose2d(x, self.weight, self.bias,
                                      self.strides, self.paddings, self.output_paddings, self.groups, self.dilations)

    # Module under test: weight/bias prepacked once at construction time,
    # then executed through the prepacked run op.
    class Conv2DTPrePacked(torch.nn.Module):
        def __init__(self, weight, bias, strides, paddings, output_paddings, dilations, groups):
            super(Conv2DTPrePacked, self).__init__()
            self.packed_weight_bias = torch.ops.prepacked.conv2d_transpose_clamp_prepack(weight, bias,
                                                                                         strides, paddings,
                                                                                         output_paddings,
                                                                                         dilations, groups)

        def forward(self, x):
            return torch.ops.prepacked.conv2d_transpose_clamp_run(x, self.packed_weight_bias)

    input_channels = input_channels_per_group * groups
    output_channels = output_channels_per_group * groups
    kernels = (kernel_h, kernel_w)
    strides = (stride_h, stride_w)
    paddings = (pad_h, pad_w)
    output_paddings = (output_pad_h, output_pad_w)
    dilations = (dilation, dilation)
    # Discard hypothesis draws where the effective (dilated) kernel does not
    # fit the padded input, or where output_padding is not strictly smaller
    # than both stride and dilation.
    assume(height + 2 * paddings[0] >=
           dilations[0] * (kernels[0] - 1) + 1)
    assume(width + 2 * paddings[1] >=
           dilations[1] * (kernels[1] - 1) + 1)
    assume((output_pad_h < stride_h) and (output_pad_h < dilation))
    assume((output_pad_w < stride_w) and (output_pad_w < dilation))
    input_data = torch.rand((batch_size, input_channels, height, width))
    if (format is not None):
        input_data = input_data.contiguous(memory_format=format)
    # Transposed-conv weight layout: (in_channels, out_channels_per_group, kH, kW).
    weight = torch.rand((input_channels, output_channels_per_group, kernel_h, kernel_w))
    bias = None
    if use_bias:
        bias = torch.rand((output_channels))
    scripted_conv2d = torch.jit.script(Conv2DT(weight, bias,
                                               strides, paddings,
                                               output_paddings, dilations, groups))
    scripted_conv2d_clamp_prepacked = torch.jit.script(Conv2DTPrePacked(
        weight, bias, strides, paddings, output_paddings, dilations, groups))
    ref_result = scripted_conv2d(input_data)
    xnnpack_result = scripted_conv2d_clamp_prepacked(input_data)
    torch.testing.assert_close(ref_result, xnnpack_result, rtol=1e-2, atol=1e-3)

    # Serialize the modules and then deserialize; the prepacked weights must
    # survive the round trip and still match the reference on a fresh input.
    input_data = torch.rand((batch_size, input_channels, height, width))
    if (format is not None):
        input_data = input_data.contiguous(memory_format=format)
    buffer = io.BytesIO()
    torch.jit.save(scripted_conv2d, buffer)
    buffer.seek(0)
    deserialized_conv2d = torch.jit.load(buffer)
    buffer = io.BytesIO()
    torch.jit.save(scripted_conv2d_clamp_prepacked, buffer)
    buffer.seek(0)
    deserialized_conv2d_clamp_prepacked = torch.jit.load(buffer)
    ref_result = deserialized_conv2d(input_data)
    xnnpack_result = deserialized_conv2d_clamp_prepacked(input_data)
    torch.testing.assert_close(ref_result, xnnpack_result, rtol=1e-2, atol=1e-3)
@unittest.skip("Fails on some platforms, see https://github.com/pytorch/pytorch/issues/73488")
@given(batch_size=st.integers(0, 3),
       input_channels_per_group=st.integers(1, 32),
       height=st.integers(5, 64),
       width=st.integers(5, 64),
       output_channels_per_group=st.integers(1, 32),
       groups=st.integers(1, 16),
       kernel_h=st.integers(1, 7),
       kernel_w=st.integers(1, 7),
       stride_h=st.integers(1, 2),
       stride_w=st.integers(1, 2),
       pad_h=st.integers(0, 2),
       pad_w=st.integers(0, 2),
       dilation=st.integers(1, 2),
       linear_weight_output_dim=st.integers(2, 64),
       use_bias=st.booleans(),
       format=st.sampled_from([None, torch.preserve_format, torch.contiguous_format, torch.channels_last]))
def test_combined_model(self,
                        batch_size,
                        input_channels_per_group,
                        height,
                        width,
                        output_channels_per_group,
                        groups,
                        kernel_h,
                        kernel_w,
                        stride_h,
                        stride_w,
                        pad_h,
                        pad_w,
                        dilation,
                        linear_weight_output_dim,
                        use_bias,
                        format):
    """Hypothesis test: a conv2d -> permute -> linear -> relu pipeline built
    from prepacked XNNPACK ops must match the functional reference, both
    directly and after a save/load round trip."""

    # Reference pipeline built from functional ops.
    class M(torch.nn.Module):
        def __init__(self, conv_weight, conv_bias, linear_weight, linear_bias,
                     strides, paddings, dilations, groups):
            super(M, self).__init__()
            self.conv_weight = conv_weight
            self.conv_bias = conv_bias
            self.linear_weight = linear_weight
            self.linear_bias = linear_bias
            self.strides = strides
            self.paddings = paddings
            self.dilations = dilations
            self.groups = groups

        def forward(self, x):
            o = F.conv2d(x, self.conv_weight, self.conv_bias,
                         self.strides, self.paddings, self.dilations, self.groups)
            o = o.permute([0, 2, 3, 1])
            o = F.linear(o, self.linear_weight, self.linear_bias)
            return F.relu(o)

    # Same pipeline with conv and linear weights prepacked at construction.
    class MPrePacked(torch.nn.Module):
        def __init__(self, conv_weight, conv_bias, linear_weight, linear_bias,
                     strides, paddings, dilations, groups):
            super(MPrePacked, self).__init__()
            self.conv2d_clamp_run_weight_bias = \
                torch.ops.prepacked.conv2d_clamp_prepack(conv_weight, conv_bias,
                                                         strides, paddings, dilations, groups)
            self.linear_clamp_run_weight_bias = \
                torch.ops.prepacked.linear_clamp_prepack(linear_weight, linear_bias)

        def forward(self, x):
            o = torch.ops.prepacked.conv2d_clamp_run(x, self.conv2d_clamp_run_weight_bias)
            o = o.permute([0, 2, 3, 1])
            o = torch.ops.prepacked.linear_clamp_run(o, self.linear_clamp_run_weight_bias)
            return F.relu(o)

    input_channels = input_channels_per_group * groups
    output_channels = output_channels_per_group * groups
    kernels = (kernel_h, kernel_w)
    strides = (stride_h, stride_w)
    paddings = (pad_h, pad_w)
    dilations = (dilation, dilation)
    # Discard draws where the effective (dilated) kernel does not fit the
    # padded input.
    assume(height + 2 * paddings[0]
           >= dilations[0] * (kernels[0] - 1) + 1)
    assume(width + 2 * paddings[1]
           >= dilations[1] * (kernels[1] - 1) + 1)
    input_data = torch.rand((batch_size, input_channels, height, width))
    if (format is not None):
        input_data = input_data.contiguous(memory_format=format)
    conv_weight = torch.rand((output_channels, input_channels_per_group, kernel_h, kernel_w))
    conv_bias = None
    if use_bias:
        conv_bias = torch.rand((output_channels))
    # This is done just to find the output shape of the result
    # so that the shape of weight for the following linear layer
    # can be determined.
    result = F.conv2d(input_data, conv_weight, conv_bias,
                      strides, paddings, dilations, groups)
    linear_input_shape = result.shape[1]
    linear_weight = torch.rand((linear_weight_output_dim, linear_input_shape))
    linear_bias = None
    if use_bias:
        linear_bias = torch.rand((linear_weight_output_dim))
    scripted_m = torch.jit.script(M(conv_weight, conv_bias, linear_weight,
                                    linear_bias, strides, paddings, dilations, groups))
    scripted_m_prepacked = torch.jit.script(
        MPrePacked(
            conv_weight,
            conv_bias,
            linear_weight,
            linear_bias,
            strides,
            paddings,
            dilations,
            groups))
    ref_result = scripted_m(input_data)
    xnnpack_result = scripted_m_prepacked(input_data)
    torch.testing.assert_close(ref_result, xnnpack_result, rtol=1e-2, atol=1e-3)

    # Serialize the modules and then deserialize
    input_data = torch.rand((batch_size, input_channels, height, width))
    # NOTE(review): unlike the pre-serialization check above (and the other
    # tests in this file), this ignores `format` and always uses
    # channels_last -- presumably deliberate to exercise NHWC after the
    # round trip, but confirm.
    input_data = input_data.contiguous(memory_format=torch.channels_last)
    buffer = io.BytesIO()
    torch.jit.save(scripted_m, buffer)
    buffer.seek(0)
    deserialized_m = torch.jit.load(buffer)
    buffer = io.BytesIO()
    torch.jit.save(scripted_m_prepacked, buffer)
    buffer.seek(0)
    deserialized_m_prepacked = torch.jit.load(buffer)
    ref_result = deserialized_m(input_data)
    xnnpack_result = deserialized_m_prepacked(input_data)
    torch.testing.assert_close(ref_result, xnnpack_result, rtol=1e-2, atol=1e-3)
@unittest.skipUnless(torch.backends.xnnpack.enabled,
                     " XNNPACK must be enabled for these tests."
                     " Please build with USE_XNNPACK=1.")
@unittest.skipIf(TEST_WITH_TSAN, "TSAN fails with XNNPACK. Does not seem to have a good reason for failures.")
class TestXNNPACKRewritePass(TestCase):
    @staticmethod
    def validate_transformed_module(
            # To please flake
            self,
            pattern_count_map,
            data_shape,
            prepack_removal=False,
            fuse_clamping_ops=False):
        """Script/trace `self` (a module instance), run the XNNPACK prepack
        rewrite pass (optionally followed by freezing, clamp fusion and
        prepack folding), FileCheck the deserialized graph against
        `pattern_count_map`, and verify numerics survive a save/load round
        trip.

        `pattern_count_map` values: 0 => pattern must appear at least once,
        -1 => pattern must NOT appear, n > 0 => exactly n occurrences.
        """
        input_data = torch.normal(1, 20, size=data_shape)
        # Exercise both JIT front ends.
        for jit_method in ["script", "trace"]:
            module_instance = self
            if jit_method == "script":
                scripted_model = torch.jit.script(module_instance)
            else:
                scripted_model = torch.jit.trace(module_instance, input_data)
            scripted_model.eval()
            ref_result = scripted_model(input_data)
            torch._C._jit_pass_insert_prepacked_ops(scripted_model._c)
            # Clamp fusion and prepack folding both operate on a frozen module.
            if fuse_clamping_ops or prepack_removal:
                scripted_model._c = torch._C._freeze_module(scripted_model._c)
            if fuse_clamping_ops:
                torch._C._jit_pass_fuse_clamp_w_prepacked_linear_conv(scripted_model._c)
            if (prepack_removal):
                torch._C._jit_pass_fold_prepacking_ops(scripted_model._c)
            # Graph checks and the numeric check both run on the
            # deserialized module so serialization is covered too.
            buffer = io.BytesIO()
            torch.jit.save(scripted_model, buffer)
            buffer.seek(0)
            deserialized_scripted_model = torch.jit.load(buffer)
            for pattern, v in pattern_count_map.items():
                if (v == 0):
                    FileCheck().check(pattern).run(deserialized_scripted_model.graph)
                elif (v == -1):
                    FileCheck().check_not(pattern).run(deserialized_scripted_model.graph)
                else:
                    FileCheck().check_count(pattern, v, exactly=True).run(deserialized_scripted_model.graph)
            xnnpack_result = deserialized_scripted_model(input_data)
            torch.testing.assert_close(ref_result, xnnpack_result, rtol=1e-2, atol=1e-3)
def test_linear(self):
    """Graph-rewrite checks for the XNNPACK prepack pass: aten::linear,
    aten::conv2d, aten::conv_transpose2d and relu/hardtanh activations
    (in-place and not) must be rewritten into prepacked:: ops, with
    optional prepack folding and clamp fusion.

    Cleanups vs. the previous revision: the unused local `kernels` was
    removed, and the Conv2DT validation now passes `transpose_data_shape`,
    which was previously computed but never used (its value equals
    `data_shape`, so behavior is unchanged).
    """
    data_shape = [2, 3, 32]
    weight_output_dim = 24
    weight_shape = (weight_output_dim, data_shape[-1])

    class Linear(torch.nn.Module):
        def __init__(self):
            super(Linear, self).__init__()
            self.weight = torch.nn.Parameter(torch.rand(weight_shape), requires_grad=False)
            self.bias = torch.nn.Parameter(torch.rand((weight_output_dim)), requires_grad=False)

        def forward(self, x):
            return F.linear(x, self.weight, self.bias)

    class LinearNoBias(torch.nn.Module):
        def __init__(self):
            super(LinearNoBias, self).__init__()
            self.weight = torch.nn.Parameter(torch.rand(weight_shape), requires_grad=False)

        def forward(self, x):
            return F.linear(x, self.weight, None)

    # Linear with bias pattern.
    pattern_count_map = {"Tensor = prim::CallFunction": -1,
                         "prepacked::linear_clamp_prepack": 1,
                         "prepacked::linear_clamp_run": 1}
    TestXNNPACKRewritePass.validate_transformed_module(Linear(), pattern_count_map, data_shape)
    TestXNNPACKRewritePass.validate_transformed_module(LinearNoBias(), pattern_count_map, data_shape)

    # Conv params
    batch_size = 2
    input_channels_per_group = 6
    height = 16
    width = 16
    output_channels_per_group = 6
    groups = 4
    kernel_h = kernel_w = 3
    stride_h = stride_w = 1
    pad_h = pad_w = 1
    output_pad_h = output_pad_w = 0
    dilation = 1
    input_channels = input_channels_per_group * groups
    output_channels = output_channels_per_group * groups
    strides = (stride_h, stride_w)
    paddings = (pad_h, pad_w)
    output_paddings = (output_pad_h, output_pad_w)
    dilations = (dilation, dilation)
    conv_weight_shape = (output_channels, input_channels_per_group, kernel_h, kernel_w)
    # Transposed conv uses the (in, out_per_group, kH, kW) weight layout.
    conv_transpose_weight_shape = (input_channels, output_channels_per_group, kernel_h, kernel_w)
    conv_bias_shape = (output_channels)

    class Conv2D(torch.nn.Module):
        def __init__(self):
            super(Conv2D, self).__init__()
            self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
            self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
            self.strides = strides
            self.paddings = paddings
            self.dilations = dilations
            self.groups = groups

        def forward(self, x):
            return F.conv2d(x, self.weight, self.bias,
                            self.strides, self.paddings, self.dilations, self.groups)

    class Conv2DT(torch.nn.Module):
        def __init__(self):
            super(Conv2DT, self).__init__()
            self.weight = torch.nn.Parameter(torch.rand(conv_transpose_weight_shape), requires_grad=False)
            self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
            self.strides = strides
            self.paddings = paddings
            self.output_paddings = output_paddings
            self.dilations = dilations
            self.groups = groups

        def forward(self, x):
            return F.conv_transpose2d(x, self.weight, self.bias,
                                      self.strides, self.paddings, self.output_paddings, self.groups, self.dilations)

    data_shape = (batch_size, input_channels, height, width)
    pattern_count_map = {"Tensor = aten::conv2d": -1,
                         "prepacked::conv2d_clamp_prepack": 1,
                         "prepacked::conv2d_clamp_run": 1}
    TestXNNPACKRewritePass.validate_transformed_module(Conv2D(), pattern_count_map, data_shape)

    transpose_data_shape = (batch_size, input_channels, height, width)
    transpose_pattern_count_map = {"Tensor = aten::conv_transpose2d": -1,
                                   "prepacked::conv2d_transpose_clamp_prepack": 1,
                                   "prepacked::conv2d_transpose_clamp_run": 1}
    TestXNNPACKRewritePass.validate_transformed_module(Conv2DT(), transpose_pattern_count_map, transpose_data_shape)

    # Probe the conv output shape so that the weight shape of the
    # following linear layer can be determined.
    input_data = torch.rand((batch_size, input_channels, height, width))
    conv_weight = torch.rand((output_channels, input_channels_per_group, kernel_h, kernel_w))
    conv_bias = torch.rand((output_channels))
    result = F.conv2d(input_data, conv_weight, conv_bias,
                      strides, paddings, dilations, groups)
    linear_input_shape = result.shape[1]
    linear_weight_shape = (weight_output_dim, linear_input_shape)

    # conv2d -> activation -> permute -> linear -> activation, with a
    # configurable activation for the fusion tests below.
    class M(torch.nn.Module):
        def __init__(self, activation_fn=F.relu):
            super(M, self).__init__()
            self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
            self.conv_bias = torch.nn.Parameter(torch.rand((conv_bias_shape)), requires_grad=False)
            self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape), requires_grad=False)
            self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)), requires_grad=False)
            self.strides = strides
            self.paddings = paddings
            self.dilations = dilations
            self.groups = groups
            self.activation_fn = activation_fn

        def forward(self, x):
            o = F.conv2d(x, self.conv_weight, self.conv_bias,
                         self.strides, self.paddings, self.dilations, self.groups)
            o = self.activation_fn(o)
            o = o.permute([0, 2, 3, 1])
            o = F.linear(o, self.linear_weight, self.linear_bias)
            return self.activation_fn(o)

    pattern_count_map = {"Tensor = aten::conv2d": -1,
                         "prepacked::conv2d_clamp_prepack": 1,
                         "prepacked::conv2d_clamp_run": 1,
                         "prepacked::linear_clamp_prepack": 1,
                         "prepacked::linear_clamp_run": 1}
    TestXNNPACKRewritePass.validate_transformed_module(M(), pattern_count_map, data_shape)
    # With prepack folding the prepack calls must disappear from the graph.
    pattern_count_map["prepacked::conv2d_clamp_prepack"] = -1
    pattern_count_map["Tensor = prim::CallFunction"] = -1
    pattern_count_map["prepacked::linear_clamp_prepack"] = -1
    TestXNNPACKRewritePass.validate_transformed_module(M(), pattern_count_map, data_shape, prepack_removal=True)

    # Not inplace relu fusion test.
    pattern_count_map = {"aten::relu": 2,
                         "prepacked::conv2d_clamp_prepack": -1,
                         "prepacked::conv2d_clamp_run": 1,
                         "prepacked::linear_clamp_prepack": -1,
                         "prepacked::linear_clamp_run": 1}
    TestXNNPACKRewritePass.validate_transformed_module(M(), pattern_count_map, data_shape, prepack_removal=True)
    pattern_count_map["prepacked::conv2d_clamp_prepack"] = -1
    pattern_count_map["prepacked::linear_clamp_prepack"] = -1
    pattern_count_map["aten::relu"] = -1
    TestXNNPACKRewritePass.validate_transformed_module(
        M(),
        pattern_count_map,
        data_shape,
        prepack_removal=True,
        fuse_clamping_ops=True)

    # Inplace relu fusion test.
    pattern_count_map = {"aten::relu": 2,
                         "prepacked::conv2d_clamp_prepack": -1,
                         "prepacked::conv2d_clamp_run": 1,
                         "prepacked::linear_clamp_prepack": -1,
                         "prepacked::linear_clamp_run": 1}
    TestXNNPACKRewritePass.validate_transformed_module(
        M(F.relu_),
        pattern_count_map,
        data_shape,
        prepack_removal=True)
    pattern_count_map["prepacked::conv2d_clamp_prepack"] = -1
    pattern_count_map["prepacked::linear_clamp_prepack"] = -1
    pattern_count_map["aten::relu"] = -1
    TestXNNPACKRewritePass.validate_transformed_module(
        M(F.relu_),
        pattern_count_map,
        data_shape,
        prepack_removal=True,
        fuse_clamping_ops=True)

    # Not inplace hardtanh fusion test.
    pattern_count_map = {"aten::hardtanh": 2,
                         "prepacked::conv2d_clamp_prepack": -1,
                         "prepacked::conv2d_clamp_run": 1,
                         "prepacked::linear_clamp_prepack": -1,
                         "prepacked::linear_clamp_run": 1}
    TestXNNPACKRewritePass.validate_transformed_module(
        M(F.hardtanh),
        pattern_count_map,
        data_shape,
        prepack_removal=True)
    pattern_count_map["prepacked::conv2d_clamp_prepack"] = -1
    pattern_count_map["prepacked::linear_clamp_prepack"] = -1
    pattern_count_map["aten::hardtanh"] = -1
    TestXNNPACKRewritePass.validate_transformed_module(
        M(F.hardtanh),
        pattern_count_map,
        data_shape,
        prepack_removal=True,
        fuse_clamping_ops=True)

    # Inplace hardtanh fusion test.
    pattern_count_map = {"aten::hardtanh_": 2,
                         "prepacked::conv2d_clamp_prepack": -1,
                         "prepacked::conv2d_clamp_run": 1,
                         "prepacked::linear_clamp_prepack": -1,
                         "prepacked::linear_clamp_run": 1}
    TestXNNPACKRewritePass.validate_transformed_module(
        M(F.hardtanh_),
        pattern_count_map,
        data_shape,
        prepack_removal=True)
    pattern_count_map["prepacked::conv2d_clamp_prepack"] = -1
    pattern_count_map["prepacked::linear_clamp_prepack"] = -1
    pattern_count_map["aten::hardtanh_"] = -1
    TestXNNPACKRewritePass.validate_transformed_module(
        M(F.hardtanh_),
        pattern_count_map,
        data_shape,
        prepack_removal=True,
        fuse_clamping_ops=True)

    # An activation that does not directly follow the prepacked op must
    # not be fused into it.
    class MFusionAntiPattern(torch.nn.Module):
        def __init__(self):
            super(MFusionAntiPattern, self).__init__()
            self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape), requires_grad=False)
            self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)), requires_grad=False)
            self.strides = strides
            self.paddings = paddings
            self.dilations = dilations
            self.groups = groups

        def forward(self, x):
            o = F.linear(x, self.linear_weight, self.linear_bias)
            o = F.relu(o)
            o = F.hardtanh(o)
            return o

    # Unfusable hardtanh.
    pattern_count_map = {"aten::hardtanh": 1,  # hardtanh cannot be.
                         "aten::relu": -1,  # relu is fused.
                         "prepacked::linear_clamp_prepack": -1,
                         "prepacked::linear_clamp_run": 1}
    TestXNNPACKRewritePass.validate_transformed_module(
        MFusionAntiPattern(),
        pattern_count_map,
        (16, linear_weight_shape[1]),
        prepack_removal=True,
        fuse_clamping_ops=True)

    # hardtanh with non-constant (tensor-dependent) min/max cannot be fused.
    class MFusionAntiPatternParamMinMax(torch.nn.Module):
        def __init__(self):
            super(MFusionAntiPatternParamMinMax, self).__init__()
            self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape), requires_grad=False)
            self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)), requires_grad=False)
            self.strides = strides
            self.paddings = paddings
            self.dilations = dilations
            self.groups = groups

        def forward(self, x):
            min = x[0, 0]
            max = min + 10
            o = F.linear(x, self.linear_weight, self.linear_bias)
            o = F.hardtanh(o, min, max)
            return o

    # Unfusable hardtanh.
    pattern_count_map = {"aten::hardtanh": 1,  # hardtanh cannot be.
                         "prepacked::linear_clamp_prepack": -1,
                         "prepacked::linear_clamp_run": 1}
    TestXNNPACKRewritePass.validate_transformed_module(
        MFusionAntiPatternParamMinMax(),
        pattern_count_map,
        (16, linear_weight_shape[1]),
        prepack_removal=True,
        fuse_clamping_ops=True)
def test_decomposed_linear(self):
    """Decomposed forms of linear (addmm, matmul followed by in-place add,
    and bare matmul) must all be recognized by the rewrite pass and turned
    into prepacked linear ops."""
    data_shape = [2, 32]
    weight_output_dim = 24
    weight_shape = (weight_output_dim, data_shape[-1])

    # linear expressed as addmm(bias, x, weight.t()).
    class DecomposedLinearAddmm(torch.nn.Module):
        def __init__(self):
            super(DecomposedLinearAddmm, self).__init__()
            self.weight = torch.nn.Parameter(torch.rand(weight_shape), requires_grad=False)
            self.bias = torch.nn.Parameter(torch.rand((weight_output_dim)), requires_grad=False)

        def forward(self, x):
            weight_t = self.weight.t()
            return torch.addmm(self.bias, x, weight_t)

    # linear expressed as matmul + in-place bias add.
    class DecomposedLinearMatmulAdd(torch.nn.Module):
        def __init__(self):
            super(DecomposedLinearMatmulAdd, self).__init__()
            self.weight = torch.nn.Parameter(torch.rand(weight_shape), requires_grad=False)
            self.bias = torch.nn.Parameter(torch.rand((weight_output_dim)), requires_grad=False)

        def forward(self, x):
            weight_t = self.weight.t()
            y = torch.matmul(x, weight_t)
            res = y.add_(self.bias)
            return res

    # bias-free linear expressed as a bare matmul.
    class DecomposedLinearMatmul(torch.nn.Module):
        def __init__(self):
            super(DecomposedLinearMatmul, self).__init__()
            self.weight = torch.nn.Parameter(torch.rand(weight_shape), requires_grad=False)
            self.bias = torch.nn.Parameter(torch.rand((weight_output_dim)), requires_grad=False)

        def forward(self, x):
            weight_t = self.weight.t()
            res = torch.matmul(x, weight_t)
            return res

    # Linear with bias pattern.
    pattern_count_map = {"Tensor = prim::CallFunction": -1,
                         "prepacked::linear_clamp_prepack": 1,
                         "prepacked::linear_clamp_run": 1}
    TestXNNPACKRewritePass.validate_transformed_module(DecomposedLinearAddmm(), pattern_count_map, data_shape)
    TestXNNPACKRewritePass.validate_transformed_module(DecomposedLinearMatmulAdd(), pattern_count_map, data_shape)
    TestXNNPACKRewritePass.validate_transformed_module(DecomposedLinearMatmul(), pattern_count_map, data_shape)
@unittest.skipUnless(torch.backends.xnnpack.enabled,
                     " XNNPACK must be enabled for these tests."
                     " Please build with USE_XNNPACK=1.")
@unittest.skipIf(TEST_WITH_TSAN, "TSAN is not fork-safe since we're forking in a multi-threaded environment")
class TestXNNPACKConv1dTransformPass(TestCase):
    @staticmethod
    def validate_transform_conv1d_to_conv2d(
            self,
            pattern_count_transformed_map,
            pattern_count_optimized_map,
            data_shape):
        """Script/trace `self` (a module instance), run the conv1d->conv2d
        transform pass plus optimize_for_mobile, FileCheck both resulting
        graphs against the given pattern maps, and verify numerics after a
        save/load round trip.

        Pattern map values: 0 => pattern must appear at least once,
        -1 => pattern must NOT appear, n > 0 => exactly n occurrences.
        """
        input_data = torch.normal(1, 20, size=data_shape)
        # Exercise both JIT front ends.
        for jit_method in ["script", "trace"]:
            module_instance = self
            if jit_method == "script":
                scripted_model = torch.jit.script(module_instance)
            else:
                scripted_model = torch.jit.trace(module_instance, input_data)
            scripted_model.eval()
            ref_result = scripted_model(input_data)
            torch._C._jit_pass_transform_conv1d_to_conv2d(scripted_model._c)
            optimized_scripted_model = optimize_for_mobile(scripted_model)

            # 1) Check the transformed (but not mobile-optimized) module.
            buffer = io.BytesIO()
            torch.jit.save(scripted_model, buffer)
            buffer.seek(0)
            deserialized_scripted_model = torch.jit.load(buffer)
            for pattern, v in pattern_count_transformed_map.items():
                if (v == 0):
                    FileCheck().check(pattern).run(deserialized_scripted_model.graph)
                elif (v == -1):
                    FileCheck().check_not(pattern).run(deserialized_scripted_model.graph)
                else:
                    FileCheck().check_count(pattern, v, exactly=True).run(deserialized_scripted_model.graph)
            transformed_result = deserialized_scripted_model(input_data)
            torch.testing.assert_close(ref_result, transformed_result, rtol=1e-2, atol=1e-3)

            # 2) Check the fully mobile-optimized module.
            optimized_buffer = io.BytesIO()
            torch.jit.save(optimized_scripted_model, optimized_buffer)
            optimized_buffer.seek(0)
            deserialized_optimized_scripted_model = torch.jit.load(optimized_buffer)
            for pattern, v in pattern_count_optimized_map.items():
                if (v == 0):
                    FileCheck().check(pattern).run(deserialized_optimized_scripted_model.graph)
                elif (v == -1):
                    FileCheck().check_not(pattern).run(deserialized_optimized_scripted_model.graph)
                else:
                    FileCheck().check_count(pattern, v, exactly=True).run(deserialized_optimized_scripted_model.graph)
            xnnpack_result = deserialized_optimized_scripted_model(input_data)
            torch.testing.assert_close(ref_result, xnnpack_result, rtol=1e-2, atol=1e-3)
def test_conv1d_basic(self):
    """Exhaustive sweep over conv1d hyperparameters: conv1d must be
    transformed to conv2d and, after mobile optimization, to a prepacked
    XNNPACK conv2d op."""
    batch_size_list = range(1, 3)
    input_channels_per_group_list = range(10, 12)
    width_list = range(10, 12)
    output_channels_per_group_list = range(10, 12)
    groups_list = range(1, 3)
    kernel_list = range(1, 4)
    stride_list = range(1, 3)
    padding_list = range(0, 3)
    dilation_list = range(1, 3)
    for hparams in itertools.product(batch_size_list,
                                     input_channels_per_group_list,
                                     width_list,
                                     output_channels_per_group_list,
                                     groups_list,
                                     kernel_list,
                                     stride_list,
                                     padding_list,
                                     dilation_list):
        batch_size, input_channels_per_group, width, output_channels_per_group, \
            groups, kernel, stride, padding, dilation = hparams
        input_channels = input_channels_per_group * groups
        output_channels = output_channels_per_group * groups
        conv_weight_shape = (output_channels, input_channels_per_group, kernel)
        conv_bias_shape = (output_channels)

        # Fresh class per iteration: closes over this iteration's hparams.
        class Conv1D(torch.nn.Module):
            def __init__(self):
                super(Conv1D, self).__init__()
                self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
                self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
                self.stride = stride
                self.padding = padding
                self.dilation = dilation
                self.groups = groups

            def forward(self, x):
                return F.conv1d(x, self.weight, self.bias,
                                self.stride, self.padding, self.dilation, self.groups)

        data_shape = (batch_size, input_channels, width)
        # conv1d must be gone after the transform; after full mobile
        # optimization only the prepacked conv2d run op may remain.
        pattern_count_transformed_map = {"Tensor = aten::conv1d": -1,
                                         "Tensor = aten::conv2d": 1}
        pattern_count_optimized_map = {"Tensor = aten::conv1d": -1,
                                       "Tensor = aten::conv2d": -1,
                                       "prepacked::conv2d_clamp_prepack" : -1,
                                       "prepacked::conv2d_clamp_run": 1}
        TestXNNPACKConv1dTransformPass.validate_transform_conv1d_to_conv2d(Conv1D(),
                                                                           pattern_count_transformed_map,
                                                                           pattern_count_optimized_map,
                                                                           data_shape)
# See https://github.com/pytorch/pytorch/issues/46066
@slowTest
def test_conv1d_with_relu_fc(self):
    """Sweep conv1d -> relu -> flatten -> linear networks through the
    conv1d->conv2d transform and mobile optimization."""
    batch_size_list = range(1, 3)
    input_channels_per_group_list = range(10, 12)
    width_list = range(10, 12)
    output_channels_per_group_list = range(10, 12)
    groups_list = range(1, 3)
    kernel_list = range(1, 4)
    stride_list = range(1, 3)
    padding_list = range(0, 3)
    dilation_list = range(1, 3)
    output_features_list = range(1, 3)
    for hparams in itertools.product(batch_size_list,
                                     input_channels_per_group_list,
                                     width_list,
                                     output_channels_per_group_list,
                                     groups_list,
                                     kernel_list,
                                     stride_list,
                                     padding_list,
                                     dilation_list,
                                     output_features_list):
        batch_size, input_channels_per_group, width, output_channels_per_group, \
            groups, kernel, stride, padding, dilation, output_features = hparams
        input_channels = input_channels_per_group * groups
        output_channels = output_channels_per_group * groups
        conv_weight_shape = (output_channels, input_channels_per_group, kernel)
        conv_bias_shape = (output_channels)
        # Standard conv output-length formula; needed to size the FC weight.
        conv_output_width = int((width + 2 * padding - dilation * (kernel - 1) - 1) / stride) + 1
        fc_weight_shape = (output_features, output_channels * conv_output_width)
        fc_bias_shape = (output_features)

        # Fresh class per iteration: closes over this iteration's hparams.
        class Net(torch.nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
                self.conv_bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
                self.stride = stride
                self.padding = padding
                self.dilation = dilation
                self.groups = groups
                self.fc_weight = torch.nn.Parameter(torch.rand(fc_weight_shape), requires_grad=False)
                self.fc_bias = torch.nn.Parameter(torch.rand(fc_bias_shape), requires_grad=False)

            def forward(self, x):
                x = F.conv1d(x, self.conv_weight, self.conv_bias,
                             self.stride, self.padding, self.dilation, self.groups)
                x = F.relu(x)
                x = x.view(x.size(0), -1)
                x = F.linear(x, self.fc_weight, self.fc_bias)
                return x

        data_shape = (batch_size, input_channels, width)
        pattern_count_transformed_map = {"Tensor = aten::conv1d": -1,
                                         "Tensor = aten::conv2d": 1}
        pattern_count_optimized_map = {"Tensor = aten::conv1d": -1,
                                       "Tensor = aten::conv2d": -1,
                                       "prepacked::conv2d_clamp_prepack" : -1,
                                       "prepacked::conv2d_clamp_run": 1}
        TestXNNPACKConv1dTransformPass.validate_transform_conv1d_to_conv2d(Net(),
                                                                           pattern_count_transformed_map,
                                                                           pattern_count_optimized_map,
                                                                           data_shape)
# Standard PyTorch test entry point.
if __name__ == "__main__":
    run_tests()
| pytorch-master | test/test_xnnpack_integration.py |
#!/usr/bin/env python3
# Owner(s): ["oncall: mobile"]
import io
import textwrap
from typing import List, Optional, Dict
import torch
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests
def model_size(sm):
    """Return the size, in bytes, of *sm* serialized with torch.jit.save."""
    serialized = io.BytesIO()
    torch.jit.save(sm, serialized)
    return serialized.getbuffer().nbytes
def save_and_load(sm):
    """Round-trip *sm* through torch.jit.save / torch.jit.load and return
    the reloaded module."""
    with io.BytesIO() as stream:
        torch.jit.save(sm, stream)
        stream.seek(0)
        return torch.jit.load(stream)
class TestBundledInputs(TestCase):
def test_single_tensors(self):
    """Bundle a variety of single-tensor inputs and verify that they inflate
    back correctly after a save/load round trip, without significantly
    growing the serialized model."""
    class SingleTensorModel(torch.nn.Module):
        def forward(self, arg):
            return arg

    sm = torch.jit.script(SingleTensorModel())
    original_size = model_size(sm)
    get_expr : List[str] = []
    samples = [
        # Tensor with small numel and small storage.
        (torch.tensor([1]),),
        # Tensor with large numel and small storage.
        (torch.tensor([[2, 3, 4]]).expand(1 << 16, -1)[:, ::2],),
        # Tensor with small numel and large storage.
        (torch.tensor(range(1 << 16))[-8:],),
        # Large zero tensor.
        (torch.zeros(1 << 16),),
        # Large channels-last ones tensor.
        (torch.ones(4, 8, 32, 32).contiguous(memory_format=torch.channels_last),),
        # Special encoding of random tensor.
        (torch.utils.bundled_inputs.bundle_randn(1 << 16),),
        # Quantized uniform tensor.
        (torch.quantize_per_tensor(torch.zeros(4, 8, 32, 32), 1, 0, torch.qint8),),
    ]
    torch.utils.bundled_inputs.augment_model_with_bundled_inputs(
        sm, samples, get_expr)
    # print(get_expr[0])
    # print(sm._generate_bundled_inputs.code)
    # Make sure the model only grew a little bit,
    # despite having nominally large bundled inputs.
    augmented_size = model_size(sm)
    self.assertLess(augmented_size, original_size + (1 << 12))
    loaded = save_and_load(sm)
    inflated = loaded.get_all_bundled_inputs()
    self.assertEqual(loaded.get_num_bundled_inputs(), len(samples))
    self.assertEqual(len(inflated), len(samples))
    # The model is the identity, so the output must be the very same object.
    self.assertTrue(loaded(*inflated[0]) is inflated[0][0])
    # Index 5 is the bundle_randn sample: its values are random, so it is
    # excluded from the exact comparison and checked statistically below.
    for idx, inp in enumerate(inflated):
        self.assertIsInstance(inp, tuple)
        self.assertEqual(len(inp), 1)
        self.assertIsInstance(inp[0], torch.Tensor)
        if idx != 5:
            # Strides might be important for benchmarking.
            self.assertEqual(inp[0].stride(), samples[idx][0].stride())
            self.assertEqual(inp[0], samples[idx][0], exact_dtype=True)
    # This tensor is random, but with 100,000 trials,
    # mean and std had ranges of (-0.0154, 0.0144) and (0.9907, 1.0105).
    self.assertEqual(inflated[5][0].shape, (1 << 16,))
    self.assertEqual(inflated[5][0].mean().item(), 0, atol=0.025, rtol=0)
    self.assertEqual(inflated[5][0].std().item(), 1, atol=0.02, rtol=0)
def test_large_tensor_with_inflation(self):
    """Tensors wrapped with a custom inflation function may be arbitrarily
    large, even when inflation is just the identity."""
    class SingleTensorModel(torch.nn.Module):
        def forward(self, arg):
            return arg

    scripted = torch.jit.script(SingleTensorModel())
    big = torch.randn(1 << 16)
    # bundle_large_tensor attaches an identity inflation function, which
    # lifts the usual size restriction on bundled tensors.
    wrapped = torch.utils.bundled_inputs.bundle_large_tensor(big)
    torch.utils.bundled_inputs.augment_model_with_bundled_inputs(
        scripted, [(wrapped,)])
    reloaded = save_and_load(scripted)
    bundled = reloaded.get_all_bundled_inputs()
    self.assertEqual(len(bundled), 1)
    self.assertEqual(bundled[0][0], big)
def test_rejected_tensors(self):
    """Tensors that cannot be stored compactly must be rejected."""
    def expect_rejected(candidate):
        # A fresh class per call gives a fresh TorchScript type each run.
        class SingleTensorModel(torch.nn.Module):
            def forward(self, arg):
                return arg

        scripted = torch.jit.script(SingleTensorModel())
        with self.assertRaisesRegex(Exception, "Bundled input argument"):
            torch.utils.bundled_inputs.augment_model_with_bundled_inputs(
                scripted, [(candidate,)])

    # Plain old big tensor.
    expect_rejected(torch.randn(1 << 16))
    # Two elements that are far apart in memory: there is currently no
    # compact representation that also preserves the strides.
    small_sparse = torch.randn(2, 1 << 16)[:, 0:1]
    self.assertEqual(small_sparse.numel(), 2)
    expect_rejected(small_sparse)
def test_non_tensors(self):
    """Non-tensor (str/int) arguments can be bundled and inflated too."""
    class StringAndIntModel(torch.nn.Module):
        def forward(self, fmt: str, num: int):
            return fmt.format(num)

    scripted = torch.jit.script(StringAndIntModel())
    samples = [
        ("first {}", 1),
        ("second {}", 2),
    ]
    torch.utils.bundled_inputs.augment_model_with_bundled_inputs(
        scripted, samples)
    reloaded = save_and_load(scripted)
    bundled = reloaded.get_all_bundled_inputs()
    self.assertEqual(bundled, samples)
    self.assertTrue(reloaded(*bundled[0]) == "first 1")
def test_multiple_methods_with_inputs(self):
    """Bundle the same inputs (plus info strings) for both forward() and an
    exported method foo(), then check every generated accessor helper."""
    class MultipleMethodModel(torch.nn.Module):
        def forward(self, arg):
            return arg

        @torch.jit.export
        def foo(self, arg):
            return arg

    mm = torch.jit.script(MultipleMethodModel())
    samples = [
        # Tensor with small numel and small storage.
        (torch.tensor([1]),),
        # Tensor with large numel and small storage.
        (torch.tensor([[2, 3, 4]]).expand(1 << 16, -1)[:, ::2],),
        # Tensor with small numel and large storage.
        (torch.tensor(range(1 << 16))[-8:],),
        # Large zero tensor.
        (torch.zeros(1 << 16),),
        # Large channels-last ones tensor.
        (torch.ones(4, 8, 32, 32).contiguous(memory_format=torch.channels_last),),
    ]
    # NOTE(review): `info` has six entries but `samples` only five ("Special
    # encoding of random tensor." has no matching sample) -- looks like a
    # leftover from test_single_tensors; confirm whether intentional.
    info = [
        'Tensor with small numel and small storage.',
        'Tensor with large numel and small storage.',
        'Tensor with small numel and large storage.',
        'Large zero tensor.',
        'Large channels-last ones tensor.',
        'Special encoding of random tensor.',
    ]
    torch.utils.bundled_inputs.augment_many_model_functions_with_bundled_inputs(
        mm,
        inputs={
            mm.forward : samples,
            mm.foo : samples
        },
        info={
            mm.forward : info,
            mm.foo : info
        }
    )
    loaded = save_and_load(mm)
    inflated = loaded.get_all_bundled_inputs()
    # Make sure these functions are all consistent.
    self.assertEqual(inflated, samples)
    self.assertEqual(inflated, loaded.get_all_bundled_inputs_for_forward())
    self.assertEqual(inflated, loaded.get_all_bundled_inputs_for_foo())
    # Check running and size helpers
    self.assertTrue(loaded(*inflated[0]) is inflated[0][0])
    self.assertEqual(loaded.get_num_bundled_inputs(), len(samples))
    # Check helper that work on all functions
    all_info = loaded.get_bundled_inputs_functions_and_info()
    self.assertEqual(set(all_info.keys()), set(['forward', 'foo']))
    self.assertEqual(all_info['forward']['get_inputs_function_name'], ['get_all_bundled_inputs_for_forward'])
    self.assertEqual(all_info['foo']['get_inputs_function_name'], ['get_all_bundled_inputs_for_foo'])
    self.assertEqual(all_info['forward']['info'], info)
    self.assertEqual(all_info['foo']['info'], info)
    # example of how to turn the 'get_inputs_function_name' into the actual list of bundled inputs
    for func_name in all_info.keys():
        input_func_name = all_info[func_name]['get_inputs_function_name'][0]
        func_to_run = getattr(loaded, input_func_name)
        self.assertEqual(func_to_run(), samples)
def test_multiple_methods_with_inputs_both_defined_failure(self):
class MultipleMethodModel(torch.nn.Module):
def forward(self, arg):
return arg
@torch.jit.export
def foo(self, arg):
return arg
samples = [(torch.tensor([1]),)]
# inputs defined 2 ways so should fail
with self.assertRaises(Exception):
mm = torch.jit.script(MultipleMethodModel())
definition = textwrap.dedent("""
def _generate_bundled_inputs_for_forward(self):
return []
""")
mm.define(definition)
torch.utils.bundled_inputs.augment_many_model_functions_with_bundled_inputs(
mm,
inputs={
mm.forward : samples,
mm.foo : samples,
},
)
def test_multiple_methods_with_inputs_neither_defined_failure(self):
class MultipleMethodModel(torch.nn.Module):
def forward(self, arg):
return arg
@torch.jit.export
def foo(self, arg):
return arg
samples = [(torch.tensor([1]),)]
# inputs not defined so should fail
with self.assertRaises(Exception):
mm = torch.jit.script(MultipleMethodModel())
mm._generate_bundled_inputs_for_forward()
torch.utils.bundled_inputs.augment_many_model_functions_with_bundled_inputs(
mm,
inputs={
mm.forward : None,
mm.foo : samples,
},
)
def test_bad_inputs(self):
class SingleTensorModel(torch.nn.Module):
def forward(self, arg):
return arg
# Non list for input list
with self.assertRaises(TypeError):
m = torch.jit.script(SingleTensorModel())
torch.utils.bundled_inputs.augment_model_with_bundled_inputs(
m,
inputs="foo" # type: ignore[arg-type]
)
# List of non tuples. Most common error using the api.
with self.assertRaises(TypeError):
m = torch.jit.script(SingleTensorModel())
torch.utils.bundled_inputs.augment_model_with_bundled_inputs(
m,
inputs=[torch.ones(1, 2), ] # type: ignore[list-item]
)
def test_double_augment_fail(self):
class SingleTensorModel(torch.nn.Module):
def forward(self, arg):
return arg
m = torch.jit.script(SingleTensorModel())
torch.utils.bundled_inputs.augment_model_with_bundled_inputs(
m,
inputs=[(torch.ones(1),)]
)
with self.assertRaisesRegex(Exception, "Models can only be augmented with bundled inputs once."):
torch.utils.bundled_inputs.augment_model_with_bundled_inputs(
m,
inputs=[(torch.ones(1),)]
)
def test_double_augment_non_mutator(self):
class SingleTensorModel(torch.nn.Module):
def forward(self, arg):
return arg
m = torch.jit.script(SingleTensorModel())
bundled_model = torch.utils.bundled_inputs.bundle_inputs(
m,
inputs=[(torch.ones(1),)]
)
with self.assertRaises(AttributeError):
m.get_all_bundled_inputs()
self.assertEqual(bundled_model.get_all_bundled_inputs(), [(torch.ones(1),)])
self.assertEqual(bundled_model.forward(torch.ones(1)), torch.ones(1))
def test_double_augment_success(self):
class SingleTensorModel(torch.nn.Module):
def forward(self, arg):
return arg
m = torch.jit.script(SingleTensorModel())
bundled_model = torch.utils.bundled_inputs.bundle_inputs(
m,
inputs={m.forward : [(torch.ones(1),)]}
)
self.assertEqual(bundled_model.get_all_bundled_inputs(), [(torch.ones(1),)])
bundled_model2 = torch.utils.bundled_inputs.bundle_inputs(
bundled_model,
inputs=[(torch.ones(2),)]
)
self.assertEqual(bundled_model2.get_all_bundled_inputs(), [(torch.ones(2),)])
def test_dict_args(self):
    """Bundle inputs whose arguments are Optional[Dict]/Optional[List] of
    tensors, using InflatableArg `fmt_fn` hooks to re-inflate them at load
    time; checks the bundled model stays small and that the expected
    inflate-helper methods are generated.
    """
    class MyModel(torch.nn.Module):
        def forward(
            self,
            arg1: Optional[Dict[str, torch.Tensor]],
            arg2: Optional[List[torch.Tensor]],
            arg3: torch.Tensor,
        ):
            if arg1 is None:
                return arg3
            elif arg2 is None:
                return arg1["a"] + arg1["b"]
            else:
                return arg1["a"] + arg1["b"] + arg2[0]

    small_sample = dict(
        a=torch.zeros([10, 20]),
        b=torch.zeros([1, 1]),
        c=torch.zeros([10, 20]),
    )
    small_list = [torch.zeros([10, 20])]

    big_sample = dict(
        a=torch.zeros([1 << 5, 1 << 8, 1 << 10]),
        b=torch.zeros([1 << 5, 1 << 8, 1 << 10]),
        c=torch.zeros([1 << 5, 1 << 8, 1 << 10]),
    )
    big_list = [torch.zeros([1 << 5, 1 << 8, 1 << 10])]

    def condensed(t):
        # Collapse `t` to a single stored element expanded to its shape,
        # so the bundled copy occupies one element of storage.
        ret = torch.empty_like(t).flatten()[0].clone().expand(t.shape)
        assert ret.storage().size() == 1
        # ret.storage()[0] = 0
        return ret

    def bundle_optional_dict_of_randn(template):
        # Inflate with randn_like at load time via the fmt_fn code string.
        return torch.utils.bundled_inputs.InflatableArg(
            value=(
                None
                if template is None
                else {k: condensed(v) for (k, v) in template.items()}
            ),
            fmt="{}",
            fmt_fn="""
            def {}(self, value: Optional[Dict[str, Tensor]]):
                if value is None:
                    return None
                output = {{}}
                for k, v in value.items():
                    output[k] = torch.randn_like(v)
                return output
            """,
        )

    def bundle_optional_list_of_randn(template):
        return torch.utils.bundled_inputs.InflatableArg(
            value=(None if template is None else [condensed(v) for v in template]),
            fmt="{}",
            fmt_fn="""
            def {}(self, value: Optional[List[Tensor]]):
                if value is None:
                    return None
                output = []
                for v in value:
                    output.append(torch.randn_like(v))
                return output
            """,
        )

    out : List[str] = []
    sm = torch.jit.script(MyModel())
    original_size = model_size(sm)
    small_inputs = (
        bundle_optional_dict_of_randn(small_sample),
        bundle_optional_list_of_randn(small_list),
        torch.zeros([3, 4]),
    )
    big_inputs = (
        bundle_optional_dict_of_randn(big_sample),
        bundle_optional_list_of_randn(big_list),
        torch.zeros([1 << 5, 1 << 8, 1 << 10]),
    )

    torch.utils.bundled_inputs.augment_model_with_bundled_inputs(
        sm,
        [
            big_inputs,
            small_inputs,
        ],
        _receive_inflate_expr=out,
    )
    augmented_size = model_size(sm)
    # assert the size has not increased more than 8KB
    self.assertLess(augmented_size, original_size + (1 << 13))

    loaded = save_and_load(sm)
    inflated = loaded.get_all_bundled_inputs()
    self.assertEqual(len(inflated[0]), len(small_inputs))

    methods, _ = torch.utils.bundled_inputs._get_bundled_inputs_attributes_and_methods(
        loaded
    )

    # One Function (forward)
    # two bundled inputs (big_inputs and small_inputs)
    # two args which have InflatableArg with fmt_fn
    # 1 * 2 * 2 = 4
    self.assertEqual(
        sum([method.startswith("_inflate_helper") for method in methods]), 4
    )
# Standard entry point so the test file can be run directly as a script.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_bundled_inputs.py |
# -*- coding: utf-8 -*-
# Owner(s): ["module: tests"]
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
from torch.utils.dlpack import from_dlpack, to_dlpack
from torch._six import inf, nan, string_classes
from itertools import product, combinations, permutations
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TestCase, TEST_WITH_ROCM, run_tests,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, load_tests, slowTest,
TEST_WITH_CROSSREF, skipIfTorchDynamo,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta,
PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
expectedAlertNondeterministic, get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN)
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types,
)
# Protects against includes accidentally setting the default dtype
assert torch.get_default_dtype() is torch.float32

# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests

# True when TF32 matmul precision may be in play (ROCm builds, or GPUs where
# tf32 is not fp32, i.e. Ampere and newer).
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
@contextlib.contextmanager
def torch_vital_set(value):
    """Temporarily set the TORCH_VITAL environment variable to `value`.

    On exit the previous state is restored exactly: a prior value
    (including an empty string) is put back, and the variable is removed
    only if it was not set at all beforehand.
    """
    stash = os.environ.get('TORCH_VITAL')
    os.environ['TORCH_VITAL'] = value
    try:
        yield
    finally:
        # Bug fix: the previous `if stash:` check treated a pre-existing
        # empty-string value as "unset" and deleted it instead of
        # restoring it.  Test explicitly for None.
        if stash is not None:
            os.environ['TORCH_VITAL'] = stash
        else:
            os.environ.pop('TORCH_VITAL', None)
# Tests Vital Signs for Torch
# FIXME: document or deprecate whatever this is
class TestBasicVitalSigns(TestCase):
    def test_basic_vitals(self):
        """Vitals are off for an empty TORCH_VITAL and on for 'ON'."""
        for env_value, expected in (('', False), ('ON', True)):
            with torch_vital_set(env_value):
                self.assertEqual(torch.vitals_enabled(), expected)

    def test_basic_vitals_read_write(self):
        """Setting a vital stores it and it appears in read_vitals()."""
        with torch_vital_set('ON'):
            self.assertTrue(torch.vitals_enabled())
            # Exercises the write path of the vitals machinery.
            self.assertTrue(torch.set_vital('Dataloader', 'basic_unit_test', 'TEST_VALUE_STRING'))
            dump = torch.read_vitals()
            self.assertIn('TEST_VALUE_STRING', dump)
            self.assertIn('CUDA.used', dump)

    def test_dataloader_vitals(self):
        """Constructing a DataLoader records the Dataloader.enabled vital."""
        with torch_vital_set('ON'):
            features = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
            labels = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
            loader = torch.utils.data.DataLoader(
                torch.utils.data.TensorDataset(features, labels), batch_size=2)
            self.assertIn('Dataloader.enabled\t\t True', torch.read_vitals())
# FIXME: document or deprecate whatever this is
class TestVitalSignsCuda(TestCase):
    @onlyCUDA
    def test_cuda_vitals_gpu_only(self, device):
        """On CUDA machines, device usage is recorded in the vitals dump."""
        with torch_vital_set('ON'):
            vitals_dump = torch.read_vitals()
            self.assertIn('CUDA.used\t\t true', vitals_dump)
class TestTorchDeviceType(TestCase):
exact_dtype = True
# TODO: move all tensor creation to common ops
def _rand_shape(self, dim, min_size, max_size):
shape = []
for i in range(dim):
shape.append(random.randint(min_size, max_size))
return tuple(shape)
# Validates that mathematical constants are defined properly, as required by
# the Python Array API (https://data-apis.org/array-api/latest/API_specification/constants.html)
@onlyCPU
def test_constants(self, device):
    """torch.e/pi/nan/inf must be Python floats matching the math module."""
    for torch_const, math_const in ((torch.e, math.e), (torch.pi, math.pi)):
        self.assertIsInstance(torch_const, float)
        self.assertEqual(torch_const, math_const, atol=0, rtol=0)

    self.assertIsInstance(torch.nan, float)
    self.assertEqual(torch.nan, math.nan, equal_nan=True)

    self.assertIsInstance(torch.inf, float)
    self.assertEqual(torch.inf, math.inf)
@onlyNativeDeviceTypes
@dtypes(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64,
        torch.bool, torch.float32, torch.complex64, torch.float64,
        torch.complex128)
def test_bytes_to_scalar(self, device, dtype):
    """Round-trip: random raw bytes -> scalar -> storage bytes must match."""
    n_bytes = torch._utils._element_size(dtype)
    # bool elements only ever store 0 or 1.
    byte_upper_bound = 2 if dtype == torch.bool else 256

    def rand_byte():
        return torch.randint(0, byte_upper_bound, ()).item()

    for _ in range(10):
        raw_bytes = [rand_byte() for _ in range(n_bytes)]
        scalar = bytes_to_scalar(raw_bytes, dtype, device)
        self.assertEqual(scalar.storage().untyped().tolist(), raw_bytes)
@dtypes(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64,
        torch.bool, torch.float32, torch.complex64, torch.float64,
        torch.complex128)
def test_storage(self, device, dtype):
    """A tensor's typed storage and its untyped byte view must agree
    element-by-element with the tensor's contents."""
    v = make_tensor((3, 5), dtype=dtype, device=device, low=-9, high=9)
    self.assertEqual(v.storage()[0], v[0][0])
    self.assertEqual(v.storage()[14], v[2][4])

    # Every flat typed-storage element matches the 2-D tensor entry.
    typed = v.storage()
    for flat_idx in range(v.numel()):
        row, col = divmod(flat_idx, v.size(1))
        self.assertEqual(typed[flat_idx], v[row][col])

    # Same check through the raw (untyped) byte view.
    raw = v.storage().untyped()
    n_bytes = v.element_size()
    for flat_idx in range(v.numel()):
        begin = flat_idx * n_bytes
        row, col = divmod(flat_idx, v.size(1))
        self.assertEqual(
            bytes_to_scalar(raw[begin:begin + n_bytes], dtype, device),
            v[row][col])
@onlyNativeDeviceTypes
@dtypes(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64,
        torch.bool, torch.float32, torch.complex64, torch.float64,
        torch.complex128, torch.quint8, torch.qint8, torch.qint32,
        torch.quint4x2)
def test_storage_setitem(self, device, dtype):
    """__setitem__ on typed storages: full-slice fill, per-index stores,
    and slice assignment must all mirror equivalent list mutations."""
    # Skip quantized dtypes for CUDA, since they're not supported
    if torch.device(device).type == 'cuda':
        if dtype in [torch.quint8, torch.qint8, torch.qint32, torch.quint4x2]:
            return

    storage_type_name = torch.storage._dtype_to_storage_type_map()[dtype]
    # Resolve the storage class with getattr instead of eval(): the same
    # attribute lookup, without dynamic code execution.
    if torch.device(device).type == 'cuda':
        storage_type = getattr(torch.cuda, storage_type_name)
    else:
        storage_type = getattr(torch, storage_type_name)

    N = 10
    s = storage_type(N)
    s[:] = 0
    l = [0] * N
    self.assertEqual(s, storage_type(l))

    for i in range(N):
        s[i] = i
        l[i] = i
    self.assertEqual(s, storage_type(l))

    l[2:7] = [1] * 5
    s[2:7] = 1
    self.assertEqual(s, storage_type(l))
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_tensor_storage_type(self, device, dtype):
    """storage_type() must match the class from the dtype->storage map."""
    t = make_tensor((10,), dtype=dtype, device=device, low=-9, high=9)
    namespace = torch.cuda if (torch.device(device).type == 'cuda') else torch
    expected = getattr(namespace, torch.storage._dtype_to_storage_type_map()[dtype])
    self.assertEqual(t.storage_type(), expected)
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_tensor_from_storage(self, device, dtype):
    """torch.tensor accepts matching typed/untyped storages and rejects
    storages of any other dtype."""
    src = make_tensor((4, 5, 3), dtype=dtype, device=device, low=-9, high=9)
    storage = src.storage()

    from_typed = torch.tensor(storage, device=device, dtype=dtype).reshape(src.size())
    self.assertEqual(src, from_typed)
    from_untyped = torch.tensor(storage.untyped(), device=device, dtype=dtype).reshape(src.size())
    self.assertEqual(src, from_untyped)

    # Any other dtype's storage must be rejected.
    for wrong_dtype in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16):
        if wrong_dtype == dtype:
            continue
        with self.assertRaisesRegex(RuntimeError, r'Expected a Storage of type'):
            torch.tensor(src.to(wrong_dtype).storage(), device=device, dtype=dtype)
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_set_storage(self, device, dtype):
    """Tensor.set_ accepts matching typed/untyped storages and rejects
    storages of any other dtype."""
    src = make_tensor((4, 5, 3), dtype=dtype, device=device, low=-9, high=9)
    storage = src.storage()

    from_typed = torch.tensor([], device=device, dtype=dtype).set_(storage).reshape(src.size())
    self.assertEqual(src, from_typed)
    from_untyped = torch.tensor([], device=device, dtype=dtype).set_(storage.untyped()).reshape(src.size())
    self.assertEqual(src, from_untyped)

    # Any other dtype's storage must be rejected.
    for wrong_dtype in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16):
        if wrong_dtype == dtype:
            continue
        with self.assertRaisesRegex(RuntimeError, r'Expected a Storage of type'):
            torch.tensor([], device=device, dtype=dtype).set_(src.to(wrong_dtype).storage())
def _check_storage_meta(self, s, s_check):
    """Assert that meta-device storage `s` mirrors real storage `s_check`:
    same nbytes/size (and dtype for TypedStorage), 'meta' device, a null
    data pointer, and no element access.  Recurses into the untyped view.
    """
    self.assertTrue(
        isinstance(s, (torch.UntypedStorage, torch.TypedStorage)) and
        isinstance(s_check, type(s)),
        (
            's and s_check must both be one of UntypedStorage or '
            'TypedStorage, but got'
            f' {type(s).__name__} and {type(s_check).__name__}'))

    self.assertEqual(s.device.type, 'meta')
    self.assertEqual(s.nbytes(), s_check.nbytes())
    self.assertEqual(s.size(), s_check.size())
    # Meta storages have no real allocation behind them.
    self.assertEqual(s.data_ptr(), 0)

    # Element access is not implemented on the meta device.
    with self.assertRaisesRegex(NotImplementedError, r'Not available'):
        s[0]

    if isinstance(s, torch.TypedStorage):
        self.assertEqual(s.dtype, s_check.dtype)
        # Also verify the underlying untyped byte view.
        self._check_storage_meta(s.untyped(), s_check.untyped())
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_typed_storage_meta(self, device, dtype):
    """TypedStorage on the meta device mirrors a real TypedStorage for a
    range of constructor argument shapes."""
    for ctor_args in ([], [0], [100], [[1, 2, 3, 4, 5, 6]]):
        real = torch.TypedStorage(*ctor_args, dtype=dtype, device=device)
        meta = torch.TypedStorage(*ctor_args, dtype=dtype, device='meta')
        self._check_storage_meta(meta, real)
@onlyNativeDeviceTypes
def test_untyped_storage_meta(self, device):
    """UntypedStorage on the meta device mirrors a real UntypedStorage for
    a range of constructor argument shapes."""
    for ctor_args in ([], [0], [100], [[1, 2, 3, 4, 5, 6]]):
        real = torch.UntypedStorage(*ctor_args, device=device)
        meta = torch.UntypedStorage(*ctor_args, device='meta')
        self._check_storage_meta(meta, real)
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_storage_meta_from_tensor(self, device, dtype):
    """A tensor moved to the meta device yields a matching meta storage."""
    real_tensor = make_tensor((4, 5, 3), dtype=dtype, device=device, low=-9, high=9)
    meta_tensor = real_tensor.to('meta')
    self._check_storage_meta(meta_tensor.storage(), real_tensor.storage())
@onlyCPU
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_storage_meta_errors(self, device, dtype):
    """Operations that require real memory must fail cleanly (with the
    documented error messages) when invoked on a meta-device storage."""
    s0 = torch.TypedStorage([1, 2, 3, 4], device='meta', dtype=dtype)

    with self.assertRaisesRegex(NotImplementedError, r'Cannot copy out'):
        s0.cpu()

    with self.assertRaisesRegex(RuntimeError, r'only available on CPU'):
        s0._share_fd_cpu_()

    with self.assertRaisesRegex(RuntimeError, r'only available on CPU'):
        s0._share_filename_cpu_()

    # CUDA-specific failure modes only apply when a GPU is present.
    if torch.cuda.is_available():
        with self.assertRaisesRegex(NotImplementedError, r'Cannot copy out'):
            s0.cuda()

        with self.assertRaisesRegex(RuntimeError, r'only available on CUDA'):
            s0._share_cuda_()

        with self.assertRaisesRegex(NotImplementedError, r'Cannot copy out'):
            s0.pin_memory()

    with self.assertRaisesRegex(RuntimeError, r'got unexpected device type'):
        s0.resize_(10)

    with self.assertRaisesRegex(RuntimeError, r'only available on CPU'):
        s0.share_memory_()

    with self.assertRaisesRegex(NotImplementedError, r'Not available'):
        s0.tolist()

    with tempfile.NamedTemporaryFile() as f:
        with self.assertRaisesRegex(RuntimeError, r'Device not recognized'):
            s0._write_file(f, True, True, s0.element_size())

    # Copying a meta storage into a real one must also fail, on every
    # available device.
    for device in ['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']:
        s1 = torch.TypedStorage([1, 2, 3, 4], device=device, dtype=dtype)

        with self.assertRaisesRegex(NotImplementedError, r'Cannot copy out'):
            s1.copy_(s0)
@onlyCUDA
def test_module_share_memory(self):
    """Regression test for issue #80733: share_memory() on a CPU module
    must still work after the module has been copied to CUDA.

    See https://github.com/pytorch/pytorch/issues/80733
    """
    host_model = torch.nn.Linear(3, 1)
    device_model = host_model.to('cuda')
    host_model.share_memory()
@dtypes(torch.float32, torch.complex64)
def test_deepcopy(self, device, dtype):
    """deepcopy of a nested structure must preserve values, storage
    sharing between tensors/views, and custom Python attributes."""
    from copy import deepcopy
    a = torch.randn(5, 5, dtype=dtype, device=device)
    b = torch.randn(5, 5, dtype=dtype, device=device)
    c = a.view(25)
    original = [a, [a.storage(), b.storage()], b, c]
    copied = deepcopy(original)

    # Values survive the copy exactly.
    self.assertEqual(copied[0], original[0], atol=0, rtol=0)
    self.assertEqual(copied[1][0], original[1][0], atol=0, rtol=0)
    self.assertEqual(copied[1][1], original[1][1], atol=0, rtol=0)
    self.assertEqual(copied[1], original[1], atol=0, rtol=0)
    self.assertEqual(copied[2], original[2], atol=0, rtol=0)

    # Check that deepcopy preserves sharing: mutating the copied `a`
    # shows through its copied storage and through the copied view.
    copied[0].add_(1)
    for i in range(a.numel()):
        self.assertEqual(copied[1][0][i], original[1][0][i] + 1)
    self.assertEqual(copied[3], c + 1)
    copied[2].sub_(1)
    for i in range(a.numel()):
        self.assertEqual(copied[1][1][i], original[1][1][i] - 1)

    # Check that deepcopy preserves Python-level attributes.
    a.foo = 3
    self.assertEqual(deepcopy(a).foo, 3)
@dtypes(torch.float32, torch.complex64)
def test_deepcopy_scalar(self, device, dtype):
    """deepcopy of a 0-dim tensor keeps its (empty) shape and value."""
    from copy import deepcopy
    scalar = torch.tensor(5, dtype=dtype, device=device)
    self.assertEqual(scalar.size(), deepcopy(scalar).size())
    self.assertEqual(scalar, deepcopy(scalar))
def check_internal_mem_overlap(self, inplace_op, num_inputs,
                               dtype, device,
                               expected_failure=False):
    """Apply `inplace_op` to a self-overlapping first operand and verify
    that torch raises the internal-overlap error; with expected_failure
    set, verify instead that the check is (still) known to be missing."""
    if isinstance(inplace_op, str):
        inplace_op = getattr(torch.Tensor, inplace_op)
    # A 1-element tensor expanded to 3x3: every element aliases the same
    # memory location, so in-place writes must be rejected.
    overlapping = torch.randn(1, dtype=dtype, device=device).expand(3, 3)
    operands = [overlapping] + [torch.randn_like(overlapping)
                                for _ in range(num_inputs - 1)]
    if expected_failure:
        # Known-broken op: the overlap error is expected NOT to fire.
        with self.assertRaises(AssertionError):
            with self.assertRaisesRegex(RuntimeError, 'single memory location'):
                inplace_op(*operands)
    else:
        with self.assertRaisesRegex(RuntimeError, 'single memory location'):
            inplace_op(*operands)
def unary_check_input_output_mem_overlap(self, data, sz, op,
                                         expected_failure=False):
    """Check out= overlap handling for a unary `op`: identical out/input
    is allowed, partial overlap must raise 'unsupported operation'
    (unless expected_failure marks the check as known-missing)."""
    def run_and_compare(fn, output, input):
        reference = torch.empty_like(output)
        fn(input, out=reference)
        self.assertEqual(fn(input, out=output), reference, msg=fn.__name__)

    # output is identical to input: allowed.
    run_and_compare(op, output=data[0:sz], input=data[0:sz])
    # output and input are independent: allowed.
    run_and_compare(op, output=data[0:sz], input=data[sz:2 * sz])

    def expect_overlap_error(output, input):
        if expected_failure:
            with self.assertRaises(AssertionError):
                with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
                    run_and_compare(op, output, input)
        else:
            with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
                run_and_compare(op, output, input)

    # output partially overlaps with input: must be rejected.
    expect_overlap_error(data[0:sz], data[1:sz + 1])
    # output is a transposed view of the input: also overlaps.
    side = int(math.sqrt(sz))
    square = data[:side**2].view([side, side])
    expect_overlap_error(square.t(), square)
def ternary_check_input_output_mem_overlap(self, op, device,
                                           expected_failure=False):
    """Run the unary overlap checks with each of a ternary op's three
    arguments in turn taking the role of the overlapping input."""
    sz = 9
    data = torch.randn(2 * sz, device=device)
    other1 = torch.randn(sz, device=device)
    other2 = torch.randn(sz, device=device)

    def with_input_at(pos):
        # Build a unary adapter placing `input` at argument position `pos`.
        def adapted(input, out):
            args = [other1.view(input.shape), other2.view(input.shape)]
            args.insert(pos, input)
            return op(*args, out=out)
        return adapted

    for position in range(3):
        self.unary_check_input_output_mem_overlap(
            data, sz, with_input_at(position),
            expected_failure=expected_failure)
def _select_broadcastable_dims(self, dims_full=None):
# select full dimensionality
if dims_full is None:
dims_full = []
ndims = random.randint(1, 4)
dims_full = [random.randint(1, 8) for _ in range(ndims)]
else:
ndims = len(dims_full)
# select actual dimensions for ops:
# larger: full ndims, individual sizes may be reduced
# smaller: possibly reduced ndims, sizes may be reduced
smaller_ndims = random.randint(1, ndims)
dims_small = []
dims_large = []
for i in range(ndims - 1, -1, -1):
j = random.randint(1, 3)
if j == 1: # no reduced singleton dimension
ds = dims_full[i]
dl = dims_full[i]
elif j == 2: # larger may have reduced singleton dimension
ds = dims_full[i]
dl = 1 if len(dims_small) < smaller_ndims else dims_full[i]
elif j == 3: # smaller may have reduced singleton dimension
ds = 1
dl = dims_full[i]
dims_large = [dl] + dims_large
if len(dims_small) < smaller_ndims:
dims_small = [ds] + dims_small
return (dims_small, dims_large, dims_full)
# collected tests of ops that used scalar_check in Declarations.cwrap for
# correctness
def test_scalar_check(self, device):
    """Shape checks for 0-d vs 1-element 1-d tensors across many ops:
    elementwise math, reductions, indexing, shifting/bitwise ops, and
    several nn.functional losses.  Mostly verifies result `.shape`.
    """
    zero_d = torch.randn((), device=device)
    one_d = torch.randn((1,), device=device)

    # remainder
    self.assertEqual((), torch.remainder(zero_d, zero_d).shape)
    self.assertEqual((), torch.remainder(zero_d, 2).shape)
    self.assertEqual((1,), torch.remainder(zero_d, one_d).shape)
    self.assertEqual((1,), torch.remainder(one_d, zero_d).shape)

    # fmod
    self.assertEqual((), torch.fmod(zero_d, zero_d).shape)
    self.assertEqual((), torch.fmod(zero_d, 2).shape)
    self.assertEqual((1,), torch.fmod(zero_d, one_d).shape)
    self.assertEqual((1,), torch.fmod(one_d, zero_d).shape)

    # exp, cos, cosh, tan, atan, tanh, erf, erfc, reciprocal
    self.assertEqual((), torch.exp(zero_d).shape)
    self.assertEqual((), torch.cos(zero_d).shape)
    self.assertEqual((), torch.cosh(zero_d).shape)
    self.assertEqual((), torch.tan(zero_d).shape)
    self.assertEqual((), torch.atan(zero_d).shape)
    self.assertEqual((), torch.acosh(zero_d).shape)
    self.assertEqual((), torch.asinh(zero_d).shape)
    self.assertEqual((), torch.atanh(zero_d).shape)
    self.assertEqual((), torch.tanh(zero_d).shape)
    self.assertEqual((), torch.erf(zero_d).shape)
    self.assertEqual((), torch.erfc(zero_d).shape)
    self.assertEqual((), torch.reciprocal(zero_d).shape)
    self.assertEqual((1,), torch.exp(one_d).shape)
    self.assertEqual((1,), torch.cos(one_d).shape)
    self.assertEqual((1,), torch.cosh(one_d).shape)
    self.assertEqual((1,), torch.tan(one_d).shape)
    self.assertEqual((1,), torch.atan(one_d).shape)
    self.assertEqual((1,), torch.acosh(one_d).shape)
    self.assertEqual((1,), torch.asinh(one_d).shape)
    self.assertEqual((1,), torch.atanh(one_d).shape)
    self.assertEqual((1,), torch.tanh(one_d).shape)
    self.assertEqual((1,), torch.erf(one_d).shape)
    self.assertEqual((1,), torch.erfc(one_d).shape)
    self.assertEqual((1,), torch.reciprocal(one_d).shape)

    # clamp
    self.assertEqual((), torch.clamp(zero_d, min=0, max=1).shape)
    self.assertEqual((), torch.clamp(zero_d, min=0).shape)
    self.assertEqual((), torch.clamp(zero_d, max=1).shape)
    self.assertEqual((1,), torch.clamp(one_d, min=0, max=1).shape)
    self.assertEqual((1,), torch.clamp(one_d, min=0).shape)
    self.assertEqual((1,), torch.clamp(one_d, max=1).shape)

    # cumsum, cumprod, cummax, cummin
    self.assertEqual((), torch.logcumsumexp(zero_d, 0).shape)
    self.assertEqual((), torch.cumsum(zero_d, 0).shape)
    self.assertEqual((), torch.cumprod(zero_d, 0).shape)
    self.assertEqual((), torch.cummax(zero_d, 0)[0].shape)
    self.assertEqual((), torch.cummin(zero_d, 0)[0].shape)

    # sort, topk
    self.assertEqual([(), ()], [x.shape for x in torch.sort(zero_d, 0, False)])
    self.assertEqual([(), ()], [x.shape for x in torch.sort(zero_d, 0, True)])
    self.assertEqual([(), ()], [x.shape for x in torch.topk(zero_d, 1, 0, False)])
    self.assertEqual([(), ()], [x.shape for x in torch.topk(zero_d, 1, 0, True)])

    # max, min
    self.assertEqual((), torch.max(zero_d, zero_d).shape)
    self.assertEqual((1,), torch.max(one_d, zero_d).shape)
    self.assertEqual((1,), torch.max(zero_d, one_d).shape)
    self.assertEqual((), torch.min(zero_d, zero_d).shape)
    self.assertEqual((1,), torch.min(one_d, zero_d).shape)
    self.assertEqual((1,), torch.min(zero_d, one_d).shape)

    zero_d_int = torch.tensor(1, device=device)
    one_d_int = torch.tensor([1], device=device)

    # lshift, rshift
    self.assertEqual((), (zero_d_int >> zero_d_int).shape)
    self.assertEqual((), (zero_d_int >> 1).shape)
    self.assertEqual((1,), (one_d_int >> zero_d_int).shape)
    self.assertEqual((1,), (zero_d_int >> one_d_int).shape)
    self.assertEqual((1,), (one_d_int >> 1).shape)

    self.assertEqual((), (zero_d_int << zero_d_int).shape)
    self.assertEqual((), (zero_d_int << 1).shape)
    self.assertEqual((1,), (one_d_int << zero_d_int).shape)
    self.assertEqual((1,), (zero_d_int << one_d_int).shape)
    self.assertEqual((1,), (one_d_int << 1).shape)

    # or
    self.assertEqual((), (zero_d_int | zero_d_int).shape)
    self.assertEqual((), (zero_d_int | 1).shape)
    self.assertEqual((1,), (one_d_int | zero_d_int).shape)
    self.assertEqual((1,), (zero_d_int | one_d_int).shape)
    self.assertEqual((1,), (one_d_int | 1).shape)

    # and
    self.assertEqual((), (zero_d_int & zero_d_int).shape)
    self.assertEqual((), (zero_d_int & 1).shape)
    self.assertEqual((1,), (one_d_int & zero_d_int).shape)
    self.assertEqual((1,), (zero_d_int & one_d_int).shape)
    self.assertEqual((1,), (one_d_int & 1).shape)

    # clone
    self.assertEqual((), zero_d.clone().shape)

    zero_d_bool = torch.tensor(True, device=device)
    one_d_bool = torch.tensor([True], device=device)

    # masked_select
    self.assertEqual((1,), torch.masked_select(zero_d_bool, zero_d_bool).shape)
    self.assertEqual((1,), torch.masked_select(zero_d_bool, one_d_bool).shape)
    self.assertEqual((1,), torch.masked_select(one_d_bool, zero_d_bool).shape)

    zero_d_uint8 = torch.tensor(1, dtype=torch.uint8, device=device)
    one_d_uint8 = torch.tensor([1], dtype=torch.uint8, device=device)

    # uint8 masks are deprecated; silence the deprecation warnings here.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        self.assertEqual((1,), torch.masked_select(zero_d_uint8, zero_d_uint8).shape)
        self.assertEqual((1,), torch.masked_select(zero_d_uint8, one_d_uint8).shape)
        self.assertEqual((1,), torch.masked_select(one_d_uint8, zero_d_uint8).shape)

    # mode
    self.assertEqual([(), ()], [x.shape for x in torch.mode(zero_d, dim=0, keepdim=True)])
    self.assertEqual([(), ()], [x.shape for x in torch.mode(zero_d, dim=0, keepdim=False)])
    self.assertEqual([(1,), (1,)], [x.shape for x in torch.mode(one_d, dim=0, keepdim=True)])
    self.assertEqual([(), ()], [x.shape for x in torch.mode(one_d, dim=0, keepdim=False)])

    # max
    self.assertEqual([(), ()], [x.shape for x in torch.max(zero_d, dim=0, keepdim=True)])
    self.assertEqual([(), ()], [x.shape for x in torch.max(zero_d, dim=0, keepdim=False)])
    self.assertEqual([(1,), (1,)], [x.shape for x in torch.max(one_d, dim=0, keepdim=True)])
    self.assertEqual([(), ()], [x.shape for x in torch.max(one_d, dim=0, keepdim=False)])

    # amax
    self.assertEqual((), torch.amax(zero_d, dim=0, keepdim=True).shape)
    self.assertEqual((), torch.amax(zero_d, dim=0, keepdim=False).shape)
    self.assertEqual((1,), torch.amax(one_d, dim=0, keepdim=True).shape)
    self.assertEqual((), torch.amax(one_d, dim=0, keepdim=False).shape)

    # min
    self.assertEqual([(), ()], [x.shape for x in torch.min(zero_d, dim=0, keepdim=True)])
    self.assertEqual([(), ()], [x.shape for x in torch.min(zero_d, dim=0, keepdim=False)])
    self.assertEqual([(1,), (1,)], [x.shape for x in torch.min(one_d, dim=0, keepdim=True)])
    self.assertEqual([(), ()], [x.shape for x in torch.min(one_d, dim=0, keepdim=False)])

    # amin
    self.assertEqual((), torch.amin(zero_d, dim=0, keepdim=True).shape)
    self.assertEqual((), torch.amin(zero_d, dim=0, keepdim=False).shape)
    self.assertEqual((1,), torch.amin(one_d, dim=0, keepdim=True).shape)
    self.assertEqual((), torch.amin(one_d, dim=0, keepdim=False).shape)

    # set_
    zero_d_clone = zero_d.clone()
    one_d_clone = one_d.clone()
    self.assertEqual((), zero_d_clone.set_(one_d.storage(), 0, (), ()).shape)
    self.assertEqual((1,), zero_d_clone.set_(one_d.storage(), 0, (1,), (1,)).shape)
    self.assertEqual((), one_d_clone.set_(one_d.storage(), 0, (), ()).shape)
    self.assertEqual((1,), one_d_clone.set_(one_d.storage(), 0, (1,), (1,)).shape)

    self.assertEqual((), zero_d.clone().set_(zero_d).shape)
    self.assertEqual((), one_d.clone().set_(zero_d).shape)
    self.assertEqual((1,), zero_d.clone().set_(one_d).shape)
    self.assertEqual((1,), one_d.clone().set_(one_d).shape)

    # take
    self.assertEqual((), torch.randn((2, 3), device=device).take(zero_d_int).shape)
    self.assertEqual((1,), torch.randn((2, 3), device=device).take(one_d_int).shape)

    # gather
    self.assertEqual((), torch.gather(zero_d, 0, torch.zeros((), dtype=torch.int64, device=device)).shape)
    self.assertEqual((1,), torch.gather(zero_d, 0, torch.zeros((1,), dtype=torch.int64, device=device)).shape)
    self.assertEqual((), torch.gather(one_d, 0, torch.zeros((), dtype=torch.int64, device=device)).shape)
    self.assertEqual((1,), torch.gather(one_d, 0, torch.zeros((1,), dtype=torch.int64, device=device)).shape)

    # normal
    # std must be >= 0
    zero_d_ge_0 = torch.rand((), device=device)
    # documentation says out shape matches shape of mean
    self.assertEqual((), torch.normal(zero_d, zero_d_ge_0).shape)
    self.assertEqual((1,), torch.normal(one_d, zero_d_ge_0).shape)
    self.assertEqual((), torch.normal(1, zero_d_ge_0).shape)
    self.assertEqual((), torch.normal(zero_d, 1).shape)
    self.assertEqual((1,), torch.normal(one_d, 1).shape)
    # TODO: this behavior differs on CPU and GPU, see https://github.com/pytorch/pytorch/issues/30480.
    # self.assertEqual((), torch.normal(zero_d, one_d).shape)
    # self.assertEqual((), torch.normal(1, one_d).shape)

    # convolutions.  Yes, we are testing nn.functional here; seems justified
    # given its similar to the other tests
    w = torch.randn(2, 1, 3, 3, device=device).div_(2).requires_grad_()
    self.assertRaises(RuntimeError, lambda: torch.nn.functional.conv2d(zero_d, w, groups=1))
    self.assertRaises(RuntimeError, lambda: torch.nn.functional.conv2d(zero_d, w, groups=2))

    # nll_loss -- verify input can't be 0-dimensional.
    self.assertRaises(ValueError, lambda: torch.nn.functional.nll_loss(zero_d, zero_d, reduction='none'))
    self.assertRaises(ValueError, lambda: torch.nn.functional.nll_loss(zero_d, one_d, reduction='none'))
    # verify output is 0-dimensional when reduction != 'none'
    for (input, target) in ((torch.randn(1, 1, device=device), torch.tensor([0], device=device)),
                            (torch.randn(1, 1, 1, 1, device=device), torch.tensor([[[0]]], device=device))):
        self.assertEqual((), torch.nn.functional.nll_loss(input, target, reduction='mean').shape)
        self.assertEqual((), torch.nn.functional.nll_loss(input, target, reduction='sum').shape)

    # multilabel_margin_loss
    for input in (zero_d, one_d, torch.randn(1, 1, device=device)):
        for target in (torch.tensor(0, device=device), torch.tensor([0], device=device), torch.tensor([[0]], device=device)):
            if (input.dim() <= 1 and target.dim() <= 1) or (input.dim() == 2 and target.dim() == 2):
                output_shape = (target.shape[0],) if target.dim() == 2 else ()
                self.assertEqual(output_shape,
                                 torch.nn.functional.multilabel_margin_loss(input, target, reduction='none').shape)
                self.assertEqual((), torch.nn.functional.multilabel_margin_loss(input, target, reduction='mean').shape)
                self.assertEqual((), torch.nn.functional.multilabel_margin_loss(input, target, reduction='sum').shape)
            else:
                self.assertRaises(RuntimeError,
                                  lambda: torch.nn.functional.multilabel_margin_loss(input, target, reduction='none'))
                self.assertRaises(RuntimeError,
                                  lambda: torch.nn.functional.multilabel_margin_loss(input, target, reduction='mean'))
                self.assertRaises(RuntimeError,
                                  lambda: torch.nn.functional.multilabel_margin_loss(input, target, reduction='sum'))

    # multi_margin_loss
    for input in (zero_d, one_d, torch.randn(1, 1, device=device)):
        for target in (torch.tensor(0, device=device), torch.tensor([0], device=device)):
            self.assertEqual(target.shape, torch.nn.functional.multi_margin_loss(input, target, reduction='none').shape)
            self.assertEqual((), torch.nn.functional.multi_margin_loss(input, target, reduction='mean').shape)
            self.assertEqual((), torch.nn.functional.multi_margin_loss(input, target, reduction='sum').shape)
# Uses mismatched arange out size to trigger a warning
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
@unittest.skipIf(TEST_WITH_CROSSREF, "crossref perturbs line numbering")
def test_cpp_warnings_have_python_context(self, device):
    """Warnings raised from C++ must carry Python source context.

    Covers three cases: an eager-mode C++ warning, the same warning from a
    jitted function, and a jitted Python ``warnings.warn``.

    NOTE(review): the ``frameinfo.lineno - 6`` assertions below encode the
    exact line distance between the warning call site and the
    ``getframeinfo`` call; do not insert or delete lines between those two
    points.
    """
    # Creates long string in advance to avoid a too-long Python line
    s = ".+Triggered internally at.+RangeFactories.+"

    # Writing only 3 values into a 5-element `out` makes arange warn on the C++ side.
    def cpp_warn_fn():
        out = torch.empty((5,))
        torch.arange(0, 3, out=out)
        return out

    # Checks eager-mode cpp warning
    with warnings.catch_warnings(record=True) as w:
        cpp_warn_fn()
        frameinfo = inspect.getframeinfo(inspect.currentframe())
        warning = w[0]

    # Checks for cpp context in the warning message
    escaped_warning_message = str(warning.message).encode('unicode_escape')
    self.assertTrue(re.search(s, repr(escaped_warning_message), re.IGNORECASE) is not None)

    # Checks the Python features of the warning
    # Note: the eager mode warning refers to the line in the function
    # that throws the warning.
    self.assertEqual(frameinfo.lineno - 6, warning.lineno)
    self.assertEqual(len(w), 1)

    # Checks jitted cpp warning
    with warnings.catch_warnings(record=True) as w:
        scripted_cpp_warn_fn = torch.jit.script(cpp_warn_fn)
        scripted_cpp_warn_fn()
        warning = w[0]

    # Checks for cpp context in the warning message
    escaped_warning_message = str(warning.message).encode('unicode_escape')
    self.assertTrue(re.search(s, repr(escaped_warning_message), re.IGNORECASE) is not None)

    # Checks the Python features of the warning
    # Note: the jitted warning's lineno refers to the call to the jitted
    # function, which in our test suite has a layer of indirection
    # that makes checking the Python lineno fragile
    self.assertEqual(len(w), 1)

    # Checks jitted Python warning
    def warn_fn():
        warnings.warn("Warning!")

    # The jit mimics an eager-mode Python warning in this case
    with warnings.catch_warnings(record=True) as w:
        scripted_warn_fn = torch.jit.script(warn_fn)
        scripted_warn_fn()
        frameinfo = inspect.getframeinfo(inspect.currentframe())
        warning = w[0]

    self.assertTrue(re.search('Warning!', str(warning.message)) is not None)

    # Checks the Python features of the warning
    self.assertEqual(frameinfo.lineno - 6, warning.lineno)
    self.assertEqual(len(w), 1)
# FIXME: move to test_testing
@onlyCPU
def test_warn_always_caught(self, device):
    """A TORCH_WARN_ONCE warning can be caught repeatedly.

    assertWarnsOnceRegex uses set_warn_always(True), which turns
    TORCH_WARN_ONCE into TORCH_WARN, so every emission is observable.
    """
    arr = np.arange(10)
    arr.flags.writeable = False
    # Catching the "once" warning should work any number of times.
    for _ in range(2):
        with self.assertWarnsOnceRegex(UserWarning, '.*non-writable.*'):
            torch.from_numpy(arr)
    # Two emissions inside a single context manager must also pass.
    with self.assertWarnsOnceRegex(UserWarning, '.*non-writable.*'):
        torch.from_numpy(arr)
        torch.from_numpy(arr)
@onlyNativeDeviceTypes
def test_complex_half_experimental_warning(self, device):
    """Every chalf factory and *_like op emits the experimental-support warning."""
    msg = 'ComplexHalf support is experimental'
    with self.assertWarnsOnceRegex(UserWarning, msg):
        t = torch.randn(3, dtype=torch.chalf, device=device)
    # Plain factory functions.
    for factory in (torch.rand, torch.empty, torch.ones, torch.zeros):
        with self.assertWarnsOnceRegex(UserWarning, msg):
            factory(3, dtype=torch.chalf, device=device)
    # *_like variants inherit the chalf dtype from `t`.
    for like_fn in (torch.randn_like, torch.rand_like, torch.empty_like,
                    torch.ones_like, torch.zeros_like):
        with self.assertWarnsOnceRegex(UserWarning, msg):
            like_fn(t)
    with self.assertWarnsOnceRegex(UserWarning, msg):
        # t + 1 allocates a new tensor for result using empty
        t + 1
# TODO: this test should be in test_nn.py
def test_conv_transposed_backward_agnostic_to_memory_format(self, device):
in_channels = 64
out_channels = 128
scale_factor = 8
batch_size = 8
length = 16
conv = torch.nn.ConvTranspose1d(
in_channels, out_channels, kernel_size=scale_factor * 2, stride=scale_factor).to(device)
layer_norm = torch.nn.LayerNorm(out_channels).to(device)
input_ = torch.randn(batch_size, in_channels, length).to(device).contiguous()
input_ = conv(input_).contiguous()
input_ = layer_norm(input_.transpose(1, 2).contiguous()).contiguous()
input_.sum().backward()
# 3d
conv = torch.nn.ConvTranspose3d(3, 3, kernel_size=3).to(device)
input = torch.randn(batch_size, 3, length, length, length, device=device)
out = conv(input)
out.backward(torch.ones_like(out).transpose(-2, -1))
# TODO: this test should be in test_nn.py
@onlyCUDA
@largeTensorTest('12GB')
def test_conv_transposed_large(self, device):
    """ConvTranspose3d works for large input tensors (gh-32866)."""
    transposed_conv = torch.nn.ConvTranspose3d(
        64, 128, kernel_size=5,
        stride=2, padding=2, output_padding=1).to(device)
    big_input = torch.rand([1, 64, 8, 128, 172]).to(device)
    # Success criterion is simply that the forward pass does not blow up.
    transposed_conv(big_input)
def test_is_set_to(self, device):
    """is_set_to is true only for tensors sharing storage, sizes, and strides."""
    base = torch.empty(3, 4, 9, 10, device=device)
    unrelated = torch.empty(3, 4, 9, 10, device=device)
    alias = torch.tensor([], device=device).set_(base)
    resized = alias.clone().resize_(12, 90)
    self.assertFalse(base.is_set_to(unrelated))
    self.assertTrue(base.is_set_to(alias))
    self.assertTrue(alias.is_set_to(base), "is_set_to should be symmetric")
    self.assertFalse(base.is_set_to(resized))
    self.assertFalse(torch.tensor([]).is_set_to(torch.tensor([])),
                     "Tensors with no storages should not appear to be set "
                     "to each other")

    bool_src = torch.tensor([True, True], dtype=torch.bool, device=device)
    bool_alias = torch.tensor([0], dtype=torch.bool, device=device).set_(bool_src)
    self.assertTrue(bool_src.is_set_to(bool_alias))

    # Sizes must match, even when storage is shared.
    base = torch.empty([2, 3, 4], device=device)
    reshaped = base.view(4, 3, 2)
    self.assertFalse(base.is_set_to(reshaped))
    self.assertFalse(reshaped.is_set_to(base))

    # The legacy behavior of collapsing all empty tensors to size [0]
    # no longer makes differently-shaped empties "set to" each other.
    base = torch.empty([2, 5, 0], device=device)
    empty_view = base.view([0])
    self.assertFalse(base.is_set_to(empty_view))
    self.assertFalse(empty_view.is_set_to(base))
# See https://github.com/pytorch/pytorch/issues/72650
@skipIfMps
@skipMeta
@parametrize(
    "fn",
    [
        "dist", "atan2", "pow", "lerp", "add", "sub", "mul", "div", "fmod", "remainder", "eq", "ge", "gt", "le",
        "lt", "max", "min", "ne", "addcdiv", "addcmul", "masked_scatter", "masked_select", "masked_fill", "map",
        "map2", "copy",
    ],
)
def test_broadcast(self, fn, device):
    """Checks that broadcasting gives the same result as pre-expanded inputs.

    For each parametrized op `fn`, runs the tensor method, the torch.*
    function (when it exists), and the in-place variant, comparing
    broadcast inputs against explicitly `.expand()`-ed ones.
    """
    # functions with three tensor arguments
    fns_3_args = {"map2"}
    fns_value_kwarg = {"addcdiv", "addcmul"}

    (dims_small, dims_large, dims_full) = self._select_broadcastable_dims()
    full1d = torch.randn(*dims_full, device=device).flatten().float()
    small = torch.randn(*dims_small, device=device).float()
    large = torch.randn(*dims_large, device=device).float()
    small_expanded = small.expand(*dims_full)
    large_expanded = large.expand(*dims_full)
    small2 = None
    small2_expanded = None
    if fn in fns_3_args or fn in fns_value_kwarg:
        # create another smaller tensor
        (dims_small2, _, _) = self._select_broadcastable_dims(dims_full)
        small2 = torch.randn(*dims_small2, device=device).float()
        small2_expanded = small2.expand(*dims_full)

    if small.is_cuda and fn in ['map', 'map2']:
        # map and map2 are not implemented on CUDA tensors
        return

    if hasattr(large_expanded, fn):
        # run through tensor versions of functions
        # and verify fully expanded inputs give same results
        expanded = {large: large_expanded, small: small_expanded, small2: small2_expanded}

        # Dispatches to the right call shape for the method form of `fn`.
        def tensorfn(myfn, t1, t2):
            if fn == "lerp":
                return myfn(t1, 0.5)
            elif fn == "masked_select":
                return myfn(t1 < 0)
            elif fn == "masked_scatter":
                return myfn(t1 < 0.5, full1d)
            elif fn == "masked_fill":
                return myfn(t1 < 0.5, 1.0)
            elif fn in fns_3_args:
                return myfn(1, t1, t2)
            elif fn in fns_value_kwarg:
                return myfn(t1, t2, value=1)
            else:
                return myfn(t1)

        # test various orders
        for first, second, third in [(large, small, small2), (small, large, small2),
                                     (small2, small, large), (small2, large, small)]:
            if first is None:
                break  # ignore last iter when small2 is None
            method_expanded = getattr(expanded[first], fn)
            method = getattr(first, fn)
            r1 = tensorfn(method_expanded, expanded[second], expanded[third])
            r2 = tensorfn(method, second, third)
            self.assertEqual(r1, r2)

    # now for torch. versions of functions
    if hasattr(torch, fn):
        fntorch = getattr(torch, fn)
        expanded = {large: large_expanded, small: small_expanded, small2: small2_expanded}

        # Dispatches to the right call shape for the torch.* form of `fn`.
        def torchfn(t1, t2, t3):
            if fn == "lerp":
                return fntorch(t1, t2, 0.5)
            elif fn == "masked_select":
                return fntorch(t1, t2 < 0)
            elif fn == "masked_scatter":
                return fntorch(t1, t2 < 0.5, full1d)
            elif fn == "masked_fill":
                return fntorch(t1, t2 < 0.5, 1.0)
            elif fn in fns_3_args:
                return fntorch(t1, 1.0, t2, t3)
            elif fn in fns_value_kwarg:
                return fntorch(t1, t2, t3, value=1.0)
            else:
                return fntorch(t1, t2)

        # test various orders
        for first, second, third in [(large, small, small2), (small, large, small2),
                                     (small2, small, large), (small2, large, small)]:
            if first is None:
                break  # ignore last iter when small2 is None
            r1 = torchfn(expanded[first], expanded[second], expanded[third])
            r2 = torchfn(first, second, third)
            self.assertEqual(r1, r2)

    # now for in place functions
    # in-place tensor is not broadcastable; test only guaranteed
    # to work by broadcasting other argument(s)
    if not hasattr(large_expanded, fn + "_"):
        return

    # need to clone largeExpanded so we can reuse, since functions are in-place
    large_expanded_clone = large_expanded.clone()

    # Dispatches to the right call shape for the in-place (fn_) form.
    def tensorfn_inplace(t0, t1, t2=None):
        t0_fn = getattr(t0, fn + "_")
        if fn == "lerp":
            return t0_fn(t1, 0.5)
        elif fn == "masked_scatter":
            return t0_fn(t1 < 0.5, full1d)
        elif fn == "masked_fill":
            return t0_fn(t1 < 0.5, 1.0)
        elif fn == "map":
            return t0_fn(t1, lambda x, y: x + y)
        elif fn == "map2":
            return t0_fn(t1, t2, lambda x, y, z: x + y + z)
        elif fn in fns_3_args:
            return t0_fn(1.0, t1, t2)
        elif fn in fns_value_kwarg:
            return t0_fn(t1, t2, value=1.0)
        else:
            return t0_fn(t1)

    # in-place pointwise operations don't actually work if the in-place
    # tensor is 0-strided (numpy has the same issue)
    if (0 not in large_expanded.stride() and 0 not in large_expanded_clone.stride()):
        r1 = tensorfn_inplace(large_expanded, small_expanded, small2_expanded)
        r2 = tensorfn_inplace(large_expanded_clone, small, small2)
        self.assertEqual(r1, r2)

    # True iff t1 (and t2, when given) broadcast to t0's shape.
    def broadcastable(t0, t1, t2=None):
        try:
            t1.expand_as(t0)
            if t2 is not None:
                t2.expand_as(t0)
        except RuntimeError:
            return False
        return True

    # Non-broadcastable different-numel args must raise; same-numel may succeed.
    def _test_in_place_broadcastable(t0, t1, t2=None):
        if not broadcastable(t0, t1, t2):
            same_size = t0.numel() == t1.numel() and (t0.numel() == t2.numel() if t2 is not None else True)
            if not same_size:
                self.assertRaises(RuntimeError, lambda: tensorfn_inplace(t0, t1, t2))
        else:
            tensorfn_inplace(t0, t1, t2)

    if fn not in fns_3_args and fn not in fns_value_kwarg:
        _test_in_place_broadcastable(small, large_expanded)
        _test_in_place_broadcastable(small, large)
    else:
        _test_in_place_broadcastable(small2, small_expanded, large_expanded)
        _test_in_place_broadcastable(small2, small, large)
@unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "cublas runtime error")
@onlyCUDA
@wrapDeterministicFlagAPITest
def test_cublas_config_nondeterministic_alert(self, device):
    """Runs mm/mv/bmm in subprocesses with use_deterministic_algorithms(True)
    and checks that a CuBLAS nondeterminism error is raised exactly when
    CUBLAS_WORKSPACE_CONFIG does not request a deterministic workspace.

    Each (function, config) pair runs in a fresh subprocess — presumably
    because the env var must be set before CuBLAS is initialized; verify.
    """
    test_cases = [
        # (function, (tensor sizes))
        ('mm', ((2, 2), (2, 2),)),
        ('mv', ((2, 2), (2,),)),
        ('bmm', ((1, 2, 2), (1, 2, 2),))]

    test_configs = [
        # (CuBLAS workspace config, is deterministic)
        ('garbage', False),
        (None, False),
        (':4096:8', True),
        (':16:8', True)]

    cublas_var_name = 'CUBLAS_WORKSPACE_CONFIG'
    # The deterministic-workspace alert only applies on CUDA >= 10.2.
    is_cuda10_2_or_higher = (
        (torch.version.cuda is not None)
        and ([int(x) for x in torch.version.cuda.split(".")] >= [10, 2]))

    def test_case_info(fn_name, config):
        return f'function "{fn_name}" with config "{"" if config is None else config}"'

    # Create processes to test each combination of test cases and config settings
    processes = []  # NOTE(review): unused; appears vestigial
    for fn_name, arg_sizes in test_cases:
        for config, is_config_deterministic in test_configs:
            # Start from the parent environment with only the CuBLAS var changed.
            env = os.environ.copy()
            if config is None:
                if env.get(cublas_var_name) is not None:
                    del env[cublas_var_name]
            else:
                env[cublas_var_name] = config
            should_throw_error = is_cuda10_2_or_higher and not is_config_deterministic
            script = f"""
import torch
torch.use_deterministic_algorithms(True)
fn = torch.{fn_name}
arg_sizes = {arg_sizes}
device = '{device}'
should_throw_error = {should_throw_error}
args = []
for arg_size in arg_sizes:
    args.append(torch.randn(*arg_size, device=device))
try:
    fn(*args)
except RuntimeError as e:
    if not should_throw_error:
        raise RuntimeError('Did not expect any error to be raised')
    elif 'Deterministic behavior was enabled with either' not in str(e):
        raise RuntimeError('Expected a CuBLAS nondeterministic error, but got a different error')
else:
    if should_throw_error:
        raise RuntimeError('Expected a CuBLAS nondeterministic error, but it was not raised')
"""
            try:
                subprocess.check_output(
                    [sys.executable, '-c', script],
                    stderr=subprocess.STDOUT,
                    # On Windows, opening the subprocess with the default CWD makes `import torch`
                    # fail, so just set CWD to this script's directory
                    cwd=os.path.dirname(os.path.realpath(__file__)),
                    env=env)
            except subprocess.CalledProcessError as e:
                self.fail(msg=(
                    f'Subprocess exception while attempting to run {test_case_info(fn_name, config)}:\n'
                    + e.output.decode("utf-8")))
# FIXME: update OpInfos to support "nondeterministic samples" and port these tests
# to that architecture
@skipIfMps
def test_nondeterministic_alert_AvgPool3d(self, device):
    """Backward of AvgPool3d triggers the avg_pool3d_backward_cuda alert on CUDA."""
    pool = torch.nn.AvgPool3d(3)
    inp = torch.randn(2, 3, 3, 3, requires_grad=True, device=device)
    out = pool(inp)
    grad_out = torch.ones_like(out)

    @expectedAlertNondeterministic('avg_pool3d_backward_cuda', ['cuda'])
    def trigger_backward(slf, device):
        out.backward(grad_out)

    trigger_backward(self, device)
@skipIfMps
def test_nondeterministic_alert_AdaptiveAvgPool2d(self, device):
    """Backward of AdaptiveAvgPool2d triggers the CUDA nondeterminism alert."""
    pool = torch.nn.AdaptiveAvgPool2d(3)
    inp = torch.randn(2, 3, 3, requires_grad=True, device=device)
    out = pool(inp)
    grad_out = torch.ones_like(out)

    @expectedAlertNondeterministic('adaptive_avg_pool2d_backward_cuda', ['cuda'])
    def trigger_backward(slf, device):
        out.backward(grad_out)

    trigger_backward(self, device)
@skipIfMps
def test_nondeterministic_alert_AdaptiveAvgPool3d(self, device):
    """Backward of AdaptiveAvgPool3d triggers the CUDA nondeterminism alert."""
    pool = torch.nn.AdaptiveAvgPool3d(3)
    inp = torch.randn(2, 3, 3, 3, requires_grad=True, device=device)
    out = pool(inp)
    grad_out = torch.ones_like(out)

    @expectedAlertNondeterministic('adaptive_avg_pool3d_backward_cuda', ['cuda'])
    def trigger_backward(slf, device):
        out.backward(grad_out)

    trigger_backward(self, device)
@skipIfMps
def test_nondeterministic_alert_MaxPool3d(self, device):
    """Backward of MaxPool3d triggers the CUDA nondeterminism alert."""
    pool = torch.nn.MaxPool3d(3)
    inp = torch.randn(2, 3, 3, 3, requires_grad=True, device=device)
    out = pool(inp)
    grad_out = torch.ones_like(out)

    @expectedAlertNondeterministic('max_pool3d_with_indices_backward_cuda', ['cuda'])
    def trigger_backward(slf, device):
        out.backward(grad_out)

    trigger_backward(self, device)
@skipIfMps
def test_nondeterministic_alert_AdaptiveMaxPool2d(self, device):
    """Backward of AdaptiveMaxPool2d triggers the CUDA nondeterminism alert."""
    pool = torch.nn.AdaptiveMaxPool2d(3)
    inp = torch.randn(2, 3, 3, requires_grad=True, device=device)
    out = pool(inp)
    grad_out = torch.ones_like(out)

    @expectedAlertNondeterministic('adaptive_max_pool2d_backward_cuda', ['cuda'])
    def trigger_backward(slf, device):
        out.backward(grad_out)

    trigger_backward(self, device)
@skipIfMps
def test_nondeterministic_alert_FractionalMaxPool2d(self, device):
    """Backward of FractionalMaxPool2d triggers the CUDA nondeterminism alert."""
    pool = torch.nn.FractionalMaxPool2d(2, output_ratio=0.5)
    inp = torch.randn(2, 3, 3, 3, requires_grad=True, device=device)
    out = pool(inp)
    grad_out = torch.ones_like(out)

    @expectedAlertNondeterministic('fractional_max_pool2d_backward_cuda', ['cuda'])
    def trigger_backward(slf, device):
        out.backward(grad_out)

    trigger_backward(self, device)
@skipIfMps
def test_nondeterministic_alert_FractionalMaxPool3d(self, device):
    """Backward of FractionalMaxPool3d triggers the CUDA nondeterminism alert."""
    pool = torch.nn.FractionalMaxPool3d(2, output_ratio=0.5)
    inp = torch.randn(2, 3, 3, 3, 3, requires_grad=True, device=device)
    out = pool(inp)
    grad_out = torch.ones_like(out)

    @expectedAlertNondeterministic('fractional_max_pool3d_backward_cuda', ['cuda'])
    def trigger_backward(slf, device):
        out.backward(grad_out)

    trigger_backward(self, device)
@skipIfMps
def test_nondeterministic_alert_interpolate_linear(self, device):
    """Backward of linear interpolation triggers the CUDA nondeterminism alert."""
    inp = torch.randn(1, 2, 4, device=device, requires_grad=True)
    out = torch.nn.functional.interpolate(
        inp, size=12, mode='linear', align_corners=False)
    grad_out = torch.ones_like(out)

    @expectedAlertNondeterministic('upsample_linear1d_backward_out_cuda', ['cuda'])
    def trigger_backward(slf, device):
        out.backward(grad_out)

    trigger_backward(self, device)
def test_nondeterministic_alert_interpolate_bilinear(self, device):
    """Backward of bilinear interpolation triggers the CUDA nondeterminism alert."""
    inp = torch.randn(1, 2, 4, 4, device=device, requires_grad=True)
    out = torch.nn.functional.interpolate(
        inp, size=12, mode='bilinear', align_corners=False)
    grad_out = torch.ones_like(out)

    @expectedAlertNondeterministic('upsample_bilinear2d_backward_out_cuda', ['cuda'])
    def trigger_backward(slf, device):
        out.backward(grad_out)

    trigger_backward(self, device)
@skipIfMps
def test_nondeterministic_alert_interpolate_bicubic(self, device):
    """Backward of bicubic interpolation triggers the CUDA nondeterminism alert."""
    inp = torch.randn(1, 2, 4, 4, device=device, requires_grad=True)
    out = torch.nn.functional.interpolate(
        inp, size=12, mode='bicubic', align_corners=False)
    grad_out = torch.ones_like(out)

    @expectedAlertNondeterministic('upsample_bicubic2d_backward_out_cuda', ['cuda'])
    def trigger_backward(slf, device):
        out.backward(grad_out)

    trigger_backward(self, device)
@skipIfMps
def test_nondeterministic_alert_interpolate_trilinear(self, device):
    """Backward of trilinear interpolation triggers the CUDA nondeterminism alert."""
    inp = torch.randn(1, 2, 4, 4, 4, device=device, requires_grad=True)
    out = torch.nn.functional.interpolate(
        inp, size=12, mode='trilinear', align_corners=False)
    grad_out = torch.ones_like(out)

    @expectedAlertNondeterministic('upsample_trilinear3d_backward_out_cuda', ['cuda'])
    def trigger_backward(slf, device):
        out.backward(grad_out)

    trigger_backward(self, device)
@skipIfMps
def test_nondeterministic_alert_ReflectionPad1d(self, device):
    """Backward of ReflectionPad1d triggers the CUDA nondeterminism alert."""
    pad = torch.nn.ReflectionPad1d((1, 2))
    inp = torch.randn(2, 3, 8, device=device, requires_grad=True)
    out = pad(inp)
    grad_out = torch.ones_like(out)

    @expectedAlertNondeterministic('reflection_pad1d_backward_out_cuda', ['cuda'])
    def trigger_backward(slf, device):
        out.backward(grad_out)

    trigger_backward(self, device)
def test_nondeterministic_alert_ReflectionPad2d(self, device):
    """Backward of ReflectionPad2d triggers the CUDA nondeterminism alert."""
    pad = torch.nn.ReflectionPad2d((1, 2, 3, 4))
    inp = torch.randn(2, 3, 8, 8, device=device, requires_grad=True)
    out = pad(inp)
    grad_out = torch.ones_like(out)

    @expectedAlertNondeterministic('reflection_pad2d_backward_cuda', ['cuda'])
    def trigger_backward(slf, device):
        out.backward(grad_out)

    trigger_backward(self, device)
@skipIfMps
def test_nondeterministic_alert_ReflectionPad3d(self, device):
    """Backward of ReflectionPad3d triggers the CUDA nondeterminism alert."""
    pad = torch.nn.ReflectionPad3d((1, 2, 3, 4, 5, 6))
    inp = torch.randn(2, 3, 8, 8, 8, device=device, requires_grad=True)
    out = pad(inp)
    grad_out = torch.ones_like(out)

    @expectedAlertNondeterministic('reflection_pad3d_backward_out_cuda', ['cuda'])
    def trigger_backward(slf, device):
        out.backward(grad_out)

    trigger_backward(self, device)
@skipIfMps
def test_nondeterministic_alert_ReplicationPad1d(self, device):
    """Backward of ReplicationPad1d triggers the CUDA nondeterminism alert."""
    pad = torch.nn.ReplicationPad1d((1, 2))
    inp = torch.randn(2, 3, 4, device=device, requires_grad=True)
    out = pad(inp)
    grad_out = torch.ones_like(out)

    @expectedAlertNondeterministic('replication_pad1d_backward_cuda', ['cuda'])
    def trigger_backward(slf, device):
        out.backward(grad_out)

    trigger_backward(self, device)
def test_nondeterministic_alert_ReplicationPad2d(self, device):
    """Backward of ReplicationPad2d triggers the CUDA nondeterminism alert."""
    pad = torch.nn.ReplicationPad2d((1, 2, 3, 4))
    inp = torch.randn(2, 3, 4, 4, device=device, requires_grad=True)
    out = pad(inp)
    grad_out = torch.ones_like(out)

    @expectedAlertNondeterministic('replication_pad2d_backward_cuda', ['cuda'])
    def trigger_backward(slf, device):
        out.backward(grad_out)

    trigger_backward(self, device)
@skipIfMps
def test_nondeterministic_alert_ReplicationPad3d(self, device):
    """Backward of ReplicationPad3d triggers the CUDA nondeterminism alert."""
    pad = torch.nn.ReplicationPad3d((1, 2, 3, 4, 5, 6))
    inp = torch.randn(2, 3, 4, 4, 4, device=device, requires_grad=True)
    out = pad(inp)
    grad_out = torch.ones_like(out)

    @expectedAlertNondeterministic('replication_pad3d_backward_cuda', ['cuda'])
    def trigger_backward(slf, device):
        out.backward(grad_out)

    trigger_backward(self, device)
def test_nondeterministic_alert_NLLLoss(self, device):
    """Forward of 2-D NLLLoss triggers the CUDA nondeterminism alert."""
    criterion = torch.nn.NLLLoss()
    inp = torch.randn(2, 3, 5, 5, device=device)
    target = torch.rand(2, 5, 5, device=device).mul(3).floor().long()

    @expectedAlertNondeterministic('nll_loss2d_forward_out_cuda_template', ['cuda'])
    def trigger_forward(slf, device):
        criterion(inp, target)

    trigger_forward(self, device)
def test_nondeterministic_alert_CTCLoss(self, device):
    """Backward of CTCLoss triggers the CUDA nondeterminism alert."""
    criterion = torch.nn.CTCLoss()
    inp = torch.randn(50, 3, 15, device=device, requires_grad=True)
    target = torch.randint(0, 14, (3, 30), device=device)
    input_lengths = [50, 50, 50]
    target_lengths = [30, 25, 20]
    out = criterion(inp, target, input_lengths, target_lengths)
    grad_out = torch.ones_like(out)

    @expectedAlertNondeterministic('ctc_loss_backward_gpu', ['cuda'])
    def trigger_backward(slf, device):
        out.backward(grad_out, retain_graph=True)

    trigger_backward(self, device)
def test_nondeterministic_alert_EmbeddingBag_max(self, device):
    """Backward of EmbeddingBag in 'max' mode triggers the CUDA nondeterminism alert."""
    bag = torch.nn.EmbeddingBag(
        4, 3, None, 2., False, 'max',
        _weight=torch.randn(4, 3, device=device, requires_grad=True))
    inp = torch.randint(0, 3, (4, 3), device=device)
    out = bag(inp)
    grad_out = torch.ones_like(out)

    @expectedAlertNondeterministic('embedding_bag_backward_cuda_max', ['cuda'])
    def trigger_backward(slf, device):
        out.backward(grad_out)

    trigger_backward(self, device)
@dtypes(*all_types_and_complex_and(torch.bool))
def test_nondeterministic_alert_cumsum(self, device, dtype):
    """cumsum alerts on CUDA for floating/complex dtypes; integral dtypes do not."""
    def check(op_call):
        inp = make_tensor((10,), dtype=dtype, device=device, low=-9, high=9)

        @expectedAlertNondeterministic('cumsum_cuda_kernel', ['cuda'])
        def trigger_forward(slf, device):
            op_call(inp, 0)

        if dtype.is_floating_point or dtype.is_complex:
            trigger_forward(self, device)
        else:
            # Integral cumsum must run cleanly even under the guard.
            with DeterministicGuard(True):
                op_call(inp, 0)

    check(torch.Tensor.cumsum)
    check(torch.cumsum)
def test_nondeterministic_alert_scatter_add(self, device):
    """All scatter_add entry points trigger the CUDA nondeterminism alert."""
    def check(op_call):
        inp = torch.randn(5, 4, device=device)
        index = torch.tensor([[3]], device=device)
        src = torch.tensor([[1.0]], device=device)

        @expectedAlertNondeterministic('scatter_add_cuda_kernel', ['cuda'])
        def trigger_forward(slf, device):
            op_call(inp, 0, index, src)

        trigger_forward(self, device)

    check(torch.Tensor.scatter_add_)
    check(torch.Tensor.scatter_add)
    check(torch.scatter_add)
@expectedFailureMeta  # expected a non-deterministic error, but it was not raised
@onlyNativeDeviceTypes
def test_nondeterministic_alert_put(self, device):
    """put/put_ without accumulation trigger the nondeterminism alert."""
    def check(op_call):
        dst = torch.randn(10, device=device)
        indices = torch.tensor([0, 0], device=device)
        values = torch.tensor([0., 1.], device=device)

        # No device filter here, unlike the accumulate=True variant below.
        @expectedAlertNondeterministic('put_')
        def trigger_forward(slf, device):
            op_call(dst, indices, values, accumulate=False)

        trigger_forward(self, device)

    check(torch.Tensor.put)
    check(torch.Tensor.put_)
def test_nondeterministic_alert_put_accumulate(self, device):
    """put/put_ with accumulate=True trigger the nondeterminism alert on CUDA."""
    def check(op_call):
        dst = torch.randn(10, device=device)
        indices = torch.tensor([0, 0], device=device)
        values = torch.tensor([0., 1.], device=device)

        @expectedAlertNondeterministic('put_', ['cuda'])
        def trigger_forward(slf, device):
            op_call(dst, indices, values, accumulate=True)

        trigger_forward(self, device)

    check(torch.Tensor.put)
    check(torch.Tensor.put_)
@skipIfMps
def test_nondeterministic_alert_histc(self, device):
    """histc (function and method form) triggers the _histc_cuda alert on CUDA."""
    def test_func(op_call):
        a = torch.tensor([], device=device)

        @expectedAlertNondeterministic('_histc_cuda', ['cuda'])
        def forward_func(slf, device):
            # The result is irrelevant; only the alert matters.
            # (Fixed: previously the result was bound to an unused local.)
            op_call(a, min=0, max=3)

        forward_func(self, device)

    test_func(torch.histc)
    test_func(torch.Tensor.histc)
@skipIfMps
def test_nondeterministic_alert_bincount(self, device):
    """bincount (function and method form) triggers the _bincount_cuda alert on CUDA."""
    def test_func(op_call):
        a = torch.tensor([], device=device, dtype=torch.long)

        @expectedAlertNondeterministic('_bincount_cuda', ['cuda'])
        def forward_func(slf, device):
            # The result is irrelevant; only the alert matters.
            # (Fixed: previously the result was bound to an unused local.)
            op_call(a)

        forward_func(self, device)

    test_func(torch.bincount)
    test_func(torch.Tensor.bincount)
# Ensures that kthvalue throws nondeterministic alerts in the correct cases
@dtypes(torch.double)
def test_nondeterministic_alert_kthvalue(self, device, dtype):
    @expectedAlertNondeterministic('kthvalue CUDA', ['cuda'])
    def run_kthvalue(slf, device, call_type):
        n, k = 10, 5
        a = torch.randn(n, device=device)
        if call_type == 'function':
            torch.kthvalue(a, k)
        elif call_type == 'method':
            a.kthvalue(k)
        elif call_type == 'out':
            values = torch.empty_like(a)
            indices = torch.empty((), device=device, dtype=torch.long)
            torch.kthvalue(a, k, out=(values, indices))
        else:
            self.fail(f"'{call_type}' is not a valid call type")

    # Every entry point is expected to alert on CUDA.
    for call_type in ('function', 'method', 'out'):
        run_kthvalue(self, device, call_type)
@onlyNativeDeviceTypes
def test_nondeterministic_alert_gather(self, device):
    """gather backward alerts on CUDA (via scatter_add_cuda_kernel)."""
    def check(op_call):
        src = torch.randn(3, 3, device=device, requires_grad=True)
        index = torch.tensor([[0]], device=device)
        out = op_call(src, 0, index)
        grad_out = torch.ones_like(out)

        @expectedAlertNondeterministic('scatter_add_cuda_kernel', ['cuda'])
        def trigger_backward(slf, device):
            out.backward(grad_out)

        trigger_backward(self, device)

    check(torch.gather)
    check(torch.Tensor.gather)
@skipIfMps
def test_nondeterministic_alert_grid_sample_2d(self, device):
    """Backward of 2-D grid_sample triggers the CUDA nondeterminism alert."""
    inp = torch.empty(1, 1, 2, 2, device=device, requires_grad=True)
    grid = torch.empty(1, 1, 1, 2, device=device)
    out = torch.nn.functional.grid_sample(inp, grid, align_corners=False)
    grad_out = torch.ones_like(out)

    @expectedAlertNondeterministic('grid_sampler_2d_backward_cuda', ['cuda'])
    def trigger_backward(slf, device):
        out.backward(grad_out)

    trigger_backward(self, device)
@skipIfMps
def test_nondeterministic_alert_grid_sample_3d(self, device):
    """Backward of 3-D grid_sample triggers the CUDA nondeterminism alert."""
    inp = torch.empty(1, 1, 2, 2, 2, device=device, requires_grad=True)
    grid = torch.empty(1, 1, 1, 2, 3, device=device)
    out = torch.nn.functional.grid_sample(inp, grid, align_corners=False)
    grad_out = torch.ones_like(out)

    @expectedAlertNondeterministic('grid_sampler_3d_backward_cuda', ['cuda'])
    def trigger_backward(slf, device):
        out.backward(grad_out)

    trigger_backward(self, device)
def test_invalid_shapes_grid_sampler(self, device):
    """Every grid_sampler entry point rejects mismatched input/grid batch sizes."""
    make_arg = partial(
        make_tensor, device=device, dtype=torch.float64, requires_grad=True)

    shapes = (
        # (input shape, grid shape)
        ((5, 5, 5, 5, 5,), (1, 1, 1, 4, 4,)),  # 3d
        ((5, 5, 5, 5,), (1, 1, 4, 4,)),  # 2d
    )

    err = "expected grid and input to have same batch size"
    for input_shape, grid_shape in shapes:
        inp = make_arg(input_shape)
        grid = make_arg(grid_shape, low=-1, high=1)
        # interpolation_mode=0, padding_mode=0, align_corners=True throughout.
        samplers = (
            torch.grid_sampler,  # wrapper for the 2d, 3d, and cuDNN functions
            torch.grid_sampler_2d,  # expects 2d input
            torch.grid_sampler_3d,  # expects 3d input
            torch._grid_sampler_2d_cpu_fallback,  # expects 2d input
        )
        for sampler in samplers:
            with self.assertRaisesRegex(RuntimeError, err):
                sampler(inp, grid, 0, 0, True)
        # cuDNN path expects 2d input, on CUDA.
        # Doesn't work on CPU and ROCm.
        if device != 'cpu' and TEST_CUDNN and not TEST_WITH_ROCM:
            with self.assertRaisesRegex(RuntimeError, err):
                torch.cudnn_grid_sampler(inp, grid)
def test_dist(self, device):
    """torch.dist(x, y, p) agrees with torch.norm(x - y, p) for assorted p."""
    def check(x, y):
        for p in [0, 1, 2, 3, 4, inf, -inf]:
            self.assertEqual(torch.dist(x, y, p), torch.norm(x - y, p))

    check(torch.randn(5, device=device), torch.randn(5, device=device))

    # A pair differing in exactly one element exercises the p=0 "count" case.
    x = torch.zeros(3, device=device)
    y = torch.zeros(3, device=device)
    y[1] = 1.
    check(x, y)
# Ensures that median throws nondeterministic alerts in the correct cases
@dtypes(torch.double)
def test_nondeterministic_alert_median(self, device, dtype):
    def run_median(slf, device, call_type):
        n = 10
        a = torch.randn(n, device=device)
        if call_type == 'function':
            torch.median(a)
        elif call_type == 'function with indices':
            torch.median(a, 0)
        elif call_type == 'method':
            a.median()
        elif call_type == 'method with indices':
            a.median(0)
        elif call_type == 'out with indices':
            values = torch.empty_like(a)
            indices = torch.empty((), dtype=torch.long, device=device)
            torch.median(a, 0, out=(values, indices))
        else:
            self.fail(f"'{call_type}' is not a valid call type")

    @expectedAlertNondeterministic('median CUDA with indices output', ['cuda'])
    def run_median_expect_error(slf, device, call_type):
        run_median(slf, device, call_type)

    # Only the variants that produce indices are expected to alert on CUDA.
    run_median(self, device, 'function')
    run_median_expect_error(self, device, 'function with indices')
    run_median(self, device, 'method')
    run_median_expect_error(self, device, 'method with indices')
    run_median_expect_error(self, device, 'out with indices')
# FIXME: move to test_scatter_gather_ops
def _test_gather_backward_one_dim(self, device, deterministic: bool = False) -> None:
    """Helper: checks gather backward on a 1-D source under DeterministicGuard.

    On CUDA, reruns backward and requires bitwise-identical gradients; on
    other devices, compares against a serial scatter-style accumulation.
    """
    with DeterministicGuard(deterministic):
        m = random.randint(2000, 3000)
        elems = random.randint(10 * m, 20 * m)
        src = torch.randn(m, device=device, requires_grad=True)
        idx = torch.randint(m, (elems,), device=device)
        weight = torch.rand_like(torch.gather(src, 0, idx), device=device) * 10 ** 6
        torch.gather(src, 0, idx).backward(weight)
        assert src.grad is not None
        grad = src.grad.detach().clone()
        if torch.device(device).type == 'cuda':
            # Reruns must reproduce the gradient exactly.
            for _ in range(2):
                src.grad.data.zero_()
                torch.gather(src, 0, idx).backward(weight)
                self.assertEqual(src.grad, grad, atol=0, rtol=0)
        else:
            # Serial reference accumulation.
            expected = torch.zeros_like(src, device=device)
            for i in range(elems):
                expected[idx[i]] += weight[i]
            self.assertEqual(grad, expected, atol=0, rtol=0)
# FIXME: move to test_scatter_gather_ops
@onlyNativeDeviceTypes
def test_gather_backward_deterministic_path(self, device) -> None:
    """Gather backward must be reproducible when deterministic algorithms are on."""
    self._test_gather_backward_one_dim(device, True)
# FIXME: move to test_scatter_gather_ops
@onlyCPU
def test_gather_backward_one_dim(self, device) -> None:
    """Gather backward on CPU is checked against a serial reference (non-deterministic mode)."""
    self._test_gather_backward_one_dim(device, False)
# FIXME: move to test_scatter_gather_ops
@onlyNativeDeviceTypes
def test_scatter_add_one_dim_deterministic(self, device) -> None:
    """With deterministic algorithms on, 1-D scatter_add matches a serial sum exactly."""
    with DeterministicGuard(True):
        bins = random.randint(20, 30)
        n = random.randint(2000 * bins, 3000 * bins)
        src = torch.randn(n, device=device)
        idx = torch.randint(bins, (n,), device=device)

        result = torch.zeros(bins, device=device).scatter_add(0, idx, src)

        # Serial reference accumulation; comparison is bitwise (atol=rtol=0).
        reference = torch.zeros(bins, device=device)
        for i in range(n):
            reference[idx[i]] += src[i]

        self.assertEqual(result, reference, atol=0, rtol=0)
# FIXME: move to test_scatter_gather_ops
@onlyNativeDeviceTypes
def test_scatter_zero_size_index(self, device) -> None:
    """scatter with an empty index tensor leaves the destination unchanged."""
    empty_index = torch.zeros((0, 4), dtype=torch.int64)
    empty_src = torch.zeros((0, 4))
    original = torch.arange(4, dtype=torch.float32)
    scattered = original.scatter(0, empty_index, empty_src)
    self.assertEqual(scattered, original, atol=0, rtol=0)
@onlyCUDA
def test_sync_warning(self, device):
    """Ops known to synchronize the CUDA device warn (level 1) or raise
    (level 2) under CudaSyncGuard, while known non-syncing ops do neither.
    """
    # level 1 is expected to warn, level 2 to raise.
    def _sync_raises_helper(f, level):
        with CudaSyncGuard(level):
            if level == 1:
                with self.assertWarnsRegex(UserWarning, "called a synchronizing "):
                    f()
            elif level == 2:
                with self.assertRaisesRegex(RuntimeError, "called a synchronizing "):
                    f()

    def _no_sync_helper(f, level):
        with CudaSyncGuard(level):
            f()

    def _ind_put_fn(x, ind, val):
        x[ind] = val
        return x

    def _ind_get_fn(x, ind):
        return x[ind]

    def _cond_fn(x):
        if x:  # taking boolean value of a tensor synchronizes
            return x
        else:
            return 2 * x

    # prepare inputs for subsequent ops
    size = 4
    x = torch.rand(size, device=device)
    y = torch.rand((), device=device)
    ind = torch.randint(size, (3,), device=device)
    ind_cpu = ind.cpu()
    repeats = torch.full((1,), 2, device=device)
    mask = torch.randint(2, (size,), device=device, dtype=bool)
    # Ops below are asserted not to synchronize; note repeat_interleave
    # only avoids a sync when output_size is given.
    expect_no_sync = (lambda: _ind_put_fn(x, mask, 1.),
                      lambda: _ind_put_fn(x, ind, y),
                      lambda: _ind_get_fn(x, ind),
                      lambda: torch.nn.functional.one_hot(ind, num_classes=size),
                      lambda: torch.randperm(20000, device=device),
                      lambda: torch.repeat_interleave(x, 2, output_size=2 * size),
                      lambda: torch.repeat_interleave(x, repeats, output_size=2 * size))
    # Ops below are asserted to synchronize (CPU indices, boolean masks,
    # nonzero, tensor truthiness, one_hot/repeat_interleave without sizes).
    expect_sync = (lambda: _ind_put_fn(x, mask, y),
                   lambda: _ind_put_fn(x, ind_cpu, y),
                   lambda: _ind_get_fn(x, mask),
                   lambda: _ind_get_fn(x, ind_cpu),
                   lambda: x.nonzero(),
                   lambda: _cond_fn(y),
                   lambda: torch.nn.functional.one_hot(ind),
                   lambda: torch.repeat_interleave(x, 2),
                   lambda: torch.repeat_interleave(x, repeats))
    for f, level in product(expect_no_sync, (1, 2)):
        _no_sync_helper(f, level)
    for f, level in product(expect_sync, (1, 2)):
        _sync_raises_helper(f, level)
@dtypes(*floating_types_and(torch.half, torch.bfloat16))
@skipIfMps
def test_log_normal(self, device, dtype):
    """log_normal_ must preserve the tensor's dtype and shape."""
    sample = torch.tensor([10], dtype=dtype, device=device).log_normal_()
    self.assertEqual(sample.dtype, dtype)
    self.assertEqual(sample.size(), torch.Size([1]))
@dtypes(*all_types_and(torch.half, torch.bfloat16))
@skipIfMps
def test_geometric(self, device, dtype):
    """geometric_ must preserve the tensor's dtype and shape."""
    sample = torch.tensor([10], dtype=dtype, device=device).geometric_(0.5)
    self.assertEqual(sample.dtype, dtype)
    self.assertEqual(sample.size(), torch.Size([1]))
@skipIfMps
def test_repeat_interleave(self, device):
    """repeat_interleave: scalar repeats, per-row repeats, and the output_size hint."""
    src = torch.tensor([[1, 2], [3, 4]], device=device)

    # exercise single argument function signature (flattens and repeats each element)
    flat = src.repeat_interleave(2)
    self.assertEqual(torch.Size([8]), flat.size())

    for idx_dtype in [torch.int, torch.long]:
        counts = torch.tensor([1, 2], dtype=idx_dtype, device=device)
        total = torch.sum(counts)

        # per-row repeats along dim 0
        out = torch.repeat_interleave(src, counts, dim=0)
        self.assertEqual(out.dtype, src.dtype)
        self.assertEqual(out.size(), torch.Size([3, 2]))

        # same call with an explicit output_size hint
        out_hinted = torch.repeat_interleave(src, counts, dim=0, output_size=total)
        self.assertEqual(out_hinted.dtype, src.dtype)
        self.assertEqual(out_hinted.size(), torch.Size([3, 2]))
@dtypes(*floating_types())
@dtypesIfCPU(*floating_types_and(torch.bfloat16))
@dtypesIfCUDA(*floating_types_and(torch.half))
def test_bernoulli_p(self, device, dtype):
    """bernoulli() with deterministic probabilities (all 0/1) and random p tensors."""
    for trivial_p in ([0, 1], [1, 0, 1, 1, 0, 1]):
        x = torch.tensor(trivial_p, dtype=dtype, device=device)
        # p == 0 always draws 0 and p == 1 always draws 1, so output equals input
        self.assertEqual(x.bernoulli().tolist(), trivial_p)

    def isBinary(t):
        # True iff every element is exactly 0 or 1
        return torch.ne(t, 0).mul_(torch.ne(t, 1)).sum().item() == 0

    p = torch.rand(5, 5, dtype=dtype, device=device)
    self.assertTrue(isBinary(p.bernoulli()))

    # expanded (stride-0) probability tensor
    p = torch.rand(5, dtype=dtype, device=device).expand(5, 5)
    self.assertTrue(isBinary(p.bernoulli()))

    p = torch.rand(5, 5, dtype=dtype, device=device)
    # out= overload writing the draw back into p
    torch.bernoulli(torch.rand_like(p), out=p)
    self.assertTrue(isBinary(p))
# RngUniform not implemented for Integral type in XLA test
@dtypes(*floating_types())
@dtypesIfCPU(*all_types_and(torch.bool))
@dtypesIfCUDA(*all_types_and(torch.bool, torch.half))
def test_bernoulli_self(self, device, dtype):
    """In-place bernoulli_ with a scalar p, a tensor p, and the out= overload."""
    def isBinary(t):
        # True iff every element is exactly 0 or 1
        return torch.ne(t, 0).mul_(torch.ne(t, 1)).sum().item() == 0

    t = torch.empty(10, 10, dtype=dtype, device=device)

    # t is filled with 2 before each draw so stale values would be detected
    t.fill_(2)
    t.bernoulli_(0.5)
    self.assertTrue(isBinary(t))

    # half-precision probability tensors are only supported on CUDA
    for p_dtype in floating_types_and(*[torch.half] if device.startswith('cuda') else []):
        p = torch.rand(10, dtype=p_dtype, device=device).expand(10, 10)
        t.fill_(2)
        t.bernoulli_(p)
        self.assertTrue(isBinary(t))

        t.fill_(2)
        torch.bernoulli(torch.rand_like(t, dtype=p_dtype), out=t)
        self.assertTrue(isBinary(t))

        t.fill_(2)
        t.bernoulli_(torch.rand_like(t, dtype=p_dtype))
        self.assertTrue(isBinary(t))
@slowTest
@dtypes(*floating_types())
@dtypesIfCUDA(*floating_types_and(torch.half))
def test_bernoulli_edge_cases(self, device, dtype):
    """bernoulli with p=0 never draws a 1; with p=1 never draws a 0."""
    # Need to draw a lot of samples to cover every random floating point number.
    all_zero_p = torch.zeros(10000, 10000, dtype=dtype, device=device)  # probability of drawing "1" is 0
    num_ones = (torch.bernoulli(all_zero_p) == 1).sum()
    self.assertEqual(num_ones, 0)

    all_one_p = torch.ones(10000, 10000, dtype=dtype, device=device)  # probability of drawing "1" is 1
    num_zeros = (torch.bernoulli(all_one_p) == 0).sum()
    self.assertEqual(num_zeros, 0)
@dtypes(*floating_types_and(torch.half, torch.bfloat16))
@skipIfMps
def test_exponential(self, device, dtype):
    """exponential_: dtype/shape preservation, extremal lambdas, and the
    negative-lambda error path."""
    a = torch.tensor([10], dtype=dtype, device=device).exponential_(0.5)
    self.assertEqual(a.dtype, dtype)
    self.assertEqual(a.size(), torch.Size([1]))

    # Tests extremal behavior: pairs of (lambda, expected sample value)
    tests = ((-0, float('inf')), (0, float('inf')), (float('inf'), 0))
    for test in tests:
        t = torch.empty((1,), device=device, dtype=dtype).exponential_(test[0])
        self.assertTrue(t.item() == test[1])

    # Tests that negative lambda fails
    with self.assertRaises(RuntimeError):
        torch.empty((1,), device=device, dtype=dtype).exponential_(-0.5)
@onlyCUDA
@dtypes(torch.half, torch.float)
def test_exponential_no_zero(self, device, dtype):
    """exponential_ must never return exactly 0.

    Naively, 0 can be generated with probability 2^-24, so instead of a single
    draw we sample a large batch and check the minimum.  CPU is skipped
    because a draw this large would make the test very slow.
    """
    samples = torch.empty(50000000, device=device, dtype=dtype).exponential_()
    self.assertTrue(samples.min() > 0)
def _generate_correlation_tensors(self, device, dtype):
    """Yield inputs of assorted shapes — empty, 1-D, 2-D, noncontiguous, and
    (for non-int dtypes) containing inf/nan — for the corrcoef/cov tests."""
    yield make_tensor((0, 0), dtype=dtype, device=device)
    yield make_tensor((1, 0), dtype=dtype, device=device)
    yield make_tensor((0, 1), dtype=dtype, device=device)
    yield make_tensor((2,), dtype=dtype, device=device)
    yield make_tensor((2, 1), dtype=dtype, device=device)
    yield make_tensor((2, 2), dtype=dtype, device=device)
    yield make_tensor((2, 3), dtype=dtype, device=device)
    yield make_tensor((5, 10), dtype=dtype, device=device)
    yield make_tensor((5, 10), dtype=dtype, device=device, noncontiguous=True)
    if dtype != torch.int:
        # non-finite values only make sense for floating/complex dtypes
        yield torch.tensor([0, -2, nan, 10.2, inf], dtype=dtype, device=device)
@onlyNativeDeviceTypes
@dtypes(torch.int, torch.float, torch.cfloat)
def test_corrcoef(self, device, dtype):
    """torch.corrcoef must match np.corrcoef on a variety of shapes."""
    for sample in self._generate_correlation_tensors(device, dtype):
        torch_result = torch.corrcoef(sample)
        numpy_reference = np.corrcoef(sample.cpu().numpy())
        self.assertEqual(torch_result, numpy_reference, exact_dtype=False)
@dtypes(torch.int, torch.float, torch.cfloat)
def test_cov(self, device, dtype):
    """torch.cov must match np.cov across shapes, corrections, and every
    combination of present/absent frequency and analytic weights."""
    def check(t, correction=1, fweights=None, aweights=None):
        res = torch.cov(t, correction=correction, fweights=fweights, aweights=aweights)
        t = t.cpu().numpy()
        fweights = fweights.cpu().numpy() if fweights is not None else None
        aweights = aweights.cpu().numpy() if aweights is not None else None
        ref = np.cov(t, ddof=correction, fweights=fweights, aweights=aweights)
        self.assertEqual(res, ref, atol=1e-05, rtol=1e-05, exact_dtype=False)

    for x in self._generate_correlation_tensors(device, dtype):
        check(x)
        num_observations = x.numel() if x.ndim < 2 else x.size(1)
        if num_observations > 0:
            fweights = torch.randint(1, 10, (num_observations,), device=device)
            aweights = make_tensor((num_observations,), dtype=torch.float, device=device, low=1)
            for correction, fw, aw in product([0, 1, 2], [None, fweights], [None, aweights]):
                # BUG FIX: this previously passed `fweights, aweights`
                # unconditionally, so the None combinations produced by
                # `product` were never actually tested.
                check(x, correction, fw, aw)
@skipIfNoSciPy
@dtypes(*floating_types_and(torch.half, torch.bfloat16))
def test_uniform_kstest(self, device, dtype):
    """Kolmogorov-Smirnov test: uniform_ samples follow U(from_, to_)."""
    from scipy import stats
    n = 1000
    for lo in [-42, 0, 4.2]:
        for hi in [-4.2, 0, 42]:
            if hi > lo:
                sample = torch.empty(n, dtype=dtype, device=device).uniform_(lo, hi)
                ks = stats.kstest(sample.cpu().to(torch.double), 'uniform', args=(lo, (hi - lo)))
                self.assertTrue(ks.statistic < 0.1)
@skipIfNoSciPy
@dtypes(*floating_types_and(torch.half))
@dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
def test_normal_kstest(self, device, dtype):
    """Kolmogorov-Smirnov test: normal_ samples follow N(mean, std)."""
    from scipy import stats
    n = 1000
    for mu in [-10, 0, 50]:
        for sigma in [1, 5, 10]:
            sample = torch.empty(n, dtype=dtype, device=device).normal_(mean=mu, std=sigma)
            ks = stats.kstest(sample.cpu().to(torch.double), 'norm', args=(mu, sigma))
            self.assertTrue(ks.statistic < 0.1)
@skipIfMps
@skipIfNoSciPy
@dtypes(*floating_types_and(torch.half, torch.bfloat16))
def test_lognormal_kstest(self, device, dtype):
    """Kolmogorov-Smirnov test: log_normal_ samples follow LogNormal(mean, std)."""
    from scipy import stats
    n = 1000
    for mu in [-3, 0, 7]:
        for sigma in [1, 5, 7]:
            sample = torch.empty(n, dtype=dtype, device=device).log_normal_(mean=mu, std=sigma)
            ks = stats.kstest(sample.cpu().to(torch.double), 'lognorm', args=(sigma, 0, math.exp(mu)))
            # half precision has too little resolution for the tight bound
            if dtype == torch.half:
                self.assertTrue(ks.statistic < 0.3)
            else:
                self.assertTrue(ks.statistic < 0.1)
@skipIfMps
@skipIfNoSciPy
@dtypes(*floating_types_and(torch.half, torch.bfloat16))
def test_exponential_kstest(self, device, dtype):
    """Kolmogorov-Smirnov test: exponential_ samples follow Exp(lambd)."""
    from scipy import stats
    n = 1000
    for rate in [0.5, 1.0, 5.0]:
        sample = torch.empty(n, dtype=dtype, device=device).exponential_(lambd=rate)
        ks = stats.kstest(sample.cpu().to(torch.double), 'expon', args=(0, 1 / rate,))
        self.assertTrue(ks.statistic < 0.1)
@skipIfMps
@skipIfNoSciPy
@dtypes(*floating_types_and(torch.half, torch.bfloat16))
def test_cauchy_kstest(self, device, dtype):
    """Kolmogorov-Smirnov test: cauchy_ samples follow Cauchy(median, sigma)."""
    from scipy import stats
    n = 1000
    for med in [-10, 0, 50]:
        for scale in [0.5, 1.0, 10.0]:
            sample = torch.empty(n, dtype=dtype, device=device).cauchy_(median=med, sigma=scale)
            ks = stats.kstest(sample.cpu().to(torch.double), 'cauchy', args=(med, scale))
            self.assertTrue(ks.statistic < 0.1)
@slowTest
@onlyCUDA
@dtypes(torch.bfloat16, torch.float32)
def test_cauchy_no_inf(self, device, dtype):
    """cauchy_ must never produce inf for these dtypes.

    torch.float16 is excluded: it will produce `inf` because of its smaller range.
    """
    for _ in range((2 ** 16) * 2):
        draw = torch.empty((2 ** 16), dtype=dtype, device=device)
        draw.cauchy_()
        self.assertFalse(draw.isinf().sum())
@skipIfMps
@skipIfNoSciPy
@dtypes(*all_types_and(torch.half, torch.bfloat16))
def test_geometric_kstest(self, device, dtype):
    """Chi-square goodness-of-fit: geometric_ samples follow Geom(p)."""
    from scipy import stats
    n = 1000
    for prob in [0.2, 0.5, 0.8]:
        sample = torch.empty(n, dtype=dtype, device=device).geometric_(p=prob)
        actual = np.histogram(sample.cpu().to(torch.double), np.arange(1, 100))[0]
        expected = stats.geom(prob).pmf(np.arange(1, 99)) * n
        chi2 = stats.chisquare(actual, expected)
        self.assertEqual(chi2.pvalue, 1.0, atol=0.1, rtol=0)
# FIXME: find test suite for pdist and cdist
def test_pairwise_distance_empty(self, device):
    """pairwise_distance on empty batches must return correctly-shaped zeros."""
    # (input shape, expected number of output distances)
    for shape, out_len in (((2, 0), 2), ((0, 2), 0)):
        a = torch.randn(shape, device=device)
        b = torch.randn(shape, device=device)
        self.assertEqual(torch.zeros(out_len, device=device),
                         torch.pairwise_distance(a, b))
        self.assertEqual(torch.zeros((out_len, 1), device=device),
                         torch.pairwise_distance(a, b, keepdim=True))
def test_pdist_empty(self, device):
    """pdist degenerate shapes: fewer than two rows -> empty output;
    zero feature dimension -> all-zero distances."""
    no_rows = torch.randn((0, 2), device=device)
    self.assertEqual(torch.empty(0, device=device), torch.pdist(no_rows))

    one_row = torch.randn((1, 2), device=device)
    self.assertEqual(torch.empty(0, device=device), torch.pdist(one_row))

    no_features = torch.randn((3, 0), device=device)
    self.assertEqual(torch.zeros(3, device=device), torch.pdist(no_features))
def test_cdist_empty(self, device):
    """cdist with empty row or feature dimensions must produce correctly-shaped
    results (zero distances when only the feature dim is empty)."""
    cases = (
        ((0, 5), (4, 5), torch.empty(0, 4, device=device)),
        ((2, 5), (0, 5), torch.empty(2, 0, device=device)),
        ((2, 0), (3, 0), torch.zeros(2, 3, device=device)),
        ((2, 0), (0, 0), torch.empty(2, 0, device=device)),
    )
    for shape_x, shape_y, expected in cases:
        a = torch.randn(shape_x, device=device)
        b = torch.randn(shape_y, device=device)
        self.assertEqual(expected, torch.cdist(a, b))
def _brute_cdist(self, x, y, p=2):
r1 = x.shape[-2]
r2 = y.shape[-2]
if r1 == 0 or r2 == 0:
return torch.empty(r1, r2, device=x.device)
return torch.norm(x[..., None, :] - y[..., None, :, :], p=p, dim=-1)
@skipIfMps
def test_cdist_norm(self, device):
    """Compare torch.cdist against the brute-force reference over many shape
    combinations and p values."""
    for r1 in [3, 4, 5, 6]:
        for m in [2, 3, 4, 10]:
            for r2 in [4, 6, 7, 8]:
                for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
                    x = torch.randn(r1, m, device=device)
                    y = torch.randn(r2, m, device=device)
                    if p == 2:
                        # p == 2 additionally exercises both euclidean compute modes
                        for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:
                            actual = torch.cdist(x, y, p=2, compute_mode=cm)
                            expected = self._brute_cdist(x, y, p=2)
                            # the mm-based path is less accurate, hence the loose atol
                            self.assertEqual(expected, actual, rtol=0, atol=0.02)
                    else:
                        actual = torch.cdist(x, y, p=p)
                        expected = self._brute_cdist(x, y, p=p)
                        self.assertEqual(expected, actual)
@skipIfMps
def test_cdist_norm_batch(self, device):
    """Batched variant of test_cdist_norm: compare torch.cdist against the
    brute-force reference on 5-D inputs."""
    for r1 in [3, 4, 5, 6]:
        for m in [2, 3, 4, 10]:
            for r2 in [4, 6, 7, 8]:
                for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
                    x = torch.randn(2, 3, 6, r1, m, device=device)
                    y = torch.randn(2, 3, 6, r2, m, device=device)
                    if p == 2:
                        # p == 2 additionally exercises both euclidean compute modes
                        for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:
                            actual = torch.cdist(x, y, p=2, compute_mode=cm)
                            expected = self._brute_cdist(x, y, p=2)
                            # the mm-based path is less accurate, hence the loose atol
                            self.assertEqual(expected, actual, rtol=0, atol=0.02)
                    else:
                        actual = torch.cdist(x, y, p=p)
                        expected = self._brute_cdist(x, y, p=p)
                        self.assertEqual(expected, actual)
@onlyCUDA
def test_cdist_cuda_backward(self, device):
    """Gradients of torch.cdist must match the brute-force reference."""
    for l1 in [1, 511, 513]:
        for l2 in [1, 511, 513]:
            for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
                x1 = torch.randn(4, l1, 32, device=device, requires_grad=True)
                x2 = x1.clone().detach_().requires_grad_()
                y1 = torch.randn(4, l2, 32, device=device, requires_grad=True)
                y2 = y1.clone().detach_().requires_grad_()
                if p == 2:
                    # p == 2 additionally exercises both euclidean compute modes
                    for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:
                        z1 = torch.cdist(x1, y1, p=2, compute_mode=cm).mean()
                        z2 = self._brute_cdist(x2, y2, p=2).mean()
                        z1.backward()
                        z2.backward()
                        self.assertEqual(x1.grad, x2.grad, rtol=0, atol=0.001)
                        self.assertEqual(y1.grad, y2.grad, rtol=0, atol=0.001)
                else:
                    z1 = torch.cdist(x1, y1, p=p).mean()
                    z2 = self._brute_cdist(x2, y2, p=p).mean()
                    # BUG FIX: backward() was missing here, so the grad
                    # comparison below compared two None grads and the
                    # non-euclidean p values were never actually tested.
                    z1.backward()
                    z2.backward()
                    self.assertEqual(x1.grad, x2.grad, rtol=0, atol=0.001)
                    self.assertEqual(y1.grad, y2.grad, rtol=0, atol=0.001)
@tf32_on_and_off(0.005)
def test_cdist_large(self, device):
    """cdist (p=2) agrees with the brute-force reference on large inputs for
    every euclidean compute mode."""
    for mode in ['use_mm_for_euclid_dist_if_necessary', 'use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:
        a = torch.randn(1000, 10, device=device)
        b = torch.randn(1000, 10, device=device)
        got = torch.cdist(a, b, p=2, compute_mode=mode)
        ref = self._brute_cdist(a, b, p=2)
        self.assertEqual(ref, got)
@slowTest
@tf32_on_and_off(0.01)
def test_cdist_large_batch(self, device):
    """Batched cdist (p=2) agrees with the brute-force reference on large
    inputs for every euclidean compute mode."""
    for mode in ['use_mm_for_euclid_dist_if_necessary', 'use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:
        a = torch.randn(4, 3, 1000, 10, device=device)
        b = torch.randn(4, 3, 1000, 10, device=device)
        got = torch.cdist(a, b, p=2, compute_mode=mode)
        ref = self._brute_cdist(a, b, p=2)
        self.assertEqual(ref, got)
@tf32_on_and_off(0.005)
def test_cdist_non_contiguous(self, device):
    """cdist must handle every combination of (non-)contiguous x and y."""
    for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:
        # both inputs non-contiguous (transposed views)
        x = torch.randn(5, 7, device=device).mT
        y = torch.randn(5, 3, device=device).mT
        actual = torch.cdist(x, y, p=2, compute_mode=cm)
        expected = self._brute_cdist(x, y, p=2)
        self.assertFalse(x.is_contiguous())
        self.assertFalse(y.is_contiguous())
        self.assertEqual(expected, actual)

        # contiguous x, non-contiguous y
        x = torch.randn(7, 5, device=device)
        y = torch.randn(5, 3, device=device).t()
        actual = torch.cdist(x, y, p=2, compute_mode=cm)
        expected = self._brute_cdist(x, y, p=2)
        self.assertTrue(x.is_contiguous())
        self.assertFalse(y.is_contiguous())
        self.assertEqual(expected, actual)

        # non-contiguous x, contiguous y
        x = torch.randn(5, 7, device=device).t()
        y = torch.randn(3, 5, device=device)
        actual = torch.cdist(x, y, p=2, compute_mode=cm)
        expected = self._brute_cdist(x, y, p=2)
        self.assertFalse(x.is_contiguous())
        self.assertTrue(y.is_contiguous())
        self.assertEqual(expected, actual)
@tf32_on_and_off()
def test_cdist_non_contiguous_batch(self, device):
    """Batched variant of test_cdist_non_contiguous."""
    for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:
        # both inputs non-contiguous (transposed views)
        x = torch.randn(4, 3, 2, 5, 7, device=device).mT
        y = torch.randn(4, 3, 2, 5, 3, device=device).mT
        actual = torch.cdist(x, y, p=2, compute_mode=cm)
        expected = self._brute_cdist(x, y, p=2)
        self.assertFalse(x.is_contiguous())
        self.assertFalse(y.is_contiguous())
        self.assertEqual(expected, actual)

        # contiguous x, non-contiguous y
        x = torch.randn(7, 2, 7, 5, device=device)
        y = torch.randn(7, 2, 5, 3, device=device).mT
        actual = torch.cdist(x, y, p=2, compute_mode=cm)
        expected = self._brute_cdist(x, y, p=2)
        self.assertTrue(x.is_contiguous())
        self.assertFalse(y.is_contiguous())
        self.assertEqual(expected, actual)

        # non-contiguous x, contiguous y
        x = torch.randn(4, 5, 7, device=device).mT
        y = torch.randn(4, 3, 5, device=device)
        actual = torch.cdist(x, y, p=2, compute_mode=cm)
        expected = self._brute_cdist(x, y, p=2)
        self.assertFalse(x.is_contiguous())
        self.assertTrue(y.is_contiguous())
        self.assertEqual(expected, actual)
# Maybe merge into OpInfo?
def test_cdist_euclidean_large(self, device):
    """Smoke-test the euclidean cdist backward pass on a large input."""
    def _test_euclidean_large_cdist(sizex, sizey=None):
        if sizey is None:
            sizey = sizex
        x = torch.randn(sizex, device=device, dtype=torch.float)
        y = torch.randn(sizey, device=device, dtype=torch.float)
        eps = 1e-6
        # to avoid extremum (near-zero distances make the gradient ill-defined)
        x = x - (((x - y) < eps).float() * 2 * eps)
        x.requires_grad = True
        y.requires_grad = True
        dist = torch.cdist(x, y, p=2)
        # Do a backward pass to check that it is valid for large
        # matrices
        loss = dist.sum()
        loss.backward()

    _test_euclidean_large_cdist((2000, 5))
# Ensure that cdist backward with p<1 does not produce NaNs
@skipIfMps
def test_cdist_grad_p_lt_1_no_nan(self, device):
    """cdist gradients for p < 1 must stay finite even when one coordinate
    difference is exactly zero."""
    for p in [0.99, 0.7, 0.5, 0.1, 0.01]:
        a = torch.randn(1, 2, device=device)
        # offset only the first coordinate so the second difference is 0
        b = a.clone().detach() + torch.tensor([[1., 0.]], device=device)
        a.requires_grad = True
        b.requires_grad = True
        dist = torch.cdist(a, b, p=p)
        dist.backward(torch.ones_like(dist))
        self.assertFalse(torch.isnan(a.grad).any())
        self.assertFalse(torch.isnan(b.grad).any())
def test_cdist_same_inputs(self, device):
    """cdist backward must stay finite when some pairwise distances are exactly 0.

    Test to detect issues in the cdist gradient calculation when y is an exact
    copy of x, so all diagonal distances are 0.
    """
    sizex = (1, 27, 32)
    for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
        x = torch.randn(sizex, device=device, dtype=torch.float)
        dist_grad = torch.randn((1, 27, 27), device=device, dtype=torch.float)
        y = x.clone()
        x.requires_grad = True
        d = torch.cdist(x, y)
        d.backward(dist_grad)
        # Check that the backward pass does not contain invalid
        # values such as nan or inf.  (self.assertTrue instead of a bare
        # `assert` so the check survives `python -O`; also dropped an
        # unused `eps` local.)
        self.assertTrue(torch.isfinite(x.grad).all())
@skipIfMps
def test_cumsum(self, device):
    """torch.cumsum: out= overload, in-place variant, bool/byte agreement, and
    backward through zero-length dimensions and scalars."""
    x = torch.rand(100, 100, device=device)
    res1 = torch.cumsum(x, 1)
    # out= overload must resize and fill the provided tensor
    res2 = torch.tensor([]).to(device)
    torch.cumsum(x, 1, out=res2)
    self.assertEqual(res1, res2)
    # in-place variant
    x.cumsum_(1)
    self.assertEqual(res1, x)

    # cumsum over bool inputs counts True values, matching the byte equivalent
    a = torch.tensor([[True, False, True],
                      [False, False, False],
                      [True, True, True]], device=device)
    b = a.byte()
    aRes = torch.cumsum(a, 0)
    bRes = torch.cumsum(b, 0)
    self.assertEqual(aRes, bRes)
    self.assertEqual(aRes, torch.tensor([[1, 0, 1],
                                         [1, 0, 1],
                                         [2, 1, 2]]))

    aRes = torch.cumsum(a, 1)
    bRes = torch.cumsum(b, 1)
    self.assertEqual(aRes, bRes)
    self.assertEqual(aRes, torch.tensor([[1, 1, 2],
                                         [0, 0, 0],
                                         [1, 2, 3]]))

    # Check that cumulative sum over a zero length dimension doesn't crash on backprop.
    # Also check that cumsum over other dimensions in a tensor with a zero-length
    # dimension also works
    # Also include a basic suite of similar tests for other base cases.
    shapes = [[2, 0], [2, 1, 4], [0, 2, 3], [1], [5]]
    for shape in shapes:
        for dim in range(len(shape)):
            raw_tensor = torch.zeros(*shape, requires_grad=True)
            integrated = raw_tensor.cumsum(dim=dim)
            # Check that backward does not crash
            integrated.sum().backward()
            # Check that output maintained correct shape
            self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape)

    # Check a scalar example
    raw_tensor = torch.tensor(3., requires_grad=True)
    integrated = raw_tensor.cumsum(dim=-1)
    self.assertEqual(raw_tensor, integrated)
    # Check that backward does not crash
    integrated.sum().backward()
    # Check that output maintained correct shape
    self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape)
@skipIfMps
def test_cumprod(self, device):
    """torch.cumprod: out= overload, in-place variant, bool/byte agreement, and
    backward through zero-length dimensions and scalars."""
    x = torch.rand(100, 100, device=device)
    res1 = torch.cumprod(x, 1)
    # out= overload must resize and fill the provided tensor
    res2 = torch.tensor([]).to(device)
    torch.cumprod(x, 1, out=res2)
    self.assertEqual(res1, res2)
    # in-place variant
    x.cumprod_(1)
    self.assertEqual(res1, x)

    # cumprod over bool inputs must match the byte equivalent
    a = torch.tensor([[True, False, True],
                      [False, False, False],
                      [True, True, True]], dtype=torch.bool, device=device)
    b = a.byte()
    aRes = torch.cumprod(a, 0)
    bRes = torch.cumprod(b, 0)
    self.assertEqual(aRes, bRes)
    self.assertEqual(aRes, torch.tensor([[1, 0, 1],
                                         [0, 0, 0],
                                         [0, 0, 0]]))

    aRes = torch.cumprod(a, 1)
    bRes = torch.cumprod(b, 1)
    self.assertEqual(aRes, bRes)
    self.assertEqual(aRes, torch.tensor([[1, 0, 0],
                                         [0, 0, 0],
                                         [1, 1, 1]]))

    # Check that cumulative prod over a zero length dimension doesn't crash on backprop.
    # Also check that cumprod over other dimensions in a tensor with a zero-length
    # dimension also works
    # Also include a basic suite of similar tests for other base cases.
    shapes = [[2, 0], [2, 1, 4], [0, 2, 3], [1], [5]]
    for shape in shapes:
        for dim in range(len(shape)):
            raw_tensor = torch.zeros(*shape, requires_grad=True)
            integrated = raw_tensor.cumprod(dim=dim)
            # Check that backward does not crash
            integrated.sum().backward()
            # Check that output maintained correct shape
            self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape)

    # Check a scalar example
    raw_tensor = torch.tensor(3., requires_grad=True)
    integrated = raw_tensor.cumprod(dim=-1)
    self.assertEqual(raw_tensor, integrated)
    # Check that backward does not crash
    integrated.sum().backward()
    # Check that output maintained correct shape
    self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape)
@skipIfMps
def test_cummax_cummin(self, device):
    """Shared checks for torch.cummax/torch.cummin: out= overloads, bool/byte
    agreement, inf/nan propagation, out-dtype validation, and backward shapes."""
    def test_ops(op, string_of_function_name, expected_output1, expected_output2):
        x = torch.rand(100, 100, device=device)
        out1 = op(x, 1)
        # out= overload must resize and fill both the values and indices tensors
        res2 = torch.empty(0, device=device)
        indices2 = torch.empty(0, dtype=torch.int64, device=device)
        op(x, 1, out=(res2, indices2))
        self.assertEqual(out1[0], res2)
        self.assertEqual(out1[1], indices2)

        # bool input must agree with the equivalent byte input
        a = torch.tensor([[True, False, True],
                          [False, False, False],
                          [True, True, True]], dtype=torch.bool, device=device)
        b = a.byte()
        aRes = op(a, 0)
        bRes = op(b, 0)
        self.assertEqual(aRes[0], bRes[0].bool())
        self.assertEqual(aRes[0], expected_output1.bool())

        # test inf and nan input
        x = torch.tensor([4, inf, 1.5, -inf, 0, nan, 1])
        xRes = op(x, 0)[0]
        self.assertEqual(xRes, expected_output2)

        # op shouldn't support values, indices with a dtype, device type or layout
        # different from that of input tensor
        t = torch.randn(10)
        values = torch.empty(0, dtype=torch.int16)
        indices = torch.empty(0, dtype=torch.int64)
        with self.assertRaisesRegex(
                RuntimeError,
                'expected scalar_type Float but found Short'):
            op(t, 0, out=(values, indices))

        # Check that op over a zero length dimension doesn't crash on backprop.
        # Also check that op over other dimensions in a tensor with a zero-length
        # dimension also works
        # Also include a basic suite of similar tests for other base cases.
        shapes = [[2, 0], [2, 1, 4], [0, 2, 3], [1], [5]]
        for shape in shapes:
            for dim in range(len(shape)):
                raw_tensor = torch.zeros(*shape, requires_grad=True)
                integrated = getattr(raw_tensor, string_of_function_name)(dim=dim)
                # Check that backward does not crash
                integrated[0].sum().backward()
                # Check that output maintained correct shape
                self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape)

        # Check a scalar example
        raw_tensor = torch.tensor(3., requires_grad=True)
        integrated = getattr(raw_tensor, string_of_function_name)(dim=-1)
        # Check that backward does not crash
        integrated[0].sum().backward()
        # Check that output maintained correct shape
        self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape)

    # cummax: running maximum; nan is sticky once encountered
    expected_out = torch.tensor([4, inf, inf, inf, inf, nan, nan])
    test_ops(torch.cummax, "cummax", torch.tensor([[1, 0, 1],
                                                   [1, 0, 1],
                                                   [1, 1, 1]]), expected_out)

    # cummin: running minimum; nan is sticky once encountered
    expected_out = torch.tensor([4, 4, 1.5, -inf, -inf, nan, nan])
    test_ops(torch.cummin, "cummin", torch.tensor([[1, 0, 1],
                                                   [0, 0, 0],
                                                   [0, 0, 0]]), expected_out)
@skipIfMps
def test_logcumsumexp(self, device):
    """torch.logcumsumexp vs. a naive log(cumsum(exp)) reference, including
    -inf/nan inputs, out= behavior, and dtype-mismatch errors."""
    def logcumsumexp(a, axis):
        # naive (numerically unstable) reference implementation
        return torch.cumsum(a.exp(), axis=axis).log_()

    axis = -1
    a = torch.randn(100, 100, device=device)

    actual = a.logcumsumexp(axis)
    expected = logcumsumexp(a, axis)
    self.assertEqual(a.dtype, actual.dtype)
    self.assertEqual(expected.shape, actual.shape)
    self.assertEqual(expected, actual)

    # check -inf and nan handling
    x = torch.tensor([-float('inf'), -float('inf'), 1.0, 1.0, float('inf'),
                     float('inf'), float('nan'), 1.0, 1.0], device=device)
    # also exercise an expanded (stride-0) 2-D view
    x2d = x.unsqueeze(0).expand(2, -1)

    for inp in (x, x2d):
        actual = inp.logcumsumexp(axis)
        expected = logcumsumexp(inp, axis)
        self.assertEqual(expected, actual)

    # Check that out is actually inplace
    b = torch.randn(5, 2, device=device)
    inplace_out = torch.zeros(5, 2, device=device)

    expected = logcumsumexp(b, axis)
    torch.logcumsumexp(b, axis=axis, out=inplace_out)

    self.assertEqual(inplace_out, expected)

    # Check input and inplace_output type mismatch
    b = torch.randn(5, 2, device=device, dtype=torch.float64)
    inplace_out = torch.zeros(5, 2, device=device, dtype=torch.float32)
    with self.assertRaisesRegex(
            RuntimeError,
            'expected scalar_type Double but found Float'):
        torch.logcumsumexp(b, axis, out=inplace_out)
def _test_diff_numpy(self, t, dims=None):
    # Helper for test_diff to compare with NumPy reference implementation
    def to_np(t):
        # NumPy has no bfloat16; round-trip through float32 on the CPU
        if t.dtype == torch.bfloat16:
            return t.to(dtype=torch.float, device="cpu").numpy()
        else:
            return t.cpu().numpy()

    for dim in dims if dims else range(t.dim()):
        # size-1 slices along `dim` used as prepend/append fixtures
        prepend = t.narrow(dim, 0, 1)
        append = t.narrow(dim, 0, 1)
        np_t = to_np(t)

        # test when no prepend and append
        for n in range(t.size(dim)):
            actual = torch.diff(t, dim=dim, n=n)
            expected = torch.from_numpy(np.diff(np_t, axis=dim, n=n))
            self.assertEqual(actual, expected.to(t.dtype))

        # test when prepend and append's size along dim is 1
        for n in range(1, t.size(dim) + 4):
            actual = torch.diff(t, dim=dim, n=n, prepend=prepend, append=append)
            expected = torch.from_numpy(np.diff(np_t, axis=dim, n=n, prepend=to_np(prepend), append=to_np(append)))
            self.assertEqual(actual, expected.to(t.dtype))

        # test when prepend and append's size along dim != 1
        for n in range(1, t.size(dim) * 3):
            actual = torch.diff(t, dim=dim, n=n, prepend=t, append=t)
            expected = torch.from_numpy(np.diff(np_t, axis=dim, n=n, prepend=np_t, append=np_t))
            self.assertEqual(actual, expected.to(t.dtype))
# All tensors appear contiguous on XLA
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half, torch.bool))
def test_diff_noncontig(self, device, dtype):
    """torch.diff on non-contiguous inputs must match NumPy."""
    shapes = (
        (1,),
        (1, 5),
        (3, 5),
        (1, 5, 1),
        (2, 3, 5))

    for shape in shapes:
        contig = make_tensor(shape, dtype=dtype, device=device, low=-9, high=9)

        # carve a non-contiguous view out of a larger buffer, then copy data in
        non_contig = torch.empty(shape + (2, 2), device=device, dtype=dtype)[..., 0]
        non_contig = non_contig.select(-1, -1)
        non_contig.copy_(contig)
        # a (1,) tensor is trivially contiguous regardless of how it was built
        self.assertTrue(not non_contig.is_contiguous() or shape == (1,))

        self._test_diff_numpy(non_contig)
# RngNormal not implemented for type f16 for XLA
@dtypes(*all_types_and_complex_and(torch.bool))
@dtypesIfCPU(*all_types_and_complex_and(torch.half, torch.bool))
@dtypesIfCUDA(*all_types_and_complex_and(torch.half, torch.bool))
def test_diff(self, device, dtype):
    """torch.diff on contiguous inputs, plus the error paths for invalid
    prepend/append tensors and 0-d input."""
    shapes = (
        (1,),
        (1, 5),
        (3, 5),
        (1, 5, 1),
        (2, 3, 5))

    for shape in shapes:
        contig = make_tensor(shape, dtype=dtype, device=device, low=-9, high=9)
        self._test_diff_numpy(contig)

    t = torch.ones(2, 3)

    # prepend/append must have the same number of dimensions as the input
    with self.assertRaisesRegex(
            RuntimeError, 'diff expects prepend or append to be the same dimension as input'):
        invalid_prepend = torch.tensor([1, 2, 3], device=device, dtype=dtype)
        t.diff(dim=0, prepend=invalid_prepend)

    # ... and a matching shape on the non-diffed dimensions
    with self.assertRaisesRegex(
            RuntimeError, 'diff expects the shape of tensor to prepend or append to match that of input'):
        invalid_prepend = torch.tensor([[0, 1]], device=device, dtype=dtype)
        t.diff(dim=0, prepend=invalid_prepend)

    # 0-d input is rejected
    with self.assertRaisesRegex(
            RuntimeError, 'diff expects input to be at least one-dimensional'):
        scalar = torch.tensor(2, device=device, dtype=dtype)
        torch.diff(scalar)
# if the given input arg is not a list, it returns a list of single element: [arg]
def _wrap_to_list(self, input_array):
return input_array if isinstance(input_array, list) else [input_array]
# To ensure inf, -inf, and nan values do not cause divergence between Numpy and PyTorch.
# There are two types of possible divergence:
# 1. When we compute a,b both real numbers and has very small absolute values (i.e. very near to 0.0)
# then, result of a/b be inf, -inf and nan, and this cause divergence.
# 2. When we are dividing complex numbers by zero. For example, when a = torch.tensor(3+5j) we have
# a/0 to be equal to nan + nan*j in PyTorch and inf + inf*j in Numpy.
def _inf_nan_preprocess(self, actual, expected):
for i in range(len(expected)):
expected[i] = np.nan_to_num(expected[i], nan=nan, posinf=nan, neginf=nan)
# nan_to_num is not defined for complex tensors in PyTorch.
if actual[i].dtype == torch.complex64 :
actual[i].real = torch.nan_to_num(actual[i].real, nan=nan, posinf=nan, neginf=nan)
actual[i].imag = torch.nan_to_num(actual[i].imag, nan=nan, posinf=nan, neginf=nan)
else:
actual[i] = torch.nan_to_num(actual[i], nan=nan, posinf=nan, neginf=nan)
return actual, expected
@onlyNativeDeviceTypes
@dtypes(torch.long, torch.float32, torch.complex64)
def test_gradient_all(self, device, dtype):
    """torch.gradient vs np.gradient across shapes, dims, spacing styles,
    contiguity, and both edge orders."""
    def create_scalar(shape):
        # a single scalar spacing shared by all differentiated dims
        return make_tensor((1,), device='cpu', dtype=dtype, low=1.).item()

    def create_list(shape):
        # one scalar spacing per differentiated dim
        return make_tensor((len(shape),), device='cpu', dtype=dtype, low=1.).tolist()

    def create_coordinate_tensors(shape):
        # one coordinate tensor per differentiated dim
        tensor_list = []
        for i in range(len(shape)):
            tensor_list.append(make_tensor((shape[i],), device=device, dtype=dtype))
        return tensor_list

    def filter_shape(shape, dim):
        # keep only the sizes of the dims we differentiate over
        filtered_shape = []
        for i in range(len(dim)):
            filtered_shape.append(shape[dim[i]])
        return filtered_shape

    # shape, dims format
    test_cases = (
        ((5,), (0,)),
        ((4, 4), (0, 1)),
        ((3, 3, 3), (-1, 0)),
        ((4, 4, 4), (2,)),
        ((4, 4, 4), (0, 1)),
        ((4, 4, 4, 3), (0, 2, 3)),
        ((4, 5, 3, 4, 3), (1, 2)),
        ((4, 3, 6, 5, 3), (2, 4)),
        ((4, 3, 3, 5, 3), (0, 1, 2, 3, 4)),
        ((1, 3, 3), (1, 2)),
        ((1, 5), (1,)),
    )

    for case, contig, edge_order, space_fn in product(test_cases, [True, False], [1, 2],
                                                      (create_scalar, create_list, create_coordinate_tensors)):
        shape, dims = case
        # filter shape by dims before passing filtered shape to create_* functions
        filtered_shape = filter_shape(shape, dims)
        spacing = space_fn(filtered_shape)
        t = make_tensor(shape, device=device, dtype=dtype, noncontiguous=not contig)
        t_np = t.cpu().numpy()

        actual = torch.gradient(t, spacing=spacing, dim=dims, edge_order=edge_order)
        if space_fn == create_coordinate_tensors and spacing[0].device != 'cpu':
            # np.gradient needs CPU arrays for coordinate spacing
            spacing = [space.cpu().detach().numpy() for space in spacing]
        expected = np.gradient(t_np, *self._wrap_to_list(spacing), axis=dims, edge_order=edge_order)
        actual, expected = self._inf_nan_preprocess(list(actual), self._wrap_to_list(expected))
        self.assertEqual(actual, expected, equal_nan=True, atol=1e-4, rtol=0, exact_dtype=False)
@onlyNativeDeviceTypes
@dtypes(torch.long, torch.float32, torch.complex64)
def test_gradient_extreme_cases(self, device, dtype):
    """torch.gradient matches np.gradient on inf/nan inputs and on very large
    tensors with coordinate spacing."""
    # Test behaviour for inf and nan values
    actual = torch.gradient(torch.tensor([2, -2, inf, inf, -inf, -inf, inf, 3, -inf, 2, nan, nan, 3, inf, nan]))
    expected = np.gradient(np.array([2, -2, inf, inf, -inf, -inf, inf, 3, -inf, 2, nan, nan, 3, inf, nan]))
    self.assertEqual(actual, self._wrap_to_list(expected), exact_dtype=False)

    # Test behaviour in very big tensors
    large_size = 100000
    t = make_tensor((large_size,), dtype=dtype, device=device)
    t_np = t.cpu().numpy()
    # random (unsorted) coordinates stress the nonuniform-spacing code path
    coordinates_np = list(np.random.randn(large_size))
    coordinates = [torch.tensor(coordinates_np, device=device)]
    actual = torch.gradient(t, spacing=coordinates, dim=0, edge_order=1)
    expected = [np.gradient(t_np, coordinates_np, axis=0, edge_order=1)]
    self.assertEqual(actual, expected, exact_dtype=False)

    actual = torch.gradient(t, spacing=coordinates, dim=0, edge_order=2)
    expected = [np.gradient(t_np, coordinates_np, axis=0, edge_order=2)]
    self.assertEqual(actual, expected, exact_dtype=False)
@onlyNativeDeviceTypes
def test_gradient_type_promotion(self, device):
    """torch.gradient type promotion vs np.gradient for float/complex/int
    inputs combined with scalar, list, and coordinate-tensor spacings."""
    inputs = (
        make_tensor((4, 4), device=device, dtype=torch.float32),
        make_tensor((4, 4), device=device, dtype=torch.complex64),
        make_tensor((4, 4), device=device, dtype=torch.int64),
    )

    spacing = (
        # scalar spacings
        make_tensor((1,), device='cpu', dtype=torch.float32).item(),
        make_tensor((1,), device='cpu', dtype=torch.int64).item(),
        make_tensor((1,), device='cpu', dtype=torch.complex64).item(),
        # one scalar per dim
        make_tensor((2,), device='cpu', dtype=torch.float32, low=0.1).tolist(),
        make_tensor((2,), device='cpu', dtype=torch.int64, low=1).tolist(),
        make_tensor((2,), device='cpu', dtype=torch.complex64).tolist(),
        # one coordinate tensor per dim
        [make_tensor((4,), device=device, dtype=torch.float32),
         make_tensor((4,), device=device, dtype=torch.float32)],
        [make_tensor((4,), device=device, dtype=torch.int64),
         make_tensor((4,), device=device, dtype=torch.int64)],
        [make_tensor((4,), device=device, dtype=torch.complex64),
         make_tensor((4,), device=device, dtype=torch.complex64)],
    )

    for input, spacing_or_coord, edge_order in product(inputs, spacing, [1, 2]):
        # FIX: this assignment was accidentally duplicated in the original code.
        input_np = input.cpu().numpy()
        actual = torch.gradient(input, spacing=spacing_or_coord, dim=(0, 1), edge_order=edge_order)
        spacing_or_coord_wrapped = self._wrap_to_list(spacing_or_coord)
        spacing_or_coord_np = []
        if torch.is_tensor(spacing_or_coord_wrapped[0]) and torch.device(spacing_or_coord_wrapped[0].device).type != 'cpu':
            # np.gradient needs CPU arrays for coordinate spacing
            for i in range(len(spacing_or_coord_wrapped)):
                spacing_or_coord_np.append(spacing_or_coord_wrapped[i].detach().clone().cpu().numpy())
        else:
            spacing_or_coord_np = spacing_or_coord_wrapped
        expected = np.gradient(input_np, *spacing_or_coord_np, axis=(0, 1), edge_order=edge_order)
        if actual[0].dtype == torch.complex64 and input.dtype != torch.complex64:
            for i in range(len(actual)):
                self.assertEqual(actual[i].real, expected[i].real, exact_dtype=False)
                # Type promotion fails on Numpy when spacing is given as complex number and input is given as real.
                # Result is given just as real number and all the imaginary parts to be equal to zero.
                self.assertEqual(expected[i].imag, torch.zeros(actual[i].shape), exact_dtype=False)
        else:
            actual, expected = self._inf_nan_preprocess(list(actual), expected)
            self.assertEqual(actual, expected, equal_nan=True, exact_dtype=False)
def _test_large_cum_fn_helper(self, x, fn):
x_cpu = x.cpu().float()
expected = fn(x_cpu)
actual = fn(x).cpu().float()
self.assertEqual(expected, actual.cpu().float())
@unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "sandcastle OOM with current tpx gpu/re configuration")
@onlyCUDA
@dtypes(torch.half) # only small dtype not to get oom
def test_large_cumsum(self, device, dtype):
# initialization to avoid overflow and half caveats
x = torch.empty(2**30 + 200, device=device, dtype=dtype)
x[::3] = -3
x[1::3] = 2
x[2::3] = 1
self._test_large_cum_fn_helper(x, lambda x: torch.cumsum(x, 0))
@onlyCUDA
@dtypes(torch.half) # only small dtype not to get oom
def test_large_cumprod(self, device, dtype):
# initialization to avoid overflow and half caveats
x = torch.empty(2**30 + 200, device=device, dtype=dtype)
x[::3] = 8
x[1::3] = .25
x[2::3] = .5
self._test_large_cum_fn_helper(x, lambda x: torch.cumprod(x, 0))
@skipIfTorchDynamo("Torchdynamo fails with unknown reason")
@skipIfMps
def test_discontiguous_out_cumsum(self, device):
x = torch.randn(4, 8, device=device)
y = torch.empty(4, 16, device=device)[:, ::2]
out = torch.cumsum(x, 0)
torch.cumsum(x, 0, out=y)
self.assertFalse(y.is_contiguous())
self.assertEqual(out, y, atol=0., rtol=0.)
def _test_cumminmax_helper(self, x, fn, expected_val, expected_ind):
val, ind = fn(x, -1)
self.assertEqual(val, expected_val, atol=0, rtol=0)
self.assertEqual(ind, expected_ind, atol=0, rtol=0)
out_val = torch.empty_like(val).t().contiguous().t()
out_ind = torch.empty_like(ind).t().contiguous().t()
fn(x, -1, out=(out_val, out_ind))
self.assertFalse(out_val.is_contiguous())
self.assertFalse(out_ind.is_contiguous())
self.assertEqual(out_val, expected_val, atol=0, rtol=0)
self.assertEqual(out_ind, expected_ind, atol=0, rtol=0)
@skipIfMps
def test_cummax_discontiguous(self, device):
x = torch.tensor([[0, 1, 2, 3, 2, 1], [4, 5, 6, 5, 6, 7]], device=device, dtype=torch.float).t().contiguous().t()
expected_val = torch.tensor([[0, 1, 2, 3, 3, 3], [4, 5, 6, 6, 6, 7]], device=device, dtype=torch.float)
expected_ind = torch.tensor([[0, 1, 2, 3, 3, 3], [0, 1, 2, 2, 4, 5]], device=device, dtype=torch.long)
self._test_cumminmax_helper(x, torch.cummax, expected_val, expected_ind)
@skipIfMps
def test_cummin_discontiguous(self, device):
x = torch.tensor([[3, 2, 1, 0, 1, 2], [7, 6, 5, 4, 5, 2]], device=device, dtype=torch.float).t().contiguous().t()
expected_val = torch.tensor([[3, 2, 1, 0, 0, 0], [7, 6, 5, 4, 4, 2]], device=device, dtype=torch.float)
expected_ind = torch.tensor([[0, 1, 2, 3, 3, 3], [0, 1, 2, 3, 3, 5]], device=device, dtype=torch.long)
self._test_cumminmax_helper(x, torch.cummin, expected_val, expected_ind)
def test_bool_tensor_value_change(self, device):
x = torch.tensor([True, False], dtype=torch.bool, device=device)
x[0] = False
x[1] = True
self.assertEqual(x, torch.tensor([False, True], dtype=torch.bool, device=device))
# FIXME: move to shape ops test suite
def test_unfold_all_devices_and_dtypes(self, device):
for dt in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16):
if dt == torch.bool:
x = torch.empty((0, 1, 3, 0), dtype=dt, device=device)
self.assertEqual((0, 1, 1, 0, 3), x.unfold(2, 3, 2).shape)
else:
x = torch.empty((0, 1, 3, 0), dtype=dt, device=device)
self.assertEqual((0, 1, 1, 0, 3), x.unfold(2, 3, 2).shape)
# FIXME: move to shape ops test suite
def test_unfold_scalars(self, device):
x = torch.tensor(0.5, device=device)
# unfold on a 0-dimensional tensor should always return a 1-d dimensional
# tensor of shape [size] (i.e., the second parameter to unfold)
self.assertEqual(torch.empty(0, device=device), x.unfold(0, 0, 1))
self.assertEqual(torch.empty(0, device=device), x.unfold(0, 0, 2))
self.assertEqual(torch.tensor([0.5], device=device), x.unfold(0, 1, 1))
# FIXME: move to data movement test suite
def test_copy_all_dtypes_and_devices(self, device):
from copy import copy
for dt in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16):
x = torch.tensor([1, 2, 3, 4], dtype=dt, device=device)
x_clone = x.clone()
y = copy(x)
y.fill_(1)
# copy is a shallow copy, only copies the tensor view,
# not the data
self.assertEqual(x, y)
@onlyCPU
def test_bfloat16_float_copy(self, device):
for shape in [(20, 7), (249, 137), (1029, 917), (1, 7, 19, 17), (3, 77, 1091)]:
input = torch.randn(shape, dtype=torch.float, device=device)
out1 = input.to(torch.bfloat16)
self.assertEqual(input, out1, atol=0, rtol=1e-2, exact_dtype=False)
out2 = out1.to(torch.float)
self.assertEqual(out2, out1, atol=0, rtol=0, exact_dtype=False)
input_s = input[..., ::2, :]
out1 = input_s.to(torch.bfloat16)
self.assertEqual(input_s, out1, atol=0, rtol=1e-2, exact_dtype=False)
out2 = out1.to(torch.float)
self.assertEqual(out2, out1, atol=0, rtol=0, exact_dtype=False)
# FIXME: move to data movement test suite
@onlyNativeDeviceTypes
def test_copy_math_view(self, device):
for dst_dtype, src_dtype in [
(torch.float32, torch.float32),
(torch.float64, torch.float32),
(torch.int64, torch.int32),
(torch.complex128, torch.complex64),
]:
src = make_tensor((100,), dtype=src_dtype, device=device)
dst = torch.empty(100, dtype=dst_dtype, device=device)
dst.copy_(src)
self.assertEqual(dst, src, exact_dtype=False)
dst.copy_(src._neg_view())
self.assertEqual(dst, src.neg(), exact_dtype=False)
dst._neg_view().copy_(torch._neg_view(src))
self.assertEqual(dst, src, exact_dtype=False)
dst._neg_view().copy_(src)
self.assertEqual(dst, src.neg(), exact_dtype=False)
for dst_dtype, src_dtype in [
(torch.complex64, torch.complex64),
(torch.complex128, torch.complex64),
]:
src = make_tensor((100,), dtype=src_dtype, device=device)
dst = torch.empty(100, dtype=dst_dtype, device=device)
dst.conj().copy_(src)
self.assertEqual(dst, src.conj_physical(), exact_dtype=False)
dst.conj().copy_(src._neg_view())
self.assertEqual(dst, src.neg().conj_physical(), exact_dtype=False)
# FIXME: move to data movement test suite
@onlyNativeDeviceTypes
@dtypes(torch.int64, torch.float32, torch.complex64)
def test_copy_transpose_math_view(self, device, dtype):
src = make_tensor((100, 100), dtype=dtype, device=device).transpose(0, 1)
dst = torch.empty((100, 100), dtype=dtype, device=device)
dst._neg_view().copy_(src)
self.assertEqual(dst, -src)
dst._neg_view().copy_(src._neg_view())
self.assertEqual(dst, src)
dst.copy_(src._neg_view())
self.assertEqual(dst, -src)
if dtype.is_complex:
dst.conj().copy_(src)
self.assertEqual(dst, src.conj_physical())
dst.conj().copy_(src.conj())
self.assertEqual(dst, src)
dst.copy_(src.conj())
self.assertEqual(dst, src.conj_physical())
def test_clone_all_dtypes_and_devices(self, device):
for dt in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16):
x = torch.tensor((1, 1), dtype=dt, device=device)
y = x.clone()
self.assertEqual(x, y)
def test_clone_zero_stride_dim(self, device):
# stride zero, size 1 axis, not contiguous
x = torch.randn(10)
y = x.as_strided([2, 1, 5], [1, 0, 2])
self.assertEqual(y, y.clone())
def test_clone_not_memory_dense(self):
# github issue: https://github.com/pytorch/pytorch/issues/64176
x = torch.randn(10, 8).t()[::2, ::2]
y = x.clone()
# should retain permutation after densification
self.assertTrue(y.stride() == (1, 4))
    # FIXME: move to elementwise ternary test suite
    @dtypesIfCUDA(*set(get_all_math_dtypes('cuda')))
    @dtypes(*set(get_all_math_dtypes('cpu')))
    def test_addcmul(self, device, dtype):
        """Check torch.addcmul against the expanded expression
        ``a + value * b * c``, the deprecated positional-value overload, and
        a half-precision case whose intermediate product exceeds fp16 range.
        """
        # Returns floating or integral scalar corresponding to dtype
        def _number(floating, integer, dtype):
            if dtype in [torch.half, torch.float, torch.double, torch.bfloat16]:
                return floating
            elif dtype in [torch.cfloat, torch.cdouble]:
                return floating * (1 + 1j)
            else:
                return integer
        # Random (2, 2) operands; integral values are kept small (and nonzero
        # for uint8) so the a + alpha*b*c reference does not overflow.
        def rand_tensor(size, dtype, device):
            if dtype.is_floating_point or dtype.is_complex:
                return torch.rand(size=size, dtype=dtype, device=device)
            if dtype == torch.uint8:
                return torch.randint(1, 5, size=size, dtype=dtype, device=device)
            else:
                return torch.randint(-5, 5, size=size, dtype=dtype, device=device)
        a = rand_tensor((2, 2), dtype=dtype, device=device)
        b = rand_tensor((2, 2), dtype=dtype, device=device)
        c = rand_tensor((2, 2), dtype=dtype, device=device)
        alpha = _number(0.5, 3, dtype)
        actual = torch.addcmul(a, b, c, value=alpha)
        expected = a + alpha * b * c
        self.assertEqual(expected, actual)
        # The value-as-second-positional-argument overload is deprecated but
        # must still produce the same result (and warn).
        with self.assertWarnsOnceRegex(
                UserWarning, "This overload of addcmul is deprecated"):
            self.assertEqual(actual, torch.addcmul(a, alpha, b, c))
        if self.device_type == 'cuda' and dtype == torch.half:
            # 60000 * 2 = 120000 exceeds the fp16 max (65504), but the final
            # value 60000 + (-1)*120000 = -60000 is representable, so the
            # result must come out finite (no inf/nan from intermediates).
            a = torch.tensor([60000.0], device=device, dtype=dtype)
            b = torch.tensor([60000.0], device=device, dtype=dtype)
            c = torch.tensor([2.0], device=device, dtype=dtype)
            out = torch.addcmul(a, b, c, value=-1)
            self.assertTrue(not (out.isnan() or out.isinf()))
# FIXME: move to shape ops test suite
def test_narrow_empty(self, device):
x = torch.randn(2, 3, 4, device=device)
for d in range(x.dim()):
y = x.narrow(d, x.size(d), 0)
sz = list(x.size())
sz[d] = 0
self.assertEqual(sz, y.size())
    # FIXME: move to indexing test suite
    @parametrize("reduce", ['prod', 'amin', 'amax', 'mean'])
    @dtypes(*all_types_and(torch.half, torch.bfloat16))
    def test_index_reduce(self, device, dtype, reduce):
        """Compare Tensor.index_reduce_ against a sequential per-slice
        reference, over contiguity combos, index dtypes, dims, and
        include_self on/off."""
        size = (3, 4, 5)
        index_dtypes = [torch.int, torch.long]
        include_selfs = [True, False]
        # Identity elements for amin/amax: +/-inf for floats, the dtype's
        # extreme values for integral types.
        amin_init = float('inf') if dtype.is_floating_point else torch.iinfo(dtype).max
        amax_init = -float('inf') if dtype.is_floating_point else torch.iinfo(dtype).min
        reduction_init = {'prod': 1, 'mean': 0, 'amin': amin_init, 'amax': amax_init}
        for dest_noncontig, src_noncontig, index_noncontig in product([True, False], repeat=3):
            for idx_dtype, include_self in product(index_dtypes, include_selfs):
                for dim in range(len(size)):
                    # Random number of source slices (may be 0); indices into
                    # dest may repeat, which is exactly what reduce handles.
                    num_src = np.random.randint(10)
                    num_dest = size[dim]
                    dest = make_tensor(size, device=device, dtype=dtype, noncontiguous=dest_noncontig)
                    src_size = size[:dim] + (num_src,) + size[dim + 1:]
                    src = make_tensor(src_size, device=device, dtype=dtype, noncontiguous=src_noncontig)
                    idx = torch.randint(num_dest, (num_src,), dtype=idx_dtype, device=device)
                    if index_noncontig:
                        # noncontiguous_like fails with RuntimeError: XLA tensors do not have storage
                        idx = torch.testing.make_non_contiguous(idx)
                    expected = dest.clone()
                    dest.index_reduce_(dim, idx, src, reduce, include_self=include_self)
                    # fill rows in idx with reduction inits if include_self=False
                    if (not include_self):
                        expected.index_fill_(dim, idx.long(), reduction_init[reduce])
                    # Move the reduced dim to the front so slices can be
                    # addressed as expected[idx[i]].
                    expected = expected.transpose(0, dim)
                    src = src.transpose(0, dim)
                    for i in range(num_src):
                        if reduce == 'prod':
                            expected[idx[i]] *= src[i]
                        elif reduce == 'amin':
                            torch.minimum(expected[idx[i]], src[i], out=expected[idx[i]])
                        elif reduce == 'amax':
                            torch.maximum(expected[idx[i]], src[i], out=expected[idx[i]])
                        else:
                            expected[idx[i]] += src[i]
                    if reduce == 'mean':
                        # Per-slot contribution counts; untouched slots get a
                        # count of 1 so dividing is a no-op for them.
                        counts = torch.ones_like(expected) if include_self else torch.zeros_like(expected)
                        counts.index_add_(0, idx, torch.ones_like(src))
                        counts.masked_fill_(counts == 0, 1)
                        if (dtype.is_floating_point):
                            expected.div_(counts)
                        else:
                            # Integral mean uses floor division, matching the op.
                            expected.div_(counts, rounding_mode="floor")
                    expected = expected.transpose(0, dim)
                    self.assertEqual(dest, expected)
    # FIXME: move to test indexing
    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
    def test_index_copy(self, device, dtype):
        """Compare Tensor.index_copy_ against a slice-by-slice reference over
        contiguous/non-contiguous destination, source, and index."""
        # We just test for num_copy <= num_dest, as otherwise there are repeated indices
        # and the behavior is undefined
        num_copy, num_dest = 3, 5
        # Tensor shaped like `batch_sizes` with a length-`n` axis inserted at
        # position `dim`.
        def make_arg(batch_sizes, n, dim, contig):
            size_arg = batch_sizes[:dim] + (n,) + batch_sizes[dim:]
            return make_tensor(size_arg, dtype=dtype, device=device, low=None, high=None, noncontiguous=not contig)
        # Reference: assign source slice i to target slice idx[i] along `dim`.
        def ref_index_copy(tgt, dim, idx, src):
            for i in range(idx.size(0)):
                idx_dest = dim * (slice(None),) + (idx[i],)
                idx_src = dim * (slice(None),) + (i,)
                tgt[idx_dest] = src[idx_src]
        # More thorough testing as in index_add
        for dest_contig, src_contig, index_contig in product([True, False], repeat=3):
            for other_sizes in ((), (4, 5)):
                # NOTE(review): for other_sizes == () this inner loop is
                # range(0) and never runs -- confirm that is intended.
                for dim in range(len(other_sizes)):
                    dest = make_arg(other_sizes, num_dest, dim, dest_contig)
                    src = make_arg(other_sizes, num_copy, dim, src_contig)
                    # randperm keeps the indices unique (see note above).
                    idx = torch.randperm(num_dest, dtype=torch.int64, device=device)[:num_copy]
                    if not index_contig:
                        # Interleave-then-stride makes idx non-contiguous
                        # without changing its values.
                        idx = torch.repeat_interleave(idx, 2, dim=-1)
                        idx = idx[..., ::2]
                    dest2 = dest.clone()
                    dest.index_copy_(dim, idx, src)
                    ref_index_copy(dest2, dim, idx, src)
                    self.assertEqual(dest, dest2)
# FIXME: move to test indexing
# onlyNativeDeviceTypes due to an XLA error:
# https://github.com/pytorch/pytorch/issues/53256
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_index_copy_scalars(self, device, dtype):
# Create the 8 possible combinations of scalar sizes for target / index / source
scalars = ((make_tensor(size_t, dtype=dtype, device=device, low=None, high=None),
make_tensor(size_i, dtype=torch.int64, device=device, low=0, high=1),
make_tensor(size_s, dtype=dtype, device=device, low=None, high=None))
for size_t, size_i, size_s in product([(), (1,)], repeat=3))
for target, idx, source in scalars:
target.index_copy_(0, idx, source)
self.assertEqual(target.item(), source.item())
# FIXME: move to test indexing
@onlyCPU
def test_errors_index_copy(self, device):
# We do not test the GPU as the CUDA_ASSERT would break the CUDA context
idx_dim = 8
tgt_dim = 5
batch_dim = 3
# Too large of an index
a = torch.randn(batch_dim, tgt_dim, device=device)
idx = torch.full((idx_dim,), tgt_dim, device=device)
c = torch.zeros(batch_dim, idx_dim, device=device)
with self.assertRaises(IndexError):
a.index_copy_(1, idx, c)
# Too small (negative indices)
idx = torch.full((idx_dim,), -1, device=device)
with self.assertRaises(IndexError):
a.index_copy_(1, idx, c)
# Too small (very negative indices) - they should be unsupported even
# when support for negative indices is implemented for index_copy_
idx = torch.full((idx_dim,), -tgt_dim - 1, device=device)
with self.assertRaises(IndexError):
a.index_copy_(1, idx, c)
def _prepare_data_for_index_copy_and_add_deterministic(
self, dim: int, device: torch.device
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
assert (dim >= 0 and dim < 3)
a = [5, 4, 3]
a[dim] = 2000
x = torch.zeros(a, device=device)
b = a.copy()
elems = a[dim] * 20
b[dim] = elems
src = torch.rand(b, device=device)
index = torch.randint(a[dim], (elems,), device=device)
return (x, index, src)
# FIXME: move to test indexing
@onlyNativeDeviceTypes
def test_index_copy_deterministic(self, device: torch.device) -> None:
for dim in range(3):
x, index, src = self._prepare_data_for_index_copy_and_add_deterministic(dim, device)
with DeterministicGuard(True):
y0 = torch.index_copy(x, dim, index, src)
x0 = x.clone().detach()
index_list = index.tolist()
for i in range(len(index_list)):
if dim == 0:
x0[index_list[i], :, :] = src[i, :, :]
elif dim == 1:
x0[:, index_list[i], :] = src[:, i, :]
elif dim == 2:
x0[:, :, index_list[i]] = src[:, :, i]
self.assertEqual(x0, y0, atol=0, rtol=0)
# FIXME: move to test indexing
@onlyNativeDeviceTypes
def test_index_add_deterministic(self, device: torch.device) -> None:
for dim in range(3):
x, index, src = self._prepare_data_for_index_copy_and_add_deterministic(dim, device)
alpha = random.random() + 1
# on CPU it should be deterministic regardless of the deterministic mode
with DeterministicGuard(True):
y0 = torch.index_add(x, dim, index, src, alpha=alpha)
for _ in range(3):
y = torch.index_add(x, dim, index, src, alpha=alpha)
self.assertEqual(y, y0, atol=0, rtol=0)
with DeterministicGuard(False):
for _ in range(3):
y_nd = torch.index_add(x, dim, index, src, alpha=alpha)
self.assertEqual(y_nd, y0, atol=1e-3, rtol=1e-5)
# FIXME: find a test suite for the put operator
@onlyNativeDeviceTypes
def test_index_put_non_accumulate_deterministic(self, device) -> None:
with DeterministicGuard(True):
for i in range(3):
m = random.randint(10, 20)
elems = random.randint(20000, 30000)
values = torch.rand(elems, device=device)
indices = torch.randint(m, (elems,), device=device)
input = torch.rand(m, device=device)
output = input.index_put((indices,), values, accumulate=False)
input_list = input.tolist()
indices_list = indices.tolist()
values_list = values.tolist()
for i, v in zip(indices_list, values_list):
input_list[i] = v
self.assertEqual(output, input_list)
# FIXME: move to test indexing
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
@skipIfMps
def test_index_fill(self, device, dtype):
x = torch.tensor([[1, 2], [4, 5]], dtype=dtype, device=device)
index = torch.tensor([0], device=device)
x.index_fill_(1, index, 0)
self.assertEqual(x, torch.tensor([[0, 2], [0, 5]], dtype=dtype, device=device))
if not x.is_complex() and not device == "meta":
with self.assertRaisesRegex(RuntimeError, r"Scalar"):
x.index_fill_(1, index, 1 + 1j)
# Make sure that the result stays 0-dim while applied to
# a 0-dim input
x = torch.tensor(1, dtype=dtype, device=device)
self.assertEqual(0, x.index_fill(0, index, -1).dim())
self.assertEqual(0, x.index_fill_(0, index, -1).dim())
    # FIXME: move to test indexing
    # The test fails for zero-dimensional tensors on XLA
    @onlyNativeDeviceTypes
    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
    def test_index_select(self, device, dtype):
        """Compare torch.index_select against numpy.take over contiguity
        combinations, int32/int64 index dtypes, and scalar edge cases."""
        num_src, num_out = 3, 5
        # Tensor shaped like `batch_sizes` with a length-`n` axis at `dim`.
        def make_arg(batch_sizes, n, dim, contig):
            size_arg = batch_sizes[:dim] + (n,) + batch_sizes[dim:]
            return make_tensor(size_arg, dtype=dtype, device=device, low=None, high=None, noncontiguous=not contig)
        def ref_index_select(src, dim, idx):
            # bfloat16 is just used on GPU, so it's not supported on numpy
            if dtype == torch.bfloat16:
                src = src.float()
            out = torch.from_numpy(np.take(src.cpu().numpy(), idx.cpu().numpy(), axis=dim))
            if dtype == torch.bfloat16:
                out = out.to(device=device, dtype=dtype)
            return out
        for src_contig, idx_contig in product([True, False], repeat=2):
            for other_sizes in ((), (4, 5)):
                # NOTE(review): for other_sizes == () this loop body never
                # runs (range(0)) -- confirm the scalar case is meant to be
                # covered only by the `scalars` block below.
                for dim in range(len(other_sizes)):
                    src = make_arg(other_sizes, num_src, dim, src_contig)
                    idx = make_tensor(
                        (num_out,), dtype=torch.int64, device=device, low=0, high=num_src, noncontiguous=not idx_contig
                    )
                    out = torch.index_select(src, dim, idx)
                    out2 = ref_index_select(src, dim, idx)
                    self.assertEqual(out, out2)
        # int32 indices are accepted in addition to int64.
        for idx_type in (torch.int32, torch.int64):
            other_sizes = (3, 2)
            dim = 1
            src = make_arg(other_sizes, num_src, dim, True)
            idx = make_tensor((num_out,), dtype=idx_type, device=device, low=0, high=num_src, noncontiguous=False)
            out = torch.index_select(src, dim, idx)
            out2 = ref_index_select(src, dim, idx)
            self.assertEqual(out, out2)
        # Create the 4 possible combinations of scalar sizes for index / source
        scalars = ((make_tensor(size_s, dtype=dtype, device=device),
                    torch.zeros(size_i, dtype=torch.int64, device=device))
                   for size_s, size_i in product([(), (1,)], repeat=2))
        for source, idx in scalars:
            out = source.index_select(0, idx)
            self.assertEqual(out.item(), source.item())
    # FIXME: find a test suite for the take operator
    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
    def test_take(self, device, dtype):
        """Compare torch.take (flat indexing) against numpy.take over
        contiguity and index-shape combinations, plus scalar edge cases."""
        idx_size = (4,)
        make_arg = partial(make_tensor, device=device, dtype=dtype)
        make_idx = partial(make_tensor, low=0, device=device, dtype=torch.int64)
        def ref_take(src, idx):
            # numpy has no bfloat16: round-trip through half instead.
            if dtype == torch.bfloat16:
                src = src.half()
            src = src.cpu().numpy()
            idx = idx.cpu().numpy()
            out = torch.from_numpy(np.take(src, idx)).to(device=device, dtype=dtype)
            return out
        for src_contig, idx_contig, idx_reshape in product([True, False], repeat=3):
            for src_size in ((5,), (4, 5)):
                src = make_arg(src_size, noncontiguous=not src_contig)
                idx = make_idx(idx_size, high=src.numel(), noncontiguous=not idx_contig)
                if idx_reshape:
                    # take flattens the source but preserves the index shape.
                    idx = idx.reshape(2, 2)
                out = torch.take(src, idx)
                out2 = ref_take(src, idx)
                self.assertEqual(out, out2)
        # Create the 4 possible combinations of scalar sizes for source / index
        for size_s, size_i in product([(), (1,)], repeat=2):
            source = make_arg(size_s)
            idx = make_idx(size_i, high=1)
            out = source.take(idx)
            self.assertEqual(out.item(), source.item())
    # FIXME: find a test suite for the put operator
    # The bool instance does not work on GPU. See
    # https://github.com/pytorch/pytorch/issues/54317
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
    def test_put(self, device, dtype):
        """Compare torch.put / Tensor.put_ against an index_copy_/index_add_
        based reference over contiguity and accumulate combinations, plus
        scalar corner cases and empty index/source."""
        src_size = (4,)
        make_arg = partial(make_tensor, device=device, dtype=dtype)
        make_idx = partial(make_tensor, low=0, device=device, dtype=torch.int64)
        # put treats dst as flat, so index_copy_ (or index_add_ when
        # accumulating) over the flattened tensors is an exact reference.
        def ref_put(dst, idx, src, accumulate):
            new_dst = dst.clone(memory_format=torch.contiguous_format).view(-1)
            new_idx = idx.contiguous().view(-1)
            new_src = src.contiguous().view(-1)
            method = new_dst.index_add_ if accumulate else new_dst.index_copy_
            return method(0, new_idx, new_src).view_as(dst)
        for dst_contig, src_contig, idx_contig, idx_reshape, accumulate in product([True, False], repeat=5):
            for dst_size in ((5,), (4, 5)):
                dst = make_arg(dst_size, noncontiguous=not dst_contig)
                src = make_arg(src_size, noncontiguous=not src_contig)
                # If accumulate=True, `put_` should be deterministic regardless of the inputs on CPU
                # On CUDA it may not be, but the test has enough tolerance to account for this
                if accumulate:
                    idx = make_idx(src_size, high=dst.numel())
                else:
                    # Without accumulation, repeated indices would make the
                    # result order-dependent; randperm keeps them unique.
                    idx = torch.randperm(dst.numel(), dtype=torch.int64, device=device)[:src_size[0]]
                if not idx_contig:
                    # Interleave-then-stride: non-contiguous, same values.
                    idx = torch.repeat_interleave(idx, 2, dim=-1)[..., ::2]
                if idx_reshape:
                    idx = idx.reshape(2, 2)
                out = torch.put(dst, idx, src, accumulate)
                # out-place
                reference = ref_put(dst, idx, src, accumulate)
                self.assertEqual(out, reference)
                # in-place
                dst.put_(idx, src, accumulate)
                self.assertEqual(dst, reference)
        # Create the 8 possible combinations of scalar sizes for target / index / source
        scalars = ((make_arg(size_t),
                    make_idx(size_i, high=1),
                    make_arg(size_s))
                   for size_t, size_i, size_s in product([(), (1,)], repeat=3))
        for (dest, idx, source), accumulate in product(scalars, [True, False]):
            dest_init = dest.clone()
            # out-place
            out = torch.put(dest, idx, source, accumulate=accumulate)
            # in-place
            dest1 = dest.clone()
            dest1.put_(idx, source, accumulate=accumulate)
            for d in [out, dest1]:
                if accumulate:
                    self.assertEqual(d.item(), (dest_init + source).item())
                else:
                    self.assertEqual(d.item(), source.item())
        # Empty case
        # put with an empty index/source must leave the destination unchanged.
        dest = make_arg((3, 2))
        reference = dest.clone()
        idx = make_idx((0,), high=1)
        source = make_arg((0,))
        for accumulate in [True, False]:
            out = torch.put(dest, idx, source, accumulate=accumulate)
            self.assertEqual(out, reference)
            dest.put_(idx, source, accumulate=accumulate)
            self.assertEqual(dest, reference)
# FIXME: find a test suite for the put operator
# The bool instance does not work on GPU. See
# https://github.com/pytorch/pytorch/issues/54317
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
def test_put_accumulate(self, device, dtype):
# Test for parallel adds with accumulate == True
low_precision = dtype == torch.half or dtype == torch.bfloat16
# Less numbers to avoid overflow with low_precision
# Grainsize is 3000 for the for_loop to be parallized on CPU
sizes = ((100,)) if low_precision else ((200,), (3002,))
# Bfloat16 has a particularly bad performance here
# This operation is nondeterministic on GPU, so we are generous with the rtol
rtol, atol = (1e-1, 1e-2) if low_precision else (1e-3, 1e-4)
make_arg = partial(make_tensor, low=-2, high=3, device=device, dtype=dtype)
# Dump everything into the 0-th position
make_idx = partial(torch.zeros, device=device, dtype=torch.int64)
args = ((make_idx(size), make_arg(size)) for size in sizes)
for idx, source in args:
orig = make_arg((1,))
out = orig.put(idx, source, accumulate=True)
self.assertEqual(out, orig + source.sum(), rtol=rtol, atol=atol)
# FIXME: find a test suite for the take operator
@skipIfMps
def test_take_empty(self, device):
for input_shape in [(0,), (0, 1, 2, 0), (1, 2, 3)]:
for indices_shape in [(0,), (0, 1, 2, 0)]:
input = torch.empty(input_shape, device=device)
indices = torch.empty(indices_shape, dtype=torch.int64, device=device)
self.assertEqual(indices, torch.take(input, indices), exact_dtype=False)
# FIXME: find a test suite for the put operator
def test_put_empty(self, device):
for dst_shape in [(0,), (0, 1, 2, 0), (1, 2, 3)]:
for indices_shape in [(0,), (0, 1, 2, 0)]:
for accumulate in [False, True]:
dst = torch.randn(dst_shape, device=device)
indices = torch.empty(indices_shape, dtype=torch.int64, device=device)
src = torch.randn(indices_shape, device=device)
self.assertEqual(dst, dst.put_(indices, src, accumulate=accumulate))
# FIXME: port to test_scatter_gather_ops.py
def scatter_allow_reduce(self, device, dtype, reduceop):
device_type = torch.device(device).type
return device_type != 'cuda' or (reduceop == 'multiply' and dtype.is_floating_point)
@dtypes(*floating_and_complex_types())
@dtypesIfCPU(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
@dtypesIfCUDA(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_scatter_reduce_operations_to_large_input(self, device, dtype):
index = torch.tensor([[1], [2]], device=device, dtype=torch.long)
test_data = [
(torch.zeros(4, 4, device=device, dtype=dtype),
torch.ones(2, 2, device=device, dtype=dtype),
torch.tensor([[0, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0]],
device=device, dtype=dtype), "add"),
(torch.tensor([2], device=device, dtype=dtype).repeat(4, 4),
torch.tensor([6], device=device, dtype=dtype).repeat(2, 2),
torch.tensor([[2, 2, 2, 2],
[12, 2, 2, 2],
[12, 2, 2, 2],
[2, 2, 2, 2]], device=device, dtype=dtype), "multiply"),
]
for input, src, result, operation in test_data:
if not self.scatter_allow_reduce(device, dtype, operation):
continue
input.scatter_(0, index, src, reduce=operation)
self.assertEqual(input, result)
@dtypes(*floating_and_complex_types())
@dtypesIfCPU(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
@dtypesIfCUDA(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_scatter_reduce_scalar(self, device, dtype):
index = torch.tensor([[1], [2]], device=device, dtype=torch.long)
test_data = [
(torch.zeros(4, 4, device=device, dtype=dtype), 1,
torch.tensor([[0, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0]],
device=device, dtype=dtype), "add"),
(torch.tensor([2], device=device, dtype=dtype).repeat(4, 4), 2,
torch.tensor([[2, 2, 2, 2],
[4, 2, 2, 2],
[4, 2, 2, 2],
[2, 2, 2, 2]], device=device, dtype=dtype), "multiply"),
]
for input, src, result, operation in test_data:
if not self.scatter_allow_reduce(device, dtype, operation):
continue
input.scatter_(0, index, src, reduce=operation)
self.assertEqual(input, result)
# FIXME: port to test_scatter_gather_ops.py
# TODO: remove this after scatter_add_ is deprecated.
def test_scatter_add_non_unique_index(self, device):
height = 2
width = 65536
input = torch.ones(height, width, device=device)
index = torch.zeros(height, width, dtype=torch.long, device=device)
src = torch.ones(height, width, device=device)
input.scatter_add_(0, index, src)
self.assertEqual(input,
torch.tensor([[3], [1]], device=device,
dtype=torch.float32).repeat(1, width))
@dtypes(*floating_and_complex_types())
@dtypesIfCPU(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
@dtypesIfCUDA(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_scatter_reduce_non_unique_index(self, device, dtype):
height = 2
width = 2
index = torch.zeros(height, width, dtype=torch.long, device=device)
test_data = [
(torch.ones(height, width, device=device, dtype=dtype),
torch.ones(height, width, device=device, dtype=dtype),
torch.tensor([[3], [1]], device=device, dtype=dtype).repeat(1, width), "add"),
(torch.tensor([2], device=device, dtype=dtype).repeat(height, width),
torch.tensor([2], device=device, dtype=dtype).repeat(height, width),
torch.tensor([[8], [2]], device=device,
dtype=dtype).repeat(1, width), "multiply"),
]
for input, src, result, operation in test_data:
if not self.scatter_allow_reduce(device, dtype, operation):
continue
input.scatter_(0, index, src, reduce=operation)
self.assertEqual(input, result, msg=f"result: {result} input: {input} method: {str(operation)}")
@onlyCUDA
@dtypes(*complex_types())
def test_scatter_reduce_multiply_unsupported_dtypes(self, device, dtype):
height = 2
width = 2
index = torch.zeros(height, width, dtype=torch.long, device=device)
input = torch.ones(height, width, device=device, dtype=dtype)
src = torch.ones(height, width, device=device, dtype=dtype)
with self.assertRaises(RuntimeError):
input.scatter_(0, index, src, reduce="multiply")
# FIXME: port to test_scatter_gather_ops.py
def test_scatter_to_large_input(self, device):
input = torch.zeros(4, 4, device=device)
src = torch.ones(2, 2, device=device)
index = torch.tensor([[1], [2]], device=device, dtype=torch.long)
input.scatter_(0, index, src)
self.assertEqual(input, torch.tensor([[0, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0]], device=device, dtype=torch.float32))
# FIXME: port to test_scatter_gather_ops.py
def test_scatter_add_to_large_input(self, device):
input = torch.zeros(4, 4, device=device)
src = torch.ones(2, 2, device=device)
index = torch.tensor([[1], [2]], device=device, dtype=torch.long)
input.scatter_add_(0, index, src)
self.assertEqual(input, torch.tensor([[0, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0]], device=device, dtype=torch.float32))
# FIXME: port to test_scatter_gather_ops.py
def test_scatter_bool(self, device):
x = torch.tensor([[True, True, True], [True, True, True]], device=device)
res = torch.zeros(3, 3, dtype=torch.bool, device=device)
res = res.scatter_(0, torch.tensor([[0, 1, 2], [0, 1, 2]], device=device), x)
self.assertEqual(res, torch.tensor([[True, False, False],
[False, True, False],
[False, False, True]], device=device))
# FIXME: port to test_scatter_gather_ops.py
def test_scatter_add_bool(self, device):
x = torch.tensor([[True, True, True, True, True], [True, True, True, True, True]], device=device)
res = torch.zeros(3, 5, dtype=torch.bool, device=device)
res = res.scatter_add_(0, torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]], device=device), x)
self.assertEqual(res, torch.tensor([[True, True, True, True, True],
[False, True, False, True, False],
[True, False, True, False, True]], device=device))
    # FIXME: find a test suite for the masked scatter operator
    @onlyNativeDeviceTypes
    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
    def test_masked_scatter(self, device, dtype):
        """Check masked_scatter_ against a sequential reference, error on a
        too-small source (CPU only), empty-tensor edge cases, and the exact
        number of uint8-mask deprecation warnings emitted."""
        dt = dtype
        # Record every warning so the uint8-mask deprecation count can be
        # verified after the loop.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            for maskType in [torch.uint8, torch.bool]:
                num_copy, num_dest = 3, 10
                dest = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dt, device=device)
                dest2 = dest.clone()
                dest_ones = dest.clone()
                dest_ones_expected = dest.clone()
                src = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=dt, device=device)
                src_ones = torch.tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=dt, device=device)
                # Mask has exactly num_copy (3) true entries.
                mask = torch.tensor((0, 0, 0, 0, 1, 0, 1, 0, 1, 0), dtype=maskType, device=device)
                if dt == torch.bool:
                    # torch.bool is a special case and is being tested
                    # in a separate test
                    return
                dest.masked_scatter_(mask, src)
                # Sequential reference: masked slots consume source elements
                # in order.
                j = 0
                for i in range(num_dest):
                    if mask[i]:
                        dest2[i] = src[j]
                        dest_ones_expected[i] = src_ones[j]
                        j += 1
                self.assertEqual(dest, dest2, atol=0, rtol=0)
                dest_ones.masked_scatter_(mask, src_ones)
                self.assertEqual(dest_ones, dest_ones_expected, atol=0, rtol=0)
                # Bound checking in CUDA is done inside a kernel
                # in order to avoid synchronization, but this means
                # we can not clear the failures. So there is no way
                # to test it then recover.
                if self.device_type != 'cuda':
                    # make src smaller. this should fail
                    src = torch.zeros(num_copy - 1, dtype=dt, device=device)
                    with self.assertRaises(RuntimeError):
                        dest.masked_scatter_(mask, src)
                # empty tensor
                dest = torch.empty((5, 0, 5), dtype=dt, device=device)
                mask = torch.ones_like(dest, dtype=maskType, device=device)
                src = torch.empty((0,), dtype=dt, device=device)
                dest.masked_scatter_(mask, src)
                # broadcastable mask over an empty destination
                dest = torch.empty((5, 0, 5), dtype=dt, device=device)
                mask = torch.ones((5, 1, 5), dtype=maskType, device=device)
                src = torch.empty((0,), dtype=dt, device=device)
                dest.masked_scatter_(mask, src)
        # Each uint8-mask call warns once: 4 successful calls, plus the
        # failing too-small-source call that only happens off CUDA.
        if self.device_type != 'cuda':
            self.assertEqual(len(w), 5)
        else:
            self.assertEqual(len(w), 4)
        warn = 'masked_scatter_ received a mask with dtype torch.uint8,'
        for wi in w:
            self.assertEqual(str(wi.message)[0:55], str(warn))
# FIXME: find a test suite for the masked scatter operator
@skipIfMps
def test_masked_scatter_bool_tensor(self, device):
    """masked_scatter on bool tensors: in-place and out-of-place variants."""
    source = torch.tensor([True, True, True], device=device)
    target = torch.tensor([False, False, False], device=device)
    # In-place: only the single True position in the mask is overwritten.
    select = torch.tensor([False, True, False], device=device)
    target.masked_scatter_(select, source)
    self.assertEqual(target, torch.tensor([False, True, False], device=device))
    # Out-of-place with the complementary mask fills the remaining slots.
    select = torch.tensor([True, False, True], device=device)
    target = target.masked_scatter(select, source)
    self.assertEqual(target, torch.tensor([True, True, True], device=device))
# FIXME: find a test suite for the masked scatter operator
# test_scatter_gather_ops or test_masked_ops?
@onlyCUDA
@largeTensorTest('30GB')
def test_masked_scatter_large_tensor(self, device):
    """masked_scatter on a tensor of more than 2**31 elements.

    Exercises the large-index (64-bit) path on CUDA; the same bool tensor
    serves as input, mask and source, and the result is compared against
    the CPU implementation.
    """
    t_cpu = torch.empty(2**31 + 1, dtype=torch.bool).random_()
    t = t_cpu.to(device)
    result_cpu = t_cpu.masked_scatter(t_cpu, t_cpu)
    result = t.masked_scatter(t, t)
    self.assertEqual(result, result_cpu)
# FIXME: find a test suite for the masked select operator
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_masked_select(self, device, dtype):
    """masked_select with bool and (deprecated) uint8 masks.

    Checks the selected values against a Python reference, the uint8
    deprecation warning, the out= overload, and mask/input broadcasting.
    """
    # CPU and CUDA emit differently worded uint8 deprecation warnings.
    # NOTE(review): `device == 'cpu'` is a raw string compare and would not
    # match e.g. 'cpu:0' -- confirm the test framework always passes 'cpu'.
    if device == 'cpu':
        warn = 'masked_select received a mask with dtype torch.uint8,'
    else:
        warn = 'indexing with dtype torch.uint8 is now deprecated, pl'
    for maskType in [torch.uint8, torch.bool]:
        num_src = 10
        src = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=dtype, device=device)
        mask = torch.randint(2, (num_src,), device=device, dtype=maskType)

        with warnings.catch_warnings(record=True) as w:
            dst = src.masked_select(mask)
            # Exactly one deprecation warning is expected for a uint8 mask.
            if maskType is torch.uint8:
                self.assertEqual(len(w), 1)
                self.assertEqual(str(w[0].message)[0:53], str(warn))
        # Python reference: keep the elements whose mask entry is nonzero.
        dst2 = []
        for i in range(num_src):
            if mask[i]:
                dst2 += [src[i]]
        self.assertEqual(dst, torch.tensor(dst2), atol=0, rtol=0)

        # The out= overload must produce the same result.
        dst3 = torch.empty(0, device=device, dtype=dtype)
        torch.masked_select(src, mask, out=dst3)
        self.assertEqual(dst3, torch.tensor(dst2, dtype=dst3.dtype), atol=0, rtol=0)

    # Since half on CPU is not supported, need to skip the remaining test cases
    if dtype == torch.half and torch.device(device).type == 'cpu':
        return

    # Ensure that masks are expanded to match tensor properly
    a = torch.rand(100, 100, device=device).mul(100).to(dtype)
    mask_first_el_each_row = torch.zeros(100, device=device, dtype=torch.bool)
    mask_first_el_each_row[0] = True
    a_masked = a.masked_select(mask_first_el_each_row)
    self.assertEqual(a_masked, a[:, 0])

    mask_first_row = torch.zeros(100, 1, device=device, dtype=torch.bool)
    mask_first_row[0][0] = True
    a_masked = a.masked_select(mask_first_row)
    self.assertEqual(a_masked, a[0, :])

    # Ensure that tensor is expanded to match mask properly
    a = torch.rand(100, device=device).mul(100).to(dtype)
    mask_copy_3_times = torch.tensor([[True], [True], [False], [True]], device=device)
    a_masked = a.masked_select(mask_copy_3_times)
    self.assertEqual(a_masked, a.unsqueeze(0).expand(3, 100).flatten())
# FIXME: find a test suite for the masked select operator
def test_masked_select_discontiguous(self, device):
for size in (10, 200):
vals = torch.rand(size, size, device=device)
mask = torch.full((size, size), False, dtype=torch.bool, device=device)
mask[:, ::2] = True
vals_list = (vals, vals.t())
mask_list = (mask, mask.t())
out_dc = torch.empty(size * size, device=device)[::2]
for v, m in product(vals_list, mask_list):
if m.is_contiguous():
expected = v[:, ::2].clone().reshape((-1, ))
else:
expected = v[::2].clone().reshape((-1, ))
out = torch.masked_select(v, m)
self.assertEqual(out, expected, atol=0, rtol=0)
torch.masked_select(v, m, out=out_dc)
self.assertEqual(out_dc, expected, atol=0, rtol=0)
# FIXME: find a test suite for the masked fill operator
@dtypes(*product(all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16), (torch.uint8, torch.bool)))
def test_masked_fill(self, device, dtypes):
    """masked_fill_ for every (value dtype, mask dtype) pair.

    Verifies filled values against a Python reference, covers a
    non-contiguous destination, and counts the uint8-mask deprecation
    warnings (one per masked_fill_ call with a uint8 mask).
    """
    dtype = dtypes[0]
    mask_dtype = dtypes[1]
    # NOTE(review): tensors below are created on the default device, not on
    # `device` -- confirm this is intentional for this test.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")

        num_dest = 10
        dst = torch.zeros(num_dest, dtype=dtype)
        mask = torch.randint(2, (num_dest,), dtype=mask_dtype)
        val = random.random()
        dst2 = dst.clone()

        dst.masked_fill_(mask, val)
        # Python reference: write `val` only where the mask is nonzero.
        for i in range(num_dest):
            if mask[i]:
                dst2[i] = val
        self.assertEqual(dst, dst2, atol=0, rtol=0)

        # test non-contiguous case
        dst = ((torch.randn(num_dest, num_dest, num_dest) * 10).to(dtype)).permute((2, 0, 1))
        dst2 = dst.contiguous()
        if dtype.is_complex:
            mask = dst.abs() > 0
        else:
            mask = dst > 0
        self.assertTrue(not dst.is_contiguous())
        self.assertTrue(dst2.is_contiguous())
        dst.masked_fill_(mask.to(mask_dtype), val)
        dst2.masked_fill_(mask.to(mask_dtype), val)
        self.assertEqual(dst, dst2, atol=0, rtol=0)

        # Three masked_fill_ calls above => exactly three deprecation
        # warnings for a uint8 mask, and none for a bool mask.
        if mask_dtype == torch.uint8:
            self.assertEqual(len(w), 3)
            warn = 'masked_fill_ received a mask with dtype torch.uint8,'
            for wi in w:
                self.assertEqual(str(wi.message)[0:52], str(warn))
        else:
            self.assertEqual(len(w), 0)
# FIXME: find a test suite for the masked fill operator
def test_masked_fill_bool_tensor(self, device):
dst = torch.tensor([True, False, True], device=device)
mask = torch.tensor([False, True, False], device=device)
dst.masked_fill_(mask, True)
self.assertEqual(dst, torch.tensor([True, True, True], device=device))
dst = dst.masked_fill(mask, False)
self.assertEqual(dst, torch.tensor([True, False, True], device=device))
def test_tensor_shape_empty(self, device):
    """Shape-manipulation ops on tensors with zero-sized dimensions.

    Every op below must accept a (0, 1, 3, 0) input and produce the
    documented (often also empty) output shape instead of erroring.
    """
    x = torch.randn((0, 1, 3, 0), device=device)
    # flatten
    self.assertEqual((0,), torch.flatten(x, 0, 3).shape)
    self.assertEqual((0, 0), torch.flatten(x, 0, 2).shape)
    self.assertEqual((0, 3, 0), torch.flatten(x, 1, 2).shape)

    # squeeze, unsqueeze
    self.assertEqual((0, 1, 1, 3, 0), torch.unsqueeze(x, 1).shape)
    self.assertEqual((0, 3, 0), torch.squeeze(x, 1).shape)
    self.assertEqual((0, 3, 0), torch.squeeze(x).shape)

    # transpose, t
    self.assertEqual((0, 0, 3, 1), torch.transpose(x, 1, 3).shape)
    y = torch.randn((5, 0), device=device)
    self.assertEqual((0, 5), y.t().shape)

    # select
    self.assertEqual((0, 1, 0), torch.select(x, 2, 2).shape)

    # repeat, permute
    self.assertEqual((9, 0, 5, 6, 0), x.repeat(9, 7, 5, 2, 3).shape)
    self.assertEqual((3, 0, 0, 1), x.permute(2, 3, 0, 1).shape)

    # diagonal, diagflat
    self.assertEqual((0,), torch.diagonal(torch.randn((5, 0), device=device)).shape)
    self.assertEqual((0,), torch.diagonal(torch.randn((0, 5), device=device)).shape)
    # off the end offsets are valid
    self.assertEqual((0,), torch.diagonal(torch.randn((5, 0), device=device), offset=1).shape)
    self.assertEqual((0,), torch.diagonal(torch.randn((0, 5), device=device), offset=1).shape)
    # check non-zero sized offsets off the end
    self.assertEqual((5, 6, 0), torch.diagonal(torch.randn((3, 4, 5, 6), device=device), offset=45252).shape)
    self.assertEqual((5, 6, 0), torch.diagonal(torch.randn((3, 4, 5, 6), device=device), offset=-45252).shape)

    self.assertEqual((0, 0), torch.diagflat(torch.tensor([], device=device)).shape)
    self.assertEqual(torch.zeros(1, 1), torch.diagflat(torch.tensor([], device=device), offset=1))
    self.assertEqual((0, 0), torch.diagflat(torch.tensor([[]], device=device)).shape)
    self.assertEqual(torch.zeros(1, 1), torch.diagflat(torch.tensor([[]], device=device), offset=1))

    # stack, split, chunk
    self.assertEqual((4, 0, 1, 3, 0), torch.stack((x, x, x, x)).shape)
    self.assertEqual([(0, 1, 3, 0)],
                     [z.shape for z in torch.chunk(x, 1, dim=0)])

    self.assertEqual([(0, 1, 3, 0), ] * 3, [z.shape for z in torch.chunk(x, 3, dim=0)])
    self.assertEqual([(0, 1, 1, 0), ] * 3, [z.shape for z in torch.chunk(x, 3, dim=2)])

    # NOTE: split_with_sizes behaves differently than NumPy in that it
    # takes sizes rather than offsets
    self.assertEqual([(0, 1, 0, 0), (0, 1, 1, 0), (0, 1, 2, 0)],
                     [z.shape for z in torch.split(x, (0, 1, 2), dim=2)])

    self.assertRaises(RuntimeError, lambda: torch.split(x, 0, dim=1))
    # This is strange because the split size is larger than the dim size, but consistent with
    # how split handles that case generally (when no 0s are involved).
    self.assertEqual([(0, 1, 3, 0)], [z.shape for z in torch.split(x, 1, dim=0)])
    self.assertEqual([(0, 1, 3, 0)], [z.shape for z in torch.split(x, 0, dim=0)])
# functions that operate over a dimension but don't reduce.
def test_dim_function_empty(self, device):
    """Dimension-wise (non-reducing) ops on a tensor with empty dims.

    A (0, 1, 2, 0) input is pushed through softmax-like ops, cumulative
    ops, sort/topk, gather/scatter, index_* and index_select; each must
    accept the empty dimensions and keep the documented output shape.
    """
    shape = (0, 1, 2, 0)
    x = torch.randn(shape, device=device)

    # size stride
    self.assertEqual(0, x.size(3))
    self.assertEqual(2, x.size(2))
    self.assertEqual(2, x.stride(0))
    self.assertEqual(1, x.stride(2))

    self.assertEqual(x, torch.nn.functional.glu(x, 0))
    self.assertEqual((0, 1, 1, 0), torch.nn.functional.glu(x, 2).shape)

    # softmax, logsoftmax
    self.assertEqual(x, torch.nn.functional.softmax(x, 0))
    self.assertEqual(x, torch.nn.functional.softmax(x, 2))
    self.assertEqual(x, torch.nn.functional.softmax(x, 3))

    self.assertEqual(x, torch.nn.functional.log_softmax(x, 0))
    self.assertEqual(x, torch.nn.functional.log_softmax(x, 2))
    self.assertEqual(x, torch.nn.functional.log_softmax(x, 3))

    # cumsum, cumprod, cummax, cummin
    self.assertEqual(shape, torch.cumsum(x, 0).shape)
    self.assertEqual(shape, torch.cumsum(x, 2).shape)
    self.assertEqual(shape, torch.cumprod(x, 0).shape)
    self.assertEqual(shape, torch.cumprod(x, 2).shape)
    self.assertEqual(shape, torch.cummax(x, 0)[0].shape)
    self.assertEqual(shape, torch.cummax(x, 2)[0].shape)
    self.assertEqual(shape, torch.cummin(x, 0)[0].shape)
    self.assertEqual(shape, torch.cummin(x, 2)[0].shape)
    self.assertEqual(shape, torch.logcumsumexp(x, 0).shape)
    self.assertEqual(shape, torch.logcumsumexp(x, 2).shape)

    # flip
    self.assertEqual(x, x.flip(0))
    self.assertEqual(x, x.flip(2))

    # roll
    self.assertEqual(x, x.roll(0, 1).roll(0, -1))
    self.assertEqual(x, x.roll(1, x.size(1)))
    self.assertEqual(x, x.roll(1))
    self.assertEqual(x, x.roll((1, 1), (3, 1)))

    # unbind
    self.assertEqual((), x.unbind(0))
    self.assertEqual((torch.empty((0, 1, 0), device=device), torch.empty((0, 1, 0), device=device)),
                     x.unbind(2))

    # cross
    y = torch.randn((0, 1, 3, 0), device=device)
    self.assertEqual(y.shape, torch.cross(y, y).shape)

    # renorm
    self.assertEqual(shape, torch.renorm(x, 1, 0, 5).shape)
    self.assertEqual(shape, torch.renorm(x, 1, 2, 5).shape)

    # sort
    self.assertEqual([shape, shape], [z.shape for z in torch.sort(x, dim=0)])
    self.assertEqual([shape, shape], [z.shape for z in torch.sort(x, dim=2)])

    # topk
    self.assertEqual([shape, shape], [z.shape for z in torch.topk(x, 0, dim=0)])
    self.assertEqual([(0, 1, 1, 0), (0, 1, 1, 0)], [z.shape for z in torch.topk(x, 1, dim=2)])

    y = torch.randn((2, 3, 4), device=device)
    # k=0 on a non-empty tensor yields empty value/index tensors.
    self.assertEqual([(2, 3, 0), (2, 3, 0)], [z.shape for z in torch.topk(y, 0)])

    # gather
    self.assertEqual(shape, torch.gather(x, 0, torch.empty(shape, dtype=torch.int64, device=device)).shape)
    self.assertEqual(shape, torch.gather(x, 2, torch.empty(shape, dtype=torch.int64, device=device)).shape)
    larger_shape = torch.empty((0, 1, 3, 0), dtype=torch.int64, device=device)
    self.assertEqual(larger_shape.shape, torch.gather(x, 2, larger_shape).shape)
    smaller_shape = torch.empty((0, 1, 0, 0), dtype=torch.int64, device=device)
    self.assertEqual(smaller_shape.shape, torch.gather(x, 2, smaller_shape).shape)
    y = torch.randn((2, 3, 4), device=device)
    self.assertEqual((0, 3, 4),
                     torch.gather(y, 0, torch.empty((0, 3, 4), dtype=torch.int64, device=device)).shape)

    # scatter, scatter_add
    for dim in [0, 2]:
        y = torch.randn(shape, device=device)
        y_src = torch.randn(shape, device=device)
        ind = torch.empty(shape, dtype=torch.int64, device=device)
        self.assertEqual(shape, y.scatter_(dim, ind, y_src).shape)
        self.assertEqual(shape, y.scatter_add_(dim, ind, y_src).shape)

    z = torch.randn((2, 3, 4), device=device)
    z_src = torch.randn((2, 3, 4), device=device)
    # Empty index tensor: scatter into a non-empty tensor is a no-op.
    self.assertEqual(z, z.scatter_(2, torch.empty((2, 3, 0), dtype=torch.int64, device=device), z_src))
    self.assertEqual(z, z.scatter_add_(2, torch.empty((2, 3, 0), dtype=torch.int64, device=device), z_src))

    # index_fill, index_copy, index_add
    c = x.clone()
    c_clone = c.clone()
    ind_empty = torch.tensor([], dtype=torch.int64, device=device)
    ind_01 = torch.tensor([0, 1], dtype=torch.int64, device=device)
    self.assertEqual(c_clone, c.index_fill_(0, ind_empty, -1))
    self.assertEqual(c_clone, c.index_fill_(2, ind_empty, -1))
    self.assertEqual(c_clone, c.index_fill_(2, ind_01, -1))
    self.assertEqual(c_clone, c.index_copy_(0, ind_empty, torch.empty((0, 1, 2, 0), device=device)))
    self.assertEqual(c_clone, c.index_copy_(2, ind_empty, torch.empty((0, 1, 0, 0), device=device)))
    self.assertEqual(c_clone, c.index_copy_(2, ind_01, torch.empty((0, 1, 2, 0), device=device)))
    self.assertEqual(c_clone, c.index_add_(0, ind_empty, torch.empty((0, 1, 2, 0), device=device)))
    self.assertEqual(c_clone, c.index_add_(2, ind_empty, torch.empty((0, 1, 0, 0), device=device)))
    self.assertEqual(c_clone, c.index_add_(2, ind_01, torch.empty((0, 1, 2, 0), device=device)))

    c = torch.randn((0, 1, 2), device=device)
    c_clone = c.clone()
    self.assertEqual(c_clone, c.index_fill_(0, ind_empty, -1))
    self.assertEqual(c_clone, c.index_copy_(0, ind_empty, torch.empty((0, 1, 2), device=device)))
    self.assertEqual(c_clone, c.index_add_(0, ind_empty, torch.empty((0, 1, 2), device=device)))
    self.assertEqual(c_clone, c.index_fill_(0, ind_empty, -1))
    self.assertEqual(c_clone, c.index_copy_(0, ind_empty, torch.empty((0, 1, 2), device=device)))
    self.assertEqual(c_clone, c.index_add_(0, ind_empty, torch.empty((0, 1, 2), device=device)))

    # index fill/copy/add non-empty
    z = torch.randn((2, 3, 4), device=device)
    self.assertEqual(z, z.index_fill_(0, ind_empty, -1))
    z = torch.randn((2, 3, 4), device=device)
    self.assertEqual(z, z.index_copy_(0, ind_empty, torch.empty((0, 3, 4), device=device)))
    z = torch.randn((2, 3, 4), device=device)
    self.assertEqual(z, z.index_add_(0, ind_empty, torch.empty((0, 3, 4), device=device)))

    # index_select
    self.assertEqual(x, x.index_select(0, ind_empty))
    self.assertEqual((0, 1, 0, 0), x.index_select(2, ind_empty).shape)
    self.assertEqual(x, x.index_select(2, ind_01))
    z = torch.randn((2, 3, 4), device=device)  # non-empty
    self.assertEqual((0, 3, 4), z.index_select(0, ind_empty).shape)
    c = torch.randn((0, 1, 2), device=device)
    self.assertEqual(c, c.index_select(0, ind_empty))
    c = torch.randn((0, 1, 2), device=device)
    self.assertEqual(c, c.index_select(0, ind_empty))
    w = torch.randn((0, 3), device=device)
    self.assertEqual((0, 2), w.index_select(1, ind_01).shape)
    w = torch.randn((3, 0), device=device)
    self.assertEqual((2, 0), w.index_select(0, ind_01).shape)
    ind_01_int32 = torch.tensor([0, 1], dtype=torch.int32, device=device)
    self.assertEqual((2, 0), w.index_select(0, ind_01_int32).shape)
    # NOTE(review): `device == 'cpu'` is a raw string compare; other tests
    # here use self.device_type -- confirm the framework passes plain 'cpu'.
    if device == 'cpu':
        w = torch.randn((0, 3), device=device)
        with self.assertRaisesRegex(RuntimeError, "self indexing axis dim should be positive"):
            torch.index_select(w, 0, ind_01)
        ind_05 = torch.tensor([0, 5], dtype=torch.int64, device=device)
        with self.assertRaisesRegex(RuntimeError, "INDICES element is out of DATA bounds"):
            torch.index_select(w, 1, ind_05)
# FIXME: find a test suite for the pdist operator
@unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "sandcastle OOM with current tpx gpu/re configuration")
@skipIfRocm
@onlyCUDA
@largeTensorTest('10GB', device='cpu')
@largeTensorTest('5GB', device='cuda')
def test_pdist_norm_large(self, device):
    """pdist on a 50k-row input: the GPU result must match the CPU result."""
    # use dim0>=46342 for forward, see:
    # https://github.com/pytorch/pytorch/issues/30583
    # Compare output using GPU with the CPU implementation
    x = torch.randn(50000, 1, dtype=torch.float32)      # 50k * 4 bytes = 200 KB
    # Will require 1249975000 float32s
    expected_cpu = torch.pdist(x, p=2)                  # ~1250M * 4 bytes = 5 GB on CPU
    actual_gpu = torch.pdist(x.to(device), p=2)         # 5 GB on GPU
    self.assertEqual(expected_cpu, actual_gpu.cpu())    # Another 5 GB on CPU
# FIXME: move to elementwise ternary test suite
@onlyNativeDeviceTypes
@dtypesIfCUDA(*set(get_all_math_dtypes('cuda')))
@dtypes(*set(get_all_math_dtypes('cpu')))
def test_addcdiv(self, device, dtype):
    """torch.addcdiv against a manual `a + value * b / c` reference.

    Also checks the deprecated positional-`value` overload and that
    integer dtypes are rejected (integer division via addcdiv is
    prohibited).
    """
    # Returns floating or integral scalar corresponding to dtype
    def _number(floating, integer, dtype):
        if dtype in [torch.half, torch.float, torch.double, torch.bfloat16]:
            return floating
        elif dtype in [torch.cfloat, torch.cdouble]:
            return floating * (1 + 1j)
        else:
            return integer

    # Random tensor with every zero replaced by one, so dividing by it
    # (as `c`) is safe and the comparison can be exact.
    def non_zero_rand(size, dtype, device):
        if dtype.is_floating_point or dtype.is_complex:
            a = torch.rand(size=size, dtype=dtype, device=device)
        elif dtype == torch.uint8:
            a = torch.randint(1, 5, size=size, dtype=dtype, device=device)
        else:
            a = torch.randint(-5, 5, size=size, dtype=dtype, device=device)
        return a + (a == 0).to(dtype)

    def _test_addcdiv():
        a = non_zero_rand((2, 2), dtype=dtype, device=device)
        b = non_zero_rand((2, 2), dtype=dtype, device=device)
        c = non_zero_rand((2, 2), dtype=dtype, device=device)
        alpha = _number(0.5, 3, dtype)
        expected = a + (alpha * b) / c
        actual = torch.addcdiv(a, b, c, value=alpha)
        self.assertEqual(expected, actual)
        # Deprecated overload: `value` passed positionally before b and c.
        with self.assertWarnsOnceRegex(
                UserWarning, "This overload of addcdiv is deprecated"):
            self.assertEqual(actual, torch.addcdiv(a, alpha, b, c))

    if not (dtype.is_floating_point or dtype.is_complex):
        # Integer division with addcdiv is prohibited
        with self.assertRaises(RuntimeError):
            _test_addcdiv()
    else:
        _test_addcdiv()

    if self.device_type == 'cuda' and dtype == torch.half:
        # NOTE(review): this half-precision overflow check calls
        # torch.addcmul, not torch.addcdiv -- confirm it is not a
        # copy-paste from the addcmul test.
        a = torch.tensor([60000.0], device=device, dtype=dtype)
        b = torch.tensor([60000.0], device=device, dtype=dtype)
        c = torch.tensor([1.0], device=device, dtype=dtype)
        # 60000 - 2 * 60000 * 1 must stay finite in fp16 (no inf/nan).
        out = torch.addcmul(a, b, c, value=-2)
        self.assertTrue(not (out.isnan() or out.isinf()))
def test_nullary_op_mem_overlap(self, device):
ops = (
("random_", ()),
("uniform_", ()),
("cauchy_", ()),
("log_normal_", ()),
("exponential_", ()),
("geometric_", (0.5,)),
("normal_", ()),
)
x = torch.rand((1, 3)).expand((3, 3))
for op, args in ops:
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
getattr(x, op)(*args)
# FIXME: move to an elementwise ternary test suite and make this an OpInfo test
@dtypes(torch.double)
def test_ternary_op_mem_overlap(self, device, dtype):
    """Ternary ops (addcmul/addcdiv/lerp) must detect memory overlap.

    Each table row records whether the op checks input/output overlap and
    internal (self-overlapping) memory on the given device; the shared
    helpers on the test base class do the actual probing.
    """
    ops = [
        ("addcmul", True, True, 'cpu'),
        ("addcmul", True, True, 'cuda'),
        ("addcdiv", True, True, 'cpu'),
        ("addcdiv", True, True, 'cuda'),
        ("lerp", True, True, 'cpu'),
        ("lerp", True, True, 'cuda')
    ]

    # NOTE(review): `dev != device` is a raw string compare; for a device
    # string like 'cuda:0' no table row matches -- confirm the framework
    # passes bare 'cpu'/'cuda' here.
    for (fn, has_input_output_mem_overlap_check,
         has_internal_mem_overlap_check, dev) in ops:
        if dev != device:
            continue
        out_op = getattr(torch, fn)
        inplace_op = getattr(torch.Tensor, fn + '_')
        self.check_internal_mem_overlap(
            inplace_op, 3, dtype, device,
            expected_failure=not has_internal_mem_overlap_check)
        self.ternary_check_input_output_mem_overlap(out_op, dev,
                                                    expected_failure=not has_input_output_mem_overlap_check)
@expectedFailureMeta  # RuntimeError not raised
@dtypes(torch.double)
@onlyNativeDeviceTypes
def test_copy_mem_overlap(self, device, dtype):
    """copy_ must reject overlapping source/destination memory."""
    # Destination with internal overlap (expanded views) must fail;
    # the base-class helper builds the overlapping inputs.
    self.check_internal_mem_overlap(
        torch.Tensor.copy_, num_inputs=2, dtype=dtype, device=device)
    sz = 9
    doubles = torch.randn(2 * sz, dtype=dtype, device=device)
    # Sliding windows over one buffer: input and output alias each other.
    self.unary_check_input_output_mem_overlap(
        doubles, sz, lambda input, out: out.copy_(input))
# FIXME: convert to ErrorInputs
@onlyNativeDeviceTypes
def test_index_add_mem_overlap(self, device):
    """index_add_ must reject overlap between destination, index and source."""
    expanded = torch.rand((1,), device=device).expand((6,))
    dense = torch.rand((6,), device=device)
    idx = torch.tensor([2, 1, 0], device=device)
    vals = torch.rand((3,), device=device)
    overlap_cases = (
        lambda: expanded.index_add_(0, idx, vals),    # internally overlapping dest
        lambda: dense.index_add_(0, idx, dense[:3]),  # source aliases dest
        lambda: idx.index_add_(0, idx, idx.clone()),  # index aliases dest
        lambda: idx.index_add_(0, idx.clone(), idx),  # source aliases dest
    )
    for trigger in overlap_cases:
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            trigger()
# FIXME: convert to ErrorInputs
@onlyNativeDeviceTypes
def test_index_copy_mem_overlap(self, device):
    """index_copy_ must reject overlap between destination, index and source."""
    expanded = torch.rand((1,), device=device).expand((6,))
    dense = torch.rand((6,), device=device)
    idx = torch.tensor([2, 1, 0], device=device)
    vals = torch.rand((3,), device=device)
    overlap_cases = (
        lambda: expanded.index_copy_(0, idx, vals),    # internally overlapping dest
        lambda: dense.index_copy_(0, idx, dense[:3]),  # source aliases dest
        lambda: idx.index_copy_(0, idx, idx.clone()),  # index aliases dest
        lambda: idx.index_copy_(0, idx.clone(), idx),  # source aliases dest
    )
    for trigger in overlap_cases:
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            trigger()
# FIXME: convert to ErrorInputs
@expectedFailureMeta  # Warning not triggered
@onlyNativeDeviceTypes
def test_index_fill_mem_overlap(self, device):
    """index_fill_ overlap handling: warn on an expanded destination,
    raise when the index tensor aliases the destination.

    (Removed the unused `y` and `value` locals that the sibling
    index_add/index_copy tests need but this one never reads.)
    """
    x = torch.rand((1,), device=device).expand((6,))
    ind = torch.tensor([2, 1, 0], device=device)

    # Filling an expanded (internally overlapping) tensor only warns.
    with self.assertWarnsRegex(UserWarning, "index_fill_ on expanded tensors"):
        x.index_fill_(0, ind, 1.0)
    # Using the destination itself as the index tensor is a hard error.
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        ind.index_fill_(0, ind, 0)
# FIXME: convert to ErrorInputs
@expectedFailureMeta  # RuntimeError not raised
@onlyNativeDeviceTypes
def test_shift_mem_overlap(self, device):
    """In-place shifts between overlapping slices of one tensor must fail."""
    t = torch.rand(3, device=device)
    # t[:-1] and t[1:] share memory, so both augmented shifts are invalid.
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        t[:-1] <<= t[1:]
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        t[:-1] >>= t[1:]
# FIXME: convert to ErrorInputs
@expectedFailureMeta  # RuntimeError not raised
@onlyNativeDeviceTypes
def test_bernoulli_mem_overlap(self, device):
    """bernoulli_ / bernoulli(out=...) must reject self-overlapping outputs."""
    target = torch.rand((1,), device=device).expand((6,))
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        target.bernoulli_()
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        target.bernoulli_(p=0.1)
    probs = torch.rand(6, device=device)
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        target.bernoulli_(p=probs)
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        torch.bernoulli(torch.rand_like(target), out=target)
# FIXME: convert to ErrorInputs
@expectedFailureMeta  # RuntimeError not raised
@onlyNativeDeviceTypes
def test_put_mem_overlap(self, device):
    """put_ must reject every overlap between destination, index and source."""
    expanded = torch.rand((1,), device=device).expand((6,))
    dense = torch.rand((6,), device=device)
    idx = torch.tensor([2, 1, 0], device=device)
    vals = torch.rand((3,), device=device)
    # Internally overlapping destination.
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        expanded.put_(idx, vals)
    # Source aliasing the destination, in several shapes.
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        dense.put_(idx[0], dense[0])
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        idx.put_(idx, idx)
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        dense.put_(idx, dense[:3])
    # Index aliasing the destination.
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        idx.put_(idx, idx.clone())
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        idx.put_(idx.clone(), idx)
# FIXME: convert to ErrorInputs
@expectedFailureMeta  # UserWarning not triggered
@onlyNativeDeviceTypes
def test_index_put_mem_overlap(self, device):
    """index_put_ overlap handling: warn on expanded self, raise on aliasing."""
    expanded = torch.rand((1,), device=device).expand((6,))
    dense = torch.rand((6,), device=device)
    idx = torch.tensor([2, 1, 0], device=device)
    vals = torch.rand((3,), device=device)
    # Writing into an expanded (internally overlapping) tensor only warns.
    with self.assertWarnsRegex(UserWarning, 'expanded tensors'):
        expanded.index_put_((idx,), vals)
    # Any aliasing between destination, index and source is a hard error.
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        dense.index_put_((idx,), dense[0])
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        idx.index_put_((idx,), idx)
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        dense.index_put_((idx,), dense[:3])
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        idx.index_put_((idx,), idx.clone())
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        idx.index_put_((idx.clone(),), idx)
# FIXME: convert to ErrorInputs
@expectedFailureMeta  # UserWarning not triggered
@onlyNativeDeviceTypes
def test_masked_fill_mem_overlap(self, device):
    """masked_fill_ overlap handling: warn on expanded self, raise on aliasing."""
    x = torch.rand((1,), device=device).expand((6,))
    mask = torch.tensor([True, False, True, True, False, False], device=device)
    # Filling an expanded (internally overlapping) tensor only warns,
    # both for a Python scalar and a 0-dim tensor fill value.
    with self.assertWarnsRegex(UserWarning, 'expanded tensors'):
        x.masked_fill_(mask, 0.)

    fill_val = torch.tensor(0., device=device)
    with self.assertWarnsRegex(UserWarning, 'expanded tensors'):
        x.masked_fill_(mask, fill_val)
    # A mask overlapping the destination is a hard error.
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        mask[1:].masked_fill_(mask[:-1], False)
# FIXME: convert to ErrorInputs
@expectedFailureMeta  # RuntimeError not raised
@onlyNativeDeviceTypes
def test_masked_scatter_mem_overlap(self, device):
    """masked_scatter_ into an expanded (self-overlapping) tensor must fail."""
    target = torch.rand((1,), device=device).expand((6,))
    source = torch.rand((3,), device=device)
    mask = torch.tensor([True, False, True, True, False, False], device=device)
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        target.masked_scatter_(mask, source)
# FIXME: convert to ErrorInputs
@onlyNativeDeviceTypes
def test_scatter_mem_overlap(self, device):
    """scatter_ must reject overlap between destination, index and source."""
    expanded = torch.rand((1,), device=device).expand((6,))
    source = torch.rand((3,), device=device)
    idx = torch.tensor([2, 1, 0], device=device, dtype=torch.int64)
    # Internally overlapping destination.
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        expanded.scatter_(0, idx, source)
    # Source aliasing the destination.
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        source.scatter_(0, idx, source)
    # Index aliasing the destination.
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        idx.scatter_(0, idx, idx.clone())
# FIXME: move to test distributions
@onlyCUDA
def test_multinomial_device_constrain(self, device):
    """multinomial must refuse a CPU input paired with a CUDA out tensor."""
    probs = torch.empty(0, device="cpu")
    result = torch.empty(0, device=device)
    self.assertRaisesRegex(
        RuntimeError, "Expected all tensors to be on the same device",
        lambda: torch.multinomial(probs, 2, out=result))
# FIXME: move to test distributions
@deviceCountAtLeast(2)
@onlyCUDA
def test_multinomial_gpu_device_constrain(self, devices):
    """multinomial must refuse input and out tensors on different GPUs."""
    probs = torch.empty(0, device=devices[0])
    result = torch.empty(0, device=devices[1])
    self.assertRaisesRegex(
        RuntimeError, "Expected all tensors to be on the same device",
        lambda: torch.multinomial(probs, 2, out=result))
# FIXME: convert this to an automated OpInfo test
@deviceCountAtLeast(2)
@onlyCUDA
def test_device_guard(self, devices):
    """Smoke-test ops marked `device_guard: False` with tensors on a
    non-default CUDA device.

    Each call below must simply succeed (no asserts on results): the point
    is that dispatching from the default device to devices[1] does not
    crash or pick the wrong device.
    """
    # verify that all operators with `device_guard: False` behave properly with multiple devices.
    # TODO: if we had operator introspection we could figure out this set of operators automatically...
    x = torch.randn((1, 2, 3), device=devices[1])
    y = torch.zeros((1, 3, 2), device=devices[1])
    scalar = torch.tensor(5, device=devices[1])

    # property ops
    torch.cudnn_is_acceptable(x)
    x.is_distributed()
    x.is_floating_point()
    x.is_complex()
    x.is_same_size(y)
    x.is_signed()
    x.size(0)
    x.stride(0)
    x.numel()
    x.is_set_to(y)
    x.data_ptr()
    scalar.is_nonzero()

    # sparse property ops
    y[0][1] = 5
    y_sparse = y.to_sparse()
    y_sparse.sparse_dim()
    y_sparse._dimI()
    y_sparse.dense_dim()
    y_sparse._dimV()
    y_sparse._nnz()
    y_sparse.is_coalesced()
    y_sparse._indices()
    y_sparse._values()
    y_sparse.indices()
    y_sparse.values()

    # in-place ops
    def inplace():
        # Fresh tensor per call: each in-place op below mutates its own copy.
        return torch.randn((1, 2, 3), device=devices[1])
    inplace().as_strided_(y.size(), y.stride())
    inplace().resize_(y.size())
    inplace().squeeze_()
    inplace().squeeze_(0)
    inplace().unsqueeze_(2)
    inplace().transpose_(1, 2)
    inplace().squeeze_().t_()
    inplace().set_(x.storage())
    inplace().set_(x.storage(), x.storage_offset(), x.size(), x.stride())
    inplace().set_(x)
    inplace().set_()
    y_sparse._coalesced_(True)

    # shape modification
    x.as_strided(y.size(), y.stride())
    x.expand((5, 2, 3))
    x.expand_as(x)
    x.sum_to_size((1,))
    torch.broadcast_tensors(x , x)
    x.reshape((1, 3, 2))
    x.reshape_as(y)
    x.squeeze()
    x.squeeze(0)
    x.squeeze().t()
    x.transpose(1, 2)
    x.unsqueeze(2)
    x.view((1, 3, 2))
    x.view_as(y)

    # chunk, split, etc.
    x.chunk(2, dim=1)
    x.split(1, dim=2)
    x.split_with_sizes([1, 2], dim=2)
    x.unfold(dimension=2, size=1, step=1)

    x.narrow(1, 1, 1)
    x.select(1, 1)
    torch.isnan(x)
    torch.empty((1, 3, 2), out=y)
    torch.empty_like(x)
    torch.empty_like(x, dtype=torch.int64)

    # to
    x.to(x)
    x.to(y)
    x.to(x, copy=True)
def test_is_signed(self, device):
self.assertEqual(torch.IntTensor(5).to(device).is_signed(), True)
self.assertEqual(torch.ByteTensor(5).to(device).is_signed(), False)
self.assertEqual(torch.CharTensor(5).to(device).is_signed(), True)
self.assertEqual(torch.FloatTensor(5).to(device).is_signed(), True)
self.assertEqual(torch.HalfTensor(10).to(device).is_signed(), True)
# Note - reports a leak of 512 bytes on CUDA device 1
@deviceCountAtLeast(2)
@skipCUDAMemoryLeakCheckIf(True)
@onlyCUDA
def test_tensor_set_errors_multigpu(self, devices):
    """Tensor.set_ must reject storages/tensors living on a different GPU."""
    t_dev0 = torch.randn((2, 3), dtype=torch.float32, device=devices[0])
    t_dev1 = torch.randn((2, 3), dtype=torch.float32, device=devices[1])
    # Rebinding to a storage (with or without explicit geometry) or to a
    # tensor on another device is an error in every form.
    self.assertRaises(RuntimeError, lambda: t_dev0.set_(t_dev1.storage()))
    self.assertRaises(
        RuntimeError,
        lambda: t_dev0.set_(t_dev1.storage(), 0, t_dev1.size(), t_dev1.stride()))
    self.assertRaises(RuntimeError, lambda: t_dev0.set_(t_dev1))
# FIXME: move to test_serialization
@onlyCUDA
@deviceCountAtLeast(1)  # Note: Tests works with one but prefers more devices
def test_serialization(self, devices):
    """Saving and loading CUDA tensors must restore their original devices.

    Runs once with a named temporary file and once with an in-memory
    BytesIO-based file context.
    """
    def _test_serialization(filecontext_lambda):
        t0 = torch.cuda.FloatTensor(5).fill_(1)
        with torch.cuda.device(devices[-1]):
            tn = torch.cuda.FloatTensor(3).fill_(2)
        torch.cuda.set_device(devices[0])
        b = (t0, tn)
        with filecontext_lambda() as f:
            torch.save(b, f)
            f.seek(0)
            c = torch.load(f)
            self.assertEqual(b, c, atol=0, rtol=0)
            u0, un = c
            # Each tensor must come back on the device it was saved from.
            self.assertEqual(str(u0.device), devices[0])
            self.assertEqual(str(un.device), devices[-1])
    _test_serialization(tempfile.NamedTemporaryFile)
    _test_serialization(BytesIOContext)
# FIXME: move memory format tests to their own test class/suite
def test_memory_format_preserved_after_permute(self, device):
x = torch.randn(4, 3, 8, 8, device=device)
nhwc = x.contiguous(memory_format=torch.channels_last)
y = nhwc.permute(0, 1, 3, 2).permute(0, 1, 3, 2)
self.assertTrue(y.is_contiguous(memory_format=torch.channels_last))
x = torch.randn(4, 3, 8, 8, 8, device=device)
ndhwc = x.contiguous(memory_format=torch.channels_last_3d)
y = ndhwc.permute(0, 1, 4, 3, 2).permute(0, 1, 4, 3, 2)
self.assertTrue(y.is_contiguous(memory_format=torch.channels_last_3d))
def test_memory_format_propagation_rules(self, device):
    """Memory-format propagation rules for binary elementwise ops.

    Given operands that are contiguous, channels-last, ambiguous
    (satisfying both formats) or a broadcasting bias, the result of
    `a + b` must be contiguous in the expected memory format; ambiguous
    results must also preserve the operand's strides.
    """
    contiguous = torch.rand(10, 3, 5, 5, device=device)
    cl = torch.rand(10, 3, 5, 5, device=device).contiguous(memory_format=torch.channels_last)
    # A (10, 3, 1, 1) channels-last tensor is contiguous in BOTH formats.
    ambiguous = torch.rand(10, 3, 1, 1, device=device).contiguous(memory_format=torch.channels_last)
    self.assertTrue(ambiguous.is_contiguous(memory_format=torch.channels_last))
    self.assertTrue(ambiguous.is_contiguous(memory_format=torch.contiguous_format))
    bias = torch.rand(1, 1, 1, 1, device=device).contiguous(memory_format=torch.channels_last)

    def _test_propagation_rules(self, contiguous, cl, ambiguous, bias):
        # (lhs, rhs, expected memory format of lhs + rhs)
        options = ((ambiguous, contiguous, torch.contiguous_format),
                   (ambiguous, cl, torch.channels_last),
                   (contiguous, ambiguous, torch.contiguous_format),
                   (contiguous, cl, torch.contiguous_format),
                   (cl, ambiguous, torch.channels_last),
                   (cl, contiguous, torch.channels_last),
                   (bias, cl, torch.channels_last),
                   (cl, bias, torch.channels_last),)

        for a, b, mf in options:
            result = a + b
            self.assertTrue(result.is_contiguous(memory_format=mf))

    _test_propagation_rules(self, contiguous, cl, ambiguous, bias)

    # Same checks after converting via .to(memory_format=...) instead of
    # .contiguous(memory_format=...).
    cl = cl.to(memory_format=torch.channels_last)
    ambiguous = ambiguous.to(memory_format=torch.channels_last)
    bias = bias.to(memory_format=torch.channels_last)

    _test_propagation_rules(self, contiguous, cl, ambiguous, bias)

    # test cases when strides matter in ambiguous tensors
    for mf in (torch.channels_last, torch.contiguous_format):
        ambiguous = torch.rand(10, 3, 1, 1, device=device).to(memory_format=mf)
        bias = torch.rand(3, 1, 1, device=device)
        result = ambiguous + bias
        self.assertEqual(ambiguous.stride(), result.stride())
        result = bias + ambiguous
        self.assertEqual(ambiguous.stride(), result.stride())
        result = ambiguous * 5
        self.assertEqual(ambiguous.stride(), result.stride())
@skipIfMps
def test_memory_format_empty_like(self, device):
    """empty_like must honor preserve/explicit/contiguous memory formats.

    Runs once for channels_last (4-D input) and once for channels_last_3d
    (5-D input); sparse inputs must reject preserve_format.
    """
    def test_helper(x, memory_format):
        xc = x.contiguous(memory_format=memory_format)

        # preserve_format keeps the channels-last layout of xc ...
        like = torch.empty_like(xc, memory_format=torch.preserve_format)
        self.assertFalse(like.is_contiguous())
        self.assertTrue(like.is_contiguous(memory_format=memory_format))

        # ... and the default-contiguous layout of x.
        like_x = torch.empty_like(x, memory_format=torch.preserve_format)
        self.assertTrue(like_x.is_contiguous())
        self.assertFalse(like_x.is_contiguous(memory_format=memory_format))

        # Explicit memory_format overrides the input's layout either way.
        like = torch.empty_like(x, memory_format=memory_format)
        self.assertFalse(like.is_contiguous())
        self.assertTrue(like.is_contiguous(memory_format=memory_format))

        like = torch.empty_like(xc, memory_format=torch.contiguous_format)
        self.assertTrue(like.is_contiguous())
        self.assertFalse(like.is_contiguous(memory_format=memory_format))

        # Default (no memory_format argument) preserves the input layout.
        like = torch.empty_like(xc)
        self.assertFalse(like.is_contiguous())
        self.assertTrue(like.is_contiguous(memory_format=memory_format))

        # preserve_format is not defined for sparse tensors.
        sparse = x.to_sparse()
        with self.assertRaises(RuntimeError):
            z = torch.empty_like(sparse, memory_format=torch.preserve_format)

    test_helper(torch.randn(4, 3, 8, 8, device=device), torch.channels_last)
    test_helper(torch.randn(4, 3, 8, 8, 8, device=device), torch.channels_last_3d)
def test_memory_format_consistency(self, device):
x = torch.randn(10, 3, 1, 1, device=device)
x_rep = x.as_strided(x.size(), x.stride())
self.assertEqual(x.size(), x_rep.size())
self.assertEqual(x.stride(), x_rep.stride())
self.assertEqual(x.is_contiguous(), x_rep.is_contiguous())
self.assertEqual(x.is_contiguous(memory_format=torch.channels_last), x_rep.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(
x.is_contiguous(memory_format=torch.channels_last_3d), x_rep.is_contiguous(memory_format=torch.channels_last_3d))
# FIXME: make this a elementwise unary and elementwise binary OpInfo test
def test_memory_format_operators(self, device):
    """Elementwise unary/binary ops (including in-place variants) must
    preserve a channels-last(-3d) input's memory format and produce the
    same values as the contiguous computation; ops whose first operand is
    contiguous must return contiguous results."""
    def _chunk_op(x, y):
        x1, x2 = x.chunk(2, dim=1)
        return x1 + x2

    def _unsqueeze_op_add(x, y):
        return x[0].unsqueeze(0) + 3

    def _unsqueeze_op_clone(x, y):
        return x[0].unsqueeze(0).clone()

    def _test_helper(x, y, bias, memory_format):
        # First operand is contiguous -> result should be contiguous.
        return_contig_fns = [
            lambda x, y: y + x,
            lambda x, y: y * x,
            lambda x, y: y.addcdiv(x, y, value=2),
            lambda x, y: y.addcmul(x, y, value=2),
        ]
        # Broadcast against a (1, C, 1, 1[, 1]) bias tensor.
        bias_fns = [
            lambda x, b: x + b,
            lambda x, b: b + x,
        ]
        # Format-preserving ops: first operand is channels-last.
        fns = [
            lambda x, y: x.clone(),
            lambda x, y: x + 3,
            lambda x, y: 3 * x,
            lambda x, y: x + y,
            lambda x, y: x * y,
            lambda x, y: abs(x),
            lambda x, y: x.abs(),
            lambda x, y: x.abs_(),
            lambda x, y: x.acos(),
            lambda x, y: x.acos_(),
            lambda x, y: x.add(y, alpha=3),
            lambda x, y: x.add_(y, alpha=3),
            lambda x, y: x.addcdiv(y, y, value=2),
            lambda x, y: x.addcdiv_(y, y, value=2),
            lambda x, y: x.addcmul(y, y, value=2),
            lambda x, y: x.addcmul_(y, y, value=2),
            lambda x, y: x.acosh(),
            lambda x, y: x.acosh_(),
            lambda x, y: x.asinh(),
            lambda x, y: x.asinh_(),
            lambda x, y: x.atanh(),
            lambda x, y: x.atanh_(),
            lambda x, y: x.asin(),
            lambda x, y: x.asin_(),
            lambda x, y: x.atan(),
            lambda x, y: x.atan2(y),
            lambda x, y: x.atan2_(y),
            lambda x, y: x.ceil(),
            lambda x, y: x.ceil_(),
            lambda x, y: x.clamp(-1, 1),
            lambda x, y: x.cos(),
            lambda x, y: x.cosh(),
            lambda x, y: x.div(0.5),
            lambda x, y: x.div_(0.5),
            lambda x, y: x.div(y),
            lambda x, y: x.div_(y),
            lambda x, y: x.digamma(),
            lambda x, y: x.digamma_(),
            lambda x, y: x.erf(),
            lambda x, y: x.erfc(),
            lambda x, y: x.erfinv(),
            lambda x, y: x.erfinv_(),
            lambda x, y: x.exp(),
            lambda x, y: x.expm1(),
            lambda x, y: x.expm1_(),
            lambda x, y: x.floor(),
            lambda x, y: x.floor_(),
            lambda x, y: x.fmod(2),
            lambda x, y: x.frac(),
            lambda x, y: x.hypot(y),
            lambda x, y: x.hypot_(y),
            lambda x, y: x.i0(),
            lambda x, y: x.i0_(),
            lambda x, y: x.lerp(y, 0.5),
            lambda x, y: x.log(),
            lambda x, y: x.log_(),
            lambda x, y: x.log10(),
            lambda x, y: x.log10_(),
            lambda x, y: x.log1p(),
            lambda x, y: x.log1p_(),
            lambda x, y: x.log2(),
            lambda x, y: x.log2_(),
            lambda x, y: x.mul(3),
            lambda x, y: x.mul_(3),
            lambda x, y: x.neg(),
            lambda x, y: x.neg_(),
            lambda x, y: x.pow(3),
            lambda x, y: x.pow_(3),
            lambda x, y: x.pow(0.0),
            lambda x, y: x.pow(1.0),
            lambda x, y: x.reciprocal(),
            lambda x, y: x.remainder(2),
            lambda x, y: x.round(),
            lambda x, y: x.round_(),
            lambda x, y: x.rsqrt(),
            lambda x, y: x.rsqrt_(),
            lambda x, y: x.sigmoid(),
            lambda x, y: x.sigmoid_(),
            lambda x, y: x.logit(),
            lambda x, y: x.logit_(),
            lambda x, y: x.logit(1e-6),
            lambda x, y: x.logit_(1e-6),
            lambda x, y: x.sign(),
            lambda x, y: x.sign_(),
            lambda x, y: x.sgn(),
            lambda x, y: x.sgn_(),
            lambda x, y: x.sin(),
            lambda x, y: x.sin_(),
            lambda x, y: x.sinh(),
            lambda x, y: x.sinh_(),
            lambda x, y: x.sqrt(),
            lambda x, y: x.sqrt_(),
            lambda x, y: x.tan(),
            lambda x, y: x.tanh(),
            lambda x, y: x.trunc(),
            lambda x, y: x.trunc_(),
            _chunk_op,
            _unsqueeze_op_add,
            _unsqueeze_op_clone,
        ]
        x_c = x.contiguous()
        y_c = y.contiguous()
        b_c = bias.contiguous()
        for fn in fns:
            # Heuristic: in-place variants show up in their source text as
            # `name_(`; those need fresh clones so the inputs stay pristine.
            is_inplace = '_(' in inspect.getsource(fn)
            x_clone = x.clone() if is_inplace else x
            x_c_clone = x_c.clone() if is_inplace else x_c
            result_c = fn(x_c_clone, y_c)
            result = fn(x_clone, y)
            self.assertEqual(result, result_c, "Failed for '{}'".format(inspect.getsource(fn).strip()))
            self.assertTrue(
                result.is_contiguous(memory_format=memory_format),
                "result of the '{}' is not in '{}' format".format(inspect.getsource(fn).strip(), memory_format))
        for fn in bias_fns:
            result_c = fn(x_c, b_c)
            result = fn(x, bias)
            self.assertEqual(result, result_c, "Failed for '{}'".format(inspect.getsource(fn).strip()))
            self.assertTrue(
                result.is_contiguous(memory_format=memory_format),
                "result of the '{}' is not in '{}' format".format(inspect.getsource(fn).strip(), memory_format))
        for fn in return_contig_fns:
            result_c = fn(x_c, y_c)
            result = fn(x, y)
            self.assertEqual(result, result_c, "Failed for '{}'".format(inspect.getsource(fn).strip()))
            self.assertTrue(
                result.is_contiguous(memory_format=torch.contiguous_format),
                "result of the '{}' is not in '{}' format".format(inspect.getsource(fn).strip(), torch.contiguous_format))

    # 4-D channels_last and 5-D channels_last_3d; `abs(...) + 1` keeps the
    # second operand strictly positive for log/sqrt/rsqrt-style ops.
    _test_helper(
        torch.randn((4, 3, 8, 8), device=device).contiguous(memory_format=torch.channels_last),
        abs(torch.randn((4, 3, 8, 8), device=device)) + 1,
        torch.randn((1, 3, 1, 1), device=device).contiguous(memory_format=torch.channels_last),
        torch.channels_last)
    _test_helper(
        torch.randn((4, 3, 8, 8, 8), device=device).contiguous(memory_format=torch.channels_last_3d),
        abs(torch.randn((4, 3, 8, 8, 8), device=device)) + 1,
        torch.randn((1, 3, 1, 1, 1), device=device).contiguous(memory_format=torch.channels_last_3d),
        torch.channels_last_3d)
# FIXME: make this a elementwise unary and elementwise binary OpInfo test
@skipIfTorchDynamo("Torchdynamo fails with unknown reason")
def test_strides_propagation(self, device):
    """Elementwise ops must propagate the permuted input's strides to the
    output (modulo the density factor for sliced inputs)."""
    def _test_helper(x, op, unary=False):
        def compare_strides(s1, s2, div):
            sdiv = [s // div for s in s1]
            self.assertEqual(sdiv, s2)

        dim = x.dim()
        # we produce memory dense outputs, so when input is strided on the last dimension
        # we need to divide by that dimension stride to compare input and result strides
        div = x.stride(-1)
        for p in permutations(range(dim)):
            xp = x.permute(p)
            if not unary:
                # 1-D rhs broadcast against the permuted lhs, in both
                # operand orders plus the (xp, xp) self case.
                y = torch.randn(xp.size(-1), device=x.device, dtype=x.dtype)
                for inputs in ((xp, xp), (xp, y), (y, xp)):
                    res = op(*inputs)
                    compare_strides(xp.stride(), res.stride(), div)
                    self.assertEqual(xp.size(), res.size())
                    # Also exercise the out= overload with a 0-sized out
                    # tensor that must be resized/restrided by the op.
                    out = torch.empty(0, device=xp.device, dtype=res.dtype)
                    res = op(*inputs, out=out)
                    compare_strides(xp.stride(), res.stride(), div)
                    self.assertEqual(xp.size(), res.size())
            else:
                res = op(xp)
                compare_strides(xp.stride(), res.stride(), div)
                self.assertEqual(xp.size(), res.size())
                out = torch.empty(0, device=xp.device, dtype=res.dtype)
                res = op(xp, out=out)
                compare_strides(xp.stride(), res.stride(), div)
                self.assertEqual(xp.size(), res.size())

    # torch.eq by default calls TensorIterator with defined output, torch.add with undefined
    binary_ops = (torch.eq, torch.add)
    unary_ops = (torch.exp,)
    # memory dense, sliced and ambiguous sliced (ambiguous dense loses permutation information)
    xs = (torch.randn(2, 3, 4, device=device), torch.randn(2, 3, 8, device=device)[:, :, ::2],
          torch.randn(1, 1, 4, 12, device=device)[:, :, :, ::2])
    for op in binary_ops:
        for x in xs:
            _test_helper(x, op)
    for op in unary_ops:
        for x in xs:
            _test_helper(x, op, unary=True)
# FIXME: move dlpack tests to their own test class/suite
@skipMeta
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
def test_dlpack_capsule_conversion(self, device, dtype):
    """Round-trip a tensor through an explicit DLPack capsule."""
    # DLpack does not explicitly support bool (xref dmlc/dlpack#75)
    src = make_tensor((5,), dtype=dtype, device=device)
    capsule = to_dlpack(src)
    self.assertEqual(from_dlpack(capsule), src)
@skipMeta
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
def test_dlpack_protocol_conversion(self, device, dtype):
    """from_dlpack accepts a tensor directly through the __dlpack__ protocol."""
    src = make_tensor((5,), dtype=dtype, device=device)
    converted = from_dlpack(src)
    self.assertEqual(converted, src)
@skipMeta
@onlyNativeDeviceTypes
def test_dlpack_shared_storage(self, device):
    """DLPack conversion aliases storage: a write through the converted
    tensor must be visible through the source tensor."""
    src = make_tensor((5,), dtype=torch.float64, device=device)
    alias = from_dlpack(to_dlpack(src))
    alias[0] = alias[0] + 20.0
    self.assertEqual(alias, src)
@skipMeta
@onlyCUDA
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
def test_dlpack_conversion_with_streams(self, device, dtype):
    """DLPack exchange across two CUDA streams must observe consistent
    data without an explicit wait by the caller."""
    # Create a stream where the tensor will reside
    stream = torch.cuda.Stream()
    with torch.cuda.stream(stream):
        # Do an operation in the actual stream
        x = make_tensor((5,), dtype=dtype, device=device) + 1
    # DLPack protocol helps establish a correct stream order
    # (hence data dependency) at the exchange boundary.
    # DLPack manages this synchronization for us, so we don't need to
    # explicitly wait until x is populated
    stream = torch.cuda.Stream()  # fresh, unrelated consumer stream
    with torch.cuda.stream(stream):
        z = from_dlpack(x)
    stream.synchronize()
    self.assertEqual(z, x)
@skipMeta
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
def test_from_dlpack(self, device, dtype):
    """torch.from_dlpack round-trips a tensor unchanged."""
    src = make_tensor((5,), dtype=dtype, device=device)
    roundtripped = torch.from_dlpack(src)
    self.assertEqual(src, roundtripped)
@skipMeta
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
def test_from_dlpack_noncontinguous(self, device, dtype):
    """from_dlpack must handle non-contiguous views: rows, columns, and
    a transpose of a 5x5 base tensor."""
    base = make_tensor((25,), dtype=dtype, device=device).reshape(5, 5)
    # Same five views the original exercised: first row (via slice and
    # plain index), first column, second row, and the transpose.
    views = (base[0], base[:, 0], base[1, :], base[1], base.t())
    for view in views:
        self.assertEqual(view, torch.from_dlpack(view))
@skipMeta
@onlyCUDA
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
def test_dlpack_conversion_with_diff_streams(self, device, dtype):
    """Passing a consumer stream to __dlpack__ must order it after the
    producer stream, so the exported data is fully populated."""
    stream_a = torch.cuda.Stream()
    stream_b = torch.cuda.Stream()
    # DLPack protocol helps establish a correct stream order
    # (hence data dependency) at the exchange boundary.
    # the `tensor.__dlpack__` method will insert a synchronization event
    # in the current stream to make sure that it was correctly populated.
    with torch.cuda.stream(stream_a):
        x = make_tensor((5,), dtype=dtype, device=device) + 1
        z = torch.from_dlpack(x.__dlpack__(stream_b.cuda_stream))
        stream_a.synchronize()
    stream_b.synchronize()
    self.assertEqual(z, x)
@skipMeta
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
def test_from_dlpack_dtype(self, device, dtype):
    """torch.from_dlpack must preserve the source tensor's dtype."""
    x = make_tensor((5,), dtype=dtype, device=device)
    y = torch.from_dlpack(x)
    # Use the unittest assertion instead of a bare `assert`: it is not
    # stripped under `python -O`, reports both dtypes on failure, and
    # matches the style of the surrounding tests.
    self.assertEqual(x.dtype, y.dtype)
@skipMeta
@onlyCUDA
def test_dlpack_default_stream(self, device):
    """When the current stream is the default stream, the __dlpack__
    protocol must receive the canonical default-stream id: 1 on CUDA,
    0 on ROCm/HIP."""
    class DLPackTensor:
        """Minimal wrapper that spies on the `stream` argument passed by
        from_dlpack before delegating to the real tensor."""
        def __init__(self, tensor):
            self.tensor = tensor

        def __dlpack_device__(self):
            return self.tensor.__dlpack_device__()

        def __dlpack__(self, stream=None):
            # CUDA encodes the legacy default stream as 1, HIP as 0.
            if torch.version.hip is None:
                assert stream == 1
            else:
                assert stream == 0
            # Dead local `converted = True` from the original removed:
            # it was assigned and never read.
            return self.tensor.__dlpack__(stream)

    # CUDA-based tests runs on non-default streams
    with torch.cuda.stream(torch.cuda.default_stream()):
        x = DLPackTensor(make_tensor((5,), dtype=torch.float32, device=device))
        from_dlpack(x)
@skipMeta
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
def test_dlpack_tensor_invalid_stream(self, device, dtype):
    """__dlpack__ must reject a stream argument that is not an integer."""
    with self.assertRaises(TypeError):
        tensor = make_tensor((5,), dtype=dtype, device=device)
        tensor.__dlpack__(stream=object())
@skipMeta
def test_dlpack_error_on_bool_tensor(self):
    """Exporting a bool tensor through DLPack is unsupported and must raise."""
    flags = torch.tensor([True], dtype=torch.bool)
    with self.assertRaises(RuntimeError):
        to_dlpack(flags)
# TODO: increase tests once NumPy supports the `__dlpack__` protocol
@skipMeta
def test_dlpack_export_requires_grad(self):
    """Tensors that require grad cannot be exported via __dlpack__."""
    leaf = torch.zeros(10, dtype=torch.float32, requires_grad=True)
    with self.assertRaisesRegex(RuntimeError, r"require gradient"):
        leaf.__dlpack__()
@skipMeta
def test_dlpack_export_is_conj(self):
    """Tensors with the lazy conjugate bit set cannot be exported via __dlpack__."""
    base = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
    conjugated = torch.conj(base)
    with self.assertRaisesRegex(RuntimeError, r"conjugate bit"):
        conjugated.__dlpack__()
@skipMeta
def test_dlpack_export_non_strided(self):
    """Only strided tensors are exportable via __dlpack__; sparse must raise."""
    sparse = torch.sparse_coo_tensor([[0]], [1], size=(1,))
    exported = torch.conj(sparse)
    with self.assertRaisesRegex(RuntimeError, r"strided"):
        exported.__dlpack__()
@onlyCUDA
@unittest.skipIf(PYTORCH_CUDA_MEMCHECK, "is_pinned uses failure to detect pointer property")
def test_pin_memory_from_constructor(self, device):
    """Factory functions called with pin_memory=True must produce pinned
    tensors, and pinned-ness must NOT be inherited by factories invoked
    without it (even from a pinned prototype)."""
    def _get_like(t, **kwargs):
        # *_like factories that forward pin_memory through kwargs.
        return [
            torch.rand_like(t, **kwargs),
            torch.randn_like(t, **kwargs),
            torch.empty_like(t, **kwargs),
            torch.full_like(t, 4, **kwargs),
            torch.zeros_like(t, **kwargs),
            torch.ones_like(t, **kwargs),
        ]

    def _get_tensors(**kwargs):
        # Plain constructors that accept pin_memory.
        return [
            torch.tensor([10, 11], **kwargs),
            torch.randn(3, 5, **kwargs),
            torch.rand(3, **kwargs),
            # torch.randint(3, 5, **kwargs), // unsupported
            torch.zeros(3, **kwargs),
            torch.randperm(3, **kwargs),
            torch.empty(6, **kwargs),
            torch.ones(6, **kwargs),
            torch.eye(6, **kwargs),
            torch.arange(3, 5, **kwargs)]

    pinned_tensors = _get_tensors(pin_memory=True) + _get_like(torch.empty(5, dtype=torch.float64), pin_memory=True)
    for x in pinned_tensors:
        self.assertTrue(x.is_pinned())
    # Without pin_memory=True the results must be unpinned — even when the
    # *_like prototype itself is pinned.
    tensors = _get_tensors() + _get_like(torch.empty(5, dtype=torch.float64, pin_memory=True))
    for x in tensors:
        self.assertFalse(x.is_pinned())
@deviceCountAtLeast(1)
@onlyCUDA
def test_storage_all_devices(self, devices):
    """A tensor's storage must report the tensor's own dtype on every device."""
    for dev in devices:
        empty = torch.tensor((), device=dev)
        self.assertEqual(empty.dtype, empty.storage().dtype)
# FIXME: move to test distributions
@skipIfMps
@dtypesIfCUDA(torch.float, torch.double, torch.half)
@dtypes(torch.float, torch.double)
def test_multinomial(self, device, dtype):
    """End-to-end checks for torch.multinomial on contiguous and
    non-contiguous distributions: zero-probability indices are never
    drawn, sampling without replacement never repeats within a row, and
    output shapes are correct for 1-D and 2-D inputs."""
    def make_prob_dist(shape, is_contiguous):
        # Builds either a contiguous distribution, or a deliberately
        # non-contiguous one by slicing/transposing a larger buffer.
        if is_contiguous:
            if dtype == torch.half:
                return torch.zeros(shape, device=device).uniform_().to(dtype=torch.half)
            return torch.zeros(shape, device=device, dtype=dtype).uniform_()
        elif len(shape) == 1:
            if dtype == torch.half:
                return torch.zeros((shape + [5]), device=device).uniform_().to(dtype=torch.half)[:, 2]
            return torch.zeros((shape + [5]), device=device, dtype=dtype).uniform_()[:, 2]
        else:
            # num dim = 2
            new_shape = [2, shape[1], 7, 1, shape[0], 1, 10]
            if dtype == torch.half:
                prob_dist = torch.zeros(new_shape, device=device).uniform_().to(dtype=torch.half)
            else:
                prob_dist = torch.zeros(new_shape, device=device, dtype=dtype).uniform_()
            prob_dist = prob_dist.transpose(1, 4)
            prob_dist = prob_dist[1, :, 5, 0, :, 0, 4]
            assert not prob_dist.is_contiguous()  # sanity check
            return prob_dist

    for is_contiguous in (True, False):
        # with replacement
        n_row = 3
        for n_col in range(4, 5 + 1):
            prob_dist = make_prob_dist([n_row, n_col], is_contiguous)
            # indices that shouldn't be sampled (<0 means none)
            zero_prob_indices = torch.LongTensor(n_row).random_(-2, n_col).tolist()
            for i, j in enumerate(zero_prob_indices):
                if j >= 0:
                    prob_dist[i, j] = 0
            n_sample = n_col * 3
            sample_indices = torch.multinomial(prob_dist, n_sample, True)
            self.assertEqual(prob_dist.dim(), 2)
            self.assertEqual(sample_indices.size(1), n_sample)
            for i in range(n_row):
                zero_prob_idx = zero_prob_indices[i]
                if zero_prob_idx < 0:
                    continue
                for j in range(n_sample):
                    self.assertNotEqual(sample_indices[i, j], zero_prob_idx,
                                        msg="sampled an index with zero probability")
        # without replacement
        n_row = 3
        for n_col in range(2, 10 + 1, 2):
            prob_dist = make_prob_dist([n_row, n_col], is_contiguous)
            # indices that shouldn't be sampled (<0 means none)
            zero_prob_indices = torch.LongTensor(n_row).random_(-1, n_col).tolist()
            for i, j in enumerate(zero_prob_indices):
                if j >= 0:
                    prob_dist[i, j] = 0
            n_sample = max(1, n_col - 2)
            sample_indices = torch.multinomial(prob_dist, n_sample, False)
            self.assertEqual(prob_dist.dim(), 2)
            self.assertEqual(sample_indices.size(1), n_sample)
            for i in range(n_row):
                row_samples = {}
                zero_prob_idx = zero_prob_indices[i]
                for j in range(n_sample):
                    sample_idx = sample_indices[i, j]
                    if zero_prob_idx >= 0:
                        self.assertNotEqual(sample_idx, zero_prob_idx,
                                            msg="sampled an index with zero probability")
                    # Without replacement, an index may appear at most once.
                    self.assertNotIn(sample_idx, row_samples, "sampled an index twice")
                    row_samples[sample_idx] = True
        # vector
        n_col = 4
        prob_dist = make_prob_dist([n_col], is_contiguous).fill_(1)
        zero_prob_idx = 1  # index that shouldn't be sampled
        prob_dist[zero_prob_idx] = 0
        n_sample = 20
        sample_indices = torch.multinomial(prob_dist, n_sample, True)
        for sample_index in sample_indices:
            self.assertNotEqual(sample_index, zero_prob_idx, msg="sampled an index with zero probability")
        # NOTE(review): s_dim is never read afterwards — candidate for removal.
        s_dim = sample_indices.dim()
        self.assertEqual(sample_indices.dim(), 1, msg="wrong number of dimensions")
        self.assertEqual(prob_dist.dim(), 1, msg="wrong number of prob_dist dimensions")
        self.assertEqual(sample_indices.size(0), n_sample, msg="wrong number of samples")
    # CUDA misalignment issue (#46702)
    n_row, n_col = 2, 3
    prob_dist = make_prob_dist([n_row, n_col], True)
    n_sample = 1
    sample_indices = torch.multinomial(prob_dist, n_sample, True)
    self.assertEqual(sample_indices.dim(), 2, msg="wrong number of dimensions")
    self.assertEqual(sample_indices.size(1), n_sample, msg="wrong number of samples")
# FIXME: move to test distributions
@onlyCUDA
@dtypes(torch.float, torch.double, torch.half)
def test_multinomial_deterministic(self, device, dtype):
    """With the same Generator seed, multinomial must produce identical
    draws across repeated trials."""
    gen = torch.Generator(device=device)

    trials = 5
    seed = 0
    prob_dist = torch.rand(10000, 1000, device=device, dtype=dtype)
    n_sample = 1

    for i in range(trials):
        gen.manual_seed(seed)
        samples_1 = torch.multinomial(prob_dist, n_sample, True, generator=gen)
        gen.manual_seed(seed)
        samples_2 = torch.multinomial(prob_dist, n_sample, True, generator=gen)
        # Same seed -> identical samples.
        self.assertEqual(samples_1, samples_2)
        self.assertEqual(samples_1.dim(), 2, msg="wrong number of dimensions")
        self.assertEqual(samples_1.size(1), n_sample, msg="wrong number of samples")
# FIXME: move to test distributions
@slowTest
@dtypes(torch.float)
def test_multinomial_rng_state_advance(self, device, dtype):
    """Consecutive multinomial calls must advance the RNG state, so two
    back-to-back calls do not reproduce the same sample stream."""
    corpus_size = 100000
    freqs = torch.ones(corpus_size, dtype=torch.float, device=device)
    n_sample = 100
    samples1 = torch.multinomial(freqs, n_sample, replacement=True)
    samples2 = torch.multinomial(freqs, n_sample, replacement=True)
    samples = torch.cat([samples1, samples2])
    # expect no more than 1 repeating elements generated in 2 attempts
    # the probability of at least element being repeated is surprisingly large, 18%
    self.assertLessEqual(2 * n_sample - samples.unique().size(0), 2)
    samples1 = torch.multinomial(freqs, n_sample, replacement=False)
    samples2 = torch.multinomial(freqs, n_sample, replacement=False)
    samples = torch.cat([samples1, samples2])
    # expect no more than 1 repeating elements generated in 2 attempts
    self.assertLessEqual(2 * n_sample - samples.unique().size(0), 1)
def _test_memory_format_transformations(self, device, input_generator_fn, transformation_fn,
                                        memory_format, compare_data=True, default_is_preserve=False):
    """Shared driver asserting that `transformation_fn` honors
    torch.preserve_format, torch.contiguous_format, and its default
    memory-format behavior for channels-last(-3d) inputs.

    input_generator_fn(device) must return a channels-last(-3d) tensor;
    transformation_fn(tensor, **kwargs) forwards a memory_format kwarg.
    """
    assert(memory_format == torch.channels_last or memory_format == torch.channels_last_3d)

    # xc is a channels last tensor
    xc = input_generator_fn(device)
    # xc is not memory dense, but looks like channels last
    if memory_format == torch.channels_last:
        xc = xc[..., ::2, ::2]
    else:
        xc = xc[..., ::2, ::2, ::2]

    # preserve_format: result keeps the channels-last layout.
    clone = transformation_fn(xc, memory_format=torch.preserve_format)
    self.assertFalse(clone.is_contiguous())
    self.assertTrue(clone.is_contiguous(memory_format=memory_format))
    self.assertFalse(xc.is_contiguous())
    self.assertFalse(xc.is_contiguous(memory_format=memory_format))
    if compare_data:
        self.assertEqual(xc, clone.to(xc))

    # Explicit contiguous_format overrides the source layout.
    xc = input_generator_fn(device)
    clone = transformation_fn(xc, memory_format=torch.contiguous_format)
    self.assertTrue(clone.is_contiguous())
    self.assertFalse(clone.is_contiguous(memory_format=memory_format))
    if compare_data:
        self.assertEqual(xc, clone.to(xc))

    # Default behavior depends on the transformation under test.
    xc = input_generator_fn(device)
    clone = transformation_fn(xc)
    if default_is_preserve:
        self.assertFalse(clone.is_contiguous())
        self.assertTrue(clone.is_contiguous(memory_format=memory_format))
    else:
        self.assertTrue(clone.is_contiguous())
        self.assertFalse(clone.is_contiguous(memory_format=memory_format))
    if compare_data:
        self.assertEqual(xc, clone.to(xc))

    # preserve_format must also keep arbitrary permuted strides intact.
    x = torch.randn((3, 4, 5, 6, 7, 8, 9), device=device)
    for _ in range(10):
        permutation = list(range(len(x.shape)))
        random.shuffle(permutation)
        x = x.permute(permutation)
        self.assertEqual(x.stride(), transformation_fn(x, memory_format=torch.preserve_format).stride())
def test_memory_format_to(self, device):
    """Tensor.to(dtype=...) preserves channels-last memory format by default."""
    def make_input_fn(fmt, shape):
        return lambda device: torch.randn(
            shape, device=device, dtype=torch.float32).contiguous(memory_format=fmt)

    def convert(tensor, **kwargs):
        return tensor.to(dtype=torch.float64, **kwargs)

    for fmt, shape in ((torch.channels_last, (4, 3, 8, 8)),
                       (torch.channels_last_3d, (4, 3, 8, 8, 8))):
        self._test_memory_format_transformations(
            device, make_input_fn(fmt, shape), convert, fmt, default_is_preserve=True)
def test_memory_format_type(self, device):
    """Tensor.to(dtype) (positional form) preserves channels-last format by default."""
    def make_input_fn(fmt, shape):
        return lambda device: torch.randn(
            shape, device=device, dtype=torch.float32).contiguous(memory_format=fmt)

    def convert(tensor, **kwargs):
        return tensor.to(torch.float64, **kwargs)

    for fmt, shape in ((torch.channels_last, (4, 3, 8, 8)),
                       (torch.channels_last_3d, (4, 3, 8, 8, 8))):
        self._test_memory_format_transformations(
            device, make_input_fn(fmt, shape), convert, fmt, default_is_preserve=True)
def test_memory_format_clone(self, device):
    """Tensor.clone preserves channels-last memory format by default."""
    def make_input_fn(fmt, shape):
        return lambda device: torch.randn(
            shape, device=device, dtype=torch.float32).contiguous(memory_format=fmt)

    def do_clone(tensor, **kwargs):
        return tensor.clone(**kwargs)

    for fmt, shape in ((torch.channels_last, (4, 3, 8, 8)),
                       (torch.channels_last_3d, (4, 3, 8, 8, 8))):
        self._test_memory_format_transformations(
            device, make_input_fn(fmt, shape), do_clone, fmt, True, default_is_preserve=True)
def test_memory_format_factory_like_functions_preserve(self, device):
    """All *_like factory functions preserve channels-last layout by default."""
    def make_input_fn(fmt, shape):
        return lambda device: torch.randn(
            shape, device=device, dtype=torch.float32).contiguous(memory_format=fmt)

    factory_fns = [
        lambda t, **kwargs: torch.zeros_like(t, **kwargs),
        lambda t, **kwargs: torch.ones_like(t, **kwargs),
        lambda t, **kwargs: torch.randint_like(t, 10, 100, **kwargs),
        lambda t, **kwargs: torch.randint_like(t, 100, **kwargs),
        lambda t, **kwargs: torch.randn_like(t, **kwargs),
        lambda t, **kwargs: torch.rand_like(t, **kwargs),
        lambda t, **kwargs: torch.full_like(t, 7, **kwargs),
        lambda t, **kwargs: torch.empty_like(t, **kwargs),
    ]
    for fmt, shape in ((torch.channels_last, (4, 3, 8, 8)),
                       (torch.channels_last_3d, (4, 3, 8, 8, 8))):
        for factory in factory_fns:
            # compare_data=False: factory outputs are fresh (random/constant).
            self._test_memory_format_transformations(
                device, make_input_fn(fmt, shape), factory, fmt,
                compare_data=False, default_is_preserve=True)
def test_memory_format_type_shortcuts(self, device):
    """Dtype-shortcut methods (.byte(), .double(), .bool(), ...) preserve
    channels-last memory format by default."""
    def get_generator(memory_format, shape, dtype):
        def input_generator_fn(device):
            # clamp+round keeps values exactly representable after casting
            # to integer/bool dtypes, so data comparison stays meaningful.
            return torch.randn(shape, device=device, dtype=dtype).clamp(0, 1) \
                .round().contiguous(memory_format=memory_format)
        return input_generator_fn

    def get_fn(fn_name):
        def transformation_fn(tensor, **kwargs):
            fn = getattr(tensor, fn_name)
            return fn(**kwargs)
        return transformation_fn

    shortcuts = ['byte', 'char', 'double', 'bool', 'half', 'int', 'long', 'short']
    if device == 'cpu':
        shortcuts += ['bfloat16']

    formats_shapes = (
        (torch.channels_last, (4, 3, 8, 8)),
        (torch.channels_last_3d, (4, 3, 8, 8, 8)))
    for mf, shape in formats_shapes:
        for fn_name in shortcuts:
            self._test_memory_format_transformations(
                device, get_generator(mf, shape, torch.float32), get_fn(fn_name), mf, default_is_preserve=True)

    # Test 'float' separately to avoid float->float no-op.
    for mf, shape in formats_shapes:
        self._test_memory_format_transformations(
            device, get_generator(mf, shape, torch.float64), get_fn('float'), mf, default_is_preserve=True)
@onlyCUDA
def test_memory_format_cpu_and_cuda_ops(self, device):
    """.cpu() and .cuda() device transfers preserve channels-last memory
    format by default (tested in both directions)."""
    def get_generator(memory_format, shape):
        def input_generator_fn(device):
            return torch.randn(shape, device=device, dtype=torch.float32).contiguous(memory_format=memory_format)
        return input_generator_fn

    def transformation_cpu_fn(tensor, **kwargs):
        return tensor.cpu(**kwargs)

    def transformation_cuda_fn(tensor, **kwargs):
        return tensor.cuda(**kwargs)

    formats_shapes = (
        (torch.channels_last, (4, 3, 8, 8)),
        (torch.channels_last_3d, (4, 3, 8, 8, 8)))
    for mf, shape in formats_shapes:
        # cuda -> cpu, then cpu -> cuda.
        self._test_memory_format_transformations(
            'cuda', get_generator(mf, shape), transformation_cpu_fn, mf, default_is_preserve=True)
        self._test_memory_format_transformations(
            'cpu', get_generator(mf, shape), transformation_cuda_fn, mf, default_is_preserve=True)
# FIXME: move to test_serialization
def test_pickle_gradscaler(self, device):
    """GradScaler must survive a pickle round-trip with its hyperparameters
    and (optionally lazily-initialized) scale intact."""
    # This test is not in test_cuda.py because it should pass in 3 cases:
    # 1. cuda is not available.
    # 2. cuda is available but device is not cuda.
    # 3. cuda is available and device is cuda.
    # In case 1, a and b disable themselves on construction and shouldn't try to pickle workhorse attributes.
    # In case 2, a and b are enabled. Workhorse attributes participate in pickling, but none are lazy-inited
    # to cuda Tensors, because I don't want to do cuda things if device is not cuda.
    # In case 3, a and b are enabled and we may also try lazy-initing _scale to a cuda tensor.
    device = torch.device(device)
    try_lazy_inits = (True, False) if device.type == "cuda" else (False,)
    for lazy_init_scale in try_lazy_inits:
        a = torch.cuda.amp.GradScaler(init_scale=3., growth_factor=4., backoff_factor=.5, growth_interval=2)
        self.assertTrue(not a.is_enabled() if torch.cuda.amp.common.amp_definitely_not_available() else a.is_enabled())
        if lazy_init_scale:
            # Dummy a.scale() call lazy-inits a._scale Tensor.
            a.scale(torch.tensor([4.0], dtype=torch.float32, device=device))
            self.assertTrue(isinstance(a._scale, torch.cuda.FloatTensor))
        # The following three lines should work whether or not cuda is available.
        serialized = pickle.dumps(a)
        b = pickle.loads(serialized)
        self.assertEqual(b.is_enabled(), a.is_enabled())
        if a.is_enabled():
            # All constructor hyperparameters must round-trip exactly.
            self.assertEqual(b.get_scale(), 3.)
            self.assertEqual(b.get_growth_factor(), 4.)
            self.assertEqual(b.get_backoff_factor(), .5)
            self.assertEqual(b.get_growth_interval(), 2)
            self.assertEqual(b._init_growth_tracker, 0)
            # supplies a dummy key to test the defaultdict's default_factory
            self.assertEqual(b._per_optimizer_states["fdsa"],
                             torch.cuda.amp.grad_scaler._refresh_per_optimizer_state())
            if lazy_init_scale:
                # 4.0 * restored scale (3.0) == 12.0
                self.assertEqual(b.scale(torch.tensor([4.0], dtype=torch.float32, device=device)), 12.0)
# FIXME: convert to ErrorInputs
@skipIfMps
def test_multinomial_invalid(self, device):
    """multinomial must reject distributions containing inf, nan, or
    negative entries."""
    def expect_failure(probs):
        with self.assertRaisesRegex(RuntimeError,
                                    'probability tensor contains either `inf`, `nan` or element < 0'):
            result = torch.multinomial(probs.to(device), 2)
            if result.is_cuda:
                torch.cuda.synchronize()

    for bad_probs in (torch.tensor([1., -1., 1.]),
                      torch.tensor([1., inf, 1.]),
                      torch.tensor([1., -inf, 1.]),
                      torch.tensor([1., 1., nan])):
        expect_failure(bad_probs)
# FIXME: convert to ErrorInputs
@skipIfMps
def test_multinomial_invalid_distribution(self, device):
    """multinomial must reject distributions whose probabilities sum to <= 0
    (including 2-D inputs where only some rows are all-zero)."""
    def expect_failure(probs, replacement):
        with self.assertRaisesRegex(RuntimeError,
                                    r"invalid multinomial distribution \(sum of probabilities <= 0\)"):
            result = torch.multinomial(probs, 2, replacement)
            if result.is_cuda:
                torch.cuda.synchronize()

    all_zero_1d = torch.zeros(3, device=device)
    all_zero_2d = torch.zeros(3, 3, device=device)
    one_valid_row = torch.zeros(3, 3, device=device)
    one_valid_row[1, :] = 1

    expect_failure(all_zero_1d, False)
    expect_failure(all_zero_2d, False)
    expect_failure(one_valid_row, False)
    # Verify only for CPU as replacement=True
    # throws device side assert triggered.
    if self.device_type == 'cpu':
        expect_failure(all_zero_1d, True)
        expect_failure(all_zero_2d, True)
        expect_failure(one_valid_row, True)
# FIXME: move to test distributions
def _test_multinomial_empty(self, device, replacement, num_samples):
probs = torch.ones(0, 3, device=device)
expected = torch.empty(0, num_samples, dtype=torch.int64)
out = torch.multinomial(probs, num_samples=num_samples, replacement=replacement)
self.assertEqual(out, expected)
# FIXME: move to test distributions
def test_multinomial_empty_w_replacement(self, device):
    """Empty-distribution multinomial with replacement, for 1 and 2 samples."""
    for sample_count in (1, 2):
        self._test_multinomial_empty(device, True, sample_count)
# FIXME: move to test distributions
def test_multinomial_empty_wo_replacement(self, device):
    """Empty-distribution multinomial without replacement, for 1 and 2 samples."""
    for sample_count in (1, 2):
        self._test_multinomial_empty(device, False, sample_count)
@dtypesIfCUDA(torch.float, torch.double, torch.half)
@dtypesIfCPU(torch.float, torch.double, torch.bfloat16)
@dtypes(torch.float, torch.double)
def test_multinomial_cpu(self, device, dtype):
    # NOTE(review): this test only defines `make_prob_dist` and never calls
    # torch.multinomial — it appears truncated or dead; verify against
    # upstream history before relying on it for coverage.
    def make_prob_dist(shape, is_contiguous):
        # Mirrors the helper in test_multinomial, additionally covering
        # bfloat16 alongside half.
        if is_contiguous:
            if dtype == torch.half or dtype == torch.bfloat16:
                return torch.zeros(shape, device=device).uniform_().to(dtype=dtype)
            return torch.zeros(shape, device=device, dtype=dtype).uniform_()
        elif len(shape) == 1:
            if dtype == torch.half or dtype == torch.bfloat16:
                return torch.zeros((shape + [5]), device=device).uniform_().to(dtype=dtype)[:, 2]
            return torch.zeros((shape + [5]), device=device, dtype=dtype).uniform_()[:, 2]
        else:
            # num dim = 2
            new_shape = [2, shape[1], 7, 1, shape[0], 1, 10]
            if dtype == torch.half or dtype == torch.bfloat16:
                prob_dist = torch.zeros(new_shape, device=device).uniform_().to(dtype=dtype)
            else:
                prob_dist = torch.zeros(new_shape, device=device, dtype=dtype).uniform_()
            prob_dist = prob_dist.transpose(1, 4)
            prob_dist = prob_dist[1, :, 5, 0, :, 0, 4]
            assert not prob_dist.is_contiguous()  # sanity check
            return prob_dist
# FIXME: move to elementwise ternary test suite
# As the test fails with Runtime Error not raised on XLA
@onlyNativeDeviceTypes
def test_where_scalar_handcrafted_values(self, device):
    # Tests ScalarxScalar, ScalarxTensor and TensorxScalar
    # variant of `where` against NumPy version with
    # handcrafted values.
    condition_shape = (5, 5)
    dtypes = (
        torch.bool, torch.uint8, torch.int8, torch.int16, torch.int64,
        torch.float16, torch.float32, torch.float64,
        torch.complex64, torch.complex128,
    )
    shapes = ((), (5,), (1, 5),)

    def check_equal(condition, x, y, common_dtype):
        condition_np = condition.cpu().numpy()
        x_np = x.cpu().numpy() if isinstance(x, torch.Tensor) else x
        y_np = y.cpu().numpy() if isinstance(y, torch.Tensor) else y
        # NumPy aggressively promotes to double, hence cast to output to correct dtype
        expected = torch.from_numpy(np.where(condition_np, x_np, y_np)).to(common_dtype)
        result = torch.where(condition, x, y)
        self.assertEqual(expected, result)

    with torch.no_grad():
        # Use different values for `x` and `y`
        # as they are the output values which are compared.
        x_vals = (True, 3, 7.0, 1 + 0.5j)
        for x in x_vals:
            # BUG FIX: the original built `y_vals` (an itertools.chain over a
            # one-shot generator of tensors) once, outside this loop — the
            # iterator was exhausted after the first `x`, so the remaining
            # scalars were silently tested against nothing. Rebuild the
            # generator and the chain for every `x`.
            tensors = (torch.empty(shape, dtype=dtype, device=device).fill_(17)
                       for shape, dtype in product(shapes, dtypes))
            y_vals = itertools.chain((False, 4, 8.0, 2 + 0.5j), tensors)
            for y in y_vals:
                condition = torch.empty(*condition_shape, dtype=torch.bool, device=device).bernoulli_()
                common_dtype = torch.result_type(x, y)
                check_equal(condition, x, y, common_dtype)
                check_equal(condition, y, x, common_dtype)
def test_hook_remove(self, device):
    # Reference: https://github.com/pytorch/pytorch/issues/58354
    # A gradient hook that removes its own handle while it is running must
    # still apply to the current backward pass and be gone for the next one.
    def _test_helper(remove_hook):
        def install_hook(tensor):
            handle = None

            def hook(tensor):
                if remove_hook:
                    # self-removal from inside the hook (the issue above)
                    handle.remove()
                # replace the incoming gradient with zeros
                return torch.zeros_like(tensor)
            handle = tensor.register_hook(hook)

        t = torch.ones((1, 5), device=device, requires_grad=True)
        install_hook(t)

        # First call to backward
        # hook zeroes the gradient, so the accumulated grad is all zeros
        t.mean().backward()
        self.assertEqual(t.grad, torch.zeros_like(t))

        # Second call to backward
        t.mean().backward()
        if remove_hook:
            # After removing the hook, make sure the usual gradient is returned
            # (0.2 = d(mean)/dx for 5 elements, accumulated onto zeros)
            self.assertEqual(t.grad, 0.2 * torch.ones_like(t))
        else:
            # hook still active: zeros accumulate onto zeros
            self.assertEqual(t.grad, torch.zeros_like(t))

    _test_helper(remove_hook=True)
    _test_helper(remove_hook=False)
# FIXME: get PyTorch/XLA to run test_testing
# This test should ideally be in test_testing.py,
# but since pytorch/xla runs tests from test_torch.py, we have it here.
@skipXLA
def test_skip_xla(self, device):
    """Verify @skipXLA: this body must never execute on an XLA device."""
    running_on_xla = self.device_type == 'xla'
    if running_on_xla:
        # The decorator should have skipped us before we got here.
        self.assertTrue(False)
# FIXME: get PyTorch/XLA to run test_testing
# This test should ideally be in test_testing.py,
# but since pytorch/xla runs tests from test_torch.py, we have it here.
@expectedFailureXLA
def test_expected_failure_xla(self, device):
    """Verify @expectedFailureXLA: the deliberate failure below is expected on XLA."""
    running_on_xla = self.device_type == 'xla'
    if running_on_xla:
        # Fails on purpose; the decorator turns this into an expected failure.
        self.assertTrue(False)
# FIXME: get PyTorch/XLA to run test_testing
# This test should ideally be in test_testing.py,
# but since pytorch/xla runs tests from test_torch.py, we have it here.
def test_assertRaisesRegex_ignore_msg_non_native_device(self, device):
    """On non-native devices assertRaisesRegex checks only the exception type.

    XLA raises RuntimeError with a different message, so the regex below is
    expected to be ignored there while the error class is still verified.
    """
    logits = torch.randn((10, 3), device=device)
    targets = torch.empty(10, dtype=torch.int64, device=device).random_(0, 3)
    # weight has 4 entries but there are only 3 classes -> must error
    invalid_weight = torch.randn(4, device=device)
    msg = "weight tensor should be defined either for all 3 classes or no classes"
    with self.assertRaisesRegex(RuntimeError, msg):
        torch.nn.functional.nll_loss(logits, targets, weight=invalid_weight)
@dtypes(*all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.complex32))
def test_copy_(self, device, dtype):
    # copy_() must round-trip values whenever the source dtype safe-casts
    # into the destination dtype.
    def can_cast(src_dtype, dst_dtype):
        # torch.can_cast(torch.int16, torch.uint8) returns True
        # which isn't actually safe-cast.
        # This function returns False in this case.
        def is_unsigned_int(dtype):
            return dtype is torch.uint8

        if is_unsigned_int(dst_dtype):
            return is_unsigned_int(src_dtype)
        return torch.can_cast(src_dtype, dst_dtype)

    def make_tensor_wrapper(shape, dtype):
        if dtype is not torch.complex32:
            # Make tensor does not support generating
            # complex32 tensor
            return make_tensor(shape, device=device, dtype=dtype)
        return torch.randn(shape, device=device, dtype=dtype)

    t = make_tensor_wrapper((50,), dtype)
    src_dtypes = all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.complex32)
    for src_dtype in src_dtypes:
        src = make_tensor_wrapper((50,), dtype=src_dtype)
        t.copy_(src)
        dst = make_tensor_wrapper((50, ), dtype=src_dtype)
        if can_cast(src_dtype, dtype):
            # reduced-precision destinations need looser tolerances for
            # the src -> t -> dst round-trip comparison below
            rtol = None
            atol = None
            if dtype in (torch.half, torch.complex32):
                rtol = 1e-3
                atol = 1e-3
            if dtype in (torch.bfloat16,):
                rtol = 1e-2
                atol = 1e-2
            self.assertEqual(src, dst.copy_(t), rtol=rtol, atol=atol)
@dtypes(*all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.complex32))
def test_item(self, device, dtype):
    """item() on a 0-dim tensor of ones returns the Python scalar 1 for every dtype."""
    scalar = torch.ones((), device=device, dtype=dtype)
    self.assertEqual(1, scalar.item())
# Tests that compare a device's computation with the (gold-standard) CPU's.
class TestDevicePrecision(TestCase):
    exact_dtype = True

    # FIXME: move to indexing test suite
    @onlyCUDA
    def test_index_add_bfloat16(self, device):
        # bfloat16 index_add on the device should roughly match the CPU result.
        inp_tensor = torch.randn(5, 3, device='cpu').bfloat16()
        t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.bfloat16, device='cpu')
        index = torch.tensor([0, 4, 2], device='cpu')
        out_cpu = inp_tensor.index_add(0, index, t)

        inp_tensor = inp_tensor.to(device=device)
        t = t.to(device=device)
        index = index.to(device=device)
        out_gpu = inp_tensor.index_add(0, index, t)

        # loose atol: bfloat16 carries only ~3 decimal digits of precision
        self.assertEqual(out_cpu, out_gpu, atol=1e-2, rtol=0)

    # FIXME: move to serialization test suite
    def test_device_serialization(self, device):
        # save/load round trip must preserve value, type and device.
        x = torch.randn(4, 4, device=device)

        with tempfile.NamedTemporaryFile() as f:
            torch.save(x, f)
            f.seek(0)
            x_copy = torch.load(f)

        self.assertEqual(x_copy, x)
        self.assertIs(type(x_copy), type(x))
        self.assertEqual(x_copy.device, x.device)

    # FIXME: move to serialization test suite
    @deviceCountAtLeast(2)
    def test_multidevice_serialization(self, devices):
        # A list of tensors on different devices round-trips per tensor.
        x = [torch.randn(4, 4, device=devices[0]),
             torch.randn(4, 4, device=devices[1])]

        with tempfile.NamedTemporaryFile() as f:
            torch.save(x, f)
            f.seek(0)
            x_copy = torch.load(f)

        for original, cp in zip(x, x_copy):
            self.assertEqual(cp, original)
            self.assertIs(type(cp), type(original))
            self.assertEqual(cp.device, original.device)

    # FIXME: move to data movement test suite
    @deviceCountAtLeast(1)
    def test_copy_noncontig(self, devices):
        # Strided (non-contiguous) cross-device copy_ that also changes dtype.
        def do_test(d0, d1):
            x = torch.tensor([1.5, 2.5, 3.5, 4.5, 5.5, 6.5], device=d0)
            y = torch.tensor([0, 0, 0, 0, 0, 0], device=d1)
            self.assertNotEqual(x.dtype, y.dtype)

            # every-other-element copy: float source into int destination
            y[::2].copy_(x[::2])
            self.assertEqual(y, [1, 0, 3, 0, 5, 0])

        do_test('cpu', devices[0])
        do_test(devices[0], 'cpu')

        if len(devices) > 1:
            do_test(devices[0], devices[1])

    @deviceCountAtLeast(2)
    def test_type_conversions_same_device(self, devices):
        # dtype conversions must stay on the tensor's (non-default) device.
        x = torch.randn(5, 5, device=devices[1])
        self.assertEqual(x.int().device, torch.device(devices[1]))
        self.assertEqual(x.type(torch.int).device, torch.device(devices[1]))
        self.assertEqual(x.to(torch.int).device, torch.device(devices[1]))

    @dtypesIfCUDA(torch.half, torch.float, torch.double,
                  torch.int8, torch.short, torch.int, torch.long,
                  torch.uint8)
    @dtypes(torch.float, torch.double,
            torch.int8, torch.short, torch.int, torch.long,
            torch.uint8)
    def test_from_sequence(self, device, dtype):
        # Building a tensor from a nested Python sequence matches arange.
        seq = [list(range(i * 4, i * 4 + 4)) for i in range(5)]
        reference = torch.arange(0, 20).resize_(5, 4)
        self.assertEqual(torch.tensor(seq, dtype=dtype, device=device), reference, exact_dtype=False)

    # FIXME: moved to indexing test suite
    @deviceCountAtLeast(1)
    def test_advancedindex_mixed_cpu_devices(self, devices) -> None:
        # Advanced indexing of a device tensor with CPU (or mixed CPU/device)
        # index tensors must agree with the all-CPU computation.
        def test(x: torch.Tensor, ia: torch.Tensor, ib: torch.Tensor) -> None:
            # test getitem
            self.assertEqual(x[:, ia, None, ib, 0].cpu(),
                             x.cpu()[:, ia.cpu(), None, ib.cpu(), 0])
            self.assertEqual(x[ia], x.cpu()[ia.cpu()])
            # test setitem (only checks it does not raise)
            x_clone1 = x.clone()
            x_clone2 = x.clone()
            first_shape = x[:, ia, None, ib, 0].shape
            second_shape = x[ia].shape
            x_clone1[:, ia, None, ib, 0] = torch.randn(first_shape).to(x_clone1)
            x_clone2[ia] = torch.randn(second_shape).to(x_clone2)

        cpu = torch.device('cpu')
        for device in devices:
            x = torch.randn(3, 4, 4, 4, 3)
            ia = torch.tensor([0, 2, 1])
            ib = torch.tensor([0, 2, 1])

            # Index device tensor with cpu tensor
            x = x.to(device)
            ia = ia.to(cpu)
            ib = ib.to(cpu)
            test(x, ia, ib)

            # Index device tensor with mixed cpu, device tensors
            x = x.to(device)
            ia = ia.to(cpu)
            ib = ib.to(device)
            test(x, ia, ib)

    @deviceCountAtLeast(1)
    def test_advancedindex_mixed_devices_error(self, devices) -> None:
        # Indexing a CPU tensor with device index tensors must raise.
        def test(x: torch.Tensor, ia: torch.Tensor, ib: torch.Tensor) -> None:
            # test getitem
            with self.assertRaisesRegex(RuntimeError, fr"indices should be either .* \({x.device}\)"):
                value = x[:, ia, None, ib, 0]
            with self.assertRaisesRegex(RuntimeError, fr"indices should be either .* \({x.device}\)"):
                value = x[ib]

        cpu = torch.device('cpu')
        for device in devices:
            # Index cpu tensor with device tensor
            x = torch.randn(3, 4, 4, 4, 3)
            ia = torch.tensor([0, 2, 1]).to(device)
            ib = torch.tensor([0, 2, 1]).to(device)
            test(x, ia, ib)

            # Index cpu tensor with mixed cpu, device tensors
            x = x.to(cpu)
            ia = ia.to(cpu)
            ib = ib.to(device)
            test(x, ia, ib)

            if len(devices) > 1:
                other_device = devices[0] if device == devices[1] else devices[1]

                # Index device tensor with mixed cpu, device tensors on different devices
                x = x.to(device)
                ia = ia.to(cpu)
                ib = ib.to(other_device)
                test(x, ia, ib)

    # FIXME: move to data movement test suite
    def test_copy_broadcast(self, device) -> None:
        # copy_ broadcasts a 1-D source row-wise into a 2-D destination,
        # in both cpu->device and device->cpu directions.
        x = torch.randn(10, 5)
        y = torch.randn(5, device=device)
        x.copy_(y)
        self.assertEqual(x[3], y)

        x = torch.randn(10, 5, device=device)
        y = torch.randn(5)
        x.copy_(y)
        self.assertEqual(x[3], y)

    # FIXME: move to an elementwise ternary test suite
    @dtypes(torch.int64, torch.float32, torch.float64)
    def test_clamp(self, device, dtype):
        # clamp with tensor bounds must equal max(lb).min(ub) and np.clip,
        # including non-contiguous inputs and broadcasting of either side.
        test_args = [
            *product(
                [(100, 50), (10, 64), (97,)],  # shape
                (True, False),  # non-contiguous
            )
        ]

        for shape, noncontig in test_args:
            x = make_tensor(shape, device=device, dtype=dtype,
                            noncontiguous=noncontig)
            ub = make_tensor(shape, device=device, dtype=dtype,
                             noncontiguous=noncontig)
            lb = make_tensor(shape, device=device, dtype=dtype,
                             noncontiguous=noncontig)

            expect = x.max(lb).min(ub)
            actual = x.clamp(lb, ub)
            self.assertEqual(expect, actual)

            expect = np.clip(x.cpu().numpy(), lb.cpu().numpy(), ub.cpu().numpy())
            self.assertEqual(expect, actual)

            # one-sided clamps
            expect = x.max(lb)
            actual = x.clamp(min=lb)
            self.assertEqual(expect, actual)

            expect = x.min(ub)
            actual = x.clamp(max=ub)
            self.assertEqual(expect, actual)

            # Test broadcasting min & max
            expect = x.max(lb[0]).min(ub[..., :1])
            actual = x.clamp(lb[0], ub[..., :1])
            self.assertEqual(expect, actual)

            # Test broadcasting x
            expect = x[..., :1].max(lb).min(ub)
            actual = x[..., :1].clamp(lb, ub)
            self.assertEqual(expect, actual)

    def test_cuda_device_idx(self, device):
        # _efficientzerotensor must land on the same device as regular zeros().
        x = torch.zeros(3, device=device)
        y = torch._efficientzerotensor(3, device=device)
        self.assertEqual(x.device, y.device)
# we implemented custom deallocation for subclasses, so it behooves
# us to make sure all of these bits work. We'll use __del__ to
# track if objects die or not
class Tracker:
    """Flips a shared one-element marker list to True when the instance dies."""

    def __init__(self, marker):
        # `marker` is a mutable cell shared with the test that created us
        self.marker = marker

    @staticmethod
    def make():
        """Return (marker, tracker); marker[0] becomes True once tracker is collected."""
        cell = [False]
        return cell, Tracker(cell)

    def __del__(self):
        self.marker[0] = True
@contextlib.contextmanager
def disable_gc():
    """Temporarily turn off the cyclic GC; restore it on exit if it was on."""
    if not gc.isenabled():
        # GC already off: nothing to do and nothing to restore.
        yield
        return
    gc.disable()
    try:
        yield
    finally:
        gc.enable()
class TestTorch(TestCase):
exact_dtype = True
def test_dir(self):
    """Smoke test: enumerating torch's attributes must not raise."""
    _ = dir(torch)
def test_wildcard_import(self):
    """Smoke test: `from torch import *` must execute without raising."""
    exec('from torch import *')
def test_newaxis_numpy_comparison(self):
    # Indexing with None (newaxis), Ellipsis and ints must match NumPy exactly.
    def run_test(tensor, *idx):
        npt = tensor.numpy()
        self.assertEqual(tensor[idx], npt[idx])

    # 1D Tensor Tests
    x = torch.arange(0, 10)
    cases = [
        [None],
        [None, None],
        [Ellipsis, None],
        [None, Ellipsis],
        [2, None],
        [None, 2],
        [Ellipsis, None, 2],
        [Ellipsis, 2, None],
        [2, Ellipsis, None],
        [2, None, Ellipsis],
        [None, 2, Ellipsis],
        [None, Ellipsis, 2],
    ]

    for case in cases:
        run_test(x, *case)

    # 2D Tensor Tests
    x = torch.arange(0, 12).view(3, 4)
    cases = [
        [None],
        [None, None],
        [None, None, None],
        [Ellipsis, None],
        [Ellipsis, None, None],
        [None, Ellipsis],
        [None, Ellipsis, None],
        [None, None, Ellipsis],
        [2, None],
        [2, None, Ellipsis],
        [2, Ellipsis, None],
        [None, 2, Ellipsis],
        [Ellipsis, 2, None],
        [Ellipsis, None, 2],
        [None, Ellipsis, 2],
        [1, 2, None],
        [1, 2, Ellipsis, None],
        [1, Ellipsis, 2, None],
        [Ellipsis, 1, None, 2],
        [Ellipsis, 1, 2, None],
        [1, None, 2, Ellipsis],
        [None, 1, Ellipsis, 2],
        [None, 1, 2, Ellipsis],
    ]

    for case in cases:
        run_test(x, *case)
def _consecutive(self, size, start=1):
    """Return a tensor of the given size filled with start, start+1, ... in order."""
    element_count = torch.tensor(size).prod(0)
    seq = torch.ones(element_count).cumsum(0)  # 1, 2, ..., prod(size)
    seq.add_(start - 1)  # shift so the first element equals `start`
    return seq.resize_(*size)
def test_newindex(self):
    # Assignment through an index must write exactly the selected elements
    # and nothing else; bad indices must raise.
    reference = self._consecutive((3, 3, 3))

    # This relies on __index__() being correct - but we have separate tests for that
    def checkPartialAssign(index):
        reference = torch.zeros(3, 3, 3)
        reference[index] = self._consecutive((3, 3, 3))[index]
        self.assertEqual(reference[index], self._consecutive((3, 3, 3))[index], atol=0, rtol=0)
        # zeroing the same selection restores the all-zeros tensor
        reference[index] = 0
        self.assertEqual(reference, torch.zeros(3, 3, 3), atol=0, rtol=0)

    checkPartialAssign(0)
    checkPartialAssign(1)
    checkPartialAssign(2)
    checkPartialAssign((0, 1))
    checkPartialAssign((1, 2))
    checkPartialAssign((0, 2))
    checkPartialAssign(torch.LongTensor((0, 2)))

    # too many indices / out-of-range indices
    with self.assertRaises(IndexError):
        reference[1, 1, 1, 1] = 1
    with self.assertRaises(IndexError):
        reference[1, 1, 1, (1, 1)] = 1
    with self.assertRaises(IndexError):
        reference[3, 3, 3, 3, 3, 3, 3, 3] = 1
    # float indices and float slice bounds are rejected
    with self.assertRaises(IndexError):
        reference[0.0] = 1
    with self.assertRaises(TypeError):
        reference[0.0:2.0] = 1
    with self.assertRaises(IndexError):
        reference[0.0, 0.0:2.0] = 1
    with self.assertRaises(IndexError):
        reference[0.0, :, 0.0:2.0] = 1
    with self.assertRaises(IndexError):
        reference[0.0, ..., 0.0:2.0] = 1
    with self.assertRaises(IndexError):
        reference[0.0, :, 0.0] = 1
# FIXME: move to indexing test suite
def test_index_add(self):
    # index_add_ (with and without alpha) must match a manual Python loop,
    # across all contiguity combinations, index dtypes and trailing shapes.
    for device in get_all_device_types():
        for dest_contig, src_contig, index_contig in product([True, False], repeat=3):
            for other_sizes in ((), (4, 5)):
                for dtype in [torch.int, torch.long]:
                    num_copy, num_dest = 3, 3
                    dest = torch.randn(num_dest, *other_sizes, device=device)
                    if not dest_contig:
                        dest = make_tensor(dest.shape, device=device, dtype=dest.dtype, noncontiguous=True)
                    src = torch.randn(num_copy, *other_sizes, device=device)
                    if not src_contig:
                        src = torch.testing.make_non_contiguous(src)
                    idx = torch.randperm(num_dest, dtype=dtype, device=device).narrow(0, 0, num_copy)
                    if not index_contig:
                        idx = torch.testing.make_non_contiguous(idx)
                    # index_add_ without alpha argument
                    dest2 = dest.clone()
                    dest.index_add_(0, idx, src)
                    for i in range(idx.size(0)):
                        dest2[idx[i]] += src[i]
                    self.assertEqual(dest, dest2)
                    # index_add_ with alpha argument
                    dest2 = dest.clone()
                    dest.index_add_(0, idx, src, alpha=2)
                    for i in range(idx.size(0)):
                        dest2[idx[i]] += src[i] * 2
                    self.assertEqual(dest, dest2)
# FIXME: resolve comment below and move this to indexing test suite
# add coverage for issue with atomic add that appeared only for
# specific dtypes on cuda:
# https://github.com/pytorch/pytorch/issues/29153
def test_index_add_all_dtypes(self):
    for device in get_all_device_types():
        for dtype in get_all_math_dtypes(device):
            for idx_dtype in [torch.int, torch.long]:
                size = [5, 5]
                # pick a random-value generator appropriate for the dtype family
                if dtype.is_floating_point or dtype.is_complex:
                    tensor = torch.rand(size, dtype=dtype, device=device)
                elif dtype.is_signed:
                    tensor = torch.randint(-5, 15, size, dtype=dtype, device=device)
                else:
                    tensor = torch.randint(0, 10, size, dtype=dtype, device=device)

                # index_add calls atomicAdd on cuda.
                zeros = torch.zeros(size, dtype=dtype, device=device)

                # adding each row exactly once onto zeros reproduces the tensor
                added = zeros.index_add(0, torch.arange(0, size[0], dtype=idx_dtype, device=device), tensor)
                self.assertEqual(added, tensor)

                # ... and alpha=-1 negates it
                added = zeros.index_add(0, torch.arange(0, size[0], dtype=idx_dtype, device=device), tensor, alpha=-1)
                self.assertEqual(added, -tensor)
# FIXME: move to shape ops test suite
def test_unflatten(self):
    # unflatten splits one dimension into the given sizes; a single -1
    # entry is inferred from the remaining sizes.
    # test args: tensor, int, sizes
    self.assertEqual(torch.tensor([]).unflatten(0, (0, 1)), torch.empty(0, 1))
    self.assertEqual(torch.tensor([1]).unflatten(0, (1, 1)), torch.tensor([[1]]))
    self.assertEqual(torch.tensor([1, 2, 3, 4]).unflatten(0, (2, 2)), torch.tensor([[1, 2], [3, 4]]))
    self.assertEqual(torch.tensor([1, 2, 3, 4]).unflatten(0, [2, 2]), torch.tensor([[1, 2], [3, 4]]))
    self.assertEqual(torch.tensor([1, 2, 3, 4]).unflatten(0, torch.Size([2, 2])), torch.tensor([[1, 2], [3, 4]]))
    self.assertEqual(torch.ones(2, 10).unflatten(1, (5, 2)), torch.ones(2, 5, 2))
    # -1 size inference
    self.assertEqual(torch.tensor([1, 2, 3, 4]).unflatten(0, (-1, 2)),
                     torch.tensor([[1, 2], [3, 4]]))
    self.assertEqual(torch.ones(2, 10).unflatten(1, (5, -1)),
                     torch.ones(2, 5, 2))
    self.assertEqual(torch.ones(2, 10).unflatten(1, (-1,)),
                     torch.ones(2, 10))
    self.assertEqual(torch.ones(2, 3 * 4 * 5 * 6).unflatten(1, (3, 4, -1, 6)),
                     torch.ones(2, 3, 4, 5, 6))
    # -1 next to a zero-sized dim resolves to 0
    self.assertEqual(torch.ones(2, 0, 2).unflatten(1, (3, -1, 4, 5)),
                     torch.ones(2, 3, 0, 4, 5, 2))

    # test invalid args: tensor, str, sizes
    with self.assertRaisesRegex(TypeError, r"unflatten\(\): argument 'dim' \(position 1\) must be int, not str"):
        torch.tensor([1]).unflatten('A', (1, 1))

    # test invalid args: tensor, str, namedshape
    with self.assertRaisesRegex(RuntimeError, r"Name 'A' not found in Tensor\[None\]."):
        torch.ones(4).unflatten('A', (('A', 2), ('B', 2)))

    # test other invalid arguments
    with self.assertRaisesRegex(RuntimeError, r"sizes must be non-empty"):
        torch.tensor([1]).unflatten(0, [])
    with self.assertRaisesRegex(RuntimeError, r"Provided sizes \[2, 2\] don't multiply up to the size of dim 0 \(1\)"):
        torch.tensor([1]).unflatten(0, [2, 2])
    with self.assertRaisesRegex(IndexError, r"dimension specified as 0 but tensor has no dimensions"):
        torch.tensor(1).unflatten(0, [0])
    with self.assertRaisesRegex(RuntimeError, r"only one dimension can be inferred"):
        torch.randn(5, 10).unflatten(1, (-1, -1))
    with self.assertRaisesRegex(RuntimeError,
                                r"Provided sizes \[-1, 4\] don't multiply up to the size of dim 1 \(10\)"):
        torch.randn(5, 10).unflatten(1, (-1, 4))
    with self.assertRaisesRegex(RuntimeError,
                                r"the unspecified dimension size -1 can be any value and is ambiguous"):
        torch.randn(2, 0).unflatten(1, (2, -1, 0))
def test_structseq_repr(self):
    # repr() of a structseq return type (torch.return_types.max here) must
    # print each field on its own line, matching the dedented template.
    # NOTE(review): the whitespace inside `expected` (element padding and
    # continuation indents) must match torch's tensor repr exactly — confirm
    # against the original file before relying on this reconstruction.
    a = torch.arange(250).reshape(5, 5, 10)
    expected = """
    torch.return_types.max(
    values=tensor([[ 40,  41,  42,  43,  44,  45,  46,  47,  48,  49],
            [ 90,  91,  92,  93,  94,  95,  96,  97,  98,  99],
            [140, 141, 142, 143, 144, 145, 146, 147, 148, 149],
            [190, 191, 192, 193, 194, 195, 196, 197, 198, 199],
            [240, 241, 242, 243, 244, 245, 246, 247, 248, 249]]),
    indices=tensor([[4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
            [4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
            [4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
            [4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
            [4, 4, 4, 4, 4, 4, 4, 4, 4, 4]]))"""
    self.assertEqual(repr(a.max(1)), textwrap.dedent(expected).strip())
def test_is_same_size(self):
    # Dense tensors compare by shape; nested tensors compare component-wise
    # and cannot be mixed with dense ones.
    t1 = torch.empty(3, 4, 9, 10)
    t2 = torch.empty(3, 4)
    t3 = torch.empty(1, 9, 3, 3)
    t4 = torch.empty(3, 4, 9, 10)

    self.assertFalse(t1.is_same_size(t2))
    self.assertFalse(t1.is_same_size(t3))
    self.assertTrue(t1.is_same_size(t4))

    nt1 = torch.nested_tensor([torch.ones(2, 4), torch.ones(3, 4), torch.ones(5, 4)])
    nt2 = torch.nested_tensor([torch.ones(2, 4), torch.ones(2, 4), torch.ones(2, 4)])
    nt3 = torch.nested_tensor([torch.ones(2, 4, 5), torch.ones(2, 6, 5)])
    nt4 = torch.nested_tensor([torch.ones(2, 4), torch.ones(3, 4), torch.ones(5, 4)])

    self.assertFalse(nt1.is_same_size(nt2))
    self.assertFalse(nt1.is_same_size(nt3))
    self.assertTrue(nt1.is_same_size(nt4))

    # mixing nested and dense raises in both argument orders
    with self.assertRaisesRegex(RuntimeError, "Expected both self and other to be nested tensors."):
        t1.is_same_size(nt1)

    with self.assertRaisesRegex(RuntimeError, "Expected both self and other to be nested tensors."):
        nt1.is_same_size(t1)
def test_tensor_set(self):
    # set_() re-points a tensor at another tensor's storage with optional
    # offset / size / stride, both positionally and by keyword.
    t1 = torch.tensor([])
    t2 = torch.empty(3, 4, 9, 10).uniform_()
    t1.set_(t2)
    # both tensors now share one storage
    self.assertEqual(t1.storage()._cdata, t2.storage()._cdata)
    size = torch.Size([9, 3, 4, 10])
    t1.set_(t2.storage(), 0, size)
    self.assertEqual(t1.size(), size)
    t1.set_(t2.storage(), 0, tuple(size))
    self.assertEqual(t1.size(), size)
    # default strides are the contiguous strides derived from `size`
    self.assertEqual(t1.stride(), (120, 40, 10, 1))
    stride = (10, 360, 90, 1)
    t1.set_(t2.storage(), 0, size, stride)
    self.assertEqual(t1.stride(), stride)
    t1.set_(t2.storage(), 0, size=size, stride=stride)
    self.assertEqual(t1.size(), size)
    self.assertEqual(t1.stride(), stride)

    # test argument names
    t1 = torch.tensor([])
    # 1. case when source is tensor
    t1.set_(source=t2)
    self.assertEqual(t1.storage()._cdata, t2.storage()._cdata)
    # 2. case when source is storage
    t1.set_(source=t2.storage())
    self.assertEqual(t1.storage()._cdata, t2.storage()._cdata)
    # 3. case when source is storage, and other args also specified
    t1.set_(source=t2.storage(), storage_offset=0, size=size, stride=stride)
    self.assertEqual(t1.size(), size)
    self.assertEqual(t1.stride(), stride)

    # bool tensors also share storage after set_
    t1 = torch.tensor([True, True], dtype=torch.bool)
    t2 = torch.tensor([False, False], dtype=torch.bool)
    t1.set_(t2)
    self.assertEqual(t1.storage()._cdata, t2.storage()._cdata)
def test_tensor_set_errors(self):
    # set_() must refuse sources whose dtype or device differs from self.
    f_cpu = torch.randn((2, 3), dtype=torch.float32)
    d_cpu = torch.randn((2, 3), dtype=torch.float64)

    # change dtype
    self.assertRaises(RuntimeError, lambda: f_cpu.set_(d_cpu.storage()))
    self.assertRaises(RuntimeError,
                      lambda: f_cpu.set_(d_cpu.storage(), 0, d_cpu.size(), d_cpu.stride()))
    self.assertRaises(RuntimeError, lambda: f_cpu.set_(d_cpu))

    # change device
    if torch.cuda.is_available():
        f_cuda = torch.randn((2, 3), dtype=torch.float32, device='cuda')

        # cpu -> cuda
        self.assertRaises(RuntimeError, lambda: f_cpu.set_(f_cuda.storage()))
        self.assertRaises(RuntimeError,
                          lambda: f_cpu.set_(f_cuda.storage(), 0, f_cuda.size(), f_cuda.stride()))
        self.assertRaises(RuntimeError, lambda: f_cpu.set_(f_cuda))

        # cuda -> cpu
        self.assertRaises(RuntimeError, lambda: f_cuda.set_(f_cpu.storage()))
        self.assertRaises(RuntimeError,
                          lambda: f_cuda.set_(f_cpu.storage(), 0, f_cpu.size(), f_cpu.stride()))
        self.assertRaises(RuntimeError, lambda: f_cuda.set_(f_cpu))
# FIXME: move this test test_testing.py (along with allclose testing)
# NOTE: test_equal will be deprecated in favor of torch.testing.assert_close
# once torch.testing is out of beta
def test_equal(self):
    # torch.equal / Tensor.equal compare shape and values, for both
    # contiguous and non-contiguous tensors.
    # Contiguous, 1D
    t1 = torch.tensor((3., 4., 9., 10.))
    t2 = t1.contiguous()
    t3 = torch.tensor((1., 9., 3., 10.))
    t4 = torch.tensor((3., 4., 9.))
    t5 = torch.tensor([])
    self.assertTrue(t1.equal(t2))
    self.assertFalse(t1.equal(t3))  # same shape, different values
    self.assertFalse(t1.equal(t4))  # different shape
    self.assertFalse(t1.equal(t5))  # empty
    self.assertTrue(torch.equal(t1, t2))
    self.assertFalse(torch.equal(t1, t3))
    self.assertFalse(torch.equal(t1, t4))
    self.assertFalse(torch.equal(t1, t5))

    # Non contiguous, 2D
    s = torch.tensor(((1, 2, 3, 4), (5, 6, 7, 8)))
    s1 = s[:, 1:3]
    s2 = s1.clone()
    s3 = torch.tensor(((2, 3), (6, 7)))
    s4 = torch.tensor(((0, 0), (0, 0)))
    self.assertFalse(s1.is_contiguous())  # sanity check
    self.assertTrue(s1.equal(s2))
    self.assertTrue(s1.equal(s3))
    self.assertFalse(s1.equal(s4))
    self.assertTrue(torch.equal(s1, s2))
    self.assertTrue(torch.equal(s1, s3))
    self.assertFalse(torch.equal(s1, s4))
def test_element_size(self):
    # element_size() of each Storage type must agree with the matching
    # Tensor type and with minimal portable size guarantees.
    # (The locals deliberately shadow builtins for brevity; kept as-is.)
    byte = torch.ByteStorage().element_size()
    char = torch.CharStorage().element_size()
    short = torch.ShortStorage().element_size()
    int = torch.IntStorage().element_size()
    long = torch.LongStorage().element_size()
    float = torch.FloatStorage().element_size()
    double = torch.DoubleStorage().element_size()
    bool = torch.BoolStorage().element_size()
    bfloat16 = torch.BFloat16Storage().element_size()
    complexfloat = torch.ComplexFloatStorage().element_size()
    complexdouble = torch.ComplexDoubleStorage().element_size()

    # storage and tensor of the same scalar type must agree
    self.assertEqual(byte, torch.ByteTensor().element_size())
    self.assertEqual(char, torch.CharTensor().element_size())
    self.assertEqual(short, torch.ShortTensor().element_size())
    self.assertEqual(int, torch.IntTensor().element_size())
    self.assertEqual(long, torch.LongTensor().element_size())
    self.assertEqual(float, torch.FloatTensor().element_size())
    self.assertEqual(double, torch.DoubleTensor().element_size())
    self.assertEqual(bool, torch.BoolTensor().element_size())
    self.assertEqual(bfloat16, torch.tensor([], dtype=torch.bfloat16).element_size())
    self.assertEqual(complexfloat, torch.tensor([], dtype=torch.complex64).element_size())
    self.assertEqual(complexdouble, torch.tensor([], dtype=torch.complex128).element_size())

    # every element size is positive
    self.assertGreater(byte, 0)
    self.assertGreater(char, 0)
    self.assertGreater(short, 0)
    self.assertGreater(int, 0)
    self.assertGreater(long, 0)
    self.assertGreater(float, 0)
    self.assertGreater(double, 0)
    self.assertGreater(bool, 0)
    self.assertGreater(bfloat16, 0)
    self.assertGreater(complexfloat, 0)
    self.assertGreater(complexdouble, 0)

    # These tests are portable, not necessarily strict for your system.
    self.assertEqual(byte, 1)
    self.assertEqual(char, 1)
    self.assertEqual(bool, 1)
    self.assertGreaterEqual(short, 2)
    self.assertGreaterEqual(int, 2)
    self.assertGreaterEqual(int, short)
    self.assertGreaterEqual(long, 4)
    self.assertGreaterEqual(long, int)
    self.assertGreaterEqual(double, float)
def test_permute(self):
    """permute() reorders dimensions according to the given permutation."""
    base_shape = [1, 2, 3, 4, 5, 6, 7]
    perm = torch.randperm(7).tolist()
    t = torch.empty(*base_shape).fill_(0)
    # output size along dim i is base_shape[perm[i]] == perm[i] + 1,
    # so subtracting 1 recovers the permutation itself
    recovered = [s - 1 for s in t.permute(*perm).size()]
    self.assertEqual(perm, recovered)
    # permute returns a view; the original shape is untouched
    self.assertEqual(t.size(), base_shape)
@skipIfTorchDynamo("TorchDynamo fails with unknown reason")
def test_reversed(self):
    """reversed() flips a tensor along dim 0; a 0-dim tensor is returned unchanged."""
    vec = torch.arange(0, 10)
    self.assertEqual(reversed(vec), torch.arange(9, -1, -1))

    mat = torch.arange(1, 10).view(3, 3)
    self.assertEqual(reversed(mat), torch.tensor([[7, 8, 9], [4, 5, 6], [1, 2, 3]]))

    scalar = torch.tensor(42)
    self.assertEqual(reversed(scalar), torch.tensor(42))
def test_contains(self):
    # `in` supports scalars and tensors (sub-tensor membership along dim 0);
    # any other type raises RuntimeError.
    x = torch.arange(0, 10)
    self.assertEqual(4 in x, True)
    self.assertEqual(12 in x, False)

    x = torch.arange(1, 10).view(3, 3)
    val = torch.arange(1, 4)
    self.assertEqual(val in x, True)
    val += 10
    self.assertEqual(val in x, False)

    self.assertRaisesRegex(
        RuntimeError,
        "Tensor.__contains__ only supports Tensor or scalar, but you passed in a {}.".format(type("foo")),
        lambda: "foo" in x)
    self.assertRaisesRegex(
        RuntimeError,
        "Tensor.__contains__ only supports Tensor or scalar, but you passed in a {}.".format(type([1, 2])),
        lambda: [1, 2] in x)
def test_deepcopy_parameter(self):
    """deepcopy of a state_dict with keep_vars=True preserves the Parameter type."""
    from copy import deepcopy
    linear = torch.nn.Linear(10, 1)
    state = linear.state_dict(keep_vars=True)
    for key in ('weight', 'bias'):
        self.assertEqual(torch.nn.Parameter, type(state[key]))
    copied = deepcopy(state)
    for key in ('weight', 'bias'):
        self.assertEqual(torch.nn.Parameter, type(copied[key]))
def test_pickle(self):
    """A tensor survives a pickle round-trip unchanged."""
    import pickle
    original = torch.randn(5, 5)
    restored = pickle.loads(pickle.dumps(original))
    self.assertEqual(original, restored)
@skipIfTorchDynamo("TorchDynamo fails with unknown reason")
def test_pickle_parameter(self):
    """An nn.Parameter survives pickling with type, grad flag and data intact."""
    import pickle
    param = torch.nn.Parameter(torch.randn(5, 5))
    restored = pickle.loads(pickle.dumps(param))
    self.assertTrue(isinstance(restored, torch.nn.Parameter))
    self.assertEqual(param.requires_grad, restored.requires_grad)
    self.assertEqual(param, restored)
@skipIfTorchDynamo("TorchDynamo fails with unknown reason")
def test_pickle_parameter_no_requires_grad(self):
    """A requires_grad=False Parameter keeps that flag through pickling."""
    import pickle
    param = torch.nn.Parameter(torch.randn(5, 5), requires_grad=False)
    restored = pickle.loads(pickle.dumps(param))
    self.assertTrue(isinstance(restored, torch.nn.Parameter))
    self.assertEqual(param.requires_grad, restored.requires_grad)
    self.assertEqual(param, restored)
def test_pickle_dtype(self):
    """Pickling a dtype yields the identical singleton object."""
    dt = torch.float32
    restored = pickle.loads(pickle.dumps(dt))
    self.assertTrue(isinstance(restored, torch.dtype))
    # dtypes are singletons, so identity (not just equality) must hold
    self.assertEqual(id(restored), id(dt))
def test_pickle_size(self):
    """A torch.Size survives a pickle round-trip with type and value intact."""
    sz = torch.rand(10).size()
    restored = pickle.loads(pickle.dumps(sz))
    self.assertTrue(isinstance(restored, torch.Size))
    self.assertEqual(sz, restored)
def test_pickle_function(self):
    """Builtin torch functions pickle by reference (gh-37703)."""
    # https://github.com/pytorch/pytorch/issues/37703
    fn = torch.tanh
    restored = pickle.loads(pickle.dumps(fn))
    self.assertEqual(fn, restored)
def test_generator_cpu(self):
    # Covers the CPU Generator API: manual_seed, seed, initial_seed,
    # get_state / set_state, and state transfer to/from the default generator.
    # test default generators are equal
    self.assertEqual(torch.default_generator, torch.default_generator)

    # tests Generator API
    # manual_seed, seed, initial_seed, get_state, set_state
    g1 = torch.Generator()
    g2 = torch.Generator()
    g1.manual_seed(12345)
    g2.manual_seed(12345)
    self.assertEqual(g1.initial_seed(), g2.initial_seed())

    # seed() re-seeds nondeterministically, so the seeds should now differ
    g1.seed()
    g2.seed()
    self.assertNotEqual(g1.initial_seed(), g2.initial_seed())

    # copying state makes two generators produce the same random stream
    g1 = torch.Generator()
    g2_state = g2.get_state()
    g2_randn = torch.randn(1, generator=g2)
    g1.set_state(g2_state)
    g1_randn = torch.randn(1, generator=g1)
    self.assertEqual(g1_randn, g2_randn)

    # a fresh generator given the default generator's state matches it
    default_state = torch.default_generator.get_state()
    q = torch.empty(100)
    g1_normal = q.normal_()
    g2 = torch.Generator()
    g2.set_state(default_state)
    g2_normal = q.normal_(generator=g2)
    self.assertEqual(g1_normal, g2_normal)
def test_invalid_generator_raises(self):
    """Constructing a Generator on an unsupported device type must raise."""
    self.assertRaises(RuntimeError, lambda: torch.Generator('opengl'))
def _sobol_reference_samples(self, scramble: bool) -> torch.Tensor:
    """Return the 8 reference 2-D Sobol points used by the SobolEngine tests."""
    if not scramble:
        # theoretical values from Joe Kuo 2010
        return torch.tensor(
            [
                [0., 0.],
                [0.5, 0.5],
                [0.75, 0.25],
                [0.25, 0.75],
                [0.375, 0.375],
                [0.875, 0.875],
                [0.625, 0.125],
                [0.125, 0.625],
            ],
        )
    else:
        # theoretical values unknown: convergence properties checked
        # (values recorded from the scrambled engine with seed=123456)
        return torch.tensor(
            [
                [0.50860737, 0.29320504],
                [0.07116939, 0.89594537],
                [0.49354145, 0.11524881],
                [0.93097717, 0.70244044],
                [0.87266153, 0.23887917],
                [0.31021884, 0.57600391],
                [0.13687253, 0.42054182],
                [0.69931293, 0.77336788],
            ],
        )
def test_sobolengine_bounds(self, scramble: bool = False):
    """Every SobolEngine sample lies inside the closed unit hypercube."""
    engine = torch.quasirandom.SobolEngine(100, scramble=scramble, seed=123456)
    draws = engine.draw(512)
    self.assertTrue(torch.all(draws >= 0))
    self.assertTrue(torch.all(draws <= 1))
def test_sobolengine_bounds_scrambled(self):
    """The bounds invariant must hold for the scrambled engine too."""
    self.test_sobolengine_bounds(scramble=True)
def test_sobolengine_draw(self, scramble: bool = False):
    """draw() reproduces the reference sequence and advances num_generated."""
    expected = self._sobol_reference_samples(scramble=scramble)
    engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456)
    actual = engine.draw(n=len(expected))
    self.assertEqual(actual, expected)
    self.assertEqual(engine.num_generated, len(expected))
def test_sobolengine_draw_scrambled(self):
    """Run the draw test with scrambling enabled."""
    self.test_sobolengine_draw(scramble=True)
def test_sobolengine_first_point(self):
    # Unscrambled Sobol starts exactly at the origin; scrambling moves the
    # first point away from zero. The requested dtype must be honored.
    for dtype in (torch.float, torch.double):
        engine = torch.quasirandom.SobolEngine(2, scramble=False)
        sample = engine.draw(1, dtype=dtype)
        self.assertTrue(torch.all(sample == 0))
        self.assertEqual(sample.dtype, dtype)
    for dtype in (torch.float, torch.double):
        engine = torch.quasirandom.SobolEngine(2, scramble=True, seed=123456)
        sample = engine.draw(1, dtype=dtype)
        self.assertTrue(torch.all(sample != 0))
        self.assertEqual(sample.dtype, dtype)
def test_sobolengine_continuing(self, scramble: bool = False):
    """A second draw continues the sequence where the first one stopped."""
    ref = self._sobol_reference_samples(scramble=scramble)
    engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456)
    half = len(ref) // 2
    _ = engine.draw(n=half)
    second_half = engine.draw(n=half)
    torch.testing.assert_close(second_half, ref[half:])
def test_sobolengine_continuing_scrambled(self):
    """Run the continuation test with scrambling enabled."""
    self.test_sobolengine_continuing(scramble=True)
def test_sobolengine_reset(self, scramble: bool = False):
    """reset() rewinds the engine to the start of the sequence."""
    ref = self._sobol_reference_samples(scramble=scramble)
    engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456)
    _ = engine.draw(n=len(ref) // 2)
    engine.reset()
    self.assertEqual(engine.num_generated, 0)
    restarted = engine.draw(n=len(ref))
    torch.testing.assert_close(restarted, ref)
def test_sobolengine_reset_scrambled(self):
    """Run the reset test with scrambling enabled."""
    self.test_sobolengine_reset(scramble=True)
def test_sobolengine_fast_forward(self, scramble: bool = False):
    # fast_forward(k) must skip exactly k points without drawing them.
    ref_sample = self._sobol_reference_samples(scramble=scramble)
    engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456)
    engine.fast_forward(4)
    sample = engine.draw(n=4)
    torch.testing.assert_close(sample, ref_sample[4:])
    # alternate fast forwarding with sampling
    engine.reset()
    even_draws = []
    for i in range(8):
        if i % 2 == 0:
            even_draws.append(engine.draw())
        else:
            engine.fast_forward(1)
    # the interleaved draws must equal the even-indexed reference points
    torch.testing.assert_close(
        ref_sample[[i for i in range(8) if i % 2 == 0]],
        torch.from_numpy(np.concatenate(even_draws)),
    )
def test_sobolengine_fast_forward_scrambled(self):
    """Run the fast-forward test with scrambling enabled."""
    self.test_sobolengine_fast_forward(scramble=True)
def test_sobolengine_distribution(self, scramble=False):
    # With many points, Sobol samples should approximate U[0, 1):
    # mean ~ 0.5 and quartiles ~ 0.25 / 0.75 in every dimension.
    d = 50
    engine = torch.quasirandom.SobolEngine(d, scramble=scramble, seed=123456)
    sample = engine.draw(1024)
    torch.testing.assert_close(
        torch.mean(sample, dim=0), torch.full((d,), 0.5), atol=2, rtol=2
    )
    torch.testing.assert_close(
        np.percentile(sample, 25, axis=0), np.repeat(0.25, d), atol=2, rtol=2
    )
    torch.testing.assert_close(
        np.percentile(sample, 75, axis=0), np.repeat(0.75, d), atol=2, rtol=2
    )
def test_sobolengine_distribution_scrambled(self):
    """Run the distribution test with scrambling enabled."""
    self.test_sobolengine_distribution(scramble=True)
def test_sobolengine_draw_base2(self, scramble=False):
    """draw_base2(k) yields 2**k points while keeping the running total a power of two."""
    ref = self._sobol_reference_samples(scramble=scramble)
    engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456)
    first = engine.draw_base2(2)
    self.assertEqual(ref[:4], first)
    # resampling still having N=2**n
    second = engine.draw_base2(2)
    self.assertEqual(ref[4:8], second)
def test_sobolengine_draw_base2_scrambled(self):
    """Run the draw_base2 test with scrambling enabled."""
    self.test_sobolengine_draw_base2(scramble=True)
def test_sobolengine_raise(self):
maxdim = torch.quasirandom.SobolEngine.MAXDIM
with self.assertRaises(ValueError):
torch.quasirandom.SobolEngine(maxdim + 1)
def test_sobolengine_high_dim(self):
engine = torch.quasirandom.SobolEngine(1111, scramble=False, seed=123456)
samples1 = engine.draw()
vals1, counts1 = torch.unique(samples1, return_counts=True)
samples2 = engine.draw()
vals2, counts2 = torch.unique(samples2, return_counts=True)
self.assertEqual(vals1.item(), 0.0)
self.assertEqual(counts1.item(), 1111)
self.assertEqual(vals2.item(), 0.5)
self.assertEqual(counts1.item(), 1111)
def test_parsing_int64(self):
    """int64 positional args may be plain ints or 0-dim integer tensors."""
    expected = torch.cumsum(torch.ones(5, 5), 0)
    # a 0-dim integer tensor is accepted wherever an int64 is expected
    self.assertEqual(expected, torch.cumsum(torch.ones(5, 5), torch.tensor(0)))
    # a floating-point tensor is not implicitly truncated to an int
    self.assertRaises(TypeError, lambda: torch.cumsum(torch.ones(5, 5), torch.tensor(0.)))
def test_parsing_double(self):
# accepts floating point and integer arguments
x = torch.randn(2, 3)
torch.isclose(x, x, 1, 1)
self.assertTrue(torch.isclose(x, x, 1, 1).all())
self.assertTrue(torch.isclose(x, x, 1.5, 1.).all())
# accepts floating point and integer tensors
self.assertTrue(torch.isclose(x, x, torch.tensor(1), torch.tensor(1)).all())
self.assertTrue(torch.isclose(x, x, torch.tensor(1.5), torch.tensor(1.)).all())
# doesn't accept variables with requires_grad
self.assertRaises(TypeError,
lambda: torch.isclose(x, x, torch.tensor(1.5), torch.tensor(1., requires_grad=True)).all())
def test_parsing_intlist(self):
# parse with integer variables
self.assertEqual(torch.Size([3, 4]), torch.ones((torch.tensor(3), torch.tensor(4))).shape)
self.assertEqual(torch.Size([3, 4]), torch.ones(torch.tensor(3), torch.tensor(4)).shape)
# parse with numpy integers
self.assertEqual(torch.Size([3, 4]), torch.ones((np.array(3), np.int64(4))).shape)
self.assertEqual(torch.Size([3, 4]), torch.ones(np.array(3), np.int64(4)).shape)
self.assertEqual(torch.Size([3, 4]), torch.ones((np.int64(3), np.array(4))).shape)
self.assertEqual(torch.Size([3, 4]), torch.ones(np.int64(3), np.array(4)).shape)
# fail parse with float variables
self.assertRaises(TypeError, lambda: torch.ones((torch.tensor(3.), torch.tensor(4))))
# fail parse with numpy floats
self.assertRaises(TypeError, lambda: torch.ones((np.float(3.), torch.tensor(4))))
self.assertRaises(TypeError, lambda: torch.ones((np.array(3.), torch.tensor(4))))
# fail parse with > 1 element variables
self.assertRaises(TypeError, lambda: torch.ones(torch.tensor(3, 3)))
self.assertRaises(TypeError, lambda: torch.ones((torch.tensor(3, 3))))
self.assertRaises(TypeError, lambda: torch.ones(np.array(3, 3)))
self.assertRaises(TypeError, lambda: torch.ones((np.array(3, 3))))
# fail parse with additional positional args after intlist arg
self.assertRaisesRegex(TypeError,
"received an invalid combination of arguments",
lambda: torch.LongTensor((6, 0), 1, 1, 0))
self.assertRaisesRegex(TypeError,
"missing 1 required positional arguments",
lambda: torch.tensor().new_zeros((5, 5), 0))
def test_from_buffer(self):
    """Storage.from_buffer reinterprets raw bytes with an explicit byte order."""
    a = bytearray([1, 2, 3, 4])
    self.assertEqual(torch.ByteStorage.from_buffer(a).tolist(), [1, 2, 3, 4])
    # big-endian 16-bit: pairs (1,2) and (3,4) -> 0x0102 == 258, 0x0304 == 772
    shorts = torch.ShortStorage.from_buffer(a, 'big')
    self.assertEqual(shorts.size(), 2)
    self.assertEqual(shorts.tolist(), [258, 772])
    # little-endian 32-bit: 0x04030201 == 67305985
    ints = torch.IntStorage.from_buffer(a, 'little')
    self.assertEqual(ints.size(), 1)
    self.assertEqual(ints[0], 67305985)
    # big-endian IEEE-754 single: 0x40100000 == 2.25
    f = bytearray([0x40, 0x10, 0x00, 0x00])
    floats = torch.FloatStorage.from_buffer(f, 'big')
    self.assertEqual(floats.size(), 1)
    self.assertEqual(floats[0], 2.25)
    # bool storage: every nonzero byte maps to True
    f = bytearray([0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x10, 0x40])
    bools = torch.BoolStorage.from_buffer(f, 'big')
    self.assertEqual(bools.size(), 8)
    self.assertEqual(bools.tolist(), [False, True, True, True, True, True, True, True])
    self.assertEqual(bools.type(), 'torch.BoolStorage')
    self.assertTrue(isinstance(bools, torch.BoolStorage))
    f = bytearray(b'\x80\x02\x8a\nl\xfc\x9cF\xf9 j\xa8P\x19.\x80\x02M\xe9')
    bools = torch.BoolStorage.from_buffer(f, 'big')
    self.assertEqual(bools.size(), 19)
    # NOTE(review): b'\0x4A' is the four bytes 00 78 34 41 (possibly meant
    # b'\x4A'); the assertions below match the four-byte literal as written
    f = bytearray(b'\0x4A')
    bools = torch.BoolStorage.from_buffer(f, 'big')
    self.assertEqual(bools.size(), 4)
    self.assertEqual(bools.tolist(), [False, True, True, True])
    bytes = torch.ByteStorage.from_buffer(a)
    self.assertEqual(bytes.nbytes(), 4)
    self.assertEqual(bytes.tolist(), [1, 2, 3, 4])
    self.assertTrue(isinstance(bytes, torch.ByteStorage))
def test_storage_error(self):
    """Exercise the error paths of the legacy <type>Storage and TypedStorage ctors.

    Iterates over every registered storage class (skipping the two base
    classes and CUDA classes when CUDA is unavailable) and checks that each
    invalid constructor invocation raises the documented error.
    """
    quantized_storages = [
        torch.QInt32Storage,
        torch.QInt8Storage,
        torch.QUInt2x4Storage,
        torch.QUInt4x2Storage,
        torch.QUInt8Storage,
    ]
    # the abstract base cannot be instantiated directly
    with self.assertRaisesRegex(RuntimeError, r"Only child classes of _LegacyStorage can be instantiated"):
        torch.storage._LegacyStorage()
    for storage_class in torch._storage_classes:
        if storage_class in [torch.UntypedStorage, torch.TypedStorage]:
            continue
        device = 'cuda' if storage_class.__module__ == 'torch.cuda' else 'cpu'
        dtype = storage_class.dtype
        if device == 'cuda' and not torch.cuda.is_available():
            continue
        # Legacy <type>Storage constructor errors
        with self.assertRaisesRegex(RuntimeError, r"'device' cannot be specified"):
            storage_class(device='cpu')
        with self.assertRaisesRegex(RuntimeError, r"'dtype' cannot be specified"):
            storage_class(dtype=torch.float)
        with self.assertRaisesRegex(TypeError, r"got an unexpected keyword"):
            storage_class(sdlkjf=torch.float)
        with self.assertRaisesRegex(RuntimeError, r"Too many positional arguments"):
            storage_class(0, 0)
        with self.assertRaisesRegex(TypeError, r"invalid data type"):
            storage_class('string')
        with self.assertRaisesRegex(TypeError, r"Argument type not recognized"):
            storage_class(torch.tensor([]))
        s = storage_class()
        with self.assertRaisesRegex(RuntimeError, r"No positional arguments"):
            storage_class(0, wrap_storage=s.untyped())
        with self.assertRaisesRegex(TypeError, r"must be UntypedStorage"):
            storage_class(wrap_storage=s)
        if torch.cuda.is_available():
            if storage_class in quantized_storages:
                # quantized dtypes cannot be materialized on CUDA at all
                with self.assertRaisesRegex(RuntimeError, r"Cannot create CUDA storage with quantized dtype"):
                    s.cuda()
            else:
                # wrap_storage must live on the device the class implies
                if s.is_cuda:
                    s_other_device = s.cpu()
                else:
                    s_other_device = s.cuda()
                with self.assertRaisesRegex(RuntimeError, r"Device of 'wrap_storage' must be"):
                    storage_class(wrap_storage=s_other_device.untyped())
        # TypedStorage constructor errors
        with self.assertRaisesRegex(RuntimeError, r"No positional arguments"):
            torch.TypedStorage(0, wrap_storage=s.untyped(), dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, r"Argument 'dtype' must be specified"):
            torch.TypedStorage(wrap_storage=s.untyped())
        with self.assertRaisesRegex(TypeError, r"Argument 'dtype' must be torch.dtype"):
            torch.TypedStorage(wrap_storage=s.untyped(), dtype=0)
        with self.assertRaisesRegex(RuntimeError, r"Argument 'device' should not be specified"):
            torch.TypedStorage(wrap_storage=s.untyped(), dtype=dtype, device=device)
        with self.assertRaisesRegex(TypeError, r"Argument 'wrap_storage' must be UntypedStorage"):
            torch.TypedStorage(wrap_storage=s, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, r"Storage device not recognized"):
            torch.TypedStorage(dtype=dtype, device='xla')
        if torch.cuda.is_available():
            if storage_class in quantized_storages:
                with self.assertRaisesRegex(RuntimeError, r"Cannot create CUDA storage with quantized dtype"):
                    torch.TypedStorage(dtype=dtype, device='cuda')
        with self.assertRaisesRegex(TypeError, r"Argument type not recognized"):
            torch.TypedStorage(torch.tensor([]), dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, r"Too many positional arguments"):
            torch.TypedStorage(0, 0, dtype=dtype, device=device)
        if isinstance(s, torch.TypedStorage):
            # fill_ takes a scalar element, never another storage
            s_other = torch.TypedStorage([1, 2, 3, 4], device=device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, r'cannot set item'):
                s.fill_(s_other)
def test_storage_error_no_attribute(self):
    """CUDA storage classes must reject the pickle/sharing helper entry points."""
    for cuda_storage_class in (torch.cuda.ByteStorage, torch.cuda.FloatStorage):
        with self.assertRaisesRegex(RuntimeError, r'Not available for CUDA storage'):
            cuda_storage_class.from_buffer()
        with self.assertRaisesRegex(RuntimeError, r'Not available for CUDA storage'):
            cuda_storage_class._new_with_weak_ptr()
        with self.assertRaisesRegex(RuntimeError, r'Not available for CUDA storage'):
            cuda_storage_class._new_shared_filename(0, 0, 0)
def test_storage_casts(self):
storage = torch.IntStorage([-1, 0, 1, 2, 3, 4])
self.assertEqual(storage.size(), 6)
self.assertEqual(storage.tolist(), [-1, 0, 1, 2, 3, 4])
self.assertEqual(storage.type(), 'torch.IntStorage')
self.assertIs(storage.dtype, torch.int32)
floatStorage = storage.float()
self.assertEqual(floatStorage.size(), 6)
self.assertEqual(floatStorage.tolist(), [-1, 0, 1, 2, 3, 4])
self.assertEqual(floatStorage.type(), 'torch.FloatStorage')
self.assertEqual(floatStorage.int().tolist(), [-1, 0, 1, 2, 3, 4])
self.assertIs(floatStorage.dtype, torch.float32)
halfStorage = storage.half()
self.assertEqual(halfStorage.size(), 6)
self.assertEqual(halfStorage.tolist(), [-1, 0, 1, 2, 3, 4])
self.assertEqual(halfStorage.type(), 'torch.HalfStorage')
self.assertEqual(halfStorage.int().tolist(), [-1, 0, 1, 2, 3, 4])
self.assertIs(halfStorage.dtype, torch.float16)
bfloat16Storage = storage.bfloat16()
self.assertEqual(bfloat16Storage.size(), 6)
self.assertEqual(bfloat16Storage.tolist(), [-1, 0, 1, 2, 3, 4])
self.assertEqual(bfloat16Storage.type(), 'torch.BFloat16Storage')
self.assertEqual(bfloat16Storage.int().tolist(), [-1, 0, 1, 2, 3, 4])
self.assertIs(bfloat16Storage.dtype, torch.bfloat16)
longStorage = storage.long()
self.assertEqual(longStorage.size(), 6)
self.assertEqual(longStorage.tolist(), [-1, 0, 1, 2, 3, 4])
self.assertEqual(longStorage.type(), 'torch.LongStorage')
self.assertEqual(longStorage.int().tolist(), [-1, 0, 1, 2, 3, 4])
self.assertIs(longStorage.dtype, torch.int64)
shortStorage = storage.short()
self.assertEqual(shortStorage.size(), 6)
self.assertEqual(shortStorage.tolist(), [-1, 0, 1, 2, 3, 4])
self.assertEqual(shortStorage.type(), 'torch.ShortStorage')
self.assertEqual(shortStorage.int().tolist(), [-1, 0, 1, 2, 3, 4])
self.assertIs(shortStorage.dtype, torch.int16)
doubleStorage = storage.double()
self.assertEqual(doubleStorage.size(), 6)
self.assertEqual(doubleStorage.tolist(), [-1.0, 0.0, 1.0, 2.0, 3.0, 4.0])
self.assertEqual(doubleStorage.type(), 'torch.DoubleStorage')
self.assertEqual(doubleStorage.int().tolist(), [-1, 0, 1, 2, 3, 4])
self.assertIs(doubleStorage.dtype, torch.float64)
charStorage = storage.char()
self.assertEqual(charStorage.size(), 6)
self.assertEqual(charStorage.tolist(), [-1.0, 0.0, 1.0, 2.0, 3.0, 4.0])
self.assertEqual(charStorage.type(), 'torch.CharStorage')
self.assertEqual(charStorage.int().tolist(), [-1, 0, 1, 2, 3, 4])
self.assertIs(charStorage.dtype, torch.int8)
byteStorage = storage.byte()
self.assertEqual(byteStorage.size(), 6)
self.assertEqual(byteStorage.tolist(), [255, 0, 1, 2, 3, 4])
self.assertEqual(byteStorage.type(), 'torch.ByteStorage')
self.assertEqual(byteStorage.int().tolist(), [255, 0, 1, 2, 3, 4])
self.assertIs(byteStorage.dtype, torch.uint8)
boolStorage = storage.bool()
self.assertEqual(boolStorage.size(), 6)
self.assertEqual(boolStorage.tolist(), [True, False, True, True, True, True])
self.assertEqual(boolStorage.type(), 'torch.BoolStorage')
self.assertEqual(boolStorage.int().tolist(), [1, 0, 1, 1, 1, 1])
self.assertIs(boolStorage.dtype, torch.bool)
complexfloat_storage = torch.ComplexFloatStorage([-1, 0, 1 + 2j, 2.5j, 3.5, 4 - 2j])
self.assertEqual(complexfloat_storage.size(), 6)
self.assertEqual(complexfloat_storage.tolist(), [-1, 0, 1 + 2j, 2.5j, 3.5, 4 - 2j])
self.assertEqual(complexfloat_storage.type(), 'torch.ComplexFloatStorage')
self.assertIs(complexfloat_storage.dtype, torch.complex64)
complexdouble_storage = complexfloat_storage.complex_double()
self.assertEqual(complexdouble_storage.size(), 6)
self.assertEqual(complexdouble_storage.tolist(), [-1, 0, 1 + 2j, 2.5j, 3.5, 4 - 2j])
self.assertEqual(complexdouble_storage.type(), 'torch.ComplexDoubleStorage')
self.assertIs(complexdouble_storage.dtype, torch.complex128)
def test_from_file(self):
    """FloatStorage.from_file with shared=True maps one file into two storages.

    Writes through either tensor view must be visible through the other,
    proving both storages alias the same file-backed memory.
    """
    def assert_with_filename(filename):
        size = 10000
        s1 = torch.FloatStorage.from_file(filename, True, size)
        t1 = torch.FloatTensor(s1).copy_(torch.randn(size))
        # a tensor built over the storage shares its memory
        self.assertEqual(s1.data_ptr(), torch.FloatTensor(s1).data_ptr())
        # check mapping
        s2 = torch.FloatStorage.from_file(filename, True, size)
        t2 = torch.FloatTensor(s2)
        self.assertEqual(t1, t2, atol=0, rtol=0)
        # check changes to t1 from t2
        rnum = random.uniform(-1, 1)
        t1.fill_(rnum)
        self.assertEqual(t1, t2, atol=0, rtol=0)
        # check changes to t2 from t1
        rnum = random.uniform(-1, 1)
        t2.fill_(rnum)
        self.assertEqual(t1, t2, atol=0, rtol=0)
        # release the tensors (and with them the file mappings)
        del s1, t1, s2, t2
    with TemporaryFileName() as fname:
        assert_with_filename(fname)
    if IS_FILESYSTEM_UTF8_ENCODING:
        # exercise a non-ASCII path as well
        with TemporaryDirectoryName(suffix='中文') as dname, TemporaryFileName(dir=dname) as fname:
            assert_with_filename(fname)
def test_torch_from_file(self):
    """torch.from_file (tensor-level twin of Storage.from_file) shares a mapped file.

    Same aliasing checks as test_from_file, but through the torch.from_file
    entry point with an explicit dtype.
    """
    def assert_with_filename(filename):
        size = 10000
        s1 = torch.from_file(filename, True, size, dtype=torch.float)
        t1 = torch.FloatTensor(s1).copy_(torch.randn(size))
        # check mapping
        s2 = torch.from_file(filename, True, size, dtype=torch.float)
        t2 = torch.FloatTensor(s2)
        self.assertEqual(t1, t2, atol=0, rtol=0)
        # check changes to t1 from t2
        rnum = random.uniform(-1, 1)
        t1.fill_(rnum)
        self.assertEqual(t1, t2, atol=0, rtol=0)
        # check changes to t2 from t1
        rnum = random.uniform(-1, 1)
        t2.fill_(rnum)
        self.assertEqual(t1, t2, atol=0, rtol=0)
        # release the tensors (and with them the file mappings)
        del s1, t1, s2, t2
    with TemporaryFileName() as fname:
        assert_with_filename(fname)
    if IS_FILESYSTEM_UTF8_ENCODING:
        # exercise a non-ASCII path as well
        with TemporaryDirectoryName(suffix='中文') as dname, TemporaryFileName(dir=dname) as fname:
            assert_with_filename(fname)
def test_print(self):
    """End-to-end checks of tensor/storage printing.

    Covers: smoke repr of every tensor/storage class, exact expected strings
    for complex, scientific, summarized, boolean and device-tagged output,
    and the effect of set_printoptions / set_default_dtype. Global print and
    dtype state is mutated and restored along the way, so statement order
    matters throughout.
    """
    default_type = torch.tensor([]).type()
    # every dense tensor class must at least print without crashing
    for t in torch._tensor_classes:
        if t == torch.HalfTensor:
            continue  # HalfTensor does not support fill
        if t.is_sparse:
            continue
        if t.is_cuda and not torch.cuda.is_available():
            continue
        obj = t(100, 100).fill_(1)
        obj.__repr__()
        str(obj)
    # test half tensor
    obj = torch.rand(100, 100, device='cpu').half()
    obj.__repr__()
    str(obj)
    # same smoke test for every storage class
    for t in torch._storage_classes:
        if t == torch.BFloat16Storage:
            continue  # Fix once fill is enabled for bfloat16
        if t.is_cuda and not torch.cuda.is_available():
            continue
        if t == torch.BoolStorage or t == torch.cuda.BoolStorage:
            obj = t(100).fill_(True)
        else:
            obj = t(100).fill_(1)
        obj.__repr__()
        str(obj)
    # test complex tensor
    # complex tensor print uses two formatters, one for real values
    # and the other for imag values. this is consistent with numpy
    x = torch.tensor([2.3 + 4j, 7 + 6j])
    self.assertEqual(x.__repr__(), str(x))
    self.assertExpectedInline(str(x), '''tensor([2.3000+4.j, 7.0000+6.j])''')
    # test complex half tensor
    x = torch.tensor([1.25 + 4j, -7. + 6j], dtype=torch.chalf)
    self.assertEqual(x.__repr__(), str(x))
    self.assertExpectedInline(str(x), '''tensor([ 1.2500+4.j, -7.0000+6.j], dtype=torch.complex32)''')
    # test scientific notation for complex tensors
    x = torch.tensor([1e28 + 2j , -1e-28j])
    self.assertEqual(x.__repr__(), str(x))
    self.assertExpectedInline(str(x), '''tensor([1.0000e+28+2.0000e+00j, -0.0000e+00-1.0000e-28j])''')
    # test big integer
    x = torch.tensor(2341234123412341)
    self.assertEqual(x.__repr__(), str(x))
    self.assertExpectedInline(str(x), '''tensor(2341234123412341)''')
    # test scientific notation
    x = torch.tensor([1e28, 1e-28])
    self.assertEqual(x.__repr__(), str(x))
    self.assertExpectedInline(str(x), '''tensor([1.0000e+28, 1.0000e-28])''')
    # test scientific notation using set_printoptions
    x = torch.tensor([1e2, 1e-2])
    torch.set_printoptions(sci_mode=True)
    self.assertEqual(x.__repr__(), str(x))
    self.assertExpectedInline(str(x), '''tensor([1.0000e+02, 1.0000e-02])''')
    torch.set_printoptions(sci_mode=False)
    self.assertEqual(x.__repr__(), str(x))
    self.assertExpectedInline(str(x), '''tensor([ 100.0000, 0.0100])''')
    torch.set_printoptions(sci_mode=None)  # reset to the default value
    # test no leading space if all elements positive
    x = torch.tensor([1, 2])
    self.assertEqual(x.__repr__(), str(x))
    self.assertExpectedInline(str(x), '''tensor([1, 2])''')
    # test for leading space if there are negative elements
    x = torch.tensor([1, -2])
    self.assertEqual(x.__repr__(), str(x))
    self.assertExpectedInline(str(x), '''tensor([ 1, -2])''')
    # test inf and nan
    x = torch.tensor([4, inf, 1.5, -inf, 0, nan, 1])
    self.assertEqual(x.__repr__(), str(x))
    self.assertExpectedInline(str(x), '''tensor([4.0000, inf, 1.5000, -inf, 0.0000, nan, 1.0000])''')
    y = torch.tensor([4, inf, complex(1.5, inf), complex(-inf, 4), 0, complex(nan, inf), complex(3, nan)])
    self.assertEqual(y.__repr__(), str(y))
    expected_str = '''\
tensor([4.0000+0.j, inf+0.j, 1.5000+infj, -inf+4.j, 0.0000+0.j, nan+infj,
3.0000+nanj])'''
    self.assertExpectedInline(str(y), expected_str)
    # test dtype
    torch.set_default_dtype(torch.float)
    x = torch.tensor([1e-324, 1e-323, 1e-322, 1e307, 1e308, 1e309], dtype=torch.float64)
    self.assertEqual(x.__repr__(), str(x))
    expected_str = '''\
tensor([ 0.0000e+00, 9.8813e-324, 9.8813e-323, 1.0000e+307, 1.0000e+308,
inf], dtype=torch.float64)'''
    self.assertExpectedInline(str(x), expected_str)
    # test changing default dtype
    torch.set_default_dtype(torch.float64)
    self.assertEqual(x.__repr__(), str(x))
    expected_str = '''\
tensor([ 0.0000e+00, 9.8813e-324, 9.8813e-323, 1.0000e+307, 1.0000e+308,
inf])'''
    self.assertExpectedInline(str(x), expected_str)
    # test summary
    x = torch.zeros(10000)
    self.assertEqual(x.__repr__(), str(x))
    self.assertExpectedInline(str(x), '''tensor([0., 0., 0., ..., 0., 0., 0.])''')
    # test internal summary function
    x = torch.rand(1, 20, 5, 30)
    summary = torch._tensor_str.get_summarized_data(x)
    self.assertEqual(summary.shape, (1, 6, 5, 6))
    first_and_last = [0, 1, 2, -3, -2, -1]
    self.assertEqual(summary, x[:, first_and_last][..., first_and_last])
    # test device
    if torch.cuda.is_available():
        x = torch.tensor([123], device='cuda:0')
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([123], device='cuda:0')''')
        # test changing default to cuda
        torch.set_default_tensor_type(torch.cuda.FloatTensor)
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([123])''')
        # test printing a tensor on a different gpu than current one.
        if torch.cuda.device_count() >= 2:
            with torch.cuda.device(1):
                self.assertEqual(x.__repr__(), str(x))
                self.assertExpectedInline(str(x), '''tensor([123], device='cuda:0')''')
        # test printing cpu tensor when default device is cuda
        y = torch.tensor([123], device='cpu')
        self.assertEqual(y.__repr__(), str(y))
        self.assertExpectedInline(str(y), '''tensor([123], device='cpu')''')
    # restore the default tensor type mutated above
    torch.set_default_tensor_type(default_type)
    # test integral floats and requires_grad
    x = torch.tensor([123.], requires_grad=True)
    self.assertEqual(x.__repr__(), str(x))
    self.assertExpectedInline(str(x), '''tensor([123.], requires_grad=True)''')
    # test non-contiguous print
    # sliced tensor should have > PRINT_OPTS.threshold elements
    x = torch.ones(100, 2, 2, 10)
    y = x.as_strided(size=(100, 2, 10), stride=(2 * 2 * 10, 2 * 10, 1))
    self.assertEqual(str(y), y.__repr__())
    expected_str = '''\
tensor([[[1., 1., 1., ..., 1., 1., 1.],
[1., 1., 1., ..., 1., 1., 1.]],
[[1., 1., 1., ..., 1., 1., 1.],
[1., 1., 1., ..., 1., 1., 1.]],
[[1., 1., 1., ..., 1., 1., 1.],
[1., 1., 1., ..., 1., 1., 1.]],
...,
[[1., 1., 1., ..., 1., 1., 1.],
[1., 1., 1., ..., 1., 1., 1.]],
[[1., 1., 1., ..., 1., 1., 1.],
[1., 1., 1., ..., 1., 1., 1.]],
[[1., 1., 1., ..., 1., 1., 1.],
[1., 1., 1., ..., 1., 1., 1.]]])\
'''
    self.assertExpectedInline(str(y), expected_str)
    x = torch.ones(100, 2, 2, 10) * (1 + 1j)
    y = x.as_strided(size=(100, 2, 10), stride=(2 * 2 * 10, 2 * 10, 1))
    self.assertEqual(str(y), y.__repr__())
    expected_str = '''\
tensor([[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]],
[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]],
[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]],
...,
[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]],
[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]],
[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]]])\
'''
    self.assertExpectedInline(str(y), expected_str)
    # test print 0-dim tensor: there's no 0-dim in Numpy, we match arrayprint style
    x = torch.tensor(0.00002)
    self.assertEqual(x.__repr__(), str(x))
    self.assertExpectedInline(str(x), '''tensor(2.0000e-05)''')
    # test print boolean tensor
    x = torch.tensor([True])
    self.assertEqual(x.__repr__(), str(x))
    self.assertExpectedInline(str(x), '''tensor([True])''')
    x = torch.tensor(True)
    self.assertEqual(x.__repr__(), str(x))
    self.assertExpectedInline(str(x), '''tensor(True)''')
    # [Numpy] test print float in sci_mode when min < 0.0001.
    x = torch.tensor([0.00002])
    self.assertEqual(x.__repr__(), str(x))
    self.assertExpectedInline(str(x), '''tensor([2.0000e-05])''')
    # [Numpy] test print complex in sci_mode when real_min < 0.0001 and (or) imag_min < 0.0001.
    x = torch.tensor([0.00002]) * (1 + 1j)
    self.assertEqual(x.__repr__(), str(x))
    self.assertExpectedInline(str(x), '''tensor([2.0000e-05+2.0000e-05j])''')
    # [Numpy] test print float in sci_mode when max > 1e8.
    # TODO: Pytorch uses fixed precision to print, while Numpy uses dragon4_scientific
    # to do automatic trimming and padding.
    x = torch.tensor([123456789.])
    self.assertEqual(x.__repr__(), str(x))
    self.assertExpectedInline(str(x), '''tensor([1.2346e+08])''')
    # [Numpy] test print float in sci_mode when max / min > 1000.
    x = torch.tensor([0.01, 11])
    self.assertEqual(x.__repr__(), str(x))
    self.assertExpectedInline(str(x), '''tensor([1.0000e-02, 1.1000e+01])''')
    # [Numpy] test print int max / min > 1000, no sci_mode
    x = torch.tensor([1, 1010])
    self.assertEqual(x.__repr__(), str(x))
    self.assertExpectedInline(str(x), '''tensor([ 1, 1010])''')
    # [Numpy] test print int > 1e8, no sci_mode
    x = torch.tensor([1000000000])  # 1e9
    self.assertEqual(x.__repr__(), str(x))
    self.assertExpectedInline(str(x), '''tensor([1000000000])''')
    # [Numpy] test printing float in int_mode
    x = torch.tensor([1., 1000.])
    self.assertEqual(x.__repr__(), str(x))
    self.assertExpectedInline(str(x), '''tensor([ 1., 1000.])''')
    # [Numpy] test printing float in int_mode in sci format when max / min > 1000.
    x = torch.tensor([1., 1010.])
    self.assertEqual(x.__repr__(), str(x))
    self.assertExpectedInline(str(x), '''tensor([1.0000e+00, 1.0100e+03])''')
def test_sizeof(self) -> None:
sizeof_empty = torch.randn(0).storage().__sizeof__()
sizeof_10 = torch.randn(10).storage().__sizeof__()
sizeof_100 = torch.randn(100).storage().__sizeof__()
self.assertEqual((sizeof_100 - sizeof_empty) // (sizeof_10 - sizeof_empty), 10)
self.assertEqual((sizeof_100 - sizeof_empty) % (sizeof_10 - sizeof_empty), 0)
sizeof_empty = torch.randn(0).to(torch.uint8).storage().__sizeof__()
sizeof_10 = torch.randn(10).to(torch.uint8).storage().__sizeof__()
sizeof_100 = torch.randn(100).to(torch.uint8).storage().__sizeof__()
self.assertEqual((sizeof_100 - sizeof_empty) // (sizeof_10 - sizeof_empty), 10)
self.assertEqual((sizeof_100 - sizeof_empty) % (sizeof_10 - sizeof_empty), 0)
def test_iter(self) -> None:
    """Iterating a tensor yields its rows; an empty tensor yields nothing."""
    matrix = torch.randn(5, 5)
    for row_index, row in enumerate(matrix):
        self.assertEqual(row, matrix[row_index])
    empty = torch.tensor([])
    self.assertEqual(list(empty), [])
def test_new(self) -> None:
    """Tensor.new mirrors the legacy constructor's argument handling.

    Positional ints are sizes; a sequence/array is data; a storage or tensor
    argument shares memory; dtype mismatches are rejected.
    """
    x = torch.autograd.Variable(torch.tensor([]))
    y = torch.autograd.Variable(torch.randn(4, 4))
    z = torch.autograd.Variable(torch.IntTensor([1, 2, 3]))
    self.assertEqual(x.new().shape, [0])
    self.assertEqual(x.new(), x)
    # positional ints and torch.Size are treated as sizes ...
    self.assertEqual(x.new(1, 2).shape, [1, 2])
    self.assertEqual(x.new(torch.Size([3, 4])).shape, [3, 4])
    # ... but a python sequence is data
    self.assertEqual(x.new([3, 4]).shape, [2])
    self.assertEqual(x.new([3, 4]).tolist(), [3, 4])
    self.assertEqual(x.new((3, 4)).tolist(), [3, 4])
    self.assertEqual(x.new([np.int32(3), np.float64(4)]).tolist(), [3, 4])
    self.assertEqual(x.new(np.array((3, 4))).tolist(), [3, 4])
    self.assertEqual(x.new([z[2], z[0] + 3]).tolist(), [3, 4])
    self.assertEqual(x.new(size=(3, 4)).shape, [3, 4])
    self.assertEqual(x.new(()).shape, [0])
    # a storage or tensor argument shares memory instead of copying
    self.assertEqual(x.new(y.storage()).data_ptr(), y.data_ptr())
    self.assertEqual(x.new(y).data_ptr(), y.data_ptr())
    self.assertIsNot(x.new(y), y)
    # dtype mismatch between self and the source tensor is rejected
    self.assertRaises(TypeError, lambda: x.new(z))
    # TypeError would be better
    self.assertRaises(RuntimeError, lambda: x.new(z.storage()))
@unittest.skipIf(PYTORCH_CUDA_MEMCHECK, "is_pinned uses failure to detect pointer property")
def test_pin_memory(self):
    """pin_memory copies into page-locked memory; re-pinning is a no-op.

    Without CUDA available, pinning must raise rather than silently succeed.
    """
    x = torch.randn(3, 5)
    self.assertFalse(x.is_pinned())
    if not torch.cuda.is_available():
        # pinning requires a CUDA context
        self.assertRaises(RuntimeError, lambda: x.pin_memory())
    else:
        pinned = x.pin_memory()
        self.assertTrue(pinned.is_pinned())
        self.assertEqual(pinned, x)
        # pinning copies: new allocation, same values
        self.assertNotEqual(pinned.data_ptr(), x.data_ptr())
        # test that pin_memory on already pinned tensor has no effect
        self.assertIs(pinned, pinned.pin_memory())
        self.assertEqual(pinned.data_ptr(), pinned.pin_memory().data_ptr())
def test_error_msg_type_translation(self):
with self.assertRaisesRegex(
RuntimeError,
# message includes both Double and Long
'(?=.*Double)(?=.*Long)'):
# Calls model with a LongTensor input but DoubleTensor weights
input = torch.zeros(1, 1, 1, 6, dtype=torch.long)
weight = torch.nn.Parameter(torch.zeros(1, 1, 1, 3, dtype=torch.double))
model = torch.nn.Conv2d(1, 1, (1, 3), stride=1, padding=0, bias=False)
model.weight = weight
out = model(input)
def test_apply(self):
    """apply_ maps a python callable over each element in place."""
    base = torch.arange(1, 6)
    doubled = base.clone().apply_(lambda v: v + v)
    self.assertEqual(doubled, base * 2)
    # the callable must return a number coercible to the tensor's dtype
    self.assertRaises(TypeError, lambda: base.apply_(lambda v: "str"))
def test_map(self):
    """map_ combines two tensors elementwise through a python callable, in place."""
    first = torch.autograd.Variable(torch.randn(3, 3))
    second = torch.autograd.Variable(torch.randn(3))
    result = first.clone()
    result.map_(second, lambda a, b: a + b)
    # broadcasting of `second` matches ordinary tensor addition
    self.assertEqual(result, first + second)
    self.assertRaisesRegex(TypeError, "not callable", lambda: result.map_(second, "str"))
def test_map2(self):
    """map2_ combines three tensors elementwise; grad-requiring inputs must fail."""
    first = torch.autograd.Variable(torch.randn(3, 3))
    second = torch.autograd.Variable(torch.randn(3))
    third = torch.autograd.Variable(torch.randn(1, 3))
    result = first.clone()
    result.map2_(second, third, lambda a, b, c: a + b * c)
    self.assertEqual(result, first + second * third)
    # map2_ is not differentiable, so an operand that requires grad is rejected
    third.requires_grad = True
    self.assertRaisesRegex(
        RuntimeError, "requires grad",
        lambda: result.map2_(second, third, lambda a, b, c: a + b * c))
def test_Size(self):
x = torch.Size([1, 2, 3])
self.assertIsInstance(x, tuple)
self.assertEqual(x[0], 1)
self.assertEqual(x[1], 2)
self.assertEqual(x[2], 3)
self.assertEqual(len(x), 3)
self.assertRaises(TypeError, lambda: torch.Size(torch.ones(3)))
self.assertIsInstance(x * 2, torch.Size)
self.assertIsInstance(x[:-1], torch.Size)
self.assertIsInstance(x + x, torch.Size)
def test_Size_scalar(self):
three = torch.tensor(3)
two = torch.tensor(2)
x = torch.Size([0, 1, two, three, 4])
for i in range(1, 5):
self.assertEqual(x[i], i)
def test_Size_iter(self):
for sizes in [iter([1, 2, 3, 4, 5]), range(1, 6)]:
x = torch.Size(sizes)
for i in range(0, 5):
self.assertEqual(x[i], i + 1)
def test_t_not_2d_error(self):
self.assertRaises(RuntimeError, lambda: torch.randn(2, 3, 4).t())
self.assertRaises(RuntimeError, lambda: torch.randn(2, 3, 4).t_())
# skip this test for now as it affects all tests
@unittest.skipIf(True, "flush_denormal not supported")
def test_set_flush_denormal(self):
    """set_flush_denormal(True) flushes subnormal values of the native width to zero.

    1e-42 is subnormal in float32 but normal in float64; 1e-320 is subnormal
    even in float64 — so flushing affects each tensor at its own precision.
    """
    tiny_float = 1e-42
    tiny_double = 1e-320
    float_tensor = torch.FloatTensor([1.0, tiny_float])
    double_tensor = torch.DoubleTensor([1.0, tiny_float, tiny_double])
    # before flushing: all values survive (tiny_float only approximately in fp32)
    self.assertEqual(float_tensor[0], 1.0, atol=0.0, rtol=0)
    self.assertEqual(float_tensor[1], tiny_float, atol=tiny_float / 16, rtol=0)
    self.assertEqual(double_tensor[0], 1.0, atol=0.0, rtol=0)
    self.assertEqual(double_tensor[1], tiny_float, atol=0.0, rtol=0)
    self.assertEqual(double_tensor[2], tiny_double, atol=0.0, rtol=0)
    torch.set_flush_denormal(True)
    self.assertEqual(float_tensor[0], 1.0, atol=0.0, rtol=0)
    self.assertEqual(float_tensor[1], 0.0, atol=0.0, rtol=0)  # tiny_float to zero
    self.assertEqual(double_tensor[0], 1.0, atol=0.0, rtol=0)
    # tiny_float is not converted to zero in double type
    self.assertEqual(double_tensor[1], tiny_float, atol=0.0, rtol=0)
    self.assertEqual(double_tensor[2], 0.0, atol=0.0, rtol=0)  # tiny_double to zero
    torch.set_flush_denormal(False)
def test_show_config(self):
# We can't usefully test the output; just make sure this doesn't crash
torch.__config__.show()
@unittest.skipIf(IS_FBCODE, "CXX_FLAGS is only for OSS build.")
def test_cxx_flags(self):
    """Smoke test: querying the build's C++ flags must not raise."""
    torch.__config__._cxx_flags()
def test_parallel_info(self):
torch.__config__.parallel_info()
@slowTest
def test_slow_test(self):
    """Smoke test: verifies the slowTest decorator is importable and applies."""
    # Just a smoketest to make sure our slowTest decorator works.
    pass
def test_is_nonzero(self):
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
torch.tensor([]).is_nonzero()
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with more than one value is ambiguous"):
torch.tensor([0, 0]).is_nonzero()
self.assertFalse(torch.tensor(0).is_nonzero())
self.assertTrue(torch.tensor(1).is_nonzero())
self.assertFalse(torch.tensor([0]).is_nonzero())
self.assertTrue(torch.tensor([1]).is_nonzero())
self.assertFalse(torch.tensor([[0]]).is_nonzero())
self.assertTrue(torch.tensor([[1]]).is_nonzero())
self.assertTrue(torch.tensor(0.1).is_nonzero())
self.assertTrue(torch.tensor(-0.1).is_nonzero())
self.assertFalse(torch.tensor(0.0).is_nonzero())
self.assertTrue(torch.tensor(True).is_nonzero())
self.assertFalse(torch.tensor(False).is_nonzero())
self.assertFalse(torch.tensor(0 + 0j).is_nonzero())
self.assertTrue(torch.tensor(0 + 0.1j).is_nonzero())
def test_assert_async(self):
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
torch._assert_async(torch.tensor([]))
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with more than one value is ambiguous"):
torch._assert_async(torch.tensor([0, 0]))
with self.assertRaisesRegex(RuntimeError, "Expected Tensor with single nonzero value, but got zero"):
torch._assert_async(torch.tensor(0))
torch._assert_async(torch.tensor(1))
torch._assert_async(torch.tensor(0.1))
torch._assert_async(torch.tensor(-0.1))
with self.assertRaisesRegex(RuntimeError, "Expected Tensor with single nonzero value, but got zero"):
torch._assert_async(torch.tensor(0.0))
torch._assert_async(torch.tensor(True))
with self.assertRaisesRegex(RuntimeError, "Expected Tensor with single nonzero value, but got zero"):
torch._assert_async(torch.tensor(False))
torch._assert_async(torch.tensor(0 + 0.1j))
with self.assertRaisesRegex(RuntimeError, "Expected Tensor with single nonzero value, but got zero"):
torch._assert_async(torch.tensor(0 + 0j))
# NB: we must not be built with CUDA; if we are built with CUDA but no CUDA
# is available, we get a different error.
@unittest.skipIf(torch.backends.cuda.is_built() or IS_SANDCASTLE, "CUDA is built, can't test CUDA not built error")
def test_cuda_not_built(self):
    """Every CUDA entry point must fail with a clear message on CPU-only builds."""
    msg = "Torch not compiled with CUDA enabled"
    cuda_uses = [
        (AssertionError, lambda: torch.cuda.current_device()),
        (AssertionError, lambda: torch.tensor([1], device="cuda")),
        (AssertionError, lambda: torch.tensor([1]).cuda()),
        (TypeError, lambda: torch.cuda.FloatTensor()),
        (TypeError, lambda: torch.set_default_tensor_type(torch.cuda.FloatTensor)),
        (AssertionError, lambda: torch.tensor([1]).to(device="cuda")),
    ]
    for exc_type, trigger in cuda_uses:
        self.assertRaisesRegex(exc_type, msg, trigger)
def test_has_internal_overlap(self):
OVERLAP_NO = 0
OVERLAP_YES = 1
OVERLAP_TOO_HARD = 2
# Check for contiguous tensors
a = torch.randn(3, 3)
self.assertEqual(torch._debug_has_internal_overlap(a), OVERLAP_NO)
# Checks for zero strides
b = torch.randn(1, 3)
b_expanded = b.expand(4, 3)
self.assertEqual(torch._debug_has_internal_overlap(b_expanded), OVERLAP_YES)
# Check for zero strided, size 1 axis, in non-contiguous storage (gh-33812)
c = torch.randn(10).as_strided([2, 1, 5], [1, 0, 2])
self.assertEqual(torch._debug_has_internal_overlap(c), OVERLAP_NO)
c = torch.randn(2, 1, 10)[::2].as_strided((2, 1, 5), (10, 0, 2))
self.assertEqual(torch._debug_has_internal_overlap(c), OVERLAP_TOO_HARD)
def test_allow_tensor_metadata_change(self):
def do_test(t):
with self.assertRaisesRegex(
RuntimeError,
"set_sizes_contiguous is not allowed on a Tensor created from .data or .detach()"):
t.resize_((2, 1))
with self.assertRaisesRegex(
RuntimeError,
"set_storage is not allowed on a Tensor created from .data or .detach()"):
t.set_()
with self.assertRaisesRegex(
RuntimeError,
"set_storage_offset is not allowed on a Tensor created from .data or .detach()"):
t.set_(t.storage(), 0, t.size(), list(t.stride()))
do_test(torch.tensor([[1, 2]]).data)
do_test(torch.tensor([[1, 2]]).detach())
@skipIfNotRegistered("LayerNorm", "Skipping as LayerNorm is not registered")
def test_c10_layer_norm(self):
# test that we can call c10 ops and they return a reasonable result
X = torch.rand(5, 5, dtype=torch.float)
weight = torch.rand(*X.size()[1:], dtype=torch.float)
bias = torch.rand(*X.size()[1:], dtype=torch.float)
epsilon = 1e-4
expected_norm = torch.nn.functional.layer_norm(
X, X.size()[1:], weight=weight, bias=bias, eps=epsilon)
actual_norm, actual_mean, actual_stdev = \
torch.ops._caffe2.LayerNorm(torch.tensor(X), torch.tensor(
weight), torch.tensor(bias), 1, epsilon, True)
torch.testing.assert_close(expected_norm, actual_norm)
def test_memory_format(self):
def test_helper(x, memory_format):
y = x.contiguous(memory_format=memory_format)
self.assertFalse(y.is_contiguous())
self.assertTrue(y.is_contiguous(memory_format=memory_format))
self.assertEqual(y, x)
test_helper(torch.randn(4, 3, 8, 8), torch.channels_last)
test_helper(torch.randn(4, 3, 8, 8, 8), torch.channels_last_3d)
def test_memory_format_contiguous_returns_same_tensor_if_already_satisfies(self):
def test_helper(x, memory_format):
alias = x.contiguous(memory_format=memory_format)
alias.fill_(7)
self.assertEqual(x, alias)
test_helper(torch.randn(4, 8, 8, 3).permute(0, 3, 1, 2), torch.channels_last)
test_helper(torch.randn(4, 8, 8, 8, 3).permute(0, 4, 1, 2, 3), torch.channels_last_3d)
def test_memory_format_empty(self):
def test_helper(dim1, dim2, memory_format):
with self.assertRaises(RuntimeError):
x = torch.empty(dim1, memory_format=memory_format)
x = torch.empty(dim2, memory_format=memory_format)
self.assertTrue(x.is_contiguous(memory_format=memory_format))
test_helper((3, 3), (3, 3, 3, 3), torch.channels_last)
test_helper((3, 3, 3), (3, 3, 3, 3, 3), torch.channels_last_3d)
def test_subclass_tensors(self):
# raise an error when trying to subclass FloatTensor
with self.assertRaisesRegex(TypeError, "type 'torch.FloatTensor' is not an acceptable base type"):
class Foo1(torch.FloatTensor):
pass
# but allow subclassing Tensor:
class Foo2(torch.Tensor):
def foo(self):
return 5
f = Foo2()
self.assertEqual(f.foo(), 5)
def test_ndim(self):
a = torch.randn(1, 2, 3)
self.assertEqual(3, a.ndim)
b = torch.randn(())
self.assertEqual(0, b.ndim)
c = torch.randn(1, 0)
self.assertEqual(2, c.ndim)
def test_fill_diagonal(self):
a1 = torch.randn(7, 3)
a2 = a1.clone()
v = 1
for i in range(3):
a2[i][i] = v
a1.fill_diagonal_(v)
self.assertEqual(a1, a2)
b1 = torch.randn(7, 3)
b2 = b1.clone()
for i in range(3):
b2[i][i] = v
b2[i + 4][i] = v
b1.fill_diagonal_(v, wrap=True)
self.assertEqual(b1, b2)
c1 = torch.rand(3, 3, 3)
c2 = c1.clone()
for i in range(3):
c2[i][i][i] = v
c1.fill_diagonal_(v)
self.assertEqual(c1, c2)
# non-contiguous tensor
d1 = torch.rand(3, 3, 3)[:, 1, ...]
d2 = d1.clone()
for i in range(3):
d2[i][i] = v
d1.fill_diagonal_(v)
self.assertEqual(d1, d2)
e1 = torch.rand(7, 3, 3)[:, 1, ...]
e2 = e1.clone()
for i in range(3):
e2[i][i] = v
e2[i + 4][i] = v
e1.fill_diagonal_(v, wrap=True)
self.assertEqual(e1, e2)
def test_setting_real_imag_to_a_number(self):
x = torch.randn(4, dtype=torch.cfloat)
x.real = 0
x.imag = 0
zeros = torch.zeros(4)
self.assertEqual(x.real, zeros)
self.assertEqual(x.imag, zeros)
    def test_batch_norm_cpu_inference(self):
        """Verify BatchNorm2d eval-mode output on CPU against precomputed values.

        Covers contiguous, transposed (non-contiguous), and channels-last
        inputs, with and without affine parameters; all layouts must produce
        identical results.
        """
        # input nchw in (2,1,1,1), (2,2,2,2)
        inputs = [
            torch.tensor([[[[-0.5000]]], [[[0.5000]]]]),
            torch.tensor([
                [
                    [[-0.5000, 0.5000], [-1.0000, 1.0000]],
                    [[-0.2500, -0.5000], [0.2500, 0.5000]]
                ],
                [
                    [[0.1000, 1.0000], [1.0000, 0.1000]],
                    [[1.0000, 0.5000], [1.5000, -1.5000]]
                ]])]
        # output nchw in (2,1,1,1), (2,2,2,2)
        # NOTE(review): the expected values look like x / sqrt(1 + eps) with
        # eps=1e-05, i.e. eval-mode normalization against the default running
        # stats -- confirm before editing.
        outputs = [
            torch.tensor([
                [[[-0.499997496604919433593750000]]],
                [[[0.499997496604919433593750000]]]]),
            torch.tensor([
                [[[-0.499997496604919433593750000, 0.499997496604919433593750000],
                  [-0.999994993209838867187500000, 0.999994993209838867187500000]],
                 [[-0.249998748302459716796875000, -0.499997496604919433593750000],
                  [0.249998748302459716796875000, 0.499997496604919433593750000]]],
                [[[0.099999502301216125488281250, 0.999994993209838867187500000],
                  [0.999994993209838867187500000, 0.099999502301216125488281250]],
                 [[0.999994993209838867187500000, 0.499997496604919433593750000],
                  [1.499992489814758300781250000, -1.499992489814758300781250000]]]])]
        for i in range(len(inputs)):
            for affine in [False, True]:
                # positional args: num_features, eps=1e-05, momentum=0.1;
                # eval() so the (default) running stats are used
                m = torch.nn.BatchNorm2d(inputs[i].size()[1], 1e-05, 0.1, affine=affine)
                m.eval()
                # contiguous case
                input1 = inputs[i].contiguous()
                output1 = m(input1)
                # non-contiguous case
                input2 = input1.permute(0, 1, 3, 2)
                output2 = m(input2).permute(0, 1, 3, 2)
                # channels last case
                input3 = input1.contiguous(memory_format=torch.channels_last)
                output3 = m(input3)
                # all three layouts must match the reference and each other
                self.assertEqual(output3, outputs[i])
                self.assertEqual(output3, output1)
                self.assertEqual(output3, output2)
# FIXME: move these meta tests to their own test suite/class or
# distribute them among the appropriate test suites for their ops
def test_empty_meta(self):
x = torch.empty(2 ** 20, 2 ** 20, device='meta')
y = torch.empty(2 ** 20, device='meta')
z = x + y
self.assertEqual(z.size(), (2 ** 20, 2 ** 20))
self.assertRaises(RuntimeError, lambda: z[0][0].item())
def test_format_scalar_meta(self):
x = torch.empty((), device='meta')
self.assertEqual(format(x), repr(x))
def test_upsample_nearest1d_meta(self):
# TODO: this test should be triggered by test_nn.py but right
# now meta is not enabled (and even if it was, we are probably
# missing too many meta functions to get through the test unmolested)
# NB: Can't make the exponent too big, or it will overflow
# signed 64-bit integer
x = torch.empty(2 * 10 ** 8, 3, 2 * 10 ** 8, device='meta')
z = torch.nn.functional.interpolate(x, scale_factor=2)
self.assertEqual(z.size(), (2 * 10 ** 8, 3, 4 * 10 ** 8))
self.assertRaises(RuntimeError, lambda: z[0][0][0].item())
# TODO: the out tests cannot be triggered by test_nn.py because
# we don't actually do out= arguments for nn functions, so there
# is no public API by which to get the out version
# interpolate doesn't seem to support out=
# (not sure why passing None here doesn't work? How strange...)
z = torch.empty(0, device='meta')
torch._C._nn.upsample_nearest1d(x, (4 * 10 ** 8,), 2, out=z)
self.assertEqual(z.size(), (2 * 10 ** 8, 3, 4 * 10 ** 8))
self.assertRaises(RuntimeError, lambda: z[0][0][0].item())
    def test_upsample_nearest2d_meta(self):
        """Check out= memory-format handling of upsample_nearest2d on meta.

        The out tensor keeps its own memory format when its shape already
        matches, is clobbered to the input's format when a resize occurs, and
        dtype/device mismatches must raise.
        """
        # TODO: the out tests cannot be triggered by test_nn.py because
        # we don't actually do out= arguments for nn functions, so there
        # is no public API by which to get the out version
        # Make sure we don't clobber strides of out tensor. NB: this
        # test must be done on 2d/3d, because 1d doesn't have any meaningful
        # layout support
        x = torch.empty(4, 3, 8, 8, device='meta')
        out = torch.empty(4, 3, 16, 16, device='meta', memory_format=torch.channels_last)
        torch._C._nn.upsample_nearest2d(x, (16, 16), out=out)
        self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
        # conversely, a contiguous out stays contiguous for channels-last input
        x = torch.empty(4, 3, 8, 8, device='meta', memory_format=torch.channels_last)
        out = torch.empty(4, 3, 16, 16, device='meta')
        torch._C._nn.upsample_nearest2d(x, (16, 16), out=out)
        self.assertTrue(out.is_contiguous())
        # But if resize occurs, do clobber
        x = torch.empty(4, 3, 8, 8, device='meta', memory_format=torch.channels_last)
        out = torch.empty(0, device='meta')
        torch._C._nn.upsample_nearest2d(x, (16, 16), out=out)
        self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
        # Complain if out dtype mismatch
        x = torch.empty(4, 3, 8, 8, device='meta', dtype=torch.float)
        out = torch.empty(4, 3, 16, 16, device='meta', dtype=torch.double)
        self.assertExpectedRaisesInline(
            RuntimeError, lambda: torch._C._nn.upsample_nearest2d(x, (16, 16), out=out),
            """Expected out tensor to have dtype float, but got double instead"""
        )
        # Complain if out device mismatch
        x = torch.empty(0, 3, 8, 8, device='meta')
        out = torch.empty(0, 3, 16, 16, device='cpu')
        self.assertExpectedRaisesInline(
            RuntimeError, lambda: torch._C._nn.upsample_nearest2d(x, (16, 16), out=out),
            """Expected out tensor to have device meta, but got cpu instead"""
        )
def test_add_meta_scalar(self):
# From https://github.com/pytorch/pytorch/issues/53815
x = torch.empty(2, device='meta')
y = x + 2
self.assertEqual(y.size(), x.size())
def test_normal_shape(self):
warned = False
for device in get_all_device_types():
tensor1 = torch.rand(1, device=device)
tensor4 = torch.rand(4, device=device)
tensor120 = torch.rand(120, device=device)
tensor2145 = torch.rand(2, 1, 4, 5, device=device)
tensor2345 = torch.rand(2, 3, 4, 5, device=device)
tensor2345_non_contiguous = torch.rand(2, 4, 3, 5, device=device).permute(0, 2, 1, 3)
tensor2345_channels_last = tensor2345.contiguous(memory_format=torch.channels_last)
output2345 = torch.zeros(2, 3, 4, 5, device=device)
output345 = torch.zeros(3, 4, 5, device=device)
# inputs have same size
self.assertEqual(torch.normal(tensor2345, tensor2345).size(), (2, 3, 4, 5))
self.assertEqual(torch.normal(tensor2345_non_contiguous, tensor2345).size(), (2, 3, 4, 5))
self.assertEqual(torch.normal(tensor2345, tensor2345_channels_last).size(), (2, 3, 4, 5))
self.assertEqual(torch.normal(tensor2345_non_contiguous, tensor2345_channels_last).size(), (2, 3, 4, 5))
# scalar case
self.assertEqual(torch.normal(tensor2345, 2).size(), (2, 3, 4, 5))
self.assertEqual(torch.normal(2, tensor2345).size(), (2, 3, 4, 5))
# inputs are expandable tensors
self.assertEqual(torch.normal(tensor2345, tensor1).size(), (2, 3, 4, 5))
self.assertEqual(torch.normal(tensor2145, tensor2345).size(), (2, 3, 4, 5))
# inputs are non-expandable tensors, but they have same number of elements
with self.assertRaisesRegex(
RuntimeError,
r"The size of tensor a \(120\) must match the size of "
r"tensor b \(5\) at non-singleton dimension 3"):
self.assertEqual(torch.normal(tensor120, tensor2345).size(), (120,))
with self.assertRaisesRegex(
RuntimeError,
r"The size of tensor a \(5\) must match the size of "
r"tensor b \(120\) at non-singleton dimension 3"):
self.assertEqual(torch.normal(tensor2345, tensor120).size(), (2, 3, 4, 5))
# inputs are non-expandable tensors and they don't have same number of elements
with self.assertRaisesRegex(
RuntimeError,
r"The size of tensor a \(5\) must match the size of "
r"tensor b \(4\) at non-singleton dimension 3"):
torch.normal(tensor2345, tensor4)
# output and inputs are size compatible
self.assertEqual(torch.normal(tensor2345, tensor2345, out=output2345).size(), (2, 3, 4, 5))
# output and inputs are not size compatible
with self.assertWarnsRegex(
UserWarning,
"This behavior is deprecated, and in a future PyTorch "
"release outputs will not be resized unless they have "
"zero elements"):
self.assertEqual(torch.normal(tensor2345, tensor2145, out=output345).size(), (2, 3, 4, 5))
with self.assertRaisesRegex(
RuntimeError,
r"The size of tensor a \(5\) must match the size of "
r"tensor b \(120\) at non-singleton dimension 3"):
# inputs are not expandable, output size is not the same as mean
torch.normal(tensor2345, tensor120, out=output345)
    def test_tensoriterator_output_setup(self):
        """Check that TensorIterator sets up the output's memory layout.

        For each input layout (contiguous, channels-last, permuted), the
        elementwise add result is compared element-by-element against scalar
        addition, for both dense and quantized tensors; a pre-allocated out=
        tensor must keep its own strides when shapes match.
        """
        # Test whether the output's memory layout is correct
        def test_memory_layout(x, y, scale, zero_point, out):
            self.assertEqual(x.dim(), 4)
            self.assertEqual(x.size(), y.size())
            self.assertEqual(y.size(), out.size())
            shape = x.size()
            # walk every element; quantized add needs scale/zero_point
            for n in range(shape[0]):
                for c in range(shape[1]):
                    for h in range(shape[2]):
                        for w in range(shape[3]):
                            if scale is not None and zero_point is not None:
                                self.assertEqual(
                                    out[n][c][h][w],
                                    torch.ops.quantized.add(x[n][c][h][w], y[n][c][h][w], scale, zero_point))
                            else:
                                self.assertEqual(out[n][c][h][w], x[n][c][h][w] + y[n][c][h][w])
        xraw = torch.rand(2, 3, 4, 4)
        yraw = torch.rand(2, 3, 4, 4)
        qxraw = torch.quantize_per_tensor(xraw, 0.1, 5, torch.quint8)
        qyraw = torch.quantize_per_tensor(yraw, 0.1, 5, torch.quint8)
        # contiguous case fast setup
        test_memory_layout(xraw, yraw, None, None, xraw + yraw)
        test_memory_layout(qxraw, qyraw, 0.1, 5, torch.ops.quantized.add(qxraw, qyraw, 0.1, 5))
        # channels last case fast setup
        x = xraw.contiguous(memory_format=torch.channels_last)
        y = yraw.contiguous(memory_format=torch.channels_last)
        test_memory_layout(x, y, None, None, x + y)
        qx = qxraw.contiguous(memory_format=torch.channels_last)
        qy = qyraw.contiguous(memory_format=torch.channels_last)
        test_memory_layout(qx, qy, 0.1, 5, torch.ops.quantized.add(qx, qy, 0.1, 5))
        # non contiguous case fast setup (dense, non-overlapping, same shape and strides)
        x = xraw.permute(0, 2, 3, 1)
        y = yraw.permute(0, 2, 3, 1)
        test_memory_layout(x, y, None, None, x + y)
        qx = qxraw.permute(0, 2, 3, 1)
        qy = qyraw.permute(0, 2, 3, 1)
        test_memory_layout(qx, qy, 0.1, 5, torch.ops.quantized.add(qx, qy, 0.1, 5))
        # non contiguous case fast setup (dense, non-overlapping)
        # input tensors have same shape and strides
        # output tensor have same shape as input tensors but different stride
        # output tensor should preserve its strides in this case
        x = xraw.permute(0, 2, 3, 1)
        y = yraw.permute(0, 2, 3, 1)
        out = torch.empty_like(xraw)
        out = out.permute(0, 3, 2, 1)
        expected_stride = out.stride()
        test_memory_layout(x, y, None, None, torch.add(x, y, out=out))
        self.assertEqual(expected_stride, out.stride())
        # non contiguous case non fast setup
        x = xraw.permute(0, 2, 3, 1)
        y = yraw.permute(0, 3, 2, 1)
        test_memory_layout(x, y, None, None, x + y)
        qx = qxraw.permute(0, 2, 3, 1)
        qy = qyraw.permute(0, 3, 2, 1)
        test_memory_layout(qx, qy, 0.1, 5, torch.ops.quantized.add(qx, qy, 0.1, 5))
# Tests to make sure we still handle .data properly until it is removed
def test_dot_data_use(self):
# .data allows to change the Tensors types inplace, check that we still
# raise a nice error.
with self.assertRaisesRegex(
RuntimeError,
# message includes both Double and ComplexFloat
'(?=.*Double)(?=.*ComplexFloat)'):
# Calls model with a LongTensor input but DoubleTensor weights
input = torch.randn(1, 1, 1, 6, dtype=torch.double)
weight = torch.zeros(1, 1, 1, 3, dtype=torch.complex64)
model = torch.nn.Conv2d(1, 1, (1, 3), stride=1, padding=0, bias=False)
model.weight.data = weight
out = model(input)
def test_empty_storage_view(self):
# we should be able to "modify" slices of a 0-element
# array without an error being raised due to
# trying to resize its storage
t = torch.from_numpy(np.empty((0, 4)))
t[:, 1::2] *= 1
def test_has_storage(self):
self.assertIsNotNone(torch.tensor([]).storage())
self.assertIsNotNone(torch.empty(0).storage())
self.assertIsNotNone(torch.tensor([]).clone().storage())
self.assertIsNotNone(torch.tensor([0, 0, 0]).nonzero().storage())
self.assertIsNotNone(torch.tensor([]).new().storage())
# FIXME: Extend this test and put in a TensorProperties test class
def test_numel(self):
b = torch.ByteTensor(3, 100, 100)
self.assertEqual(b.nelement(), 3 * 100 * 100)
self.assertEqual(b.numel(), 3 * 100 * 100)
# Verifies that (deep)copies of dtypes are the same objects
def test_copy_dtypes(self):
for dtype in all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool):
copied_dtype = copy.deepcopy(dtype)
self.assertIs(dtype, copied_dtype)
def test_dtype_is_signed(self):
for dtype in all_types_and_complex_and(torch.half, torch.bfloat16, torch.half):
self.assertEqual(dtype.is_signed, torch.is_signed(torch.tensor(0, dtype=dtype)))
self.assertRaisesRegex(RuntimeError, 'not supported for quantized', lambda: torch.quint8.is_signed)
self.assertRaisesRegex(RuntimeError, 'not supported for quantized', lambda: torch.qint8.is_signed)
self.assertRaisesRegex(RuntimeError, 'not supported for quantized', lambda: torch.qint32.is_signed)
# FIXME: Put the following random tests into their own test class or test suite
def test_RNGState(self):
state = torch.get_rng_state()
stateCloned = state.clone()
before = torch.rand(1000)
self.assertEqual(state.ne(stateCloned).long().sum(), 0, atol=0, rtol=0)
torch.set_rng_state(state)
after = torch.rand(1000)
self.assertEqual(before, after, atol=0, rtol=0)
def test_RNGStateAliasing(self):
# Fork the random number stream at this point
gen = torch.Generator()
gen.set_state(torch.get_rng_state())
self.assertEqual(gen.get_state(), torch.get_rng_state())
target_value = torch.rand(1000)
# Dramatically alter the internal state of the main generator
_ = torch.rand(100000)
forked_value = torch.rand(1000, generator=gen)
self.assertEqual(target_value, forked_value, atol=0, rtol=0, msg="RNG has not forked correctly.")
def test_RNG_after_pickle(self):
torch.random.manual_seed(100)
before = torch.rand(10)
torch.random.manual_seed(100)
buf = io.BytesIO()
tensor = torch.tensor([1, 2, 3])
ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(tensor)
after = torch.rand(10)
self.assertEqual(before, after, atol=0, rtol=0)
def test_boxMullerState(self):
torch.manual_seed(123)
odd_number = 101
seeded = torch.randn(odd_number)
state = torch.get_rng_state()
midstream = torch.randn(odd_number)
torch.set_rng_state(state)
repeat_midstream = torch.randn(odd_number)
torch.manual_seed(123)
reseeded = torch.randn(odd_number)
self.assertEqual(midstream, repeat_midstream, atol=0, rtol=0,
msg='get_rng_state/set_rng_state not generating same sequence of normally distributed numbers')
self.assertEqual(seeded, reseeded, atol=0, rtol=0,
msg='repeated calls to manual_seed not generating same sequence of normally distributed numbers')
    def test_manual_seed(self):
        """Check manual_seed round-trips through initial_seed, including all
        boundary seed values; negative seeds wrap into the uint64 range and
        out-of-range seeds raise."""
        # save the global RNG state so this test is side-effect free
        rng_state = torch.get_rng_state()
        torch.manual_seed(2)
        x = torch.randn(100)
        self.assertEqual(torch.initial_seed(), 2)
        torch.manual_seed(2)
        y = torch.randn(100)
        # the same seed must reproduce the same stream
        self.assertEqual(x, y)
        max_int64 = 0x7fff_ffff_ffff_ffff
        min_int64 = -max_int64 - 1
        max_uint64 = 0xffff_ffff_ffff_ffff
        # Check all boundary cases of valid seed value inputs
        test_cases = [
            # (seed, expected_initial_seed)
            # Positive seeds should be unchanged
            (max_int64, max_int64),
            (max_int64 + 1, max_int64 + 1),
            (max_uint64, max_uint64),
            (0, 0),
            # Negative seeds wrap around starting from the largest seed value
            (-1, max_uint64),
            (min_int64, max_int64 + 1)
        ]
        for seed, expected_initial_seed in test_cases:
            torch.manual_seed(seed)
            actual_initial_seed = torch.initial_seed()
            msg = "expected initial_seed() = %x after calling manual_seed(%x), but got %x instead" % (
                expected_initial_seed, seed, actual_initial_seed)
            self.assertEqual(expected_initial_seed, actual_initial_seed, msg=msg)
        # seeds outside the int64/uint64 window must be rejected
        for invalid_seed in [min_int64 - 1, max_uint64 + 1]:
            with self.assertRaisesRegex(RuntimeError, r'Overflow when unpacking long'):
                torch.manual_seed(invalid_seed)
        # restore the global RNG state for subsequent tests
        torch.set_rng_state(rng_state)
# FIXME: Describe this test and port to the generic device framework in a more
# appropriate test suite for the copy operation
def test_copy_transpose(self):
x = torch.arange(100 * 100, dtype=torch.float).reshape(100, 100).t()
y = torch.empty(100, 100, dtype=torch.float)
y.copy_(x)
self.assertEqual(y[:, 0], range(100))
self.assertEqual(y[:, 40], range(4000, 4100))
y = torch.empty(100, 100, dtype=torch.double)
y.copy_(x)
self.assertEqual(y[:, 0], range(100))
self.assertEqual(y[:, 40], range(4000, 4100))
# Validates regression reported in https://github.com/pytorch/pytorch/issues/45269
x = torch.arange(100 * 100).reshape(100, 100).to(dtype=torch.cfloat).t()
y = torch.empty(100, 100, dtype=torch.cfloat)
y.copy_(x)
self.assertEqual(y[:, 0], range(100))
self.assertEqual(y[:, 40], range(4000, 4100))
x = torch.arange(100 * 100).reshape(100, 100).to(dtype=torch.complex32).t()
y = torch.empty(100, 100, dtype=torch.complex32)
y.copy_(x)
self.assertEqual(y[:, 0], range(100))
self.assertEqual(y[:, 40], range(4000, 4100))
# FIXME: Port to a more appropriate test suite
def test_copy_broadcast(self):
torch.zeros(5, 6).copy_(torch.zeros(6))
self.assertRaises(RuntimeError, lambda: torch.zeros(5, 6).copy_(torch.zeros(30)))
# FIXME: Port to a more appropriate test suite
def test_copy_many_to_one(self):
# Testing in-place copy where it attempt to write from many memory
# storage to a single storage would cause RuntimeError to be thrown
self.assertRaises(RuntimeError, lambda: torch.zeros(1, 6).expand(5, 6).copy_(torch.zeros(5, 6)))
    # FIXME: Port to a more appropriate test suite
    def _test_to_with_layout(self, layout):
        """Exercise Tensor.to() aliasing/copy semantics for the given layout.

        .to() must return the same object when nothing changes, always copy
        with copy=True, and share or refresh data pointers accordingly; CUDA
        cases run only when CUDA is available.
        """
        def test_copy_behavior(t, non_blocking=False):
            # no-op conversions return the very same object...
            self.assertIs(t, t.to(t, non_blocking=non_blocking))
            self.assertIs(t, t.to(t.dtype, non_blocking=non_blocking))
            self.assertIs(t, t.to(torch.empty_like(t), non_blocking=non_blocking))
            # ...while copy=True always produces a new one
            self.assertIsNot(t, t.to(t, non_blocking=non_blocking, copy=True))
            self.assertIsNot(t, t.to(t.dtype, non_blocking=non_blocking, copy=True))
            self.assertIsNot(t, t.to(torch.empty_like(t), non_blocking=non_blocking, copy=True))
            # alternate spellings of the current device are also no-ops
            devices = [t.device]
            if t.device.type == 'cuda':
                if t.device.index == -1:
                    devices.append('cuda:{}'.format(torch.cuda.current_device()))
                elif t.device.index == torch.cuda.current_device():
                    devices.append('cuda')
            for device in devices:
                self.assertIs(t, t.to(device, non_blocking=non_blocking))
                self.assertIs(t, t.to(device, t.dtype, non_blocking=non_blocking))
                self.assertIsNot(t, t.to(device, non_blocking=non_blocking, copy=True))
                self.assertIsNot(t, t.to(device, t.dtype, non_blocking=non_blocking, copy=True))
        a = torch.tensor(5)
        if layout == torch.sparse_csr:
            a = torch.tensor([[0, 1, 2], [2, 0, 3]]).to_sparse_csr()
        test_copy_behavior(a)
        self.assertEqual(a.device, a.to('cpu').device)
        self.assertEqual(a.device, a.to('cpu', dtype=torch.float32).device)
        self.assertIs(torch.float32, a.to('cpu', dtype=torch.float32).dtype)
        self.assertEqual(a.device, a.to(torch.float32).device)
        self.assertIs(torch.float32, a.to(dtype=torch.float32).dtype)
        def test_data_ptr(getter):
            # no-op conversions share storage; copy=True gets fresh storage
            self.assertEqual(getter(a), getter(a.to('cpu')))
            self.assertEqual(getter(a), getter(a.to(dtype=a.dtype, device=a.device, copy=False)))
            self.assertEqual(getter(a), getter(a.to('cpu', copy=False)))
            self.assertNotEqual(getter(a), getter(a.to('cpu', copy=True)))
        if layout == torch.sparse_csr:
            # TODO: compressed sparse tensors currently don't support data_ptr.
            # Exercising failure will allow us to widen coverage of this test once it does.
            with self.assertRaisesRegex(RuntimeError, "Cannot access data pointer of Tensor that doesn't have storage"):
                a.data_ptr()
            # While compressed sparse tensors don't have a concept of data_ptr
            # the underlying tensors do. The implementation of to appropriately forwards
            # the call to the components, which is what we're test here.
            test_data_ptr(lambda a: a.values().data_ptr())
            test_data_ptr(lambda a: a.crow_indices().data_ptr())
            test_data_ptr(lambda a: a.col_indices().data_ptr())
        else:
            test_data_ptr(lambda a: a.data_ptr())
        if torch.cuda.is_available():
            for non_blocking in [True, False]:
                # exercise a second device spelling when more than one GPU exists
                for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']:
                    b = torch.tensor(5., device=cuda)
                    test_copy_behavior(b, non_blocking)
                    self.assertEqual(b.device, b.to(cuda, non_blocking=non_blocking).device)
                    self.assertEqual(a.device, b.to('cpu', non_blocking=non_blocking).device)
                    self.assertEqual(b.device, a.to(cuda, non_blocking=non_blocking).device)
                    self.assertIs(torch.int32, b.to('cpu', dtype=torch.int32, non_blocking=non_blocking).dtype)
                    self.assertEqual(a.device, b.to('cpu', dtype=torch.int32, non_blocking=non_blocking).device)
                    self.assertIs(torch.int32, b.to(dtype=torch.int32).dtype)
                    self.assertEqual(b.device, b.to(dtype=torch.int32).device)
def test_to(self):
self._test_to_with_layout(torch.strided)
is_cuda10_2_or_higher = (
(torch.version.cuda is not None)
and ([int(x) for x in torch.version.cuda.split(".")] >= [10, 2]))
if is_cuda10_2_or_higher: # in cuda10_1 sparse_csr is beta
self._test_to_with_layout(torch.sparse_csr)
# FIXME: describe this test
def test_as_subclass(self):
class SubTensor(torch.Tensor):
member_var = object()
t0 = torch.tensor(0)
t1 = torch.tensor([1, 2])
t2 = torch.tensor([[3, 4], [5, 6]])
s0 = t0.as_subclass(SubTensor)
s1 = t1.as_subclass(SubTensor)
s2 = t2.as_subclass(SubTensor)
# Check that the correct type is returned.
self.assertTrue(type(s0) is SubTensor)
self.assertTrue(type(s1) is SubTensor)
self.assertTrue(type(s2) is SubTensor)
# Check that the data is equal.
self.assertEqual(t0, s0)
self.assertEqual(t1, s1)
self.assertEqual(t2, s2)
t0[()] = 1
t1[1] = 3
t2[1, 1] = 7
# Check that the data is equal even after modification.
self.assertEqual(t0, s0)
self.assertEqual(t1, s1)
self.assertEqual(t2, s2)
# Check that member variables are passed through.
self.assertTrue(s0.member_var is SubTensor.member_var)
self.assertTrue(s1.member_var is SubTensor.member_var)
self.assertTrue(s2.member_var is SubTensor.member_var)
# Test that autograd is propagated.
t = torch.tensor(5, dtype=torch.float32, requires_grad=True)
# Run a calculation on the tensor.
exp_t = torch.exp(t)
# Cast exp_t to a subclass.
exp_s = exp_t.as_subclass(SubTensor)
# Make sure that t.grad was initially None
self.assertTrue(t.grad is None)
# Run the autograd calculation.
exp_s.backward()
# Make sure autograd was propagated to the original tensor
# declared with requires_grad.
self.assertTrue(t.grad is not None)
# Make sure invalid subclasses raise nice errors
class BadSubTensor():
member_var = object()
err_msg = "Creating a Tensor subclass from a class that does not inherit from Tensor"
with self.assertRaisesRegex(RuntimeError, err_msg):
s0 = t0.as_subclass(BadSubTensor)
# FIXME: Port to a test suite that better fits slicing
def test_slice(self):
empty = torch.empty(0, 4)
x = torch.arange(0., 16).view(4, 4)
self.assertEqual(x[:], x)
self.assertEqual(x[:4], x)
# start and stop are clamped to the size of dim
self.assertEqual(x[:5], x)
# if start >= stop then the result is empty
self.assertEqual(x[2:1], empty)
self.assertEqual(x[2:2], empty)
# out of bounds is also empty
self.assertEqual(x[10:12], empty)
# additional correctness checks
self.assertEqual(x[:1].tolist(), [[0, 1, 2, 3]])
self.assertEqual(x[:-3].tolist(), [[0, 1, 2, 3]])
self.assertEqual(x[:, -2:3].tolist(), [[2], [6], [10], [14]])
self.assertEqual(x[0:-1:2].tolist(), [[0, 1, 2, 3], [8, 9, 10, 11]])
def test_type(self):
x = torch.randn(3, 3).double()
self.assertEqual(x.type('torch.FloatTensor').dtype, torch.float32)
self.assertEqual(x.type(torch.FloatTensor).dtype, torch.float32)
self.assertEqual(x.int().type(torch.Tensor).dtype, torch.get_default_dtype())
self.assertEqual(x.type(torch.int32).dtype, torch.int32)
# FIXME: port to a quantization test suite
def test_qengine(self):
qengines = torch.backends.quantized.supported_engines
original_qe = torch.backends.quantized.engine
for qe in qengines:
torch.backends.quantized.engine = qe
assert torch.backends.quantized.engine == qe, 'qengine not set successfully'
torch.backends.quantized.engine = original_qe
# FIXME: port to a distributed test suite -- also... how could this be OOMing on Windows CUDA?
@slowTest
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(IS_WINDOWS, 'FIXME: CUDA OOM error on Windows')
def test_multinomial_invalid_probs(self):
def _spawn_method(self, method, arg):
try:
mp.set_start_method('spawn')
except RuntimeError:
pass
with mp.Pool(1) as pool:
out: list = pool.map(method, [arg])
self.assertTrue(out[0])
def _test_multinomial_invalid_probs(probs):
try:
# n_sample = 1 is a special case, test n_sample=2 which is more general
torch.multinomial(probs.to('cpu'), 2)
return False # Should not be reached
except RuntimeError as e:
return 'probability tensor contains either `inf`, `nan` or element < 0' in str(e)
_spawn_method(_test_multinomial_invalid_probs, torch.tensor([1., -1., 1.]))
_spawn_method(_test_multinomial_invalid_probs, torch.tensor([1., inf, 1.]))
_spawn_method(_test_multinomial_invalid_probs, torch.tensor([1., -inf, 1.]))
_spawn_method(_test_multinomial_invalid_probs, torch.tensor([1., 1., nan]))
# FIXME: port to more appropriate test suite
def test_to_with_tensor(self):
a = torch.tensor(5)
self.assertEqual(a.device, a.to(a).device)
if torch.cuda.is_available():
for non_blocking in [True, False]:
for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']:
b = torch.tensor(5., device=cuda)
self.assertEqual(b.device, b.to(b, non_blocking=non_blocking).device)
self.assertEqual(a.device, b.to(a, non_blocking=non_blocking).device)
self.assertEqual(b.device, a.to(b, non_blocking=non_blocking).device)
def test_device(self):
cpu = torch.device('cpu')
self.assertEqual('cpu', str(cpu))
self.assertEqual('cpu', cpu.type)
self.assertEqual(None, cpu.index)
cpu0 = torch.device('cpu:0')
self.assertEqual('cpu:0', str(cpu0))
self.assertEqual('cpu', cpu0.type)
self.assertEqual(0, cpu0.index)
cpu0 = torch.device('cpu', 0)
self.assertEqual('cpu:0', str(cpu0))
self.assertEqual('cpu', cpu0.type)
self.assertEqual(0, cpu0.index)
cuda = torch.device('cuda')
self.assertEqual('cuda', str(cuda))
self.assertEqual('cuda', cuda.type)
self.assertEqual(None, cuda.index)
cuda1 = torch.device('cuda:1')
self.assertEqual('cuda:1', str(cuda1))
self.assertEqual('cuda', cuda1.type)
self.assertEqual(1, cuda1.index)
cuda1 = torch.device('cuda', 1)
self.assertEqual('cuda:1', str(cuda1))
self.assertEqual('cuda', cuda1.type)
self.assertEqual(1, cuda1.index)
cuda90 = torch.device('cuda', 90)
self.assertEqual('cuda:90', str(cuda90))
self.assertEqual('cuda', cuda90.type)
self.assertEqual(90, cuda90.index)
self.assertRaises(RuntimeError, lambda: torch.device('cpu:-1'))
self.assertRaises(RuntimeError, lambda: torch.device('cuda:-1'))
self.assertRaises(RuntimeError, lambda: torch.device('cuda:2 '))
self.assertRaises(RuntimeError, lambda: torch.device('cuda: 2'))
self.assertRaises(RuntimeError, lambda: torch.device('cuda:2 2'))
self.assertRaises(RuntimeError, lambda: torch.device('cuda:2.'))
self.assertRaises(RuntimeError, lambda: torch.device('cuda:2?'))
self.assertRaises(RuntimeError, lambda: torch.device('cuda:?2'))
self.assertRaises(RuntimeError, lambda: torch.device('cuda:'))
self.assertRaises(RuntimeError, lambda: torch.device('cuda:2.232'))
self.assertRaises(RuntimeError, lambda: torch.device('cuda:2 cuda:3'))
self.assertRaises(RuntimeError, lambda: torch.device('cuda:2+cuda:3'))
self.assertRaises(RuntimeError, lambda: torch.device('cuda:2cuda:3'))
self.assertRaises(RuntimeError, lambda: torch.device(-1))
self.assertRaises(RuntimeError, lambda: torch.device('other'))
self.assertRaises(RuntimeError, lambda: torch.device('other:0'))
device_set = {'cpu', 'cpu:0', 'cuda', 'cuda:0', 'cuda:1', 'cuda:10', 'cuda:100'}
device_hash_set = set()
for device in list(device_set):
device_hash_set.add(hash(torch.device(device)))
self.assertEqual(len(device_set), len(device_hash_set))
def get_expected_device_repr(device):
if device.index is not None:
return "device(type='{type}', index={index})".format(
type=device.type, index=device.index)
return "device(type='{type}')".format(type=device.type)
for device in device_set:
dev = torch.device(device)
self.assertEqual(repr(dev), get_expected_device_repr(dev))
    # Tests that the use_deterministic_flag can be set as expected
    @wrapDeterministicFlagAPITest
    def test_deterministic_flag(self):
        """Check the deterministic-algorithms APIs stay in sync:
        use_deterministic_algorithms(warn_only=...), the int/str debug-mode
        setters, and their getters; non-bool arguments must raise."""
        for deterministic, warn_only in product([True, False], [True, False]):
            torch.use_deterministic_algorithms(deterministic, warn_only=warn_only)
            self.assertEqual(deterministic, torch.are_deterministic_algorithms_enabled())
            self.assertEqual(warn_only, torch.is_deterministic_algorithms_warn_only_enabled())
            # debug mode encodes both flags: 0 = off, 1 = warn, 2 = error
            if deterministic:
                if warn_only:
                    debug_mode = 1
                else:
                    debug_mode = 2
            else:
                debug_mode = 0
            self.assertEqual(debug_mode, torch.get_deterministic_debug_mode())
        for debug_mode in [0, 1, 2]:
            torch.set_deterministic_debug_mode(debug_mode)
            self.assertEqual(debug_mode, torch.get_deterministic_debug_mode())
            deterministic = debug_mode in [1, 2]
            warn_only = debug_mode == 1
            self.assertEqual(deterministic, torch.are_deterministic_algorithms_enabled())
            self.assertEqual(warn_only, torch.is_deterministic_algorithms_warn_only_enabled())
        # string spellings of the debug modes map to the same integers
        for debug_mode, debug_mode_str in [(0, 'default'), (1, 'warn'), (2, 'error')]:
            torch.set_deterministic_debug_mode(debug_mode_str)
            self.assertEqual(debug_mode, torch.get_deterministic_debug_mode())
        # non-bool arguments must be rejected, not coerced
        with self.assertRaisesRegex(
                TypeError,
                r"_set_deterministic_algorithms\(\): argument 'mode' \(position 1\) must be bool, not int"):
            torch.use_deterministic_algorithms(1)
        with self.assertRaisesRegex(
                TypeError,
                r"_set_deterministic_algorithms\(\): argument 'warn_only' must be bool, not int"):
            torch.use_deterministic_algorithms(False, warn_only=1)
def test_type_conversion_via_dtype_name(self):
x = torch.tensor([1])
self.assertEqual(x.byte().dtype, torch.uint8)
self.assertEqual(x.bool().dtype, torch.bool)
self.assertEqual(x.char().dtype, torch.int8)
self.assertEqual(x.double().dtype, torch.float64)
self.assertEqual(x.float().dtype, torch.float32)
self.assertEqual(x.half().dtype, torch.float16)
self.assertEqual(x.int().dtype, torch.int32)
self.assertEqual(x.bfloat16().dtype, torch.bfloat16)
cfloat = x.cfloat()
self.assertEqual(cfloat.dtype, torch.complex64)
self.assertEqual(cfloat.real, x.float())
self.assertEqual(cfloat.imag, torch.zeros_like(cfloat.imag))
cdouble = x.cdouble()
self.assertEqual(cdouble.dtype, torch.complex128)
self.assertEqual(cdouble.real, x.double())
self.assertEqual(cdouble.imag, torch.zeros_like(cdouble.imag))
chalf = x.chalf()
self.assertEqual(chalf.dtype, torch.complex32)
self.assertEqual(chalf.real, x.half())
self.assertEqual(chalf.imag, torch.zeros_like(chalf.imag))
def test_type_alias(self):
type_alias_map = {torch.float64: torch.double,
torch.float32: torch.float,
torch.int32: torch.int,
torch.int64: torch.long,
torch.int16: torch.short,
torch.float16: torch.half,
torch.complex32: torch.chalf,
torch.complex64: torch.cfloat}
for dtype, alias in type_alias_map.items():
self.assertIs(alias, dtype)
def test_doc_template(self) -> None:
    """
    Test that all public API doc strings use the same standard template for
    all common arguments such as tensor or dim
    """
    from torch._torch_docs import __file__ as doc_file
    from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args

    with open(doc_file, "r", encoding="utf-8") as f:
        doc_strs = f.read()
    # Capture every add_docstr(<func>, """<doc>""") call in _torch_docs.py:
    # group 1 is the function expression, group 2 the docstring body.
    matches = re.findall(
        r'add_docstr\(([^,]+?),[^"\']*?(?:"""|\'\'\')(.*?)(?:"""|\'\'\')(?:\.|,?[^,\)]*?\))',
        doc_strs,
        re.MULTILINE | re.DOTALL,
    )
    self.assertTrue(matches)

    for m in matches:
        func = m[0].strip()
        desc = m[1].strip()
        # A docstring that spells a common argument description out verbatim
        # should use the shared {placeholder} template instead.
        for common_args in [multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args]:
            for k, v in common_args.items():
                self.assertNotIn(v, desc, 'The argument description "{}" in {} can be '
                                 'replaced by {{{}}}'.format(v, func, k))
def test_doc(self):
    """Check that public callables in selected namespaces carry docstrings,
    and that names on the skip list have not silently gained docs.

    Fix: the helper below is defined as ``_test_namespace`` but the original
    call sites invoked an undefined name ``test_namespace``; the calls now
    use the defined helper.
    """
    checked_types = (types.MethodType, types.FunctionType,
                     types.BuiltinFunctionType, types.BuiltinMethodType)

    def _test_namespace(ns, *skips):
        # Resolve a printable name for the namespace (instance or module).
        if isinstance(ns, object):
            ns_name = ns.__class__.__name__
        else:
            ns_name = ns.__name__
        # Skips may be plain strings (matched exactly) or precompiled regexes.
        skip_regexes = []
        for r in skips:
            if isinstance(r, string_classes):
                skip_regexes.append(re.compile('^{}$'.format(re.escape(r))))
            else:
                skip_regexes.append(r)

        for name in dir(ns):
            if name.startswith('_'):
                continue
            # Some attributes only exist on tensors of a particular kind,
            # so fetch them from a suitable sample tensor.
            if name in ['real', 'imag']:
                y = torch.randn(1, dtype=torch.cfloat)
                var = getattr(y, name)
            elif name in ["H", "mT", "mH"]:
                y = torch.randn(1, 1)
                var = getattr(y, name)
            else:
                var = getattr(ns, name)
            if not isinstance(var, checked_types):
                continue
            doc = var.__doc__
            has_doc = doc is not None and len(doc.strip()) > 0
            full_name = ns_name + '.' + name
            if any(r.match(name) for r in skip_regexes):
                self.assertFalse(has_doc,
                                 'New docs have been added for {}, please remove '
                                 'it from the skipped list in TestTorch.test_doc'.format(full_name))
            else:
                self.assertTrue(has_doc, '{} is missing documentation'.format(full_name))

    # FIXME: All of the following should be marked as expected failures
    # so that it is easier to tell when missing has been added.
    # FIXME: fix all the skipped ones below!
    _test_namespace(torch.randn(1),
                    'as_strided_',
                    re.compile('^clamp_(min|max)_?$'),
                    'is_distributed',
                    'is_nonzero',
                    'is_same_size',
                    'log_softmax',
                    'map2_',
                    'new',
                    'reinforce',
                    'relu',
                    'relu_',
                    'prelu',
                    'resize',
                    'resize_as',
                    'softmax',
                    'split_with_sizes',
                    'unsafe_split_with_sizes',
                    '_autocast_to_fp16',
                    '_autocast_to_fp32',
                    )
    _test_namespace(torch.nn)
    _test_namespace(torch.nn.functional, 'assert_int_or_pair')
    # TODO: add torch.* tests when we have proper namespacing on ATen functions
    # test_namespace(torch)
# FIXME: deprecate torch.Tensor constructor
def test_tensor_ctor_scalar(self):
x = torch.Tensor(torch.tensor(1.0))
self.assertEqual(x, torch.tensor(1.0))
def test_deepcopy_gradient(self):
from copy import deepcopy
a = torch.zeros(10)
a.grad = torch.ones(10)
self.assertEqual(a.grad, deepcopy(a).grad)
s = torch.zeros(10).to_sparse()
s.grad = torch.ones(10).to_sparse()
self.assertEqual(s.grad, deepcopy(s).grad)
# ensure sharing is not broken
c = deepcopy([a, a.grad])
self.assertTrue(c[0].grad is c[1])
def test_tensor_base_init(self):
# Direct construction not OK
self.assertRaises(RuntimeError, lambda: torch._C._TensorBase())
# But construction of subclass is OK
class T(torch._C._TensorBase):
pass
T()
def test_tensor_base_new(self):
# OK to call super().__new__, see
# https://github.com/pytorch/pytorch/issues/57421
class TestTensor(torch._C._TensorBase):
@staticmethod
def __new__(cls, x, *args, **kwargs):
return super().__new__(cls, x, *args, **kwargs)
x = torch.ones(5)
test_tensor = TestTensor(x)
def test_pyobj_preserved(self):
x = torch.empty(2)
x.foo = 2 # put something on __dict__
y = torch.empty(2)
y.grad = x
del x # x is dead in Python
self.assertEqual(y.grad.foo, 2)
z = y.grad # it's live
del z # it's dead again
self.assertEqual(y.grad.foo, 2)
def test_subclass_preserved(self):
class MyTensor(torch.Tensor):
pass
x = MyTensor(torch.empty(2))
y = torch.empty(2)
y.grad = x
del x # x is dead in Python
self.assertEqual(type(y.grad), MyTensor)
z = y.grad # it's live
del z # it's dead again
self.assertEqual(type(y.grad), MyTensor)
def test_tensor_slot_dealloc(self):
    """__slots__ attributes of a tensor subclass are released on dealloc."""

    class SlotTensor1(torch._C._TensorBase):
        __slots__ = ['slot1']

    class SlotTensor2(SlotTensor1):
        __slots__ = ['slot2']

    monitor1, tracked1 = Tracker.make()
    monitor2, tracked2 = Tracker.make()
    tensor = SlotTensor2(torch.empty(2))
    tensor.slot1 = tracked1
    tensor.slot2 = tracked2
    del tracked1
    del tracked2
    # The slots still hold references, so nothing is collected yet.
    self.assertFalse(monitor1[0])
    self.assertFalse(monitor2[0])
    del tensor
    # Deallocating the tensor must drop both slot references.
    self.assertTrue(monitor1[0])
    self.assertTrue(monitor2[0])
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
def test_tensor_dict_dealloc(self):
    """__dict__ entries of a tensor are released when the tensor dies."""
    monitor, tracked = Tracker.make()
    tensor = torch.empty(2)
    tensor.arf = tracked
    del tracked
    self.assertFalse(monitor[0])  # still referenced via tensor.__dict__
    del tensor
    self.assertTrue(monitor[0])  # dict entry released with the tensor
def test_tensor_finalizer_dealloc(self):
m = [False]
class FinalizerTensor(torch._C._TensorBase):
def __del__(self):
m[0] = True
fin_tensor = FinalizerTensor(torch.empty(2))
self.assertFalse(m[0])
del fin_tensor
self.assertTrue(m[0])
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
def test_tensor_weakref_dealloc(self):
    """Weakref callbacks fire and the ref clears when a tensor dies."""
    tensor = torch.empty(2)
    fired = [False]

    def on_dead(ref):
        fired[0] = True

    ref = weakref.ref(tensor, on_dead)
    del tensor
    self.assertTrue(fired[0])
    self.assertEqual(ref(), None)
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
def test_tensor_cycle_via_dict(self):
    """A reference cycle through tensor __dict__ entries is collectable by
    the Python GC once no C++-side reference anchors it."""
    m1, t1 = Tracker.make()
    x = torch.empty(2)
    x._tracker = t1
    del t1

    m2, t2 = Tracker.make()
    y = torch.empty(2)
    y._tracker = t2
    del t2

    # Build an x <-> y cycle through __dict__ attributes.
    x._loop = y
    y._loop = x

    # C++ reference should keep the cycle live!
    # This exercise THPVariable_subtype_traverse
    # NB: Because z.grad is a reference done entirely in C++, cycles
    # involving it directly are NOT broken by Python GC; you've
    # set up a good old C++ reference cycle which we cannot safely
    # break (because C++ references are allowed to be accessed
    # multithreaded-ly) (TODO: except maybe if you can prove that
    # only Python has access to the C++ object, in which case you can
    # also prove that no multithreaded access occurs)
    z = torch.empty(2)
    z.grad = x

    del x
    del y

    gc.collect()
    # z's C++ .grad reference keeps the whole cycle alive through a collect.
    self.assertFalse(m1[0])
    self.assertFalse(m2[0])

    with disable_gc():
        del z
        # With GC disabled, the now-unreachable cycle stays uncollected.
        self.assertFalse(m1[0])
        self.assertFalse(m2[0])

    gc.collect()
    # Once the C++ anchor is gone, a GC pass frees the cycle.
    self.assertTrue(m1[0])
    self.assertTrue(m2[0])
def test_tensor_cycle_via_slots(self):
    """A reference cycle through __slots__ of tensor subclasses is broken by
    the GC (exercising slot traversal/clear), running both finalizers."""
    m1 = [False]
    m2 = [False]

    class SlotTensor1(torch._C._TensorBase):
        __slots__ = ['slot1']

        def __del__(self):
            m1[0] = True

    class SlotTensor2(SlotTensor1):
        __slots__ = ['slot2']

        def __del__(self):
            m2[0] = True

    x = SlotTensor1(torch.empty(2))
    y = SlotTensor2(torch.empty(2))
    # Cycle: x.slot1 -> y and y.slot2 -> x.
    x.slot1 = y
    y.slot2 = x

    del x
    with disable_gc():
        del y
        # The cycle keeps both tensors alive until a GC pass runs.
        self.assertFalse(m1[0])
        self.assertFalse(m2[0])

    gc.collect()
    self.assertTrue(m1[0])
    self.assertTrue(m2[0])
# FIXME: move to test_autograd?
@skipIfTorchDynamo("TorchDynamo does not work well with hooks")
def test_backward_hooks_traverse(self):
    """A reference cycle through _backward_hooks (set via a special C-level
    setter, not a plain __dict__ entry) must still be GC-collectable."""
    m1, t1 = Tracker.make()
    m2, t2 = Tracker.make()
    x = torch.empty(2, requires_grad=True)
    x._tracker = t1
    y = torch.empty(2, requires_grad=True)
    y._tracker = t2
    del t1
    del t2

    # this hits a special setter, it's not just a __dict__ entry
    x._backward_hooks = y
    y._backward_hooks = x

    del x
    with disable_gc():
        del y
        # The hook cycle keeps both tensors (and trackers) alive without GC.
        self.assertFalse(m1[0])
        self.assertFalse(m2[0])

    gc.collect()
    # GC traversal must see the hook references and free the cycle.
    self.assertTrue(m1[0])
    self.assertTrue(m2[0])
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
def test_dead_weak_ref(self):
    """Dereferencing a weakref whose tensor's PyObject died (while C++ still
    owned the storage) yields an undefined tensor that raises on use."""
    x = torch.empty(2)
    w_x = weakref.ref(x)
    y = torch.empty(2)
    y.grad = x
    del x

    x = w_x()
    # Ideally, x would keep the tensor live. But CPython doesn't
    # provide enough hooks to do this. So it will go dead and x
    # will transmute into an undefined tensor. Not great, but the
    # best we can do.
    del y

    self.assertRaises(RuntimeError, lambda: x.sigmoid())
def test_resurrected_weak_ref(self):
    """_fix_weakref() repairs a tensor obtained from a weakref so it stays
    usable after the original Python owner goes away."""
    x = torch.empty(2)
    w_x = weakref.ref(x)
    y = torch.empty(2)
    y.grad = x
    del x

    x = w_x()
    # Use this to manually fix weak references after dereferencing them
    x._fix_weakref()
    del y
    x.sigmoid()  # must not raise (contrast with test_dead_weak_ref)
# FIXME: move to test_linalg
@torch.inference_mode()
def test_bmm_multithreaded(self):
    """Cross-check torch.bmm against NumPy matmul on CPU with 4 threads,
    covering transposed, broadcast-expanded, and zero-sized batch inputs,
    for both the functional and the out= variant."""
    device = 'cpu'
    num_threads = torch.get_num_threads()

    torch.set_num_threads(4)
    batch_sizes = [1, 10]
    M, N, O = 23, 8, 12
    dtype = torch.float32
    numpy_dtype = dtype

    def invert_perm(p):
        # Inverse permutation: permute(p).permute(invert_perm(p)) is identity.
        d = {x: i for i, x in enumerate(p)}
        return (d[0], d[1], d[2])

    def generate_inputs(num_batches):
        # transposed tensors
        for perm1, perm2 in itertools.product(itertools.permutations((0, 1, 2)), repeat=2):
            b1 = make_tensor((num_batches, M, N), dtype=dtype, device=device, low=-1, high=1)
            b2 = make_tensor((num_batches, N, O), dtype=dtype, device=device, low=-1, high=1)
            b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
            b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
            yield b1, b2
        # broadcasting tensors
        for b1, b2, b3, b4, b5, b6 in itertools.product((True, False), repeat=6):
            shape1 = (num_batches if b1 else 1, M if b2 else 1, N if b3 else 1)
            shape2 = (num_batches if b4 else 1, N if b5 else 1, O if b6 else 1)
            b1 = make_tensor(shape1, dtype=dtype, device=device, low=-1, high=1).expand(num_batches, M, N)
            b2 = make_tensor(shape2, dtype=dtype, device=device, low=-1, high=1).expand(num_batches, N, O)
            yield b1, b2
        # zero-sized tensors
        for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
            shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
            shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
            b1 = torch.randn(shape1, dtype=dtype, device=device)
            b2 = torch.randn(shape2, dtype=dtype, device=device)
            yield b1, b2

    try:
        for num_batches in batch_sizes:
            for (b1, b2), perm3 in itertools.product(generate_inputs(num_batches), itertools.permutations((0, 1, 2))):
                res1 = torch.bmm(b1, b2)
                # out= target is NaN-filled and arbitrarily permuted so stale
                # values or wrong output strides would be caught.
                res2 = torch.full((num_batches, M, O), math.nan, dtype=dtype, device=device) \
                    .permute(perm3).contiguous().permute(invert_perm(perm3))
                torch.bmm(b1, b2, out=res2)
                expect = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
                self.assertEqual(expect, res1)
                self.assertEqual(expect, res2)
    finally:
        # Always restore the caller's thread count.
        torch.set_num_threads(num_threads)
def test_conj_neg_tolist(self):
x = torch.randn(2, dtype=torch.cfloat)
y1 = x.conj()
y1_expect = x.conj_physical()
y2 = y1.imag
self.assertEqual(y1, y1_expect.tolist())
self.assertEqual(y2, y1_expect.imag.tolist())
# The following block extends TestTorch with negative dim wrapping tests
# FIXME: replace these with OpInfo sample inputs or systemic OpInfo tests
# Functions to test negative dimension wrapping
# Distinct markers describing which call variants of an op to exercise
# (membership-tested against the `types` list of each declaration below).
METHOD = 1  # tensor.op(...)
INPLACE_METHOD = 2  # tensor.op_(...)
FUNCTIONAL = 4  # torch.op(tensor, ...)
# Sentinel marking argument positions that hold a dimension index.
DIM_ARG = None
def make_neg_dim_test(name, tensor_arg, arg_constr, types, extra_dim=0):
    """Build a test asserting op `name` treats dim d and d - ndim identically.

    tensor_arg: input shape, or a list of shapes for multi-input functionals;
    arg_constr: returns the argument template, with DIM_ARG marking dim slots;
    types: which call variants (METHOD / INPLACE_METHOD / FUNCTIONAL) to try;
    extra_dim: extra dims the op adds (e.g. unsqueeze) widening the dim range.
    """
    def neg_dim_test(self):
        if isinstance(tensor_arg, list):
            # Multi-input ops (e.g. cat) only make sense as functionals.
            assert METHOD not in types and INPLACE_METHOD not in types
            x = [torch.randn(arg) for arg in tensor_arg]
            ndim = len(tensor_arg[-1])
        else:
            x = torch.randn(*tensor_arg)
            ndim = len(tensor_arg)
        ndim += extra_dim

        n_dim_to_test = sum(e is DIM_ARG for e in arg_constr())

        # For every combination of dims filling the DIM_ARG slots, build one
        # positive-index argument list and its negative-index twin.
        for dims_val in combinations(range(ndim), n_dim_to_test):
            arg = arg_constr()
            arg_neg = copy.deepcopy(arg)
            idx = 0
            for i, v in enumerate(arg):
                if v is DIM_ARG:
                    arg[i] = dims_val[idx]
                    arg_neg[i] = dims_val[idx] - ndim
                    idx += 1

            if METHOD in types:
                a = getattr(x, name)(*arg)
                b = getattr(x, name)(*arg_neg)
                self.assertEqual(a, b)

            if INPLACE_METHOD in types:
                # Clone first: the in-place variant mutates its receiver.
                a = x.clone()
                getattr(a, name + '_')(*arg)
                b = x.clone()
                getattr(b, name + '_')(*arg_neg)
                self.assertEqual(a, b)

            if FUNCTIONAL in types:
                a = getattr(torch, name)(x, *arg)
                b = getattr(torch, name)(x, *arg_neg)
                self.assertEqual(a, b)

    return neg_dim_test
def idx_tensor(size, max_val):
    """Return a LongTensor of shape `size` with random indices in [0, max_val).

    ``Tensor.random_(from, to)`` samples from the half-open range [from, to),
    so the upper bound is ``max_val`` itself; the previous ``max_val - 1``
    silently excluded the largest valid index from ever being generated.
    """
    return torch.LongTensor(*size).random_(0, max_val)
def add_neg_dim_tests():
    """Generate and attach negative-dim wrapping tests to TestTorch.

    Each declaration is (name, tensor_arg, arg_constr, types[, extra_dim]);
    a test named 'test_<name>_neg_dim' is synthesized for each one.
    """
    neg_dim_tests = [
        ('narrow', (10, 20, 30), lambda: [DIM_ARG, 0, 5], [METHOD]),
        ('transpose', (10, 20, 30), lambda: [DIM_ARG, DIM_ARG], [METHOD, INPLACE_METHOD, FUNCTIONAL]),
        ('size', (10, 20, 30), lambda: [DIM_ARG], [METHOD]),
        ('cat', [(2, 3, 4), (2, 3, 4)], lambda: [DIM_ARG], [FUNCTIONAL]),
        ('chunk', (10, 20, 30), lambda: [5, DIM_ARG], [METHOD, FUNCTIONAL]),
        ('gather', (10, 20), lambda: [DIM_ARG, idx_tensor((10, 20), 10)], [METHOD, FUNCTIONAL]),
        ('index_select', (10, 10), lambda: [DIM_ARG, idx_tensor((10,), 10)], [METHOD, FUNCTIONAL]),
        ('split', (10, 20), lambda: [5, DIM_ARG], [METHOD, FUNCTIONAL]),
        ('squeeze', (10, 1, 20, 1), lambda: [DIM_ARG], [METHOD, INPLACE_METHOD, FUNCTIONAL]),
        ('unbind', (2, 3, 4), lambda: [DIM_ARG], [FUNCTIONAL]),
        ('unsqueeze', (10, 20), lambda: [DIM_ARG], [METHOD, INPLACE_METHOD, FUNCTIONAL], 1),
        ('logcumsumexp', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('cumprod', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('cumsum', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('cummax', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('cummin', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('mean', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('median', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('nanmedian', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('mode', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('norm', (10, 20), lambda: [2, DIM_ARG], [METHOD, FUNCTIONAL]),
        ('prod', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('std', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('sum', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('var', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('kthvalue', (10, 20), lambda: [3, DIM_ARG], [METHOD, FUNCTIONAL]),
        ('max', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('min', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('sort', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('topk', (10, 20), lambda: [5, DIM_ARG], [METHOD, FUNCTIONAL]),
        ('renorm', (10, 20), lambda: [2, DIM_ARG, 1], [METHOD, INPLACE_METHOD, FUNCTIONAL]),
        ('index_add', (10, 10), lambda: [DIM_ARG, idx_tensor((10,), 10), torch.randn(10, 10)], [INPLACE_METHOD]),
        ('index_copy', (10, 10), lambda: [DIM_ARG, idx_tensor((10,), 10), torch.randn(10, 10)], [INPLACE_METHOD]),
        ('index_fill', (10, 10), lambda: [DIM_ARG, idx_tensor((10,), 10), 12], [INPLACE_METHOD]),
        ('scatter', (10, 10), lambda: [DIM_ARG, idx_tensor((10, 10), 10), torch.randn(10, 10)], [INPLACE_METHOD]),
        ('select', (10, 20), lambda: [DIM_ARG, 3], [METHOD]),
        ('unfold', (10, 20), lambda: [DIM_ARG, 5, 2], [METHOD]),
    ]

    for decl in neg_dim_tests:
        # Star-unpack so a malformed declaration raises immediately instead
        # of silently reusing variables left over from a previous iteration
        # (the old len()==4/len()==5 chain had no else branch).
        name, tensor_arg, arg_constr, types, *rest = decl
        assert len(rest) <= 1, "Malformed neg_dim test declaration: " + name
        extra_dim = rest[0] if rest else 0

        test_name = 'test_' + name + '_neg_dim'

        assert not hasattr(TestTorch, test_name), "Duplicated test name: " + test_name
        setattr(TestTorch, test_name, make_neg_dim_test(name, tensor_arg, arg_constr, types, extra_dim))
# TODO: these empy classes are temporarily instantiated for XLA compatibility
# once XLA updates their test suite it should be removed
class TestViewOps(TestCase):
    pass

class TestTensorDeviceOps(TestCase):
    pass

# Generates tests
# Note: test generation must be done at file scope, not within main, or
# pytest will fail.
add_neg_dim_tests()
instantiate_device_type_tests(TestViewOps, globals())
instantiate_device_type_tests(TestVitalSignsCuda, globals())
instantiate_device_type_tests(TestTensorDeviceOps, globals())
instantiate_device_type_tests(TestTorchDeviceType, globals())
# TestDevicePrecision is accelerator-only: never instantiated for CPU.
instantiate_device_type_tests(TestDevicePrecision, globals(), except_for='cpu')

if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_torch.py |
# Owner(s): ["module: cpp-extensions"]
import os
import shutil
import sys
import unittest
import torch.testing._internal.common_utils as common
from torch.testing._internal.common_utils import IS_ARM64
import torch
import torch.utils.cpp_extension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
# True when a CUDA runtime is usable and a CUDA toolkit install was located.
TEST_CUDA = torch.cuda.is_available() and CUDA_HOME is not None
TEST_CUDNN = False
# ROCm builds report a hip version and need ROCM_HOME on disk.
TEST_ROCM = torch.cuda.is_available() and torch.version.hip is not None and ROCM_HOME is not None
if TEST_CUDA and torch.version.cuda is not None:  # the skip CUDNN test for ROCm
    CUDNN_HEADER_EXISTS = os.path.isfile(os.path.join(CUDA_HOME, "include/cudnn.h"))
    TEST_CUDNN = (
        TEST_CUDA and CUDNN_HEADER_EXISTS and torch.backends.cudnn.is_available()
    )
def remove_build_path():
    """Delete the default cpp-extension build root (no-op on Windows)."""
    if sys.platform == "win32":
        # Not wiping extensions build folder because Windows
        return
    build_root = torch.utils.cpp_extension.get_default_build_root()
    if os.path.exists(build_root):
        shutil.rmtree(build_root, ignore_errors=True)
class TestCppExtensionOpenRgistration(common.TestCase):
    """Tests Open Device Registration with C++ extensions.
    """
    # NOTE(review): class name is missing the 'e' in "Registration"; kept
    # as-is because renaming would break any external reference to the test.

    def setUp(self):
        super().setUp()
        # cpp extensions use relative paths. Those paths are relative to
        # this file, so we'll change the working directory temporarily
        self.old_working_dir = os.getcwd()
        os.chdir(os.path.dirname(os.path.abspath(__file__)))

    def tearDown(self):
        super().tearDown()
        # return the working directory (see setUp)
        os.chdir(self.old_working_dir)

    @classmethod
    def setUpClass(cls):
        # Start from a clean build root so stale artifacts can't leak in.
        remove_build_path()

    @classmethod
    def tearDownClass(cls):
        remove_build_path()

    @unittest.skipIf(IS_ARM64, "Does not work on arm")
    def test_open_device_registration(self):
        # JIT-compile the extension that registers the custom device and its
        # kernels with the dispatcher.
        module = torch.utils.cpp_extension.load(
            name="custom_device_extension",
            sources=[
                "cpp_extensions/open_registration_extension.cpp",
            ],
            extra_include_paths=["cpp_extensions"],
            extra_cflags=["-g"],
            verbose=True,
        )

        self.assertFalse(module.custom_add_called())

        # create a tensor using our custom device object.
        device = module.custom_device()

        x = torch.empty(4, 4, device=device)
        y = torch.empty(4, 4, device=device)

        # Check that our device is correct.
        self.assertTrue(x.device == device)
        self.assertFalse(x.is_cpu)
        self.assertFalse(module.custom_add_called())

        # calls out custom add kernel, registered to the dispatcher
        z = x + y

        # check that it was called
        self.assertTrue(module.custom_add_called())

        z_cpu = z.to(device='cpu')

        # Check that our cross-device copy correctly copied the data to cpu
        self.assertTrue(z_cpu.is_cpu)
        self.assertFalse(z.is_cpu)
        self.assertTrue(z.device == device)
        self.assertEqual(z, z_cpu)

        z2 = z_cpu + z_cpu
        # None of our CPU operations should call the custom add function.
        self.assertFalse(module.custom_add_called())
# Standard PyTorch test-suite entry point.
if __name__ == "__main__":
    common.run_tests()
| pytorch-master | test/test_cpp_extensions_open_device_registration.py |
# Owner(s): ["module: tests"]
import torch
import numpy as np
import math
from numbers import Number
import random
import unittest
from torch._six import inf, nan
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
torch_to_numpy_dtype_dict,
numpy_to_torch_dtype_dict,
suppress_warnings,
TEST_SCIPY,
slowTest,
skipIfNoSciPy,
IS_WINDOWS,
gradcheck,
TEST_WITH_ASAN,
)
from torch.testing._internal.common_methods_invocations import (
unary_ufuncs,
generate_elementwise_unary_tensors,
generate_elementwise_unary_small_value_tensors,
generate_elementwise_unary_large_value_tensors,
generate_elementwise_unary_extremal_value_tensors,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
dtypes,
onlyCPU,
onlyNativeDeviceTypes,
onlyCUDA,
dtypesIfCUDA,
precisionOverride,
dtypesIfCPU,
)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
floating_types_and,
all_types_and_complex_and,
integral_types_and,
get_all_math_dtypes,
complex_types,
all_types_and,
floating_and_complex_types_and,
)
if TEST_SCIPY:
import scipy
# Refer [scipy reference filter]
# Filter operators for which the reference function
# is available in the current environment (for reference_numerics tests).
reference_filtered_ops = [op for op in unary_ufuncs if op.ref is not None]
# Tests for unary "universal functions (ufuncs)" that accept a single
# tensor and have common properties like:
# - they are elementwise functions
# - the input shape is the output shape
# - they typically have method and inplace variants
# - they typically support the out kwarg
# - they typically have NumPy or SciPy references
# See NumPy's universal function documentation
# (https://numpy.org/doc/1.18/reference/ufuncs.html) for more details
# about the concept of ufuncs.
# TODO: port test_unary_out_op_mem_overlap
# TODO: add test for inplace variants erroring on broadcasted inputs
class TestUnaryUfuncs(TestCase):
exact_dtype = True
@ops(
    [_fn for _fn in unary_ufuncs if _fn.domain != (None, None)],
    allowed_dtypes=floating_types_and(torch.bfloat16, torch.half),
)
def test_float_domains(self, device, dtype, op):
    """Inputs just outside an op's domain must map to NaN.

    Probes each finite domain boundary at several offsets; offsets that
    vanish in imprecise dtypes (e.g. bfloat16) are skipped. The lower and
    upper boundary checks previously duplicated the same loop; they now
    share one helper (the rendered failure messages are unchanged).
    """
    eps = (1e-5, 1e-3, 1e-1, 1, 2, 10, 20, 50, 100)

    def _check_boundary(boundary, direction, side):
        # direction is -1 to step below the lower bound, +1 above the upper.
        boundary_tensor = torch.tensor(boundary, device=device, dtype=dtype)
        for epsilon in eps:
            outside = boundary_tensor + direction * epsilon
            # Skips the test if the difference is not representable,
            # which can occur if, for example, the difference is small
            # and the dtype is imprecise (like bfloat16 is)
            if outside.item() == boundary_tensor.item():
                continue
            result = op(outside)
            self.assertEqual(
                result.item(),
                float("nan"),
                msg=(
                    "input of {0} outside {1} domain boundary"
                    " {2} produced {3}, not nan!"
                ).format(outside.item(), side, boundary, result.item()),
            )

    low, high = op.domain
    if low is not None:
        _check_boundary(low, -1, "lower")
    if high is not None:
        _check_boundary(high, 1, "upper")
# Helper for comparing torch tensors and numpy arrays
# TODO: should this or assertEqual also validate that strides are equal?
def assertEqualHelper(
    self, actual, expected, msg, *, dtype, exact_dtype=True, **kwargs
):
    """Compare a torch tensor against a NumPy scalar or array, with special
    dtype rules for dtypes NumPy lacks (bfloat16) or promotes (float16)."""
    assert isinstance(actual, torch.Tensor)

    # Some NumPy functions return scalars, not arrays
    if isinstance(expected, Number):
        self.assertEqual(actual.item(), expected, msg, **kwargs)
    elif isinstance(expected, np.ndarray):
        # Handles exact dtype comparisons between arrays and tensors
        if exact_dtype:
            if (
                actual.dtype is torch.bfloat16
                or expected.dtype != torch_to_numpy_dtype_dict[actual.dtype]
            ):
                # Allows array dtype to be float32 when comparing with bfloat16 tensors
                # since NumPy doesn't support the bfloat16 dtype
                # Also ops like scipy.special.erf, scipy.special.erfc, etc, promote float16
                # to float32
                if expected.dtype == np.float32:
                    assert actual.dtype in (
                        torch.float16,
                        torch.bfloat16,
                        torch.float32,
                    )
                elif expected.dtype == np.float64:
                    assert actual.dtype in (
                        torch.float16,
                        torch.bfloat16,
                        torch.float32,
                        torch.float64,
                    )
                else:
                    self.fail(
                        "Expected dtype {0} but got {1}!".format(
                            expected.dtype, actual.dtype
                        )
                    )

        # Convert the NumPy result to the tensor's dtype before comparing.
        self.assertEqual(
            actual,
            torch.from_numpy(expected).to(actual.dtype),
            msg,
            exact_device=False,
            **kwargs
        )
    else:
        self.assertEqual(actual, expected, msg, exact_device=False, **kwargs)
# Tests that the function and its (array-accepting) reference produce the same
# values on given tensors
def _test_reference_numerics(self, dtype, op, tensors, equal_nan=True):
    """Compare op(t) with op.ref(numpy(t)) for every sample in `tensors`,
    applying per-dtype tolerances and dtype-exactness rules."""
    def _helper_reference_numerics(
        expected, actual, msg, exact_dtype, equal_nan=True
    ):
        # If NumPy's result dtype isn't even castable to `dtype`, don't
        # demand exact dtype agreement.
        if not torch.can_cast(
            numpy_to_torch_dtype_dict[expected.dtype.type], dtype
        ):
            exact_dtype = False

        if dtype in [torch.uint8, torch.int8, torch.bool]:
            # NOTE: For these dtypes, PyTorch computes in the default scalar type (float)
            # while NumPy computes in float16
            self.assertEqualHelper(
                actual,
                expected,
                msg,
                dtype=dtype,
                exact_dtype=exact_dtype,
                rtol=1e-3,
                atol=1e-2,
            )
        elif dtype is torch.bfloat16:
            # Ref: https://github.com/pytorch/pytorch/blob/master/torch/testing/_internal/common_utils.py#L1149
            self.assertEqualHelper(
                actual,
                expected,
                msg,
                dtype=dtype,
                exact_dtype=exact_dtype,
                rtol=16e-3,
                atol=1e-5,
            )
        else:
            self.assertEqualHelper(
                actual,
                expected,
                msg,
                dtype=dtype,
                equal_nan=equal_nan,
                exact_dtype=exact_dtype,
            )

    for t in tensors:
        t = t.input
        torch_kwargs, numpy_kwargs = op.sample_kwargs(t.device, dtype, t)
        # NumPy has no bfloat16/complex32, so compute the reference in the
        # nearest wider dtype it does support.
        if dtype is torch.bfloat16:
            a = t.cpu().to(torch.float32).numpy()
        elif dtype is torch.complex32:
            a = t.cpu().to(torch.complex64).numpy()
        else:
            a = t.cpu().numpy()

        actual = op(t, **torch_kwargs)
        expected = op.ref(a, **numpy_kwargs)

        # Crafts a custom error message for smaller, printable tensors
        if t.numel() < 10:
            msg = (
                "Failed to produce expected results! Input tensor was"
                " {0}, torch result is {1}, and reference result is"
                " {2}."
            ).format(t, actual, expected)
        else:
            msg = None

        exact_dtype = True
        if isinstance(actual, torch.Tensor):
            _helper_reference_numerics(
                expected, actual, msg, exact_dtype, equal_nan
            )
        else:
            for x, y in zip(expected, actual):
                # testing multi-outputs results
                _helper_reference_numerics(x, y, msg, exact_dtype, equal_nan)
# Tests that the function and its (array-accepting) reference produce the same
# values on a range of tensors, including empty tensors, scalar tensors,
# 1D tensors and a large 2D tensor with interesting and extremal values
# and noncontiguities.
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@suppress_warnings
@ops(reference_filtered_ops)
def test_reference_numerics_normal(self, device, dtype, op):
    """Compare op against its reference on ordinary value tensors."""
    self._test_reference_numerics(
        dtype,
        op,
        generate_elementwise_unary_tensors(
            op, device=device, dtype=dtype, requires_grad=False
        ),
    )
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@suppress_warnings
@ops(reference_filtered_ops)
def test_reference_numerics_small(self, device, dtype, op):
    """Compare op against its reference on small-magnitude values."""
    if dtype in (torch.bool,):
        raise self.skipTest("bool has no small values")

    self._test_reference_numerics(
        dtype,
        op,
        generate_elementwise_unary_small_value_tensors(
            op, device=device, dtype=dtype, requires_grad=False
        ),
    )
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@suppress_warnings
@ops(reference_filtered_ops)
def test_reference_numerics_large(self, device, dtype, op):
    """Compare op against its reference on large-magnitude values."""
    if dtype in (torch.bool, torch.uint8, torch.int8):
        raise self.skipTest("bool, uint8, and int8 dtypes have no large values")

    self._test_reference_numerics(
        dtype,
        op,
        generate_elementwise_unary_large_value_tensors(
            op, device=device, dtype=dtype, requires_grad=False
        ),
    )
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@suppress_warnings
@ops(
    reference_filtered_ops,
    allowed_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.half),
)
def test_reference_numerics_extremal(self, device, dtype, op):
    """Compare op against its reference on extremal values (inf/nan/etc.)."""
    self._test_reference_numerics(
        dtype,
        op,
        generate_elementwise_unary_extremal_value_tensors(
            op, device=device, dtype=dtype, requires_grad=False
        ),
    )
# Tests for testing (non)contiguity consistency
@ops(unary_ufuncs)
def test_contig_vs_every_other(self, device, dtype, op):
    """An every-other strided view must match the contiguous computation."""
    whole = make_tensor(
        (1026,), device=device, dtype=dtype, low=op.domain[0], high=op.domain[1]
    )
    strided = whole[::2]
    self.assertTrue(whole.is_contiguous())
    self.assertFalse(strided.is_contiguous())

    torch_kwargs, _ = op.sample_kwargs(device, dtype, strided)
    self.assertEqual(
        op(whole, **torch_kwargs)[::2], op(strided, **torch_kwargs)
    )
@ops(unary_ufuncs)
def test_contig_vs_transposed(self, device, dtype, op):
    """A transposed (non-contiguous) view must match the contiguous result."""
    matrix = make_tensor(
        (789, 357), device=device, dtype=dtype, low=op.domain[0], high=op.domain[1]
    )
    transposed = matrix.T
    self.assertTrue(matrix.is_contiguous())
    self.assertFalse(transposed.is_contiguous())

    torch_kwargs, _ = op.sample_kwargs(device, dtype, matrix)
    self.assertEqual(op(matrix, **torch_kwargs).T, op(transposed, **torch_kwargs))
@ops(unary_ufuncs)
def test_non_contig(self, device, dtype, op):
    """Results on a strided copy must equal results on contiguous data."""
    for shape in ((5, 7), (1024,)):
        reference = make_tensor(
            shape, dtype=dtype, device=device, low=op.domain[0], high=op.domain[1]
        )
        # Carve a non-contiguous tensor out of a wider buffer, then fill it.
        strided = torch.empty(shape + (2,), device=device, dtype=dtype)[..., 0]
        strided.copy_(reference)
        self.assertTrue(reference.is_contiguous())
        self.assertFalse(strided.is_contiguous())

        torch_kwargs, _ = op.sample_kwargs(device, dtype, reference)
        self.assertEqual(op(reference, **torch_kwargs), op(strided, **torch_kwargs))
@ops(unary_ufuncs)
def test_non_contig_index(self, device, dtype, op):
    """An indexed (non-contiguous) view must match its contiguous copy."""
    view = make_tensor(
        (2, 2, 1, 2),
        dtype=dtype,
        device=device,
        low=op.domain[0],
        high=op.domain[1],
    )[:, 1, ...]
    packed = view.contiguous()
    self.assertTrue(packed.is_contiguous())
    self.assertFalse(view.is_contiguous())

    torch_kwargs, _ = op.sample_kwargs(device, dtype, packed)
    self.assertEqual(op(packed, **torch_kwargs), op(view, **torch_kwargs))
@ops(unary_ufuncs)
def test_non_contig_expand(self, device, dtype, op):
    """Every slice of an expanded (broadcast) input must match the base result."""
    for shape in ((1, 3), (1, 7), (5, 7)):
        base = make_tensor(
            shape, dtype=dtype, device=device, low=op.domain[0], high=op.domain[1]
        )
        expanded = base.clone().expand(3, -1, -1)
        self.assertTrue(base.is_contiguous())
        self.assertFalse(expanded.is_contiguous())

        torch_kwargs, _ = op.sample_kwargs(device, dtype, base)
        base_result = op(base, **torch_kwargs)
        expanded_result = op(expanded, **torch_kwargs)
        for i in range(3):
            self.assertEqual(
                base_result, expanded_result[i],
                msg="non-contiguous expand[" + str(i) + "]"
            )
@ops(unary_ufuncs)
def test_contig_size1(self, device, dtype, op):
    """A size-1-leading-dim slice (still contiguous) must match a fresh copy."""
    sliced = make_tensor(
        (5, 100), dtype=dtype, device=device, low=op.domain[0], high=op.domain[1]
    )[:1, :50]
    duplicate = torch.empty(sliced.size(), device=device, dtype=dtype)
    duplicate.copy_(sliced)
    self.assertTrue(sliced.is_contiguous())
    self.assertTrue(duplicate.is_contiguous())

    torch_kwargs, _ = op.sample_kwargs(device, dtype, sliced)
    self.assertEqual(op(sliced, **torch_kwargs), op(duplicate, **torch_kwargs))
@ops(unary_ufuncs)
def test_contig_size1_large_dim(self, device, dtype, op):
    """Size-1 leading-dim slice of a high-rank tensor must match its copy."""
    sliced = make_tensor(
        (5, 2, 3, 1, 4, 5, 3, 2, 1, 2, 3, 4),
        dtype=dtype,
        device=device,
        low=op.domain[0],
        high=op.domain[1],
    )[:1, :, :, :, :, :, :, :, :, :, :, :]
    duplicate = torch.empty(sliced.size(), device=device, dtype=dtype)
    duplicate.copy_(sliced)
    self.assertTrue(sliced.is_contiguous())
    self.assertTrue(duplicate.is_contiguous())

    torch_kwargs, _ = op.sample_kwargs(device, dtype, sliced)
    self.assertEqual(op(sliced, **torch_kwargs), op(duplicate, **torch_kwargs))
# Tests that computation on a multiple batches is the same as
# per-batch computation.
@ops(unary_ufuncs)
def test_batch_vs_slicing(self, device, dtype, op):
    """Applying op to a whole batch equals stacking per-row applications."""
    batch = make_tensor(
        (1024, 512), dtype=dtype, device=device, low=op.domain[0], high=op.domain[1]
    )
    torch_kwargs, _ = op.sample_kwargs(device, dtype, batch)
    whole = op(batch, **torch_kwargs)
    by_rows = torch.stack([op(row, **torch_kwargs) for row in batch])
    self.assertEqual(whole, by_rows)
@dtypes(*all_types_and(torch.bool, torch.half))
def test_nan_to_num(self, device, dtype):
    """nan_to_num must match np.nan_to_num for contiguous and transposed
    inputs, with and without explicit replacement values, and the out=
    variant must agree with the functional form."""
    for contiguous in [False, True]:
        x = make_tensor((64, 64), low=0.0, high=100.0, dtype=dtype, device=device)

        if dtype.is_floating_point:
            # Add extremal values.
            extremals = [float("nan"), float("inf"), -float("inf")]
            for idx, extremal in zip(torch.randint(0, 63, (3,)), extremals):
                x[idx, :] = extremal

        if not contiguous:
            x = x.T

        # With args
        nan = random.random()
        posinf = random.random() * 5
        neginf = random.random() * 10

        self.compare_with_numpy(
            lambda x: x.nan_to_num(nan=nan, posinf=posinf),
            lambda x: np.nan_to_num(x, nan=nan, posinf=posinf),
            x,
        )
        self.compare_with_numpy(
            lambda x: x.nan_to_num(posinf=posinf, neginf=neginf),
            lambda x: np.nan_to_num(x, posinf=posinf, neginf=neginf),
            x,
        )

        # Out Variant
        out = torch.empty_like(x)
        result = torch.nan_to_num(x)
        torch.nan_to_num(x, out=out)
        self.assertEqual(result, out)

        result = torch.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf)
        torch.nan_to_num(x, out=out, nan=nan, posinf=posinf, neginf=neginf)
        self.assertEqual(result, out)
    @onlyCPU
    def test_nan_to_num_bfloat16(self, device):
        """nan_to_num in bfloat16: forward and backward agree with float32."""
        def test_dtype(fn, input, dtype):
            # Run fn on the input in `dtype` and in float32, then compare
            # outputs and gradients across the two precisions.
            input = input.detach().clone().to(dtype=dtype).requires_grad_(True)
            input2 = input.detach().clone().float().requires_grad_(True)
            out = fn(input)
            out.sum().backward()
            out2 = fn(input2)
            out2.sum().backward()
            self.assertEqual(out.dtype, dtype)
            self.assertEqual(input.grad.dtype, dtype)
            self.assertEqual(out, out2, exact_dtype=False)
            self.assertEqual(input.grad, input2.grad, exact_dtype=False)
        def func():
            # Indirection returning the op under test.
            return torch.nan_to_num
        shapes = [[1, 3, 6, 6], [1, 3, 6, 128], [1, 3, 256, 256]]
        for shape in shapes:
            x = torch.randn(shape, device=device)
            extremals = [float('nan'), float('inf'), -float('inf')]
            # Plant nan/inf/-inf along random rows of the first batch element.
            for id1, id2, extremal in zip(torch.randint(0, 2, (3,)), torch.randint(0, 5, (3,)), extremals):
                x[0, id1, id2, :] = extremal
            test_dtype(func(), x, torch.bfloat16)
    @dtypes(torch.cdouble)
    def test_complex_edge_values(self, device, dtype):
        """sqrt/acos on complex values with very large magnitudes match NumPy."""
        # sqrt Test Reference: https://github.com/pytorch/pytorch/pull/47424
        x = torch.tensor(0.0 - 1.0e20j, dtype=dtype, device=device)
        self.compare_with_numpy(torch.sqrt, np.sqrt, x)
        # acos test reference: https://github.com/pytorch/pytorch/issue/42952
        # Skip on Windows, as CUDA acos returns conjugate value
        # see https://github.com/pytorch/pytorch/issues/52299
        if not (IS_WINDOWS and dtype == torch.cdouble and "cuda" in device):
            self.compare_with_numpy(torch.acos, np.arccos, x)
        # Larger real part for cdouble since its range allows it.
        x = torch.tensor(
            (-1.0e60 if dtype == torch.cdouble else -1.0e20) - 4988429.2j,
            dtype=dtype,
            device=device,
        )
        self.compare_with_numpy(torch.sqrt, np.sqrt, x)
    @unittest.skipIf(not TEST_SCIPY, "Requires SciPy")
    @dtypes(torch.float, torch.double)
    def test_digamma_special(self, device, dtype):
        """digamma at special rational points with known closed-form values.

        Each (input, expected) pair below is a textbook identity for psi(x);
        the expected values are compared against scipy.special.digamma.
        """
        # Based on SciPy test for the following special values.
        # Reference:
        # https://github.com/scipy/scipy/blob/3a8a3a1d4657254a6611e77e9c28feafa26e6645/scipy/special/tests/test_digamma.py#L22
        euler = 0.57721566490153286  # Euler-Mascheroni constant
        dataset = [
            (0.0, -0.0),
            (1, -euler),
            (0.5, -2 * math.log(2) - euler),
            (1 / 3, -math.pi / (2 * math.sqrt(3)) - 3 * math.log(3) / 2 - euler),
            (1 / 4, -math.pi / 2 - 3 * math.log(2) - euler),
            (
                1 / 6,
                -math.pi * math.sqrt(3) / 2
                - 2 * math.log(2)
                - 3 * math.log(3) / 2
                - euler,
            ),
            (
                1 / 8,
                -math.pi / 2
                - 4 * math.log(2)
                - (math.pi + math.log(2 + math.sqrt(2)) - math.log(2 - math.sqrt(2)))
                / math.sqrt(2)
                - euler,
            ),
        ]
        x = torch.tensor(dataset, device=device, dtype=dtype)
        self.compare_with_numpy(torch.digamma, scipy.special.digamma, x)
    @unittest.skipIf(not TEST_SCIPY, "Requires SciPy")
    @dtypes(torch.float, torch.double)
    def test_digamma(self, device, dtype):
        """digamma near its poles (non-positive integers) matches SciPy."""
        # Tests pole behavior: values just next to the poles at 0, -1, -2, ...
        # plus the poles themselves.
        tensor = torch.tensor(
            [
                -0.999999994,
                -1.999999994,
                -2.0000000111,
                -100.99999994,
                0.000000111,
                -1931.99999994,
                -0.000000111,
                0,
                -0,
                -1,
                -2,
                -931,
            ],
            dtype=dtype,
            device=device,
        )
        self.compare_with_numpy(torch.digamma, scipy.special.digamma, tensor)
    @dtypes(*floating_types_and(torch.half))
    def test_frexp(self, device, dtype):
        """torch.frexp matches np.frexp in values and exponent dtype (int32)."""
        input = make_tensor((50, 50), dtype=dtype, device=device)
        mantissa, exponent = torch.frexp(input)
        np_mantissa, np_exponent = np.frexp(input.cpu().numpy())
        self.assertEqual(mantissa, np_mantissa)
        self.assertEqual(exponent, np_exponent)
        # torch.frexp returns exponent in int32 to be compatible with np.frexp
        self.assertTrue(exponent.dtype == torch.int32)
        self.assertTrue(torch_to_numpy_dtype_dict[exponent.dtype] == np_exponent.dtype)
    def test_frexp_assert_raises(self, device):
        """frexp rejects non-floating inputs and mismatched out= dtypes."""
        # Integral/bool/complex inputs are unsupported.
        invalid_input_dtypes = integral_types_and(torch.bool) + complex_types()
        for dtype in invalid_input_dtypes:
            input = make_tensor((50, 50), dtype=dtype, device=device)
            with self.assertRaisesRegex(
                RuntimeError, r"torch\.frexp\(\) only supports floating-point dtypes"
            ):
                torch.frexp(input)
        for dtype in floating_types_and(torch.half):
            input = make_tensor((50, 50), dtype=dtype, device=device)
            dtypes = list(
                all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16)
            )
            # Mantissa out= must match the input dtype exactly.
            dtypes.remove(dtype)
            for mantissa_dtype in dtypes:
                mantissa = torch.empty_like(input, dtype=mantissa_dtype)
                exponent = torch.empty_like(input, dtype=torch.int)
                with self.assertRaisesRegex(
                    RuntimeError,
                    r"torch\.frexp\(\) expects mantissa to have dtype .+ but got .+",
                ):
                    torch.frexp(input, out=(mantissa, exponent))
            # Exponent out= must be int; reuse the dtype list minus torch.int.
            dtypes.append(dtype)
            dtypes.remove(torch.int)
            for exponent_dtype in dtypes:
                mantissa = torch.empty_like(input)
                exponent = torch.empty_like(input, dtype=exponent_dtype)
                with self.assertRaisesRegex(
                    RuntimeError,
                    r"torch\.frexp\(\) expects exponent to have int dtype but got .+",
                ):
                    torch.frexp(input, out=(mantissa, exponent))
def test_mvlgamma_argcheck(self, device):
def run_test(d):
input = torch.linspace((d - 2) / 2, 10, 10, device=device)
torch.mvlgamma(input, d)
with self.assertRaisesRegex(
RuntimeError, r"All elements must be greater than \(p-1\)/2"
):
run_test(3)
def test_polygamma_neg(self, device):
with self.assertRaisesRegex(
RuntimeError, r"polygamma\(n, x\) does not support negative n\."
):
torch.polygamma(-1, torch.tensor([1.0, 2.0], device=device))
    # TODO resolve with opinfos
    @onlyCPU
    def test_op_invert(self, device):
        """Bitwise NOT (~) on integer and bool tensors; TypeError on floats."""
        # 0xffff - a wraps in int8 arithmetic to -1 - a, which is exactly ~a
        # in two's complement; casting res to each dtype gives the expected NOT.
        res = 0xFFFF - torch.arange(127, dtype=torch.int8)
        for dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):
            a = torch.arange(127, dtype=dtype)
            self.assertEqual(res.to(dtype), ~a)
        # For bool, ~ is logical negation.
        self.assertEqual(torch.tensor([True, False]), ~torch.tensor([False, True]))
        # test exceptions
        for dtype in (torch.half, torch.float, torch.double):
            a = torch.zeros(10, dtype=dtype)
            with self.assertRaises(TypeError):
                b = ~a
    @dtypes(torch.complex64, torch.complex128)
    def test_abs_angle_complex_to_float(self, device, dtype):
        """abs/angle on complex inputs vs NumPy, including out= dtype handling.

        Checks the functional result, float out=, complex out= (with resize),
        rejection of long out=, and in-place behavior for abs.
        """
        # Constructs random complex values
        from random import random
        random_vals = []
        for multiplier in (-1, 1, -10, 10, -100, 100):
            for _ in range(10):
                random_vals.append(
                    complex(random() * multiplier, random() * multiplier)
                )
        # Also exercise the empty-input case.
        for vals in (random_vals, []):
            a = np.array(vals, dtype=torch_to_numpy_dtype_dict[dtype])
            t = torch.tensor(vals, device=device, dtype=dtype)
            for fn_name in ("abs", "angle"):
                torch_fn = getattr(torch, fn_name)
                np_fn = getattr(np, fn_name)
                # Tests function
                np_result = torch.from_numpy(np_fn(a))
                torch_result = torch_fn(t).cpu()
                self.assertEqual(np_result, torch_result, exact_dtype=True)
                # Tests float out
                float_dtype = (
                    torch.float32 if dtype is torch.complex64 else torch.float64
                )
                np_float_out = np_fn(a).astype(torch_to_numpy_dtype_dict[float_dtype])
                float_out = torch.empty_like(t).float()
                torch_fn(t, out=float_out)
                # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
                self.assertEqualIgnoreType(
                    torch.from_numpy(np_float_out), float_out.cpu()
                )
                # Tests float out (resized out)
                float_out = torch.empty(1, device=device, dtype=float_dtype)
                torch_fn(t, out=float_out)
                self.assertEqual(torch.from_numpy(np_float_out), float_out.cpu())
                # Tests complex out
                np_complex_out = np_fn(a)
                complex_out = torch.empty_like(t)
                torch_fn(t, out=complex_out)
                # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
                self.assertEqualIgnoreType(
                    torch.from_numpy(np_complex_out), complex_out.cpu()
                )
                # Tests complex out (resized out)
                complex_out = torch.empty(0, device=device, dtype=dtype)
                torch_fn(t, out=complex_out)
                # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
                self.assertEqualIgnoreType(
                    torch.from_numpy(np_complex_out), complex_out.cpu()
                )
                # Tests long out behavior (expected failure)
                long_out = torch.empty(0, device=device, dtype=torch.long)
                with self.assertRaises(RuntimeError):
                    torch_fn(t, out=long_out)
                # Tests inplace
                if fn_name == "abs":
                    torch_inplace_method = getattr(torch.Tensor, fn_name + "_")
                    np_fn(a, out=a)
                    if dtype.is_complex:
                        with self.assertRaisesRegex(
                            RuntimeError,
                            "In-place abs is not supported for complex tensors.",
                        ):
                            torch_inplace_method(t)
                        # NOTE(review): this early return exits the whole test on
                        # the first complex abs case; since this test only runs
                        # with complex dtypes, the code below it never executes
                        # for abs and the outer loops stop here — confirm intent.
                        return
                    torch_inplace_method(t)
                    self.assertEqual(torch.from_numpy(a), t.cpu())
                # Note: angle does not have an in-place variant
                if fn_name == "angle":
                    with self.assertRaises(AttributeError):
                        torch_inplace_method = getattr(torch.Tensor, fn_name + "_")
    def check_internal_mem_overlap(
        self, inplace_op, num_inputs, dtype, device, expected_failure=False
    ):
        """Assert an in-place op rejects a self-overlapping (expanded) tensor.

        `inplace_op` may be a callable or the name of a torch.Tensor method.
        With expected_failure=True, asserts that the overlap check is (still)
        missing, i.e. the RuntimeError is NOT raised.
        """
        if isinstance(inplace_op, str):
            inplace_op = getattr(torch.Tensor, inplace_op)
        # expand() of a single element aliases every position onto one location.
        input = torch.randn(1, dtype=dtype, device=device).expand(3, 3)
        inputs = [input] + [torch.randn_like(input) for i in range(num_inputs - 1)]
        if not expected_failure:
            with self.assertRaisesRegex(RuntimeError, "single memory location"):
                inplace_op(*inputs)
        else:
            # Inverted expectation: the inner assertion must fail.
            with self.assertRaises(AssertionError):
                with self.assertRaisesRegex(RuntimeError, "single memory location"):
                    inplace_op(*inputs)
    def unary_check_input_output_mem_overlap(
        self, data, sz, op, expected_failure=False
    ):
        """Check out= semantics of `op` under input/output aliasing.

        Exact aliasing (out is input) and disjoint buffers must both work;
        partial overlap must raise "unsupported operation". With
        expected_failure=True the partial-overlap check is asserted missing.
        """
        def _test(op, output, input):
            # Compare against a run into a fresh buffer of the same shape.
            output_exp = torch.empty_like(output)
            op(input, out=output_exp)
            self.assertEqual(op(input, out=output), output_exp, msg=op.__name__)
        # output is identical to input:
        _test(op, output=data[0:sz], input=data[0:sz])
        # output and input are independent:
        _test(op, output=data[0:sz], input=data[sz : 2 * sz])
        # output partially overlaps with input:
        if not expected_failure:
            with self.assertRaisesRegex(RuntimeError, "unsupported operation"):
                _test(op, data[0:sz], data[1 : sz + 1])
        else:
            with self.assertRaises(AssertionError):
                with self.assertRaisesRegex(RuntimeError, "unsupported operation"):
                    _test(op, data[0:sz], data[1 : sz + 1])
    # TODO: run on non-native device types
    @dtypes(torch.double)
    def test_unary_out_op_mem_overlap(self, device, dtype):
        """Run the two overlap checks above for a table of unary ops.

        Each row is (op name, input tensor, has input/output overlap check,
        has internal overlap check, device); rows whose device does not match
        the current one are skipped. A False flag marks a known-missing check
        (asserted via expected_failure).
        """
        sz = 3
        doubles = torch.randn(2 * sz, dtype=dtype, device=device)
        # log-family and rsqrt need strictly positive inputs.
        positives = torch.randint(1, 100, (2 * sz,), device=device).double()
        ints = torch.randint(-100, 100, (2 * sz,), device=device)
        unary_mem_overlap_cases = [
            ("abs", doubles, True, True, "cpu"),
            ("abs", doubles, True, True, "cuda"),
            ("acos", doubles, True, True, "cpu"),
            ("acos", doubles, True, True, "cuda"),
            ("asin", doubles, True, True, "cpu"),
            ("asin", doubles, True, True, "cuda"),
            ("atan", doubles, True, True, "cpu"),
            ("atan", doubles, True, True, "cuda"),
            ("acosh", doubles, True, True, "cpu"),
            ("acosh", doubles, True, True, "cuda"),
            ("asinh", doubles, True, True, "cpu"),
            ("asinh", doubles, True, True, "cuda"),
            ("atanh", doubles, True, True, "cpu"),
            ("atanh", doubles, True, True, "cuda"),
            ("bitwise_not", ints, True, True, "cpu"),
            ("bitwise_not", ints, True, True, "cuda"),
            ("ceil", doubles, True, True, "cpu"),
            ("ceil", doubles, True, True, "cuda"),
            ("cos", doubles, True, True, "cpu"),
            ("cos", doubles, True, True, "cuda"),
            ("cosh", doubles, True, True, "cpu"),
            ("cosh", doubles, True, True, "cuda"),
            ("digamma", doubles, True, True, "cpu"),
            ("erf", doubles, True, True, "cpu"),
            ("erf", doubles, True, True, "cuda"),
            ("erfc", doubles, True, True, "cpu"),
            ("erfc", doubles, True, True, "cuda"),
            ("erfinv", doubles, True, True, "cpu"),
            ("erfinv", doubles, True, True, "cuda"),
            ("exp", doubles, True, True, "cpu"),
            ("exp", doubles, True, True, "cuda"),
            ("exp2", doubles, True, True, "cpu"),
            ("exp2", doubles, True, True, "cuda"),
            ("expm1", doubles, True, True, "cpu"),
            ("expm1", doubles, True, True, "cuda"),
            ("floor", doubles, True, True, "cpu"),
            ("floor", doubles, True, True, "cuda"),
            ("frac", doubles, True, True, "cpu"),
            ("frac", doubles, True, True, "cuda"),
            ("i0", doubles, True, True, "cpu"),
            ("i0", doubles, True, True, "cuda"),
            ("log", positives, True, True, "cpu"),
            ("log", positives, True, True, "cuda"),
            ("log10", positives, True, True, "cpu"),
            ("log10", positives, True, True, "cuda"),
            ("log1p", positives, True, True, "cpu"),
            ("log1p", positives, True, True, "cuda"),
            ("log2", positives, True, True, "cpu"),
            ("log2", positives, True, True, "cuda"),
            ("neg", doubles, True, True, "cpu"),
            ("neg", doubles, True, True, "cuda"),
            ("reciprocal", doubles, True, True, "cpu"),
            ("reciprocal", doubles, True, True, "cuda"),
            ("round", doubles, True, True, "cpu"),
            ("round", doubles, True, True, "cuda"),
            ("rsqrt", positives, True, True, "cpu"),
            ("rsqrt", positives, True, True, "cuda"),
            ("sin", doubles, True, True, "cpu"),
            ("sin", doubles, True, True, "cuda"),
            ("sinh", doubles, True, True, "cpu"),
            ("sinh", doubles, False, True, "cuda"),
            ("sigmoid", doubles, True, True, "cpu"),
            ("sigmoid", doubles, True, True, "cuda"),
            ("logit", doubles, True, True, "cpu"),
            ("logit", doubles, True, True, "cuda"),
            ("sqrt", doubles, True, True, "cpu"),
            ("sqrt", doubles, False, True, "cuda"),
            ("tan", doubles, True, True, "cpu"),
            ("tan", doubles, True, True, "cuda"),
            ("tanh", doubles, True, True, "cpu"),
            ("tanh", doubles, True, True, "cuda"),
            ("trunc", doubles, True, True, "cpu"),
            ("trunc", doubles, True, True, "cuda"),
        ]
        for (
            fn,
            inputs,
            has_input_output_mem_overlap_check,
            has_internal_mem_overlap_check,
            dev,
        ) in unary_mem_overlap_cases:
            if dev != device:
                continue
            out_fn = getattr(torch, fn)
            in_fn = getattr(torch.Tensor, fn + "_")
            self.unary_check_input_output_mem_overlap(
                inputs,
                sz,
                out_fn,
                expected_failure=not has_input_output_mem_overlap_check,
            )
            self.check_internal_mem_overlap(
                in_fn,
                1,
                dtype,
                dev,
                expected_failure=not has_internal_mem_overlap_check,
            )
    # TODO: opinfo hardshrink
    @onlyCPU
    @dtypes(torch.float, torch.double, torch.bfloat16)
    def test_hardshrink(self, device, dtype):
        """hardshrink zeroes elements with |x| <= lambd and keeps the rest."""
        data = torch.tensor([1, 0.5, 0.3, 0.6], dtype=dtype, device=device).view(2, 2)
        self.assertEqual(
            torch.tensor([1, 0.5, 0, 0.6], dtype=dtype, device=device).view(2, 2),
            data.hardshrink(0.3),
        )
        self.assertEqual(
            torch.tensor([1, 0, 0, 0.6], dtype=dtype, device=device).view(2, 2),
            data.hardshrink(0.5),
        )
        # test default lambd=0.5
        self.assertEqual(data.hardshrink(), data.hardshrink(0.5))
        # test non-contiguous case
        self.assertEqual(
            torch.tensor([1, 0, 0.5, 0.6], dtype=dtype, device=device).view(2, 2),
            data.t().hardshrink(0.3),
        )
    @onlyCPU
    @dtypes(torch.float, torch.double, torch.bfloat16)
    def test_hardshrink_edge_cases(self, device, dtype) -> None:
        """hardshrink at extreme lambd values (0, tiny, max, inf) and +/-inf inputs."""
        def h(values, l_expected):
            # For each (lambd -> expected) pair, verify elementwise equality.
            for l, expected in l_expected.items():
                values_tensor = torch.tensor(
                    [float(v) for v in values], dtype=dtype, device=device
                )
                expected_tensor = torch.tensor(
                    [float(v) for v in expected], dtype=dtype, device=device
                )
                self.assertEqual(
                    expected_tensor == values_tensor.hardshrink(l),
                    torch.ones_like(values_tensor, dtype=torch.bool),
                )
        def test_helper(min, max):
            # min/max are the dtype's tiny and max finite values.
            h(
                [0.0, min, -min, 0.1, -0.1, 1.0, -1.0, max, -max, inf, -inf],
                {
                    0.0: [0.0, min, -min, 0.1, -0.1, 1.0, -1.0, max, -max, inf, -inf],
                    min: [0.0, 0.0, 0.0, 0.1, -0.1, 1.0, -1.0, max, -max, inf, -inf],
                    0.1: [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.0, max, -max, inf, -inf],
                    1.0: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, max, -max, inf, -inf],
                    max: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, inf, -inf],
                    inf: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                },
            )
        test_helper(torch.finfo(dtype).tiny, torch.finfo(dtype).max)
    @onlyCPU
    @slowTest
    @dtypes(torch.float)
    @unittest.skipIf(True, "Insufficient memory on linux.(2|4)xlarge")
    def test_exp_slow(self, device, dtype):
        """exp on a 2**31-element tensor equals the broadcast scalar result.

        Currently unconditionally skipped (memory); kept as a regression
        marker for the linked issue.
        """
        # Test for https://github.com/pytorch/pytorch/issues/17271
        # This is pretty slow on my Macbook but it only takes a few
        # seconds on a beefy Xeon server
        a = torch.exp(torch.ones(2**31, dtype=dtype, device=device))
        b = torch.exp(torch.ones(1, dtype=dtype, device=device))
        self.assertEqual(a, b.expand(2**31))
    @precisionOverride(
        {torch.bfloat16: 1e-2, torch.float: 0.0002, torch.double: 0.0002}
    )
    @dtypes(torch.float, torch.double, torch.bfloat16)
    def test_hardswish(self, device, dtype):
        """hardswish matches x * clamp(x + 3, 0, 6) / 6 computed via NumPy."""
        inputValues = [-1000, -4, -3, -2, 0, 2, 3, 4, 1000]
        expectedOutput = np.multiply(
            inputValues, np.minimum(np.maximum((np.add(inputValues, 3)), 0), 6) / 6.0
        )
        inputTensor = torch.tensor(inputValues, dtype=dtype, device=device)
        expectedOutputTensor = torch.tensor(expectedOutput, dtype=dtype, device=device)
        # normal
        self.assertEqual(
            torch.nn.functional.hardswish(inputTensor), expectedOutputTensor
        )
        # inplace
        inputTensorCpy = inputTensor.clone().detach()
        torch.nn.functional.hardswish(inputTensorCpy, inplace=True)
        self.assertEqual(inputTensorCpy, expectedOutputTensor)
    @precisionOverride(
        {torch.bfloat16: 1e-2, torch.float: 0.0002, torch.double: 0.0002}
    )
    @dtypes(torch.float, torch.double, torch.bfloat16)
    def test_hardsigmoid(self, device, dtype):
        """hardsigmoid matches clamp(x + 3, 0, 6) / 6 computed via NumPy."""
        inputValues = [-1000, -4, -3, -2, 0, 2, 3, 4, 1000]
        expectedOutput = np.minimum(np.maximum((np.add(inputValues, 3)), 0), 6) / 6.0
        inputTensor = torch.tensor(inputValues, dtype=dtype, device=device)
        # normal
        self.assertEqual(
            torch.nn.functional.hardsigmoid(inputTensor),
            torch.tensor(expectedOutput, dtype=dtype, device=device),
        )
        # inplace
        inputTensorCpy = inputTensor.clone().detach()
        self.assertEqual(
            torch.nn.functional.hardsigmoid(inputTensorCpy, inplace=True),
            torch.tensor(expectedOutput, dtype=dtype, device=device),
        )
@precisionOverride(
{torch.bfloat16: 1e-2, torch.float: 0.0002, torch.double: 0.0002}
)
@dtypes(torch.float, torch.double, torch.bfloat16)
def test_hardsigmoid_backward(self, device, dtype):
inputValues = [-3.0, 3.0, -2.0, 2.0, -6.0, 6.0]
expectedValues = [0.0, 0.0, 1.0 / 6.0, 1.0 / 6.0, 0.0, 0.0]
inputTensor = torch.tensor(
inputValues, dtype=dtype, device=device
).requires_grad_()
expetedTensor = torch.tensor(expectedValues, dtype=dtype, device=device)
out = torch.nn.functional.hardsigmoid(inputTensor)
out.backward(torch.ones_like(inputTensor))
self.assertEqual(inputTensor.grad, expetedTensor)
    @skipIfNoSciPy
    @dtypes(torch.float, torch.double)
    def test_silu(self, device, dtype):
        """silu matches x * sigmoid(x) (scipy.special.expit) as reference.

        Covers contiguous and transposed inputs, functional and inplace.
        """
        input_np = np.random.randn(5, 8)
        # Append a row of extreme/special values.
        special_input = [[-1000, -1, -0.1, 0, 0.5, 1, 2, 1000]]
        input_np = np.concatenate((input_np, special_input), axis=0).astype(
            torch_to_numpy_dtype_dict[dtype]
        )
        expected_output_np = input_np * scipy.special.expit(input_np)
        expected_output = torch.from_numpy(expected_output_np).to(device)
        expected_output_noncontig = expected_output.transpose(0, 1)
        atol = 1e-6
        rtol = 1e-6
        input = torch.from_numpy(input_np).clone().contiguous().to(device)
        self.assertEqual(
            torch.nn.functional.silu(input), expected_output, atol=atol, rtol=rtol
        )
        self.assertEqual(
            torch.nn.functional.silu(input, inplace=True),
            expected_output,
            atol=atol,
            rtol=rtol,
        )
        # Non-contiguous (transposed) input path.
        input = torch.from_numpy(input_np).clone().to(device)
        input_noncontig = input.transpose(0, 1)
        self.assertEqual(
            torch.nn.functional.silu(input_noncontig),
            expected_output_noncontig,
            atol=atol,
            rtol=rtol,
        )
        self.assertEqual(
            torch.nn.functional.silu(input_noncontig, inplace=True),
            expected_output_noncontig,
            atol=atol,
            rtol=rtol,
        )
    # It is not obvious how to merge this into OpInfo becuase these inputs
    # succeed for gradcheck but are expected to fail for gradgradcheck
    @dtypes(torch.double)
    def test_sinc(self, device, dtype):
        """gradcheck sinc at 0, the smallest positive double, and 1."""
        # The derivative of sinc(x) at x=0 has to be special cased.
        # A naive computation will result in 0/0 -> NaN.
        # We also need to be careful when we are very close to 0, as the
        # derivative's denominator is squared, and there are some floats
        # that are positive and whose squares are zero.
        a = torch.tensor(
            [0.0, torch.finfo(torch.double).tiny, 1.0],
            dtype=dtype,
            requires_grad=True,
            device=device,
        )
        gradcheck(torch.sinc, a)
    @skipIfNoSciPy
    @dtypes(torch.float, torch.double)
    def test_mish(self, device, dtype):
        """mish matches x * tanh(softplus(x)) computed with NumPy.

        Covers contiguous and transposed inputs, functional and inplace.
        """
        input_np = np.random.randn(5, 8)
        # Append a row of extreme/special values.
        special_input = [[-1000, -1, -0.1, 0, 0.5, 1, 2, 1000]]
        input_np = np.concatenate((input_np, special_input), axis=0).astype(
            torch_to_numpy_dtype_dict[dtype]
        )
        # softplus(x) = log1p(exp(x))
        expected_output_np = input_np * np.tanh(np.log1p(np.exp(input_np)))
        expected_output = torch.from_numpy(expected_output_np).to(device)
        expected_output_noncontig = expected_output.transpose(0, 1)
        atol = 1e-6
        rtol = 1e-6
        input = torch.from_numpy(input_np).clone().contiguous().to(device)
        self.assertEqual(
            torch.nn.functional.mish(input), expected_output, atol=atol, rtol=rtol
        )
        self.assertEqual(
            torch.nn.functional.mish(input, inplace=True),
            expected_output,
            atol=atol,
            rtol=rtol,
        )
        # Non-contiguous (transposed) input path.
        input = torch.from_numpy(input_np).clone().to(device)
        input_noncontig = input.transpose(0, 1)
        self.assertEqual(
            torch.nn.functional.mish(input_noncontig),
            expected_output_noncontig,
            atol=atol,
            rtol=rtol,
        )
        self.assertEqual(
            torch.nn.functional.mish(input_noncontig, inplace=True),
            expected_output_noncontig,
            atol=atol,
            rtol=rtol,
        )
    # do ops like threshold need a test_unary(_nonufunc) test suite?
    @onlyCPU
    @dtypes(*get_all_math_dtypes("cpu"))
    def test_threshold(self, device, dtype):
        """threshold(x, 0, 0) produces some non-positive outputs on +/-1 inputs."""
        if dtype != torch.uint8 and dtype != torch.float16 and not dtype.is_complex:
            # 100 is wide enough to use AVX2 instructions for all types
            x = (
                torch.randn(100, dtype=torch.float, device=device)
                .sign()
                .to(dtype=dtype)
            )
            y = torch.threshold(x, 0, 0)
            # sign() yields -1/0/+1, so thresholding at 0 must leave some
            # elements <= 0.
            self.assertTrue(y.le(0).any())
    def _helper_test_igamma(self, loglo, loghi, device, dtype, torch_fcn, scipy_fcn):
        """Compare a two-arg torch fn (igamma/igammac) against its SciPy reference.

        Inputs span e**loglo .. e**loghi; pairs cover broadcast shapes,
        equal args, scaled ratios, and contiguous/noncontiguous slices.
        Half-precision dtypes are upcast to float before the SciPy call.
        """
        exp1 = 2.71828182846  # approx e; logspace base
        vec1 = torch.logspace(
            loglo, loghi, steps=500, base=exp1, dtype=torch.float64, device=device
        ).unsqueeze(-1)
        vec1 = vec1.to(dtype)
        inputs = [
            (vec1, vec1.transpose(0, 1)),
            (vec1, vec1),  # for large number, it should approach 0.5
            (vec1, 0.5 * vec1),  # test for considerable ratio
            (vec1, 2.0 * vec1),
            (vec1[::2, :], vec1[::2, :]),  # contiguous/noncontiguous tests
            (vec1[::2, :], vec1[: vec1.shape[0] // 2, :]),
            (vec1[: vec1.shape[0] // 2, :], vec1[::2, :]),
        ]
        half_prec = dtype in [torch.bfloat16, torch.float16]
        for input0, input1 in inputs:
            actual = torch_fcn(input0, input1)
            if half_prec:
                # SciPy has no half-precision path; compare in float32.
                input0 = input0.to(torch.float)
                input1 = input1.to(torch.float)
            expected = scipy_fcn(input0.cpu().numpy(), input1.cpu().numpy())
            expected = torch.from_numpy(expected).to(dtype)
            self.assertEqual(actual, expected)
    @dtypesIfCPU(torch.float16, torch.bfloat16, torch.float32, torch.float64)
    @dtypes(torch.float32, torch.float64)
    @unittest.skipIf(not TEST_SCIPY, "SciPy not found")
    @onlyNativeDeviceTypes
    def test_igamma_common(self, device, dtype):
        """igamma vs scipy.special.gammainc over a moderate value range."""
        # test igamma for reasonable range of values
        loglo = -4  # approx 0.018
        loghi = 4  # approx 54.6
        self._helper_test_igamma(
            loglo, loghi, device, dtype, torch.igamma, scipy.special.gammainc
        )
    @dtypesIfCPU(torch.float16, torch.bfloat16, torch.float32, torch.float64)
    @dtypes(torch.float32, torch.float64)
    @unittest.skipIf(not TEST_SCIPY, "SciPy not found")
    @onlyNativeDeviceTypes
    def test_igammac_common(self, device, dtype):
        """igammac vs scipy.special.gammaincc over a moderate value range."""
        # test igammac for reasonable range of values
        loglo = -4  # approx 0.018
        loghi = 4  # approx 54.6
        self._helper_test_igamma(
            loglo, loghi, device, dtype, torch.igammac, scipy.special.gammaincc
        )
    @dtypesIfCPU(torch.float16, torch.bfloat16, torch.float32, torch.float64)
    @dtypes(torch.float32, torch.float64)
    @onlyNativeDeviceTypes
    def test_igamma_edge_cases(self, device, dtype):
        """igamma limits: a=0, x=0, infinities, and negative a (nan)."""
        tkwargs = {"dtype": dtype, "device": device}
        infs = torch.zeros((3,), **tkwargs) + float("inf")
        zeros = torch.zeros((3,), **tkwargs)
        ones = torch.ones((3,), **tkwargs)
        zero_to_large = torch.tensor([0.0, 1.0, 1e3], **tkwargs)
        small_to_inf = torch.tensor([1e-3, 1.0, float("inf")], **tkwargs)
        nans = torch.zeros((3,), **tkwargs) + float("nan")
        inpouts = [
            # (a , x), out
            ((zeros, small_to_inf), ones),
            ((small_to_inf, zeros), zeros),
            ((infs, zero_to_large), zeros),
            ((zero_to_large, infs), ones),
            ((zeros, zeros), nans),
            ((infs, infs), nans),
            ((-small_to_inf, small_to_inf), nans),
        ]
        for inputs, output in inpouts:
            input0, input1 = inputs
            calc = torch.igamma(input0, input1)
            if torch.all(torch.isnan(output)):
                # nan != nan, so equality comparison would fail; check isnan.
                self.assertTrue(torch.all(torch.isnan(calc)))
            else:
                self.assertEqual(calc, output)
    @dtypesIfCPU(torch.float16, torch.bfloat16, torch.float32, torch.float64)
    @dtypes(torch.float32, torch.float64)
    @onlyNativeDeviceTypes
    def test_igammac_edge_cases(self, device, dtype):
        """igammac limits: complement of the igamma edge cases (outputs flipped)."""
        tkwargs = {"dtype": dtype, "device": device}
        infs = torch.zeros((3,), **tkwargs) + float("inf")
        zeros = torch.zeros((3,), **tkwargs)
        ones = torch.ones((3,), **tkwargs)
        zero_to_large = torch.tensor([0.0, 1.0, 1e3], **tkwargs)
        small_to_inf = torch.tensor([1e-3, 1.0, float("inf")], **tkwargs)
        nans = torch.zeros((3,), **tkwargs) + float("nan")
        inpouts = [
            # (a , x), out
            ((zeros, small_to_inf), zeros),
            ((small_to_inf, zeros), ones),
            ((infs, zero_to_large), ones),
            ((zero_to_large, infs), zeros),
            ((zeros, zeros), nans),
            ((infs, infs), nans),
            ((-small_to_inf, small_to_inf), nans),
        ]
        for inputs, output in inpouts:
            input0, input1 = inputs
            calc = torch.igammac(input0, input1)
            if torch.all(torch.isnan(output)):
                # nan != nan, so equality comparison would fail; check isnan.
                self.assertTrue(torch.all(torch.isnan(calc)))
            else:
                self.assertEqual(calc, output)
    def _i0_helper(self, t):
        """Compare torch.i0(t) against scipy.special.i0 on the same values."""
        # Test by comparing to scipy
        dtype = t.dtype
        actual = torch.i0(t)
        if dtype is torch.bfloat16:
            # SciPy cannot consume bfloat16; upcast before the numpy round-trip.
            t = t.to(torch.float32)
        expected = scipy.special.i0(t.cpu().numpy())
        # Casting down for dtype float16 is required since scipy upcasts to float32
        if dtype is torch.bfloat16 or dtype is torch.float16:
            expected = torch.from_numpy(expected).to(dtype)
        self.assertEqual(actual, expected)
def _i0_range_helper(self, range, device, dtype):
# i0 tests are broken up by the domain for which the function does not overflow for each dtype
# This is done to ensure that the function performs well across all possible input values, without worrying
# about inf or nan possibilities
for r in (range, -range):
t = torch.rand(1000, device=device).to(dtype) * r
self._i0_helper(t)
    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
    @dtypes(torch.bfloat16, torch.float32, torch.float64)
    @unittest.skipIf(not TEST_SCIPY, "SciPy not found")
    def test_i0_range1(self, device, dtype):
        """i0 vs SciPy on the float16-safe domain."""
        # This tests the domain for i0 for which float16 does not overflow
        # The domain is (-13.25, 13.25)
        self._i0_range_helper(13.25, device, dtype)
    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
    @dtypes(torch.bfloat16, torch.float32, torch.float64)
    @unittest.skipIf(not TEST_SCIPY, "SciPy not found")
    def test_i0_range2(self, device, dtype):
        """i0 vs SciPy on the float32/bfloat16-safe domain."""
        # This tests the domain for i0 for which float32 and bfloat16 does not overflow
        # The domain is (-88.5, 88.5)
        self._i0_range_helper(88.5, device, dtype)
    @dtypes(torch.float64)
    @unittest.skipIf(not TEST_SCIPY, "SciPy not found")
    def test_i0_range3(self, device, dtype):
        """i0 vs SciPy on the float64-safe domain."""
        # This tests the domain for i0 for which float64 does not overflow
        # The domain is (-709.75, 709.75)
        self._i0_range_helper(709.75, device, dtype)
    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
    @dtypes(torch.bfloat16, torch.float32, torch.float64)
    @unittest.skipIf(not TEST_SCIPY, "SciPy not found")
    def test_i0_special(self, device, dtype):
        """i0 on an empty tensor, and nan propagation for inf/-inf/nan inputs."""
        t = torch.tensor([], device=device, dtype=dtype)
        self._i0_helper(t)
        t = torch.tensor([inf, -inf, nan], device=device, dtype=dtype)
        self.assertTrue(torch.i0(t).isnan().all())
    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
    @dtypes(torch.bfloat16, torch.float32, torch.float64)
    @unittest.skipIf(not TEST_SCIPY, "SciPy not found")
    def test_special_i0_i1_vs_scipy(self, device, dtype):
        """i0/i0e (and i1/i1e for full-precision dtypes) vs SciPy references.

        Checked on an empty tensor, a dense linspace sweep, and the dtype's
        extremal finite values; i1/i1e skip half/bfloat16 (unsupported there).
        """
        def check_equal(t, torch_fn, scipy_fn):
            # Test by comparing to scipy
            actual = torch_fn(t)
            if dtype is torch.bfloat16:
                # SciPy cannot consume bfloat16; upcast before numpy round-trip.
                t = t.to(torch.float32)
            expected = scipy_fn(t.cpu().numpy())
            # Casting down for dtype float16 is required since scipy upcasts to float32
            if dtype is torch.bfloat16 or dtype is torch.float16:
                expected = torch.from_numpy(expected).to(dtype)
            self.assertEqual(actual, expected)
        t = torch.tensor([], device=device, dtype=dtype)
        check_equal(t, torch.i0, scipy.special.i0)
        check_equal(t, torch.special.i0e, scipy.special.i0e)
        if dtype not in [torch.half, torch.bfloat16]:
            check_equal(t, torch.special.i1, scipy.special.i1)
            check_equal(t, torch.special.i1e, scipy.special.i1e)
        range = (-1e7, 1e7)
        if dtype == torch.half:
            # Stay inside half's finite range.
            range = (-65000, 65000)
        t = torch.linspace(*range, int(1e4), device=device, dtype=dtype)
        check_equal(t, torch.i0, scipy.special.i0)
        check_equal(t, torch.special.i0e, scipy.special.i0e)
        if dtype not in [torch.half, torch.bfloat16]:
            check_equal(t, torch.special.i1, scipy.special.i1)
            check_equal(t, torch.special.i1e, scipy.special.i1e)
        # NaN, inf, -inf are tested in reference_numerics tests.
        info = torch.finfo(dtype)
        min, max, eps, tiny = info.min, info.max, info.eps, info.tiny
        t = torch.tensor([min, max, eps, tiny], dtype=dtype, device=device)
        check_equal(t, torch.i0, scipy.special.i0)
        check_equal(t, torch.special.i0e, scipy.special.i0e)
        if dtype not in [torch.half, torch.bfloat16]:
            check_equal(t, torch.special.i1, scipy.special.i1)
            check_equal(t, torch.special.i1e, scipy.special.i1e)
@dtypes(torch.float32, torch.float64)
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
def test_special_ndtr_vs_scipy(self, device, dtype):
def check_equal(t):
# Test by comparing to scipy
actual = torch.special.ndtr(t)
expected = scipy.special.ndtr(t.cpu().numpy())
self.assertEqual(actual, expected)
range = (-10, 10)
t = torch.linspace(*range, 1, device=device, dtype=dtype)
check_equal(t)
# Skip testing NaN, inf, -inf since they are tested in reference_numerics tests.
info = torch.finfo(dtype)
min, max, eps, tiny = info.min, info.max, info.eps, info.tiny
t = torch.tensor([min, max, eps, tiny], dtype=dtype, device=device)
check_equal(t)
    @dtypes(torch.float32, torch.float64)
    @unittest.skipIf(not TEST_SCIPY, "SciPy not found")
    def test_special_log_ndtr_vs_scipy(self, device, dtype):
        """Compare torch.special.log_ndtr against scipy.special.log_ndtr."""
        def check_equal(t):
            # Test by comparing with scipy
            actual = torch.special.log_ndtr(t)
            expected = scipy.special.log_ndtr(t.cpu().numpy())
            self.assertEqual(actual, expected)
        # Skip testing NaN, inf, -inf since they are tested in reference_numerics tests.
        info = torch.finfo(dtype)
        min, max, eps, tiny = info.min, info.max, info.eps, info.tiny
        t = torch.tensor([min, max, eps, tiny], dtype=dtype, device=device)
        check_equal(t)
# TODO: allow large opinfo values to be opted-into via metadata
@dtypes(torch.long)
def test_abs_big_number(self, device, dtype):
bignumber = 2**31 + 1
res = torch.tensor([bignumber], device=device, dtype=dtype)
self.assertGreater(res.abs()[0], 0)
# TODO: add signed zero testing to opinfos
@dtypes(torch.float, torch.double)
def test_abs_signed_zero(self, device, dtype):
# Both abs(0.0) and abs(-0.0) should result in 0.0
size = 128 + 1 # pick a large enough number with remainder so that
# both vectorized and nonvectorized op is tested
inp = torch.zeros(size, device=device, dtype=dtype)
inp[::2] = -0.0
inp = inp.abs()
for v in inp:
self.assertGreater(math.copysign(1.0, v), 0.0)
# TODO: update to compare against NumPy by rationalizing with OpInfo
@onlyCUDA
@dtypes(torch.float, torch.double)
def test_abs_zero(self, device, dtype):
# Both abs(0.0) and abs(-0.0) should result in 0.0
abs_zeros = torch.tensor([0.0, -0.0], device=device, dtype=dtype).abs().tolist()
for num in abs_zeros:
self.assertGreater(math.copysign(1.0, num), 0.0)
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
    def test_isposinf_isneginf_non_boolean_output(self, device, dtype):
        """isposinf/isneginf reject any non-boolean dtype for out=."""
        # test non-boolean tensors as the `out=` parameters
        # boolean outputs are tested in the above testcases
        vals = (float("inf"), -float("inf"), 1.2)
        t = torch.tensor(vals, device=device)
        for torch_op in (torch.isposinf, torch.isneginf):
            out = torch.empty_like(t, dtype=dtype)
            with self.assertRaisesRegex(
                RuntimeError, "does not support non-boolean outputs"
            ):
                torch_op(t, out=out)
    def test_nonzero_empty(self, device):
        """nonzero shapes for empty and zero-dim inputs, plain and as_tuple."""
        def assert_tuple_empty(tup, dim):
            # as_tuple returns one empty index tensor per dimension.
            self.assertEqual(dim, len(tup))
            for t in tup:
                self.assertEqual(torch.Size([0]), t.shape)
        # Empty 5-dim tensor: no matches, but shapes reflect the rank.
        x = torch.randn(0, 2, 0, 5, 0, device=device)
        y = torch.nonzero(x)
        z = torch.nonzero(x, as_tuple=True)
        self.assertEqual(0, y.numel())
        self.assertEqual(torch.Size([0, 5]), y.shape)
        assert_tuple_empty(z, 5)
        # Zero-dim nonzero scalar.
        x = torch.tensor(0.5, device=device)
        y = torch.nonzero(x)
        # nonzero with as_tuple returns a
        # tuple of len 1 for a zero-dim tensor.
        # This is done to match Numpy behavior.
        z = torch.nonzero(x, as_tuple=True)
        self.assertEqual(1, len(z))
        self.assertEqual(torch.zeros(1, dtype=torch.long), z[0])
        # Zero-dim zero scalar: no matches at all.
        x = torch.zeros((), device=device)
        y = torch.nonzero(x)
        z = torch.nonzero(x, as_tuple=True)
        self.assertEqual(torch.Size([0, 0]), y.shape)
        self.assertEqual(1, len(z))
        self.assertEqual(torch.empty(0, dtype=torch.long), z[0])
# TODO: rationalize with exp OpInfo
@dtypes(*floating_and_complex_types_and(torch.bfloat16))
@dtypesIfCUDA(*floating_and_complex_types_and(torch.half, torch.bfloat16))
def test_exp(self, device, dtype):
    """Compare torch.exp against np.exp, including complex special values
    (inf/nan real and imaginary parts), with known CPU discrepancies
    documented inline."""
    for v in (2, -2) + ((1j, 1 + 1j) if dtype.is_complex else ()):
        # Multiples of v * pi/3 sweep several quadrants of the complex plane.
        a = (
            torch.tensor(v, dtype=dtype, device=device)
            * torch.arange(18, device=device)
            / 3
            * math.pi
        )
        a = a.to(dtype)
        # bfloat16 overflows
        if dtype == torch.bfloat16:
            return
        self.compare_with_numpy(torch.exp, np.exp, a)
        if dtype.is_complex:
            # exp(inf + 0j): real part must be inf.
            inf_real_zero_imag_in = torch.tensor(
                complex(float("inf"), 0), device=device, dtype=dtype
            )
            inf_real_zero_imag_out = torch.exp(inf_real_zero_imag_in).item()
            self.assertTrue(math.isinf(inf_real_zero_imag_out.real))
            if self.device_type == "cpu":
                pass
                # These are commented out because it cannot be consistently reproduced.
                # This is incorrect. It should be zero. Need fix!
                # https://github.com/pytorch/pytorch/issues/40590
                # self.assertNotEqual(inf_real_zero_imag_out.imag, 0)
                # This is incorrect. They should equal. Need fix!
                # https://github.com/pytorch/pytorch/issues/40590
                # with self.assertRaises(AssertionError):
                # self.compare_with_numpy(torch.exp, np.exp, inf_real_zero_imag_in)
            else:
                self.assertEqual(inf_real_zero_imag_out.imag, 0, atol=0, rtol=0)
                self.compare_with_numpy(torch.exp, np.exp, inf_real_zero_imag_in)
            # exp(0 + inf*j): both parts are nan.
            zero_real_inf_imag_in = torch.tensor(
                complex(0, float("inf")), device=device, dtype=dtype
            )
            zero_real_inf_imag_out = torch.exp(zero_real_inf_imag_in).item()
            self.assertTrue(math.isnan(zero_real_inf_imag_out.real))
            self.assertTrue(math.isnan(zero_real_inf_imag_out.imag))
            # Ensure we are notified when NumPy changes its behavior
            self.compare_with_numpy(torch.exp, np.exp, zero_real_inf_imag_in)
            # exp(inf + inf*j).
            inf_real_imag_in = torch.tensor(
                complex(float("inf"), float("inf")), device=device, dtype=dtype
            )
            inf_real_imag_out = torch.exp(inf_real_imag_in).item()
            if self.device_type == "cpu":
                pass
                # This is incorrect. Need fix! https://github.com/pytorch/pytorch/issues/40590
                # This is commented out because it cannot be consistently reproduced.
                # with self.assertRaises(AssertionError):
                # self.compare_with_numpy(torch.exp, np.exp, inf_real_imag_in)
            else:
                self.assertTrue(math.isinf(inf_real_imag_out.real))
                self.assertTrue(math.isnan(inf_real_imag_out.imag))
                self.compare_with_numpy(torch.exp, np.exp, inf_real_imag_in)
            # exp(inf + nan*j).
            inf_real_nan_imag_in = torch.tensor(
                complex(float("inf"), float("nan")), device=device, dtype=dtype
            )
            inf_real_nan_imag_out = torch.exp(inf_real_nan_imag_in).item()
            if self.device_type == "cpu":
                pass
                # This is incorrect. It should be inf. Need fix! https://github.com/pytorch/pytorch/issues/40590
                # This is commented out because it cannot be consistently reproduced.
                # with self.assertRaises(AssertionError):
                # self.compare_with_numpy(torch.exp, np.exp, inf_real_nan_imag_in)
            else:
                self.assertTrue(math.isinf(inf_real_nan_imag_out.real))
                self.assertTrue(math.isnan(inf_real_nan_imag_out.imag))
                self.compare_with_numpy(torch.exp, np.exp, inf_real_nan_imag_in)
            # exp(nan + inf*j): both parts are nan.
            nan_real_inf_imag_in = torch.tensor(
                complex(float("nan"), float("inf")), device=device, dtype=dtype
            )
            nan_real_inf_imag_out = torch.exp(nan_real_inf_imag_in).item()
            self.assertTrue(math.isnan(nan_real_inf_imag_out.real))
            self.assertTrue(math.isnan(nan_real_inf_imag_out.imag))
            # Ensure we are notified when NumPy changes its behavior
            self.compare_with_numpy(torch.exp, np.exp, nan_real_inf_imag_in)
# Generate per-device (CPU/CUDA/...) variants of the unary ufunc tests.
instantiate_device_type_tests(TestUnaryUfuncs, globals())
if __name__ == "__main__":
    run_tests()
| pytorch-master | test/test_unary_ufuncs.py |
# Owner(s): ["module: unknown"]
import threading
import time
import torch
import unittest
from torch.futures import Future
from torch.testing._internal.common_utils import IS_WINDOWS, TestCase, TemporaryFileName, run_tests
from typing import TypeVar
T = TypeVar("T")


def add_one(fut):
    """Unwrap ``fut`` and return its value incremented by one."""
    value = fut.wait()
    return value + 1
class TestFuture(TestCase):
    """Tests for torch.futures.Future: completion, error propagation,
    callbacks (then / add_done_callback) and combinators
    (collect_all / wait_all).

    Fix vs. original: removed a leftover debug ``print(res)`` in
    ``test_wait_all`` that polluted test output.
    """

    def test_set_exception(self) -> None:
        # This test is to ensure errors can propagate across futures.
        error_msg = "Intentional Value Error"
        value_error = ValueError(error_msg)
        f = Future[T]()
        # Set exception
        f.set_exception(value_error)
        # Exception should throw on wait
        with self.assertRaisesRegex(ValueError, "Intentional"):
            f.wait()
        # Exception should also throw on value
        f = Future()
        f.set_exception(value_error)
        with self.assertRaisesRegex(ValueError, "Intentional"):
            f.value()

        def cb(fut):
            fut.value()

        f = Future()
        f.set_exception(value_error)
        # A then-callback touching the errored future surfaces a RuntimeError.
        with self.assertRaisesRegex(RuntimeError, "Got the following error"):
            cb_fut = f.then(cb)
            cb_fut.wait()

    def test_set_exception_multithreading(self) -> None:
        # Ensure errors can propagate when one thread waits on future result
        # and the other sets it with an error.
        error_msg = "Intentional Value Error"
        value_error = ValueError(error_msg)

        def wait_future(f):
            with self.assertRaisesRegex(ValueError, "Intentional"):
                f.wait()

        f = Future[T]()
        t = threading.Thread(target=wait_future, args=(f, ))
        t.start()
        f.set_exception(value_error)
        t.join()

        def cb(fut):
            fut.value()

        def then_future(f):
            fut = f.then(cb)
            with self.assertRaisesRegex(RuntimeError, "Got the following error"):
                fut.wait()

        f = Future[T]()
        t = threading.Thread(target=then_future, args=(f, ))
        t.start()
        f.set_exception(value_error)
        t.join()

    def test_done(self) -> None:
        f = Future[torch.Tensor]()
        self.assertFalse(f.done())
        f.set_result(torch.ones(2, 2))
        self.assertTrue(f.done())

    def test_done_exception(self) -> None:
        err_msg = "Intentional Value Error"

        def raise_exception(unused_future):
            raise RuntimeError(err_msg)

        f1 = Future[torch.Tensor]()
        self.assertFalse(f1.done())
        f1.set_result(torch.ones(2, 2))
        self.assertTrue(f1.done())
        # A future completed by a raising callback is still "done";
        # the error only surfaces on wait().
        f2 = f1.then(raise_exception)
        self.assertTrue(f2.done())
        with self.assertRaisesRegex(RuntimeError, err_msg):
            f2.wait()

    def test_wait(self) -> None:
        f = Future[torch.Tensor]()
        f.set_result(torch.ones(2, 2))
        self.assertEqual(f.wait(), torch.ones(2, 2))

    def test_wait_multi_thread(self) -> None:
        def slow_set_future(fut, value):
            time.sleep(0.5)
            fut.set_result(value)

        f = Future[torch.Tensor]()
        t = threading.Thread(target=slow_set_future, args=(f, torch.ones(2, 2)))
        t.start()
        # wait() must block until the other thread completes the future.
        self.assertEqual(f.wait(), torch.ones(2, 2))
        t.join()

    def test_mark_future_twice(self) -> None:
        fut = Future[int]()
        fut.set_result(1)
        with self.assertRaisesRegex(
            RuntimeError,
            "Future can only be marked completed once"
        ):
            fut.set_result(1)

    def test_pickle_future(self):
        fut = Future[int]()
        errMsg = "Can not pickle torch.futures.Future"
        with TemporaryFileName() as fname:
            with self.assertRaisesRegex(RuntimeError, errMsg):
                torch.save(fut, fname)

    def test_then(self):
        fut = Future[torch.Tensor]()
        then_fut = fut.then(lambda x: x.wait() + 1)
        fut.set_result(torch.ones(2, 2))
        self.assertEqual(fut.wait(), torch.ones(2, 2))
        self.assertEqual(then_fut.wait(), torch.ones(2, 2) + 1)

    def test_chained_then(self):
        fut = Future[torch.Tensor]()
        futs = []
        last_fut = fut
        for _ in range(20):
            last_fut = last_fut.then(add_one)
            futs.append(last_fut)
        fut.set_result(torch.ones(2, 2))
        # The i-th chained future observes i+1 increments.
        for i in range(len(futs)):
            self.assertEqual(futs[i].wait(), torch.ones(2, 2) + i + 1)

    def _test_then_error(self, cb, errMsg):
        # Helper: an error raised in a then() callback surfaces as a
        # RuntimeError matching errMsg when the derived future is waited on.
        fut = Future[int]()
        then_fut = fut.then(cb)
        fut.set_result(5)
        self.assertEqual(5, fut.wait())
        with self.assertRaisesRegex(RuntimeError, errMsg):
            then_fut.wait()

    def test_then_wrong_arg(self):
        def wrong_arg(tensor):
            return tensor + 1

        self._test_then_error(wrong_arg, "unsupported operand type.*Future.*int")

    def test_then_no_arg(self):
        def no_arg():
            return True

        self._test_then_error(no_arg, "takes 0 positional arguments but 1 was given")

    def test_then_raise(self):
        def raise_value_error(fut):
            raise ValueError("Expected error")

        self._test_then_error(raise_value_error, "Expected error")

    def test_add_done_callback_simple(self):
        callback_result = False

        def callback(fut):
            nonlocal callback_result
            fut.wait()
            callback_result = True

        fut = Future[torch.Tensor]()
        fut.add_done_callback(callback)
        self.assertFalse(callback_result)
        fut.set_result(torch.ones(2, 2))
        self.assertEqual(fut.wait(), torch.ones(2, 2))
        self.assertTrue(callback_result)

    def test_add_done_callback_maintains_callback_order(self):
        callback_result = 0

        def callback_set1(fut):
            nonlocal callback_result
            fut.wait()
            callback_result = 1

        def callback_set2(fut):
            nonlocal callback_result
            fut.wait()
            callback_result = 2

        fut = Future[torch.Tensor]()
        fut.add_done_callback(callback_set1)
        fut.add_done_callback(callback_set2)
        fut.set_result(torch.ones(2, 2))
        self.assertEqual(fut.wait(), torch.ones(2, 2))
        # set2 called last, callback_result = 2
        self.assertEqual(callback_result, 2)

    def _test_add_done_callback_error_ignored(self, cb):
        # Helper: errors raised by add_done_callback callbacks are swallowed
        # and do not poison the future's value.
        fut = Future[int]()
        fut.add_done_callback(cb)
        fut.set_result(5)
        # error msg logged to stdout
        self.assertEqual(5, fut.wait())

    def test_add_done_callback_error_is_ignored(self):
        def raise_value_error(fut):
            raise ValueError("Expected error")

        self._test_add_done_callback_error_ignored(raise_value_error)

    def test_add_done_callback_no_arg_error_is_ignored(self):
        def no_arg():
            return True

        # Adding another level of function indirection here on purpose.
        # Otherwise mypy will pick up on no_arg having an incompatible type and fail CI
        self._test_add_done_callback_error_ignored(no_arg)

    def test_interleaving_then_and_add_done_callback_maintains_callback_order(self):
        callback_result = 0

        def callback_set1(fut):
            nonlocal callback_result
            fut.wait()
            callback_result = 1

        def callback_set2(fut):
            nonlocal callback_result
            fut.wait()
            callback_result = 2

        def callback_then(fut):
            nonlocal callback_result
            return fut.wait() + callback_result

        fut = Future[torch.Tensor]()
        fut.add_done_callback(callback_set1)
        then_fut = fut.then(callback_then)
        fut.add_done_callback(callback_set2)
        fut.set_result(torch.ones(2, 2))
        self.assertEqual(fut.wait(), torch.ones(2, 2))
        # then_fut's callback is called with callback_result = 1
        self.assertEqual(then_fut.wait(), torch.ones(2, 2) + 1)
        # set2 called last, callback_result = 2
        self.assertEqual(callback_result, 2)

    def test_interleaving_then_and_add_done_callback_propagates_error(self):
        def raise_value_error(fut):
            raise ValueError("Expected error")

        fut = Future[torch.Tensor]()
        then_fut = fut.then(raise_value_error)
        fut.add_done_callback(raise_value_error)
        fut.set_result(torch.ones(2, 2))
        # error from add_done_callback's callback is swallowed
        # error from then's callback is not
        self.assertEqual(fut.wait(), torch.ones(2, 2))
        with self.assertRaisesRegex(RuntimeError, "Expected error"):
            then_fut.wait()

    def test_collect_all(self):
        fut1 = Future[int]()
        fut2 = Future[int]()
        fut_all = torch.futures.collect_all([fut1, fut2])

        def slow_in_thread(fut, value):
            time.sleep(0.1)
            fut.set_result(value)

        # fut2 completes before fut1; collect_all must still deliver both.
        t = threading.Thread(target=slow_in_thread, args=(fut1, 1))
        fut2.set_result(2)
        t.start()
        res = fut_all.wait()
        self.assertEqual(res[0].wait(), 1)
        self.assertEqual(res[1].wait(), 2)
        t.join()

    @unittest.skipIf(IS_WINDOWS, "TODO: need to fix this testcase for Windows")
    def test_wait_all(self):
        fut1 = Future[int]()
        fut2 = Future[int]()
        # No error version
        fut1.set_result(1)
        fut2.set_result(2)
        res = torch.futures.wait_all([fut1, fut2])
        self.assertEqual(res, [1, 2])

        # Version with an exception
        def raise_in_fut(fut):
            raise ValueError("Expected error")

        fut3 = fut1.then(raise_in_fut)
        with self.assertRaisesRegex(RuntimeError, "Expected error"):
            torch.futures.wait_all([fut3, fut2])
# Entry point when run directly (pytest/CI call run_tests via discovery).
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_futures.py |
# Owner(s): ["module: tensor creation"]
import torch
import numpy as np
import sys
import math
import warnings
import unittest
from itertools import product, combinations, combinations_with_replacement, permutations
import random
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TestCase, run_tests, do_test_empty_full, TEST_WITH_ROCM, suppress_warnings,
torch_to_numpy_dtype_dict, numpy_to_torch_dtype_dict, slowTest,
TEST_SCIPY, IS_MACOS, IS_PPC, IS_WINDOWS, parametrize, skipIfTorchDynamo)
from torch.testing._internal.common_device_type import (
expectedFailureMeta, instantiate_device_type_tests, deviceCountAtLeast, onlyNativeDeviceTypes,
onlyCPU, largeTensorTest, precisionOverride, dtypes,
onlyCUDA, skipCPUIf, dtypesIfCUDA, skipMeta)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, all_types_and, floating_and_complex_types,
floating_types, floating_and_complex_types_and, integral_types_and, get_all_dtypes
)
from torch.testing._creation import float_to_corresponding_complex_type_map
from torch.utils.dlpack import to_dlpack
# TODO: refactor tri_tests_args, _compare_trilu_indices, run_additional_tri_tests
from torch.testing._internal.common_methods_invocations import (
tri_tests_args, _compare_trilu_indices, run_additional_tri_tests)
# TODO: replace with make_tensor
def _generate_input(shape, dtype, device, with_extremal):
if shape == ():
x = torch.tensor((), dtype=dtype, device=device)
else:
if dtype.is_floating_point or dtype.is_complex:
# work around torch.randn not being implemented for bfloat16
if dtype == torch.bfloat16:
x = torch.randn(*shape, device=device) * random.randint(30, 100)
x = x.to(torch.bfloat16)
else:
x = torch.randn(*shape, dtype=dtype, device=device) * random.randint(30, 100)
x[torch.randn(*shape) > 0.5] = 0
if with_extremal and dtype.is_floating_point:
# Use extremal values
x[torch.randn(*shape) > 0.5] = float('nan')
x[torch.randn(*shape) > 0.5] = float('inf')
x[torch.randn(*shape) > 0.5] = float('-inf')
elif with_extremal and dtype.is_complex:
x[torch.randn(*shape) > 0.5] = complex('nan')
x[torch.randn(*shape) > 0.5] = complex('inf')
x[torch.randn(*shape) > 0.5] = complex('-inf')
elif dtype == torch.bool:
x = torch.zeros(shape, dtype=dtype, device=device)
x[torch.randn(*shape) > 0.5] = True
else:
x = torch.randint(15, 100, shape, dtype=dtype, device=device)
return x
# TODO: replace with make_tensor
def _rand_shape(dim, min_size, max_size):
shape = []
for i in range(dim):
shape.append(random.randint(min_size, max_size))
return tuple(shape)
# Test suite for tensor creation ops
#
# Includes creation functions like torch.eye, random creation functions like
# torch.rand, and *like functions like torch.ones_like.
# DOES NOT INCLUDE view ops, which are tested in TestViewOps (currently in
# test_torch.py) OR numpy interop (which is also still tested in test_torch.py)
#
# See https://pytorch.org/docs/master/torch.html#creation-ops
class TestTensorCreation(TestCase):
exact_dtype = True
@onlyCPU
@dtypes(torch.float)
def test_diag_embed(self, device, dtype):
    """diag_embed matches stacking per-row torch.diag results."""
    mat = torch.arange(3 * 4, dtype=dtype, device=device).view(3, 4)
    actual = torch.diag_embed(mat)
    reference = torch.stack([torch.diag(row) for row in mat], 0)
    self.assertEqual(actual, reference)
    # Non-default offset and dim placement.
    actual = torch.diag_embed(mat, offset=1, dim1=0, dim2=2)
    reference = torch.stack([torch.diag(row, 1) for row in mat], 1)
    self.assertEqual(actual, reference)
def test_cat_mem_overlap(self, device):
    """cat into an out= tensor that memory-overlaps an input must fail."""
    # The expanded view shares storage across rows, so writing to it
    # while reading the inputs is an unsupported overlap.
    overlapping_out = torch.rand((1, 3), device=device).expand((6, 3))
    src = torch.rand((3, 3), device=device)
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        torch.cat([src, src], out=overlapping_out)
@onlyNativeDeviceTypes
def test_vander(self, device):
    """torch.vander argument validation and empty-input shape."""
    vec = torch.tensor([1, 2, 3, 5], device=device)
    # An empty input with N=0 produces an empty (0, 0) matrix.
    self.assertEqual((0, 0), torch.vander(torch.tensor([]), 0).shape)
    # Negative N is rejected.
    with self.assertRaisesRegex(RuntimeError, "N must be non-negative."):
        torch.vander(vec, N=-1)
    # Only 1-D inputs are accepted.
    with self.assertRaisesRegex(RuntimeError, "x must be a one-dimensional tensor."):
        torch.vander(torch.stack((vec, vec)))
@onlyNativeDeviceTypes
@dtypes(torch.bool, torch.uint8, torch.int8, torch.short, torch.int, torch.long,
        torch.float, torch.double,
        torch.cfloat, torch.cdouble)
def test_vander_types(self, device, dtype):
    """Compare torch.vander with np.vander across dtypes, N, and increasing."""
    if dtype is torch.uint8:
        # Note: no negative uint8 values
        X = [[1, 2, 3, 5], [0, 1 / 3, 1, math.pi, 3 / 7]]
    elif dtype is torch.bool:
        # Note: see https://github.com/pytorch/pytorch/issues/37398
        # for why this is necessary.
        X = [[True, True, True, True], [False, True, True, True, True]]
    elif dtype in [torch.cfloat, torch.cdouble]:
        X = [[1 + 1j, 1 + 0j, 0 + 1j, 0 + 0j],
             [2 + 2j, 3 + 2j, 4 + 3j, 5 + 4j]]
    else:
        X = [[1, 2, 3, 5], [-math.pi, 0, 1 / 3, 1, math.pi, 3 / 7]]
    N = [None, 0, 1, 3]
    increasing = [False, True]
    for x, n, inc in product(X, N, increasing):
        numpy_dtype = torch_to_numpy_dtype_dict[dtype]
        pt_x = torch.tensor(x, device=device, dtype=dtype)
        np_x = np.array(x, dtype=numpy_dtype)
        # n=None exercises the default-N overload.
        pt_res = torch.vander(pt_x, increasing=inc) if n is None else torch.vander(pt_x, n, inc)
        np_res = np.vander(np_x, n, inc)
        self.assertEqual(
            pt_res,
            torch.from_numpy(np_res),
            atol=1e-3,
            rtol=0,
            exact_dtype=False)
def test_cat_all_dtypes_and_devices(self, device):
    """cat along dim 0 and dim 1 works for every supported dtype."""
    for dt in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16, torch.chalf):
        base = torch.tensor([[1, 2], [3, 4]], dtype=dt, device=device)
        stacked_rows = torch.tensor([[1, 2], [3, 4], [1, 2], [3, 4]], dtype=dt, device=device)
        self.assertEqual(torch.cat((base, base), 0), stacked_rows)
        stacked_cols = torch.tensor([[1, 2, 1, 2], [3, 4, 3, 4]], dtype=dt, device=device)
        self.assertEqual(torch.cat((base, base), 1), stacked_cols)
def test_fill_all_dtypes_and_devices(self, device):
    """fill_ writes the scalar into every element and preserves dtype."""
    for dt in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16, torch.chalf):
        for x in [torch.tensor((10, 10), dtype=dt, device=device),
                  torch.empty(10000, dtype=dt, device=device)]:  # large tensor
            numel = x.numel()
            # Tighter bound keeps fill values representable for 8-bit types.
            bound = 100 if dt in (torch.uint8, torch.int8) else 2000
            for n in range(-bound, bound, bound // 10):
                x.fill_(n)
                self.assertEqual(x, torch.tensor([n] * numel, dtype=dt, device=device))
                self.assertEqual(dt, x.dtype)
def test_roll(self, device):
    """torch.roll: single/multi-dim shifts, flattened roll with no dims,
    non-contiguous inputs, bool and complex tensors, and arg validation."""
    numbers = torch.arange(1, 9, device=device)
    single_roll = numbers.roll(1, 0)
    expected = torch.tensor([8, 1, 2, 3, 4, 5, 6, 7], device=device)
    self.assertEqual(single_roll, expected, msg="{} did not equal expected result".format(single_roll))
    roll_backwards = numbers.roll(-2, 0)
    expected = torch.tensor([3, 4, 5, 6, 7, 8, 1, 2], device=device)
    self.assertEqual(roll_backwards, expected, msg="{} did not equal expected result".format(roll_backwards))
    data = numbers.view(2, 2, 2)
    rolled = data.roll(1, 0)
    expected = torch.tensor([5, 6, 7, 8, 1, 2, 3, 4], device=device).view(2, 2, 2)
    self.assertEqual(expected, rolled, msg="{} did not equal expected result: {}".format(rolled, expected))
    data = data.view(2, 4)
    # roll a loop until back where started
    loop_rolled = data.roll(2, 0).roll(4, 1)
    self.assertEqual(data, loop_rolled, msg="{} did not equal the original: {}".format(loop_rolled, data))
    # multiple inverse loops
    self.assertEqual(data, data.roll(-20, 0).roll(-40, 1))
    self.assertEqual(torch.tensor([8, 1, 2, 3, 4, 5, 6, 7], device=device), numbers.roll(1, 0))
    # test non-contiguous
    # strided equivalent to numbers.as_strided(size=(4, 2), stride=(1, 4))
    strided = numbers.view(2, 4).transpose(0, 1)
    self.assertFalse(strided.is_contiguous(), "this test needs a non-contiguous tensor")
    expected = torch.tensor([4, 8, 1, 5, 2, 6, 3, 7]).view(4, 2)
    rolled = strided.roll(1, 0)
    self.assertEqual(expected, rolled,
                     msg="non contiguous tensor rolled to {} instead of {} ".format(rolled, expected))
    # test roll with no dimension specified
    expected = numbers.roll(1, 0).view(2, 4)
    self.assertEqual(expected, data.roll(1), msg="roll with no dims should flatten and roll.")
    self.assertEqual(expected, data.roll(1, dims=None), msg="roll with no dims should flatten and roll.")
    # test roll over multiple dimensions
    expected = torch.tensor([[7, 8, 5, 6], [3, 4, 1, 2]], device=device)
    double_rolled = data.roll(shifts=(2, -1), dims=(1, 0))
    self.assertEqual(double_rolled, expected,
                     msg="should be able to roll over two dimensions, got {}".format(double_rolled))
    self.assertRaisesRegex(RuntimeError, "required", lambda: data.roll(shifts=(), dims=()))
    self.assertRaisesRegex(RuntimeError, "required", lambda: data.roll(shifts=(), dims=1))
    # shifts/dims should align
    self.assertRaisesRegex(RuntimeError, "align", lambda: data.roll(shifts=(1, 2), dims=(1,)))
    self.assertRaisesRegex(RuntimeError, "align", lambda: data.roll(shifts=(1,), dims=(1, 2)))
    # test bool tensor
    t = torch.zeros(6, dtype=torch.bool, device=device)
    t[0] = True
    t[3] = True
    self.assertEqual(torch.tensor([False, True, False, False, True, False]), t.roll(1, 0))
    # test complex tensor
    t = torch.tensor([1, 2 + 1j, 3.5, 4. + 2j, 5j, 6.], device=device)
    t[0] = 1 + 0.5j
    t[3] = 4.
    expected = torch.tensor([6., 1 + 0.5j, 2 + 1j, 3.5, 4., 5j], device=device)
    self.assertEqual(expected, t.roll(1, 0))
@slowTest
def test_triu_tril(self, device):
    """Exhaustively check triu/tril (out-of-place, out=, in-place, expanded,
    non-contiguous) against a mask-based reference and NumPy."""
    def gen_mask(shape, diagonal, device, upper):
        # Build the boolean mask of entries that triu/tril must zero out.
        mask = torch.zeros(*shape[-2:]).byte()
        for i in range(shape[-2]):
            for j in range(shape[-1]):
                cond = j - i < diagonal if upper else j - i > diagonal
                if cond:
                    mask[i, j] = 1
        return mask.expand(*shape).to(device)

    torch_functions = {True: torch.triu, False: torch.tril}
    numpy_functions = {True: np.triu, False: np.tril}

    # TODO: remove this when bool and half are supported for torch.where
    def bool_half_compat_where(pred, true_tensor, false_tensor, dtype):
        if dtype == torch.bool or dtype == torch.half:
            return torch.where(pred.byte(), true_tensor.byte(), false_tensor.byte()).to(dtype=dtype)
        else:
            return torch.where(pred, true_tensor, false_tensor)

    def run_test(shape, device, diagonal, dtype):
        x = torch.empty(*shape, device=device, dtype=dtype).fill_(2)
        for upper in [True, False]:
            # normal test with mask
            torch_tri_func = torch_functions[upper]
            res1 = torch_tri_func(x, diagonal=diagonal)
            res2 = torch.empty(0, device=device, dtype=dtype)
            torch_tri_func(x, diagonal=diagonal, out=res2)
            exp_mask = gen_mask(shape, diagonal, device, upper)
            expected = bool_half_compat_where(exp_mask, torch.tensor(0).type_as(x), x, dtype)
            self.assertEqual(res1, res2, atol=0, rtol=0)
            self.assertEqual(expected, res1, atol=0, rtol=0)
            # non-contiguous and expanded tensors test
            if 0 not in shape:
                for s in range(-len(shape), -1):
                    # non-contiguous tensors
                    x_nc = x.clone().transpose(s, s + 1)
                    exp_mask = gen_mask(x_nc.size(), diagonal, device, upper)
                    if 1 not in shape:
                        assert not x_nc.is_contiguous(), "x is intentionally non-contiguous"
                    exp_nc = bool_half_compat_where(exp_mask, torch.tensor(0).type_as(x), x_nc, dtype)
                    self.assertEqual(torch_tri_func(x_nc, diagonal), exp_nc, atol=0, rtol=0)
                    x_nc_is_contiguous = x_nc.is_contiguous()
                    if upper:
                        self.assertEqual(x_nc.triu_(diagonal), exp_nc, atol=0, rtol=0)
                    else:
                        self.assertEqual(x_nc.tril_(diagonal), exp_nc, atol=0, rtol=0)
                    self.assertTrue(x_nc.is_contiguous() == x_nc_is_contiguous,
                                    "contiguity of x_nc should not be changed")
                # expanded tensors
                expanded_size = (x.size(0),) + x.size()
                x_expanded = x.clone().expand(*expanded_size)
                if x.size(0) != 1:
                    assert 0 in x_expanded.stride(), "x intentionally has 0 in its stride"
                output = torch_tri_func(x_expanded, diagonal)
                self.assertEqual(output, expected.expand(expanded_size), atol=0, rtol=0)
                if x.size(0) != 1:
                    self.assertTrue(0 in x_expanded.stride(),
                                    "geometry of x_expanded should be the same")
                if upper:
                    self.assertEqual(output, x_expanded.triu_(diagonal), atol=0, rtol=0)
                else:
                    self.assertEqual(output, x_expanded.tril_(diagonal), atol=0, rtol=0)
            # numpy test
            numpy_tri_func = numpy_functions[upper]
            self.assertEqual(numpy_tri_func(x.to('cpu').numpy(), diagonal), res1.cpu().numpy())

    diagonals = [-2, -1, 0, 1, 2]
    shapes = [(3, 3), (5, 3, 3), (7, 5, 3, 3),  # square matrices
              (7, 3), (5, 7, 3), (7, 5, 7, 3),  # fat matrices
              (3, 7), (5, 3, 7), (7, 5, 3, 7),  # thin matrices
              (3, 0), (0, 3, 3), (3, 3, 0, 0),  # no numel matrices
              (3, 1), (5, 3, 1), (7, 5, 3, 1),  # very fat matrices
              (1, 3), (5, 1, 3), (7, 5, 1, 3),  # very thin matrices
              (1, 3, 3, 3), (3, 1, 3, 3, 3)]  # unsqueezed batch dimensions
    dtypes = all_types_and_complex_and(torch.half, torch.bool)
    for s, d, dtype in product(shapes, diagonals, dtypes):
        run_test(s, device, d, dtype)
@onlyCPU
def test_triu_tril_bfloat16(self, device):
    """tril/triu forward and backward in bfloat16 agree with float32."""
    for tri_op in [torch.tril, torch.triu]:
        bf16_in = torch.randn(3, 3, dtype=torch.float32, device=device).bfloat16().requires_grad_(True)
        fp32_in = bf16_in.detach().clone().float().requires_grad_(True)
        bf16_out = tri_op(bf16_in)
        bf16_out.sum().backward()
        fp32_out = tri_op(fp32_in)
        fp32_out.sum().backward()
        # dtypes must be preserved through forward and backward.
        self.assertEqual(bf16_out.dtype, torch.bfloat16)
        self.assertEqual(bf16_in.grad.dtype, torch.bfloat16)
        # bf16 results should match the down-cast fp32 reference.
        self.assertEqual(bf16_out, fp32_out.bfloat16())
        self.assertEqual(bf16_in.grad, fp32_in.grad.bfloat16(), atol=0.01, rtol=0)
def test_diagflat(self, device):
    """diagflat flattens its input and matches torch.diag on the result."""
    dtype = torch.float32
    # Basic sanity test
    x = torch.randn((100,), dtype=dtype, device=device)
    result = torch.diagflat(x)
    expected = torch.diag(x)
    self.assertEqual(result, expected)
    # Test offset
    x = torch.randn((100,), dtype=dtype, device=device)
    result = torch.diagflat(x, 17)
    expected = torch.diag(x, 17)
    self.assertEqual(result, expected)
    # Test where input has more than one dimension
    x = torch.randn((2, 3, 4), dtype=dtype, device=device)
    result = torch.diagflat(x)
    expected = torch.diag(x.contiguous().view(-1))
    self.assertEqual(result, expected)
    # Noncontig input
    x = torch.randn((2, 3, 4), dtype=dtype, device=device).transpose(2, 0)
    self.assertFalse(x.is_contiguous())
    result = torch.diagflat(x)
    expected = torch.diag(x.contiguous().view(-1))
    self.assertEqual(result, expected)
    # Complex number support
    result = torch.diagflat(torch.ones(4, dtype=torch.complex128))
    expected = torch.eye(4, dtype=torch.complex128)
    self.assertEqual(result, expected)
def test_block_diag(self, device):
    """block_diag vs. a reference implementation, mixed-dtype promotion,
    and error handling for rank/device mismatches."""
    def block_diag_workaround(*arrs):
        # Reference implementation: promote each input to 2-D, then place
        # the blocks along the diagonal of a zero matrix.
        arrs_expanded = []
        for a in arrs:
            if a.dim() == 2:
                arrs_expanded.append(a)
            elif a.dim() == 1:
                arrs_expanded.append(a.expand(1, a.size(0)))
            elif a.dim() == 0:
                arrs_expanded.append(a.expand(1, 1))
        shapes = torch.tensor([a.shape for a in arrs_expanded], device=device)
        out = torch.zeros(
            torch.sum(shapes, dim=0).tolist(),
            dtype=arrs_expanded[0].dtype,
            device=device
        )
        r, c = 0, 0
        for i, (rr, cc) in enumerate(shapes):
            out[r:r + rr, c:c + cc] = arrs_expanded[i]
            r += rr
            c += cc
        return out

    tensors = [
        torch.rand((2, 2), device=device),
        torch.rand((2, 3), device=device),
        torch.rand(10, device=device),
        torch.rand((8, 1), device=device),
        torch.rand(1, device=device)[0]
    ]
    result = torch.block_diag(*tensors)
    result_check = block_diag_workaround(*tensors)
    self.assertEqual(result, result_check)
    # A 0-dim input is treated as a 1x1 block.
    tensor = torch.rand(1, device=device)[0]
    result = torch.block_diag(tensor)
    result_check = tensor.expand(1, 1)
    self.assertEqual(result, result_check)
    # A 1-D input is treated as a single row.
    tensor = torch.rand(10, device=device)
    result = torch.block_diag(tensor)
    result_check = tensor.expand(1, tensor.size(0))
    self.assertEqual(result, result_check)
    # No inputs: an empty (1, 0) tensor on CPU.
    result = torch.block_diag()
    result_check = torch.empty(1, 0, device=device)
    self.assertEqual(result, result_check)
    self.assertEqual(result.device.type, 'cpu')
    test_dtypes = [
        torch.uint8,
        torch.int8,
        torch.int16,
        torch.int32,
        torch.int64,
        torch.float32,
        torch.float64,
        torch.complex64,
        torch.complex128
    ]
    # Test pairs of different dtypes
    for dtype1 in test_dtypes:
        for dtype2 in test_dtypes:
            a = torch.tensor(1, device=device, dtype=dtype1)
            b = torch.tensor(2, device=device, dtype=dtype2)
            result = torch.block_diag(a, b)
            result_dtype = torch.result_type(a, b)
            result_check = torch.tensor([[1, 0], [0, 2]], device=device, dtype=result_dtype)
            self.assertEqual(result, result_check)
    with self.assertRaisesRegex(
        RuntimeError,
        "torch.block_diag: Input tensors must have 2 or fewer dimensions. Input 1 has 3 dimensions"
    ):
        torch.block_diag(torch.tensor(5), torch.tensor([[[6]]]))
    with self.assertRaisesRegex(
        RuntimeError,
        "torch.block_diag: Input tensors must have 2 or fewer dimensions. Input 0 has 4 dimensions"
    ):
        torch.block_diag(torch.tensor([[[[6]]]]))
    if device != 'cpu':
        with self.assertRaisesRegex(
            RuntimeError,
            (
                "torch.block_diag: input tensors must all be on the same device."
                " Input 0 is on device cpu and input 1 is on device "
            )
        ):
            torch.block_diag(torch.ones(2, 2).cpu(), torch.ones(2, 2, device=device))
@unittest.skipIf(not TEST_SCIPY, "Scipy not found")
def test_block_diag_scipy(self, device):
    """Cross-check torch.block_diag against scipy.linalg.block_diag,
    including the dtype each library infers from plain Python inputs."""
    import scipy.linalg
    scipy_tensors_list = [
        [
            1,
            [2],
            [],
            [3, 4, 5],
            [[], []],
            [[6], [7.3]]
        ],
        [
            [[1, 2], [3, 4]],
            [1]
        ],
        [
            [[4, 9], [7, 10]],
            [4.6, 9.12],
            [1j + 3]
        ],
        []
    ]
    expected_torch_types = [
        torch.float32,
        torch.int64,
        torch.complex64,
        torch.float32
    ]
    expected_scipy_types = [
        torch.float64,
        # windows scipy block_diag returns int32 types
        torch.int32 if IS_WINDOWS else torch.int64,
        torch.complex128,
        torch.float64
    ]
    for scipy_tensors, torch_type, scipy_type in zip(scipy_tensors_list, expected_torch_types, expected_scipy_types):
        torch_tensors = [torch.tensor(t, device=device) for t in scipy_tensors]
        torch_result = torch.block_diag(*torch_tensors)
        self.assertEqual(torch_result.dtype, torch_type)
        scipy_result = torch.tensor(
            scipy.linalg.block_diag(*scipy_tensors),
            device=device
        )
        self.assertEqual(scipy_result.dtype, scipy_type)
        # Match dtypes before comparing values.
        scipy_result = scipy_result.to(torch_type)
        self.assertEqual(torch_result, scipy_result)
@onlyNativeDeviceTypes
@dtypes(torch.half, torch.float32, torch.float64)
def test_torch_complex(self, device, dtype):
    """torch.complex pairs real/imag parts into the matching complex dtype."""
    re_part = torch.tensor([1, 2], device=device, dtype=dtype)
    im_part = torch.tensor([3, 4], device=device, dtype=dtype)
    combined = torch.complex(re_part, im_part)
    expected_dtype = float_to_corresponding_complex_type_map[dtype]
    self.assertEqual(torch.tensor([1.0 + 3.0j, 2.0 + 4.0j], dtype=expected_dtype), combined)
@onlyNativeDeviceTypes
@dtypes(torch.float32, torch.float64)
def test_torch_polar(self, device, dtype):
    """polar(abs, angle) builds abs * exp(i*angle) in the matching complex dtype."""
    abs = torch.tensor([1, 2, -3, -4.5, 1, 1], device=device, dtype=dtype)
    angle = torch.tensor([math.pi / 2, 5 * math.pi / 4, 0, -11 * math.pi / 6, math.pi, -math.pi],
                         device=device, dtype=dtype)
    z = torch.polar(abs, angle)
    complex_dtype = torch.complex64 if dtype == torch.float32 else torch.complex128
    self.assertEqual(torch.tensor([1j, -1.41421356237 - 1.41421356237j, -3,
                                   -3.89711431703 - 2.25j, -1, -1],
                                  dtype=complex_dtype),
                     z, atol=1e-5, rtol=1e-5)
@onlyNativeDeviceTypes
@dtypes(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64,
        torch.complex64, torch.complex128, torch.bool)
def test_torch_complex_floating_dtype_error(self, device, dtype):
    """complex() and polar() reject non-floating-point inputs."""
    # The expected message is loop-invariant, so build it once.
    expected_error = r"Expected both inputs to be Half, Float or Double tensors but " \
                     r"got [A-Za-z]+ and [A-Za-z]+"
    for op in (torch.complex, torch.polar):
        lhs = torch.tensor([1, 2], device=device, dtype=dtype)
        rhs = torch.tensor([3, 4], device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, expected_error):
            op(lhs, rhs)
@onlyNativeDeviceTypes
@dtypes(torch.float32, torch.float64)
def test_torch_complex_same_dtype_error(self, device, dtype):
    """complex() and polar() require both inputs to share one dtype."""
    def dtype_name(dtype):
        # Scalar-type name as it appears in the error message.
        return 'Float' if dtype == torch.float32 else 'Double'
    for op in (torch.complex, torch.polar):
        other_dtype = torch.float64 if dtype == torch.float32 else torch.float32
        a = torch.tensor([1, 2], device=device, dtype=dtype)
        b = torch.tensor([3, 4], device=device, dtype=other_dtype)
        error = "Expected object of scalar type {} but got scalar type " \
                "{} for second argument".format(dtype_name(dtype),
                                                dtype_name(other_dtype))
        with self.assertRaisesRegex(RuntimeError, error):
            op(a, b)
@onlyNativeDeviceTypes
@dtypes(torch.float32, torch.float64)
def test_torch_complex_out_dtype_error(self, device, dtype):
    """complex() and polar() reject a real-typed out= tensor."""
    def dtype_name(dtype):
        # Scalar-type name as it appears in the error message.
        return 'Float' if dtype == torch.float32 else 'Double'
    def complex_dtype_name(dtype):
        return 'ComplexFloat' if dtype == torch.complex64 else 'ComplexDouble'
    for op in (torch.complex, torch.polar):
        a = torch.tensor([1, 2], device=device, dtype=dtype)
        b = torch.tensor([3, 4], device=device, dtype=dtype)
        out = torch.zeros(2, device=device, dtype=dtype)
        expected_dtype = torch.complex64 if dtype == torch.float32 else torch.complex128
        error = "Expected object of scalar type {} but got scalar type " \
                "{} for argument 'out'".format(
                    complex_dtype_name(expected_dtype), dtype_name(dtype))
        with self.assertRaisesRegex(RuntimeError, error):
            op(a, b, out=out)
def test_cat_empty_legacy(self, device):
    """Legacy behavior: a 1-D zero-element tensor concatenates with anything."""
    # FIXME: this is legacy behavior and should be removed
    # when we support empty tensors with arbitrary sizes
    dtype = torch.float32
    full = torch.randn((4, 3, 32, 32), dtype=dtype, device=device)
    hollow = torch.randn((0,), dtype=dtype, device=device)
    left = torch.cat([full, hollow], dim=1)
    right = torch.cat([hollow, full], dim=1)
    self.assertEqual(left, right)
    both_empty = torch.cat([hollow, hollow], dim=1)
    self.assertEqual(both_empty, hollow)
    # An empty *list* of tensors is still rejected.
    with self.assertRaisesRegex(RuntimeError,
                                'non-empty list of Tensors'):
        torch.cat([], dim=1)
def test_cat_empty(self, device):
    """cat with zero-size tensors works when ndim and non-cat sizes match,
    and is rejected otherwise (no legacy 1-D escape hatch here)."""
    dtype = torch.float32
    x = torch.randn((4, 3, 32, 32), dtype=dtype, device=device)
    empty = torch.randn((4, 0, 32, 32), dtype=dtype, device=device)
    res1 = torch.cat([x, empty], dim=1)
    res2 = torch.cat([empty, x], dim=1)
    self.assertEqual(res1, res2)
    res1 = torch.cat([empty, empty], dim=1)
    self.assertEqual(res1, empty)
    # check non-legacy-behavior (sizes don't match)
    empty = torch.randn((4, 0, 31, 32), dtype=dtype, device=device)
    self.assertRaises(RuntimeError, lambda: torch.cat([x, empty], dim=1))
    self.assertRaises(RuntimeError, lambda: torch.cat([empty, x], dim=1))
    # check non-legacy-behavior (dimensions don't match)
    empty = torch.randn((4, 0), dtype=dtype, device=device)
    self.assertRaises(RuntimeError, lambda: torch.cat([x, empty], dim=1))
    self.assertRaises(RuntimeError, lambda: torch.cat([empty, x], dim=1))
def test_cat_out(self, device):
    """cat(out=...) must reject an out= tensor that aliases an input, and must
    write correctly through non-overlapping and non-contiguous out= views."""
    x = torch.zeros((0), device=device)
    y = torch.randn((4, 6), device=device)
    # out= aliasing an input (even a zero-sized one) is rejected.
    with self.assertRaisesRegex(
            RuntimeError,
            r"unsupported operation: some elements of the input tensor and "
            r"the written-to tensor refer to a single memory location."):
        torch.cat([x, y], dim=0, out=x)
    with self.assertRaisesRegex(
            RuntimeError,
            r"unsupported operation: some elements of the input tensor and "
            r"the written-to tensor refer to a single memory location."):
        torch.cat([x, y], dim=0, out=y)
    z = torch.zeros((4, 6), device=device)
    # out= that is a view overlapping an input is also rejected.
    with self.assertRaisesRegex(
            RuntimeError,
            r"unsupported operation: some elements of the input tensor and "
            r"the written-to tensor refer to a single memory location."):
        torch.cat([y, z], out=z[:2, :])
    # Writing into a *non-overlapping* slice of the same storage is allowed.
    w = y.view(-1).clone()
    a = torch.cat([w[:2], w[4:6]])
    b = torch.cat([w[:2], w[4:6]], out=w[6:10])
    self.assertEqual(a, b)
    self.assertEqual(w[:6], y.view(-1)[:6])
    # Writing through a (possibly) non-contiguous narrowed out= view.
    # Reference: https://github.com/pytorch/pytorch/issues/49878
    for dim in [0, 1]:
        x = torch.zeros((10, 5, 2), device=device)
        random_length = random.randint(1, 4)
        y = x.narrow(dim, 0, x.shape[dim] - random_length)
        val = torch.full_like(y[0], 3., device=device)
        if dim == 0:
            self.assertTrue(y.is_contiguous())
        else:
            self.assertFalse(y.is_contiguous())
        torch.cat((val[None],) * y.shape[0], dim=0, out=y)
        expected_y = torch.cat((val[None],) * y.shape[0], dim=0)
        expected_x = torch.zeros((10, 5, 2), device=device)
        if dim == 0:
            expected_x[:x.shape[dim] - random_length, :, :] = expected_y
        elif dim == 1:
            expected_x[:, :x.shape[dim] - random_length, :] = expected_y
        self.assertEqual(y, expected_y)
        self.assertEqual(x, expected_x)
def test_cat_out_channels_last(self, device):
    """cat into a channels_last out= tensor matches the plain cat result."""
    first = torch.randn((4, 3, 8, 8))
    second = torch.randn(first.shape)
    reference = torch.cat((first, second))
    destination = reference.clone().contiguous(memory_format=torch.channels_last)
    written = torch.cat((first, second), out=destination)
    self.assertEqual(reference, written)
@onlyNativeDeviceTypes
def test_cat_in_channels_last(self, device):
    """cat of channels_last inputs returns a channels_last result equal to the
    contiguous-input result, for every cat dimension and for large sizes."""
    for dim in range(4):
        x = torch.randn((4, 15, 8, 8), device=device)
        y = torch.randn(x.shape, device=device)
        res1 = torch.cat((x, y), dim=dim)
        x = x.clone().contiguous(memory_format=torch.channels_last)
        y = y.clone().contiguous(memory_format=torch.channels_last)
        res2 = torch.cat((x, y), dim=dim)
        self.assertTrue(res2.is_contiguous(memory_format=torch.channels_last))
        self.assertEqual(res1, res2)
        # Size larger than grain size.
        x = torch.randn((4, 15, 256, 256), device=device)
        y = torch.randn(x.shape, device=device)
        res1 = torch.cat((x, y), dim=dim)
        x = x.clone().contiguous(memory_format=torch.channels_last)
        y = y.clone().contiguous(memory_format=torch.channels_last)
        res2 = torch.cat((x, y), dim=dim)
        self.assertTrue(res2.is_contiguous(memory_format=torch.channels_last))
        self.assertEqual(res1, res2)
@onlyNativeDeviceTypes
def test_cat_preserve_channels_last(self, device):
    """When all inputs are channels_last, cat's output is channels_last too."""
    x = torch.randn((4, 3, 8, 8), device=device)
    y = torch.randn(x.shape, device=device)
    res1 = torch.cat((x, y))
    res2 = torch.cat((x.contiguous(memory_format=torch.channels_last), y.contiguous(memory_format=torch.channels_last)))
    self.assertEqual(res1, res2)
    self.assertTrue(res2.is_contiguous(memory_format=torch.channels_last))
    # discontiguous channels-last inputs: slicing keeps the channels_last
    # stride order but makes the tensors non-contiguous
    x = torch.arange(24, dtype=torch.float, device=device).reshape(2, 2, 3, 2).to(memory_format=torch.channels_last)
    x1 = x[:, :, :2]
    x2 = x[:, :, 1:]
    res1 = torch.cat((x1, x2), dim=-1)
    res2 = torch.cat((x1.contiguous(), x2.contiguous()), dim=-1)
    self.assertEqual(res1, res2)
    self.assertTrue(res1.is_contiguous(memory_format=torch.channels_last))
@onlyCUDA
def test_cat_out_memory_format(self, device):
    """Memory-format rules for cat(out=...): a correctly-sized out= keeps its
    own format; an out= that must be resized takes the inputs' common format,
    falling back to contiguous_format when the inputs disagree."""
    inp_size = (4, 4, 4, 4)
    expected_size = (8, 4, 4, 4)
    a_cuda = torch.randn(inp_size, device=device).contiguous(memory_format=torch.channels_last)
    a_cpu = torch.randn(inp_size, device='cpu').contiguous(memory_format=torch.channels_last)
    b_cuda = torch.randn(inp_size, device=device).contiguous(memory_format=torch.contiguous_format)
    b_cpu = torch.randn(inp_size, device='cpu').contiguous(memory_format=torch.contiguous_format)
    c_cuda = torch.randn(inp_size, device=device).contiguous(memory_format=torch.channels_last)
    # Case 1: if out= is the correct shape then the memory format of out= is respected
    out_cuda = torch.empty(expected_size, device=device).contiguous(memory_format=torch.contiguous_format)
    res1_cuda = torch.cat((a_cuda, b_cuda), out=out_cuda)
    out_cpu = torch.empty(expected_size, device='cpu').contiguous(memory_format=torch.contiguous_format)
    res1_cpu = torch.cat((a_cpu, b_cpu), out=out_cpu)
    self.assertTrue(res1_cuda.is_contiguous(memory_format=torch.contiguous_format))
    self.assertTrue(res1_cpu.is_contiguous(memory_format=torch.contiguous_format))
    # Case 2: if out= is not the correct shape then the output is resized internally
    # - For both CPU and CUDA variants, it only propagates memory format if all the tensors have
    #   the same memory format, otherwise it just uses contiguous_format as a default
    out_cuda = torch.empty((0), device=device).contiguous(memory_format=torch.contiguous_format)
    # a_cuda and b_cuda have different memory_format
    res2_cuda = torch.cat((a_cuda, b_cuda), out=out_cuda)
    out_cpu = torch.empty((0), device='cpu').contiguous(memory_format=torch.contiguous_format)
    res2_cpu = torch.cat((a_cpu, b_cpu), out=out_cpu)
    self.assertTrue(res2_cuda.is_contiguous(memory_format=torch.contiguous_format))
    self.assertTrue(res2_cpu.is_contiguous(memory_format=torch.contiguous_format))
    out_cuda = torch.empty((0), device=device).contiguous(memory_format=torch.contiguous_format)
    # a_cuda and c_cuda have same memory_format
    res3_cuda = torch.cat((a_cuda, c_cuda), out=out_cuda)
    self.assertTrue(res3_cuda.is_contiguous(memory_format=torch.channels_last))
@onlyCUDA
@deviceCountAtLeast(2)
def test_cat_different_devices(self, devices):
    """cat must raise when inputs (or out=) live on different CUDA devices."""
    first_dev = torch.randn((3, 3), device=devices[0])
    second_dev = torch.randn((3, 3), device=devices[1])
    mismatch_msg = "Expected all tensors to be on the same device"
    with self.assertRaisesRegex(RuntimeError, mismatch_msg):
        torch.cat((first_dev, second_dev))
    with self.assertRaisesRegex(RuntimeError, mismatch_msg):
        torch.cat((first_dev, first_dev), out=second_dev)
@onlyCUDA
def test_cat_stack_cross_devices(self, device):
    """cat/stack must raise when mixing CUDA and CPU tensors, in either order."""
    gpu_t = torch.randn((3, 3), device=device)
    cpu_t = torch.randn((3, 3), device='cpu')
    message = "Expected all tensors to be on the same device"
    for fn in (torch.cat, torch.stack):
        for pair in ((gpu_t, cpu_t), (cpu_t, gpu_t)):
            with self.assertRaisesRegex(RuntimeError, message):
                fn(pair)
# TODO: reconcile with other cat tests
# TODO: Compare with a NumPy reference instead of CPU
@onlyCUDA
def test_cat(self, device):
    """Basic cat semantics on CUDA: each input occupies its slice of the
    result, and cat round-trips with split/chunk."""
    SIZE = 10
    for dim in range(-3, 3):
        pos_dim = dim if dim >= 0 else 3 + dim
        # Transpose so the cat dimension is not dim 0 in memory.
        x = torch.rand(13, SIZE, SIZE, device=device).transpose(0, pos_dim)
        y = torch.rand(17, SIZE, SIZE, device=device).transpose(0, pos_dim)
        z = torch.rand(19, SIZE, SIZE, device=device).transpose(0, pos_dim)
        res1 = torch.cat((x, y, z), dim)
        self.assertEqual(res1.narrow(pos_dim, 0, 13), x, atol=0, rtol=0)
        self.assertEqual(res1.narrow(pos_dim, 13, 17), y, atol=0, rtol=0)
        self.assertEqual(res1.narrow(pos_dim, 30, 19), z, atol=0, rtol=0)
    # cat is the inverse of split/chunk.
    x = torch.randn(20, SIZE, SIZE, device=device)
    self.assertEqual(torch.cat(torch.split(x, 7)), x)
    self.assertEqual(torch.cat(torch.chunk(x, 7)), x)
    y = torch.randn(1, SIZE, SIZE, device=device)
    z = torch.cat([x, y])
    self.assertEqual(z.size(), (21, SIZE, SIZE))
# TODO: update this test to compare against NumPy instead of CPU
@onlyCUDA
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_device_rounding(self, device, dtype):
    """round() on the device uses round-half-to-even."""
    inputs = [-5.8, -3.5, -2.3, -1.5, -0.5, 0.5, 1.5, 2.3, 3.5, 5.8]
    expected = [-6., -4., -2., -2., 0., 0., 2., 2., 4., 6.]
    rounded = torch.tensor(inputs, device=device).round()
    reference = torch.tensor(expected, device='cpu')
    self.assertEqual(rounded, reference)
# Note: This test failed on XLA since its test cases are created by empty_strided which
# doesn't support overlapping sizes/strides in XLA impl
@skipIfTorchDynamo("TorchDynamo fails on this test for unknown reasons")
@onlyNativeDeviceTypes
def test_like_fn_stride_proparation_vs_tensoriterator_unary_op(self, device):
    # Test like functions against tensoriterator based unary operator (exp) to
    # make sure the returned tensor from like function follows the same stride propagation
    # rule as what tensoriterator does for unary operator. The like function's output strides
    # is computed on CPU side always, no need to test GPU here.
    def compare_helper_(like_fn, t):
        # Output of the like function must match exp()'s strides and sizes.
        te = torch.exp(t)
        tl = like_fn(t)
        self.assertEqual(te.stride(), tl.stride())
        self.assertEqual(te.size(), tl.size())
    # All *_like factory functions under test.
    like_fns = [
        lambda t, **kwargs: torch.zeros_like(t, **kwargs),
        lambda t, **kwargs: torch.ones_like(t, **kwargs),
        lambda t, **kwargs: torch.randint_like(t, 10, 100, **kwargs),
        lambda t, **kwargs: torch.randint_like(t, 100, **kwargs),
        lambda t, **kwargs: torch.randn_like(t, **kwargs),
        lambda t, **kwargs: torch.rand_like(t, **kwargs),
        lambda t, **kwargs: torch.full_like(t, 7, **kwargs),
        lambda t, **kwargs: torch.empty_like(t, **kwargs)]
    # dense non-overlapping tensor,
    # non-dense non-overlapping sliced tensor
    # non-dense non-overlapping gapped tensor
    # non-dense non-overlapping 0 strided tensor
    # non-dense overlapping general tensor
    # non-dense overlapping sliced tensor
    # non-dense overlapping gapped tensor
    # non-dense overlapping 0 strided tensor
    # non-dense overlapping equal strides
    tset = (
        torch.randn(4, 3, 2, device=device),
        torch.randn(4, 3, 2, device=device)[:, :, ::2],
        torch.empty_strided((4, 3, 2), (10, 3, 1), device=device).fill_(1.0),
        torch.empty_strided((4, 3, 2), (10, 0, 3), device=device).fill_(1.0),
        torch.empty_strided((4, 3, 2), (10, 1, 2), device=device).fill_(1.0),
        torch.empty_strided((4, 3, 2), (4, 2, 1), device=device)[:, :, ::2].fill_(1.0),
        torch.empty_strided((4, 3, 2), (10, 1, 1), device=device).fill_(1.0),
        torch.empty_strided((4, 1, 1, 2), (10, 0, 0, 2), device=device).fill_(1.0),
        torch.empty_strided((4, 2, 3), (10, 3, 3), device=device).fill_(1.0))
    # Also exercise every permutation of each layout.
    for like_fn in like_fns:
        for t in tset:
            for p in permutations(range(t.dim())):
                tp = t.permute(p)
                compare_helper_(like_fn, tp)
def _hvd_split_helper(self, torch_fn, np_fn, op_name, inputs, device, dtype, dim):
    """Shared driver for the hsplit/vsplit/dsplit tests.

    `inputs` is a sequence of (shape, arg) pairs where `arg` is either a
    section count (int) or a list of split indices. `dim` is the dimension
    the torch op splits along (1 for hsplit, 0 for vsplit, 2 for dsplit).
    Valid cases are compared against the NumPy reference; invalid cases
    (too few dimensions, or a size not divisible by an int arg) must raise
    with the appropriate message.
    """
    dimension_error_message = op_name + " requires a tensor with at least "
    # Fixed local typo: was `divisibiliy_error_message`.
    divisibility_error_message = op_name + " attempted to split along dimension "
    for shape, arg in inputs:
        # hsplit on a 1-D tensor splits along dim 0, not dim 1.
        direction = dim - (len(shape) == 1 and dim == 1)
        # Minimum ndim the op accepts (hsplit: 1, vsplit: 2, dsplit: 3).
        bound = dim + 2 * (dim == 0) + (dim == 2)
        error_expected = len(shape) < bound or (not isinstance(arg, list) and shape[direction] % arg != 0)
        t = make_tensor(shape, dtype=dtype, device=device)
        t_np = t.cpu().numpy()
        if not error_expected:
            self.assertEqual(torch_fn(t, arg), np_fn(t_np, arg))
        else:
            self.assertRaises(RuntimeError, lambda: torch_fn(t, arg))
            # Bug fix: the NumPy reference must receive the ndarray, not the
            # torch tensor — on CUDA, implicit ndarray conversion raises
            # TypeError instead of the ValueError asserted here.
            self.assertRaises(ValueError, lambda: np_fn(t_np, arg))
            expected_error_message = dimension_error_message if len(shape) < bound else divisibility_error_message
            self.assertRaisesRegex(RuntimeError, expected_error_message, lambda: torch_fn(t, arg))
@onlyNativeDeviceTypes
@dtypes(torch.long, torch.float32, torch.complex64)
def test_hsplit(self, device, dtype):
    """torch.hsplit vs np.hsplit. Entries are (shape, sections-or-indices);
    several are deliberately invalid (0-D input, non-divisible sections)."""
    inputs = (
        ((), 3),
        ((), [2, 4, 6]),
        ((6,), 2),
        ((6,), 4),
        ((6,), [2, 5]),
        ((6,), [7, 9]),
        ((3, 8), 4),
        ((3, 8), 5),
        ((3, 8), [1, 5]),
        ((3, 8), [3, 8]),
        ((5, 5, 5), 2),
        ((5, 5, 5), [1, 4]),
        ((5, 0, 5), 3),
        ((5, 5, 0), [2, 6]),
    )
    self._hvd_split_helper(torch.hsplit, np.hsplit, "torch.hsplit", inputs, device, dtype, 1)
@onlyNativeDeviceTypes
@dtypes(torch.long, torch.float32, torch.complex64)
def test_vsplit(self, device, dtype):
    """torch.vsplit vs np.vsplit. Entries are (shape, sections-or-indices);
    several are deliberately invalid (1-D input, non-divisible sections)."""
    inputs = (
        ((6,), 2),
        ((6,), 4),
        ((6, 5), 2),
        ((6, 5), 4),
        ((6, 5), [1, 2, 3]),
        ((6, 5), [1, 5, 9]),
        ((6, 5, 5), 2),
        ((6, 0, 5), 2),
        ((5, 0, 5), [1, 5]),
    )
    self._hvd_split_helper(torch.vsplit, np.vsplit, "torch.vsplit", inputs, device, dtype, 0)
@onlyNativeDeviceTypes
@dtypes(torch.long, torch.float32, torch.complex64)
def test_dsplit(self, device, dtype):
    """torch.dsplit vs np.dsplit. Entries are (shape, sections-or-indices);
    several are deliberately invalid (ndim < 3, non-divisible sections)."""
    inputs = (
        ((6,), 4),
        ((6, 6), 3),
        ((5, 5, 6), 2),
        ((5, 5, 6), 4),
        ((5, 5, 6), [1, 2, 3]),
        ((5, 5, 6), [1, 5, 9]),
        ((5, 5, 0), 2),
        ((5, 0, 6), 4),
        ((5, 0, 6), [1, 2, 3]),
        ((5, 5, 6), [1, 5, 9]),
    )
    self._hvd_split_helper(torch.dsplit, np.dsplit, "torch.dsplit", inputs, device, dtype, 2)
def _test_special_stacks(self, dim, at_least_dim, torch_fn, np_fn, device, dtype):
    """Shared driver for hstack/vstack/dstack/column_stack-style ops.

    `dim` is the dimension the op concatenates along; `at_least_dim` is the
    dimensionality the op promotes its inputs to (the "atleast" behavior the
    validity check below accounts for). Randomized shapes are compared
    against the NumPy reference `np_fn`; shape mismatches must raise.
    """
    # Test error for non-tuple argument
    t = torch.randn(10)
    with self.assertRaisesRegex(TypeError, "must be tuple of Tensors, not Tensor"):
        torch_fn(t)
    # Test error for a single array: (t) is not a tuple, just t itself.
    with self.assertRaisesRegex(TypeError, "must be tuple of Tensors, not Tensor"):
        torch_fn((t))
    # Test 0-D
    num_tensors = random.randint(1, 5)
    input_t = [torch.tensor(random.uniform(0, 10), device=device, dtype=dtype) for i in range(num_tensors)]
    actual = torch_fn(input_t)
    expected = np_fn([input.cpu().numpy() for input in input_t])
    self.assertEqual(actual, expected)
    for ndims in range(1, 5):
        base_shape = list(_rand_shape(ndims, min_size=1, max_size=5))
        for i in range(ndims):
            shape = list(base_shape)
            num_tensors = random.randint(1, 5)
            torch_input = []
            # Create tensors with shape being different along one axis only
            for param in range(num_tensors):
                shape[i] = random.randint(1, 5)
                torch_input.append(_generate_input(tuple(shape), dtype, device, with_extremal=False))
            # Determine if input tensors have valid dimensions.
            valid_dim = True
            for k in range(len(torch_input) - 1):
                for tdim in range(ndims):
                    # Test whether all tensors have the same shape except in concatenating dimension
                    # Unless the number of dimensions is less than the corresponding at_least function dimension
                    # Since the original concatenating dimension would shift after applying at_least and would no
                    # longer be the concatenating dimension
                    if (ndims < at_least_dim or tdim != dim) and torch_input[k].size()[tdim] != torch_input[k + 1].size()[tdim]:
                        valid_dim = False
            # Special case for hstack is needed since hstack works differently when ndims is 1
            if valid_dim or (torch_fn is torch.hstack and ndims == 1):
                # Valid dimensions, test against numpy
                np_input = [input.cpu().numpy() for input in torch_input]
                actual = torch_fn(torch_input)
                expected = np_fn(np_input)
                self.assertEqual(actual, expected)
            else:
                # Invalid dimensions, test for error
                with self.assertRaisesRegex(RuntimeError, "Sizes of tensors must match except in dimension"):
                    torch_fn(torch_input)
                with self.assertRaises(ValueError):
                    np_input = [input.cpu().numpy() for input in torch_input]
                    np_fn(np_input)
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half))
def test_hstack_column_stack(self, device, dtype):
    """hstack/column_stack vs their NumPy counterparts, plus a mixed
    1-D / 2-D column_stack case."""
    ops = ((torch.hstack, np.hstack), (torch.column_stack, np.column_stack))
    for torch_op, np_op in ops:
        self._test_special_stacks(1, 1, torch_op, np_op, device, dtype)
    # Test torch.column_stack with combinations of 1D and 2D tensors input
    one_dim_tensor = torch.arange(0, 10).to(dtype=dtype, device=device)
    two_dim_tensor = torch.arange(0, 100).to(dtype=dtype, device=device).reshape(10, 10)
    inputs = two_dim_tensor, one_dim_tensor, two_dim_tensor, one_dim_tensor
    torch_result = torch.column_stack(inputs)
    np_inputs = [input.cpu().numpy() for input in inputs]
    np_result = np.column_stack(np_inputs)
    self.assertEqual(np_result,
                     torch_result)
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half))
def test_vstack_row_stack(self, device, dtype):
    """vstack/row_stack vs their NumPy counterparts, plus mixed (N,) / (1, N)
    inputs which vstack promotes to a common 2-D shape."""
    ops = ((torch.vstack, np.vstack), (torch.row_stack, np.row_stack))
    for torch_op, np_op in ops:
        self._test_special_stacks(0, 2, torch_op, np_op, device, dtype)
        for i in range(5):
            # Test dimension change for 1D tensor of size (N) and 2D tensor of size (1, N)
            n = random.randint(1, 10)
            input_a = _generate_input((n,), dtype, device, with_extremal=False)
            input_b = _generate_input((1, n), dtype, device, with_extremal=False)
            torch_input = [input_a, input_b]
            np_input = [input.cpu().numpy() for input in torch_input]
            actual = torch_op(torch_input)
            expected = np_op(np_input)
            self.assertEqual(actual, expected)
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half))
def test_dstack(self, device, dtype):
    """dstack vs np.dstack, plus mixed-rank inputs that dstack promotes to 3-D."""
    self._test_special_stacks(2, 3, torch.dstack, np.dstack, device, dtype)
    for i in range(5):
        # Test dimension change for 1D tensor of size (N), 2D tensor of size (1, N), and 3D tensor of size (1, N, 1)
        n = random.randint(1, 10)
        input_a = _generate_input((n,), dtype, device, with_extremal=False)
        input_b = _generate_input((1, n), dtype, device, with_extremal=False)
        input_c = _generate_input((1, n, 1), dtype, device, with_extremal=False)
        torch_input = [input_a, input_b, input_c]
        np_input = [input.cpu().numpy() for input in torch_input]
        actual = torch.dstack(torch_input)
        expected = np.dstack(np_input)
        self.assertEqual(actual, expected)
        # Test dimension change for 2D tensor of size (M, N) and 3D tensor of size (M, N, 1)
        m = random.randint(1, 10)
        n = random.randint(1, 10)
        input_a = _generate_input((m, n), dtype, device, with_extremal=False)
        input_b = _generate_input((m, n, 1), dtype, device, with_extremal=False)
        torch_input = [input_a, input_b]
        np_input = [input.cpu().numpy() for input in torch_input]
        actual = torch.dstack(torch_input)
        expected = np.dstack(np_input)
        self.assertEqual(actual, expected)
@dtypes(torch.int32, torch.int64)
def test_large_linspace(self, device, dtype):
    """linspace across (nearly) the whole integer range must not collapse steps."""
    lo = torch.iinfo(dtype).min
    # Clear the low bits of max so the span is representable cleanly.
    hi = torch.iinfo(dtype).max & ~0xfff
    num_steps = 15
    result = torch.linspace(lo, hi, num_steps, dtype=dtype, device=device)
    self.assertGreater(result[1] - result[0], (hi - lo) / num_steps)
@dtypes(torch.float32, torch.float64)
def test_unpack_double(self, device, dtype):
    """Scalars that are not exactly representable (beyond float24/53 precision,
    or out of range like +/-1e500) must convert the same way as in NumPy."""
    # Reference: https://github.com/pytorch/pytorch/issues/33111
    vals = (2 ** 24 + 1, 2 ** 53 + 1,
            np.iinfo(np.int64).max, np.iinfo(np.uint64).max, np.iinfo(np.uint64).max + 1,
            -1e500, 1e500)
    for val in vals:
        t = torch.tensor(val, dtype=dtype, device=device)
        a = np.array(val, dtype=torch_to_numpy_dtype_dict[dtype])
        self.assertEqual(t, torch.from_numpy(a))
def _float_to_int_conversion_helper(self, vals, device, dtype):
    """Assert that casting `vals` from float to `dtype` matches NumPy."""
    # NumPy reference: go through float32, then cast to the target dtype.
    a = np.array(vals, dtype=np.float32).astype(torch_to_numpy_dtype_dict[dtype])
    t = torch.tensor(vals, device=device, dtype=torch.float).to(dtype)
    self.assertEqual(torch.from_numpy(a), t.cpu())
# Checks that float->integer casts don't produce undefined behavior errors.
# Note: In C++, casting from a floating value to an integral dtype
# is undefined if the floating point value is not within the integral
# dtype's dynamic range. This can (and should) cause undefined behavior
# errors with UBSAN. These casts are deliberate in PyTorch, however, and
# NumPy has the same behavior.
@onlyNativeDeviceTypes
@unittest.skipIf(IS_MACOS, "Test is broken on MacOS, see https://github.com/pytorch/pytorch/issues/38752")
@unittest.skipIf(IS_PPC, "Test is borken on PowerPC, see https://github.com/pytorch/pytorch/issues/39671")
@dtypes(torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_float_to_int_conversion_finite(self, device, dtype):
    """Finite (including extreme) float values cast to integral dtypes the
    same way as NumPy; divergent CUDA/HIP cases are excluded below."""
    min = torch.finfo(torch.float).min
    max = torch.finfo(torch.float).max
    # Note: CUDA max float -> integer conversion is divergent on some dtypes
    vals = (min, -2, -1.5, -.5, 0, .5, 1.5, 2, max)
    if self.device_type == 'cuda':
        if torch.version.hip:
            # HIP min float -> int64 conversion is divergent
            vals = (-2, -1.5, -.5, 0, .5, 1.5, 2)
        else:
            vals = (min, -2, -1.5, -.5, 0, .5, 1.5, 2)
    self._float_to_int_conversion_helper(vals, device, dtype)
# Note: CUDA will fail this test on most dtypes, often dramatically.
@onlyCPU
@dtypes(torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_float_to_int_conversion_nonfinite(self, device, dtype):
    """Casting -inf/inf/nan to integral dtypes matches NumPy on CPU."""
    nonfinite = (float('-inf'), float('inf'), float('nan'))
    self._float_to_int_conversion_helper(nonfinite, device, dtype)
# TODO: re-enable this test
@unittest.skipIf(True, "real and imag not implemented for complex")
@onlyNativeDeviceTypes
def test_complex_type_conversions(self, device):
    """Converting between real and complex dtypes keeps the real part and
    zeroes/drops the imaginary part as appropriate. (Currently skipped.)"""
    dtypes = [torch.float, torch.complex64, torch.complex128]
    for from_type in dtypes:
        for to_type in dtypes:
            from_tensor = torch.randn(4, dtype=from_type, device=device)
            to_tensor = from_tensor.to(to_type)
            if from_type.is_complex and not to_type.is_complex:
                # complex -> real keeps the real component
                self.assertEqual(torch.real(from_tensor), to_tensor, exact_dtype=False)
            elif not from_type.is_complex and to_type.is_complex:
                # real -> complex: real part preserved, imaginary part zero
                self.assertEqual(from_tensor, torch.real(to_tensor), exact_dtype=False)
                self.assertEqual(torch.zeros_like(torch.imag(to_tensor)), torch.imag(to_tensor), exact_dtype=False)
            else:
                self.assertEqual(from_tensor, to_tensor, exact_dtype=False)
@slowTest
@onlyCPU
def test_cat_big(self, device):
    """cat of two uint8 tensors totaling > 2**32 elements produces the full size."""
    rows_a, rows_b = 6500, 4500
    pieces = [
        torch.ones((rows_a, 1024 * 512), dtype=torch.uint8, device=device),
        torch.ones((rows_b, 1024 * 512), dtype=torch.uint8, device=device),
    ]
    joined = torch.cat(pieces)
    self.assertEqual(joined.size(0), rows_a + rows_b)
@onlyCPU
def test_cat_bad_input_sizes(self, device):
    """cat must reject mismatched dimensionality and mismatched non-cat sizes."""
    # Mismatched number of dimensions.
    a = torch.randn(2, 1, device=device)
    b = torch.randn(2, 1, 1, device=device)
    c = torch.randn(2, 1, 1, device=device)
    self.assertRaises(RuntimeError, lambda: torch.cat([a, b, c]))
    # Same ndim, but sizes differ in a non-cat dimension.
    a = torch.randn(2, 1, 2, device=device)
    b = torch.randn(2, 1, 1, device=device)
    c = torch.randn(2, 2, 1, device=device)
    self.assertRaises(RuntimeError, lambda: torch.cat([a, b, c], dim=1))
@onlyCPU
@dtypes(torch.half, torch.double, torch.int)
def test_cat2(self, device, dtype):
    """CPU cat across dtypes: slice placement, split/chunk round-trip, and
    error cases (empty list, None element)."""
    SIZE = 10
    for dim in range(-3, 3):
        pos_dim = dim if dim >= 0 else 3 + dim
        # Transpose so the cat dimension is not dim 0 in memory.
        x = torch.randint(low=-100, high=100, size=(13, SIZE, SIZE), device=device).to(dtype).transpose(0, pos_dim)
        y = torch.randint(low=-100, high=100, size=(17, SIZE, SIZE), device=device).to(dtype).transpose(0, pos_dim)
        z = torch.randint(low=-100, high=100, size=(19, SIZE, SIZE), device=device).to(dtype).transpose(0, pos_dim)
        res1 = torch.cat((x, y, z), dim)
        self.assertEqual(res1.narrow(pos_dim, 0, 13), x, atol=0, rtol=0)
        self.assertEqual(res1.narrow(pos_dim, 13, 17), y, atol=0, rtol=0)
        self.assertEqual(res1.narrow(pos_dim, 30, 19), z, atol=0, rtol=0)
    # cat is the inverse of split/chunk.
    x = torch.randint(low=-100, high=100, size=(20, SIZE, SIZE), device=device).to(dtype)
    self.assertEqual(torch.cat(torch.split(x, 7)), x)
    self.assertEqual(torch.cat(torch.chunk(x, 7)), x)
    y = torch.randint(low=-100, high=100, size=(1, SIZE, SIZE), device=device).to(dtype)
    z = torch.cat([x, y])
    self.assertEqual(z.size(), (21, SIZE, SIZE))
    self.assertRaises(RuntimeError, lambda: torch.cat([]))
    self.assertRaisesRegex(TypeError, 'got None', lambda: torch.cat([x, None]))
@onlyCPU
def test_cat_scalars(self, device):
    """Zero-dimensional tensors cannot be concatenated."""
    scalar_a = torch.tensor(0, device=device)
    scalar_b = torch.tensor(1, device=device)
    self.assertRaisesRegex(RuntimeError,
                           'zero-dimensional.*cannot be concatenated',
                           lambda: torch.cat([scalar_a, scalar_b]))
def test_zeros_dtype_out_match(self, device):
    """zeros(..., out=) with an out= tensor of a different dtype must raise."""
    out_double = torch.tensor((2, 3), device=device, dtype=torch.double)
    with self.assertRaises(RuntimeError):
        torch.zeros((2, 3), device=device, dtype=torch.float32, out=out_double)
# FIXME: Create an OpInfo-based tensor creation method test that verifies this for all tensor
# creation methods and verify all dtypes and layouts
@dtypes(torch.bool, torch.uint8, torch.int16, torch.int64, torch.float16, torch.float32, torch.complex64)
def test_zeros_dtype_layout_device_match(self, device, dtype):
    """zeros() honors the requested dtype, layout, and device."""
    requested_layout = torch.strided
    result = torch.zeros((2, 3), device=device, dtype=dtype, layout=requested_layout)
    self.assertIs(dtype, result.dtype)
    self.assertIs(requested_layout, result.layout)
    self.assertEqual(torch.device(device), result.device)
# TODO: update to work on CUDA, too
@onlyCPU
def test_trilu_indices(self, device):
    """tril_indices/triu_indices match the nonzero pattern of tril/triu,
    including default options and expanded (stride-0) inputs."""
    for test_args in tri_tests_args:
        _compare_trilu_indices(self, *test_args)
    run_additional_tri_tests(self, 'cpu')
    # test default options
    x = torch.ones(
        3, 3, dtype=torch.long, device='cpu', layout=torch.strided)
    self.assertEqual(
        x.tril(0).nonzero().transpose(0, 1), torch.tril_indices(3, 3))
    self.assertEqual(
        x.triu(0).nonzero().transpose(0, 1), torch.triu_indices(3, 3))
    # test stride 0 cases
    x = torch.ones(
        3, 1, 3, 3, dtype=torch.long, device='cpu', layout=torch.strided)
    output = x.triu(2).expand(3, 3, 3, 3)
    b = x.clone().expand(3, 3, 3, 3)
    self.assertEqual(b.triu(2), output)
    # In-place triu_ on an expanded (non-writable-stride) tensor must raise.
    self.assertRaises(RuntimeError, lambda: b.triu_(2))
@onlyCPU
def test_triu_tril_indices_bfloat16(self, device):
    """tril_indices/triu_indices support bfloat16 output matching float."""
    for index_fn in (torch.tril_indices, torch.triu_indices):
        bf16_result = index_fn(4, 3, 1, dtype=torch.bfloat16)
        f32_result = index_fn(4, 3, 1, dtype=torch.float)
        self.assertEqual(bf16_result.dtype, torch.bfloat16)
        self.assertEqual(bf16_result, f32_result.bfloat16())
# TODO: update to work on CUDA, too
@onlyCPU
def test_stack(self, device):
    """stack inserts a new dimension of size len(inputs) at `dim`; negative
    dims are equivalent, and each input is recoverable via select()."""
    for dtype in (torch.half, torch.double, torch.int):
        x = torch.randint(low=-100, high=100, size=(2, 3, 4)).to(dtype)
        y = torch.randint(low=-100, high=100, size=(2, 3, 4)).to(dtype)
        z = torch.randint(low=-100, high=100, size=(2, 3, 4)).to(dtype)
        for dim in range(4):
            res = torch.stack((x, y, z), dim)
            res_neg = torch.stack((x, y, z), dim - 4)
            expected_size = x.size()[:dim] + (3,) + x.size()[dim:]
            self.assertEqual(res, res_neg)
            self.assertEqual(res.size(), expected_size)
            self.assertEqual(res.select(dim, 0), x, atol=0, rtol=0)
            self.assertEqual(res.select(dim, 1), y, atol=0, rtol=0)
            self.assertEqual(res.select(dim, 2), z, atol=0, rtol=0)
# TODO: update to work on CUDA, too
@onlyCPU
def test_stack_out(self, device):
    """stack(out=...) with a correctly-sized out= writes in place (the
    storage pointer must not change) and matches the non-out result."""
    for dtype in (torch.half, torch.double, torch.int):
        x = torch.randint(low=-100, high=100, size=(2, 3, 4)).to(dtype)
        y = torch.randint(low=-100, high=100, size=(2, 3, 4)).to(dtype)
        z = torch.randint(low=-100, high=100, size=(2, 3, 4)).to(dtype)
        for dim in range(4):
            expected_size = x.size()[:dim] + (3,) + x.size()[dim:]
            res_out = x.new(expected_size)
            res_neg_out = x.new(expected_size)
            # Capture the storage pointers to prove no reallocation happens.
            res_out_dp = res_out.data_ptr()
            res_out_neg_dp = res_neg_out.data_ptr()
            torch.stack((x, y, z), dim, out=res_out)
            torch.stack((x, y, z), dim - 4, out=res_neg_out)
            self.assertEqual(res_out, res_neg_out)
            self.assertEqual(res_out.size(), expected_size)
            self.assertEqual(res_out_dp, res_out.data_ptr())
            self.assertEqual(res_out_neg_dp, res_neg_out.data_ptr())
            self.assertEqual(res_out.select(dim, 0), x, atol=0, rtol=0)
            self.assertEqual(res_out.select(dim, 1), y, atol=0, rtol=0)
            self.assertEqual(res_out.select(dim, 2), z, atol=0, rtol=0)
def test_repeat_interleave(self, device):
    """repeat_interleave: self-as-repeats form, scalar/tensor repeats, dim=
    handling, error cases, and zero-sized inputs."""
    x = torch.tensor([0, 1, 2, 3], device=device)
    # With no repeats argument, x itself is the repeat count per index.
    expected = torch.tensor([1, 2, 2, 3, 3, 3], device=device)
    self.assertEqual(torch.repeat_interleave(x), expected)
    # Repeats tensor must be 1-D ...
    with self.assertRaises(RuntimeError):
        torch.repeat_interleave(torch.arange(4, device=device).reshape(2, 2))
    # ... integral ...
    with self.assertRaises(RuntimeError):
        torch.repeat_interleave(torch.arange(4.0, device=device))
    # ... and non-negative.
    with self.assertRaises(RuntimeError):
        torch.repeat_interleave(torch.tensor([1, 2, -1, 3, 4], device=device))
    y = torch.tensor([[1, 2], [3, 4]], device=device)
    # int, 0-D tensor, and 1-element tensor repeats are all equivalent.
    y1_v1 = torch.repeat_interleave(y, 2)
    y1_v2 = torch.repeat_interleave(y, torch.tensor(2, device=device))
    y1_v3 = torch.repeat_interleave(y, torch.tensor([2], device=device))
    y1_expect = torch.tensor([1, 1, 2, 2, 3, 3, 4, 4], device=device)
    self.assertEqual(y1_v1, y1_expect)
    self.assertEqual(y1_v2, y1_expect)
    self.assertEqual(y1_v3, y1_expect)
    y2 = torch.repeat_interleave(y, 3, dim=1)
    y2_expect = torch.tensor([[1, 1, 1, 2, 2, 2],
                              [3, 3, 3, 4, 4, 4]], device=device)
    self.assertEqual(y2, y2_expect)
    y3 = torch.repeat_interleave(y, torch.tensor([1, 2], device=device), dim=0)
    y3_expect = torch.tensor([[1, 2],
                              [3, 4],
                              [3, 4]], device=device)
    self.assertEqual(y3, y3_expect)
    # Repeats length must match the size of the given dimension.
    with self.assertRaises(RuntimeError):
        torch.repeat_interleave(y, torch.tensor([1, 2, 3], device=device), dim=0)
    with self.assertRaises(RuntimeError):
        torch.repeat_interleave(y, torch.arange(9, device=device).reshape(3, 3), dim=0)
    # test zero sized dimension
    x = torch.zeros((5, 0), device=device)
    y = torch.repeat_interleave(x, repeats=3, dim=1)
    self.assertEqual(y, x.new_zeros(5, 0, device=device))
    x = torch.tensor([], dtype=torch.int64, device=device)
    y = torch.repeat_interleave(x, x)
    self.assertEqual(y, x)
# TODO: update to work on CUDA, too
@onlyCPU
def test_new_methods_requires_grad(self, device):
    """new_full/new_empty/new_zeros/new_ones accept requires_grad=, and reject
    requires_grad=True on integral source tensors."""
    size = (10,)
    test_cases = [
        # method name, args
        ('new_full', [size, 1]),
        ('new_empty', [size]),
        ('new_zeros', [size]),
        ('new_ones', [size]),
    ]
    for method_name, args in test_cases:
        x = torch.randn(size)
        for requires_grad in [True, False]:
            x_new = x.__getattribute__(method_name)(*args, requires_grad=requires_grad)
            self.assertEqual(x_new.requires_grad, requires_grad)
        # Integral tensors cannot require gradients.
        x = torch.randint(10, size)
        with self.assertRaisesRegex(
                RuntimeError,
                r'Only Tensors of floating point and complex dtype can require gradients'):
            x_new = x.__getattribute__(method_name)(*args, requires_grad=True)
# TODO: update to work on CUDA, too?
@onlyCPU
def test_tensor_from_sequence(self, device):
    """torch.tensor on a sequence whose __getitem__ raises must fail with a
    shape error; a well-behaved sequence converts normally."""
    class MockSequence(object):
        # Looks like a sequence (has __len__) but item access always fails.
        def __init__(self, lst):
            self.lst = lst
        def __len__(self):
            return len(self.lst)
        def __getitem__(self, item):
            raise TypeError
    class GoodMockSequence(MockSequence):
        # Same sequence, but item access actually works.
        def __getitem__(self, item):
            return self.lst[item]
    bad_mock_seq = MockSequence([1.0, 2.0, 3.0])
    good_mock_seq = GoodMockSequence([1.0, 2.0, 3.0])
    with self.assertRaisesRegex(ValueError, 'could not determine the shape'):
        torch.tensor(bad_mock_seq)
    self.assertEqual(torch.tensor([1.0, 2.0, 3.0]), torch.tensor(good_mock_seq))
# TODO: update to work on CUDA, too?
@onlyCPU
def test_simple_scalar_cast(self, device):
    """int()/float()/complex() work on one-element tensors only; multi-element
    tensors raise ValueError, and complex->real casts raise RuntimeError."""
    ok = [torch.tensor([1.5]), torch.zeros(1, 1, 1, 1)]
    ok_values = [1.5, 0]
    not_ok = map(torch.Tensor, [[], [1, 2], [[1, 2], [3, 4]]])
    for tensor, value in zip(ok, ok_values):
        self.assertEqual(int(tensor), int(value))
        self.assertEqual(float(tensor), float(value))
        self.assertEqual(complex(tensor), complex(value))
    self.assertEqual(complex(torch.tensor(1.5j)), 1.5j)
    for tensor in not_ok:
        self.assertRaises(ValueError, lambda: int(tensor))
        self.assertRaises(ValueError, lambda: float(tensor))
        self.assertRaises(ValueError, lambda: complex(tensor))
    # Lossy complex -> real conversions are rejected.
    self.assertRaises(RuntimeError, lambda: float(torch.tensor(1.5j)))
    self.assertRaises(RuntimeError, lambda: int(torch.tensor(1.5j)))
# TODO: update to work on CUDA, too?
@onlyCPU
def test_offset_scalar_cast(self, device):
    """int() works on a one-element view with a nonzero storage offset."""
    base = torch.tensor([1., 2., 3.])
    tail = base[2:]
    self.assertEqual(int(tail), 3)
def test_meshgrid_empty(self):
with self.assertRaisesRegex(RuntimeError,
'expects a non-empty TensorList'):
torch.meshgrid()
def test_meshgrid_unsupported_indexing(self):
with self.assertRaisesRegex(RuntimeError,
'indexing must be one of "xy" or "ij"'):
torch.meshgrid(torch.tensor([1, 2]), indexing='')
def test_meshgrid_non_1d_tensor(self):
with self.assertRaisesRegex(RuntimeError,
'Expected 0D or 1D tensor'):
torch.meshgrid(torch.tensor([[1, 2], [3, 4]]))
def test_meshgrid_inconsistent_dtype(self):
with self.assertRaisesRegex(
RuntimeError, 'expects all tensors to have the same dtype'):
torch.meshgrid(torch.tensor([1], dtype=torch.int),
torch.tensor([2], dtype=torch.float))
def test_meshgrid_inconsistent_device(self):
with self.assertRaisesRegex(
RuntimeError, 'expects all tensors to have the same device'):
torch.meshgrid(torch.tensor([1], device='cpu'),
torch.tensor([2], device='meta'))
def test_meshgrid_warns_if_no_indexing(self):
    """Omitting the indexing argument emits a once-only deprecation warning."""
    pattern = '.*will be required to pass the indexing arg.*'
    with self.assertWarnsOnceRegex(UserWarning, pattern):
        torch.meshgrid(torch.tensor([1, 2]))
def test_meshgrid_default_indexing(self, device):
a = torch.tensor(1, device=device)
b = torch.tensor([1, 2, 3], device=device)
c = torch.tensor([1, 2], device=device)
grid_a, grid_b, grid_c = torch.meshgrid([a, b, c])
self.assertEqual(grid_a.shape, torch.Size([1, 3, 2]))
self.assertEqual(grid_b.shape, torch.Size([1, 3, 2]))
self.assertEqual(grid_c.shape, torch.Size([1, 3, 2]))
grid_a2, grid_b2, grid_c2 = torch.meshgrid(a, b, c)
self.assertEqual(grid_a2.shape, torch.Size([1, 3, 2]))
self.assertEqual(grid_b2.shape, torch.Size([1, 3, 2]))
self.assertEqual(grid_c2.shape, torch.Size([1, 3, 2]))
expected_grid_a = torch.ones(1, 3, 2, dtype=torch.int64, device=device)
expected_grid_b = torch.tensor([[[1, 1],
[2, 2],
[3, 3]]], device=device)
expected_grid_c = torch.tensor([[[1, 2],
[1, 2],
[1, 2]]], device=device)
self.assertTrue(grid_a.equal(expected_grid_a))
self.assertTrue(grid_b.equal(expected_grid_b))
self.assertTrue(grid_c.equal(expected_grid_c))
self.assertTrue(grid_a2.equal(expected_grid_a))
self.assertTrue(grid_b2.equal(expected_grid_b))
self.assertTrue(grid_c2.equal(expected_grid_c))
def test_meshgrid_xy_indexing(self, device):
a = torch.tensor(1, device=device)
b = torch.tensor([1, 2, 3], device=device)
c = torch.tensor([1, 2], device=device)
grid_a, grid_b, grid_c = torch.meshgrid([a, b, c], indexing='xy')
self.assertEqual(grid_a.shape, torch.Size([3, 1, 2]))
self.assertEqual(grid_b.shape, torch.Size([3, 1, 2]))
self.assertEqual(grid_c.shape, torch.Size([3, 1, 2]))
grid_a2, grid_b2, grid_c2 = torch.meshgrid(a, b, c, indexing='xy')
self.assertEqual(grid_a2.shape, torch.Size([3, 1, 2]))
self.assertEqual(grid_b2.shape, torch.Size([3, 1, 2]))
self.assertEqual(grid_c2.shape, torch.Size([3, 1, 2]))
expected_grid_a = torch.ones(3, 1, 2, dtype=torch.int64, device=device)
expected_grid_b = torch.tensor([[[1, 1]],
[[2, 2]],
[[3, 3]]], device=device)
expected_grid_c = torch.tensor([[[1, 2]],
[[1, 2]],
[[1, 2]]], device=device)
self.assertTrue(grid_a.equal(expected_grid_a))
self.assertTrue(grid_b.equal(expected_grid_b))
self.assertTrue(grid_c.equal(expected_grid_c))
self.assertTrue(grid_a2.equal(expected_grid_a))
self.assertTrue(grid_b2.equal(expected_grid_b))
self.assertTrue(grid_c2.equal(expected_grid_c))
def test_meshgrid_ij_indexing(self, device):
a = torch.tensor(1, device=device)
b = torch.tensor([1, 2, 3], device=device)
c = torch.tensor([1, 2], device=device)
grid_a, grid_b, grid_c = torch.meshgrid([a, b, c], indexing='ij')
self.assertEqual(grid_a.shape, torch.Size([1, 3, 2]))
self.assertEqual(grid_b.shape, torch.Size([1, 3, 2]))
self.assertEqual(grid_c.shape, torch.Size([1, 3, 2]))
grid_a2, grid_b2, grid_c2 = torch.meshgrid(a, b, c, indexing='ij')
self.assertEqual(grid_a2.shape, torch.Size([1, 3, 2]))
self.assertEqual(grid_b2.shape, torch.Size([1, 3, 2]))
self.assertEqual(grid_c2.shape, torch.Size([1, 3, 2]))
expected_grid_a = torch.ones(1, 3, 2, dtype=torch.int64, device=device)
expected_grid_b = torch.tensor([[[1, 1],
[2, 2],
[3, 3]]], device=device)
expected_grid_c = torch.tensor([[[1, 2],
[1, 2],
[1, 2]]], device=device)
self.assertTrue(grid_a.equal(expected_grid_a))
self.assertTrue(grid_b.equal(expected_grid_b))
self.assertTrue(grid_c.equal(expected_grid_c))
self.assertTrue(grid_a2.equal(expected_grid_a))
self.assertTrue(grid_b2.equal(expected_grid_b))
self.assertTrue(grid_c2.equal(expected_grid_c))
def test_meshgrid_ij_indexing_is_default(self, device):
a = torch.tensor(1, device=device)
b = torch.tensor([1, 2, 3], device=device)
c = torch.tensor([1, 2], device=device)
grid_a, grid_b, grid_c = torch.meshgrid(a, b, c, indexing='ij')
grid_a2, grid_b2, grid_c2 = torch.meshgrid(a, b, c)
self.assertTrue(grid_a.equal(grid_a2))
self.assertTrue(grid_b.equal(grid_b2))
self.assertTrue(grid_c.equal(grid_c2))
@skipMeta
def test_meshgrid_vs_numpy(self, device):
    """Cross-check torch.meshgrid against np.meshgrid for 0D/1D int inputs
    over every indexing-mode correspondence the two libraries share."""
    # Shapes to the random tensors. Each line is a test case, and
    # each list within that line is the shape of a single
    # tensor. The shapes are restricted to 0D (represented by [])
    # and 1D tensors.
    cases = [
        [[]],
        [[1], [1], [1]],
        [[], [], []],
        [[3], [5], [7]],
        [[3], [], [7]],
        [[11], [13]],
        [[15]],
    ]
    # We also need to test the different indexing modes. We can't
    # just enumerate them because we don't presently support the
    # same modes as numpy.meshgrid, nor does our default
    # correspond to their default.
    #
    # TODO Eliminate this and replace it with a list of all
    # supported indexing modes when we have full compatibility.
    indexing_correspondence = [
        # No indexing in PyTorch corresponds to "ij" indexing in
        # NumPy.
        ({}, {'indexing': 'ij'}),
        # No indexing in NumPy corresponds to "xy" indexing in
        # PyTorch.
        ({'indexing': 'xy'}, {}),
        # "ij" and "xy" are implemented identically in both.
        ({'indexing': 'ij'}, {'indexing': 'ij'}),
        ({'indexing': 'xy'}, {'indexing': 'xy'}),
    ]
    # Cartesian product of every shape case with every indexing pairing.
    for shapes, (torch_kwargs, numpy_kwargs) in product(cases, indexing_correspondence):
        with self.subTest(shapes=shapes, torch_kwargs=torch_kwargs, numpy_kwargs=numpy_kwargs):
            tensors = [make_tensor(shape, device=device, dtype=torch.int) for shape in shapes]
            torch_grids = torch.meshgrid(*tensors, **torch_kwargs)
            numpy_grids = np.meshgrid(*(tensor.cpu().numpy() for tensor in tensors), **numpy_kwargs)
            self.assertEqual(torch_grids, numpy_grids)
def test_cartesian_prod(self, device):
a = torch.tensor([1], device=device)
b = torch.tensor([1, 2, 3], device=device)
c = torch.tensor([1, 2], device=device)
prod = torch.cartesian_prod(a, b, c)
expected = torch.tensor(list(product([a], b, c)), device=device)
self.assertEqual(expected, prod)
# test 0 size input
d = torch.empty(0, dtype=b.dtype, device=device)
prod = torch.cartesian_prod(a, b, c, d)
expected = torch.empty(0, 4, dtype=b.dtype, device=device)
self.assertEqual(expected, prod)
# test single input
prod = torch.cartesian_prod(b)
self.assertEqual(b, prod)
def test_combinations(self, device):
    """torch.combinations matches itertools.combinations (and the
    with_replacement variant) for r = 0..5, including r larger than the
    input length and empty inputs."""
    a = torch.tensor([1, 2, 3], device=device)
    # r=0 yields an empty result
    c = torch.combinations(a, r=0)
    expected = torch.empty(0, dtype=a.dtype, device=device)
    self.assertEqual(c, expected)
    c = torch.combinations(a, r=1)
    expected = torch.tensor(list(combinations(a, r=1)), device=device)
    self.assertEqual(c, expected)
    c = torch.combinations(a, r=1, with_replacement=True)
    expected = torch.tensor(list(combinations_with_replacement(a, r=1)), device=device)
    self.assertEqual(c, expected)
    # r defaults to 2
    c = torch.combinations(a)
    expected = torch.tensor(list(combinations(a, r=2)), device=device)
    self.assertEqual(c, expected)
    c = torch.combinations(a, with_replacement=True)
    expected = torch.tensor(list(combinations_with_replacement(a, r=2)), device=device)
    self.assertEqual(c, expected)
    c = torch.combinations(a, r=3)
    expected = torch.tensor(list(combinations(a, r=3)), device=device)
    self.assertEqual(c, expected)
    # r greater than the number of elements yields an empty (0, r) result
    c = torch.combinations(a, r=4)
    expected = torch.empty(0, 4, dtype=a.dtype, device=device)
    self.assertEqual(c, expected)
    c = torch.combinations(a, r=5)
    expected = torch.empty(0, 5, dtype=a.dtype, device=device)
    self.assertEqual(c, expected)
    # test empty input
    a = torch.empty(0, device=device)
    c1 = torch.combinations(a)
    c2 = torch.combinations(a, with_replacement=True)
    expected = torch.empty(0, 2, dtype=a.dtype, device=device)
    self.assertEqual(c1, expected)
    self.assertEqual(c2, expected)
@skipMeta
def test_linlogspace_mem_overlap(self, device):
    """linspace/logspace refuse an out= tensor with internal memory overlap."""
    overlapping = torch.rand(1, device=device).expand(10)
    for factory in (torch.linspace, torch.logspace):
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            factory(1, 10, 10, out=overlapping)
def test_ctor_with_numpy_array(self, device):
correct_dtypes = [
np.double,
np.float,
np.float16,
np.int64,
np.int32,
np.int16,
np.int8,
np.uint8,
np.bool,
]
incorrect_byteorder = '>' if sys.byteorder == 'little' else '<'
incorrect_dtypes = [incorrect_byteorder + t for t in ['d', 'f']]
for dtype in correct_dtypes:
array = np.array([1, 2, 3, 4], dtype=dtype)
# Upcast
tensor = torch.DoubleTensor(array).to(device)
for i in range(len(array)):
self.assertEqual(tensor[i], array[i])
# Downcast (sometimes)
tensor = torch.FloatTensor(array).to(device)
for i in range(len(array)):
self.assertEqual(tensor[i], array[i])
tensor = torch.HalfTensor(array).to(device)
for i in range(len(array)):
self.assertEqual(tensor[i], array[i])
@dtypes(torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
def test_random(self, device, dtype):
    """random_(lb, ub) and random_(ub) fill from the half-open range
    [lb, ub) / [0, ub); with 200 samples both endpoints are hit."""
    # This test is flaky with p<=(2/(ub-lb))^200=6e-36
    t = torch.empty(200, dtype=dtype, device=device)
    lb = 1
    ub = 4
    # two-argument form: samples from [lb, ub)
    t.fill_(-1)
    t.random_(lb, ub)
    self.assertEqual(t.min(), lb)
    self.assertEqual(t.max(), ub - 1)
    # one-argument form: samples from [0, ub)
    t.fill_(-1)
    t.random_(ub)
    self.assertEqual(t.min(), 0)
    self.assertEqual(t.max(), ub - 1)
def test_random_bool(self, device):
size = 2000
t = torch.empty(size, dtype=torch.bool, device=device)
t.fill_(False)
t.random_()
self.assertEqual(t.min(), False)
self.assertEqual(t.max(), True)
self.assertTrue(0.4 < (t.eq(True)).to(torch.int).sum().item() / size < 0.6)
t.fill_(True)
t.random_()
self.assertEqual(t.min(), False)
self.assertEqual(t.max(), True)
self.assertTrue(0.4 < (t.eq(True)).to(torch.int).sum().item() / size < 0.6)
def test_random_from_to_bool(self, device):
    """random_(from_, to_) on bool tensors: valid ranges fill the tensor
    with both values; out-of-bounds or empty ranges raise RuntimeError."""
    size = 2000
    int64_min_val = torch.iinfo(torch.int64).min
    int64_max_val = torch.iinfo(torch.int64).max
    min_val = 0
    max_val = 1
    # candidate bounds deliberately straddle the valid [0, 1] bool range
    froms = [int64_min_val, -42, min_val - 1, min_val, max_val, max_val + 1, 42]
    tos = [-42, min_val - 1, min_val, max_val, max_val + 1, 42, int64_max_val]
    for from_ in froms:
        for to_ in tos:
            t = torch.empty(size, dtype=torch.bool, device=device)
            if to_ > from_:
                if not (min_val <= from_ <= max_val):
                    self.assertRaisesRegex(
                        RuntimeError,
                        "from is out of bounds",
                        lambda: t.random_(from_, to_)
                    )
                elif not (min_val <= (to_ - 1) <= max_val):
                    self.assertRaisesRegex(
                        RuntimeError,
                        "to - 1 is out of bounds",
                        lambda: t.random_(from_, to_)
                    )
                else:
                    t.random_(from_, to_)
                    range_ = to_ - from_
                    delta = 1
                    # sampled values must span the whole [from_, to_) range
                    self.assertTrue(from_ <= t.to(torch.int).min() < (from_ + delta))
                    self.assertTrue((to_ - delta) <= t.to(torch.int).max() < to_)
            else:
                # empty range (to_ <= from_) is rejected outright
                self.assertRaisesRegex(
                    RuntimeError,
                    "random_ expects 'from' to be less than 'to', but got from=" + str(from_) + " >= to=" + str(to_),
                    lambda: t.random_(from_, to_)
                )
@dtypes(*all_types_and(torch.bfloat16, torch.half))
def test_random_full_range(self, device, dtype):
    """random_(from_, None) samples up to the top of the dtype's
    representable range (integer-exact range for floating dtypes)."""
    size = 2000
    alpha = 0.1
    int64_min_val = torch.iinfo(torch.int64).min
    int64_max_val = torch.iinfo(torch.int64).max
    # fp_limit: the largest consecutive integer the floating dtype can
    # represent exactly (2**(mantissa bits + 1)); 0 for integer dtypes.
    if dtype == torch.double:
        fp_limit = 2**53
    elif dtype == torch.float:
        fp_limit = 2**24
    elif dtype == torch.half:
        fp_limit = 2**11
    elif dtype == torch.bfloat16:
        fp_limit = 2**8
    else:
        fp_limit = 0
    t = torch.empty(size, dtype=dtype, device=device)
    if dtype in [torch.float, torch.double, torch.half, torch.bfloat16]:
        from_ = int(max(-fp_limit, int64_min_val))
        to_inc_ = int(min(fp_limit, int64_max_val))
    else:
        from_ = int(max(torch.iinfo(dtype).min, int64_min_val))
        to_inc_ = int(min(torch.iinfo(dtype).max, int64_max_val))
    range_ = to_inc_ - from_ + 1
    # to=None means "through the dtype's representable maximum"
    t.random_(from_, None)
    delta = max(1, alpha * range_)
    # statistical check that both ends of the range are approached
    self.assertTrue(from_ <= t.to(torch.double).min() < (from_ + delta))
    self.assertTrue((to_inc_ - delta) < t.to(torch.double).max() <= to_inc_)
@dtypes(*all_types_and(torch.bfloat16, torch.half))
def test_random_from_to(self, device, dtype):
    """random_(from_, to_) for every dtype: in-range bounds fill [from_, to_);
    out-of-range bounds raise (or warn, for floating dtypes beyond the
    exactly-representable integer range); empty ranges raise."""
    size = 2000
    alpha = 0.1
    int64_min_val = torch.iinfo(torch.int64).min
    int64_max_val = torch.iinfo(torch.int64).max
    # Candidate bounds per dtype, deliberately including out-of-range values.
    if dtype in [torch.float, torch.double, torch.half]:
        min_val = int(max(torch.finfo(dtype).min, int64_min_val))
        max_val = int(min(torch.finfo(dtype).max, int64_max_val))
        froms = [min_val, -42, 0, 42]
        tos = [-42, 0, 42, max_val >> 1]
    elif dtype == torch.bfloat16:
        min_val = int64_min_val
        max_val = int64_max_val
        froms = [min_val, -42, 0, 42]
        tos = [-42, 0, 42, max_val >> 1]
    elif dtype == torch.uint8:
        min_val = torch.iinfo(dtype).min
        max_val = torch.iinfo(dtype).max
        froms = [int64_min_val, -42, min_val - 1, min_val, 42, max_val, max_val + 1]
        tos = [-42, min_val - 1, min_val, 42, max_val, max_val + 1, int64_max_val]
    elif dtype == torch.int64:
        min_val = int64_min_val
        max_val = int64_max_val
        froms = [min_val, -42, 0, 42]
        tos = [-42, 0, 42, max_val]
    else:
        min_val = torch.iinfo(dtype).min
        max_val = torch.iinfo(dtype).max
        froms = [int64_min_val, min_val - 1, min_val, -42, 0, 42, max_val, max_val + 1]
        tos = [min_val - 1, min_val, -42, 0, 42, max_val, max_val + 1, int64_max_val]
    # fp_limit: largest consecutive integer exactly representable by the
    # floating dtype; 0 for integer dtypes.
    if dtype == torch.double:
        fp_limit = 2**53
    elif dtype == torch.float:
        fp_limit = 2**24
    elif dtype == torch.half:
        fp_limit = 2**11
    elif dtype == torch.bfloat16:
        fp_limit = 2**8
    else:
        fp_limit = 0
    for from_ in froms:
        for to_ in tos:
            t = torch.empty(size, dtype=dtype, device=device)
            if to_ > from_:
                if not (min_val <= from_ <= max_val):
                    self.assertRaisesRegex(
                        RuntimeError,
                        "from is out of bounds",
                        lambda: t.random_(from_, to_)
                    )
                elif not (min_val <= (to_ - 1) <= max_val):
                    self.assertRaisesRegex(
                        RuntimeError,
                        "to - 1 is out of bounds",
                        lambda: t.random_(from_, to_)
                    )
                else:
                    # bounds inside the dtype range but beyond the exactly
                    # representable integers only warn for floating dtypes
                    if dtype.is_floating_point and (
                            not (-fp_limit <= from_ <= fp_limit) or not (-fp_limit <= (to_ - 1) <= fp_limit)):
                        if not (-fp_limit <= from_ <= fp_limit):
                            self.assertWarnsRegex(UserWarning, "from is out of bounds",
                                                  lambda: t.random_(from_, to_))
                        if not (-fp_limit <= (to_ - 1) <= fp_limit):
                            self.assertWarnsRegex(UserWarning, "to - 1 is out of bounds",
                                                  lambda: t.random_(from_, to_))
                    else:
                        t.random_(from_, to_)
                        range_ = to_ - from_
                        delta = max(1, alpha * range_)
                        if dtype == torch.bfloat16:
                            # Less strict checks because of rounding errors
                            # TODO investigate rounding errors
                            self.assertTrue(from_ <= t.to(torch.double).min() < (from_ + delta))
                            self.assertTrue((to_ - delta) < t.to(torch.double).max() <= to_)
                        else:
                            self.assertTrue(from_ <= t.to(torch.double).min() < (from_ + delta))
                            self.assertTrue((to_ - delta) <= t.to(torch.double).max() < to_)
            else:
                # empty range (to_ <= from_) is rejected outright
                self.assertRaisesRegex(
                    RuntimeError,
                    "random_ expects 'from' to be less than 'to', but got from=" + str(from_) + " >= to=" + str(to_),
                    lambda: t.random_(from_, to_)
                )
@dtypes(*all_types_and(torch.bfloat16, torch.half))
def test_random_to(self, device, dtype):
    """random_(to_) (implicit from=0) for every dtype: valid upper bounds
    fill [0, to_); out-of-bounds or non-positive bounds raise."""
    size = 2000
    alpha = 0.1
    int64_min_val = torch.iinfo(torch.int64).min
    int64_max_val = torch.iinfo(torch.int64).max
    # Candidate upper bounds per dtype, including out-of-range values.
    if dtype in [torch.float, torch.double, torch.half]:
        min_val = int(max(torch.finfo(dtype).min, int64_min_val))
        max_val = int(min(torch.finfo(dtype).max, int64_max_val))
        tos = [-42, 0, 42, max_val >> 1]
    elif dtype == torch.bfloat16:
        min_val = int64_min_val
        max_val = int64_max_val
        tos = [-42, 0, 42, max_val >> 1]
    elif dtype == torch.uint8:
        min_val = torch.iinfo(dtype).min
        max_val = torch.iinfo(dtype).max
        tos = [-42, min_val - 1, min_val, 42, max_val, max_val + 1, int64_max_val]
    elif dtype == torch.int64:
        min_val = int64_min_val
        max_val = int64_max_val
        tos = [-42, 0, 42, max_val]
    else:
        min_val = torch.iinfo(dtype).min
        max_val = torch.iinfo(dtype).max
        tos = [min_val - 1, min_val, -42, 0, 42, max_val, max_val + 1, int64_max_val]
    from_ = 0
    for to_ in tos:
        t = torch.empty(size, dtype=dtype, device=device)
        if to_ > from_:
            if not (min_val <= (to_ - 1) <= max_val):
                self.assertRaisesRegex(
                    RuntimeError,
                    "to - 1 is out of bounds",
                    lambda: t.random_(from_, to_)
                )
            else:
                # one-argument form: equivalent to random_(0, to_)
                t.random_(to_)
                range_ = to_ - from_
                delta = max(1, alpha * range_)
                if dtype == torch.bfloat16:
                    # Less strict checks because of rounding errors
                    # TODO investigate rounding errors
                    self.assertTrue(from_ <= t.to(torch.double).min() < (from_ + delta))
                    self.assertTrue((to_ - delta) < t.to(torch.double).max() <= to_)
                else:
                    self.assertTrue(from_ <= t.to(torch.double).min() < (from_ + delta))
                    self.assertTrue((to_ - delta) <= t.to(torch.double).max() < to_)
        else:
            # empty range (to_ <= 0) is rejected outright
            self.assertRaisesRegex(
                RuntimeError,
                "random_ expects 'from' to be less than 'to', but got from=" + str(from_) + " >= to=" + str(to_),
                lambda: t.random_(from_, to_)
            )
@dtypes(*all_types_and(torch.bfloat16, torch.half))
def test_random_default(self, device, dtype):
    """random_() with no arguments samples [0, to_inc], where to_inc is the
    dtype max (integers) or the largest exactly-representable integer
    (floating dtypes)."""
    size = 2000
    alpha = 0.1
    if dtype == torch.float:
        to_inc = 1 << 24
    elif dtype == torch.double:
        to_inc = 1 << 53
    elif dtype == torch.half:
        to_inc = 1 << 11
    elif dtype == torch.bfloat16:
        to_inc = 1 << 8
    else:
        to_inc = torch.iinfo(dtype).max
    t = torch.empty(size, dtype=dtype, device=device)
    t.random_()
    # statistical check: min near 0 and max near to_inc
    self.assertTrue(0 <= t.to(torch.double).min() < alpha * to_inc)
    self.assertTrue((to_inc - alpha * to_inc) < t.to(torch.double).max() <= to_inc)
# TODO: this test should be updated
@onlyNativeDeviceTypes
def test_empty_full(self, device):
    """Delegate to do_test_empty_full (defined elsewhere in this file) for
    all dtypes; on CUDA additionally exercise the device=None path."""
    torch_device = torch.device(device)
    device_type = torch_device.type
    # include_complex32=True: complex half is covered too
    dtypes = get_all_dtypes(include_half=False, include_bfloat16=False, include_complex32=True)
    if device_type == 'cpu':
        do_test_empty_full(self, dtypes, torch.strided, torch_device)
    if device_type == 'cuda':
        # None exercises the default-device code path
        do_test_empty_full(self, dtypes, torch.strided, None)
        do_test_empty_full(self, dtypes, torch.strided, torch_device)
# TODO: this test should be updated
@suppress_warnings
@onlyNativeDeviceTypes
@deviceCountAtLeast(1)
def test_tensor_device(self, devices):
    """Tensor factories and .cuda() place tensors on the requested device
    (by string, index, or current-device context); moving to 'cpu' via
    .cuda() is rejected."""
    device_type = torch.device(devices[0]).type
    if device_type == 'cpu':
        self.assertEqual('cpu', torch.tensor(5).device.type)
        self.assertEqual('cpu',
                         torch.ones((2, 3), dtype=torch.float32, device='cpu').device.type)
        # 'cpu:0' normalizes to plain 'cpu'
        self.assertEqual('cpu',
                         torch.ones((2, 3), dtype=torch.float32, device='cpu:0').device.type)
        self.assertEqual('cpu',
                         torch.tensor(torch.ones((2, 3), dtype=torch.float32), device='cpu:0').device.type)
        self.assertEqual('cpu', torch.tensor(np.random.randn(2, 3), device='cpu').device.type)
    if device_type == 'cuda':
        self.assertEqual('cuda:0', str(torch.tensor(5).cuda(0).device))
        self.assertEqual('cuda:0', str(torch.tensor(5).cuda('cuda:0').device))
        # a bare integer device index means the corresponding CUDA device
        self.assertEqual('cuda:0',
                         str(torch.tensor(5, dtype=torch.int64, device=0).device))
        self.assertEqual('cuda:0',
                         str(torch.tensor(5, dtype=torch.int64, device='cuda:0').device))
        self.assertEqual('cuda:0',
                         str(torch.tensor(torch.ones((2, 3), dtype=torch.float32), device='cuda:0').device))
        self.assertEqual('cuda:0', str(torch.tensor(np.random.randn(2, 3), device='cuda:0').device))
        # bare 'cuda' resolves to the current device set by the context
        for device in devices:
            with torch.cuda.device(device):
                device_string = 'cuda:' + str(torch.cuda.current_device())
                self.assertEqual(device_string,
                                 str(torch.tensor(5, dtype=torch.int64, device='cuda').device))
        # .cuda() cannot target a CPU device
        with self.assertRaises(RuntimeError):
            torch.tensor(5).cuda('cpu')
        with self.assertRaises(RuntimeError):
            torch.tensor(5).cuda('cpu:0')
        if len(devices) > 1:
            self.assertEqual('cuda:1', str(torch.tensor(5).cuda(1).device))
            self.assertEqual('cuda:1', str(torch.tensor(5).cuda('cuda:1').device))
            self.assertEqual('cuda:1',
                             str(torch.tensor(5, dtype=torch.int64, device=1).device))
            self.assertEqual('cuda:1',
                             str(torch.tensor(5, dtype=torch.int64, device='cuda:1').device))
            self.assertEqual('cuda:1',
                             str(torch.tensor(torch.ones((2, 3), dtype=torch.float32),
                                 device='cuda:1').device))
            self.assertEqual('cuda:1',
                             str(torch.tensor(np.random.randn(2, 3), device='cuda:1').device))
# TODO: this test should be updated
@onlyNativeDeviceTypes
def test_as_strided_neg(self, device):
    """as_strided rejects negative strides with a descriptive error."""
    message = r'as_strided: Negative strides are not supported at the ' \
              r'moment, got strides: \[-?[0-9]+(, -?[0-9]+)*\]'
    cases = [
        (torch.ones(3, 3, device=device), (1, 1), (2, -1)),
        (torch.ones(14, device=device), (2,), (-11,)),
    ]
    for base, size, stride in cases:
        with self.assertRaisesRegex(RuntimeError, message):
            torch.as_strided(base, size, stride)
# TODO: this test should be updated
def test_zeros(self, device):
res1 = torch.zeros(100, 100, device=device)
res2 = torch.tensor((), device=device)
torch.zeros(100, 100, device=device, out=res2)
self.assertEqual(res1, res2)
boolTensor = torch.zeros(2, 2, device=device, dtype=torch.bool)
expected = torch.tensor([[False, False], [False, False]],
device=device, dtype=torch.bool)
self.assertEqual(boolTensor, expected)
halfTensor = torch.zeros(1, 1, device=device, dtype=torch.half)
expected = torch.tensor([[0.]], device=device, dtype=torch.float16)
self.assertEqual(halfTensor, expected)
bfloat16Tensor = torch.zeros(1, 1, device=device, dtype=torch.bfloat16)
expected = torch.tensor([[0.]], device=device, dtype=torch.bfloat16)
self.assertEqual(bfloat16Tensor, expected)
complexTensor = torch.zeros(2, 2, device=device, dtype=torch.complex64)
expected = torch.tensor([[0., 0.], [0., 0.]], device=device, dtype=torch.complex64)
self.assertEqual(complexTensor, expected)
complexHalfTensor = torch.zeros(2, 2, device=device, dtype=torch.complex32)
expected = torch.tensor([[0., 0.], [0., 0.]], device=device, dtype=torch.complex32)
self.assertEqual(complexHalfTensor, expected)
# TODO: this test should be updated
def test_zeros_out(self, device):
shape = (3, 4)
out = torch.zeros(shape, device=device)
torch.zeros(shape, device=device, out=out)
# change the dtype, layout, device
with self.assertRaises(RuntimeError):
torch.zeros(shape, device=device, dtype=torch.int64, out=out)
with self.assertRaises(RuntimeError):
torch.zeros(shape, device=device, layout=torch.sparse_coo, out=out)
# leave them the same
self.assertEqual(torch.zeros(shape, device=device),
torch.zeros(shape, device=device, dtype=out.dtype, out=out))
self.assertEqual(torch.zeros(shape, device=device),
torch.zeros(shape, device=device, layout=torch.strided, out=out))
self.assertEqual(torch.zeros(shape, device=device),
torch.zeros(shape, device=device, out=out))
# TODO: this test should be updated
def test_ones(self, device):
res1 = torch.ones(100, 100, device=device)
res2 = torch.tensor((), device=device)
torch.ones(100, 100, device=device, out=res2)
self.assertEqual(res1, res2)
# test boolean tensor
res1 = torch.ones(1, 2, device=device, dtype=torch.bool)
expected = torch.tensor([[True, True]], device=device, dtype=torch.bool)
self.assertEqual(res1, expected)
# test chalf
self.assertEqual(torch.ones(100, 100, device=device, dtype=torch.chalf),
torch.ones(100, 100, device=device, dtype=torch.cfloat), exact_dtype=False)
# TODO: this test should be updated
@onlyCPU
def test_constructor_dtypes(self, device):
    """set_default_tensor_type / set_default_dtype steer the dtype of new
    tensors and torch.Storage; non-floating defaults are rejected.

    Mutates and restores the process-wide default tensor type.
    """
    default_type = torch.tensor([]).type()
    self.assertIs(torch.tensor([]).dtype, torch.get_default_dtype())
    self.assertIs(torch.uint8, torch.ByteTensor.dtype)
    self.assertIs(torch.float32, torch.FloatTensor.dtype)
    self.assertIs(torch.float64, torch.DoubleTensor.dtype)
    torch.set_default_tensor_type('torch.FloatTensor')
    self.assertIs(torch.float32, torch.get_default_dtype())
    self.assertIs(torch.FloatStorage, torch.Storage)
    # only floating-point types are supported as the default type
    self.assertRaises(TypeError, lambda: torch.set_default_tensor_type('torch.IntTensor'))
    torch.set_default_dtype(torch.float64)
    self.assertIs(torch.float64, torch.get_default_dtype())
    self.assertIs(torch.DoubleStorage, torch.Storage)
    # the tensor-type class object is also accepted, not just its name
    torch.set_default_tensor_type(torch.FloatTensor)
    self.assertIs(torch.float32, torch.get_default_dtype())
    self.assertIs(torch.FloatStorage, torch.Storage)
    if torch.cuda.is_available():
        torch.set_default_tensor_type(torch.cuda.FloatTensor)
        self.assertIs(torch.float32, torch.get_default_dtype())
        self.assertIs(torch.float32, torch.cuda.FloatTensor.dtype)
        self.assertIs(torch.cuda.FloatStorage, torch.Storage)
        torch.set_default_dtype(torch.float64)
        self.assertIs(torch.float64, torch.get_default_dtype())
        self.assertIs(torch.cuda.DoubleStorage, torch.Storage)
    # don't allow passing dtype to set_default_tensor_type
    self.assertRaises(TypeError, lambda: torch.set_default_tensor_type(torch.float32))
    # don't allow passing dtype to set_default_dtype
    for t in all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.qint8):
        # only floating-point types are supported as the default type
        if t in (
                torch.half,
                torch.float,
                torch.double,
                torch.bfloat16):
            torch.set_default_dtype(t)
        else:
            self.assertRaises(TypeError, lambda: torch.set_default_dtype(t))
    # restore the global default saved at the top
    torch.set_default_tensor_type(default_type)
# TODO: this test should be updated
@onlyCPU
def test_constructor_device_legacy(self, device):
    """Legacy torch.*Tensor / Tensor.new constructors reject a device=
    argument that conflicts with the constructor's own device, and reject
    device= entirely when constructing from an existing tensor."""
    self.assertRaises(RuntimeError, lambda: torch.FloatTensor(device='cuda'))
    self.assertRaises(RuntimeError, lambda: torch.FloatTensor(torch.Size([2, 3, 4]), device='cuda'))
    self.assertRaises(RuntimeError, lambda: torch.FloatTensor((2.0, 3.0), device='cuda'))
    self.assertRaises(RuntimeError, lambda: torch.Tensor(device='cuda'))
    self.assertRaises(RuntimeError, lambda: torch.Tensor(torch.Size([2, 3, 4]), device='cuda'))
    self.assertRaises(RuntimeError, lambda: torch.Tensor((2.0, 3.0), device='cuda'))
    # Tensor constructor/new with Tensor argument shouldn't work with device specified
    i = torch.tensor([1], device='cpu')
    self.assertRaises(RuntimeError, lambda: torch.Tensor(i, device='cpu'))
    self.assertRaises(RuntimeError, lambda: i.new(i, device='cpu'))
    self.assertRaises(RuntimeError, lambda: torch.Tensor(i, device='cuda'))
    self.assertRaises(RuntimeError, lambda: i.new(i, device='cuda'))
    x = torch.randn((3,), device='cpu')
    self.assertRaises(RuntimeError, lambda: x.new(device='cuda'))
    self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cuda'))
    self.assertRaises(RuntimeError, lambda: x.new((2.0, 3.0), device='cuda'))
    if torch.cuda.is_available():
        # mirror checks: CUDA-typed constructors reject device='cpu'
        self.assertRaises(RuntimeError, lambda: torch.cuda.FloatTensor(device='cpu'))
        self.assertRaises(RuntimeError, lambda: torch.cuda.FloatTensor(torch.Size([2, 3, 4]), device='cpu'))
        self.assertRaises(RuntimeError, lambda: torch.cuda.FloatTensor((2.0, 3.0), device='cpu'))
        # Tensor constructor/new with Tensor argument shouldn't work with device specified
        i = torch.tensor([1], device='cuda')
        self.assertRaises(RuntimeError, lambda: torch.Tensor(i, device='cuda'))
        self.assertRaises(RuntimeError, lambda: i.new(i, device='cuda'))
        self.assertRaises(RuntimeError, lambda: torch.Tensor(i, device='cpu'))
        self.assertRaises(RuntimeError, lambda: i.new(i, device='cpu'))
        default_type = torch.Tensor().type()
        torch.set_default_tensor_type(torch.cuda.FloatTensor)
        self.assertRaises(RuntimeError, lambda: torch.Tensor(device='cpu'))
        self.assertRaises(RuntimeError, lambda: torch.Tensor(torch.Size([2, 3, 4]), device='cpu'))
        self.assertRaises(RuntimeError, lambda: torch.Tensor((2.0, 3.0), device='cpu'))
        # NOTE(review): this second set_default_tensor_type call looks
        # redundant (the default is already the CUDA type) — confirm intent.
        torch.set_default_tensor_type(torch.cuda.FloatTensor)
        torch.set_default_tensor_type(default_type)
        x = torch.randn((3,), device='cuda')
        self.assertRaises(RuntimeError, lambda: x.new(device='cpu'))
        self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cpu'))
        self.assertRaises(RuntimeError, lambda: x.new((2.0, 3.0), device='cpu'))
# TODO: this test should be updated
@suppress_warnings
@onlyCPU
def test_tensor_factory(self, device):
    """torch.tensor copies python data, honors dtype=, copies (rather than
    aliases) tensor and numpy inputs, and truthiness-casts to bool."""
    # TODO: This test probably doesn't make too much sense now that
    # torch.tensor has been established for a while; it makes more
    # sense to test the legacy behavior in terms of the new behavior
    expected = torch.Tensor([1, 1])
    # test data
    res1 = torch.tensor([1, 1])
    self.assertEqual(res1, expected, exact_dtype=False)

    res1 = torch.tensor([1, 1], dtype=torch.int)
    self.assertEqual(res1, expected, exact_dtype=False)
    self.assertIs(torch.int, res1.dtype)

    # test copy
    res2 = torch.tensor(expected)
    self.assertEqual(res2, expected)
    res2[1] = 2
    # mutating the copy must not touch the source
    self.assertEqual(expected, torch.ones_like(expected))

    res2 = torch.tensor(expected, dtype=torch.int)
    # BUGFIX: these two assertions previously checked res1 (assigned above),
    # making them vacuous; they now check res2 as clearly intended.
    self.assertEqual(res2, expected, exact_dtype=False)
    self.assertIs(torch.int, res2.dtype)

    # test copy with numpy
    for dtype in [np.float64, np.int64, np.int8, np.uint8]:
        a = np.array([5.]).astype(dtype)
        res1 = torch.tensor(a)
        self.assertEqual(5., res1[0].item())
        # mutating the numpy array must not affect the copied tensor
        a[0] = 7.
        self.assertEqual(5., res1[0].item())

    # test boolean tensor: nonzero values become True, zero becomes False
    a = torch.tensor([True, True, False, True, True], dtype=torch.bool)
    b = torch.tensor([-1, -1.1, 0, 1, 1.1], dtype=torch.bool)
    self.assertEqual(a, b)
    c = torch.tensor([-0.1, -1.1, 0, 1, 0.1], dtype=torch.bool)
    self.assertEqual(a, c)
    d = torch.tensor((-.3, 0, .3, 1, 3 / 7), dtype=torch.bool)
    e = torch.tensor((True, False, True, True, True), dtype=torch.bool)
    self.assertEqual(e, d)
    f = torch.tensor((-1, 0, -1.1, 1, 1.1), dtype=torch.bool)
    self.assertEqual(e, f)

    # extreme values (NaN, inf, beyond-int64 integers/floats) also follow
    # plain truthiness when cast to bool
    int64_max = torch.iinfo(torch.int64).max
    int64_min = torch.iinfo(torch.int64).min
    float64_max = torch.finfo(torch.float64).max
    float64_min = torch.finfo(torch.float64).min
    g_1 = torch.tensor((float('nan'), 0, int64_min, int64_max, int64_min - 1), dtype=torch.bool)
    self.assertEqual(e, g_1)
    g_2 = torch.tensor((int64_max + 1, 0, (int64_max + 1) * 2, (int64_max + 1) * 2 + 1, float64_min), dtype=torch.bool)
    self.assertEqual(e, g_2)
    g_3 = torch.tensor((float64_max, 0, float64_max + 1, float64_min - 1, float64_max + 1e291), dtype=torch.bool)
    self.assertEqual(e, g_3)

    h = torch.tensor([True, False, False, True, False, True, True], dtype=torch.bool)
    i = torch.tensor([1e-323, 1e-324, 0j, 1e-323j, 1e-324j, 1 + 2j, -1j], dtype=torch.bool)
    self.assertEqual(h, i)
    j = torch.tensor((True, True, True, True), dtype=torch.bool)
    k = torch.tensor((1e323, -1e323, float('inf'), -float('inf')), dtype=torch.bool)
    self.assertEqual(j, k)
# TODO: this test should be updated
@suppress_warnings
@onlyCPU
def test_tensor_factory_copy_var(self, device):
    """torch.tensor / new_tensor copy a Variable (producing a new leaf with
    the requested requires_grad), while torch.as_tensor aliases it when the
    dtype already matches."""
    def check_copy(copy, is_leaf, requires_grad, data_ptr=None):
        if data_ptr is None:
            # default: compare the copy against its own storage pointer
            data_ptr = copy.data_ptr()
        self.assertEqual(copy, source, exact_dtype=False)
        self.assertTrue(copy.is_leaf == is_leaf)
        self.assertTrue(copy.requires_grad == requires_grad)
        # BUGFIX: the original compared bound methods (copy.data_ptr ==
        # data_ptr) without calling them, so the pointers were never actually
        # compared; call data_ptr() so the aliasing check is meaningful.
        self.assertTrue(copy.data_ptr() == data_ptr)

    source = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
    # test torch.tensor()
    check_copy(torch.tensor(source), True, False)
    check_copy(torch.tensor(source, requires_grad=False), True, False)
    check_copy(torch.tensor(source, requires_grad=True), True, True)
    # test tensor.new_tensor()
    copy = torch.randn(1)
    check_copy(copy.new_tensor(source), True, False)
    check_copy(copy.new_tensor(source, requires_grad=False), True, False)
    check_copy(copy.new_tensor(source, requires_grad=True), True, True)
    # test torch.as_tensor()
    check_copy(torch.as_tensor(source), source.is_leaf, source.requires_grad, source.data_ptr())  # not copy
    check_copy(torch.as_tensor(source, dtype=torch.float), False, True)  # copy and keep the graph
# TODO: this test should be updated
@onlyCPU
def test_tensor_factory_type_inference(self, device):
    """torch.tensor infers dtype from python scalars, nested sequences, and
    numpy arrays, tracking the current default (floating/complex) dtype."""
    def test_inference(default_dtype):
        saved_dtype = torch.get_default_dtype()
        torch.set_default_dtype(default_dtype)
        # restore in a finally block so a failing assertion cannot leak a
        # changed global default dtype into unrelated tests
        try:
            default_complex_dtype = torch.complex64 if default_dtype == torch.float32 else torch.complex128
            self.assertIs(default_dtype, torch.tensor(()).dtype)
            self.assertIs(default_dtype, torch.tensor(5.).dtype)
            self.assertIs(torch.int64, torch.tensor(5).dtype)
            self.assertIs(torch.bool, torch.tensor(True).dtype)
            self.assertIs(torch.int32, torch.tensor(5, dtype=torch.int32).dtype)
            # a single float anywhere in a nested sequence promotes the whole
            # tensor to the default floating dtype
            self.assertIs(default_dtype, torch.tensor(((7, 5), (9, 5.))).dtype)
            self.assertIs(default_dtype, torch.tensor(((5., 5), (3, 5))).dtype)
            self.assertIs(torch.int64, torch.tensor(((5, 3), (3, 5))).dtype)
            self.assertIs(default_complex_dtype, torch.tensor(((5, 3 + 2j), (3, 5 + 4j))).dtype)

            # numpy inputs keep numpy's dtype rather than the torch default
            self.assertIs(torch.float64, torch.tensor(np.array(())).dtype)
            self.assertIs(torch.float64, torch.tensor(np.array(5.)).dtype)
            if np.array(5).dtype == np.int64:  # np long, which can be 4 bytes (e.g. on windows)
                self.assertIs(torch.int64, torch.tensor(np.array(5)).dtype)
            else:
                self.assertIs(torch.int32, torch.tensor(np.array(5)).dtype)
            self.assertIs(torch.uint8, torch.tensor(np.array(3, dtype=np.uint8)).dtype)
            self.assertIs(default_dtype, torch.tensor(((7, np.array(5)), (np.array(9), 5.))).dtype)
            self.assertIs(torch.float64, torch.tensor(((7, 5), (9, np.array(5.)))).dtype)
            self.assertIs(torch.int64, torch.tensor(((5, np.array(3)), (np.array(3), 5))).dtype)
        finally:
            torch.set_default_dtype(saved_dtype)

    test_inference(torch.float64)
    test_inference(torch.float32)
# TODO: this test should be updated
@suppress_warnings
@onlyCPU
def test_new_tensor(self, device):
    """new_tensor copies its input (python data, tensor, numpy array),
    honors dtype=, and keeps or overrides the source tensor's device."""
    expected = torch.autograd.Variable(torch.ByteTensor([1, 1]))
    # test data
    res1 = expected.new_tensor([1, 1])
    self.assertEqual(res1, expected)
    res1 = expected.new_tensor([1, 1], dtype=torch.int)
    self.assertEqual(res1, expected, exact_dtype=False)
    self.assertIs(torch.int, res1.dtype)

    # test copy
    res2 = expected.new_tensor(expected)
    self.assertEqual(res2, expected)
    res2[1] = 2
    # mutating the copy must not touch the source
    self.assertEqual(expected, torch.ones_like(expected))
    res2 = expected.new_tensor(expected, dtype=torch.int)
    self.assertEqual(res2, expected, exact_dtype=False)
    self.assertIs(torch.int, res2.dtype)

    # test copy with numpy
    a = np.array([5.])
    res1 = torch.tensor(a)
    res1 = res1.new_tensor(a)
    self.assertEqual(5., res1[0].item())
    a[0] = 7.
    # copy is independent of the numpy array
    self.assertEqual(5., res1[0].item())

    if torch.cuda.device_count() >= 2:
        expected = expected.cuda(1)
        res1 = expected.new_tensor([1, 1])
        self.assertEqual(res1.get_device(), expected.get_device())
        res1 = expected.new_tensor([1, 1], dtype=torch.int)
        self.assertIs(torch.int, res1.dtype)
        self.assertEqual(res1.get_device(), expected.get_device())

        res2 = expected.new_tensor(expected)
        self.assertEqual(res2.get_device(), expected.get_device())
        res2 = expected.new_tensor(expected, dtype=torch.int)
        # BUGFIX: the next two dtype checks previously asserted res1.dtype
        # (set above), which made them vacuous; they now check res2.
        self.assertIs(torch.int, res2.dtype)
        self.assertEqual(res2.get_device(), expected.get_device())
        res2 = expected.new_tensor(expected, dtype=torch.int, device=0)
        self.assertIs(torch.int, res2.dtype)
        self.assertEqual(res2.get_device(), 0)

        res1 = expected.new_tensor(1)
        self.assertEqual(res1.get_device(), expected.get_device())
        res1 = expected.new_tensor(1, dtype=torch.int)
        self.assertIs(torch.int, res1.dtype)
        self.assertEqual(res1.get_device(), expected.get_device())
# TODO: this test should be updated
@onlyCPU
def test_as_tensor(self, device):
    """``torch.as_tensor``: aliases an existing tensor/ndarray when no dtype
    or device change is needed, and copies otherwise; rejects heterogeneous
    and self-referential Python lists just like ``torch.tensor``.
    """
    # from python data
    x = [[0, 1], [2, 3]]
    self.assertEqual(torch.tensor(x), torch.as_tensor(x))
    self.assertEqual(torch.tensor(x, dtype=torch.float32), torch.as_tensor(x, dtype=torch.float32))
    # python data with heterogeneous types
    z = [0, 'torch']
    with self.assertRaisesRegex(TypeError, "invalid data type"):
        torch.tensor(z)
        torch.as_tensor(z)
    # python data with self-referential lists
    z = [0]
    z += [z]
    with self.assertRaisesRegex(TypeError, "self-referential lists are incompatible"):
        torch.tensor(z)
        torch.as_tensor(z)
    z = [[1, 2], z]
    with self.assertRaisesRegex(TypeError, "self-referential lists are incompatible"):
        torch.tensor(z)
        torch.as_tensor(z)
    # from tensor (doesn't copy unless type is different)
    y = torch.tensor(x)
    self.assertIs(y, torch.as_tensor(y))
    self.assertIsNot(y, torch.as_tensor(y, dtype=torch.float32))
    if torch.cuda.is_available():
        self.assertIsNot(y, torch.as_tensor(y, device='cuda'))
        y_cuda = y.to('cuda')
        self.assertIs(y_cuda, torch.as_tensor(y_cuda))
        self.assertIs(y_cuda, torch.as_tensor(y_cuda, device='cuda'))
    # doesn't copy: writing through the result is visible when re-reading the
    # numpy array (torch.tensor(n) below is rebuilt from the mutated n)
    for dtype in [np.float64, np.int64, np.int8, np.uint8]:
        n = np.random.rand(5, 6).astype(dtype)
        n_astensor = torch.as_tensor(n)
        self.assertEqual(torch.tensor(n), n_astensor)
        n_astensor[0][0] = 25.7
        self.assertEqual(torch.tensor(n), n_astensor)
    # changing dtype causes copy
    n = np.random.rand(5, 6).astype(np.float32)
    n_astensor = torch.as_tensor(n, dtype=torch.float64)
    self.assertEqual(torch.tensor(n, dtype=torch.float64), n_astensor)
    n_astensor[0][1] = 250.8
    self.assertNotEqual(torch.tensor(n, dtype=torch.float64), n_astensor)
    # changing device causes copy
    if torch.cuda.is_available():
        n = np.random.randn(5, 6)
        n_astensor = torch.as_tensor(n, device='cuda')
        self.assertEqual(torch.tensor(n, device='cuda'), n_astensor)
        n_astensor[0][2] = 250.9
        self.assertNotEqual(torch.tensor(n, device='cuda'), n_astensor)
# TODO: this test should be updated
@suppress_warnings
def test_range(self, device):
    """Sanity checks for the deprecated ``torch.range``: plain construction,
    ``out=`` variants (including a non-contiguous out view), negative steps,
    and equal bounds."""
    expected = torch.range(0, 1, device=device)
    produced = torch.tensor((), device=device)
    torch.range(0, 1, device=device, out=produced)
    self.assertEqual(expected, produced, atol=0, rtol=0)
    # Writing through a narrowed (non-contiguous) view must land in place.
    base = torch.zeros(2, 3, device=device)
    torch.range(0, 3, device=device, out=base.narrow(1, 1, 2))
    produced = torch.tensor(((0, 0, 1), (0, 2, 3)), device=device, dtype=torch.float32)
    self.assertEqual(base, produced, atol=1e-16, rtol=0)
    # A negative step counts down; both bounds are included.
    expected = torch.tensor((1, 0), device=device, dtype=torch.float32)
    produced = torch.tensor((), device=device)
    torch.range(1, 0, -1, device=device, out=produced)
    self.assertEqual(expected, produced, atol=0, rtol=0)
    # Equal bounds yield a single element regardless of the step's sign.
    expected = torch.ones(1, device=device)
    produced = torch.tensor((), device=device)
    torch.range(1, 1, -1, device=device, out=produced)
    self.assertEqual(expected, produced, atol=0, rtol=0)
    torch.range(1, 1, 1, device=device, out=produced)
    self.assertEqual(expected, produced, atol=0, rtol=0)
# TODO: this test should be updated
def test_range_warning(self, device):
    """``torch.range`` is deprecated and must emit exactly one warning."""
    with warnings.catch_warnings(record=True) as caught:
        torch.range(0, 10, device=device)
        self.assertEqual(len(caught), 1)
# TODO: this test should be updated
def test_arange(self, device):
    """End-to-end checks for ``torch.arange``: large ramps (to trigger the
    vectorized path), ``out=`` into contiguous and non-contiguous tensors,
    single-argument and negative-step forms, float/double stepping, bool
    endpoints, exclusivity of the end bound, rounding-error shape stability,
    and rejection of inf/nan bounds.
    """
    res = torch.tensor(range(10000), device=device)
    res1 = torch.arange(0, 10000, device=device)  # Use a larger number so vectorized code can be triggered
    res2 = torch.tensor([], dtype=torch.int64, device=device)
    torch.arange(0, 10000, out=res2)
    self.assertEqual(res, res1, atol=0, rtol=0)
    self.assertEqual(res, res2, atol=0, rtol=0)
    # Vectorization on non-contiguous tensors
    res = torch.rand(3, 3, 300000, device=device).to(torch.int64)
    res = res.permute(2, 0, 1)
    torch.arange(0, 300000 * 3 * 3, out=res)
    self.assertEqual(res.flatten(), torch.arange(0, 300000 * 3 * 3, device=device))
    # Check arange with only one argument
    res1 = torch.arange(10, device=device)
    res2 = torch.arange(0, 10, device=device)
    self.assertEqual(res1, res2, atol=0, rtol=0)
    # Check arange for non-contiguous tensors.
    x = torch.zeros(2, 3, device=device)
    torch.arange(0, 4, out=x.narrow(1, 1, 2))
    res2 = torch.tensor(((0., 0., 1.), (0., 2., 3.)), device=device)
    self.assertEqual(x, res2, atol=1e-16, rtol=0)
    # Check negative
    res1 = torch.tensor((1., 0.), device=device)
    res2 = torch.tensor([], device=device)
    torch.arange(1, -1, -1, out=res2)
    self.assertEqual(res1, res2, atol=0, rtol=0)
    # Equal bounds
    res1 = torch.ones(1, device=device)
    res2 = torch.tensor([], device=device)
    torch.arange(1, 0, -1, out=res2)
    self.assertEqual(res1, res2, atol=0, rtol=0)
    torch.arange(1, 2, 1, out=res2)
    self.assertEqual(res1, res2, atol=0, rtol=0)
    # FloatTensor
    out = torch.tensor([], dtype=torch.float, device=device)
    res1 = torch.arange(0.6, 0.89, 0.1, out=out)
    self.assertEqual(res1, [0.6, 0.7, 0.8])
    out = torch.tensor([], dtype=torch.float, device=device)
    res1 = torch.arange(1, 10, 0.3, out=out)
    self.assertEqual(res1.size(0), 30)
    self.assertEqual(res1[0], 1)
    self.assertEqual(res1[29], 9.7)
    # DoubleTensor
    out = torch.tensor([], dtype=torch.double, device=device)
    res1 = torch.arange(0.6, 0.89, 0.1, out=out)
    self.assertEqual(res1, [0.6, 0.7, 0.8])
    out = torch.tensor([], dtype=torch.double, device=device)
    res1 = torch.arange(1, 10, 0.3, out=out)
    self.assertEqual(res1.size(0), 30)
    self.assertEqual(res1[0], 1)
    self.assertEqual(res1[29], 9.7)
    # Bool Input matching numpy semantics
    r = torch.arange(True, device=device)
    self.assertEqual(r[0], 0)
    r2 = torch.arange(False, device=device)
    self.assertEqual(len(r2), 0)
    self.assertEqual(r.dtype, torch.int64)
    self.assertEqual(r2.dtype, torch.int64)
    # Check that it's exclusive
    r = torch.arange(0, 5, device=device)
    self.assertEqual(r.min(), 0)
    self.assertEqual(r.max(), 4)
    self.assertEqual(r.numel(), 5)
    r = torch.arange(0, 6, 3, device=device)
    self.assertEqual(r.min(), 0)
    self.assertEqual(r.max(), 3)
    self.assertEqual(r.numel(), 2)
    r = torch.arange(0, 5, 2, device=device)
    self.assertEqual(r.min(), 0)
    self.assertEqual(r.max(), 4)
    self.assertEqual(r.numel(), 3)
    r = torch.arange(0, -5, -2, device=device)
    self.assertEqual(r.min(), -4)
    self.assertEqual(r.max(), 0)
    self.assertEqual(r.numel(), 3)
    r1 = torch.arange(0, 5 + 1e-6, device=device)
    # NB: without the dtype, we'll infer output type to be int64
    r2 = torch.arange(0, 5, dtype=torch.float32, device=device)
    r3 = torch.arange(0, 5 - 1e-6, device=device)
    self.assertEqual(r1[:-1], r2, atol=0, rtol=0)
    self.assertEqual(r2, r3, atol=0, rtol=0)
    r1 = torch.arange(10, -1 + 1e-6, -1, device=device)
    # NB: without the dtype, we'll infer output type to be int64
    r2 = torch.arange(10, -1, -1, dtype=torch.float32, device=device)
    r3 = torch.arange(10, -1 - 1e-6, -1, device=device)
    self.assertEqual(r1, r2, atol=0, rtol=0)
    self.assertEqual(r2, r3[:-1], atol=0, rtol=0)
    # A huge step must not overflow the internal element-count computation.
    w = 1449629115440469
    r = torch.arange(0, 100 * w, w, device=device)
    self.assertEqual(r.numel(), 100)
    # Test Rounding Errors
    line = torch.zeros(size=(1, 49), device=device)
    self.assertWarnsRegex(UserWarning, 'The out tensor will be resized',
                          lambda: torch.arange(-1, 1, 2. / 49, dtype=torch.float32, out=line))
    self.assertEqual(line.shape, [50])
    # An expanded (non-resizable) out tensor must be rejected.
    x = torch.empty(1).expand(10)
    self.assertRaises(RuntimeError, lambda: torch.arange(10, out=x))
    msg = "unsupported range"
    self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(-5, float('nan'), device=device))
    # check with step size
    self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(0, float('-inf'), -1, device=device))
    self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(0, float('inf'), device=device))
    self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('-inf'), 10, device=device))
    self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('nan'), 10, device=device))
    self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('inf'), device=device))
    self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('nan'), device=device))
    self.assertRaisesRegex(
        RuntimeError, "overflow",
        lambda: torch.arange(1.175494351e-38, 3.402823466e+38, device=device))
    # check that it holds a consistent output shape on precision-cornered step sizes
    d = torch.arange(-4.0, 4.0, 0.01, dtype=torch.float32, device=device)
    self.assertEqual(d.shape[0], 800)
# TODO: this test should be updated
@onlyCPU
def test_arange_inference(self, device):
    """dtype inference for ``torch.arange``: all-integral start/end/step give
    int64; any floating-point argument promotes to the default float dtype.
    """
    saved_dtype = torch.get_default_dtype()
    torch.set_default_dtype(torch.float32)
    try:
        # end only
        self.assertIs(torch.float32, torch.arange(1.).dtype)
        self.assertIs(torch.float32, torch.arange(torch.tensor(1.)).dtype)
        self.assertIs(torch.float32, torch.arange(torch.tensor(1., dtype=torch.float64)).dtype)
        self.assertIs(torch.int64, torch.arange(1).dtype)
        self.assertIs(torch.int64, torch.arange(torch.tensor(1)).dtype)
        self.assertIs(torch.int64, torch.arange(torch.tensor(1, dtype=torch.int16)).dtype)
        # start, end, [step]
        self.assertIs(torch.float32, torch.arange(1., 3).dtype)
        self.assertIs(torch.float32, torch.arange(torch.tensor(1., dtype=torch.float64), 3).dtype)
        self.assertIs(torch.float32, torch.arange(1, 3.).dtype)
        self.assertIs(torch.float32, torch.arange(torch.tensor(1, dtype=torch.int16), torch.tensor(3.)).dtype)
        self.assertIs(torch.float32, torch.arange(1, 3, 1.).dtype)
        self.assertIs(torch.float32,
                      torch.arange(torch.tensor(1),
                                   torch.tensor(3, dtype=torch.int16),
                                   torch.tensor(1., dtype=torch.float64)).dtype)
        self.assertIs(torch.int64, torch.arange(1, 3).dtype)
        self.assertIs(torch.int64, torch.arange(torch.tensor(1), 3).dtype)
        self.assertIs(torch.int64, torch.arange(torch.tensor(1), torch.tensor(3, dtype=torch.int16)).dtype)
        self.assertIs(torch.int64, torch.arange(1, 3, 1).dtype)
        self.assertIs(torch.int64,
                      torch.arange(torch.tensor(1),
                                   torch.tensor(3),
                                   torch.tensor(1, dtype=torch.int16)).dtype)
    finally:
        # Restore even on assertion failure so the global default dtype does
        # not leak into later tests.
        torch.set_default_dtype(saved_dtype)
# cannot call storage() on meta tensor
@skipMeta
def test_empty_strided(self, device):
    """``torch.empty_strided`` should accept any shape/stride combination
    that ``as_strided`` accepts, allocating a storage of the required size."""
    for shape in [(2, 3, 4), (0, 2, 0)]:
        # some of these cases are pretty strange, just verifying that if as_strided
        # allows them then empty_strided can as well.
        for strides in [(12, 4, 1), (2, 4, 6), (0, 0, 0)]:
            empty_strided = torch.empty_strided(shape, strides, device=device)
            # as_strided checks the storage size is big enough to support such a strided tensor;
            # instead of repeating this calculation, we just use empty_strided which does the same
            # calculation when setting the storage size.
            as_strided = torch.empty(empty_strided.storage().size(),
                                     device=device).as_strided(shape, strides)
            self.assertEqual(empty_strided.shape, as_strided.shape)
            self.assertEqual(empty_strided.stride(), as_strided.stride())
def test_new_empty_strided(self, device):
    """``Tensor.new_empty_strided`` must mirror ``torch.empty_strided`` in
    shape, stride, dtype, and device, and must reject size/stride tuples of
    mismatched rank."""
    def check(shape, stride, dtype):
        # One-line purpose: compare new_empty_strided against empty_strided
        # for a single shape/stride/dtype combination.
        src = torch.zeros(5, 5, dtype=dtype, device=device)
        got = src.new_empty_strided(shape, stride)
        ref = torch.empty_strided(shape, stride, dtype=src.dtype, device=src.device)
        self.assertEqual(got.shape, ref.shape)
        self.assertEqual(got.stride(), ref.stride())
        self.assertEqual(got.dtype, ref.dtype)
        self.assertEqual(got.device, ref.device)
    check([2, 3], [3, 1], torch.float)
    check([5, 3], [0, 1], torch.int)
    check([], [], torch.float)
    # Some really weird cases
    for shape in [(2, 3, 4), (0, 2, 0)]:
        for stride in [(12, 4, 1), (2, 4, 6), (0, 0, 0)]:
            check(shape, stride, torch.float)
    # Sizes and strides of different lengths must be rejected
    # https://github.com/pytorch/pytorch/issues/82416
    with self.assertRaisesRegex(
            RuntimeError,
            r"dimensionality of sizes \(1\) must match dimensionality of strides \(0\)"):
        scalar = torch.tensor(-4.8270, dtype=torch.float64, device=device)
        scalar.new_empty_strided((2,), (), dtype=torch.float64, device=device)
def test_strided_mismatched_stride_shape(self, device):
    """``as_strided`` / ``as_strided_`` reject shape and stride tuples whose
    lengths differ."""
    for shape, strides in (((1, ), ()), ((1, 2), (1, ))):
        for op in (torch.Tensor.as_strided, torch.Tensor.as_strided_):
            with self.assertRaisesRegex(RuntimeError, "mismatch in length of strides and shape"):
                op(torch.tensor(0.42, device=device), shape, strides)
def test_empty_tensor_props(self, device):
sizes = [(0,), (0, 3), (5, 0), (5, 0, 3, 0, 2), (0, 3, 0, 2), (0, 5, 0, 2, 0)]
for size in sizes:
x = torch.empty(tuple(size), device=device)
self.assertEqual(size, x.shape)
self.assertTrue(x.is_contiguous())
size_ones_instead_of_zeros = (x if x != 0 else 1 for x in size)
y = torch.empty(tuple(size_ones_instead_of_zeros), device=device)
self.assertEqual(x.stride(), y.stride())
@onlyNativeDeviceTypes
def test_empty_overflow(self, device):
    """Shapes/strides whose storage size exceeds what the allocator can
    represent must raise instead of overflowing silently."""
    for shape in ([2, 4, 2**29, 2**29], [8, 8, 2**29, 2**29]):
        with self.assertRaisesRegex(RuntimeError, 'Storage size calculation overflowed'):
            torch.empty(shape, dtype=torch.float64)
    with self.assertRaisesRegex(RuntimeError, 'Storage size calculation overflowed'):
        torch.empty_strided([8, 8], [2**61, 1], dtype=torch.float64)
def test_eye(self, device):
    """``torch.eye``: negative dims raise; the n-only and (n, m) forms match a
    manually constructed identity; the ``out=`` variant resizes an empty
    tensor and matches the plain result."""
    for dtype in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16):
        if dtype == torch.bfloat16:
            # bfloat16 is deliberately excluded from this check; the reason is
            # not recorded here — TODO confirm why / whether it can be enabled.
            continue
        # Test the RuntimeError is raised when either m or n is a negative number
        for n, m in ((-1, 1), (1, -1), (-1, -1)):
            with self.assertRaisesRegex(RuntimeError, 'must be greater or equal to'):
                torch.eye(n, m, device=device, dtype=dtype)
        # Test when the `m` parameter is not provided
        for n in (3, 5, 7):
            res1 = torch.eye(n, device=device, dtype=dtype)
            naive_eye = torch.zeros(n, n, dtype=dtype, device=device)
            naive_eye.diagonal(dim1=-2, dim2=-1).fill_(1)
            self.assertEqual(naive_eye, res1)
            # Check eye_out outputs
            res2 = torch.empty(0, device=device, dtype=dtype)
            torch.eye(n, out=res2)
            self.assertEqual(res1, res2)
        for n, m in product([3, 5, 7], repeat=2):
            # Construct identity using diagonal and fill
            res1 = torch.eye(n, m, device=device, dtype=dtype)
            naive_eye = torch.zeros(n, m, dtype=dtype, device=device)
            naive_eye.diagonal(dim1=-2, dim2=-1).fill_(1)
            self.assertEqual(naive_eye, res1)
            # Check eye_out outputs
            res2 = torch.empty(0, device=device, dtype=dtype)
            torch.eye(n, m, out=res2)
            self.assertEqual(res1, res2)
@precisionOverride({torch.float: 1e-8, torch.double: 1e-10})
@dtypes(*floating_and_complex_types())
def test_linspace_vs_numpy(self, device, dtype):
    """Compare ``torch.linspace`` against ``np.linspace`` on a fixed small
    interval for many step counts; the endpoints must match bit-exactly."""
    start = -0.0316082797944545745849609375 + (0.8888888888j if dtype.is_complex else 0)
    end = .0315315723419189453125 + (0.444444444444j if dtype.is_complex else 0)
    for steps in [1, 2, 3, 5, 11, 256, 257, 2**22]:
        t = torch.linspace(start, end, steps, device=device, dtype=dtype)
        a = np.linspace(start, end, steps, dtype=torch_to_numpy_dtype_dict[dtype])
        t = t.cpu()
        self.assertEqual(t, torch.from_numpy(a))
        # Endpoints are compared exactly (no precision override applies here).
        self.assertTrue(t[0].item() == a[0])
        self.assertTrue(t[steps - 1].item() == a[steps - 1])
def _test_linspace_logspace_complex_helper(self, torch_fn, np_fn, device, dtype):
    """Compare a torch linspace/logspace factory against its numpy
    counterpart on a random complex interval, across a spread of step counts.

    Args:
        torch_fn: torch factory (torch.linspace or torch.logspace).
        np_fn: matching numpy factory (np.linspace or np.logspace).
    """
    start = torch.randn(1, dtype=dtype).item()
    end = (start + torch.randn(1, dtype=dtype) + random.randint(5, 15)).item()
    def test_fn(torch_fn, numpy_fn, steps):
        t = torch_fn(start, end, steps, device=device)
        a = numpy_fn(start, end, steps, dtype=torch_to_numpy_dtype_dict[dtype])
        t = t.cpu()
        self.assertEqual(t, torch.from_numpy(a))
    for steps in [1, 2, 3, 5, 11, 256, 257, 2**22]:
        # Bug fix: this loop previously hard-coded torch.linspace/np.linspace,
        # ignoring the torch_fn/np_fn parameters — so the logspace caller was
        # silently re-testing linspace.
        test_fn(torch_fn, np_fn, steps)
@dtypes(torch.complex64)
def test_linspace_vs_numpy_complex(self, device, dtype):
    """Run the complex linspace/logspace-vs-numpy helper with torch.linspace."""
    self._test_linspace_logspace_complex_helper(torch.linspace, np.linspace,
                                                device, dtype)
@dtypes(torch.complex64)
def test_logspace_vs_numpy_complex(self, device, dtype):
    """Run the complex linspace/logspace-vs-numpy helper with torch.logspace."""
    self._test_linspace_logspace_complex_helper(torch.logspace, np.logspace,
                                                device, dtype)
@precisionOverride({torch.float: 1e-6, torch.double: 1e-10})
@dtypes(*floating_types())
def test_logspace_vs_numpy(self, device, dtype):
    """Compare ``torch.logspace`` against ``np.logspace`` on a fixed small
    interval for many step counts; endpoints use the default tolerance."""
    start = -0.0316082797944545745849609375
    end = .0315315723419189453125
    for steps in [1, 2, 3, 5, 11, 256, 257, 2**22]:
        t = torch.logspace(start, end, steps, device=device, dtype=dtype)
        a = np.logspace(start, end, steps, dtype=torch_to_numpy_dtype_dict[dtype])
        t = t.cpu()
        self.assertEqual(t, torch.from_numpy(a))
        self.assertEqual(t[0], a[0])
        self.assertEqual(t[steps - 1], a[steps - 1])
@onlyCUDA
@largeTensorTest('16GB')
def test_range_factories_64bit_indexing(self, device):
    """Range factories must handle element counts above 2**31 (requiring
    64-bit indexing in the kernels); only the final element is spot-checked."""
    bigint = 2 ** 31 + 1
    t = torch.arange(bigint, dtype=torch.long, device=device)
    self.assertEqual(t[-1].item(), bigint - 1)
    del t  # free the large tensor before the next allocation
    t = torch.linspace(0, 1, bigint, dtype=torch.float, device=device)
    self.assertEqual(t[-1].item(), 1)
    del t
    t = torch.logspace(0, 1, bigint, 2, dtype=torch.float, device=device)
    self.assertEqual(t[-1].item(), 2)
    del t
@expectedFailureMeta  # RuntimeError: The tensor has a non-zero number of elements
@onlyNativeDeviceTypes
def test_tensor_ctor_device_inference(self, device):
    """``torch.tensor``/``as_tensor`` and the sparse COO constructor must
    infer the result device from their tensor inputs (a CPU input stays on
    CPU even when a CUDA device is current)."""
    torch_device = torch.device(device)
    values = torch.tensor((1, 2, 3), device=device)
    # Tests tensor and as_tensor
    # Note: warnings emitted by these constructor calls are suppressed below
    for op in (torch.tensor, torch.as_tensor):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self.assertEqual(op(values).device, torch_device)
            self.assertEqual(op(values, dtype=torch.float64).device, torch_device)
            if self.device_type == 'cuda':
                with torch.cuda.device(device):
                    self.assertEqual(op(values.cpu()).device, torch.device('cpu'))
    # Tests sparse ctor
    indices = torch.tensor([[0, 1, 1],
                            [2, 0, 1],
                            [2, 1, 0]], device=device)
    sparse_size = (3, 3, 3)
    sparse_default = torch.sparse_coo_tensor(indices, values, sparse_size)
    self.assertEqual(sparse_default.device, torch_device)
    sparse_with_dtype = torch.sparse_coo_tensor(indices, values, sparse_size, dtype=torch.float64)
    self.assertEqual(sparse_with_dtype.device, torch_device)
    if self.device_type == 'cuda':
        with torch.cuda.device(device):
            sparse_with_dtype = torch.sparse_coo_tensor(indices.cpu(), values.cpu(),
                                                        sparse_size, dtype=torch.float64)
            self.assertEqual(sparse_with_dtype.device, torch.device('cpu'))
def _test_signal_window_functions(self, name, dtype, device, **kwargs):
    """Compare ``torch.<name>_window`` against ``scipy.signal.get_window``
    across sizes and periodic flags; non-floating dtypes must raise.

    Args:
        name: window name without the ``_window`` suffix (e.g. 'hann', 'kaiser').
        kwargs: extra window parameters forwarded to both torch and scipy
            (e.g. ``beta`` for the kaiser window).
    """
    import scipy.signal as signal
    torch_method = getattr(torch, name + '_window')
    if not dtype.is_floating_point:
        with self.assertRaisesRegex(RuntimeError, r'floating point'):
            torch_method(3, dtype=dtype)
        return
    for size in [0, 1, 2, 5, 10, 50, 100, 1024, 2048]:
        for periodic in [True, False]:
            res = torch_method(size, periodic=periodic, **kwargs, device=device, dtype=dtype)
            # NB: scipy always returns a float64 result
            ref = torch.from_numpy(signal.get_window((name, *(kwargs.values())), size, fftbins=periodic))
            self.assertEqual(res, ref, exact_dtype=False)
    with self.assertRaisesRegex(RuntimeError, r'not implemented for sparse types'):
        torch_method(3, layout=torch.sparse_coo)
    # requires_grad must be honored (and default to False)
    self.assertTrue(torch_method(3, requires_grad=True).requires_grad)
    self.assertFalse(torch_method(3).requires_grad)
@onlyNativeDeviceTypes
@precisionOverride({torch.bfloat16: 5e-2, torch.half: 1e-3})
@unittest.skipIf(not TEST_SCIPY, "Scipy not found")
@dtypesIfCUDA(torch.float, torch.double, torch.bfloat16, torch.half, torch.long)
@dtypes(torch.float, torch.double, torch.long)
@parametrize("window", ['hann', 'hamming', 'bartlett', 'blackman'])
def test_signal_window_functions(self, device, dtype, window):
    """Parametrized check of the basic window factories against scipy."""
    self._test_signal_window_functions(window, dtype, device)
@onlyNativeDeviceTypes
@precisionOverride({torch.bfloat16: 5e-2, torch.half: 1e-3})
@unittest.skipIf(not TEST_SCIPY, "Scipy not found")
@dtypesIfCUDA(torch.float, torch.double, torch.bfloat16, torch.half, torch.long)
@dtypes(torch.float, torch.double, torch.long)
def test_kaiser_window(self, device, dtype):
    """Check ``torch.kaiser_window`` against scipy for 50 random beta values."""
    # `_` replaces the unused loop variable; the loop merely repeats the
    # randomized comparison.
    for _ in range(50):
        self._test_signal_window_functions('kaiser', dtype, device, beta=random.random() * 30)
def test_tensor_factories_empty(self, device):
    """Every factory function must be able to create zero-element tensors of
    the requested shape; complex dtypes are rejected by randint, and rand/randn
    are restricted to floating/complex dtypes."""
    # ensure we can create empty tensors from each factory function
    shapes = [(5, 0, 1), (0,), (0, 0, 1, 0, 2, 0, 0)]
    for shape in shapes:
        for dt in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16, torch.chalf):
            self.assertEqual(shape, torch.zeros(shape, device=device, dtype=dt).shape)
            self.assertEqual(shape, torch.zeros_like(torch.zeros(shape, device=device, dtype=dt)).shape)
            self.assertEqual(shape, torch.full(shape, 3, device=device, dtype=dt).shape)
            self.assertEqual(shape, torch.full_like(torch.zeros(shape, device=device, dtype=dt), 3).shape)
            self.assertEqual(shape, torch.ones(shape, device=device, dtype=dt).shape)
            self.assertEqual(shape, torch.ones_like(torch.zeros(shape, device=device, dtype=dt)).shape)
            self.assertEqual(shape, torch.empty(shape, device=device, dtype=dt).shape)
            self.assertEqual(shape, torch.empty_like(torch.zeros(shape, device=device, dtype=dt)).shape)
            self.assertEqual(shape, torch.empty_strided(shape, (0,) * len(shape), device=device, dtype=dt).shape)
            if dt == torch.bool:
                # bool randint upper bound must be 2
                self.assertEqual(shape, torch.randint(2, shape, device=device, dtype=dt).shape)
                self.assertEqual(shape, torch.randint_like(torch.zeros(shape, device=device, dtype=dt), 2).shape)
            elif dt.is_complex:
                # randint does not support complex dtypes
                self.assertRaises(RuntimeError, lambda: torch.randint(6, shape, device=device, dtype=dt).shape)
            else:
                self.assertEqual(shape, torch.randint(6, shape, device=device, dtype=dt).shape)
                self.assertEqual(shape, torch.randint_like(torch.zeros(shape, device=device, dtype=dt), 6).shape)
            if dt not in {torch.double, torch.float, torch.half, torch.bfloat16,
                          torch.complex32, torch.complex64, torch.complex128}:
                self.assertRaises(RuntimeError, lambda: torch.rand(shape, device=device, dtype=dt).shape)
            if dt == torch.double or dt == torch.float or dt.is_complex:
                self.assertEqual(shape, torch.randn(shape, device=device, dtype=dt).shape)
                self.assertEqual(shape, torch.randn_like(torch.zeros(shape, device=device, dtype=dt)).shape)
    # degenerate inputs to the remaining factories
    self.assertEqual((0,), torch.arange(0, device=device).shape)
    self.assertEqual((0, 0), torch.eye(0, device=device).shape)
    self.assertEqual((0, 0), torch.eye(0, 0, device=device).shape)
    self.assertEqual((5, 0), torch.eye(5, 0, device=device).shape)
    self.assertEqual((0, 5), torch.eye(0, 5, device=device).shape)
    self.assertEqual((0,), torch.linspace(1, 1, 0, device=device).shape)
    self.assertEqual((0,), torch.logspace(1, 1, 0, device=device).shape)
    self.assertEqual((0,), torch.randperm(0, device=device).shape)
    self.assertEqual((0,), torch.bartlett_window(0, device=device).shape)
    self.assertEqual((0,), torch.bartlett_window(0, periodic=False, device=device).shape)
    self.assertEqual((0,), torch.hamming_window(0, device=device).shape)
    self.assertEqual((0,), torch.hann_window(0, device=device).shape)
    self.assertEqual((0,), torch.kaiser_window(0, device=device).shape)
    self.assertEqual((1, 1, 0), torch.tensor([[[]]], device=device).shape)
    self.assertEqual((1, 1, 0), torch.as_tensor([[[]]], device=device).shape)
@onlyCUDA
def test_tensor_factory_gpu_type_inference(self, device):
    """With a CUDA default tensor type, scalar factories must honor the
    current default dtype and place results on the GPU."""
    saved_type = torch.tensor([]).type()
    torch.set_default_tensor_type(torch.cuda.DoubleTensor)
    try:
        torch.set_default_dtype(torch.float32)
        self.assertIs(torch.float32, torch.tensor(0.).dtype)
        self.assertEqual(torch.device(device), torch.tensor(0.).device)
        torch.set_default_dtype(torch.float64)
        self.assertIs(torch.float64, torch.tensor(0.).dtype)
        self.assertEqual(torch.device(device), torch.tensor(0.).device)
    finally:
        # Restore the global default (type + dtype) even when an assertion
        # fails, so later tests are unaffected.
        torch.set_default_tensor_type(saved_type)
@onlyCUDA
def test_tensor_factory_gpu_type(self, device):
    """Setting a CUDA default tensor type must make factories produce CUDA
    tensors of the corresponding dtype."""
    saved_type = torch.tensor([]).type()
    try:
        torch.set_default_tensor_type(torch.cuda.FloatTensor)
        x = torch.zeros((5, 5))
        self.assertIs(torch.float32, x.dtype)
        self.assertTrue(x.is_cuda)
        torch.set_default_tensor_type(torch.cuda.DoubleTensor)
        x = torch.zeros((5, 5))
        self.assertIs(torch.float64, x.dtype)
        self.assertTrue(x.is_cuda)
    finally:
        # Restore the global default even when an assertion fails, so later
        # tests are unaffected.
        torch.set_default_tensor_type(saved_type)
@skipCPUIf(True, 'compares device with cpu')
@dtypes(torch.int, torch.long, torch.float, torch.double)
def test_arange_device_vs_cpu(self, device, dtype):
    """``torch.arange`` on an accelerator must match the CPU result."""
    cpu_tensor = torch.arange(0, 10, dtype=dtype, device='cpu')
    device_tensor = torch.arange(0, 10, dtype=dtype, device=device)
    self.assertEqual(cpu_tensor, device_tensor)
def test_arange_bfloat16(self, device):
ref_tensor = torch.tensor([0, 1, 2, 3], dtype=torch.bfloat16, device=device)
bfloat16_tensor = torch.arange(0, 4, dtype=torch.bfloat16, device=device)
self.assertEqual(ref_tensor, bfloat16_tensor)
# step=2
ref_tensor = torch.tensor([0, 2, 4], dtype=torch.bfloat16, device=device)
bfloat16_tensor = torch.arange(0, 6, step=2, dtype=torch.bfloat16, device=device)
self.assertEqual(ref_tensor, bfloat16_tensor)
@dtypes(*all_types_and_complex_and(torch.bfloat16))
@dtypesIfCUDA(*all_types_and_complex_and(torch.bfloat16))
def test_linspace(self, device, dtype):
    """End-to-end checks for ``torch.linspace``: the ``out=`` variant, exact
    small/large integer ramps, vectorization on non-contiguous outputs,
    degenerate step counts (0/1), dtype-inference errors, per-dtype accuracy,
    descending ranges, and monotonicity on a large tensor (race check)."""
    _from = random.random()
    to = _from + random.random()
    res1 = torch.linspace(_from, to, 137, device=device, dtype=dtype)
    res2 = torch.tensor((), device=device, dtype=dtype)
    torch.linspace(_from, to, 137, dtype=dtype, out=res2)
    self.assertEqual(res1, res2, atol=0, rtol=0)
    # small tensor
    self.assertEqual(torch.linspace(10, 20, 11, device=device, dtype=dtype),
                     torch.tensor(list(range(10, 21)), device=device, dtype=dtype))
    # large tensor
    if dtype not in (torch.int8, torch.uint8):
        self.assertEqual(torch.linspace(10, 2000, 1991, device=device, dtype=dtype),
                         torch.tensor(list(range(10, 2001)), device=device, dtype=dtype))
    # Vectorization on non-contiguous tensors
    if dtype not in (torch.int8, torch.uint8):  # int8 and uint8 are too small for this test
        res = torch.rand(3, 3, 1000, device=device).to(dtype)
        res = res.permute(2, 0, 1)
        torch.linspace(0, 1000 * 3 * 3, 1000 * 3 * 3, out=res)
        self.assertEqual(res.flatten(), torch.linspace(0, 1000 * 3 * 3, 1000 * 3 * 3, device=device, dtype=dtype))
    # negative step count must raise
    self.assertRaises(RuntimeError, lambda: torch.linspace(0, 1, -1, device=device, dtype=dtype))
    # steps = 1
    self.assertEqual(torch.linspace(0, 1, 1, device=device, dtype=dtype),
                     torch.zeros(1, device=device, dtype=dtype), atol=0, rtol=0)
    # steps = 0
    self.assertEqual(torch.linspace(0, 1, 0, device=device, dtype=dtype).numel(), 0, atol=0, rtol=0)
    # steps not provided
    self.assertRaises(TypeError, lambda: torch.linspace(0, 1, device=device, dtype=dtype))
    if dtype == torch.float:
        # passed dtype can't be safely casted to inferred dtype
        with self.assertRaisesRegex(RuntimeError, r"torch.linspace\(\): inferred dtype"):
            torch.linspace(0, 1j, 5, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, r"torch.linspace\(\): inferred dtype"):
            torch.linspace(0j, 1, 5, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, r"torch.linspace\(\): inferred dtype"):
            torch.linspace(0j, 1j, 5, device=device, dtype=dtype)
    # Check linspace for generating the correct output for each dtype.
    start = 0 if dtype == torch.uint8 else -100
    expected_lin = torch.tensor([start + .5 * i for i in range(401)], device=device, dtype=torch.double)
    actual_lin = torch.linspace(start, start + 200, 401, device=device, dtype=dtype)
    # If on GPU, allow for minor error depending on dtype.
    tol = 0.
    if device != 'cpu':
        if dtype == torch.half:
            tol = 1e-1
        elif dtype == torch.float:
            tol = 1e-5
        elif dtype == torch.double:
            tol = 1e-10
    self.assertEqual(expected_lin.to(dtype), actual_lin, atol=tol, rtol=0)
    # Check linspace for generating with start > end.
    self.assertEqual(torch.linspace(2, 0, 3, device=device, dtype=dtype),
                     torch.tensor((2, 1, 0), device=device, dtype=dtype),
                     atol=0, rtol=0)
    # Check for race condition (correctness when applied on a large tensor).
    if dtype not in (torch.int8, torch.uint8, torch.int16, torch.half, torch.bfloat16):
        y = torch.linspace(0, 999999 + (999999j if dtype.is_complex else 0),
                           1000000, device=device, dtype=dtype)
        if dtype.is_complex:
            cond = torch.logical_and(y[:-1].real < y[1:].real, y[:-1].imag < y[1:].imag)
        else:
            cond = y[:-1] < y[1:]
        correct = all(cond)
        self.assertTrue(correct)
    # Check linspace for non-contiguous tensors.
    x = torch.zeros(2, 3, device=device, dtype=dtype)
    y = torch.linspace(0, 3, 4, out=x.narrow(1, 1, 2), dtype=dtype)
    self.assertEqual(x, torch.tensor(((0, 0, 1), (0, 2, 3)), device=device, dtype=dtype), atol=0, rtol=0)
def _test_linspace_logspace_deduction_helper(self, fn, device):
for start, end in [(1, 2), (1., 2), (1., -2.), (1j, 2j), (0., 2j), (1j, 2)]:
dtype = torch.float32
if isinstance(start, complex) or isinstance(end, complex):
dtype = torch.cfloat
self.assertEqual(fn(start, end, steps=100, device=device).dtype, dtype)
def test_linspace_deduction(self, device):
    """``torch.linspace`` dtype deduction from start/end argument types."""
    # Test deduction from input parameters.
    self._test_linspace_logspace_deduction_helper(torch.linspace, device)
def test_logspace_deduction(self, device):
    """``torch.logspace`` dtype deduction from start/end argument types."""
    # Test deduction from input parameters.
    self._test_linspace_logspace_deduction_helper(torch.logspace, device)
# The implementation of linspace+logspace goes through a different path
# when the steps arg is equal to 0 or 1. For other values of `steps`
# they call specialized linspace (or logspace) kernels.
LINSPACE_LOGSPACE_SPECIAL_STEPS = [0, 1]
# NOTE [Linspace+Logspace precision override]
# Our Linspace and logspace torch.half CUDA kernels are not very precise.
# Since linspace/logspace are deterministic, we can compute an expected
# amount of error (by testing without a precision override), adding a tiny
# amount (EPS) to that, and using that value as the override.
# This constant is consumed by the precisionOverride decorators on the
# *_device_vs_cpu and *_base2 tests below.
LINSPACE_LOGSPACE_EXTRA_EPS = 1e-5
# Compares linspace device vs. cpu
def _test_linspace(self, device, dtype, steps):
a = torch.linspace(0, 10, steps=steps, dtype=dtype, device=device)
b = torch.linspace(0, 10, steps=steps)
self.assertEqual(a, b, exact_dtype=False)
# See NOTE [Linspace+Logspace precision override]
@skipCPUIf(True, "compares with CPU")
@precisionOverride({torch.half: 0.0039 + LINSPACE_LOGSPACE_EXTRA_EPS})
@dtypes(*floating_and_complex_types_and(torch.half, torch.bfloat16))
def test_linspace_device_vs_cpu(self, device, dtype):
    """linspace on an accelerator must match the CPU reference (steps=10)."""
    self._test_linspace(device, dtype, steps=10)
@skipCPUIf(True, "compares with CPU")
@dtypes(*floating_and_complex_types_and(torch.half, torch.bfloat16))
def test_linspace_special_steps(self, device, dtype):
    """Exercise the dedicated steps==0 and steps==1 linspace code paths
    (see LINSPACE_LOGSPACE_SPECIAL_STEPS)."""
    for steps in self.LINSPACE_LOGSPACE_SPECIAL_STEPS:
        self._test_linspace(device, dtype, steps=steps)
# Compares logspace device vs cpu
def _test_logspace(self, device, dtype, steps):
a = torch.logspace(1, 1.1, steps=steps, dtype=dtype, device=device)
b = torch.logspace(1, 1.1, steps=steps)
self.assertEqual(a, b, exact_dtype=False)
# Compares logspace device vs cpu
def _test_logspace_base2(self, device, dtype, steps):
a = torch.logspace(1, 1.1, steps=steps, base=2, dtype=dtype, device=device)
b = torch.logspace(1, 1.1, steps=steps, base=2)
self.assertEqual(a, b, exact_dtype=False)
# See NOTE [Linspace+Logspace precision override]
@skipCPUIf(True, "compares with CPU")
@precisionOverride({torch.half: 0.025 + LINSPACE_LOGSPACE_EXTRA_EPS})
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_logspace_device_vs_cpu(self, device, dtype):
    """logspace on an accelerator must match the CPU reference (steps=10)."""
    self._test_logspace(device, dtype, steps=10)
    # See NOTE [Linspace+Logspace precision override]
    @skipCPUIf(True, "compares with CPU")
    @precisionOverride({torch.half: 0.0201 + LINSPACE_LOGSPACE_EXTRA_EPS})
    @dtypesIfCUDA(torch.half, torch.float, torch.double)
    @dtypes(torch.float, torch.double)
    def test_logspace_base2(self, device, dtype):
        """Compare a base-2 logspace on the test device against the CPU reference."""
        self._test_logspace_base2(device, dtype, steps=10)
    @skipCPUIf(True, "compares with CPU")
    @dtypesIfCUDA(torch.half, torch.float, torch.double)
    @dtypes(torch.float, torch.double)
    def test_logspace_special_steps(self, device, dtype):
        """Exercise the special-cased steps values (0 and 1) for both logspace variants."""
        for steps in self.LINSPACE_LOGSPACE_SPECIAL_STEPS:
            self._test_logspace(device, dtype, steps=steps)
            self._test_logspace_base2(device, dtype, steps=steps)
    @dtypes(*all_types_and(torch.bfloat16))
    @dtypesIfCUDA(*integral_types_and(torch.half, torch.bfloat16, torch.float32, torch.float64) if TEST_WITH_ROCM else
                  all_types_and(torch.half, torch.bfloat16))
    def test_logspace(self, device, dtype):
        """Functional checks for torch.logspace: out= agreement, argument validation,
        dtype-inference errors with complex endpoints, precision, non-default base,
        descending ranges, and non-contiguous out tensors."""
        _from = random.random()
        to = _from + random.random()
        res1 = torch.logspace(_from, to, 137, device=device, dtype=dtype)
        res2 = torch.tensor((), device=device, dtype=dtype)
        torch.logspace(_from, to, 137, device=device, dtype=dtype, out=res2)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        self.assertRaises(RuntimeError, lambda: torch.logspace(0, 1, -1, device=device, dtype=dtype))
        # steps not provided
        self.assertRaises(TypeError, lambda: torch.logspace(0, 1, device=device, dtype=dtype))
        self.assertEqual(torch.logspace(0, 1, 1, device=device, dtype=dtype),
                         torch.ones(1, device=device, dtype=dtype), atol=0, rtol=0)
        if dtype == torch.float:
            # passed dtype can't be safely casted to inferred dtype
            with self.assertRaisesRegex(RuntimeError, r"torch.logspace\(\): inferred dtype"):
                torch.logspace(0, 1j, 5, device=device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, r"torch.logspace\(\): inferred dtype"):
                torch.logspace(0j, 1, 5, device=device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, r"torch.logspace\(\): inferred dtype"):
                torch.logspace(0j, 1j, 5, device=device, dtype=dtype)
        # Check precision - start, stop and base are chosen to avoid overflow
        # steps is chosen so that step size is not subject to rounding error
        # a tolerance is needed for gpu tests due to differences in computation
        atol = None
        rtol = None
        if self.device_type == 'cpu':
            atol = 0
            rtol = 0
        self.assertEqual(torch.tensor([2. ** (i / 8.) for i in range(49)], device=device, dtype=dtype),
                         torch.logspace(0, 6, steps=49, base=2, device=device, dtype=dtype),
                         atol=atol, rtol=rtol)
        # Check non-default base=2
        self.assertEqual(torch.logspace(1, 1, 1, 2, device=device, dtype=dtype),
                         torch.ones(1, device=device, dtype=dtype) * 2)
        self.assertEqual(torch.logspace(0, 2, 3, 2, device=device, dtype=dtype),
                         torch.tensor((1, 2, 4), device=device, dtype=dtype))
        # Check logspace_ for generating with start > end.
        self.assertEqual(torch.logspace(1, 0, 2, device=device, dtype=dtype),
                         torch.tensor((10, 1), device=device, dtype=dtype), atol=0, rtol=0)
        # Check logspace_ for non-contiguous tensors.
        x = torch.zeros(2, 3, device=device, dtype=dtype)
        y = torch.logspace(0, 3, 4, base=2, device=device, dtype=dtype, out=x.narrow(1, 1, 2))
        self.assertEqual(x, torch.tensor(((0, 1, 2), (0, 4, 8)), device=device, dtype=dtype), atol=0, rtol=0)
@onlyNativeDeviceTypes
@dtypes(torch.half, torch.float, torch.double)
def test_full_inference(self, device, dtype):
size = (2, 2)
prev_default = torch.get_default_dtype()
torch.set_default_dtype(dtype)
# Tests bool fill value inference
t = torch.full(size, True)
self.assertEqual(t.dtype, torch.bool)
# Tests integer fill value inference
t = torch.full(size, 1)
self.assertEqual(t.dtype, torch.long)
# Tests float fill value inference
t = torch.full(size, 1.)
self.assertEqual(t.dtype, dtype)
# Tests complex inference
t = torch.full(size, (1 + 1j))
ctype = torch.complex128 if dtype is torch.double else torch.complex64
self.assertEqual(t.dtype, ctype)
torch.set_default_dtype(prev_default)
def test_full_out(self, device):
size = (5,)
o = torch.empty(size, device=device, dtype=torch.long)
# verifies dtype/out conflict throws a RuntimeError
with self.assertRaises(RuntimeError):
torch.full(o.shape, 1., dtype=torch.float, out=o)
# verifies out dtype overrides inference
self.assertEqual(torch.full(o.shape, 1., out=o).dtype, o.dtype)
self.assertEqual(torch.full(size, 1, out=o).dtype, o.dtype)
# check that warning for numpy being not writable is suppressed
# when a copy of it is being created.
# see issue #47160
def test_tensor_from_non_writable_numpy(self, device):
with warnings.catch_warnings(record=True) as w:
a = np.arange(5.)
a.flags.writeable = False
t = torch.tensor(a)
self.assertEqual(len(w), 0)
# Class for testing random tensor creation ops, like torch.randint
class TestRandomTensorCreation(TestCase):
    """Tests for random tensor creation ops: normal_/normal, uniform_, random_,
    randint, randn, rand, and randperm."""
    exact_dtype = True
    # TODO: add torch.complex64, torch.complex128
    @dtypes(torch.float, torch.double)
    def test_normal(self, device, dtype):
        """Statistical (mean/std) and API checks for normal_()/torch.normal across
        tensor/scalar mean and std combinations, including out= variants and std == 0."""
        def helper(self, device, dtype, ptype, t_transform, std_transform):
            # ptype converts a Python scalar to the element type (e.g. complex for
            # complex dtypes); t_transform / std_transform map samples and expected
            # std into a real-valued space where moments can be compared.
            q = torch.empty(100, 100, dtype=dtype, device=device)
            q.normal_()
            self.assertEqual(t_transform(q).mean(), 0, atol=0.2, rtol=0)
            self.assertEqual(t_transform(q).std(), std_transform(1), atol=0.2, rtol=0)
            q.normal_(2, 3)
            self.assertEqual(t_transform(q).mean(), 2, atol=0.3, rtol=0)
            self.assertEqual(t_transform(q).std(), std_transform(3), atol=0.3, rtol=0)
            # Sampling into a slice must leave the other rows untouched.
            q = torch.empty(100, 100, dtype=dtype, device=device)
            q_row1 = q[0:1].clone()
            q[99:100].normal_()
            self.assertEqual(t_transform(q[99:100]).mean(), 0, atol=0.2, rtol=0)
            self.assertEqual(t_transform(q[99:100]).std(), std_transform(1), atol=0.2, rtol=0)
            self.assertEqual(t_transform(q[0:1]).clone(), t_transform(q_row1))
            # Tensor mean (rows 0-49 -> 0, rows 50-99 -> 1) and tensor std
            # (cols 0-49 -> 4, cols 50-99 -> 1) used by the combinations below.
            mean = torch.empty(100, 100, dtype=dtype, device=device)
            mean[:50].fill_(ptype(0))
            mean[50:].fill_(ptype(1))
            std = torch.empty(100, 100, dtype=torch.float, device=device)
            std[:, :50] = 4
            std[:, 50:] = 1
            r = torch.normal(mean)
            self.assertEqual(r.dtype, dtype)
            self.assertEqual(str(r.device), device)
            self.assertEqual(t_transform(r[:50]).mean(), 0, atol=0.2, rtol=0)
            self.assertEqual(t_transform(r[50:]).mean(), 1, atol=0.2, rtol=0)
            self.assertEqual(t_transform(r).std(), std_transform(1), atol=0.2, rtol=0)
            r.fill_(42)
            r = torch.normal(mean, 3)
            self.assertEqual(r.dtype, dtype)
            self.assertEqual(str(r.device), device)
            self.assertEqual(t_transform(r[:50]).mean(), 0, atol=0.2, rtol=0)
            self.assertEqual(t_transform(r[50:]).mean(), 1, atol=0.2, rtol=0)
            self.assertEqual(t_transform(r).std(), std_transform(3), atol=0.2, rtol=0)
            r.fill_(42)
            torch.normal(mean, 3, out=r)
            self.assertEqual(r.dtype, dtype)
            self.assertEqual(str(r.device), device)
            self.assertEqual(t_transform(r[:50]).mean(), 0, atol=0.2, rtol=0)
            self.assertEqual(t_transform(r[50:]).mean(), 1, atol=0.2, rtol=0)
            self.assertEqual(t_transform(r).std(), std_transform(3), atol=0.2, rtol=0)
            r.fill_(42)
            r = torch.normal(2, std)
            self.assertFalse(r.dtype.is_complex)
            self.assertEqual(str(r.device), device)
            self.assertEqual(r.mean(), 2, atol=0.2, rtol=0)
            self.assertEqual(r[:, :50].std(), 4, atol=0.3, rtol=0)
            self.assertEqual(r[:, 50:].std(), 1, atol=0.2, rtol=0)
            r.fill_(42)
            torch.normal(2, std, out=r)
            self.assertFalse(r.dtype.is_complex)
            self.assertEqual(str(r.device), device)
            self.assertEqual(r.mean(), 2, atol=0.2, rtol=0)
            self.assertEqual(r[:, :50].std(), 4, atol=0.3, rtol=0)
            self.assertEqual(r[:, 50:].std(), 1, atol=0.2, rtol=0)
            r.fill_(42)
            r = torch.normal(mean, std)
            self.assertEqual(r.dtype, dtype)
            self.assertEqual(str(r.device), device)
            self.assertEqual(t_transform(r[:50]).mean(), 0, atol=0.2, rtol=0)
            self.assertEqual(t_transform(r[50:]).mean(), 1, atol=0.2, rtol=0)
            self.assertEqual(t_transform(r[:, :50]).std(), std_transform(4), atol=0.3, rtol=0)
            self.assertEqual(t_transform(r[:, 50:]).std(), std_transform(1), atol=0.2, rtol=0)
            r.fill_(42)
            torch.normal(mean, std, out=r)
            self.assertEqual(r.dtype, dtype)
            self.assertEqual(str(r.device), device)
            self.assertEqual(t_transform(r[:50]).mean(), 0, atol=0.2, rtol=0)
            self.assertEqual(t_transform(r[50:]).mean(), 1, atol=0.2, rtol=0)
            self.assertEqual(t_transform(r[:, :50]).std(), std_transform(4), atol=0.3, rtol=0)
            self.assertEqual(t_transform(r[:, 50:]).std(), std_transform(1), atol=0.2, rtol=0)
            # test empty mean/std
            out = torch.normal(mean=torch.empty((0, 2)), std=torch.empty((0, 1)))
            self.assertEqual(out.size(), torch.Size([0, 2]))
            r.fill_(42)
            r = torch.normal(2, 3, (100, 100), dtype=dtype, device=device)
            self.assertEqual(r.dtype, dtype)
            self.assertEqual(str(r.device), device)
            self.assertEqual(t_transform(r).mean(), 2, atol=0.3, rtol=0)
            self.assertEqual(t_transform(r).std(), std_transform(3), atol=0.3, rtol=0)
            r.fill_(42)
            torch.normal(2, 3, (100, 100), dtype=dtype, device=device, out=r)
            self.assertEqual(r.dtype, dtype)
            self.assertEqual(str(r.device), device)
            self.assertEqual(t_transform(r).mean(), 2, atol=0.3, rtol=0)
            self.assertEqual(t_transform(r).std(), std_transform(3), atol=0.3, rtol=0)
            # float std 0 with float mean
            r.fill_(42)
            torch.normal(2, 0, (10, 10), dtype=dtype, device=device, out=r)
            self.assertEqual(r.dtype, dtype)
            self.assertEqual(str(r.device), device)
            self.assertTrue(r.eq(2).all())
            # float std 0 with tensor mean
            r.fill_(42)
            mean_rand = torch.randn(10, 10, dtype=dtype, device=device)
            torch.normal(mean_rand, 0, out=r)
            self.assertEqual(r.dtype, dtype)
            self.assertEqual(str(r.device), device)
            self.assertEqual(mean_rand, r, atol=0, rtol=0)
            # tensor std 0 with float mean
            r.fill_(42)
            std_zeros = torch.zeros(10, 10, dtype=dtype, device=device)
            torch.normal(2, std_zeros, out=r)
            self.assertEqual(r.dtype, dtype)
            self.assertEqual(str(r.device), device)
            self.assertTrue(r.eq(2).all())
            # tensor std 0 with tensor mean
            r.fill_(42)
            torch.normal(mean_rand, std_zeros, out=r)
            self.assertEqual(r.dtype, dtype)
            self.assertEqual(str(r.device), device)
            self.assertEqual(mean_rand, r, atol=0, rtol=0)
        if dtype.is_complex:
            # For complex dtypes, check the real and imaginary parts separately;
            # each carries half of the variance (hence the / sqrt(2)).
            helper(self, device, dtype, lambda x: complex(x, x),
                   lambda t: torch.real(t).to(torch.float), lambda mean: mean / math.sqrt(2))
            helper(self, device, dtype, lambda x: complex(x, x),
                   lambda t: torch.imag(t).to(torch.float), lambda mean: mean / math.sqrt(2))
            self.assertRaisesRegex(
                RuntimeError, "normal expects standard deviation to be non-complex",
                lambda: torch.normal(0, torch.empty(100, 100, dtype=dtype, device=device)))
            out = torch.empty(100, 100, dtype=dtype, device=device)
            self.assertRaisesRegex(
                RuntimeError, "normal expects standard deviation to be non-complex",
                lambda: torch.normal(0, torch.empty(100, 100, dtype=dtype, device=device), out=out))
        else:
            helper(self, device, dtype, lambda x: x, lambda t: t, lambda mean: mean)
    # Ensure that normal raises appropriate error when `std` < 0
    def test_normal_std_error(self, device):
        """normal() must reject a negative std, both as a scalar and as a tensor."""
        a = torch.tensor(0, dtype=torch.float32, device=device)
        std = torch.tensor(-1, dtype=torch.float32, device=device)
        for input in [0, a]:
            with self.assertRaisesRegex(RuntimeError, r'normal expects std >= 0.0, but found std'):
                torch.normal(input, -1, (10,))
            with self.assertRaisesRegex(RuntimeError, r'normal expects all elements of std >= 0.0'):
                torch.normal(input, std)
    @dtypes(torch.float, torch.double, torch.half)
    @dtypesIfCUDA(torch.float, torch.double, torch.half, torch.bfloat16)
    def test_uniform_from_to(self, device, dtype):
        """uniform_(from, to) over extreme float ranges: error cases for inverted or
        oversized ranges, and loose min/max bound checks otherwise."""
        size = 2000
        alpha = 0.1
        float_min = torch.finfo(torch.float).min
        float_max = torch.finfo(torch.float).max
        double_min = torch.finfo(torch.double).min
        double_max = torch.finfo(torch.double).max
        if dtype == torch.bfloat16:
            # NOTE(review): hard-coded bfloat16 bounds rather than torch.finfo —
            # presumably deliberate; confirm before "simplifying" to finfo.
            min_val = -3.389531389251535e+38
            max_val = 3.389531389251535e+38
        else:
            min_val = torch.finfo(dtype).min
            max_val = torch.finfo(dtype).max
        values = [double_min, float_min, -42, 0, 42, float_max, double_max]
        for from_ in values:
            for to_ in values:
                t = torch.empty(size, dtype=dtype, device=device)
                if not (min_val <= from_ <= max_val) or not (min_val <= to_ <= max_val):
                    pass
                elif to_ < from_:
                    self.assertRaisesRegex(
                        RuntimeError,
                        "uniform_ expects to return",
                        lambda: t.uniform_(from_, to_)
                    )
                elif to_ - from_ > max_val:
                    self.assertRaisesRegex(
                        RuntimeError,
                        "uniform_ expects to-from",
                        lambda: t.uniform_(from_, to_)
                    )
                else:
                    t.uniform_(from_, to_)
                    range_ = to_ - from_
                    if not (dtype == torch.bfloat16) and not (
                            dtype == torch.half and device == 'cpu') and not torch.isnan(t).all():
                        delta = alpha * range_
                        double_t = t.to(torch.double)
                        if range_ == 0:
                            self.assertTrue(double_t.min() == from_)
                            self.assertTrue(double_t.max() == to_)
                        elif dtype == torch.half:
                            self.assertTrue(from_ <= double_t.min() <= (from_ + delta))
                            self.assertTrue((to_ - delta) <= double_t.max() <= to_)
                        else:
                            self.assertTrue(from_ <= double_t.min() <= (from_ + delta))
                            self.assertTrue((to_ - delta) <= double_t.max() < to_)
    def test_random_neg_values(self, device):
        """random_(-10, -1) on signed dtypes stays within the requested bounds."""
        SIZE = 10
        signed_dtypes = [torch.double, torch.float, torch.long, torch.int, torch.short]
        for dtype in signed_dtypes:
            res = torch.rand(SIZE, SIZE).to(device=device, dtype=dtype)
            res.random_(-10, -1)
            # NOTE(review): the upper bound 9 is much looser than the expected -2
            # for random_(-10, -1); verify whether a tighter bound was intended.
            self.assertLessEqual(res.max().item(), 9)
            self.assertGreaterEqual(res.min().item(), -10)
    # TODO: this test should be updated
    @onlyCPU
    def test_randint_inference(self, device):
        """randint dtype inference: int64 by default; out= / dtype= override it."""
        size = (2, 1)
        for args in [(3,), (1, 3)]:  # (low,) and (low, high)
            self.assertIs(torch.int64, torch.randint(*args, size=size).dtype)
            self.assertIs(torch.int64, torch.randint(*args, size=size, layout=torch.strided).dtype)
            self.assertIs(torch.int64, torch.randint(*args, size=size, generator=torch.default_generator).dtype)
            self.assertIs(torch.float32, torch.randint(*args, size=size, dtype=torch.float32).dtype)
            out = torch.empty(size, dtype=torch.float32)
            self.assertIs(torch.float32, torch.randint(*args, size=size, out=out).dtype)
            self.assertIs(torch.float32, torch.randint(*args, size=size, out=out, dtype=torch.float32).dtype)
            out = torch.empty(size, dtype=torch.int64)
            self.assertIs(torch.int64, torch.randint(*args, size=size, out=out).dtype)
            self.assertIs(torch.int64, torch.randint(*args, size=size, out=out, dtype=torch.int64).dtype)
    # TODO: this test should be updated
    @onlyCPU
    def test_randint(self, device):
        """Seeded randint reproducibility across out=, implicit-low, and explicit
        generator variants; results must agree and respect [0, 6)."""
        SIZE = 100
        def seed(generator):
            # Re-seed either the global RNG (generator=None) or the explicit one.
            if generator is None:
                torch.manual_seed(123456)
            else:
                generator.manual_seed(123456)
            return generator
        for generator in (None, torch.Generator()):
            generator = seed(generator)
            res1 = torch.randint(0, 6, (SIZE, SIZE), generator=generator)
            res2 = torch.empty((), dtype=torch.int64)
            generator = seed(generator)
            torch.randint(0, 6, (SIZE, SIZE), generator=generator, out=res2)
            generator = seed(generator)
            res3 = torch.randint(6, (SIZE, SIZE), generator=generator)
            res4 = torch.empty((), dtype=torch.int64)
            generator = seed(generator)
            torch.randint(6, (SIZE, SIZE), out=res4, generator=generator)
            self.assertEqual(res1, res2)
            self.assertEqual(res1, res3)
            self.assertEqual(res1, res4)
            self.assertEqual(res2, res3)
            self.assertEqual(res2, res4)
            self.assertEqual(res3, res4)
            self.assertTrue((res1 < 6).all().item())
            self.assertTrue((res1 >= 0).all().item())
    @dtypes(torch.half, torch.float, torch.bfloat16, torch.double,
            torch.complex32, torch.complex64, torch.complex128)
    def test_randn(self, device, dtype):
        """Seeded randn reproducibility with and without out= (including size 0)."""
        SIZE = 100
        for size in [0, SIZE]:
            torch.manual_seed(123456)
            res1 = torch.randn(size, size, dtype=dtype, device=device)
            res2 = torch.tensor([], dtype=dtype, device=device)
            torch.manual_seed(123456)
            torch.randn(size, size, out=res2)
            self.assertEqual(res1, res2)
    @dtypes(torch.float, torch.double, torch.complex32, torch.complex64, torch.complex128)
    def test_rand(self, device, dtype):
        """Seeded rand reproducibility with and without out= (including size 0)."""
        SIZE = 100
        for size in [0, SIZE]:
            torch.manual_seed(123456)
            res1 = torch.rand(size, size, dtype=dtype, device=device)
            res2 = torch.tensor([], dtype=dtype, device=device)
            torch.manual_seed(123456)
            torch.rand(size, size, out=res2)
            self.assertEqual(res1, res2)
    def test_randperm(self, device):
        """randperm across sizes/dtypes: out= reproducibility under fork_rng,
        permutation validity, overflow errors for small float types, and
        non-contiguous out tensors."""
        if device == 'cpu' or device == 'meta':
            rng_device = None
        else:
            # TODO: This won't actually work for non-CUDA device
            # see https://github.com/pytorch/pytorch/issues/54282
            rng_device = [device]
        # Test core functionality. On CUDA, different value of n has different
        # code path
        for n in (5, 100, 50000, 100000):
            # Ensure both integer and floating-point numbers are tested. Half follows an execution path that is
            # different from others on CUDA.
            for dtype in (torch.long, torch.half, torch.float, torch.bfloat16):
                if n > 2049 and dtype == torch.half:  # Large n for torch.half will raise an exception, do not test here.
                    continue
                if dtype == torch.bfloat16 and device != 'cpu':
                    continue
                if n > 256 and dtype == torch.bfloat16:
                    continue
                with torch.random.fork_rng(devices=rng_device):
                    res1 = torch.randperm(n, dtype=dtype, device=device)
                    res2 = torch.empty(0, dtype=dtype, device=device)
                    torch.randperm(n, out=res2, dtype=dtype, device=device)
                    self.assertEqual(res1, res2, atol=0, rtol=0)
                self.assertEqual(res1.sort().values.long(), torch.arange(n, device=device))
        # Default type is long
        for n in (100, 10000):
            self.assertEqual(torch.randperm(n, device=device).dtype, torch.long)
        # randperm of 0 elements is an empty tensor
        res1 = torch.randperm(0)
        # NOTE(review): `dtype` here is a leftover of the loop above (its last
        # value), not a deliberate choice — only numel is checked below.
        res2 = torch.tensor(5, dtype=dtype, device=device)
        torch.randperm(0, out=res2)
        self.assertEqual(res1.numel(), 0)
        self.assertEqual(res2.numel(), 0)
        # Test exceptions when n is too large for a floating point type
        for dtype, small_n, large_n in ((torch.uint8, 2**8, 2**8 + 1),
                                        (torch.half, 2**11 + 1, 2**11 + 2),
                                        (torch.float, 2**24 + 1, 2**24 + 2),
                                        (torch.double, 2**25,  # 2**53 + 1 is too large to run
                                         2**53 + 2)):
            res = torch.empty(0, dtype=dtype, device=device)
            torch.randperm(small_n, out=res)  # No exception expected
            self.assertRaises(RuntimeError, lambda: torch.randperm(large_n, out=res, device=device))
        # Test non-contiguous tensors
        for n in (4, 5, 6, 10, 20):
            non_contiguous_tensor = torch.zeros((2, 3), dtype=torch.long, device=device).t()
            self.assertFalse(non_contiguous_tensor.is_contiguous())
            with torch.random.fork_rng(devices=rng_device):
                res = torch.randperm(n, dtype=torch.long, device=device)
                torch.randperm(n, out=non_contiguous_tensor)
            self.assertEqual(non_contiguous_tensor, res)
            self.assertEqual(res.sort().values.long(), torch.arange(n, device=device))
    # Test exceptions when device and generator types are incompatible
    @onlyCUDA
    def test_randperm_device_compatibility(self, device):
        """Generator/device mismatches: ordinal-only mismatches are tolerated,
        device-type mismatches raise (except the n == 0 fast path)."""
        cuda_gen = torch.Generator(device='cuda')
        cpu_gen = torch.Generator(device='cpu')
        # n=0 is a special case that we don't need to use generator, thus no error even if
        # device and generator don't match
        torch.randperm(0, device='cuda:0', generator=torch.Generator(device='cuda:1'))
        if torch.cuda.device_count() > 1:
            torch.randperm(0, device='cuda:1', generator=torch.Generator(device='cuda:0'))
        torch.randperm(0, device='cuda', generator=torch.Generator(device='cpu'))
        torch.randperm(0, device='cpu', generator=torch.Generator(device='cuda'))
        for n in (1, 3, 100, 30000):
            torch.randperm(n, device='cuda', generator=torch.Generator(device='cuda:0'))
            torch.randperm(n, device='cuda:0', generator=torch.Generator(device='cuda'))
            # For cuda:0 to match cuda:1, we are making consistent device type matching
            # behavior just like torch.randint. Longer term, generator should ignore
            # device ordinal, since it's not used anyway.
            torch.randint(low=0, high=n + 1, size=(1,), device="cuda:0", generator=torch.Generator(device='cuda:1'))
            torch.randperm(n, device='cuda:0', generator=torch.Generator(device='cuda:1'))
            if torch.cuda.device_count() > 1:
                torch.randint(low=0, high=n + 1, size=(1,), device="cuda:1", generator=torch.Generator(device='cuda:0'))
                torch.randperm(n, device='cuda:1', generator=torch.Generator(device='cuda:0'))
            regex = 'Expected a .* device type for generator but found .*'
            cuda_t = torch.tensor(n, device='cuda')
            self.assertRaisesRegex(RuntimeError, regex, lambda: torch.randperm(n, device='cuda', generator=cpu_gen))
            self.assertRaisesRegex(RuntimeError, regex, lambda: torch.randperm(n, device='cuda', generator=cpu_gen, out=cuda_t))
            cpu_t = torch.tensor(n, device='cpu')
            self.assertRaisesRegex(RuntimeError, regex, lambda: torch.randperm(n, device='cpu', generator=cuda_gen))
            self.assertRaisesRegex(RuntimeError, regex, lambda: torch.randperm(n, device='cpu', generator=cuda_gen, out=cpu_t))
            self.assertRaisesRegex(RuntimeError, regex, lambda: torch.randperm(n, generator=cuda_gen))  # implicitly on CPU
# Class for testing *like ops, like torch.ones_like
class TestLikeTensorCreation(TestCase):
    """Tests for *_like creation ops (ones_like, zeros_like, empty_like, full_like)."""
    exact_dtype = True
    # TODO: this test should be updated
    def test_ones_like(self, device):
        """ones_like matches an explicitly constructed ones tensor (float and bool)."""
        expected = torch.ones(100, 100, device=device)
        res1 = torch.ones_like(expected)
        self.assertEqual(res1, expected)
        # test boolean tensor
        expected = torch.tensor([True, True], device=device, dtype=torch.bool)
        res1 = torch.ones_like(expected)
        self.assertEqual(res1, expected)
    # TODO: this test should be updated
    @onlyCPU
    def test_empty_like(self, device):
        """empty_like preserves shape and type string for float/int Variables."""
        x = torch.autograd.Variable(torch.tensor([]))
        y = torch.autograd.Variable(torch.randn(4, 4))
        z = torch.autograd.Variable(torch.IntTensor([1, 2, 3]))
        for a in (x, y, z):
            self.assertEqual(torch.empty_like(a).shape, a.shape)
            self.assertEqualTypeString(torch.empty_like(a), a)
    def test_zeros_like(self, device):
        """zeros_like matches torch.zeros of the same shape."""
        expected = torch.zeros((100, 100,), device=device)
        res1 = torch.zeros_like(expected)
        self.assertEqual(res1, expected)
    @deviceCountAtLeast(2)
    def test_zeros_like_multiple_device(self, devices):
        """zeros_like of an input on a second device matches zeros on the first (value-wise)."""
        expected = torch.zeros(100, 100, device=devices[0])
        x = torch.randn(100, 100, device=devices[1], dtype=torch.float32)
        output = torch.zeros_like(x)
        self.assertEqual(output, expected)
    @deviceCountAtLeast(2)
    def test_ones_like_multiple_device(self, devices):
        """ones_like of an input on a second device matches ones on the first (value-wise)."""
        expected = torch.ones(100, 100, device=devices[0])
        x = torch.randn(100, 100, device=devices[1], dtype=torch.float32)
        output = torch.ones_like(x)
        self.assertEqual(output, expected)
    # Full-like precedence is the explicit dtype then the dtype of the "like"
    # tensor.
    @onlyNativeDeviceTypes
    def test_full_like_inference(self, device):
        """full_like keeps the input's dtype unless an explicit dtype overrides it."""
        # NOTE(review): `size` is unused in this test.
        size = (2, 2)
        like = torch.empty((5,), device=device, dtype=torch.long)
        self.assertEqual(torch.full_like(like, 1.).dtype, torch.long)
        self.assertEqual(torch.full_like(like, 1., dtype=torch.complex64).dtype,
                         torch.complex64)
# Tests for the `frombuffer` function (only work on CPU):
# Constructs tensors from Python objects that implement the buffer protocol,
# without copying data.
# Shared fixture sizes for the TestBufferProtocol tests below.
SIZE = 5
SHAPE = (SIZE,)
def may_require_grad(dtype):
    """Return True when `dtype` can carry gradients (floating point or complex)."""
    return any((dtype.is_floating_point, dtype.is_complex))
def get_dtype_size(dtype):
    """Size in bytes of a single element of `dtype`."""
    scalar = torch.empty((), dtype=dtype)
    return int(scalar.element_size())
class TestBufferProtocol(TestCase):
    """Tests for torch.frombuffer: creating tensors from buffer-protocol objects.

    NumPy's `np.frombuffer` is used as the reference implementation throughout.
    """
    def _run_test(self, shape, dtype, count=-1, first=0, offset=None, **kwargs):
        """Build a buffer, run torch.frombuffer and np.frombuffer on it, and compare.

        `first` is an element index that is converted into a byte `offset` when the
        latter is not given. Returns (backing numpy array, torch result).
        """
        numpy_dtype = torch_to_numpy_dtype_dict[dtype]
        if offset is None:
            offset = first * get_dtype_size(dtype)
        numpy_original = make_tensor(shape, dtype=dtype, device="cpu").numpy()
        original = memoryview(numpy_original)
        # First call PyTorch's version in case of errors.
        # If this call exits successfully, the NumPy version must also do so.
        torch_frombuffer = torch.frombuffer(original, dtype=dtype, count=count, offset=offset, **kwargs)
        numpy_frombuffer = np.frombuffer(original, dtype=numpy_dtype, count=count, offset=offset)
        self.assertEqual(numpy_frombuffer, torch_frombuffer)
        # Both results must alias the exact same memory.
        self.assertEqual(numpy_frombuffer.__array_interface__["data"][0], torch_frombuffer.data_ptr())
        return (numpy_original, torch_frombuffer)
    @dtypes(*set(numpy_to_torch_dtype_dict.values()))
    def test_same_type(self, device, dtype):
        """Round-trip scalars, vectors and matrices keeping the same dtype."""
        self._run_test((), dtype)
        self._run_test((4,), dtype)
        self._run_test((10, 10), dtype)
    @dtypes(*set(numpy_to_torch_dtype_dict.values()))
    def test_requires_grad(self, device, dtype):
        """requires_grad is honored when the dtype supports autograd."""
        def _run_test_and_check_grad(requires_grad, *args, **kwargs):
            kwargs["requires_grad"] = requires_grad
            _, tensor = self._run_test(*args, **kwargs)
            self.assertTrue(tensor.requires_grad == requires_grad)
        requires_grad = may_require_grad(dtype)
        _run_test_and_check_grad(requires_grad, (), dtype)
        _run_test_and_check_grad(requires_grad, (4,), dtype)
        _run_test_and_check_grad(requires_grad, (10, 10), dtype)
        _run_test_and_check_grad(False, (), dtype)
        _run_test_and_check_grad(False, (4,), dtype)
        _run_test_and_check_grad(False, (10, 10), dtype)
    @dtypes(*set(numpy_to_torch_dtype_dict.values()))
    def test_with_offset(self, device, dtype):
        # Offset should be valid whenever there is, at least,
        # one remaining element
        for i in range(SIZE):
            self._run_test(SHAPE, dtype, first=i)
    @dtypes(*set(numpy_to_torch_dtype_dict.values()))
    def test_with_count(self, device, dtype):
        # Count should be valid for any valid in the interval
        # [-1, len(input)], except for 0
        for i in range(-1, SIZE + 1):
            if i != 0:
                self._run_test(SHAPE, dtype, count=i)
    @dtypes(*set(numpy_to_torch_dtype_dict.values()))
    def test_with_count_and_offset(self, device, dtype):
        """Valid combinations of `count` and an element-aligned `first` offset."""
        # Explicit default count [-1, 1, 2, ..., len]
        for i in range(-1, SIZE + 1):
            if i != 0:
                self._run_test(SHAPE, dtype, count=i)
        # Explicit default offset [0, 1, ..., len - 1]
        for i in range(SIZE):
            self._run_test(SHAPE, dtype, first=i)
        # All possible combinations of count and dtype aligned
        # offset for 'input'
        # count:[1, 2, ..., len - 1] x first:[0, 1, ..., len - count]
        for i in range(1, SIZE):
            for j in range(SIZE - i + 1):
                self._run_test(SHAPE, dtype, count=i, first=j)
    @dtypes(*set(numpy_to_torch_dtype_dict.values()))
    def test_invalid_positional_args(self, device, dtype):
        """Error messages for empty buffers, zero count, bad offsets and oversized counts."""
        bytes = get_dtype_size(dtype)
        in_bytes = SIZE * bytes
        # Empty array
        with self.assertRaisesRegex(ValueError,
                                    r"both buffer length \(0\) and count"):
            empty = np.array([])
            torch.frombuffer(empty, dtype=dtype)
        # Count equals 0
        with self.assertRaisesRegex(ValueError,
                                    r"both buffer length .* and count \(0\)"):
            self._run_test(SHAPE, dtype, count=0)
        # Offset negative and bigger than total length
        with self.assertRaisesRegex(ValueError,
                                    rf"offset \(-{bytes} bytes\) must be"):
            self._run_test(SHAPE, dtype, first=-1)
        with self.assertRaisesRegex(ValueError,
                                    rf"offset \({in_bytes} bytes\) must be .* "
                                    rf"buffer length \({in_bytes} bytes\)"):
            self._run_test(SHAPE, dtype, first=SIZE)
        # Non-multiple offset with all elements
        if bytes > 1:
            offset = bytes - 1
            with self.assertRaisesRegex(ValueError,
                                        rf"buffer length \({in_bytes - offset} bytes\) after "
                                        rf"offset \({offset} bytes\) must be"):
                self._run_test(SHAPE, dtype, offset=bytes - 1)
        # Count too big for each good first element
        for first in range(SIZE):
            count = SIZE - first + 1
            with self.assertRaisesRegex(ValueError,
                                        rf"requested buffer length \({count} \* {bytes} bytes\) "
                                        rf"after offset \({first * bytes} bytes\) must .*"
                                        rf"buffer length \({in_bytes} bytes\)"):
                self._run_test(SHAPE, dtype, count=count, first=first)
    @dtypes(*set(numpy_to_torch_dtype_dict.values()))
    def test_shared_buffer(self, device, dtype):
        """frombuffer aliases the producing buffer: writes are visible both ways."""
        x = make_tensor((1,), dtype=dtype, device=device)
        # Modify the whole tensor
        arr, tensor = self._run_test(SHAPE, dtype)
        tensor[:] = x
        self.assertEqual(arr, tensor)
        self.assertTrue((tensor == x).all().item())
        # Modify the whole tensor from all valid offsets, given
        # a count value
        for count in range(-1, SIZE + 1):
            if count == 0:
                continue
            actual_count = count if count > 0 else SIZE
            # Bug fix: the upper bound is inclusive — first == SIZE - actual_count
            # is still valid (the window then ends exactly at the buffer's end).
            # The previous `range(SIZE - actual_count)` skipped that offset and
            # skipped the count == -1 / count == SIZE cases entirely.
            for first in range(SIZE - actual_count + 1):
                last = first + actual_count
                arr, tensor = self._run_test(SHAPE, dtype, first=first, count=count)
                tensor[:] = x
                self.assertEqual(arr[first:last], tensor)
                self.assertTrue((tensor == x).all().item())
                # Modify the first value in the array
                arr[first] = x.item() - 1
                self.assertEqual(arr[first:last], tensor)
    @dtypes(*set(numpy_to_torch_dtype_dict.values()))
    def test_not_a_buffer(self, device, dtype):
        """Plain Python lists do not implement the buffer protocol."""
        with self.assertRaisesRegex(ValueError,
                                    r"object does not implement Python buffer protocol."):
            torch.frombuffer([1, 2, 3, 4], dtype=dtype)
    @dtypes(*set(numpy_to_torch_dtype_dict.values()))
    def test_non_writable_buffer(self, device, dtype):
        """A read-only buffer is accepted but warns once about writability."""
        numpy_arr = make_tensor((1,), dtype=dtype, device=device).numpy()
        byte_arr = numpy_arr.tobytes()
        with self.assertWarnsOnceRegex(UserWarning,
                                       r"The given buffer is not writable."):
            torch.frombuffer(byte_arr, dtype=dtype)
    def test_byte_to_int(self):
        """Reinterpret 8 bytes as two int32 values (little-endian assumption)."""
        byte_array = np.array([-1, 0, 0, 0, -1, 0, 0, 0], dtype=np.byte)
        tensor = torch.frombuffer(byte_array, dtype=torch.int32)
        self.assertEqual(tensor.numel(), 2)
        # Assuming little endian machine
        self.assertSequenceEqual(tensor, [255, 255])
# Tests for the `asarray` function:
# Constructs tensors from a Python object that has one of the following
# characteristics:
# 1. is a Tensor
# 2. is a DLPack capsule
# 3. implements the Python Buffer protocol
# 4. is an arbitrary list
# The implementation itself is based on the Python Array API:
# https://data-apis.org/array-api/latest/API_specification/creation_functions.html
def get_another_device(device):
    """Return the 'other' device kind: 'cuda' for CPU inputs, 'cpu' otherwise."""
    if torch.device(device).type == "cpu":
        return "cuda"
    return "cpu"
def identity(tensor):
    """No-op conversion: hand the tensor itself to torch.asarray."""
    return tensor
def to_numpy(tensor):
    """Convert a CPU tensor to a NumPy array sharing its storage (used by the alias tests)."""
    return tensor.numpy()
def to_memview(tensor):
    """Expose a CPU tensor's data as a memoryview (Python buffer protocol)."""
    return memoryview(tensor.numpy())
class TestAsArray(TestCase):
    def _check(self, original, cvt=lambda t: t, is_alias=True, same_dtype=True, same_device=True, **kwargs):
        """Check the output of 'asarray', given its input and assertion information.
        Besides calling 'asarray' itself, this function does 4 different checks:
        1. Whether the result is aliased or not, depending on 'is_alias'
        2. Whether the result has the expected dtype and elements
        3. Whether the result lives in the expected device
        4. Whether the result has its 'requires_grad' set or not
        """
        result = torch.asarray(cvt(original), **kwargs)
        self.assertTrue(isinstance(result, torch.Tensor))
        # 1. The storage pointers should be equal only if 'is_alias' is set
        if is_alias:
            self.assertEqual(result.data_ptr(), original.data_ptr())
        else:
            self.assertNotEqual(result.data_ptr(), original.data_ptr())
        # 2. Comparison of the elements only takes place if the original
        # sequence and the resulting tensor have the same data type
        if same_dtype:
            self.assertEqual(original, result)
        else:
            dtype = kwargs.get("dtype", torch.get_default_dtype())
            self.assertEqual(original.shape, result.shape)
            self.assertEqual(dtype, result.dtype)
        # 3. Given the specified target device, we first check whether
        # its type is the same, and then if its index is the same (if it
        # is not None)
        if same_device:
            device = original.device
        else:
            device = torch.device(kwargs.get("device", "cpu"))
        # Compare the target device type, and its index
        self.assertEqual(device.type, result.device.type)
        if device.index is not None:
            self.assertEqual(device.index, result.device.index)
        # 4. By default, 'requires_grad' is unset
        self.assertEqual(result.requires_grad, kwargs.get("requires_grad", False))
    def _test_alias_with_cvt(self, cvt, device, dtype, shape=(5, 5), only_with_dtype=False):
        """Run _check (aliasing expected) over combinations of copy/device/dtype/requires_grad.

        `cvt` converts the source tensor into the object handed to torch.asarray
        (identity, numpy array, DLPack capsule, or memoryview). When
        `only_with_dtype` is set, only the dtype-explicit combinations run.
        """
        original = make_tensor(shape, dtype=dtype, device=device)
        def check(**kwargs):
            self._check(original, cvt=cvt, **kwargs)
        if not only_with_dtype:
            check(copy=False)
            check(device=device)
            check(device=device, copy=False)
        check(dtype=dtype)
        check(dtype=dtype, copy=False)
        check(requires_grad=False, dtype=dtype)
        check(requires_grad=may_require_grad(dtype), dtype=dtype)
        check(device=device, dtype=dtype)
        check(device=device, dtype=dtype, copy=False)
    # Skipping 'meta' devices, since there's no point in comparing their
    # data pointer (which is basically the point here), since they all
    # return 0.
    @skipMeta
    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
    def test_alias_from_tensor(self, device, dtype):
        """asarray(tensor) aliases the input tensor."""
        self._test_alias_with_cvt(identity, device, dtype)
    @onlyCPU
    @dtypes(*set(numpy_to_torch_dtype_dict.values()))
    def test_alias_from_numpy(self, device, dtype):
        """asarray(numpy_array) aliases the array's buffer (CPU only)."""
        self._test_alias_with_cvt(to_numpy, device, dtype)
    # Skipping 'meta', since 'to_dlpack' does not work for them.
    @skipMeta
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
    def test_alias_from_dlpack(self, device, dtype):
        """asarray(DLPack capsule) aliases the exporting tensor's memory."""
        self._test_alias_with_cvt(to_dlpack, device, dtype)
    @onlyCPU
    @dtypes(*set(numpy_to_torch_dtype_dict.values()))
    def test_alias_from_buffer(self, device, dtype):
        """asarray(memoryview) aliases the buffer; only 1-D, dtype-explicit cases apply."""
        self._test_alias_with_cvt(to_memview, device, dtype, shape=(5,), only_with_dtype=True)
def _test_copy_with_cvt(self, cvt, device, dtype, shape=(5, 5), only_with_dtype=False):
    """Check that torch.asarray copies (never aliases) the source produced by `cvt`."""
    source = make_tensor(shape, dtype=dtype, device=device)

    def verify(**asarray_kwargs):
        self._check(source, cvt=cvt, is_alias=False, **asarray_kwargs)

    if not only_with_dtype:
        verify(copy=True)
        verify(device=device, copy=True)
    verify(requires_grad=False, dtype=dtype, copy=True)
    verify(requires_grad=may_require_grad(dtype), dtype=dtype, copy=True)
    verify(dtype=dtype, copy=True)
    verify(device=device, dtype=dtype, copy=True)
    if torch.cuda.is_available():
        # A cross-device transfer must copy even when copy is not requested.
        other_device = get_another_device(device)
        verify(same_device=False, device=other_device, dtype=dtype)
        verify(same_device=False, device=other_device, dtype=dtype, copy=True)
    if not only_with_dtype:
        # A dtype conversion forces a copy as well.
        for other_dtype in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16):
            if other_dtype == dtype:
                continue
            verify(same_dtype=False, dtype=other_dtype)
            verify(same_dtype=False, dtype=other_dtype, copy=True)
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_copy_tensor(self, device, dtype):
    """Copying from a plain tensor works for every dtype."""
    self._test_copy_with_cvt(cvt=identity, device=device, dtype=dtype)
@onlyCPU
@dtypes(*set(numpy_to_torch_dtype_dict.values()))
def test_copy_from_numpy(self, device, dtype):
    """Copying from a NumPy array (CPU only, NumPy-representable dtypes)."""
    self._test_copy_with_cvt(cvt=to_numpy, device=device, dtype=dtype)
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
def test_copy_from_dlpack(self, device, dtype):
    """Copying from a DLPack capsule."""
    self._test_copy_with_cvt(cvt=to_dlpack, device=device, dtype=dtype)
@onlyCPU
@dtypes(*set(numpy_to_torch_dtype_dict.values()))
def test_copy_from_buffer(self, device, dtype):
    """Copying from a buffer-protocol object; 1-D only, dtype required."""
    self._test_copy_with_cvt(cvt=to_memview, device=device, dtype=dtype, shape=(5,), only_with_dtype=True)
def _test_copy_mult_devices(self, devices, dtype, cvt):
    """Moving data between two CUDA devices must always copy."""
    src_device, dst_device = devices[0], devices[1]
    source = make_tensor((5, 5), dtype=dtype, device=src_device)

    def verify(**asarray_kwargs):
        self._check(source, cvt, is_alias=False, same_device=False, device=dst_device, **asarray_kwargs)

    verify()
    verify(copy=True)
    verify(dtype=dtype, copy=True)
@onlyCUDA
@deviceCountAtLeast(2)
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
def test_copy_from_tensor_mult_devices(self, devices, dtype):
    """Cross-device copy, sourcing from a plain tensor."""
    self._test_copy_mult_devices(devices, dtype, cvt=identity)
@onlyCUDA
@deviceCountAtLeast(2)
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
def test_copy_from_dlpack_mult_devices(self, devices, dtype):
    """Cross-device copy, sourcing from a DLPack capsule."""
    self._test_copy_mult_devices(devices, dtype, cvt=to_dlpack)
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_copy_list(self, device, dtype):
    """Converting a Python list always copies, regardless of the target device."""
    source = make_tensor((5, 5), dtype=dtype, device=torch.device("cpu"))
    on_cpu = torch.device("cpu") == device

    def verify(**asarray_kwargs):
        self._check(source, torch.Tensor.tolist, is_alias=False,
                    same_device=on_cpu, device=device, dtype=dtype, **asarray_kwargs)

    verify()
    verify(requires_grad=False)
    verify(requires_grad=may_require_grad(dtype))
    verify(copy=True)
@dtypes(torch.float32)
def test_unsupported_alias(self, device, dtype):
    """Aliasing must fail with ValueError whenever it would require a copy."""
    source = make_tensor((5, 5), dtype=dtype, device=device)

    # Cross-device aliasing is impossible.
    if torch.cuda.is_available():
        other_device = get_another_device(device)
        with self.assertRaisesRegex(ValueError,
                                    f"from device '{device}' to '{other_device}'"):
            torch.asarray(source, device=other_device, copy=False)

    # Aliasing cannot change the dtype.
    with self.assertRaisesRegex(ValueError,
                                "with dtype '.*' into dtype '.*'"):
        torch.asarray(source, dtype=torch.float64, copy=False)

    # Arbitrary Python sequences cannot be aliased at all.
    with self.assertRaisesRegex(ValueError,
                                "can't alias arbitrary sequence"):
        torch.asarray(source.tolist(), copy=False)
@onlyCUDA
@deviceCountAtLeast(2)
@dtypes(torch.float32)
def test_unsupported_alias_mult_devices(self, devices, dtype):
    """Aliasing across two CUDA devices must fail with ValueError."""
    first_device, second_device = devices[:2]
    source = make_tensor((5, 5), dtype=dtype, device=first_device)
    with self.assertRaisesRegex(ValueError,
                                f"from device '{first_device}' to '{second_device}'"):
        torch.asarray(source, device=second_device, copy=False)
@dtypes(torch.float32, torch.complex64)
def test_retain_autograd_history(self, device, dtype):
    """Autograd history survives asarray only when requires_grad is True."""
    leaf = make_tensor((5, 5), dtype=dtype, device=device, requires_grad=True)
    # 'tracked' carries autograd history ('grad_fn=<CloneBackwards>').
    tracked = leaf.clone()

    def verify(**asarray_kwargs):
        result = torch.asarray(tracked, **asarray_kwargs)
        wants_grad = asarray_kwargs.get("requires_grad", False)
        self.assertEqual(result.requires_grad, wants_grad)
        # History must be dropped whenever requires_grad ends up False.
        self.assertEqual(result.grad_fn is None, not wants_grad)

    for kwargs in ({}, {"requires_grad": True}, {"copy": True},
                   {"requires_grad": True, "copy": True},
                   {"requires_grad": False}, {"requires_grad": False, "copy": True}):
        verify(**kwargs)
@onlyCPU
def test_astensor_consistency(self, device):
    """torch.asarray must agree with torch.as_tensor on plain Python inputs.

    See issue: https://github.com/pytorch/pytorch/pull/71757
    """
    samples = [
        # Scalars
        True,
        42,
        1.0,
        # Homogeneous Lists
        [True, True, False],
        [1, 2, 3, 42],
        [0.0, 1.0, 2.0, 3.0],
        # Mixed Lists
        [True, False, 0],
        [0.0, True, False],
        [0, 1.0, 42],
        [0.0, True, False, 42],
        # With Complex
        [0.0, True, False, 42, 5j],
        # With Range
        range(5),
    ]
    for sample in samples:
        self.assertEqual(torch.asarray(sample), torch.as_tensor(sample))
# Generate per-device variants (CPU/CUDA/...) of each device-generic test class.
instantiate_device_type_tests(TestTensorCreation, globals())
instantiate_device_type_tests(TestRandomTensorCreation, globals())
instantiate_device_type_tests(TestLikeTensorCreation, globals())
# Buffer-protocol sources only exist for CPU-backed memory.
instantiate_device_type_tests(TestBufferProtocol, globals(), only_for="cpu")
instantiate_device_type_tests(TestAsArray, globals())
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_tensor_creation_ops.py |
#!/usr/bin/env python3
# Owner(s): ["oncall: mobile"]
import torch
import torch.utils.bundled_inputs
import io
import cv2
from torch.testing._internal.common_utils import TestCase
torch.ops.load_library("//caffe2/torch/fb/operators:decode_bundled_image")
def model_size(sm):
    """Return the serialized size, in bytes, of the scripted module `sm`."""
    stream = io.BytesIO()
    torch.jit.save(sm, stream)
    return len(stream.getvalue())
def save_and_load(sm):
    """Round-trip `sm` through an in-memory save/load and return the reloaded module."""
    stream = io.BytesIO()
    torch.jit.save(sm, stream)
    stream.seek(0)
    return torch.jit.load(stream)
def bundle_jpeg_image(img_tensor, quality):
    """Return an InflatableArg that contains a tensor of the compressed image and the way to decode it

    keyword arguments:
    img_tensor -- the raw image tensor in HWC or NCHW with pixel value of type unsigned int
                  if in NCHW format, N should be 1
    quality -- the quality needed to compress the image
    """
    # Fix: this docstring used to be a bare module-level string *before* the
    # def, so it was never attached to the function (help() showed nothing).
    # turn NCHW to HWC
    if img_tensor.dim() == 4:
        assert img_tensor.size(0) == 1
        img_tensor = img_tensor[0].permute(1, 2, 0)
    pixels = img_tensor.numpy()
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
    _, enc_img = cv2.imencode(".JPEG", pixels, encode_param)
    # Store the encoded bytes as a flat uint8 tensor; the format string tells
    # bundled_inputs how to inflate it back at load time.
    enc_img_tensor = torch.from_numpy(enc_img)
    enc_img_tensor = torch.flatten(enc_img_tensor).byte()
    obj = torch.utils.bundled_inputs.InflatableArg(enc_img_tensor, "torch.ops.fb.decode_bundled_image({})")
    return obj
def get_tensor_from_raw_BGR(im) -> torch.Tensor:
    """Convert a BGR image array (OpenCV layout) to a 1/255-scaled NCHW float tensor."""
    rgb = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    chw = torch.from_numpy(rgb).float().permute(2, 0, 1)
    return torch.div(chw, 255).unsqueeze(0)
class TestBundledImages(TestCase):
    def test_single_tensors(self):
        """Bundle a JPEG-compressed input with a model and check it inflates
        back to (approximately) the original pixels."""
        class SingleTensorModel(torch.nn.Module):
            def forward(self, arg):
                return arg
        im = cv2.imread("caffe2/test/test_img/p1.jpg")
        tensor = torch.from_numpy(im)
        inflatable_arg = bundle_jpeg_image(tensor, 90)
        input = [(inflatable_arg,)]
        sm = torch.jit.script(SingleTensorModel())
        torch.utils.bundled_inputs.augment_model_with_bundled_inputs(sm, input)
        loaded = save_and_load(sm)
        inflated = loaded.get_all_bundled_inputs()
        decoded_data = inflated[0][0]
        # raw image
        raw_data = get_tensor_from_raw_BGR(im)
        self.assertEqual(len(inflated), 1)
        self.assertEqual(len(inflated[0]), 1)
        self.assertEqual(raw_data.shape, decoded_data.shape)
        # Loose tolerances: JPEG compression at quality=90 is lossy.
        self.assertEqual(raw_data, decoded_data, atol=0.1, rtol=1e-01)
        # Check if fb::image_decode_to_NCHW works as expected
        with open("caffe2/test/test_img/p1.jpg", "rb") as fp:
            # weight/bias implement the same 1/255 scaling as
            # get_tensor_from_raw_BGR, applied inside the decode op.
            weight = torch.full((3,), 1.0 / 255.0).diag()
            bias = torch.zeros(3)
            byte_tensor = torch.tensor(list(fp.read())).byte()
            im2_tensor = torch.ops.fb.image_decode_to_NCHW(byte_tensor, weight, bias)
            self.assertEqual(raw_data.shape, im2_tensor.shape)
            self.assertEqual(raw_data, im2_tensor, atol=0.1, rtol=1e-01)
| pytorch-master | test/test_bundled_images.py |
# -*- coding: utf-8 -*-
# Owner(s): ["oncall: jit"]
from torch._C import _disabled_torch_function_impl
import torch.fx
import torch.nn.functional as F
from torch.testing._internal.common_utils import run_tests, TestCase
import unittest
import torch
import operator
import itertools
from torch.utils._pytree import tree_map
from torch.fx.experimental.symbolic_shapes import ShapeEnv, PySymInt
# Convenience alias for the ATen operator namespace.
aten = torch.ops.aten
# sympy is optional here: symbolic-shape tests are skipped (not failed)
# when it is not installed.
try:
    import sympy
    HAS_SYMPY = True
except ImportError:
    HAS_SYMPY = False
skipIfNoSympy = unittest.skipIf(not HAS_SYMPY, "no sympy")
# Registry mapping ATen overloads to their meta (shape-only) implementations.
meta_funcs = {}


def register_meta(op):
    """Decorator registering `f` as the meta function for `op`.

    `op` may be a single overload or a pytree (e.g. a list) of overloads;
    every leaf is mapped to the same implementation.
    """
    def decorator(f):
        def record(single_op):
            meta_funcs[single_op] = f
        tree_map(record, op)
        return f
    return decorator
@register_meta([aten.add.Tensor, aten.sub.Tensor])
def binary_meta(a, b):
    # Elementwise binary ops keep the (symbolic) shape of the first operand.
    # NOTE(review): broadcasting is not modeled here — assumes same-shaped args.
    return a.new_empty(a.shape)
@register_meta(aten.cat.default)
def cat_meta(tensors, dim=0):
    """Shape inference for aten.cat: sum sizes along `dim`; all other
    dimensions must match the first tensor."""
    reference = tensors[0].shape
    total = 0
    for t in tensors:
        for axis, (expected, actual) in enumerate(zip(reference, t.shape)):
            if axis == dim:
                total = total + actual
            else:
                assert actual == expected
    out_shape = list(reference)
    out_shape[dim] = total
    return tensors[0].new_empty(out_shape)
@register_meta([aten.narrow_copy.SymInt])
def narrow_copy_symint_meta(a, dim, start, length, **kwargs):
    """Shape inference for narrow_copy: output matches `a` except dimension
    `dim` has size `length` (`start` does not affect the shape)."""
    out_shape = [length if i == dim else size for i, size in enumerate(a.shape)]
    return a.new_empty(tuple(out_shape))
@register_meta([aten.expand.SymInt])
def expand_symint_meta(a, size, implicit=False):
    # expand's output shape is exactly the requested (possibly symbolic) size.
    return a.new_empty(size)
def create_contiguous(shape):
    """Return the row-major (contiguous) strides for `shape`.

    e.g. (2, 3, 4) -> [12, 4, 1].  For an empty shape this returns [1],
    matching the original function's behavior.
    """
    strides = [1]
    # Fix: stride[i] must be stride[i+1] * shape[i+1], i.e. the multipliers
    # walking right-to-left are shape[-1], shape[-2], ..., shape[1].  The
    # original iterated reversed(shape[:-1]) (shape[-2] ... shape[0]), which
    # multiplied each stride by its *own* dimension and produced
    # non-contiguous strides (e.g. (2, 3, 4) -> [6, 3, 1]).
    for dim in reversed(shape[1:]):
        strides.append(dim * strides[-1])
    return list(reversed(strides))
class FakeSymbolicTensor(torch.Tensor):
    """Wrapper tensor subclass whose sizes are symbolic ints and whose
    operators are answered by the registered meta functions (shape-only)
    instead of real kernels."""
    @staticmethod
    def __new__(cls, sym_shape, sym_strides, dtype, layout, requires_grad, device):
        # sym_strides doesn't work yet
        # TODO: this is wrong in general
        offset = 0
        r = torch.Tensor._make_wrapper_subclass(
            cls, sym_shape,
            create_contiguous(sym_shape), offset,
            dtype=dtype, layout=layout, requires_grad=requires_grad,
            device=device,
        )
        # Keep the symbolic shape around so __torch_dispatch__ can answer
        # sym_size/dim queries without touching real storage.
        r.sym_shape = sym_shape
        return r

    # Bypass __torch_function__ entirely so every op lands in __torch_dispatch__.
    __torch_function__ = _disabled_torch_function_impl

    def new_empty(self, shape):
        # Used by the meta functions above to build symbolic-shaped outputs.
        return FakeSymbolicTensor(shape, None, self.dtype, self.layout, self.requires_grad, self.device)

    @classmethod
    def __torch_dispatch__(cls, func_overload, types, args=(), kwargs=None):
        if func_overload in meta_funcs:
            return meta_funcs[func_overload](*args, **kwargs)
        if func_overload == torch.ops.aten.sym_size.default:
            self = args[0]
            return self.sym_shape
        # some calls can be redirected to `sym_size` rather than
        # `sym_sizes`. `sym_size` uses `dim` to canonicalize an index
        # so we need to implement both `sym_size` and `dim` for python
        # tensors
        if func_overload == torch.ops.aten.dim.default:
            self = args[0]
            return len(self.sym_shape)
        if func_overload == torch.ops.aten.new_empty.default:
            self = args[0]
            shape = args[1]
            return FakeSymbolicTensor(shape, self.stride(), self.dtype, self.layout, self.requires_grad, self.device)
        raise RuntimeError(f"operator {func_overload} not supported")
def create_symbolic_tensor(name, arg, shape_env):
    """Wrap the plain tensor `arg` in a FakeSymbolicTensor whose sizes and
    strides are fresh symbols (named after `name`) in `shape_env`."""
    sym_shapes = tuple(
        shape_env.create_symint(f"{name}_{idx}", val)
        for idx, val in enumerate(arg.size())
    )
    sym_strides = tuple(
        shape_env.create_symint(f"{name}_{idx}_stride", val)
        for idx, val in enumerate(arg.stride())
    )
    return FakeSymbolicTensor(sym_shapes, sym_strides, arg.dtype, arg.layout,
                              arg.requires_grad, arg.device)
# Concrete class of C++-backed SymIntNode objects; used below to assert that
# sizes round-tripped through the dispatcher come back as C++ symints.
CPP_SYMINT_CLASS = type(torch._C.SymIntNode.new_symint(1))
class TestPySymInt(TestCase):
    """Sanity tests for Python symbolic ints (PySymInt / C++ SymIntNode)
    flowing through tensor sizes, operator arguments, and tracing."""

    @skipIfNoSympy
    def test_arith_ops(self):
        """Arithmetic on symbolic ints must match arithmetic on their values."""
        shape_env = ShapeEnv()
        symints = []
        for i in range(5):
            symints.append((i, shape_env.create_symint(f"s{i}", i)))
        ops = [operator.add, operator.sub, operator.floordiv, operator.mul, operator.mod]
        for op in ops:
            for args in itertools.permutations(symints, 2):
                # Fix: the original guard `(op != mod or op != floordiv)` is a
                # tautology; the intent is to skip only division/modulo by
                # zero (and symints that were specialized to plain ints).
                if not isinstance(args[0][1], int) and \
                        (op not in (operator.mod, operator.floordiv) or args[1][0] != 0):
                    self.assertTrue(op(args[0][1], args[1][1]) == op(args[0][0], args[1][0]))

    @skipIfNoSympy
    def test_reverse_arith_ops(self):
        """Reflected (int op symint) operators must work too."""
        shape_env = ShapeEnv()
        a = shape_env.create_symint("s1", 2)
        self.assertTrue(5 // a == 5 // 2)
        a = shape_env.create_symint("s1", 2)
        self.assertTrue(5 * a == 5 * 2)

    @skipIfNoSympy
    def test_roundtrip(self):
        """Sizes of a symbolic tensor come back as C++ SymIntNodes carrying
        the right concrete values."""
        shape_env = ShapeEnv()
        x = create_symbolic_tensor("x", torch.randn(5, 4, 3), shape_env)
        self.assertTrue(not isinstance(x.shape[0], PySymInt))
        self.assertTrue(isinstance(x.shape[0], CPP_SYMINT_CLASS))
        self.assertTrue(x.shape[0] == 5)
        self.assertTrue(x.shape[1] == 4)
        # Fix: the three checks below previously passed a second positional
        # argument to assertTrue — which is the failure *message* — so they
        # could never fail.  They now assert the comparison itself, mirroring
        # the working assertions above.
        self.assertTrue(x.shape[2] == 3)
        self.assertTrue(x.size()[0] == 5)
        self.assertTrue(x.size()[1] == 4)
        self.assertTrue(isinstance(x.size()[1], CPP_SYMINT_CLASS))
        self.assertTrue(x.size()[2] == 3)
        self.assertTrue(x.size(0) == 5)
        self.assertTrue(x.size(1) == 4)
        self.assertTrue(x.size(2) == 3)
        self.assertTrue(isinstance(x.size(2), CPP_SYMINT_CLASS))

    @skipIfNoSympy
    def test_binary(self):
        """Binary ops propagate symbolic shapes (incl. the broadcasting path)."""
        shape_env = ShapeEnv()
        x = create_symbolic_tensor("x", torch.randn(5, 4, 3), shape_env)
        y = create_symbolic_tensor("y", torch.randn(5, 4, 3), shape_env)
        z = x + y
        self.assertTrue(z.shape[0] == 5)
        self.assertTrue(z.shape[1] == 4)
        self.assertTrue(z.shape[2] == 3)
        # broadcasting
        y = create_symbolic_tensor("y", torch.randn(1, 4, 1), shape_env)
        z = x + y
        self.assertTrue(z.shape[0] == 5)
        self.assertTrue(z.shape[1] == 4)
        self.assertTrue(z.shape[2] == 3)

    @skipIfNoSympy
    def test_symint_args(self):
        """SymInts can be passed as scalar op arguments (narrow_copy's length)."""
        shape_env = ShapeEnv()
        x = create_symbolic_tensor("x", torch.randn(5, 4, 3), shape_env)
        y = create_symbolic_tensor("y", torch.randn(5, 4, 1), shape_env)
        LAST_DIM = 2
        z = x.narrow_copy(LAST_DIM, 0, y.shape[LAST_DIM])
        self.assertTrue(z.shape[2] == int(y.shape[2]))
        # arithmetic expr with two symints
        z = x.narrow_copy(LAST_DIM, 0, x.shape[LAST_DIM] - y.shape[LAST_DIM])
        self.assertTrue(z.shape[2] == 2)
        # arithmetic expr with a symint and python int
        z = x.narrow_copy(LAST_DIM, 0, x.shape[LAST_DIM] - 1)
        self.assertTrue(z.shape[2] == 2)

    @skipIfNoSympy
    def test_symint_vargs(self):
        """SymInts work inside int-list arguments, mixed freely with ints."""
        shape_env = ShapeEnv()
        x = create_symbolic_tensor("x", torch.randn(5, 4, 3), shape_env)
        y = create_symbolic_tensor("y", torch.randn(1, 4, 1), shape_env)
        # varargs
        z = y.expand(x.shape[0], y.shape[1], x.shape[2])
        self.assertTrue(z.shape[0] == 5)
        self.assertTrue(z.shape[1] == 4)
        self.assertTrue(z.shape[2] == 3)
        # shape list
        z = y.expand((x.shape[0], y.shape[1], x.shape[2]))
        self.assertTrue(z.shape[0] == 5)
        self.assertTrue(z.shape[1] == 4)
        self.assertTrue(z.shape[2] == 3)
        # mixed python symints and ints
        z = y.expand(x.shape[0], y.shape[1], 3)
        self.assertTrue(z.shape[0] == 5)
        self.assertTrue(z.shape[1] == 4)
        self.assertTrue(z.shape[2] == 3)
        # mixed python symints and ints in a list
        z = y.expand((x.shape[0], y.shape[1], 3))
        self.assertTrue(z.shape[0] == 5)
        self.assertTrue(z.shape[1] == 4)
        self.assertTrue(z.shape[2] == 3)
        # mixed python symints and ints
        z = y.expand(5, y.shape[1], x.shape[2])
        self.assertTrue(z.shape[0] == 5)
        self.assertTrue(z.shape[1] == 4)
        self.assertTrue(z.shape[2] == 3)
        # mixed python ints and symints in a list
        z = y.expand((5, y.shape[1], x.shape[2]))
        self.assertTrue(z.shape[0] == 5)
        self.assertTrue(z.shape[1] == 4)
        self.assertTrue(z.shape[2] == 3)
        z = y.expand((y.shape[1],))
        z = y.expand(y.shape[1])

    @skipIfNoSympy
    def test_size_expressions(self):
        """Comparisons on symbolic sizes record guards in the ShapeEnv."""
        shape_env = ShapeEnv()
        x = create_symbolic_tensor("x", torch.randn(5), shape_env)
        expand_x = x.expand(x.shape[0], x.shape[0])
        if expand_x.shape[0] > 3:
            result = expand_x + expand_x
        else:
            result = expand_x + expand_x
        gt_op = shape_env.guards[0][0]
        self.assertTrue(isinstance(gt_op, sympy.core.relational.StrictGreaterThan))
        # NOTE(review): the three checks below pass two arguments to
        # assertTrue, where the second one is just the failure message, so
        # they can never fail.  They presumably meant assertEqual; left
        # unchanged because equality of the two string forms is not verified
        # here — confirm before tightening.
        self.assertTrue(str(x.shape[0]), str(gt_op.args[0]))
        self.assertTrue(str(expand_x.shape[1]), str(x.shape[0]))
        self.assertTrue(str(expand_x.shape[1]), str(result.shape[0]))

    @skipIfNoSympy
    def test_aten_ops(self):
        """SymInt overloads are reachable directly through torch.ops.aten."""
        shape_env = ShapeEnv()
        x = create_symbolic_tensor("x", torch.randn(5), shape_env)
        torch.ops.aten.narrow_copy.SymInt(x, 0, 0, x.shape[0])
        shape_env = ShapeEnv()
        x = create_symbolic_tensor("x", torch.randn(5, 4, 3), shape_env)
        torch.ops.aten.expand.SymInt(x, [x.shape[0], x.shape[1], x.shape[2]])

    def test_fx_trace_intlist(self):
        """Symbolic tracing must accept int-lists built from traced sizes."""
        class CustomModule(torch.nn.Module):
            def forward(self, x):
                bs, c, h, w = x.shape
                return F.pad(x, (0, w % 2, 0, h % 2, 0, 0))
        m = CustomModule()
        x = torch.rand(1, 3, 4, 4)
        # should not TypeError: pad(): argument 'pad' (position 2) must be
        # tuple of ints, not tuple
        torch.fx.symbolic_trace(m)

    @skipIfNoSympy
    def test_meta_symint(self):
        """Symbolic sizes can be used to construct meta tensors."""
        shape_env = ShapeEnv()
        a0 = shape_env.create_symint("a0", 2)
        r = torch.empty(a0, device='meta')
        self.assertIsInstance(r.shape[0], CPP_SYMINT_CLASS)
if __name__ == '__main__':
    # Allow running this test file directly: `python test_dynamic_shapes.py`.
    run_tests()
| pytorch-master | test/test_dynamic_shapes.py |
# Owner(s): ["module: optimizer"]
import warnings
import math
import unittest
import functools
import itertools
from copy import deepcopy
import torch
from torch._six import inf
import torch.optim as optim
import torch.optim._multi_tensor as optim_mt
import torch.nn.functional as F
from torch.optim import SGD
from torch.autograd import Variable
from torch import sparse
from torch.optim.lr_scheduler import LambdaLR, MultiplicativeLR, SequentialLR, StepLR, \
MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, \
_LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, \
EPOCH_DEPRECATION_WARNING
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_UBSAN, load_tests, \
parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. Re-binding the name marks the import as
# intentionally used and silences flake warnings.
load_tests = load_tests
def rosenbrock(tensor):
    """Rosenbrock banana function; its global minimum is 0 at (x, y) = (1, 1)."""
    a, b = tensor
    return (1 - a) ** 2 + 100 * (b - a ** 2) ** 2
def drosenbrock(tensor):
    """Analytic gradient of `rosenbrock` at (x, y)."""
    x, y = tensor
    residual = y - x ** 2
    return torch.tensor((-400 * x * residual - 2 * (1 - x), 200 * residual))
class TestOptim(TestCase):
exact_dtype = True
def _test_rosenbrock_sparse(self, constructor, scheduler_constructors=None,
                            sparse_only=False, maximize=False):
    """Optimize the 2-D Rosenbrock function using sparse gradients.

    constructor: callable(params) -> optimizer under test.
    scheduler_constructors: optional list of callables(optimizer) -> scheduler.
    sparse_only: skip the lockstep dense-gradient mirror run.
    maximize: expect the objective to grow instead of approaching the minimum.
    """
    if scheduler_constructors is None:
        scheduler_constructors = []
    params_t = torch.tensor([1.5, 1.5])
    params = Variable(params_t, requires_grad=True)
    optimizer = constructor([params])
    schedulers = []
    for scheduler_constructor in scheduler_constructors:
        schedulers.append(scheduler_constructor(optimizer))
    if not sparse_only:
        # Mirror run with dense gradients; both runs must stay in lockstep.
        params_c = Variable(params_t.clone(), requires_grad=True)
        optimizer_c = constructor([params_c])
    solution = torch.tensor([1, 1])
    initial_dist = params.data.dist(solution)

    def eval(params, sparse_grad, w):
        # Depending on w, provide only the x or y gradient
        optimizer.zero_grad()
        loss = rosenbrock(params)
        loss.backward()
        grad = drosenbrock(params.data)
        # NB: We torture test the optimizer by returning an
        # uncoalesced sparse tensor
        if w:
            i = torch.LongTensor([[0, 0]])
            x = grad[0]
            v = torch.tensor([x / 4., x - x / 4.])
        else:
            i = torch.LongTensor([[1, 1]])
            y = grad[1]
            v = torch.tensor([y - y / 4., y / 4.])
        x = sparse.DoubleTensor(i, v, torch.Size([2])).to(dtype=v.dtype)
        with torch.no_grad():
            if sparse_grad:
                params.grad = x
            else:
                params.grad = x.to_dense()
        return loss

    for i in range(2000):
        # Do cyclic coordinate descent
        w = i % 2
        optimizer.step(functools.partial(eval, params, True, w))
        for scheduler in schedulers:
            if isinstance(scheduler, ReduceLROnPlateau):
                scheduler.step(rosenbrock(params))
            else:
                scheduler.step()
        if not sparse_only:
            optimizer_c.step(functools.partial(eval, params_c, False, w))
            self.assertEqual(params.data, params_c.data)
    if not maximize:
        self.assertLessEqual(params.data.dist(solution), initial_dist)
    else:
        self.assertGreaterEqual(rosenbrock(params.data), rosenbrock(params_t))
def _test_basic_cases_template(self, weight, bias, input, constructor,
                               scheduler_constructors, constructor_accepts_maximize=True):
    """Run 200 optimizer steps on a small quadratic and check the loss moved
    in the expected direction (down, or up when maximize=True)."""
    maximize_options = set([False, constructor_accepts_maximize])
    if not constructor_accepts_maximize:
        def three_arg_constructor(weight, bias, maximize):
            self.assertFalse(maximize)
            return constructor(weight, bias)
    else:
        three_arg_constructor = constructor
    for maximize in maximize_options:
        weight = Variable(weight, requires_grad=True)
        bias = Variable(bias, requires_grad=True)
        input = Variable(input)
        optimizer = three_arg_constructor(weight, bias, maximize)
        schedulers = []
        for scheduler_constructor in scheduler_constructors:
            schedulers.append(scheduler_constructor(optimizer))
        # to check if the optimizer can be printed as a string
        optimizer.__repr__()

        def fn():
            optimizer.zero_grad()
            y = weight.mv(input)
            if y.is_cuda and bias.is_cuda and y.get_device() != bias.get_device():
                y = y.cuda(bias.get_device())
            loss = (y + bias).pow(2).sum()
            loss.backward()
            return loss
        initial_value = fn().item()
        for _i in range(200):
            for scheduler in schedulers:
                if isinstance(scheduler, ReduceLROnPlateau):
                    val_loss = fn()
                    scheduler.step(val_loss)
                else:
                    scheduler.step()
            optimizer.step(fn)
        if maximize:
            self.assertGreater(fn().item(), initial_value)
        else:
            self.assertLess(fn().item(), initial_value)
def _test_state_dict(self, weight, bias, input, constructor):
    """Exercise state_dict()/load_state_dict() round-trips for an optimizer.

    Checks, in order: a loaded clone steps identically to the original;
    loading does not mutate the saved dict; older dicts missing newer keys
    ('maximize', 'foreach', tensor-valued 'step') still load; and a dict can
    be loaded into float/CUDA parameters, with 'step' staying on CPU.
    """
    weight = Variable(weight, requires_grad=True)
    bias = Variable(bias, requires_grad=True)
    input = Variable(input)

    def fn_base(optimizer, weight, bias):
        optimizer.zero_grad()
        # `input_cuda` is only defined once the CUDA branch below runs.
        i = input_cuda if weight.is_cuda else input
        loss = (weight.mv(i) + bias).pow(2).sum()
        loss.backward()
        return loss
    optimizer = constructor(weight, bias)
    fn = functools.partial(fn_base, optimizer, weight, bias)
    # Prime the optimizer
    for _i in range(20):
        optimizer.step(fn)
    # Clone the weights and construct new optimizer for them
    weight_c = Variable(weight.data.clone(), requires_grad=True)
    bias_c = Variable(bias.data.clone(), requires_grad=True)
    optimizer_c = constructor(weight_c, bias_c)
    fn_c = functools.partial(fn_base, optimizer_c, weight_c, bias_c)
    # Load state dict
    state_dict = deepcopy(optimizer.state_dict())
    state_dict_c = deepcopy(optimizer.state_dict())
    optimizer_c.load_state_dict(state_dict_c)
    # Run both optimizations in parallel
    for _i in range(20):
        optimizer.step(fn)
        optimizer_c.step(fn_c)
        self.assertEqual(weight, weight_c)
        self.assertEqual(bias, bias_c)
    # Make sure state dict wasn't modified
    self.assertEqual(state_dict, state_dict_c)
    # Make sure state dict is deterministic with equal but not identical parameters
    self.assertEqual(optimizer.state_dict(), optimizer_c.state_dict())
    # Make sure repeated parameters have identical representation in state dict
    optimizer_c.param_groups.extend(optimizer_c.param_groups)
    self.assertEqual(optimizer.state_dict()['param_groups'][-1],
                     optimizer_c.state_dict()['param_groups'][-1])
    # Make sure that optimizers that support maximize can load older models
    state_dict = optimizer.state_dict()
    if 'maximize' in state_dict['param_groups'][0]:
        for group in state_dict['param_groups']:
            del group['maximize']
        optimizer.load_state_dict(state_dict)
        # Make sure we can still step
        optimizer.step()
    # Make sure that optimizers that support foreach can load older models
    state_dict = optimizer.state_dict()
    if 'foreach' in state_dict['param_groups'][0]:
        for group in state_dict['param_groups']:
            del group['foreach']
        optimizer.load_state_dict(state_dict)
        # Make sure we can still step
        optimizer.step()
    # Make sure that loading optimizers with step not wrapped in tensor can work
    state_dict = optimizer.state_dict()
    if 'step' in state_dict['state'][0] and torch.is_tensor(state_dict['state'][0]['step']):
        for state in state_dict['state'].values():
            state['step'] = state['step'].item()
        optimizer.load_state_dict(state_dict)
        optimizer.step()
    # Check that state dict can be loaded even when we cast parameters
    # to a different type and move to a different device.
    if not torch.cuda.is_available():
        return
    input_cuda = Variable(input.data.float().cuda())
    weight_cuda = Variable(weight.data.float().cuda(), requires_grad=True)
    bias_cuda = Variable(bias.data.float().cuda(), requires_grad=True)
    optimizer_cuda = constructor(weight_cuda, bias_cuda)
    fn_cuda = functools.partial(fn_base, optimizer_cuda, weight_cuda, bias_cuda)
    state_dict = deepcopy(optimizer.state_dict())
    state_dict_c = deepcopy(optimizer.state_dict())
    optimizer_cuda.load_state_dict(state_dict_c)
    # Make sure state dict wasn't modified
    self.assertEqual(state_dict, state_dict_c)
    # Make sure that device of state['step'] is still CPU
    new_state_dict = optimizer_cuda.state_dict()
    if 'step' in state_dict['state'][0] and torch.is_tensor(state_dict['state'][0]['step']):
        for state in new_state_dict['state'].values():
            self.assertEqual(state['step'].device.type, 'cpu')
    for _i in range(20):
        optimizer.step(fn)
        optimizer_cuda.step(fn_cuda)
        self.assertEqual(weight, weight_cuda)
        self.assertEqual(bias, bias_cuda)
    # validate deepcopy() copies all public attributes
    def getPublicAttr(obj):
        return set(k for k in obj.__dict__ if not k.startswith('_'))
    self.assertEqual(getPublicAttr(optimizer), getPublicAttr(deepcopy(optimizer)))
def _test_basic_cases(self, constructor, scheduler_constructors=None,
                      ignore_multidevice=False, constructor_accepts_maximize=False):
    """Drive _test_state_dict and _test_basic_cases_template over CPU,
    non-contiguous, CUDA, and (optionally) multi-GPU parameter layouts."""
    if scheduler_constructors is None:
        scheduler_constructors = []

    def make_two_arg_constructor(constructor, maximize: bool = False):
        # Adapt a (weight, bias, maximize) constructor to (weight, bias).
        if constructor_accepts_maximize:
            return lambda weight, bias: constructor(weight, bias, maximize)
        return constructor
    for maximize in (True, False):
        self._test_state_dict(
            torch.randn(10, 5),
            torch.randn(10),
            torch.randn(5),
            make_two_arg_constructor(constructor, maximize),
        )
    self._test_basic_cases_template(
        torch.randn(10, 5),
        torch.randn(10),
        torch.randn(5),
        constructor,
        scheduler_constructors,
        constructor_accepts_maximize,
    )
    # non-contiguous parameters
    self._test_basic_cases_template(
        torch.randn(10, 5, 2)[..., 0],
        torch.randn(10, 2)[..., 0],
        torch.randn(5),
        constructor,
        scheduler_constructors,
        constructor_accepts_maximize,
    )
    # CUDA
    if not torch.cuda.is_available():
        return
    self._test_basic_cases_template(
        torch.randn(10, 5).cuda(),
        torch.randn(10).cuda(),
        torch.randn(5).cuda(),
        constructor,
        scheduler_constructors,
        constructor_accepts_maximize,
    )
    # Multi-GPU
    if not torch.cuda.device_count() > 1 or ignore_multidevice:
        return
    self._test_basic_cases_template(
        torch.randn(10, 5).cuda(0),
        torch.randn(10).cuda(1),
        torch.randn(5).cuda(0),
        constructor,
        scheduler_constructors,
        constructor_accepts_maximize,
    )
def _test_complex_optimizer(self, optimizer_constructor):
    """A complex parameter must be optimized exactly like its view_as_real twin."""
    c_param = torch.randn(5, 5, dtype=torch.complex64, requires_grad=True)
    r_param = torch.view_as_real(c_param).detach().clone().requires_grad_()
    c_opt = optimizer_constructor(c_param)
    r_opt = optimizer_constructor(r_param)
    for _ in range(3):
        # Feed both optimizers the *same* gradient, in their own layouts.
        c_param.grad = torch.randn_like(c_param)
        r_param.grad = torch.view_as_real(c_param.grad)
        c_opt.step()
        r_opt.step()
        self.assertEqual(torch.view_as_real(c_param), r_param)
def _test_complex_2d(self, optimizer_constructor, f=None):
    """Optimizing one complex parameter must match optimizing its real and
    imaginary parts as two separate real parameters (objective `f`,
    defaulting to rosenbrock)."""
    if f is None:
        f = rosenbrock
    a1 = torch.randn(2, dtype=torch.complex64, requires_grad=True)
    a1_real = a1.real.clone().detach()
    a1_imag = a1.imag.clone().detach()
    a1_real.requires_grad_()
    a1_imag.requires_grad_()
    optim1 = optimizer_constructor([a1])
    optim2 = optimizer_constructor([a1_real, a1_imag])
    for i in range(10):
        optim1.zero_grad()
        optim2.zero_grad()
        a2 = torch.complex(a1_real, a1_imag)
        f(a1).backward()
        f(a2).backward()
        # Gradients and parameters must agree component-wise every step.
        self.assertEqual(a1.grad.real, a1_real.grad)
        self.assertEqual(a1.grad.imag, a1_imag.grad)
        optim1.step()
        optim2.step()
        self.assertEqual(a1.real, a1_real)
        self.assertEqual(a1.imag, a1_imag)
def _build_params_dict(self, weight, bias, **kwargs):
    """Two param groups: `weight` using optimizer defaults, `bias` with per-group `kwargs`."""
    bias_group = dict(params=[bias], **kwargs)
    return [{'params': [weight]}, bias_group]
def _build_params_dict_single(self, weight, bias, **kwargs):
    """Single param group holding only `bias` (`weight` is not included)."""
    single_group = dict(params=bias, **kwargs)
    return [single_group]
def test_sgd(self):
    """SGD smoke tests for both single- and multi-tensor implementations:
    plain lr, per-group lr dicts, assorted LR schedulers,
    momentum/nesterov/weight-decay combinations, and rejection of an
    invalid (negative) momentum."""
    for optimizer in [optim.SGD, optim_mt.SGD]:
        self._test_basic_cases(
            lambda weight, bias, maximize: optimizer([weight, bias], lr=1e-3, maximize=maximize),
            constructor_accepts_maximize=True
        )
        # NOTE(review): this case is repeated verbatim below — likely a
        # copy/paste leftover.
        self._test_basic_cases(
            lambda weight, bias, maximize: optimizer([weight, bias], lr=1e-3, maximize=maximize),
            constructor_accepts_maximize=True
        )
        self._test_basic_cases(
            lambda weight, bias, maximize: optimizer(
                self._build_params_dict(weight, bias, lr=1e-2),
                lr=1e-3, maximize=maximize),
            constructor_accepts_maximize=True
        )
        self._test_basic_cases(
            lambda weight, bias, maximize: optimizer(
                self._build_params_dict_single(weight, bias, lr=1e-2),
                lr=1e-3, maximize=maximize),
            constructor_accepts_maximize=True
        )
        self._test_basic_cases(
            lambda weight, bias, maximize: optimizer(
                self._build_params_dict_single(weight, bias, lr=1e-2), maximize=maximize),
            constructor_accepts_maximize=True
        )
        # Scheduler combinations layered on top of a plain SGD run.
        self._test_basic_cases(
            lambda weight, bias, maximize: optimizer([weight, bias], lr=1e-3, maximize=maximize),
            [lambda opt: StepLR(opt, gamma=0.9, step_size=10)],
            constructor_accepts_maximize=True
        )
        self._test_basic_cases(
            lambda weight, bias, maximize: optimizer([weight, bias], lr=1e-3, maximize=maximize),
            [lambda opt: LinearLR(opt, start_factor=0.4, end_factor=0.8, total_iters=4)],
            constructor_accepts_maximize=True
        )
        self._test_basic_cases(
            lambda weight, bias, maximize: optimizer([weight, bias], lr=1e-3, maximize=maximize),
            [lambda opt: ConstantLR(opt, factor=0.4, total_iters=4)],
            constructor_accepts_maximize=True
        )
        self._test_basic_cases(
            lambda weight, bias, maximize: optimizer([weight, bias], lr=1e-3, maximize=maximize),
            [lambda opt: StepLR(opt, gamma=0.9, step_size=10),
             lambda opt: LinearLR(opt, start_factor=0.4, end_factor=0.6, total_iters=4)],
            constructor_accepts_maximize=True
        )
        self._test_basic_cases(
            lambda weight, bias, maximize: optimizer([weight, bias], lr=1e-3, maximize=maximize),
            [lambda opt: StepLR(opt, gamma=0.9, step_size=10),
             lambda opt: ReduceLROnPlateau(opt)],
            constructor_accepts_maximize=True
        )
        self._test_basic_cases(
            lambda weight, bias, maximize: optimizer([weight, bias], lr=1e-3, maximize=maximize),
            [lambda opt: StepLR(opt, gamma=0.99, step_size=10),
             lambda opt: ExponentialLR(opt, gamma=0.99),
             lambda opt: ReduceLROnPlateau(opt)],
            constructor_accepts_maximize=True
        )
        # Momentum / weight-decay / nesterov combinations.
        self._test_basic_cases(
            lambda weight, bias, maximize: optimizer([weight, bias], lr=1e-3, momentum=0.5, maximize=maximize),
            constructor_accepts_maximize=True
        )
        self._test_basic_cases(
            lambda weight, bias, maximize: optimizer([weight, bias], lr=1e-3, momentum=0.5, weight_decay=1, maximize=maximize),
            constructor_accepts_maximize=True
        )
        self._test_basic_cases(
            lambda weight, bias, maximize:
            optimizer([weight, bias], nesterov=True, lr=1e-3, momentum=0.5, weight_decay=1, maximize=maximize),
            constructor_accepts_maximize=True
        )
        self._test_basic_cases(
            lambda weight, bias, maximize: optimizer([weight, bias], lr=1e-3, maximize=maximize),
            [lambda opt: PolynomialLR(opt, power=0.9, total_iters=4)],
            constructor_accepts_maximize=True
        )
        with self.assertRaisesRegex(ValueError, "Invalid momentum value: -0.5"):
            optimizer(None, lr=1e-2, momentum=-0.5)
def test_sgd_sparse(self):
    """SGD (single- and multi-tensor) handles sparse Rosenbrock gradients."""
    for sgd_impl in [optim.SGD, optim_mt.SGD]:
        self._test_rosenbrock_sparse(
            lambda params: sgd_impl(params, lr=5e-3)
        )
        # Same run, but with a (nearly flat) StepLR schedule attached.
        self._test_rosenbrock_sparse(
            lambda params: sgd_impl(params, lr=0.005),
            [lambda opt: StepLR(opt, gamma=0.99999, step_size=300)]
        )
def test_sgd_complex(self):
    """SGD variants treat a complex parameter like its real view."""
    hyperparams = [
        dict(lr=0.001),
        dict(lr=0.001, momentum=1),
        dict(lr=0.001, momentum=1, weight_decay=1),
        dict(lr=0.001, nesterov=True, momentum=1, weight_decay=1),
        dict(lr=0.001, momentum=1, dampening=0.5, weight_decay=1),
    ]
    for optimizer in [optim.SGD, optim_mt.SGD]:
        for kwargs in hyperparams:
            # The lambda is invoked immediately inside the helper, so the
            # loop variables are captured at their current values.
            self._test_complex_optimizer(lambda param: optimizer([param], **kwargs))
def test_multi_tensor_optimizers(self):
if not torch.cuda.is_available():
return
optimizer_pairs_with_flags = [
((optim.Adam, optim._multi_tensor.Adam), dict(weight_decay=1., amsgrad=True)),
((optim.Adam, optim._multi_tensor.Adam), dict(weight_decay=1., amsgrad=False)),
((optim.Adam, optim._multi_tensor.Adam), dict(weight_decay=0., amsgrad=True)),
((optim.Adam, optim._multi_tensor.Adam), dict(weight_decay=0., amsgrad=False)),
((optim.AdamW, optim._multi_tensor.AdamW), dict(weight_decay=1., amsgrad=True)),
((optim.AdamW, optim._multi_tensor.AdamW), dict(weight_decay=1., amsgrad=False)),
((optim.AdamW, optim._multi_tensor.AdamW), dict(weight_decay=0., amsgrad=True)),
((optim.AdamW, optim._multi_tensor.AdamW), dict(weight_decay=0., amsgrad=False)),
((optim.NAdam, optim._multi_tensor.NAdam), dict(weight_decay=0., momentum_decay=6e-3)),
((optim.NAdam, optim._multi_tensor.NAdam), dict(weight_decay=1., momentum_decay=6e-3)),
((optim.NAdam, optim._multi_tensor.NAdam), dict(weight_decay=0., momentum_decay=4e-3)),
((optim.NAdam, optim._multi_tensor.NAdam), dict(weight_decay=0.01, momentum_decay=4e-3)),
((optim.SGD, optim._multi_tensor.SGD), dict(lr=0.2, momentum=1, dampening=0, weight_decay=1, nesterov=True)),
((optim.SGD, optim._multi_tensor.SGD), dict(lr=0.2, momentum=1, dampening=0.5, weight_decay=1, nesterov=False)),
((optim.RAdam, optim._multi_tensor.RAdam), dict(weight_decay=0)),
((optim.RAdam, optim._multi_tensor.RAdam), dict(weight_decay=1)),
((optim.RMSprop, optim._multi_tensor.RMSprop), dict(weight_decay=1, momentum=1, centered=True)),
((optim.RMSprop, optim._multi_tensor.RMSprop), dict(weight_decay=1, momentum=0, centered=True)),
((optim.RMSprop, optim._multi_tensor.RMSprop), dict(weight_decay=1, momentum=1, centered=False)),
((optim.RMSprop, optim._multi_tensor.RMSprop), dict(weight_decay=0, momentum=1, centered=False)),
((optim.Rprop, optim._multi_tensor.Rprop), dict(lr=1e-2, etas=(0.5, 1.2), step_sizes=(1e-6, 50))),
((optim.ASGD, optim._multi_tensor.ASGD), dict(weight_decay=0)),
((optim.ASGD, optim._multi_tensor.ASGD), dict(weight_decay=1)),
((optim.Adamax, optim._multi_tensor.Adamax), dict(weight_decay=0)),
((optim.Adamax, optim._multi_tensor.Adamax), dict(weight_decay=1)),
((optim.Adadelta, optim._multi_tensor.Adadelta), dict(weight_decay=0)),
((optim.Adadelta, optim._multi_tensor.Adadelta), dict(weight_decay=1)),
((optim.Adagrad, optim._multi_tensor.Adagrad), dict(weight_decay=0)),
((optim.Adagrad, optim._multi_tensor.Adagrad), dict(weight_decay=1)),
]
kIterations = 4
device = 'cuda'
for optimizers, params in optimizer_pairs_with_flags:
res, state = [], []
for opt in optimizers:
input = torch.tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], dtype=torch.float64, device=device).reshape(3, 2)
torch.manual_seed(1)
model = torch.nn.Sequential(torch.nn.Linear(2, 3),
torch.nn.Sigmoid(),
torch.nn.Linear(3, 1),
torch.nn.Sigmoid())
model.to(dtype=torch.float64, device=device)
optimizer = opt(model.parameters(), **params)
for _ in range(kIterations):
optimizer.zero_grad()
output = model(input)
loss = output.sum()
loss.backward()
# Test that step behaves as expected (a no-op) when grads are set to None
if iter == 0:
optimizer.zero_grad(set_to_none=True)
optimizer.step()
state.append(optimizer.state)
res.append(model.parameters())
st_state = state[0]
mt_state = state[1]
for st_p, mt_p in zip(res[0], res[1]):
self.assertEqual(st_p, mt_p, atol=5e-5, rtol=0)
# check that optimizer states are the same
st_p_state = st_state[st_p]
mt_p_state = mt_state[mt_p]
for k in st_p_state:
self.assertEqual(st_p_state[k], mt_p_state[k], atol=5e-5, rtol=0)
    def test_adam(self):
        """Adam (single- and multi-tensor impls): basic cases across
        param-group / amsgrad / weight_decay / scheduler combinations,
        complex-tensor support, and constructor validation."""
        for optimizer in [optim.Adam, optim_mt.Adam]:
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer([weight, bias], lr=1e-3, maximize=maximize),
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    self._build_params_dict(weight, bias, lr=1e-2), lr=1e-3, maximize=maximize),
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer([weight, bias], lr=1e-3, amsgrad=True, maximize=maximize),
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer([weight, bias], lr=1e-3, weight_decay=0.1, maximize=maximize),
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    self._build_params_dict(weight, bias, lr=1e-2),
                    lr=1e-3, amsgrad=True, maximize=maximize),
                constructor_accepts_maximize=True
            )
            # The following cases additionally chain LR schedulers.
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    self._build_params_dict(weight, bias, lr=1e-2),
                    lr=1e-3, maximize=maximize),
                [lambda opt: ExponentialLR(opt, gamma=0.9)],
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    self._build_params_dict(weight, bias, lr=1e-2),
                    lr=1e-3, maximize=maximize),
                [lambda opt: LinearLR(opt, start_factor=0.4, total_iters=4)],
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    self._build_params_dict(weight, bias, lr=1e-2),
                    lr=1e-3, maximize=maximize),
                [lambda opt: ConstantLR(opt, factor=0.4, total_iters=4)],
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer([weight, bias], lr=1e-3, amsgrad=True, maximize=maximize),
                [lambda opt: ConstantLR(opt, factor=0.4, total_iters=4),
                 lambda opt: ExponentialLR(opt, gamma=0.9)],
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer([weight, bias], lr=1e-3, amsgrad=True, maximize=maximize),
                [lambda opt: ExponentialLR(opt, gamma=0.9),
                 lambda opt: ReduceLROnPlateau(opt)],
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    self._build_params_dict(weight, bias, lr=1e-2),
                    lr=1e-3, amsgrad=True, maximize=maximize),
                [lambda opt: StepLR(opt, gamma=0.9, step_size=10),
                 lambda opt: ReduceLROnPlateau(opt)],
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    self._build_params_dict(weight, bias, lr=1e-2),
                    lr=1e-3, maximize=maximize),
                [lambda opt: PolynomialLR(opt, total_iters=4, power=0.9)],
                constructor_accepts_maximize=True
            )
            self._test_complex_2d(optimizer)
            with self.assertRaisesRegex(ValueError, "Invalid beta parameter at index 0: 1.0"):
                optimizer(None, lr=1e-2, betas=(1.0, 0.0))
            with self.assertRaisesRegex(ValueError, "Invalid weight_decay value: -1"):
                optimizer(None, lr=1e-2, weight_decay=-1)
def test_adamw(self):
for optimizer in [optim.AdamW, optim_mt.AdamW]:
self._test_basic_cases(
lambda weight, bias, maximize: optimizer([weight, bias], lr=1e-3, maximize=maximize),
constructor_accepts_maximize=True
)
self._test_basic_cases(
lambda weight, bias, maximize: optimizer(
self._build_params_dict(weight, bias, lr=1e-2), lr=1e-3, maximize=maximize),
constructor_accepts_maximize=True
)
self._test_basic_cases(
lambda weight, bias, maximize: optimizer([weight, bias], lr=1e-3, weight_decay=1, maximize=maximize),
constructor_accepts_maximize=True
)
self._test_basic_cases(
lambda weight, bias, maximize: optimizer([weight, bias], lr=1e-3, weight_decay=1, amsgrad=True, maximize=maximize),
constructor_accepts_maximize=True
)
self._test_complex_2d(optimizer)
with self.assertRaisesRegex(ValueError, "Invalid weight_decay value: -1"):
optimizer(None, lr=1e-2, weight_decay=-1)
    def test_sparse_adam(self):
        """SparseAdam on the sparse Rosenbrock problem (minimize and
        maximize), plus constructor validation: bad betas and sparse
        *parameters* (as opposed to sparse gradients) must be rejected."""
        self._test_rosenbrock_sparse(
            lambda params: optim.SparseAdam(params, lr=4e-2),
            [],
            True
        )
        self._test_rosenbrock_sparse(
            lambda params: optim.SparseAdam(params, lr=4e-2, maximize=True),
            [],
            True,
            True
        )
        with self.assertRaisesRegex(ValueError, "Invalid beta parameter at index 0: 1.0"):
            optim.SparseAdam(None, lr=1e-2, betas=(1.0, 0.0))
        with self.assertRaisesRegex(ValueError, "SparseAdam requires dense parameter tensors"):
            optim.SparseAdam([torch.zeros(3, layout=torch.sparse_coo)])
        with self.assertRaisesRegex(ValueError, "SparseAdam requires dense parameter tensors"):
            optim.SparseAdam([{"params": [torch.zeros(3, layout=torch.sparse_coo)]}])
    # ROCm precision is too low to pass this test
    # NOTE(review): the comment above suggests a @skipIfRocm decorator that is
    # not present here — confirm whether it was dropped accidentally.
    def test_adadelta(self):
        """Adadelta (single- and multi-tensor impls): basic cases with
        param groups, schedulers and weight_decay, plus rho validation."""
        # Handles https://github.com/pytorch/pytorch/issues/69698
        self.rel_tol = 4e-3
        for optimizer in [optim.Adadelta, optim_mt.Adadelta]:
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer([weight, bias], maximize=maximize),
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    self._build_params_dict(weight, bias, rho=0.95), maximize=maximize),
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    self._build_params_dict(weight, bias, rho=0.95), maximize=maximize),
                [lambda opt: StepLR(opt, gamma=0.9, step_size=10),
                 lambda opt: ReduceLROnPlateau(opt)],
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer([weight, bias], weight_decay=1, maximize=maximize),
                constructor_accepts_maximize=True
            )
        with self.assertRaisesRegex(ValueError, "Invalid rho value: 1.1"):
            optimizer(None, lr=1e-2, rho=1.1)
def test_adadelta_complex(self):
# Handles https://github.com/pytorch/pytorch/issues/69698
self.rel_tol = 2e-2
for optimizer in [optim.Adadelta]:
self._test_complex_optimizer(
lambda weight: optimizer([weight])
)
self._test_complex_optimizer(
lambda weight: optimizer([weight], rho=0.95)
)
self._test_complex_optimizer(
lambda weight: optimizer([weight], rho=0.95, weight_decay=1)
)
    def test_nadam(self):
        """NAdam (single- and multi-tensor impls): basic cases with param
        groups / weight_decay / momentum_decay / ExponentialLR, plus
        constructor validation for betas and momentum_decay."""
        for optimizer in [optim.NAdam, optim_mt.NAdam]:
            self._test_basic_cases(
                lambda weight, bias: optimizer([weight, bias], lr=1e-3)
            )
            self._test_basic_cases(
                lambda weight, bias: optimizer(
                    self._build_params_dict(weight, bias, lr=1e-2),
                    lr=1e-3)
            )
            self._test_basic_cases(
                lambda weight, bias: optimizer([weight, bias], lr=1e-3, weight_decay=0.1, momentum_decay=6e-3)
            )
            self._test_basic_cases(
                lambda weight, bias: optimizer([weight, bias], lr=1e-3, weight_decay=0.1, momentum_decay=6e-3),
                [lambda opt: ExponentialLR(opt, gamma=0.9)]
            )
            with self.assertRaisesRegex(ValueError, "Invalid beta parameter at index 0: 1.0"):
                optimizer(None, lr=1e-2, betas=(1.0, 0.0))
            with self.assertRaisesRegex(ValueError, "Invalid momentum_decay value: -0.2"):
                optimizer(None, lr=1e-2, momentum_decay=-0.2)
    def test_adagrad(self):
        """Adagrad (single- and multi-tensor impls): basic cases with
        initial_accumulator_value, param groups and schedulers, plus
        lr_decay validation."""
        for optimizer in [optim.Adagrad, optim_mt.Adagrad]:
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer([weight, bias], lr=1e-1, maximize=maximize),
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    [weight, bias], lr=1e-1, initial_accumulator_value=0.1, maximize=maximize,
                ),
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    self._build_params_dict(weight, bias, lr=1e-2),
                    lr=1e-1,
                    maximize=maximize),
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    self._build_params_dict(weight, bias, lr=1e-2),
                    lr=1e-1,
                    maximize=maximize),
                [lambda opt: ReduceLROnPlateau(opt)],
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    self._build_params_dict(weight, bias, lr=1e-2),
                    lr=1e-1,
                    maximize=maximize),
                [lambda opt: ReduceLROnPlateau(opt),
                 lambda opt: ExponentialLR(opt, gamma=0.99)],
                constructor_accepts_maximize=True
            )
            with self.assertRaisesRegex(ValueError, "Invalid lr_decay value: -0.5"):
                optimizer(None, lr=1e-2, lr_decay=-0.5)
    def test_adagrad_sparse(self):
        """Adagrad (both impls) on the sparse Rosenbrock problem, with and
        without chained StepLR + ReduceLROnPlateau schedulers."""
        for optimizer in [optim.Adagrad, optim_mt.Adagrad]:
            self._test_rosenbrock_sparse(
                lambda params: optimizer(params, lr=1e-1)
            )
            self._test_rosenbrock_sparse(
                lambda params: optimizer(params, lr=0.1),
                [lambda opt: StepLR(opt, gamma=1 - 1e-5, step_size=500),
                 lambda opt: ReduceLROnPlateau(opt, threshold=1e-4)]
            )
def test_adagrad_complex(self):
for optimizer in [optim.Adagrad, optim_mt.Adagrad]:
self._test_complex_optimizer(
lambda param: optimizer([param], lr=1e-1)
)
self._test_complex_optimizer(
lambda param: optimizer(
[param], lr=1e-1, initial_accumulator_value=0.1
)
)
    def test_adamax(self):
        """Adamax (single- and multi-tensor impls): basic cases with param
        groups and weight_decay, complex support, and betas validation."""
        for optimizer in [optim.Adamax, optim_mt.Adamax]:
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    [weight, bias], lr=1e-1, maximize=maximize),
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    self._build_params_dict(weight, bias, lr=1e-2),
                    lr=1e-1, maximize=maximize),
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    [weight, bias], lr=1e-1, weight_decay=1, maximize=maximize),
                constructor_accepts_maximize=True
            )
            self._test_complex_2d(optimizer)
            with self.assertRaisesRegex(ValueError, "Invalid beta parameter at index 1: 1.0"):
                optimizer(None, lr=1e-2, betas=(0.0, 1.0))
    def test_radam(self):
        """RAdam (single- and multi-tensor impls): basic cases with param
        groups, weight_decay and chained schedulers, plus constructor
        validation for betas and weight_decay."""
        for optimizer in [optim.RAdam, optim_mt.RAdam]:
            self._test_basic_cases(
                lambda weight, bias: optimizer([weight, bias], lr=1e-3)
            )
            self._test_basic_cases(
                lambda weight, bias: optimizer(
                    self._build_params_dict(weight, bias, lr=1e-2),
                    lr=1e-3)
            )
            self._test_basic_cases(
                lambda weight, bias: optimizer([weight, bias], lr=1e-3, weight_decay=0.1)
            )
            self._test_basic_cases(
                lambda weight, bias: optimizer([weight, bias], lr=1e-3),
                [lambda opt: ExponentialLR(opt, gamma=0.9),
                 lambda opt: ReduceLROnPlateau(opt)]
            )
            with self.assertRaisesRegex(ValueError, "Invalid beta parameter at index 0: 1.0"):
                optimizer(None, lr=1e-2, betas=(1.0, 0.0))
            with self.assertRaisesRegex(ValueError, "Invalid weight_decay value: -1"):
                optimizer(None, lr=1e-2, weight_decay=-1)
    def test_rmsprop(self):
        """RMSprop (single- and multi-tensor impls): basic cases across
        centered / momentum / weight_decay combinations, plus momentum
        validation."""
        for optimizer in [optim.RMSprop, optim_mt.RMSprop]:
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer([weight, bias], lr=1e-2, maximize=maximize),
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    self._build_params_dict(weight, bias, lr=1e-3),
                    lr=1e-2, maximize=maximize),
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    self._build_params_dict(weight, bias, lr=1e-3),
                    lr=1e-2, centered=True, maximize=maximize),
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    self._build_params_dict(weight, bias, lr=1e-3),
                    lr=1e-2, centered=True, momentum=0.1, maximize=maximize),
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    self._build_params_dict(weight, bias, lr=1e-3),
                    lr=1e-2, momentum=0.1, maximize=maximize),
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    self._build_params_dict(weight, bias, lr=1e-3),
                    lr=1e-2, momentum=0.1, weight_decay=1, maximize=maximize),
                constructor_accepts_maximize=True
            )
            with self.assertRaisesRegex(ValueError, "Invalid momentum value: -1.0"):
                optimizer(None, lr=1e-2, momentum=-1.0)
    def test_asgd(self):
        """ASGD (single- and multi-tensor impls): basic cases with t0,
        param groups and weight_decay, plus weight_decay validation."""
        for optimizer in [optim.ASGD, optim_mt.ASGD]:
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer([weight, bias], lr=1e-3, t0=100, maximize=maximize),
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    self._build_params_dict(weight, bias, lr=1e-2),
                    lr=1e-3, t0=100, maximize=maximize),
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    self._build_params_dict(weight, bias, lr=1e-2),
                    lr=1e-3, weight_decay=1, maximize=maximize),
                constructor_accepts_maximize=True
            )
            with self.assertRaisesRegex(ValueError, "Invalid weight_decay value: -0.5"):
                optimizer(None, lr=1e-2, weight_decay=-0.5)
    @skipIfRocm
    def test_rprop(self):
        """Rprop (single- and multi-tensor impls): basic cases with and
        without param groups, plus etas validation. Skipped on ROCm."""
        for optimizer in [optim.Rprop, optim_mt.Rprop]:
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer([weight, bias], lr=2e-4, maximize=maximize),
                constructor_accepts_maximize=True
            )
            self._test_basic_cases(
                lambda weight, bias, maximize: optimizer(
                    self._build_params_dict(weight, bias, lr=1e-2),
                    lr=2e-4, maximize=maximize),
                constructor_accepts_maximize=True
            )
            with self.assertRaisesRegex(ValueError, "Invalid eta values: 1.0, 0.5"):
                optimizer(None, lr=1e-2, etas=(1.0, 0.5))
def test_lbfgs(self):
self._test_basic_cases(
lambda weight, bias: optim.LBFGS([weight, bias]),
ignore_multidevice=True
)
self._test_basic_cases(
lambda weight, bias: optim.LBFGS([weight, bias], line_search_fn="strong_wolfe"),
ignore_multidevice=True
)
    @unittest.skipIf(TEST_WITH_UBSAN, "division-by-zero error with UBSAN")
    def test_lbfgs_return_type(self):
        """step() must return the same type whether tolerance_grad makes
        LBFGS converge immediately (+inf) or never (-inf)."""
        params = [torch.randn(10, 5), torch.randn(10)]
        opt1 = optim.LBFGS(params, 0.01, tolerance_grad=inf)
        opt2 = optim.LBFGS(params, 0.01, tolerance_grad=-inf)

        def closure():
            return torch.tensor([10])

        res1 = opt1.step(closure)
        res2 = opt2.step(closure)
        self.assertEqual(type(res1), type(res2))
def test_invalid_param_type(self):
with self.assertRaises(TypeError):
optim.SGD(Variable(torch.randn(5, 5)), lr=3)
    def test_duplicate_params_in_param_group(self):
        """Constructing an optimizer with the same parameter listed twice
        must emit exactly one duplicate-parameter warning."""
        param = Variable(torch.randn(5, 5))
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            optim.SGD([param, param], lr=0.1)
            self.assertEqual(len(w), 1)
            self.assertIn('a parameter group with duplicate parameters', str(w[0].message))
    def test_no_grad_for_all_params(self):
        """step() must run cleanly (as a no-op) when no parameter has a
        gradient, for every optimizer listed below."""
        params = [torch.randn(5, 5, requires_grad=False) for _ in range(2)]

        optimizer_list = [
            optim.Adadelta,
            optim.AdamW,
            optim.Adam,
            optim.Adagrad,
            optim.Adamax,
            optim.RMSprop,
            optim.SGD,
            optim.SparseAdam,
            optim.ASGD,
        ]
        for optim_ctr in optimizer_list:
            opt = optim_ctr(params, lr=0.1)
            # make sure step can still run even if
            # all params have no grad
            opt.step()
class SchedulerTestNet(torch.nn.Module):
    """Minimal two-conv network used as a fixture by the LR-scheduler tests."""

    def __init__(self):
        super().__init__()
        self.conv1 = torch.nn.Conv2d(1, 1, 1)
        self.conv2 = torch.nn.Conv2d(1, 1, 1)

    def forward(self, x):
        hidden = F.relu(self.conv1(x))
        return self.conv2(hidden)
class LambdaLRTestObject:
    """Picklable stand-in for a ``lambda epoch: value * epoch`` LR function,
    with value-based equality so scheduler state dicts compare equal."""

    def __init__(self, value):
        self.value = value

    def __call__(self, epoch):
        return epoch * self.value

    def __eq__(self, other):
        # Equal iff the other object is the same class with the same state.
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__
class TestLRScheduler(TestCase):
exact_dtype = True
    def setUp(self):
        """Create a fresh two-conv net and an SGD optimizer with two param
        groups (base lr 0.05; second group overrides lr to 0.5)."""
        super(TestLRScheduler, self).setUp()
        self.net = SchedulerTestNet()
        self.opt = SGD(
            [{'params': self.net.conv1.parameters()}, {'params': self.net.conv2.parameters(), 'lr': 0.5}],
            lr=0.05)
    def _check_warning_is_epoch_deprecation_warning(self, w, *, num_warnings: int = 1):
        """This function swallows the epoch deprecation warning which is produced when we
        call `scheduler.step(epoch)` with some not `None` value of `epoch`.

        this is deprecated, and this function will need to be removed/updated when
        the schedulers no longer accept the parameter at all.
        """
        self.assertEqual(len(w), num_warnings)
        for warning in w:
            # each recorded warning must be exactly the deprecation message
            self.assertEqual(len(warning.message.args), 1)
            self.assertEqual(warning.message.args[0], EPOCH_DEPRECATION_WARNING)
    def test_error_when_getlr_has_epoch(self):
        """A scheduler whose get_lr() takes an extra positional argument must
        fail at construction: the base class calls get_lr() with no args, so
        the mismatched signature raises TypeError."""
        class MultiStepLR(torch.optim.lr_scheduler._LRScheduler):
            def __init__(self, optimizer, gamma, milestones, last_epoch=-1):
                self.init_lr = [group['lr'] for group in optimizer.param_groups]
                self.gamma = gamma
                self.milestones = milestones
                super().__init__(optimizer, last_epoch)

            # `step` parameter here is the deliberate signature error
            def get_lr(self, step):
                global_step = self.last_epoch
                gamma_power = ([0] + [i + 1 for i, m in enumerate(self.milestones) if global_step >= m])[-1]
                return [init_lr * (self.gamma ** gamma_power) for init_lr in self.init_lr]

        optimizer = torch.optim.SGD([torch.rand(1)], lr=1)
        with self.assertRaises(TypeError):
            scheduler = MultiStepLR(optimizer, gamma=1, milestones=[10, 20])
    def test_no_cyclic_references(self):
        """After deleting a scheduler, the optimizer must not be kept alive by
        cyclic references, and must be collectible by refcounting alone
        (gc.collect() finds nothing after `del optim`)."""
        import gc
        param = Variable(torch.empty(10), requires_grad=True)
        optim = SGD([param], lr=0.5)
        scheduler = LambdaLR(optim, lambda epoch: 1.0)
        del scheduler

        # Prior to Python 3.7, local variables in a function will be referred by the current frame.
        import sys
        if sys.version_info < (3, 7):
            import inspect
            referrers = gc.get_referrers(optim)
            self.assertTrue(
                len(referrers) == 1 and referrers[0] is inspect.currentframe(),
                "Optimizer should contain no cyclic references (except current frame)")
            del referrers
        else:
            self.assertTrue(
                len(gc.get_referrers(optim)) == 0,
                "Optimizer should contain no cyclic references")

        # Clear any garbage created above so the next collect() only sees
        # cycles created by the optimizer itself (there should be none).
        gc.collect()
        del optim
        self.assertEqual(
            gc.collect(), 0, msg="Optimizer should be garbage-collected on __del__")
    def test_old_pattern_warning(self):
        """Calling scheduler.step() before optimizer.step() (the pre-1.1.0
        order) must emit the how-to-adjust-learning-rate UserWarning."""
        epochs = 35
        with warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter("always")  # allow any warning to be raised
            scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
            self.assertTrue(len(ws) == 0, "No warning should be raised")

        def old_pattern():
            for _ in range(epochs):
                scheduler.step()
                self.opt.step()

        self.assertWarnsRegex(UserWarning, r'how-to-adjust-learning-rate', old_pattern)
def test_old_pattern_warning_with_arg(self):
epochs = 35
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
self.assertTrue(len(ws) == 0, "No warning should be raised")
def old_pattern2():
for _ in range(epochs):
scheduler.step()
self.opt.step()
self.assertWarnsRegex(UserWarning, r'how-to-adjust-learning-rate', old_pattern2)
    def test_old_pattern_warning_resuming(self):
        """Same as test_old_pattern_warning, but with a scheduler resumed via
        last_epoch (requires 'initial_lr' on each param group)."""
        epochs = 35
        for i, group in enumerate(self.opt.param_groups):
            group['initial_lr'] = 0.01

        with warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter("always")  # allow any warning to be raised
            scheduler = StepLR(self.opt, gamma=0.1, step_size=3, last_epoch=10)
            self.assertTrue(len(ws) == 0, "No warning should be raised")

        def old_pattern():
            for _ in range(epochs):
                scheduler.step()
                self.opt.step()

        self.assertWarnsRegex(UserWarning, r'how-to-adjust-learning-rate', old_pattern)
    def test_old_pattern_warning_resuming_with_arg(self):
        """Same as test_old_pattern_warning_with_arg, but for a scheduler
        resumed via last_epoch (requires 'initial_lr' on each param group)."""
        epochs = 35
        for i, group in enumerate(self.opt.param_groups):
            group['initial_lr'] = 0.01

        with warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter("always")  # allow any warning to be raised
            scheduler = StepLR(self.opt, gamma=0.1, step_size=3, last_epoch=10)
            self.assertTrue(len(ws) == 0, "No warning should be raised")

        def old_pattern2():
            for _ in range(epochs):
                scheduler.step()
                self.opt.step()

        self.assertWarnsRegex(UserWarning, r'how-to-adjust-learning-rate', old_pattern2)
    def test_old_pattern_warning_with_overridden_optim_step(self):
        """The old-order warning must still fire when optimizer.step has been
        monkey-patched (as e.g. AMP and some wrappers do)."""
        epochs = 35
        for i, group in enumerate(self.opt.param_groups):
            group['initial_lr'] = 0.01

        with warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter("always")  # allow any warning to be raised
            scheduler = StepLR(self.opt, gamma=0.1, step_size=3, last_epoch=10)
            self.assertTrue(len(ws) == 0, "No warning should be raised")

        # emulate use-case with optimizer.step overridden
        import types

        old_step = self.opt.step

        def new_step(o, *args, **kwargs):
            retval = old_step(*args, **kwargs)
            return retval

        self.opt.step = types.MethodType(new_step, self.opt)

        def old_pattern2():
            for _ in range(epochs):
                scheduler.step()
                self.opt.step()

        self.assertWarnsRegex(UserWarning, r'how-to-adjust-learning-rate', old_pattern2)
def test_new_pattern_no_warning(self):
epochs = 35
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
self.assertTrue(len(ws) == 0, "No warning should be raised")
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
for _ in range(epochs):
self.opt.step()
scheduler.step()
self.assertTrue(len(ws) == 0, "No warning should be raised")
    def test_new_pattern_no_warning_with_arg(self):
        """Recommended optimizer-then-scheduler order must raise no warning
        (companion to test_old_pattern_warning_with_arg)."""
        epochs = 35
        with warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter("always")  # allow any warning to be raised
            scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
            self.assertTrue(len(ws) == 0, "No warning should be raised")

        with warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter("always")  # allow any warning to be raised
            for _ in range(epochs):
                self.opt.step()
                scheduler.step()
            self.assertTrue(len(ws) == 0, "No warning should be raised")
    def test_new_pattern_no_warning_with_overridden_optim_step(self):
        """When optimizer.step is monkey-patched, the scheduler can no longer
        verify call order, so it must warn about the override instead."""
        epochs = 35
        with warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter("always")  # allow any warning to be raised
            scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
            self.assertTrue(len(ws) == 0, "No warning should be raised")

        # emulate use-case with optimizer.step overridden
        import types

        old_step = self.opt.step

        def new_step(o, *args, **kwargs):
            retval = old_step(*args, **kwargs)
            return retval

        self.opt.step = types.MethodType(new_step, self.opt)

        def new_pattern():
            for e in range(epochs):
                self.opt.step()
                scheduler.step()

        self.assertWarnsRegex(UserWarning, r'`optimizer.step\(\)` has been overridden', new_pattern)
    def _test_lr_is_constant_for_constant_epoch(self, scheduler):
        """Stepping repeatedly with the same explicit epoch (deprecated API)
        must keep the learning rate constant."""
        l = []

        for _ in range(10):
            scheduler.optimizer.step()
            with warnings.catch_warnings(record=True) as w:
                # passing an explicit epoch triggers the deprecation warning,
                # which is asserted and swallowed below
                scheduler.step(2)
                self._check_warning_is_epoch_deprecation_warning(w)

            l.append(self.opt.param_groups[0]['lr'])
        self.assertEqual(min(l), max(l))
    def test_step_lr_is_constant_for_constant_epoch(self):
        """StepLR lr stays fixed when step(epoch) repeats the same epoch."""
        scheduler = StepLR(self.opt, 2)
        self._test_lr_is_constant_for_constant_epoch(scheduler)
    def test_exponential_lr_is_constant_for_constant_epoch(self):
        """ExponentialLR lr stays fixed when step(epoch) repeats the same epoch."""
        scheduler = ExponentialLR(self.opt, gamma=0.9)
        self._test_lr_is_constant_for_constant_epoch(scheduler)
    def test_constantlr_is_constant_for_constant_epoch(self):
        """ConstantLR lr stays fixed when step(epoch) repeats the same epoch."""
        scheduler = ConstantLR(self.opt)
        self._test_lr_is_constant_for_constant_epoch(scheduler)
    def test_linear_linearlr_is_constant_for_constant_epoch(self):
        """LinearLR lr stays fixed when step(epoch) repeats the same epoch."""
        scheduler = LinearLR(self.opt)
        self._test_lr_is_constant_for_constant_epoch(scheduler)
    def test_polynomial_lr_is_constant_for_constant_epoch(self):
        """PolynomialLR lr stays fixed when step(epoch) repeats the same epoch."""
        scheduler = PolynomialLR(self.opt, power=0.9)
        self._test_lr_is_constant_for_constant_epoch(scheduler)
    def test_step_lr(self):
        # StepLR decays by gamma=0.1 every 3 epochs:
        # lr = 0.05    if epoch < 3
        # lr = 0.005   if 3 <= epoch < 6
        # lr = 0.0005  if 6 <= epoch < 9
        # lr = 0.00005 if epoch >= 9
        # (original comments said "30 <= epoch < 6" and omitted the last
        # tier — the targets below define the actual expectation)
        epochs = 10
        single_targets = [0.05] * 3 + [0.005] * 3 + [0.0005] * 3 + [0.00005] * 3
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
        self._test(scheduler, targets, epochs)
    def test_get_last_lr_step_lr(self):
        """get_last_lr() must track StepLR's decayed values on a fresh
        single-param-group optimizer."""
        from torch.nn import Parameter
        epochs = 10
        optimizer = torch.optim.SGD([Parameter(torch.randn(2, 2, requires_grad=True))], 0.1)
        targets = [[0.1] * 3 + [0.01] * 3 + [0.001] * 3 + [0.0001]]
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 3, gamma=0.1)
        self._test_get_last_lr(scheduler, targets, epochs)
    def test_get_last_lr_multi_step_lr(self):
        """get_last_lr() must track MultiStepLR decays at each milestone."""
        # lr = 0.05     if epoch < 2
        # lr = 0.005    if 2 <= epoch < 5
        # lr = 0.0005   if 5 <= epoch < 9
        # lr = 0.00005   if 9 <= epoch
        epochs = 10
        single_targets = [0.05] * 2 + [0.005] * 3 + [0.0005] * 4 + [0.00005] * 1
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
        self._test_get_last_lr(scheduler, targets, epochs)
    def test_multi_step_lr(self):
        # MultiStepLR decays by gamma at milestones 2, 5 and 9:
        # lr = 0.05     if epoch < 2
        # lr = 0.005    if 2 <= epoch < 5
        # lr = 0.0005   if 5 <= epoch < 9  (original comment said "epoch < 9")
        # lr = 0.00005  if epoch >= 9
        epochs = 10
        single_targets = [0.05] * 2 + [0.005] * 3 + [0.0005] * 4 + [0.00005] * 3
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
        self._test(scheduler, targets, epochs)
    def test_multi_step_lr_with_epoch(self):
        # Same schedule as test_multi_step_lr, driven via step(epoch):
        # lr = 0.05     if epoch < 2
        # lr = 0.005    if 2 <= epoch < 5
        # lr = 0.0005   if 5 <= epoch < 9  (original comment said "epoch < 9")
        # lr = 0.00005  if epoch >= 9
        epochs = 10
        single_targets = [0.05] * 2 + [0.005] * 3 + [0.0005] * 4 + [0.00005] * 3
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
        self._test_with_epoch(scheduler, targets, epochs)
    def test_get_last_lr_constantlr(self):
        # ConstantLR halves the lr for the first 5 epochs, then restores it:
        # lr = 0.025 if epoch < 5
        # lr = 0.05  if 5 <= epoch  (original comment said 0.005; the targets
        #                            below show the base lr 0.05 is restored)
        epochs = 10
        single_targets = [0.025] * 5 + [0.05] * 5
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = ConstantLR(self.opt, factor=1.0 / 2, total_iters=5)
        self._test_get_last_lr(scheduler, targets, epochs)
    def test_get_last_lr_linearlr(self):
        # LinearLR ramps the factor from start_factor (0.25) to end_factor
        # (0.6) over `iters` epochs, then holds at 0.05 * end_factor = 0.03.
        # (The original per-epoch comments here described a different
        # parametrization; the interpolation below is the actual expectation.)
        epochs = 10
        start_factor = 1.0 / 4
        end_factor = 3. / 5
        iters = 4
        interpolation = [start_factor + i * (end_factor - start_factor) / iters for i in range(iters)]
        single_targets = [x * 0.05 for x in interpolation] + [0.05 * end_factor] * (epochs - iters)
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = LinearLR(self.opt, start_factor=start_factor, end_factor=end_factor, total_iters=iters)
        self._test_get_last_lr(scheduler, targets, epochs)
    def test_constantlr(self):
        # ConstantLR halves the lr for the first 5 epochs, then restores it:
        # lr = 0.025 if epoch < 5
        # lr = 0.05  if 5 <= epoch  (original comment said 0.005; the targets
        #                            below show the base lr 0.05 is restored)
        epochs = 10
        single_targets = [0.025] * 5 + [0.05] * 5
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = ConstantLR(self.opt, factor=1.0 / 2, total_iters=5)
        self._test(scheduler, targets, epochs)
    def test_linearlr(self):
        # LinearLR ramps from half the base lr back up to the base lr:
        # lr = 0.025   if epoch == 0
        # lr = 0.03125 if epoch == 1
        # lr = 0.0375  if epoch == 2
        # lr = 0.04375 if epoch == 3
        # lr = 0.05    if 4 <= epoch  (original comment said 0.005)
        epochs = 10
        start_factor = 1.0 / 2
        iters = 4
        interpolation = [start_factor + i * (1 - start_factor) / iters for i in range(iters)]
        single_targets = [x * 0.05 for x in interpolation] + [0.05] * (epochs - iters)
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = LinearLR(self.opt, start_factor=start_factor, total_iters=iters)
        self._test(scheduler, targets, epochs)
    def test_constantlr_with_epoch(self):
        # Same schedule as test_constantlr, driven via step(epoch):
        # lr = 0.025 if epoch < 5
        # lr = 0.05  if 5 <= epoch  (original comment said 0.005)
        epochs = 10
        single_targets = [0.025] * 5 + [0.05] * 5
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = ConstantLR(self.opt, factor=1.0 / 2, total_iters=5)
        self._test_with_epoch(scheduler, targets, epochs)
    def test_linearlr_with_epoch(self):
        # Same ramp as test_linearlr, driven via step(epoch):
        # lr = 0.025   if epoch == 0
        # lr = 0.03125 if epoch == 1
        # lr = 0.0375  if epoch == 2
        # lr = 0.04375 if epoch == 3
        # lr = 0.05    if 4 <= epoch  (original comment said 0.005)
        epochs = 10
        start_factor = 1.0 / 2
        end_factor = 1.
        iters = 4
        interpolation = [start_factor + i * (end_factor - start_factor) / iters for i in range(iters)]
        single_targets = [x * 0.05 for x in interpolation] + [0.05] * (epochs - iters)
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = LinearLR(self.opt, start_factor=start_factor, total_iters=iters)
        self._test_with_epoch(scheduler, targets, epochs)
    def test_exp_lr(self):
        """ExponentialLR: lr follows 0.05 * gamma**epoch exactly."""
        epochs = 10
        single_targets = [0.05 * (0.9 ** x) for x in range(epochs)]
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = ExponentialLR(self.opt, gamma=0.9)
        self._test(scheduler, targets, epochs)
    def test_poly_lr(self):
        """PolynomialLR: lr follows (1 - epoch/total_iters)**power * base_lr
        for the first total_iters epochs, then is 0."""
        epochs = 10
        power = 0.9
        total_iters = 5
        single_targets = [(1.0 - x / total_iters) ** power * 0.05 for x in range(total_iters)] + [0.0] * (epochs - total_iters)
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = PolynomialLR(self.opt, power=power, total_iters=total_iters)
        self._test(scheduler, targets, epochs)
    def test_cos_anneal_lr(self):
        """CosineAnnealingLR: lr follows the closed-form cosine schedule
        eta_min + (base_lr - eta_min) * (1 + cos(pi * t / T_max)) / 2."""
        epochs = 10
        eta_min = 1e-10
        single_targets = [eta_min + (0.05 - eta_min) *
                          (1 + math.cos(math.pi * x / epochs)) / 2
                          for x in range(epochs)]
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min)
        self._test(scheduler, targets, epochs)
    def test_closed_form_step_lr(self):
        """Iterative StepLR stepping matches its closed-form computation."""
        scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
        closed_form_scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
        self._test_against_closed_form(scheduler, closed_form_scheduler, 20)
    def test_closed_form_linearlr(self):
        """Iterative LinearLR stepping matches its closed-form computation."""
        scheduler = LinearLR(self.opt, start_factor=1.0 / 3, end_factor=0.7, total_iters=4)
        closed_form_scheduler = LinearLR(self.opt, start_factor=1.0 / 3, end_factor=0.7, total_iters=4)
        self._test_against_closed_form(scheduler, closed_form_scheduler, 20)
def test_closed_form_constantlr(self):
scheduler = ConstantLR(self.opt, factor=1.0 / 3, total_iters=4)
closed_form_scheduler = ConstantLR(self.opt, factor=1.0 / 3, total_iters=4)
self._test_against_closed_form(scheduler, closed_form_scheduler, 20)
def test_closed_form_multi_step_lr(self):
scheduler = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
closed_form_scheduler = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
self._test_against_closed_form(scheduler, closed_form_scheduler, 20)
def test_closed_form_exp_lr(self):
scheduler = ExponentialLR(self.opt, gamma=0.9)
closed_form_scheduler = ExponentialLR(self.opt, gamma=0.9)
self._test_against_closed_form(scheduler, closed_form_scheduler, 20)
def test_closed_form_poly_lr(self):
scheduler = PolynomialLR(self.opt, power=0.9)
closed_form_scheduler = PolynomialLR(self.opt, power=0.9)
self._test_against_closed_form(scheduler, closed_form_scheduler, 20)
def test_closed_form_cos_anneal_lr(self):
eta_min = 1e-10
epochs = 20
T_max = 5
scheduler = CosineAnnealingLR(self.opt, T_max=T_max, eta_min=eta_min)
closed_form_scheduler = CosineAnnealingLR(self.opt, T_max=T_max, eta_min=eta_min)
self._test_against_closed_form(scheduler, closed_form_scheduler, epochs)
def test_cos_anneal_lr_continue(self):
    """Restarting CosineAnnealingLR with last_epoch=0 resumes at the same lr.

    Steps one scheduler once, then builds a second scheduler told it is
    already at epoch 0 and checks both report identical learning rates.
    """
    eta_min = 0.1
    T_max = 5
    scheduler = CosineAnnealingLR(self.opt, T_max=T_max, eta_min=eta_min)
    self.opt.step()
    scheduler.step()
    original_lrs = scheduler._last_lr
    new_scheduler = CosineAnnealingLR(
        self.opt, T_max=T_max, eta_min=eta_min, last_epoch=0)
    new_lrs = new_scheduler._last_lr
    # torch.testing.assert_allclose is deprecated; assert_close is the
    # supported replacement and accepts the same rtol/atol arguments.
    torch.testing.assert_close(original_lrs, new_lrs, rtol=1e-4, atol=1e-5)
def test_reduce_lr_on_plateau1(self):
    """'abs'/'min' mode where every metric value counts as an improvement."""
    epochs = 10
    for group in self.opt.param_groups:
        group['lr'] = 0.5
    targets = [[0.5] * 20]  # lr never drops
    metrics = [10 - 0.0167 * i for i in range(20)]
    scheduler = ReduceLROnPlateau(self.opt, threshold_mode='abs', mode='min',
                                  threshold=0.01, patience=5, cooldown=5)
    self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)

def test_reduce_lr_on_plateau2(self):
    """'abs'/'min' mode with zero cooldown: periodic 10x drops."""
    epochs = 22
    for group in self.opt.param_groups:
        group['lr'] = 0.5
    targets = [[0.5] * 6 + [0.05] * 7 + [0.005] * 7 + [0.0005] * 2]
    metrics = [10 - 0.0165 * i for i in range(22)]
    scheduler = ReduceLROnPlateau(self.opt, patience=5, cooldown=0, threshold_mode='abs',
                                  mode='min', threshold=0.1)
    self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)

def test_reduce_lr_on_plateau3(self):
    """'abs'/'max' mode with cooldown after the metric stops improving."""
    epochs = 22
    for group in self.opt.param_groups:
        group['lr'] = 0.5
    targets = [[0.5] * (2 + 6) + [0.05] * (5 + 6) + [0.005] * 4]
    metrics = [-0.8] * 2 + [-0.234] * 20
    scheduler = ReduceLROnPlateau(self.opt, mode='max', patience=5, cooldown=5,
                                  threshold_mode='abs')
    self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)

def test_reduce_lr_on_plateau4(self):
    """'rel'/'max' mode where steady growth keeps clearing the threshold."""
    epochs = 20
    for group in self.opt.param_groups:
        group['lr'] = 0.5
    targets = [[0.5] * 20]  # lr never drops
    metrics = [1.5 * (1.025 ** i) for i in range(20)]  # 1.025 > 1.1**0.25
    scheduler = ReduceLROnPlateau(self.opt, mode='max', patience=3,
                                  threshold_mode='rel', threshold=0.1)
    self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)

def test_reduce_lr_on_plateau5(self):
    """'rel'/'max' mode where 0.5% growth is below the 10% threshold."""
    epochs = 20
    for group in self.opt.param_groups:
        group['lr'] = 0.5
    targets = [[0.5] * 6 + [0.05] * (5 + 6) + [0.005] * 4]
    metrics = [1.5 * (1.005 ** i) for i in range(20)]
    scheduler = ReduceLROnPlateau(self.opt, mode='max', threshold_mode='rel',
                                  threshold=0.1, patience=5, cooldown=5)
    self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)

def test_reduce_lr_on_plateau6(self):
    """'rel'/'min' mode where a 15% per-epoch decay always satisfies it."""
    epochs = 20
    for group in self.opt.param_groups:
        group['lr'] = 0.5
    targets = [[0.5] * 20]  # lr never drops
    metrics = [1.5 * (0.85 ** i) for i in range(20)]
    scheduler = ReduceLROnPlateau(self.opt, mode='min', threshold_mode='rel',
                                  threshold=0.1)
    self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)

def test_reduce_lr_on_plateau7(self):
    """'rel'/'min' mode: one large improvement, then stagnation."""
    epochs = 20
    for group in self.opt.param_groups:
        group['lr'] = 0.5
    targets = [[0.5] * 6 + [0.05] * (5 + 6) + [0.005] * 4]
    metrics = [1] * 7 + [0.6] + [0.5] * 12
    scheduler = ReduceLROnPlateau(self.opt, mode='min', threshold_mode='rel',
                                  threshold=0.1, patience=5, cooldown=5)
    self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)

def test_reduce_lr_on_plateau8(self):
    """Per-param-group min_lr clamps the reduced lr independently."""
    epochs = 20
    for group in self.opt.param_groups:
        group['lr'] = 0.5
    targets = [[0.5] * 6 + [0.4] * 14, [0.5] * 6 + [0.3] * 14]
    metrics = [1.5 * (1.005 ** i) for i in range(20)]
    scheduler = ReduceLROnPlateau(self.opt, mode='max', threshold_mode='rel', min_lr=[0.4, 0.3],
                                  threshold=0.1, patience=5, cooldown=5)
    self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)
def test_sequentiallr1(self):
    """SequentialLR: ExponentialLR for 3 epochs, then StepLR afterwards."""
    epochs = 19
    milestones = [3]
    targets = [[0.05, 0.04, 0.032] + [0.05 for _ in range(4)]
               + [0.05 * 0.1 for _ in range(4)]
               + [0.05 * 0.01 for _ in range(4)]
               + [0.05 * 0.001 for _ in range(4)]]
    # NOTE: construction order matters — each scheduler touches self.opt.
    phase1 = ExponentialLR(self.opt, gamma=0.8)
    phase2 = StepLR(self.opt, gamma=0.1, step_size=4)
    scheduler = SequentialLR(self.opt, schedulers=[phase1, phase2], milestones=milestones)
    self._test(scheduler, targets, epochs)

def test_sequentiallr2(self):
    """SequentialLR: constant-factor warmup, then exponential decay."""
    epochs = 13
    milestones = [3]
    targets = [[0.005, 0.005, 0.005] + [0.05 * 0.9 ** x for x in range(10)]]
    warmup = ConstantLR(self.opt, factor=0.1, total_iters=3)
    decay = ExponentialLR(self.opt, gamma=0.9)
    scheduler = SequentialLR(self.opt, schedulers=[warmup, decay], milestones=milestones)
    self._test(scheduler, targets, epochs)

def test_sequentiallr3(self):
    """SequentialLR with three phases: constant, exponential, step."""
    epochs = 12
    milestones = [3, 6]
    targets = [[0.005, 0.005, 0.005] + [0.05, 0.04, 0.032]
               + [0.05, 0.05, 0.005, 0.005, 0.0005, 0.0005]]
    phases = [
        ConstantLR(self.opt, factor=0.1, total_iters=3),
        ExponentialLR(self.opt, gamma=0.8),
        StepLR(self.opt, gamma=0.1, step_size=2),
    ]
    scheduler = SequentialLR(self.opt, schedulers=phases, milestones=milestones)
    self._test(scheduler, targets, epochs)

def test_sequentiallr4(self):
    """Wrapping schedulers in SequentialLR must not disturb the initial lr."""
    optimizer = torch.optim.SGD([torch.tensor(0.5)], lr=0.1)
    prev_lr = optimizer.param_groups[0]["lr"]
    schedulers = [
        torch.optim.lr_scheduler.ConstantLR(optimizer, factor=1),
        torch.optim.lr_scheduler.ConstantLR(optimizer, factor=0.1)
    ]
    scheduler = torch.optim.lr_scheduler.SequentialLR(optimizer, schedulers, milestones=[10])
    new_lr = optimizer.param_groups[0]["lr"]
    # Ensure that multiple schedulers does not affect the initial learning rate
    self.assertEqual(prev_lr, new_lr)
def test_get_last_lr_sequentiallr(self):
    """get_last_lr() tracks the active phase of a three-phase SequentialLR."""
    epochs = 12
    milestones = [3, 6]
    phases = [
        ConstantLR(self.opt, factor=0.1, total_iters=3),
        ExponentialLR(self.opt, gamma=0.8),
        StepLR(self.opt, gamma=0.1, step_size=2),
    ]
    scheduler = SequentialLR(self.opt, schedulers=phases, milestones=milestones)
    # Expected lrs contributed by each phase, concatenated in order.
    single_targets = ([0.005] * 3
                      + [0.05, 0.04, 0.032]
                      + [0.05, 0.05, 0.005, 0.005, 0.0005, 0.0005])
    targets = [single_targets, [x * 10 for x in single_targets]]
    self._test_get_last_lr(scheduler, targets, epochs)
def test_chained_lr2_get_last_lr_before_step(self):
    """Before any step(), get_last_lr() matches the last chained scheduler."""
    schedulers = [
        LinearLR(self.opt, start_factor=0.4, total_iters=3),
        MultiStepLR(self.opt, milestones=[4, 8, 10], gamma=0.1)
    ]
    scheduler = ChainedScheduler(schedulers)
    self.assertEqual(scheduler.get_last_lr(), schedulers[-1].get_last_lr())

def test_chained_lr1(self):
    """A chain containing a single StepLR behaves like the StepLR itself."""
    epochs = 10
    targets = [[0.05] * 3 + [0.005] * 3 + [0.0005] * 3 + [0.00005] * 3]
    inner = [StepLR(self.opt, gamma=0.1, step_size=3)]
    scheduler = ChainedScheduler(inner)
    self._test([scheduler], targets, epochs)
    self.assertEqual(scheduler.get_last_lr(), inner[-1].get_last_lr())

def test_chained_lr2(self):
    """A chain containing a single LinearLR warms up then holds the base lr."""
    epochs = 10
    targets = [[0.02, 0.03, 0.04] + [0.05] * 9]
    inner = [LinearLR(self.opt, start_factor=0.4, total_iters=3)]
    scheduler = ChainedScheduler(inner)
    self._test([scheduler], targets, epochs)
    self.assertEqual(scheduler.get_last_lr(), inner[-1].get_last_lr())

def test_chained_lr3(self):
    """LinearLR warmup chained with MultiStepLR milestone decay."""
    epochs = 10
    targets = [[0.02, 0.03, 0.04, 0.05] + [0.005] * 4 + [0.0005] * 3 + [0.00005] * 3]
    inner = [
        LinearLR(self.opt, start_factor=0.4, total_iters=3),
        MultiStepLR(self.opt, milestones=[4, 8, 10], gamma=0.1),
    ]
    scheduler = ChainedScheduler(inner)
    self._test([scheduler], targets, epochs)
    self.assertEqual(scheduler.get_last_lr(), inner[-1].get_last_lr())

def test_chained_lr4(self):
    """Exponential, constant-factor, and step decays multiplied together."""
    epochs = 9
    targets = [[0.05 * 0.2 * 0.9 ** x for x in range(3)]
               + [0.05 * 0.2 * 0.9 ** 3 * 0.1]
               + [0.05 * 0.9 ** x * 0.1 for x in range(4, 6)]
               + [0.05 * 0.9 ** x * 0.01 for x in range(6, 9)]]
    inner = [
        ExponentialLR(self.opt, gamma=0.9),
        ConstantLR(self.opt, factor=0.2, total_iters=4),
        StepLR(self.opt, gamma=0.1, step_size=3),
    ]
    scheduler = ChainedScheduler(inner)
    self._test([scheduler], targets, epochs)
    self.assertEqual(scheduler.get_last_lr(), inner[-1].get_last_lr())
def test_chained_lr5(self):
    """PolynomialLR chained with a permanent ConstantLR factor."""
    epochs = 10
    power = 0.9
    total_iters = 5
    const_factor = 0.1

    def poly_lr(lr: float):
        # Polynomial decay for total_iters steps, then zero thereafter.
        return [
            (lr * ((1.0 - x / total_iters) ** power)) for x in range(total_iters)
        ] + [0.0] * (epochs - total_iters)

    single_targets = [x * const_factor for x in poly_lr(lr=0.05)]
    targets = [single_targets, [x * const_factor for x in poly_lr(0.5)]]
    chain = [
        PolynomialLR(self.opt, power=power, total_iters=total_iters),
        ConstantLR(self.opt, factor=const_factor),
    ]
    scheduler = ChainedScheduler(chain)
    self._test(scheduler, targets, epochs)
    self.assertEqual(scheduler.get_last_lr(), chain[-1].get_last_lr())
def test_compound_step_and_multistep_lr(self):
    """StepLR and MultiStepLR stepped together every epoch."""
    epochs = 10
    schedulers = [
        StepLR(self.opt, gamma=0.1, step_size=3),
        MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9]),
    ]
    targets = [[0.05] * 2 + [0.005] * 1 + [5e-4] * 2 + [5e-5] + [5e-6] * 3 + [5e-8]]
    self._test(schedulers, targets, epochs)

def test_compound_step_and_exp_lr(self):
    """StepLR's 10x drops layered on per-epoch exponential decay."""
    epochs = 10
    single_targets = ([0.05 * (0.9 ** x) for x in range(3)]
                      + [0.005 * (0.9 ** x) for x in range(3, 6)]
                      + [0.0005 * (0.9 ** x) for x in range(6, 9)]
                      + [0.00005 * (0.9 ** x) for x in range(9, 12)])
    targets = [single_targets, [x * epochs for x in single_targets]]
    schedulers = [
        StepLR(self.opt, gamma=0.1, step_size=3),
        ExponentialLR(self.opt, gamma=0.9),
    ]
    self._test(schedulers, targets, epochs)

def test_compound_exp_and_multistep_lr(self):
    """MultiStepLR milestones layered on per-epoch exponential decay."""
    epochs = 10
    single_targets = ([0.05 * (0.9 ** x) for x in range(2)]
                      + [0.005 * (0.9 ** x) for x in range(2, 5)]
                      + [0.0005 * (0.9 ** x) for x in range(5, 9)]
                      + [0.00005 * (0.9 ** x) for x in range(9, 11)])
    targets = [single_targets, [x * epochs for x in single_targets]]
    schedulers = [
        MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9]),
        ExponentialLR(self.opt, gamma=0.9),
    ]
    self._test(schedulers, targets, epochs)
def test_compound_exp_and_linearlr(self):
    """Linear warmup factor (with custom end_factor) on exponential decay."""
    epochs = 10
    iters = 4
    start_factor = 0.4
    end_factor = 0.9
    single_targets = [0.05 * (0.9 ** x) for x in range(11)]
    # Warmup: factor interpolates linearly from start_factor ...
    for i in range(iters):
        single_targets[i] *= start_factor + i / iters * (end_factor - start_factor)
    # ... and then stays at end_factor for the remaining epochs.
    for i in range(iters, 11):
        single_targets[i] *= end_factor
    targets = [single_targets, [x * epochs for x in single_targets]]
    schedulers = [
        LinearLR(self.opt, start_factor=start_factor, end_factor=end_factor, total_iters=iters),
        ExponentialLR(self.opt, gamma=0.9),
    ]
    self._test(schedulers, targets, epochs)
def test_compound_step_and_constantlr(self):
    """StepLR combined with ConstantLR's warmup factor.

    Fix: the locals `iters` and `factor` were declared but the literals
    4 / 0.4 were hard-coded below; use the variables so the expected
    targets and the scheduler stay in sync (values unchanged).
    """
    epochs = 10
    iters = 4
    factor = 0.4
    schedulers = [None] * 2
    # The constant factor applies through the first `iters` epochs, after
    # which ConstantLR restores the unscaled lr.
    single_targets = ([0.05 * factor] * 3 + [0.005 * factor]
                      + [0.005] * 2 + [0.0005] * 3 + [0.00005] * 3)
    targets = [single_targets, [x * epochs for x in single_targets]]
    schedulers[0] = StepLR(self.opt, gamma=0.1, step_size=3)
    schedulers[1] = ConstantLR(self.opt, factor=factor, total_iters=iters)
    self._test(schedulers, targets, epochs)
def test_compound_linearlr_and_multistep_lr(self):
    """Linear warmup combined with MultiStepLR milestone decay."""
    epochs = 10
    iters = 4
    start_factor = 0.4
    single_targets = [0.05] * 2 + [0.005] * 3 + [0.0005] * 4 + [0.00005] * 2
    # Apply the warmup factor to the first `iters` expected values.
    for i in range(iters):
        single_targets[i] *= start_factor + i / iters * (1 - start_factor)
    targets = [single_targets, [x * epochs for x in single_targets]]
    schedulers = [
        MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9]),
        LinearLR(self.opt, start_factor=start_factor, total_iters=iters),
    ]
    self._test(schedulers, targets, epochs)
def test_compound_cosanneal_and_step_lr(self):
    """Cosine annealing multiplied by StepLR's 10x drop every 3 epochs."""
    epochs = 10
    eta_min = 1e-10
    cosine = [eta_min + (0.05 - eta_min) *
              (1 + math.cos(math.pi * x / epochs)) / 2
              for x in range(epochs)]
    single_targets = [x * 0.1 ** (i // 3) for i, x in enumerate(cosine)]
    targets = [single_targets, [x * epochs for x in single_targets]]
    schedulers = [
        CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min),
        StepLR(self.opt, gamma=0.1, step_size=3),
    ]
    self._test(schedulers, targets, epochs)

def test_compound_cosanneal_and_multistep_lr(self):
    """Cosine annealing multiplied by MultiStepLR milestone factors."""
    epochs = 10
    eta_min = 1e-10
    cosine = [eta_min + (0.05 - eta_min) *
              (1 + math.cos(math.pi * x / epochs)) / 2
              for x in range(epochs)]
    multipliers = [1] * 2 + [0.1] * 3 + [0.01] * 4 + [0.001]
    single_targets = [x * y for x, y in zip(cosine, multipliers)]
    targets = [single_targets, [x * epochs for x in single_targets]]
    schedulers = [
        CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min),
        MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9]),
    ]
    self._test(schedulers, targets, epochs)

def test_compound_cosanneal_and_linearlr(self):
    """Linear warmup applied on top of cosine annealing."""
    epochs = 10
    iters = 4
    start_factor = 0.4
    eta_min = 1e-10
    single_targets = [eta_min + (0.05 - eta_min) *
                      (1 + math.cos(math.pi * x / epochs)) / 2
                      for x in range(epochs)]
    for i in range(iters):
        single_targets[i] *= start_factor + i / iters * (1 - start_factor)
    targets = [single_targets, [x * epochs for x in single_targets]]
    schedulers = [
        LinearLR(self.opt, start_factor=start_factor, total_iters=iters),
        CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min),
    ]
    self._test(schedulers, targets, epochs)

def test_compound_cosanneal_and_exp_lr(self):
    """Cosine annealing multiplied by aggressive exponential decay."""
    epochs = 10
    eta_min = 1e-10
    cosine = [eta_min + (0.05 - eta_min) *
              (1 + math.cos(math.pi * x / epochs)) / 2
              for x in range(epochs)]
    multipliers = [0.1 ** i for i in range(epochs)]
    single_targets = [x * y for x, y in zip(cosine, multipliers)]
    targets = [single_targets, [x * epochs for x in single_targets]]
    schedulers = [
        CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min),
        ExponentialLR(self.opt, gamma=0.1),
    ]
    self._test(schedulers, targets, epochs)
def test_compound_reduce_lr_on_plateau1(self):
    """ReduceLROnPlateau (never triggered) combined with StepLR."""
    epochs = 10
    for group in self.opt.param_groups:
        group['lr'] = 0.5
    single_targets = [0.5] * 20
    multipliers = [0.1 ** (i // 3) for i in range(20)]
    single_targets = [m * t for m, t in zip(multipliers, single_targets)]
    targets = [single_targets]
    targets = targets[1:]  # test runs step before checking lr
    metrics = [10 - 0.0167 * i for i in range(20)]
    schedulers = [
        ReduceLROnPlateau(self.opt, threshold_mode='abs', mode='min',
                          threshold=0.01, patience=5, cooldown=5),
        StepLR(self.opt, gamma=0.1, step_size=3),
    ]
    self._test_reduce_lr_on_plateau(schedulers, targets, metrics, epochs)

def test_compound_reduce_lr_on_plateau2(self):
    """ReduceLROnPlateau combined with MultiStepLR milestones."""
    epochs = 22
    for group in self.opt.param_groups:
        group['lr'] = 0.5
    single_targets = [0.5] * 6 + [0.05] * 7 + [0.005] * 7 + [0.0005] * 2
    multipliers = [1] * 3 + [0.1] * 5 + [0.01] * 4 + [0.001] * 10
    single_targets = [t * m for t, m in zip(single_targets, multipliers)]
    targets = [single_targets]
    targets = targets[1:]  # test runs step before checking lr
    metrics = [10 - 0.0165 * i for i in range(22)]
    schedulers = [
        ReduceLROnPlateau(self.opt, patience=5, cooldown=0, threshold_mode='abs',
                          mode='min', threshold=0.1),
        MultiStepLR(self.opt, gamma=0.1, milestones=[3, 8, 12]),
    ]
    self._test_reduce_lr_on_plateau(schedulers, targets, metrics, epochs)

def test_compound_reduce_lr_on_plateau3(self):
    """ReduceLROnPlateau ('max' mode) combined with exponential decay."""
    epochs = 22
    for group in self.opt.param_groups:
        group['lr'] = 0.5
    single_targets = [0.5] * (2 + 6) + [0.05] * (5 + 6) + [0.005] * 4
    multipliers = [0.1 ** i for i in range(epochs)]
    single_targets = [m * t for m, t in zip(multipliers, single_targets)]
    targets = [single_targets]
    targets = targets[1:]  # test runs step before checking lr
    metrics = [-0.8] * 2 + [-0.234] * 20
    schedulers = [
        ReduceLROnPlateau(self.opt, mode='max', patience=5, cooldown=5,
                          threshold_mode='abs'),
        ExponentialLR(self.opt, gamma=0.1),
    ]
    self._test_reduce_lr_on_plateau(schedulers, targets, metrics, epochs)
def test_compound_reduce_lr_on_plateau4(self):
    """ReduceLROnPlateau (never triggered: metric keeps improving) combined
    with CosineAnnealingLR.

    Fix: removed a dead `epochs = 20` that was immediately overwritten by
    `epochs = 10` before any use; behavior is unchanged.
    """
    epochs = 10
    for param_group in self.opt.param_groups:
        param_group['lr'] = 0.05
    eta_min = 1e-10
    single_targets = [eta_min + (0.05 - eta_min) *
                      (1 + math.cos(math.pi * x / epochs)) / 2
                      for x in range(epochs)]
    targets = [single_targets]
    targets = targets[1:]  # test runs step before checking lr
    metrics = [1.5 * (1.025 ** i) for i in range(20)]  # 1.025 > 1.1**0.25
    schedulers = [None, None]
    schedulers[0] = ReduceLROnPlateau(self.opt, mode='max', patience=3,
                                      threshold_mode='rel', threshold=0.1)
    schedulers[1] = CosineAnnealingLR(self.opt, epochs, eta_min)
    self._test_reduce_lr_on_plateau(schedulers, targets, metrics, epochs)
def test_compound_reduce_lr_on_plateau5(self):
    """ReduceLROnPlateau combined with a LinearLR warmup."""
    iters = 4
    start_factor = 0.4
    epochs = 22
    for group in self.opt.param_groups:
        group['lr'] = 0.5
    single_targets = [0.5] * 6 + [0.05] * 7 + [0.005] * 7 + [0.0005] * 2
    multipliers = [1] * 22
    # Warmup factor applies only to the first `iters` epochs.
    for i in range(iters):
        multipliers[i] *= start_factor + i / iters * (1 - start_factor)
    single_targets = [t * m for t, m in zip(single_targets, multipliers)]
    targets = [single_targets]
    targets = targets[1:]  # test runs step before checking lr
    metrics = [10 - 0.0165 * i for i in range(22)]
    schedulers = [
        ReduceLROnPlateau(self.opt, patience=5, cooldown=0, threshold_mode='abs',
                          mode='min', threshold=0.1),
        LinearLR(self.opt, start_factor=start_factor, total_iters=iters),
    ]
    self._test_reduce_lr_on_plateau(schedulers, targets, metrics, epochs)
def test_cycle_lr_invalid_mode(self):
    """An unrecognized mode string must raise ValueError."""
    with self.assertRaises(ValueError):
        CyclicLR(self.opt, base_lr=0, max_lr=0, mode="CATS")
def test_cycle_lr_triangular_mode_one_lr(self):
    """Triangular cycle with scalar base/max lr; momentum cycles inversely."""
    lr_target = [1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3]
    momentum_target = [5, 4, 3, 2, 1, 2, 3, 4, 5, 4, 3]
    scheduler = CyclicLR(self.opt, base_lr=1, max_lr=5, step_size_up=4,
                         cycle_momentum=True, base_momentum=1, max_momentum=5,
                         mode='triangular')
    self._test_cycle_lr(scheduler, [lr_target] * 2, [momentum_target] * 2, len(lr_target))

def test_cycle_lr_triangular_mode_one_lr_no_momentum(self):
    """cycle_momentum=False leaves the optimizer's default momentum fixed."""
    lr_target = [1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3]
    fixed_momentum = self.opt.defaults['momentum']
    momentum_target = [fixed_momentum] * len(lr_target)
    scheduler = CyclicLR(self.opt, base_lr=1, max_lr=5, step_size_up=4,
                         cycle_momentum=False, mode='triangular')
    self._test_cycle_lr(scheduler, [lr_target] * 2, [momentum_target] * 2, len(lr_target))

def test_cycle_lr_triangular2_mode_one_lr(self):
    """triangular2 mode halves the cycle amplitude after each full cycle."""
    lr_target = [1, 2, 3, 4, 5, 4, 3, 2, 1, 1.5, 2.0, 2.5, 3.0, 2.5, 2.0, 1.5,
                 1, 1.25, 1.50, 1.75, 2.00, 1.75]
    momentum_target = [5.0, 4.0, 3.0, 2.0, 1.0, 2.0, 3.0, 4.0, 5.0, 4.5, 4.0,
                       3.5, 3.0, 3.5, 4.0, 4.5, 5.0, 4.75, 4.5, 4.25, 4.0, 4.25]
    scheduler = CyclicLR(self.opt, base_lr=1, max_lr=5, step_size_up=4,
                         cycle_momentum=True, base_momentum=1, max_momentum=5,
                         mode='triangular2')
    self._test_cycle_lr(scheduler, [lr_target] * 2, [momentum_target] * 2, len(lr_target))

def test_cycle_lr_exp_range_mode_one_lr(self):
    """exp_range mode scales the triangular amplitude by gamma**iteration."""
    base_lr, max_lr = 1, 5
    diff_lr = max_lr - base_lr
    gamma = 0.9
    # Fractional position within the triangular wave at each step.
    xs = [0, 0.25, 0.5, 0.75, 1, 0.75, 0.50, 0.25, 0, 0.25, 0.5, 0.75, 1]
    lr_target = [base_lr + x * diff_lr * gamma**i for i, x in enumerate(xs)]
    momentum_target = [max_lr - x * diff_lr * gamma**i for i, x in enumerate(xs)]
    scheduler = CyclicLR(self.opt, base_lr=base_lr,
                         max_lr=max_lr, step_size_up=4,
                         cycle_momentum=True, base_momentum=base_lr, max_momentum=max_lr,
                         mode='exp_range', gamma=gamma)
    self._test_cycle_lr(scheduler, [lr_target] * 2, [momentum_target] * 2, len(lr_target))
def test_cycle_lr_triangular_mode(self):
    """Triangular mode with per-param-group base/max lr and momentum lists."""
    lr_target_1 = [1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3]
    lr_target_2 = [v + 1 for v in lr_target_1]  # second group shifted by +1
    momentum_target_1 = [5, 4, 3, 2, 1, 2, 3, 4, 5, 4, 3]
    momentum_target_2 = [v + 1 for v in momentum_target_1]
    scheduler = CyclicLR(self.opt, base_lr=[1, 2], max_lr=[5, 6], step_size_up=4,
                         cycle_momentum=True, base_momentum=[1, 2], max_momentum=[5, 6],
                         mode='triangular')
    self._test_cycle_lr(scheduler,
                        [lr_target_1, lr_target_2],
                        [momentum_target_1, momentum_target_2],
                        len(lr_target_1))

def test_cycle_lr_triangular2_mode(self):
    """triangular2 mode with per-param-group settings (second group +2)."""
    lr_target_1 = [1, 2, 3, 4, 5, 4, 3, 2, 1, 1.5, 2.0, 2.5, 3.0, 2.5, 2.0, 1.5, 1,
                   1.25, 1.50, 1.75, 2.00, 1.75]
    lr_target_2 = [v + 2 for v in lr_target_1]
    momentum_target_1 = [5.0, 4.0, 3.0, 2.0, 1.0, 2.0, 3.0, 4.0, 5.0, 4.5, 4.0, 3.5,
                         3.0, 3.5, 4.0, 4.5, 5.0, 4.75, 4.5, 4.25, 4.0, 4.25]
    momentum_target_2 = [v + 2 for v in momentum_target_1]
    scheduler = CyclicLR(self.opt, base_lr=[1, 3], max_lr=[5, 7], step_size_up=4,
                         cycle_momentum=True, base_momentum=[1, 3], max_momentum=[5, 7],
                         mode='triangular2')
    self._test_cycle_lr(scheduler,
                        [lr_target_1, lr_target_2],
                        [momentum_target_1, momentum_target_2],
                        len(lr_target_1))

def test_cycle_lr_exp_range_mode(self):
    """exp_range mode with two param groups of different base/max lr."""
    base_lr_1, max_lr_1 = 1, 5
    base_lr_2, max_lr_2 = 5, 12
    diff_lr_1 = max_lr_1 - base_lr_1
    diff_lr_2 = max_lr_2 - base_lr_2
    gamma = 0.9
    # Fractional position within the triangular wave at each step.
    xs = [0, 0.25, 0.5, 0.75, 1, 0.75, 0.50, 0.25, 0, 0.25, 0.5, 0.75, 1]
    lr_target_1 = [base_lr_1 + x * diff_lr_1 * gamma**i for i, x in enumerate(xs)]
    lr_target_2 = [base_lr_2 + x * diff_lr_2 * gamma**i for i, x in enumerate(xs)]
    momentum_target_1 = [max_lr_1 - x * diff_lr_1 * gamma**i for i, x in enumerate(xs)]
    momentum_target_2 = [max_lr_2 - x * diff_lr_2 * gamma**i for i, x in enumerate(xs)]
    scheduler = CyclicLR(self.opt, base_lr=[base_lr_1, base_lr_2],
                         max_lr=[max_lr_1, max_lr_2], step_size_up=4,
                         cycle_momentum=True, base_momentum=[base_lr_1, base_lr_2],
                         max_momentum=[max_lr_1, max_lr_2],
                         mode='exp_range', gamma=gamma)
    self._test_cycle_lr(scheduler,
                        [lr_target_1, lr_target_2],
                        [momentum_target_1, momentum_target_2],
                        len(lr_target_1))
def test_cycle_lr_triangular_mode_step_size_up_down(self):
    """Asymmetric cycle: 4 steps up, 6 steps down."""
    lr_target = [1.0, 2.0, 3.0, 4.0, 5.0, 13.0 / 3, 11.0 / 3, 9.0 / 3, 7.0 / 3, 5.0 / 3, 1.0]
    momentum_target = [5.0, 4.0, 3.0, 2.0, 1.0, 5.0 / 3, 7.0 / 3, 3.0, 11.0 / 3, 13.0 / 3, 5.0]
    scheduler = CyclicLR(self.opt, base_lr=1, max_lr=5,
                         step_size_up=4,
                         step_size_down=6,
                         cycle_momentum=True,
                         base_momentum=1, max_momentum=5,
                         mode='triangular')
    self._test_cycle_lr(scheduler, [lr_target] * 2, [momentum_target] * 2, len(lr_target))

def test_cycle_lr_triangular2_mode_step_size_up_down(self):
    """triangular2 with asymmetric up/down phases, two shifted param groups."""
    lr_base_target = ([
        1.0, 3.0, 5.0, 13.0 / 3, 11.0 / 3, 9.0 / 3, 7.0 / 3, 5.0 / 3, 1.0, 2.0, 3.0, 8.0 / 3,
        7.0 / 3, 6.0 / 3, 5.0 / 3, 4.0 / 3, 1.0, 3.0 / 2, 2.0, 11.0 / 6, 10.0 / 6, 9.0 / 6,
        8.0 / 6, 7.0 / 6
    ])
    momentum_base_target = ([
        5.0, 3.0, 1.0, 5.0 / 3, 7.0 / 3, 3.0, 11.0 / 3, 13.0 / 3, 5.0, 4.0, 3.0, 10.0 / 3,
        11.0 / 3, 4.0, 13.0 / 3, 14.0 / 3, 5.0, 4.5, 4.0, 25.0 / 6, 13.0 / 3, 4.5, 14.0 / 3,
        29.0 / 6
    ])
    # The second param group is simply the first shifted by +2.
    deltas = [2 * k for k in range(0, 2)]
    base_lrs = [1 + delta for delta in deltas]
    max_lrs = [5 + delta for delta in deltas]
    lr_targets = [[v + delta for v in lr_base_target] for delta in deltas]
    momentum_targets = [[v + delta for v in momentum_base_target] for delta in deltas]
    scheduler = CyclicLR(
        self.opt,
        base_lr=base_lrs,
        max_lr=max_lrs,
        step_size_up=2,
        step_size_down=6,
        cycle_momentum=True,
        base_momentum=base_lrs,
        max_momentum=max_lrs,
        mode='triangular2')
    self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_base_target))

def test_cycle_lr_exp_range_mode_step_size_up_down(self):
    """exp_range with asymmetric up/down phases."""
    base_lr, max_lr = 1, 5
    diff_lr = max_lr - base_lr
    gamma = 0.9
    # Fractional wave position: 2 steps up, then 6 steps down, repeating.
    xs = ([
        0.0, 0.5, 1.0, 5.0 / 6, 4.0 / 6, 3.0 / 6, 2.0 / 6, 1.0 / 6, 0.0, 0.5, 1.0, 5.0 / 6,
        4.0 / 6
    ])
    lr_target = [base_lr + x * diff_lr * gamma**i for i, x in enumerate(xs)]
    momentum_target = [max_lr - x * diff_lr * gamma**i for i, x in enumerate(xs)]
    scheduler = CyclicLR(self.opt, base_lr=base_lr, max_lr=max_lr,
                         step_size_up=2, step_size_down=6,
                         cycle_momentum=True, base_momentum=base_lr,
                         max_momentum=max_lr,
                         mode='exp_range', gamma=gamma)
    self._test_cycle_lr(scheduler, [lr_target] * 2, [momentum_target] * 2, len(lr_target))
def test_cycle_lr_with_momentumless_optimizer(self):
    # Note [Temporarily set optimizer to Adam]
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # The TestLRScheduler object carries around an SGD optimizer to avoid having to
    # instantiate one for every test. This gets in the way for our very specific case
    # in which we need to use Adam (or really any optimizer that doesn't use momentum)
    # in order to test that the momentum bug in CyclicLR is fixed (the bug is described
    # in more detail in https://github.com/pytorch/pytorch/issues/19003 ).
    old_opt = self.opt
    self.opt = optim.Adam(
        [{'params': self.net.conv1.parameters()}, {'params': self.net.conv2.parameters(), 'lr': 0.5}],
        lr=0.05)
    lr_target = [1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3]
    momentum_target = [None] * len(lr_target)  # Adam has no 'momentum' entry
    scheduler = CyclicLR(self.opt, base_lr=1, max_lr=5, step_size_up=4,
                         cycle_momentum=False, mode='triangular')
    self._test_cycle_lr(scheduler, [lr_target] * 2, [momentum_target] * 2, len(lr_target))
    self.opt = old_opt  # set optimizer back to SGD

def test_cycle_lr_cycle_momentum_fail_with_momentumless_optimizer(self):
    """cycle_momentum=True must be rejected for optimizers without momentum."""
    with self.assertRaises(ValueError):
        adam_opt = optim.Adam(self.net.parameters())
        CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=True)
def test_onecycle_lr_invalid_anneal_strategy(self):
    """An unknown anneal_strategy must be rejected."""
    with self.assertRaises(ValueError):
        OneCycleLR(self.opt, max_lr=1e-3, total_steps=10, anneal_strategy="CATS")

def test_onecycle_lr_invalid_pct_start(self):
    """pct_start outside [0, 1] must be rejected."""
    with self.assertRaises(ValueError):
        OneCycleLR(self.opt, max_lr=1e-3, total_steps=10, pct_start=1.1)

def test_onecycle_lr_cannot_calculate_total_steps(self):
    """Omitting total_steps (and epochs/steps_per_epoch) must be rejected."""
    with self.assertRaises(ValueError):
        OneCycleLR(self.opt, max_lr=1e-3)
def test_onecycle_lr_linear_annealing(self):
    """Two-phase OneCycleLR with linear annealing; momentum moves inversely."""
    lr_target = [1, 13, 25, 21.5, 18, 14.5, 11, 7.5, 4, 0.5]
    momentum_target = [22, 11.5, 1, 4, 7, 10, 13, 16, 19, 22]
    scheduler = OneCycleLR(self.opt, max_lr=25, final_div_factor=2, base_momentum=1, max_momentum=22,
                           total_steps=10, anneal_strategy='linear')
    self._test_cycle_lr(scheduler, [lr_target] * 2, [momentum_target] * 2, 10)

def test_onecycle_lr_linear_annealing_three_phases(self):
    """three_phase=True adds a final phase annealing down to lr/final_div."""
    lr_target = [1, 9, 17, 25, 17, 9, 1, 0.75, 0.5, 0.25]
    momentum_target = [22, 15, 8, 1, 8, 15, 22, 22, 22, 22]
    scheduler = OneCycleLR(self.opt, max_lr=25, div_factor=25,
                           base_momentum=1, max_momentum=22,
                           total_steps=10, anneal_strategy='linear',
                           pct_start=0.4, final_div_factor=4,
                           three_phase=True)
    self._test_cycle_lr(scheduler, [lr_target] * 2, [momentum_target] * 2, 10)

def test_onecycle_lr_cosine_annealing(self):
    """Default (cosine) annealing: expected values via the cosine formula."""
    def annealing_cos(start, end, pct):
        # Cosine interpolation between start and end at fraction pct.
        cos_out = math.cos(math.pi * pct) + 1
        return end + (start - end) / 2.0 * cos_out
    lr_target = [1, 13, 25, annealing_cos(25, 0.5, 1 / 7.0), annealing_cos(25, 0.5, 2 / 7.0),
                 annealing_cos(25, 0.5, 3 / 7.0), annealing_cos(25, 0.5, 4 / 7.0), annealing_cos(25, 0.5, 5 / 7.0),
                 annealing_cos(25, 0.5, 6 / 7.0), 0.5]
    momentum_target = [22, 11.5, 1, annealing_cos(1, 22, 1 / 7.0), annealing_cos(1, 22, 2 / 7.0),
                       annealing_cos(1, 22, 3 / 7.0), annealing_cos(1, 22, 4 / 7.0), annealing_cos(1, 22, 5 / 7.0),
                       annealing_cos(1, 22, 6 / 7.0), 22]
    scheduler = OneCycleLR(self.opt, max_lr=25, final_div_factor=2, base_momentum=1, max_momentum=22,
                           total_steps=10)
    self._test_cycle_lr(scheduler, [lr_target] * 2, [momentum_target] * 2, 10)
def test_cycle_lr_with_adam(self):
    """OneCycleLR cycles Adam's beta1 (checked via use_beta1=True)."""
    # See Note [Temporarily set optimizer to Adam]: swap in Adam, restore after.
    old_opt = self.opt
    self.opt = optim.Adam(
        [{'params': self.net.conv1.parameters()}, {'params': self.net.conv2.parameters(), 'lr': 0.5}],
        lr=0.05)
    lr_target = [1, 13, 25, 21.5, 18, 14.5, 11, 7.5, 4, 0.5]
    momentum_target = [22, 11.5, 1, 4, 7, 10, 13, 16, 19, 22]
    scheduler = OneCycleLR(self.opt, max_lr=25, final_div_factor=2, base_momentum=1, max_momentum=22,
                           total_steps=10, anneal_strategy='linear')
    self._test_cycle_lr(scheduler, [lr_target] * 2, [momentum_target] * 2, 10, use_beta1=True)
    self.opt = old_opt  # set optimizer back to SGD
def test_lambda_lr(self):
    """LambdaLR: each group's lr is base_lr times its lambda(epoch)."""
    epochs = 10
    self.opt.param_groups[0]['lr'] = 0.05
    self.opt.param_groups[1]['lr'] = 0.4
    targets = [
        [0.05 * (0.9 ** e) for e in range(epochs)],
        [0.4 * (0.8 ** e) for e in range(epochs)],
    ]
    scheduler = LambdaLR(self.opt,
                         lr_lambda=[lambda e1: 0.9 ** e1, lambda e2: 0.8 ** e2])
    self._test(scheduler, targets, epochs)

def test_multiplicative_lr(self):
    """MultiplicativeLR: lr is multiplied by lambda(epoch) every epoch."""
    epochs = 10
    self.opt.param_groups[0]['lr'] = 0.05
    self.opt.param_groups[1]['lr'] = 0.4
    targets = [
        [0.05 * (0.9 ** e) for e in range(epochs)],
        [0.4 * (0.8 ** e) for e in range(epochs)],
    ]
    scheduler = MultiplicativeLR(self.opt, lr_lambda=[lambda e1: 0.9, lambda e2: 0.8])
    self._test(scheduler, targets, epochs)
@parametrize("T_mult", [1, 2, 4])
def test_CosineAnnealingWarmRestarts_lr1(self, T_mult):
    """Warm restarts with whole-epoch stepping, one case per T_mult."""
    iters = 100
    eta_min = 1e-10
    T_i = 10   # current restart period
    T_cur = 0  # position inside the current period
    targets = [[0.05], [0.5]]
    scheduler = CosineAnnealingWarmRestarts(self.opt, T_0=T_i, T_mult=T_mult, eta_min=eta_min)
    for _ in range(1, iters, 1):
        T_cur += 1
        if T_cur >= T_i:
            # Restart: wrap the counter and grow the period by T_mult.
            T_cur = T_cur - T_i
            T_i = int(T_mult) * T_i
        targets[0].append(eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2)
        targets[1].append(eta_min + (0.5 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2)
    self._test(scheduler, targets, iters)
def test_CosineAnnealingWarmRestarts_lr2(self):
iters = 30
eta_min = 1e-10
T_mults = [1, 2, 4]
for T_mult in T_mults:
T_i = 10
T_cur = 0
targets = [[0.05], [0.5]]
scheduler = CosineAnnealingWarmRestarts(self.opt, T_0=T_i, T_mult=T_mult, eta_min=eta_min)
for _ in torch.arange(0.1, iters, 0.1):
T_cur = round(T_cur + 0.1, 1)
if T_cur >= T_i:
T_cur = T_cur - T_i
T_i = int(T_mult) * T_i
targets[0] += [eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2]
targets[1] += [eta_min + (0.5 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2]
self._test_CosineAnnealingWarmRestarts(scheduler, targets, iters)
    def test_CosineAnnealingWarmRestarts_lr3(self):
        """Non-monotonic (interleaved) epoch sequences for CosineAnnealingWarmRestarts.

        The three parallel tables below give, for each T_mult, the epochs to step
        to and the corresponding hand-computed (T_cur, T_i) state used to build
        the expected lr targets.
        """
        epochs_for_T_mults = [[0, 1, 2, 3, 4, 5, 12, 27, 3, 4, 5, 6, 13],
                              [0, 1, 2, 3, 4, 5, 25, 32, 33, 34, 80, 81, 3],
                              [0, 0.1, 0.2, 0.3, 1.3, 2.3, 17.5, 18.5, 19.5, 29.5, 30.5, 31.5, 50]]
        T_curs_for_T_mults = [[1, 2, 3, 4, 5, 2, 7, 3, 4, 5, 6, 3],
                              [1, 2, 3, 4, 5, 15, 2, 3, 4, 10, 11, 3],
                              [0.1, 0.2, 0.3, 1.3, 2.3, 7.5, 8.5, 9.5, 19.5, 20.5, 21.5, 10]]
        T_is_for_T_mults = [[10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10],
                            [10, 10, 10, 10, 10, 20, 40, 40, 40, 80, 80, 10],
                            [10, 10, 10, 10, 10, 30, 30, 30, 30, 30, 30, 90]]
        eta_min = 1e-10
        T_mults = [1, 2, 3]
        for epochs, T_mult, T_curs, T_is in zip(epochs_for_T_mults, T_mults, T_curs_for_T_mults, T_is_for_T_mults):
            targets = [[0.05], [0.5]]
            scheduler = CosineAnnealingWarmRestarts(self.opt, T_0=10, T_mult=T_mult, eta_min=eta_min)
            for T_cur, T_i in zip(T_curs, T_is):
                targets[0] += [eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2]
                targets[1] += [eta_min + (0.5 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2]
            self._test_interleaved_CosineAnnealingWarmRestarts(scheduler, targets, epochs)
def test_swalr_no_anneal(self):
epochs, swa_start, swa_lr = 10, 5, 0.01
initial_lrs = [group['lr'] for group in self.opt.param_groups]
targets = [[lr] * (swa_start + 1) + [swa_lr] * (epochs - swa_start - 1)
for lr in initial_lrs]
swa_scheduler = SWALR(self.opt, anneal_epochs=1, swa_lr=swa_lr)
self._test_swalr(swa_scheduler, None, targets, swa_start, epochs)
    def test_swalr_cosine_anneal_after_multiplicative(self):
        """SWALR with cosine annealing following a MultiplicativeLR warm-up."""
        # same swa_lr for different param_groups
        epochs, swa_start, swa_lr, anneal_epochs = 15, 5, 0.01, 5
        mult_factor = 0.9
        scheduler = MultiplicativeLR(self.opt, lr_lambda=lambda epoch: mult_factor)
        swa_scheduler = SWALR(self.opt, anneal_epochs=anneal_epochs, swa_lr=swa_lr)

        def anneal_coef(t):
            # weight on the pre-SWA lr: cosine decay from 1 to 0 over anneal_epochs
            if t + 1 >= anneal_epochs:
                return 0.
            return (1 + math.cos(math.pi * (t + 1) / anneal_epochs)) / 2
        initial_lrs = [group['lr'] for group in self.opt.param_groups]
        targets_before_swa = [[lr * mult_factor**i for i in range(swa_start + 1)]
                              for lr in initial_lrs]
        swa_epochs = epochs - swa_start - 1
        # expected lr is a convex combination of the last pre-SWA lr and swa_lr
        targets = [lrs + [lrs[-1] * anneal_coef(t) + swa_lr * (1 - anneal_coef(t)) for t in range(swa_epochs)]
                   for lrs in targets_before_swa]
        self._test_swalr(swa_scheduler, scheduler, targets, swa_start, epochs)
    def test_swalr_linear_anneal_after_multiplicative(self):
        """SWALR with linear annealing and per-group swa_lrs after MultiplicativeLR."""
        # separate swa_lr for different param_groups
        epochs, swa_start, swa_lrs, anneal_epochs = 15, 5, [0.01, 0.02], 4
        mult_factor = 0.9
        scheduler = MultiplicativeLR(self.opt, lr_lambda=lambda epoch: mult_factor)
        swa_scheduler = SWALR(self.opt, anneal_epochs=anneal_epochs,
                              anneal_strategy="linear", swa_lr=swa_lrs)

        def anneal_coef(t):
            # weight on the pre-SWA lr: linear decay from 1 to 0 over anneal_epochs
            if t + 1 >= anneal_epochs:
                return 0.
            return 1 - (t + 1) / anneal_epochs
        initial_lrs = [group['lr'] for group in self.opt.param_groups]
        targets_before_swa = [[lr * mult_factor**i for i in range(swa_start + 1)]
                              for lr in initial_lrs]
        swa_epochs = epochs - swa_start - 1
        # each group anneals towards its own swa_lr
        targets = [lrs + [lrs[-1] * anneal_coef(t) + swa_lr * (1 - anneal_coef(t)) for t in range(swa_epochs)]
                   for lrs, swa_lr in zip(targets_before_swa, swa_lrs)]
        self._test_swalr(swa_scheduler, scheduler, targets, swa_start, epochs)
    def _test_swalr(self, swa_scheduler, scheduler, targets, swa_start, epochs):
        """Drive `scheduler` until swa_start, then `swa_scheduler`, checking lrs first.

        The lr is asserted *before* stepping each epoch, so targets[i][0] is the
        initial lr.  `scheduler` may be None when there is no pre-SWA schedule.
        """
        for epoch in range(epochs):
            for param_group, target in zip(self.opt.param_groups, targets):
                self.assertEqual(target[epoch], param_group['lr'],
                                 msg='LR is wrong in epoch {}: expected {}, got {}'.format(
                                     epoch, target[epoch], param_group['lr']), atol=1e-5, rtol=0)
            if epoch >= swa_start:
                self.opt.step()
                swa_scheduler.step()
            elif scheduler is not None:
                self.opt.step()
                scheduler.step()
def test_swalr_hypers(self):
# Test that SWALR raises errors for incorrect hyper-parameters
with self.assertRaisesRegex(ValueError, "anneal_strategy must"):
swa_scheduler = SWALR(self.opt, anneal_strategy="exponential", swa_lr=1.)
with self.assertRaisesRegex(ValueError, "anneal_epochs must"):
swa_scheduler = SWALR(self.opt, anneal_epochs=-1, swa_lr=1.)
with self.assertRaisesRegex(ValueError, "anneal_epochs must"):
swa_scheduler = SWALR(self.opt, anneal_epochs=1.7, swa_lr=1.)
with self.assertRaisesRegex(ValueError, "swa_lr must"):
swa_scheduler = SWALR(self.opt, swa_lr=[1., 0.1, 0.01])
def test_step_lr_state_dict(self):
self._check_scheduler_state_dict(
lambda: StepLR(self.opt, gamma=0.1, step_size=3),
lambda: StepLR(self.opt, gamma=0.01 / 2, step_size=1))
def test_multi_step_lr_state_dict(self):
self._check_scheduler_state_dict(
lambda: MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9]),
lambda: MultiStepLR(self.opt, gamma=0.01, milestones=[1, 4, 6]))
def test_exp_step_lr_state_dict(self):
self._check_scheduler_state_dict(
lambda: ExponentialLR(self.opt, gamma=0.1),
lambda: ExponentialLR(self.opt, gamma=0.01))
def test_cosine_lr_state_dict(self):
epochs = 10
eta_min = 1e-10
self._check_scheduler_state_dict(
lambda: CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min),
lambda: CosineAnnealingLR(self.opt, T_max=epochs // 2, eta_min=eta_min / 2),
epochs=epochs)
def test_reduce_lr_on_plateau_state_dict(self):
scheduler = ReduceLROnPlateau(self.opt, mode='min', factor=0.1, patience=2)
for score in [1.0, 2.0, 3.0, 4.0, 3.0, 4.0, 5.0, 3.0, 2.0, 1.0]:
scheduler.step(score)
scheduler_copy = ReduceLROnPlateau(self.opt, mode='max', factor=0.5, patience=10)
scheduler_copy.load_state_dict(scheduler.state_dict())
for key in scheduler.__dict__.keys():
if key not in {'optimizer', 'is_better'}:
self.assertEqual(scheduler.__dict__[key], scheduler_copy.__dict__[key])
def test_lambda_lr_state_dict_fn(self):
scheduler = LambdaLR(self.opt, lr_lambda=lambda x: x)
state = scheduler.state_dict()
self.assertIsNone(state['lr_lambdas'][0])
scheduler_copy = LambdaLR(self.opt, lr_lambda=lambda x: x)
scheduler_copy.load_state_dict(state)
for key in scheduler.__dict__.keys():
if key not in {'optimizer', 'lr_lambdas'}:
self.assertEqual(scheduler.__dict__[key], scheduler_copy.__dict__[key])
def test_lambda_lr_state_dict_obj(self):
scheduler = LambdaLR(self.opt, lr_lambda=LambdaLRTestObject(10))
state = scheduler.state_dict()
self.assertIsNotNone(state['lr_lambdas'][0])
scheduler_copy = LambdaLR(self.opt, lr_lambda=LambdaLRTestObject(-1))
scheduler_copy.load_state_dict(state)
for key in scheduler.__dict__.keys():
if key not in {'optimizer'}:
self.assertEqual(scheduler.__dict__[key], scheduler_copy.__dict__[key])
def test_CosineAnnealingWarmRestarts_lr_state_dict(self):
self._check_scheduler_state_dict(
lambda: CosineAnnealingWarmRestarts(self.opt, T_0=10, T_mult=2),
lambda: CosineAnnealingWarmRestarts(self.opt, T_0=100))
def test_swa_lr_state_dict(self):
self._check_scheduler_state_dict(
lambda: SWALR(self.opt, anneal_epochs=3, swa_lr=0.5),
lambda: SWALR(self.opt, anneal_epochs=10, anneal_strategy="linear", swa_lr=5.))
def _check_scheduler_state_dict(self, constr, constr2, epochs=10):
scheduler = constr()
for _ in range(epochs):
scheduler.optimizer.step()
scheduler.step()
scheduler_copy = constr2()
scheduler_copy.load_state_dict(scheduler.state_dict())
for key in scheduler.__dict__.keys():
if key != 'optimizer':
self.assertEqual(scheduler.__dict__[key], scheduler_copy.__dict__[key])
self.assertEqual(scheduler.get_last_lr(), scheduler_copy.get_last_lr())
def _test_get_last_lr(self, schedulers, targets, epochs=10):
if isinstance(schedulers, _LRScheduler):
schedulers = [schedulers]
optimizers = {scheduler.optimizer for scheduler in schedulers}
for epoch in range(epochs):
result = [scheduler.get_last_lr() for scheduler in schedulers]
[optimizer.step() for optimizer in optimizers]
[scheduler.step() for scheduler in schedulers]
target = [[t[epoch] for t in targets]] * len(schedulers)
for t, r in zip(target, result):
self.assertEqual(target, result,
msg='LR is wrong in epoch {}: expected {}, got {}'.format(
epoch, t, r), atol=1e-5, rtol=0)
    def _test_with_epoch(self, schedulers, targets, epochs=10):
        """Like _test, but calls scheduler.step(epoch) and expects the epoch-arg
        deprecation warning (one per scheduler) on every step."""
        if isinstance(schedulers, _LRScheduler):
            schedulers = [schedulers]
        optimizers = {scheduler.optimizer for scheduler in schedulers}
        for epoch in range(epochs):
            [optimizer.step() for optimizer in optimizers]
            with warnings.catch_warnings(record=True) as w:
                [scheduler.step(epoch) for scheduler in schedulers]  # step before assert: skip initial lr
                self._check_warning_is_epoch_deprecation_warning(w, num_warnings=len(schedulers))
            for param_group, target in zip(self.opt.param_groups, targets):
                self.assertEqual(target[epoch], param_group['lr'],
                                 msg='LR is wrong in epoch {}: expected {}, got {}'.format(
                                     epoch, target[epoch], param_group['lr']), atol=1e-5, rtol=0)
def _test(self, schedulers, targets, epochs=10):
if isinstance(schedulers, _LRScheduler):
schedulers = [schedulers]
for epoch in range(epochs):
for param_group, target in zip(self.opt.param_groups, targets):
self.assertEqual(target[epoch], param_group['lr'],
msg='LR is wrong in epoch {}: expected {}, got {}'.format(
epoch, target[epoch], param_group['lr']), atol=1e-5, rtol=0)
[scheduler.step() for scheduler in schedulers]
def _test_CosineAnnealingWarmRestarts(self, scheduler, targets, epochs=10):
for index, epoch in enumerate(torch.arange(0, epochs, 0.1)):
epoch = round(epoch.item(), 1)
scheduler.step(epoch)
for param_group, target in zip(self.opt.param_groups, targets):
self.assertEqual(target[index], param_group['lr'],
msg='LR is wrong in epoch {}: expected {}, got {}'.format(
epoch, target[index], param_group['lr']), atol=1e-5, rtol=0)
def _test_interleaved_CosineAnnealingWarmRestarts(self, scheduler, targets, epochs):
for index, epoch in enumerate(epochs):
scheduler.step(epoch)
for param_group, target in zip(self.opt.param_groups, targets):
self.assertEqual(target[index], param_group['lr'],
msg='LR is wrong in epoch {}: expected {}, got {}'.format(
epoch, target[index], param_group['lr']), atol=1e-5, rtol=0)
    def _test_against_closed_form(self, scheduler, closed_form_scheduler, epochs=10):
        """Compare a recursive scheduler against its closed-form counterpart.

        First records the closed-form lrs (using the deprecated epoch argument),
        then resets the fixture via setUp() and replays the recursive scheduler,
        asserting it reproduces the recorded values.
        """
        self.setUp()  # fresh optimizer state for the closed-form pass
        targets = []
        for epoch in range(epochs):
            closed_form_scheduler.optimizer.step()
            with warnings.catch_warnings(record=True) as w:
                closed_form_scheduler.step(epoch)
                self._check_warning_is_epoch_deprecation_warning(w)
            targets.append([group['lr'] for group in self.opt.param_groups])
        self.setUp()  # reset again before the recursive pass
        for epoch in range(epochs):
            self.opt.step()
            scheduler.step()
            for i, param_group in enumerate(self.opt.param_groups):
                self.assertEqual(targets[epoch][i], param_group['lr'],
                                 msg='LR is wrong in epoch {}: expected {}, got {}'.format(
                                     epoch, targets[epoch][i], param_group['lr']), atol=1e-5, rtol=0)
    def _test_reduce_lr_on_plateau(self, schedulers, targets, metrics, epochs=10, verbose=False):
        """Step a mix of plateau and regular schedulers, feeding `metrics` to the
        ReduceLROnPlateau instances, and check lrs after stepping each epoch."""
        if isinstance(schedulers, _LRScheduler) or isinstance(schedulers, ReduceLROnPlateau):
            schedulers = [schedulers]
        for epoch in range(epochs):
            self.opt.step()
            for scheduler in schedulers:
                if isinstance(scheduler, ReduceLROnPlateau):
                    # plateau schedulers consume the epoch's metric value
                    scheduler.step(metrics[epoch])
                else:
                    scheduler.step()
            if verbose:
                print('epoch{}:\tlr={}'.format(epoch, self.opt.param_groups[0]['lr']))
            for param_group, target in zip(self.opt.param_groups, targets):
                self.assertEqual(target[epoch], param_group['lr'],
                                 msg='LR is wrong in epoch {}: expected {}, got {}'.format(
                                     epoch, target[epoch], param_group['lr']), atol=1e-5, rtol=0)
    def _test_cycle_lr(self, scheduler, lr_targets, momentum_targets, batch_iterations, verbose=False, use_beta1=False):
        """Check lr and momentum (or Adam beta1 when use_beta1) per batch.

        Values are asserted *before* stepping; `momentum_targets` is checked
        against `betas[0]` when use_beta1 and the group has betas, against
        `momentum` when the group has momentum, and skipped otherwise.
        """
        for batch_num in range(batch_iterations):
            if verbose:
                if 'momentum' in self.opt.param_groups[0].keys():
                    print('batch{}:\tlr={},momentum={}'.format(batch_num, self.opt.param_groups[0]['lr'],
                                                               self.opt.param_groups[0]['momentum']))
                elif use_beta1 and 'betas' in self.opt.param_groups[0].keys():
                    print('batch{}:\tlr={},beta1={}'.format(batch_num, self.opt.param_groups[0]['lr'],
                                                            self.opt.param_groups[0]['betas'][0]))
                else:
                    print('batch{}:\tlr={}'.format(batch_num, self.opt.param_groups[0]['lr']))
            for param_group, lr_target, momentum_target in zip(self.opt.param_groups, lr_targets, momentum_targets):
                self.assertEqual(
                    lr_target[batch_num], param_group['lr'],
                    msg='LR is wrong in batch_num {}: expected {}, got {}'.format(
                        batch_num, lr_target[batch_num], param_group['lr']), atol=1e-5, rtol=0)
                if use_beta1 and 'betas' in param_group.keys():
                    self.assertEqual(
                        momentum_target[batch_num], param_group['betas'][0],
                        msg='Beta1 is wrong in batch_num {}: expected {}, got {}'.format(
                            batch_num, momentum_target[batch_num], param_group['betas'][0]), atol=1e-5, rtol=0)
                elif 'momentum' in param_group.keys():
                    self.assertEqual(
                        momentum_target[batch_num], param_group['momentum'],
                        msg='Momentum is wrong in batch_num {}: expected {}, got {}'.format(
                            batch_num, momentum_target[batch_num], param_group['momentum']), atol=1e-5, rtol=0)
            self.opt.step()
            scheduler.step()
def test_cosine_then_cyclic(self):
# https://github.com/pytorch/pytorch/issues/21965
max_lr = 0.3
base_lr = 0.1
optim_lr = 0.5
model = torch.nn.Linear(2, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=optim_lr)
lr_scheduler_1 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=20, eta_min=0.1)
lr_scheduler_2 = torch.optim.lr_scheduler.CyclicLR(
optimizer, base_lr=base_lr, max_lr=max_lr, step_size_up=1, step_size_down=3
)
for i in range(40):
optimizer.step()
if i <= lr_scheduler_1.T_max:
lr_scheduler_1.step()
else:
lr_scheduler_2.step()
last_lr = optimizer.param_groups[0]["lr"]
self.assertLessEqual(last_lr, max_lr)
class SWATestDNN(torch.nn.Module):
    """Small fully-connected network with BatchNorm1d, used by the SWA tests."""

    def __init__(self, input_features):
        super(SWATestDNN, self).__init__()
        self.n_features = 100
        self.fc1 = torch.nn.Linear(input_features, self.n_features)
        self.bn = torch.nn.BatchNorm1d(self.n_features)

    def compute_preactivation(self, x):
        """Return the fc1 output before batch normalization."""
        return self.fc1(x)

    def forward(self, x):
        return self.bn(self.fc1(x))
class SWATestCNN(torch.nn.Module):
    """Small convolutional network with BatchNorm2d, used by the SWA tests."""

    def __init__(self, input_channels):
        super(SWATestCNN, self).__init__()
        self.n_features = 10
        self.conv1 = torch.nn.Conv2d(input_channels, self.n_features, kernel_size=3, padding=1)
        self.bn = torch.nn.BatchNorm2d(self.n_features, momentum=0.3)

    def compute_preactivation(self, x):
        """Return the conv1 output before batch normalization."""
        return self.conv1(x)

    def forward(self, x):
        return self.bn(self.conv1(x))
class TestSWAUtils(TestCase):
    """Tests for SWA utilities: AveragedModel parameter averaging and update_bn."""

    def _test_averaged_model(self, net_device, swa_device):
        """AveragedModel must track the equal-weight running mean of the
        source model's parameters and live on `swa_device`."""
        dnn = torch.nn.Sequential(
            torch.nn.Conv2d(1, 5, kernel_size=3),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2),
            torch.nn.BatchNorm2d(5, momentum=0.3),
            torch.nn.Conv2d(5, 2, kernel_size=3),
            torch.nn.ReLU(),
            torch.nn.Linear(5, 5),
            torch.nn.ReLU(),
            torch.nn.Linear(5, 10)
        ).to(net_device)
        averaged_dnn = AveragedModel(dnn, device=swa_device)
        averaged_params = [torch.zeros_like(param) for param in dnn.parameters()]
        n_updates = 10
        for i in range(n_updates):
            for p, p_avg in zip(dnn.parameters(), averaged_params):
                p.detach().add_(torch.randn_like(p))
                # accumulate the equal-weight average manually for comparison
                p_avg += p.detach() / n_updates
            averaged_dnn.update_parameters(dnn)
        for p_avg, p_swa in zip(averaged_params, averaged_dnn.parameters()):
            self.assertEqual(p_avg, p_swa)
            # Check that AveragedModel is on the correct device
            self.assertTrue(p_swa.device == swa_device)
            self.assertTrue(p.device == net_device)
        self.assertTrue(averaged_dnn.n_averaged.device == swa_device)

    def test_averaged_model_all_devices(self):
        """Run _test_averaged_model for every available net/swa device combination."""
        cpu = torch.device("cpu")
        self._test_averaged_model(cpu, cpu)
        if torch.cuda.is_available():
            cuda = torch.device(0)
            self._test_averaged_model(cuda, cpu)
            self._test_averaged_model(cpu, cuda)
            self._test_averaged_model(cuda, cuda)

    def test_averaged_model_mixed_device(self):
        """Averaging must work when the source model's layers live on different devices."""
        if not torch.cuda.is_available():
            return
        dnn = torch.nn.Sequential(
            torch.nn.Conv2d(1, 5, kernel_size=3),
            torch.nn.Linear(5, 10)
        )
        dnn[0].cuda()
        dnn[1].cpu()
        averaged_dnn = AveragedModel(dnn)
        averaged_params = [torch.zeros_like(param) for param in dnn.parameters()]
        n_updates = 10
        for i in range(n_updates):
            for p, p_avg in zip(dnn.parameters(), averaged_params):
                p.detach().add_(torch.randn_like(p))
                p_avg += p.detach() / n_updates
            averaged_dnn.update_parameters(dnn)
        for p_avg, p_swa in zip(averaged_params, averaged_dnn.parameters()):
            self.assertEqual(p_avg, p_swa)
            # Check that AveragedModel is on the correct device
            self.assertTrue(p_avg.device == p_swa.device)

    def test_averaged_model_state_dict(self):
        """AveragedModel parameters and n_averaged must survive a state_dict round-trip."""
        dnn = torch.nn.Sequential(
            torch.nn.Conv2d(1, 5, kernel_size=3),
            torch.nn.Linear(5, 10)
        )
        averaged_dnn = AveragedModel(dnn)
        averaged_dnn2 = AveragedModel(dnn)
        n_updates = 10
        for i in range(n_updates):
            for p in dnn.parameters():
                p.detach().add_(torch.randn_like(p))
            averaged_dnn.update_parameters(dnn)
        averaged_dnn2.load_state_dict(averaged_dnn.state_dict())
        for p_swa, p_swa2 in zip(averaged_dnn.parameters(), averaged_dnn2.parameters()):
            self.assertEqual(p_swa, p_swa2)
        self.assertTrue(averaged_dnn.n_averaged == averaged_dnn2.n_averaged)

    def test_averaged_model_exponential(self):
        # Test AveragedModel with EMA as avg_fn
        dnn = torch.nn.Sequential(
            torch.nn.Conv2d(1, 5, kernel_size=3),
            torch.nn.Linear(5, 10)
        )
        alpha = 0.9

        def avg_fn(p_avg, p, n_avg):
            # exponential moving average update
            return alpha * p_avg + (1 - alpha) * p
        averaged_dnn = AveragedModel(dnn, avg_fn=avg_fn)
        averaged_params = [torch.zeros_like(param) for param in dnn.parameters()]
        n_updates = 10
        for i in range(n_updates):
            updated_averaged_params = []
            for p, p_avg in zip(dnn.parameters(), averaged_params):
                p.detach().add_(torch.randn_like(p))
                # the first update copies the parameters; later ones apply EMA
                if i == 0:
                    updated_averaged_params.append(p.clone())
                else:
                    updated_averaged_params.append((p_avg * alpha +
                                                    p * (1 - alpha)).clone())
            averaged_dnn.update_parameters(dnn)
            averaged_params = updated_averaged_params
        for p_avg, p_swa in zip(averaged_params, averaged_dnn.parameters()):
            self.assertEqual(p_avg, p_swa)

    def test_averaged_model_exponential_buffers(self):
        # Test AveragedModel with EMA as avg_fn and use_buffers as True.
        dnn = torch.nn.Sequential(
            torch.nn.Conv2d(1, 5, kernel_size=3),
            torch.nn.BatchNorm2d(5, momentum=0.3),
            torch.nn.Linear(5, 10)
        )
        alpha = 0.9

        def avg_fn(p_avg, p, n_avg):
            return alpha * p_avg + (1 - alpha) * p
        averaged_dnn = AveragedModel(dnn, avg_fn=avg_fn, use_buffers=True)
        dnn_params = itertools.chain(dnn.parameters(), dnn.buffers())
        # scalar buffers (e.g. num_batches_tracked) are excluded from averaging
        averaged_params = [torch.zeros_like(param) for param in dnn_params
                           if param.size() != torch.Size([])]
        n_updates = 10
        for i in range(n_updates):
            updated_averaged_params = []
            for p, p_avg in zip(dnn_params, averaged_params):
                if p.size() == torch.Size([]):
                    continue
                p.detach().add_(torch.randn_like(p))
                if i == 0:
                    updated_averaged_params.append(p.clone())
                else:
                    updated_averaged_params.append((p_avg * alpha +
                                                    p * (1 - alpha)).clone())
            averaged_dnn.update_parameters(dnn)
            averaged_params = updated_averaged_params
        for p_avg, p_swa in zip(
                averaged_params, itertools.chain(averaged_dnn.module.parameters(), averaged_dnn.module.buffers())):
            self.assertEqual(p_avg, p_swa)

    def _test_update_bn(self, dnn, dl_x, dl_xy, cuda):
        """Check update_bn reproduces the dataset mean/var of the BN layer's input.

        Manually accumulates first and second moments of the pre-activations,
        then verifies update_bn recovers them with loaders that yield either
        bare inputs (dl_x) or (input, target) tuples (dl_xy).
        """
        preactivation_sum = torch.zeros(dnn.n_features)
        preactivation_squared_sum = torch.zeros(dnn.n_features)
        if cuda:
            preactivation_sum = preactivation_sum.cuda()
            preactivation_squared_sum = preactivation_squared_sum.cuda()
        total_num = 0
        for x in dl_x:
            x = x[0]
            if cuda:
                x = x.cuda()
            dnn.forward(x)
            preactivations = dnn.compute_preactivation(x)
            if len(preactivations.shape) == 4:
                # flatten NCHW to (N*H*W, C) so moments are per-channel
                preactivations = preactivations.transpose(1, 3)
            preactivations = preactivations.contiguous().view(-1, dnn.n_features)
            total_num += preactivations.shape[0]
            preactivation_sum += torch.sum(preactivations, dim=0)
            preactivation_squared_sum += torch.sum(preactivations**2, dim=0)
        preactivation_mean = preactivation_sum / total_num
        preactivation_var = preactivation_squared_sum / total_num
        preactivation_var = preactivation_var - preactivation_mean**2
        update_bn(dl_xy, dnn, device=x.device)
        self.assertEqual(preactivation_mean, dnn.bn.running_mean)
        self.assertEqual(preactivation_var, dnn.bn.running_var, atol=1e-1, rtol=0)

        def _reset_bn(module):
            if issubclass(module.__class__,
                          torch.nn.modules.batchnorm._BatchNorm):
                module.running_mean = torch.zeros_like(module.running_mean)
                module.running_var = torch.ones_like(module.running_var)
        # reset batch norm and run update_bn again
        dnn.apply(_reset_bn)
        update_bn(dl_xy, dnn, device=x.device)
        self.assertEqual(preactivation_mean, dnn.bn.running_mean)
        self.assertEqual(preactivation_var, dnn.bn.running_var, atol=1e-1, rtol=0)
        # using the dl_x loader instead of dl_xy
        dnn.apply(_reset_bn)
        update_bn(dl_x, dnn, device=x.device)
        self.assertEqual(preactivation_mean, dnn.bn.running_mean)
        self.assertEqual(preactivation_var, dnn.bn.running_var, atol=1e-1, rtol=0)

    def test_update_bn_dnn(self):
        # Test update_bn for a fully-connected network with BatchNorm1d
        objects, input_features = 100, 5
        x = torch.rand(objects, input_features)
        y = torch.rand(objects)
        ds_x = torch.utils.data.TensorDataset(x)
        ds_xy = torch.utils.data.TensorDataset(x, y)
        dl_x = torch.utils.data.DataLoader(ds_x, batch_size=5, shuffle=True)
        dl_xy = torch.utils.data.DataLoader(ds_xy, batch_size=5, shuffle=True)
        dnn = SWATestDNN(input_features=input_features)
        dnn.train()
        self._test_update_bn(dnn, dl_x, dl_xy, False)
        if torch.cuda.is_available():
            dnn = SWATestDNN(input_features=input_features)
            dnn.train()
            self._test_update_bn(dnn.cuda(), dl_x, dl_xy, True)
        # update_bn must leave the model in training mode
        self.assertTrue(dnn.training)

    def test_update_bn_cnn(self):
        # Test update_bn for convolutional network and BatchNorm2d
        objects = 100
        input_channels = 3
        height, width = 5, 5
        x = torch.rand(objects, input_channels, height, width)
        y = torch.rand(objects)
        ds_x = torch.utils.data.TensorDataset(x)
        ds_xy = torch.utils.data.TensorDataset(x, y)
        dl_x = torch.utils.data.DataLoader(ds_x, batch_size=5, shuffle=True)
        dl_xy = torch.utils.data.DataLoader(ds_xy, batch_size=5, shuffle=True)
        dnn = SWATestCNN(input_channels=input_channels)
        dnn.train()
        self._test_update_bn(dnn, dl_x, dl_xy, False)
        if torch.cuda.is_available():
            dnn = SWATestCNN(input_channels=input_channels)
            dnn.train()
            self._test_update_bn(dnn.cuda(), dl_x, dl_xy, True)
        self.assertTrue(dnn.training)

    def test_bn_update_eval_momentum(self):
        # check that update_bn preserves eval mode
        objects = 100
        input_channels = 3
        height, width = 5, 5
        x = torch.rand(objects, input_channels, height, width)
        ds_x = torch.utils.data.TensorDataset(x)
        dl_x = torch.utils.data.DataLoader(ds_x, batch_size=5, shuffle=True)
        dnn = SWATestCNN(input_channels=input_channels)
        dnn.eval()
        update_bn(dl_x, dnn)
        self.assertFalse(dnn.training)
        # check that momentum is preserved
        self.assertEqual(dnn.bn.momentum, 0.3)
# Materialize concrete test methods for the @parametrize-decorated tests above.
instantiate_parametrized_tests(TestLRScheduler)
def _diff_fn(p, grad, opt_differentiable_state, opt_class, kwargs, *ignored):
# Ignored is the list of values in `opt_differentiable_state`, we do this
# for `gradcheck` to correctly track the state tensors as function inputs
# because otherwise it can't unpack the values in the `opt_differentiable_state`
# dict
p = p.clone()
p.grad = grad
opt_differentiable_state = {k: v.clone() for k, v in opt_differentiable_state.items()}
opt = opt_class([p], **kwargs)
opt.state.update(opt_differentiable_state)
opt.step()
return (p,) + tuple(opt_differentiable_state.values())
class TestDifferentiableOptimizer(TestCase):
    """Checks that optimizer steps are differentiable w.r.t. params, grads, and state."""

    def test_sgd(self):
        kwargs = {'lr': 0.9, 'differentiable': True}
        p = torch.rand(10, requires_grad=True, dtype=torch.float64)
        grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
        mbuff = torch.rand(10, requires_grad=True, dtype=torch.float64)
        state = {'momentum_buffer': mbuff}
        # state values are passed twice so gradcheck registers them as inputs
        gradcheck(_diff_fn, (p, grad, state, torch.optim.SGD, kwargs, *state.values()))
# Standard PyTorch test entry point.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_optim.py |
# Owner(s): ["NNC"]
import operator
import os
import unittest
import contextlib
import math
import torch
import torch.nn.functional as F
from torch.testing import FileCheck
from typing import List
import warnings
# These settings must be configured before `common_utils` infers
# `GRAPH_EXECUTOR`.  This file **requires** them; setting them after
# `GRAPH_EXECUTOR` has been inferred erroneously runs or skips some tests.
torch._C._jit_set_profiling_executor(True)
# NOTE(review): despite the `_get_` prefix, this call appears intended to
# apply `True` as the new setting — confirm against the torch._C bindings.
torch._C._get_graph_executor_optimize(True)
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, \
enable_profiling_mode_for_profiling_tests, slowTest
from torch.testing._internal.jit_utils import JitTestCase, \
RUN_CUDA, RUN_CUDA_HALF, RUN_CUDA_MULTI_GPU, warmup_backward, set_fusion_group_inlining, \
clone_inputs, get_traced_sample_variant_pairs, TensorExprTestOptions, NoTracerWarnContextManager
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests, \
OpDTypes
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from textwrap import dedent
from itertools import product, permutations, combinations
from test_jit import backward_graph, get_lstm_inputs, get_milstm_inputs, \
LSTMCellC, LSTMCellF, LSTMCellS, MiLSTMCell
from jit.test_fuser_common import TestFuserCommon # noqa: F401
# Node kind produced for fused subgraphs; matched throughout these tests.
FUSION_GROUP = 'prim::TensorExprGroup'
# Whether this torch build reports LLVM support.
LLVM_ENABLED = torch._C._llvm_enabled()
# Node kinds that may appear in autodiff guard checks (see assertAllFused below).
autograd_check_set = {'aten::__is__', 'prim::AutogradAllNonZero', 'prim::AutogradAllZero', 'prim::ListConstruct'}
def strip_profiling_nodes(nodes):
    """Return `nodes` with profiling-executor bookkeeping nodes filtered out.

    Nodes whose kind() is a bailout-related opcode are dropped; everything
    else is kept in order.
    """
    # set literal instead of set([...]) — same contents, idiomatic form
    profiling_opcodes = {'prim::BailoutTemplate', 'prim::BailOut'}
    return [n for n in nodes if n.kind() not in profiling_opcodes]
def warmup_forward(f, *args, profiling_count=2):
    """Call `f(*args)` `profiling_count` times to trigger profiling/fusion.

    Returns the result of the last call, or None when profiling_count < 1
    (the original raised NameError on an unbound local in that case).
    """
    results = None
    for _ in range(profiling_count):
        results = f(*args)
    return results
@contextlib.contextmanager
def texpr_reductions_enabled():
    """Temporarily enable TE reduction fusion; restore the prior setting on exit."""
    previous = torch._C._jit_set_texpr_reductions_enabled(True)
    try:
        yield
    finally:
        torch._C._jit_set_texpr_reductions_enabled(previous)
@contextlib.contextmanager
def texpr_enable_strategy(strategy):
    """Temporarily install the given fusion strategy; restore the old one on exit."""
    previous = torch._C._jit_set_fusion_strategy(strategy)
    try:
        yield
    finally:
        torch._C._jit_set_fusion_strategy(previous)
@contextlib.contextmanager
def inline_fusion_groups():
    """Temporarily enable fusion-group inlining; restore the prior setting on exit."""
    previous = torch._C._debug_get_fusion_group_inlining()
    torch._C._debug_set_fusion_group_inlining(True)
    try:
        yield
    finally:
        torch._C._debug_set_fusion_group_inlining(previous)
class TestTEFuser(JitTestCase):
    def setUp(self):
        """Save global fuser state and configure TE fusion for these tests."""
        super().setUp()
        self.tensorexpr_options = TensorExprTestOptions()
        # note: `self.dynamic_shapes` is instantiated in a specialization of
        # this class defined below
        fusion_strategy = [("DYNAMIC", 20)] if self.dynamic_shapes else [("STATIC", 20)]
        self.old_fusion_strategy = torch._C._jit_set_fusion_strategy(fusion_strategy)
        self.devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
        # integer (and bool) dtypes exercised by dtype-parameterized tests
        self.int_dtypes = [
            torch.int8,
            torch.int16,
            torch.int32,
            torch.int64,
            torch.bool,
        ]
        # floating-point dtypes, including half and bfloat16
        self.fp_dtypes = [
            torch.float16,
            torch.float32,
            torch.float64,
            torch.bfloat16,
        ]
        self.dtypes = self.int_dtypes + self.fp_dtypes
    def tearDown(self):
        """Restore the fuser options and fusion strategy saved in setUp."""
        self.tensorexpr_options.restore()
        torch._C._jit_set_fusion_strategy(self.old_fusion_strategy)
        super().tearDown()
    def assertAllFused(self, graph, except_for=None):
        """Assert `graph` contains exactly one guard plus fused/allowed nodes.

        Accepted nodes: at most one guard (TypeCheck / RequiresGradCheck /
        TensorExprDynamicGuard, or the autodiff `aten::all` guard pattern),
        the `prim::If` immediately following a guard, `prim::Constant`, and
        any kind listed in `except_for`.  Anything else fails.
        """
        except_for = except_for if except_for is not None else set()
        # TODO - upstream
        guards = "prim::TypeCheck", "prim::RequiresGradCheck", "prim::TensorExprDynamicGuard"
        guard_found = False

        def autodiff_guard(node):
            # matches aten::all(prim::ListConstruct(... AutogradAllNonZero/AutogradAllZero ...))
            if node.kind() != "aten::all":
                return False
            inps = list(node.inputs())
            if len(inps) != 1 or inps[0].node().kind() != "prim::ListConstruct":
                return False
            li_inps = list(inps[0].node().inputs())
            for li_inp in li_inps:
                if li_inp.node().kind() in ("prim::AutogradAllNonZero", "prim::AutogradAllZero"):
                    return True
            return False

        def is_guard(node):
            return node.kind() in guards or autodiff_guard(node)

        for node in graph.block().nodes():
            if node.kind() == "prim::Constant":
                continue
            if is_guard(node):
                # a fully fused graph should contain a single guard only
                self.assertFalse(guard_found)
                guard_found = True
                continue
            if node.kind() in except_for:
                continue
            if node.kind() == "prim::If":
                # the If must be the branch controlled by the preceding guard
                self.assertTrue(is_guard(node.prev()))
                continue
            self.assertTrue(False, "Found unexpected node:" + node.kind())
        self.assertTrue(guard_found)
    def assertLastGraphAllFused(self):
        """Assert the most recently executed optimized graph is fully fused."""
        self.assertAllFused(torch.jit.last_executed_optimized_graph())
def findFusionGroups(self, graph):
result = []
for n in graph.nodes():
if n.kind() == FUSION_GROUP:
result.append(n.g('Subgraph'))
continue
for block in n.blocks():
result += self.findFusionGroups(block)
return result
def test_typecheck(self):
a = torch.ones(1)
def fused_kernel(a, b):
return (a + b) * 2.
scripted = self.checkScript(fused_kernel, (a, a))
graph = scripted.graph_for(a, a)
# double check we fused
fusion_groups = self.findFusionGroups(graph)
self.assertEqual(len(fusion_groups), 1)
# we use a bigger tensor now (size 2)
# if we won't trigger a recompilation
# we will still create a tensor up to (size 1)
# if the type check fails
a = torch.ones(2)
# shape changed if we don't trigger recompilation
# we would compute the wrong result silently
self.assertEqual(scripted(a, a), fused_kernel(a, a))
def test_sum_simple(self):
def func(x):
x2 = x * x
return x2.sum()
with texpr_reductions_enabled():
a = torch.tensor(list(x for x in range(0, 15)), dtype=torch.float, device='cpu')
a = a.reshape(5, 3)
scripted = self.checkScript(func, (a,))
self.assertLastGraphAllFused()
    def test_nop(self):
        # Intentionally empty.
        pass
def test_sum_dim(self):
def func(x):
return x.sum((0, )) * 2
def func_neg(x):
return x.sum((-2, )) * 2
with texpr_reductions_enabled():
a = torch.tensor(list(x for x in range(0, 15)), dtype=torch.float, device='cpu')
a = a.reshape(5, 3)
scripted = self.checkScript(func, (a,))
self.assertLastGraphAllFused()
scripted = self.checkScript(func_neg, (a,))
self.assertLastGraphAllFused()
def test_sum_keepdim_cast(self):
def func(x):
return x.sum((0, ), keepdim=True, dtype=torch.double) * 2
with texpr_reductions_enabled():
a = torch.tensor(list(x for x in range(0, 15)), dtype=torch.float, device='cpu')
a = a.reshape(5, 3)
self.checkScript(func, (a,))
self.assertLastGraphAllFused()
def test_abs(self):
for device in self.devices:
def func(x):
return x.abs() * 2
a = torch.randn(5, device=device)
scripted = self.checkScript(func, (a,))
self.assertLastGraphAllFused()
def test_unsqueeze_size_calculation(self):
for device in self.devices:
def foo(b, d):
x = d.unsqueeze(1)
y = x * 42.
z = b + y
r = z / 42.
return r
inputs = (torch.rand(20, 28, device=device, requires_grad=True), torch.rand(20, device=device))
scripted = self.checkScript(foo, inputs)
self.assertAllFused(scripted.graph_for(*inputs))
def test_zero_element_tensors(self):
for device in self.devices:
def decode(sin_t, cos_t):
theta = torch.atan2(sin_t.float(), cos_t.float())
return theta
sin = torch.zeros(0, device=device)
cos = torch.zeros(0, device=device)
inputs = [sin, cos]
ge = self.checkScript(decode, inputs)
    def test_arg_configurations_smoke(self):
        """Contiguous and non-contiguous argument layouts must not share a kernel."""
        if self.dynamic_shapes:
            self.skipTest("TODO: chunk dynamic shapes")
        # A smoke test to make sure we won't use the same kernel for contiguous
        # and non-contiguous arguments.
        # TODO: add optionally enabled debug counters to the fuser to verify
        # that we really can tell the difference between configurations
        for device in self.devices:
            def f(x, y):
                z1, z2 = (x + y).chunk(2, dim=1)
                return z1 * z2

            x = torch.randn(4, 4, dtype=torch.float, device=device)
            y = torch.randn(4, 4, dtype=torch.float, device=device)
            traced_f = torch.jit.trace(f, (x, y,))
            # x.t() is non-contiguous; result must match the contiguous copy
            self.assertEqual(traced_f(x.t().contiguous(), y), traced_f(x.t(), y))
def test_broadcast(self):
for device in self.devices:
def scaleshift(x, scale, shift):
return x * scale + shift
inputs = [
torch.randn(4, 4, dtype=torch.float, device=device),
torch.randn(4, dtype=torch.float, device=device),
torch.randn(4, dtype=torch.float, device=device),
]
self.checkScript(scaleshift, inputs)
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    @unittest.skipIf(not RUN_CUDA_HALF, "no half support")
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "no half support with profiling on")
    def test_cuda_half(self):
        """Fused half-precision kernels must match float reference outputs and grads."""
        x = torch.randn(4, 4, dtype=torch.half, device='cuda')
        y = torch.randn(4, 4, dtype=torch.half, device='cuda')
        funcs = [
            self.fn_test_comparison_gt_lt,
            self.fn_test_relu,
            self.fn_test_exp
        ]
        # Note: Non fused inputs must be float to prevent loss of precision
        inputs = (x.float(), y.float())
        fusion_inputs = (x, y)
        for fn in funcs:
            local_inputs = [t.clone().requires_grad_() for t in inputs]
            local_fusion_inputs = [t.clone().requires_grad_() for t in fusion_inputs]
            # Verifies outputs
            fusion = torch.jit.trace(fn, local_fusion_inputs, check_trace=False)
            outputs = fn(*local_inputs)
            fusion_outputs = fusion(*local_fusion_inputs)
            # cast the float reference down to half before comparing
            outputs_half = [t.half() for t in outputs]
            self.assertEqual(outputs_half, fusion_outputs)
            # Verifies gradients
            for output, fusion_output in zip(outputs_half, fusion_outputs):
                grads = torch.autograd.grad(
                    output.float().sum(), local_inputs, allow_unused=True, retain_graph=True)
                fusion_grads = torch.autograd.grad(
                    fusion_output.sum(), local_fusion_inputs, allow_unused=True, retain_graph=True)
                grads_half = [t.half() for t in grads]
                self.assertEqual(grads_half, fusion_grads)
def test_checks_cat_inputs(self):
    """cat inputs must each be size-checked; cat is not a broadcasting op, so
    f(2x4, 1x4) must produce 3x4 and not 4x4."""
    # single fusion node causes error
    with set_fusion_group_inlining(True):
        for device in self.devices:
            # We shouldn't treat cat nodes as broadcasting. All their inputs
            # need to be checked for having the same map size, before we can
            # run the kernel.
            def f(x, y):
                return torch.cat([x + 2 * x + x ** 2, y + 4 * y + y ** 3], dim=0)
            # NOTE: y is broadcastable to x, but output of f(x, y) should have
            # shape 3x4, and not 4x4.
            x = torch.randn(2, 4, dtype=torch.float, device=device)
            y = torch.randn(1, 4, dtype=torch.float, device=device)
            scripted = self.checkScript(f, (x, y))
            self.assertEqual(scripted(x, y).shape, (3, 4))
            self.assertAllFused(scripted.graph_for(x, y))
def test_chunk(self):
    """A 3-way chunk feeding elementwise ops must fully fuse."""
    if self.dynamic_shapes:
        self.skipTest("TODO: chunk dynamic shapes")
    for device in self.devices:
        def fn(x):
            a, b, c = x.chunk(3, 1)
            return a * b + c
        inputs = [torch.randn(10, 6, dtype=torch.float, device=device)]
        self.checkScript(fn, inputs)
        self.assertLastGraphAllFused()
def test_chunk_correctness(self):
    """chunk along every dimension, on contiguous and transposed inputs,
    must fuse and produce correct results."""
    if self.dynamic_shapes:
        self.skipTest("TODO: chunk dynamic shapes")
    for device in self.devices:
        def chunk_4_0(x):
            x0, x1, x2, x3 = x.chunk(4, 0)
            return x0 + x1 + x2 + x3
        def chunk_4_1(x):
            x0, x1, x2, x3 = x.chunk(4, 1)
            return x0 + x1 + x2 + x3
        def chunk_4_last(x):
            x0, x1, x2, x3 = x.chunk(4, 2)
            return x0 + x1 + x2 + x3
        fns = [chunk_4_0, chunk_4_1, chunk_4_last]
        tensors = [
            # splitSize = 1
            torch.randn(4, 4, 4, dtype=torch.float, device=device),
            # contiguous case
            torch.randn(12, 8, 16, dtype=torch.float, device=device),
            # non-contiguous case
            torch.randn(12, 8, 16, dtype=torch.float, device=device).transpose(1, 2),
        ]
        for tensor in tensors:
            for fn in fns:
                self.checkScript(fn, [tensor])
                self.assertLastGraphAllFused()
def test_chunk_distributes(self):
    """chunk() applied to a fused producer must distribute into the fusion group.

    The TE fuser emits a single ConstantChunk node inside the fusion group
    (the old fuser additionally emitted broadcast_tensors).
    """
    # Fix: the dynamic-shapes skip guard was duplicated back to back;
    # one copy is sufficient.
    if self.dynamic_shapes:
        self.skipTest("TODO: chunk dynamic shapes")
    for device in self.devices:
        def f(x, y):
            z1, z2 = (x + y).chunk(2, dim=1)
            return z1 * z2
        x = torch.randn(4, 4, dtype=torch.float, device=device)
        y = torch.randn(4, 4, dtype=torch.float, device=device)
        ge = self.checkTrace(f, (x, y))
        graph = ge.graph_for(x, y)
        # XXX: The old fuser does broadcast_tensors but the new fuser doesn't.
        # FileCheck().check("broadcast_tensors").check('with ' + FUSION_GROUP + '_') \
        #     .check_count('ConstantChunk', 2, exactly=True).run(str(graph))
        FileCheck().check("with " + FUSION_GROUP + "_").check_count(
            "ConstantChunk", 1, exactly=True
        ).run(str(graph))
def test_chunk_motion_deduplicates_inputs(self):
    """Moving chunk into the fusion group must deduplicate repeated inputs
    (x appears multiple times in x*x and x*x*x)."""
    if self.dynamic_shapes:
        self.skipTest("TODO: chunk dynamic shapes")
    for device in self.devices:
        def func1(x):
            z = x * x
            z0, z1 = z.chunk(2)
            return z0 * z1
        def func2(x):
            z = x * x * x
            z0, z1 = z.chunk(2)
            return z0 * z1
        inputs = [
            torch.tensor([1.1, 1.2], device=device, dtype=torch.float),
        ]
        for func in [func1, func2]:
            self.checkScript(func, inputs)
            self.assertLastGraphAllFused()
def test_chunk_multiple(self):
    """Multiple chunks over different dims, with args used out of order,
    must all fuse into one group."""
    if self.dynamic_shapes:
        self.skipTest("TODO: chunk dynamic shapes")
    for device in self.devices:
        # The arguments are intentionally used out of order as a test to see
        # if the fusion compiler adds extra args in the correct order
        def fn(s, x, y, z):
            z1, z2 = z.chunk(2, 2)
            x1, x2, x3 = x.chunk(3, 1)
            y1, y2 = y.chunk(2, 0)
            return s + x1 + x2 + x3 + y1 + y2 + z1 + z2
        inputs = [
            torch.randn(5, 2, 3, dtype=torch.float, device=device),
            torch.randn(5, 6, 3, dtype=torch.float, device=device),
            torch.randn(10, 2, 3, dtype=torch.float, device=device),
            torch.randn(5, 2, 6, dtype=torch.float, device=device),
        ]
        ge = self.checkScript(fn, inputs)
        self.assertAllFused(ge.graph_for(*inputs))
def test_minmax(self):
    """torch.min/torch.max (including NaN operands) must fuse and match eager.

    Fix: the body was wrapped in an extra `for device in self.devices:` loop
    even though the `product(...)` below already iterates `self.devices` and
    rebinds `device`, so the entire sweep ran len(self.devices) identical
    times. The redundant outer loop is removed; behavior is otherwise the same.
    """
    def tmax(a, b):
        return torch.max(2 * a, b)
    def tmin(a, b):
        return torch.min(2 * a, b)
    a = torch.randn(4, 4, dtype=torch.float)
    b = torch.randn(4, 4, dtype=torch.float)
    nan = torch.tensor(float('nan'), dtype=torch.float)
    for f, inputs, device in product(
            (tmax, tmin),
            ([a, b], [a, nan], [b, nan]),
            self.devices):
        inputs = [t.to(device) for t in inputs]
        s = self.checkScript(f, inputs)
        self.assertAllFused(s.graph_for(*inputs))
def test_clamp(self):
    """clamp with finite/inf/one-sided bounds and NaN input: both forward and
    backward graphs must fuse (modulo size-tracking nodes)."""
    for device in self.devices:
        def func2(a, b):
            return torch.clamp(a + b, min=0, max=2)
        def funcInf(a, b):
            return torch.clamp(a + b, min=0, max=float('inf'))
        def funcNegInf(a, b):
            return torch.clamp(a + b, min=float('-inf'), max=0)
        def funcOptMin(a, b):
            return torch.clamp(a + b, max=2)
        def funcOptMax(a, b):
            return torch.clamp(a + b, min=0)
        a = torch.randn(4, 4, dtype=torch.float, device=device, requires_grad=True)
        b = torch.randn(4, 4, dtype=torch.float, device=device)
        nan = torch.tensor(float('nan'), dtype=torch.float, device=device)
        funcs = (func2, funcInf, funcNegInf, funcOptMin, funcOptMax)
        for f, inputs in product(funcs, [[a, b], [a, nan]]):
            inp1, inp2 = inputs
            s = self.checkScript(f, (inp1, inp2), profiling=ProfilingMode.PROFILING)
            self.assertAllFused(s.graph_for(inp1, inp2), except_for={'aten::size', 'aten::_size_if_not_equal'})
            c = s(inp1, inp2)
            with enable_profiling_mode_for_profiling_tests():
                warmup_backward(c.sum())
            graph = backward_graph(s)
            self.assertAllFused(graph, except_for={'aten::Float', 'aten::_grad_sum_to_size'}.union(autograd_check_set))
def test_clamp_double(self):
    """Double-precision clamp must keep full precision when fused
    (checked with a tight atol of 1e-10)."""
    for device in self.devices:
        def clamp_double(x, eta: float):
            return 1 - x.clamp(eta, 1 - eta)
        x = torch.tensor([1.0, 1.0], dtype=torch.double, device=device)
        eta = 1e-9
        s = self.checkScript(clamp_double, (x, eta), profiling=ProfilingMode.PROFILING, atol=1e-10, rtol=1e-5)
        self.assertAllFused(s.graph_for(x, eta), except_for={'aten::sub'})
def test_clamp_int(self):
    """Integer clamp with a bound beyond int32 range (1 << 32) must fuse correctly."""
    for device in self.devices:
        def clamp_int(x, eta: int):
            return x.clamp(0, eta)
        x = torch.tensor([1, 1], device=device)
        eta = 1 << 32
        s = self.checkScript(clamp_int, (x, eta), profiling=ProfilingMode.PROFILING)
        self.assertAllFused(s.graph_for(x, eta))
def test_add_bool(self):
    """Addition of bool tensors at several sizes must fully fuse."""
    sizes = [(1,), (2,), (4, 4)]
    for device, size in product(self.devices, sizes):
        def f(x, y, z):
            return x + y + z
        x = torch.randint(0, 2, size, dtype=torch.bool, device=device)
        y = torch.randint(0, 2, size, dtype=torch.bool, device=device)
        z = torch.randint(0, 2, size, dtype=torch.bool, device=device)
        ge = self.checkTrace(f, (x, y, z), inputs_require_grads=False)
        self.assertAllFused(ge.graph_for(x, y, z))
def test_mul_bool(self):
    """Multiplication of bool tensors must fully fuse."""
    for device in self.devices:
        def f(x, y, z):
            return x * y * z
        x = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)
        y = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)
        z = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)
        ge = self.checkTrace(f, (x, y, z), inputs_require_grads=False)
        self.assertAllFused(ge.graph_for(x, y, z))
def test_div_bool(self):
    """Division with bool operands must fully fuse (z is all ones to avoid
    division by zero)."""
    for device in self.devices:
        def f(x, y, z):
            return (x + y) / z
        x = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)
        y = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)
        z = torch.ones_like(x, dtype=torch.bool, device=device)
        ge = self.checkTrace(f, (x, y, z), inputs_require_grads=False)
        self.assertAllFused(ge.graph_for(x, y, z))
def test_bitwise_ops(self):
    """Integer bitwise/shift ops (&, |, ^, <<, >>) must fuse and match eager
    across every int dtype and device."""
    def apply(fn):
        return lambda x, y, z: fn(fn(x, y), z)
    binary_ops = [
        operator.__and__,
        operator.__or__,
        operator.__xor__,
        operator.__lshift__,
        operator.__rshift__,
    ]
    devices = self.devices
    for dtype, op, device in product(self.int_dtypes, binary_ops, devices):
        try:
            x = self.data_for(dtype, device)
            y = self.data_for(dtype, device)
            z = self.data_for(dtype, device)
            fn = apply(op)
            ref = fn(x, y, z)
        except Exception:
            # If eager mode doesn't support a dtype/op/device combo,
            # neither does the fuser. Catch everything to avoid needing to
            # guess what errors might be thrown by eager.
            continue
        try:
            t = torch.jit.trace(fn, (x, y, z))
            self.assertEqual(ref, t(x, y, z))
            self.assertAllFused(t.graph_for(x, y, z))
        except Exception as e:
            # Fix: chain the original failure (`from e`) so its traceback
            # is not lost behind the summarizing RuntimeError.
            raise RuntimeError(
                " ".join(["Failed:", str(dtype), op.__name__, device])
            ) from e
def test_minmax_int_ops(self):
    """torch.min/torch.max on integer dtypes must fuse and match eager."""
    def apply(fn):
        return lambda x, y, z: fn(fn(x, y), z)
    binary_ops = [
        torch.min,
        torch.max
    ]
    devices = self.devices
    for dtype, op, device in product(self.int_dtypes, binary_ops, devices):
        try:
            x = self.data_for(dtype, device)
            y = self.data_for(dtype, device)
            z = self.data_for(dtype, device)
            fn = apply(op)
            ref = fn(x, y, z)
        except Exception:
            # If eager mode doesn't support a dtype/op/device combo,
            # neither does the fuser. Catch everything to avoid needing to
            # guess what errors might be thrown by eager.
            continue
        try:
            t = torch.jit.trace(fn, (x, y, z))
            self.assertEqual(ref, t(x, y, z))
            self.assertAllFused(t.graph_for(x, y, z))
        except Exception as e:
            # Fix: chain the original failure (`from e`) so its traceback
            # is not lost behind the summarizing RuntimeError.
            raise RuntimeError(
                " ".join(["Failed:", str(dtype), op.__name__, device])
            ) from e
def test_comparison_eq_ne(self):
    """== and != masks used in follow-up arithmetic must fully fuse."""
    for device in self.devices:
        def f(x, y):
            mask = (x == 0).type_as(x)
            z = x * mask + y
            mask = (x != 0).type_as(x)
            z = z * mask + y
            return z
        x = torch.randn(4, 4, dtype=torch.float, device=device)
        y = torch.randn(4, 4, dtype=torch.float, device=device)
        ge = self.checkTrace(f, (x, y))
        self.assertAllFused(ge.graph_for(x, y))
@staticmethod
def fn_test_comparison_gt_lt(x, y):
    """Reference elementwise function mixing > and < masks; shared by the
    comparison and CUDA-half fusion tests."""
    mask = (x > 0).type_as(x)
    z = x * mask + y
    mask = (x < 0).type_as(x)
    z = z * mask + y
    return z
def test_comparison_gt_lt(self):
    """> and < masks used in follow-up arithmetic must fully fuse."""
    for device in self.devices:
        x = torch.randn(4, 4, dtype=torch.float, device=device)
        y = torch.randn(4, 4, dtype=torch.float, device=device)
        ge = self.checkTrace(self.fn_test_comparison_gt_lt, (x, y))
        self.assertAllFused(ge.graph_for(x, y))
def test_comparison_ge_le(self):
    """>= and <= masks must fuse, both without and with requires_grad
    (the grad case tolerates size-tracking nodes)."""
    for device in self.devices:
        def f(x, y):
            mask = (x >= 0).type_as(x)
            z = x * mask + y
            mask = (x <= 0).type_as(x)
            z = z * mask + y
            return z
        x = torch.randn(4, 4, dtype=torch.float, device=device)
        y = torch.randn(4, 4, dtype=torch.float, device=device)
        ge = self.checkTrace(f, (x, y))
        self.assertAllFused(ge.graph_for(x, y))
        x.requires_grad_(True)
        y.requires_grad_(True)
        self.assertAllFused(ge.graph_for(x, y), except_for=("aten::size", "prim::BroadcastSizes",
                                                            "aten::_size_if_not_equal"))
def test_addcmul(self):
    """addcmul plus a preceding add must end up in a single fusion group."""
    for device in self.devices:
        t = torch.randn(1, 4, dtype=torch.float, device=device)
        t1 = torch.randn(4, 1, dtype=torch.float, device=device)
        t2 = torch.randn(1, 4, dtype=torch.float, device=device)
        def foo(t, t1, t2):
            # t1 is intentionally unused — hence allow_unused=True below.
            return t.addcmul(t + 1, t2, value=0.1)
        ge = self.checkTrace(foo, (t, t1, t2), allow_unused=True)
        graph = ge.graph_for(t, t1, t2)
        fusion_groups = self.findFusionGroups(graph)
        self.assertEqual(len(fusion_groups), 1)
        FileCheck().check("aten::add(").check("aten::addcmul(").run(str(fusion_groups[0]))

# TODO: We leak CUDA memory here because the traced graph holds onto a
# constant-ified tensor. Since the Python-global CompilationUnit is alive
# until the end of the process, the memory is effectively leaked.
# Removed `_cuda` suffix from this test which disables leak-checking.
# If this is a real problem, we'll need to revisit Torchscript Function
# lifetimes in Python.
def test_lerp(self):
    """lerp with a scalar weight must fuse; the tensor-weight overload is
    still disabled pending TE scalar-tensor support."""
    for device in self.devices:
        start = torch.randn(4, 1, dtype=torch.float, device=device)
        end = torch.randn(1, 4, dtype=torch.float, device=device)
        weight = torch.tensor(0.5, dtype=torch.float, device=device)
        # scalar weight overload
        def foo_weight_scalar(start, end):
            return torch.lerp(start + 1, end, 0.5)
        # tensor weight overload
        def foo_weight_tensor(start, end):
            return torch.lerp(start + 1, end, weight)
        ge_weight_scalar = self.checkTrace(foo_weight_scalar, (start, end))
        graph = ge_weight_scalar.graph_for(start, end)
        self.assertAllFused(graph)
        # TODO: uncomment when TE enables support for scalar tensors
        # ge_weight_tensor = self.checkTrace(foo_weight_tensor, (start, end))
        # graph = ge_weight_tensor.graph_for(start, end)
        # self.assertAllFused(graph)
def test_concat(self):
    """cat of two fused elementwise results must fuse (the TE fuser handles
    concat inside a fusion group)."""
    # disabling concat causes error with single concat node
    with set_fusion_group_inlining(True):
        for device in self.devices:
            hx = torch.randn(3, 20, dtype=torch.float, device=device)
            cx = torch.randn(3, 20, dtype=torch.float, device=device)
            def foo(hx, cx):
                return torch.cat((hx + cx, hx * cx))
            ge = self.checkTrace(foo, (hx, cx))
            graph = ge.graph_for(hx, cx)
            self.assertAllFused(graph)
            # XXX: TE fuser can handle concats in a fusion group.
            # FileCheck().check("FusedConcat").check_next("return").run(str(graph))
def test_remove_output_used_only_in_size(self):
    """Intermediate outputs used only for size computation must be pruned
    from the differentiable subgraph's If node."""
    for device in self.devices:
        def test_fuse(a, b):
            c = a + b
            d = c + b
            return d
        scripted_f = torch.jit.script(test_fuse)
        x = torch.ones(1, requires_grad=True, device=device)
        y = torch.ones(1, requires_grad=True, device=device)
        warmup_forward(scripted_f, x, y, profiling_count=3)
        g = scripted_f.graph_for(x, y)
        diff_nodes = g.findAllNodes('prim::DifferentiableGraph')
        self.assertEqual(len(diff_nodes), 1)
        g = diff_nodes[0].g('Subgraph')
        if_nodes = [n for n in g.nodes() if n.kind() == 'prim::If']
        self.assertEqual(len(if_nodes), 1)
        # the if node and the fusion group inside it should only have one output
        self.assertEqual(len(list(if_nodes[0].outputs())), 1)
def test_concat_invariant(self):
    """The output of a fused concat may not feed back into the same fusion group."""
    for device in self.devices:
        # Invariant: the output of prim::FusedConcat may
        # not be an input to any node inside the FusionGroup.
        def fn(x, y, z):
            x1 = x + y
            y1 = x - y
            w = torch.cat([x1, y1])
            return w + z
        x = torch.randn(2, 2, dtype=torch.float, device=device)
        y = torch.randn(2, 2, dtype=torch.float, device=device)
        z = torch.randn(4, 2, dtype=torch.float, device=device)
        ge = self.checkTrace(fn, (x, y, z))
        graph = ge.graph_for(x, y, z)
        self.assertAllFused(graph, except_for={'aten::add'})
        # XXX: TE fuser can handle concats inside a fusion group.
        # FileCheck().check("FusedConcat").check_next("return").run(str(graph))
@staticmethod
def fn_test_exp(x, y):
    """Reference exp-of-affine function shared by test_exp and test_cuda_half."""
    return (x + .5 * y).exp()
def test_exp(self):
    """exp over an affine combination must fully fuse."""
    for device in self.devices:
        x = torch.randn(4, 4, dtype=torch.float, device=device)
        y = torch.randn(4, 4, dtype=torch.float, device=device)
        ge = self.checkTrace(self.fn_test_exp, (x, y))
        self.assertAllFused(ge.graph_for(x, y))
def test_threshold(self):
    """torch.threshold combined with additions must fully fuse."""
    for device in self.devices:
        def f(x):
            return torch.threshold(x, 0, -10) + x + x + x
        x = torch.tensor([-1, -0.5, 0, 1, 2, 3], device=device)
        scripted = self.checkScript(f, (x,))
        self.assertAllFused(scripted.graph_for(x))
def test_scalar_arg(self):
    """A Python float argument must fuse with tensor math, including the
    requires_grad path (which tolerates size-tracking nodes)."""
    for device in self.devices:
        def fn_test_scalar_arg(x: torch.Tensor, p: float) -> torch.Tensor:
            return p * (x * x + x)
        x = torch.randn(4, 4, dtype=torch.float, device=device)
        p = 3
        scripted = self.checkScript(fn_test_scalar_arg, (x, p))
        self.assertAllFused(scripted.graph_for(x, p))
        x.requires_grad_(True)
        # use another function otherwise we will bailout
        # and won't be able to do fused checks
        def fn_test_scalar_arg_requires_grad(x: torch.Tensor, p: float) -> torch.Tensor:
            return p * (x * x + x)
        scripted = torch.jit.script(fn_test_scalar_arg_requires_grad)
        out = scripted(x, p)
        out = scripted(x, p)
        out = scripted(x, p)
        self.assertAllFused(scripted.graph_for(x, p), except_for=("aten::size", "prim::BroadcastSizes",
                                                                  "aten::_size_if_not_equal"))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "needs non-zero device")
def test_fusion_reuse_multi_gpu(self):
    """Running the same fused function on CPU and two different GPUs must not
    crash; each device should get its own compiled kernel."""
    def fn(x, y):
        return x * y * x * y
    inputs_cpu = [
        torch.randn(4, 4, dtype=torch.float),
        torch.randn(4, 4, dtype=torch.float),
    ]
    inputs_cuda0 = [x.cuda(0) for x in inputs_cpu]
    inputs_cuda1 = [y.cuda(1) for y in inputs_cpu]
    # Should not crash; these should compile different kernels.
    ge = self.checkScript(fn, inputs_cpu)
    self.assertAllFused(ge.graph_for(*inputs_cpu))
    ge(*inputs_cuda0)
    ge(*inputs_cuda1)

# TODO: we're currently not checking 'device' in the type info when pulling
# nodes into a fusion group. We should fix that and re-enable this test.
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "needs non-zero device")
def test_kernel_cache_multi_gpu(self):
    """Three identical fusion groups on different devices should share one
    KernelSpec cache entry (cache-size assertion currently disabled)."""
    def not_fusible(x):
        return x
    def fn(x, y, z):
        x_out = x * x * x * x * x  # fusion: lambda x. x * x * x * x * x
        y_out = y * y * y * y * y
        z_out = z * z * z * z * z
        return not_fusible(x_out), not_fusible(y_out), not_fusible(z_out)
    inputs = [
        torch.randn(4, 4, dtype=torch.float),
        torch.randn(4, 4, dtype=torch.float, device='cuda:0'),
        torch.randn(4, 4, dtype=torch.float, device='cuda:1'),
    ]
    prev_cache_size = torch._C._jit_debug_fuser_num_cached_kernel_specs()
    # There are 3 FusionGroups. Because they have the same graph, they
    # should reuse the same KernelSpec in the KernelSpec cache.
    ge = self.checkScript(fn, inputs)
    self.assertGraphContainsExactly(
        ge.graph_for(*inputs), FUSION_GROUP, 3, True)
    new_cache_size = torch._C._jit_debug_fuser_num_cached_kernel_specs()
    # XXX: This assumes that the same kernel isn't already used by another test
    # FIXME: Use the TE fuser's way of querying the cache.
    # self.assertEqual(new_cache_size - prev_cache_size, 1)
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "needs non-zero device")
def test_nonzero_device_cuda(self):
    """Fusion must work on a non-default CUDA device (cuda:1)."""
    device = 'cuda:' + str(1)
    x = torch.tensor([0.4], dtype=torch.float, device=device)
    y = torch.tensor([0.7], dtype=torch.float, device=device)
    def doit(x, y):
        return torch.sigmoid(torch.tanh(x * (x + y) + x))
    ge = self.checkTrace(doit, (x, y))
    self.assertAllFused(ge.graph_for(x, y))
def test_lstm(self):
    """A scripted LSTM cell must fuse everything except tuple construction."""
    for device in self.devices:
        inputs = get_lstm_inputs(device, training=True)
        module = self.checkScript(LSTMCellS, inputs)
        # NOTE(review): `graph_for(inputs)` passes the tuple as a single
        # argument; sibling tests unpack with `*inputs` — confirm which is
        # intended here.
        self.assertAllFused(module.graph_for(inputs), except_for={"prim::TupleConstruct"})
def test_lstm_concat(self):
    """A traced LSTM cell with concatenated outputs must fuse (extra nodes
    tolerated under dynamic shapes)."""
    # single fusion node causes error
    with set_fusion_group_inlining(True):
        for device in self.devices:
            inputs = get_lstm_inputs(device)
            ge = self.checkTrace(LSTMCellC, inputs)
            graph = ge.graph_for(*inputs)
            except_nodes = {"prim::TupleConstruct", "aten::linear"}
            # TODO... Chunk
            if self.dynamic_shapes:
                except_nodes = except_nodes.union({"aten::add", "prim::ConstantChunk"})
            self.assertAllFused(ge.graph_for(*inputs), except_for=except_nodes)
            # XXX: TE fuser can handle concats inside a fusion group.
            # FileCheck().check("FusedConcat").check_next("return").run(str(graph))
def test_lstm_gates_permutations(self):
    """Every ordering of the four LSTM gate summands must still yield the
    expected number of fusion groups."""
    for device in self.devices:
        # lstm has gates = x.mm(w_ih.t()) + hx.mm(w_hh.t()) + b_ih + b_hh.
        # Test that any permutation of this will still result in one FusionGroup.
        choices = ['x.mm(w_ih.t())', 'hx.mm(w_hh.t())', 'b_ih', 'b_hh']
        template = dedent('''
        def cell(x, hx, cx, w_ih, w_hh, b_ih, b_hh):
            gates = {} + {} + {} + {}
            ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
            return ingate * forgetgate * cellgate * outgate
        ''')
        for permutation in permutations(choices, len(choices)):
            code = template.format(*permutation)
            scope = {}
            # exec on a locally generated template (trusted input); the eager
            # result is compared against the CompilationUnit's result.
            exec(code, globals(), scope)
            cu = torch.jit.CompilationUnit(code)
            fusion_group_len = 2 if self.dynamic_shapes else 1
            inputs = get_lstm_inputs(device, training=False)
            self.assertEqual(cu.cell(*inputs), scope['cell'](*inputs))
            forward_graph = cu.cell.graph_for(*inputs)
            self.assertGraphContainsExactly(forward_graph, FUSION_GROUP, fusion_group_len)

# TODO: Fuser doesn't work at all when inputs require grad. Fix that
def test_lstm_traced(self):
    """A traced LSTM cell must produce the expected fusion group(s) containing
    sigmoid/tanh (and a Chunk when shapes are static)."""
    for device in self.devices:
        inputs = get_lstm_inputs(device)
        ge = self.checkTrace(LSTMCellF, inputs)
        graph = ge.graph_for(*inputs)
        fusion_groups = self.findFusionGroups(graph)
        # TODO: chunk
        fusion_group_len = 2 if self.dynamic_shapes else 1
        self.assertEqual(len(fusion_groups), fusion_group_len)
        f = FileCheck()
        if not self.dynamic_shapes:
            f.check("Chunk")
        f.check("aten::sigmoid").check("aten::tanh").run(str(fusion_groups[0 if not self.dynamic_shapes else 1]))
def test_milstm(self):
    """MI-LSTM cell: forward must contain the expected fusion group(s) and
    backward must run after warmup."""
    if self.dynamic_shapes:
        self.skipTest("don't run conv with dynamic shapes")
    for device in self.devices:
        inputs = get_milstm_inputs(device, training=True)
        module = self.checkScript(MiLSTMCell, inputs)
        forward_graph = module.graph_for(*inputs)
        # TODO: chunk
        fusion_group_len = 2 if self.dynamic_shapes else 1
        self.assertGraphContainsExactly(
            forward_graph, FUSION_GROUP, fusion_group_len, consider_subgraphs=True)
        FileCheck().check("DifferentiableGraph").check("TupleConstruct") \
            .check_next("return").check(FUSION_GROUP).run(str(forward_graph))
        hy, cy = module(*inputs)
        warmup_backward((hy + cy).sum())
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skip("rand_like is not supported yet")
def test_rand_cuda(self):
    """Fused rand_like must produce fresh uniform [0, 1) samples per call."""
    class M(torch.jit.ScriptModule):
        __constants__ = ['d']
        def __init__(self):
            super(M, self).__init__()
            self.d = torch.device('cuda')
        @torch.jit.script_method
        def create(self, x):
            return x * x + x + torch.rand_like(x)
    x = torch.zeros([3, 4, 5], dtype=torch.float, device='cuda')
    m = M()
    out1 = m.create(x)
    out2 = m.create(x)
    # Two calls must draw different random samples, each within [0, 1).
    self.assertNotEqual(out1, out2)
    self.assertTrue(torch.all(out1 >= 0))
    self.assertTrue(torch.all(out1 < 1))
    self.assertTrue(torch.all(out2 >= 0))
    self.assertTrue(torch.all(out2 < 1))
    self.assertAllFused(m.create.graph_for(x))
@staticmethod
def fn_test_relu(x, y):
    """Reference relu-of-affine function shared by test_relu and test_cuda_half."""
    return F.relu(x + .5 * y)
def test_relu(self):
    """relu over an affine combination must fully fuse."""
    for device in self.devices:
        x = torch.randn(4, 4, dtype=torch.float, device=device)
        y = torch.randn(4, 4, dtype=torch.float, device=device)
        ge = self.checkTrace(self.fn_test_relu, (x, y))
        self.assertAllFused(ge.graph_for(x, y))
def test_erf(self):
    """erf/erfc must fuse on GPU (skipped on CPU), including the
    requires_grad path with size-tracking nodes tolerated."""
    for device in self.devices:
        # only enabled on gpu
        if device == 'cpu':
            continue
        def fn_test_erf(x):
            return F.relu(torch.erf(x) - torch.erfc(x))
        x = torch.randn(4, 4, dtype=torch.float, device=device)
        ge = self.checkScript(fn_test_erf, (x,), profiling=ProfilingMode.PROFILING)
        self.assertAllFused(ge.graph_for(x))
        x.requires_grad_(True)
        ge = self.checkScript(fn_test_erf, (x,), profiling=ProfilingMode.PROFILING)
        self.assertAllFused(ge.graph_for(x), except_for=("aten::size", "prim::BroadcastSizes",
                                                         "aten::_size_if_not_equal"))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skip("rand_like is not supported yet")
def test_rand_broadcast_cuda(self):
    """Fused rand_like must broadcast correctly: a random row generated from a
    1-D tensor must repeat identically across the broadcast dimension."""
    def fn_test_rand(x, y):
        r = torch.rand_like(y)
        return r * x + x
    # If using profiling, a different function is needed to test different
    # shapes, or we'll use a cached script.
    def fn_test_rand2(x, y):
        r = torch.rand_like(y)
        return r * x * x
    x = torch.randn(4, 4, dtype=torch.float, device='cuda')
    y = torch.randn(4, 4, dtype=torch.float, device='cuda')
    script_f = torch.jit.script(fn_test_rand)
    warmup_forward(script_f, x, y)
    out = script_f(x, y)
    self.assertAllFused(script_f.graph_for(x, y))
    x.requires_grad_(True)
    out = script_f(x, y)
    self.assertAllFused(script_f.graph_for(x, y), except_for=("aten::size", "prim::BroadcastSizes",
                                                              "aten::_size_if_not_equal"))
    # test that broadcasting random produces correct results
    x = torch.ones(4, 4, dtype=torch.float, device='cuda')
    y = torch.ones(4, dtype=torch.float, device='cuda')
    script_f = torch.jit.script(fn_test_rand2)
    warmup_forward(script_f, x, y)
    out = script_f(x, y)
    # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
    self.assertEqualIgnoreType(out[0, :] + torch.zeros(4, 4, device='cuda'), out)
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skip("rand_like is not supported yet")
def test_rand_diamond(self):
    """One rand_like consumed by two branches must use a single sample:
    (x + r) + (y - r) must equal x + y exactly."""
    def fn_test_diamond(x, y):
        r = torch.rand_like(y)
        a = x + r
        b = y - r
        return a + b
    x = torch.randn(4, 4, dtype=torch.float, device='cuda')
    y = torch.randn(4, 4, dtype=torch.float, device='cuda')
    script_f = torch.jit.script(fn_test_diamond)
    warmup_forward(script_f, x, y)
    out = script_f(x, y)
    self.assertEqual(out, x + y)
def test_scalar(self):
    """0-dim tensor inputs must fuse like any other elementwise inputs."""
    def fn(x, y):
        return 2 * x + y
    x = torch.tensor(0.1, dtype=torch.float, device='cpu')
    y = torch.tensor(1, dtype=torch.float, device='cpu')
    ge = self.checkScript(fn, (x, y))
    self.assertAllFused(ge.graph_for(x, y))
def test_inlined_optimized_graph(self):
    """After warmup at three different shapes, the optimized graph must hold a
    shape-guard If with a TensorExpr group; inlining must expose all three."""
    @torch.jit.script
    def foo(x):
        return torch.relu(x + x)
    # Warm up at three distinct input shapes so each gets a specialization.
    for _ in range(3):
        foo(torch.rand([4, 4]))
    for _ in range(3):
        foo(torch.rand([10]))
    for _ in range(3):
        foo(torch.rand([2, 2, 2]))
    g = torch.jit.last_executed_optimized_graph()
    FileCheck().check_count("prim::If", 1, exactly=True).check("prim::TensorExpr").run(g)
    torch._C._jit_pass_inline(g)
    f = FileCheck()
    for _ in range(3):
        f.check("prim::If").check("prim::TensorExpr")
    f.run(g)
def test_small_constant(self):
    """Tiny float constants (1e-8, 5e-9) must fuse without being dropped."""
    for device in self.devices:
        def fn_test_small_constant(x, y):
            return (1e-8 * x + 5e-9 * y) * 1e8
        x = torch.randn(4, 4, dtype=torch.float, device=device)
        y = torch.randn(4, 4, dtype=torch.float, device=device)
        ge = self.checkTrace(fn_test_small_constant, (x, y))
        self.assertAllFused(ge.graph_for(x, y))

# Currently we don't pull constants into fusion groups, because in some
# cases it could remove the constant from the original graph and now our
# fusion group needs to return that constant for its other users.
# Instead of never pulling constants into the fusion group, we should just
# be more careful at how we rewrite its users.
# TODO: fix that and reenable the test.
def test_tensor_scalar_ops(self):
    """Scalar operands — constant and non-constant — must fuse, and the fused
    kernel must recompute correctly when the scalar value changes."""
    for device in self.devices:
        def should_fuse(x):
            z = 3.
            y = x + z
            return x * y
        def should_fuse_scalar(x, z):
            y = x + int(z)
            return x * y
        inputs = [torch.randn(2, 2, dtype=torch.float, device=device)]
        ge = self.checkScript(should_fuse, inputs)
        graph = ge.graph_for(*inputs)
        fusion_groups = self.findFusionGroups(graph)
        self.assertEqual(len(fusion_groups), 1)
        FileCheck().check("aten::add").check("aten::mul").run(str(fusion_groups[0]))
        inputs = [
            torch.randn(2, 2, dtype=torch.float, device=device),
            torch.tensor(3., dtype=torch.float, device=device),
        ]
        ge = self.checkScript(should_fuse_scalar, inputs)
        # Check that the fused graph computes correct results when the scalar
        # input changes.
        inputs = [
            torch.randn(2, 2, dtype=torch.float, device=device),
            torch.tensor(7., dtype=torch.float, device=device),
        ]
        self.assertEqual(ge(*inputs), should_fuse_scalar(*inputs))
        # The TE fuser supports fusion of non-constant scalars
        self.assertGraphContainsExactly(
            ge.graph_for(*inputs), FUSION_GROUP, 1, consider_subgraphs=True)
def test_where_and_typing(self):
    """torch.where returning (bool mask, double result) must fuse apart from
    the tuple construction."""
    for device in self.devices:
        def f(x, y):
            mask = x > y
            res = torch.where(mask, x, y)
            return mask, res
        x = torch.randn(4, 4, dtype=torch.double, device=device)
        y = torch.randn(4, 4, dtype=torch.double, device=device)
        script_f = self.checkScript(f, (x, y))
        self.assertAllFused(script_f.graph_for(x, y), except_for={'prim::TupleConstruct'})
def test_disabled(self):
    """With CPU fusion disabled, no fusion groups may be created.

    Fix: the previous fuser flag is now restored in a try/finally — before,
    a failing assertion left CPU fusion globally disabled for every
    subsequent test in the process.
    """
    old_cpu_fuser_state = torch._C._jit_can_fuse_on_cpu()
    torch._C._jit_override_can_fuse_on_cpu(False)
    try:
        def fn(a):
            return a ** 2 + a
        x = torch.randn(4, dtype=torch.float, device="cpu")
        s = self.checkScript(fn, (x,))
        g = s.graph_for(x)
        self.assertEqual(len(self.findFusionGroups(g)), 0)
    finally:
        torch._C._jit_override_can_fuse_on_cpu(old_cpu_fuser_state)
def data_for(self, dtype, device="cuda", size=None):
    """Create a small test tensor of `dtype` on `device`.

    Without `size`, the base data is the 1-D tensor [1., 2.]; with `size`,
    it is uniform random data of that shape. Bool is derived by comparison
    (all False for these bases), quantized dtypes via quantize_per_tensor,
    and everything else by a plain cast.
    """
    if size is None:
        base = torch.arange(1, 3, dtype=torch.float, device=device)
    else:
        base = torch.rand(*size, device=device)
    if dtype == torch.bool:
        return base > 2
    if dtype in (torch.qint8, torch.quint8, torch.qint32):
        return torch.quantize_per_tensor(base, 0.1, 1, dtype=dtype)
    return base.to(dtype)
def test_torch_to(self):
    """aten::to fusion rules: no-op casts, non-constant dtypes, pin_memory,
    and cross-device moves must NOT fuse; constant dtype conversions (via
    freezing) must fuse."""
    # test no op
    @torch.jit.script
    def foo(x):
        return x.to(torch.float)
    foo(torch.tensor([3.], dtype=torch.float))
    foo(torch.tensor([3.], dtype=torch.float))
    FileCheck().check_not("TensorExpr").run(torch.jit.last_executed_optimized_graph())
    # test not fusing non-const inputs
    @torch.jit.script
    def foo(x, dtype: int):
        return x.to(dtype)
    foo(torch.tensor([3.], dtype=torch.float), torch.int)
    foo(torch.tensor([3.], dtype=torch.float), torch.int)
    FileCheck().check_not("TensorExpr").run(torch.jit.last_executed_optimized_graph())
    # test not fusing to_pinned inputs
    @torch.jit.script
    def foo(x, dtype: int):
        # dtype is deliberately unused; a distinct signature avoids reusing
        # the previously cached script.
        return x.to(pin_memory=True)
    foo(torch.tensor([3.], dtype=torch.float), torch.int)
    foo(torch.tensor([3.], dtype=torch.float), torch.int)
    FileCheck().check_not("TensorExpr").run(torch.jit.last_executed_optimized_graph())
    # test across-device not supported
    if torch.cuda.is_available():
        @torch.jit.script
        def foo(x):
            return x.to(device="cuda")
        foo(torch.tensor([3.], dtype=torch.float))
        foo(torch.tensor([3.], dtype=torch.float))
        FileCheck().check_not("TensorExpr").run(torch.jit.last_executed_optimized_graph())
    sizes = [(1, 4), (4, 4)]
    # reuses cast impl, smaller dtype set for faster test
    dtypes = [
        torch.bool,
        torch.int,
        torch.float16,
        torch.float32,
        torch.float64,
    ]
    class MyMod(torch.nn.Module):
        def __init__(self, dtype):
            super(MyMod, self).__init__()
            self.dtype = dtype
        def forward(self, x):
            return x.to(self.dtype)
    bad_dtypes = []
    for dtype, output_dtype, device, size in product(dtypes, dtypes, self.devices, sizes):
        # TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed
        if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
            continue
        if dtype == output_dtype:
            continue
        x = self.data_for(dtype, device, size=size)
        mod = MyMod(output_dtype)
        ref = mod.forward(x)
        # use freezing to make non-Tensor args to `to` constant
        mod = torch.jit.freeze(torch.jit.script(mod.eval()))
        warmup_forward(mod.forward, x)
        self.assertEqual(ref, mod.forward(x))
        self.assertLastGraphAllFused()
@unittest.skip("Temporarily disabled")
def test_masked_fill(self):
    """masked_fill across dtypes/devices/scalar values must fuse and match eager.

    Fixes: the failure message referenced an undefined name `op` (copied from
    the dtype-sweep tests, whose loop variable it is) — this raised NameError
    instead of the intended diagnostic; the op name is now spelled directly
    and the original exception is chained with `from e`.
    """
    dtypes = [
        torch.int8,
        torch.int16,
        torch.int32,
        torch.int64,
        # TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed
        # torch.float16,
        torch.float32,
        torch.float64,
        torch.bool,
    ]
    sizes = [(2,), (4, 4)]
    for self_dtype, device, scalar_val, size in product(dtypes, self.devices, [0.4, 3], sizes):
        input_v = self.data_for(self_dtype, device, size=size)
        mask = self.data_for(torch.bool, device, size=size)
        def fn(input_v, mask):
            return torch.masked_fill(input_v, mask, scalar_val)
        ref = fn(input_v, mask)
        try:
            t = torch.jit.trace(fn, (input_v, mask))
            torch.testing.assert_close(ref, t(input_v, mask))
            self.assertLastGraphAllFused()
        except Exception as e:
            raise RuntimeError(
                " ".join(["Failed:", str(self_dtype), "masked_fill", device, str(size)])
            ) from e
def test_isnan(self):
    """isnan must fuse and match eager across dtypes, devices, and inputs
    containing actual NaNs."""
    x = torch.rand([4])
    x[0] = float('nan')
    inputs = [
        x,
        torch.tensor([float('nan'), .5])
    ]
    dtypes = [
        torch.int8,
        torch.int16,
        torch.int32,
        torch.int64,
        torch.float16,
        torch.float32,
        torch.float64,
        torch.bool,
    ]
    for inp, device, dtype in product(inputs, self.devices, dtypes):
        # TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed
        if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
            continue
        inp = inp.to(device=device, dtype=dtype)
        try:
            f = torch.jit.trace(lambda x: x.isnan(), (inp,))
            warmup_forward(f, inp)
            self.assertEqual(f(inp), inp.isnan())
            self.assertLastGraphAllFused()
        except Exception as e:
            # Fix: chain the original failure (`from e`) so its traceback
            # is not lost behind the summarizing RuntimeError.
            raise RuntimeError(
                " ".join(["Failed:", str(dtype), 'isnan', device])
            ) from e
def test_gelu(self):
    """F.gelu must fuse and match eager across dtypes, devices, and sizes."""
    def apply(fn):
        return lambda x, approximate: fn(x, approximate)
    unary_ops = [
        F.gelu,
    ]
    sizes = [(1,), (2,), (4, 4)]
    for dtype, op, device, size in product(self.dtypes, unary_ops, self.devices, sizes):
        # TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed
        if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
            continue
        try:
            x = self.data_for(dtype, device, size=size)
            cond = self.data_for(torch.bool, device)
            fn = apply(op)
            ref = fn(x, cond)
        except Exception:
            # If eager mode doesn't support a dtype/op/device combo,
            # neither does the fuser. Catch everything to avoid needing to
            # guess what errors might be thrown by eager.
            continue
        try:
            t = torch.jit.trace(fn, (x, cond))
            torch.testing.assert_close(ref, t(x, cond))
            self.assertAllFused(t.graph_for(x, cond))
        except Exception as e:
            # Fix: chain the original failure (`from e`) so its traceback
            # is not lost behind the summarizing RuntimeError.
            raise RuntimeError(
                " ".join(["Failed:", str(dtype), op.__name__, device, str(size)])
            ) from e
def test_unary_ops(self):
    """Trace a large battery of unary ops over every dtype/device/size combo
    and check traced output matches eager and the graph fully fuses.

    Eager-side failures (unsupported combos) are skipped rather than
    treated as fuser bugs.
    """
    with torch._jit_internal._disable_emit_hooks():
        def apply(fn):
            return lambda x: fn(x)
        unary_ops = [
            torch.lgamma,
            torch.sigmoid,
            torch.reciprocal,
            torch.neg,
            torch.relu,
            F.relu6,
            torch.log,
            torch.log10,
            torch.log1p,
            torch.log2,
            torch.exp,
            torch.expm1,
            torch.erf,
            torch.erfc,
            torch.cos,
            torch.sin,
            torch.tan,
            torch.acos,
            torch.asin,
            torch.cosh,
            torch.sinh,
            torch.atan,
            torch.tanh,
            F.hardtanh,
            F.hardsigmoid,
            F.hardswish,
            F.softplus,
            F.silu,
            torch.sqrt,
            torch.rsqrt,
            torch.abs,
            torch.ceil,
            torch.floor,
            torch.round,
            torch.trunc,
            torch.frac,
            # TODO: broken on ROCm?
            # F.hardshrink,
            F.leaky_relu,
            lambda x: torch.threshold(x, 0, -10),
            # TODO: broken since type promotion was added
            # lambda x: torch.clamp(x, -10, 10),
        ]
        # Ops only exercised on GPU (skipped on cpu below).
        gpu_only = {torch.erf, torch.erfc}
        sizes = [(1,), (2,), (4, 4)]
        for dtype, op, device, size in product(self.dtypes, unary_ops, self.devices, sizes):
            # TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed
            if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
                continue
            # todo - re-enable. fails with .500
            if dtype == torch.bfloat16 and op == torch.round:
                continue
            if op in gpu_only and device == "cpu":
                continue
            try:
                x = self.data_for(dtype, device, size=size)
                fn = apply(op)
                ref = fn(x)
            except Exception:
                # If eager mode doesn't support a dtype/op/device combo,
                # neither does the fuser. Catch everything to avoid needing to
                # guess what errors might be thrown by eager.
                continue
            try:
                t = torch.jit.trace(fn, (x,))
                torch.testing.assert_close(ref, t(x))
                self.assertAllFused(t.graph_for(x))
            except Exception as e:
                raise RuntimeError(
                    " ".join(["Failed:", str(dtype), op.__name__, device, str(size)])
                )
def test_binary_ops(self):
    """Trace tensor-tensor binary ops over every dtype/device combo; check
    traced results match eager and the graph fuses (fusion only asserted
    for floating dtypes when the op is in ``fp_only``)."""
    def apply(fn):
        return lambda x, y: fn(x, y)
    binary_ops = [
        operator.__and__,
        operator.__or__,
        operator.__xor__,
        torch.add,
        torch.sub,
        torch.mul,
        torch.min,
        torch.max,
        lambda x, y: torch.lerp(x, y, 0.5),
        torch.atan2,
        torch.div,
        torch.eq,
        torch.ne,
        torch.ge,
        torch.gt,
        torch.lt,
        torch.fmod,
        torch.remainder,
        lambda x, y: y.type_as(x),
    ]
    # These only fuse for floating-point dtypes.
    fp_only = [
        torch.fmod,
        torch.remainder,
    ]
    devices = self.devices
    for dtype, op, device in product(self.dtypes, binary_ops, devices):
        if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
            continue
        try:
            x = self.data_for(dtype, device)
            y = self.data_for(dtype, device)
            fn = apply(op)
            ref = fn(x, y)
        except Exception:
            # If eager mode doesn't support a dtype/op/device combo,
            # neither does the fuser. Catch everything to avoid needing to
            # guess what errors might be thrown by eager.
            continue
        try:
            t = torch.jit.trace(fn, (x, y))
            self.assertEqual(ref, t(x, y))
            if op not in fp_only or dtype.is_floating_point:
                self.assertAllFused(t.graph_for(x, y))
        except Exception as e:
            raise RuntimeError(
                " ".join(["Failed:", str(dtype), op.__name__, device])
            )
def test_binary_scalar_ops(self):
    """Compile scalar-only binary-op graphs directly from parsed IR with
    TensorExprKernel and compare against the IR interpreter.

    IR that the interpreter itself rejects is skipped; interpreter-accepted
    IR must both compile and produce matching results at runtime.
    """
    def apply(fn):
        return lambda x, y: fn(x, y)
    # Template instantiated via locals(): dtype_x/dtype_y/op below.
    ir_template = """
graph(%x : {dtype_x}, %y : {dtype_y}):
  %z = {op}(%x, %y)
  return (%z)"""
    binary_ops = [
        "aten::mul",
        "aten::add",
        "aten::sub",
        "aten::div",
        "aten::lt",
        "aten::le",
        "aten::eq",
        "aten::ne",
        "aten::gt",
        "aten::ge",
        "aten::__or__",
        "aten::__xor__",
        "aten::__and__",
        "aten::__lshift__",
        "aten::__rshift__",
    ]
    dtypes = ['int', 'float', 'bool']
    values = {'int' : [10, 3], 'float' : [12.34, 2.78], 'bool' : [True, False]}
    devices = self.devices
    for dtype_x, dtype_y, op, device in product(dtypes, dtypes, binary_ops, devices):
        code = ir_template.format(**locals())

        # Interpret the graph
        try:
            graph = torch._C.parse_ir(code)
            for x, y in product(values[dtype_x], values[dtype_y]):
                ref = torch._C._jit_interpret_graph(graph, (x, y))
        except Exception:
            # If we can't interpret this IR, don't bother checking NNC.
            continue

        # Compile the graph
        try:
            k = torch._C._te.TensorExprKernel(graph)
        except Exception as e:
            raise RuntimeError(" ".join(["Compilation failed:", device, str(code)]))

        # Run the graph
        for x, y in product(values[dtype_x], values[dtype_y]):
            ref = torch._C._jit_interpret_graph(graph, (x, y))
            try:
                res = k.run((x, y))
                self.assertEqual(ref, res)
            except Exception as e:
                raise RuntimeError(" ".join(["Failed at runtime:", device, str(x), str(y), str(code)]))
def test_matmul(self):
    """Trace torch.matmul over many shape combinations: traced results must
    always match eager; only supported (2D x 2D) shapes must fuse.

    Fixes: idiomatic ``not in`` membership test, exception chaining, and
    removal of an unused exception binding.
    """
    if self.dynamic_shapes:
        self.skipTest("don't run conv with dynamic shapes")

    def fn(x, y):
        return torch.matmul(x, y)

    devices = ['cpu']  # No cuda support for ext calls yet
    sizes = [[[128, 128], [128, 128]],
             [[10, 10], [10, 10]],
             [[1, 16], [16, 128]],
             [[128], [128]],
             [[128], [128, 128]],
             [[3], [3]],
             [[3, 4], [4]],
             [[10, 3, 4], [4]],
             [[10, 3, 4], [10, 4, 5]],
             [[10, 3, 4], [4, 5]],
             ]

    # Only 2D x 2D matrix multiply is supported. For non-supported sizes we
    # still want to run results verification to test that we didn't
    # accidentally fuse it, but we skip the 'is-fused' check.
    # TODO: add support for other shape combinations and make this set empty:
    skip_is_fused_check_sizes = ["[[128], [128]]",
                                 "[[128], [128, 128]]",
                                 "[[3], [3]]",
                                 "[[3, 4], [4]]",
                                 "[[10, 3, 4], [4]]",
                                 "[[10, 3, 4], [10, 4, 5]]",
                                 "[[10, 3, 4], [4, 5]]",
                                 ]
    for dtype, size, device in product(self.dtypes, sizes, devices):
        if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
            continue
        try:
            size_x, size_y = size
            x = self.data_for(dtype, device, size=size_x)
            y = self.data_for(dtype, device, size=size_y)
            ref = fn(x, y)
        except Exception:
            # If eager mode doesn't support a dtype/op/device combo,
            # neither does the fuser. Catch everything to avoid needing to
            # guess what errors might be thrown by eager.
            continue
        try:
            t = torch.jit.trace(fn, (x, y))
            t(x, y)
            self.assertEqual(ref, t(x, y))
            if str(size) not in skip_is_fused_check_sizes:
                self.assertAllFused(t.graph_for(x, y))
        except Exception as e:
            raise RuntimeError(
                " ".join(["Failed:", str(dtype), device])
            ) from e
def test_binary_tensor_scalar_ops(self):
    """Trace tensor-op-scalar combinations and check traced output matches
    eager and the graph fully fuses.

    Fix: ``torch.jit.trace(fn, (x))`` passed a bare tensor (``(x)`` is not
    a tuple); use ``(x,)`` as clearly intended. Also chain exceptions.
    """
    with torch._jit_internal._disable_emit_hooks():
        def apply_with_scalar(fn, scalar):
            return lambda x: fn(x, scalar)

        # FIXME: Fails in IR Eval: torch.int64 and_ cpu
        binary_ops = [
            operator.__and__,
            operator.__or__,
            operator.__xor__,
            torch.add,
            torch.sub,
            torch.mul,
            torch.eq,
            torch.ne,
            torch.ge,
            torch.lt,
            torch.gt,
        ]
        devices = self.devices
        # Maybe we should split this into separate tests to speed it up by
        # only using scalar values relevant to particular ops
        scalars = [1.5, 3, 0, -2.0, -1]
        for dtype, op, device, scalar in product(self.dtypes, binary_ops, devices, scalars):
            if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
                continue
            try:
                x = self.data_for(dtype, device)
                fn = apply_with_scalar(op, scalar)
                ref = fn(x)
            except Exception:
                # If eager mode doesn't support a dtype/op/device combo,
                # neither does the fuser. Catch everything to avoid needing to
                # guess what errors might be thrown by eager.
                continue
            try:
                t = torch.jit.trace(fn, (x,))
                self.assertEqual(ref, t(x))
                self.assertAllFused(t.graph_for(x))
            except Exception as e:
                raise RuntimeError(
                    " ".join(["Failed:", str(dtype), op.__name__, device])
                ) from e
def test_binary_div_ops(self):
    """Trace div/remainder/fmod against nonzero scalars and check traced
    output matches eager (no fusion assertion for these ops).

    Fix: ``(x)`` is not a tuple — use ``(x,)`` for the trace inputs; chain
    the original exception on failure.
    """
    def apply_with_scalar(fn, scalar):
        return lambda x: fn(x, scalar)

    binary_ops = [
        torch.div,
        torch.remainder,
        torch.fmod,
    ]
    devices = self.devices
    # Maybe we should split this into separate tests to speed it up by
    # only using scalar values relevant to particular ops
    scalars = [1.5, 3, -2.0, -1]  # skip 0
    for dtype, op, device, scalar in product(self.dtypes, binary_ops, devices, scalars):
        if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
            continue
        try:
            x = self.data_for(dtype, device)
            fn = apply_with_scalar(op, scalar)
            ref = fn(x)
        except Exception:
            # If eager mode doesn't support a dtype/op/device combo,
            # neither does the fuser. Catch everything to avoid needing to
            # guess what errors might be thrown by eager.
            continue
        try:
            t = torch.jit.trace(fn, (x,))
            self.assertEqual(ref, t(x))
        except Exception as e:
            raise RuntimeError(
                "Failed: {} {} {} {}".format(dtype, op.__name__, device, scalar)
            ) from e
def test_binary_pow(self):
    """Trace torch.pow with scalar exponents over float dtypes; traced
    output must match eager and the graph must fully fuse.

    Fix: ``(x)`` is not a tuple — use ``(x,)`` for the trace inputs; chain
    the original exception on failure.
    """
    def apply_with_scalar(fn, scalar):
        return lambda x: fn(x, scalar)

    dtypes = [
        # FIXME: 'pow' fails with dtype=torch.float16/device=cuda/scalar=0
        # torch.float16,
        torch.float32,
        torch.float64,
        # torch.bool intentionally not included
    ]
    binary_ops = [
        torch.pow,
    ]
    # Maybe we should split this into separate tests to speed it up by
    # only using scalar values relevant to particular ops
    scalars = [1.5, 3, 0, -2.0, -1]
    for dtype, op, device, scalar in product(dtypes, binary_ops, self.devices, scalars):
        if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
            continue
        try:
            x = self.data_for(dtype, device)
            fn = apply_with_scalar(op, scalar)
            ref = fn(x)
        except Exception:
            # If eager mode doesn't support a dtype/op/device combo,
            # neither does the fuser. Catch everything to avoid needing to
            # guess what errors might be thrown by eager.
            continue
        try:
            t = torch.jit.trace(fn, (x,))
            self.assertEqual(ref, t(x))
            self.assertAllFused(t.graph_for(x))
        except Exception as e:
            raise RuntimeError(
                " ".join(["Failed:", str(dtype), op.__name__, device])
            ) from e
def test_ternary_ops(self):
    """Trace three-tensor ops (lerp, addcmul) and check traced output
    matches eager and the graph fully fuses."""
    def apply(fn):
        return lambda x, y, z: fn(x, y, z)
    ternary_ops = [
        torch.lerp,
        torch.addcmul,
    ]
    devices = self.devices
    for dtype, op, device in product(self.dtypes, ternary_ops, devices):
        if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
            continue
        try:
            x = self.data_for(dtype, device)
            y = self.data_for(dtype, device)
            z = self.data_for(dtype, device)
            fn = apply(op)
            ref = fn(x, y, z)
        except Exception:
            # If eager mode doesn't support a dtype/op/device combo,
            # neither does the fuser. Catch everything to avoid needing to
            # guess what errors might be thrown by eager.
            continue
        try:
            t = torch.jit.trace(fn, (x, y, z))
            self.assertEqual(ref, t(x, y, z))
            self.assertAllFused(t.graph_for(x, y, z))
        except Exception as e:
            raise RuntimeError(
                " ".join(["Failed:", str(dtype), op.__name__, device])
            )
def test_ternary_norm_ops(self):
    """Trace F.batch_norm (input + per-channel mean/var tensors) and check
    traced output matches eager and the graph fully fuses."""
    def apply(fn):
        return lambda x, y, z: fn(x, y, z)
    ternary_ops = [
        F.batch_norm,
    ]
    devices = self.devices
    for dtype, op, device in product(self.dtypes, ternary_ops, devices):
        if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
            continue
        try:
            # NCHW input with 3 channels; y/z are the running stats.
            x = self.data_for(dtype, device, size=[5, 3, 128, 128])
            y = self.data_for(dtype, device, size=[3])
            z = self.data_for(dtype, device, size=[3])
            fn = apply(op)
            ref = fn(x, y, z)
        except Exception:
            # If eager mode doesn't support a dtype/op/device combo,
            # neither does the fuser. Catch everything to avoid needing to
            # guess what errors might be thrown by eager.
            continue
        try:
            t = torch.jit.trace(fn, (x, y, z))
            self.assertEqual(ref, t(x, y, z))
            self.assertAllFused(t.graph_for(x, y, z))
        except Exception as e:
            raise RuntimeError(
                " ".join(["Failed:", str(dtype), op.__name__, device])
            )
@unittest.skip("FIXME: fuser doesn't include ListConstruct nodes to the group causing a failure")
def test_list_ops(self):
    """Trace ops taking a list of tensors (torch.cat over squared inputs)
    and check traced output matches eager and the graph fully fuses."""
    def apply(fn):
        return lambda x, y, z: fn([x * x, y * y, z * z])
    devices = self.devices
    list_ops = [
        torch.cat,
    ]
    for dtype, op, device in product(self.dtypes, list_ops, devices):
        if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
            continue
        try:
            x = self.data_for(dtype, device, size=[5, 4, 1, 7])
            y = self.data_for(dtype, device, size=[5, 4, 1, 7])
            z = self.data_for(dtype, device, size=[5, 4, 1, 7])
            fn = apply(op)
            ref = fn(x, y, z)
        except Exception:
            # If eager mode doesn't support a dtype/op/device combo,
            # neither does the fuser. Catch everything to avoid needing to
            # guess what errors might be thrown by eager.
            continue
        try:
            t = torch.jit.trace(fn, (x, y, z))
            self.assertEqual(ref, t(x, y, z))
            self.assertAllFused(t.graph_for(x, y, z))
        except Exception as e:
            raise RuntimeError(
                " ".join(["Failed:", str(dtype), op.__name__, device])
            )
def test_where_ops(self):
    """Trace torch.where in tensor/tensor, tensor/scalar, and scalar/tensor
    forms and check traced output matches eager and the graph fuses."""
    def apply(fn):
        return lambda cond, x, y: fn(cond, x, y)
    ops = [
        torch.where,
        lambda cond, x, y: torch.where(cond, x, 3.1415),
        lambda cond, x, y: torch.where(cond, 42, y),
    ]
    devices = self.devices
    for dtype, op, device in product(self.dtypes, ops, devices):
        if dtype in [torch.float16, torch.bfloat16] and device == "cpu":
            continue
        try:
            cond = self.data_for(torch.bool, device)
            x = self.data_for(dtype, device)
            y = self.data_for(dtype, device)
            fn = apply(op)
            ref = fn(cond, x, y)
        except Exception:
            # If eager mode doesn't support a dtype/op/device combo,
            # neither does the fuser. Catch everything to avoid needing to
            # guess what errors might be thrown by eager.
            continue
        try:
            t = torch.jit.trace(fn, (cond, x, y))
            self.assertEqual(ref, t(cond, x, y))
            self.assertAllFused(t.graph_for(cond, x, y))
        except Exception as e:
            raise RuntimeError(
                " ".join(["Failed:", str(dtype), op.__name__, device])
            )
def test_unsupported_dtypes(self):
    """Dtypes the fuser does not support must still trace and run correctly,
    but must produce ZERO fusion groups."""
    for device in self.devices:
        def fn(x):
            return x * x + x
        unsupported_dtypes = [
            torch.uint8,
            torch.complex32,
            torch.complex64,
            torch.complex128,
            torch.qint8,
            torch.quint8,
            torch.qint32,
        ]
        for dtype in unsupported_dtypes:
            try:
                x = self.data_for(dtype, device)
                ref = fn(x)
            except Exception:
                # If eager mode doesn't support a dtype/op/device combo,
                # neither does the fuser. Catch everything to avoid needing to
                # guess what errors might be thrown by eager.
                continue
            t = torch.jit.trace(fn, (x,))
            self.assertEqual(ref, t(x))
            # The key assertion: nothing was fused for this dtype.
            self.assertEqual(len(self.findFusionGroups(t.graph_for(x))), 0)
def test_superslomo(self):
    """Regression test extracted from the Super-SloMo model; scripted and
    eager outputs must agree over repeated runs."""
    devices = self.devices.copy()
    if not LLVM_ENABLED:
        devices.remove("cpu")
    for device in devices:
        # Test extracted from Super-SloMo: https://github.com/avinashpaliwal/Super-SloMo
        # A few interesting things happen here: strided inputs of mixed size,
        # plus outputs of mixed shapes. The latter characteristic happened to
        # expose a memory corruption bug due to not properly guarding the
        # outputs.
        def eager(t0, t1, t2, t3, t4):
            t5 = torch.mul(t0, t4)
            t6 = torch.mul(t2, t3)
            t7 = torch.mul(t6, t1)
            t9 = torch.add(t5, t7)
            t11 = torch.add(t0, t6)
            ft_p = torch.div(t9, t11)
            return (ft_p, t11, t9, t6)

        # Deliberately non-contiguous inputs (transpose/permute).
        t0 = torch.rand(1, 6, 352, 352, device=device).transpose(0, 1)
        t1 = torch.rand(6, 3, 352, 352, device=device)
        t2 = torch.rand(6, device=device)[None, None, None, :].permute(3, 0, 1, 2)
        t3 = torch.rand(6, 1, 352, 352, device=device)
        t4 = torch.rand(6, 3, 352, 352, device=device)
        inputs = [t0, t1, t2, t3, t4]

        script = torch.jit.script(eager)
        # Run repeatedly so the profiling executor optimizes the graph.
        for _ in range(4):
            for pair in zip(script(*inputs), eager(*inputs)):
                test, ref = pair
                torch.testing.assert_close(test, ref)

        self.assertAllFused(script.graph_for(*inputs), except_for={"prim::TupleConstruct"})
def test_sub_gt_and(self):
    """Regression: a use of a bool tensor inside a never-executed branch must
    not poison fusion-group construction for its definition."""
    for device in self.devices:
        def eager(t1, t2, t3, t4, t: float):
            w = t1 - t2
            h = t3 - t4
            k = (w > t) & (h > t)
            assert k.dtype == torch.bool
            if t > 0.5:
                # Putting a use of k in a never-executed conditional prevents
                # profiling its type, which leaves it as "Tensor". If we
                # propagate Tensor back to the definition of k, we have to be
                # careful not to create a fusion group containing it.
                return k + 1
            return w
        t = torch.rand(8, dtype=torch.float, device=device)
        scripted = self.checkScript(eager, (t, t, t, t, 0.1))
def test_chunk_mul_one(self):
    """Chunk along the last dim into three pieces, scale only the first;
    scripted output must match eager."""
    if self.dynamic_shapes:
        self.skipTest("TODO: chunk dynamic shapes")
    for device in self.devices:
        def eager(inp):
            first, second, third = torch.chunk(inp, 3, -1)
            return first * 3, second, third

        sample = torch.rand(64, 1, 3072, dtype=torch.float, device=device)
        first, second, third = eager(sample)
        self.checkScript(eager, (sample,))
def test_eq_unsqueeze_type_as(self):
    """Equality -> unsqueeze -> type_as chain: scripted output must match
    eager on float values and long labels."""
    for device in self.devices:
        def eager(values, labels):
            selected = labels == 1
            selected = torch.unsqueeze(selected, -1)
            return selected.type_as(values), selected

        values = torch.rand(1, 64, 1024, device=device, dtype=torch.float)
        labels = torch.randint(-2, 2, (1, 64), device=device, dtype=torch.long)
        self.checkScript(eager, (values, labels))
def test_neg_pow(self):
    """neg(pow(...)) in tensor/tensor, tensor/scalar, and scalar/tensor
    flavors: scripted output must match eager for each."""
    def eager_tt(a: torch.Tensor, b: torch.Tensor):
        return torch.neg(torch.pow(a, b))

    def eager_ts(a: torch.Tensor, b: float):
        return torch.neg(torch.pow(a, b))

    def eager_st(a: float, b: torch.Tensor):
        return torch.neg(torch.pow(a, b))

    a = torch.rand(1, dtype=torch.float)
    b = torch.rand(1, dtype=torch.float)
    s = b.item()
    script = self.checkScript(eager_tt, (a, b))
    # TODO: re-enable fusion, which doesn't work right now. just test correctness for now
    # self.assertAllFused(script.graph_for(a, b))
    script = self.checkScript(eager_ts, (a, s))
    # self.assertAllFused(script.graph_for(a, s))
    script = self.checkScript(eager_st, (s, b))
    # self.assertAllFused(script.graph_for(s, b))
@unittest.skipIf(not LLVM_ENABLED, "Too slow to run with the TE interpreter")
def test_conv2d_depthwise(self):
    """Depthwise conv2d (groups == channels) is supported by NNC: the
    scripted graph must fully fuse and match eager."""
    if self.dynamic_shapes:
        self.skipTest("don't run conv with dynamic shapes")

    def eager(input, weight, bias):
        # groups=72 with 72 input channels -> depthwise convolution.
        return torch.conv2d(input, weight, bias, stride=1, padding=1, groups=72)

    input = torch.rand((1, 72, 56, 56), dtype=torch.float)
    weight = torch.rand((72, 1, 3, 3), dtype=torch.float)
    bias = torch.rand((72), dtype=torch.float)

    script = self.checkScript(eager, (input, weight, bias))
    self.assertAllFused(script.graph_for(input, weight, bias))
def test_conv2d(self):
    """Dense (groups=1) conv2d is NOT handled by the TE fuser: the optimized
    graph must contain no TensorExpr nodes."""
    if self.dynamic_shapes:
        self.skipTest("don't run conv with dynamic shapes")

    def eager(input, weight, bias):
        return torch.conv2d(input, weight, bias, stride=1, padding=1, groups=1)

    input = torch.rand((1, 64, 56, 56), dtype=torch.float)
    weight = torch.rand((64, 64, 3, 3), dtype=torch.float)
    bias = torch.rand((64), dtype=torch.float)

    script = self.checkScript(eager, (input, weight, bias))
    FileCheck().check_not("TensorExpr").run(torch.jit.last_executed_optimized_graph())
def test_type_as_cat(self):
    """cat of a tensor with a type_as-converted tensor across dtype pairs;
    traced output must track changing values (zero vs one) and fuse."""
    with inline_fusion_groups():
        def eager(x, y):
            return torch.cat((x, y.type_as(x)), dim=1)
        dtypes = self.dtypes.copy()
        # CPU fuser doesn't support float16.
        dtypes.remove(torch.float16)
        dtypes.remove(torch.bfloat16)
        for dtype1, dtype2 in product(dtypes, dtypes):
            x = torch.randint(2, (1, 13,)).to(dtype1)
            zero = torch.tensor([[0]]).to(dtype2)
            one = torch.tensor([[1]]).to(dtype2)
            script = torch.jit.trace(eager, (x, zero))
            # Multiple runs so the profiling executor settles, alternating
            # input values to make sure the trace isn't value-specialized.
            for _ in range(3):
                torch.testing.assert_close(
                    script(x, zero),
                    eager(x, zero))
                torch.testing.assert_close(
                    script(x, one),
                    eager(x, one))
            self.assertAllFused(script.graph_for(x, one))
def test_to_device(self):
    """A to(device) followed by relu should still fully fuse."""
    def eager(t):
        moved = t.to(device="cpu")
        return moved.relu()

    sample = torch.rand(8)
    scripted = self.checkScript(eager, (sample,))
    self.assertAllFused(scripted.graph_for(sample))
def test_dims(self):
    """Division of a strided (1, 1, 768) tensor by a broadcastable
    (1, 1, 1) tensor must script correctly and fully fuse."""
    def eager(numer, denom):
        return numer / (denom + 0.0001)

    lhs = torch.linspace(-1, 1, 768, dtype=torch.float32).as_strided((1, 1, 768), (768, 1, 1))
    rhs = torch.tensor([[[2.0]]], dtype=torch.float32)
    scripted = self.checkScript(eager, (lhs, rhs))
    self.assertAllFused(scripted.graph_for(lhs, rhs))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_channels_last_dims_dynamic(self):
    """Dynamic-shape fusion must preserve channels-last contiguity for every
    combination of broadcast (size-1) dimensions.

    Fix: the loop variable was named ``set``, shadowing the builtin.
    """
    def eager(x, y):
        return x + (y + 0.0001)

    indices = [0, 1, 2, 3]
    # All subsets of dims to squash to size 1 (including the empty subset).
    subsets = []
    for i in range(0, len(indices) + 1):
        for subset in combinations(indices, i):
            subsets.append(subset)

    for squashed_dims in subsets:  # renamed from `set` (shadowed the builtin)
        size = [2, 3, 4, 5]
        for index in squashed_dims:
            size[index] = 1
        inp = torch.rand(size).to(memory_format=torch.channels_last).cuda()
        with texpr_enable_strategy([("DYNAMIC", 20)]):
            foo_s = torch.jit.trace(eager, (inp, inp))
            # Warm up the profiling executor before inspecting the output.
            for _ in range(3):
                out = foo_s(inp, inp)
            out_eager = eager(inp, inp)
            self.assertEqual(out_eager, out)
            self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
            g = torch.jit.last_executed_optimized_graph()
            FileCheck().check("TensorExpr").run(g)
def test_exhaust_specializations(self):
    """With only one STATIC specialization allowed, two distinct input shapes
    must still each end up with a TensorExpr kernel (2 total)."""
    with texpr_enable_strategy([("STATIC", 1)]):
        @torch.jit.script
        def foo(x):
            return x + x + x

        # Warm up each shape separately so both get compiled.
        for _ in range(3):
            foo(torch.rand([2, 2]))

        for _ in range(3):
            foo(torch.rand([4, 4, 4]))

        g = torch.jit.last_executed_optimized_graph()
        torch._C._jit_pass_inline(g)

        FileCheck().check_count("TensorExpr", 2, exactly=True).run(g)
def test_unsqueeze_var_dim(self):
    """unsqueeze with a runtime (int-typed) dim argument must script
    correctly against a permuted lhs."""
    def eager(lhs, rhs, dim: int):
        expanded = torch.unsqueeze(rhs, dim=dim)
        return lhs * expanded

    lhs = torch.rand(4, 4, 64).permute(1, 0, 2)
    rhs = torch.rand(4, 4)
    self.checkScript(eager, (lhs, rhs, 2))
def _test_fwd_bwd(self, fn):
    """Run 11 forward/backward/SGD-style steps on eager ``fn`` and its
    scripted version, each on its own parameter copy, sharing the same
    upstream gradient each step. Only the final forward outputs are compared
    directly; the backward path is verified implicitly because the parameter
    updates (and hence the final outputs) depend on the computed gradients.

    Fix: unused loop index ``i`` renamed to ``_``.
    """
    x = torch.arange(-10, 10, dtype=torch.float32, requires_grad=True)
    xs = torch.arange(-10, 10, dtype=torch.float32, requires_grad=True)
    script = torch.jit.script(fn)
    for _ in range(11):
        y = fn(x)
        g0 = torch.rand_like(y)
        y.backward(g0)

        ys = script(xs)
        ys.backward(g0)

        with torch.no_grad():
            # Manual SGD step, then clear grads for the next iteration.
            x -= 0.1 * x.grad
            xs -= 0.1 * xs.grad
            x.grad = None
            xs.grad = None
    torch.testing.assert_close(y, ys)
def test_relu_fwd_bwd(self):
    """Forward+backward parity for relu of a scaled input."""
    def eager(inp):
        return torch.relu(inp * 1.01)
    self._test_fwd_bwd(eager)
def test_hardswish_fwd_bwd(self):
    """Forward+backward parity for a scaled hardswish."""
    def eager(inp):
        return F.hardswish(inp) * 1.01
    self._test_fwd_bwd(eager)
def test_hardsigmoid_fwd_bwd(self):
    """Forward+backward parity for a scaled hardsigmoid."""
    def eager(inp):
        return F.hardsigmoid(inp) * 1.01
    self._test_fwd_bwd(eager)
def test_cat_graph_opt(self):
    """log over a cat of three differently-sized tensors must script
    correctly and fully fuse."""
    def foo(a, b, c):
        return torch.log(torch.cat([a, b, c]))

    samples = (torch.rand([5, 5]), torch.rand([2, 5]), torch.rand([1, 5]))
    self.checkScript(foo, samples)
    # TODO: not sure why not updated graph isn't reflected in last_optimized_graph
    self.assertLastGraphAllFused()
def test_dynamic_cat(self):
    """Nested cats over lists whose element sizes vary per item must not
    crash across repeated runs (exercises dynamic-size handling)."""
    with inline_fusion_groups():
        @torch.jit.script
        def repro(xs: List[torch.Tensor], ys: List[torch.Tensor], zs: List[torch.Tensor]):
            return [
                torch.cat([x, torch.cat([y, z], dim=-1)], dim=-1)
                for x, y, z in zip(xs, ys, zs)
            ]
        for _ in range(3):
            N = 3
            xs = [torch.ones(21) for _ in range(N)]
            # Note: concat of ys and zs will have the same size for each
            # pair, even though the individual ys and zs do not.
            ys = [torch.ones(N - i) for i in range(N)]
            zs = [torch.ones(i) for i in range(N)]
            repro(xs, ys, zs)
def test_scalar_only_inputs(self):
    """A graph whose only external input is a Python float must still
    script and run correctly."""
    def eager(scale: float):
        ones = torch.ones(1)
        return ones * scale

    self.checkScript(eager, (1.0,))
def test_cat_2k_args(self):
    """A cat with 2000 inputs must not be fused (argument-count limit):
    expect zero fusion groups."""
    with inline_fusion_groups():
        def eager(t):
            return torch.relu(torch.cat([t for _ in range(2000)]))

        sample = torch.randn(1)
        traced = self.checkTrace(eager, (sample,))
        groups = self.findFusionGroups(traced.graph_for(sample))
        self.assertEqual(len(groups), 0)
def test_adaptive_avg_pool2d(self):
    # TODO: once the adaptive_avg_pool2d is available in OpInfo DB, this
    # test should be moved there
    """Compile adaptive_avg_pool2d (tuple and scalar output_size forms)
    with TensorExprKernel and compare against the traced function."""
    with inline_fusion_groups():
        def foo1(x):
            return torch.nn.functional.adaptive_avg_pool2d(x, (2, 2))

        def foo2(x):
            return torch.nn.functional.adaptive_avg_pool2d(x, (2))

        x = torch.randn(4, 4, 4)
        for foo in [foo1, foo2]:
            f = torch.jit.trace(foo, (x,))
            kernel = torch._C._te.TensorExprKernel(f.graph)
            correct_val = f(x)
            self.assertEqual(kernel.run((x,)), correct_val)
def test_unrolled_cat(self):
    """Regression: a loop-carried cat that gets profiled/unrolled with a
    burned-in size=1 must still produce correct results when a larger
    input later takes the unrolled path."""
    with inline_fusion_groups():
        def eager(x):
            ret = torch.empty(0)
            for i in range(x.shape[0]):
                ret = torch.cat([ret, x[i].relu()])
            return ret
        script = torch.jit.script(eager)

        # Warm up with size=1 tensor; since the loop iterates once the
        # profile data will be "burned in" assuming size=1, and then
        # unrolled.
        x = torch.ones(1, 1)
        for _ in range(3):
            script(x)

        torch.testing.assert_close(eager(x), script(x))

        # Now when an input hits the unrolled path, it will produce an
        # incorrectly-sized tensor, since size=1 has been burned in.
        x = torch.ones((8, 1))
        torch.testing.assert_close(eager(x), script(x))
def test_batch_norm(self):
    """Trace torch.batch_norm with every combination of present/absent
    weight and bias; each variant must fully fuse and match eager.

    Fix: ``torch.testing.assert_allclose`` is deprecated — use
    ``torch.testing.assert_close`` like the rest of this file.
    """
    def test(fn, args):
        trace = torch.jit.trace(fn, args)
        self.assertAllFused(trace.graph_for(*args))
        # assert_close replaces the deprecated assert_allclose.
        torch.testing.assert_close(fn(*args), trace(*args))

    def bn(i, x):
        return torch.batch_norm(i, x, x, x, x, False, 0.1, 1e-4, False).relu()

    def bn_no_weight(i, x):
        return torch.batch_norm(i, None, x, x, x, False, 0.1, 1e-4, False).relu()

    def bn_no_bias(i, x):
        return torch.batch_norm(i, x, None, x, x, False, 0.1, 1e-4, False).relu()

    def bn_neither(i, x):
        return torch.batch_norm(i, None, None, x, x, False, 0.1, 1e-4, False).relu()

    for device in self.devices:
        i = torch.randn(4, 16, 32, 40, device=device)
        x = torch.randn(16, device=device)
        for fn in [bn, bn_no_weight, bn_no_bias, bn_neither]:
            test(fn, (i, x))
def test_profiler(self):
    """Fused kernels must show up in autograd-profiler output under a
    generated name derived from their ops."""
    @torch.jit.script
    def test(x, y, z):
        return x * y + z

    inputs = [torch.randn(4) for _ in range(3)]
    with torch.autograd.profiler.profile() as prof:
        for _ in range(3):
            test(*inputs)
    self.assertIn("fused_mul_add", prof.table())
def test_skip_grad_in_check(self):
    """Flipping requires_grad after warm-up (under inference_mode) must not
    multiply guard branches: exactly one prim::If in the inlined graph."""
    @torch.jit.script
    def foo(x):
        return (x + 2) / 2

    inp = torch.rand([4, 4])
    # Warm up with a non-requires-grad input first.
    for _ in range(3):
        foo(inp)

    inp.requires_grad_(True)
    with torch.inference_mode():
        for _ in range(3):
            foo(inp)
    g = torch.jit.last_executed_optimized_graph()
    torch._C._jit_pass_inline(g)
    torch._C._jit_pass_inline(g)
    FileCheck().check_count("prim::If", 1, exactly=True).run(g)
def test_dynamic_shapes(self):
    """With the DYNAMIC fusion strategy, varying input sizes/strides must be
    served by a single guarded kernel per function (one
    TensorExprDynamicGuard per compiled shape family)."""
    from functools import partial
    n = 10

    # Generators covering contiguous, transposed, sliced, strided, and
    # channels-last layouts.
    gen_tensor = (
        lambda n: R(1, n),
        lambda n: R(n, n),
        lambda n: R(n, n).transpose(0, 1),
        lambda n: R(n + 1, n + 1, 2)[:n, n, 0],
        lambda n: R(n, n, 2)[:, :, 0],
        lambda n: R(n, n + 1, n + 2, n + 3).to(memory_format=torch.channels_last),
    )

    with texpr_enable_strategy([("DYNAMIC", 20)]):
        def foo(x, y, z):
            return torch.sigmoid(torch.tanh(x))

        foo.__disable_jit_function_caching__ = True

        def fi(x, y, z):
            return torch.tanh(x + y)

        fi.__disable_jit_function_caching__ = True

        def fum(x, y, z):
            return torch.tanh(x + y) + z

        fum.__disable_jit_function_caching__ = True

        funcs = [foo, fi, fum]
        with inline_fusion_groups():
            for device in self.devices:
                I = partial(torch.randint, 0, 100, device=device)
                R = partial(torch.randn, device=device)

                for i, func in enumerate(funcs):
                    num_args = i + 1
                    for j, gen in enumerate(gen_tensor):
                        inps = (gen(n), gen(n), gen(n))
                        func_s = torch.jit.trace(func, inps, check_trace=False)
                        torch._C._jit_pass_erase_shape_information(func_s.graph)
                        # Warm up at size n, then run at size n + 1 to hit
                        # the dynamic path.
                        for _ in range(2):
                            x, y, z = gen(n), gen(n), gen(n)
                            func_s(x, y, z)

                        for incr in range(3):
                            func_s(*[gen(n + 1) for _ in range(3)])

                        g = torch.jit.last_executed_optimized_graph()
                        torch._C._jit_pass_inline(g)
                        torch._C._jit_pass_dce(g)

                        # We should see only one optimized kernel
                        FileCheck().check_count("TensorExprDynamicGuard", 1, exactly=True).run(g)
                        self.assertEqual(func(*inps), func_s(*inps))

                # Feed every layout family through one scripted function:
                # expect one guard per family.
                gen = gen_tensor[0]
                inps = (gen(n), gen(n), gen(n))
                foo_s = torch.jit.trace(foo, inps)
                torch._C._jit_pass_erase_shape_information(foo_s.graph)
                g_prev = None
                for gen in gen_tensor:
                    for i in range(3):
                        foo_s(*[gen(n + i) for _ in range(3)])
                        inps = (gen(n), gen(n), gen(n))
                        self.assertEqual(foo_s(*inps), foo(*inps))
                g = torch.jit.last_executed_optimized_graph()
                torch._C._jit_pass_inline(g)
                torch._C._jit_pass_dce(g)
                FileCheck().check_count("TensorExprDynamicGuard", len(gen_tensor), exactly=True).run(g)
@unittest.skipIf(not RUN_CUDA, "half-precision NNC fusion requires CUDA")
def test_autocast_up(self):
    """exp following an autocast up-cast (half input) must fully fuse on
    CUDA."""
    def f(x):
        upcast = x._autocast_to_full_precision(True, True)
        return torch.exp(upcast)

    sample = torch.rand((2, 2), dtype=torch.half, device="cuda")
    scripted = torch.jit.script(f)
    # Two profiling runs before inspecting the optimized graph.
    scripted(sample)
    scripted(sample)
    self.assertLastGraphAllFused()
@unittest.skipIf(not RUN_CUDA, "half-precision NNC fusion requires CUDA")
def test_autocast_down(self):
    """sigmoid followed by an autocast down-cast to half must fully fuse on
    CUDA."""
    def f(x):
        activated = torch.sigmoid(x)
        return activated._autocast_to_reduced_precision(True, True, torch.half, torch.half)

    sample = torch.rand((2, 2), dtype=torch.float, device="cuda")
    scripted = torch.jit.script(f)
    # Two profiling runs before inspecting the optimized graph.
    scripted(sample)
    scripted(sample)
    self.assertLastGraphAllFused()
def test_with_strict_fusion(self):
    """torch.jit.strict_fusion: fully-fusible code succeeds; unfusible ops
    (rand, autodiff-split graphs, multiple groups) raise with diagnostics;
    eager-mode use only warns."""
    def success(x):
        with torch.jit.strict_fusion():
            return x + x + x

    scripted = self.checkScript(success, (torch.rand([4]),))
    g = torch.jit.last_executed_optimized_graph()
    FileCheck().check_not("aten::add").check("prim::TensorExprGroup").run(g)

    def foo(x):
        with torch.jit.strict_fusion():
            # rand is not fusible, so strict_fusion must raise.
            return x + x + torch.rand([4]) + 3

    with self.assertRaises(Exception) as error_out:
        foo_s = torch.jit.script(foo)
        foo_s(torch.rand([4]))
        foo_s(torch.rand([4]))
        print(torch.jit.last_executed_optimized_graph())
    fc = FileCheck().check("Found unfused operators")
    fc.check("aten::rand(int[] size")
    fc.check("torch.rand([4]").run(str(error_out.exception))

    with warnings.catch_warnings(record=True) as warns:
        foo(torch.rand([4]))

    FileCheck().check("Only works in script mode").run(str(warns[0]))

    def test_autodiff(x):
        with torch.jit.strict_fusion():
            return torch.rand([4]) + x + x + x

    foo_s = torch.jit.script(test_autodiff)
    inp = torch.rand([4], requires_grad=True)
    with self.assertRaises(Exception) as error_out:
        for _ in range(3):
            foo_s(inp)
    f = FileCheck().check("unfused operators").check("aten::rand")
    f.run(str(error_out.exception))

    def test_separate_fusions(x, y):
        with torch.jit.strict_fusion():
            # Two independent chains -> two fusion groups -> error.
            return x + x + x, y + y + y

    inp = torch.rand([4], requires_grad=True)
    with self.assertRaises(Exception) as error_out:
        for _ in range(3):
            foo_s = torch.jit.script(test_separate_fusions)
            foo_s(inp, inp)

    f = FileCheck().check("Found multiple fusions")
    f.run(str(error_out.exception))
def test_constant_chunk_shapes(self):
    # We had an issue where buildShapeExpressions would fail as show below:
    #
    # %1 : Tensor = Constant[..] # not supported, we don't build this shape
    # %2 : Tensor = Constant[..] # not supported
    # %3 : Tensor = aten::add(%1, %2) # inputs not supported, we don't build shape
    # ... = prim::ConstantChunk[..](%3) # it forgets to check whether input shapes exist, and fails
    """Regression: ConstantChunk whose input shape could not be built must
    not crash the fuser's shape-expression pass."""
    if self.dynamic_shapes:
        self.skipTest("TODO: chunk dynamic shapes")

    for device in self.devices:
        def f(x, y):
            # A tensor Constant feeds the add, triggering the case above.
            r = torch.tensor(4)
            z1, z2 = (x + y + r).chunk(2, dim=1)
            return z1 * z2

        x = torch.randn(4, 4, dtype=torch.float, device=device)
        y = torch.randn(4, 4, dtype=torch.float, device=device)

        ge = self.checkTrace(f, (x, y))
        graph = ge.graph_for(x, y)

        # make sure that we are actually testing the right scenario
        FileCheck().check("with " + FUSION_GROUP + "_").check_count(
            "ConstantChunk", 1, exactly=True
        ).run(str(graph))

        f_traced = torch.jit.trace(f, (x, y))

        for i in range(4):
            # make sure this doesn't error out
            res = f_traced(x, y)

        self.assertEqual(res, f(x, y))
# Concrete instantiations of TestTEFuser: one exercising static-shape
# fusion, one exercising dynamic-shape fusion.
class TestTEFuserStatic(TestTEFuser):
    dynamic_shapes = False


class TestTEFuserDynamic(TestTEFuser):
    dynamic_shapes = True


# Remove the parameterized base so the test runner only collects the two
# concrete variants above.
del TestTEFuser
# OpInfo names (see get_name below: "<name>" or "<name>.<variant>") that are
# known to compile and run correctly through TensorExprKernel.
works_list = [
    '__radd__',
    '__rdiv__',
    '__rmul__',
    '__rmod__',
    'abs',
    'acos',
    'add',
    'addcmul',
    'addmm.decomposed',
    'asin',
    'atan',
    'atan2',
    'ceil',
    'clamp',
    'clamp.scalar',
    'contiguous',
    'cos',
    'cosh',
    'div.no_rounding_mode',
    'div.true_rounding',
    'div.floor_rounding',
    'div.trunc_rounding',
    'eq',
    'erf',
    'erfc',
    'exp',
    'expand',
    'expand_as',
    'expm1',
    'floor',
    'fmod',
    'fmod.autodiffed',
    'ge',
    'gt',
    'isnan',
    'le',
    'lerp',
    'lgamma',
    'log',
    'log10',
    'log1p',
    'log2',
    'lt',
    'masked_fill',
    'max.binary',
    'mean',
    'min.binary',
    'mm',
    'mul',
    'ne',
    'neg',
    'nn.functional.hardshrink',
    'nn.functional.hardsigmoid',
    'nn.functional.hardswish',
    'nn.functional.softplus',
    'nn.functional.hardtanh',
    'nn.functional.leaky_relu',
    'nn.functional.relu',
    'nn.functional.relu6',
    'nn.functional.softsign',
    'nn.functional.tanhshrink',
    'nn.functional.threshold',
    'permute',
    'pow',
    'reciprocal',
    'remainder',
    'remainder.autodiffed',
    'reshape',
    'reshape_as',
    'round',
    'rsub',
    'rsub.rsub_tensor',
    'rsqrt',
    'sigmoid',
    'sign',
    'sin',
    'sinh',
    'sqrt',
    'sub',
    'sum',
    't',
    'tan',
    'tanh',
    'transpose',
    'true_divide',
    'trunc',
    'unsqueeze',
    'view',
    'view_as',
    'where',
    'bool',
    'byte',
    'char',
    'double',
    'float',
    'half',
    'int',
    'long',
    'short',
    'bool.channels_last',
    'byte.channels_last',
    'char.channels_last',
    'double.channels_last',
    'float.channels_last',
    'half.channels_last',
    'int.channels_last',
    'long.channels_last',
    'short.channels_last',
]

# Ops expected to FAIL TE compilation (test_failures asserts they do).
known_failures = [
    '__rmatmul__',
    'frac',
    'matmul',
]

# If your OpInfo test causes this test to fail, add it here
skip_ops = [
    'conj'
]
def get_name(op):
    """Build an OpInfo key: the op name, plus ".<variant>" when a variant
    test name is set (matches the entries in works_list/known_failures)."""
    parts = [op.name]
    if op.variant_test_name != '':
        parts.append(op.variant_test_name)
    return '.'.join(parts)
# Purpose of this class is to allow super() calls.
# super() [with no arguments] fails, presumably because of how instantiate_device_type_tests works.
# super(TestNNCOpInfo, self) fails because TestNNCOpInfo gets deleted from global scope.
# super(JitCommonTestCase, self).fn() would skip JitCommonTestCase.fn() implementation
class TestNNCOpInfoParent(JitCommonTestCase):
    # Intentionally empty: exists only as a stable super() anchor (see above).
    pass
class TestNNCOpInfo(TestNNCOpInfoParent):
    def setUp(self):
        # Start from TestNNCOpInfoParent in the MRO (see comment on that class).
        super(TestNNCOpInfoParent, self).setUp()
        # Flips global fuser/executor flags; restored in tearDown.
        self.tensorexpr_options = TensorExprTestOptions()
def tearDown(self):
self.tensorexpr_options.restore()
super(TestNNCOpInfoParent, self).tearDown()
def te_compile(self, device, dtype, op):
if op.name in skip_ops:
return
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in sample_inputs_itr:
arg_values = [sample_input.input] + list(sample_input.args)
kwarg_values = sample_input.kwargs
param_names = []
param_values = []
fx_args = []
for idx, v in enumerate(arg_values):
if isinstance(v, torch.Tensor):
param_names.append(f"arg_{idx}")
param_values.append(v)
fx_args.append(param_names[-1])
else:
fx_args.append(f'{repr(v)}')
for k, v in kwarg_values.items():
if isinstance(v, torch.Tensor):
param_names.append(k)
param_values.append(v)
fx_args.append(f'{k} = {k}')
else:
fx_args.append(f'{k} = {repr(v)}')
code = f"""
def f({', '.join(param_names)}):
return op.op({', '.join(fx_args)})"""
g = {'torch': torch, 'inf' : math.inf, 'op': op}
exec(code, g)
f = g['f']
f.__module__ = 'test'
out = f(*param_values)
ts_g = torch.jit.trace(f, param_values)
kernel = torch._C._te.TensorExprKernel(ts_g.graph)
correct_val = f(*param_values)
self.assertEqual(kernel.run(tuple(param_values)), correct_val)
self.assertEqual(kernel.fallback(tuple(param_values)), correct_val)
@onlyCPU
@unittest.skipIf(not LLVM_ENABLED, "Compiles with TensorExprKernel")
@ops([op for op in op_db if get_name(op) in works_list], allowed_dtypes=(torch.float,))
def test_working(self, device, dtype, op):
self.te_compile(device, dtype, op)
@onlyCPU
@unittest.skipIf(not LLVM_ENABLED, "Compiles with TensorExprKernel")
@ops([op for op in op_db if get_name(op) in known_failures], allowed_dtypes=(torch.float,))
def test_failures(self, device, dtype, op):
try:
self.te_compile(device, dtype, op)
except Exception as e:
pass
else:
raise RuntimeError("Expected test to fail. If it now works, move op into works_list")
@onlyCPU
@unittest.skipIf(not LLVM_ENABLED, "Compiles with TensorExprKernel")
@ops([op for op in op_db if get_name(op) not in works_list + known_failures], allowed_dtypes=(torch.float,))
def test_unsupported(self, device, dtype, op):
if get_name(op) in skip_ops:
return
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore', TracerWarning)
self.te_compile(device, dtype, op)
except Exception as e:
pass
else:
raise RuntimeError("Expected test to fail. If it now works, move op into works_list")
@slowTest
@onlyCPU
@ops(op_db, dtypes=OpDTypes.supported)
def test_nnc_correctness(self, device, dtype, op):
if not op.supports_tracing:
self.skipTest("Requires tracing support")
with NoTracerWarnContextManager() as no_warn:
variant_sample_pairs = get_traced_sample_variant_pairs(device, dtype, op)
for variant, sample in variant_sample_pairs:
trace = create_traced_fn(self, variant, cache_traced_fn=True)
ref = variant(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)
trace(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)
val = trace(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)
self.assertEqual(ref, val)
# https://github.com/pytorch/pytorch/issues/35600
# each torch.jit.trace adds state to the _python_cu compilation unit
# since this test traces a lot of functions, out-of-memory can occur
# if the CU is not cleared.
torch.jit._state._python_cu.drop_all_functions()
# Generate the per-device TestNNCOpInfo classes for CPU and CUDA.
only_for = ("cpu", "cuda")
instantiate_device_type_tests(TestNNCOpInfo, globals(), only_for=only_for)
# Purpose of this class is to allow super() calls. (See TestNNCOpInfoParent)
class TestLoopnestRandomizationParent(JitTestCase):
    """Stable intermediate base so TestLoopnestRandomization can call super(TestLoopnestRandomizationParent, self)."""
    pass
class TestLoopnestRandomization(TestLoopnestRandomizationParent):
    """Runs a fused kernel with random loopnest transformations enabled (seed fixed to 1)."""
    def setUp(self):
        # Deliberately skip the empty parent and run JitTestCase.setUp.
        super(TestLoopnestRandomizationParent, self).setUp()
        # Save all fuser-related global flags so tearDown can restore them.
        self.old_cpu_fuser_state = torch._C._jit_can_fuse_on_cpu()
        self.old_must_use_cpu_state = torch._C._jit_get_te_must_use_llvm_cpu()
        self.old_gpu_fuser_state = torch._C._jit_can_fuse_on_gpu()
        torch._C._jit_override_can_fuse_on_cpu(True)
        # TODO: force LLVM. need to add it to asan, mac, windows builds + sandcastle
        # torch._C._jit_set_te_must_use_llvm_cpu(True)
        torch._C._jit_override_can_fuse_on_gpu(True)
        self.old_profiling_executor = torch._C._jit_set_profiling_executor(True)
        self.old_profiling_mode = torch._C._get_graph_executor_optimize(True)
        self.old_fusion_inlining = torch._C._debug_get_fusion_group_inlining()
        torch._C._debug_set_fusion_group_inlining(False)
        self.texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
        torch._C._jit_set_texpr_fuser_enabled(True)
        self.old_te_must_use_llvm_cpu = torch._C._jit_get_te_must_use_llvm_cpu()
        torch._C._jit_set_te_must_use_llvm_cpu(False)
        # Set the seed to 1. This tests the codepath through random
        # transformation.
        os.environ["PYTORCH_TENSOREXPR_RANDOM_TRANSFORM_SEED"] = "1"
    def tearDown(self):
        # Restore every flag saved in setUp so later tests see the original state.
        torch._C._jit_set_profiling_executor(self.old_profiling_executor)
        torch._C._get_graph_executor_optimize(self.old_profiling_mode)
        torch._C._jit_override_can_fuse_on_gpu(self.old_gpu_fuser_state)
        torch._C._jit_override_can_fuse_on_cpu(self.old_cpu_fuser_state)
        torch._C._jit_set_te_must_use_llvm_cpu(self.old_must_use_cpu_state)
        torch._C._debug_set_fusion_group_inlining(self.old_fusion_inlining)
        torch._C._jit_set_texpr_fuser_enabled(self.texpr_fuser_state)
        torch._C._jit_set_te_must_use_llvm_cpu(self.old_te_must_use_llvm_cpu)
        # Set it back to 0.
        os.environ["PYTORCH_TENSOREXPR_RANDOM_TRANSFORM_SEED"] = "0"
        super(TestLoopnestRandomizationParent, self).tearDown()
    @onlyCPU
    @unittest.skipIf(not LLVM_ENABLED, "Compiles with TensorExprKernel")
    def test_relu(self, device):
        # Trace a small add/mul/relu graph and check the fused result still
        # matches eager when random loopnest transformations are applied.
        def fn_test_relu(x, y):
            return F.relu(x + 0.5 * y)
        x = torch.randn(4, 4, dtype=torch.float, device=device)
        y = torch.randn(4, 4, dtype=torch.float, device=device)
        fn = fn_test_relu
        traced_fn = torch.jit.trace(fn, (x, y))
        ref = fn(x, y)
        res = traced_fn(x, y)
        assert torch.allclose(ref, res)
# `only_for` should be a sequence of device-type strings. The original passed
# ("cpu"), which is just the string "cpu" (no trailing comma) and only behaved
# correctly because `in` on a string does substring matching; use a proper
# 1-tuple so membership tests match whole device names.
instantiate_device_type_tests(TestLoopnestRandomization, globals(), only_for=("cpu",))
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_jit_fuser_te.py |
# Owner(s): ["module: nn"]
import contextlib
import math
import random
import string
import unittest
import io
import unittest.mock as mock
import itertools
import warnings
import pickle
from copy import deepcopy
from itertools import repeat, product
from functools import reduce, partial
from operator import mul
from collections import OrderedDict
from tempfile import NamedTemporaryFile
import sys
import os
import subprocess
import weakref
import gc
import torch
# TODO: remove this global setting
# NN tests use double as the default dtype
# NOTE: this runs at import time, so it affects every test in this file.
torch.set_default_dtype(torch.double)
from torch._six import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
import torch.nn.utils.parametrize as parametrize
import torch.nn.utils.prune as prune
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn import Parameter
from torch.nn.parameter import UninitializedParameter, UninitializedBuffer
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, floating_types_and, get_all_math_dtypes, \
floating_and_complex_types_and
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
skipIfRocmVersionLessThan, skipIfNotMiopenSuggestNHWC, TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps,\
suppress_warnings, TemporaryFileName, TEST_WITH_UBSAN, IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, set_default_dtype, IS_WINDOWS, \
slowTest, skipIfTorchDynamo
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, TEST_CUDNN_VERSION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, \
ctcloss_reference, new_module_tests, single_batch_reference_fn
from torch.testing._internal.common_device_type import expectedFailureXLA, instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfNoCudnn, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, skipCUDAIfRocmVersionLessThan, skipCUDAIfNotMiopenSuggestNHWC, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, skipMeta, get_all_device_types, \
disableMkldnn, skipCPUIfNoMkldnn, disablecuDNN, skipCUDAIfMiopen, skipCUDAIfNoMiopen
from torch.nn import MultiheadAttention
from hypothesis import given
from torch.testing import make_tensor
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
# True on ROCm, or on GPUs where TF32 makes float32 matmuls inexact.
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# Optional scientific-computing imports, only when available in the environment.
if TEST_SCIPY:
    from scipy import stats
    import scipy.signal
    import scipy.ndimage
if TEST_NUMPY:
    import numpy as np
# WARNING: If you add a new top-level test case to this file, you MUST
# update test/run_test.py to list it, otherwise it will NOT be run in
# CI.
class PackedSequenceTest(TestCase):
    """Tests for torch.nn.utils.rnn.PackedSequence construction, casting and device moves."""
    # Maps a tensor type name to (tensor class, name of the PackedSequence cast method).
    _type_by_name = {
        'torch.DoubleTensor': (torch.DoubleTensor, 'double'),
        'torch.FloatTensor': (torch.FloatTensor, 'float'),
        # We leave out `'torch.HalfTensor': (torch.HalfTensor, 'half'),`
        # because of an error in `pad_packed_sequence`
        # > AttributeError: 'torch.HalfTensor' object has no attribute 'fill_'
        'torch.LongTensor': (torch.LongTensor, 'long'),
        'torch.IntTensor': (torch.IntTensor, 'int'),
        'torch.ShortTensor': (torch.ShortTensor, 'short'),
        'torch.CharTensor': (torch.CharTensor, 'char'),
        'torch.ByteTensor': (torch.ByteTensor, 'byte'),
    }
    def __init__(self, *args, **kwargs):
        super(PackedSequenceTest, self).__init__(*args, **kwargs)
        self.batch_size = 5
        self.max_length = 6
    def _ordered_sequence(self, tensor_type):
        """Create ordered list of random sequences"""
        seqs = [tensor_type(random.randint(1, self.max_length))
                for _ in range(self.batch_size)]
        # ByteTensor is unsigned, so use a non-negative value range for it.
        if tensor_type == torch.ByteTensor:
            seqs = [s.random_(0, 256) for s in seqs]
        else:
            seqs = [s.random_(-128, 128) for s in seqs]
        # Longest first, as pack_padded_sequence(enforce_sorted=True) requires.
        ordered = sorted(seqs, key=len, reverse=True)
        return ordered
    def _padded_sequence(self, tensor_type):
        """Create Tensor of random padded sequences"""
        ordered = self._ordered_sequence(tensor_type)
        lengths = [len(i) for i in ordered]
        padded_tensor = rnn_utils.pad_sequence(ordered)
        return padded_tensor, lengths
    def test_type_casts(self):
        """Test type casting of `PackedSequence` against type casting of tensor"""
        for _, (input_type, _) in self._type_by_name.items():
            for expected_type_str, (_, cast_str) in self._type_by_name.items():
                for enforce_sorted in [True, False]:
                    padded, lengths = self._padded_sequence(input_type)
                    packed = rnn_utils.pack_padded_sequence(
                        padded, lengths, enforce_sorted=enforce_sorted)
                    # Apply cast to `PackedSequence` instance and unpack
                    masked = getattr(packed, cast_str)()
                    unpacked, lengths_out = rnn_utils.pad_packed_sequence(masked)
                    self.assertEqual(unpacked.type(), expected_type_str)
    def test_wrong_order(self):
        # enforce_sorted=True must reject sequences not sorted longest-first.
        a = torch.ones(25, 300)
        b = torch.ones(22, 300)
        b_a = rnn_utils.pad_sequence([b, a])
        self.assertRaises(
            RuntimeError,
            lambda: rnn_utils.pack_padded_sequence(b_a, [22, 25], enforce_sorted=True))
    def test_pad_sequence_with_tensor_sequences(self):
        # A tensor of sequences should pad the same as the equivalent tuple of tensors.
        seq_tuple_input = torch.nn.utils.rnn.pad_sequence(
            (torch.tensor([[7, 6]]), torch.tensor([[-7, -1]]))
        )
        seq_tensor_input = torch.nn.utils.rnn.pad_sequence(
            torch.tensor([[[7, 6]], [[-7, -1]]])
        )
        self.assertEqual(seq_tuple_input, seq_tensor_input)
        self.assertEqual(seq_tuple_input.shape, torch.Size([1, 2, 2]))
    def test_pad_sequence_with_non_iterable_sequences(self):
        # Non-iterable input must raise with a clear message.
        msg = r"Expected iterable for input sequences, but got arg of type"
        with self.assertRaisesRegex(RuntimeError, msg):
            torch.nn.utils.rnn.pad_sequence(5)
    def test_total_length(self):
        """total_length must be >= the longest sequence and pad with zeros beyond it."""
        padded, lengths = self._padded_sequence(torch.FloatTensor)
        max_length = max(lengths)
        packed = rnn_utils.pack_padded_sequence(padded, lengths)
        # test ValueError if total_length < max_length
        for total_length in (-1, 0, max_length - 1):
            for batch_first in (True, False):
                def err_fn():
                    rnn_utils.pad_packed_sequence(packed, batch_first=batch_first,
                                                  total_length=total_length)
                self.assertRaisesRegex(ValueError,
                                       r'Expected total_length to be at least the '
                                       r'length of the longest sequence in input',
                                       err_fn)
        # test that pad_packed_sequence returns results of correct length
        for batch_first in (True, False):
            no_extra_pad, _ = rnn_utils.pad_packed_sequence(packed, batch_first=batch_first)
            for total_length_delta in (0, 1, 8):
                total_length = max_length + total_length_delta
                unpacked, lengths_out = rnn_utils.pad_packed_sequence(packed, batch_first=batch_first,
                                                                      total_length=total_length)
                self.assertEqual(lengths, lengths_out)
                self.assertEqual(unpacked.size(1 if batch_first else 0), total_length)
                # Beyond max_length, the result must equal the un-extended output
                # concatenated with zero padding along the time dimension.
                if total_length_delta == 0:
                    ref_output = no_extra_pad
                elif batch_first:
                    extra_pad = no_extra_pad.new_zeros(self.batch_size, total_length_delta)
                    ref_output = torch.cat([no_extra_pad, extra_pad], 1)
                else:
                    extra_pad = no_extra_pad.new_zeros(total_length_delta, self.batch_size)
                    ref_output = torch.cat([no_extra_pad, extra_pad], 0)
                self.assertEqual(unpacked, ref_output)
    def test_to(self):
        # PackedSequence.to must return the same object when nothing changes,
        # and otherwise behave like casting the underlying tensor.
        for enforce_sorted in (True, False):
            padded, lengths = self._padded_sequence(torch.IntTensor)
            a = rnn_utils.pack_padded_sequence(
                padded, lengths, enforce_sorted=enforce_sorted).cpu()
            self.assertIs(a, a.to('cpu'))
            self.assertIs(a, a.cpu())
            self.assertIs(a, a.to('cpu', dtype=torch.int32))
            self.assertEqual(a.long(), a.to(torch.int64))
            if torch.cuda.is_available():
                for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']:
                    b = a.cuda(device=cuda)
                    self.assertIs(b, b.to(cuda))
                    self.assertIs(b, b.cuda())
                    self.assertEqual(a, b.to('cpu'))
                    self.assertEqual(b, a.to(cuda))
                    self.assertEqual(a, b.to('cpu', dtype=torch.int32))
                    self.assertIs(b, b.to(dtype=torch.int32))
                    self.assertEqual(b.long(), b.to(dtype=torch.int64))
    def test_to_memory_format(self):
        m = torch.nn.Conv2d(in_channels=16, out_channels=32, kernel_size=2, bias=True)
        m = m.to(memory_format=torch.channels_last)
        for param in m.parameters():
            # Only 4-D parameters (the conv weight) can carry channels_last layout.
            if param.dim() == 4:
                self.assertTrue(param.is_contiguous(memory_format=torch.channels_last))
class TestAvgPool(TestCase):
    """Compares avg_pool2d/avg_pool3d against reference implementations built from unfold/sum."""
    def _sum_pool2d(self, x, kernel_size):
        # Reference: sum over each non-overlapping pooling window extracted by unfold.
        windows = torch.nn.functional.unfold(x, kernel_size=kernel_size, stride=kernel_size)
        return torch.sum(windows, dim=1)
    def _sum_pool3d(self, x, kernel_size):
        # Because unfold does not support 3D sliding window we will split tensor to multiple tensors and calculate sum
        h = kernel_size[0]
        # Drop any trailing chunk smaller than the kernel depth.
        splited_x = [t.sum(0) for t in x.split(h) if t.size(0) == h]
        # sum_pool2d assumes tensor in (1, 1, n, m) view, so unsqueeze two times
        splited_x = [self._sum_pool2d(t.unsqueeze(0).unsqueeze(0), kernel_size[1:]) for t in splited_x]
        joined_x = torch.cat(splited_x)
        return joined_x.view(1, joined_x.numel())
    def _avg_pool2d(self, x, kernel_size):
        # Average = window sum divided by the number of elements per window.
        size = reduce((lambda x, y: x * y), kernel_size)
        return self._sum_pool2d(x, kernel_size) / size
    def _avg_pool3d(self, x, kernel_size):
        size = reduce((lambda x, y: x * y), kernel_size)
        return self._sum_pool3d(x, kernel_size) / size
    def test_doubletensor_avg_pool2d(self):
        n, m = 5, 8
        input = torch.rand(1, 1, n, m)
        # Exhaustively check every kernel size that fits the input.
        for i in range(1, n + 1):
            for j in range(1, m + 1):
                actual = torch.nn.functional.avg_pool2d(input[0], (i, j))
                actual = actual.view(1, actual.numel())
                expected = self._avg_pool2d(input, (i, j))
                self.assertEqual(actual, expected, rtol=0, atol=1e-5)
    def test_avg_pool2d_with_zero_divisor(self):
        self.assertRaisesRegex(RuntimeError, "divisor must be not zero",
                               lambda: F.avg_pool2d(torch.zeros(3, 3, 3), (2, 2), divisor_override=0))
    def test_doubletensor_avg_pool2d_with_divisor(self):
        n, m = 3, 3
        input = torch.rand(1, 1, n, m)
        for i in range(1, n + 1):
            for j in range(1, m + 1):
                for divisor in [1, 7, i * j]:
                    # divisor_override replaces the window-size denominator.
                    actual = F.avg_pool2d(input[0], (i, j), divisor_override=divisor)
                    actual = actual.view(1, actual.numel())
                    expected = self._sum_pool2d(input, (i, j)) / divisor
                    self.assertEqual(actual, expected, rtol=0, atol=1e-5)
    def test_doubletensor_avg_pool3d(self):
        h, w, d = 5, 6, 7
        input = torch.rand(h, w, d)
        for i in range(1, h + 1):
            for j in range(1, w + 1):
                for k in range(1, d + 1):
                    actual = torch.nn.functional.avg_pool3d(input.unsqueeze(0), (i, j, k))
                    actual = actual.view(1, actual.numel())
                    expected = self._avg_pool3d(input, (i, j, k))
                    self.assertEqual(actual, expected, rtol=0, atol=1e-5)
    def test_doubletensor_avg_pool3d_with_divisor(self):
        h, w, d = 6, 5, 7
        input = torch.rand(h, w, d)
        for i in range(1, h + 1):
            for j in range(1, w + 1):
                for k in range(1, d + 1):
                    for divisor in [1, 7, i * j]:
                        actual = torch.nn.functional.avg_pool3d(input.unsqueeze(0), (i, j, k), divisor_override=divisor)
                        actual = actual.view(1, actual.numel())
                        expected = self._sum_pool3d(input, (i, j, k)) / divisor
                        self.assertEqual(actual, expected, rtol=0, atol=1e-5)
    def test_avg_pool3d_with_zero_divisor(self):
        self.assertRaisesRegex(RuntimeError, "divisor must be not zero",
                               lambda: F.avg_pool3d(torch.zeros(3, 3, 3, 3), (2, 2, 2), divisor_override=0))
    def test_avg_pool1d_ceil_mode(self):
        # Regression test for gh-36977
        x = 10 * torch.randn((1, 16, 4))
        y = torch.nn.functional.avg_pool1d(
            x, ceil_mode=True, count_include_pad=True, kernel_size=1, stride=2)
        self.assertTrue(not torch.isnan(y).any())
        if TEST_CUDA:
            y = torch.nn.functional.avg_pool1d(
                x.to('cuda'), ceil_mode=True, count_include_pad=True, kernel_size=1, stride=2)
            self.assertTrue(not torch.isnan(y).any())
    def test_avg_pool2d_ceil_mode(self):
        # Regression test for gh-36977
        x = 10 * torch.randn((1, 16, 4, 4))
        y = torch.nn.functional.avg_pool2d(
            x, ceil_mode=True, count_include_pad=True, kernel_size=(1, 2),
            padding=(0, 1), stride=2)
        self.assertTrue(not torch.isnan(y).any())
        if TEST_CUDA:
            y = torch.nn.functional.avg_pool2d(
                x.to('cuda'), ceil_mode=True, count_include_pad=True, kernel_size=(1, 2),
                padding=(0, 1), stride=2)
            self.assertTrue(not torch.isnan(y).any())
    def test_avg_pool3d_ceil_mode(self):
        # Regression test for gh-36977
        x = 10 * torch.randn((1, 16, 4, 4, 4))
        y = torch.nn.functional.avg_pool3d(
            x, ceil_mode=True, count_include_pad=True, kernel_size=(1, 2, 3), stride=2)
        self.assertTrue(not torch.isnan(y).any())
        if TEST_CUDA:
            y = torch.nn.functional.avg_pool3d(
                x.to('cuda'), ceil_mode=True, count_include_pad=True, kernel_size=(1, 2, 3), stride=2)
            self.assertTrue(not torch.isnan(y).any())
class TestNN(NNTestCase):
    # Flags consumed by the NNTestCase base (defined elsewhere): enable CUDA
    # memory-leak checking and run each test on a non-default CUDA stream.
    _do_cuda_memory_leak_check = True
    _do_cuda_non_default_stream = True
def _forward(self, module, input: _TensorOrTensors):
with freeze_rng_state():
if isinstance(input, tuple):
return module(*input)
else:
return module(input)
def _backward(self, module, input: _TensorOrTensors, output, grad_output, create_graph=False):
output.backward(grad_output, retain_graph=True, create_graph=create_graph)
if isinstance(input, tuple):
return tuple(i.grad.data if i.grad is not None else None for i in input)
else:
return input.grad.data if input.grad is not None else None
def _forward_criterion(self, criterion, input, target, extra_args=None):
if extra_args is None:
extra_args = tuple()
if isinstance(input, tuple):
args = input + (target,) + extra_args
output = criterion(*args)
else:
output = criterion(input, target, *extra_args)
return output
    def _backward_criterion(self, criterion, input, output, target, gradOutput=None, extra_args=None):
        """Zero any stale input grads, backprop through `criterion`, return input gradient(s)."""
        if extra_args is None:
            extra_args = tuple()
        input_tuple = input if isinstance(input, tuple) else (input,)
        output_tuple = output if isinstance(output, tuple) else (output,)
        # Clear existing gradients so the returned grads reflect only this pass.
        for i in input_tuple:
            if i.grad is not None:
                i.grad.data.zero_()
        args = input_tuple + (target,) + extra_args
        if gradOutput is None:
            gradOutput = torch.ones(())
        # Match gradOutput's dtype/device to the criterion output before backprop.
        criterion(*args).backward(gradOutput.to(output_tuple[0]))
        if isinstance(input, tuple):
            return tuple(i.grad.data for i in input)
        else:
            return input.grad.data
def _zero_grad_parameters(self, module):
for p in module.parameters():
if p.grad is not None:
with torch.no_grad():
p.grad.zero_()
p.grad.detach_()
def _get_parameters(self, module):
params = []
d_params = []
for p in module.parameters():
params.append(p)
d_params.append(p.grad)
return params, d_params
    def _create_basic_net(self):
        """Build a small (Layer, Net, Sequential) trio, each holding dummy params and buffers."""
        class Layer(nn.Module):
            def __init__(self):
                super(Layer, self).__init__()
                self.layer_dummy_param = Parameter(torch.empty(3, 5))
                self.register_buffer('layer_dummy_buf', torch.zeros(1, 3, 3, 7))
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.l1 = Layer()
                self.dummy_param = Parameter(torch.empty(3, 5))
                self.register_buffer('dummy_buf', torch.zeros(7, 3, 3, 1))
        l = Layer()
        n = Net()
        # Note: the Sequential reuses the SAME Net instance twice (shared parameters).
        s = nn.Sequential(n, n)
        return l, n, s
    def test_parse_to(self):
        """torch._C._nn._parse_to must round-trip a memory_format argument."""
        # Test for buggy use of THPMemoryFormat_New
        self.assertEqual(
            repr(torch._C._nn._parse_to(memory_format=torch.contiguous_format)[3]),
            "torch.contiguous_format"
        )
    def test_requires_grad_(self):
        """requires_grad_ must toggle parameters, return self, and leave buffers untouched."""
        m = self._create_basic_net()[-1]
        assert len(list(m.buffers())) > 0, 'invalid test'
        assert all(not b.requires_grad for b in m.buffers()) > 0, 'invalid test'
        assert len(list(m.parameters())) > 0, 'invalid test'
        assert all(p.requires_grad for p in m.parameters()) > 0, 'invalid test'
        for requires_grad in (False, True):
            # requires_grad_ is fluent: it must return the module itself.
            self.assertIs(m.requires_grad_(requires_grad), m)
            for p in m.parameters():
                self.assertEqual(p.requires_grad, requires_grad)
            for b in m.buffers():
                self.assertFalse(b.requires_grad)
    def test_module_backcompat(self):
        """A Linear module serialized by an older PyTorch release must still load and run."""
        from torch.serialization import SourceChangeWarning
        path = download_file('https://download.pytorch.org/test_data/linear.pt')
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', SourceChangeWarning)
            m = torch.load(path)
        input = torch.randn(2, 3, dtype=torch.float)
        self.assertEqual(m(input).size(), (2, 5))
    def test_conv_backcompat(self):
        """A Conv2d pickled by PyTorch 1.0.1 on Python 2 must still load and run."""
        from torch.serialization import SourceChangeWarning
        # This file was generated by running on PyTorch 1.0.1 on Python 2:
        #
        # import torch
        # from torch import nn
        # m = nn.Conv2d(1, 1, 1)
        # torch.save(m, 'legacy_conv2d.pt')
        #
        # NB: This Pickle also contains some Unicode data!
        path = download_file('https://download.pytorch.org/test_data/legacy_conv2d.pt')
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', SourceChangeWarning)
            m = torch.load(path, encoding='utf-8')
        input = torch.randn((1, 1, 1, 1), dtype=torch.float)
        self.assertEqual(m(input).size(), (1, 1, 1, 1))
    def test_share_memory(self):
        """share_memory() must move every parameter and buffer storage into shared memory."""
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.p = nn.Parameter(torch.eye(5))
                self.par = nn.ParameterList()
                self.par.append(nn.Parameter(torch.randn(10)))
            def forward(self, inp):
                # NB: dead code
                return inp.clone()
        net = Net()
        # Freshly created storages must NOT be shared yet.
        for p in net.parameters():
            self.assertFalse(p.storage().is_shared())
        for b in net.buffers():
            self.assertFalse(b.storage().is_shared())
        net.share_memory()
        for p in net.parameters():
            self.assertTrue(p.storage().is_shared())
        for b in net.buffers():
            self.assertTrue(b.storage().is_shared())
    def _test_hooks(self, backward_register_fn):
        """Check forward/backward hook firing counts and removal for the given backward-hook API."""
        module = nn.Sigmoid()
        input = torch.ones(5, 5, requires_grad=True)
        counter = {
            'forwards': 0,
            'backwards': 0
        }
        def fw_hook(inc, h_module, input, output):
            self.assertIsInstance(input, tuple)
            self.assertTrue(isinstance(output, torch.Tensor))
            self.assertTrue(h_module is module)
            self.assertEqual(input[0], torch.ones(5, 5))
            # sigmoid(1) == 1 / (1 + 1/e)
            self.assertEqual(output, torch.empty(5, 5).fill_(1 / (1 + 1 / math.e)))
            counter['forwards'] += inc
        def bw_hook(inc, h_module, grad_input, grad_output):
            self.assertIsInstance(grad_input, tuple)
            self.assertIsInstance(grad_output, tuple)
            self.assertTrue(h_module is module)
            self.assertEqual(grad_output[0], torch.ones(5, 5) * 2)
            counter['backwards'] += inc
        # One forward hook: each forward adds 1 to 'forwards'.
        test_fwd = module.register_forward_hook(lambda *args: fw_hook(1, *args))
        module(input)
        module(input)
        self.assertEqual(counter['forwards'], 2)
        self.assertEqual(counter['backwards'], 0)
        test_bwd = getattr(module, backward_register_fn)(
            lambda *args: bw_hook(1, *args))
        output = module(input)
        self.assertEqual(counter['forwards'], 3)
        self.assertEqual(counter['backwards'], 0)
        output.backward(torch.ones(5, 5) * 2, retain_graph=True)
        self.assertEqual(counter['forwards'], 3)
        self.assertEqual(counter['backwards'], 1)
        # Backward hooks fire once per backward call, even when reusing the graph.
        output.backward(torch.ones(5, 5) * 2, retain_graph=True)
        self.assertEqual(counter['forwards'], 3)
        self.assertEqual(counter['backwards'], 2)
        # Second forward hook adds 2 per forward, so each forward now adds 3 total.
        test2_fwd = module.register_forward_hook(lambda *args: fw_hook(2, *args))
        output = module(input)
        self.assertEqual(counter['forwards'], 6)
        self.assertEqual(counter['backwards'], 2)
        test2_bwd = getattr(module, backward_register_fn)(lambda *args: bw_hook(2, *args))
        module(input).backward(torch.ones(5, 5) * 2)
        self.assertEqual(counter['forwards'], 9)
        self.assertEqual(counter['backwards'], 5)
        # After removal, the second backward hook must no longer fire.
        test2_bwd.remove()
        module(input).backward(torch.ones(5, 5) * 2)
        self.assertEqual(counter['forwards'], 12)
        self.assertEqual(counter['backwards'], 6)
        test2_fwd.remove()
        module(input).backward(torch.ones(5, 5) * 2)
        self.assertEqual(counter['forwards'], 13)
        self.assertEqual(counter['backwards'], 7)
        test_fwd.remove()
        test_bwd.remove()
@skipIfTorchDynamo("TorchDynamo does not work well with hooks")
def test_hooks(self):
self._test_hooks("register_backward_hook")
self._test_hooks("register_full_backward_hook")
    def test_hook_cpp(self):
        """Full backward hooks must also work on C++-implemented modules like BatchNorm1d."""
        bn = nn.BatchNorm1d(5)
        def hook(module, grad_inputs, grad_outputs):
            self.assertEqual(len(grad_inputs), 1)
            self.assertEqual(len(grad_outputs), 1)
            self.assertEqual(module, bn)
        bn.register_full_backward_hook(hook)
        output = bn(torch.randn(5, 5, requires_grad=True))
        output.sum().backward()
    def test_hook_invalid_outputs(self):
        """Backward hooks returning the wrong number of gradients must raise."""
        module = nn.Sigmoid()
        input = torch.randn(5, 5, requires_grad=True)
        def bw_fail1(self, grad_input, grad_output):
            # Returns one gradient too few.
            return grad_input[:-1]
        def bw_fail2(self, grad_input, grad_output):
            # Returns one gradient too many.
            return grad_input + (torch.randn(2, 2),)
        with module.register_backward_hook(bw_fail1):
            with self.assertRaisesRegex(RuntimeError, 'got 0, but expected 1'):
                module(input).sum().backward()
        with module.register_backward_hook(bw_fail2):
            with self.assertRaisesRegex(RuntimeError, 'got 2, but expected 1'):
                module(input).sum().backward()
    def test_hook_requires_grad(self):
        """Registering a full backward hook must not change the inputs' requires_grad flags."""
        test_self = self
        class MyModule(nn.Module):
            def forward(self, arg1, arg2, arg3):
                test_self.assertTrue(arg1.requires_grad)
                test_self.assertFalse(arg2.requires_grad)
                test_self.assertTrue(arg3.requires_grad)
                return arg1.sum() + arg2.sum() + arg3.sum()
        inp = torch.rand(2, requires_grad=True)
        mod = MyModule()
        mod(inp, inp.detach(), inp)
        # Ensure that requires grad is properly propagated
        mod.register_full_backward_hook(lambda mod, gI, gO: None)
        mod(inp, inp.detach(), inp)
    @skipIfTorchDynamo("TorchDynamo does not work well with hooks")
    def test_hook_no_requires_grad(self):
        """When no input requires grad, full backward hooks see None grad_inputs."""
        mod = nn.Linear(2, 3)
        inp = torch.rand(1, 2)
        # Mutated below to drive the hook's return-value behavior.
        return_val = "None"
        hook_called = [0]
        def hook(mod, grad_input, grad_output):
            hook_called[0] += 1
            for gI in grad_input:
                self.assertIsNone(gI)
            for gO in grad_output:
                self.assertEqual(gO.size(), (1, 3))
            if return_val == "grad_input":
                return grad_input
            elif return_val == "invalid":
                # If the inputs were requiring gradients, this would be
                # a valid return
                return inp
            elif return_val == "None":
                return None
            else:
                raise RuntimeError("Invalid return_val string")
        mod.register_full_backward_hook(hook)
        # This should run and trigger the hook properly
        mod(inp).sum().backward()
        self.assertEqual(hook_called[0], 1)
        # Returning the (all-None) grad_input unchanged is also allowed.
        return_val = "grad_input"
        mod(inp).sum().backward()
        self.assertEqual(hook_called[0], 2)
        return_val = "invalid"
        with self.assertRaisesRegex(RuntimeError, "where no input requires gradient"):
            mod(inp).sum().backward()
    def test_hook_last_arg_requires_grad(self):
        """Hooks must work when only the last positional input requires grad."""
        mod = nn.L1Loss()
        inp = torch.rand(1, requires_grad=True)
        mod.register_full_backward_hook(lambda m, gI, gO: None)
        try:
            mod(inp.detach(), inp)
        except Exception as ex:
            self.fail("Unexpected exception: %s" % ex)
    def test_hook_extra_input(self):
        """Non-tensor forward arguments/outputs must appear as None in hook gradients."""
        class MyModule(nn.Module):
            def forward(self, non_tensor, tensor):
                return tensor.clone(), non_tensor
        inp = torch.rand(2, requires_grad=True)
        mod = MyModule()
        def hook(mod, grad_input, grad_output):
            # Positions of non-tensor args/outputs carry None instead of a gradient.
            self.assertIsNone(grad_input[0])
            self.assertIsInstance(grad_input[1], torch.Tensor)
            self.assertIsInstance(grad_output[0], torch.Tensor)
            self.assertIsNone(grad_output[1])
        mod.register_full_backward_hook(hook)
        out, _ = mod(True, inp)
        out.sum().backward()
    def test_hook_inplace(self):
        """In-place mutation of hooked-module inputs/outputs must raise (they are views)."""
        class MyModule(nn.Module):
            def forward(self, inp, do_inplace):
                self.inp = inp
                if do_inplace:
                    inp += 1
                return inp.clone()
        hook_called = [0]
        def hook(mod, grad_input, grad_output):
            hook_called[0] += 1
        inp = torch.rand(10, requires_grad=True)
        mod = MyModule()
        mod.register_full_backward_hook(hook)
        # No inplace should work
        mod(inp, False).sum().backward()
        self.assertEqual(hook_called[0], 1)
        # Input inplace error should throw an error
        with self.assertRaisesRegex(RuntimeError, "Output 0 of BackwardHookFunctionBackward is "
                                                  "a view and is being modified inplace."):
            mod(inp.clone(), True)
        # Input inplace error should throw an error if we try to re-use the view after they have
        # been modified
        local_inp = inp.clone()
        out = mod(local_inp, False)
        local_inp[0] *= 1
        with self.assertRaisesRegex(RuntimeError, "Output 0 of BackwardHookFunctionBackward is "
                                                  "a view and its base or another view"):
            # Any operation involving the view will fail here
            mod.inp + 2
        # Output inplace error should throw an error
        out = mod(inp, False)
        with self.assertRaisesRegex(RuntimeError, "BackwardHookFunctionBackward is a view "
                                                  "and is being modified inplace."):
            out += 1
    def test_hook_non_full_warning(self):
        """Legacy (non-full) backward hooks must warn on module shapes they cannot handle."""
        def noop(*args):
            pass
        a = torch.rand(2, requires_grad=True)
        b = torch.rand(2, requires_grad=True)
        # Check invalid input container
        class MyModule(nn.Module):
            def forward(self, l):
                return l[0].clone(), l[1].clone()
        m = MyModule()
        m.register_backward_hook(noop)
        with self.assertWarnsRegex(UserWarning, "does not take as input a single Tensor or a tuple of Tensors"):
            m([a, b])
        # Check invalid output container
        class MyModule(nn.Module):
            def forward(self, a, b):
                return [a.clone(), b.clone()]
        m = MyModule()
        m.register_backward_hook(noop)
        with self.assertWarnsRegex(UserWarning, "does not return a single Tensor or a tuple of Tensors"):
            m(a, b)
        # Check invalid output from different Nodes
        class MyModule(nn.Module):
            def forward(self, a, b):
                return a.clone(), b.clone()
        m = MyModule()
        m.register_backward_hook(noop)
        with self.assertWarnsRegex(UserWarning, "outputs are generated by different autograd Nodes"):
            m(a, b)
        # Check invalid forward with multiple Nodes
        class MyModule(nn.Module):
            def forward(self, a):
                return a.clone().clone()
        m = MyModule()
        m.register_backward_hook(noop)
        with self.assertWarnsRegex(UserWarning, "the forward contains multiple autograd Nodes"):
            m(a)
    def test_hook_backward_size(self):
        # Make module with multiple operations in forward
        # And different size for input and outputs
        class MyModule(nn.Module):
            def forward(self, arg1, arg2):
                tmp = arg1.sum() * arg2
                tmp = tmp + arg2.sum() * arg1.sum()
                tmp = tmp.sum().view(1)
                tmp = tmp.expand(8).contiguous()
                return tmp
        module = MyModule()
        inp1 = torch.randn(5, 5, requires_grad=True)
        inp2 = torch.randn(10, 10, requires_grad=True)
        def bw_hook(module, grad_input, grad_output):
            # Grads must match the shapes of the module's inputs/outputs,
            # not of any intermediate tensor created in forward.
            self.assertEqual(len(grad_input), 2)
            self.assertEqual(grad_input[0].size(), torch.Size([5, 5]))
            self.assertEqual(grad_input[1].size(), torch.Size([10, 10]))
            self.assertEqual(len(grad_output), 1)
            self.assertEqual(grad_output[0].size(), torch.Size([8]))
        with module.register_full_backward_hook(bw_hook):
            module(inp1, inp2).sum().backward()
    @skipIfTorchDynamo("TorchDynamo does not work well with hooks")
    def test_hook_backward_writeable(self):
        """A legacy backward hook may rewrite grad_input; the rescaled grad must reach the input."""
        module = nn.Sigmoid()
        input = torch.randn(5, 5, requires_grad=True)
        sig_x = torch.nn.functional.sigmoid(input)
        def bw_hook(module, grad_input, grad_output):
            for grad in grad_input:
                self.assertTrue(isinstance(grad, torch.Tensor))
            for grad in grad_output:
                self.assertTrue(isinstance(grad, torch.Tensor))
            # Double every incoming gradient.
            return tuple(gi * 2 for gi in grad_input)
        module.register_backward_hook(bw_hook)
        module(input).backward(torch.ones(5, 5))
        # d(sigmoid)/dx = sigmoid * (1 - sigmoid); the hook doubles it.
        expected_grad = sig_x * (1 - sig_x) * 2
        self.assertEqual(input.grad, expected_grad)
@skipIfTorchDynamo("TorchDynamo does not work well with hooks")
def test_hook_forward_preforward_writable(self):
    """Forward pre-hooks and forward hooks may return replacement values;
    both replacements must take effect in the forward AND in backward."""
    module = nn.Sigmoid()
    input = torch.randn(5, 5, requires_grad=True)
    sig_x = torch.nn.functional.sigmoid(input)

    def forward_pre_hook(m, input):
        # Pre-hooks receive the inputs as a tuple; the return value
        # replaces the module input.
        return torch.nn.functional.relu(input[0])

    def forward_hook(m, input, output):
        # The return value replaces the module output.
        return -output

    module.register_forward_pre_hook(forward_pre_hook)
    module.register_forward_hook(forward_hook)
    output = module(input)
    expected_res = -torch.nn.functional.sigmoid(torch.nn.functional.relu(input))
    self.assertEqual(output, expected_res)
    output.backward(torch.ones(5, 5) * 2, retain_graph=True)
    # relu blocks the gradient wherever the input was non-positive.
    mask = (input > 0).double()
    expected_grad = -sig_x * (1 - sig_x) * 2 * mask
    self.assertEqual(input.grad, expected_grad)
def test_to(self):
    """Module.to must return self when it is a no-op (already on the
    requested device/dtype) and otherwise behave like the corresponding
    conversion methods (.double(), .cuda(), ...)."""
    m = nn.Linear(3, 5)
    # No-op moves return the very same module object.
    self.assertIs(m, m.to('cpu'))
    self.assertIs(m, m.to('cpu', dtype=torch.float32))
    self.assertEqual(m.double(), m.to(torch.float64))
    # Module.to does not accept the Tensor-only `copy` argument.
    self.assertRaises(RuntimeError, lambda: m.to('cpu', copy=True))

    if torch.cuda.is_available():
        for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']:
            m2 = m.cuda(device=cuda)
            self.assertIs(m2, m2.to(cuda))
            self.assertEqual(m, m2.to('cpu'))
            self.assertEqual(m2, m.to(cuda))
            self.assertIs(m2, m2.to(dtype=torch.float32))
            self.assertEqual(m2.double(), m2.to(dtype=torch.float64))
def test_zero_grad(self):
    """zero_grad() only touches parameters that require grad; it zeroes
    .grad in place, and with set_to_none=True it resets .grad to None.

    NOTE(review): this assumes zero_grad() defaults to set_to_none=False;
    newer PyTorch releases flipped that default — confirm against the
    torch version under test.
    """
    i = torch.randn(2, 5, requires_grad=True)
    module = nn.Linear(5, 5)
    for p in module.parameters():
        p.requires_grad = False
    # No grads anywhere yet: must be a no-op, not an error.
    module.zero_grad()

    module.weight.requires_grad = True
    module.zero_grad()
    self.assertIsNone(module.weight.grad)  # uninitialized grad

    module(i).sum().backward()
    self.assertIsNotNone(module.weight.grad)
    self.assertGreater(module.weight.grad.data.abs().sum(), 0)
    module.zero_grad()
    self.assertEqual(module.weight.grad.data, module.weight.data.clone().zero_())

    module.bias.requires_grad = True
    module.zero_grad()
    self.assertIsNotNone(module.weight.grad)
    # bias never accumulated a grad, so it is still None.
    self.assertIsNone(module.bias.grad)
    module(i).sum().backward()
    self.assertIsNotNone(module.weight.grad)
    self.assertIsNotNone(module.bias.grad)
    self.assertGreater(module.weight.grad.data.abs().sum(), 0)
    self.assertGreater(module.bias.grad.data.abs().sum(), 0)
    module.zero_grad()
    self.assertEqual(module.weight.grad.data, module.weight.data.clone().zero_())
    self.assertEqual(module.bias.grad.data, module.bias.data.clone().zero_())

    # Force set to None.
    module.zero_grad(set_to_none=True)
    self.assertIsNone(module.weight.grad)
def test_no_grad(self):
    """Under torch.no_grad() a module's output must not require grad, and
    calling backward() through it must fail."""
    for dtype in [torch.bfloat16, torch.float, torch.double]:
        module = nn.Conv2d(2, 5, kernel_size=3, padding=1).to(dtype)
        input = torch.randn(1, 2, 10, 10).to(dtype)
        x = input
        y = input.clone()

        # Normal mode: gradients flow.
        output = module(x)
        self.assertTrue(output.requires_grad)
        output.backward(torch.ones(1, 5, 10, 10))

        with torch.no_grad():
            output2 = module(y)
            self.assertFalse(output2.requires_grad)
            # No graph was recorded, so backward must raise.
            self.assertRaises(RuntimeError, lambda: output2.backward(torch.ones(1, 5, 10, 10)))
def test_invalid_conv1d(self):
    """Conv1d must reject kernels larger than the (padded) input and
    non-positive strides, for every supported dtype."""
    for dtype in [torch.bfloat16, torch.float, torch.double, torch.cfloat, torch.cdouble]:
        # Kernel (10) larger than spatial input (4): error at call time.
        module = nn.Conv1d(in_channels=3, out_channels=33, kernel_size=10, stride=1, bias=True).to(dtype)
        input = torch.randn(1, 3, 4).to(dtype)
        with self.assertRaisesRegex(RuntimeError,
                                    r'Calculated padded input size per channel: \(4\). ' +
                                    r'Kernel size: \(10\). Kernel size can\'t be greater than actual input size'):
            module(input)

        # Negative stride check
        module = nn.Conv1d(in_channels=3, out_channels=6, kernel_size=3, stride=-1, bias=True).to(dtype)
        input = torch.randn(1, 3, 4).to(dtype)
        with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):
            module(input)
def test_mismatch_shape_conv2d(self):
    """conv2d must reject inputs that are neither 3D (unbatched) nor 4D
    (batched), with a message naming the offending size."""
    for dtype in (torch.float, torch.cfloat):
        x = torch.randn(1, 10, 1, 28, 28, dtype=dtype)  # 5D: invalid for conv2d
        w = torch.randn(6, 1, 5, 5, dtype=dtype)

        with self.assertRaisesRegex(RuntimeError,
                                    r'Expected 3D \(unbatched\) or 4D \(batched\) input to conv2d, but got ' +
                                    r'input of size: \[1, 10, 1, 28, 28\]'):
            F.conv2d(x, w)
def test_conv2d_discontiguous_weight(self):
    """conv2d must produce correct results when the weight tensor is
    non-contiguous, with and without MKLDNN (regression test for
    gh-55781)."""
    for dtype in (torch.float, torch.cfloat):
        # Test for https://github.com/pytorch/pytorch/issues/55781
        x = torch.ones(64, 16, 16, 16, dtype=dtype)
        # The strided slice [..., ::2] makes the weight discontiguous.
        weight = torch.arange(0, 1.0, 1 / 2.0 ** 10).reshape(32, 16, 1, 2).to(dtype)[:, :, :, ::2]
        self.assertFalse(weight.is_contiguous())
        y = torch.nn.functional.conv2d(x, weight, None)
        if torch.backends.mkldnn.is_available():
            # Disable MKLDNN explicitly, so that either NNPACK or THCNN will be used
            with torch.backends.mkldnn.flags(enabled=False):
                y_ = torch.nn.functional.conv2d(x, weight, None)
                self.assertEqual(y, y_)
        # All-ones input makes the expected sum a fixed constant.
        self.assertEqual(y.sum(), 4186112.)
def test_invalid_conv2d(self):
    """Conv2d must reject dilated kernels that exceed the input, kernels
    larger than the input, and non-positive strides."""
    for dtype in [torch.bfloat16, torch.float, torch.double, torch.cfloat, torch.cdouble]:
        # Effective (dilated) kernel is 5x5 on a 4x4 input.
        module = torch.nn.Conv2d(1, 1, kernel_size=3, dilation=2, stride=2).to(dtype)
        input = torch.empty(1, 1, 4, 4).to(dtype)
        self.assertRaises(RuntimeError, lambda: module(input))

        # Kernel (10x10) larger than spatial input (1x1).
        module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, stride=1, bias=True)
        input = torch.randn(1, 3, 1, 1)
        with self.assertRaisesRegex(RuntimeError,
                                    r'Calculated padded input size per channel: \(1 x 1\). ' +
                                    r'Kernel size: \(10 x 10\). Kernel size can\'t be greater than actual input size'):
            module(input)

        # Negative stride check
        module = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=4, stride=-1, bias=True).to(dtype)
        input = torch.randn(1, 3, 4, 4).to(dtype)
        with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):
            module(input)

        # Zero stride check
        module = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=4, stride=0, bias=True).to(dtype)
        input = torch.randn(1, 3, 4, 4).to(dtype)
        with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):
            module(input)
def test_invalid_conv3d(self):
    """Conv3d must reject dilated kernels exceeding the input and
    negative strides."""
    for dtype in [torch.bfloat16, torch.float, torch.double, torch.cfloat, torch.cdouble]:
        # Effective (dilated) kernel is 5x5x5 on a 4x4x4 input.
        module = torch.nn.Conv3d(1, 1, kernel_size=3, dilation=2, stride=2).to(dtype)
        input = torch.empty(1, 1, 4, 4, 4).to(dtype)
        self.assertRaises(RuntimeError, lambda: module(input))

        # Negative stride check
        module = torch.nn.Conv3d(1, 1, kernel_size=3, stride=-2)
        input = torch.empty(1, 1, 4, 4, 4)
        with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):
            module(input)
def test_conv_invalid_groups(self):
with self.assertRaisesRegex(ValueError, 'groups must be a positive integer'):
torch.nn.Conv1d(1, 1, kernel_size=3, dilation=2, stride=2, groups=0)
with self.assertRaisesRegex(ValueError, 'groups must be a positive integer'):
torch.nn.Conv2d(1, 1, kernel_size=3, dilation=2, stride=2, groups=-1)
with self.assertRaisesRegex(ValueError, 'groups must be a positive integer'):
torch.nn.Conv3d(1, 1, kernel_size=3, dilation=2, stride=2, groups=-2)
def test_Conv1d_module_same_padding(self):
    """nn.Conv1d with padding='same' must match F.conv1d(..., padding='same'),
    including dilated and non-zero padding_mode variants; invalid padding
    strings and strided same-padding must be rejected at construction."""
    # Compare module against functional: without strides/dilation, asymmetric padding
    x = torch.rand(1, 1, 20)
    module = nn.Conv1d(in_channels=1, out_channels=1, kernel_size=10,
                       padding='same')
    expect = F.conv1d(x, module.weight, module.bias, padding='same')
    self.assertEqual(expect, module(x))

    # Test dilation, symmetric padding
    module = nn.Conv1d(in_channels=1, out_channels=1, kernel_size=10,
                       padding='same', dilation=2)
    expect = F.conv1d(x, module.weight, module.bias, padding='same', dilation=2)
    self.assertEqual(expect, module(x))

    # Test non-zero padding_mode, requiring explicit padding
    module = nn.Conv1d(in_channels=1, out_channels=1, kernel_size=10,
                       padding='same', padding_mode='replicate')
    # Manually replicate what the module does internally: pad, then 'valid' conv.
    x_padded = F.pad(x, [4, 5], mode='replicate')
    expect = F.conv1d(x_padded, module.weight, module.bias, padding='valid')
    self.assertEqual(expect, module(x))
    self.assertEqual(x.size(), expect.size())

    # Test construction with invalid padding string raises
    with self.assertRaisesRegex(ValueError, 'Invalid padding string'):
        module = nn.Conv1d(in_channels=3, out_channels=33, kernel_size=10, padding='foo')

    # Test construction with same padding and strides raises
    with self.assertRaisesRegex(ValueError, "padding='same'"):
        module = nn.Conv1d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=2)
def test_Conv2d_module_same_padding(self):
    """nn.Conv2d with padding='same' must match F.conv2d(..., padding='same'),
    including dilated and non-zero padding_mode variants; invalid padding
    strings and strided same-padding must be rejected at construction."""
    # Compare module against functional:
    # without strides/dilation, both symmetric and asymmetric padding
    x = torch.rand(1, 1, 9, 20)
    module = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(5, 10),
                       padding='same')
    expect = F.conv2d(x, module.weight, module.bias, padding='same')
    self.assertEqual(expect, module(x))

    # with dilation, symmetric padding
    module = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(3, 4),
                       padding='same', dilation=(1, 2))
    expect = F.conv2d(x, module.weight, module.bias, padding='same', dilation=(1, 2))
    self.assertEqual(expect, module(x))

    # Test non-zero padding_mode, requiring explicit padding
    module = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(3, 4),
                       padding='same', padding_mode='reflect')
    # Manually replicate what the module does internally: pad, then 'valid' conv.
    x_padded = F.pad(x, [1, 2, 1, 1], mode='reflect')
    expect = F.conv2d(x_padded, module.weight, module.bias, padding='valid')
    self.assertEqual(expect, module(x))
    self.assertEqual(x.size(), expect.size())

    # Test construction with invalid padding string raises
    with self.assertRaisesRegex(ValueError, 'Invalid padding string'):
        module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='foo')

    # Test construction with same padding and strides raises
    with self.assertRaisesRegex(ValueError, "padding='same'"):
        module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=2)
    with self.assertRaisesRegex(ValueError, "padding='same'"):
        module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(1, 3))
    with self.assertRaisesRegex(ValueError, "padding='same'"):
        module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(4, 1))
def test_Conv3d_module_same_padding(self):
# Compare module against functional:
x = torch.rand(1, 1, 4, 4, 4)
# without dilation, both symmetric and asymmetric padding
module = nn.Conv3d(in_channels=1, out_channels=1, kernel_size=(2, 3, 4),
padding='same')
expect = F.conv3d(x, module.weight, module.bias, padding='same')
self.assertEqual(expect, module(x))
# with dilation, both symmetric and asymmetric padding
module = nn.Conv3d(in_channels=1, out_channels=1, kernel_size=(2, 3, 4),
padding='same', dilation=(3, 2, 1))
expect = F.conv3d(x, module.weight, module.bias, padding='same', dilation=(3, 2, 1))
self.assertEqual(expect, module(x))
# Test non-zero padding_mode, requiring explicit padding
module = nn.Conv3d(in_channels=1, out_channels=1, kernel_size=(2, 3, 4),
padding='same', padding_mode='circular')
x_padded = F.pad(x, [1, 2, 1, 1, 0, 1], mode='circular')
expect = F.conv3d(x_padded, module.weight, module.bias, padding='valid')
self.assertEqual(expect, module(x))
self.assertEqual(x.size(), expect.size())
# Test connstruction with invalid padding string raises
with self.assertRaisesRegex(ValueError, 'Invalid padding string'):
module = nn.Conv3d(in_channels=3, out_channels=33, kernel_size=10, padding='foo')
# Test connstruction with same padding and strides raises
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=2)
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(1, 1, 3))
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(1, 4, 1))
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(5, 1, 1))
def _test_alpha_dropout(self, cls, input):
    """Shared helper: alpha dropout should approximately preserve the
    input's mean and std for a range of dropout probabilities, and the
    backward pass must run."""
    mean = input.mean()
    std = input.std()
    for p in [0.2, 0.5, 0.8]:
        module = cls(p)
        input_var = input.detach().clone().requires_grad_()
        output = module(input_var)
        # output mean should be close to input mean
        self.assertLess(abs(output.data.mean() - mean), 0.1)
        # output std should be close to input std
        self.assertLess(abs(output.data.std() - std), 0.1)
        # Smoke-test the backward pass.
        output.backward(input)
def test_parameters_and_named_parameters(self):
    """parameters()/named_parameters() report dotted names and honor the
    recurse flag (fixture nets come from self._create_basic_net)."""
    def names(named_parameters):
        # Extract just the names from (name, parameter) pairs.
        return [k for k, _ in named_parameters]

    l, n, s = self._create_basic_net()

    self.assertEqual(len(list(l.parameters())), 1)
    self.assertEqual(
        names(l.named_parameters()),
        ['layer_dummy_param'])

    self.assertEqual(len(list(n.parameters())), 2)
    self.assertEqual(
        names(n.named_parameters()),
        ['dummy_param', 'l1.layer_dummy_param'])

    # recurse=False limits the walk to direct members.
    self.assertEqual(len(list(n.parameters(recurse=False))), 1)
    self.assertEqual(
        names(n.named_parameters(recurse=False)),
        ['dummy_param'])

    self.assertEqual(len(list(s.parameters())), 2)
    self.assertEqual(
        names(s.named_parameters()),
        ['0.dummy_param', '0.l1.layer_dummy_param'])
def test_buffers_and_named_buffers(self):
    """buffers()/named_buffers() report dotted names and honor the
    recurse flag (fixture nets come from self._create_basic_net)."""
    def names(named_buffers):
        # Extract just the names from (name, buffer) pairs.
        return [k for k, _ in named_buffers]

    l, n, s = self._create_basic_net()

    self.assertEqual(len(list(l.buffers())), 1)
    self.assertEqual(
        names(l.named_buffers()),
        ['layer_dummy_buf'])

    self.assertEqual(len(list(n.buffers())), 2)
    self.assertEqual(
        names(n.named_buffers()),
        ['dummy_buf', 'l1.layer_dummy_buf'])

    # recurse=False limits the walk to direct members.
    self.assertEqual(len(list(n.buffers(recurse=False))), 1)
    self.assertEqual(
        names(n.named_buffers(recurse=False)),
        ['dummy_buf'])

    self.assertEqual(len(list(s.buffers())), 2)
    self.assertEqual(
        names(s.named_buffers()),
        ['0.dummy_buf', '0.l1.layer_dummy_buf'])
def test_call_supports_python_dict_output(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.l1 = nn.Linear(10, 20)
self.register_backward_hook(self.hook)
self.check_backward_hook_flag = False
def hook(self, module, grad_out, grad_in):
self.check_backward_hook_flag = True
def forward(self, inputs):
return {"output": self.l1(inputs).sum()}
net = Net()
model_output = net(torch.randn([5, 10]))
model_output["output"].backward()
self.assertTrue(net.check_backward_hook_flag)
def test_children(self):
l1 = nn.Linear(2, 2)
l2 = nn.Linear(2, 2)
l3 = nn.Linear(2, 2)
l4 = nn.Linear(2, 2)
subnet = nn.Sequential(l3, l4)
s = nn.Sequential(l1, l2, l1, l2, subnet)
self.assertEqual(list(s.children()), [l1, l2, subnet])
def test_train_errors_for_invalid_mode(self):
class SubclassNet(nn.Module):
def __init__(self):
super(SubclassNet, self).__init__()
self.l1 = nn.Linear(2, 2)
def forward(self, inputs):
return self.l1(inputs)
subclass_net = SubclassNet()
sequential_net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
error_modes = ["invalid_str", torch.device('cpu')]
modules_to_check = [subclass_net, sequential_net]
for error_mode, module in itertools.product(error_modes, modules_to_check):
with self.assertRaises(ValueError):
module.train(error_mode)
def test_dir(self):
linear = nn.Linear(2, 2)
linear._test_submodule = nn.Linear(2, 2)
linear._test_parameter = Parameter(torch.empty(2, 2))
linear.register_buffer('_test_buffer', torch.empty(2, 2))
keys = dir(linear)
self.assertIn('_test_submodule', keys)
self.assertIn('_test_parameter', keys)
self.assertIn('_test_buffer', keys)
for key in keys:
self.assertTrue(hasattr(linear, key))
def test_repr(self):
# no extra information or sub-modules
empty_sequential = nn.Sequential()
expected_repr_empty = 'Sequential()'
self.assertEqual(repr(empty_sequential), expected_repr_empty)
# one liner extra information
linear = nn.Linear(1, 1)
expected_repr_linear = 'Linear(in_features=1, out_features=1, bias=True)'
self.assertEqual(repr(linear), expected_repr_linear)
# sub-modules repr
sequential = nn.Sequential(linear)
expected_repr_sequential = 'Sequential(\n' \
' (0): Linear(in_features=1, out_features=1, bias=True)\n' \
')'
self.assertEqual(repr(sequential), expected_repr_sequential)
def test_dir_digit(self):
model = nn.Sequential(nn.Linear(2, 2))
keys = dir(model)
self.assertNotIn('0', keys)
def test_named_children(self):
    """add_module rejects empty and dotted names; named_children(), like
    children(), deduplicates repeated module instances and keeps only the
    first name each instance was registered under."""
    l1 = nn.Linear(2, 2)
    l2 = nn.Linear(2, 2)
    l3 = nn.Linear(2, 2)
    l4 = nn.Linear(2, 2)
    subnet = nn.Sequential(l3, l4)
    s = nn.Sequential()
    # Invalid submodule names are rejected up front.
    with self.assertRaises(KeyError):
        s.add_module('', l1)
    with self.assertRaises(KeyError):
        s.add_module('name.with.dot', l1)
    s.add_module('layer1', l1)
    s.add_module('layer2', l2)
    # Same instances again under new names: only the first names survive.
    s.add_module('layer3', l1)
    s.add_module('layer4', l2)
    s.add_module('subnet', subnet)
    self.assertEqual(list(s.named_children()), [('layer1', l1), ('layer2', l2), ('subnet', subnet)])
def test_modules(self):
    """modules() walks the tree depth-first and deduplicates repeated
    instances: n appears four times in s and l twice in n, but each is
    yielded exactly once."""
    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            # Both attributes reference the same Linear instance `l`,
            # captured from the enclosing function scope (bound at
            # instantiation time, after `l` is defined below).
            self.l1 = l
            self.l2 = l
            # Plain tensor attribute: not a module, so never yielded.
            self.param = torch.empty(3, 5)
    l = nn.Linear(10, 20)
    n = Net()
    s = nn.Sequential(n, n, n, n)
    self.assertEqual(list(s.modules()), [s, n, l])
def test_named_modules(self):
    """named_modules() yields dotted names depth-first; duplicate
    instances are skipped unless remove_duplicate=False is passed."""
    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            # `l` and `block` are shared instances captured from the
            # enclosing scope (defined below, before instantiation).
            self.l1 = l
            self.l2 = l
            self.param = torch.empty(3, 5)
            self.block = block
    l = nn.Linear(10, 20)
    l1 = nn.Linear(10, 20)
    l2 = nn.Linear(10, 20)
    block = nn.Sequential()
    block.add_module('linear1', l1)
    block.add_module('linear2', l2)
    n = Net()
    s = nn.Sequential(n, n)
    # Default: '0.l2' and everything under '1' are duplicates and elided.
    self.assertEqual(list(s.named_modules()), [('', s), ('0', n), ('0.l1', l),
                                               ('0.block', block), ('0.block.linear1', l1),
                                               ('0.block.linear2', l2)])
    # test the option to not remove duplicate module instances
    self.assertEqual(list(s.named_modules(remove_duplicate=False)), [
        ('', s), ('0', n), ('0.l1', l), ('0.l2', l),
        ('0.block', block), ('0.block.linear1', l1),
        ('0.block.linear2', l2),
        ('1', n), ('1.l1', l), ('1.l2', l),
        ('1.block', block), ('1.block.linear1', l1),
        ('1.block.linear2', l2)])
def test_register_buffer_raises_error_if_name_is_not_string(self):
m = nn.Module()
expected_error = 'buffer name should be a string. Got '
with self.assertRaisesRegex(TypeError, expected_error + 'int'):
m.register_buffer(1, torch.rand(5))
with self.assertRaisesRegex(TypeError, expected_error + 'NoneType'):
m.register_buffer(None, torch.rand(5))
def test_register_buffer_raises_error_if_attr_exists(self):
    """register_buffer must refuse a name already used by a plain
    attribute, a parameter, or a submodule."""
    m = nn.Module()
    # Name taken by a plain attribute.
    m.attribute_name = 5
    with self.assertRaises(KeyError):
        m.register_buffer('attribute_name', torch.rand(5))

    # Name taken by a parameter.
    del m.attribute_name
    m.register_parameter('attribute_name', nn.Parameter())
    with self.assertRaises(KeyError):
        m.register_buffer('attribute_name', torch.rand(5))

    # Name taken by a submodule.
    del m.attribute_name
    m.add_module('attribute_name', nn.Module())
    with self.assertRaises(KeyError):
        m.register_buffer('attribute_name', torch.rand(5))
def test_register_buffer_raises_error_if_not_tensor(self):
m = nn.Module()
with self.assertRaises(TypeError):
m.register_buffer('attribute_name', 5)
def test_register_buffer_allows_overwriting_with_same_name(self):
m = nn.Module()
buffer1 = torch.rand(5)
buffer2 = buffer1 + 5
buffer3 = None
m.register_buffer('buffer_name', buffer1)
self.assertEqual(m.buffer_name, buffer1)
m.register_buffer('buffer_name', buffer2)
self.assertEqual(m.buffer_name, buffer2)
m.register_buffer('buffer_name', buffer3)
self.assertEqual(m.buffer_name, buffer3)
def test_get_buffer(self):
m = nn.Module()
buffer1 = torch.randn(2, 3)
buffer2 = torch.randn(4, 5)
m.register_buffer('foo', buffer1)
m.register_buffer('bar', buffer2)
self.assertEqual(buffer1, m.get_buffer('foo'))
self.assertEqual(buffer2, m.get_buffer('bar'))
def test_get_buffer_from_submodules(self):
    """get_buffer resolves dotted paths through nested submodules."""
    class MyModule(nn.Module):
        def __init__(self, foo, bar):
            super().__init__()
            self.sub = Sub(foo, bar)

    class Sub(nn.Module):
        def __init__(self, foo, bar):
            super().__init__()
            # One buffer at this level, one a level deeper.
            self.register_buffer('foo', foo)
            self.subsub = SubSub(bar)

    class SubSub(nn.Module):
        def __init__(self, bar):
            super().__init__()
            self.register_buffer('bar', bar)

    foo = torch.randn(2, 3)
    bar = torch.randn(4, 5)
    m = MyModule(foo, bar)
    self.assertEqual(foo, m.get_buffer('sub.foo'))
    self.assertEqual(bar, m.get_buffer('sub.subsub.bar'))
def test_buffer_not_persistent(self):
m = nn.Module()
m.register_buffer('buf', torch.rand(5), persistent=False)
self.assertTrue(len(list(m.buffers())) == 1)
self.assertTrue(len(m.state_dict()) == 0)
def test_buffer_not_persistent_del(self):
m = nn.Module()
m.register_buffer('buf', torch.rand(5), persistent=False)
del m.buf
self.assertTrue(len(list(m.buffers())) == 0)
def test_buffer_not_persistent_overwrite(self):
    """Re-registering a buffer under the same name switches its
    persistence flag in both directions."""
    m = nn.Module()
    m.register_buffer('buf', torch.rand(5), persistent=False)
    m.register_buffer('buf', torch.rand(5))

    # can we overwrite a non-persistent buffer with a persistent one?
    self.assertTrue(len(list(m.buffers())) == 1)
    self.assertTrue(len(m.state_dict()) == 1)

    # can we overwrite a persistent buffer with a non-persistent one?
    m.register_buffer('buf', torch.rand(5), persistent=False)
    self.assertTrue(len(list(m.buffers())) == 1)
    self.assertTrue(len(m.state_dict()) == 0)
def test_buffer_not_persistent_assign(self):
    """Plain attribute assignment interacts with a non-persistent buffer:
    None removes it, a new Tensor re-registers it (still non-persistent),
    and a Parameter converts the name from buffer to parameter."""
    m = nn.Module()
    m.register_buffer('buf', torch.rand(5), persistent=False)

    # Assigning None removes the buffer but if we then assign a new Tensor
    # to the same property, it should still be marked as a buffer.
    m.buf = None
    self.assertTrue(len(list(m.buffers())) == 0)
    self.assertTrue(len(m.state_dict()) == 0)
    m.buf = torch.rand(5)
    self.assertTrue(len(list(m.buffers())) == 1)
    # Still non-persistent, so still absent from state_dict.
    self.assertTrue(len(m.state_dict()) == 0)

    # Assigning a Parameter removes the buffer.
    m.buf = nn.Parameter(torch.rand(5))
    self.assertTrue(len(list(m.buffers())) == 0)
    # Parameters are always persistent.
    self.assertTrue(len(m.state_dict()) == 1)
@unittest.skipIf(not TEST_NUMPY, "numpy not found")
def test_load_state_dict_invalid(self):
    """load_state_dict must reject values that are not tensors (numpy
    arrays and nested tuples alike) with a descriptive RuntimeError."""
    m = torch.nn.Linear(2, 2, bias=False)

    # numpy array is Tensor-like in shape but is not accepted.
    state_dict = {'weight': np.random.randn(2, 2)}
    with self.assertRaisesRegex(RuntimeError,
                                "expected torch.Tensor or Tensor-like object from checkpoint but received"):
        m.load_state_dict(state_dict)

    # Nested tuples are not accepted either.
    state_dict = {'weight': ((1., 1.), (2., 2.))}
    with self.assertRaisesRegex(RuntimeError,
                                "expected torch.Tensor or Tensor-like object from checkpoint but received"):
        m.load_state_dict(state_dict)
def test_load_state_dict_type(self):
m = nn.Module()
with self.assertRaisesRegex(TypeError,
"Expected state_dict to be dict-like, got"):
m.load_state_dict("")
with self.assertRaisesRegex(TypeError,
"Expected state_dict to be dict-like, got"):
m.load_state_dict(2)
def test_buffer_not_persistent_load(self):
m = nn.Module()
m.register_buffer('buf', torch.rand(5), persistent=False)
m.load_state_dict({})
def test_register_parameter_raises_error_if_name_is_not_string(self):
m = nn.Module()
expected_error = 'parameter name should be a string. Got '
with self.assertRaisesRegex(TypeError, expected_error + 'int'):
m.register_parameter(1, nn.Parameter())
with self.assertRaisesRegex(TypeError, expected_error + 'NoneType'):
m.register_parameter(None, nn.Parameter())
def test_register_parameter_raises_error_if_attr_exists(self):
    """register_parameter must refuse a name already used by a plain
    attribute, a buffer, or a submodule."""
    m = nn.Module()
    # Name taken by a plain attribute.
    m.attribute_name = 5
    with self.assertRaises(KeyError):
        m.register_parameter('attribute_name', nn.Parameter())

    # Name taken by a buffer.
    del m.attribute_name
    m.register_buffer('attribute_name', torch.rand(5))
    with self.assertRaises(KeyError):
        m.register_parameter('attribute_name', nn.Parameter())

    # Name taken by a submodule.
    del m.attribute_name
    m.add_module('attribute_name', nn.Module())
    with self.assertRaises(KeyError):
        m.register_parameter('attribute_name', nn.Parameter())
def test_register_parameter_allows_overwriting_with_same_name(self):
    """Re-registering a parameter under an existing name replaces the old
    value, including replacement with None."""
    m = nn.Module()
    param1 = nn.Parameter(torch.rand(5))
    param2 = nn.Parameter(param1.data + 5)
    param3 = None
    m.register_parameter('param_name', param1)
    self.assertEqual(m.param_name, param1)
    m.register_parameter('param_name', param2)
    self.assertEqual(m.param_name, param2)
    m.register_parameter('param_name', param3)
    self.assertEqual(m.param_name, param3)
def test_add_module_raises_error_if_attr_exists(self):
    """add_module and its alias register_module must refuse a name already
    used by a plain attribute, a buffer, or a parameter."""
    methods_to_test = ['add_module', 'register_module']
    for fn in methods_to_test:
        m = nn.Module()
        # Name taken by a plain attribute.
        m.attribute_name = 5
        with self.assertRaises(KeyError):
            getattr(m, fn)('attribute_name', nn.Module())

        # Name taken by a buffer.
        del m.attribute_name
        m.register_buffer('attribute_name', torch.rand(5))
        with self.assertRaises(KeyError):
            getattr(m, fn)('attribute_name', nn.Module())

        # Name taken by a parameter.
        del m.attribute_name
        m.register_parameter('attribute_name', nn.Parameter())
        with self.assertRaises(KeyError):
            getattr(m, fn)('attribute_name', nn.Module())
@unittest.expectedFailure
def test_getattr_with_property(self):
    """A property whose getter raises AttributeError should surface the
    inner missing attribute's name; nn.Module.__getattr__ currently
    swallows it and reports the property name instead — hence the
    expectedFailure marker."""
    class Model(nn.Module):
        @property
        def some_property(self):
            # Raises AttributeError for a genuinely missing attribute.
            return self.something_that_doesnt_exist

    model = Model()

    with self.assertRaisesRegex(
            AttributeError,
            r"'Model' object has no attribute 'something_that_doesnt_exist'"):
        model.some_property
def test_Sequential_getitem(self):
    """Indexing with ints and 0-dim integer tensors returns the module;
    slicing returns a new Sequential over the same instances."""
    l1 = nn.Linear(10, 20)
    l2 = nn.Linear(20, 30)
    l3 = nn.Linear(30, 40)
    l4 = nn.Linear(40, 50)
    n = nn.Sequential(l1, l2, l3, l4)
    self.assertIs(n[0], l1)
    self.assertIs(n[1], l2)
    self.assertIs(n[2], l3)
    self.assertIs(n[3], l4)
    # Zero-dim integer tensors are valid indices too.
    self.assertIs(n[torch.tensor(3, dtype=torch.int64)], l4)
    self.assertEqual(n[1:], nn.Sequential(l2, l3, l4))
    self.assertEqual(n[3:], nn.Sequential(l4))
    self.assertEqual(n[:-1], nn.Sequential(l1, l2, l3))
    self.assertEqual(n[:-3], nn.Sequential(l1))
    self.assertEqual(n[::-1], nn.Sequential(l4, l3, l2, l1))
def test_Sequential_setitem(self):
l1 = nn.Linear(10, 20)
l2 = nn.Linear(20, 30)
l3 = nn.Linear(30, 40)
l4 = nn.Linear(40, 50)
n = nn.Sequential(l1, l2, l3)
n[0] = l4
n[-1] = l4
n[torch.tensor(1, dtype=torch.int16)] = l1
self.assertIs(n[0], l4)
self.assertIs(n[1], l1)
self.assertIs(n[2], l4)
def test_Sequential_setitem_named(self):
    """Index assignment on an OrderedDict-built Sequential keeps the
    original string keys for the replaced slots."""
    l1 = nn.Linear(10, 20)
    l2 = nn.Linear(20, 30)
    l3 = nn.Linear(30, 40)
    l4 = nn.Linear(40, 50)
    n = nn.Sequential(OrderedDict([
        ('linear1', l1),
        ('linear2', l2),
        ('linear3', l3),
    ]))
    n[0] = l4
    n[-1] = l4
    # Replacements remain reachable under the original attribute names.
    self.assertEqual(n.linear1, l4)
    self.assertEqual(n.linear3, l4)
def test_Sequential_delitem(self):
    """__delitem__ supports both single (negative) indices and slices."""
    l1 = nn.Linear(10, 20)
    l2 = nn.Linear(20, 30)
    l3 = nn.Linear(30, 40)
    l4 = nn.Linear(40, 50)
    n = nn.Sequential(l1, l2, l3, l4)
    del n[-1]
    self.assertEqual(n, nn.Sequential(l1, l2, l3))
    # Slice deletion removes l2 (index 1 of the remaining three).
    del n[1::2]
    self.assertEqual(n, nn.Sequential(l1, l3))
def test_Sequential_add(self):
l1 = nn.Linear(1, 2)
l2 = nn.Linear(2, 3)
l3 = nn.Linear(3, 4)
l4 = nn.Linear(4, 5)
n = nn.Sequential(l1, l2)
other = nn.Sequential(l3, l4)
self.assertEqual(n + other, nn.Sequential(l1, l2, l3, l4))
def test_Sequential_iadd(self):
    """+= extends a Sequential in place, so the right-hand container can
    subsequently absorb the already-extended left-hand one."""
    l1 = nn.Linear(10, 20)
    l2 = nn.Linear(20, 30)
    l3 = nn.Linear(30, 40)
    l4 = nn.Linear(40, 50)
    n = nn.Sequential(l1, l2, l3)
    n2 = nn.Sequential(l4)
    n += n2
    # n2 now receives n's four layers on top of its own l4.
    n2 += n
    self.assertEqual(n, nn.Sequential(l1, l2, l3, l4))
    self.assertEqual(n2, nn.Sequential(l4, l1, l2, l3, l4))
def test_Sequential_mul(self):
l1 = nn.Linear(10, 20)
l2 = nn.Linear(20, 30)
l3 = nn.Linear(30, 40)
l4 = nn.Linear(40, 50)
n = nn.Sequential(l1, l2, l3, l4)
n2 = n * 2
self.assertEqual(n2, nn.Sequential(l1, l2, l3, l4, l1, l2, l3, l4))
def test_Sequential_rmul(self):
    """k * seq works like seq * k: a new Sequential with layers repeated."""
    l1 = nn.Linear(10, 20)
    l2 = nn.Linear(20, 30)
    l3 = nn.Linear(30, 40)
    l4 = nn.Linear(40, 50)
    n = nn.Sequential(l1, l2, l3, l4)
    n2 = 2 * n
    self.assertEqual(n2, nn.Sequential(l1, l2, l3, l4, l1, l2, l3, l4))
def test_Sequential_imul(self):
    """*= repeats the layers in place; repeating twice compounds (4 -> 8 -> 16)."""
    l1 = nn.Linear(10, 20)
    l2 = nn.Linear(20, 30)
    l3 = nn.Linear(30, 40)
    l4 = nn.Linear(40, 50)
    n = nn.Sequential(l1, l2, l3, l4)
    n *= 2
    self.assertEqual(n, nn.Sequential(l1, l2, l3, l4, l1, l2, l3, l4))
    n *= 2
    self.assertEqual(
        n,
        nn.Sequential(l1, l2, l3, l4, l1, l2, l3, l4, l1, l2, l3, l4, l1, l2, l3, l4)
    )
def test_Sequential_append(self):
    """append() mutates in place and returns the container itself, so
    calls can be chained."""
    l1 = nn.Linear(10, 20)
    l2 = nn.Linear(20, 30)
    l3 = nn.Linear(30, 40)
    l4 = nn.Linear(40, 50)
    n = nn.Sequential(l1, l2, l3)
    n2 = n.append(l4)
    # n2 is the same (mutated) container, not a copy.
    self.assertEqual(n, nn.Sequential(l1, l2, l3, l4))
    self.assertEqual(n2, nn.Sequential(l1, l2, l3, l4))
    self.assertEqual(nn.Sequential(l1).append(l2).append(l4), nn.Sequential(l1, l2, l4))
def test_Sequential_pop(self):
    """pop() returns the removed module and reindexes the remainder
    contiguously from 0."""
    l1 = nn.Linear(1, 2)
    l2 = nn.Linear(2, 3)
    l3 = nn.Linear(3, 4)
    l4 = nn.Linear(4, 5)
    n1 = nn.Sequential(l1, l2, l3, l4)
    self.assertEqual(l4, n1.pop(3))
    n2 = nn.Sequential(l1, l2, l3)
    self.assertEqual(n1, n2)
    # check order of the index
    for k, mod in zip(range(len(n1)), n1):
        self.assertIs(n1[k], mod)
def test_Sequential_insert(self):
    """insert() supports positive and negative indices, shifting later
    modules right."""
    l1 = nn.Linear(1, 2)
    l2 = nn.Linear(2, 3)
    l3 = nn.Linear(3, 4)
    n1 = nn.Sequential(l1, l2, l3)
    module_1 = nn.Linear(4, 5)
    n2 = nn.Sequential(l1, module_1, l2, l3)
    self.assertEqual(n1.insert(1, module_1), n2)

    # test for negative support
    n3 = nn.Sequential(l1, l2, l3)
    module_2 = nn.Linear(5, 6)
    n4 = nn.Sequential(l1, module_2, l2, l3)
    # -2 on a length-3 container is equivalent to index 1.
    self.assertEqual(n3.insert(-2, module_2), n4)
def test_Sequential_insert_fail_case(self):
l1 = nn.Linear(1, 2)
l2 = nn.Linear(2, 3)
l3 = nn.Linear(3, 4)
module = nn.Linear(5, 6)
# test for error case
n1 = nn.Sequential(l1, l2, l3)
with self.assertRaises(IndexError):
n1.insert(-5, module)
with self.assertRaises(AssertionError):
n1.insert(1, [nn.Linear(6, 7)])
def test_Sequential_extend(self):
    """extend() must be equivalent to appending each element in order."""
    l1 = nn.Linear(10, 20)
    l2 = nn.Linear(20, 30)
    l3 = nn.Linear(30, 40)
    l4 = nn.Linear(40, 50)
    n1 = nn.Sequential(l1, l2)
    n2 = nn.Sequential(l3, l4)
    n3 = nn.Sequential(l1, l2)
    # Manually append every module of n2 onto n1 ...
    for l in n2:
        n1.append(l)
    # ... and extend n3 with n2 in one call; the results must match.
    n3.extend(n2)
    self.assertEqual(n3, n1)
def test_ModuleList(self):
    """Exercise the full nn.ModuleList API (indexing, slicing, mutation,
    concatenation, iteration) and check it always mirrors a plain list."""
    modules = [nn.ReLU(), nn.Linear(5, 5)]
    module_list = nn.ModuleList(modules)

    def check():
        # Invariant: module_list mirrors `modules` element-for-element.
        self.assertEqual(len(module_list), len(modules))
        for m1, m2 in zip(modules, module_list):
            self.assertIs(m1, m2)
        for m1, m2 in zip(modules, module_list.children()):
            self.assertIs(m1, m2)
        for i in range(len(modules)):
            self.assertIs(module_list[i], modules[i])

    check()
    modules += [nn.Conv2d(3, 4, 3)]
    module_list += [modules[-1]]
    check()
    # `+` returns a new ModuleList rather than mutating in place.
    modules = modules + [nn.Conv2d(3, 4, 3, bias=False), nn.GELU()]
    module_list = module_list + nn.ModuleList(modules[-2:])
    check()
    modules.insert(1, nn.Linear(3, 2))
    module_list.insert(1, modules[1])
    check()
    modules.append(nn.Tanh())
    module_list.append(modules[-1])
    check()
    next_modules = [nn.Linear(5, 5), nn.Sigmoid()]
    modules.extend(next_modules)
    module_list.extend(next_modules)
    check()
    modules[2] = nn.Conv2d(5, 3, 2)
    module_list[2] = modules[2]
    check()
    modules[-1] = nn.Conv2d(5, 2, 1)
    module_list[-1] = modules[-1]
    check()
    # Zero-dim integer tensors are accepted as indices.
    idx = torch.tensor(2, dtype=torch.int32)
    modules[2] = nn.Conv2d(5, 3, 2)
    module_list[idx] = modules[2]
    self.assertIs(module_list[idx], modules[2])
    check()
    # Slicing returns a new ModuleList over the same instances.
    self.assertEqual(module_list[1:], nn.ModuleList(modules[1:]))
    self.assertEqual(module_list[3:], nn.ModuleList(modules[3:]))
    self.assertEqual(module_list[:-1], nn.ModuleList(modules[:-1]))
    self.assertEqual(module_list[:-3], nn.ModuleList(modules[:-3]))
    self.assertEqual(module_list[::-1], nn.ModuleList(modules[::-1]))
    del module_list[-1]
    self.assertEqual(module_list, nn.ModuleList(modules[:-1]))
    del module_list[1::2]
    self.assertEqual(module_list, nn.ModuleList(modules[:-1][0::2]))
    # Only iterables of modules may be added/extended, not a bare module.
    with self.assertRaises(TypeError):
        module_list += nn.ReLU()
    with self.assertRaises(TypeError):
        module_list.extend(nn.ReLU())

    l1 = nn.Linear(1, 2)
    l2 = nn.Linear(2, 3)
    l3 = nn.Linear(3, 2)
    l4 = nn.Linear(2, 3)
    subnet = nn.Sequential(l3, l4)
    s = nn.Sequential(
        OrderedDict([
            ("layer1", l1),
            ("layer2", l2),
            ("layer3", l3),
            ("layer4", l4),
            ("subnet_layer", subnet)
        ])
    )
    # extend() from a generator of modules must also work.
    modules = list(s.modules())
    module_list = nn.ModuleList()
    module_list.extend(s.modules())
    check()

    modules = [nn.ReLU(), nn.Linear(5, 5), nn.Conv2d(3, 4, 3)]
    module_list = nn.ModuleList(modules)
    self.assertEqual(modules.pop(1), module_list.pop(1))
    self.assertEqual(modules, module_list)
    # check order of the index
    for k, mod in zip(range(len(module_list)), module_list):
        self.assertIs(module_list[k], mod)

    # verify the right exception is thrown when trying to "forward" through a ModuleList
    self.assertRaises(NotImplementedError, module_list)
    self.assertRaises(NotImplementedError, module_list, torch.rand(1, 3))
def test_ModuleDict(self):
    """Exercise nn.ModuleDict: construction, key mutation, update() from
    several iterable/mapping types, deletion, pop/clear, and the
    not-callable (no forward) contract."""
    modules = OrderedDict([
        ('act', nn.ReLU()),
        ('conv', nn.Conv2d(10, 10, 5)),
        ('fc', nn.Linear(5, 5)),
    ])
    module_dict = nn.ModuleDict(modules)

    def check():
        # `module_dict` must mirror the reference `modules` dict: same
        # length, same key order, and identical (not merely equal) module
        # objects through every access path (children / iteration / keys /
        # items / values / `in`).
        self.assertEqual(len(module_dict), len(modules))
        for k1, m2 in zip(modules, module_dict.children()):
            self.assertIs(modules[k1], m2)
        for k1, k2 in zip(modules, module_dict):
            self.assertIs(modules[k1], module_dict[k2])
        for k in module_dict:
            self.assertIs(module_dict[k], modules[k])
        for k in module_dict.keys():
            self.assertIs(module_dict[k], modules[k])
        for k, v in module_dict.items():
            self.assertIs(modules[k], v)
        for k1, m2 in zip(modules, module_dict.values()):
            self.assertIs(modules[k1], m2)
        for k in modules.keys():
            self.assertTrue(k in module_dict)
    check()

    # Overwriting an existing key keeps both dicts in sync.
    modules['conv'] = nn.Conv2d(3, 4, 3)
    module_dict['conv'] = modules['conv']
    check()

    # update() from a list of (name, module) pairs ('act' is overwritten).
    next_modules = [
        ('fc2', nn.Linear(5, 5)),
        ('act', nn.Sigmoid()),
    ]
    modules.update(next_modules)
    module_dict.update(next_modules)
    check()

    # update() from an OrderedDict.
    next_modules = OrderedDict([
        ('fc3', nn.Linear(5, 5)),
        ('act2', nn.Sigmoid()),
    ])
    modules.update(next_modules)
    module_dict.update(next_modules)
    check()

    # update() from a plain dict.
    next_modules = {
        'fc4': nn.Linear(5, 5),
        'act3': nn.Sigmoid()
    }
    modules.update(next_modules.items())
    module_dict.update(next_modules)
    check()

    # update() from another ModuleDict.
    next_modules = nn.ModuleDict([
        ('fc5', nn.Linear(5, 5)),
        ('act4', nn.Sigmoid()),
    ])
    modules.update(next_modules)
    module_dict.update(next_modules)
    check()

    del module_dict['fc']
    del modules['fc']
    check()

    # update() rejects non-iterables and iterables of bare modules; keys
    # must be strings.
    with self.assertRaises(TypeError):
        module_dict.update(nn.ReLU())
    with self.assertRaises(TypeError):
        module_dict.update([nn.ReLU()])
    with self.assertRaises(ValueError):
        module_dict.update([[nn.ReLU()]])
    with self.assertRaises(TypeError):
        module_dict[1] = nn.ReLU()

    # Rebuild from a Sequential's named children.
    s = nn.Sequential(modules)
    module_dict = nn.ModuleDict(s.named_children())
    check()

    # pop() returns the very object that was stored.
    c = module_dict.pop('conv')
    self.assertIs(c, modules['conv'])
    modules.pop('conv')
    check()

    module_dict.clear()
    self.assertEqual(len(module_dict), 0)
    modules.clear()
    check()

    # verify the right exception is thrown when trying to "forward" through a ModuleDict
    self.assertRaises(NotImplementedError, module_dict)
    self.assertRaises(NotImplementedError, module_dict, torch.rand(1, 3))
@skipIfTorchDynamo("TorchDynamo fails here for unknown reasons")
def test_ParameterList(self):
    """Exercise nn.ParameterList: construction, += / append / extend, item
    assignment (including negative and tensor indices), slicing, rejection
    of bare-parameter extend, and implicit wrapping of plain tensors into
    Parameters while non-tensor objects are stored as-is."""
    def make_param():
        return Parameter(torch.randn(2, 2))
    parameters = [make_param(), make_param()]
    param_list = nn.ParameterList(parameters)

    def check():
        # `param_list` must mirror the reference `parameters` list: same
        # length, identical objects by iteration, by .parameters(), and by
        # index.
        self.assertEqual(len(parameters), len(param_list))
        for p1, p2 in zip(parameters, param_list):
            self.assertIs(p1, p2)
        for p1, p2 in zip(filter(lambda x: isinstance(x, Parameter), parameters), param_list.parameters()):
            self.assertIs(p1, p2)
        for i in range(len(parameters)):
            self.assertIs(parameters[i], param_list[i])
    check()

    parameters += [make_param()]
    param_list += [parameters[-1]]
    check()

    parameters.append(make_param())
    param_list.append(parameters[-1])
    check()

    next_params = [make_param(), make_param()]
    parameters.extend(next_params)
    param_list.extend(next_params)
    check()

    # Item assignment, including negative and 0-dim tensor indices.
    parameters[2] = make_param()
    param_list[2] = parameters[2]
    check()

    parameters[-1] = make_param()
    param_list[-1] = parameters[-1]
    check()

    idx = torch.tensor(2, dtype=torch.int32)
    parameters[2] = make_param()
    param_list[idx] = parameters[2]
    self.assertIs(param_list[idx], parameters[2])
    check()

    # Slicing returns a new ParameterList over the same objects.
    self.assertEqual(param_list[1:], nn.ParameterList(parameters[1:]))
    self.assertEqual(param_list[3:], nn.ParameterList(parameters[3:]))
    self.assertEqual(param_list[:-1], nn.ParameterList(parameters[:-1]))
    self.assertEqual(param_list[:-3], nn.ParameterList(parameters[:-3]))
    self.assertEqual(param_list[::-1], nn.ParameterList(parameters[::-1]))

    # += / extend require an iterable of parameters, not a bare Parameter.
    with self.assertRaises(TypeError):
        param_list += make_param()
    with self.assertRaises(TypeError):
        param_list.extend(make_param())

    l1 = nn.Linear(1, 2)
    l2 = nn.Linear(2, 3)
    l3 = nn.Linear(3, 2)
    l4 = nn.Linear(2, 3)
    subnet = nn.Sequential(l3, l4)
    s = nn.Sequential(
        OrderedDict([
            ("layer1", l1),
            ("layer2", l2),
            ("layer3", l3),
            ("layer4", l4),
            ("subnet_layer", subnet)
        ])
    )
    parameters = list(s.parameters())
    param_list = nn.ParameterList()
    param_list.extend(s.parameters())
    check()

    # Plain tensors are wrapped into Parameters on append/extend/+=;
    # non-tensor objects (here strings) are stored unchanged.
    param_list.append(torch.rand(2, 2))
    self.assertIsInstance(param_list[-1], Parameter)
    parameters.append(param_list[-1])

    param_list.extend([torch.rand(2, 2), "foo"])
    self.assertIsInstance(param_list[-2], Parameter)
    self.assertIsInstance(param_list[-1], str)
    parameters.extend(param_list[-2:])

    param_list += ["bar", torch.rand(2, 2)]
    self.assertIsInstance(param_list[-2], str)
    self.assertIsInstance(param_list[-1], Parameter)
    parameters += param_list[-2:]
    check()
def test_ParameterList_meta(self):
    # repr of a meta-device Parameter: meta tensors carry no data, so the
    # repr prints "..." plus size/device instead of values.
    p = torch.nn.Parameter(torch.empty(1, device='meta'))
    self.assertExpectedInline(str(p), """\
Parameter containing:
tensor(..., device='meta', size=(1,), requires_grad=True)""")
    pl = torch.nn.ParameterList([p])
    # NOTE(review): the expected torch.float64 dtype presumably relies on
    # this test module setting the default dtype to double at import time —
    # confirm against the file header.
    self.assertExpectedInline(str(pl), """ParameterList( (0): Parameter containing: [torch.float64 of size 1])""")
def test_ParameterList_replication(self):
# The actual replication code from DP cannot be used on CPU so doing it manually here
def make_param():
return Parameter(torch.randn(2, 2))
parameters = [make_param(), make_param()]
param_list = nn.ParameterList(parameters)
new_param_list = param_list._replicate_for_data_parallel()
for n, p in param_list.named_parameters():
# Do a view here so that we can check the base later
setattr(new_param_list, n, p.view_as(p))
for p, p2 in zip(param_list, new_param_list):
self.assertEqual(p, p2)
self.assertIsNotNone(p2.grad_fn)
self.assertIs(p2._base, p)
@skipIfTorchDynamo("TorchDynamo fails here for unknown reasons")
def test_ParameterDict(self):
    """Exercise nn.ParameterDict: construction, mutation, update() from
    several iterable/mapping types, pop/popitem/setdefault/get/fromkeys/
    copy, |= and | / reversed(), and implicit wrapping of plain tensors."""
    parameters = OrderedDict([
        ('p1', Parameter(torch.randn(10, 10))),
        ('p2', Parameter(torch.randn(10, 10))),
        ('p3', Parameter(torch.randn(10, 10))),
    ])
    parameter_dict = nn.ParameterDict(parameters)

    def check():
        # `parameter_dict` must mirror the reference `parameters` dict:
        # same length, same key order, identical objects through every
        # access path.
        self.assertEqual(len(parameter_dict), len(parameters))
        for i, (k1, (k2, m2)) in enumerate(zip(parameters, parameter_dict.named_parameters())):
            self.assertEqual(k1, k2)
            self.assertIs(parameters[k1], m2)
        for k1, k2 in zip(parameters, parameter_dict):
            self.assertIs(parameters[k1], parameter_dict[k2])
        for k in parameter_dict:
            self.assertIs(parameter_dict[k], parameters[k])
        for k in parameter_dict.keys():
            self.assertIs(parameter_dict[k], parameters[k])
        for k, v in parameter_dict.items():
            self.assertIs(v, parameters[k])
        for k1, m2 in zip(parameters, parameter_dict.values()):
            self.assertIs(parameters[k1], m2)
        for k in parameters.keys():
            self.assertTrue(k in parameter_dict)
    check()

    parameters['p4'] = Parameter(torch.randn(10, 10))
    parameter_dict['p4'] = parameters['p4']
    check()

    # update() from a list of (name, param) pairs ('p2' is overwritten).
    next_parameters = [
        ('p5', Parameter(torch.randn(10, 10))),
        ('p2', Parameter(torch.randn(10, 10))),
    ]
    parameters.update(next_parameters)
    parameter_dict.update(next_parameters)
    check()

    # update() from an OrderedDict.
    next_parameters = OrderedDict([
        ('p6', Parameter(torch.randn(10, 10))),
        ('p5', Parameter(torch.randn(10, 10))),
    ])
    parameters.update(next_parameters)
    parameter_dict.update(next_parameters)
    check()

    # update() from a plain dict; the reference dict is updated with sorted
    # items so its key order matches what check() expects afterwards.
    next_parameters = {
        'p8': Parameter(torch.randn(10, 10)),
        'p7': Parameter(torch.randn(10, 10))
    }
    parameters.update(sorted(next_parameters.items()))
    parameter_dict.update(next_parameters)
    check()

    # update() from another ParameterDict.
    next_parameters = nn.ParameterDict([
        ('p10', Parameter(torch.randn(10, 10))),
        ('p9', Parameter(torch.randn(10, 10))),
    ])
    parameters.update(next_parameters)
    parameter_dict.update(next_parameters)
    check()

    del parameter_dict['p3']
    del parameters['p3']
    check()

    # update() rejects non-iterables, iterables of bare values, and a bare
    # Parameter.
    with self.assertRaises(TypeError):
        parameter_dict.update(1)
    with self.assertRaises(TypeError):
        parameter_dict.update([1])
    with self.assertRaises(ValueError):
        parameter_dict.update(Parameter(torch.randn(10, 10)))

    # pop() returns the stored object itself.
    p_pop = parameter_dict.pop('p4')
    self.assertIs(p_pop, parameters['p4'])
    parameters.pop('p4')
    check()

    # Check reverse works
    forward = list(iter(parameter_dict))
    backward = list(reversed(parameter_dict))
    self.assertEqual(len(forward), len(backward))
    n = len(forward)
    for i in range(n):
        self.assertIs(forward[i], backward[n - i - 1])
    check()

    # Check copy works
    copy = parameter_dict.copy()

    # Check all keys are present and have shallow copied values
    for key in parameter_dict:
        self.assertTrue(key in copy)
        self.assertEqual(parameter_dict[key], copy[key])
        self.assertIs(parameter_dict[key], copy[key])
    check()

    # The copy is independent: inserting into one does not affect the other.
    parameter_dict["p20"] = Parameter(torch.randn(10, 10))
    copy["p21"] = Parameter(torch.randn(9, 10))
    self.assertTrue("p20" in parameter_dict)
    self.assertFalse("p20" in copy)
    self.assertFalse("p21" in parameter_dict)
    self.assertTrue("p21" in copy)
    parameter_dict.pop("p20")
    check()

    # popitem() removes and returns the last inserted pair.
    p = Parameter(torch.randn(10, 10))
    parameter_dict['p12'] = p
    p_popitem = parameter_dict.popitem()
    self.assertEqual(p_popitem[0], 'p12')
    self.assertIs(p_popitem[1], p)
    check()

    # Unit test for set_default
    # 1. Ensure parameter is correctly inserted when
    # the key is not present in `ParameterDict`
    assert 'p11' not in parameter_dict
    assert 'p11' not in parameters
    parameters['p11'] = Parameter(torch.randn(10, 10))
    p_setdefault = parameter_dict.setdefault('p11', parameters['p11'])
    self.assertIs(p_setdefault, parameters['p11'])
    self.assertIs(p_setdefault, parameter_dict['p11'])
    check()

    # 2. Ensure parameter is NOT inserted when the
    # key is already present in `ParameterDict`
    p = Parameter(torch.randn(10, 10))
    self.assertFalse(parameter_dict.setdefault('p11', p) is p)
    check()

    # 3. Ensure `None` is inserted when the key is not
    # present in `Parameter` and parameter is not specified
    self.assertIs(parameter_dict.setdefault('p26'), None)
    del parameter_dict['p26']
    check()

    # In-place |= from another ParameterDict: non-empty, empty, and
    # overlapping right-hand sides.
    parameters2 = OrderedDict([
        ('p13', Parameter(torch.randn(10, 10))),
        ('p2', Parameter(torch.randn(10, 10))),
        ('p3', Parameter(torch.randn(10, 10))),
    ])
    parameter_dict2 = nn.ParameterDict(parameters2)
    parameters.update(parameters2)
    parameter_dict |= parameter_dict2
    check()

    parameters2 = OrderedDict()
    parameter_dict2 = nn.ParameterDict(parameters2)
    parameters.update(parameters2)
    parameter_dict |= parameter_dict2
    check()

    parameters2 = OrderedDict([
        ('p14', Parameter(torch.randn(10, 10))),
        ('p15', Parameter(torch.randn(10, 10))),
        ('p13', Parameter(torch.randn(10, 10))),
    ])
    parameter_dict2 = nn.ParameterDict(parameters2)
    parameters.update(parameters2)
    parameter_dict |= parameter_dict2
    check()

    # Check __or__ and __ror__ works
    parameters2 = OrderedDict([
        ('p20', Parameter(torch.randn(10, 10))),
        ('p21', Parameter(torch.randn(10, 10))),
        ('p22', Parameter(torch.randn(10, 10))),
    ])
    parameter_dict2 = nn.ParameterDict(parameters2)
    parameters.update(parameters2)
    parameter_dict = parameter_dict | parameter_dict2
    check()

    parameters2 = OrderedDict([
        ('p23', Parameter(torch.randn(10, 10))),
        ('p24', Parameter(torch.randn(10, 10))),
        ('p25', Parameter(torch.randn(10, 10))),
    ])
    parameter_dict2 = nn.ParameterDict(parameters2)
    parameters2.update(parameters)
    parameters = parameters2
    parameter_dict = parameter_dict2 | parameter_dict
    check()

    # get() with and without a default.
    parameters['p17'] = Parameter(torch.randn(10, 10))
    parameter_dict['p17'] = parameters['p17']
    self.assertIs(parameters['p17'], parameter_dict.get('p17'))
    temp_param = Parameter(torch.randn(10, 10))
    self.assertIs(parameters['p17'], parameter_dict.get('p17', temp_param))
    self.assertIs(None, parameter_dict.get('p18'))
    self.assertIs(temp_param, parameter_dict.get('p18', temp_param))
    check()

    parameter_dict.clear()
    self.assertEqual(len(parameter_dict), 0)
    parameters.clear()
    check()

    # fromkeys() with and without a default value.
    parameter_dict2 = parameter_dict.fromkeys(['p19', 'p20'])
    self.assertEqual({'p19': None, 'p20': None}, parameter_dict2)
    check()

    parameter_dict2 = parameter_dict.fromkeys(['p19', 'p20'], temp_param)
    self.assertEqual({'p19': temp_param, 'p20': temp_param}, parameter_dict2)
    check()

    # Plain tensors are wrapped into Parameters; non-tensor values (here a
    # string) are stored as-is.
    parameter_dict['p21'] = torch.rand(2, 2)
    self.assertIsInstance(parameter_dict['p21'], Parameter)
    parameters['p21'] = parameter_dict['p21']

    parameter_dict.update({'p22': torch.rand(2, 2), 'foo': 'bar'})
    self.assertIsInstance(parameter_dict['p22'], Parameter)
    self.assertIsInstance(parameter_dict['foo'], str)
    parameters['p22'] = parameter_dict['p22']
    parameters['foo'] = parameter_dict['foo']
def test_ParameterDict_replication(self):
# The actual replication code from DP cannot be used on CPU so doing it manually here
def make_param():
return Parameter(torch.randn(2, 2))
parameters = {"foo": make_param(), "bar": make_param()}
param_dict = nn.ParameterDict(parameters)
new_param_dict = param_dict._replicate_for_data_parallel()
for n, p in param_dict.named_parameters():
# Do a view here so that we can check the base later
setattr(new_param_dict, n, p.view_as(p))
for (k, p), (k2, p2) in zip(param_dict.items(), new_param_dict.items()):
self.assertEqual(k, k2)
self.assertEqual(p, p2)
self.assertIsNotNone(p2.grad_fn)
self.assertIs(p2._base, p)
self.assertEqual(param_dict["foo"], new_param_dict["foo"])
def test_add_module(self):
methods_to_test = ['add_module', 'register_module']
for fn in methods_to_test:
l = nn.Linear(10, 20)
net = nn.Module()
net.l = l
net.l2 = l
getattr(net, fn)('empty', None)
self.assertEqual(net.l, l)
self.assertEqual(net.l2, l)
self.assertEqual(net.empty, None)
getattr(net, fn)('l3', l)
self.assertEqual(net.l3, l)
l3 = nn.Linear(20, 10)
getattr(net, fn)('l', l3)
self.assertEqual(net.l, l3)
self.assertRaises(TypeError, lambda: getattr(net, fn)('x', 'non-module'))
self.assertRaisesRegex(TypeError, 'module name should be a string. Got int',
lambda: getattr(net, fn)(1, l))
self.assertRaisesRegex(TypeError, 'module name should be a string. Got NoneType',
lambda: getattr(net, fn)(None, l))
def test_module_to_argparse(self):
net = nn.Sequential(nn.Linear(3, 3))
cpu = torch.device('cpu')
with self.assertRaises(TypeError):
net.to(cpu, True)
with self.assertRaises(TypeError):
net.to(torch.long)
with self.assertRaises(TypeError):
net.to(None, True)
with self.assertRaises(TypeError):
net.to(cpu, torch.long, True)
with self.assertRaises(TypeError):
net.to(cpu, dtype=torch.long, non_blocking=True)
with self.assertRaises(TypeError):
net.to([])
with self.assertRaises(TypeError):
net.to({}, non_blocking=True)
with self.assertRaises(TypeError):
net.to(torch.tensor(3, dtype=torch.long), non_blocking=True)
with self.assertRaises(TypeError):
net.to(cpu, torch.tensor(3, dtype=torch.long), non_blocking=True)
def test_RNN_nonlinearity(self):
rnn = torch.nn.RNN(1, 10)
self.assertEqual(rnn.nonlinearity, 'tanh')
rnn = torch.nn.RNN(1, 10, nonlinearity='relu')
self.assertEqual(rnn.nonlinearity, 'relu')
with self.assertRaisesRegex(ValueError, 'Unknown nonlinearity'):
rnn = torch.nn.RNN(1, 10, nonlinearity='garbage')
def test_module_apply_inplace_op(self):
def add_one_inplace(t):
return t.add_(1.0)
# Test that applying an in-place operation to a module would bump
# the module's parameters' version counter.
m = nn.Linear(20, 10)
pvm = m.weight.mul(m.weight)
m_weight_version_saved = m.weight._version
m = m._apply(add_one_inplace)
self.assertGreater(m.weight._version, m_weight_version_saved)
with self.assertRaisesRegex(RuntimeError, "modified by an inplace operation"):
pvm.backward(torch.randn(10, 20))
# Test that applying an in-place operation to a module would bump
# the module's parameters' gradients' version counter.
m = nn.Linear(20, 10)
m.weight.grad = torch.randn(10, 20).requires_grad_()
pgm = m.weight.grad.mul(m.weight.grad)
m_weight_grad_version_saved = m.weight.grad._version
m = m._apply(add_one_inplace)
self.assertGreater(m.weight.grad._version, m_weight_grad_version_saved)
with self.assertRaisesRegex(RuntimeError, "modified by an inplace operation"):
pgm.backward(torch.randn(10, 20))
def test_overwrite_module_params_on_conversion(self):
    """Behavior of module._apply() and dtype conversion with respect to the
    global `torch.__future__.overwrite_module_params_on_conversion` flag."""
    # Test that if the conversion function passed to `module._apply()`
    # changes the TensorImpl type of `module`'s parameters, the `module`'s
    # parameters are always overwritten, regardless of the value of
    # `torch.__future__.get_overwrite_module_params_on_conversion()`.
    m = nn.Linear(20, 10)
    m.weight.grad = torch.randn(10, 20)
    weight_ref = m.weight
    weight_grad_ref = m.weight.grad
    # Dense -> sparse changes the TensorImpl type.
    m = m._apply(lambda t: torch.sparse_coo_tensor(torch.zeros([2, 1]), torch.ones([1]), torch.Size([10, 20])))
    self.assertNotEqual(weight_ref.layout, m.weight.layout)
    self.assertNotEqual(weight_grad_ref.layout, m.weight.grad.layout)

    # Test that under the current default settings
    # (`torch.__future__.get_overwrite_module_params_on_conversion() == False`),
    # a view to a module's parameters is not pointing to the same storage as
    # its base variable after converting the module to a different dtype.
    m = nn.Linear(20, 10).float()
    mw = m.weight[:]
    m.double()
    with torch.no_grad():
        mw[0][0] = 5
    self.assertTrue(mw[0][0].dtype == torch.float)
    self.assertTrue(mw._base[0][0].dtype == torch.double)

    try:
        torch.__future__.set_overwrite_module_params_on_conversion(True)

        # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
        # a view to a module's parameters is still pointing to the same storage as
        # its base variable after converting the module to a different dtype.
        m = nn.Linear(20, 10).float()
        mw = m.weight[:]
        m.double()
        with torch.no_grad():
            mw[0][0] = 5
        self.assertTrue(mw[0][0] == mw._base[0][0])

        # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
        # `float_module.double()` doesn't preserve previous references to
        # `float_module`'s parameters or gradients.
        m = nn.Linear(20, 10).float()
        m.weight.grad = torch.randn(10, 20).float()
        weight_ref = m.weight
        weight_grad_ref = m.weight.grad
        m.double()
        self.assertNotEqual(weight_ref.dtype, m.weight.dtype)
        self.assertNotEqual(weight_grad_ref.dtype, m.weight.grad.dtype)

        def add_one_inplace(t):
            return t.add_(1.0)

        # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
        # applying an in-place operation to a module would bump the module's
        # original parameters' version counter.
        m = nn.Linear(20, 10)
        pvm = m.weight.mul(m.weight)
        weight_ref = m.weight
        m_weight_version_saved = weight_ref._version
        m = m._apply(add_one_inplace)
        # Test that the in-place operation bumps the original parameter's version counter
        self.assertGreater(weight_ref._version, m_weight_version_saved)
        with self.assertRaisesRegex(RuntimeError, "modified by an inplace operation"):
            pvm.backward(torch.randn(10, 20))

        # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
        # applying an in-place operation to a module would bump the module's
        # original parameters' gradients' version counter.
        m = nn.Linear(20, 10)
        m.weight.grad = torch.randn(10, 20).requires_grad_()
        pgm = m.weight.grad.mul(m.weight.grad)
        weight_grad_ref = m.weight.grad
        m_weight_grad_version_saved = weight_grad_ref._version
        m = m._apply(add_one_inplace)
        self.assertGreater(weight_grad_ref._version, m_weight_grad_version_saved)
        with self.assertRaisesRegex(RuntimeError, "modified by an inplace operation"):
            pgm.backward(torch.randn(10, 20))

        # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
        # applying an out-of-place operation to a module doesn't bump
        # the module's original parameters' version counter.
        m = nn.Linear(20, 10)
        weight_ref = m.weight
        m_weight_version_saved = weight_ref._version
        m = m._apply(lambda t: torch.randn(t.shape))
        self.assertEqual(weight_ref._version, m_weight_version_saved)

        # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
        # applying an out-of-place operation to a module doesn't bump
        # the module's original parameters' gradients' version counter.
        m = nn.Linear(20, 10)
        m.weight.grad = torch.randn(10, 20).requires_grad_()
        weight_grad_ref = m.weight.grad
        m_weight_grad_version_saved = weight_grad_ref._version
        m = m._apply(lambda t: torch.randn(t.shape))
        self.assertEqual(weight_grad_ref._version, m_weight_grad_version_saved)
    finally:
        # Always restore the global flag so later tests see the default.
        torch.__future__.set_overwrite_module_params_on_conversion(False)
def test_type(self):
    """Module.float()/double()/to() convert floating-point parameters while
    leaving integral buffers (e.g. a LongTensor of indices) untouched;
    device moves carry everything, including integral buffers."""
    l = nn.Linear(10, 20)
    net = nn.Module()
    net.l = l
    net.l2 = l
    net.add_module('empty', None)
    net.register_buffer('indices', torch.LongTensor(1))
    net.float()
    self.assertIsInstance(l.weight.data, torch.FloatTensor)
    self.assertIsInstance(l.bias.data, torch.FloatTensor)
    # Integral buffers are not converted by floating-point dtype casts.
    self.assertIsInstance(net.indices, torch.LongTensor)
    net.double()
    self.assertIsInstance(l.weight.data, torch.DoubleTensor)
    self.assertIsInstance(l.bias.data, torch.DoubleTensor)
    self.assertIsInstance(net.indices, torch.LongTensor)
    net.to(torch.half)
    self.assertIsInstance(l.weight.data, torch.HalfTensor)
    self.assertIsInstance(l.bias.data, torch.HalfTensor)
    self.assertIsInstance(net.indices, torch.LongTensor)
    if TEST_CUDA:
        net.float().cuda()
        self.assertIsInstance(l.weight.data, torch.cuda.FloatTensor)
        self.assertIsInstance(l.bias.data, torch.cuda.FloatTensor)
        # Device moves carry integral buffers along.
        self.assertIsInstance(net.indices, torch.cuda.LongTensor)
        net.cpu()
        self.assertIsInstance(l.weight.data, torch.FloatTensor)
        self.assertIsInstance(l.bias.data, torch.FloatTensor)
        self.assertIsInstance(net.indices, torch.LongTensor)
        # to(device, dtype, non_blocking)
        net.to("cuda", torch.double, True)
        self.assertIsInstance(l.weight.data, torch.cuda.DoubleTensor)
        self.assertIsInstance(l.bias.data, torch.cuda.DoubleTensor)
        self.assertIsInstance(net.indices, torch.cuda.LongTensor)
        # to(tensor): adopt that tensor's device and dtype.
        net.to(torch.empty(1, device="cuda:0", dtype=torch.half))
        self.assertIsInstance(l.weight.data, torch.cuda.HalfTensor)
        self.assertIsInstance(l.bias.data, torch.cuda.HalfTensor)
        self.assertIsInstance(net.indices, torch.cuda.LongTensor)
    # Whether or not the CUDA branch ran, the module is half-precision here.
    net.to(torch.device("cpu"), non_blocking=True)
    self.assertIsInstance(l.weight.data, torch.HalfTensor)
    self.assertIsInstance(l.bias.data, torch.HalfTensor)
    self.assertIsInstance(net.indices, torch.LongTensor)
    net.to(torch.float)
    self.assertIsInstance(l.weight.data, torch.FloatTensor)
    self.assertIsInstance(l.bias.data, torch.FloatTensor)
    net.to(torch.DoubleTensor(1))
    self.assertIsInstance(l.weight.data, torch.DoubleTensor)
    self.assertIsInstance(l.bias.data, torch.DoubleTensor)
    if TEST_CUDA:
        net.to(device='cuda', dtype=torch.float)
        self.assertIsInstance(l.weight.data, torch.cuda.FloatTensor)
        self.assertIsInstance(l.bias.data, torch.cuda.FloatTensor)
def test_non_leaf_parameters(self):
l1 = nn.Linear(10, 10)
l2 = nn.Linear(10, 10)
def assign_weight():
l2.weight = l1.weight + 2
self.assertRaises(TypeError, assign_weight)
# This should work though
l2.weight = Parameter(torch.randn(10, 10))
def test_clip_grad_norm(self):
    """clip_grad_norm_ scales all gradients by one common factor so the
    total norm equals max_norm, returns the pre-clip norm, and leaves
    already-small gradients unchanged; a bare Tensor input behaves like a
    one-element list."""
    l = nn.Linear(10, 10)
    max_norm = 2

    def compute_norm(norm_type):
        # Reference implementation of the total gradient norm.
        norm_type = float(norm_type)
        if norm_type != inf:
            total_norm = 0
            for p in l.parameters():
                total_norm += p.grad.data.abs().pow(norm_type).sum()
            return pow(total_norm, 1. / norm_type)
        else:
            # inf-norm: max absolute entry over all gradients.
            return max(p.grad.data.abs().max() for p in l.parameters())

    def compare_scaling(grads):
        # Every gradient must be scaled by the same single factor.
        p_scale = [p.grad.data.div(g).view(-1) for p, g in zip(l.parameters(), grads)]
        scale = torch.cat(p_scale)
        self.assertEqual(scale.std(), 0)
        return scale[0]

    # Large gradients: clipping must bring the norm down to max_norm.
    grads = torch.arange(1., 101).view(10, 10), torch.ones(10).div(1000)
    for norm_type in [0.5, 1.5, 2, 4, 'inf']:
        for p, g in zip(l.parameters(), grads):
            p._grad = g.clone().view_as(p.data)
        norm_before = compute_norm(norm_type)
        norm = clip_grad_norm_(l.parameters(), max_norm, norm_type=norm_type)
        norm_after = compute_norm(norm_type)
        self.assertEqual(norm, norm_before)
        self.assertEqual(norm_after, max_norm)
        self.assertLessEqual(norm_after, norm_before)
        compare_scaling(grads)

    # Small gradients should be left unchanged
    grads = torch.rand(10, 10).div(10000), torch.ones(10).div(500)
    for norm_type in [0.5, 1.5, 2, 4, 'inf']:
        for p, g in zip(l.parameters(), grads):
            p.grad.data.copy_(g)
        norm_before = compute_norm(norm_type)
        norm = clip_grad_norm_(l.parameters(), max_norm, norm_type=norm_type)
        norm_after = compute_norm(norm_type)
        self.assertEqual(norm, norm_before)
        self.assertEqual(norm_before, norm_after)
        self.assertLessEqual(norm_after, max_norm)
        scale = compare_scaling(grads)
        self.assertEqual(scale, 1)

    # Should accept a single Tensor as input
    p1, p2 = torch.randn(10, 10), torch.randn(10, 10)
    g = torch.arange(1., 101).view(10, 10)
    p1._grad = g.clone()
    p2._grad = g.clone()
    for norm_type in [0.5, 1.5, 2, 4, 'inf']:
        clip_grad_norm_(p1, max_norm, norm_type=norm_type)
        clip_grad_norm_([p2], max_norm, norm_type=norm_type)
        self.assertEqual(p1.grad, p2.grad)
def test_clip_grad_value(self):
l = nn.Linear(10, 10)
clip_value = 2.5
grad_w, grad_b = torch.arange(-50., 50).view(10, 10).div_(5), torch.ones(10).mul_(2)
for grad_list in [[grad_w, grad_b], [grad_w, None]]:
for p, g in zip(l.parameters(), grad_list):
p._grad = g.clone().view_as(p.data) if g is not None else g
clip_grad_value_(l.parameters(), clip_value)
for p in filter(lambda p: p.grad is not None, l.parameters()):
self.assertLessEqual(p.grad.data.max(), clip_value)
self.assertGreaterEqual(p.grad.data.min(), -clip_value)
# Should accept a single Tensor as input
p1, p2 = torch.randn(10, 10), torch.randn(10, 10)
g = torch.arange(-50., 50).view(10, 10).div_(5)
p1._grad = g.clone()
p2._grad = g.clone()
clip_grad_value_(p1, clip_value)
clip_grad_value_([p2], clip_value)
self.assertEqual(p1.grad, p2.grad)
def test_parameters_to_vector(self):
conv1 = nn.Conv2d(3, 10, 5)
fc1 = nn.Linear(10, 20)
model = nn.Sequential(conv1, fc1)
vec = parameters_to_vector(model.parameters())
self.assertEqual(vec.size(0), 980)
def test_vector_to_parameters(self):
conv1 = nn.Conv2d(3, 10, 5)
fc1 = nn.Linear(10, 20)
model = nn.Sequential(conv1, fc1)
vec = torch.arange(0., 980)
vector_to_parameters(vec, model.parameters())
sample = next(model.parameters())[0, 0, 0]
self.assertTrue(torch.equal(sample.data, vec.data[:5]))
# FIXME: Rewrite this test using functions not depending on LAPACK
# and remove the `@skipIfNoLapack` (see #70995)
# torch/nn/utils/parametrize
@skipIfNoLapack
def test_register_and_remove_parametrization(self):
    r"""Test that it is possible to add a few parametrizations
    on a parameter or a buffer and that removing them restores the initial state
    It also tests that backpropagating through them works as expected
    """
    # Define a couple matrix parametrizations
    class Skew(nn.Module):
        def forward(self, X):
            # Lower triangle minus its transpose => skew-symmetric matrix.
            X = X.tril(-1)
            return X - X.T

    class Orthogonal(nn.Module):
        def forward(self, X):
            # Cayley map
            # If X is skew-symmetric it returns an orthogonal matrix
            Id = torch.eye(X.size(0), device=X.device)
            # We call contiguous because solve returns a tensor with strides that are Fortran-contiguous
            # and autograd raises a performance warning.
            # This happens when we remove the parametrization with leave_parametrized=True,
            # which does a set_ with a non-contiguous tensor while the gradient is contiguous
            return torch.linalg.solve(Id + X, Id - X).contiguous()

    class Resize(nn.Module):
        def forward(self, X):
            # Shape-changing parametrization (only legal with unsafe=True).
            return X[[0]]

    class NoResize(nn.Module):
        def forward(self, X):
            return X

    # Define a couple vector parametrizations
    class FirstZero(nn.Module):
        def forward(self, x):
            return torch.cat([x.new_zeros(1), x[1:]])

    class LastZero(nn.Module):
        def forward(self, x):
            return torch.cat([x[:-1], x.new_zeros(1)])

    model = nn.Linear(8, 8)
    initial_weight_id = id(model.weight)
    initial_bias_id = id(model.bias)
    initial_model = deepcopy(model)

    # Test unsafe flag
    with self.assertRaisesRegex(ValueError, "Registering a parametrization may not change the shape of the tensor"):
        parametrize.register_parametrization(model, "weight", Resize())  # default unsafe = False
        model(torch.ones(8, 8))

    # One parametrization with unsafe=True
    parametrize.register_parametrization(model, "weight", Resize(), unsafe=True)
    self.assertTrue(hasattr(model, "parametrizations"))
    self.assertTrue(parametrize.is_parametrized(model))
    self.assertTrue(parametrize.is_parametrized(model, "weight"))
    self.assertFalse(parametrize.is_parametrized(model, "bias"))
    # The raw parameter is moved out of _parameters while parametrized.
    self.assertNotIn("weight", model._parameters)
    A = model.weight
    self.assertTrue(A.shape[0] == 1)
    parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
    self.assertFalse(hasattr(model, "parametrizations"))
    self.assertEqual(model.weight, initial_model.weight)
    self.assertEqual(id(model.weight), initial_weight_id)
    self.assertEqual(model.__class__, nn.Linear)

    # Two parametrizations with unsafe=True
    parametrize.register_parametrization(model, "weight", Resize(), unsafe=True)
    parametrize.register_parametrization(model, "weight", NoResize(), unsafe=False)
    self.assertTrue(hasattr(model, "parametrizations"))
    self.assertTrue(parametrize.is_parametrized(model))
    self.assertTrue(parametrize.is_parametrized(model, "weight"))
    self.assertFalse(parametrize.is_parametrized(model, "bias"))
    self.assertNotIn("weight", model._parameters)
    A = model.weight
    self.assertTrue(A.shape[0] == 1)
    parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
    self.assertFalse(hasattr(model, "parametrizations"))
    self.assertEqual(model.weight, initial_model.weight)
    self.assertEqual(id(model.weight), initial_weight_id)
    self.assertEqual(model.__class__, nn.Linear)

    # Test unsafe flag doesn't change expected behavior
    parametrize.register_parametrization(model, "weight", Skew(), unsafe=True)
    self.assertTrue(hasattr(model, "parametrizations"))
    self.assertTrue(parametrize.is_parametrized(model))
    self.assertTrue(parametrize.is_parametrized(model, "weight"))
    self.assertFalse(parametrize.is_parametrized(model, "bias"))
    self.assertNotIn("weight", model._parameters)
    # Result should be skew-symmetric
    A = model.weight
    self.assertEqual(A, -A.T)
    # Remove and check consistency
    parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
    self.assertFalse(hasattr(model, "parametrizations"))
    self.assertEqual(model.weight, initial_model.weight)
    self.assertEqual(id(model.weight), initial_weight_id)
    self.assertEqual(model.__class__, nn.Linear)

    # Test one parametrization
    parametrize.register_parametrization(model, "weight", Skew())
    self.assertTrue(hasattr(model, "parametrizations"))
    self.assertTrue(parametrize.is_parametrized(model))
    self.assertTrue(parametrize.is_parametrized(model, "weight"))
    self.assertFalse(parametrize.is_parametrized(model, "bias"))
    self.assertNotIn("weight", model._parameters)
    # Result should be skew-symmetric
    A = model.weight
    self.assertEqual(A, -A.T)
    # Remove and check consistency
    parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
    self.assertFalse(hasattr(model, "parametrizations"))
    self.assertEqual(model.weight, initial_model.weight)
    self.assertEqual(id(model.weight), initial_weight_id)
    self.assertEqual(model.__class__, nn.Linear)

    # Test two parametrizations at the same time and removing them
    parametrize.register_parametrization(model, "weight", Skew())
    parametrize.register_parametrization(model, "weight", Orthogonal())
    # Result should be orthogonal
    X = model.weight
    Id = torch.eye(X.size(0), device=X.device)
    self.assertEqual(X.T @ X, Id)
    # Structure tests
    self.assertTrue(hasattr(model, "parametrizations"))
    self.assertTrue(parametrize.is_parametrized(model))
    self.assertTrue(parametrize.is_parametrized(model, "weight"))
    self.assertFalse(parametrize.is_parametrized(model, "bias"))
    self.assertIn("weight", model.parametrizations)
    self.assertNotIn("weight", model._parameters)
    # Remove
    parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
    self.assertEqual(model.weight, initial_model.weight)
    self.assertEqual(id(model.weight), initial_weight_id)
    self.assertFalse(hasattr(model, "parametrizations"))
    self.assertEqual(model.__class__, nn.Linear)

    # Add everything
    parametrize.register_parametrization(model, "weight", Skew())
    parametrize.register_parametrization(model, "weight", Orthogonal())
    parametrize.register_parametrization(model, "bias", FirstZero())
    parametrize.register_parametrization(model, "bias", LastZero())

    # Basic tests
    self.assertTrue(parametrize.is_parametrized(model))
    self.assertTrue(parametrize.is_parametrized(model, "weight"))
    self.assertTrue(parametrize.is_parametrized(model, "bias"))
    self.assertEqual(model.bias[0].item(), 0.)
    self.assertEqual(model.bias[-1].item(), 0.)
    self.assertEqual(len(list(model.parameters())), 2)  # Nothing weird has happened

    # Should not throw
    sgd = torch.optim.SGD(model.parameters(), lr=0.01)
    weight_copy = model.weight.clone()
    bias_copy = model.bias.clone()
    sgd.zero_grad()
    (model.weight.T @ model.bias).sum().backward()
    sgd.step()
    self.assertNotEqual(model.weight, weight_copy)
    self.assertNotEqual(model.bias, bias_copy)

    # Remove first parametrization.
    # Check that the model is still parametrized and so is the second parameter
    parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
    self.assertTrue(parametrize.is_parametrized(model))  # Still parametrized
    self.assertFalse(parametrize.is_parametrized(model, "weight"))  # Parametrization removed
    self.assertTrue(parametrize.is_parametrized(model, "bias"))  # Still parametrized
    self.assertEqual(model.bias[0].item(), 0.)  # Still parametrized
    self.assertEqual(model.bias[-1].item(), 0.)  # Still parametrized
    self.assertNotEqual(model.weight, initial_model.weight)  # Has been updated
    self.assertEqual(id(model.weight), initial_weight_id)  # Keeps the same id
    self.assertEqual(len(list(model.parameters())), 2)  # Nothing weird has happened

    # Should not throw
    weight_copy = model.weight.clone()
    bias_copy = model.bias.clone()
    sgd.zero_grad()
    (model.weight.T @ model.bias).sum().backward()
    sgd.step()
    self.assertNotEqual(model.weight, weight_copy)
    self.assertNotEqual(model.bias, bias_copy)

    # Remove the second parametrization.
    # Check that the module is not parametrized
    parametrize.remove_parametrizations(model, "bias", leave_parametrized=False)
    self.assertFalse(parametrize.is_parametrized(model))  # Not parametrized
    self.assertNotEqual(model.bias, initial_model.bias)  # Has been updated
    self.assertNotEqual(model.bias[0].item(), 0.)  # Not parametrized
    self.assertNotEqual(model.bias[-1].item(), 0.)  # Not parametrized
    self.assertEqual(id(model.bias), initial_bias_id)  # Keeps the same id
    self.assertFalse(hasattr(model, "parametrizations"))  # Not parametrized the module
    self.assertEqual(model.__class__, nn.Linear)  # Restores the previous class
    self.assertEqual(len(list(model.parameters())), 2)  # Nothing weird has happened

    # Should not throw things are updated
    weight_copy = model.weight.clone()
    bias_copy = model.bias.clone()
    sgd.zero_grad()
    (model.weight.T @ model.bias).sum().backward()
    sgd.step()
    self.assertNotEqual(model.weight, weight_copy)
    self.assertNotEqual(model.bias, bias_copy)

    # Test leave_parametrized=True
    for _ in range(2):
        parametrize.register_parametrization(model, "weight", Skew())
        parametrize.register_parametrization(model, "weight", Orthogonal())
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=True)
        # We didn't change the dtype nor had multiple inputs, so the id should be the same
        self.assertEqual(id(model.weight), initial_weight_id)
        self.assertEqual(id(model.bias), initial_bias_id)

        # Should not throw. Things are updated
        weight_copy = model.weight.clone()
        bias_copy = model.bias.clone()
        sgd.zero_grad()
        (model.weight.T @ model.bias).sum().backward()
        sgd.step()
        self.assertNotEqual(model.weight, weight_copy)
        self.assertNotEqual(model.bias, bias_copy)
def test_register_and_remove_nested_parametrization(self):
    r"""Test that it is possible to nest the parametrizations
    meaning that the original param is parametrized again
    """
    class Skew(nn.Module):
        # Builds a skew-symmetric matrix from the strict lower triangle of X
        def forward(self, X):
            X = X.tril(-1)
            return X - X.T
    model = nn.Linear(8, 8)
    # Add top level parametrization
    parametrize.register_parametrization(model, "weight", Skew())
    self.assertTrue(hasattr(model, "parametrizations"))
    self.assertTrue(parametrize.is_parametrized(model))
    self.assertTrue(parametrize.is_parametrized(model, "weight"))
    self.assertFalse(parametrize.is_parametrized(model, "bias"))
    # The parametrized tensor is moved out of the module's own _parameters
    self.assertNotIn("weight", model._parameters)
    # Result should be skew-symmetric
    A = model.weight
    self.assertEqual(A, -A.T)
    # Add nested parametrization: parametrize the "original" tensor that the
    # first parametrization stores inside model.parametrizations.weight
    param_mod = model.parametrizations.weight
    self.assertFalse(hasattr(param_mod, "parametrizations"))
    self.assertFalse(parametrize.is_parametrized(param_mod))
    self.assertFalse(parametrize.is_parametrized(param_mod, "original"))
    parametrize.register_parametrization(param_mod, "original", Skew())
    self.assertTrue(hasattr(param_mod, "parametrizations"))
    self.assertTrue(parametrize.is_parametrized(param_mod))
    self.assertTrue(parametrize.is_parametrized(param_mod, "original"))
    self.assertNotIn("original", param_mod._parameters)
    # Result should be skew-symmetric
    A = param_mod.original
    self.assertEqual(A, -A.T)
    # Remove nested param and check consistency
    parametrize.remove_parametrizations(param_mod, "original", leave_parametrized=False)
    self.assertFalse(hasattr(param_mod, "parametrizations"))
    self.assertEqual(param_mod.__class__, parametrize.ParametrizationList)
    # Remove top level and check consistency
    parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
    self.assertFalse(hasattr(model, "parametrizations"))
    self.assertEqual(model.__class__, nn.Linear)
def test_register_and_remove_buffer_parametrization(self):
    r"""Test that it is possible to add and remove parametrizations on buffers"""
    # Define a couple vector parametrizations
    class FirstZero(nn.Module):
        # Zeroes out the first element of the vector
        def forward(self, x):
            return torch.cat([x.new_zeros(1), x[1:]])
    class LastZero(nn.Module):
        # Zeroes out the last element of the vector
        def forward(self, x):
            return torch.cat([x[:-1], x.new_zeros(1)])
    model = nn.Linear(8, 8)
    # Instantiate parametrizations on buffers. It should work as expected
    delattr(model, "bias")
    model.register_buffer("bias", torch.ones(8))
    parametrize.register_parametrization(model, "bias", FirstZero())
    parametrize.register_parametrization(model, "bias", LastZero())
    self.assertTrue(parametrize.is_parametrized(model))
    self.assertTrue(parametrize.is_parametrized(model, "bias"))
    self.assertEqual(model.bias[0].item(), 0.)
    self.assertEqual(model.bias[-1].item(), 0.)
    self.assertTrue((model.bias[1:-1] == torch.ones(6)).all())
    # Only "weight" is a parameter; "bias" is a (parametrized) buffer
    self.assertEqual(len(list(model.parameters())), 1)
    # Remove parametrizations on buffers. It should work as expected
    parametrize.remove_parametrizations(model, "bias", leave_parametrized=True)
    self.assertFalse(parametrize.is_parametrized(model))
    self.assertFalse(parametrize.is_parametrized(model, "bias"))
    # The constrained values are baked in by leave_parametrized=True
    self.assertEqual(model.bias[0].item(), 0.)
    self.assertEqual(model.bias[-1].item(), 0.)
    self.assertTrue((model.bias[1:-1] == torch.ones(6)).all())
    self.assertEqual(len(list(model.parameters())), 1)
# FIXME: Rewrite this test using functions not depending on LAPACK
# and remove the `@skipIfNoLapack` (see #70995)
@skipIfNoLapack
def test_serialization_parametrization(self):
    r"""Test that it is possible to serialize a parametrized model via state_dict"""
    # A stateful parametrization
    class Orthogonal(nn.Module):
        def __init__(self, n):
            super().__init__()
            # Registered buffers take part in state_dict, so they round-trip
            # through save/load together with the parametrized weight
            self.register_buffer("id", torch.eye(n))
            self.register_buffer("B", torch.empty(n, n))
            init.orthogonal_(self.B)
        def forward(self, X):
            # Cayley map of the skew-symmetric part of X, left-multiplied by B
            A = X.triu(1)
            A = A - A.T
            return self.B @ torch.linalg.solve(self.id + A, self.id - A)
    def get_model():
        model = torch.nn.Sequential(
            torch.nn.Linear(5, 5),
            torch.nn.ReLU(),
            torch.nn.Linear(5, 1),
        )
        parametrize.register_parametrization(model[0], "weight", Orthogonal(5))
        return model
    model = get_model()
    prev_weight = model[0].weight
    prev_B = model[0].parametrizations.weight[0].B
    new_model = get_model()
    with TemporaryFileName() as fname:
        torch.save(model.state_dict(), fname)
        new_model.load_state_dict(torch.load(fname))
        # Integrity tests
        self.assertTrue(parametrize.is_parametrized(new_model[0], "weight"))
        self.assertEqual(prev_weight, new_model[0].weight)
        self.assertEqual(prev_B, new_model[0].parametrizations.weight[0].B)
    # Trying to save the whole parametrized model raises
    with self.assertRaisesRegex(RuntimeError, "state_dict"):
        with TemporaryFileName() as fname:
            torch.save(model, fname)
# FIXME: Rewrite this test using functions not depending on LAPACK
# and remove the `@skipIfNoLapack` (see #70995)
@skipIfNoLapack
def test_initialization_parametrization(self):
    r"""Test that it is possible to initialize a parametrization when it
    implements a `right_inverse` method
    """
    class Skew(nn.Module):
        def forward(self, X):
            A = X.triu(1)
            return A - A.T
        def is_skew(self, A):
            return torch.allclose(A, -A.T, atol=1e-6)
        def right_inverse(self, X):
            # Assigning to the weight goes through right_inverse; reject
            # values outside the constraint set
            if not self.is_skew(X):
                raise ValueError("The matrix is not skew-symmetric.")
            return X.triu(1)
    # Implements a Cayley map where right_inverse is not quite the inverse of forward
    class Orthogonal(nn.Module):
        def __init__(self, n):
            super().__init__()
            self.register_buffer("B", torch.eye(n))
        def forward(self, X):
            Id = torch.eye(X.size(0))
            return self.B @ torch.linalg.solve(Id + X, Id - X)
        def is_orthogonal(self, X):
            Id = torch.eye(X.size(0))
            return torch.allclose(X.T @ X, Id, atol=1e-4)
        def right_inverse(self, X):
            if not self.is_orthogonal(X):
                raise ValueError("The input is not orthogonal.")
            # cayley(0) == Id, so B @ cayley(0) == B
            self.B = X
            return torch.zeros_like(X)
    N = 5
    model = nn.Linear(N, N)
    # Register the skew-symmetric constraint. The result is now skew-symmetric
    skew = Skew()
    # Make the weight skew-symmetric before registering the parametrization
    with torch.no_grad():
        model.weight.set_(skew(model.weight))
    parametrize.register_parametrization(model, "weight", skew)
    X = torch.rand(N, N)
    # X is not skew-symmetric, so it throws an error
    with self.assertRaises(ValueError):
        model.weight = X
    # Make X skew-symmetric
    X = X - X.T
    model.weight = X
    # The stored "original" tensor is right_inverse(X); forward reconstructs X
    self.assertEqual(model.parametrizations.weight.original, X.triu(1))
    self.assertEqual(model.weight, X)
    # Having several parametrizations registered should work in the same way
    parametrize.register_parametrization(model, "weight", Orthogonal(N))
    # Register now the Cayley map. The result is now orthogonal
    X = torch.rand(N, N)
    # X is not orthogonal, so it throws an error
    with self.assertRaises(ValueError):
        model.weight = X
    init.orthogonal_(X)
    model.weight = X
    self.assertEqual(model.weight, X)
    self.assertEqual(model.parametrizations.weight.original, torch.zeros_like(X))
def test_errors_unparametrized_tensor_parametrization(self):
    r"""Check the error paths of register/remove_parametrizations on a tensor
    that has no prior parametrization registered."""
    # Test errors when registering a parametrization on an unparametrized tensor
    module = nn.Linear(3, 4)
    weight_init = module.weight.clone()
    class Identity(nn.Module):
        def forward(self, x):
            return x
    # Register a parametrization on a non-existing parameter throws
    with self.assertRaisesRegex(ValueError, "does not have a parameter"):
        parametrize.register_parametrization(module, "foo", Identity())
    self.assertFalse(parametrize.is_parametrized(module))
    # Removing parametrizations from an unparametrized tensor throws
    with self.assertRaisesRegex(ValueError, "does not have a parametrization"):
        parametrize.remove_parametrizations(module, "bias")
    self.assertFalse(parametrize.is_parametrized(module))
    # A correct parametrization with several outputs
    class Sum(nn.Module):
        def forward(self, x, y):
            return x + y
        def right_inverse(self, z):
            return z, torch.zeros_like(z)
    parametrize.register_parametrization(module, "weight", Sum())
    # Cannot remove a parametrization with several outputs with `leave_parametrized=False`
    with self.assertRaisesRegex(ValueError, "leave_parametrized=False"):
        parametrize.remove_parametrizations(module, "weight", leave_parametrized=False)
    parametrize.remove_parametrizations(module, "weight", leave_parametrized=True)
    # A parametrization with an incorrect number of outputs
    class WrongNumberParams(nn.Module):
        def forward(self, x, y, z):
            return x + y + z
        def right_inverse(self, w):
            return w, torch.zeros_like(w)
    # Makes param(*param.right_inverse(X)) fail
    with self.assertRaisesRegex(TypeError, "positional argument"):
        parametrize.register_parametrization(module, "weight", WrongNumberParams())
    self.assertFalse(parametrize.is_parametrized(module))
    # A parametrization with a right_inverse that does not return a Tensor or Sequence[Tensor]
    class WrongRightInverse(Identity):
        def right_inverse(self, z):
            return None
    # right_inverse should return a Tensor or a Sequence[Tensor]
    with self.assertRaisesRegex(ValueError, "Tensor or a Sequence of"):
        parametrize.register_parametrization(module, "weight", WrongRightInverse())
    self.assertFalse(parametrize.is_parametrized(module))
    # If it's a sequence, it must to be a sequence of tensors
    class WrongRightInverseSequence(nn.Module):
        def forward(self, x, y):
            return x
        def right_inverse(self, z):
            return None, z
    with self.assertRaisesRegex(ValueError, "of the sequence with type"):
        parametrize.register_parametrization(module, "weight", WrongRightInverseSequence())
    self.assertFalse(parametrize.is_parametrized(module))
    # A parametrization from one tensor to one tensor that changes the dtype
    class ChangeDtypeInverse(nn.Module):
        def forward(self, x):
            return x.float()
        def right_inverse(self, w):
            return w.bool()
    # For parametrizations that return one tensor, right_inverse may not change the dtype
    with self.assertRaisesRegex(ValueError, "outputs one tensor, it may not change the dtype"):
        parametrize.register_parametrization(module, "weight", ChangeDtypeInverse())
    self.assertFalse(parametrize.is_parametrized(module))
    # Doesn't return a tensor
    class NotTensor(nn.Module):
        def forward(self, x):
            return 2
    # Forward must return a tensor
    with self.assertRaisesRegex(ValueError, "must return a tensor"):
        parametrize.register_parametrization(module, "weight", NotTensor())
    self.assertFalse(parametrize.is_parametrized(module))
    # A parametrization from one tensor to one tensor that changes the dtype
    class ChangeDtype(nn.Module):
        def forward(self, x):
            return x.bool()
    # forward should not change the initial dtype
    with self.assertRaisesRegex(ValueError, "may not change the dtype"):
        parametrize.register_parametrization(module, "weight", ChangeDtype())
    self.assertFalse(parametrize.is_parametrized(module))
    # Change shape
    class ChangeShape(nn.Module):
        def forward(self, x):
            return x[:-1]
    # forward should not change the original shape
    with self.assertRaisesRegex(ValueError, "may not change the shape"):
        parametrize.register_parametrization(module, "weight", ChangeShape())
    self.assertFalse(parametrize.is_parametrized(module))
    # Many to one that changes dtype
    class ChangeDtypeMulti(nn.Module):
        def forward(self, x, y):
            return (x + y).bool()
        def right_inverse(self, w):
            return w, w + 1
    # forward should not change the original shape even for parametrizations with many inputs
    with self.assertRaisesRegex(ValueError, "may not change the dtype"):
        parametrize.register_parametrization(module, "weight", ChangeDtypeMulti())
    self.assertFalse(parametrize.is_parametrized(module))
    # Returning a sequence of size one, although weird, it's correct
    class SequenceLen1(nn.Module):
        def forward(self, x):
            return x
        def right_inverse(self, w):
            return (w,)
    parametrize.register_parametrization(module, "weight", SequenceLen1())
    self.assertTrue(hasattr(module.parametrizations.weight, "original0"))
    self.assertFalse(hasattr(module.parametrizations.weight, "original1"))
    _ = module.weight  # Does not throw
    self.assertTrue(parametrize.is_parametrized(module))
    parametrize.remove_parametrizations(module, "weight", leave_parametrized=True)
    # None of the operations above should have altered the weight
    self.assertFalse(parametrize.is_parametrized(module))
    self.assertEqual(module.weight, weight_init)
def test_errors_parametrized_tensor_parametrization(self):
    r"""Check that registering an invalid parametrization on an already
    parametrized tensor raises and leaves the existing stack intact."""
    # Test errors when registering a parametrization on a parametrized tensor
    class Identity(nn.Module):
        def forward(self, x):
            return x
    module = nn.Linear(3, 4)
    parametrize.register_parametrization(module, "weight", Identity())
    # Has to return a tensor
    class WrongReturn(nn.Module):
        def forward(self, x):
            return x, x
    with self.assertRaisesRegex(ValueError, "must return a tensor"):
        parametrize.register_parametrization(module, "weight", WrongReturn())
    # The failed registration must not disturb the existing parametrization
    self.assertTrue(parametrize.is_parametrized(module))
    self.assertEqual(len(module.parametrizations.weight), 1)
    self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))
    # Cannot change dtype
    class ChangeDtype(nn.Module):
        def forward(self, x):
            return x.bool()
    with self.assertRaisesRegex(ValueError, "may not change the dtype"):
        parametrize.register_parametrization(module, "weight", ChangeDtype())
    self.assertTrue(parametrize.is_parametrized(module))
    self.assertEqual(len(module.parametrizations.weight), 1)
    self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))
    # Cannot change shape
    class ChangeShape(nn.Module):
        def forward(self, x):
            return x[:-1]
    with self.assertRaisesRegex(ValueError, "may not change the shape"):
        parametrize.register_parametrization(module, "weight", ChangeShape())
    self.assertTrue(parametrize.is_parametrized(module))
    self.assertEqual(len(module.parametrizations.weight), 1)
    self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))
    # The following checks are mostly due to bugs in the code of the parametrization
    # right_inverse has to return a tensor
    class WrongReturnInverse(Identity):
        def right_inverse(self, x):
            return x, x
    with self.assertRaisesRegex(ValueError, "right_inverse must return a tensor"):
        parametrize.register_parametrization(module, "weight", WrongReturnInverse())
    self.assertTrue(parametrize.is_parametrized(module))
    self.assertEqual(len(module.parametrizations.weight), 1)
    self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))
    # Cannot change dtype
    class ChangeDtypeInverse(Identity):
        def right_inverse(self, x):
            return x.bool()
    with self.assertRaisesRegex(ValueError, "must have the same dtype"):
        parametrize.register_parametrization(module, "weight", ChangeDtypeInverse())
    self.assertTrue(parametrize.is_parametrized(module))
    self.assertEqual(len(module.parametrizations.weight), 1)
    self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))
    # Cannot change shape
    class ChangeShapeInverse(Identity):
        def right_inverse(self, x):
            return x[:-1]
    with self.assertRaisesRegex(ValueError, "must have the same shape"):
        parametrize.register_parametrization(module, "weight", ChangeShapeInverse())
    self.assertTrue(parametrize.is_parametrized(module))
    self.assertEqual(len(module.parametrizations.weight), 1)
    self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))
# FIXME: Rewrite this test using functions not depending on LAPACK
# and remove the `@skipIfNoLapack` (see #70995)
@skipIfNoLapack
def test_multiple_inputs_parametrization(self):
    r"""Test parametrizations whose right_inverse returns several tensors,
    so the module stores several "originalN" parameters."""
    # A parametrization with several outputs
    class RankOne(nn.Module):
        def forward(self, x, y):
            # Form a rank-1 matrix from a pair of vectors
            return x.unsqueeze(-1) @ y.unsqueeze(-2)
        def right_inverse(self, Y):
            # We project the given matrix onto the rank 1 matrices
            U, S, Vh = torch.linalg.svd(Y, full_matrices=False)
            # S is ordered in a decreasing way.
            s0_sqrt = S[0].sqrt().unsqueeze(-1)
            return U[..., :, 0] * s0_sqrt, Vh[..., 0, :] * s0_sqrt
    # Simple parametrisation
    class Double(nn.Module):
        def forward(self, x):
            return 2.0 * x
        def right_inverse(self, w):
            return 0.5 * w
    model = nn.Linear(3, 3)
    # Test one parametrization
    parametrize.register_parametrization(model, "weight", RankOne())
    self.assertTrue(hasattr(model, "parametrizations"))
    self.assertTrue(parametrize.is_parametrized(model))
    self.assertTrue(parametrize.is_parametrized(model, "weight"))
    # The two right_inverse outputs are stored as original0 / original1
    self.assertTrue(hasattr(model.parametrizations.weight, "original0"))
    self.assertIn("original0", model.parametrizations.weight._parameters)
    self.assertTrue(hasattr(model.parametrizations.weight, "original1"))
    self.assertIn("original1", model.parametrizations.weight._parameters)
    self.assertFalse(parametrize.is_parametrized(model, "bias"))
    self.assertNotIn("weight", model._parameters)
    # Result should be rank 1
    self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)
    with self.assertRaisesRegex(ValueError, "leave_parametrized=False"):
        # Cannot remove a parametrization with multiple inputs and not leave it parametrized
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
    # Remove parametrization and check consistency
    parametrize.remove_parametrizations(model, "weight", leave_parametrized=True)
    self.assertFalse(hasattr(model, "parametrizations"))
    self.assertEqual(model.__class__, nn.Linear)
    self.assertFalse(parametrize.is_parametrized(model))
    self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)
    self.assertIn("weight", model._parameters)
    # Registering parametrizations with one input on top of one with multiple inputs should work
    init_weight = model.weight.clone()
    parametrize.register_parametrization(model, "weight", RankOne())
    # Projecting a rank 1 matrix onto the matrices of rank one does not change the matrix
    self.assertEqual(init_weight, model.weight)
    parametrize.register_parametrization(model, "weight", Double())
    # The matrix now is twice the initial matrix
    self.assertEqual(2.0 * init_weight, model.weight)
    # Multiplying by a scalar does not change the rank
    self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)
    # The model has now three parameters
    self.assertEqual(len(list(model.parameters())), 3)
    sgd = torch.optim.SGD(model.parameters(), lr=0.1)
    # Test backward. Should not throw
    for _ in range(2):
        sgd.zero_grad()
        loss = (model.weight.T @ model.bias).sum()
        loss.backward()
        sgd.step()
    # Same drill as before, removing should work as expected
    with self.assertRaisesRegex(ValueError, "leave_parametrized=False"):
        # Cannot remove a parametrization with multiple inputs and not leave it parametrized
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
    # Remove parametrization and check consistency
    parametrize.remove_parametrizations(model, "weight", leave_parametrized=True)
    self.assertFalse(hasattr(model, "parametrizations"))
    self.assertEqual(model.__class__, nn.Linear)
    self.assertFalse(parametrize.is_parametrized(model))
    self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)
    self.assertIn("weight", model._parameters)
    # The model has now two parameters
    self.assertEqual(len(list(model.parameters())), 2)
    # Test backward. Should not throw
    sgd = torch.optim.SGD(model.parameters(), lr=0.1)
    for _ in range(2):
        sgd.zero_grad()
        loss = (model.weight.T @ model.bias).sum()
        loss.backward()
        sgd.step()
# FIXME: Rewrite this test using functions not depending on LAPACK
# and remove the `@skipIfNoLapack` (see #70995)
@skipIfNoLapack
def test_caching_parametrization(self):
r"""Test the caching system of a parametrization"""
# Define a couple matrix parametrizations
class Skew(nn.Module):
def forward(self, X):
X = X.tril(-1)
return X - X.T
class Orthogonal(nn.Module):
def forward(self, X):
Id = torch.eye(X.size(0), device=X.device)
return torch.linalg.solve(Id + X, Id - X)
model = nn.Linear(5, 5)
parametrize.register_parametrization(model, "weight", Skew())
parametrize.register_parametrization(model, "weight", Orthogonal())
# Test that the caching system works
with parametrize.cached():
X = model.weight
Y = model.weight
self.assertEqual(id(X), id(Y))
# FIXME: Rewrite this test using functions not depending on LAPACK
# and remove the `@skipIfNoLapack` (see #70995)
@skipIfNoLapack
def test_caching_parametrization_with_transfer_parametrizations_and_params(self):
r"""Test that transferring parametrizations doesn't cause issues with caching"""
class Skew(nn.Module):
def forward(self, X):
X = X.tril(-1)
return X - X.T
class Orthogonal(nn.Module):
def forward(self, X):
Id = torch.eye(X.size(0), device=X.device)
return torch.linalg.solve(Id + X, Id - X)
model = nn.Linear(5, 5)
parametrize.register_parametrization(model, "weight", Skew())
parametrize.register_parametrization(model, "weight", Orthogonal())
to_model = nn.Linear(5, 5)
parametrize.transfer_parametrizations_and_params(model, to_model)
with parametrize.cached():
X = model.weight
Y = model.weight
self.assertEqual(id(X), id(Y))
A = to_model.weight
B = to_model.weight
self.assertEqual(id(A), id(B))
# test that the results are distinct objects for each module
self.assertNotEqual(id(A), id(X))
def test_parametrization_same_training_mode(self):
r"""Test training mode updated on parametrization registration"""
class Identity(nn.Module):
def forward(self, X):
return X
module = nn.Linear(4, 4)
module.eval()
parametrize.register_parametrization(module, "weight", Identity())
self.assertFalse(module.parametrizations.weight[0].training)
module.train()
parametrize.register_parametrization(module, "weight", Identity().eval())
self.assertTrue(module.parametrizations.weight[0].training)
self.assertTrue(module.parametrizations.weight[1].training)
def test_type_before_parametrizations(self):
r"""Test that type_before_parametrizations always retrieves original type"""
class Identity(nn.Module):
def forward(self, X):
return X
model = nn.Linear(5, 5)
original_type = type(model)
self.assertTrue(
parametrize.type_before_parametrizations(model) == original_type
)
parametrize.register_parametrization(model, "weight", Identity())
self.assertTrue(
parametrize.type_before_parametrizations(model) == original_type
)
def test_deepcopy_after_parametrization(self):
    r"""Test that we are able to create a deepcopy of the module when it's parametrized."""
    class AddOne(nn.Module):
        def forward(self, x):
            return x + 1.0
    class ModelWithoutDeepcopy(nn.Module):
        def __init__(self):
            super().__init__()
            self.weight = nn.Parameter(torch.tensor([1., 1., 1., 1.]), requires_grad=True)
            self.bias = nn.Parameter(torch.tensor([0., 0., 0., 0.]), requires_grad=True)
            # Plain (non-tensor) attribute; must also be deep-copied
            self.attr = [1.0, 2.0, 3.0, 4.0]
    class ActualModel(ModelWithoutDeepcopy):
        # Emulate custom implementation of the deepcopying.
        def __deepcopy__(self, memo):
            result = self.__new__(self.__class__)
            # Register in memo before recursing so cycles resolve to `result`
            memo[id(self)] = result
            result.__dict__ = deepcopy(self.__dict__, memo)
            return result
    def check_deepcopy(m1: nn.Module, m2: nn.Module):
        w1 = m1.parametrizations.weight.original
        w2 = m2.parametrizations.weight.original
        # bias may or may not be parametrized depending on the test stage below
        b1 = m1.parametrizations.bias.original if parametrize.is_parametrized(m1, "bias") else m1.bias
        b2 = m2.parametrizations.bias.original if parametrize.is_parametrized(m2, "bias") else m2.bias
        # Weights, biases and attributes should be equal but they must be different objects.
        self.assertEqual(m1.__dict__.keys(), m2.__dict__.keys())
        self.assertIsNot(m1, m2)
        self.assertEqual(w1, w2)
        self.assertIsNot(w1, w2)
        self.assertEqual(b1, b2)
        self.assertIsNot(b1, b2)
        self.assertEqual(m1.attr, m2.attr)
        self.assertIsNot(m1.attr, m2.attr)
    for model in (ModelWithoutDeepcopy(), ActualModel()):
        # General check that we are able to create deepcopy.
        parametrize.register_parametrization(model, "weight", AddOne())
        check_deepcopy(model, deepcopy(model))
        # Check that this works on models with several parametrized tensors.
        parametrize.register_parametrization(model, "bias", AddOne())
        check_deepcopy(model, deepcopy(model))
        # Check that this works on models where tensors have more than one parametrization.
        parametrize.register_parametrization(model, "weight", AddOne())
        check_deepcopy(model, deepcopy(model))
def test_transfer_parametrizations_and_params(self):
    r"""Test that all parametrizations and their associated parameters are transferred."""
    class AddOne(nn.Module):
        def forward(self, x):
            return x + 1.0
    class Double(nn.Module):
        def forward(self, x):
            return 2.0 * x
        def right_inverse(self, x):
            return 0.5 * x
    class MinusOne(nn.Module):
        def forward(self, x):
            return x - 1.0
    model = nn.Linear(5, 5)
    # Stack three parametrizations on the weight
    parametrize.register_parametrization(model, "weight", AddOne())
    parametrize.register_parametrization(model, "weight", Double())
    parametrize.register_parametrization(model, "weight", MinusOne())
    hold_weight = model.weight
    to_model = nn.qat.Linear(
        5, 5, qconfig=torch.ao.quantization.get_default_qconfig()
    )
    parametrize.transfer_parametrizations_and_params(model, to_model)
    # checks that final and original value are correct and the to_model is parametrized
    self.assertTrue(torch.nn.utils.parametrize.is_parametrized(to_model, "weight"))
    self.assertEqual(model.weight, to_model.weight)
    self.assertEqual(
        model.parametrizations.weight.original,
        to_model.parametrizations.weight.original,
    )
    # check that the transfer didn't affect the original value
    self.assertEqual(hold_weight, model.weight)
    # testing that changes to one set of parametrizations do not affect the other
    parametrize.remove_parametrizations(to_model, "weight")
    self.assertFalse(torch.nn.utils.parametrize.is_parametrized(to_model, "weight"))
    self.assertTrue(torch.nn.utils.parametrize.is_parametrized(model, "weight"))
    # also test that parameters that don't exist in to_model get transferred
    model.test_param = Parameter(torch.randn(5, 5))
    self.assertTrue(not hasattr(to_model, "test_param"))
    parametrize.register_parametrization(model, "test_param", Double())
    hold_test_param = model.test_param
    parametrize.transfer_parametrizations_and_params(model, to_model, "test_param")
    # check that previously missing params got transferred correctly
    self.assertEqual(model.test_param, to_model.test_param)
    self.assertEqual(
        model.parametrizations.test_param.original,
        to_model.parametrizations.test_param.original,
    )
    # check that the new transfer didn't change the value for the from_module
    self.assertEqual(hold_test_param, model.test_param)
def test_transfer_parametrizations_and_params_right_inverse(self):
r"""Test that all parametrizations and their associated parameters are transferred."""
class Double(nn.Module):
def forward(self, x):
return 2.0 * x
def right_inverse(self, x):
return 0.5 * x
model = nn.Linear(5, 5)
parametrize.register_parametrization(model, "weight", Double())
hold_weight = model.weight
to_model = nn.qat.Linear(
5, 5, qconfig=torch.ao.quantization.get_default_qconfig()
)
parametrize.transfer_parametrizations_and_params(model, to_model)
# check that transfer occurs successfully
self.assertEqual(model.weight, to_model.weight)
self.assertEqual(
model.parametrizations.weight.original,
to_model.parametrizations.weight.original,
)
# check that transfer doesn't affect the from_model weight
self.assertEqual(hold_weight, model.weight)
def test_transfer_parametrizations_and_params_single_param(self):
    r"""Test that all parametrizations and their associated parameters are transferred."""
    class AddOne(nn.Module):
        def forward(self, x):
            return x + 1.0
    class Double(nn.Module):
        def forward(self, x):
            return 2.0 * x
    class MinusOne(nn.Module):
        def forward(self, x):
            return x - 1.0
    model = nn.Linear(5, 5, bias=True)
    # Parametrize both weight and bias, but transfer only the weight below
    parametrize.register_parametrization(model, "weight", AddOne())
    parametrize.register_parametrization(model, "weight", Double())
    parametrize.register_parametrization(model, "weight", MinusOne())
    parametrize.register_parametrization(model, "bias", AddOne())
    parametrize.register_parametrization(model, "bias", Double())
    parametrize.register_parametrization(model, "bias", MinusOne())
    to_model = nn.qat.Linear(
        5, 5, bias=True, qconfig=torch.ao.quantization.get_default_qconfig()
    )
    parametrize.transfer_parametrizations_and_params(model, to_model, "weight")
    # check that weight and only weight was transferred
    self.assertEqual(model.weight, to_model.weight)
    self.assertEqual(
        model.parametrizations.weight.original,
        to_model.parametrizations.weight.original,
    )
    self.assertTrue("bias" not in to_model.parametrizations)
# FIXME: Rewrite this test using functions not depending on LAPACK
# and remove the `@skipIfNoLapack` (see #70995)
@skipIfNoLapack
def test_transfer_parametrizations_and_params_many_to_one(self):
    r"""Test transfer of parametrizations whose right_inverse produces several
    tensors (original0/original1)."""
    # A parametrization with several outputs
    class RankOne(nn.Module):
        def forward(self, x, y):
            # Form a rank-1 matrix from a pair of vectors
            return x.unsqueeze(-1) @ y.unsqueeze(-2)
        def right_inverse(self, Y):
            # We project the given matrix onto the rank 1 matrices
            U, S, Vh = torch.linalg.svd(Y, full_matrices=False)
            # S is ordered in a decreasing way.
            s0_sqrt = S[0].sqrt().unsqueeze(-1)
            return U[..., :, 0] * s0_sqrt, Vh[..., 0, :] * s0_sqrt
    class Double(nn.Module):
        def forward(self, x):
            return 2.0 * x
    model = nn.Linear(3, 3)
    parametrize.register_parametrization(model, "weight", RankOne())
    parametrize.register_parametrization(model, "weight", Double())
    hold_weight = model.weight
    to_model = nn.qat.Linear(
        3, 3, qconfig=torch.ao.quantization.get_default_qconfig()
    )
    parametrize.transfer_parametrizations_and_params(model, to_model)
    # checks that final and original value are correct and the to_model is parametrized
    self.assertTrue(torch.nn.utils.parametrize.is_parametrized(to_model, "weight"))
    self.assertEqual(model.weight, to_model.weight)
    # Both stored tensors of the multi-output parametrization must transfer
    self.assertEqual(
        model.parametrizations.weight.original0,
        to_model.parametrizations.weight.original0,
    )
    self.assertEqual(
        model.parametrizations.weight.original1,
        to_model.parametrizations.weight.original1,
    )
    # check that the transfer didn't affect the original value
    self.assertEqual(hold_weight, model.weight)
    # testing that changes to one set of parametrizations do not affect the other
    model.test_param = Parameter(torch.randn(3, 3))
    self.assertTrue(not hasattr(to_model, "test_param"))
    parametrize.register_parametrization(model, "test_param", RankOne())
    hold_test_param = model.test_param
    parametrize.transfer_parametrizations_and_params(model, to_model, "test_param")
    # also check that previously missing params got transferred correctly
    self.assertEqual(model.test_param, to_model.test_param)
    self.assertEqual(
        model.parametrizations.test_param.original0,
        to_model.parametrizations.test_param.original0,
    )
    self.assertEqual(
        model.parametrizations.test_param.original1,
        to_model.parametrizations.test_param.original1,
    )
    # check that the new transfer didn't change the value for the from_module
    self.assertEqual(hold_test_param, model.test_param)
# torch/nn/utils/prune.py
@unittest.skipIf(not TEST_NUMPY, "numpy not found")
def test_validate_pruning_amount_init(self):
r"""Test the first util function that validates the pruning
amount requested by the user the moment the pruning method
is initialized. This test checks that the expected errors are
raised whenever the amount is invalid.
The original function runs basic type checking + value range checks.
It doesn't check the validity of the pruning amount with
respect to the size of the tensor to prune. That's left to
`_validate_pruning_amount`, tested below.
"""
# neither float not int should raise TypeError
with self.assertRaises(TypeError):
prune._validate_pruning_amount_init(amount="I'm a string")
# float not in [0, 1] should raise ValueError
with self.assertRaises(ValueError):
prune._validate_pruning_amount_init(amount=1.1)
with self.assertRaises(ValueError):
prune._validate_pruning_amount_init(amount=20.)
# negative int should raise ValueError
with self.assertRaises(ValueError):
prune._validate_pruning_amount_init(amount=-10)
# all these should pass without errors because they're valid amounts
prune._validate_pruning_amount_init(amount=0.34)
prune._validate_pruning_amount_init(amount=1500)
prune._validate_pruning_amount_init(amount=0)
prune._validate_pruning_amount_init(amount=0.)
prune._validate_pruning_amount_init(amount=1)
prune._validate_pruning_amount_init(amount=1.)
self.assertTrue(True)
@unittest.skipIf(not TEST_NUMPY, "numpy not found")
def test_validate_pruning_amount(self):
r"""Tests the second util function that validates the pruning
amount requested by the user, this time with respect to the size
of the tensor to prune. The rationale is that if the pruning amount,
converted to absolute value of units to prune, is larger than
the number of units in the tensor, then we expect the util function
to raise a value error.
"""
# if amount is int and amount > tensor_size, raise ValueError
with self.assertRaises(ValueError):
prune._validate_pruning_amount(amount=20, tensor_size=19)
# amount is a float so this should not raise an error
prune._validate_pruning_amount(amount=0.3, tensor_size=0)
# this is okay
prune._validate_pruning_amount(amount=19, tensor_size=20)
prune._validate_pruning_amount(amount=0, tensor_size=0)
prune._validate_pruning_amount(amount=1, tensor_size=1)
self.assertTrue(True)
@unittest.skipIf(not TEST_NUMPY, "numpy not found")
def test_compute_nparams_to_prune(self):
r"""Test that requested pruning `amount` gets translated into the
correct absolute number of units to prune.
"""
self.assertEqual(
prune._compute_nparams_toprune(amount=0, tensor_size=15),
0
)
self.assertEqual(
prune._compute_nparams_toprune(amount=10, tensor_size=15),
10
)
# if 1 is int, means 1 unit
self.assertEqual(
prune._compute_nparams_toprune(amount=1, tensor_size=15),
1
)
# if 1. is float, means 100% of units
self.assertEqual(
prune._compute_nparams_toprune(amount=1., tensor_size=15),
15
)
self.assertEqual(
prune._compute_nparams_toprune(amount=0.4, tensor_size=17),
7
)
def test_random_pruning_sizes(self):
r"""Test that the new parameters and buffers created by the pruning
method have the same size as the input tensor to prune. These, in
fact, correspond to the pruned version of the tensor itself, its
mask, and its original copy, so the size must match.
"""
# fixturize test
# TODO: add other modules
modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
names = ['weight', 'bias']
for m in modules:
for name in names:
with self.subTest(m=m, name=name):
original_tensor = getattr(m, name)
prune.random_unstructured(m, name=name, amount=0.1)
# mask has the same size as tensor being pruned
self.assertEqual(
original_tensor.size(),
getattr(m, name + '_mask').size()
)
# 'orig' tensor has the same size as the original tensor
self.assertEqual(
original_tensor.size(),
getattr(m, name + '_orig').size()
)
# new tensor has the same size as the original tensor
self.assertEqual(
original_tensor.size(),
getattr(m, name).size()
)
def test_random_pruning_orig(self):
r"""Test that original tensor is correctly stored in 'orig'
after pruning is applied. Important to make sure we don't
lose info about the original unpruned parameter.
"""
# fixturize test
# TODO: add other modules
modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
names = ['weight', 'bias']
for m in modules:
for name in names:
with self.subTest(m=m, name=name):
# tensor prior to pruning
original_tensor = getattr(m, name)
prune.random_unstructured(m, name=name, amount=0.1)
self.assertEqual(
original_tensor,
getattr(m, name + '_orig')
)
def test_random_pruning_new_weight(self):
r"""Test that module.name now contains a pruned version of
the original tensor obtained from multiplying it by the mask.
"""
# fixturize test
# TODO: add other modules
modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
names = ['weight', 'bias']
for m in modules:
for name in names:
with self.subTest(m=m, name=name):
# tensor prior to pruning
original_tensor = getattr(m, name)
prune.random_unstructured(m, name=name, amount=0.1)
# weight = weight_orig * weight_mask
self.assertEqual(
getattr(m, name),
getattr(m, name + '_orig')
* getattr(m, name + '_mask').to(
dtype=original_tensor.dtype
),
)
def test_identity_pruning(self):
r"""Test that a mask of 1s does not change forward or backward.
"""
input_ = torch.ones(1, 5)
m = nn.Linear(5, 2)
y_prepruning = m(input_) # output prior to pruning
# compute grad pre-pruning and check it's equal to all ones
y_prepruning.sum().backward()
old_grad_weight = m.weight.grad.clone() # don't grab pointer!
self.assertEqual(old_grad_weight, torch.ones_like(m.weight))
old_grad_bias = m.bias.grad.clone()
self.assertEqual(old_grad_bias, torch.ones_like(m.bias))
# remove grads
m.zero_grad()
# force the mask to be made of all 1s
prune.identity(m, name="weight")
# with mask of 1s, output should be identical to no mask
y_postpruning = m(input_)
self.assertEqual(y_prepruning, y_postpruning)
# with mask of 1s, grad should be identical to no mask
y_postpruning.sum().backward()
self.assertEqual(old_grad_weight, m.weight_orig.grad)
self.assertEqual(old_grad_bias, m.bias.grad)
# calling forward twice in a row shouldn't change output
y1 = m(input_)
y2 = m(input_)
self.assertEqual(y1, y2)
def test_random_pruning_0perc(self):
r"""Test that a mask of 1s does not change forward or backward.
"""
input_ = torch.ones(1, 5)
m = nn.Linear(5, 2)
y_prepruning = m(input_) # output prior to pruning
# compute grad pre-pruning and check it's equal to all ones
y_prepruning.sum().backward()
old_grad_weight = m.weight.grad.clone() # don't grab pointer!
self.assertEqual(old_grad_weight, torch.ones_like(m.weight))
old_grad_bias = m.bias.grad.clone()
self.assertEqual(old_grad_bias, torch.ones_like(m.bias))
# remove grads
m.zero_grad()
# force the mask to be made of all 1s
with mock.patch(
"torch.nn.utils.prune.RandomUnstructured.compute_mask"
) as compute_mask:
compute_mask.return_value = torch.ones_like(m.weight)
prune.random_unstructured(m, name='weight', amount=0.9) # amount won't count
# with mask of 1s, output should be identical to no mask
y_postpruning = m(input_)
self.assertEqual(y_prepruning, y_postpruning)
# with mask of 1s, grad should be identical to no mask
y_postpruning.sum().backward()
self.assertEqual(old_grad_weight, m.weight_orig.grad)
self.assertEqual(old_grad_bias, m.bias.grad)
# calling forward twice in a row shouldn't change output
y1 = m(input_)
y2 = m(input_)
self.assertEqual(y1, y2)
def test_random_pruning(self):
input_ = torch.ones(1, 5)
m = nn.Linear(5, 2)
# define custom mask to assign with mock
mask = torch.ones_like(m.weight)
mask[1, 0] = 0
mask[0, 3] = 0
# check grad is zero for masked weights
with mock.patch(
"torch.nn.utils.prune.RandomUnstructured.compute_mask"
) as compute_mask:
compute_mask.return_value = mask
prune.random_unstructured(m, name='weight', amount=0.9)
y_postpruning = m(input_)
y_postpruning.sum().backward()
# weight_orig is the parameter, so it's the tensor that will accumulate the grad
self.assertEqual(m.weight_orig.grad, mask) # all 1s, except for masked units
self.assertEqual(m.bias.grad, torch.ones_like(m.bias))
# make sure that weight_orig update doesn't modify [1, 0] and [0, 3]
old_weight_orig = m.weight_orig.clone()
# update weights
learning_rate = 1.
for p in m.parameters():
p.data.sub_(p.grad.data * learning_rate)
# since these are pruned, they should not be updated
self.assertEqual(old_weight_orig[1, 0], m.weight_orig[1, 0])
self.assertEqual(old_weight_orig[0, 3], m.weight_orig[0, 3])
def test_random_pruning_forward(self):
r"""check forward with mask (by hand).
"""
input_ = torch.ones(1, 5)
m = nn.Linear(5, 2)
# define custom mask to assign with mock
mask = torch.zeros_like(m.weight)
mask[1, 0] = 1
mask[0, 3] = 1
with mock.patch(
"torch.nn.utils.prune.RandomUnstructured.compute_mask"
) as compute_mask:
compute_mask.return_value = mask
prune.random_unstructured(m, name='weight', amount=0.9)
yhat = m(input_)
self.assertEqual(yhat[0, 0], m.weight_orig[0, 3] + m.bias[0])
self.assertEqual(yhat[0, 1], m.weight_orig[1, 0] + m.bias[1])
def test_remove_pruning_forward(self):
r"""Remove pruning and check forward is unchanged from previous
pruned state.
"""
input_ = torch.ones(1, 5)
m = nn.Linear(5, 2)
# define custom mask to assign with mock
mask = torch.ones_like(m.weight)
mask[1, 0] = 0
mask[0, 3] = 0
# check grad is zero for masked weights
with mock.patch(
"torch.nn.utils.prune.RandomUnstructured.compute_mask"
) as compute_mask:
compute_mask.return_value = mask
prune.random_unstructured(m, name='weight', amount=0.9)
y_postpruning = m(input_)
prune.remove(m, 'weight')
y_postremoval = m(input_)
self.assertEqual(y_postpruning, y_postremoval)
def test_pruning_id_consistency(self):
r"""Test that pruning doesn't change the id of the parameters, which
would otherwise introduce issues with pre-existing optimizers that
point to old parameters.
"""
m = nn.Linear(5, 2, bias=False)
tensor_id = id(list(m.parameters())[0])
prune.random_unstructured(m, name="weight", amount=0.9)
self.assertEqual(tensor_id, id(list(m.parameters())[0]))
prune.remove(m, "weight")
self.assertEqual(tensor_id, id(list(m.parameters())[0]))
def test_random_pruning_pickle(self):
modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
names = ['weight', 'bias']
for m in modules:
for name in names:
with self.subTest(m=m, name=name):
prune.random_unstructured(m, name=name, amount=0.1)
m_new = pickle.loads(pickle.dumps(m))
self.assertIsInstance(m_new, type(m))
def test_multiple_pruning_calls(self):
# if you call pruning twice, the hook becomes a PruningContainer
m = nn.Conv3d(2, 2, 2)
prune.l1_unstructured(m, name='weight', amount=0.1)
weight_mask0 = m.weight_mask # save it for later sanity check
# prune again
prune.ln_structured(m, name='weight', amount=0.3, n=2, dim=0)
hook = next(iter(m._forward_pre_hooks.values()))
self.assertIsInstance(
hook,
torch.nn.utils.prune.PruningContainer
)
# check that container._tensor_name is correctly set no matter how
# many pruning methods are in the container
self.assertEqual(hook._tensor_name, 'weight')
# check that the pruning container has the right length
# equal to the number of pruning iters
self.assertEqual(len(hook), 2) # m.weight has been pruned twice
# check that the entries of the pruning container are of the expected
# type and in the expected order
self.assertIsInstance(hook[0], torch.nn.utils.prune.L1Unstructured)
self.assertIsInstance(hook[1], torch.nn.utils.prune.LnStructured)
# check that all entries that are 0 in the 1st mask are 0 in the
# 2nd mask too
self.assertTrue(torch.all(m.weight_mask[weight_mask0 == 0] == 0))
# prune again
prune.ln_structured(m, name='weight', amount=0.1, n=float('inf'), dim=1)
# check that container._tensor_name is correctly set no matter how
# many pruning methods are in the container
hook = next(iter(m._forward_pre_hooks.values()))
self.assertEqual(hook._tensor_name, 'weight')
def test_pruning_container(self):
# create an empty container
container = prune.PruningContainer()
container._tensor_name = 'test'
self.assertEqual(len(container), 0)
p = prune.L1Unstructured(amount=2)
p._tensor_name = 'test'
# test adding a pruning method to a container
container.add_pruning_method(p)
# test error raised if tensor name is different
q = prune.L1Unstructured(amount=2)
q._tensor_name = 'another_test'
with self.assertRaises(ValueError):
container.add_pruning_method(q)
# test that adding a non-pruning method object to a pruning container
# raises a TypeError
with self.assertRaises(TypeError):
container.add_pruning_method(10)
with self.assertRaises(TypeError):
container.add_pruning_method('ugh')
def test_pruning_container_compute_mask(self):
r"""Test `compute_mask` of pruning container with a known `t` and
`default_mask`. Indirectly checks that Ln structured pruning is
acting on the right axis.
"""
# create an empty container
container = prune.PruningContainer()
container._tensor_name = 'test'
# 1) test unstructured pruning
# create a new pruning method
p = prune.L1Unstructured(amount=2)
p._tensor_name = 'test'
# add the pruning method to the container
container.add_pruning_method(p)
# create tensor to be pruned
t = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).to(dtype=torch.float32)
# create prior mask by hand
default_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 1]])
# since we are pruning the two lowest magnitude units, the outcome of
# the calculation should be this:
expected_mask = torch.tensor([[0, 0, 1, 0], [1, 1, 0, 1]])
computed_mask = container.compute_mask(t, default_mask)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected_mask, computed_mask)
# 2) test structured pruning
q = prune.LnStructured(amount=1, n=2, dim=0)
q._tensor_name = 'test'
container.add_pruning_method(q)
# since we are pruning the lowest magnitude one of the two rows, the
# outcome of the calculation should be this:
expected_mask = torch.tensor([[0, 0, 0, 0], [1, 1, 0, 1]])
computed_mask = container.compute_mask(t, default_mask)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected_mask, computed_mask)
# 2) test structured pruning, along another axis
r = prune.LnStructured(amount=1, n=2, dim=1)
r._tensor_name = 'test'
container.add_pruning_method(r)
# since we are pruning the lowest magnitude of the four columns, the
# outcome of the calculation should be this:
expected_mask = torch.tensor([[0, 1, 1, 0], [0, 1, 0, 1]])
computed_mask = container.compute_mask(t, default_mask)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected_mask, computed_mask)
def test_l1_unstructured_pruning(self):
r"""Test that l1 unstructured pruning actually removes the lowest
entries by l1 norm (by hand). It also checks that applying l1
unstructured pruning more than once respects the previous mask.
"""
m = nn.Linear(4, 2)
# modify its weight matrix by hand
m.weight = torch.nn.Parameter(
torch.tensor(
[[1, 2, 3, 4], [-4, -3, -2, -1]], dtype=torch.float32
)
)
prune.l1_unstructured(m, 'weight', amount=2)
expected_weight = torch.tensor([[0, 2, 3, 4], [-4, -3, -2, 0]],
dtype=m.weight.dtype)
self.assertEqual(expected_weight, m.weight)
# check that pruning again removes the next two smallest entries
prune.l1_unstructured(m, 'weight', amount=2)
expected_weight = torch.tensor([[0, 0, 3, 4], [-4, -3, 0, 0]],
dtype=m.weight.dtype)
self.assertEqual(expected_weight, m.weight)
def test_l1_unstructured_pruning_with_importance_scores(self):
r"""Test that l1 unstructured pruning actually removes the lowest
entries of importance scores and not the parameter by l1 norm (by hand).
It also checks that applying l1 unstructured pruning more than once
respects the previous mask.
"""
m = nn.Linear(4, 2)
# modify its weight matrix by hand
m.weight = torch.nn.Parameter(
torch.tensor(
[[1, 2, 3, 4], [-4, -3, -2, -1]], dtype=torch.float32
)
)
importance_scores = torch.tensor(
[[4, 2, 1, 3], [-3, -1, -2, -4]], dtype=torch.float32
)
prune.l1_unstructured(m, 'weight', amount=2, importance_scores=importance_scores)
expected_weight = torch.tensor([[1, 2, 0, 4], [-4, 0, -2, -1]],
dtype=m.weight.dtype)
self.assertEqual(expected_weight, m.weight)
# check that pruning again removes two entries of m.weight that are colocated with
# the next two smallest absolute values of importance scores.
prune.l1_unstructured(m, 'weight', amount=2, importance_scores=importance_scores)
expected_weight = torch.tensor([[1, 0, 0, 4], [-4, 0, 0, -1]],
dtype=m.weight.dtype)
self.assertEqual(expected_weight, m.weight)
def test_unstructured_pruning_same_magnitude(self):
r"""Since it may happen that the tensor to prune has entries with the
same exact magnitude, it is important to check that pruning happens
consistenly based on the bottom % of weights, and not by threshold,
which would instead kill off *all* units with magnitude = threshold.
"""
AMOUNT = 0.2
p = prune.L1Unstructured(amount=AMOUNT)
# create a random tensors with entries in {-2, 0, 2}
t = 2 * torch.randint(low=-1, high=2, size=(10, 7))
nparams_toprune = prune._compute_nparams_toprune(AMOUNT, t.nelement())
computed_mask = p.compute_mask(t, default_mask=torch.ones_like(t))
nparams_pruned = torch.sum(computed_mask == 0)
self.assertEqual(nparams_toprune, nparams_pruned)
def test_random_structured_pruning_amount(self):
AMOUNT = 0.6
AXIS = 2
p = prune.RandomStructured(amount=AMOUNT, dim=AXIS)
t = 2 * torch.randint(low=-1, high=2, size=(5, 4, 2)).to(
dtype=torch.float32
)
nparams_toprune = prune._compute_nparams_toprune(AMOUNT, t.shape[AXIS])
computed_mask = p.compute_mask(t, default_mask=torch.ones_like(t))
# check that 1 column is fully prune, the others are left untouched
remaining_axes = [_ for _ in range(len(t.shape)) if _ != AXIS]
per_column_sums = sorted(
torch.sum(computed_mask == 0, axis=remaining_axes)
)
assert per_column_sums == [0, 20]
def test_ln_structured_pruning(self):
r"""Check Ln structured pruning by hand.
"""
m = nn.Conv2d(3, 1, 2)
m.weight.data = torch.tensor(
[[[[1., 2.], [1., 2.5]],
[[0.5, 1.], [0.1, 0.1]],
[[-3., -5.], [0.1, -1.]]]]
)
# expected effect of pruning 1 of the 3 channels by L2-norm
expected_mask_axis1 = torch.ones_like(m.weight)
expected_mask_axis1[:, 1] = 0.
prune.ln_structured(m, 'weight', amount=1, n=2, dim=1)
self.assertEqual(expected_mask_axis1, m.weight_mask)
# expected effect of pruning 1 of the 2 columns along axis -1 by L1-norm
expected_mask_axis3 = expected_mask_axis1
expected_mask_axis3[:, :, :, 0] = 0.
prune.ln_structured(m, 'weight', amount=1, n=1, dim=-1)
self.assertEqual(expected_mask_axis3, m.weight_mask)
def test_ln_structured_pruning_importance_scores(self):
r"""Check Ln structured pruning by hand.
"""
m = nn.Conv2d(3, 1, 2)
m.weight.data = torch.tensor(
[[[[1., 2.], [1., 2.5]],
[[0.5, 1.], [0.1, 0.1]],
[[-3., -5.], [0.1, -1.]]]]
)
importance_scores = torch.tensor(
[[[[10., 1.], [10., 1.]],
[[30., 3.], [30., 3.]],
[[-20., -2.], [-20., -2.]]]]
)
# expected effect of pruning 1 of the 3 channels by L2-norm
expected_mask_axis1 = torch.ones_like(m.weight)
expected_mask_axis1[:, 0] = 0.
prune.ln_structured(m, 'weight', amount=1, n=2, dim=1, importance_scores=importance_scores)
self.assertEqual(expected_mask_axis1, m.weight_mask)
# expected effect of pruning 1 of the 2 columns along axis -1 by L1-norm
expected_mask_axis3 = expected_mask_axis1
expected_mask_axis3[:, :, :, 1] = 0.
prune.ln_structured(m, 'weight', amount=1, n=1, dim=-1, importance_scores=importance_scores)
self.assertEqual(expected_mask_axis3, m.weight_mask)
def test_remove_pruning(self):
r"""`prune.remove` removes the hook and the reparametrization
and makes the pruning final in the original parameter.
"""
modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
names = ['weight', 'bias']
for m in modules:
for name in names:
with self.subTest(m=m, name=name):
# first prune
prune.random_unstructured(m, name, amount=0.5)
self.assertIn(name + "_orig", dict(m.named_parameters()))
self.assertIn(name + "_mask", dict(m.named_buffers()))
self.assertNotIn(name, dict(m.named_parameters()))
self.assertTrue(hasattr(m, name))
pruned_t = getattr(m, name)
# then remove pruning
prune.remove(m, name)
self.assertIn(name, dict(m.named_parameters()))
self.assertNotIn(name + "_orig", dict(m.named_parameters()))
self.assertNotIn(name + "_mask", dict(m.named_buffers()))
final_t = getattr(m, name)
self.assertEqual(pruned_t, final_t)
def test_remove_pruning_exception(self):
r"""Removing from an unpruned tensor throws an assertion error
"""
modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
names = ['weight', 'bias']
for m in modules:
for name in names:
with self.subTest(m=m, name=name):
# check that the module isn't pruned
self.assertFalse(prune.is_pruned(m))
# since it isn't pruned, pruning can't be removed from it
with self.assertRaises(ValueError):
prune.remove(m, name)
def test_global_pruning(self):
r"""Test that global l1 unstructured pruning over 2 parameters removes
the `amount=4` smallest global weights across the 2 parameters.
"""
m = nn.Linear(4, 2)
n = nn.Linear(3, 1)
# modify the weight matrices by hand
m.weight = torch.nn.Parameter(
torch.tensor([[1, 2, 3, 4], [-4, -3, -2, -1]]).to(
dtype=torch.float32)
)
n.weight = torch.nn.Parameter(
torch.tensor([[0, 0.1, -2]]).to(
dtype=torch.float32)
)
params_to_prune = (
(m, 'weight'),
(n, 'weight'),
)
# prune the 4 smallest weights globally by L1 magnitude
prune.global_unstructured(
params_to_prune,
pruning_method=prune.L1Unstructured,
amount=4
)
expected_mweight = torch.tensor([[0, 2, 3, 4], [-4, -3, -2, 0]],
dtype=m.weight.dtype)
self.assertEqual(expected_mweight, m.weight)
expected_nweight = torch.tensor([[0, 0, -2]]).to(dtype=n.weight.dtype)
self.assertEqual(expected_nweight, n.weight)
def test_global_pruning_importance_scores(self):
r"""Test that global l1 unstructured pruning over 2 parameters removes
the `amount=4` smallest global weights across the 2 parameters.
"""
m = nn.Linear(4, 2)
n = nn.Linear(3, 1)
# modify the weight matrices by hand
m.weight = torch.nn.Parameter(
torch.tensor([[1, 2, 3, 4], [-4, -3, -2, -1]]).to(
dtype=torch.float32)
)
m_importance_scores = torch.tensor(
[[4, 2, 1, 3], [-3, -1, -2, -4]], dtype=torch.float32
)
n.weight = torch.nn.Parameter(
torch.tensor([[0, 0.1, -2]]).to(
dtype=torch.float32)
)
n_importance_scores = torch.tensor([[0, 10., -0.2]]).to(dtype=torch.float32)
params_to_prune = (
(m, 'weight'),
(n, 'weight'),
)
importance_scores = {
(m, 'weight'): m_importance_scores,
(n, 'weight'): n_importance_scores,
}
# prune the 4 smallest weights globally by L1 magnitude
prune.global_unstructured(
params_to_prune,
pruning_method=prune.L1Unstructured,
amount=4,
importance_scores=importance_scores,
)
expected_m_weight = torch.tensor([[1, 2, 0, 4], [-4, 0, -2, -1]],
dtype=m.weight.dtype)
self.assertEqual(expected_m_weight, m.weight)
expected_n_weight = torch.tensor([[0, 0.1, 0]]).to(dtype=n.weight.dtype)
self.assertEqual(expected_n_weight, n.weight)
def test_custom_from_mask_pruning(self):
r"""Test that the CustomFromMask is capable of receiving
as input at instantiation time a custom mask, and combining it with
the previous default mask to generate the correct final mask.
"""
# new mask
mask = torch.tensor([[0, 1, 1, 0], [0, 0, 1, 1]])
# old mask
default_mask = torch.tensor([[0, 0, 0, 0], [1, 1, 1, 1]])
# some tensor (not actually used)
t = torch.rand_like(mask.to(dtype=torch.float32))
p = prune.CustomFromMask(mask=mask)
computed_mask = p.compute_mask(t, default_mask)
expected_mask = torch.tensor([[0, 0, 0, 0], [0, 0, 1, 1]]).to(
dtype=t.dtype
)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(computed_mask, expected_mask)
def test_pruning_rollback(self):
r"""Test that if something fails when the we try to compute the mask,
then the model isn't left in some intermediate half-pruned state.
The try/except statement in `apply` should handle rolling back
to the previous state before pruning began.
"""
modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
names = ['weight', 'bias']
for m in modules:
for name in names:
with self.subTest(m=m, name=name):
with mock.patch(
"torch.nn.utils.prune.L1Unstructured.compute_mask"
) as compute_mask:
compute_mask.side_effect = Exception('HA!')
with self.assertRaises(Exception):
prune.l1_unstructured(m, name=name, amount=0.9)
self.assertTrue(
name in dict(m.named_parameters())
)
self.assertFalse(
name + '_mask' in dict(m.named_buffers())
)
self.assertFalse(
name + '_orig' in dict(m.named_parameters())
)
def test_pruning_serialization_model(self):
# create a model
model = torch.nn.Sequential(
torch.nn.Linear(10, 10),
torch.nn.ReLU(),
torch.nn.Linear(10, 1),
)
# check that everything looks normal before pruning
self.assertNotIn('0.weight_orig', model.state_dict())
self.assertNotIn('0.weight_mask', model.state_dict())
self.assertIn('0.weight', model.state_dict())
# prune one of its parameters
prune.l1_unstructured(module=model[0], name='weight', amount=0.9)
# check that the original weight and the new mask are present
self.assertIn('0.weight_orig', model.state_dict())
self.assertIn('0.weight_mask', model.state_dict())
self.assertNotIn('0.weight', model.state_dict())
self.assertTrue(hasattr(model[0], 'weight'))
pruned_weight = model[0].weight
with TemporaryFileName() as fname:
torch.save(model, fname)
new_model = torch.load(fname)
# check that the original weight and the new mask are present
self.assertIn('0.weight_orig', new_model.state_dict())
self.assertIn('0.weight_mask', new_model.state_dict())
self.assertNotIn('0.weight', new_model.state_dict())
self.assertTrue(hasattr(new_model[0], 'weight'))
self.assertEqual(pruned_weight, new_model[0].weight)
def test_pruning_serialization_state_dict(self):
# create a model
model = torch.nn.Sequential(
torch.nn.Linear(10, 10),
torch.nn.ReLU(),
torch.nn.Linear(10, 1),
)
# check that everything looks normal before pruning
self.assertNotIn('0.weight_orig', model.state_dict())
self.assertNotIn('0.weight_mask', model.state_dict())
self.assertIn('0.weight', model.state_dict())
# prune one of its parameters
prune.l1_unstructured(module=model[0], name='weight', amount=0.9)
# check that the original weight and the new mask are present
self.assertIn('0.weight_orig', model.state_dict())
self.assertIn('0.weight_mask', model.state_dict())
self.assertNotIn('0.weight', model.state_dict())
self.assertTrue(hasattr(model[0], 'weight'))
pruned_weight = model[0].weight
# make pruning permanent and restore parameter names as in base
# architecture
prune.remove(module=model[0], name='weight')
# check that the original weight and the new mask are no longer present
self.assertNotIn('0.weight_orig', model.state_dict())
self.assertNotIn('0.weight_mask', model.state_dict())
self.assertIn('0.weight', model.state_dict())
# save the state dict of model and reload it into new_model
new_model = torch.nn.Sequential(
torch.nn.Linear(10, 10),
torch.nn.ReLU(),
torch.nn.Linear(10, 1),
)
with TemporaryFileName() as fname:
torch.save(model.state_dict(), fname)
new_model.load_state_dict(torch.load(fname))
# check that the original weight and the new mask are not present in
# new_model either.
self.assertNotIn('0.weight_orig', new_model.state_dict())
self.assertNotIn('0.weight_mask', new_model.state_dict())
self.assertIn('0.weight', new_model.state_dict())
self.assertEqual(pruned_weight, new_model[0].weight)
def test_prune(self):
# create a new pruning method
p = prune.L1Unstructured(amount=2)
# create tensor to be pruned
t = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).to(dtype=torch.float32)
# create prior mask by hand
default_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 1]])
# since we are pruning the two lowest magnitude units, the outcome of
# the calculation should be this:
expected_mask = torch.tensor([[0, 0, 1, 0], [1, 1, 0, 1]])
pruned_tensor = p.prune(t, default_mask)
self.assertEqual(t * expected_mask, pruned_tensor)
def test_prune_importance_scores(self):
# create a new pruning method
p = prune.L1Unstructured(amount=2)
# create tensor to be pruned
t = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).to(dtype=torch.float32)
importance_scores = torch.tensor(
[[1, 2, 3, 4], [1.5, 1.6, 1.7, 1.8]]
).to(dtype=torch.float32)
# create prior mask by hand
default_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 1]])
# since we are pruning the two lowest magnitude units, the outcome of
# the calculation should be this:
expected_mask = torch.tensor([[0, 1, 1, 0], [0, 1, 0, 1]])
pruned_tensor = p.prune(t, default_mask, importance_scores=importance_scores)
self.assertEqual(t * expected_mask, pruned_tensor)
def test_prune_importance_scores_mimic_default(self):
# create a new pruning method
p = prune.L1Unstructured(amount=2)
# create tensor to be pruned
t = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).to(dtype=torch.float32)
# create prior mask by hand
default_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 1]])
# since we are pruning the two lowest magnitude units, the outcome of
# the calculation should be this:
expected_mask = torch.tensor([[0, 0, 1, 0], [1, 1, 0, 1]])
pruned_tensor_without_importance_scores = p.prune(t, default_mask)
pruned_tensor_with_importance_scores = p.prune(t, default_mask, importance_scores=t)
self.assertEqual(pruned_tensor_without_importance_scores, pruned_tensor_with_importance_scores)
self.assertEqual(t * expected_mask, pruned_tensor_without_importance_scores)
@skipIfTorchDynamo("TorchDynamo fails here for unknown reasons")
def test_rnn_pruning(self):
l = torch.nn.LSTM(32, 32)
# This Module has 4 parameters called:
# 'weight_ih_l0', 'weight_hh_l0', 'bias_ih_l0', 'bias_hh_l0'
# Pruning one of them causes one of the weights to become a tensor
prune.l1_unstructured(l, 'weight_ih_l0', 0.5)
assert (
sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights])
== 3
)
# Removing the pruning reparametrization restores the Parameter
prune.remove(l, 'weight_ih_l0')
assert (
sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights])
== 4
)
# Make sure that, upon removal of the reparametrization, the
# `._parameters` and `.named_parameters` contain the right params.
# Specifically, the original weight ('weight_ih_l0') should be placed
# back in the parameters, while the reparametrization component
# ('weight_ih_l0_orig') should be removed.
assert 'weight_ih_l0' in l._parameters
assert l._parameters['weight_ih_l0'] is not None
assert 'weight_ih_l0_orig' not in l._parameters
assert 'weight_ih_l0' in dict(l.named_parameters())
assert dict(l.named_parameters())['weight_ih_l0'] is not None
assert 'weight_ih_l0_orig' not in dict(l.named_parameters())
@skipIfTorchDynamo("TorchDynamo fails here for unknown reasons")
def test_rnn_weight_norm(self):
def check_weight_norm(l, name, num_params):
# This Module has 4 or 5 parameters called:
# 'weight_ih_l0', 'weight_hh_l0', 'bias_ih_l0', 'bias_hh_l0', weight_hr_l0
# Applying weight norm on one of them causes it to become a tensor
l = torch.nn.utils.weight_norm(l, name=name)
self.assertEqual(
sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights]),
num_params - 1,
)
# Removing the weight norm reparametrization restores the Parameter
l = torch.nn.utils.remove_weight_norm(l, name=name)
self.assertEqual(
sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights]),
num_params,
)
# Make sure that, upon removal of the reparametrization, the
# `._parameters` and `.named_parameters` contain the right params.
# Specifically, the original weight ('weight_ih_l0') should be placed
# back in the parameters, while the reparametrization components
# ('weight_ih_l0_v' and 'weight_ih_l0_g') should be removed.
self.assertTrue(name in l._parameters)
self.assertIsNotNone(l._parameters[name])
self.assertTrue(name + '_v' not in l._parameters)
self.assertTrue(name + '_g' not in l._parameters)
self.assertTrue(name in dict(l.named_parameters()))
self.assertIsNotNone(dict(l.named_parameters())[name])
self.assertTrue(name + '_v' not in dict(l.named_parameters()))
self.assertTrue(name + '_g' not in dict(l.named_parameters()))
check_weight_norm(torch.nn.LSTM(32, 32), 'weight_ih_l0', 4)
check_weight_norm(torch.nn.LSTM(32, 32, proj_size=16), 'weight_hr_l0', 5)
def test_weight_norm(self):
for dtype in [torch.float, torch.bfloat16]:
input = torch.randn(3, 4, dtype=dtype)
m = nn.Linear(4, 5).to(dtype=dtype)
expected_output = m(input)
# add weight normalization
m = torch.nn.utils.weight_norm(m)
self.assertEqual(m.weight_v.size(), m.weight.size())
self.assertEqual(m.weight_g.size(), (5, 1))
self.assertEqual(m(input), expected_output, atol=dtype2prec_DONTUSE[dtype], rtol=0)
# remove weight norm
m = torch.nn.utils.remove_weight_norm(m)
self.assertFalse(hasattr(m, 'weight_g'))
self.assertFalse(hasattr(m, 'weight_v'))
self.assertEqual(m(input), expected_output, atol=dtype2prec_DONTUSE[dtype], rtol=0)
# test with dim=1
m = torch.nn.utils.weight_norm(m, dim=1)
self.assertEqual(m.weight_v.size(), m.weight.size())
self.assertEqual(m.weight_g.size(), (1, 4))
self.assertEqual(m(input), expected_output, atol=dtype2prec_DONTUSE[dtype], rtol=0)
# test with dim=None
m = nn.Linear(4, 5).to(dtype=dtype)
expected_output = m(input)
m = torch.nn.utils.weight_norm(m, dim=None)
self.assertEqual(m(input), expected_output)
with self.assertRaisesRegex(RuntimeError, 'register two weight_norm hooks'):
m = torch.nn.utils.weight_norm(m)
m = torch.nn.utils.weight_norm(m)
# For float16, the forward of the Module doesn't work but we must still be able
# to register the weight norm as this is often done before sending the Module to
# CUDA.
m = nn.Linear(4, 5, dtype=torch.float16)
m = torch.nn.utils.weight_norm(m)
def test_parameterlistdict_setting_attributes(self):
with warnings.catch_warnings(record=True) as w:
mod = nn.ParameterList(map(nn.Parameter, [torch.rand(2), torch.rand(2)]))
self.assertTrue(len(w) == 0)
with warnings.catch_warnings(record=True) as w:
mod.train()
mod.eval()
self.assertTrue(len(w) == 0)
with warnings.catch_warnings(record=True) as w:
mod = nn.ParameterDict({"a": nn.Parameter(torch.rand(2)), "b": nn.Parameter(torch.rand(2))})
self.assertTrue(len(w) == 0)
with warnings.catch_warnings(record=True) as w:
mod.train()
mod.eval()
self.assertTrue(len(w) == 0)
def test_parameterlistdict_pickle(self):
m = nn.ParameterList(map(nn.Parameter, [torch.rand(2), torch.rand(2)]))
with warnings.catch_warnings(record=True) as w:
m = pickle.loads(pickle.dumps(m))
self.assertTrue(len(w) == 0)
# Test whether loading from older checkpoints works without triggering warnings
m = nn.ParameterList(map(nn.Parameter, [torch.rand(2), torch.rand(2)]))
del m._forward_pre_hooks, m._state_dict_hooks, m._load_state_dict_pre_hooks, m._non_persistent_buffers_set
with warnings.catch_warnings(record=True) as w:
m = pickle.loads(pickle.dumps(m))
self.assertTrue(len(w) == 0)
m = nn.ParameterDict({"a": nn.Parameter(torch.rand(2)), "b": nn.Parameter(torch.rand(2))})
with warnings.catch_warnings(record=True) as w:
m = pickle.loads(pickle.dumps(m))
self.assertTrue(len(w) == 0)
# Test whether loading from older checkpoints works without triggering warnings
m = nn.ParameterDict({"a": nn.Parameter(torch.rand(2)), "b": nn.Parameter(torch.rand(2))})
del m._forward_pre_hooks, m._state_dict_hooks, m._load_state_dict_pre_hooks, m._non_persistent_buffers_set
with warnings.catch_warnings(record=True) as w:
m = pickle.loads(pickle.dumps(m))
self.assertTrue(len(w) == 0)
def test_weight_norm_pickle(self):
m = torch.nn.utils.weight_norm(nn.Linear(5, 7))
m = pickle.loads(pickle.dumps(m))
self.assertIsInstance(m, nn.Linear)
@skipIfTorchDynamo("TorchDynamo fails here for unknown reasons")
def test_spectral_norm(self):
    """End-to-end test of the legacy (pre-parametrize) spectral_norm hook.

    Covers: attribute/buffer layout after registration, removal restoring a
    plain parameter, double-registration error, and training/eval numerical
    behavior (power-iteration updates, gradcheck through single and double
    forwards, remove/re-add round trips) on CPU and, when available, under
    DataParallel on multi-GPU.
    """
    input = torch.randn(3, 5)
    m = nn.Linear(5, 7)
    m = torch.nn.utils.spectral_norm(m)

    self.assertEqual(m.weight_u.size(), torch.Size([m.weight.size(0)]))
    # weight_orig should be trainable
    self.assertTrue(hasattr(m, 'weight_orig'))
    self.assertTrue('weight_orig' in m._parameters)
    # weight_u should be just a reused buffer
    self.assertTrue(hasattr(m, 'weight_u'))
    self.assertTrue('weight_u' in m._buffers)
    self.assertTrue('weight_v' in m._buffers)
    # weight should be a plain attribute, not counted as a buffer or a param
    self.assertFalse('weight' in m._buffers)
    self.assertFalse('weight' in m._parameters)
    # it should also be sharing storage as `weight_orig`
    self.assertEqual(m.weight_orig.storage(), m.weight.storage())
    self.assertEqual(m.weight_orig.size(), m.weight.size())
    self.assertEqual(m.weight_orig.stride(), m.weight.stride())

    m = torch.nn.utils.remove_spectral_norm(m)
    self.assertFalse(hasattr(m, 'weight_orig'))
    self.assertFalse(hasattr(m, 'weight_u'))
    # weight should be converted back as a parameter
    self.assertTrue(hasattr(m, 'weight'))
    self.assertTrue('weight' in m._parameters)

    with self.assertRaisesRegex(RuntimeError, 'register two spectral_norm hooks'):
        m = torch.nn.utils.spectral_norm(m)
        m = torch.nn.utils.spectral_norm(m)

    # test correctness in training/eval modes and cpu/multi-gpu settings
    for apply_dp in (True, False):
        if apply_dp:
            if not TEST_MULTIGPU:
                continue
            device = torch.device('cuda:0')

            def maybe_wrap(m):
                return torch.nn.DataParallel(m, [0, 1])
        else:
            device = torch.device('cpu')

            def maybe_wrap(m):
                return m

        for requires_grad in (True, False):
            m = nn.Linear(3, 4).to(device)
            m.weight.requires_grad_(requires_grad)
            m = torch.nn.utils.spectral_norm(m)
            wrapped_m = maybe_wrap(m)
            self.assertTrue(hasattr(m, 'weight_u'))
            u0 = m.weight_u.clone()
            v0 = m.weight_v.clone()

            # TEST TRAINING BEHAVIOR
            # assert that u and v are updated
            input = torch.randn(2, 3, device=device)
            out = wrapped_m(input)
            self.assertNotEqual(u0, m.weight_u)
            self.assertNotEqual(v0, m.weight_v)

            # assert that backprop reaches weight_orig
            # can't use gradcheck because the function changes as we
            # activate through it in training mode
            if requires_grad:
                torch.autograd.grad(out.sum(), m.weight_orig)

            # test backward works with multiple forwards
            # it uses training mode so we need to reset `u` and `v` vectors
            # to same value at beginning for finite difference test to pass
            saved_u = m.weight_u.clone()
            saved_v = m.weight_v.clone()

            def fn(input):
                m.weight_u.data.copy_(saved_u)
                m.weight_v.data.copy_(saved_v)
                out0 = wrapped_m(input)
                out1 = wrapped_m(input)
                return out0 + out1

            gradcheck(fn, (input.clone().requires_grad_(),), check_batched_grad=False)

            # test removing
            pre_remove_out = wrapped_m(input)
            m = torch.nn.utils.remove_spectral_norm(m)
            self.assertEqual(wrapped_m(input), pre_remove_out)

            m = torch.nn.utils.spectral_norm(m)
            for _ in range(3):
                pre_remove_out = wrapped_m(input)
                m = torch.nn.utils.remove_spectral_norm(m)
                self.assertEqual(wrapped_m(input), pre_remove_out)

            # TEST EVAL BEHAVIOR
            m = torch.nn.utils.spectral_norm(m)
            wrapped_m(input)
            last_train_out = wrapped_m(input)
            last_train_u = m.weight_u.clone()
            last_train_v = m.weight_v.clone()
            wrapped_m.zero_grad()
            wrapped_m.eval()

            eval_out0 = wrapped_m(input)
            # assert eval gives same result as last training iteration
            self.assertEqual(eval_out0, last_train_out)
            # assert doing more iteartion in eval don't change things
            self.assertEqual(eval_out0, wrapped_m(input))
            self.assertEqual(last_train_u, m.weight_u)
            self.assertEqual(last_train_v, m.weight_v)

            # FIXME: the code below is flaky when executed with DataParallel
            # see https://github.com/pytorch/pytorch/issues/13818
            if apply_dp:
                continue

            # test backward works with multiple forwards in mixed training
            # and eval modes
            # it uses training mode so we need to reset `u` and `v` vectors
            # to same value at beginning for finite difference test to pass
            saved_u = m.weight_u.clone()
            saved_v = m.weight_v.clone()

            def fn(input):
                m.weight_u.data.copy_(saved_u)
                m.weight_v.data.copy_(saved_v)
                wrapped_m.train()
                out0 = wrapped_m(input)
                wrapped_m.eval()
                out1 = wrapped_m(input)
                wrapped_m.train()
                out2 = wrapped_m(input)
                wrapped_m.eval()
                out3 = wrapped_m(input)
                return out0 + out1 + out2 + out3

            gradcheck(fn, (input.clone().requires_grad_(),))

            # assert that backprop reaches weight_orig in eval
            if requires_grad:
                def fn(weight):
                    return wrapped_m(input)

                gradcheck(fn, (m.weight_orig,))
def test_new_spectral_norm(self):
    """End-to-end test of the parametrize-based spectral_norm.

    Covers: parametrization/buffer layout, removal, multiple registrations on
    the same and different parameters, and training/eval numerical behavior
    (power-iteration updates, gradcheck through double forwards, remove/re-add
    round trips) on CPU and, when available, under DataParallel on multi-GPU.
    """
    input = torch.randn(3, 5)
    m = nn.Linear(5, 7)
    m = torch.nn.utils.parametrizations.spectral_norm(m)
    spectral_norm_m = m.parametrizations.weight[0]

    self.assertEqual(spectral_norm_m._u.size(), torch.Size([m.weight.size(0)]))

    # .parametrizations.weight.original should be trainable
    self.assertTrue(hasattr(m.parametrizations.weight, 'original'))
    self.assertTrue('original' in m.parametrizations.weight._parameters)

    # u should be just a reused buffer
    self.assertTrue(hasattr(spectral_norm_m, '_u'))
    self.assertTrue('_u' in spectral_norm_m._buffers)
    self.assertTrue('_v' in spectral_norm_m._buffers)

    # weight should be a plain attribute, not counted as a buffer or a param
    self.assertIsNotNone(m.weight)
    self.assertFalse('weight' in m._buffers)
    self.assertFalse('weight' in m._parameters)

    # it should also be sharing storage as `weight_orig`
    # self.assertEqual(m.parametrizations.weight.original.storage(), m.weight.storage())
    self.assertEqual(m.parametrizations.weight.original.size(), m.weight.size())
    self.assertEqual(m.parametrizations.weight.original.stride(), m.weight.stride())

    m = torch.nn.utils.parametrize.remove_parametrizations(m, 'weight')

    # spectral_norm is the only parametrization
    self.assertFalse(hasattr(m, 'parametrizations'))
    self.assertTrue('weight' in m._parameters)

    # We can register spectral_norm multiple times on the same parameter
    # and on multiple parameters in the same module
    m = torch.nn.utils.parametrizations.spectral_norm(m, 'weight')
    m = torch.nn.utils.parametrizations.spectral_norm(m, 'weight')
    m = torch.nn.utils.parametrizations.spectral_norm(m, 'bias')

    # If we remove the parametrization on bias, weight is still parametrized
    # Removing a parametrization runs forward in eval mode if leave_parametrized=True
    m = torch.nn.utils.parametrize.remove_parametrizations(m, 'bias')
    self.assertTrue('bias' in m._parameters)
    self.assertTrue(hasattr(m, 'parametrizations'))
    self.assertFalse('weight' in m._parameters)

    m = torch.nn.utils.parametrize.remove_parametrizations(m, 'weight')
    # Neither weight and bias are parametrized
    self.assertFalse(hasattr(m, 'parametrizations'))
    self.assertTrue('weight' in m._parameters)
    self.assertFalse(torch.nn.utils.parametrize.is_parametrized(m))

    # test correctness in training/eval modes and cpu/multi-gpu settings
    for apply_dp in (True, False):
        if apply_dp:
            if not TEST_MULTIGPU:
                continue
            device = torch.device('cuda:0')

            def maybe_wrap(m):
                return torch.nn.DataParallel(m, [0, 1])
        else:
            device = torch.device('cpu')

            def maybe_wrap(m):
                return m

        for requires_grad in (True, False):
            def get_modules():
                # fresh module + wrapper + parametrization object for each phase
                m = nn.Linear(3, 4).to(device)
                m.weight.requires_grad_(requires_grad)
                m = torch.nn.utils.parametrizations.spectral_norm(m)
                wrapped_m = maybe_wrap(m)
                spectral_norm_m = m.parametrizations.weight[0]
                return m, wrapped_m, spectral_norm_m

            input = torch.randn(2, 3, device=device)

            m, wrapped_m, spectral_norm_m = get_modules()

            self.assertTrue(hasattr(spectral_norm_m, '_u'))
            u0 = spectral_norm_m._u.clone()
            v0 = spectral_norm_m._v.clone()

            # TEST TRAINING BEHAVIOR
            # We perform GD first to modify the initial matrix
            opt = torch.optim.SGD(wrapped_m.parameters(), lr=0.1)

            opt.zero_grad()
            wrapped_m(input).sum().backward()
            opt.step()

            out = wrapped_m(input)
            if requires_grad:
                # run forward again and assert that u and v are updated
                self.assertNotEqual(u0, spectral_norm_m._u)
                self.assertNotEqual(v0, spectral_norm_m._v)

            # assert that backprop reaches original weight
            # can't use gradcheck because the function changes as we
            # activate through it in training mode
            if requires_grad:
                torch.autograd.grad(out.sum(), m.parametrizations.weight.original)

            # test backward works with multiple forwards
            # it uses training mode so we need to reset `u` and `v` vectors
            # to same value at beginning for finite difference test to pass
            saved_u = spectral_norm_m._u.clone()
            saved_v = spectral_norm_m._v.clone()

            def fn(input):
                spectral_norm_m._u.data.copy_(saved_u)
                spectral_norm_m._v.data.copy_(saved_v)
                out0 = wrapped_m(input)
                out1 = wrapped_m(input)
                return out0 + out1

            # Make sure we can compute gradients wrt to all the parameters in the case
            # of double forward
            fn(input.clone().requires_grad_()).sum().backward()
            gradcheck(fn, (input.clone().requires_grad_(),), check_batched_grad=False)

            # test removing
            # spectral norm module needs to be in eval mode if we'd like to
            # avoid doing another power iteration
            m, wrapped_m, _ = get_modules()
            pre_remove_out = wrapped_m(input)
            m.eval()
            m = torch.nn.utils.parametrize.remove_parametrizations(m, 'weight')
            self.assertEqual(wrapped_m(input), pre_remove_out)

            torch.nn.utils.parametrizations.spectral_norm(m)

            for _ in range(3):
                pre_remove_out = wrapped_m(input)
                m.eval()
                m = torch.nn.utils.parametrize.remove_parametrizations(m, 'weight')
                self.assertEqual(wrapped_m(input), pre_remove_out)

            # TEST EVAL BEHAVIOR
            m, wrapped_m, spectral_norm_m = get_modules()
            wrapped_m(input)
            last_train_out = wrapped_m(input)
            last_train_u = spectral_norm_m._u.clone()
            last_train_v = spectral_norm_m._v.clone()
            wrapped_m.zero_grad()
            wrapped_m.eval()

            eval_out0 = wrapped_m(input)
            # assert eval gives same result as last training iteration
            self.assertEqual(eval_out0, last_train_out)
            # assert doing more iteartion in eval don't change things
            self.assertEqual(eval_out0, wrapped_m(input))
            self.assertEqual(last_train_u, spectral_norm_m._u)
            self.assertEqual(last_train_v, spectral_norm_m._v)

            # FIXME: the code below is flaky when executed with DataParallel
            # see https://github.com/pytorch/pytorch/issues/13818
            if apply_dp:
                continue

            # test backward works with multiple forwards in mixed training
            # and eval modes
            # it uses training mode so we need to reset `u` and `v` vectors
            # to same value at beginning for finite difference test to pass
            saved_u = spectral_norm_m._u.clone()
            saved_v = spectral_norm_m._v.clone()

            def fn(input):
                spectral_norm_m._u.data.copy_(saved_u)
                spectral_norm_m._v.data.copy_(saved_v)
                wrapped_m.train()
                out0 = wrapped_m(input)
                wrapped_m.eval()
                out1 = wrapped_m(input)
                wrapped_m.train()
                out2 = wrapped_m(input)
                wrapped_m.eval()
                out3 = wrapped_m(input)
                return out0 + out1 + out2 + out3

            gradcheck(fn, (input.clone().requires_grad_(),))

            # assert that backprop reaches weight_orig in eval
            if requires_grad:
                def fn(weight):
                    return wrapped_m(input)

                gradcheck(fn, (m.parametrizations.weight.original,))
def test_new_spectral_norm_load_state_dict(self):
    """state_dict round trips for the parametrize-based spectral_norm.

    Checks the expected key set, non-strict loading tolerance to extra and
    progressively-deleted keys, and that a saved state reloaded after a
    remove/re-register cycle reproduces the same train/eval outputs.
    """
    for activate_times in (0, 3):
        inp = torch.randn(2, 3)
        m = nn.Linear(3, 5)
        snm = torch.nn.utils.parametrizations.spectral_norm(m)
        snm.train()

        # optionally run some power-iteration steps before snapshotting
        for _ in range(activate_times):
            snm(inp)

        state_dict = deepcopy(snm.state_dict())
        self.assertEqual({
            'parametrizations.weight.original',
            'bias',
            'parametrizations.weight.0._v',
            'parametrizations.weight.0._u'
        }, set(state_dict.keys()))

        # test that non-strict loading works
        non_strict_state_dict = deepcopy(state_dict)
        non_strict_state_dict['nonsense'] = 'nonsense'
        with self.assertRaisesRegex(RuntimeError, r'Unexpected key\(s\) in state_dict: "nonsense"'):
            snm.load_state_dict(non_strict_state_dict, strict=True)
        snm.load_state_dict(non_strict_state_dict, strict=False)
        del non_strict_state_dict['parametrizations.weight.original']
        snm.load_state_dict(non_strict_state_dict, strict=False)
        del non_strict_state_dict['parametrizations.weight.0._u']
        snm.load_state_dict(non_strict_state_dict, strict=False)
        del non_strict_state_dict['parametrizations.weight.0._v']
        snm.load_state_dict(non_strict_state_dict, strict=False)
        non_strict_state_dict['weight'] = snm.weight.detach().clone()  # set W as a buffer
        snm.load_state_dict(non_strict_state_dict, strict=False)
        del non_strict_state_dict._metadata['parametrizations.weight.0']  # remove metadata info
        snm.load_state_dict(non_strict_state_dict, strict=False)
        del non_strict_state_dict['weight']  # remove W buffer
        snm.load_state_dict(non_strict_state_dict, strict=False)
        del non_strict_state_dict['bias']
        snm.load_state_dict(non_strict_state_dict, strict=False)

        # normal state_dict

        # test that re-wrapping does not matter
        m = torch.nn.utils.parametrize.remove_parametrizations(snm, 'weight')
        snm = torch.nn.utils.parametrizations.spectral_norm(m)

        snm.load_state_dict(state_dict)
        with torch.no_grad():
            snm.eval()
            out0_eval = snm(inp)
            snm.train()
            out1_train = snm(inp)
            out2_train = snm(inp)
            snm.eval()
            out3_eval = snm(inp)

        # test that re-wrapping does not matter
        m = torch.nn.utils.parametrize.remove_parametrizations(snm, 'weight')
        snm = torch.nn.utils.parametrizations.spectral_norm(m)

        # Test normal loading
        snm.load_state_dict(state_dict)
        with torch.no_grad():
            snm.eval()
            self.assertEqual(out0_eval, snm(inp))
            snm.train()
            self.assertEqual(out1_train, snm(inp))
            self.assertEqual(out2_train, snm(inp))
            snm.eval()
            self.assertEqual(out3_eval, snm(inp))
@skipIfNoLapack
def test_spectral_norm_load_state_dict(self):
    """state_dict round trips and version migration for legacy spectral_norm.

    Checks the expected key set, non-strict loading tolerance, loading a
    hand-crafted "version None" state dict (pre-v1 layout: weight as buffer,
    no `weight_v`), and loading a latest-version dict with and without its
    version metadata, all reproducing the same train/eval outputs.
    """
    inp = torch.randn(2, 3)
    for activate_times in (0, 3):
        # Test backward compatibility
        # At version None -> 1: weight becomes not a buffer and v vector becomes a buffer
        m = nn.Linear(3, 5)
        snm = torch.nn.utils.spectral_norm(m)
        snm.train()
        for _ in range(activate_times):
            snm(inp)

        version_latest_ref_state_dict = deepcopy(snm.state_dict())
        self.assertEqual({'weight_orig', 'bias', 'weight_u', 'weight_v'}, set(version_latest_ref_state_dict.keys()))

        # test that non-strict loading works
        non_strict_state_dict = deepcopy(version_latest_ref_state_dict)
        non_strict_state_dict['nonsense'] = 'nonsense'
        with self.assertRaisesRegex(RuntimeError, r'Unexpected key\(s\) in state_dict: "nonsense"'):
            snm.load_state_dict(non_strict_state_dict, strict=True)
        snm.load_state_dict(non_strict_state_dict, strict=False)
        del non_strict_state_dict['weight_orig']
        snm.load_state_dict(non_strict_state_dict, strict=False)
        del non_strict_state_dict['weight_u']
        snm.load_state_dict(non_strict_state_dict, strict=False)
        del non_strict_state_dict['weight_v']
        snm.load_state_dict(non_strict_state_dict, strict=False)
        non_strict_state_dict['weight'] = snm.weight.detach().clone()  # set W as a buffer
        snm.load_state_dict(non_strict_state_dict, strict=False)
        del non_strict_state_dict._metadata['']['spectral_norm']  # remove metadata info
        snm.load_state_dict(non_strict_state_dict, strict=False)
        del non_strict_state_dict['weight']  # remove W buffer
        snm.load_state_dict(non_strict_state_dict, strict=False)
        del non_strict_state_dict['bias']
        snm.load_state_dict(non_strict_state_dict, strict=False)

        # craft a version None state_dict
        version_none_state_dict = deepcopy(version_latest_ref_state_dict)
        self.assertIn('spectral_norm', version_none_state_dict._metadata[''])
        del version_none_state_dict._metadata['']['spectral_norm']  # remove metadata info
        del version_none_state_dict['weight_v']  # remove v vector
        version_none_state_dict['weight'] = snm.weight.detach().clone()  # set W as a buffer

        # normal state_dict
        for version_latest_with_metadata in [True, False]:
            version_latest_state_dict = deepcopy(version_latest_ref_state_dict)

            if not version_latest_with_metadata:
                # We want to still load a user-crafted state_dict, one without metadata
                del version_latest_state_dict._metadata['']['spectral_norm']

            # test that re-wrapping does not matter
            m = torch.nn.utils.remove_spectral_norm(snm)
            snm = torch.nn.utils.spectral_norm(m)

            snm.load_state_dict(version_latest_ref_state_dict)
            with torch.no_grad():
                snm.eval()
                out0_eval = snm(inp)
                snm.train()
                out1_train = snm(inp)
                out2_train = snm(inp)
                snm.eval()
                out3_eval = snm(inp)

            # test that re-wrapping does not matter
            m = torch.nn.utils.remove_spectral_norm(snm)
            snm = torch.nn.utils.spectral_norm(m)

            snm.load_state_dict(version_none_state_dict)
            if activate_times > 0:
                # since in loading version None state dict, we assume that the
                # values in the state dict have gone through at lease one
                # forward, we only test for equivalence when activate_times > 0.
                with torch.no_grad():
                    snm.eval()
                    self.assertEqual(out0_eval, snm(inp))
                    snm.train()
                    self.assertEqual(out1_train, snm(inp))
                    self.assertEqual(out2_train, snm(inp))
                    snm.eval()
                    self.assertEqual(out3_eval, snm(inp))

            # test that re-wrapping does not matter
            m = torch.nn.utils.remove_spectral_norm(snm)
            snm = torch.nn.utils.spectral_norm(m)

            # Test normal loading
            snm.load_state_dict(version_latest_state_dict)
            with torch.no_grad():
                snm.eval()
                self.assertEqual(out0_eval, snm(inp))
                snm.train()
                self.assertEqual(out1_train, snm(inp))
                self.assertEqual(out2_train, snm(inp))
                snm.eval()
                self.assertEqual(out3_eval, snm(inp))
def test_spectral_norm_dim(self):
inp = torch.randn(2, 3, 10, 12)
m = nn.ConvTranspose2d(3, 4, (5, 6))
m = torch.nn.utils.spectral_norm(m)
# this should not run into incompatible shapes
x = m(inp)
# check that u refers to the same dimension
self.assertEqual(m.weight_u.shape, m.weight_orig[0, :, 0, 0].shape)
def test_new_spectral_norm_dim(self):
inp = torch.randn(2, 3, 10, 12)
m = nn.ConvTranspose2d(3, 4, (5, 6))
m = torch.nn.utils.parametrizations.spectral_norm(m)
snm = m.parametrizations.weight[0]
# this should not run into incompatible shapes
x = m(inp)
# check that u refers to the same dimension
self.assertEqual(snm._u.shape, m.parametrizations.weight.original[0, :, 0, 0].shape)
def test_spectral_norm_forward(self):
input = torch.randn(3, 5)
m = nn.Linear(5, 7)
m = torch.nn.utils.spectral_norm(m)
# naive forward
_weight, _bias, _u = m.weight_orig, m.bias, m.weight_u
_weight_mat = _weight.view(_weight.size(0), -1)
_v = torch.mv(_weight_mat.t(), _u)
_v = F.normalize(_v, dim=0, eps=1e-12)
_u = torch.mv(_weight_mat, _v)
_u = F.normalize(_u, dim=0, eps=1e-12)
_weight.data /= torch.dot(_u, torch.matmul(_weight_mat, _v))
out_hat = torch.nn.functional.linear(input, _weight, _bias)
expect_out = m(input)
self.assertEqual(expect_out, out_hat)
def test_new_spectral_norm_forward(self):
input = torch.randn(3, 5)
m = nn.Linear(5, 7)
m = torch.nn.utils.parametrizations.spectral_norm(m)
snm = m.parametrizations.weight[0]
# naive forward
_weight = m.parametrizations.weight.original
_bias, _v = m.bias, snm._v
_weight_mat = _weight.view(_weight.size(0), -1)
_u = torch.mv(_weight_mat, _v)
_u = F.normalize(_u, dim=0, eps=1e-12)
_v = torch.mv(_weight_mat.t(), _u)
_v = F.normalize(_v, dim=0, eps=1e-12)
_weight.data /= torch.dot(_u, torch.matmul(_weight_mat, _v))
out_hat = torch.nn.functional.linear(input, _weight, _bias)
expect_out = m(input)
self.assertEqual(expect_out, out_hat)
def test_spectral_norm_pickle(self):
m = torch.nn.utils.spectral_norm(nn.Linear(5, 7))
m = pickle.loads(pickle.dumps(m))
self.assertIsInstance(m, nn.Linear)
@skipIfNoLapack
def test_orthogonal_parametrization(self):
    """Exercise all orthogonal-parametrization variants on Linear weights.

    For each shape (square/tall/wide), dtype (real/complex), parametrization
    ("matrix_exp", "cayley", "householder") and use_trivialization flag:
    check the forward is orthogonal, initialization via right_inverse (where
    implemented), error behavior where not, and that SGD steps keep the weight
    orthogonal with the expected gradient sparsity on the skew-Hermitian base.
    """
    # Orthogonal implements 6 algorithms (3x parametrizations times 2 options of use_trivialization)

    def assert_is_orthogonal(X):
        # columns (or rows, for wide X) must be orthonormal up to a dtype-scaled eps
        n, k = X.size(-2), X.size(-1)
        if n < k:
            X = X.mT
            n, k = k, n
        Id = torch.eye(k, dtype=X.dtype, device=X.device).expand(*(X.size()[:-2]), k, k)
        eps = 10 * n * torch.finfo(X.dtype).eps
        torch.testing.assert_allclose(X.mH @ X, Id, atol=eps, rtol=0.)

    def assert_weight_allclose_Q(weight, W):
        # Test that weight is equal to the Q part of the QR decomposition of W
        # (or of its transpose if the matrix is wide)
        wide_matrix = W.size(-2) < W.size(-1)
        if wide_matrix:
            W = W.mT
        Q, R = torch.linalg.qr(W)
        # sign-fix Q so the comparison is well-defined (QR is unique up to signs)
        Q *= R.diagonal(dim1=-2, dim2=-1).sgn().unsqueeze(-2)
        if wide_matrix:
            Q = Q.mT
        torch.testing.assert_allclose(Q, weight, atol=1e-5, rtol=0.)

    for shape, dtype, use_linear in product(((4, 4), (5, 3), (3, 5)),  # square/ tall / wide
                                            (torch.float32, torch.complex64),
                                            (True, False)):
        # Conv2d does not support complex yet
        if not use_linear:
            continue

        if use_linear:
            input = torch.randn(3, shape[0], dtype=dtype)
        else:
            input = torch.randn(2, 2, shape[0] + 2, shape[1] + 1, dtype=dtype)

        for parametrization, use_trivialization in product(("matrix_exp", "cayley", "householder"),
                                                           (False, True)):
            # right_inverse for Cayley and matrix_exp not implemented for use_trivialization=False
            # See Note [right_inverse expm cayley]
            can_initialize = use_trivialization or parametrization == "householder"

            # We generate them every time to always start with fresh weights
            if use_linear:
                m = nn.Linear(*shape, dtype=dtype)
            else:
                m = nn.Conv2d(2, 3, shape, dtype=dtype)

            # We do not support householder for complex inputs
            # See Note [Householder complex]
            w_init = m.weight.clone()
            if parametrization == "householder" and m.weight.is_complex():
                msg = "householder parametrization does not support complex tensors"
                with self.assertRaisesRegex(ValueError, msg):
                    torch.nn.utils.parametrizations.orthogonal(m,
                                                               "weight",
                                                               parametrization,
                                                               use_trivialization=use_trivialization)
                continue

            wide_matrix = w_init.size(-2) < w_init.size(-1)
            torch.nn.utils.parametrizations.orthogonal(m,
                                                       "weight",
                                                       parametrization,
                                                       use_trivialization=use_trivialization)
            # Forwards works as expected
            self.assertEqual(w_init.shape, m.weight.shape)
            assert_is_orthogonal(m.weight)
            if can_initialize:
                assert_weight_allclose_Q(m.weight, w_init)

            # Intializing with a given orthogonal matrix works
            X = torch.randn_like(m.weight)
            if wide_matrix:
                X = X.mT
            w_new = torch.linalg.qr(X).Q
            if wide_matrix:
                w_new = w_new.mT
            if can_initialize:
                m.weight = w_new
                torch.testing.assert_allclose(w_new, m.weight, atol=1e-5, rtol=0.)
            else:
                msg = "assign to the matrix exponential or the Cayley parametrization"
                with self.assertRaisesRegex(NotImplementedError, msg):
                    m.weight = w_new

            # Intializing with a non-orthogonal matrix makes m.weight be the Q part of the given matrix
            w_new = torch.randn_like(m.weight)
            if can_initialize:
                m.weight = w_new
                assert_weight_allclose_Q(m.weight, w_new)
            else:
                msg = "assign to the matrix exponential or the Cayley parametrization"
                with self.assertRaisesRegex(NotImplementedError, msg):
                    m.weight = w_new

            opt = torch.optim.SGD(m.parameters(), lr=0.1)
            for _ in range(2):
                opt.zero_grad()
                m(input).norm().backward()
                grad = m.parametrizations.weight.original.grad
                self.assertIsNotNone(grad)
                # We do not update the upper triangular part of the matrix if tall tril if wide
                if grad.size(-2) >= grad.size(-1):
                    zeros_grad = grad.triu(1)
                else:
                    zeros_grad = grad.tril(-1)
                self.assertEqual(zeros_grad, torch.zeros_like(zeros_grad))
                # The gradient in the diagonal can only be imaginary because a skew-Hermitian
                # matrix has imaginary diagonal
                diag_grad = grad.diagonal(dim1=-2, dim2=-1)
                if grad.is_complex():
                    diag_grad = diag_grad.real
                self.assertEqual(diag_grad, torch.zeros_like(diag_grad))
                opt.step()
                assert_is_orthogonal(m.weight)
@skipIfNoLapack
def test_orthogonal_errors(self):
    """Invalid orthogonal-parametrization usages must raise descriptive ValueErrors."""
    module = nn.Linear(3, 4)

    # unknown parametrization name
    with self.assertRaisesRegex(ValueError, "has to be one of"):
        torch.nn.utils.parametrizations.orthogonal(module, "weight", "foo")

    # a 1-D parameter cannot be orthogonalized
    with self.assertRaisesRegex(ValueError, "Expected a matrix"):
        torch.nn.utils.parametrizations.orthogonal(module, "bias")

    torch.nn.utils.parametrizations.orthogonal(module, "weight")
    # assigning a wrongly-shaped matrix through the parametrization must fail
    with self.assertRaisesRegex(ValueError, "matrices of shape"):
        module.weight = torch.randn(5, 5)
    torch.nn.utils.parametrize.remove_parametrizations(module, "weight")
def test_threshold_int(self):
x = torch.tensor([-3, -2, -1, 0, 1, 2, 3])
expected = torch.tensor([99, 99, 99, 99, 1, 2, 3])
self.assertEqual(F.threshold(x, 0, 99), expected)
def test_threshold_bfloat16(self):
x = torch.randn(100)
for threshold in [0, -0.5, 0.5, float('inf'), float('-inf'), float('nan')]:
expected = F.threshold(x, threshold, 0).bfloat16().float()
res_bf16 = F.threshold(x.bfloat16(), threshold, 0).float()
self.assertEqual(res_bf16, expected)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_embedding_max_norm_unsorted_repeating_indices(self):
    """max_norm renormalization must give identical results on CPU and CUDA for
    unsorted, heavily repeating index tensors."""
    def make_embedding(device):
        # Seed RNG so we get the same Embedding each time
        torch.manual_seed(0)
        return torch.nn.Embedding(
            num_embeddings=20,
            embedding_dim=64,
            max_norm=1.0).to(device)

    indices = torch.arange(2, device='cpu', dtype=torch.long).repeat(2000)
    reference = make_embedding('cpu')(indices)
    result = make_embedding('cuda')(indices.to('cuda'))
    self.assertEqual(result.cpu(), reference)
def test_embedding_sparse_basic(self):
embedding = nn.Embedding(10, 20, sparse=True)
input = torch.tensor([[0, 2, 4, 5], [4, 3, 0, 9]], dtype=torch.long)
embedding(input).sum().backward()
self.assertTrue(embedding.weight.grad.is_sparse)
self.assertEqual(embedding.weight.grad.shape, embedding.weight.shape)
def test_embedding_sparse_empty_tensor(self):
embedding = nn.Embedding(0, 0, sparse=True)
input = torch.tensor([], dtype=torch.int64)
embedding(input).sum().backward()
self.assertTrue(embedding.weight.grad.is_sparse)
self.assertEqual(embedding.weight.grad.shape, embedding.weight.shape)
embedding = nn.Embedding(10, 0, sparse=True)
input = torch.LongTensor([[0, 2, 4, 5], [4, 3, 0, 9]])
embedding(input).sum().backward()
self.assertTrue(embedding.weight.grad.is_sparse)
self.assertEqual(embedding.weight.grad.shape, embedding.weight.shape)
def test_move_sparse_half_embedding(self):
embedding = nn.Embedding(10, 3, sparse=True)
self.assertEqual(embedding.weight.device.type, 'cpu')
self.assertEqual(embedding.weight.dtype, torch.float64)
embedding.to(torch.float16)
self.assertEqual(embedding.weight.dtype, torch.float16)
self.assertEqual(embedding.embedding_dim, 3)
self.assertEqual(embedding.num_embeddings, 10)
if torch.cuda.is_available():
embedding.to('cuda')
self.assertEqual(embedding.weight.device.type, 'cuda')
embedding.to('cpu')
self.assertEqual(embedding.weight.device.type, 'cpu')
def test_embedding_max_norm(self):
embedding = nn.Embedding(22, 5, max_norm=1.0)
input = torch.tensor([2, 8, 8, 6], dtype=torch.long)
output = embedding(input)
self.assertEqual(output[1], output[2])
self.assertTrue(output.data.norm(p=2, dim=1).le(1).all())
def test_embedding_from_pretrained(self):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
embedding = nn.Embedding.from_pretrained(a)
self.assertEqual(a, embedding.weight.data)
input = torch.LongTensor([0, 1])
output = embedding(input)
self.assertEqual(a, output)
def test_embedding_bag_from_pretrained(self):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
embedding = nn.EmbeddingBag.from_pretrained(a)
self.assertEqual(a, embedding.weight)
input = torch.tensor([0, 1], dtype=torch.long)
output = embedding(input, torch.arange(input.size(0)))
self.assertEqual(a, output)
def test_embedding_from_pretrained_padding_idx(self):
padding_idx = 2
padding_vec = torch.ones(3) * 7
embeddings = torch.rand(4, 3, requires_grad=True)
with torch.no_grad():
embeddings[padding_idx] = padding_vec
embedding_nn = nn.Embedding.from_pretrained(embeddings, padding_idx=padding_idx)
self.assertEqual(embedding_nn.weight[padding_idx], padding_vec)
def test_embedding_bag_from_pretrained_padding_idx(self):
padding_idx = 2
embeddings = torch.rand(4, 3, requires_grad=True)
embedding_nn = nn.EmbeddingBag.from_pretrained(embeddings, padding_idx=padding_idx)
self.assertEqual(embedding_nn.weight, embeddings)
def test_embedding_from_pretrained_options(self):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
opts = {
"max_norm": 2.,
"norm_type": .5,
"scale_grad_by_freq": False,
"sparse": True
}
embedding = nn.Embedding.from_pretrained(a, **opts)
input = torch.LongTensor([0, 1])
output = embedding(input)
# test output and that weight matrix was renormalized
self.assertEqual(a, output)
self.assertTrue(a.ne(torch.arange(1, 7, dtype=a.dtype).view(2, 3)).all())
self.assertTrue(output.data.norm(p=opts["norm_type"], dim=1).le(opts["max_norm"]).all())
def test_embedding_functional(self):
a = torch.tensor([
[1, 3, 2],
[0, 2, 1]
], dtype=torch.long)
embeddings = torch.rand(4, 3, requires_grad=True)
embed_old = torch.nn.Embedding(4, 3)
embed_old.weight.data = embeddings.data
res_old = embed_old(a)
res_F = F.embedding(a, embeddings)
self.assertEqual(res_old, res_F)
embed_old = torch.nn.Embedding(4, 3)
embed_old = embed_old.from_pretrained(embeddings, padding_idx=2)
res_old = embed_old(a)
res_F = F.embedding(a, embeddings, padding_idx=2)
self.assertEqual(res_old, res_F)
def test_embedding_bag_functional(self):
a = torch.tensor([
[1, 3, 2],
[0, 2, 1]
], dtype=torch.long)
embeddings = torch.rand(4, 3, requires_grad=True)
embed_old = torch.nn.EmbeddingBag(4, 3)
embed_old.weight = torch.nn.Parameter(embeddings)
res_old = embed_old(a)
res_F = F.embedding_bag(a, embeddings)
self.assertEqual(res_old, res_F)
embed_old = torch.nn.EmbeddingBag(4, 3)
embed_old = embed_old.from_pretrained(embeddings, padding_idx=2)
res_old = embed_old(a)
res_F = F.embedding_bag(a, embeddings, padding_idx=2)
self.assertEqual(res_old, res_F)
# Make sure that error is thrown if padding_idx is out of bounds
def test_embedding_bag_padding_idx_error(self):
a = torch.tensor([
[1, 3, 2],
[0, 2, 1]
], dtype=torch.long)
num_embeddings = 4
num_features = 3
embeddings = torch.rand(num_embeddings, num_features, requires_grad=True)
functional_err_msg = r'padding_idx must be within the number of embeddings'
module_err_msg = r'padding_idx must be within num_embeddings'
for padding_idx in range(-(num_embeddings + 2), (num_embeddings + 2)):
if (padding_idx < -num_embeddings) or (padding_idx >= num_embeddings):
with self.assertRaisesRegex(RuntimeError, functional_err_msg):
F.embedding_bag(a, embeddings, padding_idx=padding_idx)
with self.assertRaisesRegex(AssertionError, module_err_msg):
torch.nn.EmbeddingBag(num_embeddings, num_features, padding_idx=padding_idx)
else:
F.embedding_bag(a, embeddings, padding_idx=padding_idx)
torch.nn.EmbeddingBag(num_embeddings, num_features, padding_idx=padding_idx)
@unittest.skipUnless('fbgemm' in torch.backends.quantized.supported_engines,
'Linear_FP16_weight requires FBGEMM. FBGEMM is only optimized for CPUs'
' with instruction set support avx2 or newer.')
def test_fb_fc_packed(self):
X = np.random.rand(16, 16).astype(np.float32) - 0.5
W = np.random.rand(16, 16).astype(np.float32) - 0.5
b = np.random.rand(16).astype(np.float32) - 0.5
def fc_op(X, W, b):
return np.dot(X, W.T) + b
x_tensor = torch.tensor(X)
w_tensor = torch.tensor(W)
b_tensor = torch.tensor(b)
packed_w_tensor = torch.fbgemm_pack_gemm_matrix_fp16(w_tensor)
actual_output = torch.fbgemm_linear_fp16_weight(x_tensor, packed_w_tensor, b_tensor)
expected_output = fc_op(X, W, b)
torch.testing.assert_close(torch.from_numpy(expected_output), actual_output.cpu(), atol=1e-3, rtol=1e-3)
def test_embeddingbag_from_pretrained(self):
    """EmbeddingBag.from_pretrained must adopt the given weights verbatim and
    mean-pool the selected rows by default."""
    weights = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
    bag = nn.EmbeddingBag.from_pretrained(weights)
    # The pretrained matrix becomes the module's weight unchanged.
    self.assertEqual(weights, bag.weight.data)
    idx = torch.LongTensor([[0, 1]])
    pooled = bag(idx)
    # Default mode is "mean": the output is the average of rows 0 and 1.
    self.assertEqual(weights.mean(0, keepdim=True), pooled)
def test_embeddingbag_from_pretrained_options(self):
    """EmbeddingBag.from_pretrained must honour pooling/normalization kwargs
    (max_norm renormalizes accessed rows in place; mode='max' max-pools)."""
    weights = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
    opts = {
        "max_norm": 2.,
        "norm_type": .5,
        "scale_grad_by_freq": False,
        "mode": "max",
        "sparse": False
    }
    bag = nn.EmbeddingBag.from_pretrained(weights, **opts)
    idx = torch.LongTensor([[0, 1]])
    pooled = bag(idx)
    # mode="max" pools element-wise maxima over the selected rows.
    self.assertEqual(weights.max(0, keepdim=True)[0], pooled)
    # max_norm rewrote the accessed rows in place, so the original values
    # 1..6 must no longer all be present ...
    self.assertTrue(weights.ne(torch.arange(1, 7, dtype=weights.dtype).view(2, 3)).all())
    # ... and every row's norm (under norm_type) must obey the max_norm bound.
    self.assertTrue(weights.norm(p=opts["norm_type"], dim=1).le(opts["max_norm"]).all())
def test_AlphaDropout(self):
    """Exercise nn.AlphaDropout via the shared alpha-dropout helper, which
    checks that the output keeps (approximately) zero mean / unit std."""
    # generate random tensor with zero mean and unit std
    input = torch.randn(5000)
    self._test_alpha_dropout(nn.AlphaDropout, input)
def test_FeatureAlphaDropout(self):
    """Exercise nn.FeatureAlphaDropout on randomized 5-D shapes and on a
    4-D input, via the shared alpha-dropout helper."""
    # Randomize the non-feature dimensions to cover a variety of shapes.
    b = random.randint(1, 5)
    w = random.randint(1, 5)
    h = random.randint(1, 5)
    d = random.randint(1, 2)
    num_features = 1000
    input = torch.randn(num_features, b, d, w, h)
    self._test_alpha_dropout(nn.FeatureAlphaDropout, input)
    # no batch dims
    input = torch.randn(50, 20, 64, 64)
    self._test_alpha_dropout(nn.FeatureAlphaDropout, input)
def test_pad_scalar_error(self):
    """F.pad must reject 0-dim (scalar) inputs for any pad specification."""
    scalar = torch.tensor(0., requires_grad=True)
    for pad_spec in ((1, 1), (1,)):
        with self.assertRaises(RuntimeError):
            F.pad(scalar, pad_spec)
def test_nested_tensor_from_mask(self):
    """Round-trip a padded tensor through _nested_tensor_from_mask /
    to_padded_tensor and check the unpadded region is preserved."""
    N, L, D = 10, 12, 14
    input = torch.rand(N, L, D)
    mask = torch.ones(N, L, dtype=torch.bool)
    # Leave first row be all True to maintain the nt's size unchanged
    for i in range(1, N):
        # Each remaining row keeps a random non-empty prefix; the suffix is padding.
        end = torch.randint(1, L, size=()).item()
        mask[i, end:] = False
    nt = torch.nested_tensor_from_mask(input, mask) if False else torch._nested_tensor_from_mask(input, mask)
    input_convert = nt.to_padded_tensor(0.)
    # Zero out the padded positions of the original so it is comparable with
    # the zero-padded round-trip result.
    input.masked_fill_(mask.reshape(N, L, 1).logical_not(), 0.)
    self.assertEqual(input, input_convert)
def test_nested_tensor_from_mask_error(self):
    """_nested_tensor_from_mask must reject malformed input/mask combinations."""
    N, L, D = 10, 12, 14

    def expect_failure(inp, m):
        # Every invalid combination below must raise a RuntimeError.
        with self.assertRaises(RuntimeError):
            torch._nested_tensor_from_mask(inp, m)

    dense = torch.rand(N, L, D)
    # Mask is not bool
    expect_failure(dense, torch.zeros(N, L, dtype=torch.float))
    # Mask size is not 2
    expect_failure(dense, torch.zeros(N, L, D, dtype=torch.bool))
    # Input size is not 3
    expect_failure(torch.rand(N, L), torch.zeros(N, L, dtype=torch.bool))
    # Mask size does not match input
    expect_failure(dense, torch.zeros(N + 1, L + 1, dtype=torch.bool))
    # Mask is not padding format (holes in the middle of a row)
    holey = torch.ones(N, L, dtype=torch.bool)
    holey[0, 0] = False
    holey[0, 2] = False
    expect_failure(dense, holey)
@unittest.skipIf(not TEST_NUMPY, "numpy not found")
@parametrize_test("average_attn_weights", [True, False])
def test_multihead_attention(self, average_attn_weights):
    """Compare F.multi_head_attention_forward (driven by an nn.MultiheadAttention
    module's parameters) against a from-scratch NumPy reference implementation,
    across combinations of key-padding masks, attn masks, bias_kv, zero_attn,
    static (saved) k/v, separate kdim/vdim, and byte masks."""
    def _scaled_dot_attn_ref(Q, K, V, dims, unseen_mask=None, key_padding_mask=None,
                             average_attn_weights=average_attn_weights):
        """ Numpy-based reference implementation of scaled dot attention
        for testing"""
        QKT = _batchmatmul(
            Q,
            np.transpose(K, axes=[0, 1, 3, 2])
            / np.sqrt(dims[3], dtype=np.float32),  # divide by sqrt(d_head)
        )
        b1, b2, s1, s2 = QKT.shape
        if unseen_mask is not None or key_padding_mask is not None:
            # assert s1 == s2
            # Apply both masks by setting masked logits to -inf before softmax.
            for i in range(b1):
                for j in range(b2):
                    for m in range(s1):
                        for n in range(s2):
                            if unseen_mask is not None and unseen_mask[m][n] == 0:
                                QKT[i, j, m, n] = -np.inf
                            if key_padding_mask is not None and key_padding_mask[i][n]:
                                QKT[i, j, m, n] = -np.inf
        reference = _softmax(QKT)
        ref_attn_weight = reference
        if average_attn_weights:
            # Average the attention weights over the head axis (size b2).
            ref_attn_weight = np.sum(ref_attn_weight, axis=1) / b2
        reference = _batchmatmul(reference, V)
        return reference, ref_attn_weight

    def _batchmatmul(a, b):  # batchmatmul over 4 dim matrix
        """ Numpy-based batch matrix multiply over 4 dim matrix"""
        assert a.shape[0] == b.shape[0]
        assert a.shape[1] == b.shape[1]
        retval = np.zeros(
            (a.shape[0], a.shape[1], a.shape[2], b.shape[3]), dtype=np.float32
        )
        for i in range(a.shape[0]):
            for j in range(a.shape[1]):
                retval[i, j, :, :] = np.matmul(a[i, j, :, :], b[i, j, :, :])
        return retval

    def _softmax(x):  # softmax over 4 dim matrix
        """ Numpy-based reference softmax over 4 dim matrix"""
        # -inf logits (from masking) can produce invalid-op warnings; ignore them.
        np.seterr(invalid='ignore')
        output = np.zeros(x.shape, dtype=np.float64)
        for i in range(x.shape[0]):
            for j in range(x.shape[1]):
                for k in range(x.shape[2]):
                    x_curr = x[i, j, k, :]
                    # Subtract the max for numerical stability.
                    e_x = np.exp(x_curr - np.amax(x_curr))
                    output[i, j, k, :] = e_x / np.sum(e_x)
        return output

    def _split_heads_ref(X, dims, nheads, d_head):
        # [batch, seq, nheads*d_head] -> [batch, nheads, seq, d_head]
        X_split = np.reshape(X, dims[:2] + [nheads, d_head])
        X_split_transposed = np.transpose(X_split, [0, 2, 1, 3])
        reference = np.reshape(X_split_transposed, [dims[0], nheads, dims[1], d_head])
        return reference

    def _combine_heads_ref(X, dims, nheads, d_head):
        # [batch, nheads, seq, d_head] -> [batch, seq, nheads*d_head]
        X_transposed = np.transpose(X, [0, 2, 1, 3])
        reference = np.reshape(X_transposed, dims[:2] + [nheads * d_head])
        return reference

    def _fc(X, X_weight, X_bias):
        # NumPy linear layer using a torch weight/bias pair.
        X_fc_b = X_bias.detach().numpy()
        X_fc_w = X_weight.detach().numpy()
        return np.matmul(X, np.transpose(X_fc_w)) + X_fc_b

    def _create_src_lengths_mask(batch_size, src_lengths):
        """
        Generate boolean mask to prevent attention beyond the end of source
        Inputs:
          batch_size : int
          src_lengths : [batch_size] of sentence lengths
        Outputs:
          [batch_size, max_src_len]
        """
        max_srclen = src_lengths.max()
        src_indices = torch.arange(0, max_srclen).unsqueeze(0).to(src_lengths)
        src_indices = src_indices.expand(batch_size, max_srclen)
        src_lengths = src_lengths.unsqueeze(dim=1).expand(batch_size, max_srclen)
        # returns [batch_size, max_seq_len]
        return (src_indices < src_lengths).int().detach()

    def _multihead_attn_test_helper(add_key_padding_mask=False, add_bias_kv=False, add_zero_attn=False,
                                    saved_kv=False, same_embed_dim=False, byte_mask=False,
                                    average_attn_weights=average_attn_weights):
        # Repeat with freshly randomized shapes/masks to cover many configurations.
        for _ in range(100):
            batch_sz, seq_len = [random.randint(2, 10) for r in range(2)]
            d_head = random.randint(3, 10)
            nheads = random.randint(3, 10)
            d_model = d_head * nheads
            if same_embed_dim:
                kv_dim = d_model
            else:
                kv_dim = random.randint(5, 20)
            dims = [batch_sz, seq_len, kv_dim]
            saved_k = None
            saved_k_tensor = None
            saved_v = None
            saved_v_tensor = None
            if saved_kv:
                # Static (pre-projected) k/v, already in [batch*heads, seq, d_head] layout.
                saved_k = np.random.rand(batch_sz * nheads, seq_len, d_head)
                saved_k_tensor = torch.from_numpy(saved_k).to(torch.get_default_dtype())
                saved_v = np.random.rand(batch_sz * nheads, seq_len, d_head)
                saved_v_tensor = torch.from_numpy(saved_v).to(torch.get_default_dtype())
            key_padding_mask = None
            key_padding_mask_tensor = None
            if add_key_padding_mask:
                # Same padding pattern for every batch element.
                seq_mask = np.random.randint(0, 2, (1, seq_len))
                key_padding_mask = (np.repeat(seq_mask, batch_sz, axis=0) == 1)
                key_padding_mask_tensor = torch.from_numpy(key_padding_mask)
                if byte_mask:
                    key_padding_mask_tensor = key_padding_mask_tensor.byte()
            decoder_state = np.random.rand(batch_sz, d_model)
            K = np.random.rand(*dims)
            V = K
            Q = np.expand_dims(decoder_state, 1)
            attn_mask = np.random.randint(0 , 2, size=(1, seq_len))
            attn_mask_tensor = torch.from_numpy(attn_mask).float()
            if byte_mask:
                attn_mask_tensor = (attn_mask_tensor == 0).byte()
            else:
                # Additive float mask: 0 -> -inf (blocked), >0 -> 0.0 (allowed).
                attn_mask_tensor.masked_fill_(attn_mask_tensor == 0, float('-inf'))
                attn_mask_tensor.masked_fill_(attn_mask_tensor > 0, float('0.0'))
                attn_mask_tensor = attn_mask_tensor.double()
            decoder_state_tensor = torch.from_numpy(decoder_state).to(torch.get_default_dtype())
            source_hid_tensor = torch.from_numpy(K).to(torch.get_default_dtype()).transpose(0, 1)
            multihead_attn_module = MultiheadAttention(d_model, nheads,
                                                       add_bias_kv=add_bias_kv,
                                                       add_zero_attn=add_zero_attn,
                                                       kdim=kv_dim, vdim=kv_dim)
            if add_bias_kv:
                bias_k = multihead_attn_module.bias_k.detach().numpy()
                bias_v = multihead_attn_module.bias_v.detach().numpy()
            else:
                bias_k = None
                bias_v = None
            # Inputs in (seq, batch, feature) layout for the functional API.
            _Q = decoder_state_tensor.unsqueeze(1).transpose(0, 1)
            _V = source_hid_tensor
            _K = source_hid_tensor
            if multihead_attn_module._qkv_same_embed_dim:
                result, result_weight = torch.nn.functional.multi_head_attention_forward(
                    _Q, _K, _V,
                    d_model, nheads,
                    multihead_attn_module.in_proj_weight, multihead_attn_module.in_proj_bias,
                    multihead_attn_module.bias_k, multihead_attn_module.bias_v,
                    multihead_attn_module.add_zero_attn, multihead_attn_module.dropout,
                    multihead_attn_module.out_proj.weight, multihead_attn_module.out_proj.bias,
                    multihead_attn_module.training, key_padding_mask_tensor, True, attn_mask_tensor,
                    static_k=saved_k_tensor, static_v=saved_v_tensor,
                    average_attn_weights=average_attn_weights)
            else:
                # Separate q/k/v projection weights when kdim/vdim != embed_dim.
                result, result_weight = torch.nn.functional.multi_head_attention_forward(
                    _Q, _K, _V,
                    d_model, nheads,
                    None, multihead_attn_module.in_proj_bias,
                    multihead_attn_module.bias_k, multihead_attn_module.bias_v,
                    multihead_attn_module.add_zero_attn, multihead_attn_module.dropout,
                    multihead_attn_module.out_proj.weight, multihead_attn_module.out_proj.bias,
                    multihead_attn_module.training, key_padding_mask_tensor, True, attn_mask_tensor,
                    True, multihead_attn_module.q_proj_weight,
                    multihead_attn_module.k_proj_weight, multihead_attn_module.v_proj_weight,
                    static_k=saved_k_tensor, static_v=saved_v_tensor,
                    average_attn_weights=average_attn_weights)
            result = result.squeeze(0).detach().numpy()
            if multihead_attn_module._qkv_same_embed_dim:
                # Packed in_proj_weight holds [q; k; v] stacked along dim 0.
                q_proj_weight = multihead_attn_module.in_proj_weight[:d_model]
                k_proj_weight = multihead_attn_module.in_proj_weight[d_model:(d_model * 2)]
                v_proj_weight = multihead_attn_module.in_proj_weight[(d_model * 2):]
            else:
                q_proj_weight = multihead_attn_module.q_proj_weight
                k_proj_weight = multihead_attn_module.k_proj_weight
                v_proj_weight = multihead_attn_module.v_proj_weight
            Q_fc = _fc(Q, q_proj_weight, multihead_attn_module.in_proj_bias[:d_model])
            K_fc = _fc(K, k_proj_weight, multihead_attn_module.in_proj_bias[d_model:(d_model * 2)])
            V_fc = _fc(V, v_proj_weight, multihead_attn_module.in_proj_bias[(d_model * 2):])
            if add_bias_kv:
                # bias_k/bias_v append one extra (always-attendable) key/value slot.
                K_fc = np.concatenate((K_fc, np.repeat(bias_k, K_fc.shape[0], axis=0)), axis=1)
                V_fc = np.concatenate((V_fc, np.repeat(bias_v, V_fc.shape[0], axis=0)), axis=1)
                if attn_mask is not None:
                    attn_mask = np.concatenate((attn_mask, np.ones([1, 1])), axis=1)
                if key_padding_mask is not None:
                    key_padding_mask = np.concatenate((key_padding_mask, np.full((batch_sz, 1), False, dtype=bool)), axis=1)
                dims[1] += 1
            Q_split = _split_heads_ref(
                Q_fc, [batch_sz, 1, d_model], nheads, d_head
            )
            if saved_k is not None:
                K_split = np.reshape(saved_k, [dims[0], nheads, dims[1], d_head])
            else:
                K_split = _split_heads_ref(K_fc, dims, nheads, d_head)
            if saved_v is not None:
                V_split = np.reshape(saved_v, [dims[0], nheads, dims[1], d_head])
            else:
                V_split = _split_heads_ref(V_fc, dims, nheads, d_head)
            if add_zero_attn:
                # zero_attn appends one all-zero key/value slot per head.
                dims[1] += 1
                K_split = np.concatenate((K_split, np.zeros([K_split.shape[0], K_split.shape[1], 1, K_split.shape[3]])), axis=2)
                V_split = np.concatenate((V_split, np.zeros([V_split.shape[0], V_split.shape[1], 1, V_split.shape[3]])), axis=2)
                if attn_mask is not None:
                    attn_mask = np.concatenate((attn_mask, np.ones([1, 1])), axis=1)
                if key_padding_mask is not None:
                    key_padding_mask = np.concatenate((key_padding_mask, np.full((batch_sz, 1), False, dtype=bool)), axis=1)
            attn_heads, ref_attn_weight = _scaled_dot_attn_ref(
                Q=Q_split,
                K=K_split,
                V=V_split,
                dims=Q_split.shape,
                unseen_mask=attn_mask,
                key_padding_mask=key_padding_mask
            )
            combined_attn_heads = _combine_heads_ref(
                X=attn_heads, dims=[batch_sz, 1], nheads=nheads, d_head=d_head
            )
            reference = _fc(combined_attn_heads, multihead_attn_module.out_proj.weight, multihead_attn_module.out_proj.bias)
            reference = np.squeeze(reference, axis=1)
            # result = reference
            self.assertEqual(tuple(result.shape), (batch_sz, d_model))
            np.testing.assert_allclose(result, reference, atol=1e-5)
            # result_weight = ref_attn_weight
            result_weight = result_weight.detach().numpy()
            self.assertEqual(tuple(result_weight.shape), tuple(ref_attn_weight.shape))
            np.testing.assert_allclose(result_weight, ref_attn_weight, atol=1e-5)

    def test_multihead_attn_add_bias_kv():
        _multihead_attn_test_helper(add_bias_kv=True)

    def test_multihead_attn_add_zero_attn():
        _multihead_attn_test_helper(add_zero_attn=True)

    def test_multihead_attn_no_masking():
        _multihead_attn_test_helper()

    def test_multihead_attn_key_padding_mask():
        _multihead_attn_test_helper(add_key_padding_mask=True)

    def test_multihead_attn_saved_kv():
        _multihead_attn_test_helper(saved_kv=True)

    def test_multihead_attn_add_bias_kv_zero_attn():
        _multihead_attn_test_helper(add_key_padding_mask=True, add_bias_kv=True,
                                    add_zero_attn=True)

    def test_multihead_attn_all_arguments1():
        _multihead_attn_test_helper(add_key_padding_mask=True, add_zero_attn=True, saved_kv=True)

    def test_multihead_attn_all_arguments2():
        _multihead_attn_test_helper(add_key_padding_mask=True, add_bias_kv=True,
                                    add_zero_attn=True, saved_kv=True)

    def test_multihead_attn_all_arguments3():
        _multihead_attn_test_helper(add_key_padding_mask=True, add_zero_attn=True,
                                    saved_kv=True, same_embed_dim=True)

    def test_multihead_attn_all_arguments4():
        _multihead_attn_test_helper(add_key_padding_mask=True, add_zero_attn=True,
                                    saved_kv=True, same_embed_dim=True, byte_mask=True)

    test_multihead_attn_add_zero_attn()  # Test MultiheadAttention with add_zero_attn
    test_multihead_attn_add_bias_kv()  # Test MultiheadAttention with add_bias_kv
    test_multihead_attn_no_masking()   # Test MultiheadAttention without masking
    test_multihead_attn_key_padding_mask()  # Test MultiheadAttention with src lengths
    test_multihead_attn_saved_kv()  # Test MultiheadAttention with static kv.
    test_multihead_attn_add_bias_kv_zero_attn()  # Test MultiheadAttention with bias_kv and zero_attn.
    test_multihead_attn_all_arguments1()  # Test MultiheadAttention with all the argument.
    # bias_kv combined with static kv is explicitly rejected by the functional API.
    with self.assertRaisesRegex(AssertionError, "bias cannot be added to static key."):
        test_multihead_attn_all_arguments2()  # Test MultiheadAttention with all the argument.
    test_multihead_attn_all_arguments3()  # Test MultiheadAttention with all the argument.
    test_multihead_attn_all_arguments4()  # Test MultiheadAttention with all the argument.
def test_multihead_attn_3d_attn_mask(self):
    """A 3-D (per-batch-head) attn_mask must give the same result as running
    each batch element separately with its 2-D mask."""
    embed_dim = 8
    num_heads = 4
    batch_size = 8
    src_len = 3
    tgt_len = 2
    query = torch.rand(batch_size, tgt_len, embed_dim)  # [N, T, D]
    key = torch.rand(batch_size, src_len, embed_dim)  # [N, S, D]
    value = key  # [N, S, D]
    attn_mask = torch.randint(0, 2, (batch_size, tgt_len, src_len)).float()  # [N, T, S]
    # Convert the 0/1 mask to additive form: 0 -> -inf (blocked), 1 -> 0.0.
    attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, float(0.0))
    mta_model = torch.nn.MultiheadAttention(embed_dim, num_heads)
    # Generate 3D results
    attn_mask_3d = torch.repeat_interleave(attn_mask, num_heads, dim=0)  # [N * H, T, S]
    output_3d = mta_model(query.transpose(0, 1), key.transpose(0, 1), value.transpose(0, 1), attn_mask=attn_mask_3d)[0]
    output_3d = output_3d.transpose(0, 1)  # [N, T, D]
    for i in range(0, batch_size):
        # Run one batch element at a time with its own 2-D mask.
        output_2d = mta_model(query[i].unsqueeze(0).transpose(0, 1),
                              key[i].unsqueeze(0).transpose(0, 1),
                              value[i].unsqueeze(0).transpose(0, 1),
                              attn_mask=attn_mask[i])[0]
        # output_2d in shape of [T, 1, D]
        self.assertEqual(output_3d[i].unsqueeze(0).transpose(0, 1), output_2d)
def test_multihead_attn_no_bias(self):
    """bias=False must disable both the input and the output projection biases."""
    attn = torch.nn.MultiheadAttention(8, 4, bias=False)
    # Verify that bias=False applies to both in and out projection layers.
    self.assertIsNone(attn.in_proj_bias)
    self.assertIsNone(attn.out_proj.bias)
def _test_multihead_attn_invalid_shape_impl(self, mha):
    """Shared body: feed `mha` mismatched query/key/value/mask ranks and check
    each combination raises an AssertionError with the expected message."""
    # Batched (3D) query cases
    query = torch.randn(3, 3, 3)
    key = torch.randn(3, 3, 3)
    value = torch.randn(3, 3, 3)
    msg = "expected `key` and `value` to be 3-D but found 2-D and 3-D tensors respectively"
    # 3D query, 2D key and 3D value
    with self.assertRaisesRegex(AssertionError, msg):
        mha(query, torch.randn(3, 3), value)
    msg = "expected `key` and `value` to be 3-D but found 3-D and 2-D tensors respectively"
    # 3D query, 3D key and 2D value
    with self.assertRaisesRegex(AssertionError, msg):
        mha(query, key, torch.randn(3, 3))
    msg = "expected `key_padding_mask` to be `None` or 2-D but found 1-D tensor instead"
    # 3D query, 3D key, 3D value and 1D key_padding_mask
    with self.assertRaisesRegex(AssertionError, msg):
        mha(query, key, value, key_padding_mask=torch.tensor([False, True, True], dtype=torch.bool))
    msg = "expected `attn_mask` to be `None`, 2-D or 3-D but found 1-D tensor instead"
    # 3D query, 3D key, 3D value and 1D attn_mask
    with self.assertRaisesRegex(AssertionError, msg):
        mha(query, key, value, attn_mask=torch.tensor([False, True, True], dtype=torch.bool))
    # Unbatched (2D) query cases
    query = torch.randn(3, 3)
    key = torch.randn(3, 3)
    value = torch.randn(3, 3)
    msg = "expected `key` and `value` to be 2-D but found 3-D and 2-D tensors respectively"
    # 2D query, 3D key and 2D value
    with self.assertRaisesRegex(AssertionError, msg):
        mha(query, torch.randn(3, 3, 3), value)
    msg = "expected `key` and `value` to be 2-D but found 2-D and 3-D tensors respectively"
    # 2D query, 3D key and 2D value
    with self.assertRaisesRegex(AssertionError, msg):
        mha(query, key, torch.randn(3, 3, 3))
    msg = "expected `key_padding_mask` to be `None` or 1-D but found 2-D tensor instead"
    # 2D query, 2D key, 2D value and 1D key_padding_mask
    with self.assertRaisesRegex(AssertionError, msg):
        mha(query, key, value, key_padding_mask=torch.tensor([[False, True, True] * 2], dtype=torch.bool))
    msg = "expected `attn_mask` to be `None`, 2-D or 3-D but found 1-D tensor instead"
    # 2D query, 2D key, 2D value and 1D attn_mask
    with self.assertRaisesRegex(AssertionError, msg):
        mha(query, key, value, attn_mask=torch.tensor([False, True, True], dtype=torch.bool))
    msg = r"Expected `attn_mask` shape to be \(3, 3, 3\)"
    # 2D query, 2D key, 2D value and 3D incorrect attn_mask
    with self.assertRaisesRegex(AssertionError, msg):
        mha(query, key, value, attn_mask=torch.randn(4, 3, 3).bernoulli_().to(torch.bool))
def test_multihead_attn_invalid_shape(self):
    """Shape validation must hold both in training mode and under no_grad/eval."""
    attn = torch.nn.MultiheadAttention(3, 3)
    self._test_multihead_attn_invalid_shape_impl(attn)
    # Give the test a chance to hit the fast path. (Right now, it
    # won't, but gating may be less restricted in the future.)
    with torch.no_grad():
        self._test_multihead_attn_invalid_shape_impl(attn.eval())
@torch.no_grad()
def test_multihead_attn_fast_path_invalid_shape(self):
    """Same invalid-shape checks as above, but configured (batch_first, eval,
    no_grad, need_weights=False) so the fast path could be taken."""
    mha = torch.nn.MultiheadAttention(3, 3, batch_first=True).eval()
    # Batched (3D) query cases
    query = torch.randn(3, 3, 3)
    key = torch.randn(3, 3, 3)
    value = torch.randn(3, 3, 3)
    # Currently, this case will just go to the slow path and get
    # the usual message because it fails the requirement to be
    # batched.
    msg = "expected `key` and `value` to be 3-D but found 2-D and 3-D tensors respectively"
    # 3D query, 2D key and 3D value
    with self.assertRaisesRegex(AssertionError, msg):
        mha(query, torch.randn(3, 3), value, need_weights=False)
    # Currently, this case will just go to the slow path and get
    # the usual message because it fails the requirement to be
    # batched.
    msg = "expected `key` and `value` to be 3-D but found 3-D and 2-D tensors respectively"
    # 3D query, 3D key and 2D value
    with self.assertRaisesRegex(AssertionError, msg):
        mha(query, key, torch.randn(3, 3), need_weights=False)
    msg = "expected `key_padding_mask` to be `None` or 2-D but found 1-D tensor instead"
    # 3D query, 3D key, 3D value and 1D key_padding_mask
    with self.assertRaisesRegex(AssertionError, msg):
        mha(query, key, value, key_padding_mask=torch.tensor([False, True, True], dtype=torch.bool), need_weights=False)
    msg = "expected `attn_mask` to be `None`, 2-D or 3-D but found 1-D tensor instead"
    # 3D query, 3D key, 3D value and 1D attn_mask
    with self.assertRaisesRegex(AssertionError, msg):
        mha(query, key, value, attn_mask=torch.tensor([False, True, True], dtype=torch.bool), need_weights=False)
    # Unbatched (2D) query cases
    # NOTE: error messages are the same as regular path because the fast path doesn't support 2D.
    query = torch.randn(3, 3)
    key = torch.randn(3, 3)
    value = torch.randn(3, 3)
    msg = "expected `key` and `value` to be 2-D but found 3-D and 2-D tensors respectively"
    # 2D query, 3D key and 2D value
    with self.assertRaisesRegex(AssertionError, msg):
        mha(query, torch.randn(3, 3, 3), value)
    msg = "expected `key` and `value` to be 2-D but found 2-D and 3-D tensors respectively"
    # 2D query, 3D key and 2D value
    with self.assertRaisesRegex(AssertionError, msg):
        mha(query, key, torch.randn(3, 3, 3))
    msg = "expected `key_padding_mask` to be `None` or 1-D but found 2-D tensor instead"
    # 2D query, 2D key, 2D value and 1D key_padding_mask
    with self.assertRaisesRegex(AssertionError, msg):
        mha(query, key, value, key_padding_mask=torch.tensor([[False, True, True] * 2], dtype=torch.bool))
    msg = "expected `attn_mask` to be `None`, 2-D or 3-D but found 1-D tensor instead"
    # 2D query, 2D key, 2D value and 1D attn_mask
    with self.assertRaisesRegex(AssertionError, msg):
        mha(query, key, value, attn_mask=torch.tensor([False, True, True], dtype=torch.bool))
    msg = r"Expected `attn_mask` shape to be \(3, 3, 3\)"
    # 2D query, 2D key, 2D value and 3D incorrect attn_mask
    with self.assertRaisesRegex(AssertionError, msg):
        mha(query, key, value, attn_mask=torch.randn(4, 3, 3).bernoulli_().to(torch.bool))
def test_multihead_attn_nested_tensor_outside_fast_path(self):
    """NestedTensor input must be rejected whenever the fast path cannot be
    taken (grad enabled / params require grad), and accepted once all
    parameters have requires_grad=False or grad is disabled."""
    mha = torch.nn.MultiheadAttention(3, 3, batch_first=True).eval()
    nt = torch.nested_tensor([torch.randn(3, 3)])
    # One tested platform (linux-bionic-py3.7-clang) has a torch_function for one
    # or more of these. Take advantage of that to test the torch_function bailout.
    has_torch_func = torch.overrides.has_torch_function(
        (nt, mha.in_proj_weight, mha.in_proj_bias, mha.out_proj.weight, mha.out_proj.bias))
    if has_torch_func:
        msg = "MultiheadAttention does not support NestedTensor.*argument has_torch_function"
    else:
        msg = ("MultiheadAttention does not support NestedTensor outside of its fast path.*grad is " +
               "enabled and.*or biases requires_grad")
    # Grad is enabled and params require grad: must be rejected.
    with self.assertRaisesRegex(AssertionError, msg):
        mha(nt, nt, nt)
    if has_torch_func:
        # Just give up, they're all going to fail with the same message.
        return
    # With grad disabled the fast path is eligible, so the calls succeed.
    with torch.no_grad():
        mha(nt, nt, nt)
    with torch.inference_mode():
        mha(nt, nt, nt)
    nt = torch.nested_tensor([torch.randn(3, 3, requires_grad=False)])
    nt.requires_grad = False
    # Input grads disabled is not enough: module params still require grad.
    with self.assertRaisesRegex(AssertionError, msg):
        mha(nt, nt, nt)
    # Once every parameter has requires_grad=False the call must succeed.
    mha.in_proj_weight.requires_grad = False
    mha.in_proj_bias.requires_grad = False
    mha.out_proj.weight.requires_grad = False
    mha.out_proj.bias.requires_grad = False
    mha(nt, nt, nt)
def test_normalize(self):
    """gradcheck F.normalize for p=1 and p=2 on a batched input and on a scalar."""
    batched = torch.randn(1, 3, 4, 4, requires_grad=True)
    self.assertTrue(gradcheck(lambda t: F.normalize(t, p=1, dim=-1), (batched,)))
    self.assertTrue(gradcheck(lambda t: F.normalize(t, p=2, dim=-2), (batched,)))
    # 0-dim input must also be differentiable.
    scalar = torch.randn((), requires_grad=True)
    self.assertTrue(gradcheck(lambda t: F.normalize(t, p=1, dim=-1), (scalar,)))
def test_adaptive_pooling_input_size(self):
    """Adaptive max/avg pools must reject inputs with too few dimensions."""
    for ndim in (2, 3):
        for kind in ('Max', 'Avg'):
            pool_cls = getattr(nn, 'Adaptive{}Pool{}d'.format(kind, ndim))
            target = (2,) * ndim
            pool = pool_cls(target)
            # An input with only `ndim` dims lacks the batch/channel dims.
            undersized = torch.randn(target)
            with self.assertRaises(ValueError):
                pool(undersized)
def test_adaptive_pooling_size_none(self):
    """A None entry in output_size must pass that input dimension through unchanged."""
    for ndim in (2, 3):
        for kind in ('Max', 'Avg'):
            pool_cls = getattr(nn, 'Adaptive{}Pool{}d'.format(kind, ndim))
            target = (2,) * (ndim - 1) + (None,)
            pool = pool_cls(target)
            inp = torch.randn((4,) * (ndim + 1))
            # Trailing None dimension keeps the input extent (4).
            self.assertEqual(pool(inp).size(), (4,) + (2,) * (ndim - 1) + (4,))
@unittest.skipIf(TEST_WITH_UBSAN, "signed integer overflow error with UBSAN")
def test_adaptive_pooling_size_overflow(self):
    """An output_size whose numel overflows int64 must raise, not silently allocate."""
    # 0x0x3fffffffffffffff * 2 * 2 = 0xfffffffffffffffc = -4 as int64_t
    # Tensor::numel() return int64_t, so following check that negative allocs are correctly handled
    self.assertRaises(
        RuntimeError,
        lambda: torch.nn.AdaptiveMaxPool1d(0x3fffffffffffffff)(torch.empty([2, 2, 2])))
def test_adaptive_pooling_avg_nhwc(self):
    """channels_last AdaptiveAvgPool2d must match a contiguous reference run
    in both forward values and input gradients."""
    devices = ['cpu'] + (['cuda'] if TEST_CUDA else [])
    for device in devices:
        x = torch.randint(1, 10, (4, 8, 8, 8), dtype=torch.float32).to(device)
        x = x.contiguous(memory_format=torch.channels_last).requires_grad_()
        gout = torch.randint(1, 10, (4, 8, 7, 7), dtype=torch.float32).to(device)
        pool = torch.nn.AdaptiveAvgPool2d((7, 7)).to(device)
        # Reference pass over a plain-contiguous clone of the same data.
        x_ref = x.detach().clone().contiguous().requires_grad_(True)
        gout_ref = gout.detach().clone().contiguous()
        pool_ref = torch.nn.AdaptiveAvgPool2d((7, 7)).to(device)
        y = pool(x)
        y.backward(gout)
        y_ref = pool_ref(x_ref)
        y_ref.backward(gout_ref)
        # The channels_last layout must be preserved by the op ...
        self.assertTrue(y.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(y_ref.is_contiguous())
        # ... and values/gradients must agree with the contiguous reference.
        self.assertEqual(y, y_ref)
        self.assertEqual(x.grad, x_ref.grad)
def test_adaptive_pooling_avg_nhwc_non_contiguous(self):
    """Same channels_last-vs-contiguous comparison as above, but with a
    strided (channel-sliced) non-contiguous input and grad."""
    device_list = ['cpu']
    if TEST_CUDA:
        device_list.append('cuda')
    for device in device_list:
        input = torch.randint(1, 10, (4, 8, 8, 8), dtype=torch.float32).to(device)
        input = input.contiguous(memory_format=torch.channels_last)
        # Slicing every other channel makes the input non-contiguous in any layout.
        input = input[:, ::2, :, :].requires_grad_()
        grad = torch.randint(1, 10, (4, 8, 7, 7), dtype=torch.float32).to(device)
        grad = grad[:, ::2, :, :]
        pool = torch.nn.AdaptiveAvgPool2d((7, 7)).to(device)
        # Reference pass over a plain-contiguous clone of the same data.
        ref_input = input.detach().clone().contiguous().requires_grad_(True)
        ref_grad = grad.detach().clone().contiguous()
        ref_pool = torch.nn.AdaptiveAvgPool2d((7, 7)).to(device)
        out = pool(input)
        out.backward(grad)
        ref_out = ref_pool(ref_input)
        ref_out.backward(ref_grad)
        # Output layout stays channels_last; values and grads match reference.
        self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(ref_out.is_contiguous())
        self.assertEqual(out, ref_out)
        self.assertEqual(input.grad, ref_input.grad)
def test_adaptive_pooling_bfloat16(self):
    """Adaptive avg/max pooling in bfloat16 must roughly match the float32
    result, preserve memory format, and produce bfloat16 gradients."""
    def _test_adaptive_pooling_bfloat16(self, device, mod, memory_format):
        input = torch.randint(1, 10, (3, 19, 8, 8), dtype=torch.float32)
        input = input.to(device).to(memory_format=memory_format).requires_grad_()
        pool = mod((7, 7)).to(device)
        # Same data, cast to bfloat16, run through the same pooling module.
        input2 = input.detach().clone().bfloat16().requires_grad_(True)
        out = pool(input)
        out.sum().backward()
        out2 = pool(input2)
        out2.sum().backward()
        self.assertTrue(out2.is_contiguous(memory_format=memory_format))
        self.assertEqual(out2.dtype, torch.bfloat16)
        self.assertEqual(input2.grad.dtype, torch.bfloat16)
        # Wide absolute tolerance: bfloat16 has only ~8 bits of mantissa.
        self.assertEqual(out, out2.float(), atol=0.1, rtol=0)
        self.assertEqual(input.grad, input2.grad.float(), atol=0.1, rtol=0)
    device_list = ['cpu']
    for device in device_list:
        _test_adaptive_pooling_bfloat16(self, device, torch.nn.AdaptiveAvgPool2d, torch.contiguous_format)
        _test_adaptive_pooling_bfloat16(self, device, torch.nn.AdaptiveAvgPool2d, torch.channels_last)
        _test_adaptive_pooling_bfloat16(self, device, torch.nn.AdaptiveMaxPool2d, torch.contiguous_format)
        _test_adaptive_pooling_bfloat16(self, device, torch.nn.AdaptiveMaxPool2d, torch.channels_last)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@largeTensorTest('12GB', device='cuda')
def test_adaptive_pooling_avg_nhwc_launch_config_backward(self):
    """Backward of channels_last AdaptiveAvgPool2d with a huge spatial dim
    (2**17 + 1 rows) must still match the contiguous reference — exercises
    CUDA kernel launch-configuration limits."""
    input = torch.randint(1, 10, (1, 32, 2 ** 17 + 1, 32), dtype=torch.float32, device="cuda")
    input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
    grad = torch.randint(1, 10, (1, 32, 10, 32), dtype=torch.float32, device="cuda")
    pool = torch.nn.AdaptiveAvgPool2d((10, 32)).cuda()
    # Reference pass over a plain-contiguous clone of the same data.
    ref_input = input.detach().clone().contiguous().requires_grad_(True)
    ref_grad = grad.detach().clone().contiguous()
    ref_pool = torch.nn.AdaptiveAvgPool2d((10, 32)).cuda()
    out = pool(input)
    out.backward(grad)
    ref_out = ref_pool(ref_input)
    ref_out.backward(ref_grad)
    self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
    self.assertTrue(ref_out.is_contiguous())
    self.assertEqual(out, ref_out)
    self.assertEqual(input.grad, ref_input.grad)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@largeTensorTest('12GB', device='cuda')
def test_adaptive_pooling_avg_nhwc_launch_config_forward(self):
    """Forward of channels_last AdaptiveAvgPool2d with a huge output (2**17 + 1
    rows) must match the contiguous reference — exercises CUDA kernel
    launch-configuration limits on the output side."""
    input = torch.randint(1, 10, (1, 32, 16, 16), dtype=torch.float32, device="cuda")
    input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
    pool = torch.nn.AdaptiveAvgPool2d((2 ** 17 + 1, 32)).cuda()
    # Reference pass over a plain-contiguous clone of the same data.
    ref_input = input.detach().clone().contiguous().requires_grad_(True)
    ref_pool = torch.nn.AdaptiveAvgPool2d((2 ** 17 + 1, 32)).cuda()
    out = pool(input)
    ref_out = ref_pool(ref_input)
    self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
    self.assertTrue(ref_out.is_contiguous())
    self.assertEqual(out, ref_out)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
def test_broadcast_double_backwards_gpu(self):
    """gradcheck/gradgradcheck Broadcast.apply across two GPUs."""
    tensors = (torch.randn(4, 4, device='cuda', requires_grad=True),
               torch.randn(4, 4, device='cuda', requires_grad=True),
               torch.randn(4, 4, device='cuda', requires_grad=True))
    # TODO(#50743): the following segfaults with check_batched_grad=True
    _assertGradAndGradgradChecks(self, lambda *i: Broadcast.apply((0, 1), *i), tensors,
                                 check_batched_grad=False)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_broadcast_not_requiring_grad(self):
    """Broadcast.apply must preserve each input's requires_grad flag on every
    broadcast copy."""
    variables = [
        torch.randn(1, 2, device='cuda', requires_grad=True),
        torch.randn(1, 2, device='cuda', requires_grad=False),
        torch.randn(1, 2, device='cuda', requires_grad=False),
        torch.randn(1, 2, device='cuda', requires_grad=True),
        torch.randn(1, 2, device='cuda', requires_grad=True),
    ]
    broadcasted_variables = Broadcast.apply((0, 1), *variables)
    for output_idx, broadcasted_var in enumerate(broadcasted_variables):
        # Outputs come in per-device groups; map each output back to its input.
        input_var = variables[output_idx % len(variables)]
        self.assertEqual(input_var.requires_grad, broadcasted_var.requires_grad)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_broadcast_no_grad(self):
    """Under no_grad, Broadcast outputs must not require grad even when the
    input does."""
    x = torch.randn(1, 2, dtype=torch.float32, requires_grad=True, device='cuda')
    with torch.no_grad():
        broadcasted = Broadcast.apply((0, 1), x)
    self.assertTrue(x.requires_grad)
    for output in broadcasted:
        self.assertFalse(output.requires_grad)
def test_state_dict(self):
    """state_dict must list every registered parameter/buffer (once per
    module path), share storage with the live tensors, and carry one
    _metadata entry per module.

    Fix: the original asserted 'block.conv.weight' membership twice in a
    row; the redundant duplicate is removed.
    """
    l = nn.Linear(5, 5)
    block = nn.Module()
    block.conv = nn.Conv2d(3, 3, 3, bias=False)
    net = nn.Module()
    net.linear1 = l
    net.linear2 = l  # deliberately shared: both names point at the same Linear
    net.bn = nn.BatchNorm2d(2)
    net.block = block
    net.add_module('empty', None)
    state_dict = net.state_dict()
    # 2 (linear1) + 2 (linear2) + 5 (bn incl. num_batches_tracked) + 1 (conv weight)
    self.assertEqual(len(state_dict), 10)
    # One _metadata entry per module: '', linear1, linear2, bn, block, block.conv
    self.assertEqual(len(state_dict._metadata), 6)
    self.assertIn('', state_dict._metadata)
    self.assertIn('linear1', state_dict._metadata)
    self.assertIn('linear1.weight', state_dict)
    self.assertIn('linear1.bias', state_dict)
    self.assertIn('linear2', state_dict._metadata)
    self.assertIn('linear2.weight', state_dict)
    self.assertIn('linear2.bias', state_dict)
    self.assertIn('block', state_dict._metadata)
    self.assertIn('block.conv', state_dict._metadata)
    self.assertIn('block.conv.weight', state_dict)
    # bias=False above, so no bias entry may appear for the conv.
    self.assertNotIn('block.conv.bias', state_dict)
    self.assertIn('bn', state_dict._metadata)
    self.assertIn('bn.weight', state_dict)
    self.assertIn('bn.bias', state_dict)
    self.assertIn('bn.running_var', state_dict)
    self.assertIn('bn.running_mean', state_dict)
    self.assertIn('bn.num_batches_tracked', state_dict)
    # 'empty' is None, so it must contribute no entries at all.
    self.assertFalse(any(k.startswith('empty') for k in state_dict.keys()))
    for k, v in state_dict.items():
        # Walk the dotted path to the live tensor and confirm the dict value
        # aliases the same storage (state_dict does not copy data).
        param = net
        for component in k.split('.'):
            param = getattr(param, component)
            if isinstance(param, Parameter):
                param = param.data
        self.assertEqual(v.data_ptr(), param.data_ptr())
    # A bare Linear: two entries, one root metadata record with a version.
    l = nn.Linear(5, 5)
    state_dict = l.state_dict()
    self.assertEqual(len(state_dict), 2)
    self.assertEqual(len(state_dict._metadata), 1)
    self.assertIn('', state_dict._metadata)
    self.assertTrue(state_dict._metadata['']['version'] >= 0)
    self.assertEqual(state_dict['weight'].data_ptr(), l.weight.data_ptr())
    self.assertEqual(state_dict['bias'].data_ptr(), l.bias.data_ptr())
    # Reference https://github.com/pytorch/pytorch/pull/75507#issuecomment-1110291545
    self.assertNotWarn(lambda: l.state_dict(destination=dict()), "Should not warn kwarg destination w/o _metadata")
    def test_load_state_dict(self):
        """Exercise Module.load_state_dict: strict and non-strict loading,
        DDP ``module.``-prefix stripping, reporting of missing/unexpected
        keys via the returned IncompatibleKeys, and size-mismatch errors."""
        l = nn.Linear(5, 5)
        block = nn.Module()
        block.conv1 = nn.Conv2d(3, 3, 3, bias=True)
        block.conv2 = nn.Conv2d(3, 3, 3, bias=False)
        net = nn.Module()
        net.linear1 = l
        net.linear2 = l
        net.bn = nn.BatchNorm2d(2)
        net.block = block
        net.add_module('empty', None)
        conv1_bias_dtype = block.conv1.bias.dtype
        # Overwrite a few entries with known values so the load is observable.
        state_dict = net.state_dict()
        state_dict.update({
            'linear1.weight': torch.ones(5, 5),
            'block.conv1.bias': torch.arange(1, 4, dtype=conv1_bias_dtype),
            'bn.running_mean': torch.randn(2),
        })
        # Also test if a DDP state_dict can be loaded from a local model.
        ddp_state_dict = net.state_dict()
        ddp_state_dict.update({
            'module.linear1.weight': torch.ones(5, 5),
            'module.block.conv1.bias': torch.arange(1, 4, dtype=conv1_bias_dtype),
            'module.bn.running_mean': torch.randn(2),
        })
        torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(ddp_state_dict, 'module.')
        for sd in [state_dict, ddp_state_dict]:
            # A fully-matching dict loads cleanly and reports no incompatibilities.
            incompatible_keys = net.load_state_dict(sd)
            self.assertEqual(len(incompatible_keys.missing_keys), 0)
            self.assertEqual(len(incompatible_keys.unexpected_keys), 0)
            self.assertNotIn('Incompatible', str(incompatible_keys))
            self.assertEqual(net.linear1.weight, sd['linear1.weight'])
            self.assertEqual(net.block.conv1.bias, sd['block.conv1.bias'])
            self.assertEqual(net.bn.running_mean, sd['bn.running_mean'])
        # An unknown top-level key: strict load raises, non-strict reports it.
        state_dict = net.state_dict()
        state_dict.update({'extra': torch.ones(5)})
        self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
        incompatible_keys = net.load_state_dict(state_dict, strict=False)
        self.assertEqual(len(incompatible_keys.missing_keys), 0)
        self.assertEqual(len(incompatible_keys.unexpected_keys), 1)
        self.assertIn('extra', incompatible_keys.unexpected_keys)
        self.assertIn('Incompatible', str(incompatible_keys))
        # Same for an unknown dotted (nested-looking) key.
        state_dict = net.state_dict()
        state_dict.update({'extra.param': torch.ones(5)})
        self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
        incompatible_keys = net.load_state_dict(state_dict, strict=False)
        self.assertEqual(len(incompatible_keys.missing_keys), 0)
        self.assertEqual(len(incompatible_keys.unexpected_keys), 1)
        self.assertIn('extra.param', incompatible_keys.unexpected_keys)
        # A deleted key: strict load raises, non-strict reports it as missing.
        state_dict = net.state_dict()
        del state_dict['linear1.weight']
        self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
        incompatible_keys = net.load_state_dict(state_dict, strict=False)
        self.assertEqual(len(incompatible_keys.missing_keys), 1)
        self.assertEqual(len(incompatible_keys.unexpected_keys), 0)
        self.assertIn('linear1.weight', incompatible_keys.missing_keys)
        # Missing and unexpected keys can be reported simultaneously.
        state_dict.update({'extra.param': torch.ones(5)})
        self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
        incompatible_keys = net.load_state_dict(state_dict, strict=False)
        self.assertEqual(len(incompatible_keys.missing_keys), 1)
        self.assertEqual(len(incompatible_keys.unexpected_keys), 1)
        self.assertIn('linear1.weight', incompatible_keys.missing_keys)
        self.assertIn('extra.param', incompatible_keys.unexpected_keys)
        # A shape mismatch is an error even with strict=False.
        state_dict = net.state_dict()
        state_dict.update({'bn.running_mean': torch.rand(14, 4)})  # wrong size
        self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
        self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict, strict=False))
        # Non-strict load from a partial dict updates only the listed keys
        # and leaves everything else untouched.
        state_dict = net.state_dict()
        old_state_dict = deepcopy(state_dict)
        state_dict = {
            'linear1.weight': torch.ones(5, 5),
            'block.conv1.bias': torch.arange(1, 4, dtype=conv1_bias_dtype),
            'bn.running_mean': torch.randn(2),
            'nonexistent_key': torch.rand(3)
        }
        net.load_state_dict(state_dict, strict=False)
        self.assertEqual(net.linear1.weight, state_dict['linear1.weight'])
        self.assertEqual(net.block.conv1.bias, state_dict['block.conv1.bias'])
        self.assertEqual(net.bn.running_mean, state_dict['bn.running_mean'])
        new_state_dict = net.state_dict()
        del old_state_dict['linear1.weight']
        del old_state_dict['block.conv1.bias']
        del old_state_dict['bn.running_mean']
        for k, v, in old_state_dict.items():
            self.assertTrue(v.equal(new_state_dict[k]))
def test_load_state_dict_BC(self):
# BatchNormNd
# Added num_batches_tracked buffer at version 2. For state dict with
# earlier versions or no versions, it should provide default value of 0.
bn = nn.BatchNorm2d(3)
state_dict = bn.state_dict()
del state_dict['num_batches_tracked']
state_dict._metadata['']['version'] = 1 # version 1
bn.load_state_dict(state_dict)
self.assertEqual(bn.num_batches_tracked.dtype, torch.long)
self.assertEqual(bn.num_batches_tracked.item(), 0)
del state_dict._metadata['']['version'] # no version
bn.load_state_dict(state_dict)
self.assertEqual(bn.num_batches_tracked.dtype, torch.long)
self.assertEqual(bn.num_batches_tracked.item(), 0)
    @skipIfTorchDynamo("TorchDynamo fails here for unknown reasons")
    def test_load_state_dict_ref_cycle(self):
        # load_state_dict shouldn't cause a reference cycle involving Tensors
        import gc
        m = torch.nn.LSTM(16, 16, bidirectional=True)
        # Drain any pre-existing garbage so the count below is attributable
        # to load_state_dict alone.
        gc.collect()
        m.load_state_dict(deepcopy(m).state_dict())
        # gc.collect() returns the number of unreachable objects found;
        # zero means the load created no reference cycle.
        refcycles = gc.collect()
        self.assertEqual(refcycles, 0)
def test_load_state_dict_custom(self):
class CustomState(nn.Module):
def __init__(self):
super(CustomState, self).__init__()
self.param = torch.nn.Parameter(torch.ones(1))
self.sub = torch.nn.Linear(5, 5)
def _save_to_state_dict(self, destination, prefix, keep_vars):
destination[prefix + "serialized"] = self.param.data + 1
def _load_from_state_dict(self, state_dict, prefix, local_metadata,
strict, missing_keys, unexpected_keys,
error_msgs):
# skip some of the error handling
self.param.data.copy_(state_dict[prefix + "serialized"] - 1)
# use sequential to verify nesting
m = nn.Sequential(CustomState())
with torch.no_grad():
m[0].param[0] = 10
m[0].sub.weight[0, 0] = 555
state_dict = m.state_dict()
self.assertEqual(state_dict["0.serialized"].item(), 11)
self.assertIn("0.sub.weight", state_dict)
self.assertNotIn("0.param", state_dict)
del m
mm = nn.Sequential(CustomState())
self.assertEqual(mm[0].param[0].item(), 1)
mm.load_state_dict(state_dict)
self.assertEqual(mm[0].param[0].item(), 10)
self.assertEqual(mm[0].sub.weight[0, 0].item(), 555)
def test_extra_state(self):
class SubModule(torch.nn.Module):
def __init__(self, foo):
super().__init__()
self.foo = foo
def get_extra_state(self):
return {
'foo': self.foo
}
def set_extra_state(self, state):
self.foo = state['foo']
class MyModule(torch.nn.Module):
def __init__(self, foo, bar):
super().__init__()
self.sub = SubModule(foo)
self.bar = bar
def get_extra_state(self):
return {
'bar': self.bar
}
def set_extra_state(self, state):
self.bar = state['bar']
# Ensure state_dict contains the extra state by loading it into another module.
m = MyModule(3, 'something')
m2 = MyModule(5, 'something else')
m2.load_state_dict(m.state_dict())
self.assertEqual(m.state_dict(), m2.state_dict())
self.assertEqual(m2.bar, m.bar)
self.assertEqual(m2.sub.foo, m.sub.foo)
def test_extra_state_non_dict(self):
class MyModule(torch.nn.Module):
def __init__(self, foo):
super().__init__()
self.foo = foo
def get_extra_state(self):
return self.foo
def set_extra_state(self, state):
self.foo = state
# Test various types of extra state.
for state in ('something', 5, MyModule(3)):
m = MyModule(state)
m2 = MyModule('something else')
m2.load_state_dict(m.state_dict())
self.assertEqual(m.state_dict(), m2.state_dict())
self.assertEqual(m.foo, m2.foo)
def test_extra_state_missing_set_extra_state(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
def get_extra_state(self):
return {
'foo': 5
}
m = MyModule()
with self.assertRaisesRegex(RuntimeError, 'Unexpected key'):
m.load_state_dict(m.state_dict())
def test_extra_state_missing_get_extra_state(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
def set_extra_state(self):
pass
m = MyModule()
with self.assertRaisesRegex(RuntimeError, 'Missing key'):
m.load_state_dict(m.state_dict())
    @skipIfTorchDynamo("TorchDynamo fails here for unknown reasons")
    def test_parameter_assignment(self):
        """Attribute assignment on a Module registers Parameter instances
        but not plain tensors, and assigning None removes a parameter."""
        l = nn.Linear(5, 5)
        def num_params():
            return len(list(l.parameters()))
        self.assertEqual(num_params(), 2)
        new_param = Parameter(torch.randn(5, 5))
        l.param_name = new_param
        self.assertEqual(num_params(), 3)
        self.assertObjectIn(new_param, l.parameters())
        var = torch.randn(5, 5)
        l.var_name = var
        # A plain tensor is stored as a regular attribute, not registered.
        self.assertEqual(num_params(), 3)
        self.assertNotIn(id(var), map(id, l.parameters()))
        # Make sure Variables are not saved as parameters
        l.variable_attr = torch.empty(5, 5)
        self.assertEqual(num_params(), 3)
        l.param_attr = Parameter(torch.empty(5, 5))
        self.assertEqual(num_params(), 4)
        # It shouldn't be possible to replace a parameter with a Variable
        def assign_var():
            l.param_attr = torch.empty(5, 5)
        self.assertRaises(TypeError, assign_var)
        # But replacing it with None should be fine
        l.param_attr = None
        self.assertEqual(num_params(), 3)
    def test_assignment(self):
        """Attribute assignment semantics shared by parameters and child
        modules: insertion order in parameters()/children(), None shadowing,
        deletion, re-registration, and buffer assignment."""
        l = nn.Module()
        a = nn.Parameter(torch.randn(2))
        b = nn.Parameter(torch.randn(3))
        c = nn.Parameter(torch.randn(4))
        q = nn.Linear(4, 4)
        r = nn.Linear(5, 5)
        w = nn.Linear(6, 6)
        def test_assignments(get_list, a, b, c):
            # Check that None can be shadowed
            l.a = None
            self.assertIsNone(l.a)
            self.assertIn('a', l.__dict__)
            l.a = a
            self.assertIs(l.a, a)
            self.assertEqual(get_list(), [a])
            self.assertNotIn('a', l.__dict__)
            # Assign second object
            l.b = None
            self.assertIsNone(l.b)
            self.assertIn('b', l.__dict__)
            l.b = b
            self.assertIs(l.b, b)
            self.assertEqual(get_list(), [a, b])
            self.assertNotIn('b', l.__dict__)
            # Remove and add the object back. Order should be unchanged.
            l.a = None
            self.assertIsNone(l.a)
            self.assertEqual(get_list(), [b])
            l.a = a
            self.assertIs(l.a, a)
            self.assertEqual(get_list(), [a, b])
            # Replace object with another one. Order should be unchanged.
            l.a = c
            self.assertIs(l.a, c)
            self.assertEqual(get_list(), [c, b])
            # Remove and reassign an attribute. It should appear at the end of the list now.
            del l.a
            self.assertFalse(hasattr(l, 'a'))
            l.a = a
            self.assertIs(l.a, a)
            self.assertEqual(get_list(), [b, a])
        # Run the same protocol once for parameters and once for submodules.
        test_assignments(lambda: list(l.parameters()), a, b, c)
        del l.a, l.b
        self.assertEqual(list(l.parameters()), [])
        test_assignments(lambda: list(l.children()), q, r, w)
        del l.a, l.b
        self.assertEqual(list(l.children()), [])
        buf = torch.randn(10)
        l.register_buffer('buf', buf)
        self.assertIs(l.buf, buf)
        l.buf = None
        self.assertIs(l.buf, None)
        self.assertNotIn('buf', l.__dict__)  # should be stored in l._buffers
        l.buf = buf
        self.assertIn('buf', l.state_dict())
        self.assertEqual(l.state_dict()['buf'], buf)
    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    def test_thnn_conv_strided_padded_dilated(self):
        """With cuDNN disabled, the fallback CUDA conv/conv_transpose kernels
        must match CPU results and pass gradcheck for assorted
        stride/padding/dilation combinations (2d and 3d, regular and
        transposed)."""
        for convfn, dims, transposed in (
                (torch.nn.functional.conv2d, 2, False),
                (torch.nn.functional.conv_transpose2d, 2, True),
                (torch.nn.functional.conv3d, 3, False),
                (torch.nn.functional.conv_transpose3d, 3, True)):
            for stride, padding, dilation in (
                    (2, 0, 1), (1, 1, 1), (2, 1, 1), (1, 0, 2)):
                kwargs = {"stride": stride, "padding": padding, "dilation": dilation}
                # Tiny double-precision tensors keep gradcheck tractable.
                inp_shape = (1, 2) + dims * (4,)
                weight_shape = (2, 2) + dims * (1,)
                inputs = torch.randn(inp_shape, dtype=torch.double, device="cuda", requires_grad=True)
                weight = torch.randn(weight_shape, dtype=torch.double, device="cuda", requires_grad=True)
                bias = torch.randn(2, dtype=torch.double, device="cuda", requires_grad=True)
                with torch.backends.cudnn.flags(enabled=False):
                    res = convfn(inputs, weight, bias, **kwargs)
                res_cpu = convfn(inputs.cpu(), weight.cpu(), bias.cpu(), **kwargs)
                self.assertEqual(res, res_cpu)
                with torch.backends.cudnn.flags(enabled=False):
                    torch.autograd.gradcheck(
                        lambda x, w, b: convfn(x, w, b, **kwargs),
                        (inputs, weight, bias)
                    )
                    torch.autograd.gradcheck(
                        lambda x, w, b: convfn(x, w, b, **kwargs),
                        (inputs.cpu(), weight.cpu(), bias.cpu())
                    )
def test_Conv2d_inconsistent_types(self):
inputs = torch.randn(4, 1, 7, 7, dtype=torch.float)
weights = torch.randn(1, 1, 3, 3, dtype=torch.double)
# inconsistent types should raise an exception
self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights))
# but it should work with the same type
nn.functional.conv2d(inputs.float(), weights.float())
    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    def test_Conv2d_inconsistent_types_on_GPU_without_cudnn(self):
        """Same dtype-mismatch checks as the CPU test, exercised on the
        native CUDA (non-cuDNN) convolution path."""
        inputs = torch.randn(4, 1, 7, 7, dtype=torch.float, device="cuda")
        weights = torch.randn(1, 1, 3, 3, dtype=torch.double, device="cuda")
        bias = torch.randn(1, dtype=torch.double, device="cuda")
        with torch.backends.cudnn.flags(enabled=False):
            # inconsistent types should raise an exception
            self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights))
            self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights.float(), bias))
            # but it should work with the same type
            nn.functional.conv2d(inputs.float(), weights.float(), bias.float())
def test_Conv2d_1x1(self):
in_channels = 2
out_channels = 2
mod = torch.nn.Conv2d(2, 2, 1, bias=False).to(dtype=torch.double)
input = torch.randn(1, in_channels, 5, 5, requires_grad=True, dtype=torch.double)
for enabled in (False, True):
with torch.backends.mkldnn.flags(enabled=enabled):
gradcheck(F.conv2d, (input, mod.weight))
def test_Conv2d_OneDNN(self):
def run_once(group_val=24, dilation=1):
ifm = torch.ones([1, group_val, 6, 6], dtype=torch.float32)
weights = torch.ones([group_val, 1, 3, 3], dtype=torch.float32)
op = torch.nn.Conv2d(
in_channels=group_val,
out_channels=group_val,
kernel_size=[3, 3],
stride=[2, 2],
padding=[1, 1],
dilation=[dilation, dilation],
groups=group_val,
bias=False,
padding_mode='zeros'
)
op.weight.data = weights
res = op(ifm)
grad_in = torch.ones(res.shape, dtype=torch.float32)
res.backward(grad_in)
return op.weight.grad
for gorup_val in (24, 48, 23, 25):
for dilation in (1, 2):
with torch.backends.mkldnn.flags(enabled=False):
without_onednn = run_once(gorup_val, dilation)
with torch.backends.mkldnn.flags(enabled=True):
with_onednn = run_once(gorup_val, dilation)
self.assertEqual(without_onednn, with_onednn)
    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
    def test_cudnn_non_contiguous(self):
        # Regression check: cuDNN Conv1d must accept a non-contiguous input.
        # The permute/contiguous/permute round trip below restores the
        # original shape but leaves permuted (non-contiguous) strides.
        x = torch.randn(192, 16, 50).cuda()
        x = x.permute(0, 2, 1).contiguous().permute(0, 2, 1)
        m = torch.nn.Conv1d(
            in_channels=16,
            out_channels=32,
            kernel_size=2,
            bias=True).cuda()
        result = m(x)
    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
    def test_Conv2d_inconsistent_types_on_GPU_with_cudnn(self):
        """Same dtype-mismatch checks as the CPU test, exercised on the
        cuDNN convolution path."""
        inputs = torch.randn(4, 1, 7, 7, dtype=torch.float, device="cuda")
        weights = torch.randn(1, 1, 3, 3, dtype=torch.double, device="cuda")
        bias = torch.randn(1, dtype=torch.double, device="cuda")
        with torch.backends.cudnn.flags(enabled=True):
            # inconsistent types should raise an exception
            self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights))
            self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights.float(), bias))
            # but it should work with the same type
            nn.functional.conv2d(inputs.float(), weights.float(), bias.float())
def test_Conv2d_missing_argument(self):
c = nn.Conv2d(3, 3, 3)
self.assertRaises(TypeError, lambda: c(None))
def test_Conv2d_backward_twice(self):
input = torch.randn(2, 3, 5, 5)
c = nn.Conv2d(3, 3, 3)
o1 = c(input)
o1.sum().backward()
self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True',
lambda: o1.sum().backward())
def test_conv_modules_raise_error_on_incorrect_input_size(self):
for dtype in [torch.bfloat16, torch.double, torch.float]:
modules = [nn.Conv1d(3, 8, 3).to(dtype), nn.ConvTranspose1d(3, 8, 3).to(dtype),
nn.Conv2d(3, 8, 3).to(dtype), nn.ConvTranspose2d(3, 8, 3).to(dtype),
nn.Conv3d(3, 8, 3).to(dtype), nn.ConvTranspose3d(3, 8, 3).to(dtype)]
invalid_input_dims = [(1, 4), (1, 4),
(2, 5), (2, 5),
(3, 6), (3, 6)]
for invalid_dims, module in zip(invalid_input_dims, modules):
for dims in invalid_dims:
input = torch.empty(torch.Size((3, ) * dims))
self.assertRaises(RuntimeError, lambda: module(input))
def test_conv_shapecheck(self):
def test(should_raise, module, input_size, dtype):
input = torch.empty(3, *input_size).to(dtype)
if should_raise:
self.assertRaises(RuntimeError, lambda: module(input))
else:
# just run it to ensure no exception raised.
module(input)
for dtype in [torch.bfloat16, torch.float, torch.double, torch.cfloat, torch.cdouble]:
# Conv1d
test(True, nn.Conv1d(1, 1, 3).to(dtype), (1, 2), dtype)
test(True, nn.Conv1d(1, 1, 3, stride=2).to(dtype), (1, 2), dtype)
test(False, nn.Conv1d(1, 1, 2).to(dtype), (1, 2), dtype)
test(False, nn.Conv1d(1, 1, 2, stride=2).to(dtype), (1, 2), dtype)
test(False, nn.Conv1d(1, 1, 3, stride=2, padding=1).to(dtype), (1, 2), dtype)
# Conv2d
test(True, nn.Conv2d(1, 1, (3, 3)).to(dtype), (1, 2, 2), dtype)
test(False, nn.Conv2d(1, 1, (3, 3)).to(dtype), (1, 3, 3), dtype)
test(False, nn.Conv2d(1, 1, (3, 3), padding=1).to(dtype), (1, 2, 2), dtype)
# Conv3D
test(True, nn.Conv3d(1, 1, (3, 3, 3)).to(dtype), (1, 2, 2, 2), dtype)
test(False, nn.Conv3d(1, 1, (3, 3, 3)).to(dtype), (1, 3, 3, 3), dtype)
test(False, nn.Conv3d(1, 1, (3, 3, 3), padding=1).to(dtype), (1, 2, 2, 2), dtype)
def test_ConvTranspose2d_output_size(self):
m = nn.ConvTranspose2d(3, 4, 3, 3, 0, 2)
i = torch.randn(2, 3, 6, 6)
for h in range(15, 22):
for w in range(15, 22):
if 18 <= h <= 20 and 18 <= w <= 20:
output = m(i, output_size=(h, w))
self.assertEqual(output.size()[2:], (h, w))
else:
self.assertRaises(ValueError, lambda: m(i, (h, w)))
    def test_ConvTranspose2d_output_size_downsample_upsample(self):
        """For every (kernel, stride, padding, dilation) combination, a
        ConvTranspose2d configured like the matching Conv2d must be able to
        reproduce the conv input's spatial size when given
        output_size=i.shape (down-sample then up-sample round trip)."""
        b, c, hid_c = 2, 3, 2
        for h in range(13, 24):
            for w in range(13, 17):
                for k in range(2, 5):
                    for d in range(1, 5):
                        for s in range(1, 4):
                            for p in range(3):
                                conv = nn.Conv2d(
                                    in_channels=c,
                                    out_channels=hid_c,
                                    kernel_size=k,
                                    stride=s,
                                    padding=p,
                                    dilation=d,
                                )
                                t_conv = nn.ConvTranspose2d(
                                    in_channels=hid_c,
                                    out_channels=c,
                                    kernel_size=k,
                                    stride=s,
                                    padding=p,
                                    dilation=d,
                                )
                                i = torch.randn(b, c, h, w)
                                # output_size disambiguates among the several
                                # input sizes that map to the same conv output.
                                out = t_conv(conv(i), output_size=i.shape)
                                self.assertEqual(out.size()[2:], i.size()[2:])
def test_ConvTranspose3d_correct_output_size(self):
# Check that ConvTranspose3d can take a 5d output_size.
m = nn.ConvTranspose3d(2, 2, 2)
i = torch.rand(1, 2, 1, 1, 1)
out = m(i, output_size=(1, 2, 2, 2, 2))
    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    def test_ConvTranspose2d_half_cublas_gemm(self):
        # fp16 transposed conv with cuDNN disabled exercises the cuBLAS GEMM
        # fallback; smoke-test that forward and backward both run.
        with torch.backends.cudnn.flags(enabled=False):
            inputs = torch.randn(1, 1, 16, 16, device='cuda', dtype=torch.half)
            deconv = nn.ConvTranspose2d(
                1, 1, 3, stride=2, padding=1, output_padding=1).cuda().half()
            output = deconv(inputs)
            output.mean().backward()
    # For https://github.com/pytorch/pytorch/pull/1273
    # Almost identical to the above `test_Conv2d_naive_groups`
    @torch.backends.cudnn.flags(enabled=True, benchmark=False)
    def test_Conv2d_groups_nobias(self):
        """A groups=2 conv (bias disabled) must match two independent
        half-channel convs in output, input gradient, and weight gradient,
        on every available device/dtype combination."""
        dev_dtypes = [("cpu", torch.float)]
        if TEST_CUDA:
            dev_dtypes += [("cuda", torch.float), ("cuda", torch.half)]
        if AMPERE_OR_ROCM:
            dev_dtypes += [("cuda", torch.bfloat16)]
        for device, dtype in dev_dtypes:
            m = nn.Conv2d(4, 4, kernel_size=3, groups=2, bias=False).to(device, dtype)
            i = torch.randn(2, 4, 6, 6, device=device, dtype=dtype, requires_grad=True)
            output = m(i)
            grad_output = torch.randn(2, 4, 4, 4, device=device, dtype=dtype)
            output.backward(grad_output)
            # m1/m2: standalone convs seeded with each group's weight slice.
            m1 = nn.Conv2d(2, 2, kernel_size=3, bias=False).to(device, dtype)
            m1.weight.data.copy_(m.weight.data[:2])
            i1 = i.data[:, :2].contiguous().requires_grad_(True)
            output1 = m1(i1)
            output1.backward(grad_output[:, :2].contiguous())
            m2 = nn.Conv2d(2, 2, kernel_size=3, bias=False).to(device, dtype)
            m2.weight.data.copy_(m.weight.data[2:])
            i2 = i.data[:, 2:].contiguous().requires_grad_(True)
            output2 = m2(i2)
            output2.backward(grad_output[:, 2:].contiguous())
            self.assertEqual(output, torch.cat([output1, output2], 1))
            self.assertEqual(i.grad.data,
                             torch.cat([i1.grad.data, i2.grad.data], 1),
                             atol=dtype2prec_DONTUSE[dtype], rtol=0)
            self.assertEqual(m.weight.grad.data,
                             torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
                             atol=1e-1 if dtype == torch.half else dtype2prec_DONTUSE[dtype], rtol=0)
    # Almost identical to the above `test_Conv2d_naive_groups`
    # Covering special case when group > 1, input-channel / group < 16 and output-channel is multiple of 16
    # See also https://github.com/pytorch/pytorch/pull/18463#issuecomment-476563686
    # and https://github.com/pytorch/pytorch/pull/18463#issuecomment-477001024
    @torch.backends.cudnn.flags(enabled=True, benchmark=False)
    def test_Conv2d_groups_nobias_v2(self):
        """Same grouped-vs-split-conv equivalence as test_Conv2d_groups_nobias,
        but with 16 output channels to hit cuDNN's blocked-channel kernels."""
        torch.manual_seed(123)
        dev_dtypes = [("cpu", torch.float)]
        if TEST_CUDA:
            dev_dtypes += [("cuda", torch.float), ("cuda", torch.half)]
        if AMPERE_OR_ROCM:
            dev_dtypes += [("cuda", torch.bfloat16)]
        for device, dtype in dev_dtypes:
            m = nn.Conv2d(4, 16, kernel_size=3, groups=2, bias=False).to(device, dtype)
            i = torch.randn(2, 4, 6, 6, device=device, dtype=dtype, requires_grad=True)
            output = m(i)
            grad_output = torch.randn(2, 16, 4, 4, device=device, dtype=dtype)
            output.backward(grad_output)
            # First group: input channels [0, 2), output channels [0, 8).
            m1 = nn.Conv2d(2, 8, kernel_size=3, bias=False).to(device, dtype)
            m1.weight.data.copy_(m.weight.data[:8])
            i1 = i.data[:, :2].contiguous().requires_grad_(True)
            output1 = m1(i1)
            output1.backward(grad_output[:, :8].contiguous())
            # Second group: input channels [2, 4), output channels [8, 16).
            m2 = nn.Conv2d(2, 8, kernel_size=3, bias=False).to(device, dtype)
            m2.weight.data.copy_(m.weight.data[8:])
            i2 = i.data[:, 2:].contiguous().requires_grad_(True)
            output2 = m2(i2)
            output2.backward(grad_output[:, 8:].contiguous())
            self.assertEqual(output, torch.cat([output1, output2], 1))
            self.assertEqual(i.grad.data,
                             torch.cat([i1.grad.data, i2.grad.data], 1),
                             atol=dtype2prec_DONTUSE[dtype], rtol=0)
            self.assertEqual(m.weight.grad.data,
                             torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
                             atol=1e-1 if dtype == torch.half else dtype2prec_DONTUSE[dtype], rtol=0)
    # CPU-only test for group conv3d fast implementation using bmm
    # See: https://github.com/pytorch/pytorch/pull/36355
    def test_Conv3d_groups_nobias(self):
        """CPU groups=2 Conv3d (no bias) must match two independent
        half-channel convs in output, input gradient, and weight gradient."""
        torch.manual_seed(123)
        m = nn.Conv3d(4, 16, kernel_size=3, groups=2, bias=False).to("cpu", torch.float)
        i = torch.randn(2, 4, 6, 6, 6, device="cpu", dtype=torch.float, requires_grad=True)
        output = m(i)
        grad_output = torch.randn(2, 16, 4, 4, 4, device="cpu", dtype=torch.float)
        output.backward(grad_output)
        # First group: input channels [0, 2), output channels [0, 8).
        m1 = nn.Conv3d(2, 8, kernel_size=3, bias=False).to("cpu", torch.float)
        m1.weight.data.copy_(m.weight.data[:8])
        i1 = i.data[:, :2].contiguous().requires_grad_(True)
        output1 = m1(i1)
        output1.backward(grad_output[:, :8].contiguous())
        # Second group: input channels [2, 4), output channels [8, 16).
        m2 = nn.Conv3d(2, 8, kernel_size=3, bias=False).to("cpu", torch.float)
        m2.weight.data.copy_(m.weight.data[8:])
        i2 = i.data[:, 2:].contiguous().requires_grad_(True)
        output2 = m2(i2)
        output2.backward(grad_output[:, 8:].contiguous())
        self.assertEqual(output, torch.cat([output1, output2], 1))
        self.assertEqual(i.grad.data,
                         torch.cat([i1.grad.data, i2.grad.data], 1),
                         atol=dtype2prec_DONTUSE[torch.float], rtol=0)
        self.assertEqual(m.weight.grad.data,
                         torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
                         atol=dtype2prec_DONTUSE[torch.float], rtol=dtype2prec_DONTUSE[torch.float])
    def test_Conv3d_groups_wbias(self):
        """CPU groups=2 Conv3d with bias must match two independent
        half-channel convs in output and in input/weight/bias gradients."""
        torch.manual_seed(123)
        m = nn.Conv3d(4, 16, kernel_size=3, groups=2, bias=True).to("cpu", torch.float)
        i = torch.randn(2, 4, 6, 6, 6, device="cpu", dtype=torch.float, requires_grad=True)
        output = m(i)
        grad_output = torch.randn(2, 16, 4, 4, 4, device="cpu", dtype=torch.float)
        output.backward(grad_output)
        # First group: input channels [0, 2), output channels [0, 8).
        m1 = nn.Conv3d(2, 8, kernel_size=3, bias=True).to("cpu", torch.float)
        m1.weight.data.copy_(m.weight.data[:8])
        m1.bias.data.copy_(m.bias.data[:8])
        i1 = i.data[:, :2].contiguous().requires_grad_(True)
        output1 = m1(i1)
        output1.backward(grad_output[:, :8].contiguous())
        # Second group: input channels [2, 4), output channels [8, 16).
        m2 = nn.Conv3d(2, 8, kernel_size=3, bias=True).to("cpu", torch.float)
        m2.weight.data.copy_(m.weight.data[8:])
        m2.bias.data.copy_(m.bias.data[8:])
        i2 = i.data[:, 2:].contiguous().requires_grad_(True)
        output2 = m2(i2)
        output2.backward(grad_output[:, 8:].contiguous())
        self.assertEqual(output, torch.cat([output1, output2], 1))
        self.assertEqual(i.grad.data,
                         torch.cat([i1.grad.data, i2.grad.data], 1),
                         atol=dtype2prec_DONTUSE[torch.float],
                         rtol=dtype2prec_DONTUSE[torch.float])
        self.assertEqual(m.weight.grad.data,
                         torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
                         atol=dtype2prec_DONTUSE[torch.float],
                         rtol=dtype2prec_DONTUSE[torch.float])
        self.assertEqual(m.bias.grad.data,
                         torch.cat([m1.bias.grad.data, m2.bias.grad.data], 0),
                         atol=dtype2prec_DONTUSE[torch.float], rtol=dtype2prec_DONTUSE[torch.float])
def test_MaxUnpool2d_output_size(self):
m = nn.MaxPool2d(3, stride=2, return_indices=True)
mu = nn.MaxUnpool2d(3, stride=2)
big_t = torch.rand(1, 1, 6, 6)
big_t[0][0][4][4] = 100
output_big, indices_big = m(big_t)
self.assertRaises(RuntimeError, lambda: mu(output_big, indices_big))
small_t = torch.rand(1, 1, 5, 5)
for i in range(0, 4, 2):
for j in range(0, 4, 2):
small_t[:, :, i, j] = 100
output_small, indices_small = m(small_t)
for h in range(3, 10):
for w in range(3, 10):
if 4 <= h <= 6 and 4 <= w <= 6:
size = (h, w)
if h == 6:
size = (1, 1) + size
mu(output_small, indices_small, output_size=size)
else:
self.assertRaises(ValueError, lambda: mu(output_small, indices_small, (h, w)))
def test_max_unpool2d_nhwc_cpu(self):
input = torch.randn(2, 10, 9, 9).float().cpu()
input = input.contiguous(memory_format=torch.channels_last)
ref_input = input.clone().contiguous()
pool = nn.MaxPool2d(3, stride=2, return_indices=True).cpu()
ref_pool = nn.MaxPool2d(3, stride=2, return_indices=True).cpu()
out, ind = pool(input)
ref_out, ref_ind = ref_pool(ref_input)
out.requires_grad_()
ref_out.requires_grad_()
unpool = nn.MaxUnpool2d(3, stride=2).cpu()
ref_unpool = nn.MaxUnpool2d(3, stride=2).cpu()
upout = unpool(out, ind)
ref_upout = ref_unpool(ref_out, ref_ind)
grad = torch.randn(upout.size()).float().cpu()
grad = grad.contiguous(memory_format=torch.channels_last)
ref_grad = grad.clone().contiguous()
upout.backward(grad)
ref_upout.backward(ref_grad)
self.assertTrue(upout.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_upout.is_contiguous())
self.assertTrue(torch.allclose(upout, ref_upout))
self.assertTrue(torch.allclose(out.grad, ref_out.grad))
def test_container_copy(self):
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = nn.Linear(4, 5)
def forward(self, input):
return self.linear(input)
input = torch.randn(2, 4)
model = Model()
model_cp = deepcopy(model)
self.assertEqual(model(input).data, model_cp(input).data)
model_cp.linear.weight.data[:] = 2
self.assertNotEqual(model(input).data, model_cp(input).data)
def test_RNN_cell(self):
# this is just a smoke test; these modules are implemented through
# autograd so no Jacobian test is needed
for module in (nn.RNNCell, nn.GRUCell):
for bias in (True, False):
input = torch.randn(3, 10)
hx = torch.randn(3, 20)
cell = module(10, 20, bias=bias)
for _ in range(6):
hx = cell(input, hx)
hx.sum().backward()
def test_RNN_cell_forward_input_size(self):
input = torch.randn(3, 11)
hx = torch.randn(3, 20)
for module in (nn.RNNCell, nn.GRUCell):
cell = module(10, 20)
self.assertRaises(Exception, lambda: cell(input, hx))
def test_RNN_cell_forward_hidden_size(self):
input = torch.randn(3, 10)
hx = torch.randn(3, 21)
cell_shared_param = (10, 20)
for cell in (nn.RNNCell(*cell_shared_param, nonlinearity="relu"),
nn.RNNCell(*cell_shared_param, nonlinearity="tanh"),
nn.GRUCell(*cell_shared_param)):
self.assertRaises(Exception, lambda: cell(input, hx))
def test_RNN_cell_forward_zero_hidden_size(self):
input = torch.randn(3, 10)
hx = torch.randn(3, 0)
cell_shared_param = (10, 0)
for cell in (nn.RNNCell(*cell_shared_param, nonlinearity="relu"),
nn.RNNCell(*cell_shared_param, nonlinearity="tanh"),
nn.GRUCell(*cell_shared_param)):
self.assertEqual(cell(input, hx).shape, torch.Size([3, 0]))
def _test_loss_equal_input_target_shape(self, cast):
# Tests losses whose inputs should have the same size.
losses = {
'mse_loss': lambda x, y: F.mse_loss(x, y),
'l1_loss': lambda x, y: F.l1_loss(x, y),
'smooth_l1_loss': lambda x, y: F.smooth_l1_loss(x, y),
'huber_loss': lambda x, y: F.huber_loss(x, y),
'kl_div': lambda x, y: F.kl_div(x, y),
'poisson_nll_loss': lambda x, y: F.poisson_nll_loss(x, y),
}
input = cast(torch.randn(3, 5))
target = cast(torch.randn(5, 3))
for _name, fn in losses.items():
self.assertRaises(Exception, lambda: fn(input, target))
    def test_loss_equal_input_target_shape(self):
        # Run the shared shape-mismatch checks with the identity cast
        # (plain CPU tensors); device/dtype variants reuse the same helper.
        self._test_loss_equal_input_target_shape(lambda x: x)
def test_mse_loss_size_warning(self):
i = torch.randn((10, 1), requires_grad=True)
t = torch.randn((10,))
with warnings.catch_warnings(record=True) as w:
# Ensure warnings are being shown
warnings.simplefilter("always")
# Trigger Warning
F.mse_loss(i, t)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertIn('Please ensure they have the same size.', str(w[0]))
def test_poisson_nll_loss_reduction_modes(self):
input = torch.tensor([0.5, 1.5, 2.5])
target = torch.tensor([1., 2., 3.])
component_wise_loss = torch.exp(input) - target * input
self.assertEqual(component_wise_loss,
F.poisson_nll_loss(input, target, reduction='none'))
self.assertEqual(torch.sum(component_wise_loss),
F.poisson_nll_loss(input, target, reduction='sum'))
self.assertEqual(torch.mean(component_wise_loss),
F.poisson_nll_loss(input, target, reduction='mean'))
with self.assertRaisesRegex(ValueError, 'is not valid'):
F.poisson_nll_loss(input, target, reduction='total')
def test_gaussian_nll_loss_reduction_modes(self):
input = torch.tensor([[0.5, 1.5, 2.5], [2., 4., 6.]])
target = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
var = torch.tensor([[0.5, 1., 1.5], [1., 1.5, 2.]])
component_wise_loss = 0.5 * (torch.log(var) + (input - target)**2 / var)
self.assertEqual(component_wise_loss,
F.gaussian_nll_loss(input, target, var, reduction='none'))
self.assertEqual(torch.sum(component_wise_loss),
F.gaussian_nll_loss(input, target, var, reduction='sum'))
self.assertEqual(torch.mean(component_wise_loss),
F.gaussian_nll_loss(input, target, var, reduction='mean'))
with self.assertRaisesRegex(ValueError, 'is not valid'):
F.gaussian_nll_loss(input, target, var, reduction='total')
def test_gaussian_nll_loss_broadcasting(self):
input = torch.tensor([[0.5, 1.5, 2.5], [2., 4., 6.]])
target_full = torch.tensor([[1., 2., 3.], [1., 2., 3.]])
target_part = torch.tensor([[1., 2., 3.]])
var_full = torch.tensor([[0.5, 0.5, 0.5], [1.5, 1.5, 1.5]])
var_part1 = torch.tensor([[0.5], [1.5]])
var_part2 = torch.tensor([0.5, 1.5])
component_wise_loss = 0.5 * (torch.log(var_full) + (input - target_full)**2 / var_full)
self.assertEqual(component_wise_loss,
F.gaussian_nll_loss(input, target_part, var_full, reduction='none'))
self.assertEqual(component_wise_loss,
F.gaussian_nll_loss(input, target_full, var_part1, reduction='none'))
self.assertEqual(component_wise_loss,
F.gaussian_nll_loss(input, target_full, var_part2, reduction='none'))
self.assertEqual(component_wise_loss,
F.gaussian_nll_loss(input, target_part, var_part1, reduction='none'))
self.assertEqual(component_wise_loss,
F.gaussian_nll_loss(input, target_part, var_part2, reduction='none'))
def test_gaussian_nll_loss_args(self):
input = torch.randn(3, 5)
with self.assertRaisesRegex(ValueError, 'var is of incorrect size'):
target = torch.randn(3, 5)
var = torch.ones(3, 3)
torch.nn.functional.gaussian_nll_loss(input, target, var)
with self.assertRaisesRegex(ValueError, 'var has negative entry/entries'):
var = -1 * torch.ones(3, 5)
torch.nn.functional.gaussian_nll_loss(input, target, var)
def test_KLDivLoss_batch_mean(self):
input_shape = (2, 5)
log_prob1 = F.log_softmax(torch.randn(input_shape), 1)
prob2 = F.softmax(torch.randn(input_shape), 1)
loss = nn.KLDivLoss(reduction='batchmean')
l = loss(log_prob1, prob2)
loss_none_reduce = nn.KLDivLoss(reduction='sum')(log_prob1, prob2)
expected = loss_none_reduce / input_shape[0]
self.assertEqual(l, expected)
def test_KLDivLoss_batch_mean_log_target(self):
input_shape = (2, 5)
log_prob1 = F.log_softmax(torch.randn(input_shape), 1)
log_prob2 = F.log_softmax(torch.randn(input_shape), 1)
loss = nn.KLDivLoss(reduction='batchmean', log_target=True)
l = loss(log_prob1, log_prob2)
loss_none_reduce = nn.KLDivLoss(reduction='sum', log_target=True)(log_prob1, log_prob2)
expected = loss_none_reduce / input_shape[0]
self.assertEqual(l, expected)
def test_CTCLoss_typechecks(self):
target_lengths = torch.tensor([30, 25, 20])
input_lengths = torch.tensor([50, 50, 50])
targets = torch.randint(1, 15, (sum(target_lengths),), dtype=torch.int)
log_probs = torch.randn(50, 3, 15, dtype=torch.float).log_softmax(2)
with self.assertRaises(RuntimeError):
_input_lengths = input_lengths.to(dtype=torch.float)
torch.nn.functional.ctc_loss(log_probs, targets, _input_lengths, target_lengths)
with self.assertRaises(RuntimeError):
target_lengths = target_lengths.to(dtype=torch.float)
torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_CTCLoss_lengthchecks_cuda(self):
    # The first target length (30) exceeds the padded target width (29),
    # so ctc_loss must raise rather than read out of bounds (CUDA path).
    target_lengths = [30, 25, 20]
    input_lengths = [50, 50, 50]
    targets = torch.randint(1, 15, (3, 29), dtype=torch.long, device='cuda')
    log_probs = torch.randn(50, 3, 15, dtype=torch.float, device='cuda').log_softmax(2)
    with self.assertRaises(RuntimeError):
        torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)
def test_CTCLoss_lengthchecks_cpu(self):
target_lengths = [30, 25, 20]
input_lengths = [50, 50, 50]
targets = torch.randint(1, 15, (3, 29), dtype=torch.int)
log_probs = torch.randn(50, 3, 15, dtype=torch.float).log_softmax(2)
with self.assertRaises(RuntimeError):
torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_CTCLoss_long_targets(self):
    # Compare CPU vs. native CUDA (cudnn disabled) ctc_loss on very long
    # inputs/targets, checking both the loss and the gradient w.r.t.
    # log_probs against each other.
    input_length = 4000
    vocab_size = 3
    batch_size = 4
    target_length = 1200
    log_probs = torch.randn(input_length, batch_size, vocab_size).log_softmax(2).requires_grad_()
    targets = torch.randint(low=1, high=vocab_size - 1, size=(batch_size, target_length), dtype=torch.long)
    input_lengths = batch_size * [input_length]
    target_lengths = batch_size * [target_length]
    # CPU reference value and gradient
    res_cpu = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths,
                                           reduction='sum', zero_infinity=True)
    grad_out = torch.randn_like(res_cpu)
    grad_cpu, = torch.autograd.grad(res_cpu, log_probs, grad_out)
    # Force the native CUDA kernel (not cudnn) for the GPU side
    with torch.backends.cudnn.flags(enabled=False):
        res_gpu = torch.nn.functional.ctc_loss(log_probs.cuda(), targets.cuda(), input_lengths, target_lengths,
                                               reduction='sum', zero_infinity=True)
        grad_gpu, = torch.autograd.grad(res_gpu, log_probs, grad_out.cuda())
    self.assertEqual(res_cpu, res_gpu, atol=1e-4, rtol=0)
    self.assertEqual(grad_cpu, grad_gpu, atol=1e-4, rtol=0)
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_CTCLoss_critical_target_len(self):
    # cudnn has an unexpected problem with target length 256, see issue #53505
    N = 1
    S = 256  # the critical target length from the issue
    C = 10
    T = 500
    target = torch.randint(low=1, high=C, size=(S,), dtype=torch.int)
    input_lengths = torch.full(size=(N,), fill_value=T, dtype=torch.int)
    target_lengths = torch.tensor(S, dtype=torch.int)
    inp = torch.randn(T, N, C, dtype=torch.float, device='cuda').log_softmax(2).requires_grad_()
    # Explicitly enable cudnn so the problematic kernel is exercised, then
    # compare against the CPU implementation.
    with cudnn.flags(enabled=True):
        res_gpu = torch.nn.functional.ctc_loss(inp, target, input_lengths, target_lengths, reduction='none')
    res_cpu = torch.nn.functional.ctc_loss(inp.cpu(), target, input_lengths, target_lengths, reduction='none')
    self.assertEqual(res_cpu, res_gpu, atol=1e-3, rtol=0)
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_CTCLoss_zero_infinity(self):
    # With zero_infinity=True, infeasible samples (target longer than the
    # input can emit, e.g. 60 > 50 below) get their loss zeroed instead of
    # producing inf/NaN. Compare the cudnn path, the native CUDA path and
    # the CPU path, including gradients, and verify no NaN leaks through.
    target_lengths = [60, 25, 20]  # first sample is infeasible on purpose
    input_lengths = [50, 50, 50]
    targets = torch.randint(1, 15, (sum(target_lengths),), dtype=torch.int, device='cuda')
    log_probs = torch.randn(50, 3, 15, dtype=torch.float, device='cuda').log_softmax(2).requires_grad_()
    # default path (may pick cudnn)
    res = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths,
                                       reduction='sum', zero_infinity=True)
    # native CUDA kernel (cudnn disabled, long targets)
    with torch.backends.cudnn.flags(enabled=False):
        res2 = torch.nn.functional.ctc_loss(log_probs, targets.cuda().long(), input_lengths, target_lengths,
                                            reduction='sum', zero_infinity=True)
    res_cpu = torch.nn.functional.ctc_loss(log_probs.cpu(), targets.cpu(), input_lengths, target_lengths,
                                           reduction='sum', zero_infinity=True)
    self.assertEqual(res2, res, atol=1e-4, rtol=0)
    self.assertEqual(res_cpu, res.cpu(), atol=1e-4, rtol=0)
    g1, = torch.autograd.grad(res, log_probs)
    g2, = torch.autograd.grad(res2, log_probs)
    g3, = torch.autograd.grad(res_cpu, log_probs)
    self.assertEqual(g2, g3, atol=1e-4, rtol=0)
    self.assertEqual(g1, g2, atol=1e-4, rtol=0)
    self.assertTrue((g1 == g1).all().item())  # check that we don't have NaN
def test_RNN_cell_no_broadcasting(self):
def test(cell_module, input, hx, input_size, hidden_size):
cell = cell_module(input_size, hidden_size)
self.assertRaises(RuntimeError, lambda: cell(input, hx))
def test_all(hidden_size, bad_hx, good_hx, input_size, input):
test(nn.RNNCell, input, bad_hx, input_size, hidden_size)
test(nn.GRUCell, input, bad_hx, input_size, hidden_size)
test(nn.LSTMCell, input, (bad_hx, good_hx), input_size, hidden_size)
test(nn.LSTMCell, input, (good_hx, bad_hx), input_size, hidden_size)
hidden_size = 20
input_size = 10
input = torch.randn(3, input_size)
bad_hx = torch.randn(1, hidden_size)
good_hx = torch.randn(3, hidden_size)
# Test hidden/input batch size broadcasting
test_all(hidden_size, bad_hx, good_hx, input_size, input)
# Test hx's hidden_size vs module's hidden_size broadcasting
bad_hx = torch.randn(3, 1)
test_all(hidden_size, bad_hx, good_hx, input_size, input)
# Test input's input_size vs module's input_size broadcasting
bad_input = torch.randn(3, 1)
test_all(hidden_size, good_hx, good_hx, input_size, bad_input)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_native_dropout_corner_case(self):
    """native_dropout must match torch.dropout (values and gradients) at the
    degenerate probabilities p=0 and p=1, in train and eval mode, on both
    CPU and CUDA.

    Fix: uses ``self.assertTrue`` instead of bare ``assert`` so the checks
    are not stripped under ``python -O`` and failures are reported through
    the TestCase machinery.
    """
    for train in [True, False]:
        for p in [0.0, 1.0]:
            for device in ["cuda", "cpu"]:
                x = torch.randn(5).to(device=device).requires_grad_()
                # independent leaf sharing the same values
                x_ref = x.detach().requires_grad_()
                o = torch.native_dropout(x, p, train)[0]
                o_ref = torch.dropout(x_ref, p, train)
                o.sum().backward()
                o_ref.sum().backward()
                self.assertTrue(o.equal(o_ref))
                self.assertTrue(x.grad.equal(x_ref.grad))
def test_invalid_dropout_p(self):
v = torch.ones(1)
self.assertRaises(ValueError, lambda: nn.Dropout(-0.1))
self.assertRaises(ValueError, lambda: nn.Dropout(1.1))
self.assertRaises(ValueError, lambda: nn.Dropout1d(-0.1))
self.assertRaises(ValueError, lambda: nn.Dropout1d(1.1))
self.assertRaises(ValueError, lambda: nn.Dropout2d(-0.1))
self.assertRaises(ValueError, lambda: nn.Dropout2d(1.1))
self.assertRaises(ValueError, lambda: nn.Dropout3d(-0.1))
self.assertRaises(ValueError, lambda: nn.Dropout3d(1.1))
self.assertRaises(ValueError, lambda: F.dropout(v, -0.1))
self.assertRaises(ValueError, lambda: F.dropout(v, 1.1))
def test_pad_sequence(self):
    """pad_sequence: padding value, batch_first handling, already-sorted
    input, and higher-rank sequences checked against a manual reference."""
    def pad(tensor, length):
        # Reference padding: append `length - len` rows of zeros along dim 0.
        return torch.cat(
            [tensor.data, tensor.data.new(
                length - tensor.size(0), *tensor.size()[1:]).zero_()])
    # single dimensional
    a = torch.tensor([1, 2, 3])
    b = torch.tensor([4, 5])
    c = torch.tensor([6])
    # batch_first = true
    expected = torch.tensor([[4, 5, 0], [1, 2, 3], [6, 0, 0]])
    padded = rnn_utils.pad_sequence([b, a, c], True)
    self.assertEqual(padded, expected)
    # batch_first = false
    padded = rnn_utils.pad_sequence([b, a, c])
    self.assertEqual(padded, expected.transpose(0, 1))
    # pad with non-zero value
    expected = torch.tensor([[4, 5, 1], [1, 2, 3], [6, 1, 1]])
    padded = rnn_utils.pad_sequence([b, a, c], True, 1)
    self.assertEqual(padded, expected)
    # Test pad sorted sequence
    expected = torch.tensor([[1, 2, 3], [4, 5, 0], [6, 0, 0]])
    padded = rnn_utils.pad_sequence([a, b, c], True)
    self.assertEqual(padded, expected)
    # more dimensions
    maxlen = 9
    for num_dim in (0, 1, 2, 3):
        sequences = []
        trailing_dims = [4] * num_dim
        for i in range(1, maxlen + 1):
            seq_len = i * i  # distinct, strictly increasing lengths
            sequences.append(torch.rand(seq_len, 5, *trailing_dims))
        random.shuffle(sequences)
        expected = []
        for seq in sequences:
            expected.append(pad(seq, maxlen * maxlen))
        # batch first = true
        expected = torch.stack(expected)
        padded = rnn_utils.pad_sequence(sequences, True)
        self.assertEqual(padded, expected)
        # batch first = false
        padded = rnn_utils.pad_sequence(sequences)
        self.assertEqual(padded, expected.transpose(0, 1))
def test_unpad_sequence(self):
    """unpad_sequence must invert pad_sequence for both batch_first layouts.

    Fix: the multi-dimensional section previously reused the stale
    ``batch_first`` variable left over from the 1-D loop (always ``False``
    after it finished), so the ``batch_first=True`` layout was never
    exercised for higher-rank inputs. It now loops over both layouts.
    """
    # single dimensional
    a = torch.tensor([1, 2, 3])
    b = torch.tensor([4, 5])
    c = torch.tensor([6])
    sequences = [a, b, c]
    lengths = torch.as_tensor([v.size(0) for v in sequences])
    for batch_first in [True, False]:
        padded_sequences = rnn_utils.pad_sequence(sequences, batch_first=batch_first)
        unpadded_sequences = rnn_utils.unpad_sequence(padded_sequences, lengths, batch_first=batch_first)
        self.assertEqual(sequences, unpadded_sequences)
    # more dimensions
    maxlen = 9
    for num_dim in (0, 1, 2, 3):
        sequences = []
        trailing_dims = [4] * num_dim
        for i in range(1, maxlen + 1):
            seq_len = i * i  # distinct lengths
            sequences.append(torch.rand(seq_len, 5, *trailing_dims))
        random.shuffle(sequences)
        lengths = torch.as_tensor([v.size(0) for v in sequences])
        # exercise both layouts (previously only the stale loop value)
        for batch_first in [True, False]:
            padded_sequences = rnn_utils.pad_sequence(sequences, batch_first=batch_first)
            unpadded_sequences = rnn_utils.unpad_sequence(padded_sequences, lengths, batch_first=batch_first)
            self.assertEqual(sequences, unpadded_sequences)
def test_pack_sequence(self):
    """pack_sequence: packed data layout, sorted/unsorted index bookkeeping,
    enforce_sorted error paths, and round-trip agreement with the
    pad_sequence / pack_padded_sequence utilities."""
    def _compatibility_test(sequences, lengths, batch_first, enforce_sorted=False):
        # pack_sequence must agree with pad_sequence + pack_padded_sequence,
        # and pad_packed_sequence must invert it.
        padded = rnn_utils.pad_sequence(sequences, batch_first)
        packed = rnn_utils.pack_sequence(sequences, enforce_sorted)
        unpacked = rnn_utils.pad_packed_sequence(packed, batch_first)
        self.assertEqual(padded, unpacked[0])
        pack_padded = rnn_utils.pack_padded_sequence(
            padded, lengths, batch_first, enforce_sorted)
        self.assertEqual(packed, pack_padded)
    # single dimensional
    a = torch.tensor([1, 2, 3])
    b = torch.tensor([4, 5])
    c = torch.tensor([6])
    packed = rnn_utils.pack_sequence([a, b, c], enforce_sorted=False)
    expected = torch.tensor([1, 4, 6, 2, 5, 3])  # time-major interleaving
    self.assertEqual(packed.batch_sizes, [3, 2, 1])
    self.assertEqual(packed.data.data, expected)
    self.assertEqual(packed.sorted_indices, [0, 1, 2])
    self.assertEqual(packed.unsorted_indices, [0, 1, 2])
    # unsorted input: same packed data, permuted index bookkeeping
    packed_unsorted = rnn_utils.pack_sequence([b, c, a], enforce_sorted=False)
    self.assertEqual(packed_unsorted.batch_sizes, [3, 2, 1])
    self.assertEqual(packed_unsorted.data.data, expected)
    self.assertEqual(packed_unsorted.sorted_indices, [2, 0, 1])
    self.assertEqual(packed_unsorted.unsorted_indices, [1, 2, 0])
    # single dimensional, enforce_sorted = True
    packed_enforce_sorted = rnn_utils.pack_sequence([a, b, c], enforce_sorted=True)
    self.assertEqual(packed_enforce_sorted.batch_sizes, [3, 2, 1])
    self.assertEqual(packed_enforce_sorted.data.data, expected)
    # with enforce_sorted, no permutation bookkeeping is kept
    self.assertTrue(packed_enforce_sorted.sorted_indices is None)
    self.assertTrue(packed_enforce_sorted.unsorted_indices is None)
    with self.assertRaisesRegex(RuntimeError, 'must be sorted in decreasing order'):
        rnn_utils.pack_sequence([b, c, a], enforce_sorted=True)
    with self.assertRaisesRegex(RuntimeError, 'You can pass `enforce_sorted=False`'):
        rnn_utils.pack_sequence([b, c, a], enforce_sorted=True)
    # more dimensions
    maxlen = 9
    for num_dim in (0, 1, 2, 3):
        sequences = []
        lengths = []
        trailing_dims = [4] * num_dim
        for i in range(maxlen, 0, -1):  # built in decreasing-length order
            seq_len = i * i
            lengths.append(seq_len)
            sequences.append(torch.rand(seq_len, 5, *trailing_dims))
        unsorted_sequences = [s.clone() for s in sequences]
        random.shuffle(unsorted_sequences)
        unsorted_sequences_lengths = [t.size(0) for t in unsorted_sequences]
        # compatibility with other utilities
        for batch_first in (True, False):
            for enforce_sorted in (True, False):
                _compatibility_test(sequences, lengths, batch_first, enforce_sorted)
            _compatibility_test(unsorted_sequences, unsorted_sequences_lengths,
                                batch_first)
def test_unpack_sequence(self):
    """unpack_sequence must recover the original list of tensors from a
    PackedSequence, for 1-D and higher-rank, shuffled inputs."""
    def roundtrip(seqs):
        packed = rnn_utils.pack_sequence(seqs, enforce_sorted=False)
        self.assertEqual(seqs, rnn_utils.unpack_sequence(packed))

    # single dimensional
    roundtrip([torch.tensor([1, 2, 3]), torch.tensor([4, 5]), torch.tensor([6])])

    # more dimensions
    maxlen = 9
    for num_dim in (0, 1, 2, 3):
        trailing_dims = [4] * num_dim
        seqs = [torch.rand(i * i, 5, *trailing_dims) for i in range(1, maxlen + 1)]
        random.shuffle(seqs)
        roundtrip(seqs)
def test_pack_padded_sequence(self):
    """pack_padded_sequence: packed data layout, batch_sizes, unsorted index
    handling, the pad_packed_sequence inverse, gradients, and error paths."""
    def generate_test_case(sorted_lengths, should_shuffle):
        def pad(tensor, length):
            # Zero-pad `tensor` along dim 0 up to `length`.
            return torch.cat([tensor, tensor.new(length - tensor.size(0), *tensor.size()[1:]).zero_()])
        max_length = sorted_lengths[0]
        # batch_sizes[t] = number of sequences still active at time step t+1
        batch_sizes = [sum(map(bool, filter(lambda x: x >= i, sorted_lengths)))
                       for i in range(1, max_length + 1)]
        offset = 0  # NOTE(review): appears unused
        # Sequence i carries values i*100 + 1..5l so every (step, batch)
        # cell is unique and the expected packed layout is derivable.
        padded = torch.cat([pad(i * 100 + torch.arange(1., 5 * l + 1).view(l, 1, 5), max_length)
                            for i, l in enumerate(sorted_lengths, 1)], 1)
        expected_data = [[torch.arange(1., 6) + (i + 1) * 100 + 5 * n for i in range(batch_size)]
                         for n, batch_size in enumerate(batch_sizes)]
        expected_data = list(itertools.chain.from_iterable(expected_data))
        expected_data = torch.stack(expected_data, dim=0)
        if should_shuffle:
            # Shuffle the padded sequence to create an unsorted sequence
            permutation = list(range(len(sorted_lengths)))
            random.shuffle(permutation)
            unsorted_indices = torch.tensor(permutation)
            padded = padded.index_select(1, unsorted_indices)
            lengths = torch.tensor(sorted_lengths).index_select(0, unsorted_indices)
        else:
            unsorted_indices = None
            lengths = sorted_lengths
        return padded.requires_grad_(), lengths, expected_data, batch_sizes, unsorted_indices
    test_cases = [
        # sorted_lengths, should_shuffle
        [[10, 8, 4, 2, 2, 2, 1], False],
        [[11, 10, 8, 6, 4, 3, 1], False],
        [[11, 10, 8, 6, 4, 3, 1], True],
    ]
    for test_case, batch_first in itertools.product(test_cases, (True, False)):
        sorted_lengths, should_shuffle = test_case
        padded, lengths, expected_data, batch_sizes, unsorted_indices = generate_test_case(
            sorted_lengths, should_shuffle)
        src = padded
        if batch_first:
            src = src.transpose(0, 1)
        # check output
        packed = rnn_utils.pack_padded_sequence(src, lengths, batch_first=batch_first,
                                                enforce_sorted=not should_shuffle)
        self.assertEqual(packed.data.data, expected_data)
        self.assertEqual(packed.batch_sizes, batch_sizes)
        self.assertEqual(packed.unsorted_indices, unsorted_indices)
        # test inverse
        unpacked, unpacked_len = rnn_utils.pad_packed_sequence(packed, batch_first=batch_first)
        self.assertEqual(unpacked, src)
        self.assertEqual(unpacked_len, lengths)
        # check grad
        if padded.grad is not None:
            padded.grad.data.zero_()
        grad_output = unpacked.data.clone().normal_()
        unpacked.backward(grad_output)
        if batch_first:
            grad_output.transpose_(0, 1)
        for i, l in enumerate(lengths):
            # gradient flows through the real (unpadded) part of each row
            self.assertEqual(padded.grad.data[:l, i], grad_output[:l, i])
            # NOTE(review): threshold 10 means rows with l >= 10 skip the
            # zero-gradient check on the padding — confirm this is intended
            # given max lengths of 10/11 above.
            if l < 10:
                self.assertEqual(padded.grad.data[l:, i].abs().sum(), 0)
    # test error messages
    with self.assertRaisesRegex(RuntimeError, 'You can pass `enforce_sorted=False`'):
        packed = rnn_utils.pack_padded_sequence(torch.randn(3, 3), [1, 3, 2])
    with self.assertRaisesRegex(RuntimeError, 'empty tensor'):
        packed = rnn_utils.pack_padded_sequence(torch.randn(0, 0), [])
def test_LSTM_cell(self):
# this is just a smoke test; these modules are implemented through
# autograd so no Jacobian test is needed
for bias in (True, False):
input = torch.randn(3, 10)
hx = torch.randn(3, 20)
cx = torch.randn(3, 20)
lstm = nn.LSTMCell(10, 20, bias=bias)
for _ in range(6):
hx, cx = lstm(input, (hx, cx))
(hx + cx).sum().backward()
def test_LSTM_cell_forward_input_size(self):
input = torch.randn(3, 11)
hx = torch.randn(3, 20)
cx = torch.randn(3, 20)
lstm = nn.LSTMCell(10, 20)
self.assertRaises(Exception, lambda: lstm(input, (hx, cx)))
def test_LSTM_cell_forward_hidden_size(self):
input = torch.randn(3, 10)
hx = torch.randn(3, 21)
cx = torch.randn(3, 20)
lstm = nn.LSTMCell(10, 20)
self.assertRaises(Exception, lambda: lstm(input, (hx, cx)))
self.assertRaises(Exception, lambda: lstm(input, (cx, hx)))
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_pack_sequence_batch_sizes_throw(self):
    # Constructing a PackedSequence with batch_sizes on CUDA must raise:
    # the LSTM is never actually run; the PackedSequence constructor is
    # what should reject the CUDA batch_sizes tensor.
    with self.assertRaisesRegex(ValueError, r"batch_sizes should always be on CPU"):
        m = nn.LSTM(3, 4, bidirectional=True, num_layers=2).to('cuda')
        a = torch.rand(5, 3, device='cuda')
        b = torch.tensor([1, 1, 1, 1, 1], device='cuda')
        input = nn.utils.rnn.PackedSequence(a, b)
def test_Transformer_cell(self):
    # this is just a smoke test; these modules are implemented through
    # autograd so no Jacobian test is needed
    d_model = 512
    nhead = 16
    num_encoder_layers = 4
    num_decoder_layers = 3
    dim_feedforward = 256
    dropout = 0.3
    bsz = 8
    seq_length = 35
    tgt_length = 15
    # zip pairs batch_first=True with (batch, seq, feature)-shaped inputs
    # and batch_first=False with (seq, batch, feature)-shaped inputs.
    for batch_first, src_size, tgt_size in zip((True, False),
                                               [(bsz, seq_length, d_model),
                                                (seq_length, bsz, d_model)],
                                               [(bsz, tgt_length, d_model),
                                                (tgt_length, bsz, d_model)]):
        transformer = nn.Transformer(d_model, nhead, num_encoder_layers, num_decoder_layers,
                                     dim_feedforward, dropout, batch_first=batch_first)
        src = torch.randn(src_size)
        src_mask = transformer.generate_square_subsequent_mask(seq_length).double()
        tgt = torch.randn(tgt_size)
        tgt_mask = transformer.generate_square_subsequent_mask(tgt_length).double()
        memory_mask = torch.randn(tgt_length, seq_length).double()
        # random boolean key-padding masks for every attention input
        src_key_padding_mask = torch.rand(bsz, seq_length) >= 0.5
        tgt_key_padding_mask = torch.rand(bsz, tgt_length) >= 0.5
        memory_key_padding_mask = torch.rand(bsz, seq_length) >= 0.5
        # forward with every mask kind supplied, then backprop (smoke test)
        output = transformer(src, tgt,
                             src_mask=src_mask,
                             tgt_mask=tgt_mask,
                             memory_mask=memory_mask,
                             src_key_padding_mask=src_key_padding_mask,
                             tgt_key_padding_mask=tgt_key_padding_mask,
                             memory_key_padding_mask=memory_key_padding_mask)
        output.sum().backward()
def test_transformerdecoderlayer(self):
    # this is a deterministic test for TransformerDecoderLayer
    d_model = 4
    nhead = 2
    dim_feedforward = 16
    dropout = 0.0  # disabled so outputs are reproducible
    bsz = 2
    seq_length = 5
    tgt_length = 3
    for batch_first in (False, True):
        def perm_fn(x):
            # Reference tensors below are written seq-first; transpose them
            # when the layer runs in batch_first mode.
            return x.transpose(1, 0) if batch_first else x
        model = nn.TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout,
                                           batch_first=batch_first)
        # set constant weights of the model
        for idx, p in enumerate(model.parameters()):
            x = p.data
            sz = x.view(-1).size(0)
            shape = x.shape
            x = torch.cos(torch.arange(0, sz).float().view(shape))
            p.data.copy_(x)
        # deterministic input
        decoder_input = torch.tensor([[[20., 30., 40., 50.]]])
        memory_input = torch.tensor([[[60., 70., 80., 90.]]])
        result = model(decoder_input, memory_input)
        ref_output = torch.tensor([[[2.314351, 0.094805, -0.671322, 0.101977]]])
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
        # deterministic input
        decoder_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                              [[11., 12., 13., 14.]]]))
        memory_input = torch.tensor([[[1., 2., 3., 4.]]])
        result = model(decoder_input, memory_input)
        result = result.detach().numpy()
        ref_output = perm_fn(torch.tensor([[[2.422245, 0.051716, -0.606338, -0.024756]],
                                           [[2.422245, 0.051716, -0.606338, -0.024756]]]))
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
        # deterministic input
        decoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
                                              [[5., 6., 7., 8.]]]))
        memory_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                             [[11., 12., 13., 14.]]]))
        result = model(decoder_input, memory_input)
        ref_output = perm_fn(torch.tensor([[[2.343536, 0.085561, -0.654954, 0.074991]],
                                           [[2.343536, 0.085561, -0.654954, 0.074991]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
        # deterministic input
        decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
                                               [0.2678, 0.3677, 0.4459, 0.7166]],
                                              [[0.8100, 0.3716, 0.4096, 0.1976],
                                               [0.6958, 0.8844, 0.6081, 0.8315]],
                                              [[0.0494, 0.9343, 0.5955, 0.3830],
                                               [0.5404, 0.3464, 0.9378, 0.6200]]]))
        memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                              [0.5387, 0.1655, 0.3565, 0.0471]],
                                             [[0.8335, 0.2799, 0.5031, 0.2947],
                                              [0.1402, 0.0318, 0.7636, 0.1346]],
                                             [[0.6333, 0.9344, 0.1376, 0.9938],
                                              [0.8924, 0.2872, 0.6692, 0.2944]],
                                             [[0.9897, 0.6915, 0.3154, 0.1733],
                                              [0.8645, 0.3513, 0.3064, 0.0767]],
                                             [[0.8117, 0.2366, 0.4838, 0.7881],
                                              [0.3718, 0.4945, 0.9511, 0.0864]]]))
        result = model(decoder_input, memory_input)
        ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
                                            [2.431935, 0.028907, -0.599809, -0.072488]],
                                           [[2.428457, 0.027053, -0.602275, -0.073462],
                                            [2.431970, 0.029387, -0.599789, -0.071621]],
                                           [[2.431934, 0.028196, -0.599802, -0.073809],
                                            [2.432306, 0.028858, -0.599542, -0.072846]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
        # key_padding_mask
        # an all-False mask must reproduce the unmasked reference exactly
        key_padding_mask = torch.zeros(2, 3) == 1
        result = model(decoder_input, memory_input, tgt_key_padding_mask=key_padding_mask)
        ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
                                            [2.431935, 0.028907, -0.599809, -0.072488]],
                                           [[2.428457, 0.027053, -0.602275, -0.073462],
                                            [2.431970, 0.029387, -0.599789, -0.071621]],
                                           [[2.431934, 0.028196, -0.599802, -0.073809],
                                            [2.432306, 0.028858, -0.599542, -0.072846]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
        # key_padding_mask
        # masking some target positions changes the expected values
        key_padding_mask[0, 2] = 1
        key_padding_mask[1, 1] = 1
        key_padding_mask[1, 2] = 1
        result = model(decoder_input, memory_input, tgt_key_padding_mask=key_padding_mask)
        ref_output = perm_fn(torch.tensor([[[2.430025, 0.027643, -0.601164, -0.073476],
                                            [2.4323, 0.029375, -0.599553, -0.071881]],
                                           [[2.428523, 0.026838, -0.602226, -0.07391],
                                            [2.432634, 0.029842, -0.599318, -0.071253]],
                                           [[2.432278, 0.028152, -0.599555, -0.074139],
                                            [2.432659, 0.029244, -0.599294, -0.072382]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
        # memory_key_padding_mask
        # again: an all-False memory mask must match the unmasked reference
        key_padding_mask = torch.zeros(2, 5) == 1
        result = model(decoder_input, memory_input, memory_key_padding_mask=key_padding_mask)
        ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
                                            [2.431935, 0.028907, -0.599809, -0.072488]],
                                           [[2.428457, 0.027053, -0.602275, -0.073462],
                                            [2.431970, 0.029387, -0.599789, -0.071621]],
                                           [[2.431934, 0.028196, -0.599802, -0.073809],
                                            [2.432306, 0.028858, -0.599542, -0.072846]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
        # memory_key_padding_mask
        # masking some memory positions changes the expected values
        key_padding_mask[0, 4] = 1
        key_padding_mask[1, 3] = 1
        key_padding_mask[1, 4] = 1
        result = model(decoder_input, memory_input, memory_key_padding_mask=key_padding_mask)
        ref_output = perm_fn(torch.tensor([[[2.429757, 0.027358, -0.601351, -0.073816],
                                            [2.432692, 0.028583, -0.599263, -0.073634]],
                                           [[2.428247, 0.02662, -0.602419, -0.074123],
                                            [2.432657, 0.029055, -0.599293, -0.072732]],
                                           [[2.431515, 0.027687, -0.600096, -0.074459],
                                            [2.433075, 0.028543, -0.598987, -0.073985]]]))
        result = result.detach().numpy()
        ref_output = ref_output.detach().numpy()
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        np.testing.assert_allclose(result, ref_output, atol=1e-5)
def test_transformerdecoderlayer_gelu(self):
    # this is a deterministic test for TransformerDecoderLayer with gelu activation
    d_model = 4
    nhead = 2
    dim_feedforward = 16
    dropout = 0.0  # disabled so outputs are reproducible
    bsz = 2
    seq_length = 5
    tgt_length = 3
    # the three activation spellings ('gelu', F.gelu, nn.GELU()) must agree
    for activation, batch_first in product(('gelu', F.gelu, nn.GELU()), (True, False)):
        def perm_fn(x):
            # Reference tensors below are seq-first; transpose for batch_first.
            return x.transpose(1, 0) if batch_first else x
        model = nn.TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout,
                                           activation, batch_first=batch_first)
        # set constant weights of the model
        for idx, p in enumerate(model.parameters()):
            x = p.data
            sz = x.view(-1).size(0)
            shape = x.shape
            x = torch.cos(torch.arange(0, sz).float().view(shape))
            p.data.copy_(x)
        # deterministic input
        decoder_input = torch.tensor([[[20., 30., 40., 50.]]])
        memory_input = torch.tensor([[[60., 70., 80., 90.]]])
        result = model(decoder_input, memory_input)
        ref_output = torch.tensor([[[2.306435, 0.095946, -0.675796, 0.10687]]])
        torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)
        # deterministic input
        decoder_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                              [[11., 12., 13., 14.]]]))
        memory_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]]]))
        result = model(decoder_input, memory_input)
        ref_output = perm_fn(torch.tensor([[[2.415448, 0.054389, -0.610932, -0.0156613]],
                                           [[2.415448, 0.054389, -0.610932, -0.0156613]]]))
        torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)
        # deterministic input
        decoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
                                              [[5., 6., 7., 8.]]]))
        memory_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                             [[11., 12., 13., 14.]]]))
        result = model(decoder_input, memory_input)
        ref_output = perm_fn(torch.tensor([[[2.338531, 0.087709, -0.65776, 0.080646]],
                                           [[2.338531, 0.087709, -0.65776, 0.080646]]]))
        torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)
        # deterministic input
        decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
                                               [0.2678, 0.3677, 0.4459, 0.7166]],
                                              [[0.8100, 0.3716, 0.4096, 0.1976],
                                               [0.6958, 0.8844, 0.6081, 0.8315]],
                                              [[0.0494, 0.9343, 0.5955, 0.3830],
                                               [0.5404, 0.3464, 0.9378, 0.6200]]]))
        memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                              [0.5387, 0.1655, 0.3565, 0.0471]],
                                             [[0.8335, 0.2799, 0.5031, 0.2947],
                                              [0.1402, 0.0318, 0.7636, 0.1346]],
                                             [[0.6333, 0.9344, 0.1376, 0.9938],
                                              [0.8924, 0.2872, 0.6692, 0.2944]],
                                             [[0.9897, 0.6915, 0.3154, 0.1733],
                                              [0.8645, 0.3513, 0.3064, 0.0767]],
                                             [[0.8117, 0.2366, 0.4838, 0.7881],
                                              [0.3718, 0.4945, 0.9511, 0.0864]]]))
        result = model(decoder_input, memory_input)
        ref_output = perm_fn(torch.tensor([[[2.42049104, 0.03443088, -0.60793706, -0.05436271],
                                            [2.42210631, 0.03546578, -0.60679895, -0.05357488]],
                                           [[2.41907674, 0.0336104, -0.60892977, -0.05490462],
                                            [2.42216881, 0.03586554, -0.6067524, -0.05289126]],
                                           [[2.42205716, 0.03488046, -0.60683681, -0.05460596],
                                            [2.42240309, 0.0354595, -0.60659063, -0.05378816]]]))
        torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)
def test_transformerdecoder(self):
    """Deterministic numerical test for nn.TransformerDecoder.

    Every decoder-layer parameter is overwritten with cos(arange) and
    dropout is disabled, so forward passes are reproducible and can be
    compared against hard-coded reference tensors.  Exercises 1-, 2- and
    6-layer stacks, an optional final LayerNorm, relu and gelu
    activations, tgt/memory key-padding masks, and both batch_first
    layouts.
    """
    def get_a_test_layer(use_cuda, activation, batch_first=False):
        # Build a tiny decoder layer (d_model=4, nhead=2, dropout=0) whose
        # weights are all deterministic.
        d_model = 4
        nhead = 2
        dim_feedforward = 16
        dropout = 0.0
        device = torch.device("cuda" if use_cuda else "cpu")
        layer = nn.TransformerDecoderLayer(
            d_model,
            nhead,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            activation=activation,
            batch_first=batch_first).to(device)
        with torch.no_grad():
            # set constant weights of the model
            for idx, p in enumerate(layer.parameters()):
                x = p.data
                sz = x.view(-1).size(0)
                shape = x.shape
                x = torch.cos(torch.arange(0, sz).float().view(shape))
                p.data.copy_(x)
        return layer
    # this is a deterministic test for TransformerDecoder
    for batch_first in (False, True):
        def perm_fn(x):
            # Reference data below is written seq-first; transpose it when
            # the model under test expects batch-first inputs.
            return x.transpose(1, 0) if batch_first else x
        activation = F.relu
        use_cuda = torch.cuda.is_available()
        device = torch.device("cuda" if use_cuda else "cpu")
        decoder_layer = get_a_test_layer(use_cuda=use_cuda, activation=activation,
                                         batch_first=batch_first)
        model = nn.TransformerDecoder(decoder_layer, 1).to(device)
        # deterministic input
        decoder_input = torch.tensor([[[20., 30., 40., 50.]]]).to(device)
        memory_input = torch.tensor([[[60., 70., 80., 90.]]]).to(device)
        result = model(decoder_input, memory_input)
        ref_output = torch.tensor(
            [[[2.314351, 0.094805, -0.671322, 0.101977]]]).to(device)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-3)
        # deterministic input
        decoder_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                              [[11., 12., 13., 14.]]])).to(device)
        memory_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]]])).to(device)
        result = model(decoder_input, memory_input)
        ref_output = perm_fn(torch.tensor([[[2.422245, 0.051716, -0.606338, -0.024756]],
                                           [[2.422245, 0.051716, -0.606338, -0.024756]]]
                                          )).to(device)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-4)
        # deterministic input
        decoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
                                              [[5., 6., 7., 8.]]])).to(device)
        memory_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                             [[11., 12., 13., 14.]]])).to(device)
        result = model(decoder_input, memory_input)
        ref_output = perm_fn(torch.tensor([[[2.343536, 0.085561, -0.654954, 0.074991]],
                                           [[2.343536, 0.085561, -0.654954, 0.074991]]]
                                          )).to(device)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-4)
        # deterministic input
        decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
                                               [0.2678, 0.3677, 0.4459, 0.7166]],
                                              [[0.8100, 0.3716, 0.4096, 0.1976],
                                               [0.6958, 0.8844, 0.6081, 0.8315]],
                                              [[0.0494, 0.9343, 0.5955, 0.3830],
                                               [0.5404, 0.3464, 0.9378, 0.6200]]]
                                             )).to(device)
        memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                              [0.5387, 0.1655, 0.3565, 0.0471]],
                                             [[0.8335, 0.2799, 0.5031, 0.2947],
                                              [0.1402, 0.0318, 0.7636, 0.1346]],
                                             [[0.6333, 0.9344, 0.1376, 0.9938],
                                              [0.8924, 0.2872, 0.6692, 0.2944]],
                                             [[0.9897, 0.6915, 0.3154, 0.1733],
                                              [0.8645, 0.3513, 0.3064, 0.0767]],
                                             [[0.8117, 0.2366, 0.4838, 0.7881],
                                              [0.3718, 0.4945, 0.9511, 0.0864]]]
                                            )).to(device)
        result = model(decoder_input, memory_input)
        ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
                                            [2.431935, 0.028907, -0.599809, -0.072488]],
                                           [[2.428457, 0.027053, -0.602275, -0.073462],
                                            [2.431970, 0.029387, -0.599789, -0.071621]],
                                           [[2.431934, 0.028196, -0.599802, -0.073809],
                                            [2.432306, 0.028858, -0.599542, -0.072846]]]
                                          )).to(device)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
        # key_padding_mask: all-False mask must reproduce the unmasked output
        key_padding_mask = torch.zeros(2, 3).to(device) == 1
        result = model(decoder_input, memory_input,
                       tgt_key_padding_mask=key_padding_mask)
        ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
                                            [2.431935, 0.028907, -0.599809, -0.072488]],
                                           [[2.428457, 0.027053, -0.602275, -0.073462],
                                            [2.431970, 0.029387, -0.599789, -0.071621]],
                                           [[2.431934, 0.028196, -0.599802, -0.073809],
                                            [2.432306, 0.028858, -0.599542, -0.072846]]]
                                          )).to(device)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
        # key_padding_mask: mask out some target positions
        key_padding_mask[0, 2] = 1
        key_padding_mask[1, 1] = 1
        key_padding_mask[1, 2] = 1
        result = model(decoder_input, memory_input,
                       tgt_key_padding_mask=key_padding_mask)
        ref_output = perm_fn(torch.tensor([[[2.430025, 0.027643, -0.601164, -0.073476],
                                            [2.4323, 0.029375, -0.599553, -0.071881]],
                                           [[2.428523, 0.026838, -0.602226, -0.07391],
                                            [2.432634, 0.029842, -0.599318, -0.071253]],
                                           [[2.432278, 0.028152, -0.599555, -0.074139],
                                            [2.432659, 0.029244, -0.599294, -0.072382]]]
                                          )).to(device)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
        # memory_key_padding_mask: all-False mask must reproduce unmasked output
        key_padding_mask = torch.zeros(2, 5).to(device) == 1
        result = model(decoder_input, memory_input,
                       memory_key_padding_mask=key_padding_mask)
        ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
                                            [2.431935, 0.028907, -0.599809, -0.072488]],
                                           [[2.428457, 0.027053, -0.602275, -0.073462],
                                            [2.431970, 0.029387, -0.599789, -0.071621]],
                                           [[2.431934, 0.028196, -0.599802, -0.073809],
                                            [2.432306, 0.028858, -0.599542, -0.072846]]]
                                          )).to(device)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
        # memory_key_padding_mask: mask out some memory positions
        key_padding_mask[0, 4] = 1
        key_padding_mask[1, 3] = 1
        key_padding_mask[1, 4] = 1
        result = model(decoder_input,
                       memory_input,
                       memory_key_padding_mask=key_padding_mask)
        ref_output = perm_fn(torch.tensor([[[2.429757, 0.027358, -0.601351, -0.073816],
                                            [2.432692, 0.028583, -0.599263, -0.073634]],
                                           [[2.428247, 0.02662, -0.602419, -0.074123],
                                            [2.432657, 0.029055, -0.599293, -0.072732]],
                                           [[2.431515, 0.027687, -0.600096, -0.074459],
                                            [2.433075, 0.028543, -0.598987, -0.073985]]]
                                          )).to(device)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
        # multiple layers no norm
        model = nn.TransformerDecoder(decoder_layer, 2).to(device)
        # deterministic input
        decoder_input = torch.tensor([[[20., 30., 40., 50.]]]).to(device)
        memory_input = torch.tensor([[[60., 70., 80., 90.]]]).to(device)
        result = model(decoder_input, memory_input)
        ref_output = torch.tensor(
            [[[2.31316, 0.0950293, -0.671995, 0.102802]]]).to(device)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-3)
        # multiple layers no norm
        model = nn.TransformerDecoder(decoder_layer, 6).to(device)
        # deterministic input
        decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
                                               [0.2678, 0.3677, 0.4459, 0.7166]],
                                              [[0.8100, 0.3716, 0.4096, 0.1976],
                                               [0.6958, 0.8844, 0.6081, 0.8315]],
                                              [[0.0494, 0.9343, 0.5955, 0.3830],
                                               [0.5404, 0.3464, 0.9378, 0.6200]]]
                                             )).to(device)
        memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                              [0.5387, 0.1655, 0.3565, 0.0471]],
                                             [[0.8335, 0.2799, 0.5031, 0.2947],
                                              [0.1402, 0.0318, 0.7636, 0.1346]],
                                             [[0.6333, 0.9344, 0.1376, 0.9938],
                                              [0.8924, 0.2872, 0.6692, 0.2944]],
                                             [[0.9897, 0.6915, 0.3154, 0.1733],
                                              [0.8645, 0.3513, 0.3064, 0.0767]],
                                             [[0.8117, 0.2366, 0.4838, 0.7881],
                                              [0.3718, 0.4945, 0.9511, 0.0864]]]
                                            )).to(device)
        result = model(decoder_input, memory_input)
        ref_output = perm_fn(torch.tensor([[[2.42794, 0.026164, -0.60263, -0.0747591],
                                            [2.43113, 0.0279516, -0.600376, -0.0736896]],
                                           [[2.42794, 0.026164, -0.60263, -0.0747591],
                                            [2.43113, 0.0279516, -0.600376, -0.0736896]],
                                           [[2.42794, 0.026164, -0.60263, -0.0747591],
                                            [2.43113, 0.0279516, -0.600376, -0.0736896]]]
                                          )).to(device)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
        # multiple layers with norm
        # d_model = 4
        norm = nn.LayerNorm(4)
        model = nn.TransformerDecoder(decoder_layer, 2, norm=norm).to(device)
        # deterministic input
        decoder_input = torch.tensor([[[20., 30., 40., 50.]]]).to(device)
        memory_input = torch.tensor([[[60., 70., 80., 90.]]]).to(device)
        result = model(decoder_input, memory_input)
        ref_output = torch.tensor(
            [[[1.66166, -0.326986, -1.01466, -0.320017]]]).to(device)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-3)
        # multiple layers with norm
        model = nn.TransformerDecoder(decoder_layer, 6, norm=norm).to(device)
        # deterministic input
        decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
                                               [0.2678, 0.3677, 0.4459, 0.7166]],
                                              [[0.8100, 0.3716, 0.4096, 0.1976],
                                               [0.6958, 0.8844, 0.6081, 0.8315]],
                                              [[0.0494, 0.9343, 0.5955, 0.3830],
                                               [0.5404, 0.3464, 0.9378, 0.6200]]]
                                             )).to(device)
        memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                              [0.5387, 0.1655, 0.3565, 0.0471]],
                                             [[0.8335, 0.2799, 0.5031, 0.2947],
                                              [0.1402, 0.0318, 0.7636, 0.1346]],
                                             [[0.6333, 0.9344, 0.1376, 0.9938],
                                              [0.8924, 0.2872, 0.6692, 0.2944]],
                                             [[0.9897, 0.6915, 0.3154, 0.1733],
                                              [0.8645, 0.3513, 0.3064, 0.0767]],
                                             [[0.8117, 0.2366, 0.4838, 0.7881],
                                              [0.3718, 0.4945, 0.9511, 0.0864]]]
                                            )).to(device)
        result = model(decoder_input, memory_input)
        ref_output = perm_fn(torch.tensor([[[1.69559, -0.357291, -0.894741, -0.443553],
                                            [1.69571, -0.357363, -0.894154, -0.444196]],
                                           [[1.69559, -0.357291, -0.894741, -0.443553],
                                            [1.69571, -0.357363, -0.894154, -0.444196]],
                                           [[1.69559, -0.357291, -0.894741, -0.443553],
                                            [1.69571, -0.357363, -0.894154, -0.444196]]]
                                          )).to(device)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
        # gelu activation test cases
        activation = "gelu"
        use_cuda = torch.cuda.is_available()
        device = torch.device("cuda" if use_cuda else "cpu")
        decoder_layer = get_a_test_layer(use_cuda=use_cuda, activation=activation,
                                         batch_first=batch_first)
        model = nn.TransformerDecoder(decoder_layer, 1).to(device)
        # deterministic input
        decoder_input = torch.tensor([[[20., 30., 40., 50.]]]).to(device)
        memory_input = torch.tensor([[[60., 70., 80., 90.]]]).to(device)
        result = model(decoder_input, memory_input)
        ref_output = torch.tensor([[[2.306435, 0.095946, -0.675796, 0.10687]]]).to(device)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-3)
        # deterministic input
        decoder_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                              [[11., 12., 13., 14.]]])).to(device)
        memory_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]]])).to(device)
        result = model(decoder_input, memory_input)
        ref_output = perm_fn(torch.tensor([[[2.415448, 0.054389, -0.610932, -0.0156613]],
                                           [[2.415448, 0.054389, -0.610932, -0.0156613]]])).to(device)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-4)
        # deterministic input
        decoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
                                              [[5., 6., 7., 8.]]])).to(device)
        memory_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                             [[11., 12., 13., 14.]]])).to(device)
        result = model(decoder_input, memory_input)
        ref_output = perm_fn(torch.tensor([[[2.338531, 0.087709, -0.65776, 0.080646]],
                                           [[2.338531, 0.087709, -0.65776, 0.080646]]])).to(device)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-4)
        # deterministic input
        decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
                                               [0.2678, 0.3677, 0.4459, 0.7166]],
                                              [[0.8100, 0.3716, 0.4096, 0.1976],
                                               [0.6958, 0.8844, 0.6081, 0.8315]],
                                              [[0.0494, 0.9343, 0.5955, 0.3830],
                                               [0.5404, 0.3464, 0.9378, 0.6200]]]
                                             )).to(device)
        memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                              [0.5387, 0.1655, 0.3565, 0.0471]],
                                             [[0.8335, 0.2799, 0.5031, 0.2947],
                                              [0.1402, 0.0318, 0.7636, 0.1346]],
                                             [[0.6333, 0.9344, 0.1376, 0.9938],
                                              [0.8924, 0.2872, 0.6692, 0.2944]],
                                             [[0.9897, 0.6915, 0.3154, 0.1733],
                                              [0.8645, 0.3513, 0.3064, 0.0767]],
                                             [[0.8117, 0.2366, 0.4838, 0.7881],
                                              [0.3718, 0.4945, 0.9511, 0.0864]]]
                                            )).to(device)
        result = model(decoder_input, memory_input)
        ref_output = perm_fn(torch.tensor([[[2.42049104, 0.03443088, -0.60793706, -0.05436271],
                                            [2.42210631, 0.03546578, -0.60679895, -0.05357488]],
                                           [[2.41907674, 0.0336104, -0.60892977, -0.05490462],
                                            [2.42216881, 0.03586554, -0.6067524, -0.05289126]],
                                           [[2.42205716, 0.03488046, -0.60683681, -0.05460596],
                                            [2.42240309, 0.0354595, -0.60659063, -0.05378816]]]
                                          )).to(device)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
@unittest.skipIf(not (TEST_CUDNN and TEST_MULTIGPU), 'CUDNN or multi-gpu not available')
def test_cudnn_rnn_dropout_states_device(self):
    """Smoke test: a dropout-enabled cuDNN RNN must run on a non-default GPU."""
    gpu_id = 1
    rnn = nn.RNN(10, 20, num_layers=2, dropout=.5)
    seq = torch.randn(5, 4, 10).cuda(gpu_id)
    rnn.cuda(gpu_id)
    h0 = torch.randn(2, 4, 20).cuda(gpu_id)
    rnn(seq, h0)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
@skipIfRocm
def test_cudnn_weight_format(self):
    """After an RNN weight is re-pointed at new storage via set_(), the module
    should warn once that its weights are no longer a single contiguous
    chunk of memory, yet still produce identical outputs and gradients, and
    the weight must keep sharing storage with the tensor it was set to.
    """
    rnns = [
        nn.LSTM(10, 20, batch_first=True),
        nn.LSTM(10, 20, batch_first=True, proj_size=10),
        nn.GRU(10, 20, batch_first=True),
        nn.RNN(10, 20, batch_first=True)
    ]
    first_warn = True
    for rnn in rnns:
        rnn.cuda()
        input = torch.randn(5, 4, 10, requires_grad=True, device="cuda")
        hx = torch.randn(1, 5, 20, requires_grad=True, device="cuda")
        all_vars = [input, hx] + list(rnn.parameters())
        if isinstance(rnn, nn.LSTM):
            # LSTM with projections has different hx size
            if rnn.proj_size > 0:
                hx = torch.randn(1, 5, 10, requires_grad=True, device="cuda")
                all_vars[1] = hx
            cx = torch.randn(1, 5, 20, requires_grad=True, device="cuda")
            all_vars[2:2] = [cx]  # insert cx right after hx in the grad list
            hx = (hx, cx)
        # Baseline run while weights are still a contiguous cuDNN blob.
        output = rnn(input, hx)
        output[0].sum().backward()
        grads = [v.grad.data.clone() for v in all_vars]
        for v in all_vars:
            v.grad.data.zero_()
        # Weights will no longer view onto the same chunk of memory
        weight = all_vars[4]
        weight_data = weight.data.clone()
        with torch.no_grad():
            weight.set_(weight_data)
        for _ in range(2):
            with warnings.catch_warnings(record=True) as w:
                output_noncontig = rnn(input, hx)
            if first_warn:
                # Only the very first non-contiguous forward is expected to warn.
                self.assertEqual(len(w), 1)
                self.assertIn('weights are not part of single contiguous chunk of memory', w[0].message.args[0])
                first_warn = False
                warnings.resetwarnings()
            output_noncontig[0].sum().backward()
            grads_noncontig = [v.grad.data.clone() for v in all_vars]
            for v in all_vars:
                v.grad.data.zero_()
            # Non-contiguous weights must not change results.
            self.assertEqual(output, output_noncontig)
            self.assertEqual(grads_noncontig, grads)
        # Make sure these still share storage
        weight_data[:] = 4
        self.assertEqual(weight_data, all_vars[4].data)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_cudnn_weight_tying(self):
    """Tied (shared) weights across RNN directions must survive a cuDNN
    forward, backward, and optimizer step, and the CUDA output must match
    the CPU output afterwards.
    """
    rnns = [
        nn.LSTM(10, 20, batch_first=True, bidirectional=True),
        nn.LSTM(10, 20, batch_first=True, bidirectional=True, proj_size=10),
        nn.GRU(10, 20, batch_first=True, bidirectional=True),
        nn.RNN(10, 20, batch_first=True, bidirectional=True)
    ]
    for rnn in rnns:
        # Tie the reverse-direction input bias to the forward-direction one.
        rnn.bias_ih_l0_reverse = rnn.bias_ih_l0
        rnn.cuda()
        input = torch.randn(5, 4, 10, requires_grad=True, device="cuda")
        hx = torch.randn(2, 5, 20, requires_grad=True, device="cuda")
        all_vars = [input, hx] + list(rnn.parameters())
        opt = torch.optim.SGD(rnn.parameters(), lr=0.1)
        opt.zero_grad()
        if isinstance(rnn, nn.LSTM):
            # LSTM with projections has different hx size
            if rnn.proj_size > 0:
                hx = torch.randn(2, 5, 10, requires_grad=True, device="cuda")
                all_vars[1] = hx
            cx = torch.randn(2, 5, 20, requires_grad=True, device="cuda")
            all_vars[2:2] = [cx]
            hx = (hx, cx)
        # record=True captures (and thereby suppresses) any tied-weight warning
        with warnings.catch_warnings(record=True) as w:
            output = rnn(input, hx)
        output[0].sum().backward()
        opt.step()
        with warnings.catch_warnings(record=True) as w:
            output_cuda = rnn(input, hx)
        rnn.cpu()
        hx = (hx[0].cpu(), hx[1].cpu()) if isinstance(rnn, nn.LSTM) else hx.cpu()
        output_cpu = rnn(input.cpu(), hx)
        self.assertEqual(output_cuda, output_cpu)
def test_transformer_args_check(self):
model_name = 'Transformer'
d_model = 128
nhead = 4
num_encoder_layers = 2
num_decoder_layers = 3
dim_feedforward = 65
dropout = 0.3
bsz = 3
seq_len = 35
tgt_len = 15
activations = [F.relu, F.gelu]
wrong_bsz = 7
wrong_d_model = 63
wrong_nhead = 5
wrong_activation = "abc"
def test(encoder_input_shape, decoder_input_shape,
src_mask_len=None, tgt_mask_len=None, memory_mask_size=None,
src_key_padding_mask_size=None, tgt_key_padding_mask_size=None,
memory_key_padding_mask_size=None):
encoder_input = torch.randn(encoder_input_shape)
decoder_input = torch.randn(decoder_input_shape)
model = getattr(nn, model_name)(d_model, nhead, num_encoder_layers,
num_decoder_layers, dim_feedforward, dropout)
if src_mask_len is not None:
src_mask = model.generate_square_subsequent_mask(src_mask_len)
else:
src_mask = None
if tgt_mask_len is not None:
tgt_mask = model.generate_square_subsequent_mask(tgt_mask_len)
else:
tgt_mask = None
if memory_mask_size is not None:
memory_task = torch.rand(memory_mask_size)
else:
memory_task = None
if src_key_padding_mask_size is not None:
src_key_padding_mask = torch.rand(src_key_padding_mask_size) >= 0.5
else:
src_key_padding_mask = None
if tgt_key_padding_mask_size is not None:
tgt_key_padding_mask = torch.rand(tgt_key_padding_mask_size) >= 0.5
else:
tgt_key_padding_mask = None
if memory_key_padding_mask_size is not None:
memory_key_padding_mask = torch.rand(memory_key_padding_mask_size) >= 0.5
else:
memory_key_padding_mask = None
with self.assertRaises(RuntimeError):
model(encoder_input, decoder_input,
src_mask=src_mask,
tgt_mask=tgt_mask,
memory_mask=memory_task,
src_key_padding_mask=src_key_padding_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask)
correct_encoder_input_shape = (seq_len, bsz, d_model)
correct_decoder_input_shape = (tgt_len, bsz, d_model)
def update_shape(shape, dim, new_dim_size):
new_shape = list(shape)
new_shape[dim] = new_dim_size
return tuple(new_shape)
# Incorrect encoder_input batch size
encoder_input_shape = update_shape(correct_encoder_input_shape, 1, wrong_bsz)
decoder_input_shape = correct_decoder_input_shape
test(encoder_input_shape, decoder_input_shape)
# Incorrect decoder_input batch size
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = update_shape(correct_decoder_input_shape, 1, wrong_bsz)
test(encoder_input_shape, decoder_input_shape)
# Incorrect encoder_input input size
encoder_input_shape = update_shape(correct_encoder_input_shape, 2, wrong_d_model)
decoder_input_shape = correct_decoder_input_shape
test(encoder_input_shape, decoder_input_shape)
# Incorrect decoder_input input size
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = update_shape(correct_decoder_input_shape, 2, wrong_d_model)
test(encoder_input_shape, decoder_input_shape)
# Incorrect nhead
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = correct_decoder_input_shape
with self.assertRaises(AssertionError):
model = getattr(nn, model_name)(d_model, wrong_nhead, num_encoder_layers,
num_decoder_layers, dim_feedforward, dropout)
# Incorrect src_mask
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = correct_decoder_input_shape
wrong_src_mask_size = seq_len + 1
test(encoder_input_shape, decoder_input_shape, src_mask_len=wrong_src_mask_size)
# Incorrect tgt_mask
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = correct_decoder_input_shape
wrong_tgt_mask_size = tgt_len + 1
test(encoder_input_shape, decoder_input_shape, tgt_mask_len=wrong_tgt_mask_size)
# Incorrect memory_mask
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = correct_decoder_input_shape
wrong_tgt_mask_size = tgt_len + 1
test(encoder_input_shape, decoder_input_shape,
memory_mask_size=(wrong_tgt_mask_size, wrong_src_mask_size))
# Incorrect src_key_padding_mask
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = correct_decoder_input_shape
with self.assertRaises(AssertionError):
test(encoder_input_shape, decoder_input_shape,
src_key_padding_mask_size=(wrong_bsz, wrong_src_mask_size))
# Incorrect tgt_key_padding_mask
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = correct_decoder_input_shape
with self.assertRaises(AssertionError):
test(encoder_input_shape, decoder_input_shape,
tgt_key_padding_mask_size=(wrong_bsz, wrong_tgt_mask_size))
# Incorrect memory_key_padding_mask
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = correct_decoder_input_shape
with self.assertRaises(AssertionError):
test(encoder_input_shape, decoder_input_shape,
memory_key_padding_mask_size=(wrong_bsz, wrong_src_mask_size))
# Correct activations
for activation in activations:
model = getattr(nn, model_name)(d_model, nhead, num_encoder_layers, num_decoder_layers,
dim_feedforward, dropout, activation)
# Incorrect activation
with self.assertRaises(RuntimeError):
model = getattr(nn, model_name)(d_model, nhead, num_encoder_layers, num_decoder_layers,
dim_feedforward, dropout, wrong_activation)
def test_transformer_layer_args_check(self):
model_names = ['TransformerEncoderLayer', 'TransformerDecoderLayer']
d_model = 128
nhead = 4
dim_feedforward = 65
dropout = 0.3
bsz = 3
seq_len = 35
tgt_len = 15
activations = [F.relu, F.gelu]
wrong_activation = "abc"
encoder_input_shape = (seq_len, bsz, d_model)
decoder_input_shape = (tgt_len, bsz, d_model)
encoder_input = torch.randn(encoder_input_shape)
decoder_input = torch.randn(decoder_input_shape)
for model_name in model_names:
for activation in activations:
model = getattr(nn, model_name)(d_model, nhead, dim_feedforward,
dropout, activation)
# Incorrect activation
for model_name in model_names:
with self.assertRaises(RuntimeError):
model = getattr(nn, model_name)(d_model, nhead, dim_feedforward,
dropout, wrong_activation)
def test_rnn_args_check(self):
input_size = 3
hidden_size = 5
num_layers = 2
batch_size = 4
seq_len = 6
num_directions = 1
bad_size = 7 # prime number so that no size can divide it.
def test(input_shape, hidden_shape, mode):
for input, hidden in get_inputs(input_shape, hidden_shape, mode):
model = getattr(nn, mode)(input_size, hidden_size, num_layers)
self.assertRaises(RuntimeError, lambda: model(input, hidden))
correct_input_shape = (seq_len, batch_size, input_size)
correct_hidden_shape = (num_layers * num_directions, batch_size, hidden_size)
def update_shape(shape, dim, new_dim_size):
new_shape = list(shape)
new_shape[dim] = new_dim_size
return tuple(new_shape)
def get_inputs(input_shape, hidden_shape, mode):
'''returns list( tuple(input, hidden) )
where input, hidden are inputs to a model'''
input = torch.randn(input_shape)
hidden = torch.randn(hidden_shape)
if mode != 'LSTM':
return [(input, hidden)]
if hidden_shape == correct_hidden_shape:
return [(input, (hidden, hidden))]
good_hidden = torch.randn(correct_hidden_shape)
return [
(input, (hidden, good_hidden)),
(input, (good_hidden, hidden)),
]
rnn_modes = ['RNN', 'GRU', 'LSTM']
for mode in rnn_modes:
# Incorrect input batch size
input_shape = update_shape(correct_input_shape, 1, bad_size)
hidden_shape = correct_hidden_shape
test(input_shape, hidden_shape, mode)
# Incorrect hidden batch size
input_shape = correct_input_shape
hidden_shape = update_shape(correct_hidden_shape, 1, bad_size)
test(input_shape, hidden_shape, mode)
# Incorrect input size
input_shape = update_shape(correct_input_shape, 2, bad_size)
hidden_shape = correct_hidden_shape
test(input_shape, hidden_shape, mode)
# Incorrect hidden size
input_shape = correct_input_shape
hidden_shape = update_shape(correct_hidden_shape, 2, bad_size)
test(input_shape, hidden_shape, mode)
# Incorrect hidden[0]
input_shape = correct_input_shape
hidden_shape = update_shape(correct_hidden_shape, 0, bad_size)
test(input_shape, hidden_shape, mode)
def test_projections_lstm_args_check(self):
input_size = 3
hidden_size = 5
proj_size = 2
num_layers = 2
batch_size = 4
seq_len = 6
num_directions = 1
bad_size = 7 # prime number so that no size can divide it.
def test(input_shape, hidden_h_shape, hidden_c_shape):
for input, hidden in get_inputs(input_shape, hidden_h_shape, hidden_c_shape):
model = nn.LSTM(input_size, hidden_size, num_layers, proj_size=proj_size)
self.assertRaises(RuntimeError, lambda: model(input, hidden))
correct_input_shape = (seq_len, batch_size, input_size)
correct_hidden_h_shape = (num_layers * num_directions, batch_size, proj_size)
correct_hidden_c_shape = (num_layers * num_directions, batch_size, hidden_size)
def update_shape(shape, dim, new_dim_size):
new_shape = list(shape)
new_shape[dim] = new_dim_size
return tuple(new_shape)
def get_inputs(input_shape, hidden_h_shape, hidden_c_shape):
'''returns list( tuple(input, hidden) )
where input, hidden are inputs to a model'''
input = torch.randn(input_shape)
hidden_h = torch.randn(hidden_h_shape)
hidden_c = torch.randn(hidden_c_shape)
return [(input, (hidden_h, hidden_c))]
# Incorrect input batch size
input_shape = update_shape(correct_input_shape, 1, bad_size)
test(input_shape, correct_hidden_h_shape, correct_hidden_c_shape)
# Incorrect hidden batch size
input_shape = correct_input_shape
hidden_h_shape = update_shape(correct_hidden_h_shape, 1, bad_size)
hidden_c_shape = update_shape(correct_hidden_c_shape, 1, bad_size)
test(input_shape, hidden_h_shape, hidden_c_shape)
# Incorrect input size
input_shape = update_shape(correct_input_shape, 2, bad_size)
test(input_shape, correct_hidden_h_shape, correct_hidden_c_shape)
# Incorrect hidden size
input_shape = correct_input_shape
hidden_h_shape = update_shape(correct_hidden_h_shape, 2, bad_size)
hidden_c_shape = update_shape(correct_hidden_c_shape, 2, bad_size)
test(input_shape, hidden_h_shape, hidden_c_shape)
# Incorrect hidden[0]
input_shape = correct_input_shape
hidden_h_shape = update_shape(correct_hidden_h_shape, 0, bad_size)
hidden_c_shape = update_shape(correct_hidden_c_shape, 0, bad_size)
test(input_shape, hidden_h_shape, hidden_c_shape)
# Incorrect proj size = hidden size
input_shape = correct_input_shape
hidden_h_shape = update_shape(correct_hidden_h_shape, 0, hidden_size)
hidden_c_shape = correct_hidden_c_shape
test(input_shape, hidden_h_shape, hidden_c_shape)
# Incorrect proj size != hidden size
input_shape = correct_input_shape
hidden_h_shape = update_shape(correct_hidden_h_shape, 0, bad_size)
hidden_c_shape = correct_hidden_c_shape
test(input_shape, hidden_h_shape, hidden_c_shape)
# Incorrect cell size != hidden size
input_shape = correct_input_shape
hidden_h_shape = correct_hidden_h_shape
hidden_c_shape = update_shape(correct_hidden_c_shape, 0, bad_size)
test(input_shape, hidden_h_shape, hidden_c_shape)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_rnn_check_device(self):
    """RNN/GRU/LSTM must raise RuntimeError when input, parameters, or hidden
    states live on different devices."""
    input_size = 3
    hidden_size = 5
    num_layers = 2
    batch_size = 4
    seq_len = 6
    num_directions = 1
    input_shape = (seq_len, batch_size, input_size)
    hidden_shape = (num_layers * num_directions, batch_size, hidden_size)
    for mode in ['RNN', 'GRU', 'LSTM']:
        model = getattr(nn, mode)(input_size, hidden_size, num_layers)
        inp = torch.randn(input_shape)
        hid = torch.randn(hidden_shape)
        # input on GPU while parameters stay on CPU
        with self.assertRaisesRegex(RuntimeError,
                                    "Input and parameter tensors are not at the same device"):
            model(inp.to('cuda:0'))
        # hidden state on GPU while input stays on CPU
        with self.assertRaisesRegex(RuntimeError,
                                    r"Input and hidden tensors are not at the same device"):
            if mode == 'LSTM':
                model(inp, (hid.to('cuda:0'), hid.to('cuda:0')))
            else:
                model(inp, (hid.to('cuda:0')))
        # LSTM h and c on different CUDA devices
        if mode == 'LSTM':
            with self.assertRaisesRegex(RuntimeError,
                                        "Input and hidden tensors are not at the same device"):
                model(inp.to('cuda:0'), (hid.to('cuda:0'), hid.to('cuda:1')))
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_projections_lstm_check_device(self):
    """Projected LSTM must raise RuntimeError when input, parameters, or the
    (h, c) states live on different devices."""
    input_size = 3
    hidden_size = 5
    proj_size = 2
    num_layers = 2
    batch_size = 4
    seq_len = 6
    num_directions = 1
    input_shape = (seq_len, batch_size, input_size)
    hidden_h_shape = (num_layers * num_directions, batch_size, proj_size)
    hidden_c_shape = (num_layers * num_directions, batch_size, hidden_size)
    model = nn.LSTM(input_size, hidden_size, num_layers, proj_size=proj_size)
    inp = torch.randn(input_shape)
    h0 = torch.randn(hidden_h_shape)
    c0 = torch.randn(hidden_c_shape)
    # input on GPU while parameters stay on CPU
    with self.assertRaisesRegex(RuntimeError,
                                "Input and parameter tensors are not at the same device"):
        model(inp.to('cuda:0'))
    # hidden states on GPU while input stays on CPU
    with self.assertRaisesRegex(RuntimeError,
                                r"Input and hidden tensors are not at the same device"):
        model(inp, (h0.to('cuda:0'), c0.to('cuda:0')))
    # h and c on different CUDA devices
    with self.assertRaisesRegex(RuntimeError,
                                "Input and hidden tensors are not at the same device"):
        model(inp.to('cuda:0'), (h0.to('cuda:0'), c0.to('cuda:1')))
def test_rnn_initial_hidden_state(self):
rnn_modes = ['RNN', 'GRU', 'LSTM']
for mode in rnn_modes:
rnn = getattr(nn, mode)(30, 20, 2)
input = torch.randn(10, 32, 30)
hidden = torch.zeros(2, 32, 20)
if mode == 'LSTM':
hidden = (hidden, hidden)
output1, hidden1 = rnn(input, hidden)
output2, hidden2 = rnn(input)
self.assertEqual(output1, output2)
self.assertEqual(hidden1, hidden2)
def test_projections_lstm_initial_hidden_state(self):
for bidir in [False, True]:
rnn = nn.LSTM(30, 20, 2, bidirectional=bidir, proj_size=10)
num_dirs = 2 if bidir else 1
input = torch.randn(10, 32, 30)
hidden_h = torch.zeros(2 * num_dirs, 32, 10)
hidden_c = torch.zeros(2 * num_dirs, 32, 20)
hidden = (hidden_h, hidden_c)
output1, hidden1 = rnn(input, hidden)
output2, hidden2 = rnn(input)
self.assertEqual(output1, output2)
self.assertEqual(hidden1, hidden2)
def test_projections_errors_on_gru_and_rnn(self):
error_msg = "proj_size argument is only supported for LSTM, not RNN or GRU"
for mode in ['RNN', 'GRU']:
with self.assertRaisesRegex(ValueError, error_msg):
rnn = getattr(nn, mode)(30, 20, 2, proj_size=10)
    def _test_RNN_cpu_vs_cudnn(self, dropout, dtype=torch.double):
        """Compare RNN/LSTM/GRU results between the CPU and cuDNN backends.

        For each module flavor and each combination of bias / bidirectional /
        batch_first / contiguity / variable-length / lengths-as-tensor
        options, the same weights are loaded into a CPU module and a CUDA
        module, one forward+backward pass is run on each, and outputs, final
        hidden states, and all gradients are compared (atol=5e-5, rtol=0).
        Also covers both RNN nonlinearities and LSTM with projections.
        """
        def forward_backward(cuda, rnn, input_val, grad_output, weights_val, hx_val, grad_hy,
                             cx_val=None, grad_cy=None):
            # Copies weights_val into rnn, runs one forward+backward pass
            # (after moving everything to CUDA when cuda=True), and returns
            # the tensors to be compared between the two backends.
            is_lstm = isinstance(rnn, nn.LSTM)

            # load identical weights into this module
            for x_layer, y_layer in zip(rnn.all_weights, weights_val):
                for x, y in zip(x_layer, y_layer):
                    x.data.copy_(y.data)

            if isinstance(input_val, rnn_utils.PackedSequence):
                input = rnn_utils.PackedSequence(
                    input_val.data.data.requires_grad_(True), input_val.batch_sizes)
                input_var = input.data
            else:
                input = input_val.clone().requires_grad_(True)
                input_var = input
            if is_lstm:
                if cx_val is None:
                    # no explicit cell state supplied: derive one from hx
                    hx = (hx_val.clone().requires_grad_(True),
                          hx_val.add(1).requires_grad_(True))
                else:
                    hx = (hx_val.clone().requires_grad_(True),
                          cx_val.add(1).requires_grad_(True))
            else:
                hx = hx_val.clone().requires_grad_(True)

            if cuda:
                # move module, input, states, and incoming gradients to the GPU
                rnn.cuda()
                input_var.data = input_var.data.cuda()
                if is_lstm:
                    hx[0].data = hx[0].data.cuda()
                    hx[1].data = hx[1].data.cuda()
                else:
                    hx.data = hx.data.cuda()
                grad_hy = grad_hy.cuda()
                if grad_cy is not None:
                    grad_cy = grad_cy.cuda()
                grad_output = grad_output.cuda()

            output, hy = rnn(input, hx)
            if isinstance(output, rnn_utils.PackedSequence):
                output = output.data

            if is_lstm:
                if grad_cy is None:
                    # mirror the hx_val.add(1) trick used for the cell state
                    torch.autograd.backward([output, hy[0], hy[1]], [grad_output, grad_hy, grad_hy + 1])
                else:
                    torch.autograd.backward([output, hy[0], hy[1]], [grad_output, grad_hy, grad_cy + 1])
            else:
                torch.autograd.backward([output, hy], [grad_output, grad_hy])
            return {'output': output.data,
                    'hy': hy[0].data if is_lstm else hy.data,
                    'weights': rnn.all_weights,
                    'grad_input': input_var.grad.data,
                    'grad_hx': hx[0].grad.data if is_lstm else hx.grad.data,
                    'cy': hy[1].data if is_lstm else None,
                    'grad_cx': hx[1].grad.data if is_lstm else None}

        input_size = 10
        hidden_size = 6
        proj_size = 3
        num_layers = 2
        seq_length = 7
        batch = 6

        def make_noncontig(tensor):
            # Stack a zeroed copy alongside the tensor in a new trailing dim
            # and select the original back out: same values, non-contiguous.
            ndim = tensor.dim()
            return torch.stack([tensor.clone().zero_(), tensor], ndim).select(ndim, 1)

        def compare_cpu_gpu(outputs_cpu, outputs_gpu):
            # Entry-by-entry comparison of the dicts returned by forward_backward.
            self.assertEqual(list(outputs_cpu.keys()), list(outputs_gpu.keys()))
            for key in outputs_cpu.keys():
                if key != 'weights':
                    self.assertEqual(outputs_cpu[key], outputs_gpu[key], atol=5e-5, rtol=0, msg=key)

            # check grad weights separately, as nested dict
            for cpu_layer_weight, gpu_layer_weight in zip(outputs_cpu['weights'], outputs_gpu['weights']):
                for (cpu_weight, gpu_weight) in zip(cpu_layer_weight, gpu_layer_weight):
                    self.assertEqual(cpu_weight.grad.data, gpu_weight.grad.data, atol=5e-5, rtol=0)

        for module in (nn.RNN, nn.LSTM, nn.GRU):
            for bias, bidirectional, batch_first, contig, variable_len, lens_as_tensor \
                    in product((True, False), repeat=6):
                num_directions = 2 if bidirectional else 1
                if batch_first:
                    input_val = torch.randn(batch, seq_length, input_size, dtype=dtype)
                    grad_output = torch.randn(batch, seq_length, hidden_size * num_directions, dtype=dtype)
                else:
                    input_val = torch.randn(seq_length, batch, input_size, dtype=dtype)
                    grad_output = torch.randn(seq_length, batch, hidden_size * num_directions, dtype=dtype)

                hx_val = torch.randn(num_layers * num_directions, batch, hidden_size, dtype=dtype)
                grad_hy = torch.randn(num_layers * num_directions, batch, hidden_size, dtype=dtype)

                if not contig:
                    grad_output = make_noncontig(grad_output)
                    grad_hy = make_noncontig(grad_hy)
                    # NOTE(review): the result is bound to input_var and never
                    # used afterwards, so input_val itself stays contiguous
                    # here -- looks like a long-standing quirk; confirm before
                    # changing.
                    input_var = make_noncontig(input_val)
                    hx_val = make_noncontig(hx_val)

                if variable_len:
                    lengths = [7, 5, 5, 2, 1, 1]
                    if lens_as_tensor:
                        lengths = torch.tensor(lengths, dtype=torch.long)
                    input_val = rnn_utils.pack_padded_sequence(input_val, lengths, batch_first=batch_first)
                    grad_output = rnn_utils.pack_padded_sequence(grad_output, lengths, batch_first=batch_first).data

                rnn = module(input_size,
                             hidden_size,
                             num_layers,
                             bias=bias,
                             dropout=dropout,
                             bidirectional=bidirectional,
                             batch_first=batch_first).to(dtype)
                outputs_cpu = forward_backward(
                    False, rnn, input_val, grad_output, rnn.all_weights, hx_val, grad_hy)

                rnn_gpu = module(input_size,
                                 hidden_size,
                                 num_layers,
                                 bias=bias,
                                 dropout=dropout,
                                 bidirectional=bidirectional,
                                 batch_first=batch_first).to(dtype)
                # the GPU module is seeded with the CPU module's weights so
                # both runs use identical parameters
                outputs_gpu = forward_backward(
                    True, rnn_gpu, input_val, grad_output, rnn.all_weights, hx_val, grad_hy)
                compare_cpu_gpu(outputs_cpu, outputs_gpu)

        for nonlinearity in ('tanh', 'relu'):
            # plain RNN: check both nonlinearities as well
            # NOTE(review): bias and num_directions here are leftovers from the
            # final iteration of the preceding loop.
            hx_val = torch.randn(num_layers, batch, hidden_size, dtype=dtype)
            input_val = torch.randn(seq_length, batch, input_size, dtype=dtype)
            grad_output = torch.randn(
                seq_length, batch, hidden_size * num_directions, dtype=dtype)
            grad_hy = torch.randn(
                num_layers * num_directions, batch, hidden_size, dtype=dtype)

            rnn = nn.RNN(input_size, hidden_size, num_layers, bias=bias, nonlinearity=nonlinearity).to(dtype)
            outputs_cpu = forward_backward(False, rnn, input_val, grad_output, rnn.all_weights, hx_val, grad_hy)

            rnn_gpu = nn.RNN(input_size, hidden_size, num_layers, bias=bias, nonlinearity=nonlinearity).to(dtype)
            outputs_gpu = forward_backward(True, rnn_gpu, input_val, grad_output, rnn.all_weights, hx_val, grad_hy)
            compare_cpu_gpu(outputs_cpu, outputs_gpu)

        # checking LSTM with projections
        for bias, bidirectional, batch_first, contig, variable_len, lens_as_tensor \
                in product((True, False), repeat=6):
            num_directions = 2 if bidirectional else 1
            if batch_first:
                input_val = torch.randn(batch, seq_length, input_size, dtype=dtype)
                grad_output = torch.randn(batch, seq_length, proj_size * num_directions, dtype=dtype)
            else:
                input_val = torch.randn(seq_length, batch, input_size, dtype=dtype)
                grad_output = torch.randn(seq_length, batch, proj_size * num_directions, dtype=dtype)

            # with projections, the hidden state (and its grad) uses proj_size
            # while the cell state keeps hidden_size
            hx_val = torch.randn(num_layers * num_directions, batch, proj_size, dtype=dtype)
            cx_val = torch.randn(num_layers * num_directions, batch, hidden_size, dtype=dtype)
            grad_hy = torch.randn(num_layers * num_directions, batch, proj_size, dtype=dtype)
            grad_cy = torch.randn(num_layers * num_directions, batch, hidden_size, dtype=dtype)

            if not contig:
                grad_output = make_noncontig(grad_output)
                grad_hy = make_noncontig(grad_hy)
                grad_cy = make_noncontig(grad_cy)
                # NOTE(review): same unused input_var binding as above.
                input_var = make_noncontig(input_val)
                hx_val = make_noncontig(hx_val)
                cx_val = make_noncontig(cx_val)

            if variable_len:
                lengths = [7, 5, 5, 2, 1, 1]
                if lens_as_tensor:
                    lengths = torch.tensor(lengths, dtype=torch.long)
                input_val = rnn_utils.pack_padded_sequence(input_val, lengths, batch_first=batch_first)
                grad_output = rnn_utils.pack_padded_sequence(grad_output, lengths, batch_first=batch_first).data

            rnn = nn.LSTM(input_size,
                          hidden_size,
                          num_layers,
                          bias=bias,
                          dropout=dropout,
                          bidirectional=bidirectional,
                          batch_first=batch_first,
                          proj_size=proj_size).to(dtype)
            outputs_cpu = forward_backward(
                False, rnn, input_val, grad_output, rnn.all_weights,
                hx_val, grad_hy, cx_val, grad_cy)

            rnn_gpu = nn.LSTM(input_size,
                              hidden_size,
                              num_layers,
                              bias=bias,
                              dropout=dropout,
                              bidirectional=bidirectional,
                              batch_first=batch_first,
                              proj_size=proj_size).to(dtype)
            outputs_gpu = forward_backward(
                True, rnn_gpu, input_val, grad_output, rnn.all_weights,
                hx_val, grad_hy, cx_val, grad_cy)
            compare_cpu_gpu(outputs_cpu, outputs_gpu)
@unittest.skipIf(not TEST_CUDNN, "needs cudnn")
def test_RNN_cpu_vs_cudnn_no_dropout(self):
dtype = torch.double
self._test_RNN_cpu_vs_cudnn(0, dtype)
@unittest.skipIf(not (TEST_CUDNN and (TEST_CUDNN_VERSION if TEST_CUDNN_VERSION else 0) >= 5103), "needs cudnn >= 5.1")
def test_RNN_cpu_vs_cudnn_with_dropout(self):
# Because of dropout randomness, can only compare dropout=0 and dropout=1
self._test_RNN_cpu_vs_cudnn(1)
@unittest.skipIf(not TEST_CUDNN, "needs cudnn")
def test_RNN_cudnn_weight_norm(self):
input_size = 10
hidden_size = 6
num_layers = 2
seq_length = 7
batch = 6
# runs on CPU to acquire expected output
def check_weight_norm(m, name):
input = torch.randn(seq_length, batch, input_size)
expected_output = m(input)
# adds weight normalization
m = torch.nn.utils.weight_norm(m, name=name)
# moves to CUDA
m = m.cuda()
input = input.cuda()
# otherwise, subsequent warnings will be hidden, and further tests rely on them
warnings.simplefilter("always")
self.assertEqual(m(input), expected_output)
# remove weight norm
m = torch.nn.utils.remove_weight_norm(m, name=name)
self.assertEqual(m(input), expected_output)
check_weight_norm(nn.LSTM(input_size, hidden_size, num_layers), 'weight_hh_l0')
check_weight_norm(nn.LSTM(input_size, hidden_size, num_layers, proj_size=3), 'weight_hr_l0')
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_partial_flat_weights(self):
input_size = 10
hidden_size = 6
num_layers = 2
m = nn.LSTM(input_size, hidden_size, num_layers)
inp = torch.randn(3, 2, 10)
out_expected = m(inp)
# deletes an attribute of original LSTM
weight_orig = m.weight_hh_l0
del m.weight_hh_l0
self.assertFalse(hasattr(m, "weight_hh_l0"))
# verifies that moving to CUDA with only some attributes defined
# does not throw an error
m.cuda()
# recompute the weight and make sure that module can be used
m.weight_hh_l0 = weight_orig.cuda()
inp = inp.cuda()
# otherwise, subsequent warnings will be hidden, and further tests rely on them
warnings.simplefilter("always")
self.assertEqual(m(inp)[0].cpu(), out_expected[0])
    @unittest.skipIf(not (TEST_CUDNN and (TEST_CUDNN_VERSION if TEST_CUDNN_VERSION else 0) >= 5103), "needs cudnn >= 5.1")
    def test_RNN_dropout(self):
        # checking the assumption that cuDNN sticks dropout in between
        # RNN layers
        #
        # With all weights filled with 1 and a relu nonlinearity, every unit
        # of the layer-0 output is exactly 10 (sum of ten 1-valued inputs),
        # so without dropout each layer-1 unit sums 1000 tens = 10000.
        # Inter-layer dropout zeroes units and scales survivors by 1/(1-p),
        # which the arithmetic checks below exploit.
        for p in (0, 0.276, 0.731, 1):
            for train in (True, False):
                for cuda in (True, False):
                    rnn = nn.RNN(10, 1000, 2, bias=False, dropout=p, nonlinearity='relu')
                    if cuda:
                        rnn.cuda()
                    if train:
                        rnn.train()
                    else:
                        rnn.eval()
                    rnn.weight_ih_l0.data.fill_(1)
                    rnn.weight_hh_l0.data.fill_(1)
                    rnn.weight_ih_l1.data.fill_(1)
                    rnn.weight_hh_l1.data.fill_(1)
                    input = torch.ones(1, 1, 10)
                    hx = torch.zeros(2, 1, 1000)
                    if cuda:
                        input = input.cuda()
                        hx = hx.cuda()

                    output, hy = rnn(input, hx)
                    # every layer-1 unit sees identical inputs, so all output
                    # entries must be equal
                    self.assertEqual(output.data.min(), output.data.max())
                    output_val = output.data[0][0][0]
                    if p == 0 or not train:
                        # dropout inactive: exact deterministic value
                        self.assertEqual(output_val, 10000)
                    elif p == 1:
                        # everything between the layers is dropped
                        self.assertEqual(output_val, 0)
                    else:
                        # ~p of the 1000 inter-layer units are dropped and the
                        # rest scaled by 1/(1-p); undoing that scale must give
                        # a near-integer multiple of 10
                        self.assertGreater(output_val, 8000)
                        self.assertLess(output_val, 12000)
                        denorm_mod = (output_val * (1 - p)) % 10
                        self.assertLess(min(denorm_mod, 10 - denorm_mod), 1e-2)

                    # hidden states: layer 0 is pre-dropout (exactly 10),
                    # layer 1 must match the visible output
                    self.assertEqual(hy[0].data.min(), hy[0].data.max())
                    self.assertEqual(hy[1].data.min(), hy[1].data.max())
                    self.assertEqual(hy.data[0][0][0], 10)
                    self.assertEqual(hy.data[1][0][0], output_val)
def test_error_RNN_seq_len_zero(self):
# checking error message when RNN has seq_len = 0
for module in (nn.RNN, nn.LSTM, nn.GRU):
for bidirectional in [True, False]:
for device in get_all_device_types():
input = torch.ones(0, 10, 5)
rnn = module(5, 6, bidirectional=bidirectional)
if device == 'cuda':
rnn.cuda()
input = input.cuda()
with self.assertRaisesRegex(RuntimeError, "Expected sequence length to be larger than 0 in RNN"):
rnn(input)
def test_RNN_input_size_zero(self):
for module in (nn.RNN, nn.LSTM, nn.GRU):
for device in get_all_device_types():
input = torch.zeros((5, 0, 3))
rnn = module(input_size=3, hidden_size=4)
if device == 'cuda':
rnn.cuda()
input = input.cuda()
outs = rnn(input)
self.assertEqual(outs[0].shape, torch.Size([5, 0, 4]))
# Check that backward does not cause a hard error
outs[0].sum().backward()
@unittest.skipIf(not (TEST_CUDNN and (TEST_CUDNN_VERSION if TEST_CUDNN_VERSION else 0) >= 5103), "needs cudnn >= 5.1")
def test_RNN_dropout_state(self):
for p in (0, 0.1234):
for train in (True, False):
for cuda in (True, False):
rnn = nn.RNN(100, 100, 2, bias=False, dropout=p, nonlinearity='relu')
if cuda:
rnn.cuda()
if train:
rnn.train()
else:
rnn.eval()
input = torch.rand(1, 1, 100)
hx = torch.rand(2, 1, 100)
if cuda:
input = input.cuda()
hx = hx.cuda()
output1, hy1 = rnn(input, hx)
output2, hy2 = rnn(input, hx)
buf = io.BytesIO()
rnn_pickle = torch.save(rnn, buf)
buf.seek(0)
rnn2 = torch.load(buf)
rnn2.flatten_parameters()
output3, hy3 = rnn2(input, hx)
if p == 0 or not train:
self.assertEqual(output1, output2)
self.assertEqual(output1, output3)
self.assertEqual(hy1, hy2)
self.assertEqual(hy1, hy3)
else:
self.assertNotEqual(output1, output2)
self.assertNotEqual(output1, output3)
self.assertNotEqual(hy1, hy2)
self.assertNotEqual(hy1, hy3)
    @unittest.skipIf(not (TEST_CUDNN and (TEST_CUDNN_VERSION if TEST_CUDNN_VERSION else 0) >= 5103), "needs cudnn >= 5.1")
    def test_RNN_change_dropout(self):
        # Changing rnn.dropout after construction must take effect on the next
        # forward call: deterministic settings (p == 0, p == 1, or eval mode)
        # give repeatable outputs, any other p in training mode draws fresh
        # masks per call, and (in training mode) changing p must change the
        # output relative to the previous setting.
        for train, cuda in product((True, False), repeat=2):
            rnn = nn.RNN(100, 100, 2, dropout=0, nonlinearity='relu')
            input = torch.rand(3, 2, 100)
            if cuda:
                input.data = input.data.cuda()
                rnn.cuda()

            if train:
                rnn.train()
            else:
                rnn.eval()

            prev_output = None
            # revisited values (0 and 0.2 appear twice) exercise the cuDNN
            # dropout-state caching across repeated changes
            for p in (0, 0.5, 0, 0.7, 0.2, 1, 0.2, 0):
                rnn.dropout = p
                output1, hy1 = rnn(input)
                output2, hy2 = rnn(input)

                if p == 0 or p == 1 or not train:
                    # no randomness between two identical calls
                    self.assertEqual(output1, output2)
                    self.assertEqual(hy1, hy2)
                else:
                    # fresh dropout masks on every call
                    self.assertNotEqual(output1, output2)
                    self.assertNotEqual(hy1, hy2)

                if prev_output is not None:
                    if not train:
                        # eval mode: dropout is a no-op, so the output is
                        # independent of p
                        self.assertEqual(output1.data, prev_output)
                        self.assertEqual(output2.data, prev_output)
                    else:
                        # training mode: a different p (or fresh masks) must
                        # change the output
                        self.assertNotEqual(output1.data, prev_output)
                        self.assertNotEqual(output2.data, prev_output)
                prev_output = output1.data
def test_inplace_thnn(self):
modules = [nn.ReLU, nn.ELU, nn.SELU, nn.CELU, nn.RReLU]
for mod in modules:
r = mod(inplace=True)
input = torch.randn(5, 5, requires_grad=True)
output = r(input + 0)
grad_output = torch.randn(5, 5)
grad_output_clone = grad_output.clone()
output.backward(grad_output)
self.assertEqual(grad_output, grad_output_clone)
    @skipIfTorchDynamo("TorchDynamo fails here for unknown reasons")
    def test_pixel_shuffle_unshuffle(self):
        """Exhaustively validate PixelShuffle / PixelUnshuffle for 1D-5D inputs.

        3D-5D inputs with a channels dim divisible by upscale_factor**2 are
        success cases (the shuffle is verified element-by-element and
        unshuffle must invert it); everything else -- too few dims, an
        indivisible channels/height/width dim, or a non-positive factor --
        must raise RuntimeError.
        """
        def _test_pixel_shuffle_unshuffle_helper(num_input_dims, valid_channels_dim=True,
                                                 upscale_factor=None):
            # Function to imperatively ensure pixels are shuffled to the correct locations.
            # Used to validate the batch operations in pixel_shuffle.
            def _verify_pixel_shuffle(input, output, upscale_factor):
                for c in range(output.size(-3)):
                    for h in range(output.size(-2)):
                        for w in range(output.size(-1)):
                            height_idx = h // upscale_factor
                            weight_idx = w // upscale_factor
                            # input channel layout: (c, h-offset, w-offset)
                            # packed row-major within each output channel
                            channel_idx = (upscale_factor * (h % upscale_factor)) + (w % upscale_factor) + \
                                          (c * upscale_factor ** 2)
                            self.assertEqual(output[..., c, h, w], input[..., channel_idx, height_idx, weight_idx])

            upscale_factor = random.randint(2, 5) if upscale_factor is None else upscale_factor
            # If valid_channels_dim=False, add 1 to make channels dim indivisible by upscale_factor ** 2.
            channels = random.randint(1, 4) * upscale_factor ** 2 + (0 if valid_channels_dim else 1)
            height = random.randint(5, 10)
            width = random.randint(5, 10)

            if num_input_dims == 1:
                input = torch.rand(channels, requires_grad=True)
            elif num_input_dims == 2:
                input = torch.rand(height, width, requires_grad=True)
            else:
                batch_sizes = [random.randint(1, 3) for _ in range(num_input_dims - 3)]
                input = torch.rand(*batch_sizes, channels, height, width, requires_grad=True)
            ps = nn.PixelShuffle(upscale_factor)
            pus = nn.PixelUnshuffle(downscale_factor=upscale_factor)

            if num_input_dims >= 3 and valid_channels_dim and upscale_factor > 0:
                output = ps(input)
                _verify_pixel_shuffle(input, output, upscale_factor)
                # shuffle is a pure permutation, so using the output itself as
                # the gradient must reproduce the input in input.grad
                output.backward(output.data)
                self.assertEqual(input.data, input.grad.data)
                # Ensure unshuffle properly inverts shuffle.
                unshuffle_output = pus(output)
                self.assertEqual(input, unshuffle_output)
            else:
                self.assertRaises(RuntimeError, lambda: ps(input))

        def _test_pixel_unshuffle_error_case_helper(num_input_dims, valid_height_dim=True, valid_width_dim=True,
                                                    downscale_factor=None):
            # Builds an input that must make PixelUnshuffle raise RuntimeError.
            downscale_factor = random.randint(2, 5) if downscale_factor is None else downscale_factor
            channels = random.randint(1, 4)
            # If valid_height_dim=False, add 1 to make height dim indivisible by downscale_factor.
            height = random.randint(3, 5) * abs(downscale_factor) + (0 if valid_height_dim else 1)
            # If valid_width_dim=False, add 1 to make width dim indivisible by downscale_factor.
            width = random.randint(3, 5) * abs(downscale_factor) + (0 if valid_width_dim else 1)

            if num_input_dims == 1:
                input = torch.rand(channels, requires_grad=True)
            elif num_input_dims == 2:
                input = torch.rand(height, width, requires_grad=True)
            else:
                batch_sizes = [random.randint(1, 3) for _ in range(num_input_dims - 3)]
                input = torch.rand(*batch_sizes, channels, height, width, requires_grad=True)
            pus = nn.PixelUnshuffle(downscale_factor)
            self.assertRaises(RuntimeError, lambda: pus(input))

        def _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims):
            # For 1D - 2D, this is an error case.
            # For 3D - 5D, this is a success case for pixel_shuffle + pixel_unshuffle.
            _test_pixel_shuffle_unshuffle_helper(num_input_dims=num_input_dims)
            # Error cases for pixel_shuffle.
            _test_pixel_shuffle_unshuffle_helper(num_input_dims=num_input_dims, valid_channels_dim=False)
            _test_pixel_shuffle_unshuffle_helper(num_input_dims=num_input_dims, upscale_factor=0)
            _test_pixel_shuffle_unshuffle_helper(num_input_dims=num_input_dims, upscale_factor=-2)
            # Error cases for pixel_unshuffle.
            _test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, valid_height_dim=False)
            _test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, valid_width_dim=False)
            _test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, downscale_factor=0)
            _test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, downscale_factor=-2)

        def test_pixel_shuffle_unshuffle_1D():
            _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=1)

        def test_pixel_shuffle_unshuffle_2D():
            _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=2)

        def test_pixel_shuffle_unshuffle_3D():
            _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=3)

        def test_pixel_shuffle_unshuffle_4D():
            _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=4)

        def test_pixel_shuffle_unshuffle_5D():
            _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=5)

        # run every dimensionality case
        test_pixel_shuffle_unshuffle_1D()
        test_pixel_shuffle_unshuffle_2D()
        test_pixel_shuffle_unshuffle_3D()
        test_pixel_shuffle_unshuffle_4D()
        test_pixel_shuffle_unshuffle_5D()
def test_pixel_shuffle_nhwc_cpu(self):
input = torch.randn(3, 18, 4, 4, device='cpu')
input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
grad = torch.randn(3, 18, 4, 4, device='cpu')
ps = torch.nn.PixelShuffle(3)
pus = torch.nn.PixelUnshuffle(3)
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_ps = torch.nn.PixelShuffle(3)
ref_pus = torch.nn.PixelUnshuffle(3)
out = pus(ps(input))
out.backward(grad)
ref_out = ref_pus(ref_ps(ref_input))
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(input.grad, ref_input.grad)
# These tests should be OpInfo'd
def test_elu_inplace_on_view(self):
v = torch.tensor([1.0, -1.0, 1.0, -1.0], requires_grad=True)
def func(root):
x = root.clone()
view = x.narrow(0, 1, 2)
res = F.elu(view, inplace=True)
self.assertIs(res, view)
return x
gradcheck(func, [v])
gradgradcheck(func, [v])
def test_elu_inplace_gradgrad(self):
v = torch.randn(8, requires_grad=True)
def func(root):
x = root.clone()
return F.elu(x, inplace=True)
gradcheck(func, [v])
gradgradcheck(func, [v])
def test_relu_inplace_on_view(self):
v = torch.tensor([1.0, -1.0, 1.0, -1.0], requires_grad=True)
def func(root):
x = root.clone()
view = x.narrow(0, 1, 2)
res = F.relu(view, inplace=True)
self.assertIs(res, view)
return x
gradcheck(func, [v])
gradgradcheck(func, [v])
def test_PReLU_backward_requires_grad_false(self):
devices = ['cpu']
devices += ['cuda'] if TEST_CUDA else []
for d in devices:
m = nn.PReLU().to(d)
x = torch.randn(2, 3, 4, 5, device=d, requires_grad=False)
y = m(x)
y.mean().backward()
self.assertEqual(x.grad, None)
def test_bce_loss_always_nonnegative(self):
target = torch.ones(5)
input = torch.ones(5)
self.assertEqual((nn.BCELoss()(input, target) < 0).sum(), 0)
target = torch.zeros(5)
input = torch.zeros(5)
self.assertEqual((nn.BCELoss()(input, target) < 0).sum(), 0)
def test_bce_with_logits_raises_if_target_and_input_are_different_size(self):
target = torch.rand(5)
input = torch.rand(5, 1)
with self.assertRaises(ValueError):
nn.BCEWithLogitsLoss()(input, target)
target = torch.rand(5, 1)
input = torch.rand(5)
with self.assertRaises(ValueError):
nn.BCEWithLogitsLoss()(input, target)
def test_bce_with_logits_gives_same_result_as_sigmoid_and_bce_loss(self):
sigmoid = nn.Sigmoid()
target = torch.rand(64, 4)
output = torch.rand(64, 4) - 0.5
self.assertEqual(nn.BCEWithLogitsLoss()(output, target), nn.BCELoss()(sigmoid(output), target))
weight = torch.rand(4)
self.assertEqual(nn.BCEWithLogitsLoss(weight)(output, target), nn.BCELoss(weight)(sigmoid(output), target))
target = torch.zeros(4, 1, dtype=torch.float)
output = torch.empty(4, 1, dtype=torch.float).fill_(-100)
self.assertEqual(nn.BCEWithLogitsLoss()(output, target), nn.BCELoss()(sigmoid(output), target))
self.assertEqual(nn.BCEWithLogitsLoss(reduction='none')(output, target),
nn.BCELoss(reduction='none')(sigmoid(output), target))
weight = torch.rand(1, dtype=torch.float)
self.assertEqual(nn.BCEWithLogitsLoss(weight)(output, target), nn.BCELoss(weight)(sigmoid(output), target))
def test_bce_loss_input_range(self):
bceloss = nn.BCELoss()
target = torch.rand(25, 25)
output_valid = torch.rand(25, 25)
output_too_negative = output_valid - 1.0
output_too_positive = output_valid + 1.0
loss_valid = bceloss(output_valid, target)
with self.assertRaisesRegex(RuntimeError, 'between 0 and 1'):
loss_too_negative = bceloss(output_too_negative, target)
with self.assertRaisesRegex(RuntimeError, 'between 0 and 1'):
loss_too_positive = bceloss(output_too_positive, target)
def test_bce_loss_size_mismatch(self):
bceloss = nn.BCELoss()
a = torch.rand(25)
b = torch.rand(25, 1)
with self.assertRaisesRegex(ValueError, r'Using a target size \('):
bceloss(a, b)
def test_bce_with_logits_gives_same_result_as_sigmoid_and_bce_loss_large_tensors_with_grad(self):
x_size = 1024
y_size = 256
target = torch.rand(x_size, y_size)
for reduction in ['none', 'mean', 'sum']:
output_sig = torch.rand(x_size, y_size) - 0.5
output_logits = output_sig.clone().detach()
output_sig.requires_grad = True
output_logits.requires_grad = True
weight = torch.rand(y_size)
loss_sig = nn.BCELoss(weight, reduction=reduction)(
torch.sigmoid(output_sig), target
)
loss_logits = nn.BCEWithLogitsLoss(weight, reduction=reduction)(
output_logits, target
)
self.assertEqual(loss_logits, loss_sig)
if reduction == 'none':
grad = torch.rand(x_size, y_size)
loss_sig.backward(grad)
loss_logits.backward(grad)
else:
loss_sig.backward()
loss_logits.backward()
self.assertEqual(output_sig.grad, output_logits.grad)
def test_bce_with_logits_has_correct_forward_grad(self):
output = torch.randn(3, 5, requires_grad=True)
target = torch.randn(3, 5)
for reduction in ('sum', 'mean', 'none'):
gradcheck(lambda self, target: nn.BCEWithLogitsLoss(reduction=reduction)(self, target),
(output, target), check_forward_ad=True)
def test_bce_with_logits_has_correct_grad_at_zero(self):
output = torch.zeros(3, 1, requires_grad=True)
target = torch.zeros(3, 1)
nn.BCEWithLogitsLoss(reduction='sum')(output, target).backward()
expected_grad = torch.empty(3, 1).fill_(0.5)
self.assertEqual(output.grad, expected_grad)
def test_bce_with_logits_broadcasts_weights(self):
target = torch.rand(16, 4)
output = torch.rand(16, 4) - 0.5
weight = torch.rand(4)
out1 = nn.BCEWithLogitsLoss(weight)(output, target)
weight = weight.expand(16, 4).contiguous()
out2 = nn.BCEWithLogitsLoss(weight)(output, target)
self.assertEqual(out1, out2)
weight = torch.rand(16, 1)
out1 = nn.BCEWithLogitsLoss(weight)(output, target)
weight = weight.expand(16, 4).contiguous()
out2 = nn.BCEWithLogitsLoss(weight)(output, target)
self.assertEqual(out1, out2)
def test_bce_with_logits_ones_in_pos_weights_are_the_same_as_none(self):
target = torch.rand(64, 4)
output = torch.rand(64, 4) - 0.5
pos_weight = torch.ones(64, 4)
self.assertEqual(nn.BCEWithLogitsLoss()(output, target),
nn.BCEWithLogitsLoss(pos_weight=pos_weight)(output, target))
def test_bce_with_logits_broadcasts_pos_weights(self):
target = torch.rand(64, 4)
output = torch.rand(64, 4) - 0.5
pos_weight = torch.rand(4)
out1 = nn.BCEWithLogitsLoss(pos_weight=pos_weight)(output, target)
pos_weight1 = pos_weight.expand(1, 4)
out2 = nn.BCEWithLogitsLoss(pos_weight=pos_weight1)(output, target)
pos_weight2 = pos_weight.expand(64, 4)
out3 = nn.BCEWithLogitsLoss(pos_weight=pos_weight2)(output, target)
self.assertEqual(out1, out2)
self.assertEqual(out1, out3)
def test_bce_with_logits_with_pos_weight_has_correct_grad_at_zero(self):
output = torch.zeros(3, 1, requires_grad=True)
target = torch.zeros(3, 1)
pos_weight = torch.ones(3, 1)
nn.BCEWithLogitsLoss(pos_weight=pos_weight, reduction='sum')(output, target).backward()
expected_grad = torch.empty(3, 1).fill_(0.5)
grad = output.grad
self.assertEqual(grad, expected_grad)
def test_bce_with_logits_stability(self):
output = torch.tensor([0., -120.])
target = torch.tensor([0., 1.])
pos_weight = torch.tensor([1., 1.])
out1 = nn.BCEWithLogitsLoss()(output, target)
self.assertTrue(torch.isfinite(out1).all().item())
out2 = nn.BCEWithLogitsLoss(pos_weight=pos_weight)(output, target)
self.assertTrue(torch.isfinite(out2).all().item())
def test_bce_loss_broadcasts_weights(self):
sigmoid = nn.Sigmoid()
target = torch.rand(16, 4)
output = torch.rand(16, 4) - 0.5
weight = torch.rand(4)
out1 = nn.BCELoss(weight)(sigmoid(output), target)
weight = weight.expand(16, 4).contiguous()
out2 = nn.BCELoss(weight)(sigmoid(output), target)
self.assertEqual(out1, out2)
weight = torch.rand(16, 1)
out1 = nn.BCELoss(weight)(sigmoid(output), target)
weight = weight.expand(16, 4).contiguous()
out2 = nn.BCELoss(weight)(sigmoid(output), target)
self.assertEqual(out1, out2)
def test_hardtanh_inplace_gradgrad(self):
v = torch.randn(8, requires_grad=True)
def func(root):
x = root.clone()
return F.hardtanh(x, inplace=True)
gradcheck(func, [v])
gradgradcheck(func, [v])
# test hardtanh backward froo large tensor
def test_hardtanh_backward(self):
x = torch.randn(128, 10000, requires_grad=True)
grad = torch.randn(128, 10000)
z = torch.zeros(128, 10000)
y = F.hardtanh(x)
y.backward(grad)
# ref backward path for hardtanh
mask = (x > -1) & (x < 1)
x_grad_ref = torch.where(mask, grad, z)
self.assertEqual(x.grad, x_grad_ref)
def test_batchnorm_nhwc_cpu(self):
def helper(self, size, dtype, mixed_dtype=False):
channels = size[1]
input = torch.randn(size, dtype=dtype, device='cpu', requires_grad=True)
input = input.contiguous(memory_format=torch.channels_last).to(dtype)
input.retain_grad()
grad = torch.randn(size, dtype=dtype, device='cpu')
grad = grad.contiguous(memory_format=torch.channels_last)
bn = nn.BatchNorm2d(channels).cpu().to(dtype)
bn.weight.data.uniform_()
bn.bias.data.uniform_()
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_bn = nn.BatchNorm2d(channels).cpu().to(dtype)
ref_bn.load_state_dict(bn.state_dict())
if mixed_dtype:
bn.float()
ref_bn.float()
out = bn(input)
out.backward(grad)
ref_out = ref_bn(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(bn.weight.grad, ref_bn.weight.grad)
self.assertEqual(bn.bias.grad, ref_bn.bias.grad)
self.assertEqual(input.grad, ref_input.grad)
# test NC11 and N1HW; test mixed dtype
for shape in [(4, 8, 10, 10), (4, 1, 9, 9), (4, 9, 1, 1)]:
helper(self, shape, torch.float, False)
helper(self, shape, torch.bfloat16, False)
helper(self, shape, torch.bfloat16, True)
def test_batchnorm_non_contig_cpu(self):
input = torch.arange(6, dtype=torch.float).reshape(1, 3, 2, 1).cpu()
input = input.permute(0, 2, 1, 3)
bn = torch.nn.BatchNorm2d(2).cpu().float().eval()
bn.weight.data.uniform_()
bn.bias.data.uniform_()
ref_input = input.detach().clone().contiguous()
ref_bn = nn.BatchNorm2d(2).cpu().float().eval()
ref_bn.load_state_dict(bn.state_dict())
out = bn(input)
ref_out = ref_bn(ref_input)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@unittest.skipIf(not TEST_CUDNN, "needs cudnn")
@skipIfRocm
def test_batchnorm_cudnn_nhwc(self):
def run_test(input, grad_output):
c = input.size(1)
mod = nn.BatchNorm2d(c).cuda().float()
mod.weight.data.uniform_()
mod.bias.data.uniform_()
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_mod = nn.BatchNorm2d(c).cuda().float()
ref_mod.load_state_dict(mod.state_dict())
out = mod(input)
out.backward(grad_output)
ref_out = ref_mod(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(mod.weight.grad, ref_mod.weight.grad)
self.assertEqual(mod.bias.grad, ref_mod.bias.grad)
self.assertEqual(input.grad, ref_input.grad)
input = torch.randint(1, 10, (4, 8, 2, 2), dtype=torch.float32, device="cuda")
input = input.contiguous(memory_format=torch.channels_last).detach().requires_grad_()
grad = torch.randint(1, 10, (4, 8, 2, 2), dtype=torch.float32, device="cuda")
grad = grad.contiguous(memory_format=torch.channels_last)
run_test(input, grad)
# see #42588, grad is channels_last contiguous, but grad.suggest_memory_format (rightly) return "contiguous"
# not channels_last
input = torch.randint(1, 10, (2, 8, 8, 1), dtype=torch.float32, device="cuda")
input = input.contiguous(memory_format=torch.channels_last).detach().requires_grad_()
grad = torch.randint(1, 10, (2, 8, 8, 1), dtype=torch.float32, device="cuda")
grad = grad.permute(0, 2, 1, 3)
run_test(input, grad)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_batchnorm_cudnn_half(self):
# THNN
input = torch.randint(1, 10, (2, 3, 2, 2), dtype=torch.half, device="cuda", requires_grad=True)
m = nn.BatchNorm2d(3).half().cuda()
thnn_output = m(input)
thnn_output.sum().backward()
thnn_input_grad = input.grad.data.clone()
self.assertEqualTypeString(thnn_output, input)
# cuDNN
if TEST_CUDNN:
input.grad = None
m = m.float()
cudnn_output = m(input)
cudnn_output.sum().backward()
cudnn_input_grad = input.grad.data.clone()
self.assertEqualTypeString(cudnn_output, input)
self.assertEqual(cudnn_output, thnn_output)
self.assertEqual(cudnn_input_grad, thnn_input_grad, atol=1e-3, rtol=0)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_batchnorm_nonaffine_cuda_half_input(self):
input = torch.randn(16, 3, 24, 24, dtype=torch.half, device="cuda")
m = nn.BatchNorm2d(3, affine=False).cuda().float() # keep running stats in FP32
output = m(input)
self.assertEqualTypeString(output, input)
m.eval()
output = m(input)
self.assertEqualTypeString(output, input)
def test_batchnorm_raises_error_if_less_than_one_value_per_channel(self):
x = torch.rand(10)[None, :, None]
with self.assertRaises(ValueError):
torch.nn.BatchNorm1d(10)(x)
def test_batchnorm_raises_error_if_running_mean_is_not_same_size_as_input(self):
input = torch.rand(2, 10)
running_var = torch.rand(10)
wrong_sizes = [9, 11]
for size in wrong_sizes:
with self.assertRaises(RuntimeError):
F.batch_norm(input, torch.rand(size), running_var)
def test_batchnorm_raises_error_if_running_var_is_not_same_size_as_input(self):
input = torch.rand(2, 10)
running_mean = torch.rand(10)
wrong_sizes = [9, 11]
for size in wrong_sizes:
with self.assertRaises(RuntimeError):
F.batch_norm(input, running_mean, torch.rand(size))
def test_batchnorm_raises_error_if_weight_is_not_same_size_as_input(self):
input = torch.rand(2, 10)
running_mean = torch.rand(10)
running_var = torch.rand(10)
wrong_sizes = [9, 11]
for size in wrong_sizes:
with self.assertRaises(RuntimeError):
F.batch_norm(input, running_mean, running_var, weight=Parameter(torch.rand(size)))
def test_batchnorm_raises_error_if_bias_is_not_same_size_as_input(self):
input = torch.rand(2, 10)
running_mean = torch.rand(10)
running_var = torch.rand(10)
wrong_sizes = [9, 11]
for size in wrong_sizes:
with self.assertRaises(RuntimeError):
F.batch_norm(input, running_mean, running_var, bias=Parameter(torch.rand(size)))
def test_batchnorm_raises_error_if_running_var_or_running_mean_have_forward_grad(self):
args = (
torch.randn(3, 2, 5), # input
torch.randn(2), # running_mean
torch.randn(2), # running_var
)
kwargs = {'training': False, 'momentum': -1.2}
fn = partial(F.batch_norm, **kwargs)
for dual_indices in ((0,), (1,), (1, 2), (0, 1), (0, 1, 2),):
tangents = tuple(torch.rand_like(x) for x in args)
with fwAD.dual_level():
duals = [fwAD.make_dual(primal, tangent) if i in dual_indices else primal
for i, (primal, tangent) in enumerate(zip(args, tangents))]
msg = "batch_norm is not differentiable wrt running_mean and running_var"
# 0 needs to have forward grad because otherwise we won't even run batch_norm_jvp
if (1 in dual_indices or 2 in dual_indices) and 0 in dual_indices:
with self.assertRaisesRegex(RuntimeError, msg):
fn(*duals)
else:
fn(*duals)
def test_batchnorm_buffer_update_when_stats_are_not_tracked(self):
input_size = (32, 4)
# Instantiate BN with buffers that are not None
bn = nn.BatchNorm1d(input_size[1], track_running_stats=True)
# Use buffers for normalization but don't update them
bn.track_running_stats = False
# Store initial values
num_batches = bn.num_batches_tracked.clone()
running_mean = bn.running_mean.clone()
running_var = bn.running_var.clone()
# Forward random tensor
_ = bn(torch.rand(input_size))
# Ensure none of the buffers has been updated
self.assertTrue(torch.equal(num_batches, bn.num_batches_tracked))
self.assertTrue(torch.equal(running_mean, bn.running_mean))
self.assertTrue(torch.equal(running_var, bn.running_var))
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_batchnorm_nhwc_cuda(self):
for dtype in (torch.half, torch.float):
(N, C, H, W) = 2, 64, 50, 50
model = torch.nn.BatchNorm2d(C, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
model = model.eval().cuda().to(dtype)
inp1 = torch.randn(N, C, H, W, device=torch.device('cuda'), dtype=dtype)
inp2 = inp1.contiguous(memory_format=torch.channels_last)
out1 = model(inp1)
out2 = model(inp2)
self.assertTrue(torch.equal(out1, out2))
def test_pairwise_distance(self):
input1 = torch.randn(4, 4, requires_grad=True)
input2 = torch.randn(4, 4, requires_grad=True)
self.assertTrue(gradcheck(lambda x, y: F.pairwise_distance(x, y), (input1, input2)))
# TODO: Create an OpInfo for pdist
def test_pdist(self):
    """Gradient check for F.pdist over several p-norms, on contiguous and
    transposed (non-contiguous) double inputs."""
    norms = [0, 1, 2, 0.5, 1.5, 2.5, float('inf')]
    for device, transposed in itertools.product(device_(), [False, True]):
        x = torch.randn(4, 5, dtype=torch.double, device=device, requires_grad=True)
        if transposed:
            x = x.transpose(0, 1)
        for p in norms:
            self.assertTrue(gradcheck(lambda t: F.pdist(t, p), (x,)))
def test_pdist_zeros(self):
    """Test that grad is still valid when dist is 0 (duplicated rows give
    exactly-zero pairwise distances)."""
    for device in device_():
        row = torch.randn(1, 3, dtype=torch.double, device=device, requires_grad=True)
        x = row.repeat([2, 1])  # two identical rows => distance 0
        for p in [0, 1, 2, 0.5, 1.5, 2.5, float('inf')]:
            self.assertTrue(gradcheck(lambda t: F.pdist(t, p), (x,)))
def test_pdist_empty_row(self):
    """A single row yields an empty pdist result; gradcheck must still pass."""
    for device in device_():
        single_row = torch.randn(1, 3, dtype=torch.double, device=device,
                                 requires_grad=True)
        self.assertTrue(gradcheck(F.pdist, (single_row,)))
def test_pdist_empty_col(self):
    """Zero feature columns (every distance 0) must still pass gradcheck."""
    for device in device_():
        no_cols = torch.randn(4, 0, dtype=torch.double, device=device,
                              requires_grad=True)
        self.assertTrue(gradcheck(F.pdist, (no_cols,)))
@skipIfTorchDynamo("TorchDynamo fails here for unknown reasons")
@unittest.expectedFailure
def test_pdist_cpu_gradgrad_unimplemented(self):
    """Double backward for pdist is not implemented on CPU, so this
    gradgradcheck is expected to fail."""
    x = torch.randn(4, 5, requires_grad=True)
    gradgradcheck(F.pdist, (x,))
@unittest.expectedFailure
def test_pdist_cuda_gradgrad_unimplemented(self):
    """Double backward for pdist is not implemented on CUDA either, so this
    gradgradcheck is expected to fail."""
    x = torch.randn(4, 5, device='cuda', requires_grad=True)
    gradgradcheck(F.pdist, (x,))
# Merge into OpInfo?
# test for backward in https://github.com/pytorch/pytorch/issues/15511
def test_pdist_large(self):
    """Single backward through pdist on a tall input.

    Regression test for the backward described in
    https://github.com/pytorch/pytorch/issues/15511: shape[0] should be able
    to be (roughly) arbitrarily large, but the kernel is currently limited to
    smaller sizes (see the issue), so this only tests a floor.
    """
    # Fix: the original defined an inner `func(x)` that was never used
    # (torch.pdist was called directly below) -- dead code removed.
    for device in device_():
        x = torch.randn((1000, 1), device=device).requires_grad_()
        output = torch.pdist(x, p=2)
        # just run a single backward, as gradcheck/gradgradcheck is expensive here
        output.sum().backward()
def test_cosine_embedding_loss_with_diff_type(self):
    """cosine_embedding_loss should promote mixed input/target dtypes and
    stay close to the double-precision reference value."""
    for device in device_():
        input1 = torch.tensor([[2, 3, 4], [6, 2, 4]], dtype=torch.double, device=device)
        input2 = torch.tensor([[2, 3, 5], [3, 2, 1]], dtype=torch.double, device=device)
        target = torch.tensor([1, -1], dtype=torch.int, device=device)
        expected = torch.nn.functional.cosine_embedding_loss(input1, input2, target)
        dtypes = get_all_math_dtypes(device)
        for dt1, dt2, dt3 in product(dtypes, dtypes, dtypes):
            # dt3 is the dtype for target = [1, -1]; unsigned cannot hold -1
            if dt3 == torch.uint8:
                continue
            if dt1.is_complex or dt2.is_complex or dt3.is_complex:
                continue
            input1 = input1.to(dt1)
            input2 = input2.to(dt2)
            target = target.to(dt3)
            result = torch.nn.functional.cosine_embedding_loss(input1, input2, target)
            self.assertEqual(result.item(), expected.item(), atol=0.001, rtol=0)
def test_kl_div_with_diff_type(self):
    """kl_div should accept mixed input/target dtypes and stay close to the
    double-precision reference value."""
    for device in device_():
        input = torch.tensor([[2, 3, 5], [3, 2, 1]], dtype=torch.double, device=device)
        target = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.double, device=device)
        expected = torch.nn.functional.kl_div(input, target)
        for input_dtype in get_all_math_dtypes(device):
            if input_dtype.is_complex:
                continue
            for target_dtype in (torch.float32, torch.float64, torch.float16):
                # skip half-precision targets on CPU (not exercised there)
                if torch.device(device).type == 'cpu' and target_dtype == torch.float16:
                    continue
                input = input.to(input_dtype)
                target = target.to(target_dtype)
                result = torch.nn.functional.kl_div(input, target)
                self.assertEqual(result.item(), expected.item(), atol=0.001, rtol=0)
def test_kl_div_with_diff_type_log_target(self):
    """Same mixed-dtype check as test_kl_div_with_diff_type, but with the
    target given in log space (log_target=True)."""
    for device in device_():
        input = torch.tensor([[2, 3, 5], [3, 2, 1]], dtype=torch.double, device=device)
        target = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.double, device=device).log()
        expected = torch.nn.functional.kl_div(input, target, log_target=True)
        for input_dtype in get_all_math_dtypes(device):
            if input_dtype.is_complex:
                continue
            for target_dtype in (torch.float32, torch.float64, torch.float16):
                # skip half-precision targets on CPU (not exercised there)
                if torch.device(device).type == 'cpu' and target_dtype == torch.float16:
                    continue
                input = input.to(input_dtype)
                target = target.to(target_dtype)
                result = torch.nn.functional.kl_div(input, target, log_target=True)
                self.assertEqual(result.item(), expected.item(), atol=0.001, rtol=0)
def test_kl_div_log_softmax_target(self):
    """KL divergence between identical log-softmax distributions is zero."""
    for device in device_():
        a = torch.tensor([[1.0, 2, 3], [5.0, 5, 5]], device=device)
        b = torch.tensor([[1.0, 2, 3], [5.0, 5, 5]], device=device)
        result = F.kl_div(F.log_softmax(a, 1), F.log_softmax(b, 1),
                          reduction='none', log_target=True)
        self.assertEqual(result, torch.zeros_like(a))
def test_cosine_embedding_loss_no_reduce(self):
    """reduction='none' path: gradcheck plus agreement with the python
    reference implementation."""
    input1 = torch.randn(15, 10, requires_grad=True)
    input2 = torch.randn(15, 10, requires_grad=True)
    target = torch.randn(15).sign()
    def loss_fn(x, y, z):
        return F.cosine_embedding_loss(x, y, z, reduction='none')
    self.assertTrue(gradcheck(loss_fn, (input1, input2, target)))
    self.assertEqual(loss_fn(input1, input2, target),
                     loss_reference_fns['CosineEmbeddingLoss'](input1, input2, target,
                                                               reduction='none'))
def test_cosine_embedding_loss_margin_no_reduce(self):
    """margin=0.5 with reduction='none': gradcheck plus agreement with the
    python reference implementation."""
    input1 = torch.randn(15, 10, requires_grad=True)
    input2 = torch.randn(15, 10, requires_grad=True)
    target = torch.randn(15).sign()
    def loss_fn(x, y, z):
        return F.cosine_embedding_loss(x, y, z, margin=0.5, reduction='none')
    self.assertTrue(gradcheck(loss_fn, (input1, input2, target)))
    self.assertEqual(loss_fn(input1, input2, target),
                     loss_reference_fns['CosineEmbeddingLoss'](input1, input2, target,
                                                               margin=0.5, reduction='none'))
def test_cosine_embedding_loss_invalid_shape(self):
input1 = torch.randn(15, 10)
input2 = torch.randn(15, 10)
target = torch.randn(15, 1).sign()
with self.assertRaisesRegex(RuntimeError, "1D target tensor expected"):
F.cosine_embedding_loss(input1, input2, target)
with self.assertRaisesRegex(RuntimeError, "1D target tensor expects 2D input tensors"):
F.cosine_embedding_loss(torch.randn(10), torch.randn(10), torch.randn(10))
with self.assertRaisesRegex(RuntimeError, "0D target tensor expects 1D input tensors"):
F.cosine_embedding_loss(torch.randn(2, 5), torch.randn(2, 5), torch.randn(()))
def test_margin_ranking_loss_no_reduce(self):
    """reduction='none': gradcheck and agreement with the reference impl."""
    x1 = torch.randn(15).mul_(10).requires_grad_()
    x2 = torch.randn(15).mul_(10).requires_grad_()
    target = torch.randn(15).sign()
    def loss_fn(a, b, t):
        return F.margin_ranking_loss(a, b, t, reduction='none')
    self.assertTrue(gradcheck(loss_fn, (x1, x2, target)))
    self.assertEqual(loss_fn(x1, x2, target),
                     loss_reference_fns['MarginRankingLoss'](x1, x2, target, reduction='none'))
def test_margin_ranking_loss_margin_no_reduce(self):
    """margin=0.5, reduction='none': gradcheck and reference agreement."""
    x1 = torch.randn(15).mul_(10).requires_grad_()
    x2 = torch.randn(15).mul_(10).requires_grad_()
    target = torch.randn(15).sign()
    def loss_fn(a, b, t):
        return F.margin_ranking_loss(a, b, t, margin=0.5, reduction='none')
    self.assertTrue(gradcheck(loss_fn, (x1, x2, target)))
    self.assertEqual(loss_fn(x1, x2, target),
                     loss_reference_fns['MarginRankingLoss'](x1, x2, target,
                                                             margin=0.5, reduction='none'))
def test_triplet_margin_loss(self):
    """Default triplet margin loss: gradcheck and reference agreement."""
    anchor = torch.randn(5, 10, requires_grad=True)
    positive = torch.randn(5, 10, requires_grad=True)
    negative = torch.randn(5, 10, requires_grad=True)
    self.assertTrue(gradcheck(
        lambda a, p, n: F.triplet_margin_loss(a, p, n),
        (anchor, positive, negative)))
    self.assertEqual(F.triplet_margin_loss(anchor, positive, negative),
                     loss_reference_fns['TripletMarginLoss'](anchor, positive, negative))
def test_triplet_margin_loss_swap(self):
    """swap=True variant: gradcheck and reference agreement."""
    anchor = torch.randn(5, 10, requires_grad=True)
    positive = torch.randn(5, 10, requires_grad=True)
    negative = torch.randn(5, 10, requires_grad=True)
    self.assertTrue(gradcheck(
        lambda a, p, n: F.triplet_margin_loss(a, p, n, swap=True),
        (anchor, positive, negative)))
    self.assertEqual(F.triplet_margin_loss(anchor, positive, negative, swap=True),
                     loss_reference_fns['TripletMarginLoss'](anchor, positive, negative,
                                                             swap=True))
def test_triplet_margin_loss_no_reduce(self):
    """reduction='none' variant: gradcheck and reference agreement."""
    anchor = torch.randn(5, 10, requires_grad=True)
    positive = torch.randn(5, 10, requires_grad=True)
    negative = torch.randn(5, 10, requires_grad=True)
    self.assertTrue(gradcheck(
        lambda a, p, n: F.triplet_margin_loss(a, p, n, reduction='none'),
        (anchor, positive, negative)))
    self.assertEqual(F.triplet_margin_loss(anchor, positive, negative, reduction='none'),
                     loss_reference_fns['TripletMarginLoss'](anchor, positive, negative,
                                                             reduction='none'))
def test_triplet_margin_loss_swap_no_reduce(self):
    """swap=True combined with reduction='none': gradcheck and reference
    agreement."""
    anchor = torch.randn(5, 10, requires_grad=True)
    positive = torch.randn(5, 10, requires_grad=True)
    negative = torch.randn(5, 10, requires_grad=True)
    self.assertTrue(gradcheck(
        lambda a, p, n: F.triplet_margin_loss(a, p, n, swap=True, reduction='none'),
        (anchor, positive, negative)))
    self.assertEqual(F.triplet_margin_loss(anchor, positive, negative,
                                           swap=True, reduction='none'),
                     loss_reference_fns['TripletMarginLoss'](anchor, positive, negative,
                                                             swap=True, reduction='none'))
def test_triplet_margin_loss_invalid(self):
input1 = torch.randn(5, 10, requires_grad=True)
input2 = torch.randn(5, 10, requires_grad=True)
input3 = torch.randn(5, 10, requires_grad=True)
input_1d = torch.randn(10, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, "All inputs should have same dimension"):
F.triplet_margin_loss(input1, input2, input_1d)
with self.assertRaisesRegex(RuntimeError, "All inputs should have same dimension"):
F.triplet_margin_loss(input1, input_1d, input3)
with self.assertRaisesRegex(RuntimeError, "All inputs should have same dimension"):
F.triplet_margin_loss(input_1d, input2, input3)
def test_pointwise_loss_target_grad_none_reduction(self):
i = torch.randn(5, 10)
t = torch.randn(5, 10, requires_grad=True)
self.assertEqual(F.mse_loss(i, t, reduction='none').size(), t.size())
self.assertEqual(F.l1_loss(i, t, reduction='none').size(), t.size())
def test_pointwise_loss_broadcast(self):
losses = {
'mse_loss': lambda x, y, r: F.mse_loss(x, y, reduction=r),
'l1_loss': lambda x, y, r: F.l1_loss(x, y, reduction=r),
'smooth_l1_loss': lambda x, y, r: F.smooth_l1_loss(x, y, reduction=r),
'huber_loss': lambda x, y, r: F.huber_loss(x, y, reduction=r),
}
input = torch.randn(2, 1, requires_grad=True)
for _name, fn in losses.items():
for requires_grad in [True, False]:
# When target.requires_grad=True, its impl is in Python, while the other is in TH.
target = torch.randn(2, 10, requires_grad=requires_grad)
for reduction in ['none', 'mean', 'sum']:
l = fn(input, target, reduction)
if reduction == 'none':
self.assertEqual(l.size(), target.size())
self.assertTrue(gradcheck(fn, (input, target, reduction)))
# https://github.com/pytorch/pytorch/issues/27692 reports
# that l1_loss get a wrong result for big batch size
def test_l1_loss_correct(self):
    """L1Loss against zeros equals mean(|input|), including for larger batch
    sizes (regression test for
    https://github.com/pytorch/pytorch/issues/27692)."""
    for dtype in (torch.float, torch.cfloat):
        for batch in range(1, 50, 10):
            x = torch.rand(batch, 3, 1024, 1024, dtype=dtype)
            self.assertEqual(torch.nn.L1Loss()(x, torch.zeros_like(x)),
                             x.abs().mean())
def test_smoothl1loss_intergral_target(self):
    """Integral-dtype targets must yield the same input gradient as their
    float equivalents for every reduction mode."""
    def _input_grad(input, target, reduction):
        # d(loss)/d(input) on a fresh graph
        output = F.smooth_l1_loss(input, target, reduction=reduction, beta=0.5)
        output.sum().backward()
        return input.grad
    cases = product(device_(), integral_types(), ('none', 'sum', 'mean'))
    for device, dtype, reduction in cases:
        input = torch.randn(2, 2, device=device, requires_grad=True)
        target = torch.randint(0, 9, (2, 2), device=device, dtype=dtype)
        grad_vs_float = _input_grad(input, target.float(), reduction)
        grad_vs_integral = _input_grad(input.detach().clone().requires_grad_(True),
                                       target, reduction)
        self.assertEqual(grad_vs_integral, grad_vs_float)
def test_smoothl1loss_negative_beta_not_supported(self):
with self.assertRaises(RuntimeError):
F.smooth_l1_loss(torch.randn(2, 2), torch.randn(2, 2), beta=-1.0)
def test_huber_loss_invalid_delta(self):
def _test_huber_loss_delta_error_helper(delta):
input, target = torch.randn(2, 2), torch.randn(2, 2)
loss = torch.nn.HuberLoss(delta=delta)
with self.assertRaises(RuntimeError):
loss(input, target)
def test_huber_loss_negative_delta():
_test_huber_loss_delta_error_helper(delta=-0.5)
def test_huber_loss_zero_delta():
_test_huber_loss_delta_error_helper(delta=0.0)
test_huber_loss_negative_delta()
test_huber_loss_zero_delta()
def test_cosine_similarity(self):
    """Shape, numerical precision, zero-norm backward, and type promotion of
    F.cosine_similarity."""
    # output shape: reducing dim=1 of a (1, 3, 2, 1) input gives (1, 2, 1)
    in_shape = (1, 3, 2, 1)
    out_shape = (1, 2, 1)
    a = torch.randn(in_shape, requires_grad=True)
    b = torch.randn(in_shape, requires_grad=True)
    self.assertEqual(F.cosine_similarity(a, b, dim=1).size(), out_shape)
    # numerical precision, issue #18057: identical vectors must not exceed 1
    vv1 = torch.tensor([float(i) for i in range(84)]).unsqueeze(0)
    vv2 = torch.tensor([float(i) for i in range(84)]).unsqueeze(0)
    out = F.cosine_similarity(vv1, vv2)
    self.assertLessEqual(out, 1.0)
    # Check dividing by 0.
    # previous behavior: <x,y>/max(eps, ||x|| * ||y||)
    # current: <x/max(eps, ||x||), y/max(eps,||y||)>
    # if f(x,y) is the cosine similarity, then
    # df/dx = y/(||x|| * ||y||) - (x * <x,y> * ||y||/||x||)/(||x|| * ||y||)^2
    # the tests below check division by zero in the backward formula when
    # x := input2 = 0, y := input1 != 0.
    # For these inputs the gradient wrt x simplifies to g(x,y) := y/(||x|| * ||y||)
    # Previous test checks g(x,y) == y/eps,
    # Current test checks g(x,y) == (y/||y||)/eps.
    input1 = torch.randn(10).requires_grad_()
    input2 = torch.zeros_like(input1).requires_grad_()
    torch.cosine_similarity(input1, input2, 0).sum().backward()
    self.assertEqual(input1.grad, torch.zeros_like(input1))
    self.assertEqual(input2.grad, input1 / input1.norm() * 1e8)
    # type promotion, issue #61454: int8 vs float input
    scalar = torch.tensor(12.)
    self.assertEqual(F.cosine_similarity(scalar.to(torch.int8), scalar, dim=-1), 1.)
def test_grid_sample_error_checking(self):
    """Each documented misuse of F.grid_sample must raise with the expected
    exception type and message."""
    input = torch.empty(1, 1, 2, 2)
    grid = torch.empty(1, 1, 1, 2)
    # sanity: a well-formed call succeeds
    F.grid_sample(input, grid, align_corners=False)
    # (exception type, message fragment, offending call)
    cases = [
        (ValueError, "but got: 'garbage'",
         lambda: F.grid_sample(input, grid, mode='garbage', align_corners=False)),
        (ValueError, "but got: 'garbage'",
         lambda: F.grid_sample(input, grid, padding_mode='garbage', align_corners=False)),
        (RuntimeError, "expected grid to have size 1 in last dimension",
         lambda: F.grid_sample(input[0], grid, align_corners=False)),
        (RuntimeError, "expected grid to have size 2 in last dimension",
         lambda: F.grid_sample(input, torch.empty(1, 1, 1, 1, 3), align_corners=False)),
        (RuntimeError, "expected grid and input to have same batch size",
         lambda: F.grid_sample(input, torch.empty(2, 1, 1, 2), align_corners=False)),
        (RuntimeError, "expected grid to have size 2 in last dimension",
         lambda: F.grid_sample(input, torch.empty(1, 1, 1, 3), align_corners=False)),
        (RuntimeError, "expected input to have non-empty spatial dimensions",
         lambda: F.grid_sample(torch.empty(1, 1, 0, 2), grid, align_corners=False)),
        (RuntimeError, "bicubic interpolation only supports 4D input",
         lambda: F.grid_sample(torch.empty(1, 1, 2, 2, 2), torch.empty(1, 1, 1, 1, 3),
                               mode='bicubic')),
    ]
    for exc_type, fragment, call in cases:
        with self.assertRaisesRegex(exc_type, fragment):
            call()
    if TEST_CUDA:
        with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
            F.grid_sample(input.cuda(), grid, align_corners=False)
def test_affine_grid_error_checking(self):
# 2D affine
theta = torch.empty(1, 2, 3, dtype=torch.double)
size = torch.Size([1, 1, 2, 2])
# assert no error
F.affine_grid(theta, size, align_corners=False)
# check for warning for empty span along dimension
with warnings.catch_warnings(record=True) as w:
# Ensure warnings are being shown
warnings.simplefilter("always")
# Should not trigger warning
F.affine_grid(theta, torch.Size([1, 1, 2, 1]), align_corners=False)
# Check no warning occurs
self.assertNotIn('See the documentation of affine_grid for details.', ' '.join(map(str, w)))
# Should trigger warning
F.affine_grid(theta, torch.Size([1, 1, 2, 1]), align_corners=True)
# Check warning occurs
self.assertIn('See the documentation of affine_grid for details.', ' '.join(map(str, w)))
with self.assertRaisesRegex(ValueError, "Expected theta to have floating point type"):
F.affine_grid(theta.int(), size, align_corners=False)
with self.assertRaisesRegex(ValueError, "Expected a batch of 2D affine matrices of shape Nx2x3"):
F.affine_grid(theta[0], size, align_corners=False)
with self.assertRaisesRegex(ValueError, "Expected a batch of 2D affine matrices of shape Nx2x3"):
F.affine_grid(theta.unsqueeze(0), size, align_corners=False)
with self.assertRaisesRegex(ValueError, "Expected a batch of 2D affine matrices of shape Nx2x3"):
F.affine_grid(theta.repeat(1, 2, 1), size, align_corners=False)
with self.assertRaisesRegex(ValueError, "Expected a batch of 2D affine matrices of shape Nx2x3"):
F.affine_grid(theta.repeat(1, 1, 2), size, align_corners=False)
# 3D affine
theta = torch.empty(1, 3, 4, dtype=torch.double)
size = torch.Size([1, 1, 2, 2, 2])
# assert no error
F.affine_grid(theta, size, align_corners=False)
# check for warning for empty span along dimension
with warnings.catch_warnings(record=True) as w:
# Ensure warnings are being shown
warnings.simplefilter("always")
# Should not trigger warning
F.affine_grid(theta, torch.Size([1, 1, 3, 2, 1]), align_corners=False)
# Check no warning occurs
self.assertNotIn('See the documentation of affine_grid for details.', ' '.join(map(str, w)))
# Should trigger warning
F.affine_grid(theta, torch.Size([1, 1, 3, 2, 1]), align_corners=True)
# Check warning occurs
self.assertIn('See the documentation of affine_grid for details.', ' '.join(map(str, w)))
with self.assertRaisesRegex(ValueError, "Expected a batch of 3D affine matrices of shape Nx3x4"):
F.affine_grid(theta[0], size, align_corners=False)
with self.assertRaisesRegex(ValueError, "Expected a batch of 3D affine matrices of shape Nx3x4"):
F.affine_grid(theta.unsqueeze(0), size, align_corners=False)
with self.assertRaisesRegex(ValueError, "Expected a batch of 3D affine matrices of shape Nx3x4"):
F.affine_grid(theta.repeat(1, 2, 1), size, align_corners=False)
with self.assertRaisesRegex(ValueError, "Expected a batch of 3D affine matrices of shape Nx3x4"):
F.affine_grid(theta.repeat(1, 1, 2), size, align_corners=False)
with self.assertRaisesRegex(NotImplementedError, "affine_grid only supports 4D and 5D sizes"):
F.affine_grid(theta, torch.Size([1, 2, 2]), align_corners=False)
with self.assertRaisesRegex(NotImplementedError, "affine_grid only supports 4D and 5D sizes"):
F.affine_grid(theta, torch.Size([1, 1, 2, 2, 2, 2]), align_corners=False)
def test_grid_sample(self):
    """Exhaustive CPU checks of F.grid_sample across all interpolation
    modes, padding modes and align_corners settings: output shapes,
    agreement with the unvectorized CPU fallback (and with CUDA when
    available), hard-coded forward groundtruth, hard-coded gradient
    groundtruth, and a final gradcheck.
    """
    # Backward pass of native C++ and CUDA kernels branch depending on whether input requires gradient,
    # so we test both cases.
    def test(N, C, H, W, mode, padding_mode, align_corners, input_requires_grad):
        # Drives test_shape over same/larger/smaller/1x1/empty output sizes.
        def test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners):
            for grid_dim_contig_order in [(0, 1, 2, 3), (0, 3, 1, 2), (3, 0, 1, 2), (0, 2, 1, 3)]:
                # grid_dim_contig_order specifies the dimension order that can
                # make grid to be contiguous.
                # i.e., grid.permute(grid_dim_contig_order) is contiguous.
                # e.g., with grid_dim_contig_order=[0, 3, 1, 2], grid should be
                # initialized with contiguous tensor of shape [N, 2, H, W]
                # and permuted to [N, H, W, 2] afterwards.
                grid_shape = [N, H, W, 2]
                grid_init_shape = [grid_shape[d] for d in grid_dim_contig_order]
                grid_fwd_permute = [None, None, None, None]
                for i, d in enumerate(grid_dim_contig_order):
                    grid_fwd_permute[d] = i
                def get_grid(device='cpu', data=None):
                    # Build a grid that is contiguous only under
                    # grid_dim_contig_order, optionally reusing `data`.
                    if data is not None:
                        assert list(data.shape) == grid_shape
                        data = data.permute(grid_dim_contig_order).to(device)
                    else:
                        data = torch.randn(grid_init_shape, device=device)
                    grid = data.permute(grid_fwd_permute)
                    assert grid.permute(grid_dim_contig_order).is_contiguous()
                    return grid
                input_cpu = torch.randn(C, N, IH, IW).transpose(0, 1).requires_grad_(input_requires_grad)
                grid_cpu = get_grid().requires_grad_()
                out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode,
                                        align_corners=align_corners)
                self.assertTrue(out_cpu.size() == torch.Size([N, C, H, W]))
                gradients = torch.randn_like(out_cpu)
                out_cpu.backward(gradients)
                # Compare against unvectorized CPU fallback
                # NOTE [ grid_sample CPU fallback ]
                # grid_sample uses AVX for 2d images, but that requires 32-bit indexing for
                # 32-bit floats. So we also have a fallback that is used only for float tensors
                # requiring 64-bit indexing. That requires too much memory to run on CI, so we
                # also export the fallback and test it here to ensure feature parity with
                # the vectorized version.
                input_fallback = input_cpu.float().detach_().requires_grad_()
                grid_fallback = grid_cpu.float().detach_().requires_grad_()
                out_fallback = torch._grid_sampler_2d_cpu_fallback(
                    input_fallback, grid_fallback,
                    F.GRID_SAMPLE_INTERPOLATION_MODES[mode],
                    F.GRID_SAMPLE_PADDING_MODES[padding_mode],
                    align_corners)
                self.assertEqual(out_fallback, out_cpu.float(), atol=1e-5, rtol=5e-5)
                out_fallback.backward(gradients.float())
                if input_requires_grad:
                    self.assertEqual(input_fallback.grad, input_cpu.grad.float(), atol=1e-4, rtol=5e-5)
                self.assertEqual(grid_fallback.grad, grid_cpu.grad.float(), atol=1e-4, rtol=5e-5)
                if TEST_CUDA:
                    input_cuda = input_cpu.detach().transpose(0, 1).cuda().transpose(0, 1).requires_grad_(input_requires_grad)
                    grid_cuda = get_grid('cuda', grid_cpu.detach()).requires_grad_()
                    out_cuda = F.grid_sample(input_cuda, grid_cuda, mode=mode, padding_mode=padding_mode,
                                             align_corners=align_corners)
                    self.assertEqual(out_cpu, out_cuda)
                    out_cuda.backward(gradients.cuda())
                    if input_requires_grad:
                        self.assertEqual(input_cpu.grad, input_cuda.grad)
                    self.assertEqual(grid_cpu.grad, grid_cuda.grad, atol=5e-5, rtol=0)
                    # check that zero-dimensional input strides don't error out
                    base_input = torch.randn(N, C, 1, IW)
                    input_cpu = base_input.expand_as(input_cuda).requires_grad_(input_requires_grad)
                    out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode,
                                            align_corners=align_corners)
                    input_cuda = base_input.cuda().expand_as(input_cuda).requires_grad_(input_requires_grad)
                    out_cuda = F.grid_sample(input_cuda, grid_cuda, mode=mode, padding_mode=padding_mode,
                                             align_corners=align_corners)
                    self.assertEqual(out_cpu, out_cuda)
        # test same size output
        test_shape(N, C, H, W, H, W, mode, padding_mode, align_corners)
        # test larger output
        N = random.randint(2, 8)
        C = random.randint(2, 8)
        IH = random.randint(2, 8)
        IW = random.randint(2, 8)
        H = random.randint(IH + 1, 12)
        W = random.randint(IW + 1, 12)
        test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners)
        # test smaller output
        N = random.randint(2, 8)
        C = random.randint(2, 8)
        IH = random.randint(2, 8)
        IW = random.randint(2, 8)
        H = random.randint(2, IH)
        W = random.randint(2, IW)
        test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners)
        # test 1x1 input
        N = random.randint(2, 8)
        C = random.randint(2, 8)
        IH = 1
        IW = 1
        H = random.randint(2, 5)
        W = random.randint(2, 5)
        test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners)
        # testing empty grid
        N = random.randint(2, 8)
        C = random.randint(2, 8)
        IH = random.randint(2, 8)
        IW = random.randint(2, 8)
        W = random.randint(3, IW + 2)
        test_shape(N, C, IH, IW, 0, W, mode, padding_mode, align_corners)
        # testing empty channel
        N = random.randint(2, 8)
        IH = random.randint(2, 8)
        IW = random.randint(2, 8)
        H = random.randint(3, IH + 2)
        W = random.randint(3, IW + 2)
        test_shape(N, 0, IH, IW, H, W, mode, padding_mode, align_corners)
        # testing empty batch
        C = random.randint(2, 8)
        IH = random.randint(2, 8)
        IW = random.randint(2, 8)
        H = random.randint(3, IH + 2)
        W = random.randint(3, IW + 2)
        test_shape(0, C, IH, IW, H, W, mode, padding_mode, align_corners)
    for mode in ('bilinear', 'nearest', 'bicubic'):
        for padding_mode in ('zeros', 'border', 'reflection'):
            for align_corners in (True, False):
                # test known input on CPU
                input = torch.arange(1., 11).view(1, 1, 2, 5)
                grid = torch.tensor(
                    [[[-0.9, -4.1], [0, 0.2000], [1, -1], [-0.333, 1e-6], [0.5, 1.0]],
                     [[-1.0, -0.5], [0, 0.3333], [1, -1], [-0.200, 1e-6], [1.5, 0.5]]]).view(1, 2, 5, 2)
                # forward groundtruth was precomputed per (mode, padding_mode, align_corners)
                if mode == 'bilinear':
                    if padding_mode == 'zeros':
                        if align_corners:
                            groundtruth = torch.tensor(
                                [[0.0000, 6.0000000000, 5.0000, 4.8340, 9.0000],
                                 [2.2500, 6.3332500450, 5.0000, 5.1000, 0.0000]]).view(1, 1, 2, 5)
                        else:
                            groundtruth = torch.tensor(
                                [[0.0000, 6.5000000000, 1.2500, 4.6675000191, 4.6250],
                                 [0.5000, 7.1665000916, 1.2500, 5.0000000000, 0.0000]]).view(1, 1, 2, 5)
                    elif padding_mode == 'border':
                        if align_corners:
                            groundtruth = torch.tensor(
                                [[1.2000, 6.0000000000, 5.0000, 4.8340, 9.0000],
                                 [2.2500, 6.3332500450, 5.0000, 5.1000, 8.7500]]).view(1, 1, 2, 5)
                        else:
                            groundtruth = torch.tensor(
                                [[1.0000, 6.5000000000, 5.0000, 4.6675000191, 9.2500],
                                 [1.0000, 7.1665000916, 5.0000, 5.0000000000, 10.0000]]).view(1, 1, 2, 5)
                    elif padding_mode == 'reflection':
                        if align_corners:
                            groundtruth = torch.tensor(
                                [[3.4500, 6.0000000000, 5.0000, 4.8340, 9.0000],
                                 [2.2500, 6.3332500450, 5.0000, 5.1000, 7.7500]]).view(1, 1, 2, 5)
                        else:
                            groundtruth = torch.tensor(
                                [[3.0000004768, 6.5000000000, 5.0000, 4.6675000191, 9.2500],
                                 [1.0000000000, 7.1665000916, 5.0000, 5.0000000000, 9.2500]]).view(1, 1, 2, 5)
                    else:
                        raise AssertionError("missing groundtruth test for padding mode '{}'".format(padding_mode))
                elif mode == 'nearest':
                    if padding_mode == 'zeros':
                        if align_corners:
                            groundtruth = torch.tensor(
                                [[0., 8., 5., 7., 9.],
                                 [1., 8., 5., 8., 0.]]).view(1, 1, 2, 5)
                        else:
                            groundtruth = torch.tensor(
                                [[0., 8., 5., 7., 0.],
                                 [1., 8., 5., 8., 0.]]).view(1, 1, 2, 5)
                    elif padding_mode == 'border':
                        if align_corners:
                            groundtruth = torch.tensor(
                                [[1., 8., 5., 7., 9.],
                                 [1., 8., 5., 8., 10.]]).view(1, 1, 2, 5)
                        else:
                            groundtruth = torch.tensor(
                                [[1., 8., 5., 7., 9.],
                                 [1., 8., 5., 8., 10.]]).view(1, 1, 2, 5)
                    elif padding_mode == 'reflection':
                        if align_corners:
                            groundtruth = torch.tensor(
                                [[1., 8., 5., 7., 9.],
                                 [1., 8., 5., 8., 9.]]).view(1, 1, 2, 5)
                        else:
                            groundtruth = torch.tensor(
                                [[1., 8., 5., 7., 9.],
                                 [1., 8., 5., 8., 9.]]).view(1, 1, 2, 5)
                    else:
                        raise AssertionError("missing groundtruth test for padding mode '{}'".format(padding_mode))
                elif mode == 'bicubic':
                    if padding_mode == 'zeros':
                        if align_corners:
                            groundtruth = torch.tensor(
                                [[-0.10424726, 7.1400003, 5.0000, 5.7842274, 9.0000],
                                 [2.4492188, 7.4814040, 5.0000, 6.0277520, 0.0000]]).view(1, 1, 2, 5)
                        else:
                            groundtruth = torch.tensor(
                                [[0.00000, 7.6287503, 1.0625, 5.5977230, 5.3270264],
                                 [0.40625, 8.0288770, 1.0625, 5.9375067, -0.3515625]]).view(1, 1, 2, 5)
                    elif padding_mode == 'border':
                        if align_corners:
                            groundtruth = torch.tensor(
                                [[1.1520010, 6.0599990, 5.0000, 4.870930, 9.0000000],
                                 [2.1328125, 6.4258375, 5.0000, 5.076003, 8.8671875]]).view(1, 1, 2, 5)
                        else:
                            groundtruth = torch.tensor(
                                [[0.894531, 6.6050020, 4.625, 4.7138715, 9.800781],
                                 [0.906250, 7.2822485, 4.625, 5.0000052, 10.00000]]).view(1, 1, 2, 5)
                    elif padding_mode == 'reflection':
                        if align_corners:
                            groundtruth = torch.tensor(
                                [[3.1822524, 6.239998, 5.0000, 4.8709273, 9.00000],
                                 [1.7812500, 6.703594, 5.0000, 5.0760007, 8.21875]]).view(1, 1, 2, 5)
                        else:
                            groundtruth = torch.tensor(
                                [[2.7993753, 6.6050020, 4.25, 4.7138715, 10.269531],
                                 [0.8125000, 7.2822485, 4.25, 5.0000052, 9.332031]]).view(1, 1, 2, 5)
                    else:
                        raise AssertionError("missing groundtruth test for padding mode '{}'".format(padding_mode))
                else:
                    raise AssertionError("missing groundtruth test for interpolation mode '{}'".format(mode))
                output = F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
                                       align_corners=align_corners)
                self.assertEqual(output, groundtruth, atol=1e-5, rtol=0,
                                 msg="groundtruth comparison failed for mode={}, "
                                 "padding_mode={}".format(mode, padding_mode))
                # See NOTE [ grid_sample CPU fallback ]
                output = torch._grid_sampler_2d_cpu_fallback(
                    input.float(), grid.float(),
                    F.GRID_SAMPLE_INTERPOLATION_MODES[mode],
                    F.GRID_SAMPLE_PADDING_MODES[padding_mode],
                    align_corners)
                self.assertEqual(output, groundtruth.float(), atol=1e-5, rtol=0)
                # explicit check for gradient edge cases
                input = torch.arange(0., 5).expand((1, 1, 5, 5))
                grid = torch.tensor(
                    [[[1.0, 1.0], [1.0, -1.0], [0.8, 0.8], [0.8, -0.8]],
                     [[-1.0, -1.0], [-1.0, 1.0], [-0.8, -0.8], [-0.8, 0.8]]]).view(1, 2, 4, 2).requires_grad_()
                # gradient groundtruth (d out / d grid) per configuration
                if mode == 'bilinear':
                    if padding_mode == 'zeros':
                        if align_corners:
                            groundtruth = torch.tensor(
                                [[[[-8., -8.], [-8., 0.], [2., 0.], [2., 0.]],
                                  [[2., 0.], [2., 0.], [2., 0.], [2., 0.]]]]).view(1, 2, 4, 2)
                        else:
                            groundtruth = torch.tensor(
                                [[[[-5., -5.], [-5., 5.], [-10., -10.], [-10., 10.]],
                                  [[0., 0.], [0., 0.], [0., 0.], [0., 0.]]]]).view(1, 2, 4, 2)
                    elif padding_mode == 'border':
                        if align_corners:
                            groundtruth = torch.tensor(
                                [[[[-0., -0.], [-0., 0.], [2., 0.], [2., 0.]],
                                  [[0., 0.], [0., 0.], [2., 0.], [2., 0.]]]]).view(1, 2, 4, 2)
                        else:
                            groundtruth = torch.tensor(
                                [[[[-0., -0.], [-0., 0.], [-0., -0.], [-0., 0.]],
                                  [[0., 0.], [0., 0.], [0., 0.], [0., 0.]]]]).view(1, 2, 4, 2)
                    elif padding_mode == 'reflection':
                        if align_corners:
                            groundtruth = torch.tensor(
                                [[[[-0., -0.], [-0., 0.], [2., 0.], [2., 0.]],
                                  [[0., 0.], [0., 0.], [2., 0.], [2., 0.]]]]).view(1, 2, 4, 2)
                        else:
                            groundtruth = torch.tensor(
                                [[[[-0., -0.], [-0., 0.], [-0., -0.], [-0., 0.]],
                                  [[0., 0.], [0., 0.], [0., 0.], [0., 0.]]]]).view(1, 2, 4, 2)
                    else:
                        raise AssertionError("missing gradient groundtruth test for padding mode '{}'".format(padding_mode))
                elif mode == 'nearest':
                    groundtruth = torch.tensor(
                        [[[[-0., -0.], [-0., 0.], [-0., -0.], [-0., 0.]],
                          [[0., 0.], [0., 0.], [0., 0.], [0., 0.]]]]).view(1, 2, 4, 2)
                elif mode == 'bicubic':
                    if padding_mode == 'zeros':
                        if align_corners:
                            groundtruth = torch.tensor(
                                [[[[-4.5, -6.], [-4.5, 6.], [2.725679, 0.740878], [2.725679, -0.740878]],
                                  [[1.5, 0.], [1.5, 0.], [1.927921, -0.05688], [1.927921, 0.05688]]]]).view(1, 2, 4, 2)
                        else:
                            groundtruth = torch.tensor(
                                [[[[-5.859375, -5.888672], [-5.859375, 5.888672], [-5.6250, -7.5000], [-5.6250, 7.5000]],
                                  [[-0.234375, -0.263672], [-0.234375, 0.263672], [1.8750, 0.], [1.8750, 0.]]]]
                            ).view(1, 2, 4, 2)
                    elif padding_mode == 'border':
                        if align_corners:
                            groundtruth = torch.tensor(
                                [[[[1.5, 0.], [1.5, 0.], [1.74, 0.], [1.74, 0.]],
                                  [[1.5, 0.], [1.5, 0.], [1.74, 0.], [1.74, 0.]]]]).view(1, 2, 4, 2)
                        else:
                            groundtruth = torch.tensor(
                                [[[[-0.46875, 0.], [-0.46875, 0.], [1.8750, 0.], [1.8750, 0.]],
                                  [[-0.46875, 0.], [-0.46875, 0.], [1.8750, 0.], [1.8750, 0.]]]]).view(1, 2, 4, 2)
                    elif padding_mode == 'reflection':
                        if align_corners:
                            groundtruth = torch.tensor(
                                [[[[0., 0.], [0., 0.], [1.92, 0.], [1.92, 0.]],
                                  [[0., 0.], [0., 0.], [1.92, 0.], [1.92, 0.]]]]).view(1, 2, 4, 2)
                        else:
                            groundtruth = torch.tensor(
                                [[[[0., 0.], [0., 0.], [1.875, 0.], [1.875, 0.]],
                                  [[0., 0.], [0., 0.], [1.875, 0.], [1.875, 0.]]]]).view(1, 2, 4, 2)
                    else:
                        raise AssertionError("missing gradient groundtruth test for padding mode '{}'".format(padding_mode))
                else:
                    raise AssertionError("missing gradient groundtruth test for interpolation mode '{}'".format(mode))
                for input_requires_grad in [False, True]:
                    input = input.requires_grad_(input_requires_grad)
                    F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
                                  align_corners=align_corners).sum().backward()
                    self.assertEqual(grid.grad, groundtruth, atol=1e-5, rtol=0,
                                     msg="gradient groundtruth comparison failed for mode={}, "
                                     "padding_mode={}, input_requires_grad={}".format(mode, padding_mode, input_requires_grad))
                    # zero the grad so the next backward starts fresh
                    grid.grad.zero_()
                # See NOTE [ grid_sample CPU fallback ]
                torch._grid_sampler_2d_cpu_fallback(
                    input.float(), grid.float(),
                    F.GRID_SAMPLE_INTERPOLATION_MODES[mode],
                    F.GRID_SAMPLE_PADDING_MODES[padding_mode],
                    align_corners).sum().backward()
                self.assertEqual(grid.grad, groundtruth, atol=1e-5, rtol=0)
                # do gradcheck
                N = random.randint(2, 8)
                C = random.randint(2, 6)
                H = random.randint(2, 8)
                W = random.randint(2, 8)
                input = torch.randn(N, C, H, W, requires_grad=True)
                grid = torch.randn(N, H, W, 2, requires_grad=True)
                for input_requires_grad in [False, True]:
                    input.requires_grad_(input_requires_grad)
                    self.assertTrue(gradcheck(
                        lambda inp, grd: F.grid_sample(inp, grd, mode=mode, padding_mode=padding_mode,
                                                       align_corners=align_corners),
                        (input, grid)))
                    test(N, C, H, W, mode, padding_mode, align_corners, input_requires_grad)
                    if TEST_CUDNN:
                        with cudnn.flags(enabled=False):
                            test(N, C, H, W, mode, padding_mode, align_corners, input_requires_grad)
def test_grid_sample_3d(self):
    """End-to-end checks of F.grid_sample on 5-D (volumetric) inputs.

    For every (mode, padding_mode, align_corners) combination: gradcheck the
    op, then compare CPU vs CUDA forward/backward over a variety of shapes
    (same, larger, smaller, 1x1 input, and empty grid/channel/batch).
    """
    # Backward pass of native C++ and CUDA kernels branch depending on whether input requires gradient,
    # so we test both cases.
    def test(N, C, D, H, W, mode, padding_mode, align_corners, input_requires_grad):
        def test_shape(N, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners):
            # transpose(0, 1) deliberately makes the tensors non-contiguous.
            input_cpu = torch.randn(C, N, ID, IH, IW).transpose(0, 1).requires_grad_(input_requires_grad)
            grid_cpu = torch.randn(D, N, H, W, 3).transpose(0, 1).requires_grad_()
            out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode,
                                    align_corners=align_corners)
            self.assertTrue(out_cpu.size() == torch.Size([N, C, D, H, W]))
            gradients = torch.randn_like(out_cpu)
            out_cpu.backward(gradients)
            if TEST_CUDA:
                # Same computation on CUDA must match the CPU results.
                input_cuda = input_cpu.detach().transpose(0, 1).cuda().transpose(0, 1).requires_grad_(input_requires_grad)
                grid_cuda = grid_cpu.detach().transpose(0, 1).cuda().transpose(0, 1).requires_grad_()
                out_cuda = F.grid_sample(input_cuda, grid_cuda, mode=mode, padding_mode=padding_mode,
                                         align_corners=align_corners)
                self.assertEqual(out_cpu, out_cuda)
                out_cuda.backward(gradients.cuda())
                if input_requires_grad:
                    self.assertEqual(input_cpu.grad, input_cuda.grad)
                self.assertEqual(grid_cpu.grad, grid_cuda.grad, atol=5e-5, rtol=0)
                # check that zero-dimensional input strides don't error out
                base_input = torch.randn(N, C, 1, IH, IW)
                input_cpu = base_input.expand_as(input_cuda).requires_grad_(input_requires_grad)
                grid_cpu = torch.randn(N, D, H, W, 3, requires_grad=True)
                out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode,
                                        align_corners=align_corners)
                input_cuda = base_input.cuda().expand_as(input_cuda).requires_grad_(input_requires_grad)
                grid_cuda = grid_cpu.detach().cuda().requires_grad_()
                out_cuda = F.grid_sample(input_cuda, grid_cuda, mode=mode, padding_mode=padding_mode,
                                         align_corners=align_corners)
                self.assertEqual(out_cpu, out_cuda)
        # test same size output
        test_shape(N, C, D, H, W, D, H, W, mode, padding_mode, align_corners)
        # test larger output
        N = random.randint(2, 7)
        C = random.randint(2, 5)
        ID = random.randint(2, 7)
        IH = random.randint(2, 7)
        IW = random.randint(2, 7)
        D = random.randint(ID + 1, 10)
        H = random.randint(IH + 1, 10)
        W = random.randint(IW + 1, 10)
        test_shape(N, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)
        # test smaller output
        N = random.randint(2, 7)
        C = random.randint(2, 5)
        ID = random.randint(2, 7)
        IH = random.randint(2, 7)
        IW = random.randint(2, 7)
        D = random.randint(2, ID)
        H = random.randint(2, IH)
        W = random.randint(2, IW)
        test_shape(N, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)
        # test 1x1 input
        N = random.randint(2, 7)
        C = random.randint(2, 7)
        ID = 1
        IH = 1
        IW = 1
        H = random.randint(2, 5)
        W = random.randint(2, 5)
        # NOTE(review): D is not re-randomized here, so the value from the
        # previous "smaller output" case is reused — presumably intentional.
        test_shape(N, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)
        # testing empty grid
        N = random.randint(2, 7)
        C = random.randint(2, 5)
        ID = random.randint(2, 7)
        IH = random.randint(2, 7)
        IW = random.randint(2, 7)
        D = random.randint(3, ID + 2)
        W = random.randint(3, IW + 2)
        test_shape(N, C, ID, IH, IW, D, 0, W, mode, padding_mode, align_corners)
        # testing empty channel
        N = random.randint(2, 7)
        ID = random.randint(2, 5)
        IH = random.randint(2, 7)
        IW = random.randint(2, 7)
        D = random.randint(3, ID + 2)
        H = random.randint(3, IH + 2)
        W = random.randint(3, IW + 2)
        test_shape(N, 0, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)
        # testing empty batch
        C = random.randint(2, 5)
        ID = random.randint(2, 7)
        IH = random.randint(2, 7)
        IW = random.randint(2, 7)
        D = random.randint(3, ID + 2)
        H = random.randint(3, IH + 2)
        W = random.randint(3, IW + 2)
        test_shape(0, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)
    for mode in ('bilinear', 'nearest'):
        for padding_mode in ('zeros', 'border', 'reflection'):
            for align_corners in (True, False):
                # do gradcheck
                N = random.randint(2, 5)
                C = random.randint(2, 4)
                D = random.randint(2, 5)
                H = random.randint(2, 5)
                W = random.randint(2, 5)
                input = torch.randn(N, C, D, H, W, requires_grad=True)
                grid = torch.randn(N, D, H, W, 3, requires_grad=True)
                self.assertTrue(gradcheck(
                    lambda inp, grid: F.grid_sample(inp, grid, mode=mode, padding_mode=padding_mode,
                                                    align_corners=align_corners),
                    (input, grid)))
                # gradcheck again with the input frozen (grid-only gradient path)
                input = input.requires_grad_(False)
                self.assertTrue(gradcheck(
                    lambda grid: F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
                                               align_corners=align_corners),
                    (grid,)))
                for input_requires_grad in [False, True]:
                    test(N, C, D, H, W, mode, padding_mode, align_corners, input_requires_grad)
def test_affine_grid(self):
    """F.affine_grid (2-D): known-value check, gradcheck, and CPU/CUDA parity."""
    # test known input on CPU
    input = torch.arange(1., 7).view(1, 2, 3)
    output = F.affine_grid(input, torch.Size([1, 1, 2, 2]), align_corners=True)
    groundtruth = torch.tensor(
        [[[0., -3.], [2., 5.]], [[4., 7.], [6., 15.]]]).view(1, 2, 2, 2)
    self.assertEqual(output, groundtruth)
    output = F.affine_grid(input, torch.Size([1, 1, 2, 2]), align_corners=False)
    groundtruth = torch.tensor(
        [[[1.5, 1.5], [2.5, 5.5]], [[3.5, 6.5], [4.5, 10.5]]]).view(1, 2, 2, 2)
    self.assertEqual(output, groundtruth)
    for align_corners in (True, False):
        # do gradcheck
        N = random.randint(1, 8)
        C = random.randint(1, 8)
        H = random.randint(1, 8)
        W = random.randint(1, 8)
        sz = torch.Size([N, C, H, W])
        inp = torch.randn(N, 2, 3, requires_grad=True)
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("always")  # python2 requires this so other tests can trigger
            self.assertTrue(gradcheck(
                lambda inp: F.affine_grid(inp, sz, align_corners=align_corners),
                (inp,)))
    # test CPU against CUDA
    if TEST_CUDA:
        N = random.randint(1, 8)
        C = random.randint(1, 8)
        H = random.randint(1, 8)
        W = random.randint(1, 8)
        sz = torch.Size([N, C, H, W])
        for align_corners in (True, False):
            input_cpu = torch.randn(N, 2, 3, requires_grad=True)
            with warnings.catch_warnings(record=True):
                warnings.simplefilter("always")  # python2 requires this so other tests can trigger
                out_cpu = F.affine_grid(input_cpu, sz, align_corners=align_corners)
            gradients = torch.randn(out_cpu.size())
            out_cpu.backward(gradients)
            input_gpu = input_cpu.detach().cuda().requires_grad_()
            with warnings.catch_warnings(record=True):
                warnings.simplefilter("always")  # python2 requires this so other tests can trigger
                out_cuda = F.affine_grid(input_gpu, sz, align_corners=align_corners)
            out_cuda.backward(gradients.cuda())
            self.assertEqual(out_cpu, out_cuda)
            self.assertEqual(input_cpu.grad, input_gpu.grad)
def test_affine_grid_3d(self):
    """F.affine_grid (3-D): known-value check, gradcheck, and CPU/CUDA parity."""
    # test known input on CPU
    input = torch.arange(1., 13).view(1, 3, 4)
    output = F.affine_grid(input, torch.Size([1, 1, 2, 2, 2]), align_corners=True)
    groundtruth = torch.tensor(
        [[[[[-2., -10., -18.], [0., 0., 0.]], [[2., 2., 2.], [4., 12., 20.]]],
          [[[4., 4., 4.], [6., 14., 22.]], [[8., 16., 24.], [10., 26., 42.]]]]]).view(1, 2, 2, 2, 3)
    self.assertEqual(output, groundtruth)
    output = F.affine_grid(input, torch.Size([1, 1, 2, 2, 2]), align_corners=False)
    groundtruth = torch.tensor(
        [[[[[1., -1., -3.], [2., 4., 6.]], [[3., 5., 7.], [4., 10., 16.]]],
          [[[4., 6., 8.], [5., 11., 17.]], [[6., 12., 18.], [7., 17., 27.]]]]]).view(1, 2, 2, 2, 3)
    self.assertEqual(output, groundtruth)
    for align_corners in (True, False):
        # do gradcheck
        N = random.randint(1, 8)
        C = random.randint(1, 8)
        D = random.randint(1, 8)
        H = random.randint(1, 8)
        W = random.randint(1, 8)
        sz = torch.Size([N, C, D, H, W])
        inp = torch.randn(N, 3, 4, requires_grad=True)
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("always")  # python2 requires this so other tests can trigger
            self.assertTrue(gradcheck(
                lambda inp: F.affine_grid(inp, sz, align_corners=align_corners),
                (inp,)))
    # test CPU against CUDA
    if TEST_CUDA:
        N = random.randint(1, 8)
        C = random.randint(1, 8)
        D = random.randint(1, 8)
        H = random.randint(1, 8)
        W = random.randint(1, 8)
        sz = torch.Size([N, C, D, H, W])
        for align_corners in (True, False):
            input_cpu = torch.randn(N, 3, 4, requires_grad=True)
            with warnings.catch_warnings(record=True):
                warnings.simplefilter("always")  # python2 requires this so other tests can trigger
                out_cpu = F.affine_grid(input_cpu, sz, align_corners=align_corners)
            gradients = torch.randn(out_cpu.size())
            out_cpu.backward(gradients)
            input_gpu = input_cpu.detach().cuda().requires_grad_()
            with warnings.catch_warnings(record=True):
                warnings.simplefilter("always")  # python2 requires this so other tests can trigger
                out_cuda = F.affine_grid(input_gpu, sz, align_corners=align_corners)
            out_cuda.backward(gradients.cuda())
            self.assertEqual(out_cpu, out_cuda)
            self.assertEqual(input_cpu.grad, input_gpu.grad)
def test_channel_shuffle(self):
    """F.channel_shuffle with 2 groups on 3-D/4-D/5-D tensors, in both
    contiguous and channels-last memory formats, against hand-written
    expected outputs. Also asserts no warnings are emitted."""
    # 3D tensor
    x = torch.tensor(
        [[[1, 2],
          [5, 6],
          [9, 10],
          [13, 14],
          ]]
    )
    y_ref = torch.tensor(
        [[[1, 2],
          [9, 10],
          [5, 6],
          [13, 14],
          ]]
    )
    # ChannelsFirst
    with warnings.catch_warnings(record=True) as w:
        y = F.channel_shuffle(x, 2)
        self.assertEqual(len(w), 0)
    self.assertEqual(y, y_ref)
    # ChannelsLast not supported for 3dim
    # 4D tensor
    x = torch.tensor(
        [[[[1, 2],
           [3, 4]],
          [[5, 6],
           [7, 8]],
          [[9, 10],
           [11, 12]],
          [[13, 14],
           [15, 16]],
          ]]
    )
    y_ref = torch.tensor(
        [[[[1, 2],
           [3, 4]],
          [[9, 10],
           [11, 12]],
          [[5, 6],
           [7, 8]],
          [[13, 14],
           [15, 16]],
          ]]
    )
    # ChannelsFirst NCHW
    with warnings.catch_warnings(record=True) as w:
        y = F.channel_shuffle(x, 2)
        self.assertEqual(len(w), 0)
    self.assertEqual(y, y_ref)
    # ChannelsLast NHWC
    with warnings.catch_warnings(record=True) as w:
        y = F.channel_shuffle(x.contiguous(memory_format=torch.channels_last), 2)
        self.assertEqual(len(w), 0)
    y = y.contiguous(memory_format=torch.contiguous_format)
    self.assertEqual(y, y_ref)
    # 5D tensor
    x = torch.tensor(
        [[[[[1, 2],
            [3, 4]]],
          [[[5, 6],
            [7, 8]]],
          [[[9, 10],
            [11, 12]]],
          [[[13, 14],
            [15, 16]]],
          ]]
    )
    y_ref = torch.tensor(
        [[[[[1, 2],
            [3, 4]]],
          [[[9, 10],
            [11, 12]]],
          [[[5, 6],
            [7, 8]]],
          [[[13, 14],
            [15, 16]]],
          ]]
    )
    # ChannelsFirst NCDHW
    with warnings.catch_warnings(record=True) as w:
        y = F.channel_shuffle(x, 2)
        self.assertEqual(len(w), 0)
    self.assertEqual(y, y_ref)
    # ChannelsLast NDHWC
    with warnings.catch_warnings(record=True) as w:
        y = F.channel_shuffle(x.contiguous(memory_format=torch.channels_last_3d), 2)
        self.assertEqual(len(w), 0)
    y = y.contiguous(memory_format=torch.contiguous_format)
    self.assertEqual(y, y_ref)
def test_channel_shuffle_return_self(self):
# gh-76616: nn.ChannelShuffle will return self with an empty input tensor
groups = 3
input_tensor = torch.rand([0, 9, 4, 4])
output = torch.nn.ChannelShuffle(groups)(input_tensor)
torch.testing.assert_close(output, input_tensor)
def test_upsamplingLinear1d(self):
    """nn.Upsample / F.interpolate in 'linear' mode: value check on an all-ones
    input plus gradcheck, for up/downsampling with and without
    recompute_scale_factor."""
    for align_corners in [True, False]:
        for recompute_scale_factor in [True, False]:
            kwargs = dict(
                mode='linear', align_corners=align_corners, recompute_scale_factor=recompute_scale_factor
            )
            # test float scale factor up & downsampling
            for scale_factor in [0.5, 1.5, 2]:
                m = nn.Upsample(scale_factor=scale_factor, **kwargs)
                in_t = torch.ones(1, 1, 2)
                out_size = int(math.floor(in_t.shape[-1] * scale_factor))
                with warnings.catch_warnings(record=True) as w:
                    out_t = m(in_t)
                # interpolating a constant signal must reproduce the constant
                self.assertEqual(torch.ones(1, 1, out_size), out_t.data)
                input = torch.randn(1, 1, 2, requires_grad=True)
                if not recompute_scale_factor:
                    gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), (input,))
                else:
                    gradcheck(lambda x: F.interpolate(x, scale_factor=scale_factor, **kwargs), (input,))
def test_upsamplingLinear1d_spatial_invariance(self):
    """Linear upsampling is spatially local: upsampling a prefix of the signal
    must equal the corresponding prefix of the upsampled full signal."""
    up = nn.Upsample(scale_factor=3, mode='linear', align_corners=False)
    signal = torch.zeros(1, 1, 9)
    signal[:, :, :4].normal_()
    with warnings.catch_warnings(record=True) as w:
        full_out = up(signal)
        prefix_out = up(signal[:, :, :5])
    self.assertEqual(full_out[:, :, :15], prefix_out)
def test_upsampling_not_recompute_scale_factor(self):
    """Bicubic interpolation with recompute_scale_factor=False: known values
    (must match OpenCV), then value check and gradcheck over scale factors
    and devices."""
    # test output against known input: result must match opencv
    in_t = torch.arange(8.).view(1, 2, 2, 2)
    expected_out_t = torch.tensor(
        [[[[-0.32725, -0.08843, 0.37933, 0.79744],
           [0.15039, 0.38921, 0.85697, 1.27508],
           [1.08591, 1.32473, 1.79249, 2.21060],
           [1.92213, 2.16095, 2.62871, 3.04682]],
          [[3.67275, 3.91157, 4.37933, 4.79744],
           [4.15039, 4.38921, 4.85697, 5.27508],
           [5.08591, 5.32473, 5.79249, 6.21060],
           [5.92213, 6.16095, 6.62871, 7.04682]]]])
    if IS_PPC:
        # Both OpenCV and PyTorch give a slightly different result on PPC
        expected_out_t = torch.tensor(
            [[[[-0.32725, -0.08843, 0.37933, 0.79744],
               [0.15039, 0.38921, 0.85697, 1.27508],
               [1.08591, 1.32473, 1.79249, 2.21060],
               [1.92212, 2.16094, 2.62870, 3.04681]],
              [[3.67275, 3.91157, 4.37933, 4.79743],
               [4.15039, 4.38921, 4.85697, 5.27508],
               [5.08591, 5.32473, 5.79249, 6.21059],
               [5.92212, 6.16094, 6.62870, 7.04680]]]])
    out_t = F.interpolate(in_t, scale_factor=2.3, mode='bicubic', align_corners=False, recompute_scale_factor=False)
    torch.set_printoptions(precision=5)
    self.assertEqual(out_t, expected_out_t, atol=1e-4, rtol=0)
    device_list = ['cpu']
    if TEST_CUDA:
        device_list.append('cuda')
    for align_corners in [True, False]:
        kwargs = dict(mode='bicubic', align_corners=align_corners)
        # test float scale factor up & downsampling
        for device in device_list:
            for scale_factor in [0.6, 1.6, 2.3]:
                in_t = torch.ones(2, 2, 2, 2).to(device)
                out_t = F.interpolate(in_t, scale_factor=scale_factor, **kwargs)
                out_size = int(math.floor(in_t.shape[-1] * scale_factor))
                self.assertEqual(torch.ones(2, 2, out_size, out_size), out_t.data, atol=1e-5, rtol=0)
                input = torch.randn(2, 2, 2, 2, requires_grad=True)
                gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [input])
def test_upsamplingBilinear2d_spatial_invariance(self):
    """Bilinear upsampling is spatially local: upsampling a corner patch must
    equal the corresponding corner of the upsampled full image."""
    up = nn.Upsample(scale_factor=3, mode='bilinear', align_corners=False)
    image = torch.zeros(1, 1, 9, 9)
    image[:, :, :4, :4].normal_()
    with warnings.catch_warnings(record=True) as w:
        full_out = up(image)
        patch_out = up(image[:, :, :5, :5])
    self.assertEqual(full_out[:, :, :15, :15], patch_out)
def test_upsamplingTrilinear3d(self):
    """'trilinear' upsampling: value check on an all-ones input, memory-format
    preservation, size/scale equivalence, and gradcheck/gradgradcheck."""
    for align_corners in [True, False]:
        kwargs = dict(mode='trilinear', align_corners=align_corners)
        for memory_format in [torch.contiguous_format, torch.channels_last_3d]:
            # test float scale factor up & downsampling
            for scale_factor in [0.5, 1.5, 2]:
                m = nn.Upsample(scale_factor=scale_factor, **kwargs)
                in_t = torch.ones(1, 2, 2, 2, 2).contiguous(memory_format=memory_format)
                out_size = int(math.floor(in_t.shape[-1] * scale_factor))
                with warnings.catch_warnings(record=True) as w:
                    out_t = m(in_t)
                self.assertEqual(torch.ones(1, 2, out_size, out_size, out_size), out_t.data)
                # Assert that memory format is carried through to the output
                self.assertTrue(out_t.is_contiguous(memory_format=memory_format))
                input = torch.randn(1, 2, 2, 2, 2, requires_grad=True)
                # size-based and scale-based calls must agree
                self.assertEqual(
                    F.interpolate(input, (out_size, out_size, out_size), **kwargs),
                    F.interpolate(input, scale_factor=scale_factor, **kwargs))
                gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [input])
                gradgradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [input])
def test_upsamplingTrilinear3d_spatial_invariance(self):
    """Trilinear upsampling is spatially local: upsampling a corner sub-volume
    must equal the corresponding corner of the upsampled full volume."""
    up = nn.Upsample(scale_factor=3, mode='trilinear', align_corners=False)
    volume = torch.zeros(1, 1, 9, 9, 9)
    volume[:, :, :4, :4, :4].normal_()
    with warnings.catch_warnings(record=True) as w:
        full_out = up(volume)
        corner_out = up(volume[:, :, :5, :5, :5])
    self.assertEqual(full_out[:, :, :15, :15, :15], corner_out)
def test_upsampling_small_scale(self):
    """Bilinear downsampling of a 2x2 image to 1x1 averages the four pixels."""
    down = torch.nn.Upsample(scale_factor=0.5, mode="bilinear")
    src = torch.arange(1, 5, dtype=torch.float64).reshape(1, 1, 2, 2)
    expected = torch.tensor([[[[2.5]]]])
    self.assertEqual(expected, down(src))
def test_upsampling_bfloat16(self, dtype=torch.bfloat16):
    """Upsample in bfloat16 must track the float32 result within a loose
    tolerance (both forward and backward), for nearest/linear family modes
    on 1-D/2-D/3-D inputs."""
    def helper(size, scale_factor, mode, device):
        # float32 reference and a detached bfloat16 leaf of the same values
        inputf = torch.randn(size, device=device, dtype=torch.float, requires_grad=True)
        input = inputf.to(dtype).detach().requires_grad_(True)
        m = nn.Upsample(scale_factor=scale_factor, mode=mode)
        outf = m(inputf)
        out = m(input)
        self.assertEqual(out.dtype, dtype)
        self.assertEqualIgnoreType(out, outf, atol=0.1, rtol=0.0)
        out.sum().backward()
        outf.sum().backward()
        self.assertEqual(input.grad.dtype, dtype)
        self.assertEqual(input.grad, inputf.grad.to(dtype), atol=0.1, rtol=0)
    for device in ['cpu']:
        helper([3, 20, 30], 2, 'nearest', device)
        helper([3, 20, 11, 7], 2, 'nearest', device)
        helper([3, 20, 11, 7, 3], 2, 'nearest', device)
        helper([3, 20, 30], 2, 'linear', device)
        helper([3, 20, 11, 7], 2, 'bilinear', device)
        helper([3, 20, 11, 7, 3], 2, 'trilinear', device)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_interpolate_illegal_memory_access(self):
    """Regression test: nearest-interpolation backward on CUDA must not access
    memory out of bounds; results are compared against a CPU reference."""
    in_s = 45
    out_s = 14
    input = torch.ones((1, 1, in_s), device='cuda', requires_grad=True)
    # note we allocated grad_output to be larger so out of bound access
    # would be visible in grad_input
    grad = torch.ones((1, 1, out_s * 2), device='cuda', requires_grad=True)
    grad = grad[:, :, :out_s]
    input_ref = input.detach().cpu().requires_grad_()
    grad_ref = grad.cpu()
    out = F.interpolate(input, size=(out_s,), mode='nearest')
    out.backward(grad)
    out_ref = F.interpolate(input_ref, size=(out_s,), mode='nearest')
    out_ref.backward(grad_ref)
    self.assertEqual(out_ref, out)
    self.assertEqual(input_ref.grad, input.grad)
def test_interpolate(self):
    """F.interpolate / nn.Upsample across modes, scale factors, dims, devices:
    value check on all-ones input, size/scale-factor equivalence, and
    gradcheck/gradgradcheck."""
    def _test_interpolate_helper(in_t, scale_factor, layer):
        # NOTE: reads `kwargs` from the enclosing loop below (closure).
        out_size = int(math.floor(in_t.shape[-1] * scale_factor))
        dim = len(in_t.shape) - 2
        out_shape = [1, 1] + [out_size] * dim
        with warnings.catch_warnings(record=True) as w:
            out_t = layer(in_t)
        self.assertEqual(torch.ones(out_shape), out_t)
        self.assertEqual(
            F.interpolate(in_t, (out_size,) * dim, **kwargs),
            F.interpolate(in_t, scale_factor=scale_factor, **kwargs))
        gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [in_t], nondet_tol=GRADCHECK_NONDET_TOL)
        gradgradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [in_t], nondet_tol=GRADCHECK_NONDET_TOL)
    def _make_input(dim, device):
        # all-ones (1, 1, 2, ..., 2) leaf tensor with `dim` spatial dims
        size = [1, 1]
        size += [2] * dim
        return torch.ones(size, requires_grad=True, device=device)
    device_list = ['cpu']
    if TEST_CUDA:
        device_list.append('cuda')
    for device in device_list:
        for scale_factor in [0.5, 1.5, 2]:
            for mode in ['nearest', 'area']:
                kwargs = dict(mode=mode)
                m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)
                for input in [_make_input(1, device), _make_input(2, device), _make_input(3, device)]:
                    _test_interpolate_helper(input, scale_factor, m)
            for align_corners in [True, False]:
                kwargs = dict(mode='linear', align_corners=align_corners)
                m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)
                _test_interpolate_helper(_make_input(1, device), scale_factor, m)
                kwargs = dict(mode='bilinear', align_corners=align_corners)
                m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)
                _test_interpolate_helper(_make_input(2, device), scale_factor, m)
                kwargs = dict(mode='bicubic', align_corners=align_corners)
                # bicubic is exercised through F.interpolate directly
                def m(t):
                    return F.interpolate(t, scale_factor=scale_factor, **kwargs).to(device)
                _test_interpolate_helper(_make_input(2, device), scale_factor, m)
                kwargs = dict(mode='trilinear', align_corners=align_corners)
                m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)
                _test_interpolate_helper(_make_input(3, device), scale_factor, m)
def test_linear_broadcasting(self):
    """nn.Linear on a 3-D input equals applying it to the flattened 2-D view."""
    layer = nn.Linear(5, 8)
    batch = torch.randn(2, 3, 5)
    flat_result = layer(batch.view(6, 5)).view(2, 3, 8)
    self.assertEqual(flat_result, layer(batch))
def test_bilinear(self):
    """nn.Bilinear forward equals the explicit einsum formula, and the
    gradients w.r.t. weight, bias and both inputs agree as well."""
    module = nn.Bilinear(10, 10, 8)
    input1 = torch.randn(4, 10, requires_grad=True)
    input2 = torch.randn(4, 10, requires_grad=True)
    grad_output = torch.randn(4, 8)
    actual = module(input1, input2)
    reference = torch.einsum("bi,kij,bj->bk", input1, module.weight, input2) + module.bias
    self.assertEqual(actual, reference)
    leaves = [module.weight, module.bias, input1, input2]
    actual_grads = torch.autograd.grad(actual, leaves, grad_output)
    reference_grads = torch.autograd.grad(reference, leaves, grad_output)
    for got, want in zip(actual_grads, reference_grads):
        self.assertEqual(got, want)
def test_bilinear_non_contiguous(self):
    """nn.Bilinear must produce identical outputs and input gradients for
    transposed (non-contiguous) inputs and their contiguous copies."""
    module = nn.Bilinear(7, 7, 5)
    input1 = torch.randn(4, 7, 10, requires_grad=True)
    input2 = torch.randn(4, 7, 10, requires_grad=True)
    input1_tp = input1.transpose(1, 2)
    input2_tp = input2.transpose(1, 2)
    grad_output = torch.randn(4, 10, 5)
    def run(input1_tp, input2_tp):
        # clear grads so each run() observes only its own backward pass
        input1.grad = input2.grad = None
        output = module(input1_tp, input2_tp)
        output.backward(grad_output)
        return output.data, input1.grad.data, input2.grad.data
    out_nc, g1_nc, g2_nc = run(input1_tp, input2_tp)
    input1_tp = input1_tp.contiguous()
    input2_tp = input2_tp.contiguous()
    out, g1, g2 = run(input1_tp, input2_tp)
    self.assertEqual(out, out_nc)
    self.assertEqual(g1, g1_nc)
    self.assertEqual(g2, g2_nc)
def test_bilinear_no_bias(self):
    """A bias-free nn.Bilinear must match a biased module whose bias is zeroed
    and whose weight is copied; also grad/gradgrad-checks F.bilinear."""
    module = nn.Bilinear(10, 10, 8)
    module_no_bias = nn.Bilinear(10, 10, 8, False)
    # make the two modules numerically identical (zero bias, shared weight)
    module.bias.data.zero_()
    module.weight.data.copy_(module_no_bias.weight)
    input1 = torch.randn(4, 10, requires_grad=True)
    input2 = torch.randn(4, 10, requires_grad=True)
    grad_output = torch.randn(4, 8)
    def run(net):
        # clear grads so each run() observes only its own backward pass
        input1.grad = input2.grad = None
        output = net(input1, input2)
        output.backward(grad_output)
        return output.data, input1.grad.data, input2.grad.data
    out, g1, g2 = run(module)
    out_nb, g1_nb, g2_nb = run(module_no_bias)
    self.assertEqual(out, out_nb)
    self.assertEqual(g1, g1_nb)
    self.assertEqual(g2, g2_nb)
    _assertGradAndGradgradChecks(self,
                                 lambda x1, x2: F.bilinear(x1, x2, module_no_bias.weight, module_no_bias.bias),
                                 (input1, input2))
def test_bilinear_broadcasting(self):
    """nn.Bilinear on 3-D inputs equals applying it to the flattened 2-D views."""
    layer = nn.Bilinear(5, 6, 8)
    lhs = torch.randn(2, 3, 5)
    rhs = torch.randn(2, 3, 6)
    flat_result = layer(lhs.view(6, 5), rhs.view(6, 6)).view(2, 3, 8)
    self.assertEqual(flat_result, layer(lhs, rhs))
def test_conv_tbc(self):
    """Gradcheck F.conv_tbc (time, batch, channels layout) w.r.t. input,
    weight and bias, with padding 3."""
    data = torch.randn(9, 4, 5, requires_grad=True)
    kernel = torch.randn(3, 5, 6, requires_grad=True)
    bias_term = torch.randn(6, requires_grad=True)
    gradcheck(lambda i, w, b, pad: F.conv_tbc(i, w, b, pad), (data, kernel, bias_term, 3))
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@unittest.skipIf(not TEST_CUDNN, "needs cudnn")
@skipIfRocmVersionLessThan((4, 3))
@skipIfNotMiopenSuggestNHWC
def test_grouped_conv_cudnn_nhwc_support(self):
    """Smoke test: grouped (and grouped transposed) convolution in fp16 +
    channels-last must run without raising on cudnn/miopen."""
    # in order to catch the holes in grouped convolution in nhwc support for earlier cudnn version
    input = torch.randn((16, 16, 8, 8), dtype=torch.float16, device="cuda").to(memory_format=torch.channels_last)
    weight = torch.randn((8, 4, 3, 3), dtype=torch.float16, device="cuda").to(memory_format=torch.channels_last)
    out = torch.convolution(input, weight, None, (1, 1), (1, 1), (1, 1), False, (0, 0), 4)
    input = torch.randn((16, 8, 8, 8), dtype=torch.float16, device="cuda").to(memory_format=torch.channels_last)
    out_transpose = torch.convolution(input, weight, None, (1, 1), (1, 1), (1, 1), True, (0, 0), 4)
@unittest.expectedFailure
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@unittest.skipIf(not TEST_CUDNN, "needs cudnn")
def test_conv_cudnn_memory_layout_dominance(self):
    """Asserts the *desired* (not yet implemented) rule that the weight's
    memory format decides the output's memory format."""
    # desired behavior here is to have the memory_layout of conv.weight to
    # dominate the layout of output.
    # which is not the same as current behavior, we'll fix this in
    # following up PRs and remove the `expectedFailure` tag
    input = torch.randint(1, 10, (2, 8, 4, 4), dtype=torch.float32, device="cuda", requires_grad=True)
    conv = nn.Conv2d(8, 4, 3).cuda().float()
    out = conv(input)
    self.assertTrue(out.is_contiguous())
    input = input.contiguous(memory_format=torch.channels_last)
    out = conv(input)
    self.assertTrue(out.is_contiguous())
    conv.weight.data = conv.weight.contiguous(memory_format=torch.channels_last)
    out = conv(input)
    self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
    input = input.contiguous()
    out = conv(input)
    self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_cudnn_noncontiguous_weight(self):
    """conv1d must give identical results for an expanded (non-contiguous)
    weight and its contiguous copy."""
    # Noncontiguous weights must be contiguous() before being
    # passed to cuDNN
    input = torch.tensor([1, 1, 1], dtype=torch.double, device="cuda").view(1, 1, 3)
    weights1 = torch.tensor([1], dtype=torch.double, device="cuda").expand(1, 1, 2)
    weights2 = torch.tensor([1], dtype=torch.double, device="cuda").expand(1, 1, 2).contiguous()
    self.assertEqual(F.conv1d(input, weights1, bias=None, stride=2, dilation=2),
                     F.conv1d(input, weights2, bias=None, stride=2, dilation=2))
def run_grad_conv_test(self, func_forward, func_backward, dim=1, gradient='input'):
    """Check an explicit conv gradient kernel against autograd.

    func_forward: e.g. F.conv1d; func_backward: the matching
    F.grad.convNd_input / convNd_weight function.
    dim: number of spatial dimensions of the convolution.
    gradient: 'input' or 'weight' — which argument's gradient is compared.
    """
    for kern, inp_size in [(3, 6), (3, 7), (4, 9)]:
        for batch, stride, padding, chan_in, chan_out, dilation in \
                product([1, 2], [1, 2], [0, 1, 2], [2], [3], [1]):
            for has_bias in [True, False]:
                input_shape = [batch, chan_in]
                weight_shape = [chan_out, chan_in]
                for _ in range(dim):
                    input_shape.append(inp_size)
                    weight_shape.append(kern)
                input = torch.randn(input_shape, requires_grad=True)
                weight = torch.randn(weight_shape, requires_grad=True)
                # Bug fix: `bias` used to be assigned only when has_bias was
                # True, so has_bias=False iterations silently reused the stale
                # bias from the previous iteration. Pass None explicitly.
                bias = torch.randn([chan_out], requires_grad=True) if has_bias else None
                output = func_forward(input, weight, stride=stride, padding=padding, dilation=dilation, bias=bias)
                gradient_o = torch.randn(output.shape)
                gradient_w = torch.autograd.grad(output, input if (gradient == 'input') else weight, gradient_o)
                self.assertEqual(gradient_w[0],
                                 func_backward(
                                     input_shape if (gradient == 'input') else input,
                                     weight_shape if (gradient == 'weight') else weight,
                                     gradient_o,
                                     stride=stride,
                                     padding=padding,
                                     dilation=dilation))
def test_grad_conv1d_input(self):
    """Validate F.grad.conv1d_input against autograd's input gradient."""
    self.run_grad_conv_test(F.conv1d, F.grad.conv1d_input, 1, 'input')
def test_grad_conv1d_weight(self):
    """Validate F.grad.conv1d_weight against autograd's weight gradient."""
    self.run_grad_conv_test(F.conv1d, F.grad.conv1d_weight, 1, 'weight')
def test_grad_conv2d_input(self):
    """Validate F.grad.conv2d_input against autograd's input gradient."""
    self.run_grad_conv_test(F.conv2d, F.grad.conv2d_input, 2, 'input')
def test_grad_conv2d_weight(self):
    """Validate F.grad.conv2d_weight against autograd's weight gradient."""
    self.run_grad_conv_test(F.conv2d, F.grad.conv2d_weight, 2, 'weight')
def test_grad_conv3d_input(self):
    """Validate F.grad.conv3d_input against autograd's input gradient."""
    self.run_grad_conv_test(F.conv3d, F.grad.conv3d_input, 3, 'input')
def test_grad_conv3d_weight(self):
    """Validate F.grad.conv3d_weight against autograd's weight gradient."""
    self.run_grad_conv_test(F.conv3d, F.grad.conv3d_weight, 3, 'weight')
@unittest.skipIf(not torch._nnpack_available(), "NNPACK unavailable")
def test_nnpack_conv(self):
    """Compare torch._nnpack_spatial_convolution against F.conv2d (forward
    and input/weight gradients) over a grid of shapes, strides and paddings."""
    for kern, inp_size in [(3, 6), (3, 7), (4, 9)]:
        for batch, stride, padding, chan_in, chan_out in \
                product([1, 2, 3, 4], [1, 2], [0, 1, 2], [2], [3]):
            for has_bias in [True, False]:
                input_shape = [batch, chan_in]
                weight_shape = [chan_out, chan_in]
                for _ in range(2):
                    input_shape.append(inp_size)
                    weight_shape.append(kern)
                input = torch.randn(input_shape, requires_grad=True, dtype=torch.float)
                weight = torch.randn(weight_shape, requires_grad=True, dtype=torch.float)
                # Bug fix: `bias` used to be assigned only when has_bias was
                # True, so has_bias=False iterations silently reused the stale
                # bias from the previous iteration. Pass None explicitly
                # (bias is an optional Tensor for both ops).
                bias = torch.randn([chan_out], requires_grad=True, dtype=torch.float) if has_bias else None
                output = torch._nnpack_spatial_convolution(input, weight, stride=stride, padding=padding, bias=bias)
                output_expected = torch.nn.functional.conv2d(input, weight, stride=stride, padding=padding, bias=bias)
                self.assertEqual(output, output_expected, atol=3e-4, rtol=0)
                gradient_o = torch.randn(output.shape, dtype=torch.float)
                grads = torch.autograd.grad(output, [input, weight], gradient_o)
                grads_expected = torch.autograd.grad(output_expected, [input, weight], gradient_o)
                for gr, gr_expected in zip(grads, grads_expected):
                    self.assertEqual(gr, gr_expected, atol=3e-4, rtol=0)
def test_fold_invalid_arg(self):
    """nn.Fold must reject inputs whose channel count or block count is
    inconsistent with output_size/kernel_size/stride/dilation/padding."""
    # input.size(1) not divisible by \prod(kernel_size)
    fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3))
    with self.assertRaisesRegex(RuntimeError, r"be divisible by the product of kernel_size"):
        fold(torch.randn(1, 5, 9))
    with self.assertRaisesRegex(RuntimeError, r"be divisible by the product of kernel_size"):
        fold(torch.randn(1, 19, 9))
    # input.size(2) not matching the total number of sliding blocks
    with self.assertRaisesRegex(RuntimeError, r"match the calculated number of sliding blocks"):
        fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3))
        fold(torch.randn(1, 6, 10))
    with self.assertRaisesRegex(RuntimeError, r"match the calculated number of sliding blocks"):
        fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3), stride=(2, 2))
        fold(torch.randn(1, 6, 5))
    with self.assertRaisesRegex(RuntimeError, r"match the calculated number of sliding blocks"):
        fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3), stride=(2, 2), dilation=(1, 2), padding=(2, 0))
        fold(torch.randn(1, 6, 5))  # should be 4 * 1 = 4 sliding blocks
    # dilation so large the computed sliding-block grid is non-positive
    fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 2), stride=1, dilation=8, padding=0)
    with self.assertRaisesRegex(RuntimeError, r"calculated shape of the array of sliding blocks as"):
        fold(torch.randn(1, 12, 12))
def test_unfold_invalid_arg(self):
    """nn.Unfold must reject non-4D inputs and configurations whose computed
    output spatial size is non-positive."""
    # input wrong dimension
    unfold = nn.Unfold(kernel_size=(2, 3))
    with self.assertRaisesRegex(NotImplementedError, r"Only 4D input Tensors are supported"):
        unfold(torch.randn(1, 5, 2))
    # calculated output shape is too small
    with self.assertRaisesRegex(RuntimeError, r"too small \(non-positive\)"):
        unfold = nn.Unfold(kernel_size=(2, 3))
        unfold(torch.randn(1, 2, 2, 2))
    with self.assertRaisesRegex(RuntimeError, r"too small \(non-positive\)"):
        unfold = nn.Unfold(kernel_size=(5, 3), padding=(1, 1))
        unfold(torch.randn(1, 2, 2, 3))
    with self.assertRaisesRegex(RuntimeError, r"too small \(non-positive\)"):
        unfold = nn.Unfold(kernel_size=(1, 3), padding=(1, 1), dilation=(1, 2))
        unfold(torch.randn(1, 2, 2, 2))
def test_conv_padding_mode(self):
    """Conv constructors must reject invalid padding_mode values."""
    for bad_mode in ("xyz", 3):
        with self.assertRaisesRegex(ValueError, "padding_mode must be one of"):
            nn.Conv2d(3, 3, 3, padding_mode=bad_mode)
    # transposed convolutions only accept zero padding
    with self.assertRaisesRegex(ValueError, "Only \"zeros\" "):
        nn.ConvTranspose2d(3, 3, 3, padding_mode="reflect")
def test_softmin(self):
    """softmin(x, dim) must equal softmax(-x, dim) along every dimension."""
    data = torch.randn(2, 16)
    for dim in (1, 0):
        self.assertEqual(F.softmin(data, dim), F.softmax(-data, dim))
def test_log_softmax_cpu(self, dtype=torch.bfloat16):
    """log_softmax in bfloat16 must track the float32 result within a loose
    tolerance, for both forward and backward, along both dimensions."""
    for dim in [0, 1]:
        ref = torch.rand(200, 200, device="cpu", dtype=torch.float, requires_grad=True)
        low_prec = ref.to(dtype).detach().requires_grad_(True)
        ref_out = F.log_softmax(ref, dim=dim)
        out = F.log_softmax(low_prec, dim=dim)
        self.assertEqual(out.dtype, dtype)
        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
        self.assertEqualIgnoreType(out, ref_out, atol=0.1, rtol=0)
        out.sum().backward()
        ref_out.sum().backward()
        self.assertEqual(low_prec.grad.dtype, dtype)
        self.assertEqual(low_prec.grad, ref.grad.to(dtype), atol=0.1, rtol=0)
def test_softmax_cpu(self, dtype=torch.bfloat16):
    """softmax in bfloat16 must track the float32 result within a loose
    tolerance, for both forward and backward, along both dimensions."""
    for dim in [0, 1]:
        ref = torch.rand(200, 200, device="cpu", dtype=torch.float, requires_grad=True)
        low_prec = ref.to(dtype).detach().requires_grad_(True)
        ref_out = F.softmax(ref, dim=dim)
        out = F.softmax(low_prec, dim=dim)
        self.assertEqual(out.dtype, dtype)
        self.assertEqualIgnoreType(out, ref_out, atol=1e-3, rtol=0)
        out.sum().backward()
        ref_out.sum().backward()
        self.assertEqual(low_prec.grad.dtype, dtype)
        self.assertEqual(low_prec.grad, ref.grad.to(dtype), atol=1e-3, rtol=0)
def test_adaptive_log_softmax(self):
    """End-to-end checks for nn.AdaptiveLogSoftmaxWithLoss: argument
    validation, input-shape errors, cluster layout, no-batch-dim support,
    log_prob normalization, forward/loss consistency, and predict()."""
    # args validation: invalid cutoffs (duplicate, unsorted, >= n_classes)
    with self.assertRaises(ValueError):
        _ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 15, 15], div_value=2.)
    with self.assertRaises(ValueError):
        _ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 15, 10], div_value=2.)
    with self.assertRaises(ValueError):
        _ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 25], div_value=2.)
    with self.assertRaisesRegex(ValueError, "cutoffs should be a sequence of unique,"):
        _ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 20], div_value=2.)
    # not raise
    _ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 19], div_value=2.)
    # input shapes: 3 targets for a batch of 2 must be rejected
    with self.assertRaisesRegex(RuntimeError, r"Input and target should have the same size"):
        asfm = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 15], div_value=2.)
        x = torch.randn(2, 16)
        y = torch.tensor([0, 5, 10])
        asfm(x, y)
    # out-of-bound targets: 20 is not a valid class index for 20 classes
    with self.assertRaisesRegex(RuntimeError, r"Target values should be in"):
        asfm = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 15], div_value=2.)
        x = torch.randn(2, 16)
        y = torch.tensor([0, 20])
        asfm(x, y)
    # cluster sizes
    asfm = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 15], div_value=2.)
    x = torch.randn(2, 16)
    y = torch.tensor([0, 17])
    self.assertEqual(asfm.head.weight.size(), (5 + 3, 16))  # 5 targets in head, 3 clusters, dimensionality 16
    self.assertEqual(asfm.tail[0][1].weight.size(), (5, 8))  # 5 targets in this cluster, dimensionality 8
    self.assertEqual(asfm.tail[1][1].weight.size(), (5, 4))
    self.assertEqual(asfm.tail[2][1].weight.size(), (5, 2))
    self.assertEqual(asfm(x, y).output.size(), (2, ))
    # test no_batch_dim support
    asfm = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 15], div_value=2.)
    x = torch.randn(1, 16)
    y = torch.tensor([17])
    x2 = x.squeeze(0)
    y2 = y.squeeze(0)
    self.assertEqual(asfm(x, y).output.squeeze(0), asfm(x2, y2).output)
    # log_probs actually returns log_proba
    asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 4, [2], div_value=2.)
    x = torch.randn(4, 8)
    logprob_out = asfm.log_prob(x)
    # each row of exp(log_prob) must sum to 1 (a proper distribution)
    self.assertEqual(torch.exp(logprob_out).data.sum(1), torch.ones(4))
    # forward returns the same thing as log_probs
    for v in [0, 1, 2, 3]:
        y = torch.full((4,), v, dtype=torch.long)
        out, loss = asfm(x, y)
        self.assertEqual(out, logprob_out.gather(1, y.unsqueeze(1)).squeeze())
        self.assertEqual(loss, F.nll_loss(logprob_out, y))
    # predict
    x = torch.randn(64, 8).abs_()
    # argmax in shortlist: zero the cluster logits so argmax lands in the head
    asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 10, [4, 8], div_value=2., head_bias=True)
    asfm.head.weight.data.abs_()
    asfm.head.bias.data.abs_()
    asfm.head.weight.data[asfm.shortlist_size:, :].zero_()
    out = asfm.predict(x)
    self.assertEqual(out, asfm.log_prob(x).argmax(dim=1))
    # argmax outside of shortlist: zero the shortlist logits instead
    asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 10, [4, 8], div_value=2., head_bias=True)
    asfm.head.weight.data.abs_()
    asfm.head.bias.data.abs_()
    asfm.head.weight.data[:asfm.shortlist_size, :].zero_()
    out = asfm.predict(x)
    self.assertEqual(out, asfm.log_prob(x).argmax(dim=1))
    # half of the argmax in shortlist, half in clusters
    asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 10, [4, 8], div_value=2., head_bias=True)
    asfm.head.weight.data.abs_()
    asfm.head.bias.data.abs_()
    x[:32, :asfm.shortlist_size].zero_()
    x[32:, asfm.shortlist_size:].zero_()
    asfm.head.weight.data[:asfm.shortlist_size, asfm.shortlist_size:].zero_()
    asfm.head.weight.data[asfm.shortlist_size:, :asfm.shortlist_size].zero_()
    out = asfm.predict(x)
    self.assertEqual(out, asfm.log_prob(x).argmax(dim=1))
def test_cross_entropy_loss(self, dtype=torch.bfloat16):
    """CrossEntropyLoss in bfloat16 should track the float32 result
    (loose tolerance) in both value and input gradient."""
    loss_cpu = nn.CrossEntropyLoss().cpu()
    inputf = torch.randn(15, 10, device="cpu", dtype=torch.float, requires_grad=True)
    # Low-precision copy as an independent leaf tensor.
    input = inputf.to(dtype).detach().requires_grad_(True)
    target = torch.empty(15, dtype=torch.long).random_(10)
    outf = loss_cpu(inputf, target)
    out = loss_cpu(input, target)
    self.assertEqual(out.dtype, dtype)
    # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
    self.assertEqualIgnoreType(out, outf, atol=1e-1, rtol=0)
    outf.backward()
    out.backward()
    self.assertEqual(input.grad.dtype, dtype)
    # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
    self.assertEqualIgnoreType(input.grad, inputf.grad, atol=1e-1, rtol=0)
def test_cross_entropy_loss_precision(self):
    """Regression test for #55657: CrossEntropyLoss must produce matching
    results for float32 and float64 inputs."""
    criterion = nn.CrossEntropyLoss().cpu()
    logits_f = torch.randn(128, 2, 768, 768, device="cpu", dtype=torch.float)
    logits_d = logits_f.double()
    labels = torch.randint(2, (128, 768, 768), dtype=torch.long)
    loss_f = criterion(logits_f, labels)
    loss_d = criterion(logits_d, labels)
    # exact_dtype=False: only the values must agree, not the dtypes
    self.assertEqual(loss_f, loss_d, exact_dtype=False)
def test_cross_entropy_loss_zero_div(self):
# Test for issue #73165
input_1 = torch.rand([5, 0], dtype=torch.float32)
input_2 = torch.rand([5, 0], dtype=torch.float32)
torch.nn.CrossEntropyLoss()(input_1, input_2)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_convert_sync_batchnorm(self):
    """convert_sync_batchnorm must replace BatchNorm1d with SyncBatchNorm,
    leave other norm layers untouched, and preserve state dict and device."""
    module = torch.nn.Sequential(
        torch.nn.BatchNorm1d(100),
        torch.nn.InstanceNorm1d(100)
    ).cuda()
    # necessary to have an anchor point for comparison, in case the
    # convert_sync_batchnorm updates in place
    comp_module = torch.nn.Sequential(
        torch.nn.BatchNorm1d(100),
        torch.nn.InstanceNorm1d(100)
    ).cuda()
    comp_module.load_state_dict(module.state_dict())
    sync_bn_module = torch.nn.SyncBatchNorm.convert_sync_batchnorm(module)
    children = list(sync_bn_module.children())
    # Only the BatchNorm child is converted; InstanceNorm is left alone.
    self.assertEqual(children[0].__class__, torch.nn.SyncBatchNorm)
    self.assertEqual(children[1].__class__, torch.nn.InstanceNorm1d)
    # Every state entry must match the anchor, including device placement.
    for layer, converted_layer in zip(comp_module.children(), sync_bn_module.children()):
        for key in layer.state_dict().keys():
            self.assertEqual(layer.state_dict()[key].device, converted_layer.state_dict()[key].device)
            self.assertEqual(layer.state_dict()[key], converted_layer.state_dict()[key])
@unittest.skipIf(not TEST_CUDA, "CUDA not available")
def test_sync_batchnorm_backward_elemt(self):
    """batch_norm_backward_elemt must give the same gradient regardless of
    the memory formats of grad_output and saved_input."""
    device = 'cuda'
    saved_input = torch.rand(2, 3, 2, 1, device=device)
    grad_output = torch.rand(2, 3, 2, 1, device=device)
    mean = torch.rand(3, device=device)
    invstd = torch.rand(3, device=device)
    weight = torch.rand(3, device=device)
    sum_dy = torch.rand(3, device=device)
    sum_dy_xmu = torch.rand(3, device=device)
    count_tensor = torch.tensor([5, 5, 5], dtype=torch.int32, device=device)
    # Reference result with both tensors in default (contiguous) layout.
    gI_contiguous = torch.batch_norm_backward_elemt(
        grad_output,
        saved_input,
        mean,
        invstd,
        weight,
        sum_dy,
        sum_dy_xmu,
        count_tensor
    )
    # Test batch_norm_backward_elemt gives the same answer for all
    # combinations of contiguous and channels_last input
    for a, b in [
        (torch.channels_last, torch.contiguous_format),
        (torch.contiguous_format, torch.channels_last),
        (torch.channels_last, torch.channels_last),
    ]:
        gI_actual = torch.batch_norm_backward_elemt(
            grad_output.contiguous(memory_format=a),
            saved_input.contiguous(memory_format=b),
            mean,
            invstd,
            weight,
            sum_dy,
            sum_dy_xmu,
            count_tensor
        )
        self.assertEqual(gI_actual, gI_contiguous)
@unittest.skipIf(not TEST_CUDA, "CUDA not available")
def test_sync_batchnorm_accuracy_cuda(self):
    # The target of this test is to test the functionality and accuracy of
    # those single-GPU cuda kernels used in SyncBatchNorm
    # They are:
    #   fwd: torch.batch_norm_stats, torch.batch_norm_gather_stats_with_counts, torch.batch_norm_elemt
    #   bwd: torch.batch_norm_backward_reduce, torch.batch_norm_backward_elemt
    def _batch_norm_stats(data):
        """Check batch_norm_stats' mean against a plain reduction, for both
        contiguous and channels_last inputs."""
        mean1, _ = torch.batch_norm_stats(data, 1e-5)
        mean2, _ = torch.batch_norm_stats(data.to(memory_format=torch.channels_last), 1e-5)
        # Reference: per-channel mean over N, H, W.
        mean_ref = torch.mean(data, (0, 2, 3), keepdim=False)
        self.assertEqual(mean_ref, mean1)
        self.assertEqual(mean_ref, mean2)
    data = torch.randn(1, 96, 112, 112, dtype=torch.float, device='cuda')
    _batch_norm_stats(data)
def test_functional_grad_conv(self):
    """torch.nn.grad.convNd_input/_weight must reproduce autograd's conv
    gradients for 1-D, 2-D and 3-D convolutions (with dilation)."""
    cases = [
        # (conv fn, functional input-grad, functional weight-grad, input shape, weight shape)
        (F.conv1d, torch.nn.grad.conv1d_input, torch.nn.grad.conv1d_weight, (1, 1, 5), (1, 1, 3)),
        (F.conv2d, torch.nn.grad.conv2d_input, torch.nn.grad.conv2d_weight, (1, 1, 5, 5), (1, 1, 3, 3)),
        (F.conv3d, torch.nn.grad.conv3d_input, torch.nn.grad.conv3d_weight, (1, 1, 5, 5, 5), (1, 1, 3, 3, 3)),
    ]
    for conv_fn, grad_input_fn, grad_weight_fn, in_shape, w_shape in cases:
        inp = torch.randn(in_shape, requires_grad=True)
        wgt = torch.randn(w_shape, requires_grad=True)
        out = conv_fn(inp, wgt, dilation=2)
        gout = torch.randn(out.shape)
        # Autograd reference gradients.
        gin_auto, gw_auto = torch.autograd.grad(out, (inp, wgt), gout)
        # Functional equivalents must match exactly.
        gin_fn = grad_input_fn(inp.shape, wgt, gout, dilation=2)
        self.assertEqual(gin_fn, gin_auto)
        gw_fn = grad_weight_fn(inp, wgt.shape, gout, dilation=2)
        self.assertEqual(gw_fn, gw_auto)
def test_functional_grad_conv2d(self):
    """Sweep stride/kernel/groups/dilation and check that
    torch.nn.grad.conv2d_input/_weight match autograd's gradients."""
    BATCH_SIZE = 4
    IN_CH = 8
    OUT_CH = 16
    SPATIAL = 32
    def _test_conv2d(stride, kernel_size, groups, dilation):
        # "same"-style padding for odd kernel sizes
        padding = kernel_size // 2
        input = torch.empty(BATCH_SIZE, IN_CH, SPATIAL, SPATIAL).uniform_(-8.0, 8.0).requires_grad_(True)
        weight = torch.empty(OUT_CH, IN_CH // groups, kernel_size, kernel_size).uniform_(-4.0, 4.0).requires_grad_(True)
        output = F.conv2d(input, weight,
                          stride=stride, padding=padding, dilation=dilation, groups=groups)
        grad_output = torch.randn(output.shape)
        (grad_input_autograd, grad_weight_autograd) = torch.autograd.grad(output, (input, weight), grad_output)
        grad_input_functional = torch.nn.grad.conv2d_input(input.shape, weight, grad_output,
                                                           stride=stride, padding=padding, dilation=dilation, groups=groups)
        self.assertEqual(grad_input_functional, grad_input_autograd)
        grad_weight_functional = torch.nn.grad.conv2d_weight(input, weight.shape, grad_output,
                                                             stride=stride, padding=padding, dilation=dilation, groups=groups)
        self.assertEqual(grad_weight_functional, grad_weight_autograd)
    strides = [1, 2]
    kernel_sizes = [1, 3, 5]
    groups = [1, 2, 4]
    dilates = [1, 2]
    # Cartesian product over all hyper-parameter combinations.
    for s, k, g, d in product(strides, kernel_sizes, groups, dilates):
        _test_conv2d(s, k, g, d)
def test_flatten(self):
tensor_input = torch.randn(2, 1, 2, 3)
# Flatten Tensor
flatten = nn.Flatten(start_dim=1, end_dim=-1)
tensor_output = flatten(tensor_input)
self.assertEqual(tensor_output.size(), torch.Size([2, 6]))
def test_unflatten(self):
    """nn.Unflatten must expand one dimension into several, for both plain
    tensors (int sizes) and named tensors ((name, size) pairs)."""
    tensor_input = torch.randn(2, 50)
    # Unflatten Tensor (unflattened_size as a tuple of ints and list of ints)
    for us in ((2, 5, 5), [2, 5, 5]):
        unflatten = nn.Unflatten(dim=1, unflattened_size=us)
        tensor_output = unflatten(tensor_input)
        self.assertEqual(tensor_output.size(), torch.Size([2, 2, 5, 5]))
    # Unflatten NamedTensor: dim given by name, sizes as (name, size) pairs
    unflatten = nn.Unflatten(dim='features', unflattened_size=(('C', 2), ('H', 5), ('W', 5)))
    named_tensor_input = tensor_input.refine_names('N', 'features')
    named_tensor_output = unflatten(named_tensor_input)
    self.assertEqual(named_tensor_output.size(), torch.Size([2, 2, 5, 5]))
def test_unflatten_invalid_arg(self):
    """nn.Unflatten must reject malformed unflattened_size arguments with
    precise TypeError messages (the regexes below mirror the implementation)."""
    # Wrong type for unflattened_size (tuple of floats)
    with self.assertRaisesRegex(
            TypeError,
            r"unflattened_size must be tuple of ints, but found element of type float at pos 2"):
        nn.Unflatten(dim=1, unflattened_size=(2, 5, 5.0))
    # Wrong type for unflattened_size (list of lists and list of tuples)
    for us in ([['C', 2], ['W', 5], ['H', 5]], [('C', 2), ('W', 5), ('H', 5)]):
        with self.assertRaisesRegex(
                TypeError,
                r"unflattened_size must be a tuple of tuples, but found type list"):
            nn.Unflatten(dim='features', unflattened_size=us)
    # Wrong type for unflattened_size (tuple of lists)
    with self.assertRaisesRegex(
            TypeError,
            r"unflattened_size must be tuple of tuples, but found element of type list at pos 0"):
        nn.Unflatten(dim='features', unflattened_size=(['C', 2], ['W', 5], ['H', 5]))
    # Wrong type for unflattened_size (tuple of dicts)
    with self.assertRaisesRegex(
            TypeError,
            r"unflattened_size must be tuple of tuples, but found element of type dict at pos 0"):
        nn.Unflatten(dim='features', unflattened_size=({'C': 2}, {'W': 5}, {'H': 5}))
def test_layer_norm_grads_with_create_graph_flag(self):
    """LayerNorm input gradients must be identical whether or not
    create_graph=True is passed to autograd.grad."""
    atol = 1e-5
    rtol = 1e-3
    x = torch.randn((4, 4, 16), requires_grad=True)
    layer_norm = nn.LayerNorm((16,), 1e-5, True)
    with torch.no_grad():
        # Scale the affine weight away from its default so the affine term matters.
        layer_norm.weight = torch.nn.Parameter(0.1 * torch.ones_like(layer_norm.weight))
    grads1 = torch.autograd.grad(layer_norm(x).sum(), x, create_graph=False)[0]
    grads2 = torch.autograd.grad(layer_norm(x).sum(), x, create_graph=True)[0]
    self.assertEqual(grads1, grads2, rtol=rtol, atol=atol)
    if TEST_CUDA:
        # Repeat the same comparison on GPU when available.
        x = x.to('cuda')
        layer_norm = layer_norm.to('cuda')
        grads1 = torch.autograd.grad(layer_norm(x).sum(), x, create_graph=False)[0]
        grads2 = torch.autograd.grad(layer_norm(x).sum(), x, create_graph=True)[0]
        self.assertEqual(grads1, grads2, rtol=rtol, atol=atol)
def test_padding_list(self):
# Padding can be a list, or tuple (regression test for gh-54452)
x = torch.randn(4, 8, 32, 32)
net = torch.nn.ConvTranspose2d(8, 16, kernel_size=3, padding=[3, 3])
y = net(x)
net = torch.nn.ConvTranspose2d(8, 16, kernel_size=3, padding=(3, 3))
y = net(x)
class TestNNInit(TestCase):
    """Tests for the torch.nn.init initializer functions."""

    def setUp(self):
        """Seed Python's RNG so random sizes/floats are reproducible per test."""
        super().setUp()  # zero-argument super() (PEP 3135) replaces super(TestNNInit, self)
        random.seed(123)
def _is_normal(self, tensor, mean, std):
    """Kolmogorov-Smirnov test: are the samples plausibly N(mean, std)?

    Returns True when the KS p-value exceeds 0.0001, i.e. we fail to
    reject normality at that significance level.
    """
    samples = tensor.view(-1).tolist()
    p_value = stats.kstest(samples, 'norm', args=(mean, std))[1]
    return p_value > 0.0001
def _is_trunc_normal(self, tensor, mean, std, a, b):
    """KS test against a truncated normal on [a, b] with the given mean/std."""
    # scipy's trunc norm is suited for data drawn from N(0, 1),
    # so we need to transform our data to test it using scipy.
    z_samples = (tensor.view(-1) - mean) / std
    z_samples = z_samples.tolist()
    # Truncation bounds expressed in standard-normal units.
    a0 = (a - mean) / std
    b0 = (b - mean) / std
    p_value = stats.kstest(z_samples, 'truncnorm', args=(a0, b0))[1]
    return p_value > 0.0001
def _is_uniform(self, tensor, a, b):
    """KS test: are the samples plausibly U(a, b)?

    scipy parametrizes 'uniform' as (loc, scale), hence args=(a, b - a).
    """
    samples = tensor.view(-1).tolist()
    p_value = stats.kstest(samples, 'uniform', args=(a, (b - a)))[1]
    return p_value > 0.0001
def _create_random_nd_tensor(self, dims, size_min, size_max):
    """Return a zero tensor with `dims` dimensions, each dimension's size
    drawn uniformly from [size_min, size_max]."""
    shape = [random.randint(size_min, size_max) for _ in range(dims)]
    return torch.zeros(shape)
def _random_float(self, a, b):
    """Uniformly sample a float from the half-open interval [a, b)."""
    return a + (b - a) * random.random()
def test_calculate_gain_linear(self):
    """calculate_gain must return 1 for linear and all conv/conv-transpose flavors.

    Fix: the original list contained 'conv_transpose2d' twice and never
    exercised 'conv_transpose1d'; each flavor is now checked exactly once.
    """
    for fn in ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']:
        gain = init.calculate_gain(fn)
        self.assertEqual(gain, 1)
def test_calculate_gain_nonlinear(self):
    """calculate_gain must return the documented gain per nonlinearity.

    Fix: 'selu' had a branch below but was missing from the loop, so that
    branch was unreachable; it is now exercised as well.
    """
    for fn in ['sigmoid', 'tanh', 'relu', 'leaky_relu', 'selu']:
        gain = init.calculate_gain(fn)
        if fn == 'sigmoid':
            self.assertEqual(gain, 1)
        elif fn == 'tanh':  # 5 / 3
            self.assertEqual(gain, 1.6666666666666667)
        elif fn == 'relu':  # sqrt(2)
            self.assertEqual(gain, 1.4142135623730951)
        elif fn == 'leaky_relu':  # sqrt(2 / (1 + slope^2))
            self.assertEqual(gain, 1.4141428569978354)
        elif fn == 'selu':  # 3 / 4
            self.assertEqual(gain, 0.75)
def test_calculate_gain_leaky_relu(self):
    """The leaky_relu gain sqrt(2 / (1 + slope^2)) must respect the
    negative_slope parameter, defaulting to 0.01."""
    for param in [None, 0, 0.01, 10]:
        gain = init.calculate_gain('leaky_relu', param)
        if param is None:  # Default slope is 0.01
            self.assertEqual(gain, 1.4141428569978354)
        elif param == 0:  # No slope = same gain as normal ReLU
            self.assertEqual(gain, 1.4142135623730951)
        elif param == 0.01:
            # Explicit default slope must match the implicit default.
            self.assertEqual(gain, 1.4141428569978354)
        elif param == 10:
            self.assertEqual(gain, 0.14071950894605836)
def test_calculate_gain_leaky_relu_only_accepts_numbers(self):
    """Non-numeric negative_slope arguments must raise ValueError."""
    for bad_param in (True, [1], {'a': 'b'}):
        with self.assertRaises(ValueError):
            init.calculate_gain('leaky_relu', bad_param)
def test_calculate_gain_only_accepts_valid_nonlinearities(self):
    """Unknown nonlinearity names must raise ValueError."""
    for length in (2, 5, 25):
        # Random lowercase strings of these lengths are definitely not
        # valid nonlinearity names.
        bogus_name = ''.join(random.choice(string.ascii_lowercase) for _ in range(length))
        with self.assertRaises(ValueError):
            init.calculate_gain(bogus_name)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_uniform(self):
    """init.uniform_ samples should pass a KS test for U(a, b)."""
    for dims in [1, 2, 4]:
        input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50)
        # Random interval with width in [1, 5).
        a = self._random_float(-3, 3)
        b = a + self._random_float(1, 5)
        init.uniform_(input_tensor, a=a, b=b)
        assert self._is_uniform(input_tensor, a, b)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_normal(self):
    """init.normal_ samples should pass a KS test for N(mean, std)."""
    for dims in [1, 2, 4]:
        input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50)
        mean = self._random_float(-3, 3)
        std = self._random_float(1, 5)
        init.normal_(input_tensor, mean=mean, std=std)
        assert self._is_normal(input_tensor, mean, std)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_trunc_normal(self):
    """init.trunc_normal_ samples should pass a KS test for a normal
    truncated to [a, b]."""
    for dims in [1, 2, 4]:
        input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50)
        mean = self._random_float(-3, 3)
        std = self._random_float(.01, 1)
        # Truncation bounds within two standard deviations of the mean.
        a = self._random_float(mean - 2 * std, mean)
        b = self._random_float(mean, mean + 2 * std)
        init.trunc_normal_(input_tensor, mean=mean, std=std, a=a, b=b)
        assert self._is_trunc_normal(input_tensor, mean, std, a, b)
def test_constant(self):
    """init.constant_ must fill every element with the requested value."""
    for ndim in (1, 2, 4):
        target = self._create_random_nd_tensor(ndim, size_min=1, size_max=5)
        fill_value = self._random_float(1, 10)
        init.constant_(target, fill_value)
        self.assertEqual(target, target.clone().fill_(fill_value))
def test_ones_and_zeros(self):
    """init.ones_ and init.zeros_ must fill with 1 and 0 respectively."""
    for initializer, expected in ((init.ones_, 1), (init.zeros_, 0)):
        for ndim in (1, 2, 4):
            target = self._create_random_nd_tensor(ndim, size_min=1, size_max=5)
            initializer(target)
            self.assertEqual(target, target.clone().fill_(expected))
def test_eye(self):
    """init.eye_ must write an identity pattern: 1 on the diagonal, 0 elsewhere."""
    matrix = self._create_random_nd_tensor(2, size_min=1, size_max=5)
    init.eye_(matrix)
    # Check every single element against the identity pattern.
    for r in range(matrix.size(0)):
        for c in range(matrix.size(1)):
            expected = 1 if r == c else 0
            assert matrix[r][c] == expected
def test_eye_only_works_on_2d_inputs(self):
    """init.eye_ must reject tensors that are not exactly 2-D."""
    for ndim in (1, 3):
        with self.assertRaises(ValueError):
            bad = self._create_random_nd_tensor(ndim, size_min=1, size_max=3)
            init.eye_(bad)
def test_max_unpool(self):
    """max_unpoolNd: omitting stride must default to the kernel size,
    output_size may be given as Size or tuple, and forward-mode AD passes
    gradcheck for 1-D/2-D/3-D."""
    # Test 1D
    output, indices = F.max_pool1d(torch.randn([1, 1, 4]), 2, stride=2, return_indices=True)
    self.assertEqual(F.max_unpool1d(output, indices, 2), F.max_unpool1d(output, indices, 2, stride=2))
    # Test list / tuple passed as argument to max_unpool1d
    input = torch.randn([1, 1, 5], requires_grad=True)
    output, indices = F.max_pool1d(input, 2, stride=2, return_indices=True)
    # .shape (torch.Size) and .size() must be interchangeable as output_size.
    self.assertEqual(F.max_unpool1d(output, indices, 2, stride=2, output_size=input.shape),
                     F.max_unpool1d(output, indices, 2, stride=2, output_size=input.size()))
    gradcheck(F.max_unpool1d, (output, indices, 2), check_forward_ad=True)
    # Test 2D
    output, indices = F.max_pool2d(torch.randn([1, 1, 4, 4], requires_grad=True), 2, stride=2, return_indices=True)
    self.assertEqual(F.max_unpool2d(output, indices, 2), F.max_unpool2d(output, indices, 2, stride=2))
    gradcheck(F.max_unpool2d, (output, indices, 2), check_forward_ad=True)
    # Test 3D
    output, indices = F.max_pool3d(torch.randn([4, 4, 4, 4, 4], requires_grad=True), 2, stride=2, return_indices=True)
    self.assertEqual(F.max_unpool3d(output, indices, 2), F.max_unpool3d(output, indices, 2, stride=2))
    gradcheck(F.max_unpool3d, (output, indices, 2), check_forward_ad=True)
def test_dirac_properties(self):
    """dirac_ must place exactly min(c_out, c_in) ones per group (all other
    entries zero) for 3-, 4- and 5-D tensors."""
    for dims in [3, 4, 5]:
        for groups in [1, 2, 3]:
            # prepare random tensor with random sizes, but fits groups
            a, c, d, e = (random.randint(1, 5) for _ in range(4))
            b = random.randint(1, 5 * groups)  # same range as a*groups but all range allowed
            # make sure first dim divides by groups
            input_tensor = torch.randn((a * groups, b, c, d, e)[:dims])
            init.dirac_(input_tensor, groups)
            c_out, c_in = input_tensor.size(0) // groups, input_tensor.size(1)
            min_d = min(c_out, c_in)
            # Check number of nonzeros is equivalent to smallest dim (for each group)
            assert torch.nonzero(input_tensor).size(0) == min_d * groups
            # Check sum of values (can have precision issues, hence assertEqual) is also equivalent
            self.assertEqual(input_tensor.sum(), min_d * groups)
def test_dirac_identity(self):
    """A dirac_-initialized filter must act as (a grouped) identity under
    convNd: each group passes its in_c input channels through unchanged,
    extra output channels stay zero. Checked for 1-D, 2-D and 3-D."""
    for groups in [1, 3]:
        batch, in_c, out_c, size, kernel_size = 8, 3, 9, 5, 3  # in_c, out_c must divide by groups
        eff_out_c = out_c // groups
        # Test 1D
        input_var = torch.randn(batch, in_c, size)
        filter_var = torch.zeros(eff_out_c, in_c, kernel_size)
        filter_var = torch.cat([filter_var] * groups)
        init.dirac_(filter_var, groups)
        output_var = F.conv1d(input_var, filter_var)
        input_tensor, output_tensor = input_var.data, output_var.data  # Variables do not support nonzero
        for g in range(groups):
            # Assert in_c outputs are preserved (per each group)
            # (1:-1 trims the border lost to the size-3 kernel without padding)
            self.assertEqual(input_tensor[:, :, 1:-1],
                             output_tensor[:, eff_out_c * g:eff_out_c * g + in_c, :])
            # Assert extra outputs are 0
            assert torch.nonzero(output_tensor[:, eff_out_c * g + in_c:eff_out_c * (g + 1), :]).numel() == 0
        # Test 2D
        input_var = torch.randn(batch, in_c, size, size)
        filter_var = torch.zeros(eff_out_c, in_c, kernel_size, kernel_size)
        filter_var = torch.cat([filter_var] * groups)
        init.dirac_(filter_var, groups)
        output_var = F.conv2d(input_var, filter_var)
        input_tensor, output_tensor = input_var.data, output_var.data  # Variables do not support nonzero
        for g in range(groups):
            # Assert in_c outputs are preserved (per each group)
            self.assertEqual(input_tensor[:, :, 1:-1, 1:-1],
                             output_tensor[:, eff_out_c * g:eff_out_c * g + in_c, :, :])
            # Assert extra outputs are 0
            assert torch.nonzero(output_tensor[:, eff_out_c * g + in_c:eff_out_c * (g + 1), :, :]).numel() == 0
        # Test 3D
        input_var = torch.randn(batch, in_c, size, size, size)
        filter_var = torch.zeros(eff_out_c, in_c, kernel_size, kernel_size, kernel_size)
        filter_var = torch.cat([filter_var] * groups)
        init.dirac_(filter_var, groups)
        output_var = F.conv3d(input_var, filter_var)
        input_tensor, output_tensor = input_var.data, output_var.data
        for g in range(groups):
            # Assert in_c outputs are preserved (per each group)
            self.assertEqual(input_tensor[:, :, 1:-1, 1:-1, 1:-1],
                             output_tensor[:, eff_out_c * g:eff_out_c * g + in_c, :, :, :])
            # Assert extra outputs are 0
            assert torch.nonzero(output_tensor[:, eff_out_c * g + in_c:eff_out_c * (g + 1), :, :, :]).numel() == 0
def test_dirac_only_works_on_3_4_5d_inputs(self):
    """init.dirac_ supports only 3-, 4- and 5-D tensors; others must raise."""
    for ndim in (1, 2, 6):
        with self.assertRaises(ValueError):
            bad = self._create_random_nd_tensor(ndim, size_min=1, size_max=3)
            init.dirac_(bad)
def test_xavier_uniform_errors_on_inputs_smaller_than_2d(self):
    """xavier_uniform_ needs fan-in/fan-out, so 0-D/1-D tensors must raise."""
    for ndim in (0, 1):
        small = self._create_random_nd_tensor(ndim, size_min=1, size_max=1)
        with self.assertRaises(ValueError):
            init.xavier_uniform_(small)
def test_xavier_normal_errors_on_inputs_smaller_than_2d(self):
    """xavier_normal_ needs fan-in/fan-out, so 0-D/1-D tensors must raise."""
    for ndim in (0, 1):
        small = self._create_random_nd_tensor(ndim, size_min=1, size_max=1)
        with self.assertRaises(ValueError):
            init.xavier_normal_(small)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_xavier_uniform(self):
    """xavier_uniform_ draws from U(-b, b) with
    b = gain * sqrt(6 / (fan_in + fan_out)); verify via KS test."""
    for use_gain in [True, False]:
        for dims in [2, 4]:
            input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25)
            gain = 1
            if use_gain:
                gain = self._random_float(0.1, 2)
                init.xavier_uniform_(input_tensor, gain=gain)
            else:
                init.xavier_uniform_(input_tensor)
            fan_in = input_tensor.size(1)
            fan_out = input_tensor.size(0)
            if input_tensor.dim() > 2:
                # Convolutional case: receptive-field size multiplies both fans.
                fan_in *= input_tensor[0, 0].numel()
                fan_out *= input_tensor[0, 0].numel()
            expected_std = gain * math.sqrt(2.0 / (fan_in + fan_out))
            # Uniform bound corresponding to that std: b = std * sqrt(3).
            bounds = expected_std * math.sqrt(3)
            assert self._is_uniform(input_tensor, -bounds, bounds)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_xavier_normal(self):
    """xavier_normal_ draws from N(0, std) with
    std = gain * sqrt(2 / (fan_in + fan_out)); verify via KS test."""
    for use_gain in [True, False]:
        for dims in [2, 4]:
            input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25)
            gain = 1
            if use_gain:
                gain = self._random_float(0.1, 2)
                init.xavier_normal_(input_tensor, gain=gain)
            else:
                init.xavier_normal_(input_tensor)
            fan_in = input_tensor.size(1)
            fan_out = input_tensor.size(0)
            if input_tensor.dim() > 2:
                # Convolutional case: receptive-field size multiplies both fans.
                fan_in *= input_tensor[0, 0].numel()
                fan_out *= input_tensor[0, 0].numel()
            expected_std = gain * math.sqrt(2.0 / (fan_in + fan_out))
            assert self._is_normal(input_tensor, 0, expected_std)
def test_kaiming_uniform_errors_on_inputs_smaller_than_2d(self):
    """kaiming_uniform_ needs a fan dimension, so 0-D/1-D tensors must raise."""
    for ndim in (0, 1):
        with self.assertRaises(ValueError):
            small = self._create_random_nd_tensor(ndim, size_min=1, size_max=1)
            init.kaiming_uniform_(small)
def test_kaiming_normal_errors_on_inputs_smaller_than_2d(self):
    """kaiming_normal_ needs a fan dimension, so 0-D/1-D tensors must raise."""
    for ndim in (0, 1):
        with self.assertRaises(ValueError):
            small = self._create_random_nd_tensor(ndim, size_min=1, size_max=1)
            init.kaiming_normal_(small)
def test_kaiming_uniform_warning_on_0element_tensor(self):
    """kaiming_uniform_ on a zero-element tensor must warn and be a no-op."""
    degenerate = torch.empty(0, 1)
    with self.assertWarnsRegex(UserWarning, "Initializing zero-element tensors is a no-op"):
        _ = init.kaiming_uniform_(degenerate)
def test_kaiming_normal_warning_on_0element_tensor(self):
    """kaiming_normal_ on a zero-element tensor must warn and be a no-op."""
    degenerate = torch.empty(0, 1)
    with self.assertWarnsRegex(UserWarning, "Initializing zero-element tensors is a no-op"):
        _ = init.kaiming_normal_(degenerate)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_kaiming_uniform(self):
    """kaiming_uniform_ draws from U(-b, b) with
    b = sqrt(3) * sqrt(2 / ((1 + a^2) * fan)); verify for both fan modes."""
    for use_a in [True, False]:
        for dims in [2, 4]:
            for mode in ['fan_in', 'fan_out']:
                input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25)
                if use_a:
                    a = self._random_float(0.1, 2)
                    init.kaiming_uniform_(input_tensor, a=a, mode=mode)
                else:
                    a = 0
                    init.kaiming_uniform_(input_tensor, mode=mode)
                fan_in = input_tensor.size(1)
                fan_out = input_tensor.size(0)
                if input_tensor.dim() > 2:
                    # Convolutional case: receptive-field size multiplies both fans.
                    fan_in *= input_tensor[0, 0].numel()
                    fan_out *= input_tensor[0, 0].numel()
                if mode == 'fan_in':
                    n = fan_in
                else:
                    n = fan_out
                expected_std = math.sqrt(2.0 / ((1 + a**2) * n))
                bounds = expected_std * math.sqrt(3.0)
                assert self._is_uniform(input_tensor, -bounds, bounds)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_kaiming_normal(self):
    """kaiming_normal_ draws from N(0, std) with
    std = sqrt(2 / ((1 + a^2) * fan)); verify for both fan modes."""
    for use_a in [True, False]:
        for dims in [2, 4]:
            for mode in ['fan_in', 'fan_out']:
                input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25)
                if use_a:
                    a = self._random_float(0.1, 2)
                    init.kaiming_normal_(input_tensor, a=a, mode=mode)
                else:
                    a = 0
                    init.kaiming_normal_(input_tensor, mode=mode)
                fan_in = input_tensor.size(1)
                fan_out = input_tensor.size(0)
                if input_tensor.dim() > 2:
                    # Convolutional case: receptive-field size multiplies both fans.
                    fan_in *= input_tensor[0, 0].numel()
                    fan_out *= input_tensor[0, 0].numel()
                if mode == 'fan_in':
                    n = fan_in
                else:
                    n = fan_out
                expected_std = math.sqrt(2.0 / ((1 + a**2) * n))
                assert self._is_normal(input_tensor, 0, expected_std)
def test_sparse_only_works_on_2d_inputs(self):
    """init.sparse_ must reject tensors that are not exactly 2-D."""
    for ndim in (1, 3):
        with self.assertRaises(ValueError):
            fraction = self._random_float(0.1, 0.9)
            bad = self._create_random_nd_tensor(ndim, size_min=1, size_max=3)
            init.sparse_(bad, fraction)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_sparse_default_std(self):
    """init.sparse_ must zero at least `sparsity` of each column; the
    remaining entries should be N(0, std), with std defaulting to 0.01."""
    for use_random_std in [True, False]:
        input_tensor = self._create_random_nd_tensor(2, size_min=30, size_max=35)
        rows, cols = input_tensor.size(0), input_tensor.size(1)
        sparsity = self._random_float(0.1, 0.2)
        std = 0.01  # default std
        if use_random_std:
            std = self._random_float(0.01, 0.2)
            init.sparse_(input_tensor, sparsity=sparsity, std=std)
        else:
            init.sparse_(input_tensor, sparsity=sparsity)
        # Each column must contain at least ceil(sparsity * rows) zeros.
        for col_idx in range(input_tensor.size(1)):
            column = input_tensor[:, col_idx]
            assert column[column == 0].nelement() >= math.ceil(sparsity * rows)
        # The nonzero entries should be normally distributed around 0.
        assert self._is_normal(input_tensor[input_tensor != 0], 0, std)
@skipIfNoLapack
def test_orthogonal(self):
    """init.orthogonal_ must produce (scaled) orthonormal rows or columns:
    flattened to (rows, cols), the smaller-dimension Gram matrix equals
    gain^2 * I."""
    for use_gain in [True, False]:
        for tensor_size in [[3, 4], [4, 3], [20, 2, 3, 4], [2, 3, 4, 5]]:
            input_tensor = torch.zeros(tensor_size)
            gain = 1.0
            if use_gain:
                gain = self._random_float(0.1, 2)
                init.orthogonal_(input_tensor, gain=gain)
            else:
                init.orthogonal_(input_tensor)
            # Flatten trailing dims: treat the tensor as a (rows x cols) matrix.
            rows, cols = tensor_size[0], reduce(mul, tensor_size[1:])
            flattened_tensor = input_tensor.view(rows, cols)
            if rows > cols:
                # Tall matrix: columns are orthonormal (up to gain).
                self.assertEqual(torch.mm(flattened_tensor.t(), flattened_tensor),
                                 torch.eye(cols) * gain ** 2, atol=1e-6, rtol=0)
            else:
                # Wide matrix: rows are orthonormal (up to gain).
                self.assertEqual(torch.mm(flattened_tensor, flattened_tensor.t()),
                                 torch.eye(rows) * gain ** 2, atol=1e-6, rtol=0)
def test_deprecation(self):
    """Calling the old non-underscore initializer API must emit a
    deprecation warning."""
    x = torch.randn(3, 3)
    def fn():
        # init.normal (no trailing underscore) is the deprecated alias of init.normal_.
        init.normal(x)
    with self.assertWarnsRegex(UserWarning, 'deprecated', msg='methods not suffixed with underscore should be deprecated'):
        fn()
class TestFusionEval(TestCase):
    """Numerics of eval-mode Conv2d+BatchNorm2d fusion
    (torch.nn.utils.fusion.fuse_conv_bn_eval)."""
    # Hypothesis draws the conv input and the BN running statistics.
    @given(X=hu.tensor(shapes=((5, 3, 5, 5),)),
           running_mean=hu.tensor(shapes=(6,)),
           running_var=hu.tensor(shapes=(6,)))
    def test_fuse_module_eval_numerics(self, X, running_mean, running_var):
        """The fused conv must reproduce BN(conv(x)) for both affine and
        non-affine BatchNorm in eval mode."""
        inputs, _ = X
        # hu.tensor draws are 2-tuples whose first element is an ndarray
        # (see the torch.from_numpy calls below).
        iC, oC = inputs.shape[1], len(running_mean[0])
        inputs = torch.from_numpy(inputs).to(torch.double)
        kernel_size = (3, 3)
        conv_ref = torch.nn.Conv2d(iC, oC, bias=True, kernel_size=kernel_size)
        bn_ref = torch.nn.BatchNorm2d(oC)
        bn_ref.running_mean = torch.from_numpy(running_mean[0]).to(torch.double)
        bn_ref.running_var = torch.from_numpy(running_var[0]).to(torch.double)
        # Fusion is only valid in eval mode (running stats frozen).
        conv_ref.eval()
        bn_ref.eval()
        Y_ref = bn_ref(conv_ref(inputs))
        conv_bn_fused = torch.nn.utils.fusion.fuse_conv_bn_eval(conv_ref,
                                                                bn_ref)
        Y_hat = conv_bn_fused(inputs)
        self.assertEqual(Y_ref, Y_hat, msg="Conv+BN fusion results are off")
        # Repeat with a non-affine BN (no learned weight/bias).
        na_bn_ref = torch.nn.BatchNorm2d(oC, affine=False)
        na_bn_ref.running_mean = torch.from_numpy(running_mean[0]).to(torch.double)
        na_bn_ref.running_var = torch.from_numpy(running_var[0]).to(torch.double)
        na_bn_ref.eval()
        Y_ref = na_bn_ref(conv_ref(inputs))
        conv_na_bn_fused = torch.nn.utils.fusion.fuse_conv_bn_eval(conv_ref,
                                                                   na_bn_ref)
        Y_hat = conv_na_bn_fused(inputs)
        self.assertEqual(Y_ref, Y_hat, msg="Conv+BN(non-affine) fusion results are off")
class TestConstantPadNd(TestCase):
def test_constant_pad_nd(self):
a = torch.tensor([[1, 2], [3, 4]])
res = torch.constant_pad_nd(a, [1, 2, 1, 0], 9)
expected = torch.tensor([
[9, 9, 9, 9, 9],
[9, 1, 2, 9, 9],
[9, 3, 4, 9, 9]
])
self.assertEqual(res, expected)
def test_preserves_memory_format(self):
nchw_tensor = torch.rand((1, 2, 5, 3))
nchw_padded = torch.constant_pad_nd(nchw_tensor, [1, 2], 0.5)
self.assertTrue(nchw_padded.is_contiguous(memory_format=torch.contiguous_format))
nhwc_tensor = nchw_tensor.contiguous(memory_format=torch.channels_last)
nhwc_padded = torch.constant_pad_nd(nhwc_tensor, [1, 2], 0.5)
self.assertTrue(nhwc_padded.is_contiguous(memory_format=torch.channels_last))
class TestAddRelu(TestCase):
    """Tests for the fused add+relu kernel exposed as torch._VF._add_relu."""

    def test_add_relu(self):
        """_add_relu(a, b) must match relu(a + b) elementwise."""
        # Shift one operand to span negatives and positives so relu clips some entries.
        lhs = torch.rand((7, 11)).float() * -10 + 5
        rhs = torch.rand((7, 11)).float()
        fused = torch._VF._add_relu(lhs, rhs)
        reference = torch.relu(lhs + rhs)
        self.assertEqual(fused, reference)

    def test_add_relu_broadcasting(self):
        """Adding the Python scalar 1 must equal adding a ones tensor."""
        base = torch.rand((1, 32))
        scalar_addend = 1
        tensor_addend = torch.ones(1, 32)
        res_scalar = torch._VF._add_relu(base, scalar_addend)
        res_tensor = torch._VF._add_relu(base, tensor_addend)
        self.assertEqual(res_tensor, res_scalar)
def add_test(test, decorator=None):
    """Register a module-test instance as generated methods on TestNN.

    Adds a CPU variant plus CUDA variants. When the test's test_cuda accepts
    a `dtype` argument, one variant per floating dtype is generated (half /
    bfloat16 / complex variants gated by check_* attributes); on GPUs where
    tf32 differs from fp32 and the test opts in (`with_tf32`), paired
    '_fp32'/'_tf32' variants are generated instead.

    NOTE: every closure below rebinds loop-carried values via default
    arguments (test=test, kwargs=kwargs) to dodge Python's late-binding
    closure pitfall.
    """
    def add(test_name, fn):
        # Refuse duplicate registrations: each generated name must be unique.
        if hasattr(TestNN, test_name):
            raise RuntimeError('Found two tests with the same name: ' + test_name)
        if decorator is not None:
            fn = decorator(fn)
        setattr(TestNN, test_name, fn)
    test_name = test.get_name()
    # CPU variant (unless the test instance opts out via test_cpu=False).
    if not hasattr(test, 'test_cpu') or test.test_cpu:
        add(test_name, lambda self, test=test: test(self))
    cuda_test_name = test_name + '_cuda'
    # With dtype enable, it's good enough to test against three floating types
    kwargs = {}
    if 'extra_args' in get_function_arglist(test.test_cuda):
        kwargs['extra_args'] = test.extra_args
    if 'dtype' in get_function_arglist(test.test_cuda):
        if tf32_is_not_fp32() and test.with_tf32:
            # Ampere-class GPU: check float32 behavior with tf32 both off and on.
            def with_tf32_off(self, test=test, kwargs=kwargs):
                with tf32_off():
                    test.test_cuda(self, dtype=torch.float, **kwargs)
            add(cuda_test_name + '_fp32', with_tf32_off)
            def with_tf32_on(self, test=test, kwargs=kwargs):
                with tf32_on(self, test.tf32_precision):
                    test.test_cuda(self, dtype=torch.float, **kwargs)
            add(cuda_test_name + '_tf32', with_tf32_on)
        else:
            add(cuda_test_name + '_float', lambda self,
                test=test, kwargs=kwargs: test.test_cuda(self, dtype=torch.float, **kwargs))
            add(cuda_test_name + '_double', lambda self,
                test=test, kwargs=kwargs: test.test_cuda(self, dtype=torch.double, **kwargs))
            def test_half(self, test=test, kwargs=kwargs):
                test.test_cuda(self, dtype=torch.half, **kwargs)
            if getattr(test, 'check_half', True):
                add(cuda_test_name + '_half', test_half)
            def test_bfloat16(self, test=test, kwargs=kwargs):
                test.test_cuda(self, dtype=torch.bfloat16, **kwargs)
            if getattr(test, 'check_bfloat16', True):
                add(cuda_test_name + '_bfloat16', test_bfloat16)
            def test_cfloat(self, test=test, kwargs=kwargs):
                test.test_cuda(self, dtype=torch.cfloat, **kwargs)
            def test_cdouble(self, test=test, kwargs=kwargs):
                test.test_cuda(self, dtype=torch.cdouble, **kwargs)
            # Complex variants are opt-in (check_complex defaults to False).
            if getattr(test, 'check_complex', False):
                add(cuda_test_name + '_cfloat', test_cfloat)
                add(cuda_test_name + '_cdouble', test_cdouble)
    else:
        # test_cuda takes no dtype: register a single CUDA variant
        # (still split into fp32/tf32 pairs on tf32-capable GPUs).
        def with_tf32_off(self, test=test, kwargs=kwargs):
            with tf32_off():
                test.test_cuda(self, **kwargs)
        if tf32_is_not_fp32() and test.with_tf32:
            add(cuda_test_name + '_fp32', with_tf32_off)
            def with_tf32_on(self, test=test, kwargs=kwargs):
                with tf32_on(self, test.tf32_precision):
                    test.test_cuda(self, **kwargs)
            add(cuda_test_name + '_tf32', with_tf32_on)
        else:
            add(cuda_test_name, with_tf32_off)
# Dynamically generate TestNN cases from the declarative module_tests /
# new_module_tests tables, plus derived eval-mode and long-tensor variants.
for test_params in module_tests + new_module_tests:
    # TODO: CUDA is not implemented yet
    if 'constructor' not in test_params:
        name = test_params.pop('module_name')
        test_params['constructor'] = getattr(nn, name)
    decorator = test_params.pop('decorator', None)
    test = NewModuleTest(**test_params)
    add_test(test, decorator)
    if 'check_eval' in test_params:
        # create a new test that is identical but that sets module.training to False
        desc = test_params.get('desc', None)
        test_params['desc'] = 'eval' if desc is None else desc + '_eval'

        def gen_eval_constructor(constructor):
            # Factory binds `constructor` now, avoiding late binding over the loop.
            def eval_constructor(*args, **kwargs):
                cons = constructor(*args, **kwargs)
                cons.training = False
                return cons
            eval_constructor.__name__ = constructor.__name__
            return eval_constructor

        test_params['constructor'] = gen_eval_constructor(test_params['constructor'])
        test = NewModuleTest(**test_params)
        add_test(test, decorator)
    if 'check_with_long_tensor' in test_params:
        fullname = test_params.get('fullname', None)
        if fullname:
            test_params['fullname'] = fullname + '_with_long_tensor'
        else:
            desc = test_params.get('desc', None)
            test_params['desc'] = 'with_long_tensor' if desc is None else desc + '_with_long_tensor'

        def double_equivalent_of_long_tensor(size):
            # Integer-valued doubles stand in for LongTensor so autograd plumbing still works.
            return torch.randint(-1000, 1000, size=size).double()

        def apply_to_cons(t):
            if t.is_floating_point():
                if isinstance(t, Parameter):
                    return Parameter(double_equivalent_of_long_tensor(t.size()))
                elif isinstance(t, torch.Tensor):
                    return double_equivalent_of_long_tensor(t.size())
            else:
                return t

        def gen_long_tensor_constructor(constructor):
            def long_tensor_constructor(*args, **kwargs):
                cons = constructor(*args, **kwargs)
                cons._apply(apply_to_cons)
                return cons
            long_tensor_constructor.__name__ = constructor.__name__
            return long_tensor_constructor

        def gen_long_tensor_input(input_size):
            def input_func():
                return double_equivalent_of_long_tensor(input_size)
            return input_func

        def reference_fn(i, p, m):
            # For bad reasons this would create LongTensors that requires gradients
            # Remove requires_grad to avoid this
            for p in m.parameters():
                p.requires_grad_(False)
            m._apply(lambda t: t.long())
            input = i.long()
            out = m.forward(input)
            return out

        test_params['constructor'] = gen_long_tensor_constructor(test_params['constructor'])
        test_params['input_fn'] = gen_long_tensor_input(test_params['input_size'])
        test_params['reference_fn'] = reference_fn
        test_params['check_forward_only'] = True
        # Currently we don't support conv2d/conv3d for LongTensor in CUDA
        test_params['test_cuda'] = False
        test = NewModuleTest(**test_params)

        add_test(test, decorator)
# Generate criterion (loss) tests, plus reduction='sum' variants when requested.
for test_params in criterion_tests:
    if 'constructor' not in test_params:
        name = test_params.pop('module_name')
        test_params['constructor'] = getattr(nn, name)
    test = CriterionTest(**test_params)
    decorator = test_params.pop('decorator', None)
    add_test(test, decorator)
    if 'check_sum_reduction' in test_params:
        desc = test_params.get('desc', None)
        test_params['desc'] = 'sum_reduction' if desc is None else desc + '_sum_reduction'

        def gen_sum_reduction_constructor(constructor):
            # Binds `constructor` eagerly and injects reduction='sum' into the ctor call.
            def sum_reduction_constructor(*args, **kwargs):
                cons = constructor(*args, reduction='sum', **kwargs)
                return cons
            sum_reduction_constructor.__name__ = constructor.__name__
            return sum_reduction_constructor

        test_params['constructor'] = gen_sum_reduction_constructor(test_params['constructor'])
        test = CriterionTest(**test_params)
        add_test(test, decorator)
class UnpoolingNet(nn.Module):
    """Round-trips an input through a max-pooling module and its matching unpool."""

    def __init__(self, pool, unpool):
        super(UnpoolingNet, self).__init__()
        self.pool = pool
        self.unpool = unpool

    def forward(self, input):
        # The pool is expected to be built with return_indices=True.
        pooled, indices = self.pool(input)
        return self.unpool(pooled, indices)
# Register max-unpooling round-trip tests (batched and no-batch-dim variants).
add_test(NewModuleTest(
    constructor=lambda: UnpoolingNet(
        nn.MaxPool1d(2, return_indices=True),
        nn.MaxUnpool1d(2)),
    input_size=(1, 1, 4),
    fullname='MaxUnpool1d_net',))
add_test(NewModuleTest(
    constructor=lambda: UnpoolingNet(
        nn.MaxPool2d(2, return_indices=True),
        nn.MaxUnpool2d(2)),
    input_size=(1, 1, 2, 4),
    fullname='MaxUnpool2d_net',))
add_test(NewModuleTest(
    constructor=lambda: UnpoolingNet(
        nn.MaxPool3d(2, return_indices=True),
        nn.MaxUnpool3d(2)),
    input_size=(1, 1, 2, 4, 6),
    fullname='MaxUnpool3d_net',
    check_gradgrad=False,))
add_test(NewModuleTest(
    constructor=lambda: UnpoolingNet(
        nn.MaxPool1d(2, return_indices=True),
        nn.MaxUnpool1d(2)),
    input_size=(1, 4),
    reference_fn=single_batch_reference_fn,
    fullname='MaxUnpool1d_net_no_batch_dim',))
add_test(NewModuleTest(
    constructor=lambda: UnpoolingNet(
        nn.MaxPool2d(2, return_indices=True),
        nn.MaxUnpool2d(2)),
    input_size=(1, 2, 4),
    reference_fn=single_batch_reference_fn,
    fullname='MaxUnpool2d_net_no_batch_dim',))
add_test(NewModuleTest(
    constructor=lambda: UnpoolingNet(
        nn.MaxPool3d(2, return_indices=True),
        nn.MaxUnpool3d(2)),
    input_size=(1, 2, 4, 6),
    reference_fn=single_batch_reference_fn,
    fullname='MaxUnpool3d_net_no_batch_dim',
    check_gradgrad=False))
class _AdaptiveLogSoftmaxWithLoss(nn.AdaptiveLogSoftmaxWithLoss):
def __call__(self, input):
t = torch.tensor([0, 1, 4, 8]).to(input.device)
return nn.AdaptiveLogSoftmaxWithLoss.__call__(self, input, t).output
# AdaptiveLogSoftmaxWithLoss needs a target, so the wrapper class above adapts
# it to the single-input harness; TF32 tolerance is loosened for its matmuls.
add_test(NewModuleTest(
    constructor=lambda: _AdaptiveLogSoftmaxWithLoss(16, 10, [2, 6]),
    input_size=(4, 16),
    fullname='AdaptiveLogSoftmax',
    with_tf32=True,
    tf32_precision=0.005))
# The following are helpers for TestNN.test_affine_*
if torch.cuda.is_available():
    def device_():
        """Devices to run the affine tests on (CUDA available at import time)."""
        return ['cpu', 'cuda']
else:
    def device_():
        """Devices to run the affine tests on (CPU only)."""
        return ['cpu']
def angle_rad_():
    """Rotation angles in radians: 0, pi, pi/2, pi/4, plus one random angle in [0, 2*pi)."""
    turns = [0.0, 0.5, 0.25, 0.125, random.random()]
    return [fraction * math.pi * 2 for fraction in turns]
def axis_vector_():
    """Rotation axes: the three coordinate axes plus one random unit vector."""
    raw = (random.random(), random.random(), random.random())
    norm = sum(component ** 2 for component in raw) ** 0.5
    random_unit = tuple(component / norm for component in raw)
    return [(1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0), random_unit]
def input_size2d_():
    """Candidate NCHW input sizes for the 2-D affine tests."""
    return [[1, 1, h, w] for h, w in ((3, 5), (3, 3), (4, 4), (3, 4))]
def output_size2d_():
    """Candidate NCHW output sizes for the 2-D affine tests."""
    return [[1, 1, h, w] for h, w in ((5, 3), (3, 5), (4, 3), (5, 5), (6, 6))]
def input_size2dsq_():
    """Square NCHW input sizes for the 90-degree 2-D rotation tests."""
    return [[1, 1, side, side] for side in (2, 3, 4, 6)]
def output_size2dsq_():
    """Square NCHW output sizes for the 90-degree 2-D rotation tests."""
    return [[1, 1, side, side] for side in (2, 3, 4, 5, 6)]
def input_size3d_():
    """Candidate NCDHW input sizes for the 3-D affine tests."""
    dims = ((2, 2, 2), (2, 3, 4), (3, 3, 3), (4, 4, 4), (3, 4, 5))
    return [[1, 1, d, h, w] for d, h, w in dims]
def input_size3dsq_():
    """Cubic NCDHW input sizes for the 3-D rotation tests."""
    return [[1, 1, side, side, side] for side in (2, 3, 4, 6)]
def output_size3dsq_():
    """Cubic NCDHW output sizes for the 3-D rotation tests."""
    return [[1, 1, side, side, side] for side in (2, 3, 4, 5, 6)]
def output_size3d_():
    """Candidate NCDHW output sizes for the 3-D affine tests."""
    dims = ((2, 2, 2), (3, 3, 3), (3, 4, 5), (4, 3, 2), (5, 5, 5), (6, 6, 6))
    return [[1, 1, d, h, w] for d, h, w in dims]
def _buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad):
input_center = [(x - 1) / 2.0 for x in input_size]
output_center = [(x - 1) / 2.0 for x in output_size]
s = math.sin(angle_rad)
c = math.cos(angle_rad)
intrans_ary = np.array([
[1, 0, input_center[2]],
[0, 1, input_center[3]],
[0, 0, 1],
], dtype=np.float64)
inscale_ary = np.array([
[input_center[2], 0, 0],
[0, input_center[3], 0],
[0, 0, 1],
], dtype=np.float64)
rotation_ary = np.array([
[c, -s, 0],
[s, c, 0],
[0, 0, 1],
], dtype=np.float64)
outscale_ary = np.array([
[1.0 / output_center[2], 0, 0],
[0, 1.0 / output_center[3], 0],
[0, 0, 1],
], dtype=np.float64)
outtrans_ary = np.array([
[1, 0, -output_center[2]],
[0, 1, -output_center[3]],
[0, 0, 1],
], dtype=np.float64)
reorder_ary = np.array([
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
], dtype=np.float64)
transform_ary = np.dot(np.dot(np.dot(np.dot(
intrans_ary,
inscale_ary),
rotation_ary.T),
outscale_ary),
outtrans_ary)
grid_ary = np.dot(np.dot(np.dot(reorder_ary, rotation_ary.T), outscale_ary), outtrans_ary)
transform_tensor = torch.from_numpy((rotation_ary)).to(device, torch.float32)
transform_tensor = transform_tensor[:2].unsqueeze(0)
return transform_tensor, transform_ary, grid_ary
def _buildEquivalentAffineTransforms3d(device, input_size, output_size, angle_rad, axis_vector):
    """Build matching 3-D rotation transforms (about axis_vector) for torch and scipy.

    Returns (transform_tensor, transform_ary, grid_ary) analogous to the 2-D
    helper: a 1x3x4 float32 matrix for F.affine_grid, a 4x4 homogeneous matrix
    for scipy.ndimage.affine_transform, and a grid-coordinate mapping matrix.
    """
    input_center = [(x - 1) / 2.0 for x in input_size]
    output_center = [(x - 1) / 2.0 for x in output_size]

    s = math.sin(angle_rad)
    c = math.cos(angle_rad)
    c1 = 1 - c

    intrans_ary = np.array([
        [1, 0, 0, input_center[2]],
        [0, 1, 0, input_center[3]],
        [0, 0, 1, input_center[4]],
        [0, 0, 0, 1],
    ], dtype=np.float64)

    inscale_ary = np.array([
        [input_center[2], 0, 0, 0],
        [0, input_center[3], 0, 0],
        [0, 0, input_center[4], 0],
        [0, 0, 0, 1],
    ], dtype=np.float64)

    # Axis-angle rotation matrix with scipy's component ordering.
    l, m, n = axis_vector
    scipyRotation_ary = np.array([
        [l * l * c1 + c, m * l * c1 - n * s, n * l * c1 + m * s, 0],
        [l * m * c1 + n * s, m * m * c1 + c, n * m * c1 - l * s, 0],
        [l * n * c1 - m * s, m * n * c1 + l * s, n * n * c1 + c, 0],
        [0, 0, 0, 1],
    ], dtype=np.float64)

    # Same rotation with the axis components reversed for torch's ordering.
    z, y, x = axis_vector
    torchRotation_ary = np.array([
        [x * x * c1 + c, y * x * c1 - z * s, z * x * c1 + y * s, 0],
        [x * y * c1 + z * s, y * y * c1 + c, z * y * c1 - x * s, 0],
        [x * z * c1 - y * s, y * z * c1 + x * s, z * z * c1 + c, 0],
        [0, 0, 0, 1],
    ], dtype=np.float64)

    outscale_ary = np.array([
        [1.0 / output_center[2], 0, 0, 0],
        [0, 1.0 / output_center[3], 0, 0],
        [0, 0, 1.0 / output_center[4], 0],
        [0, 0, 0, 1],
    ], dtype=np.float64)

    outtrans_ary = np.array([
        [1, 0, 0, -output_center[2]],
        [0, 1, 0, -output_center[3]],
        [0, 0, 1, -output_center[4]],
        [0, 0, 0, 1],
    ], dtype=np.float64)

    # Reverses the order of the three spatial coordinates.
    reorder_ary = np.array([
        [0, 0, 1, 0],
        [0, 1, 0, 0],
        [1, 0, 0, 0],
        [0, 0, 0, 1],
    ], dtype=np.float64)

    transform_ary = np.dot(np.dot(np.dot(np.dot(
        intrans_ary,
        inscale_ary),
        np.linalg.inv(scipyRotation_ary)),
        outscale_ary),
        outtrans_ary)

    grid_ary = np.dot(np.dot(np.dot(reorder_ary, np.linalg.inv(scipyRotation_ary)), outscale_ary), outtrans_ary)

    transform_tensor = torch.from_numpy((torchRotation_ary)).to(device, torch.float32)
    transform_tensor = transform_tensor[:3].unsqueeze(0)
    return transform_tensor, transform_ary, grid_ary
# end TestNN.test_affine_* helpers
class TestNNDeviceType(NNTestCase):
def run_conv_double_back_test(self, kern, stride, padding, chan_in, chan_out, batch_size,
                              inp_size, dilation, no_weight, groups=1, use_cuda=False,
                              use_bias=True, dtype=torch.double):
    """Double-backward check for F.conv2d with the given geometry.

    Runs gradgradcheck on the conv; for float32 only verifies that the first
    gradient is itself differentiable (gradgradcheck is too imprecise there).
    """
    if use_cuda:
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    x = torch.randn(batch_size, chan_in, inp_size, inp_size, device=device,
                    dtype=dtype, requires_grad=True)
    weight = torch.randn(chan_out, chan_in // groups, kern, kern, device=device,
                         dtype=dtype, requires_grad=not no_weight)
    if use_bias:
        bias = torch.randn(chan_out, device=device, dtype=dtype, requires_grad=True)
    else:
        bias = None

    def func(*inputs):
        if use_bias:
            lx, lweight, lbias = inputs
        else:
            lx, lweight = inputs
            lbias = None
        # We disable cudnn during forward to avoid finite difference imprecision issues
        with cudnn.flags(enabled=False):
            out = F.conv2d(lx, lweight, lbias, stride, padding, dilation, groups)
        return out

    if use_bias:
        inputs = x, weight, bias
    else:
        inputs = x, weight

    dummy_out = func(*inputs)
    grad_y = torch.randn_like(dummy_out, device=device, dtype=dtype, requires_grad=True)

    # Issue #15353: test mkldnn double backward, don't run gradgradcheck due
    # to imprecision issues
    if dtype == torch.float:
        g, = torch.autograd.grad(dummy_out.sum(), x, create_graph=True)
        return g.requires_grad

    return gradgradcheck(func, inputs, (grad_y,))
def _test_dropout(self, cls, device, input, memory_format=torch.contiguous_format):
    """Shared dropout checker: output and grad keep the memory format, the mean
    stays near (1 - p) after inverse scaling, and eval mode is the identity."""
    p = 0.2
    input = input.to(device).fill_(1 - p)

    module = cls(p)
    input_var = input.clone(memory_format=memory_format).requires_grad_()
    output = module(input_var)
    self.assertTrue(output.is_contiguous(memory_format=memory_format))
    # Input filled with (1 - p) keeps the expected output mean at (1 - p).
    self.assertLess(abs(output.data.mean() - (1 - p)), 0.05)
    output.backward(input)
    self.assertTrue(input_var.grad.is_contiguous(memory_format=memory_format))
    self.assertLess(abs(input_var.grad.data.mean() - (1 - p)), 0.05)

    # Repeat with inplace=True; `input_var + 0` keeps the autograd leaf intact.
    module = cls(p, True)
    input_var = input.clone(memory_format=memory_format).requires_grad_()
    output = module(input_var + 0)
    self.assertTrue(output.is_contiguous(memory_format=memory_format))
    self.assertLess(abs(output.data.mean() - (1 - p)), 0.05)
    output.backward(input)
    self.assertTrue(input_var.grad.is_contiguous(memory_format=memory_format))
    self.assertLess(abs(input_var.grad.data.mean() - (1 - p)), 0.05)

    # check eval mode doesn't change anything
    for inplace in [True, False]:
        module = cls(p, inplace).eval()
        self.assertEqual(input, module(input))

    # Check that these don't raise errors
    module.__repr__()
    str(module)
def _test_dropout_discontiguous(self, cls, device, memory_format=torch.contiguous_format):
    """Dropout on a discontiguous view must preserve the data for p == 0 and p ~ 0."""
    # In this test, we verify that dropout preserves the layout and data for different memory formats.
    # We check whether, we get same values for the output of dropout, when the probability
    # of dropout is 0 or very close to 0.
    # Reference: https://github.com/pytorch/pytorch/issues/47176
    close_to_zero_p = 1e-10  # Should be almost zero but not zero, as for p=0 different path is taken
    for p in [0, close_to_zero_p]:
        inp = torch.ones(2, 3, 3, 3, device=device)
        inp_discontiguous = torch.empty(2, 3, 3, 6, device=device, memory_format=memory_format)[..., ::2]
        inp_discontiguous.copy_(inp)
        mod = cls(p=p)
        out = mod(inp_discontiguous)
        if p != 0:  # Zero will keep strides as is based on input.
            # When prob == 0, input stride (54, 18, 6, 2) -> output stride (54, 18, 6, 2)
            # When prob != 0, input stride (54, 18, 6, 2) -> output stride (27, 9, 3, 1)
            self.assertTrue(out.is_contiguous(memory_format=memory_format))
        self.assertEqual(inp_discontiguous, out)
def _test_dropout_stride_mean_preserve(self, cls, device):
def invert_perm(p):
d = {x: i for i, x in enumerate(p)}
return (d[0], d[1], d[2], d[3])
inp = torch.ones(2, 3, 4, 5, device=device)
shifts = [(0, 0), (1, 0), (0, 1), (1, 1)]
for perm in itertools.permutations((0, 1, 2, 3), r=4):
for shift in shifts:
for p in [1e-10, 0.3, 0.5, 0.7]:
mod = cls(p=p)
permuted_inp = inp.permute(perm).contiguous().permute(invert_perm(perm))
permuted_inp = permuted_inp[shift[0]:, shift[1]:, :, :]
out = mod(permuted_inp)
self.assertTrue(out.permute(perm).is_contiguous())
self.assertEqual(inp.mean(), out.mean(), rtol=0.5, atol=0.5)
if p == 1e-10:
self.assertEqual(permuted_inp, out)
else:
self.assertNotEqual(permuted_inp, out)
def _test_InstanceNorm_general(self, cls, input, device, dtype=torch.float):
    """Shared InstanceNorm checker: per-instance normalization stats,
    train/eval parity without running stats, running-stat capture with
    momentum=1, and channel shifting in eval mode."""
    # default case track_running_stats=False
    b, c = input.size(0), input.size(1)
    input_var = input.to(device=device, dtype=dtype).requires_grad_()

    IN = cls(c, eps=0).to(device, dtype)

    output = IN(input_var)
    out_reshaped = output.view(b * c, -1)

    mean = out_reshaped.mean(1)
    var = out_reshaped.var(1, unbiased=False)

    self.assertEqual(torch.abs(mean.data).mean(), 0, atol=1e-5, rtol=0)
    self.assertEqual(torch.abs(var.data).mean(), 1, atol=1e-5, rtol=0)

    # check that eval mode doesn't change behavior
    grad_out = torch.randn_like(output)
    res1 = output.data.clone()
    output.backward(grad_out)
    grad1 = input_var.grad.data.clone()

    IN.eval()
    output = IN(input_var)
    input_var.grad = None
    output.backward(grad_out)
    res2 = output.data
    grad2 = input_var.grad.data
    self.assertEqual(res1, res2)
    self.assertEqual(grad1, grad2)

    # If track_running_stats=True and momentum=1, running_mean/var should be
    # equal to mean/var of the input (with unbias correction)
    IN = cls(c, momentum=1, eps=0, track_running_stats=True).to(device, dtype)

    output = IN(input_var)

    input_reshaped = input_var.transpose(1, 0).reshape(c, -1)
    mean = input_reshaped.mean(1)

    input_reshaped = input_var.transpose(1, 0).reshape(c, b, -1)
    var = input_reshaped.var(2, unbiased=True)[:, :]

    self.assertEqual(torch.abs(mean.data - IN.running_mean).mean(), 0, atol=1e-5, rtol=0)
    self.assertEqual(torch.abs(var.data.mean(1) - IN.running_var).mean(), 0, atol=1e-5, rtol=0)

    # in eval mode, adding X * std to a channel in input should make the
    # corresponding channel in output have mean X
    IN.eval()
    delta = IN.running_var.sqrt() * torch.arange(c, device=device, dtype=dtype)
    delta = delta.view(-1, *[1 for _ in range(2, input.dim())])
    output = IN(input_var + delta)
    self.assertEqual(output.transpose(0, 1).reshape(c, -1).mean(1), torch.arange(c, dtype=dtype))
def _test_InstanceNorm_cuda_half(self, cls, input, device):
    """fp16 InstanceNorm forward/backward; when cuDNN is available, re-runs
    with the module converted to float and checks both paths agree."""
    # THNN
    input = input.to(device=device, dtype=torch.half).random_(1, 10).requires_grad_(True)
    m = cls(input.size(1), affine=True, track_running_stats=True).to(device, torch.half)
    thnn_output = m(input)
    thnn_output.sum().backward()
    thnn_input_grad = input.grad.data.clone()
    self.assertEqualTypeString(thnn_output, input)
    # cuDNN
    if TEST_CUDNN:
        input.grad = None
        m = m.float()
        cudnn_output = m(input)
        cudnn_output.sum().backward()
        cudnn_input_grad = input.grad.data.clone()
        self.assertEqualTypeString(cudnn_output, input)
        self.assertEqual(cudnn_output, thnn_output, atol=1e-4, rtol=0)
        self.assertEqual(cudnn_input_grad, thnn_input_grad, atol=1e-3, rtol=0)
def _test_LayerNorm_general(self, device, dtype=torch.float):
    """Shared LayerNorm checker over random shapes: normalization statistics,
    weight/bias application, and rejection of mismatched normalized_shape."""
    for i in range(2, 6):
        shape = torch.randint(3, 6, (i,), dtype=torch.long).tolist()
        x = torch.empty(*shape, device=device, dtype=dtype).uniform_(0, 10)
        normalized_ndim = random.randint(1, i - 1)  # inclusive
        normalized_shape = shape[-normalized_ndim:]
        unnormalized_shape = shape[:-normalized_ndim]

        # test that LN normalizes to mean 0 and stddev 1
        ln = nn.LayerNorm(normalized_shape, eps=0).to(device, dtype)
        ln.weight.data.fill_(1)
        ln.bias.data.fill_(0)
        output = ln(x)
        out_reshaped = output.view(*(unnormalized_shape + [-1]))
        mean = out_reshaped.mean(-1)
        var = out_reshaped.var(-1, unbiased=False)

        # bfloat16 needs a much looser tolerance than float.
        delta = 1e-1 if dtype == torch.bfloat16 else 1e-5
        self.assertEqual(torch.abs(mean.data).mean(), 0, atol=delta, rtol=0)
        self.assertEqual(torch.abs(var.data).mean(), 1, atol=delta, rtol=0)

        # test that LN applies weight and bias correctly
        scale, bias = torch.empty(2).uniform_(0.2, 2).tolist()
        ln.weight.data.fill_(scale)
        ln.bias.data.fill_(bias)
        output = ln(x)
        out_reshaped = output.view(*(unnormalized_shape + [-1]))
        mean = out_reshaped.mean(-1)
        var = out_reshaped.var(-1, unbiased=False)
        self.assertEqual(torch.abs(mean.data).mean(), bias, atol=delta, rtol=0)
        self.assertEqual(torch.abs(var.data).mean(), scale ** 2, atol=delta, rtol=0)

    # Each normalized_shape below is incompatible with its input shape.
    bad_norm_shape_input_shape = {
        (): (),
        (2, 3): (3,),
        (2,): (1, 2, 3),
        (10,): (2, 3),
        10: (2, 3),
    }
    for norm_shape, input_shape in bad_norm_shape_input_shape.items():
        ln = nn.LayerNorm(norm_shape)
        input = torch.empty(input_shape, device=device, dtype=dtype).uniform_(0, 10)
        self.assertRaises(RuntimeError, lambda: ln(input))
def _test_LayerNorm_cuda_half(self, device):
    """LayerNorm forward/backward should run in fp16 and keep the input's type."""
    sample = torch.empty(2, 3, 3, 2, device=device, dtype=torch.half).random_(1, 10).requires_grad_(True)
    layer = nn.LayerNorm([3, 2]).to(device, torch.half)
    result = layer(sample)
    result.sum().backward()
    self.assertEqualTypeString(result, sample)
def _test_GroupNorm_general(self, device, dtype=torch.float):
    """Shared GroupNorm checker: per-group normalization stats, affine
    weight/bias application, and rejection of invalid group counts."""
    # Maps input shape -> a valid number of groups for that channel count.
    good_shape_g = {
        (1, 2, 3, 4): 2,
        (2, 3, 10): 3,
        (3, 1, 1, 1, 2): 1,
        (2, 6, 4, 2, 2): 3,
        (1, 256, 1, 1): 32,
    }
    for shape_g, grad in product(good_shape_g.items(), [True, False]):
        shape, g = shape_g
        x = torch.empty(*shape, device=device, dtype=dtype).uniform_(0, 10)
        x.requires_grad_(grad)
        b = shape[0]
        c = shape[1]

        # test that GN normalizes to mean 0 and stddev 1
        gn = nn.GroupNorm(g, c, eps=0).to(device, dtype)
        gn.weight.data.fill_(1)
        gn.bias.data.fill_(0)
        output = gn(x)
        out_reshaped = output.view(b, g, -1)
        mean = out_reshaped.mean(-1)
        var = out_reshaped.var(-1, unbiased=False)
        self.assertEqual(torch.abs(mean).mean(), 0, atol=1e-5, rtol=0)
        self.assertEqual(torch.abs(var).mean(), 1, atol=1e-5, rtol=0)

        output.backward(torch.randn_like(output))
        if output.is_cuda:
            torch.cuda.synchronize()

        # test that GN applies weight and bias correctly
        scale = torch.empty(c, device=device, dtype=dtype).uniform_(0.2, 2)
        bias = torch.empty(c, device=device, dtype=dtype).uniform_(0.2, 2)
        gn.weight.data.copy_(scale)
        gn.bias.data.copy_(bias)
        output = gn(x)
        out_reshaped = output.view(b, c, -1)
        # Undo the affine transform, then the result must be normalized again.
        out_normed = (out_reshaped - bias.view(c, 1)) / scale.view(c, 1)
        out_normed_reshaped = out_normed.view(b, g, -1)
        mean = out_normed_reshaped.mean(-1)
        var = out_normed_reshaped.var(-1, unbiased=False)
        self.assertEqual(torch.abs(mean).mean(), 0, atol=1e-5, rtol=0)
        self.assertEqual(torch.abs(var).mean(), 1, atol=1e-5, rtol=0)

    # Group counts that do not divide the channel dimension must be rejected.
    bad_shape_g = {
        (1, 2, 3, 4): 3,
        (2, 3, 10): 2,
        (3, 1, 1, 1, 2): 10,
        (2, 6, 4, 2, 2): 4,
    }
    for shape, g in bad_shape_g.items():
        with self.assertRaises(ValueError):
            gn = nn.GroupNorm(g, shape[1])
def _test_GroupNorm_cuda_half(self):
    """GroupNorm forward/backward should run in fp16 on CUDA and keep the input's type."""
    sample = torch.zeros(2, 4, 3, 2, requires_grad=True).cuda().half().random_(1, 10)
    layer = nn.GroupNorm(2, 4).to("cuda", torch.half)
    result = layer(sample)
    result.sum().backward()
    self.assertEqualTypeString(result, sample)
def _test_module_empty_input(self, module, inp, check_size=True, inference=False):
if not inference:
inp.requires_grad_(True)
out = module(inp)
if not inference:
gO = torch.rand_like(out)
out.backward(gO)
if check_size:
self.assertEqual(out.size(), inp.size())
if not inference:
for p in module.parameters():
if p.requires_grad:
self.assertEqual(p.grad, torch.zeros_like(p.grad))
self.assertEqual(inp.grad, torch.zeros_like(inp))
def _test_module_empty_inputs(self, module, inputs):
for _inp in inputs:
_inp.requires_grad_(True)
out = module(*inputs)
gO = torch.rand_like(out)
out.backward(gO)
for p in module.parameters():
if p.requires_grad:
self.assertEqual(p.grad, torch.zeros_like(p.grad))
for _inp in inputs:
self.assertEqual(_inp.grad, torch.zeros_like(_inp))
@unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),
                 "Scipy v1.0 and/or numpy not found")
@tf32_on_and_off()
def test_affine_2d_rotate0(self, device):
    """affine_grid + grid_sample must match scipy.ndimage.affine_transform for a 0-radian rotation."""
    # scipy before 1.0.0 do not support homogeneous coordinate
    # scipy.ndimage.affine_transform, so we need to skip.
    input_size = [1, 1, 3, 3]
    input_ary = np.array(np.random.random(input_size), dtype=np.float32)
    output_size = [1, 1, 5, 5]
    angle_rad = 0.

    transform_tensor, transform_ary, offset = \
        _buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad)

    # scipy reference result on the raw HxW array.
    scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(
        input_ary[0, 0],
        transform_ary,
        offset=offset,
        output_shape=output_size[2:],
        order=1,
        mode='nearest',
        prefilter=False))

    affine_tensor = torch.nn.functional.affine_grid(
        transform_tensor,
        torch.Size(output_size),
        align_corners=True
    )

    gridsample_ary = torch.nn.functional.grid_sample(
        torch.tensor(input_ary, device=device).to(device),
        affine_tensor,
        padding_mode='border',
        align_corners=True
    ).to('cpu')

    self.assertEqual(scipy_ary.mean(), gridsample_ary.mean())
    self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))
@unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),
                 "Scipy v1.0 and/or numpy not found")
@tf32_on_and_off(0.001)
def test_affine_2d_rotate90(self, device):
    """90-degree rotations over square size combinations must match scipy."""
    # scipy before 1.0.0 do not support homogeneous coordinate
    # scipy.ndimage.affine_transform, so we need to skip.
    for input_size2dsq, output_size2dsq in \
            itertools.product(input_size2dsq_(), output_size2dsq_()):
        input_size = input_size2dsq
        input_ary = np.array(np.random.random(input_size), dtype=np.float32)
        output_size = output_size2dsq
        angle_rad = 0.25 * math.pi * 2

        transform_tensor, transform_ary, offset = \
            _buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad)

        scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(
            input_ary[0, 0],
            transform_ary,
            offset=offset,
            output_shape=output_size[2:],
            order=1,
            mode='nearest',
            prefilter=True))

        if input_size2dsq == output_size2dsq:
            # Same-sized square: a 90-degree turn permutes the corners exactly.
            self.assertEqual(scipy_ary.mean(), input_ary.mean())
            self.assertEqual(scipy_ary[0, 0], input_ary[0, 0, 0, -1])
            self.assertEqual(scipy_ary[0, -1], input_ary[0, 0, -1, -1])
            self.assertEqual(scipy_ary[-1, -1], input_ary[0, 0, -1, 0])
            self.assertEqual(scipy_ary[-1, 0], input_ary[0, 0, 0, 0])

        affine_tensor = torch.nn.functional.affine_grid(
            transform_tensor,
            torch.Size(output_size),
            align_corners=True
        )

        gridsample_ary = torch.nn.functional.grid_sample(
            torch.tensor(input_ary, device=device).to(device),
            affine_tensor,
            padding_mode='border',
            align_corners=True
        ).to('cpu')

        self.assertEqual(scipy_ary.mean(), gridsample_ary.mean())
        self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))
@unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),
                 "Scipy v1.0 and/or numpy not found")
@tf32_on_and_off(0.005)
def test_affine_2d_rotate45(self, device):
    """A 45-degree rotation of a hand-built asymmetric pattern must match scipy."""
    # scipy before 1.0.0 do not support homogeneous coordinate
    # scipy.ndimage.affine_transform, so we need to skip.
    input_size = [1, 1, 3, 3]
    input_ary = np.array(np.zeros(input_size), dtype=np.float32)
    # Asymmetric pattern so a wrong rotation direction is detected.
    input_ary[0, 0, 0, :] = 0.5
    input_ary[0, 0, 2, 2] = 1.0
    output_size = [1, 1, 3, 3]
    angle_rad = 0.125 * math.pi * 2

    transform_tensor, transform_ary, offset = \
        _buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad)

    scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(
        input_ary[0, 0],
        transform_ary,
        offset=offset,
        output_shape=output_size[2:],
        order=1,
        mode='nearest',
        prefilter=False))

    affine_tensor = torch.nn.functional.affine_grid(
        transform_tensor,
        torch.Size(output_size),
        align_corners=True
    )

    gridsample_ary = torch.nn.functional.grid_sample(
        torch.tensor(input_ary, device=device).to(device),
        affine_tensor,
        padding_mode='border',
        align_corners=True
    ).to('cpu')

    self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))
@unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),
                 "Scipy v1.0 and/or numpy not found")
@tf32_on_and_off(0.005)
def test_affine_2d_rotateRandom(self, device):
    """Random-angle 2-D rotations: affine_grid coordinates must match grid_ary
    per pixel, and grid_sample output must match scipy."""
    # scipy before 1.0.0 do not support homogeneous coordinate
    # scipy.ndimage.affine_transform, so we need to skip.
    for angle_rad, input_size2d, output_size2d in \
            itertools.product(angle_rad_(), input_size2d_(), output_size2d_()):

        input_size = input_size2d
        input_ary = np.array(np.random.random(input_size), dtype=np.float32).round(3)
        output_size = output_size2d

        # Pin the corners so orientation mistakes are caught, not averaged away.
        input_ary[0, 0, 0, 0] = 2
        input_ary[0, 0, 0, -1] = 4
        input_ary[0, 0, -1, 0] = 6
        input_ary[0, 0, -1, -1] = 8

        transform_tensor, transform_ary, grid_ary = \
            _buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad)

        scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(
            input_ary[0, 0],
            transform_ary,
            output_shape=output_size[2:],
            order=1,
            mode='nearest',
            prefilter=False))

        affine_tensor = torch.nn.functional.affine_grid(
            transform_tensor,
            torch.Size(output_size),
            align_corners=True
        )

        gridsample_ary = torch.nn.functional.grid_sample(
            torch.tensor(input_ary, device=device).to(device),
            affine_tensor,
            padding_mode='border',
            align_corners=True
        ).to('cpu')

        affine_tensor = affine_tensor.to('cpu')

        # Each sampling coordinate must equal the homogeneous-matrix mapping.
        for r in range(affine_tensor.size(1)):
            for c in range(affine_tensor.size(2)):
                grid_out = np.dot(grid_ary, [r, c, 1])
                self.assertEqual(affine_tensor[0, r, c], grid_out[:2], exact_dtype=False)

        self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))
@unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),
                 "Scipy v1.0 and/or numpy not found")
@tf32_on_and_off(0.005)
def test_affine_3d_rotateRandom(self, device):
    """Random-axis/angle 3-D rotations: affine_grid coordinates must match
    grid_ary per voxel, and grid_sample output must match scipy."""
    # scipy before 1.0.0 do not support homogeneous coordinate
    # scipy.ndimage.affine_transform, so we need to skip.
    for angle_rad, axis_vector, input_size3d, output_size3d in \
            itertools.product(angle_rad_(), axis_vector_(), input_size3d_(), output_size3d_()):
        input_size = input_size3d
        input_ary = np.array(np.random.random(input_size), dtype=np.float32)
        output_size = output_size3d

        # Pin all eight corners so orientation mistakes are caught.
        input_ary[0, 0, 0, 0, 0] = 2
        input_ary[0, 0, 0, 0, -1] = 3
        input_ary[0, 0, 0, -1, 0] = 4
        input_ary[0, 0, 0, -1, -1] = 5
        input_ary[0, 0, -1, 0, 0] = 6
        input_ary[0, 0, -1, 0, -1] = 7
        input_ary[0, 0, -1, -1, 0] = 8
        input_ary[0, 0, -1, -1, -1] = 9

        transform_tensor, transform_ary, grid_ary = \
            _buildEquivalentAffineTransforms3d(device, input_size, output_size, angle_rad, axis_vector)

        scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(
            input_ary[0, 0],
            transform_ary,
            output_shape=output_size[2:],
            order=1,
            mode='nearest',
            prefilter=False))

        affine_tensor = torch.nn.functional.affine_grid(
            transform_tensor,
            torch.Size(output_size),
            align_corners=True
        )

        gridsample_ary = torch.nn.functional.grid_sample(
            torch.tensor(input_ary, device=device).to(device),
            affine_tensor,
            padding_mode='border',
            align_corners=True
        ).to('cpu')

        affine_tensor = affine_tensor.to('cpu')

        # Each sampling coordinate must equal the homogeneous-matrix mapping.
        for i in range(affine_tensor.size(1)):
            for r in range(affine_tensor.size(2)):
                for c in range(affine_tensor.size(3)):
                    grid_out = np.dot(grid_ary, [i, r, c, 1])
                    self.assertEqual(affine_tensor[0, i, r, c], grid_out[:3], exact_dtype=False)

        self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))
@onlyCUDA
@skipCUDAIfNoCudnn
@dtypes(*floating_and_complex_types_and(torch.half, *[torch.bfloat16] if AMPERE_OR_ROCM else []))
def test_Conv2d_deterministic_cudnn(self, device, dtype):
    """Two convs with identical weights must give bit-equal outputs and grads
    under deterministic cuDNN, even with benchmark mode enabled."""
    inputs = torch.randn(2, 3, 5, 5, device=device, dtype=dtype, requires_grad=True)
    with cudnn.flags(enabled=True, benchmark=True, deterministic=True):
        conv1 = torch.nn.Conv2d(3, 3, 3).to(device, dtype)
        conv2 = torch.nn.Conv2d(3, 3, 3).to(device, dtype)
        # Make conv2 a clone of conv1 so any output difference is nondeterminism.
        conv2.bias.data.copy_(conv1.bias.data)
        conv2.weight.data.copy_(conv1.weight.data)
        out1 = conv1(inputs)
        out2 = conv2(inputs)
        self.assertEqual(out1, out2, atol=0.0, rtol=0)
        y = torch.randn(out1.size(), device=device, dtype=dtype)
        out1.backward(y)
        out2.backward(y)
        self.assertEqual(conv1.bias.grad.data, conv2.bias.grad.data, atol=0.0, rtol=0)
        self.assertEqual(conv1.weight.grad.data, conv2.weight.grad.data, atol=0.0, rtol=0)
@onlyCUDA
@dtypes(*floating_types_and(torch.half, *[torch.bfloat16] if AMPERE_OR_ROCM else []))
def test_Conv2d_large_workspace(self, device, dtype):
    """Conv shapes with huge cuDNN workspaces must not run out of memory."""
    # These sizes require huge cuDNN workspaces. Make sure we choose a
    # reasonable algorithm that does not run out of memory.
    shapes = [
        (1, 256, 109, 175),
        (1, 256, 80, 128),
        (1, 256, 120, 192),
    ]

    def exercise(benchmark):
        with torch.backends.cudnn.flags(benchmark=benchmark):
            layer = torch.nn.Conv2d(256, 256, kernel_size=3, padding=1).to(device, dtype)
            for shape in shapes:
                sample = torch.randn(shape, device=device, dtype=dtype)
                result = layer(sample.detach().clone().requires_grad_())
                result.backward(torch.ones_like(result))

    # Exercise both the heuristic and the benchmarked algorithm pickers.
    exercise(benchmark=False)
    exercise(benchmark=True)
@onlyCUDA
@dtypes(torch.half, torch.float)
def test_ConvTranspose2d_large_output_padding(self, device, dtype):
    """Chained transposed convs with output_padding=1 must run forward and backward."""
    channels = [128, 64, 32, 3]
    stages = [
        torch.nn.ConvTranspose2d(c_in, c_out, kernel_size=3, stride=2,
                                 padding=1, output_padding=1).to(device=device, dtype=dtype)
        for c_in, c_out in zip(channels, channels[1:])
    ]
    x = torch.rand(1, 128, 6, 6, device=device, dtype=dtype, requires_grad=True)
    for stage in stages:
        x = stage(x)
    x.backward(torch.randn_like(x))
    torch.cuda.synchronize()
@onlyCUDA
@tf32_on_and_off(0.01)
@dtypes(torch.float, torch.double, torch.half)
# Very similar to test_Conv2d_naive_groups but with special care to handle
# the number of groups == number of input channels
@torch.backends.cudnn.flags(enabled=True, benchmark=False)
def test_Conv2d_depthwise_naive_groups(self, device, dtype):
for depth_multiplier in [1, 2]:
m = nn.Conv2d(2, 2 * depth_multiplier, kernel_size=3, groups=2).to(device, dtype)
i = torch.randn(2, 2, 6, 6, device="cuda", dtype=dtype).div_(2).requires_grad_()
output = m(i)
grad_output = torch.randn(2, 2 * depth_multiplier, 4, 4, device=device, dtype=dtype) / 2
output.backward(grad_output)
offset = 1 * depth_multiplier
m1 = nn.Conv2d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype)
m1.weight.data = m.weight.data[:offset].clone()
m1.bias.data = m.bias.data[:offset].clone()
i1 = i.detach()[:, :1].clone().requires_grad_()
output1 = m1(i1)
output1.backward(grad_output[:, :offset].contiguous())
m2 = nn.Conv2d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype)
m2.weight.data.copy_(m.weight.data[offset:])
m2.bias.data.copy_(m.bias.data[offset:])
i2 = i.detach()[:, 1:].clone().requires_grad_()
output2 = m2(i2)
output2.backward(grad_output[:, offset:].contiguous())
self.assertEqual(output, torch.cat([output1, output2], 1),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(i.grad.data,
torch.cat([i1.grad.data, i2.grad.data], 1),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(m.bias.grad.data,
torch.cat([m1.bias.grad.data,
m2.bias.grad.data], 0),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(m.weight.grad.data,
torch.cat([m1.weight.grad.data,
m2.weight.grad.data], 0),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
@onlyCUDA
@dtypes(torch.float, torch.double, torch.half)
@tf32_on_and_off(0.005)
@torch.backends.cudnn.flags(enabled=True, benchmark=False)
def test_Conv3d_depthwise_naive_groups(self, device, dtype):
for depth_multiplier in [1, 2]:
m = nn.Conv3d(2, 2 * depth_multiplier, kernel_size=3, groups=2).to(device, dtype)
i = torch.randn(2, 2, 6, 6, 6, device="cuda", dtype=dtype).div_(2).requires_grad_()
output = m(i)
grad_output = torch.randn(2, 2 * depth_multiplier, 4, 4, 4, device=device, dtype=dtype) / 2
output.backward(grad_output)
offset = 1 * depth_multiplier
m1 = nn.Conv3d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype)
m1.weight.data = m.weight.data[:offset].clone()
m1.bias.data = m.bias.data[:offset].clone()
i1 = i.detach()[:, :1].clone().requires_grad_()
output1 = m1(i1)
output1.backward(grad_output[:, :offset].contiguous())
m2 = nn.Conv3d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype)
m2.weight.data.copy_(m.weight.data[offset:])
m2.bias.data.copy_(m.bias.data[offset:])
i2 = i.detach()[:, 1:].clone().requires_grad_()
output2 = m2(i2)
output2.backward(grad_output[:, offset:].contiguous())
self.assertEqual(output, torch.cat([output1, output2], 1),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(i.grad.data,
torch.cat([i1.grad.data, i2.grad.data], 1),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(m.bias.grad.data,
torch.cat([m1.bias.grad.data,
m2.bias.grad.data], 0),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(m.weight.grad.data,
torch.cat([m1.weight.grad.data,
m2.weight.grad.data], 0),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
@onlyCUDA
@dtypes(*floating_types_and(torch.half, *[torch.bfloat16] if AMPERE_OR_ROCM else []))
def test_noncontig_conv_grad(self, device, dtype):
# FIXME: remove after adding non-contiguous grad tests for all modules
module = nn.Conv2d(3, 5, kernel_size=3, padding=1).to(device, dtype)
input = torch.randn(2, 3, 10, 10, dtype=dtype, device=device, requires_grad=True)
output = module(input)
grad = torch.randn(2, 2, 5, 10, 10, dtype=dtype, device=device)[:, 1]
assert not grad.is_contiguous()
output.backward(grad, retain_graph=True)
self.assertIsNotNone(input.grad)
result = input.grad.data.clone()
input.grad.data.zero_()
output.backward(grad.contiguous())
self.assertEqual(result, input.grad.data, atol=dtype2prec_DONTUSE[dtype], rtol=0)
@onlyCUDA
@dtypes(torch.float, torch.half)
def test_batchnorm_large_batch(self, device, dtype):
bn = nn.BatchNorm2d(1).to(device, dtype)
data = torch.rand(880801, 1, 1, 1, device=device, dtype=dtype)
out = bn(data).sum().backward()
    @onlyCUDA
    @dtypes(torch.double)
    def test_conv_double_backward(self, device, dtype):
        # Sweep a grid of conv configurations and assert the double-backward
        # (gradgrad) check in run_conv_double_back_test passes for each.
        with torch.backends.cudnn.flags(deterministic=True):
            # Double backward only runs with DoubleTensor due to precision reason
            batch_size = 1
            for kern, inp_size, dilations in [(3, 5, [1, 2]), (4, 9, [1])]:
                for stride, padding, chan_in, chan_out, dilation in product([1], [2], [2], [3], dilations):
                    # NOTE(review): stride is always 1 in this sweep, so
                    # no_weight is always False — presumably a leftover from a
                    # wider parameter sweep; confirm before extending.
                    no_weight = stride == 2
                    result = self.run_conv_double_back_test(kern, stride,
                                                            padding, chan_in, chan_out,
                                                            batch_size, inp_size, dilation,
                                                            no_weight, use_cuda=True, dtype=dtype)
                    self.assertTrue(result,
                                    "Conv double backward test failed with parameters:" +
                                    "\nkern: " + str(kern) +
                                    "\nstride: " + str(stride) +
                                    "\npadding: " + str(padding) +
                                    "\nchan_in: " + str(chan_in) +
                                    "\nchan_out: " + str(chan_out) +
                                    "\nbatch_size: " + str(batch_size) +
                                    "\ninp_size: " + str(inp_size) +
                                    "\ndilation: " + str(dilation))
def test_conv_double_backward_no_bias(self):
kern = 3
stride = 2
chan_in, chan_out = 2, 4
batch_size = 2
inp_size = 5
padding = 1
dilation = 1
no_weight = False
use_bias = True
result = self.run_conv_double_back_test(kern, stride,
padding, chan_in, chan_out,
batch_size, inp_size, dilation,
no_weight, use_bias=use_bias)
self.assertTrue(result,
"Conv double backward test failed with parameters:" +
"\nkern: " + str(kern) +
"\nstride: " + str(stride) +
"\npadding: " + str(padding) +
"\nchan_in: " + str(chan_in) +
"\nchan_out: " + str(chan_out) +
"\nbatch_size: " + str(batch_size) +
"\ninp_size: " + str(inp_size) +
"\ndilation: " + str(dilation))
def test_conv_double_backward_groups(self):
kern = 3
stride = 1
padding = 2
chan_in, chan_out = 2, 4
batch_size = 2
inp_size = 6
dilation = 1
no_weight = False
groups = 2
result = self.run_conv_double_back_test(kern, stride,
padding, chan_in * groups, chan_out * groups,
batch_size, inp_size, dilation,
no_weight, groups=groups)
self.assertTrue(result,
"Conv double backward test failed with parameters:" +
"\nkern: " + str(kern) +
"\nstride: " + str(stride) +
"\npadding: " + str(padding) +
"\nchan_in: " + str(chan_in) +
"\nchan_out: " + str(chan_out) +
"\nbatch_size: " + str(batch_size) +
"\ninp_size: " + str(inp_size) +
"\ndilation: " + str(dilation) +
"\ngroups: " + str(groups))
def test_conv_double_backward_stride(self):
batch_size = 2
# Cannot provide ggW when stride is > 1
for kern, inp_size, dilations in [(3, 5, [1, 2]), (3, 7, [1])]:
for stride, padding, chan_in, chan_out, dilation in product([2], [0, 1], [1], [2], dilations):
no_weight = False
self.run_conv_double_back_test(kern, stride,
padding, chan_in, chan_out,
batch_size, inp_size, dilation,
no_weight)
@dtypes(torch.float, torch.cfloat)
@torch.backends.cudnn.flags(enabled=True, benchmark=False)
def test_conv1d_same_padding(self, device, dtype):
# Test padding='same' outputs the correct shape
test_args = [
# in_size
range(50, 55),
# kernel_size
[1, 2, 3, 8],
# dilation
range(1, 4),
# stride
[1],
]
for in_size, k_size, dilation, stride in itertools.product(*test_args):
x = torch.rand(1, 1, in_size, device=device, dtype=dtype)
y = torch.rand(1, 1, k_size, device=device, dtype=dtype)
z = F.conv1d(x, y, padding='same', dilation=dilation, stride=stride)
self.assertEqual(z.size(2), int(math.ceil(in_size / stride)))
# Compare F.conv1d padding='same' output against manual padding
# Without strides/dilation
x = torch.rand(1, 1, 12, device=device, dtype=dtype)
y = torch.rand(1, 1, 3, device=device, dtype=dtype)
expect = F.conv1d(x, y, padding=1)
actual = F.conv1d(x, y, padding='same')
self.assertEqual(expect, actual)
# With dilation
x = torch.rand(1, 1, 12, device=device, dtype=dtype)
y = torch.rand(1, 1, 4, device=device, dtype=dtype)
expect = F.conv1d(x, y, padding=3, dilation=2)
actual = F.conv1d(x, y, padding='same', dilation=2)
self.assertEqual(expect, actual)
# Dilation with asymmetric padding
expect = F.conv1d(x, y, padding=5, dilation=3)[..., 1:]
actual = F.conv1d(x, y, padding='same', dilation=3)
self.assertEqual(expect, actual)
@dtypes(torch.float, torch.cfloat)
def test_conv2d_same_padding(self, device, dtype):
if dtype is torch.cfloat:
rtol, atol = 2e-6, 2e-6
else:
rtol, atol = None, None
# Compare F.conv2d padding='same' output against manual padding
# Without strides/dilation
x = torch.rand(1, 1, 10, 11, device=device, dtype=dtype)
y = torch.rand(1, 1, 4, 5, device=device, dtype=dtype)
expect = F.conv2d(x, y, padding=(2, 2))[..., 1:, :]
actual = F.conv2d(x, y, padding='same')
self.assertEqual(expect, actual, rtol=rtol, atol=atol)
# With dilation
y = torch.rand(1, 1, 3, 4, device=device, dtype=dtype)
expect = F.conv2d(x, y, padding=(2, 3), dilation=2)
actual = F.conv2d(x, y, padding='same', dilation=2)
self.assertEqual(expect, actual, rtol=rtol, atol=atol)
# Dilation with asymmetric padding
y = torch.rand(1, 1, 4, 4, device=device, dtype=dtype)
expect = F.conv2d(x, y, padding=5, dilation=3)[..., 1:, 1:]
actual = F.conv2d(x, y, padding='same', dilation=3)
self.assertEqual(expect, actual, rtol=rtol, atol=atol)
@dtypes(torch.float, torch.cfloat)
def test_conv3d_same_padding(self, device, dtype):
if dtype is torch.cfloat:
rtol, atol = 2e-6, 2e-6
else:
rtol, atol = None, None
# Compare F.conv3d padding='same' output against manual padding
# Without strides/dilation
x = torch.rand(1, 1, 10, 11, 12, device=device, dtype=dtype)
y = torch.rand(1, 1, 1, 2, 5, device=device, dtype=dtype)
expect = F.conv3d(x, y, padding=(0, 1, 2))[..., :, 1:, :]
actual = F.conv3d(x, y, padding='same')
self.assertEqual(expect, actual, rtol=rtol, atol=atol)
# With dilation
expect = F.conv3d(x, y, padding=(0, 1, 4), dilation=2)
actual = F.conv3d(x, y, padding='same', dilation=2)
self.assertEqual(expect, actual, rtol=rtol, atol=atol)
# Dilation with asymmetric padding
y = torch.rand(1, 1, 4, 4, 4, device=device, dtype=dtype)
expect = F.conv3d(x, y, padding=5, dilation=3)[..., 1:, 1:, 1:]
actual = F.conv3d(x, y, padding='same', dilation=3)
self.assertEqual(expect, actual, rtol=rtol, atol=atol)
@dtypes(torch.float, torch.cfloat)
def test_conv1d_valid_padding(self, device, dtype):
# Test F.conv1d padding='valid' is the same as no padding
x = torch.rand(1, 1, 10, device=device, dtype=dtype)
y = torch.rand(1, 1, 4, device=device, dtype=dtype)
expect = F.conv1d(x, y)
actual = F.conv1d(x, y, padding='valid')
self.assertEqual(expect, actual)
@dtypes(torch.float, torch.cfloat)
def test_conv2d_valid_padding(self, device, dtype):
# Test F.conv2d padding='valid' is the same as no padding
x = torch.rand(1, 1, 1, 10, device=device, dtype=dtype)
y = torch.rand(1, 1, 1, 4, device=device, dtype=dtype)
expect = F.conv2d(x, y)
actual = F.conv2d(x, y, padding='valid')
self.assertEqual(expect, actual)
@dtypes(torch.float, torch.cfloat)
def test_conv3d_valid_padding(self, device, dtype):
# Test F.conv3d padding='valid' is the same as no padding
x = torch.rand(1, 1, 1, 1, 10, dtype=dtype, device=device)
y = torch.rand(1, 1, 1, 1, 4, dtype=dtype, device=device)
expect = F.conv3d(x, y)
actual = F.conv3d(x, y, padding='valid')
self.assertEqual(expect, actual)
@dtypes(torch.float, torch.cfloat)
def test_conv1d_same_padding_backward(self, device, dtype):
# Test F.conv1d gradients work with padding='same'
x = torch.rand(1, 1, 12, dtype=dtype, device=device, requires_grad=True)
y = torch.rand(1, 1, 4, dtype=dtype, device=device, requires_grad=True)
# Symmetric padding
z = F.conv1d(x, y, padding=3, dilation=2)
z.sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
z = F.conv1d(x, y, padding='same', dilation=2)
z.sum().backward()
self.assertEqual(gx_expect, x.grad)
self.assertEqual(gy_expect, y.grad)
x.grad, y.grad = None, None
# Asymmetric padding
z = F.conv1d(x, y, padding=2)[..., 1:]
z.sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
z = F.conv1d(x, y, padding='same')
z.sum().backward()
self.assertEqual(gx_expect, x.grad)
self.assertEqual(gy_expect, y.grad)
@dtypes(torch.float, torch.cfloat)
def test_conv2d_same_padding_backward(self, device, dtype):
# Test F.conv2d gradients work with padding='same'
x = torch.rand(1, 1, 10, 11, device=device, dtype=dtype, requires_grad=True)
y = torch.rand(1, 1, 4, 5, device=device, dtype=dtype, requires_grad=True)
# Symmetric padding
z = F.conv2d(x, y, padding=(3, 4), dilation=2)
z.sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
z = F.conv2d(x, y, padding='same', dilation=2)
z.sum().backward()
self.assertEqual(gx_expect, x.grad)
self.assertEqual(gy_expect, y.grad)
x.grad, y.grad = None, None
# Asymmetric padding
y = torch.rand(1, 1, 4, 4, device=device, dtype=dtype, requires_grad=True)
z = F.conv2d(x, y, padding=2)[..., 1:, 1:]
z.sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
z = F.conv2d(x, y, padding='same')
z.sum().backward()
self.assertEqual(gx_expect, x.grad)
self.assertEqual(gy_expect, y.grad)
    @dtypes(torch.double, torch.cdouble)
    def test_conv3d_same_padding_backward(self, device, dtype):
        # Forward-mode AD checks are disabled on XLA (check_forward_ad=False).
        check_forward_ad = torch.device(device).type != 'xla'

        # Test F.conv3d gradients work with padding='same'
        x = torch.rand(1, 1, 1, 11, 12, dtype=dtype, device=device, requires_grad=True)
        y = torch.rand(1, 1, 1, 2, 5, dtype=dtype, device=device, requires_grad=True)

        # Symmetric padding: padding='same' with dilation=2 must produce the
        # same gradients as explicit padding (0, 1, 4).
        z = F.conv3d(x, y, padding=(0, 1, 4), dilation=2)
        z.sum().backward()
        gx_expect, gy_expect = x.grad, y.grad
        x.grad, y.grad = None, None

        z = F.conv3d(x, y, padding='same', dilation=2)
        z.sum().backward()
        self.assertEqual(gx_expect, x.grad)
        self.assertEqual(gy_expect, y.grad)
        x.grad, y.grad = None, None
        gradcheck(lambda x, y: F.conv3d(x, y, padding='same', dilation=2), (x, y),
                  check_forward_ad=check_forward_ad, nondet_tol=1e-5)
        if torch.device(device).type != 'cuda':
            # https://github.com/pytorch/pytorch/issues/70702
            gradgradcheck(lambda x, y: F.conv3d(x, y, padding='same', dilation=2), (x, y),
                          check_fwd_over_rev=True)

        # Asymmetric padding: even kernel dims mean 'same' output equals the
        # symmetric explicit padding with one leading element trimmed.
        y = torch.rand(1, 1, 1, 4, 4, dtype=dtype, device=device, requires_grad=True)
        z = F.conv3d(x, y, padding=2)[..., 1:, 1:]
        z.sum().backward()
        gx_expect, gy_expect = x.grad, y.grad
        x.grad, y.grad = None, None

        z = F.conv3d(x, y, padding='same')
        z.sum().backward()
        self.assertEqual(gx_expect, x.grad)
        self.assertEqual(gy_expect, y.grad)
        gradcheck(lambda x, y: F.conv3d(x, y, padding='same'), (x, y),
                  check_forward_ad=check_forward_ad, nondet_tol=1e-5)
        if torch.device(device).type != 'cuda':
            # https://github.com/pytorch/pytorch/issues/70702
            gradgradcheck(lambda x, y: F.conv3d(x, y, padding='same'), (x, y),
                          check_fwd_over_rev=True)
@dtypes(torch.float, torch.cfloat)
def test_conv1d_valid_padding_backward(self, device, dtype):
# Test F.conv1d gradients work with padding='valid'
x = torch.rand(1, 1, 10, dtype=dtype, device=device, requires_grad=True)
y = torch.rand(1, 1, 4, dtype=dtype, device=device, requires_grad=True)
F.conv1d(x, y, padding=0).sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
F.conv1d(x, y, padding='valid').sum().backward()
gx_actual, gy_actual = x.grad, y.grad
self.assertEqual(gx_expect, gx_actual)
self.assertEqual(gy_expect, gy_actual)
    @unittest.skipIf(not TEST_SCIPY, "Scipy required for the test.")
    @dtypes(torch.float, torch.cfloat)
    @parametrize_test("mode", ('valid', 'same'))
    def test_conv1d_vs_scipy(self, device, dtype, mode):
        # Cross-check F.conv1d against scipy.signal.convolve for both
        # 'valid' and 'same' padding modes.
        t = make_tensor((1, 10), device=device, dtype=dtype)
        feat_dim = t.shape[1]
        weight_even = make_tensor((1, 1, 4), device=device, dtype=dtype)
        weight_odd = make_tensor((1, 1, 5), device=device, dtype=dtype)
        def _test(t, weight, mode):
            # SciPy expects two 1-D inputs.
            t_a = t.view(-1).cpu().numpy()
            w_a = weight.view(-1).cpu().numpy()
            expected = scipy.signal.convolve(t_a, w_a, mode=mode)
            kwargs = {'padding': mode}
            if mode == 'same':
                # `same` padding in PyTorch conv1d is different
                # from SciPy: pad manually instead, then drop the kwarg.
                p = weight.shape[2] // 2
                t = torch.nn.functional.pad(t, (p, p))
                # We have already taken care of padding
                kwargs.pop("padding")
            # second input is flipped in SciPy's convolve (true convolution
            # vs PyTorch's cross-correlation)
            weight_flipped = torch.flip(weight, (2,))
            actual = torch.nn.functional.conv1d(t, weight_flipped, **kwargs).squeeze(0)
            if mode == 'same':
                # Crop back to the original feature length.
                actual = actual[:feat_dim]
            self.assertEqual(actual, expected)
        # Global dtype for this test suite is torch.double
        # This leads to change in type-promotion
        # and conv1d outputs `complex128` for `complex64` input.
        with set_default_dtype(torch.float):
            _test(t, weight_even, mode)
            _test(t, weight_odd, mode)
    @unittest.skipIf(not TEST_SCIPY, "Scipy required for the test.")
    @dtypes(torch.float, torch.cfloat)
    @parametrize_test("mode", ('valid', 'same'))
    def test_conv2d_vs_scipy(self, device, dtype, mode):
        # Cross-check F.conv2d against scipy.signal.convolve2d for both
        # 'valid' and 'same' padding modes.
        t = make_tensor((1, 5, 10), device=device, dtype=dtype)
        weight_even = make_tensor((1, 1, 2, 4), device=device, dtype=dtype)
        weight_odd = make_tensor((1, 1, 3, 5), device=device, dtype=dtype)
        def _test(t, weight, mode):
            # SciPy expects two 2-D inputs.
            t_a = t.squeeze(0).cpu().numpy()
            w_a = weight.squeeze(0).squeeze(0).cpu().numpy()
            expected = scipy.signal.convolve2d(t_a, w_a, mode=mode)
            kwargs = {'padding': mode}
            if mode == 'same':
                # `same` padding in PyTorch conv2d is different
                # from SciPy: pad manually (F.pad takes pads for the last
                # dimension first), then drop the kwarg.
                left_right_pad = weight.shape[3] // 2
                top_bottom_pad = weight.shape[2] // 2
                p = (left_right_pad, left_right_pad, top_bottom_pad, top_bottom_pad)
                t = torch.nn.functional.pad(t, p)
                # We have already taken care of padding
                kwargs.pop("padding")
            # second input is flipped in SciPy's convolve2d (true convolution
            # vs PyTorch's cross-correlation)
            weight_flipped = torch.flip(weight, (2, 3))
            actual = torch.nn.functional.conv2d(t, weight_flipped, **kwargs).squeeze(0)
            if mode == 'same':
                # Crop back to the original spatial size.
                actual = actual[:5, :10]
            self.assertEqual(actual, expected, rtol=2e-5, atol=5e-6)
        # Global dtype for this test suite is torch.double
        # This leads to change in type-promotion
        # and conv1d outputs `complex128` for `complex64` input.
        with set_default_dtype(torch.float):
            _test(t, weight_even, mode)
            _test(t, weight_odd, mode)
    @unittest.skipIf(not TEST_SCIPY, "Scipy required for the test.")
    @dtypes(torch.float, torch.cfloat)
    @parametrize_test("mode", ('valid', 'same'))
    def test_conv3d_vs_scipy(self, device, dtype, mode):
        # Cross-check F.conv3d against scipy.signal.convolve for both
        # 'valid' and 'same' padding modes.
        t = make_tensor((1, 5, 5, 10), device=device, dtype=dtype)
        weight_even = make_tensor((1, 1, 2, 2, 4), device=device, dtype=dtype)
        weight_odd = make_tensor((1, 1, 2, 3, 5), device=device, dtype=dtype)
        def _test(t, weight, mode):
            # SciPy expects two 3-D inputs.
            t_a = t.squeeze(0).cpu().numpy()
            w_a = weight.squeeze(0).squeeze(0).cpu().numpy()
            expected = scipy.signal.convolve(t_a, w_a, mode=mode)
            kwargs = {'padding': mode}
            if mode == 'same':
                # `same` padding in PyTorch conv3d is different
                # from SciPy: pad manually (F.pad takes pads for the last
                # dimension first), then drop the kwarg.
                left_right_pad = weight.shape[4] // 2
                top_bottom_pad = weight.shape[3] // 2
                front_back_pad = weight.shape[2] // 2
                p = (left_right_pad, left_right_pad, top_bottom_pad, top_bottom_pad,
                     front_back_pad, front_back_pad)
                t = torch.nn.functional.pad(t, p)
                # We have already taken care of padding
                kwargs.pop("padding")
            # second input is flipped in SciPy's convolve (true convolution
            # vs PyTorch's cross-correlation)
            weight_flipped = torch.flip(weight, (2, 3, 4))
            actual = torch.nn.functional.conv3d(t, weight_flipped, **kwargs).squeeze(0)
            if mode == 'same':
                # Crop back to the original spatial size.
                actual = actual[:5, :5, :10]
            if tf32_is_not_fp32() and (dtype == torch.float or dtype == torch.complex64):
                # Looser tolerances when TF32 may be in use for float math.
                self.assertEqual(actual, expected, atol=0.05, rtol=0.05)
            else:
                self.assertEqual(actual, expected, rtol=2e-5, atol=5e-6)
        # Global dtype for this test suite is torch.double
        # This leads to change in type-promotion
        # and conv1d outputs `complex128` for `complex64` input.
        with set_default_dtype(torch.float):
            _test(t, weight_even, mode)
            _test(t, weight_odd, mode)
@dtypes(torch.float, torch.complex64)
def test_conv2d_valid_padding_backward(self, device, dtype):
# Test F.conv2d gradients work with padding='valid'
x = torch.rand(1, 1, 1, 10, device=device, dtype=dtype, requires_grad=True)
y = torch.rand(1, 1, 1, 4, device=device, dtype=dtype, requires_grad=True)
F.conv2d(x, y, padding=0).sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
F.conv2d(x, y, padding='valid').sum().backward()
gx_actual, gy_actual = x.grad, y.grad
self.assertEqual(gx_expect, gx_actual)
self.assertEqual(gy_expect, gy_actual)
@dtypes(torch.double, torch.cdouble)
def test_conv3d_valid_padding_backward(self, device, dtype):
check_forward_ad = torch.device(device).type != 'xla'
# Test F.conv3d gradients work with padding='valid'
x = torch.rand(1, 1, 1, 1, 10, dtype=dtype, device=device, requires_grad=True)
y = torch.rand(1, 1, 1, 1, 4, dtype=dtype, device=device, requires_grad=True)
F.conv3d(x, y, padding=0).sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
F.conv3d(x, y, padding='valid').sum().backward()
gx_actual, gy_actual = x.grad, y.grad
self.assertEqual(gx_expect, gx_actual)
self.assertEqual(gy_expect, gy_actual)
gradcheck(lambda x, y: F.conv3d(x, y, padding='valid'), (x, y), check_forward_ad=check_forward_ad)
gradgradcheck(lambda x, y: F.conv3d(x, y, padding='valid'), (x, y), check_fwd_over_rev=check_forward_ad)
@parametrize_test("N", range(2, 4), name_fn=lambda N: 'ConvTranspose{}d'.format(N))
def test_conv_transpose_with_output_size_and_no_batch_dim(self, device, N):
# For inputs with no batch dim, verify output is the correct shape when output_size is set.
# See https://github.com/pytorch/pytorch/issues/75889
inp = torch.randn((1, 15, 13) if N == 2 else (1, 15, 13, 13), device=device)
output_size = (1, 240, 200) if N == 2 else (1, 240, 200, 200)
ConvTransposeNd = getattr(nn, 'ConvTranspose{}d'.format(N))
m = ConvTransposeNd(1, 1, kernel_size=16, stride=16, padding=7, bias=False, device=device)
output = m(inp, output_size=output_size)
self.assertEqual(output.shape, output_size)
@skipMeta
@parametrize_test("input_shape,transposed,dilated,groups,layout,backend_expected", [
# === slow ===
subtest(((2, 6, 7), False, False, 3, torch.strided, torch._C._ConvBackend.Slow2d),
decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow1d'),
subtest(((2, 6, 7), True, False, 3, torch.strided, torch._C._ConvBackend.SlowTranspose2d),
decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow1d_transposed'),
subtest(((2, 6, 7), False, True, 3, torch.strided, torch._C._ConvBackend.SlowDilated2d),
decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow1d_dilated'),
subtest(((2, 6, 7), True, True, 3, torch.strided, torch._C._ConvBackend.SlowTranspose2d),
decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow1d_dilated_transposed'),
subtest(((2, 6, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Slow2d),
decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow2d'),
subtest(((2, 6, 7, 8), True, False, 3, torch.strided, torch._C._ConvBackend.SlowTranspose2d),
decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow2d_transposed'),
subtest(((2, 6, 7, 8), False, True, 3, torch.strided, torch._C._ConvBackend.SlowDilated2d),
decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow2d_dilated'),
subtest(((2, 6, 7, 8), True, True, 3, torch.strided, torch._C._ConvBackend.SlowTranspose2d),
decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow2d_dilated_transposed'),
subtest(((2, 6, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Slow3d),
decorators=[onlyCPU, disableMkldnn], name='slow3d_cpu'),
# CUDA doesn't have a slow 3D implementation, so it goes to the dilated 3D implementation instead
subtest(((2, 6, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.SlowDilated3d),
decorators=[onlyCUDA, disablecuDNN], name='slow3d_cuda'),
# FIXME: RuntimeError: CUDA out of memory.
# subtest(((2, 6, 7, 8, 9), True, False, 3, torch.strided, torch._C._ConvBackend.SlowTranspose3d),
# decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow3d_transposed'),
subtest(((2, 6, 7, 8, 9), False, True, 3, torch.strided, torch._C._ConvBackend.SlowDilated3d),
decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow3d_dilated'),
# FIXME: RuntimeError: CUDA out of memory.
# subtest(((2, 6, 7, 8, 9), True, True, 3, torch.strided, torch._C._ConvBackend.SlowTranspose3d),
# decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow3d_dilated_transposed'),
subtest(((0, 6, 7), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch1d'),
subtest(((2, 0, 7), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_channel1d'),
subtest(((0, 0, 7), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch_channel1d'),
subtest(((0, 6, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch2d'),
subtest(((2, 0, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_channel2d'),
subtest(((0, 0, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch_channel2d'),
subtest(((0, 6, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch3d'),
subtest(((2, 0, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_channel3d'),
subtest(((0, 0, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),
decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch_channel3d'),
# === cuda ===
# Note that disablecuDNN disables miopen as well.
subtest(((2, 6, 7), False, False, 6, torch.strided, torch._C._ConvBackend.CudaDepthwise2d),
decorators=[onlyCUDA, disablecuDNN], name='cuda_depthwise1d'),
subtest(((2, 6, 7, 8), False, False, 6, torch.strided, torch._C._ConvBackend.CudaDepthwise2d),
decorators=[onlyCUDA, disablecuDNN], name='cuda_depthwise2d'),
subtest(((2, 6, 7, 8, 9), False, False, 6, torch.strided, torch._C._ConvBackend.CudaDepthwise3d),
decorators=[onlyCUDA, disablecuDNN], name='cuda_depthwise3d'),
# === cudnn ===
subtest(((2, 6, 7), False, False, 3, torch.strided, torch._C._ConvBackend.Cudnn),
decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn1d'),
subtest(((2, 6, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Cudnn),
decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn2d'),
subtest(((2, 6, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Cudnn),
decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn3d'),
subtest(((2, 6, 7), True, False, 3, torch.strided, torch._C._ConvBackend.CudnnTranspose),
decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn1d_transposed'),
subtest(((2, 6, 7, 8), True, False, 3, torch.strided, torch._C._ConvBackend.CudnnTranspose),
decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn2d_transposed'),
# FIXME: RuntimeError: CUDA out of memory.
# subtest(((2, 6, 7, 8, 9), True, False, 3, torch.strided, torch._C._ConvBackend.CudnnTranspose),
# decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn3d_transposed'),
# === miopen ===
subtest(((2, 6, 7), False, False, 3, torch.strided, torch._C._ConvBackend.Miopen),
decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen1d'),
subtest(((2, 6, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Miopen),
decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen2d'),
subtest(((2, 6, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Miopen),
decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen3d'),
subtest(((2, 6, 7), True, False, 3, torch.strided, torch._C._ConvBackend.MiopenTranspose),
decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen1d_transposed'),
subtest(((2, 6, 7, 8), True, False, 3, torch.strided, torch._C._ConvBackend.MiopenTranspose),
decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen2d_transposed'),
subtest(((2, 6, 7, 8, 9), True, False, 3, torch.strided, torch._C._ConvBackend.MiopenTranspose),
decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen3d_transposed'),
subtest(((2, 6, 7), False, False, 6, torch.strided, torch._C._ConvBackend.MiopenDepthwise),
decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen_depthwise1d'),
subtest(((2, 6, 7, 8), False, False, 6, torch.strided, torch._C._ConvBackend.MiopenDepthwise),
decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen_depthwise2d'),
subtest(((2, 6, 7, 8, 9), False, False, 6, torch.strided, torch._C._ConvBackend.MiopenDepthwise),
decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen_depthwise3d'),
# === mkldnn ===
subtest(((2, 6, 7), False, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),
decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn1d'),
subtest(((2, 6, 7, 8), False, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),
decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn2d'),
subtest(((2, 6, 7, 8, 9), False, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),
decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn3d'),
# Transposed convolution is broken for mkldnn. See https://github.com/pytorch/pytorch/issues/68775.
subtest(((2, 6, 7), True, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),
decorators=[onlyCPU, skipCPUIfNoMkldnn, unittest.expectedFailure], name='mkldnn1d_transposed'),
subtest(((2, 6, 7, 8), True, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),
decorators=[onlyCPU, skipCPUIfNoMkldnn, unittest.expectedFailure], name='mkldnn2d_transposed'),
subtest(((2, 6, 7, 8, 9), True, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),
decorators=[onlyCPU, skipCPUIfNoMkldnn, unittest.expectedFailure], name='mkldnn3d_transposed'),
subtest(((2, 6, 7), False, True, 3, torch.strided, torch._C._ConvBackend.Mkldnn),
decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn1d_cpu_input'),
subtest(((2, 6, 7, 8), False, True, 3, torch.strided, torch._C._ConvBackend.Mkldnn),
decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn2d_cpu_input'),
subtest(((2, 6, 7, 8, 9), False, True, 3, torch.strided, torch._C._ConvBackend.Mkldnn),
decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn3d_cpu_input'),
subtest(((0, 6, 7), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch1d'),
subtest(((2, 0, 7), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_channel1d'),
subtest(((0, 0, 7), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch_channel1d'),
subtest(((0, 6, 7, 8), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch2d'),
subtest(((2, 0, 7, 8), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_channel2d'),
subtest(((0, 0, 7, 8), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch_channel2d'),
subtest(((0, 6, 7, 8, 9), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch3d'),
subtest(((2, 0, 7, 8, 9), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_channel3d'),
subtest(((0, 0, 7, 8, 9), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),
decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch_channel3d'),
# Note: Tests for mobile backends are not currently supported. This comprises
# NnpackSpatial, Winograd3x3Depthwise, and Xnnpack2d backends. Testing these
# requires the ability to gate tests by whether PyTorch is built with USE_MOBILE=1.
])
# Test with both bias and no bias.
@parametrize_test("has_bias", [False, True])
# Test with both stride=1 and stride>1 cases.
@parametrize_test("strided", [False, True])
# Test with both contiguous and non-contiguous inputs.
@parametrize_test("contiguous", [False, True])
def test_conv_backend(
        self, device, input_shape, has_bias, strided, contiguous, transposed, dilated, groups,
        layout, backend_expected):
    """End-to-end check that convolution dispatches to the expected backend.

    Parametrized (see the subtest list above) over input shape, layout,
    transposition, dilation, and groups — plus bias / stride / contiguity from
    the stacked decorators. Builds matching conv inputs, asserts that
    ``torch._C._select_conv_backend`` picks ``backend_expected``, runs forward
    and backward, smoke-tests forward AD, then runs gradcheck/gradgradcheck in
    float64 (skipped for mkldnn, which supports neither).
    """
    # Build up inputs.
    dtype = torch.float32
    C_in, C_out, dim, kernel_size = input_shape[1], 12, len(input_shape) - 2, 3
    x = torch.randn(*input_shape, device=device, dtype=dtype, requires_grad=True)
    # Weight layout: (C_in, C_out // groups, *k) for transposed conv,
    # (C_out, C_in // groups, *k) for regular conv.
    weight = torch.randn(C_in if transposed else C_out,
                         C_out // groups if transposed else C_in // groups,
                         *[kernel_size for _ in range(dim)],
                         device=device, dtype=dtype, requires_grad=True)
    bias = torch.randn(C_out, device=device, dtype=dtype, requires_grad=True) if has_bias else None

    def _make_noncontiguous(inp):
        # Repeat along the last dim then take every other element: same
        # values, but the result is non-contiguous in memory.
        if inp is None:
            return None
        old_requires_grad = inp.requires_grad
        inp = torch.repeat_interleave(inp, 2, dim=-1)
        inp = inp[..., ::2].detach().requires_grad_(old_requires_grad)
        return inp

    if not contiguous:
        x = _make_noncontiguous(x)
        weight = _make_noncontiguous(weight)
        bias = _make_noncontiguous(bias)

    if layout is torch._mkldnn:
        x = x.to_mkldnn()
        # Note that weight and bias are not supported as mkldnn tensors during training.

    stride = (2,) * dim if strided else (1,) * dim
    padding = (0,) * dim
    dilation = (2,) * dim if dilated else (1,) * dim
    output_padding = (0,) * dim
    inputs = [x, weight, bias, stride, padding, dilation, transposed, output_padding, groups]

    # Ensure correct backend is selected.
    backend_actual = torch._C._select_conv_backend(*inputs)
    self.assertEqual(backend_actual, backend_expected)

    # Ensure backward call succeeds.
    convolution = torch.ops.aten.convolution
    output = convolution(*inputs)
    grad_output = torch.randn(output.shape, device=device, dtype=dtype)
    if not contiguous:
        grad_output = _make_noncontiguous(grad_output)
    if layout is torch._mkldnn:
        grad_output = grad_output.to_mkldnn()
    output.backward(grad_output)

    # mkldnn doesn't support gradcheck :(
    if layout is torch._mkldnn:
        return

    if backend_actual != torch._C._ConvBackend.Empty:  # FIXME: forward AD fails
        # Forward AD and forward-over-reverse AD smoke test in float32
        # TODO: remove this if we introduce per-op gradient tests for float32
        with fwAD.dual_level():
            # Tangents are attached only to tensor arguments.
            dual_inputs = [(fwAD.make_dual(i, torch.rand_like(i)) if isinstance(i, torch.Tensor) else i) for i in inputs]
            # Forward AD
            output = convolution(*dual_inputs)
            # Forward over reverse AD
            grad_output_d = fwAD.make_dual(torch.rand_like(output), torch.rand_like(output))
            if has_bias:
                torch.autograd.grad(output, [x, weight, bias], grad_output_d)
            else:
                torch.autograd.grad(output, [x, weight], grad_output_d)

    # Convert to float64 for gradcheck.
    x = x.to(torch.float64).detach().requires_grad_(True)
    weight = weight.to(torch.float64).detach().requires_grad_(True)
    if bias is not None:
        bias = bias.to(torch.float64).detach().requires_grad_(True)
    inputs = [x, weight, bias, stride, padding, dilation, transposed, output_padding, groups]

    # Set some backend-specific validation settings.
    gradcheck_nondet_tol = 0.0
    if torch.backends.cudnn.is_available():
        # cuDNN introduces non-determinism
        gradcheck_nondet_tol = GRADCHECK_NONDET_TOL

    self.assertTrue(gradcheck(convolution, inputs, nondet_tol=gradcheck_nondet_tol))

    # double backward doesn't support bias gradients
    if bias is not None:
        bias.requires_grad_(False)
    self.assertTrue(gradgradcheck(convolution, inputs, nondet_tol=gradcheck_nondet_tol))
def test_Dropout(self, device):
    """Exercise nn.Dropout through the shared dropout helpers on this device."""
    inp = torch.empty(1000)
    self._test_dropout(nn.Dropout, device, inp)
    # Discontiguous inputs, in both the default and channels-last layouts.
    self._test_dropout_discontiguous(nn.Dropout, device)
    self._test_dropout_discontiguous(nn.Dropout, device, memory_format=torch.channels_last)
    self._test_dropout_stride_mean_preserve(nn.Dropout, device)
    # bfloat16 is only exercised on backends that support it.
    if self.device_type in ('cuda', 'cpu'):
        self._test_dropout(nn.Dropout, device, inp.bfloat16())
def _test_dropoutNd_no_batch(self, dropout, input):
    """Check that a no-batch-dim input produces the same result as the
    equivalent batch-of-one input under the same RNG state."""
    batched_copy = input.clone()
    with freeze_rng_state():
        unbatched_result = dropout(input)
    with freeze_rng_state():
        batched_result = dropout(batched_copy.unsqueeze(0)).squeeze(0)
    self.assertEqual(unbatched_result, batched_result)
def _test_dropoutNd_channel_zero(self, dropout, input):
# Verify the number of zeros in a channel is 0 or the number of elements in the channel
# for a fully positive input tensor
shape = input.shape
B = shape[0]
C = shape[1]
channel_numel = torch.tensor(shape[2:]).prod()
result = dropout(input)
for b, c in product(range(B), range(C)):
self.assertTrue(result[b, c].count_nonzero() in (0, channel_numel))
@expectedFailureXLA # seems like freeze_rng_state is not honoured by XLA
def test_Dropout1d(self, device):
    """nn.Dropout1d: shape validation, no-batch-dim support, whole-channel zeroing."""
    N = random.randint(10, 15)
    C = random.randint(10, 15)
    L = random.randint(10, 15)
    self._test_dropout(nn.Dropout1d, device, torch.empty(N, C, L))

    # 4D and 1D inputs are rejected outright.
    with self.assertRaisesRegex(RuntimeError, "Expected 2D or 3D input, but received a 4D input"):
        nn.Dropout1d(p=0.5)(torch.rand(1, 2, 2, 2, device=device))
    with self.assertRaisesRegex(RuntimeError, "Expected 2D or 3D input, but received a 1D input"):
        nn.Dropout1d(p=0.5)(torch.rand(2, device=device))

    # A (C, L) input is treated as unbatched.
    unbatched = torch.rand(50, 2, device=device)
    self._test_dropoutNd_no_batch(nn.Dropout1d(p=0.5), unbatched)
    self._test_dropoutNd_no_batch(nn.Dropout1d(p=0.5, inplace=True), unbatched)

    # Entire channels are dropped together.
    positive = torch.ones(10, 4, 2, device=device)
    self._test_dropoutNd_channel_zero(nn.Dropout1d(p=0.5), positive)
    self._test_dropoutNd_channel_zero(nn.Dropout1d(p=0.5, inplace=True), positive)
@expectedFailureXLA # seems like freeze_rng_state is not honoured by XLA
def test_Dropout2d(self, device):
    """nn.Dropout2d: shape warnings, historical 3D behavior, whole-channel zeroing."""
    batch = random.randint(1, 5)
    width = random.randint(1, 5)
    height = random.randint(1, 5)
    num_features = 1000
    inp = torch.empty(num_features, batch, width, height)
    self._test_dropout(nn.Dropout2d, device, inp)
    self._test_dropout(nn.Dropout2d, device, inp, memory_format=torch.channels_last)
    self._test_dropout_discontiguous(nn.Dropout2d, device)
    self._test_dropout_discontiguous(nn.Dropout2d, device, memory_format=torch.channels_last)

    # Unexpected dimensionalities warn rather than error.
    with self.assertWarnsRegex(UserWarning, "Received a 5-D input to dropout2d"):
        nn.Dropout2d(p=0.5)(torch.rand(1, 2, 2, 2, 2, device=device))
    with self.assertWarnsRegex(UserWarning, "Received a 2-D input to dropout2d"):
        nn.Dropout2d(p=0.5)(torch.rand(1, 2, device=device))

    # TODO: Uncomment these lines once no-batch-dim inputs are supported.
    # For now, the historical dropout1d behavior is performed for 3D inputs.
    # See https://github.com/pytorch/pytorch/issues/77081
    # input = torch.rand(50, 2, 2, device=device)
    # self._test_dropoutNd_no_batch(nn.Dropout2d(p=0.5), input)
    # self._test_dropoutNd_no_batch(nn.Dropout2d(p=0.5, inplace=True), input)
    with self.assertWarnsRegex(UserWarning, "assuming that channel-wise 1D dropout behavior is desired"):
        nn.Dropout2d(p=0.5)(torch.rand(1, 2, 2, device=device))

    # Entire channels are dropped together.
    positive = torch.ones(10, 4, 2, 2, device=device)
    self._test_dropoutNd_channel_zero(nn.Dropout2d(p=0.5), positive)
    self._test_dropoutNd_channel_zero(nn.Dropout2d(p=0.5, inplace=True), positive)
@expectedFailureXLA # seems like freeze_rng_state is not honoured by XLA
def test_Dropout3d(self, device):
    """nn.Dropout3d: shape warnings, no-batch-dim support, whole-channel zeroing."""
    batch = random.randint(1, 5)
    width = random.randint(1, 5)
    height = random.randint(1, 5)
    depth = random.randint(1, 2)
    num_features = 1000
    inp = torch.empty(num_features, batch, depth, width, height)
    self._test_dropout(nn.Dropout3d, device, inp)
    self._test_dropout_discontiguous(nn.Dropout3d, device)
    self._test_dropout_discontiguous(nn.Dropout3d, device, memory_format=torch.channels_last)

    # Unexpected dimensionalities warn rather than error.
    with self.assertWarnsRegex(UserWarning, "Received a 6-D input to dropout3d"):
        nn.Dropout3d(p=0.5)(torch.rand(1, 2, 2, 2, 2, 2, device=device))
    with self.assertWarnsRegex(UserWarning, "Received a 3-D input to dropout3d"):
        nn.Dropout3d(p=0.5)(torch.rand(1, 2, 2, device=device))

    # A (C, D, H, W) input is treated as unbatched.
    unbatched = torch.rand(50, 2, 2, 2, device=device)
    self._test_dropoutNd_no_batch(nn.Dropout3d(p=0.5), unbatched)
    self._test_dropoutNd_no_batch(nn.Dropout3d(p=0.5, inplace=True), unbatched)

    # Entire channels are dropped together.
    positive = torch.ones(10, 4, 2, 2, 2, device=device)
    self._test_dropoutNd_channel_zero(nn.Dropout3d(p=0.5), positive)
    self._test_dropoutNd_channel_zero(nn.Dropout3d(p=0.5, inplace=True), positive)
def test_InstanceNorm1d_general(self, device):
    """Run the generic InstanceNorm checks on a random 3D input."""
    batch = random.randint(3, 5)
    channels = random.randint(3, 5)
    length = random.randint(8, 10)
    inp = torch.rand(batch, channels, length)
    self._test_InstanceNorm_general(nn.InstanceNorm1d, inp, device)
    if self.device_type == 'cuda':
        self._test_InstanceNorm_cuda_half(nn.InstanceNorm1d, inp, device)
def test_InstanceNorm2d_general(self, device):
    """Run the generic InstanceNorm checks on a random 4D input."""
    batch = random.randint(3, 5)
    channels = random.randint(3, 5)
    width = random.randint(3, 6)
    height = random.randint(6, 8)
    inp = torch.rand(batch, channels, height, width)
    self._test_InstanceNorm_general(nn.InstanceNorm2d, inp, device)
    if self.device_type == 'cuda':
        self._test_InstanceNorm_cuda_half(nn.InstanceNorm2d, inp, device)
def test_InstanceNorm3d_general(self, device):
    """Run the generic InstanceNorm checks on a random 5D input."""
    batch = random.randint(3, 5)
    channels = random.randint(3, 5)
    width = random.randint(2, 5)
    height = random.randint(2, 5)
    depth = random.randint(2, 5)
    inp = torch.rand(batch, channels, height, width, depth)
    self._test_InstanceNorm_general(nn.InstanceNorm3d, inp, device)
    if self.device_type == 'cuda':
        self._test_InstanceNorm_cuda_half(nn.InstanceNorm3d, inp, device)
def test_instancenorm_raises_error_if_less_than_one_value_per_channel(self, device):
    """InstanceNorm1d must reject inputs with a single value per channel."""
    single_value = torch.rand(10)[None, :, None]  # shape (1, 10, 1)
    with self.assertRaises(ValueError):
        torch.nn.InstanceNorm1d(10)(single_value).to(device)
def test_instancenorm_raises_error_for_single_spatial_element_during_training(self, device):
    """InstanceNorm with running stats must reject a single spatial element in
    training mode, but accept the same input in eval mode."""
    batch_size = 10
    num_channels = 3
    norm_classes = [torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d]
    for spatial_dims, norm_cls in enumerate(norm_classes, start=1):
        module = norm_cls(num_channels, track_running_stats=True)
        module.to(device)
        # An appropriately-sized input whose spatial extent is exactly one element.
        inp = torch.randn(batch_size, num_channels, *([1] * spatial_dims), device=device)
        with self.assertRaises(ValueError):
            module(inp)
        # Single spatial element should be fine in eval.
        module.eval()
        module(inp)
def test_LayerNorm_general(self, device):
    """Run the generic LayerNorm checks, plus bfloat16/half variants where supported."""
    self._test_LayerNorm_general(device)
    if self.device_type in ('cuda', 'cpu'):
        self._test_LayerNorm_general(device, dtype=torch.bfloat16)
    if self.device_type == 'cuda':
        self._test_LayerNorm_cuda_half(device)
@onlyNativeDeviceTypes
def test_LayerNorm_numeric(self, device):
    """Compare nn.LayerNorm against a manual reference implementation."""
    def layer_norm_ref(X, gamma, beta, normalized_shape, eps):
        # Flatten the normalized dims, normalize row-wise, then apply affine.
        feature_size = np.prod(normalized_shape)
        flat = X.view(-1, feature_size)
        mu = flat.mean(dim=-1, keepdim=True)
        sigma2 = flat.var(dim=-1, unbiased=False, keepdim=True)
        normed = (flat - mu) / torch.sqrt(sigma2 + eps)
        normed = normed * gamma.view(-1) + beta.view(-1)
        return normed.view(*X.size())

    normalized_shape = [256, 256, 144]
    layer_norm = nn.LayerNorm(normalized_shape).float().to(device)
    X = torch.rand(2, *normalized_shape, dtype=torch.float32,
                   device=device)

    Y = layer_norm(X)
    Y_ref = layer_norm_ref(X, layer_norm.weight.data, layer_norm.bias.data,
                           normalized_shape, layer_norm.eps)
    self.assertEqual(Y, Y_ref, rtol=0, atol=1e-5)

    if self.device_type == 'cuda':
        # The CPU path must agree with the device result as well.
        layer_norm.cpu()
        self.assertEqual(layer_norm(X.cpu()), Y, rtol=0, atol=1e-5)
@onlyCPU
def test_glu_bfloat16(self, device):
    """GLU in bfloat16 should roughly match the float32 result, for both the
    forward output and the input gradient."""
    def check_dtype(fn, inp, dtype):
        low_prec = inp.detach().clone().to(dtype=dtype).requires_grad_(True)
        full_prec = inp.detach().clone().float().requires_grad_(True)
        out_low = fn(low_prec)
        out_low.sum().backward()
        out_full = fn(full_prec)
        out_full.sum().backward()
        self.assertEqual(out_low.dtype, dtype)
        self.assertEqual(low_prec.grad.dtype, dtype)
        # Outputs must match closely; grads within a looser tolerance.
        self.assertEqual(out_low, out_full, exact_dtype=False)
        self.assertEqual(low_prec.grad, full_prec.grad, atol=1e-2, rtol=0, exact_dtype=False)

    def make_module(device):
        return torch.nn.GLU(dim=-1).to(device)

    for shape in ([1, 3, 1, 6], [1, 3, 1, 128], [1, 3, 256, 256]):
        check_dtype(make_module(device), torch.randn(shape, device=device), torch.bfloat16)
@onlyNativeDeviceTypes
def test_GroupNorm_general(self, device):
    """Run the generic GroupNorm checks (plus half precision on CUDA)."""
    self._test_GroupNorm_general(device)
    if self.device_type == 'cuda':
        self._test_GroupNorm_cuda_half()
def test_GroupNorm_raises_error_if_one_value_per_group(self, device):
    """GroupNorm must reject inputs with a single value per group."""
    single_value = torch.rand(10)[None, :, None]  # shape (1, 10, 1)
    with self.assertRaises(ValueError):
        torch.nn.GroupNorm(10, 10)(single_value).to(device)
def test_GroupNorm_empty(self, device):
    """GroupNorm tolerates a zero batch dim, with and without cuDNN enabled."""
    mod = torch.nn.GroupNorm(2, 4).to(device)
    inp = torch.randn(0, 4, 2, 2, device=device)
    self._test_module_empty_input(mod, inp)
    if self.device_type == 'cuda' and self.has_cudnn():
        # Also cover the non-cuDNN code path.
        with torch.backends.cudnn.flags(enabled=False):
            self._test_module_empty_input(mod, inp)
@onlyCPU
@dtypes(torch.float, torch.double)
def test_groupnorm_nhwc(self, device, dtype):
    """GroupNorm on channels-last inputs must match the contiguous path in
    output, parameter grads, and input grad, and keep the memory format."""
    def helper(self, size, groups, memory_format):
        channels = size[1]
        inp = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
        inp = inp.contiguous(memory_format=memory_format)
        inp.retain_grad()  # non-leaf after .contiguous(); keep its grad for the check below
        grad = torch.randn(size, dtype=dtype, device=device)
        grad = grad.contiguous(memory_format=memory_format)
        gn = nn.GroupNorm(groups, channels).to(device).to(dtype)
        gn.weight.data.uniform_()
        gn.bias.data.uniform_()

        # Reference module: identical parameters, contiguous input and grad.
        ref_inp = inp.detach().clone().contiguous().requires_grad_(True)
        ref_grad = grad.detach().clone().contiguous()
        ref_gn = nn.GroupNorm(groups, channels).to(device).to(dtype)
        ref_gn.load_state_dict(gn.state_dict())

        out = gn(inp)
        out.backward(grad)
        ref_out = ref_gn(ref_inp)
        ref_out.backward(ref_grad)

        self.assertTrue(out.is_contiguous(memory_format=memory_format))
        self.assertTrue(ref_out.is_contiguous())
        self.assertEqual(out, ref_out)
        self.assertEqual(gn.weight.grad, ref_gn.weight.grad)
        self.assertEqual(gn.bias.grad, ref_gn.bias.grad)
        self.assertEqual(inp.grad, ref_inp.grad)

    helper(self, (4, 8, 10, 10), 4, torch.channels_last)
    helper(self, (2, 30, 9, 9), 3, torch.channels_last)
    helper(self, (2, 9, 7, 11, 15), 3, torch.channels_last_3d)
@onlyNativeDeviceTypes
def test_GroupNorm_numeric(self, device):
    """Compare nn.GroupNorm against a manual reference implementation."""
    def group_norm_ref(X, gamma, beta, groups, channels, eps):
        # Normalize within each group, then apply the per-channel affine.
        batch_size = X.size()[0]
        grouped = X.view(batch_size, groups, -1)
        mu = grouped.mean(dim=-1, keepdim=True)
        sigma2 = grouped.var(dim=-1, unbiased=False, keepdim=True)
        normed = ((grouped - mu) / torch.sqrt(sigma2 + eps)).view(
            batch_size, channels, -1)
        normed = normed * gamma.view(channels, 1) + beta.view(channels, 1)
        return normed.view(*X.size())

    batch_size = 1
    groups = 2
    channels = 8
    group_norm = nn.GroupNorm(groups, channels).float().to(device)
    X = torch.rand(batch_size, channels, 256, 256, 72,
                   dtype=torch.float32, device=device)

    Y = group_norm(X)
    Y_ref = group_norm_ref(
        X, group_norm.weight.data, group_norm.bias.data, groups,
        channels, group_norm.eps)
    self.assertEqual(Y, Y_ref, rtol=0, atol=1e-5)

    if self.device_type == 'cuda':
        # The CPU path must agree with the device result as well.
        group_norm.cpu()
        self.assertEqual(group_norm(X.cpu()), Y, rtol=0, atol=1e-5)
@onlyNativeDeviceTypes
@dtypes(torch.float64, torch.complex128)
def test_pad(self, device, dtype):
    """F.pad error handling (circular/reflect limits) and the guarantee that
    the padded output never aliases the input."""
    inputs = torch.randn(1, 1, 4, device=device, dtype=dtype, requires_grad=True)
    # Circular padding may not wrap around more than once...
    with self.assertRaises(RuntimeError):
        F.pad(inputs, (5, 4), mode='circular')
    with self.assertRaises(RuntimeError):
        F.pad(inputs, (3, 6), mode='circular')
    # ...and negative padding may not produce a negative output shape.
    with self.assertRaises(RuntimeError):
        F.pad(inputs, (-3, -2), mode='circular')

    # Reflection padding errors out when pad >= input size.
    expected_err_msg = r"Padding size should be less than the corresponding input dimension"
    with self.assertRaisesRegex(RuntimeError, expected_err_msg):
        F.pad(torch.randn(1, 1, 2, 3, device=device, dtype=dtype), (1, 1, 3, 0), mode='reflect')
    with self.assertRaisesRegex(RuntimeError, expected_err_msg):
        F.pad(torch.randn(1, 1, 2, device=device, dtype=dtype), (2, 1), mode='reflect')

    inputs = torch.rand(1, 3, 4, 4, device=device, dtype=dtype)
    # Writing through the padded result must not modify the input tensor.
    for mode in ('constant', 'reflect', 'replicate', 'circular'):
        out = F.pad(inputs, (0, 0, 0, 0), mode=mode)
        out.fill_(4)
        self.assertTrue(torch.all(torch.abs(inputs) < 2))
        out = F.pad(inputs, (0, 0, -1, -1), mode=mode)
        out.fill_(4)
        self.assertTrue(torch.all(torch.abs(inputs) < 2))
@onlyNativeDeviceTypes
@dtypes(torch.float64, torch.complex128)
def test_ReplicationPad_empty(self, device, dtype):
    """ReplicationPad{1,2,3}d accept zero-batch inputs but reject a zero
    channel dimension with a dimensionality error."""
    cases = [
        (torch.nn.ReplicationPad1d(3), torch.randn(0, 3, 10, device=device, dtype=dtype)),
        (torch.nn.ReplicationPad2d(3), torch.randn(0, 3, 10, 10, device=device, dtype=dtype)),
        (torch.nn.ReplicationPad3d(3), torch.randn(0, 3, 10, 10, 10, device=device, dtype=dtype)),
    ]
    for mod, inp in cases:
        self._test_module_empty_input(mod, inp, check_size=False)

    with self.assertRaisesRegex(RuntimeError, 'Expected 2D or 3D'):
        torch.nn.ReplicationPad1d(2)(torch.randn(3, 0, 10, device=device, dtype=dtype))
    with self.assertRaisesRegex(RuntimeError, 'Expected 3D or 4D'):
        torch.nn.ReplicationPad2d((2, 2, 2, 2))(torch.randn(43, 0, 10, 10, device=device, dtype=dtype))
    with self.assertRaisesRegex(RuntimeError, 'Expected 4D or 5D'):
        torch.nn.ReplicationPad3d((2, 2, 2, 2, 2, 2))(torch.randn(3, 0, 10, 10, 10, device=device, dtype=dtype))
def test_ReplicationPad1d_large(self, device):
    """Stress ReplicationPad1d with a very large channel/batch extent.

    Forward: the center equals the input and left/right padding replicate the
    edge values. Backward: interior grads pass straight through, while each
    edge element accumulates the grads of all of its replicas.
    """
    shapes = ([2, 65736, 4], [65736, 2, 4])
    pl, pr = 3, 4  # left / right padding widths
    for shape in shapes:
        x = torch.randn(shape, device=device, requires_grad=True)
        model = torch.nn.ReplicationPad1d((pl, pr))

        # forward
        out = model(x)
        self.assertEqual(out[:, :, pl : -pr], x)

        left_padding = out[:, :, : pl]
        self.assertEqual(left_padding, x[:, :, :1].expand_as(left_padding))
        right_padding = out[:, :, -pr :]
        self.assertEqual(right_padding, x[:, :, -1:].expand_as(right_padding))

        # backward
        g = torch.randn_like(out)
        out.backward(g)

        # Edge positions (index 0 and -1) sum the grads of their pl+1 / pr+1 copies.
        self.assertEqual(x.grad[:, :, 1 : -1], g[:, :, pl + 1 : -pr - 1])

        self.assertEqual(x.grad[:, :, 0], g[:, :, : pl + 1].sum(-1))
        self.assertEqual(x.grad[:, :, -1], g[:, :, -pr - 1:].sum(-1))
def test_ReplicationPad2d_large(self, device):
    """Stress ReplicationPad2d with a very large channel/batch extent.

    Forward: the center equals the input, edges replicate the nearest row or
    column, and corners replicate the corner element. Backward: interior grads
    pass through, edge elements sum the grads of their replicas along one axis,
    and corner elements sum over the whole replicated corner rectangle.
    """
    shapes = ([2, 65736, 4, 4], [65736, 2, 4, 4])
    pl, pr, pt, pb = 3, 4, 5, 6  # left / right / top / bottom padding widths
    for shape in shapes:
        x = torch.randn(shape, device=device, requires_grad=True)
        model = torch.nn.ReplicationPad2d((pl, pr, pt, pb))

        # forward center, edge
        out = model(x)
        self.assertEqual(out[:, :, pt : -pb, pl : -pr], x)

        left_padding = out[:, :, pt : -pb, : pl]
        self.assertEqual(left_padding, x[:, :, :, :1].expand_as(left_padding))
        right_padding = out[:, :, pt : -pb, -pr :]
        self.assertEqual(right_padding, x[:, :, :, -1:].expand_as(right_padding))
        top_padding = out[:, :, : pt, pl : -pr]
        self.assertEqual(top_padding, x[:, :, :1, :].expand_as(top_padding))
        bottom_padding = out[:, :, -pb : , pl : -pr]
        self.assertEqual(bottom_padding, x[:, :, -1:, :].expand_as(bottom_padding))

        # forward corner
        tl_padding = out[:, :, : pt + 1, : pl + 1]
        self.assertEqual(tl_padding, x[:, :, :1, :1].expand_as(tl_padding))
        tr_padding = out[:, :, : pt + 1, -pr - 1:]
        self.assertEqual(tr_padding, x[:, :, :1, -1:].expand_as(tr_padding))
        bl_padding = out[:, :, -pb - 1:, : pl + 1]
        self.assertEqual(bl_padding, x[:, :, -1:, :1].expand_as(bl_padding))
        br_padding = out[:, :, -pb - 1:, -pr - 1:]
        self.assertEqual(br_padding, x[:, :, -1:, -1:].expand_as(br_padding))

        # backward center, edge
        g = torch.randn_like(out)
        out.backward(g)

        self.assertEqual(x.grad[:, :, 1:-1, 1:-1], g[:, :, pt + 1 : -pb - 1, pl + 1 : -pr - 1])

        self.assertEqual(x.grad[:, :, 1:-1, 0], g[:, :, pt + 1 : -pb - 1, : pl + 1].sum(-1))
        self.assertEqual(x.grad[:, :, 1:-1, -1], g[:, :, pt + 1 : -pb - 1, -pr - 1 :].sum(-1))
        self.assertEqual(x.grad[:, :, 0, 1:-1], g[:, :, : pt + 1, pl + 1 : -pr - 1].sum(-2))
        self.assertEqual(x.grad[:, :, -1, 1:-1], g[:, :, -pb - 1 :, pl + 1 : -pr - 1].sum(-2))

        # backward corner
        self.assertEqual(x.grad[:, :, 0, 0], g[:, :, : pt + 1, : pl + 1].sum((-2, -1)))
        self.assertEqual(x.grad[:, :, 0, -1], g[:, :, : pt + 1, -pr - 1 :].sum((-2, -1)))
        self.assertEqual(x.grad[:, :, -1, 0], g[:, :, -pb - 1 :, : pl + 1].sum((-2, -1)))
        self.assertEqual(x.grad[:, :, -1, -1], g[:, :, -pb - 1 :, -pr - 1 :].sum((-2, -1)))
@largeTensorTest("6GB")
def test_ReplicationPad3d_large(self, device):
    """Stress ReplicationPad3d with a very large channel/batch extent.

    Only the center region is checked here (the edge/corner bookkeeping is
    covered by the 1d/2d variants): forward center equals the input and
    backward interior grads pass straight through.
    """
    shapes = ([1, 65736, 2, 2, 2], [65736, 1, 2, 2, 2])
    pl, pr, pt, pbt, pf, pbk = 3, 4, 5, 6, 7, 8  # left/right, top/bottom, front/back

    for shape in shapes:
        x = torch.randn(shape, device=device, requires_grad=True)
        model = torch.nn.ReplicationPad3d((pl, pr, pt, pbt, pf, pbk))

        # forward center
        out = model(x)
        self.assertEqual(out[:, :, pf : -pbk, pt : -pbt, pl : -pr], x)

        # backward center
        g = torch.randn_like(out)
        out.backward(g)

        self.assertEqual(x.grad[:, :, 1:-1, 1:-1, 1:-1], g[:, :, pf + 1 : -pbk - 1, pt + 1 : -pbt - 1, pl + 1 : -pr - 1])
@onlyNativeDeviceTypes
def test_Bilinear_empty(self, device):
    """A zero-batch Bilinear forward/backward runs and yields all-zero grads."""
    mod = torch.nn.Bilinear(20, 30, 40).to(device)
    first = torch.randn(0, 10, 20, requires_grad=True, device=device)
    second = torch.randn(0, 10, 30, requires_grad=True, device=device)
    mod(first, second).sum().backward()
    # Empty inputs: tensors and their grads are trivially all-zero.
    self.assertEqual(first, torch.zeros_like(first))
    self.assertEqual(second, torch.zeros_like(second))
    self.assertEqual(first.grad, torch.zeros_like(first))
    self.assertEqual(second.grad, torch.zeros_like(second))
@expectedFailureMeta # RuntimeError: cannot reshape tensor of 0 elements into shape [1, 0, -1]
@onlyNativeDeviceTypes
def test_TransformerEncoderLayer_empty(self, device):
    """TransformerEncoderLayer handles empty inputs in both training and eval.

    In eval mode with batch_first, the inference fast path is also exercised
    with NestedTensor inputs: a NestedTensor with no entries is rejected, while
    one holding a single empty entry is accepted.
    """
    for training in (True, False):
        # Empty along the batch dim for batch_first, along the seq dim otherwise.
        for batch_first, input_shape in [(True, (0, 10, 512)),
                                         (False, (10, 0, 512))]:
            input = torch.rand(*input_shape, device=device)
            encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=batch_first).to(device)
            if not training:
                encoder_layer = encoder_layer.eval()
                with torch.no_grad():
                    self._test_module_empty_input(encoder_layer, input, check_size=False, inference=True)
                if batch_first and not TEST_WITH_CROSSREF:
                    with torch.no_grad():
                        # A NestedTensor with no tensors inside it doesn't have dim 3 (or dim
                        # 2, for that matter) so it can't hit the fast path, nor can we give a
                        # result.
                        with self.assertRaisesRegex(
                                AssertionError, 'MultiheadAttention does not support NestedTensor outside'):
                            nt = torch.nested_tensor([], device=device)
                            self._test_module_empty_input(encoder_layer, nt, check_size=False, inference=True)

                        # A NestedTensor with one empty entry does have dim 3 and is accepted.
                        nt = torch.nested_tensor([torch.rand(0, 512, device=device)], device=device)
                        self._test_module_empty_input(encoder_layer, nt, check_size=False, inference=True)
            else:
                self._test_module_empty_input(encoder_layer, input, check_size=False)
@expectedFailureMeta # RuntimeError: cannot reshape tensor of 0 elements into shape [1, 0, -1]
@onlyNativeDeviceTypes
def test_TransformerEncoder_empty(self, device):
    """TransformerEncoder handles empty batch/sequence inputs."""
    for batch_first, input_shape in [(True, (0, 10, 512)),
                                     (False, (10, 0, 512))]:
        src = torch.rand(*input_shape, device=device)
        layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=batch_first).to(device)
        encoder = nn.TransformerEncoder(layer, num_layers=6).to(device)
        self._test_module_empty_input(encoder, src, check_size=False)
@expectedFailureMeta # RuntimeError: cannot reshape tensor of 0 elements into shape [1, 0, -1]
@onlyNativeDeviceTypes
def test_TransformerDecoderLayer_empty(self, device):
    """TransformerDecoderLayer handles empty tgt/memory inputs."""
    for batch_first, memory_shape, tgt_shape in [(True, (0, 10, 512), (0, 20, 512)),
                                                 (False, (10, 0, 512), (20, 0, 512))]:
        memory = torch.rand(*memory_shape, device=device)
        tgt = torch.rand(*tgt_shape, requires_grad=True, device=device)
        layer = nn.TransformerDecoderLayer(d_model=512, nhead=8, batch_first=batch_first).to(device)
        self._test_module_empty_inputs(layer, [tgt, memory])
@expectedFailureMeta # RuntimeError: cannot reshape tensor of 0 elements into shape [1, 0, -1]
@onlyNativeDeviceTypes
def test_TransformerDecoder_empty(self, device):
    """TransformerDecoder handles empty tgt/memory inputs."""
    for batch_first, memory_shape, tgt_shape in [(True, (0, 10, 512), (0, 20, 512)),
                                                 (False, (10, 0, 512), (20, 0, 512))]:
        memory = torch.rand(*memory_shape, device=device)
        tgt = torch.rand(*tgt_shape, requires_grad=True, device=device)
        layer = nn.TransformerDecoderLayer(d_model=512, nhead=8, batch_first=batch_first).to(device)
        decoder = nn.TransformerDecoder(layer, num_layers=6).to(device)
        self._test_module_empty_inputs(decoder, [tgt, memory])
@expectedFailureMeta # RuntimeError: cannot reshape tensor of 0 elements into shape [1, 0, -1]
@onlyNativeDeviceTypes
def test_Transformer_empty(self, device):
    """The full nn.Transformer handles empty src/tgt inputs."""
    for batch_first, src_shape, tgt_shape in [(True, (10, 0, 512), (20, 0, 512))]:
        model = nn.Transformer(nhead=16, num_encoder_layers=12).to(device)
        src = torch.rand(*src_shape, requires_grad=True, device=device)
        tgt = torch.rand(*tgt_shape, requires_grad=True, device=device)
        self._test_module_empty_inputs(model, [src, tgt])
@onlyNativeDeviceTypes
@dtypes(torch.float32, torch.complex64)
def test_ReflectionPad_empty(self, device, dtype):
    """ReflectionPad{1,2,3}d accept zero-batch inputs but reject a zero
    channel dimension with a dimensionality error."""
    cases = [
        (torch.nn.ReflectionPad1d(2), torch.randn(0, 3, 10, device=device, dtype=dtype)),
        (torch.nn.ReflectionPad2d(2), torch.randn(0, 3, 10, 10, device=device, dtype=dtype)),
        (torch.nn.ReflectionPad3d(3), torch.randn(0, 3, 10, 10, 10, device=device, dtype=dtype)),
    ]
    for mod, inp in cases:
        self._test_module_empty_input(mod, inp, check_size=False)

    with self.assertRaisesRegex(RuntimeError, '2D or 3D'):
        torch.nn.ReflectionPad1d(2)(torch.randn(3, 0, 10, device=device, dtype=dtype))
    with self.assertRaisesRegex(RuntimeError, '3D or 4D'):
        torch.nn.ReflectionPad2d(2)(torch.randn(3, 0, 10, 10, device=device, dtype=dtype))
    with self.assertRaisesRegex(RuntimeError, '4D or 5D'):
        torch.nn.ReflectionPad3d(3)(torch.randn(3, 0, 10, 10, 10, device=device, dtype=dtype))
@onlyCUDA # Test if CPU and GPU results match
def test_ReflectionPad2d_large(self, device):
    """Reflection padding over very large channel/batch extents must match the
    CPU result in both forward and backward."""
    pad = (1, 2, 3, 4)
    for shape in ([2, 65736, 6, 6], [65736, 2, 6, 6]):
        x = torch.randn(shape, device=device, requires_grad=True)
        x_cpu = x.detach().cpu().requires_grad_()

        out = F.pad(x, pad, mode='reflect')
        out_cpu = F.pad(x_cpu, pad, mode='reflect')
        self.assertEqual(out, out_cpu)

        g = torch.randn_like(out)
        g_cpu = g.cpu()
        out.backward(g)
        out_cpu.backward(g_cpu)
        self.assertEqual(x.grad, x_cpu.grad)
@onlyNativeDeviceTypes
def test_LocalResponseNorm_empty(self, device):
    """LocalResponseNorm tolerates a zero batch dimension."""
    mod = torch.nn.LocalResponseNorm(2).to(device)
    self._test_module_empty_input(mod, torch.ones(0, 5, 24, 24, device=device), check_size=False)
@onlyCUDA # Test if CPU and GPU results match
def test_ReflectionPad3d_large(self, device):
    """3D reflection padding over large channel/batch extents must match the
    CPU result in both forward and backward."""
    pad = (1, 2, 3, 4, 5, 6)
    for shape in ([2, 1000, 7, 7, 7], [1000, 2, 7, 7, 7]):
        x = torch.randn(shape, device=device, requires_grad=True)
        x_cpu = x.detach().cpu().requires_grad_()

        out = F.pad(x, pad, mode='reflect')
        out_cpu = F.pad(x_cpu, pad, mode='reflect')
        self.assertEqual(out, out_cpu)

        g = torch.randn_like(out)
        g_cpu = g.cpu()
        out.backward(g)
        out_cpu.backward(g_cpu)
        self.assertEqual(x.grad, x_cpu.grad)
@onlyNativeDeviceTypes
@dtypes(torch.float, torch.double)
def test_MarginLoss_empty(self, device, dtype):
    """Margin losses accept zero-batch inputs (yielding zero grads) and reject
    shape-mismatched non-empty targets."""
    cases = [
        (torch.nn.MultiMarginLoss().to(device),
         torch.randn(0, 10, requires_grad=True, device=device, dtype=dtype),
         torch.ones(0, device=device).type(torch.long)),
        (torch.nn.MultiLabelMarginLoss().to(device),
         torch.randn(0, 10, requires_grad=True, device=device, dtype=dtype),
         torch.ones(0, 10, device=device).type(torch.long)),
    ]
    for mod, x, y in cases:
        mod(x, y).sum().backward()
        # Empty inputs: the tensor and its grad are trivially all-zero.
        self.assertEqual(x, torch.zeros_like(x))
        self.assertEqual(x.grad, torch.zeros_like(x))

        # Mismatched input/target shapes must raise.
        with self.assertRaisesRegex(RuntimeError, 'Expected'):
            mod(torch.randn(0, requires_grad=True, device=device, dtype=dtype),
                torch.ones(10, device=device).type(torch.long))

        with self.assertRaisesRegex(RuntimeError, 'Expected'):
            mod(torch.randn(10, 0, requires_grad=True, device=device, dtype=dtype),
                torch.ones(10, 0, device=device).type(torch.long))
@onlyNativeDeviceTypes
@dtypes(torch.float, torch.double)
def test_adaptive_pooling_zero_batch(self, dtype, device):
    """Adaptive average pooling (1d/2d/3d) tolerates a zero batch dimension."""
    for spatial_dims, mod in ((1, torch.nn.AdaptiveAvgPool1d(5)),
                              (2, torch.nn.AdaptiveAvgPool2d((5, 5))),
                              (3, torch.nn.AdaptiveAvgPool3d((5, 5, 5)))):
        # (0, 10), (0, 10, 10), and (0, 10, 10, 10) respectively.
        inp = torch.ones(0, 10, *([10] * (spatial_dims - 1)), dtype=dtype, device=device)
        self._test_module_empty_input(mod.to(device), inp, check_size=False)
@onlyNativeDeviceTypes
def test_FractionalMaxPool2d_zero_batch(self, device):
    """FractionalMaxPool2d accepts a zero batch dim but rejects zero channels."""
    mod = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5))
    self._test_module_empty_input(mod, torch.ones(0, 16, 50, 32, device=device), check_size=False)

    with self.assertRaisesRegex(RuntimeError, "Expected input"):
        mod(torch.randn(1, 0, 50, 32, device=device))
@onlyNativeDeviceTypes
def test_FractionalMaxPool3d_zero_batch(self, device):
    """FractionalMaxPool3d accepts a zero batch dim but rejects zero channels."""
    mod = nn.FractionalMaxPool3d(3, output_ratio=(0.5, 0.5, 0.5)).to(device)
    self._test_module_empty_input(mod, torch.ones(0, 16, 50, 32, 32, device=device), check_size=False)

    with self.assertRaisesRegex(RuntimeError, "Expected input"):
        mod(torch.randn(1, 0, 50, 32, 32, device=device))
@onlyNativeDeviceTypes
def test_FractionalMaxPool2d_zero_out_size(self, device):
    """A requested output size of zero yields an empty output tensor."""
    mod = nn.FractionalMaxPool2d([2, 2], output_size=[0, 1])
    result = mod(torch.rand([16, 50, 32, 32], device=device))
    self.assertEqual(result, torch.empty((16, 50, 0, 1), device=device))
@onlyNativeDeviceTypes
def test_FractionalMaxPool3d_zero_out_size(self, device):
    """A requested output size of zero yields an empty output tensor (the 4D
    input is treated as unbatched here)."""
    mod = nn.FractionalMaxPool3d([3, 2, 2], output_size=[0, 1, 1])
    result = mod(torch.rand([16, 50, 32, 32], device=device))
    self.assertEqual(result, torch.empty((16, 0, 1, 1), device=device))
@onlyNativeDeviceTypes
def test_Unfold_empty(self, device):
    """Unfold accepts a zero batch dim but rejects a zero channel dim."""
    unfold = torch.nn.Unfold(kernel_size=(2, 3)).to(device)
    self._test_module_empty_input(unfold, torch.randn(0, 3, 3, 4, device=device), check_size=False)

    with self.assertRaisesRegex(RuntimeError, 'Expected 3D or 4D'):
        torch.nn.Unfold(kernel_size=(2, 3)).to(device)(torch.randn(3, 0, 3, 4, device=device))
@onlyNativeDeviceTypes
def test_MaxPool_zero_batch_dim(self, device):
    """MaxPool1d/2d/3d accept a zero batch dimension; 2d/3d reject zero channels."""
    pool1d = torch.nn.MaxPool1d(3, stride=2).to(device)
    self._test_module_empty_input(pool1d, torch.randn(0, 16, 50, device=device), check_size=False)
    # 1D is supposed to be okay with 0 numel() inputs so dont test
    # error raising for that case.

    pool2d = torch.nn.MaxPool2d(3, stride=2).to(device)
    self._test_module_empty_input(pool2d, torch.randn(0, 16, 50, 32, device=device), check_size=False)
    with self.assertRaisesRegex(RuntimeError, "Expected"):
        pool2d(torch.randn(1, 0, 50, 32, device=device))

    pool3d = torch.nn.MaxPool3d(3, stride=2).to(device)
    self._test_module_empty_input(pool3d, torch.ones(0, 16, 50, 44, 31, device=device), check_size=False)
    with self.assertRaisesRegex(RuntimeError, "Expected"):
        pool3d(torch.ones(1, 0, 50, 44, 31, device=device))
@onlyNativeDeviceTypes
def test_MaxUnpool_zero_batch_dim(self, device):
    """MaxUnpool1d/2d/3d handle zero-batch inputs: unpooling produces an empty
    (trivially all-zero) result and backward yields zero input grads."""
    # 1D case.
    pool = torch.nn.MaxPool1d(2, stride=2, return_indices=True).to(device)
    unpool = torch.nn.MaxUnpool1d(2, stride=2).to(device)
    inp = torch.randn(0, 10, 10, requires_grad=True, device=device)
    output, indices = pool(inp)
    output.requires_grad_(True)
    unpool_out = unpool(output, indices)
    unpool_out.sum().backward()

    self.assertEqual(inp.grad, torch.zeros_like(inp))
    self.assertEqual(unpool_out, torch.zeros_like(unpool_out))

    # 2D case.
    pool = torch.nn.MaxPool2d(2, stride=2, return_indices=True).to(device)
    unpool = torch.nn.MaxUnpool2d(2, stride=2).to(device)
    inp = torch.randn(0, 10, 10, 10, requires_grad=True, device=device)
    output, indices = pool(inp)
    unpool_out = unpool(output, indices)
    unpool_out.sum().backward()

    self.assertEqual(inp.grad, torch.zeros_like(inp))
    self.assertEqual(unpool_out, torch.zeros_like(unpool_out))

    # 3D case.
    pool = torch.nn.MaxPool3d(2, stride=2, return_indices=True).to(device)
    unpool = torch.nn.MaxUnpool3d(2, stride=2).to(device)
    inp = torch.randn(0, 10, 10, 10, 10, requires_grad=True, device=device)
    output, indices = pool(inp)
    output.requires_grad_(True)
    unpool_out = unpool(output, indices)
    unpool_out.sum().backward()

    self.assertEqual(inp.grad, torch.zeros_like(inp))
    self.assertEqual(unpool_out, torch.zeros_like(unpool_out))
@slowTest
@onlyNativeDeviceTypes
@skipCUDAIfRocm
@parametrize_test("module_name,module_size,output_size,test_index,should_error", [
    subtest(('MaxUnpool2d', (2, 2), (1, 3, 4, 5), -1, True), name='case1'),
    subtest(('MaxUnpool2d', (2, 2), (1, 3, 4, 5), 2 * 2 * 4 * 5, True), name='case2'),
    subtest(('MaxUnpool2d', (2, 2), (1, 3, 4, 5), (2 * 2 * 4 * 5) - 1, False), name='case3'),
    subtest(('MaxUnpool2d', (2, 3), (2, 1, 4, 2), 2 * 3 * 4 * 2, True), name='case4'),
    subtest(('MaxUnpool2d', (2, 3), (2, 1, 4, 2), (2 * 3 * 4 * 2) - 1, False), name='case5'),
    subtest(('MaxUnpool3d', (2, 2, 2), (1, 3, 4, 5), -1, True), name='case6'),
    subtest(('MaxUnpool3d', (2, 2, 2), (1, 3, 4, 5), 2 * 2 * 2 * 3 * 4 * 5, True), name='case7'),
    subtest(('MaxUnpool3d', (2, 2, 2), (1, 3, 4, 5), (2 * 2 * 2 * 3 * 4 * 5) - 1, False), name='case8'),
    subtest(('MaxUnpool3d', (2, 2, 2), (2, 3, 4, 1), 2 * 2 * 2 * 3 * 4 * 1, True), name='case9'),
    subtest(('MaxUnpool3d', (2, 2, 2), (2, 3, 4, 1), (2 * 2 * 2 * 3 * 4 * 1) - 1, False), name='case10'),
])
def test_MaxUnpool_index_errors(self, device, device_or_unused=None, *unused_args, **unused_kwargs) if False else None
@onlyNativeDeviceTypes
def test_AdaptiveMaxPool_zero_batch_dim(self, device):
    """Adaptive max pooling tolerates a zero-sized batch dimension but
    rejects a zero-sized channel dimension for each of 1d/2d/3d."""
    cases = (
        (torch.nn.AdaptiveMaxPool1d, torch.randn, (0, 16, 50), (1, 0, 50)),
        (torch.nn.AdaptiveMaxPool2d, torch.randn, (0, 16, 50, 32), (1, 0, 50, 32)),
        (torch.nn.AdaptiveMaxPool3d, torch.ones, (0, 16, 50, 44, 31), (1, 0, 50, 44, 31)),
    )
    for pool_cls, make_input, empty_batch_shape, empty_channel_shape in cases:
        # Zero batch: forward/backward must succeed.
        good_input = make_input(empty_batch_shape, device=device)
        module = pool_cls(3).to(device)
        self._test_module_empty_input(module, good_input, check_size=False)
        # Zero channels: must raise.
        with self.assertRaisesRegex(RuntimeError, "Expected"):
            module(make_input(empty_channel_shape, device=device))
@onlyCUDA
@dtypes(torch.float, torch.double)
@tf32_on_and_off(0.005)
def test_rnn_fused(self, device, dtype):
    """With cuDNN disabled, fused GRU/LSTM CUDA kernels must match the CPU
    implementation: outputs, hidden states, and all gradients."""
    def copy_rnn(rnn1, rnn2):
        # Copy rnn2's weights into rnn1 so both start from identical parameters.
        for x_layer, y_layer in zip(rnn1.all_weights, rnn2.all_weights):
            for x, y in zip(x_layer, y_layer):
                x.data.copy_(y.data)
    def check_rnn_grads(rnn1, rnn2):
        for x_layer, y_layer in zip(rnn1.all_weights, rnn2.all_weights):
            for x, y in zip(x_layer, y_layer):
                self.assertEqual(x.grad, y.grad, atol=5e-5, rtol=0)
    input_size = 10
    hidden_size = 6
    num_layers = 2
    seq_length = 7
    batch = 6
    input_val = torch.randn(seq_length, batch, input_size, dtype=dtype)
    grad_output = torch.randn(seq_length, batch, hidden_size, dtype=dtype)
    hx_val = torch.randn(num_layers, batch, hidden_size, dtype=dtype)
    grad_hy = torch.randn(num_layers, batch, hidden_size, dtype=dtype)
    # Disable cuDNN so the fused native kernels are the ones exercised.
    with torch.backends.cudnn.flags(enabled=False, allow_tf32=None):
        for module in (nn.GRU, nn.LSTM):
            for bias in (True, False):
                rnn = module(input_size, hidden_size, num_layers, bias=bias).to(dtype)
                rnn_device = module(input_size, hidden_size, num_layers, bias=bias).to(device, dtype)
                copy_rnn(rnn, rnn_device)
                is_lstm = isinstance(rnn, nn.LSTM)
                if is_lstm:
                    # LSTM hidden state is an (h, c) pair.
                    hx = (hx_val.clone().requires_grad_(True),
                          hx_val.clone().add(1).requires_grad_(True))
                    hx_device = (hx_val.clone().to(device).requires_grad_(True),
                                 hx_val.clone().to(device).add(1).requires_grad_(True))
                else:
                    hx = hx_val.clone().requires_grad_(True)
                    hx_device = hx_val.clone().to(device).requires_grad_(True)
                inp = input_val.clone().requires_grad_(True)
                inp_cu = input_val.clone().to(device).requires_grad_(True)
                output1, hy1 = rnn(inp, hx)
                output2, hy2 = rnn_device(inp_cu, hx_device)
                if is_lstm:
                    torch.autograd.backward(
                        [output1, hy1[0], hy1[1]], [grad_output, grad_hy, grad_hy + 1]
                    )
                    torch.autograd.backward(
                        [output2, hy2[0], hy2[1]],
                        [grad_output.to(device), grad_hy.to(device), (grad_hy + 1).to(device)]
                    )
                else:
                    torch.autograd.backward([output1, hy1], [grad_output, grad_hy])
                    torch.autograd.backward([output2, hy2], [grad_output.to(device), grad_hy.to(device)])
                self.assertEqual(output1, output2)
                self.assertEqual(hy1, hy2)
                check_rnn_grads(rnn, rnn_device)
                self.assertEqual(inp.grad, inp_cu.grad)
                if is_lstm:
                    self.assertEqual(hx[0].grad, hx_device[0].grad)
                    self.assertEqual(hx[1].grad, hx_device[1].grad)
                else:
                    self.assertEqual(hx.grad, hx_device.grad)
def test_BatchNorm_empty(self, device):
    """BatchNorm2d on a zero-batch input: forward/backward succeed and the
    running stats and parameter grads keep their no-op values."""
    mod = torch.nn.BatchNorm2d(3).to(device)
    inp = torch.randn(0, 3, 2, 2, device=device)
    self._test_module_empty_input(mod, inp)
    # Also exercise the non-cuDNN CUDA path.
    if self.device_type == 'cuda' and self.has_cudnn():
        with torch.backends.cudnn.flags(enabled=False):
            self._test_module_empty_input(mod, inp)
    # An empty batch must not update running stats or produce nonzero grads.
    self.assertEqual(mod.running_mean, torch.tensor([0., 0, 0], device=device))
    self.assertEqual(mod.running_var, torch.tensor([1., 1, 1], device=device))
    self.assertEqual(mod.weight.grad, torch.tensor([0., 0, 0], device=device))
    self.assertEqual(mod.bias.grad, torch.tensor([0., 0, 0], device=device))
@dtypes(torch.float, torch.cfloat)
def test_conv_empty_channel(self, device, dtype):
    """Conv{1,2,3}d built with in_channels=0 accept zero-channel inputs,
    but reject inputs whose channel count mismatches the weight.

    FIX: the error-path inputs previously omitted ``dtype=dtype`` (unlike
    every other tensor in this test), so for cfloat the mismatch tensor was
    float32; the dtype is now forwarded consistently. The expected
    "Given groups=1, weight" channel-mismatch error is unaffected.
    """
    in_channels = 0
    mod = torch.nn.Conv1d(in_channels, 8, 2, stride=2, dtype=dtype).to(device)
    inp = torch.randn(2, 0, 15, device=device, dtype=dtype)
    self._test_module_empty_input(mod, inp, check_size=False)
    with self.assertRaisesRegex(RuntimeError, "Given groups=1, weight"):
        inp = torch.randn(2, 1, 0, device=device, dtype=dtype)
        mod(inp)
    mod = torch.nn.Conv2d(in_channels, 33, 3, stride=2, dtype=dtype).to(device)
    inp = torch.randn(2, 0, 50, 100, device=device, dtype=dtype)
    self._test_module_empty_input(mod, inp, check_size=False)
    with self.assertRaisesRegex(RuntimeError, "Given groups=1, weight"):
        inp = torch.randn(2, 1, 40, 0, device=device, dtype=dtype)
        mod(inp)
    mod = torch.nn.Conv3d(in_channels, 33, 3, stride=2, dtype=dtype).to(device)
    inp = torch.randn(2, 0, 50, 20, 40, device=device, dtype=dtype)
    self._test_module_empty_input(mod, inp, check_size=False)
    with self.assertRaisesRegex(RuntimeError, "Given groups=1, weight"):
        inp = torch.randn(2, 1, 50, 0, 40, device=device, dtype=dtype)
        mod(inp)
def test_group_conv_empty(self, device):
    """Grouped Conv2d handles a zero-batch input, with and without cuDNN."""
    conv = torch.nn.Conv2d(4, 4, stride=2, kernel_size=3, padding=1, groups=4).to(device)
    empty_batch = torch.randn(0, 4, 4, 4, device=device)
    self._test_module_empty_input(conv, empty_batch, check_size=False)
    # Re-run with cuDNN disabled to exercise the native CUDA path as well.
    if self.device_type == 'cuda' and self.has_cudnn():
        with torch.backends.cudnn.flags(enabled=False):
            self._test_module_empty_input(conv, empty_batch, check_size=False)
def test_group_convTranspose_empty(self, device):
    """Grouped ConvTranspose2d handles a zero-batch input, with and
    without cuDNN."""
    deconv = torch.nn.ConvTranspose2d(4, 4, stride=2, kernel_size=3, padding=1, groups=4).to(device)
    empty_batch = torch.randn(0, 4, 4, 4, device=device)
    self._test_module_empty_input(deconv, empty_batch, check_size=False)
    # Re-run with cuDNN disabled to exercise the native CUDA path as well.
    if self.device_type == 'cuda' and self.has_cudnn():
        with torch.backends.cudnn.flags(enabled=False):
            self._test_module_empty_input(deconv, empty_batch, check_size=False)
def test_convTranspose_empty(self, device):
    """Ungrouped ConvTranspose2d handles a zero-batch input, with and
    without cuDNN."""
    deconv = torch.nn.ConvTranspose2d(4, 4, stride=2, kernel_size=3, padding=1).to(device)
    empty_batch = torch.randn(0, 4, 4, 4, device=device)
    self._test_module_empty_input(deconv, empty_batch, check_size=False)
    # Re-run with cuDNN disabled to exercise the native CUDA path as well.
    if self.device_type == 'cuda' and self.has_cudnn():
        with torch.backends.cudnn.flags(enabled=False):
            self._test_module_empty_input(deconv, empty_batch, check_size=False)
@onlyNativeDeviceTypes
def test_AvgPool2d_empty(self, device):
    """AvgPool2d accepts a zero-sized batch dim (contiguous and
    channels-last) but rejects an input with a zero-sized channel dim."""
    avgpool = torch.nn.AvgPool2d(3, stride=2).to(device)
    inp = torch.randn(0, 16, 20, 32, device=device)
    self._test_module_empty_input(avgpool, inp, check_size=False)
    clast_inp = torch.randn(0, 16, 20, 32, device=device).contiguous(memory_format=torch.channels_last)
    self._test_module_empty_input(avgpool, clast_inp, check_size=False)
    # A zero-sized *channel* dim (batch is non-zero here) must raise.
    with self.assertRaisesRegex(RuntimeError, '3D or 4D'):
        inp = torch.randn(16, 0, 20, 32, device=device)
        avgpool(inp)
@onlyCUDA
@largeTensorTest('16GB')
def test_prelu_backward_32bit_indexing(self, device):
    """PReLU backward on a tensor with numel > 2**31 (forces 64-bit
    indexing in the CUDA kernel); smoke test — only checks no crash."""
    m = torch.nn.PReLU().cuda().half()
    input_ = torch.ones((1024, 1024, 1024, 2), dtype=torch.half, device=device)
    output = m(input_)
    output.backward(input_)
def test_linear_empty(self, device):
    """nn.Linear maps an empty batch (0, in_features) without error."""
    layer = torch.nn.Linear(7, 7).to(device)
    empty_batch = torch.randn(0, 7, device=device)
    self._test_module_empty_input(layer, empty_batch)
def test_one_hot(self, device):
    """F.one_hot: num_classes inference (omitted / -1), explicit
    num_classes, multi-dim and 0-dim inputs, empty inputs, and error cases
    for negative values and invalid num_classes."""
    if self.device_type != 'cuda':  # cuda throws device assert for invalid data
        with self.assertRaises(RuntimeError):
            torch.nn.functional.one_hot(torch.tensor([3, 4, -1, 0], device=device), -1)
        with self.assertRaises(RuntimeError):
            torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device), 3)
    # num_classes omitted -> inferred as max + 1.
    t = torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device))
    expected = torch.tensor([[0, 0, 0, 1, 0],
                             [0, 0, 0, 0, 1],
                             [0, 1, 0, 0, 0],
                             [1, 0, 0, 0, 0]], device=device)
    self.assertEqual(t, expected)
    # num_classes == -1 behaves the same as omitting it.
    t = torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device), -1)
    expected = torch.tensor([[0, 0, 0, 1, 0],
                             [0, 0, 0, 0, 1],
                             [0, 1, 0, 0, 0],
                             [1, 0, 0, 0, 0]], device=device)
    self.assertEqual(t, expected)
    # Explicit num_classes larger than max + 1 pads with zero columns.
    t = torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device), 6)
    expected = torch.tensor([[0, 0, 0, 1, 0, 0],
                             [0, 0, 0, 0, 1, 0],
                             [0, 1, 0, 0, 0, 0],
                             [1, 0, 0, 0, 0, 0]], device=device)
    self.assertEqual(t, expected)
    # Multi-dimensional input: the one-hot dim is appended last.
    t = torch.nn.functional.one_hot(torch.tensor([[3, 4], [1, 0]], device=device))
    expected = torch.tensor([[[0, 0, 0, 1, 0],
                              [0, 0, 0, 0, 1]],
                             [[0, 1, 0, 0, 0],
                              [1, 0, 0, 0, 0]]], device=device)
    self.assertEqual(t, expected)
    # 0-dim input yields a 1-D one-hot vector.
    t = torch.nn.functional.one_hot(torch.tensor(4, device=device))
    expected = torch.tensor([0, 0, 0, 0, 1], device=device)
    self.assertEqual(t, expected)
    # Empty input works with an explicit num_classes ...
    t = torch.nn.functional.one_hot(torch.empty([4, 0], dtype=torch.long, device=device), 100)
    expected = torch.empty([4, 0, 100], dtype=torch.long)
    self.assertEqual(t, expected)
    # ... but num_classes cannot be inferred from an empty input.
    with self.assertRaises(RuntimeError):
        torch.nn.functional.one_hot(torch.empty([4, 0], dtype=torch.long, device=device))
    with self.assertRaises(RuntimeError):
        torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device), -2)
def test_nn_empty(self, device):
    # Simple activation modules must accept empty inputs and return an
    # empty output of the same shape. (The old comment about "scalars from
    # nn.yaml" was copied from test_nn_scalars and did not describe this
    # test; also `(0)` was an int literal, not the intended 1-tuple —
    # behavior is identical since torch.randn(0) has shape (0,).)
    def verify_scalars(input, output):
        self.assertEqual(input.shape, output.shape)
        self.assertEqual(0, output.numel())
    for input_shape in [(0,), (0, 2)]:
        for module in [torch.nn.ELU, torch.nn.Hardtanh, torch.nn.LeakyReLU, torch.nn.LogSigmoid,
                       torch.nn.RReLU, torch.nn.Softshrink, torch.nn.Softplus, torch.nn.Sigmoid,
                       torch.nn.Tanh]:
            input = torch.randn(input_shape, device=device, requires_grad=True)
            m = module()
            output = m(input)
            verify_scalars(input, output)
def test_nn_scalars(self, device):
    # One off tests to ensure scalars from nn.yaml are properly applied
    def verify_scalars(input, output):
        # A 0-dim input must yield a 0-dim output; a non-scalar input keeps
        # a non-scalar shape. Backward must produce a grad of input's shape.
        if input.dim() == 0:
            self.assertEqual((), output.shape)
        else:
            self.assertNotEqual((), output.shape)
        output.sum().backward()
        self.assertEqual(input.shape, input.grad.shape)
    for input_shape in [(5, 6), ()]:
        for module in [torch.nn.ELU, torch.nn.Hardtanh, torch.nn.LeakyReLU, torch.nn.LogSigmoid,
                       torch.nn.RReLU, torch.nn.Softshrink, torch.nn.Softplus, torch.nn.Sigmoid,
                       torch.nn.Tanh]:
            input = torch.randn(input_shape, device=device, requires_grad=True)
            m = module()
            output = m(input)
            verify_scalars(input, output)
def test_nn_scalars_reductions(self, device):
    # One off tests to ensure scalars from nn.yaml are properly applied
    def verify_reduction_scalars(input, reduction, output):
        # 'mean'/'sum' (or a 0-dim input) must give a 0-dim output;
        # reduction='none' on a non-scalar input keeps a non-scalar shape.
        if reduction != 'none' or input.dim() == 0:
            self.assertEqual((), output.shape)
        else:
            self.assertNotEqual((), output.shape)
        output.sum().backward()
        self.assertEqual(input.shape, input.grad.shape)
    for input_shape in [(5, 6), ()]:
        for reduction in ['none', 'mean', 'sum']:
            for module in [torch.nn.BCELoss, torch.nn.L1Loss, torch.nn.MSELoss,
                           torch.nn.SmoothL1Loss, torch.nn.SoftMarginLoss]:
                input = torch.randn(input_shape, device=device, requires_grad=True)
                target = torch.empty(input_shape, device=device).random_(2)
                sigmoid = nn.Sigmoid()
                # NOTE(review): this second randn overwrites `input` above;
                # the first assignment only advances the RNG stream.
                input = torch.randn(input_shape, device=device, requires_grad=True)
                m = module(reduction=reduction)
                output = m(sigmoid(input), target)
                verify_reduction_scalars(input, reduction, output)
# verify that bogus reduction strings are errors
@onlyNativeDeviceTypes
def test_invalid_reduction_strings(self, device):
    """Every loss functional must raise ValueError for an unknown
    `reduction` string and run cleanly with a valid one ('none' here)."""
    input = torch.randn(3, 5, requires_grad=True, device=device)
    cinput = torch.randn(3, 5, requires_grad=True, device=device, dtype=torch.cfloat)
    target = torch.tensor([1, 0, 4], device=device)
    var = torch.ones(size=input.size(), requires_grad=True, device=device)
    for reduction in ['none', 'invalid']:
        def v(fn):
            # Invalid reduction -> must raise ValueError; valid -> must run.
            if reduction == 'invalid':
                self.assertRaises(ValueError, lambda: fn())
            else:
                fn()
        v(lambda: F.nll_loss(input, target, reduction=reduction))
        v(lambda: F.cross_entropy(input, target, reduction=reduction))
        v(lambda: F.multi_margin_loss(input, target, reduction=reduction))
        v(lambda: F.kl_div(input, input, reduction=reduction))
        v(lambda: F.huber_loss(input, input, reduction=reduction))
        v(lambda: F.smooth_l1_loss(input, input, reduction=reduction))
        v(lambda: F.l1_loss(input, input, reduction=reduction))
        v(lambda: F.l1_loss(cinput, cinput, reduction=reduction))
        v(lambda: F.mse_loss(input, input, reduction=reduction))
        v(lambda: F.hinge_embedding_loss(input, input, reduction=reduction))
        v(lambda: F.poisson_nll_loss(input, input, reduction=reduction))
        v(lambda: F.gaussian_nll_loss(input, input, var, reduction=reduction))
        v(lambda: F.binary_cross_entropy(torch.sigmoid(input), input, reduction=reduction))
        v(lambda: F.binary_cross_entropy_with_logits(input, input, reduction=reduction))
        zeros = torch.zeros_like(input).to(torch.int64)
        v(lambda: F.multilabel_soft_margin_loss(input, zeros, reduction=reduction))
        v(lambda: F.multilabel_margin_loss(input, zeros, reduction=reduction))
        v(lambda: F.triplet_margin_loss(input, input, input, reduction=reduction))
        v(lambda: F.triplet_margin_with_distance_loss(input, input, input, reduction=reduction))
        v(lambda: F.margin_ranking_loss(input, input, input.sign(), reduction=reduction))
        v(lambda: F.cosine_embedding_loss(input, input, input[:, 0].sign(), reduction=reduction))
        log_probs = torch.randn(50, 16, 20, requires_grad=True, device=device).log_softmax(2)
        targets = torch.randint(1, 20, (16, 30), dtype=torch.long, device=device)
        input_lengths = torch.full((16,), 50, dtype=torch.long, device=device)
        target_lengths = torch.randint(10, 30, (16,), dtype=torch.long, device=device)
        v(lambda: F.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction=reduction))
        # FIXME: should we allow derivatives on these?
        v(lambda: F.soft_margin_loss(input, input.sign().detach(), reduction=reduction))
@onlyNativeDeviceTypes
def test_smooth_l1_loss_vs_huber_loss(self, device):
    """SmoothL1Loss(beta) and HuberLoss(delta=beta) satisfy
    huber == beta * smooth_l1 elementwise, so they coincide exactly when
    beta == 1 and differ by the factor beta otherwise."""
    def _make_test_tensor(shape, contiguous=True):
        if contiguous:
            test_tensor = torch.randn(shape, device=device)
        else:
            # Select every other element in the innermost dimension to
            # make it non-contiguous.
            doubled_shape = list(shape)
            doubled_shape[-1] *= 2
            test_tensor = torch.randn(doubled_shape, device=device)
            test_tensor = test_tensor[..., ::2]
        return test_tensor
    def _test_smooth_l1_loss_vs_huber_loss_helper(input, target, beta, require_equal):
        for reduction in ['mean', 'sum', 'none']:
            smooth_l1 = torch.nn.SmoothL1Loss(beta=beta, reduction=reduction)
            # beta hyper-parameter is called delta for Huber
            huber = torch.nn.HuberLoss(delta=beta, reduction=reduction)
            smooth_l1_loss = smooth_l1(input, target)
            huber_loss = huber(input, target)
            if require_equal:
                self.assertEqual(smooth_l1_loss, huber_loss)
            else:
                # Huber differs from smooth L1 by exactly a factor of beta,
                # in both the quadratic and the linear regions.
                self.assertEqual(smooth_l1_loss * beta, huber_loss)
    def _test_smooth_l1_loss_vs_huber_loss_multi_input_helper(beta, require_equal):
        # Test the non-vectorized case.
        shape = (2, 2)
        _test_smooth_l1_loss_vs_huber_loss_helper(input=_make_test_tensor(shape),
                                                  target=_make_test_tensor(shape),
                                                  beta=beta,
                                                  require_equal=require_equal)
        # Test the vectorized case (innermost dim > 32).
        shape = (64, 64)
        _test_smooth_l1_loss_vs_huber_loss_helper(input=_make_test_tensor(shape),
                                                  target=_make_test_tensor(shape),
                                                  beta=beta,
                                                  require_equal=require_equal)
        # Test the non-contiguous case.
        _test_smooth_l1_loss_vs_huber_loss_helper(input=_make_test_tensor(shape, contiguous=False),
                                                  target=_make_test_tensor(shape, contiguous=False),
                                                  beta=beta,
                                                  require_equal=require_equal)
    def test_equal_when_beta_is_one():
        _test_smooth_l1_loss_vs_huber_loss_multi_input_helper(beta=1.0, require_equal=True)
    def test_unequal_when_beta_is_less_than_one():
        _test_smooth_l1_loss_vs_huber_loss_multi_input_helper(beta=0.5, require_equal=False)
    def test_unequal_when_beta_is_greater_than_one():
        _test_smooth_l1_loss_vs_huber_loss_multi_input_helper(beta=1.5, require_equal=False)
    test_equal_when_beta_is_one()
    test_unequal_when_beta_is_less_than_one()
    test_unequal_when_beta_is_greater_than_one()
@onlyCPU
def test_smooth_l1_loss_bfloat16(self, device):
    """SmoothL1Loss in bfloat16 matches the float32 reference for values
    and input gradients (compared with relaxed dtype)."""
    def test_dtype(fn, input, target, dtype):
        # Run the same loss in `dtype` and in float32 and compare.
        input = input.detach().clone().to(dtype=dtype).requires_grad_(True)
        input2 = input.detach().clone().float().requires_grad_(True)
        target = target.detach().clone().to(dtype=dtype)
        target2 = target.detach().clone().float()
        out = fn(input, target)
        out.sum().backward()
        out2 = fn(input2, target2)
        out2.sum().backward()
        self.assertEqual(out.dtype, dtype)
        self.assertEqual(input.grad.dtype, dtype)
        self.assertEqual(out, out2, exact_dtype=False)
        self.assertEqual(input.grad, input2.grad, exact_dtype=False)
    def func(device):
        return nn.SmoothL1Loss().to(device=device)
    shapes = [[1, 3, 1, 6], [1, 3, 1, 128], [1, 3, 128, 128]]
    for shape in shapes:
        x = torch.randn(shape, device=device, requires_grad=True)
        t = torch.randn(shape, device=device)
        test_dtype(func(device), x, t, torch.bfloat16)
# We don't want to make propagating NaN a hard requirement on ops, but for
# these easy ones, we should make them do so.
def test_nonlinearity_propagate_nan(self, device):
    """A NaN input must come out as NaN from these simple elementwise
    nonlinearities; ops raising 'not implemented' are tolerated."""
    def test(nonlinearity, *args, **kwargs):
        x = torch.tensor([nan], device=device)
        fn = getattr(F, nonlinearity)
        try:
            self.assertTrue(math.isnan(fn(x, *args, **kwargs).item()))
        except Exception as e:
            # Skip ops without an implementation for this device/dtype.
            if 'not implemented' not in str(e):
                raise
    test('relu')
    test('relu', inplace=True)
    test('relu6')
    test('elu')
    test('selu')
    test('celu')
    test('rrelu')
    test('rrelu', inplace=True)
    test('hardtanh')
    test('tanh')
    test('sigmoid')
    test('logsigmoid')
    test('hardshrink')
    test('tanhshrink')
    test('softsign')
    test('softmin', 0)
    test('softmax', 0)
    test('log_softmax', 0)
    test('leaky_relu', 0.2)
    test('threshold', 3, 2)
    test('threshold', 3, 2, inplace=True)
def test_pooling_shape(self, device):
    ''' Test the output shape calculation for pooling functions '''
    # Checks output shape against expected for 1D, 2D and 3D
    def check(expected_out_shape, sizes, *args, **kwargs):
        for kernel in ['max', 'avg']:
            for i in [1, 2, 3]:
                if hasattr(torch.nn.functional, f'{kernel}_pool{i}d'):
                    op = getattr(torch.nn.functional, f'{kernel}_pool{i}d')
                    # Slice the shared 5-D size/expectation tuples down to
                    # the dimensionality of this pooling op.
                    t = torch.randn(sizes[:i + 2], device=device)
                    self.assertEqual(op(t, *args, **kwargs).shape, expected_out_shape[:i + 2])
    check((1, 1, 3, 3, 4), (1, 1, 5, 6, 7), kernel_size=1, stride=2, padding=0, ceil_mode=True)
    check((1, 1, 2, 3, 3), (1, 1, 3, 4, 5), kernel_size=2, stride=2, padding=1, ceil_mode=False)
    check((1, 1, 2, 3, 3), (1, 1, 3, 4, 5), kernel_size=2, stride=2, padding=1, ceil_mode=True)
    # Test case from issue https://github.com/pytorch/pytorch/issues/45357
    x = torch.randn(1, 1, 6, 7, device=device)
    y = torch.nn.functional.max_pool2d(x, 1, stride=(2, 2), padding=0, ceil_mode=True)
    self.assertEqual(y.size(), (1, 1, 3, 4))
@onlyNativeDeviceTypes # TODO: fix on XLA
def test_adaptive_avg_pool2d_output_size_one(self, device):
    """AdaptiveAvgPool2d((1, 1)) equals a plain mean over H and W for
    contiguous, channels-last and non-contiguous inputs; also checks the
    output strides per memory format."""
    def helper(size, memory_format):
        x = torch.randint(1, 10, size, dtype=torch.float, device=device, requires_grad=True)
        if memory_format == 'non_contiguous':
            # Stride every dim by 2 to force non-contiguity.
            x = x[::2, ::2, ::2, ::2]
        else:
            x = x.to(memory_format=memory_format)
        net = torch.nn.AdaptiveAvgPool2d((1, 1))
        out = net(x)
        ref_out = x.contiguous().mean((-1, -2)).view((x.size(0), x.size(1), 1, 1))
        out.sum().backward()    # make sure it doesn't crash
        self.assertEqual(out, ref_out)
        if memory_format == torch.channels_last:
            self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
            c = out.size(1)
            self.assertEqual(out.stride(), [c, 1, c, c])
        else:
            self.assertTrue(out.is_contiguous())
            c = out.size(1)
            self.assertEqual(out.stride(), [c, 1, 1, 1])
    for mf in (torch.contiguous_format, torch.channels_last, 'non_contiguous'):
        helper((2, 3, 6, 6), mf)
@onlyNativeDeviceTypes
def test_adaptive_avg_pool3d_output_size_one(self, device):
    """AdaptiveAvgPool3d(1) equals a plain mean over the three spatial
    dims; the output must be contiguous with the expected strides."""
    inp = torch.randn((2, 3, 6, 6, 6), dtype=torch.float, device=device, requires_grad=True)
    pooled = torch.nn.AdaptiveAvgPool3d(1)(inp)
    expected = inp.contiguous().mean((-1, -2, -3)).view(pooled.shape)
    pooled.sum().backward()  # make sure it doesn't crash
    self.assertEqual(pooled, expected)
    self.assertTrue(pooled.is_contiguous())
    channels = pooled.size(1)
    self.assertEqual(pooled.stride(), [channels, 1, 1, 1, 1])
@expectedFailureMeta  # Runtime Error not raised for meta
@onlyNativeDeviceTypes
@dtypes(torch.uint8, torch.int8, torch.short, torch.int, torch.long)
def test_adaptive_pooling_no_suppot_input(self, device, dtype):
    # NOTE: "suppot" is a typo for "support"; kept to preserve the test id.
    """Adaptive max/avg pooling must raise 'not implemented' for integer
    dtypes (1d/2d/3d variants)."""
    for numel in (2, 3):
        for pool_type in ('Max', 'Avg'):
            # f-string instead of str.format (idiom; same resulting name).
            cls_name = f'Adaptive{pool_type}Pool{numel}d'
            module_cls = getattr(nn, cls_name)
            output_size = (2,) * numel
            module = module_cls(output_size)
            input = torch.randn((4,) * (numel + 1), device=device).to(dtype)
            with self.assertRaisesRegex(RuntimeError, "not implemented"):
                module(input)
@onlyNativeDeviceTypes
@dtypes(torch.float, torch.double)
@dtypesIfCUDA(torch.half, torch.float, torch.double)
def test_avg_pool2d_nhwc(self, device, dtype):
    """AvgPool2d on channels-last input must match the contiguous
    reference for output and input grad, and keep the channels-last
    layout on its output.

    BUG FIX: `padding` was accepted by the helper but never forwarded to
    the pooling modules (nor used in the grad shape), so the
    `count_include_pad=False, padding=...` subtests silently ran with
    padding=0. It is now passed through and the upstream-grad shape
    accounts for it.
    """
    def helper(n, c, h, w, kernel_size, stride=None,
               count_include_pad=True, divisor_override=None, padding=0):
        if stride is None:
            stride = kernel_size
        input = torch.randn(n, c, h, w, dtype=dtype, device=device)
        input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
        # Output spatial size includes 2*padding on each spatial dim.
        grad = torch.randn(n, c,
                           (h + 2 * padding - kernel_size) // stride + 1,
                           (w + 2 * padding - kernel_size) // stride + 1,
                           dtype=dtype, device=device)
        pool = torch.nn.AvgPool2d(kernel_size, stride=stride, padding=padding,
                                  count_include_pad=count_include_pad,
                                  divisor_override=divisor_override).to(device)
        ref_input = input.detach().clone().contiguous().requires_grad_(True)
        ref_grad = grad.detach().clone().contiguous()
        ref_pool = torch.nn.AvgPool2d(kernel_size, stride=stride, padding=padding,
                                      count_include_pad=count_include_pad,
                                      divisor_override=divisor_override).to(device)
        out = pool(input)
        out.backward(grad)
        ref_out = ref_pool(ref_input)
        ref_out.backward(ref_grad)
        self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(ref_out.is_contiguous())
        self.assertEqual(out, ref_out)
        self.assertEqual(input.grad, ref_input.grad)
    helper(4, 8, 8, 8, 3)
    helper(4, 8, 8, 8, 3, count_include_pad=False, padding=1)
    helper(4, 8, 8, 8, 3, count_include_pad=False, padding=2, stride=2)
    helper(4, 8, 8, 8, 3, divisor_override=42)
    helper(4, 8, 8, 8, 7)
    # ROCm 16GB MI25 hits OOM error. Clear caching allocator prior to running large subtest.
    if TEST_WITH_ROCM and 'cuda' in device:
        torch.cuda.empty_cache()
    helper(200, 512, 28, 28, 2)
    helper(4, 8, 7, 7, 3, stride=1)
    helper(4, 8, 7, 7, 3, padding=2, stride=1)
    helper(10, 512, 31, 31, 3, stride=2)
    helper(1, 129, 8, 8, 3, stride=2)
@onlyCPU
@dtypes(torch.float)
def test_max_pool1d_errors(self, device, dtype):
    """max_pool1d must reject invalid input shapes and pooling arguments
    with the expected error messages."""
    def check(x, args, message):
        model = torch.nn.MaxPool1d(*args)
        with self.assertRaisesRegex(RuntimeError, r'max_pool1d\(\) ' + message):
            model(torch.tensor(x, device=device, dtype=dtype))
    # Pooling args: (kernel_size, stride, padding, dilation, return_indices, ceil_mode)
    check(0, (1,), "Expected 2D or 3D input tensor, but got")
    check([], (1,), "Expected 2D or 3D input tensor, but got")
    check([[]], (1, 0), "stride must be greater than zero, but got 0")
    check([[]], (1, 1, -1), "padding must be non-negative, but got -1")
    check([[]], (1, 1, 2), "padding should be at most half of kernel size, but got padding=2 and kernel_size=1")
    check([[]], (1, 1, 0, 0), "dilation must be greater than zero, but got 0")
    check([[]], (5, 1, 0, 1), "Invalid computed output size: -4")
@onlyCPU
@dtypes(torch.float, torch.double)
def test_max_pool1d_corner_cases(self, device, dtype):
    """max_pool1d corner cases: empty inner dims, windows covering only
    padding (-inf results), and ceil_mode edge behavior."""
    def check(x, args, expected):
        model = torch.nn.MaxPool1d(*args)
        if isinstance(x, list):
            x = torch.tensor(x, device=device, dtype=dtype)
            expected = torch.tensor(expected, device=device, dtype=dtype)
        self.assertEqual(model(x), expected)
    # Pooling args: (kernel_size, stride, padding, dilation, return_indices, ceil_mode)
    check([[]], (1, None, 0, 1, False, False), [[]])
    check([[[]]], (1, None, 0, 1, False, False), [[[]]])
    check([[[]]], (2, 1, 1, 2, False, True), [[[]]])
    check([[1]], (1, None, 0, 1, False, False), [[1]])
    # Windows that cover only padding produce -inf.
    check([[1]], (2, None, 1, 2, False, False), [[float('-inf')]])
    check([[1], [1]], (2, None, 1, 2, False, False), [[float('-inf')], [float('-inf')]])
    check([[1, 2]], (2, 1, 1, 2, False, False), [[2, 1]])
    check([[1, 2]], (2, 2, 1, 2, False, True), [[2, 2]])
    empty_tensor = torch.empty((2, 0, 1), device=device, dtype=dtype)
    check(empty_tensor, (1, None, 0, 1, False, False), empty_tensor)
@onlyCPU
@dtypes(torch.float, torch.double)
def test_max_pool1d(self, device, dtype):
    # FIXME For now compare against max_pool1d with indices
    def check(x, *args, **kwargs):
        # The indices and no-indices code paths must agree on the values.
        model = torch.nn.MaxPool1d(*args, **kwargs)
        ref_model = torch.nn.MaxPool1d(*args, **kwargs, return_indices=True)
        self.assertEqual(model(x), ref_model(x)[0])
    # Randomized sweep over sizes and pooling hyper-parameters.
    sizes = [random.sample(range(8, 128), 3) for _ in range(3)]
    kernel_sizes = random.sample(range(1, 5), 3)
    strides = random.sample(range(1, 5), 3)
    dilations = random.sample(range(1, 5), 3)
    ceil_modes = [True, False]
    for size, kernel_size, stride, dilation, ceil_mode in \
            itertools.product(sizes, kernel_sizes, strides, dilations, ceil_modes):
        # random.sample(..., 1) yields a one-element list, which MaxPool1d
        # accepts as a single-element padding sequence.
        padding = random.sample(range(0, math.floor(kernel_size / 2) + 1), 1)
        check(torch.randn(size, device=device, dtype=dtype),
              kernel_size, stride, padding, dilation, ceil_mode=ceil_mode)
    # Non-contiguous test
    tensor = torch.randn(5, 151, 33, device=device, dtype=dtype)[::2, ::3, ::2]
    check(tensor, 3, 2, 1, 2, ceil_mode=True)
    check(tensor.transpose(1, 2), 3, 2, 1, 2, ceil_mode=True)
@onlyCUDA
def test_max_pool2d(self, device):
    """MaxPool2d on CUDA must match the CPU reference in forward and
    backward, including a huge-channel case.

    FIX: use the `device` fixture instead of a hard-coded 'cuda' string so
    the test runs on the specific CUDA device under test.
    """
    def helper(n, c, h, w, ks):
        x = torch.randn(n, c, h, w, device=device, dtype=torch.float, requires_grad=True)
        ref_x = x.detach().clone().cpu().requires_grad_()
        pool = torch.nn.MaxPool2d(kernel_size=ks)
        y = pool(x)
        ref_y = pool(ref_x)
        y.sum().backward()
        ref_y.sum().backward()
        self.assertEqual(y, ref_y)
        self.assertEqual(x.grad, ref_x.grad)
    helper(2, 8, 4, 4, ks=2)
    helper(1, 100000, 32, 32, ks=4)
    helper(1, 100000, 1, 4, ks=(1, 4))  # test for max_pool1d
@onlyNativeDeviceTypes
@dtypes(torch.float, torch.double)
@dtypesIfCUDA(torch.half, torch.float, torch.double)
def test_max_pool2d_nhwc(self, device, dtype):
    """MaxPool2d on channels-last input must match the contiguous
    reference (output, indices, input grad) and keep the channels-last
    layout on both outputs."""
    def helper(n, c, h, w, kernel_size, stride=None):
        if stride is None:
            stride = kernel_size
        input = torch.randn(n, c, h, w, dtype=dtype, device=device)
        input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
        grad = torch.randn(n, c, (h - kernel_size) // stride + 1, (w - kernel_size) // stride + 1,
                           dtype=dtype, device=device)
        pool = torch.nn.MaxPool2d(kernel_size, stride, return_indices=True).to(device)
        ref_input = input.detach().clone().contiguous().requires_grad_(True)
        ref_grad = grad.detach().clone().contiguous()
        ref_pool = torch.nn.MaxPool2d(kernel_size, stride, return_indices=True).to(device)
        out, ind = pool(input)
        out.backward(grad)
        ref_out, ref_ind = ref_pool(ref_input)
        ref_out.backward(ref_grad)
        self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(ref_out.is_contiguous())
        self.assertTrue(ind.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(ref_ind.is_contiguous())
        self.assertEqual(out, ref_out)
        self.assertEqual(ind, ref_ind)
        self.assertEqual(input.grad, ref_input.grad)
    helper(4, 8, 8, 8, 7)
    helper(200, 512, 28, 28, 2)
    helper(4, 8, 7, 7, 3, stride=1)
    helper(10, 512, 31, 31, 3, stride=2)
    helper(1, 129, 8, 8, 3, stride=2)
@onlyNativeDeviceTypes
@dtypes(torch.half, torch.float, torch.double)
@onlyCUDA
def test_max_pool3d_ndhwc(self, device, dtype):
    """MaxPool3d on channels_last_3d input (batched and unbatched) must
    match the contiguous reference for output, indices and input grad."""
    def helper(n, c, h, w, d, kernel_size, stride=None):
        # n == 0 selects the unbatched path: build a batch of 1, squeeze it.
        batch = n
        if not batch:
            batch = 1
        input = torch.randn(batch, c, d, h, w, dtype=dtype, device=device)
        input = input.contiguous(memory_format=torch.channels_last_3d).requires_grad_()
        if not n:
            input = input.squeeze(0).detach().clone().requires_grad_()
        if isinstance(kernel_size, int):
            kernel_size = [kernel_size] * 3
        if stride is None:
            stride = kernel_size
        elif isinstance(stride, int):
            stride = [stride] * 3
        grad = torch.randn(batch, c,
                           (d - kernel_size[0]) // stride[0] + 1,
                           (h - kernel_size[1]) // stride[1] + 1,
                           (w - kernel_size[2]) // stride[2] + 1,
                           dtype=dtype, device=device)
        grad = grad.contiguous(memory_format=torch.channels_last_3d)
        if not n:
            grad = grad.squeeze(0)
        pool = torch.nn.MaxPool3d(kernel_size, stride, return_indices=True).to(device)
        ref_input = input.detach().clone().contiguous().requires_grad_(True)
        ref_grad = grad.detach().clone().contiguous()
        ref_pool = torch.nn.MaxPool3d(kernel_size, stride, return_indices=True).to(device)
        out, ind = pool(input)
        out.backward(grad)
        ref_out, ref_ind = ref_pool(ref_input)
        ref_out.backward(ref_grad)
        # Unbatched (4D) outputs need a fake batch dim for the
        # channels_last_3d contiguity check.
        if len(out.shape) == 4:
            self.assertTrue(out.unsqueeze(0).is_contiguous(memory_format=torch.channels_last_3d))
        else:
            self.assertTrue(out.is_contiguous(memory_format=torch.channels_last_3d))
        self.assertTrue(ref_out.is_contiguous())
        if len(ind.shape) == 4:
            self.assertTrue(ind.unsqueeze(0).is_contiguous(memory_format=torch.channels_last_3d))
        else:
            self.assertTrue(ind.is_contiguous(memory_format=torch.channels_last_3d))
        self.assertTrue(ref_ind.is_contiguous())
        self.assertEqual(out, ref_out)
        self.assertEqual(ind, ref_ind)
        if dtype == torch.half:
            # Looser tolerances for fp16 gradients.
            self.assertEqual(input.grad, ref_input.grad, atol=0.05, rtol=0.01)
        else:
            self.assertEqual(input.grad, ref_input.grad)
    helper(4, 8, 8, 8, 8, 7)
    helper(4, 8, 8, 8, 8, (5, 6, 7))
    helper(1, 8, 8, 8, 8, (5, 6, 7))
    helper(0, 6, 12, 13, 14, (5, 6, 7))
    helper(4, 8, 7, 7, 7, 3, stride=1)
    helper(10, 128, 19, 19, 19, 3, stride=2)
    helper(10, 128, 19, 19, 19, (1, 2, 3), stride=2)
    helper(1, 128, 19, 19, 19, (1, 2, 3), stride=2)
    helper(0, 128, 19, 19, 19, (1, 2, 3), stride=2)
    helper(1, 79, 4, 4, 4, 3, stride=2)
    helper(0, 79, 4, 4, 4, 3, stride=2)
@onlyCPU
def test_max_pool2d_bfloat16(self, device):
    """MaxPool2d in bfloat16 matches the float32 reference (output,
    indices, input grad) for contiguous and channels-last layouts."""
    def helper(n, c, h, w, kernel_size, stride, memory_format):
        input = torch.randn(n, c, h, w, dtype=torch.float32, device=device).bfloat16()
        input = input.to(memory_format=memory_format).requires_grad_()
        pool = torch.nn.MaxPool2d(kernel_size, stride, return_indices=True).to(device)
        # Float32 reference sharing the same values.
        input2 = input.detach().clone().float().requires_grad_(True)
        out, ind = pool(input)
        out.sum().backward()
        out2, ind2 = pool(input2)
        out2.sum().backward()
        self.assertTrue(out.is_contiguous(memory_format=memory_format))
        self.assertEqual(out.dtype, torch.bfloat16)
        self.assertEqual(input.grad.dtype, torch.bfloat16)
        self.assertEqual(out, out2.bfloat16())
        self.assertEqual(ind, ind2)
        self.assertEqual(input.grad, input2.grad.bfloat16())
    helper(4, 30, 8, 8, 7, 1, torch.contiguous_format)
    helper(4, 65, 8, 8, 7, 1, torch.channels_last)
    helper(1, 19, 20, 10, 8, 2, torch.contiguous_format)
    helper(1, 19, 20, 10, 8, 2, torch.channels_last)
@onlyCUDA
def test_max_pool2d_indices(self, device):
    """CUDA MaxPool2d must return the same output, indices, and input
    gradients as the CPU implementation."""
    def run_case(n, c, h, w, ks):
        # n may be None to exercise the unbatched (3-D input) path
        shape = (c, h, w) if n is None else (n, c, h, w)
        x = torch.randn(*shape, device='cuda', dtype=torch.float, requires_grad=True)
        ref_x = x.detach().clone().cpu().requires_grad_()

        pool = torch.nn.MaxPool2d(kernel_size=ks, return_indices=True)
        y, idx = pool(x)
        ref_y, ref_idx = pool(ref_x)
        y.sum().backward()
        ref_y.sum().backward()

        self.assertEqual(y, ref_y)
        # assertEqual implicitly compares shape for tensors
        self.assertEqual(idx, ref_idx)
        self.assertEqual(x.grad, ref_x.grad)

    run_case(2, 8, 4, 4, ks=2)
    run_case(None, 3, 50, 50, ks=5)
@onlyCPU
def test_avg_pool2d_bfloat16(self, device):
    """AvgPool2d in bfloat16 must match a float32 reference run
    (forward values, grad dtype, and gradients)."""
    def run_case(n, c, h, w, kernel_size, stride, memory_format):
        x_bf16 = torch.randn(n, c, h, w, dtype=torch.float32, device=device).bfloat16()
        x_bf16 = x_bf16.to(memory_format=memory_format).requires_grad_()
        pool = torch.nn.AvgPool2d(kernel_size, stride).to(device)
        # float32 reference sharing the same data
        x_fp32 = x_bf16.detach().clone().float().requires_grad_(True)

        out_bf16 = pool(x_bf16)
        out_bf16.sum().backward()
        out_fp32 = pool(x_fp32)
        out_fp32.sum().backward()

        # memory format must be preserved and dtype must stay bfloat16
        self.assertTrue(out_bf16.is_contiguous(memory_format=memory_format))
        self.assertEqual(out_bf16.dtype, torch.bfloat16)
        self.assertEqual(x_bf16.grad.dtype, torch.bfloat16)
        # forward values and gradients agree with the reference
        self.assertEqual(out_bf16, out_fp32.bfloat16())
        self.assertEqual(x_bf16.grad, x_fp32.grad.bfloat16())

    for case in (
        (4, 30, 8, 8, 7, 1, torch.contiguous_format),
        (4, 65, 8, 8, 7, 1, torch.channels_last),
        (1, 19, 20, 10, 8, 2, torch.contiguous_format),
        (1, 19, 20, 10, 8, 2, torch.channels_last),
    ):
        run_case(*case)
def test_upsamplingNearest1d(self, device):
    """1d nearest upsampling ("nearest" and "nearest-exact"): forward values,
    gradcheck/gradgradcheck, and CPU/CUDA consistency."""
    # Forward AD does not support XLA because XLA tensors don't have storage
    check_forward_ad = torch.device(device).type != 'xla'

    def helper(mode):
        m = nn.Upsample(size=4, mode=mode)
        in_t = torch.ones(1, 1, 2, device=device)
        in_uint8_t = torch.ones(1, 1, 2, dtype=torch.uint8, device=device)
        # swallow interpolation warnings emitted by the forward passes
        with warnings.catch_warnings(record=True) as w:
            out_t = m(in_t)
            out_uint8_t = m(in_uint8_t)
        # upsampling a constant tensor must stay constant (float and uint8 paths)
        self.assertEqual(torch.ones(1, 1, 4, device=device), out_t.data)
        self.assertEqual(torch.ones(1, 1, 4, dtype=torch.uint8, device=device), out_uint8_t.data)

        # Checks upsampling
        input = torch.randn(1, 1, 2, requires_grad=True, device=device)
        gradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_forward_ad=check_forward_ad)
        gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_fwd_over_rev=check_forward_ad)

        # Checks downsampling
        input = torch.randn(1, 1, 20, requires_grad=True, device=device)
        gradcheck(lambda x: F.interpolate(x, 11, mode=mode), [input], check_forward_ad=check_forward_ad)
        gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_fwd_over_rev=check_forward_ad)

        # consistency CUDA/CPU check
        if torch.device(device).type == 'cuda':
            input_cuda = torch.randn(1, 1, 20, device=device)
            input_cpu = input_cuda.cpu()
            output_cuda = F.interpolate(input_cuda, 4, mode=mode)
            output_cpu = F.interpolate(input_cpu, 4, mode=mode)
            self.assertEqual(output_cuda.cpu(), output_cpu)

            output_cuda = F.interpolate(input_cuda, 24, mode=mode)
            output_cpu = F.interpolate(input_cpu, 24, mode=mode)
            self.assertEqual(output_cuda.cpu(), output_cpu)

    helper("nearest")
    helper("nearest-exact")
def test_upsamplingNearest1d_correctness(self, device):
# Here we check if output matches OpenCV's INTER_NEAREST-like result
def helper(isize, osize):
in_t = torch.arange(isize, dtype=torch.float, device=device).unsqueeze(0).unsqueeze(0)
out_t = F.interpolate(
in_t, size=(osize, ), recompute_scale_factor=False, mode="nearest"
)
# compute expected output as OpenCV
expected_out = torch.zeros(osize, dtype=torch.float).unsqueeze(0).unsqueeze(0)
scale = 1.0 * isize / osize
for o in range(osize):
i_f32 = o * scale
i = int(i_f32)
expected_out[0, 0, o] = in_t[0, 0, i]
expected_out = expected_out.to(device=device)
self.assertEqual(out_t, expected_out)
helper(20, 11)
helper(10, 15)
def test_upsamplingNearestExact1d_rescale(self, device):
    """"nearest-exact" with scale factors marginally above 1.0 / 2.0 must behave
    like an exact 1x / 2x resize.

    Checks https://github.com/pytorch/pytorch/issues/62237
    """
    isize = 20
    in_t = torch.arange(isize, dtype=torch.float, device=device).unsqueeze(0).unsqueeze(0)
    # for s in [1.00001, 0.99999]:  # 0.9999 case is broken
    # See issue: https://github.com/pytorch/pytorch/issues/62396
    for s in [1.00001, ]:
        out_t = F.interpolate(
            in_t, scale_factor=s, recompute_scale_factor=False, mode="nearest-exact"
        )
        # a scale of ~1.0 must reproduce the input unchanged
        expected_out = in_t
        self.assertEqual(out_t, expected_out, msg=f"scale: {s}")

    # checks data duplication if output_size == 2 * input_size
    # for s in [2.00001, 1.99999]:  # 1.99999 case is broken
    # See issue: https://github.com/pytorch/pytorch/issues/62396
    for s in [2.00001, ]:
        out_t = F.interpolate(
            in_t, scale_factor=s, recompute_scale_factor=False, mode="nearest-exact"
        )
        # input is [[[0, 1, 2, ..., 19]]]
        # expected out is [[[0, 0, 1, 1, 2, 2, ..., 19, 19]]] (each element doubled)
        expected_out = in_t.repeat_interleave(2, dim=-1)
        self.assertEqual(out_t, expected_out)
def test_upsamplingNearestExact1d_correctness(self, device):
# Here we check if output matches Scikit-Image/Scipy-like result
# Checks https://github.com/pytorch/pytorch/issues/34808
def helper(isize, osize):
in_t = torch.arange(isize, dtype=torch.float, device=device).unsqueeze(0).unsqueeze(0)
out_t = F.interpolate(
in_t, size=(osize, ), recompute_scale_factor=False, mode="nearest-exact"
)
# compute expected output as scikit-image/scipy
expected_out = torch.zeros(osize, dtype=torch.float).unsqueeze(0).unsqueeze(0)
scale = 1.0 * isize / osize
for o in range(osize):
i_f32 = (o + 0.5) * scale
i = int(i_f32)
expected_out[0, 0, o] = in_t[0, 0, i]
expected_out = expected_out.to(device=device)
self.assertEqual(out_t, expected_out)
helper(20, 11)
helper(10, 15)
def test_upsamplingNearest2d(self, device):
    """2d nearest upsampling ("nearest" and "nearest-exact"): forward values,
    memory-format propagation, autograd checks, and CPU/CUDA parity."""
    # Forward AD does not support XLA because XLA tensors don't have storage
    check_forward_ad = torch.device(device).type != 'xla'

    def helper(memory_format, mode):
        in_t = torch.ones(1, 2, 2, 2, device=device).contiguous(memory_format=memory_format)
        in_uint8_t = torch.ones(1, 2, 2, 2, dtype=torch.uint8, device=device).contiguous(memory_format=memory_format)
        # no warnings are expected from these forward passes
        with warnings.catch_warnings(record=True) as w:
            out_t = F.interpolate(in_t, size=4, mode=mode)
            out_uint8_t = F.interpolate(in_uint8_t, size=4, mode=mode)
            self.assertEqual(len(w), 0)
        # upsampling a constant tensor must stay constant (float and uint8 paths)
        self.assertEqual(torch.ones(1, 2, 4, 4, device=device), out_t)
        self.assertEqual(torch.ones(1, 2, 4, 4, dtype=torch.uint8, device=device), out_uint8_t)
        # Assert that memory format is carried through to the output
        self.assertTrue(out_t.is_contiguous(memory_format=memory_format))

        # test forward when input's height is not same as width
        in_t = torch.ones(1, 2, 2, 1, device=device).contiguous(memory_format=memory_format).requires_grad_()
        out_t = F.interpolate(in_t, size=(4, 2), mode=mode)
        self.assertEqual(torch.ones(1, 2, 4, 2, device=device), out_t)
        self.assertTrue(out_t.is_contiguous(memory_format=memory_format))

        out_t.backward(torch.randn_like(out_t))
        self.assertTrue(in_t.grad.is_contiguous(memory_format=memory_format))

        # test backward when input's height is not same as width
        input = torch.ones(1, 2, 2, 1, requires_grad=True, device=device).contiguous(memory_format=memory_format)
        gradcheck(lambda x: F.interpolate(x, size=(4, 2), mode=mode), [input], check_forward_ad=check_forward_ad)
        gradgradcheck(lambda x: F.interpolate(x, size=(4, 2), mode=mode), [input], check_fwd_over_rev=check_forward_ad)

        # size= and scale_factor= specifications of the same resize must agree
        input = torch.randn(1, 2, 2, 2, requires_grad=True, device=device).contiguous(memory_format=memory_format)
        self.assertEqual(
            F.interpolate(input, 4, mode=mode),
            F.interpolate(input, scale_factor=2, mode=mode))
        gradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_forward_ad=check_forward_ad)
        gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_fwd_over_rev=check_forward_ad)

        # Assert that cpu and cuda handle channels_last memory format in the same way
        # https://github.com/pytorch/pytorch/issues/54590
        if torch.device(device).type == 'cuda':
            for shapes, scale_factor in product([
                (2, 2, 3, 4), (2, 3, 4, 5), (3, 1, 2, 2), (1, 5, 3, 2)
            ], [0.5, 1.5, 2]):
                a_cuda = torch.randn(*shapes, device=device).contiguous(memory_format=memory_format).requires_grad_()
                a_cpu = a_cuda.detach().cpu().requires_grad_()

                out_cuda = F.interpolate(a_cuda, scale_factor=scale_factor, mode=mode)
                out_cpu = F.interpolate(a_cpu, scale_factor=scale_factor, mode=mode)
                self.assertEqual(out_cpu.cuda(), out_cuda)

                g_cuda = torch.randn_like(out_cuda)
                g_cpu = g_cuda.cpu()

                out_cuda.backward(g_cuda)
                out_cpu.backward(g_cpu)
                self.assertEqual(a_cuda.grad, a_cpu.grad)

    helper(torch.contiguous_format, "nearest")
    helper(torch.channels_last, "nearest")
    # NOTE(review): dropped the stale "Uncomment below once F.interpolate is
    # updated" comment — the two nearest-exact cases below are already active.
    helper(torch.contiguous_format, "nearest-exact")
    helper(torch.channels_last, "nearest-exact")
def test_upsamplingNearest2d_correctness(self, device):
# Here we check if output matches OpenCV's INTER_NEAREST-like result
def helper(memory_format, isize, osize):
in_t = torch.arange(isize * isize, dtype=torch.float, device=device).reshape(1, 1, isize, isize)
in_t = in_t.contiguous(memory_format=memory_format)
out_t = F.interpolate(
in_t, size=(osize, osize), recompute_scale_factor=False, mode="nearest"
)
# compute expected output as OpenCV
expected_out = torch.zeros(1, 1, osize, osize, dtype=torch.float)
scale = 1.0 * isize / osize
for o1 in range(osize):
i1_f32 = o1 * scale
i1 = int(i1_f32)
for o2 in range(osize):
i2_f32 = o2 * scale
i2 = int(i2_f32)
expected_out[0, 0, o1, o2] = in_t[0, 0, i1, i2]
expected_out = expected_out.to(device=device)
self.assertEqual(out_t, expected_out)
helper(torch.contiguous_format, 20, 11)
helper(torch.channels_last, 20, 11)
helper(torch.contiguous_format, 10, 15)
helper(torch.channels_last, 10, 15)
def test_upsamplingNearestExact2d_correctness(self, device):
# Here we check if output matches Scikit-Image/Scipy-like result
# Checks https://github.com/pytorch/pytorch/issues/34808
def helper(memory_format, isize, osize):
in_t = torch.arange(isize * isize, dtype=torch.float, device=device).reshape(1, 1, isize, isize)
in_t = in_t.contiguous(memory_format=memory_format)
out_t = F.interpolate(
in_t, size=(osize, osize), recompute_scale_factor=False, mode="nearest-exact"
)
# compute expected output as Scikit-Image/Scipy
expected_out = torch.zeros(1, 1, osize, osize, dtype=torch.float)
scale = 1.0 * isize / osize
for o1 in range(osize):
i1_f32 = (o1 + 0.5) * scale
i1 = int(i1_f32)
for o2 in range(osize):
i2_f32 = (o2 + 0.5) * scale
i2 = int(i2_f32)
expected_out[0, 0, o1, o2] = in_t[0, 0, i1, i2]
expected_out = expected_out.to(device=device)
self.assertEqual(out_t, expected_out)
helper(torch.contiguous_format, 20, 11)
helper(torch.channels_last, 20, 11)
helper(torch.contiguous_format, 10, 15)
helper(torch.channels_last, 10, 15)
def test_upsamplingNearest3d(self, device):
    """3d nearest upsampling ("nearest" and "nearest-exact"): forward values,
    memory-format propagation, autograd checks, and CPU/CUDA parity."""
    # Forward AD does not support XLA because XLA tensors don't have storage
    check_forward_ad = torch.device(device).type != 'xla'

    def helper(memory_format, mode):
        m = nn.Upsample(size=4, mode=mode)
        in_t = torch.ones(1, 2, 2, 2, 2, device=device).contiguous(memory_format=memory_format)
        in_uint8_t = torch.ones(
            1, 2, 2, 2, 2, dtype=torch.uint8, device=device
        ).contiguous(memory_format=memory_format)
        # swallow interpolation warnings emitted by the forward passes
        with warnings.catch_warnings(record=True) as w:
            out_t = m(in_t)
            out_uint8_t = m(in_uint8_t)
        # upsampling a constant tensor must stay constant (float and uint8 paths)
        expected_output = torch.ones(1, 2, 4, 4, 4, device=device)
        self.assertEqual(expected_output, out_t)
        self.assertEqual(expected_output.to(torch.uint8), out_uint8_t)
        # Assert that memory format is carried through to the output
        self.assertTrue(out_t.is_contiguous(memory_format=memory_format))

        input = torch.randn(
            1, 2, 2, 2, 2, requires_grad=True, device=device
        ).contiguous(memory_format=memory_format)
        gradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_forward_ad=check_forward_ad)
        gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_fwd_over_rev=check_forward_ad)

        # Assert that cpu and cuda handle channels_last memory format in the same way
        # https://github.com/pytorch/pytorch/issues/54590
        if torch.device(device).type == 'cuda':
            a = torch.ones(
                2, 2, 2, 3, 4, device=device, requires_grad=True
            ).contiguous(memory_format=torch.channels_last_3d)
            # make the data asymmetric; ensure that cuda/cpu handle channels_last appropriately.
            a[1][1][1][2][2] = a[1][1][1][2][3] = 0

            out_cuda = torch.nn.functional.interpolate(a, scale_factor=2, mode=mode)
            out_cpu = torch.nn.functional.interpolate(a.to('cpu'), scale_factor=2, mode=mode)
            self.assertEqual(out_cpu, out_cuda.to('cpu'))

            # autograd checks on both the CPU copy and the CUDA tensor
            gradcheck(lambda x: F.interpolate(x, 4, mode=mode), [a], check_forward_ad=check_forward_ad)
            gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [a], check_fwd_over_rev=check_forward_ad)

            gradcheck(lambda x: F.interpolate(x, 4, mode=mode), [a.to('cuda')], check_forward_ad=check_forward_ad)
            gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [a.to('cuda')], check_fwd_over_rev=check_forward_ad)

    helper(torch.contiguous_format, "nearest")
    helper(torch.channels_last_3d, "nearest")
    helper(torch.contiguous_format, "nearest-exact")
    helper(torch.channels_last_3d, "nearest-exact")
def test_upsamplingNearest3d_correctness(self, device):
# Here we check if output matches OpenCV's INTER_NEAREST-like result
def helper(memory_format, isize, osize):
in_t = torch.arange(isize * isize * isize, dtype=torch.float, device=device)
in_t = in_t.reshape(1, 1, isize, isize, isize)
in_t = in_t.contiguous(memory_format=memory_format)
out_t = F.interpolate(
in_t, size=(osize, osize, osize), recompute_scale_factor=False, mode="nearest"
)
# compute expected output as OpenCV
expected_out = torch.zeros(1, 1, osize, osize, osize, dtype=torch.float)
scale = 1.0 * isize / osize
for o1 in range(osize):
i1_f32 = o1 * scale
i1 = int(i1_f32)
for o2 in range(osize):
i2_f32 = o2 * scale
i2 = int(i2_f32)
for o3 in range(osize):
i3_f32 = o3 * scale
i3 = int(i3_f32)
expected_out[0, 0, o1, o2, o3] = in_t[0, 0, i1, i2, i3]
expected_out = expected_out.to(device=device)
self.assertEqual(out_t, expected_out)
helper(torch.contiguous_format, 20, 11)
helper(torch.channels_last_3d, 20, 11)
helper(torch.contiguous_format, 10, 15)
helper(torch.channels_last_3d, 10, 15)
def test_upsamplingNearestExact3d_correctness(self, device):
# Here we check if output matches Scikit-Image/Scipy-like result
# Checks https://github.com/pytorch/pytorch/issues/34808
def helper(memory_format, isize, osize):
in_t = torch.arange(isize * isize * isize, dtype=torch.float, device=device)
in_t = in_t.reshape(1, 1, isize, isize, isize)
in_t = in_t.contiguous(memory_format=memory_format)
out_t = F.interpolate(
in_t, size=(osize, osize, osize), recompute_scale_factor=False, mode="nearest-exact"
)
# compute expected output as Scikit-Image/Scipy
expected_out = torch.zeros(1, 1, osize, osize, osize, dtype=torch.float)
scale = 1.0 * isize / osize
for o1 in range(osize):
i1_f32 = (o1 + 0.5) * scale
i1 = int(i1_f32)
for o2 in range(osize):
i2_f32 = (o2 + 0.5) * scale
i2 = int(i2_f32)
for o3 in range(osize):
i3_f32 = (o3 + 0.5) * scale
i3 = int(i3_f32)
expected_out[0, 0, o1, o2, o3] = in_t[0, 0, i1, i2, i3]
expected_out = expected_out.to(device=device)
self.assertEqual(out_t, expected_out)
helper(torch.contiguous_format, 20, 11)
helper(torch.channels_last_3d, 20, 11)
helper(torch.contiguous_format, 10, 15)
helper(torch.channels_last_3d, 10, 15)
@parametrize_test("antialias", [True, False])
@parametrize_test("align_corners", [True, False])
def test_upsamplingBilinear2d(self, device, antialias, align_corners):
    """Bilinear interpolation: a constant input stays constant, memory format is
    preserved through forward and backward, gradcheck passes, and CPU/CUDA agree."""
    # Forward AD does not support XLA because XLA tensors don't have storage
    check_forward_ad = torch.device(device).type != 'xla'
    kwargs = dict(mode='bilinear', align_corners=align_corners, antialias=antialias)
    for memory_format in [torch.contiguous_format, torch.channels_last]:
        # test float scale factor up & downsampling
        for scale_factor in [0.5, 1.5, 2]:
            in_t = torch.ones(2, 3, 8, 8, device=device).contiguous(memory_format=memory_format).requires_grad_()
            out_size = int(math.floor(in_t.shape[-1] * scale_factor))
            # swallow any interpolation warnings emitted by the forward pass
            with warnings.catch_warnings(record=True) as w:
                out_t = F.interpolate(in_t, scale_factor=scale_factor, **kwargs)
            # interpolating a constant tensor must stay constant
            self.assertEqual(torch.ones(2, 3, out_size, out_size, device=device), out_t.data)
            # Assert that memory format is carried through to the output
            self.assertTrue(out_t.is_contiguous(memory_format=memory_format))

            out_t.backward(torch.randn_like(out_t))
            self.assertTrue(in_t.grad.is_contiguous(memory_format=memory_format))

            if torch.device(device).type == 'cuda':
                # Bilinear backward is nondeterministic because of atomicAdd usage
                nondet_tol = 1e-5
            else:
                nondet_tol = 0.0
            input = torch.randn(2, 3, 8, 8, device=device).contiguous(memory_format=memory_format).requires_grad_()
            gradcheck(
                lambda x: F.interpolate(x, out_size, **kwargs),
                [input],
                check_forward_ad=check_forward_ad, nondet_tol=nondet_tol
            )
            gradgradcheck(
                lambda x: F.interpolate(x, out_size, **kwargs),
                [input],
                check_fwd_over_rev=check_forward_ad, nondet_tol=nondet_tol
            )

            # Assert that cpu and cuda give same results
            if torch.device(device).type == 'cuda':
                for shapes in [
                    (2, 2, 3, 4), (2, 3, 4, 5), (3, 1, 2, 2), (1, 5, 3, 2)
                ]:
                    a_cuda = torch.randn(
                        *shapes, device=device
                    ).contiguous(memory_format=memory_format).requires_grad_()
                    a_cpu = a_cuda.detach().cpu().requires_grad_()

                    with warnings.catch_warnings(record=True):
                        out_cuda = F.interpolate(a_cuda, scale_factor=scale_factor, **kwargs)
                        out_cpu = F.interpolate(a_cpu, scale_factor=scale_factor, **kwargs)

                    self.assertEqual(out_cpu, out_cuda.cpu())

                    g_cuda = torch.randn_like(out_cuda)
                    g_cpu = g_cuda.cpu()

                    out_cuda.backward(g_cuda)
                    out_cpu.backward(g_cpu)
                    self.assertEqual(a_cuda.grad, a_cpu.grad)
@parametrize_test("memory_format", [torch.contiguous_format, torch.channels_last])
def test_upsamplingBilinear2d_aa_correctness(self, device, memory_format):
    """Antialiased bilinear downsample to 2x2 must match PIL.Image.resize output."""
    src = torch.arange(3 * 8 * 8, dtype=torch.float, device=device).reshape(1, 3, 8, 8)
    src = src.contiguous(memory_format=memory_format)
    # Reference values generated with PIL.Image.resize:
    #   for c in range(3):
    #       a_in = t_in.numpy()[0, c, ...]
    #       pil_in = Image.fromarray(a_in)
    #       pil_out = pil_in.resize((2, 2), resample=Image.LINEAR)
    pil_reference = [
        17.035713, 20.25, 42.75, 45.964287, 81.03572, 84.25,
        106.75, 109.96428, 145.0357, 148.25, 170.75, 173.9643,
    ]
    expected = torch.tensor(pil_reference, device=device, dtype=src.dtype).reshape(1, 3, 2, 2)
    actual = F.interpolate(src, size=(2, 2), mode="bilinear", align_corners=False, antialias=True)
    self.assertEqual(expected, actual)
@parametrize_test("antialias", [True, False])
@parametrize_test("align_corners", [True, False])
def test_upsamplingBicubic2d(self, device, antialias, align_corners):
    """Bicubic interpolation of a constant tensor stays (numerically) constant,
    and the backward pass survives gradcheck."""
    kwargs = dict(mode='bicubic', align_corners=align_corners, antialias=antialias)
    # test float scale factor up & downsampling
    # for scale_factor in [0.5, 1, 1.5, 2]:
    for scale_factor in [2, ]:
        src = torch.ones(2, 3, 8, 8, device=device)
        result = F.interpolate(src, scale_factor=scale_factor, **kwargs)
        out_size = int(math.floor(src.shape[-1] * scale_factor))
        # bicubic may over/undershoot slightly, hence the absolute tolerance
        expected = torch.ones(2, 3, out_size, out_size, device=device)
        self.assertEqual(expected, result, atol=1e-5, rtol=0)

        if torch.device(device).type == 'cuda':
            # Bicubic backward is nondeterministic because of atomicAdd usage
            nondet_tol = 1e-5
        else:
            nondet_tol = 0.0
        probe = torch.ones(2, 3, 8, 8, requires_grad=True, device=device)
        gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [probe], nondet_tol=nondet_tol)
def test_upsamplingBicubic2d_correctness(self, device):
# test output against known input: align_corners=False result must match opencv
in_t = torch.arange(8., device=device).view(1, 2, 2, 2)
expected_out_t = torch.tensor(
[[[[-0.31641, 0.01562, 0.56250, 0.89453],
[0.34766, 0.67969, 1.22656, 1.55859],
[1.44141, 1.77344, 2.32031, 2.65234],
[2.10547, 2.43750, 2.98438, 3.31641]],
[[3.68359, 4.01562, 4.56250, 4.89453],
[4.34766, 4.67969, 5.22656, 5.55859],
[5.44141, 5.77344, 6.32031, 6.65234],
[6.10547, 6.43750, 6.98438, 7.31641]]]], device=device)
out_t = F.interpolate(in_t, scale_factor=2, mode='bicubic', align_corners=False)
torch.set_printoptions(precision=5)
self.assertEqual(out_t, expected_out_t, atol=1e-5, rtol=0)
@parametrize_test("memory_format", [torch.contiguous_format, torch.channels_last])
def test_upsamplingBicubic2d_aa_correctness(self, device, memory_format):
    """Antialiased bicubic downsample to 2x2 must match PIL.Image.resize output."""
    src = torch.arange(3 * 8 * 8, dtype=torch.float, device=device).reshape(1, 3, 8, 8)
    src = src.contiguous(memory_format=memory_format)
    # Reference values generated with PIL.Image.resize:
    #   for c in range(3):
    #       a_in = t_in.numpy()[0, c, ...]
    #       pil_in = Image.fromarray(a_in)
    #       pil_out = pil_in.resize((2, 2), resample=Image.BICUBIC)
    pil_reference = [
        15.1205635, 18.760439, 44.23956, 47.879436, 79.12056, 82.76044,
        108.23956, 111.87944, 143.12057, 146.76044, 172.23956, 175.87943,
    ]
    expected = torch.tensor(pil_reference, device=device, dtype=src.dtype).reshape(1, 3, 2, 2)
    actual = F.interpolate(src, size=(2, 2), mode="bicubic", align_corners=False, antialias=True)
    self.assertEqual(expected, actual)
@dtypes(torch.float, torch.double)
def test_adaptive_pooling_max_nhwc(self, device, dtype):
    """AdaptiveMaxPool2d on channels_last input must match a contiguous
    reference (output values, indices, and input gradients)."""
    def helper(n, c, h, w, output_height, output_width, contig):
        input = torch.randint(1, 10, (n, c, h, w), device=device, dtype=dtype)
        input = input.contiguous(memory_format=torch.channels_last)
        # Bug fix: the gradient must be shaped (n, c, ...) rather than the
        # hard-coded (4, 8, ...) of the original, which silently relied on
        # every caller passing n=4, c=8 (the sibling test_pooling_max_nhwc
        # already uses (n, c, ...)).
        grad = torch.randint(1, 10, (n, c, output_height, output_width), device=device, dtype=dtype)
        grad = grad.contiguous(memory_format=torch.channels_last)
        if not contig:
            # exercise the non-contiguous path by taking every other channel
            input = input[:, ::2, :, :]
            grad = grad[:, ::2, :, :]
        input.requires_grad_(True)
        pool = torch.nn.AdaptiveMaxPool2d((output_height, output_width), return_indices=True).to(device)

        # contiguous reference computation on identical data
        ref_input = input.detach().clone().contiguous().requires_grad_(True)
        ref_grad = grad.detach().clone().contiguous()
        ref_pool = torch.nn.AdaptiveMaxPool2d((output_height, output_width), return_indices=True).to(device)

        out, ind = pool(input)
        out.backward(grad)
        ref_out, ref_ind = ref_pool(ref_input)
        ref_out.backward(ref_grad)

        # channels_last must be preserved for both the output and the indices
        self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(ref_out.is_contiguous())
        self.assertTrue(ind.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(ref_ind.is_contiguous())
        self.assertEqual(out, ref_out)
        self.assertEqual(ind, ref_ind)
        self.assertEqual(input.grad, ref_input.grad)

    for contig in [True, False]:
        helper(4, 8, 10, 10, 7, 7, contig)
        helper(4, 8, 9, 14, 5, 8, contig)
        helper(4, 8, 11, 11, 1, 1, contig)
@dtypes(torch.float, torch.double)
def test_pooling_max_nhwc(self, device, dtype):
    """MaxPool2d on channels_last input must match a contiguous reference
    (output values, indices, and input gradients)."""
    def helper(n, c, h, w, kernel_size, stride, padding, dilation, contig, device):
        # standard floor-mode output-size formula (ceil_mode=False below)
        output_height = math.floor((h + 2 * padding[0] - dilation[0] * (kernel_size[0] - 1) - 1) / stride[0] + 1)
        output_width = math.floor((w + 2 * padding[1] - dilation[1] * (kernel_size[1] - 1) - 1) / stride[1] + 1)

        input = torch.randint(1, 10, (n, c, h, w), device=device, dtype=dtype)
        input = input.contiguous(memory_format=torch.channels_last)
        grad = torch.randint(1, 10, (n, c, output_height, output_width), device=device, dtype=dtype)
        grad = grad.contiguous(memory_format=torch.channels_last)
        if not contig:
            # exercise the non-contiguous path by taking every other channel
            input = input[:, ::2, :, :]
            grad = grad[:, ::2, :, :]
        input.requires_grad_(True)
        # Consistency fix: move the pool under test to `device`, matching
        # ref_pool below and the sibling NHWC tests (MaxPool2d holds no
        # parameters, so this is behaviorally a no-op).
        pool = torch.nn.MaxPool2d(
            kernel_size, stride, padding, dilation, return_indices=True, ceil_mode=False
        ).to(device)

        # contiguous reference computation on identical data
        ref_input = input.detach().clone().contiguous().requires_grad_(True)
        ref_grad = grad.detach().clone().contiguous()
        ref_pool = torch.nn.MaxPool2d(
            kernel_size, stride, padding, dilation, return_indices=True, ceil_mode=False
        ).to(device)

        out, ind = pool(input)
        out.backward(grad)
        ref_out, ref_ind = ref_pool(ref_input)
        ref_out.backward(ref_grad)

        # channels_last must be preserved for both the output and the indices
        self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(ref_out.is_contiguous())
        self.assertTrue(ind.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(ref_ind.is_contiguous())
        self.assertEqual(out, ref_out)
        self.assertEqual(ind, ref_ind)
        self.assertEqual(input.grad, ref_input.grad)

    for contig in [True, False]:
        helper(4, 8, 10, 10, (2, 2), (1, 1), (1, 1), (2, 2), contig, device)
        helper(4, 8, 9, 14, (2, 2), (1, 1), (1, 1), (2, 2), contig, device)
        helper(4, 8, 11, 11, (4, 4), (2, 2), (2, 2), (2, 2), contig, device)
def test_embedding_dense_grad(self, device):
    """Dense embedding weight gradients must pass gradcheck and gradgradcheck."""
    embd = nn.Embedding(20, 20).to(device)
    weight = embd.weight

    # lookup with repeated and distinct indices; closes over `device` directly
    def fn(w):
        idx = torch.tensor([[0, 1, 1, 2], [3, 5, 7, 11]], dtype=torch.long).to(device)
        return torch.nn.functional.embedding(idx, w)

    _assertGradAndGradgradChecks(self, fn, (weight, ))
def test_embedding_scalar_weight_error(self, device):
indices = torch.rand(2, 2, device=device).long()
weights = [
torch.tensor(1.0, device=device),
torch.tensor(1.0, device=device).reshape(1, 1, 1),
]
for weight in weights:
with self.assertRaisesRegex(RuntimeError, "'weight' must be 2-D"):
torch.nn.functional.embedding(indices, weight)
@dtypesIfCUDA(torch.float16, torch.float64)
@dtypes(torch.float64)
def test_embedding_backward(self, device, dtype):
    """Sparse embedding gradients: _indices()/_values() must reflect each lookup,
    accumulate across multiple backwards, and track in-place edits of the index
    tensor between backwards."""
    embedding = nn.Embedding(10, 3, sparse=True)
    tensor = torch.tensor([[7, 1, 3]])
    ones = torch.tensor(1., dtype=dtype).expand(3, 3)
    # the same lookups performed twice: sparse grads concatenate, they don't coalesce
    tensorTwice = tensor.repeat(1, 2)
    onesTwice = torch.cat((ones, ones))

    embedding = embedding.to(dtype=dtype).to(device)
    tensor = tensor.to(device)
    ones = ones.to(device)
    tensorTwice = tensorTwice.to(device)
    onesTwice = onesTwice.to(device)

    # single backward: sparse grad lists each looked-up row once with value 1
    embedding.zero_grad()
    embedding(tensor[0]).sum().backward()
    self.assertEqual(embedding.weight.grad._indices(), tensor)
    self.assertEqual(embedding.weight.grad._values(), ones)

    # two backwards accumulate by concatenation in the sparse layout
    embedding.zero_grad()
    embedding(tensor[0]).sum().backward()
    embedding(tensor[0]).sum().backward()
    self.assertEqual(embedding.weight.grad._indices(), tensorTwice)
    self.assertEqual(embedding.weight.grad._values(), onesTwice)

    # mutating the index tensor between backwards must show up in the grad indices
    embedding.zero_grad()
    embedding(tensor[0]).sum().backward()
    tensor[0, 0] = 8
    embedding(tensor[0]).sum().backward()
    # second half of the expected indices reflects the in-place edit above
    tensorTwice[0, 3] = 8
    self.assertEqual(embedding.weight.grad._indices(), tensorTwice)
    self.assertEqual(embedding.weight.grad._values(), onesTwice)
@dtypesIfCUDA(*((torch.float, torch.double, torch.bfloat16, torch.half)
                if TEST_WITH_ROCM else (torch.float, torch.double, torch.half)))
@dtypes(torch.float32)
def test_embedding_max_norm_backward(self, device, dtype):
    """Backward through embedding with max_norm: each weight row's gradient is
    the number of times its index was looked up."""
    # can't use gradcheck since in place renorm makes analytical gradients
    # different from produced ones
    weight = torch.randn((4, 4), device=device, dtype=dtype) * 2
    weight.requires_grad_()
    idx = torch.tensor([0, 1, 2, 2], device=device)
    nn.functional.embedding(idx, weight, max_norm=1.).sum().backward()

    # row i of the grad is (occurrences of i in idx), broadcast across the row
    counts = torch.tensor([[1., 1., 2., 0.]], device=device, dtype=dtype)
    self.assertEqual(weight.grad, counts.transpose(0, 1).expand(4, 4))
@dtypesIfCUDA(*((torch.float, torch.double, torch.bfloat16, torch.half)
                if TEST_WITH_ROCM else (torch.float, torch.double, torch.half)))
@dtypes(torch.float32)
def test_embedding_max_norm_fwd_AD(self, device, dtype):
    """Forward-mode AD through embedding with max_norm: the JVP of a lookup with
    an all-ones tangent is all ones."""
    if torch.device(device).type == 'xla':
        self.skipTest("forward AD doesn't work on xla")

    # can't use gradcheck since in place renorm makes analytical gradients
    # different from produced ones
    primal = torch.randn((4, 4), device=device, dtype=dtype) * 2
    tangent = torch.ones((4, 4), device=device, dtype=dtype)
    idx = torch.tensor([[0, 1], [2, 2]], device=device)
    with torch.autograd.forward_ad.dual_level():
        dual_weight = torch.autograd.forward_ad.make_dual(primal, tangent)
        result = nn.functional.embedding(idx, dual_weight, max_norm=1.)
        jvp = torch.autograd.forward_ad.unpack_dual(result).tangent
    self.assertEqual(jvp, torch.ones((2, 2, 4), device=device, dtype=dtype))
@dtypesIfCUDA(*((torch.float, torch.double, torch.bfloat16, torch.half)
                if TEST_WITH_ROCM else (torch.float, torch.double, torch.half)))
@dtypes(torch.float32)
def test_embedding_padding_idx(self, device, dtype):
    """padding_idx semantics: the padded row embeds to zeros (dense and sparse),
    supports negative indexing, may be overwritten under no_grad, is
    bounds-checked, and never accumulates gradient."""
    embedding = nn.Embedding(10, 20, padding_idx=0).to(device, dtype)
    input = torch.tensor([[0, 2, 4, 5], [4, 3, 0, 9]], dtype=torch.long).to(device)
    output = embedding(input)
    # positions that looked up padding_idx (0) must embed to the zero vector
    self.assertEqual(output[0][0].sum(), 0)
    self.assertEqual(output[1][2].sum(), 0)

    # same check for the sparse-gradient variant
    embedding = nn.Embedding(10, 20, padding_idx=0, sparse=True).to(device, dtype)
    input = torch.tensor([[0, 2, 4, 5], [4, 3, 0, 9]], dtype=torch.long).to(device)
    output = embedding(input)
    self.assertEqual(output[0][0].sum(), 0)
    self.assertEqual(output[1][2].sum(), 0)

    # negative indexing check for padding_idx
    # padding_idx=-2, num_embeddings=10 ==> index 8 padded
    embedding = nn.Embedding(10, 20, padding_idx=-2).to(device, dtype)
    input = torch.tensor([[0, 2, 8, 5], [4, 8, 0, 9]], dtype=torch.long).to(device)
    output = embedding(input)
    self.assertEqual(output[0][2].sum(), 0)
    self.assertEqual(output[1][1].sum(), 0)

    embedding = nn.Embedding(10, 20, padding_idx=-2, sparse=True).to(device, dtype)
    input = torch.tensor([[0, 2, 8, 5], [4, 8, 0, 9]], dtype=torch.long).to(device)
    output = embedding(input)
    self.assertEqual(output[0][2].sum(), 0)
    self.assertEqual(output[1][1].sum(), 0)

    # change padding vector: the padded row may be overwritten in-place (under
    # no_grad) and the new vector must then be returned for padded lookups
    padding_vector = torch.ones(20, dtype=dtype, device=device)
    embedding = nn.Embedding(10, 20, padding_idx=2, sparse=True).to(device, dtype)
    with torch.no_grad():
        embedding.weight[2] = padding_vector
    input = torch.tensor([0, 2], dtype=torch.long).to(device)
    output = embedding(input)
    self.assertEqual(output[1], padding_vector)

    # out of bounds check for padding_idx
    self.assertRaises(AssertionError, nn.Embedding, num_embeddings=10, embedding_dim=20, padding_idx=25)
    self.assertRaises(AssertionError, nn.Embedding, num_embeddings=10, embedding_dim=20, padding_idx=-25)

    padding_idx = 0
    embedding = nn.Embedding(5, 2, padding_idx=padding_idx).to(device, dtype)
    for n in (1, 2, 1000):  # Need large N to trigger all the methods we have implemented
        for other_indices in ([], [1, 3], [2]):
            indices = torch.tensor(other_indices + [padding_idx] * n, dtype=torch.long).to(device)
            pre = embedding.weight[padding_idx].clone()
            embedding(indices).sum().backward()
            # the padded row's gradient must be zero, so weight + grad leaves it unchanged
            after = (embedding.weight + embedding.weight.grad)[padding_idx]
            embedding.zero_grad()
            self.assertEqual(after, pre)

            # test double backward
            emb_sum = embedding(indices).sum()
            emb_grad = torch.autograd.grad(outputs=emb_sum, inputs=list(embedding.parameters()), retain_graph=True)
            scalar = emb_grad[0].sum() + emb_sum
            scalar.backward()
            # the padded row must also stay untouched through double backward
            after = (embedding.weight + embedding.weight.grad)[padding_idx]
            embedding.zero_grad()
            self.assertEqual(after, pre)
# Check correctness of torch.nn.functional.embedding_bag forward and
# backward functions with padding_idx, given a 1D input separated into bags
# with an offset array. Compare against an equivalent 2D input that uses
# padding indices to fill in the gaps indicated by the offset array
@skipIfTorchDynamo("TorchDynamo fails here for unknown reasons")
@onlyNativeDeviceTypes
@dtypes(torch.float32, torch.float64)
@dtypesIfCUDA(torch.half, torch.bfloat16)
def test_embedding_bag_1D_padding_idx(self, device, dtype):
    """Forward/backward parity between embedding_bag on a 1D indices+offsets
    input and the same bags expressed as a fixed-width 2D input padded with
    padding_idx, across mode/sparse/include_last_offset/allpad combinations.
    Both the outputs and the weight gradients are compared."""
    num_features = 3
    max_indices_per_bag = 10
    num_bags = 10
    num_words = 100

    def gen_1D_indices_offsets(include_last_offset, allpad):
        # Build random 1D indices and the bag-offsets array; when `allpad`
        # every index is 1 so a single padding_idx can cover all entries.
        indices = []
        offsets = []
        cur_offset = 0

        # Make one bag full and one bag empty, for extra coverage
        empty_bag = random.randint(0, num_bags - 1)
        full_bag = empty_bag
        while full_bag == empty_bag:
            full_bag = random.randint(0, num_bags - 1)

        for bag in range(num_bags):
            offsets.append(cur_offset)
            if bag == full_bag:
                bag_size = max_indices_per_bag
            elif bag == empty_bag:
                bag_size = 0
            else:
                bag_size = random.randint(1, max_indices_per_bag - 1)
            indices += [1 if allpad else random.randint(0, num_words - 1) for _ in range(bag_size)]
            cur_offset += bag_size

        # embedding_bag requires first entry of offsets to be 0
        assert offsets[0] == 0

        indices = torch.tensor(indices, device=device)

        if include_last_offset:
            offsets.append(indices.size(0))

        offsets = torch.tensor(offsets, device=device)

        return indices, offsets

    # Convert a 1-D indices-offsets representation into 2-D. Fill any empty
    # indices with padding_idx
    def gen_2D_indices_from_1D(indices_1D, offsets, include_last_offset, padding_idx):
        assert offsets[0] == 0
        if include_last_offset:
            offsets = offsets[:-1]
        indices_2D = torch.empty(num_bags, max_indices_per_bag, device=device, dtype=torch.long)
        for bag in range(num_bags):
            # Determine the start and end position of the bag within indices_1D
            start = offsets[bag]
            end = len(indices_1D) if bag + 1 == num_bags else offsets[bag + 1]
            end = min(len(indices_1D), end)

            # Pull out the bag's indices from indices_1D, and fill any
            # remaining space with padding indices
            indices_in_bag = []
            for item_pos in range(0, max_indices_per_bag):
                if (start + item_pos) < end:
                    indices_in_bag.append(indices_1D[start + item_pos])
                else:
                    indices_in_bag.append(padding_idx)
            indices_2D[bag] = torch.tensor(indices_in_bag, device=device)
        return indices_2D

    test_cases = product(['max', 'mean', 'sum'], [False, True], [False, True], [False, True])

    for mode, sparse, include_last_offset, allpad in test_cases:
        # Max sparse and bfloat16 are not supported
        if mode == 'max':
            if sparse or (dtype == torch.bfloat16):
                continue
        indices_1D, offsets = gen_1D_indices_offsets(include_last_offset, allpad)
        # Try every word that actually occurs as padding_idx, plus no padding
        for padding_idx_1D in list(set(indices_1D.tolist())) + [None]:
            msg = (
                f"mode: '{mode}', sparse: {sparse}, include_last_offset: {include_last_offset}, "
                f"padding_idx_1D: {padding_idx_1D}")

            # If 1D input does not use a padding index, we still need one for the 2D input,
            # so we can add one dummy word to the weights to act as the padded word
            padding_idx_2D = padding_idx_1D if padding_idx_1D is not None else num_words
            num_words_with_padding = num_words if padding_idx_1D is not None else num_words + 1

            indices_2D = gen_2D_indices_from_1D(
                indices_1D,
                offsets,
                include_last_offset,
                padding_idx_2D)

            weights = torch.randn(
                num_words_with_padding,
                num_features,
                dtype=dtype,
                device=device,
                requires_grad=True)
            weights_check = weights.clone().detach().requires_grad_(True)

            bag = torch.nn.functional.embedding_bag(
                indices_1D,
                weights,
                offsets,
                padding_idx=padding_idx_1D,
                mode=mode,
                sparse=sparse,
                include_last_offset=include_last_offset)

            bag_check = torch.nn.functional.embedding_bag(
                indices_2D,
                weights_check,
                padding_idx=padding_idx_2D,
                mode=mode,
                sparse=sparse)
            self.assertEqual(bag, bag_check, msg=msg)

            bag.sum().backward()
            bag_check.sum().backward()

            # Sometimes, half dtype gradients mismatch by a greater amount
            # than other dtypes
            if dtype in [torch.half, torch.bfloat16]:
                atol = 0.01
                rtol = 0.01
            else:
                atol = None
                rtol = None
            self.assertEqual(weights.grad, weights_check.grad, msg=msg, atol=atol, rtol=rtol)
# Check correctness of torch.nn.functional.embedding_bag forward and
# backward functions with padding_idx, given a 2D indices input. Compare
# against torch.nn.functional.embedding followed by a reduction.
@onlyNativeDeviceTypes
@dtypes(torch.float32, torch.float64)
@dtypesIfCUDA(torch.half, torch.bfloat16)
def test_embedding_bag_2D_padding_idx(self, device, dtype):
    """Compare embedding_bag with padding_idx on 2D indices against a pure
    Python reference (F.embedding + masked sum/mean/max), forward and
    backward, for every padding_idx value present in the indices."""
    # Use a Python implementation of embedding_bag with padding_idx support
    # to check torch.nn.functional.embedding_bag correctness
    def embedding_bag_check(indices, weights, mode, sparse, padding_idx):
        assert padding_idx is not None
        embedding = torch.nn.functional.embedding(
            indices,
            weights,
            padding_idx=padding_idx,
            sparse=sparse)

        reduction_dim = indices.dim() - 1

        if mode == 'sum' or mode == 'mean':
            # We must avoid including elements at padding_idx in the
            # sum/mean, so multiply those elements by 0, and multiply
            # all other elements by 1
            per_sample_weights = indices.ne(padding_idx).to(dtype).unsqueeze(-1)
            res = embedding.mul(per_sample_weights).sum(dim=reduction_dim)

            if mode == 'mean':
                weights_sum = per_sample_weights.sum(dim=reduction_dim)
                res = res.div(weights_sum)

        elif mode == 'max':
            # We must avoid allowing elements at padding_idx to be chosen
            # as the max, so set those elements to negative infinity
            res = embedding.masked_fill(
                indices.unsqueeze(-1) == padding_idx, -float('inf')
            ).amax(dim=reduction_dim)

        else:
            raise RuntimeError(f"mode '{mode}' is not available")

        # If a row is all padding, set its corresponding result row to 0.
        # This is needed because the above mean and max mode
        # implementations set these elements to nan and -inf, respectively
        if mode in ['mean', 'max']:
            res = res.masked_fill(
                indices.eq(padding_idx).all(dim=-1).unsqueeze(-1),
                0)

        return res

    num_features = 3
    num_words = 10
    indices_dim1 = 10

    for mode, sparse, allpad, indices_dim0 in product(['max', 'mean', 'sum'], [False, True], [False, True], [1, 10]):
        # Max sparse and bfloat16 are not supported
        if mode == 'max':
            if sparse or (dtype == torch.bfloat16):
                continue

        if allpad:
            indices = torch.empty(indices_dim0, indices_dim1, dtype=torch.long, device=device).fill_(1)
        else:
            indices = torch.randint(0, num_words, (indices_dim0, indices_dim1), device=device)

            if indices_dim0 > 1:
                # Fill one row with duplicate index so we can test with a fully
                # padded row
                duplicate_row = random.randint(0, indices_dim0 - 1)
                indices[duplicate_row] = indices[duplicate_row][0]

        for padding_idx in list(set(indices.flatten(0, -1).tolist())):
            weights = torch.randn(num_words, num_features, dtype=dtype, device=device, requires_grad=True)
            weights_check = weights.clone().detach().requires_grad_(True)

            msg = (
                f"mode: '{mode}', sparse: {sparse}, padding_idx: {padding_idx}, "
                f"allpad: {allpad}, indices.size(): {indices.size()}")

            # Check forward with a Python implementation of padding_idx embedding_bag
            bag_check = embedding_bag_check(
                indices,
                weights_check,
                mode,
                sparse,
                padding_idx)
            bag = torch.nn.functional.embedding_bag(
                indices,
                weights,
                padding_idx=padding_idx,
                mode=mode,
                sparse=sparse)

            self.assertEqual(bag, bag_check, msg=msg)

            bag_check.sum().backward()
            grad_check = weights_check.grad

            bag.sum().backward()
            grad = weights.grad

            # Sometimes, half dtype gradients mismatch by a greater amount
            # than other dtypes
            if dtype in [torch.half, torch.bfloat16]:
                atol = 0.01
                rtol = 0.01
            else:
                atol = None
                rtol = None

            self.assertEqual(grad, grad_check, msg=msg, atol=atol, rtol=rtol)
def _slow_masked_softmax(self, input, mask):
exp = torch.exp(input)
exp = exp * mask
s = exp.sum(dim=3, keepdim=True).expand(exp.size())
return exp / s
def test_masked_softmax(self, device):
    """Compare torch._masked_softmax against a slow exp/normalize reference
    for a BxL key-padding mask expanded to (B, H, L, L), along dims 0 and 3.
    Fully-masked rows are zeroed on both sides before comparing since their
    native result is unspecified."""
    sizes = [(1, 1, 32), (3, 16, 310), (12, 4, 1024), (4, 2, 1200)]
    for (B, num_heads, L) in sizes:
        for dim in [0, 3]:
            input = torch.randn((B, num_heads, L, L))
            mask = torch.randint(0, 2, (B, L))
            mask = mask.reshape(B, 1, 1, L).expand(B, num_heads, L, L).bool()
            mask_type = 1   # BxL => src_key_padding_mask
            if (self.device_type == "cuda"):
                input = input.cuda()
                mask = mask.cuda()
            native_res = torch._masked_softmax(input, mask, dim, mask_type)
            # invert: native masks out True entries, the reference keeps True entries
            mask = ~mask

            def slow_masked_softmax(input, mask):
                # reference: zero masked entries before normalizing along `dim`
                exp = torch.exp(input)
                exp = exp * mask
                s = exp.sum(dim=dim, keepdim=True).expand(exp.size())
                return exp / s

            pt_res = slow_masked_softmax(input, mask)
            # fully-masked rows produce 0/0 = nan in the reference
            pt_res = torch.nan_to_num(pt_res)

            mask_not = mask.logical_not()
            # In result, should only fill the entirely masked out rows since those are non-deterministic (*may* be 0)
            # Converts rows with all True's to False
            mask_out = mask_not.all(dim, keepdim=True).expand(mask_not.shape)
            self.assertEqual(
                pt_res.masked_fill(mask_out, 0),
                native_res.masked_fill(mask_out, 0),
                exact_dtype=True
            )
def _test_masked_softmax_helper(self, input, dim, mask, mask_type):
    """Check torch._masked_softmax against masked_fill(-inf) + _softmax,
    forward and backward, including the dim=None default path, and verify
    gradients are exactly zero at masked positions."""
    input_ref = input.detach().clone().requires_grad_()
    result = torch._masked_softmax(input, mask, dim, mask_type)

    expected = torch._softmax(input_ref.masked_fill(mask, float('-inf')), dim, False)
    grad = torch.randn_like(expected).to(dtype=expected.dtype)

    result.backward(grad)
    expected.backward(grad)

    # Make sure the optional argument works as well
    if dim == input.dim() - 1:
        input_ref_default = input.detach().clone().requires_grad_()
        result_default = torch._masked_softmax(input_ref_default, mask, None, mask_type)
        result_default.backward(grad)
        self.assertEqual(result, result_default)
        self.assertEqual(input.grad, input_ref_default.grad)

    # In result, should only fill the entirely masked out rows since those are non-deterministic (*may* be 0)
    # Converts rows with all True's to False
    mask_out = mask.all(dim, keepdim=True).expand(mask.shape)
    self.assertEqual(result.masked_fill(mask_out, 0), expected.masked_fill(mask_out, 0))

    # reference grads may contain nan where rows were fully masked
    self.assertEqual(input.grad, torch.nan_to_num(input_ref.grad))
    self.assertEqual(input.grad, input.grad.masked_fill(mask, 0.0))
def test_masked_softmax_grad(self, device):
    """Drive the masked-softmax forward/backward helper over several shapes,
    along both the first and last dimensions."""
    mask_type = 1  # BxL => src_key_padding_mask
    for cur_shape in [(1, 1, 32), (3, 16, 310), (12, 4, 1024), (4, 2, 1200)]:
        candidate_dims = [0, len(cur_shape) - 1] if len(cur_shape) > 0 else [0]
        for cur_dim in candidate_dims:
            data = torch.randn(cur_shape, requires_grad=True)
            bool_mask = torch.randint(0, 2, cur_shape).bool()
            if self.device_type == "cuda":
                data = data.cuda().detach().requires_grad_()
                bool_mask = bool_mask.cuda()
            self._test_masked_softmax_helper(data, cur_dim, bool_mask, mask_type)
# In this test, the forward pass is expected to produce nan's because when dim=0, we only have unspecified values
def test_masked_softmax_forward_with_nans(self, device):
    """Masked softmax along dim 0 with an alternating column mask; fully
    masked columns legitimately yield nan in the forward pass."""
    softmax_dim = 0
    mask_type = 1  # BxL => src_key_padding_mask
    for rows, cols in [(4, 5), (50, 100), (1500, 1200)]:
        data = torch.randn((rows, cols), requires_grad=True)
        col_mask = torch.tensor([i % 2 for i in range(cols)]).expand((rows, cols)).bool()
        if self.device_type == "cuda":
            data = data.cuda().detach().requires_grad_()
            col_mask = col_mask.cuda()
        self._test_masked_softmax_helper(data, softmax_dim, col_mask, mask_type)
@onlyCUDA
def test_masked_softmax_transformer_layout(self, device):
    """Masked softmax with a BxL key-padding mask broadcast to (B, H, L, L),
    checked against the slow reference implementation."""
    batch, heads, seq = 211, 16, 42
    scores = torch.randn((batch, heads, seq, seq))
    last_dim = scores.dim() - 1
    key_padding = torch.randint(0, 2, (batch, seq))
    mask_type = 1  # BxL => src_key_padding_mask
    if self.device_type == "cuda":
        scores = scores.cuda()
        key_padding = key_padding.cuda()
    key_padding = key_padding.bool()
    native_res = torch._masked_softmax(scores, key_padding, last_dim, mask_type)
    # broadcast to full attention shape and invert for the reference
    reference_mask = (~key_padding.reshape(batch, 1, 1, seq).expand(batch, heads, seq, seq)).float()
    pt_res = self._slow_masked_softmax(scores, reference_mask)
    self.assertEqual(pt_res, native_res, exact_dtype=True)
@onlyCUDA
def test_masked_softmax_TxT_layout(self, device):
    """Masked softmax with an LxL attention mask broadcast across batch and
    heads, checked against the slow reference implementation."""
    batch, heads, seq = 211, 16, 42
    scores = torch.randn((batch, heads, seq, seq))
    last_dim = scores.dim() - 1
    attn_mask = torch.randint(0, 2, (seq, seq))
    mask_type = 0  # LxL => src_mask
    if self.device_type == "cuda":
        scores = scores.cuda()
        attn_mask = attn_mask.cuda()
    attn_mask = attn_mask.bool()
    native_res = torch._masked_softmax(scores, attn_mask, last_dim, mask_type)
    # broadcast to full attention shape and invert for the reference
    reference_mask = (~attn_mask.expand(batch, heads, seq, seq)).float()
    pt_res = self._slow_masked_softmax(scores, reference_mask)
    self.assertEqual(pt_res, native_res, exact_dtype=True)
@dtypesIfCUDA(torch.half, torch.float)
@dtypes(torch.float)
def test_softmax_results(self, device, dtype):
    """Compare softmax/log_softmax forward, backward, and double backward
    against the CPU reference, across sizes/shifts chosen to hit both the
    vectorized and fallback kernel paths."""
    # Non-even sizes and non-zero shifts test fallback paths in vectorized kernel
    # Note: dim1 > 1024 is needed to exercise the vectorized (non-persistent) path, (16, 30576) is BERT-esque
    sizes = [(0, 10), (32, 20), (10, 0), (31, 20), (32, 21), (31, 23), (32, 1536), (31, 2048), (33, 2049), (16, 30576)]
    shifts = [(0, 0), (1, 0), (0, 1), (1, 1)]
    for fn in [F.softmax, F.log_softmax]:
        for size in sizes:
            for shift in shifts:
                input = torch.rand(size, device=device, dtype=dtype)
                # Note: With the largest tests we can hit upper limit of fp16 when we
                # sum, so scale the input down to stay in a nicer range.
                if dtype == torch.float16:
                    input = input / 100.
                # shifting produces non-zero storage offsets / non-even rows
                input = input[shift[0]:, shift[1]:]
                # Note; Don't want to bprop back through slice op
                input = input.detach().requires_grad_(True)
                ref_input = input.clone().cpu().detach().requires_grad_(True)
                for dim in [0, 1]:
                    ref_output = fn(ref_input, dtype=torch.float, dim=dim)
                    output = fn(input, dtype=torch.float, dim=dim)
                    grad_output = torch.rand(size, device=device, dtype=dtype)
                    grad_output = grad_output[shift[0]:, shift[1]:]
                    ref_grad_output = grad_output.clone().cpu().detach()
                    # create_graph=True so the grad itself can be backpropped
                    grad_input, = torch.autograd.grad(output, input, grad_outputs=(grad_output), create_graph=True)
                    ref_grad_input, = torch.autograd.grad(ref_output, ref_input,
                                                          grad_outputs=(ref_grad_output), create_graph=True)
                    grad_input.sum().backward()
                    ref_grad_input.sum().backward()

                    self.assertEqual(output, ref_output)
                    self.assertEqual(grad_input, ref_grad_input)
                    self.assertEqual(input.grad, ref_input.grad)
@onlyCUDA
@dtypes(torch.float, torch.half)
@largeTensorTest("20GB")
@largeTensorTest("90GB", "cpu")
@precisionOverride({torch.half: 0.001})
def test_softmax_64bit_indexing(self, device, dtype):
    """log_softmax on tensors with more than 2**31 elements must match the
    CPU reference, forward and backward (gh-52715, gh-52716)."""
    def run_test(*shape):
        # NOTE(review): input is created on the literal "cuda" device rather
        # than `device`; fine under @onlyCUDA, but worth confirming.
        x = torch.randn(shape, device="cuda", dtype=torch.float16, requires_grad=True)
        y = F.log_softmax(x, dim=-1, dtype=dtype)
        y.backward(y)
        with torch.no_grad():
            xx = x.cpu().requires_grad_()
        yy = F.log_softmax(xx.float(), dim=-1).to(dtype)
        yy.backward(yy)
        # compare CUDA result and gradient against the CPU reference
        self.assertEqual(y, yy)
        self.assertEqual(x.grad, xx.grad)

    run_test(1100000000, 2)  # Illegal memory access https://github.com/pytorch/pytorch/issues/52715
    run_test(2200000000, 1)  # invalid configuration argument https://github.com/pytorch/pytorch/issues/52716
@dtypes(torch.float)
@dtypesIfCUDA(torch.float, torch.half)
def test_log_softmax_big(self, device, dtype):
    """log_softmax is shift-invariant: adding a large, exactly representable
    constant offset must not change the result."""
    def check(shape):
        # generate a tensor with big numbers that are exactly representable in dtype
        # and sit at a constant offset from a tensor of small numbers; the
        # log_softmax of the small and big tensors should be equal
        small = torch.randint(100, shape, dtype=dtype, device=device)
        shift = 1.5e3 if dtype == torch.half else 1e7
        big = small + shift
        self.assertEqual(F.log_softmax(small, -1), F.log_softmax(big, -1))

    check((16, 4))
    if self.device_type == 'cuda':
        # also exercise the non-persistent softmax kernel
        check((4, 1536))
@onlyCUDA
@largeTensorTest('12GB')
def test_conv_large_nosplit(self, device):
    """Smoke test: convolutions too large for the normal path must route to
    the fallback implementation without crashing. Fallback correctness is
    covered by other tests."""
    dtype = torch.half if self.device_type == 'cuda' else torch.float
    wide_conv = nn.Conv2d(2, 2, 8, 8).to(device).to(dtype)
    wide_conv(torch.randn(1, 2, 1024, 1024 * 1024, dtype=dtype, device=device))
    deep_conv = torch.nn.Conv2d(1, 1024, 1, 1).to(device).to(dtype)
    deep_conv(torch.randn(1, 1, 2048, 1024, dtype=dtype, device=device))
def test_conv_noncontig_weights(self, device):
    """conv{1,2,3}d and conv_transpose{1,2,3}d must accept non-contiguous
    (expanded) weights, grouped and ungrouped, forward and backward."""
    for spatial_dims in (1, 2, 3):
        for grouped in (False, True):
            channels = 3
            groups = 3 if grouped else 1
            weight = torch.randn([3] * spatial_dims, device=device)
            weight = weight.expand([channels, int(channels / groups)] + list(weight.shape))
            weight = weight.detach().requires_grad_()
            data = torch.randn([1, channels] + ([5] * spatial_dims), device=device, requires_grad=True)
            for op_template in ('conv{}d', 'conv_transpose{}d'):
                conv_fn = getattr(F, op_template.format(spatial_dims))
                conv_fn(data, weight, groups=groups).sum().backward()
def test_conv_noncontig_weights_and_bias(self, device):
    """Conv2d must produce identical output with non-contiguous (strided)
    input/weight/bias tensors as with their contiguous copies."""
    # need floats to exercise https://github.com/pytorch/pytorch/issues/16018
    for bias in [True, False]:
        conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                          bias=bias).to(device, torch.float)

        # slicing the last dim of a larger tensor yields non-contiguous views
        input_nc = torch.randn((1, 3, 224, 224, 2), device=device, dtype=torch.float)[:, :, :, :, 1]
        input_c = input_nc.contiguous()

        weight_nc = torch.randn((64, 3, 7, 7, 2), device=device, dtype=torch.float)[:, :, :, :, 1]
        conv1.weight = nn.Parameter(weight_nc)
        weight_c = conv1.weight.contiguous()

        if bias:
            bias_nc = torch.randn((64, 2), device=device, dtype=torch.float)[:, 1]
            conv1.bias = nn.Parameter(bias_nc)
            bias_c = conv1.bias.contiguous()

        out1 = conv1(input_nc)
        # swap in the contiguous copies and re-run
        conv1.weight = nn.Parameter(weight_c)
        if bias:
            conv1.bias = nn.Parameter(bias_c)
        out2 = conv1(input_c)
        self.assertEqual(out1, out2)
def test_save_lstm_compatibility(self, device):
    # Test that saving an LSTM in PyTorch 1.7 and older can still be
    # loaded in newer versions of PyTorch.
    model = nn.LSTM(2, 3)
    x = torch.randn(32, 5, 2)
    expected = model(x)

    # Get a state dict for PyTorch 1.7 LSTM. Before PyTorch 1.8, proj_size
    # didn't exist.
    assert model.proj_size == 0
    # NOTE(review): this aliases (does not copy) the module's __dict__, so
    # the `del` below also mutates `model`; harmless here because `model`
    # is not used again, but worth confirming that is intentional.
    state_dict = model.__dict__
    del state_dict['proj_size']

    # load a model
    loaded_model = nn.LSTM(2, 3)
    loaded_model.__setstate__(state_dict)
    result = loaded_model(x)
    self.assertEqual(result, expected)
@onlyCUDA
@tf32_on_and_off(0.005)
def test_grid_sample_large(self, device):
    """Regression tests for grid_sample with extreme coordinate values
    (gh-35202, gh-24823): forward and backward must not crash and must treat
    huge / non-finite coordinates as out of bounds."""
    def issue_35202():
        # enormous coordinates must sample as out-of-bounds zeros, fwd + bwd
        input_tensor = torch.rand(1, 1, 480, 640, dtype=torch.float, device=device, requires_grad=True)
        coords = torch.tensor([[-10059144, 67680944], [67680944, 67680944]], dtype=torch.float, device=device)
        coords = coords.unsqueeze(0).unsqueeze(0).repeat(1, 1, 1, 1)
        result = torch.nn.functional.grid_sample(input_tensor, coords)
        self.assertEqual(result, torch.tensor([[[[0., 0.]]]], dtype=torch.float, device=device))
        result.backward(torch.ones_like(result))
        torch.cuda.synchronize()
    issue_35202()

    def issue_24823_1(dtype):
        # an inf coordinate must zero exactly the affected output element
        # and contribute no gradient to the corresponding input element
        image = torch.arange(27, 0, -1, dtype=dtype, device=device).view(1, 1, 3, 3, 3)
        image.requires_grad_()
        grid = torch.nn.functional.affine_grid(
            torch.tensor([[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]], dtype=dtype, device=device),
            (1, 1, 3, 3, 3))
        grid[:, 1, 1, 1, 0] = float('inf')
        result = torch.nn.functional.grid_sample(image, grid, padding_mode='zeros')
        self.assertEqual(result, torch.tensor([[[[[27., 26., 25.], [24., 23., 22.], [21., 20., 19.]],
                                                 [[18., 17., 16.], [15., 0., 13.], [12., 11., 10.]],
                                                 [[9., 8., 7.], [6., 5., 4.], [3., 2., 1.]]]]],
                                              device=device, dtype=dtype))
        result.backward(torch.ones_like(result))
        expected_grad = torch.ones_like(image)
        expected_grad[0, 0, 1, 1, 1] = 0
        self.assertEqual(image.grad, expected_grad, atol=0.005, rtol=0)
    issue_24823_1(torch.half)
    issue_24823_1(torch.float)
    issue_24823_1(torch.double)

    def issue_24823_2():
        # extreme affine params push the whole grid out of bounds -> zeros
        param = torch.tensor([[[-1.0e+20, 0.0, 0.0], [0.0, -1.0e+20, 0.0]]], dtype=torch.float, device=device)
        img = torch.zeros((1, 1, 4, 4), dtype=torch.float, device=device, requires_grad=True)
        grid = torch.nn.functional.affine_grid(param, img.size())
        result = torch.nn.functional.grid_sample(img, grid)
        self.assertEqual(result, torch.zeros(1, 1, 4, 4, device=device, dtype=torch.float))
        result.backward(torch.ones_like(result))
        torch.cuda.synchronize()
    issue_24823_2()
@dtypes(torch.float, torch.double)
@largeTensorTest(lambda self, device, dtype:
                 # Compute sum of the large tensor sizes:
                 # (im.numel() + small_image.numel() + small_image.grad.numel() +
                 # large_view.grad.numel()) * sizeof(dtype)
                 32769 * (65536 + 3 * 65536 / 128) *
                 torch.tensor([], dtype=dtype).element_size())
def test_grid_sample_large_index_2d(self, device, dtype):
    """64-bit-indexing checks for 2D grid_sample (gh-41656): sample the image
    corners (must not fault), then compare sampling through a >2**31-element
    strided view against the same op on a contiguous copy, fwd and bwd."""
    # Test 64-bit indexing with grid_sample (gh-41656)
    # Try accessing the corners, there should be no segfault
    coords = torch.tensor([[[-1., -1.],
                            [+1., -1.]],
                           [[-1., +1.],
                            [+1., +1.]]], device=device, dtype=dtype)
    coords = coords.expand(1, 2, 2, 2)
    im = torch.zeros([1, 1, 32769, 65536], device=device, dtype=dtype)
    # FIX: the corner coordinates were previously built but never sampled,
    # so the "no segfault" check above was a no-op. Run it, mirroring the
    # 3D variant of this test.
    result = F.grid_sample(im, coords, align_corners=False)
    self.assertEqual(result, torch.zeros((1, 1, 2, 2), device=device, dtype=dtype))

    # Compare sampling with large strides to the same op on a contiguous tensor
    coords = torch.rand(1, 4, 4, 2, device=device, dtype=dtype)
    large_view = im[..., 127::128]
    small_image = torch.rand_like(large_view)
    large_view[...] = small_image
    large_view.requires_grad, small_image.requires_grad = True, True
    self.assertTrue(
        sum(i * s for i, s in zip(large_view.size(), large_view.stride())) >= 2 ** 31,
        msg="View must use 64-bit indexing")
    for mode, padding_mode, align_corners in itertools.product(
            ('nearest', 'bilinear', 'bicubic'), ('zeros', 'border', 'reflection'), (True, False)):
        a = F.grid_sample(
            small_image, coords, mode=mode,
            padding_mode=padding_mode, align_corners=align_corners)
        a.sum().backward()
        b = F.grid_sample(
            large_view, coords, mode=mode,
            padding_mode=padding_mode, align_corners=align_corners)
        b.sum().backward()
        self.assertEqual(a, b)
        self.assertEqual(small_image.grad, large_view.grad)
        small_image.grad.zero_()
        large_view.grad.zero_()
@dtypes(torch.float, torch.double)
@largeTensorTest(lambda self, device, dtype:
                 # Compute sum of the large tensor sizes:
                 # (im.numel() + small_image.numel() + small_image.grad.numel() +
                 # large_view.grad.numel()) * sizeof(dtype)
                 2 * 32769 * (32768 + 3 * 32768 / 128) *
                 torch.tensor([], dtype=dtype).element_size())
def test_grid_sample_large_index_3d(self, device, dtype):
    """64-bit-indexing checks for 3D grid_sample (gh-41656): sample the
    corners (must not fault), then compare sampling through a >2**31-element
    strided view against the same op on a contiguous copy, fwd and bwd."""
    # Test 64-bit indexing with grid_sample (gh-41656)
    # Try accessing the corners, there should be no segfault
    coords = torch.full((1, 2, 2, 2, 3), 1., device=device, dtype=dtype)
    im = torch.zeros([1, 1, 2, 32769, 32768], device=device, dtype=dtype)
    result = F.grid_sample(im, coords, align_corners=False)
    self.assertEqual(result, torch.zeros((1, 1, 2, 2, 2), device=device, dtype=dtype))

    # Compare sampling with large strides to the same op on a contiguous tensor
    coords = torch.rand(1, 1, 4, 4, 3, device=device, dtype=dtype)
    large_view = im[..., 127::128]
    small_image = torch.rand_like(large_view)
    large_view[...] = small_image
    small_image.requires_grad, large_view.requires_grad = True, True
    self.assertTrue(
        sum(i * s for i, s in zip(large_view.size(), large_view.stride())) >= 2 ** 31,
        msg="View must use 64-bit indexing")
    for mode, padding_mode, align_corners in itertools.product(
            ('nearest', 'bilinear'), ('zeros', 'border', 'reflection'), (True, False)):
        a = F.grid_sample(
            small_image, coords, mode=mode,
            padding_mode=padding_mode, align_corners=align_corners)
        a.sum().backward()
        b = F.grid_sample(
            large_view, coords, mode=mode,
            padding_mode=padding_mode, align_corners=align_corners)
        b.sum().backward()
        self.assertEqual(a, b)
        self.assertEqual(small_image.grad, large_view.grad)
        small_image.grad.zero_()
        large_view.grad.zero_()
@onlyCUDA
@largeTensorTest('12GB')
def test_conv_transposed_large(self, device):
    """ConvTranspose2d on a 4096-sample batch must match per-1024-sample
    chunked results; a loose tolerance is used on CUDA since cuDNN may pick
    FFT-like algorithms."""
    dtype = torch.half if self.device_type == 'cuda' else torch.float
    conv = nn.ConvTranspose2d(1, 1, 1, 1, bias=False).to(device).to(dtype)
    input_large = torch.randn(4096, 1, 512, 1024, dtype=dtype, device=device)
    # forward
    ret = conv(input_large)
    # max abs difference between the full-batch result and each 1024-chunk
    maxdiff0 = (ret.narrow(0, 0, 1024) - conv(input_large.narrow(0, 0, 1024))).abs_().max().item()
    maxdiff1 = (ret.narrow(0, 1024, 1024) - conv(input_large.narrow(0, 1024, 1024))).abs_().max().item()
    maxdiff2 = (ret.narrow(0, 2048, 1024) - conv(input_large.narrow(0, 2048, 1024))).abs_().max().item()
    maxdiff3 = (ret.narrow(0, 3072, 1024) - conv(input_large.narrow(0, 3072, 1024))).abs_().max().item()
    if self.device_type == 'cuda':
        # cuDNN may use algorithms such as FFT that don't guarantee a diff of 0
        self.assertEqual(maxdiff0, 0, atol=2e-3, rtol=1e-5)
        self.assertEqual(maxdiff1, 0, atol=2e-3, rtol=1e-5)
        self.assertEqual(maxdiff2, 0, atol=2e-3, rtol=1e-5)
        self.assertEqual(maxdiff3, 0, atol=2e-3, rtol=1e-5)
    else:
        self.assertEqual(maxdiff0, 0)
        self.assertEqual(maxdiff1, 0)
        self.assertEqual(maxdiff2, 0)
        self.assertEqual(maxdiff3, 0)
@onlyCUDA
@skipCUDAIfRocm
@largeTensorTest('12GB')
def test_conv_large(self, device):
    """Conv2d on a 4097-sample batch: forward must match chunked forward,
    and the weight gradient of a full-batch backward must match the sum of
    chunked backward passes (up to a relative tolerance)."""
    dtype = torch.half if self.device_type == 'cuda' else torch.float
    conv = nn.Conv2d(2, 2, 8, 8, bias=False).to(device).to(dtype)
    input_large = torch.randn(4097, 2, 512, 512, dtype=dtype, device=device)
    # forward
    ret = conv(input_large)
    self.assertEqual(ret[:2048], conv(input_large[:2048]))
    self.assertEqual(ret[2048:4096], conv(input_large[2048:4096]))
    self.assertEqual(ret[4096:], conv(input_large[4096:]))

    # backward
    conv.zero_grad()
    # When computing the backward, we are using `max(dim=1)` to create
    # some sparsity. Without this sparsity, the rounding error would be
    # too large (as large as 1e-5) to satisfy the criterion (1e-6) of `assertEqual`
    ret.view(4097, -1).max(dim=1).values.sum().backward()
    del ret
    grad1 = conv.weight.grad.detach().clone()
    conv.zero_grad()
    # grads accumulate across the three chunked backward passes
    conv(input_large[:2048]).view(2048, -1).max(dim=1).values.sum().backward()
    conv(input_large[2048:4096]).view(2048, -1).max(dim=1).values.sum().backward()
    conv(input_large[4096:]).view(1, -1).max(dim=1).values.sum().backward()
    grad2 = conv.weight.grad.detach().clone()
    # gradients are at the order of hundreds, we need to scale it to
    # the order of one so that we can compare
    scale = 1 / grad2.abs().mean()
    grad1 = grad1 * scale
    grad2 = grad2 * scale
    self.assertEqual(grad1, grad2, atol=5e-2, rtol=5e-3)
def _test_gumbel_softmax_st_shapes(self, device, dtype, shape, dim, count_expected):
    """Hard gumbel-softmax draws must be non-negative, shape-preserving,
    and one-hot along `dim` (so the total equals `count_expected`)."""
    scores = torch.randn(shape, dtype=torch.float, device=device).to(dtype)
    sample = F.gumbel_softmax(scores, hard=True, dim=dim)
    # no negative entries
    self.assertGreaterEqual(sample.min(), 0)
    # output shape mirrors the input shape
    self.assertTrue(sample.shape == scores.shape)
    # exactly one choice per draw along `dim`
    self.assertEqual(sample.sum(), count_expected, atol=torch.finfo(sample.dtype).eps, rtol=0)
def _test_gumbel_softmax_straight_through(self, device, dtype):
    """Draw many hard gumbel-softmax samples; each draw must be one-hot and
    the empirical counts must match the softmax probabilities within an
    approximate 99% confidence band."""
    num_draws = 100
    logits = torch.tensor([[0.2, 0.8, 0.1]], device=device)
    logits = logits.reshape([1, 3])
    logits = logits.to(dtype).requires_grad_()
    probs = logits.softmax(dim=-1)

    counts = torch.zeros_like(logits)
    for _ in range(num_draws):
        y_draw = F.gumbel_softmax(logits, hard=True)
        counts = counts + y_draw

    # All values positive
    self.assertGreaterEqual(y_draw.min(), 0)
    # Each experiment should result in 1 draw.
    self.assertEqual(counts.sum(), num_draws, atol=torch.finfo(counts.dtype).eps, rtol=0)

    # check results is asymptotically as expected.
    expected = probs * num_draws
    # ~z is approximately N(0,1) for unbiased count
    z = (counts - expected) / (expected * (1 - probs)).sqrt()
    # A (lazy) approximate 99% two-sided test:
    # occurs with prob alpha~>=0.01 if unbiased
    self.assertLess(z.abs().max().item(), 2.58)
def _test_gumbel_softmax_grad(self, device, dtype):
    """With identical RNG state, hard and soft gumbel-softmax must propagate
    the same gradient to the logits (straight-through estimator)."""
    soft_logits = torch.zeros(10, 10, dtype=dtype, device=device, requires_grad=True)
    hard_logits = torch.zeros(10, 10, dtype=dtype, device=device, requires_grad=True)
    rng_state = torch.random.get_rng_state()
    soft_sample = F.gumbel_softmax(soft_logits, hard=False)
    # replay the same gumbel noise for the hard draw
    torch.random.set_rng_state(rng_state)
    hard_sample = F.gumbel_softmax(hard_logits, hard=True)
    soft_sample.sum().backward()
    hard_sample.sum().backward()
    # 2eps tolerance: one addition plus one subtraction of rounding error
    tol = 2 * torch.finfo(dtype).eps
    self.assertEqual(soft_logits.grad, hard_logits.grad, atol=tol, rtol=0)
@skipIfMps
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_gumbel_softmax(self, device, dtype):
    """Drive the gumbel-softmax shape, straight-through distribution, and
    gradient checks over several shapes and reduction dims."""
    shape_cases = [
        ([5], 0, 1),
        ([5], -1, 1),
        ([5, 4], 1, 5),
        ([5, 4, 3], 1, 5 * 3),
        ([5, 4, 3], -1, 5 * 4),
    ]
    for shape, dim, count in shape_cases:
        self._test_gumbel_softmax_st_shapes(device, dtype, shape=shape, dim=dim, count_expected=count)
    self._test_gumbel_softmax_straight_through(device, dtype)
    self._test_gumbel_softmax_grad(device, dtype)
def _test_rnn_retain_variables(self, device, dtype):
    """backward(retain_graph=True) on LSTM/GRU/RNN must produce identical
    input and parameter gradients on repeated calls."""
    rnns = [nn.LSTM(10, 20, num_layers=2).to(device, dtype),
            nn.GRU(10, 20, num_layers=2).to(device, dtype),
            nn.RNN(10, 20, num_layers=2).to(device, dtype)]
    for rnn in rnns:
        input = torch.randn(5, 6, 10, device=device, dtype=dtype, requires_grad=True)
        output = rnn(input)
        output[0].sum().backward(retain_graph=True)
        # snapshot the gradients from the first backward pass
        grads = [input.grad.data.clone()] + [p.grad.data.clone() for p in rnn.parameters()]
        for _ in range(4):
            rnn.zero_grad()
            input.grad.data.zero_()
            output[0].sum().backward(retain_graph=True)
            grads2 = [input.grad.data] + [p.grad.data for p in rnn.parameters()]
            self.assertEqual(grads, grads2)
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.double)
def test_rnn_retain_variables(self, device, dtype):
    """Run the retain-graph RNN check; on CUDA with cuDNN available, run it
    again with cuDNN disabled to also cover the native kernels."""
    self._test_rnn_retain_variables(device, dtype)

    if self.device_type == 'cuda' and self.has_cudnn():
        with torch.backends.cudnn.flags(enabled=False):
            self._test_rnn_retain_variables(device, dtype)
@onlyCUDA
@dtypes(torch.double)
def test_lstmcell_backward_only_one_output_grad(self, device, dtype):
    """Backward through only one of LSTMCell's two outputs must still
    produce a nonzero input gradient (undefined grads for the other output
    must not break the backward pass; see #11872)."""
    cell = torch.nn.LSTMCell(2, 3).to(device).to(dtype=dtype)
    inp = torch.randn(1, 2, device=device, dtype=dtype, requires_grad=True)
    for output_idx in range(2):
        chosen_output = cell(inp)[output_idx]
        chosen_output.sum().backward()
        self.assertFalse(inp.grad is None or inp.grad.abs().sum().item() == 0)
def _test_rnn_mod(self, mod, inp):
    """gradcheck/gradgradcheck an RNN module with cuDNN disabled, and on
    CUDA (non-ROCm) verify the cuDNN double-backward path raises its
    "disable the CuDNN backend" error."""
    def flatten_out(mod, inp):
        # flatten (output, hidden-state tuple) into a flat tuple of tensors
        out = mod(inp)
        return tuple([t if isinstance(t, torch.Tensor) else tt for t in out for tt in t])
    gradcheckfunc = partial(flatten_out, mod)
    with torch.backends.cudnn.flags(enabled=False):
        gradcheck(gradcheckfunc, inp, check_batched_grad=False)
        gradgradcheck(gradcheckfunc, inp, check_batched_grad=False)

    if inp.is_cuda and not TEST_WITH_ROCM:
        # Assert that we have good error message around unsupported CuDNN double backward
        # NB: we trigger double backward using .backward() instead of autograd.grad due to
        # https://github.com/pytorch/pytorch/issues/37874
        with torch.backends.cudnn.flags(enabled=True):
            result = gradcheckfunc(inp)
            result[0].sum().backward(create_graph=True)
            grad0 = next(mod.parameters()).grad
            with self.assertRaisesRegex(RuntimeError,
                                        "please disable the CuDNN backend temporarily"):
                grad0.sum().backward()

            # Here we avoid the backward(create_graph=True) memory leak
            # described in https://github.com/pytorch/pytorch/issues/7343
            for param in mod.parameters():
                param.grad = None
            inp.grad = None
# Merge into OpInfo?
@skipMeta  # LSTM cell reuses output which was resized
@dtypes(torch.double)
def test_LSTM_grad_and_gradgrad(self, device, dtype):
    """gradcheck/gradgradcheck an LSTM module, with and without bias."""
    hidden = 4
    sample = torch.rand(1, 3, hidden, device=device, dtype=dtype, requires_grad=True)
    for use_bias in (True, False):
        rnn = torch.nn.LSTM(hidden, hidden, bias=use_bias).to(device).to(dtype)
        self._test_rnn_mod(rnn, sample)
@skipMeta  # GRU cell reuses output which was resized
@dtypes(torch.double)
def test_GRU_grad_and_gradgrad(self, device, dtype):
    """gradcheck/gradgradcheck a GRU module, with and without bias."""
    hidden = 4
    sample = torch.rand(1, 3, hidden, device=device, dtype=dtype, requires_grad=True)
    for use_bias in (True, False):
        rnn = torch.nn.GRU(hidden, hidden, bias=use_bias).to(device).to(dtype)
        self._test_rnn_mod(rnn, sample)
@onlyCUDA
def test_upsamplingNearest1d_launch_config(self, device):
    """Nearest upsampling on a huge 1D batch must match the CPU result
    (exercises CUDA launch-configuration limits)."""
    upsample = nn.Upsample(scale_factor=2)
    gpu_in = torch.rand(2**25, 1, 1, device=device)
    gpu_out = upsample(gpu_in)
    cpu_in = gpu_in.cpu()
    cpu_out = upsample(cpu_in)
    self.assertEqual(cpu_out, gpu_out)
@onlyCUDA
def test_upsamplingNearest2d_launch_config(self, device):
    """Nearest upsampling on a huge 2D batch must match the CPU result
    (exercises CUDA launch-configuration limits)."""
    upsample = nn.Upsample(scale_factor=2)
    gpu_in = torch.rand(2**25, 1, 1, 1, device=device)
    gpu_out = upsample(gpu_in)
    cpu_in = gpu_in.cpu()
    cpu_out = upsample(cpu_in)
    self.assertEqual(cpu_out, gpu_out)
@onlyCUDA
def test_upsamplingNearest3d_launch_config(self, device):
    """Nearest upsampling on a huge 3D batch must match the CPU result
    (exercises CUDA launch-configuration limits)."""
    upsample = nn.Upsample(scale_factor=2)
    gpu_in = torch.rand(2**25, 1, 1, 1, 1, device=device)
    gpu_out = upsample(gpu_in)
    cpu_in = gpu_in.cpu()
    cpu_out = upsample(cpu_in)
    self.assertEqual(cpu_out, gpu_out)
@unittest.expectedFailure
@skipIfRocm
@onlyCUDA
def test_upsamplingNearest2d_launch_fail(self, device):
    """Expected-failure probe of the CUDA launch-grid limit."""
    upsample = nn.Upsample(scale_factor=2)
    # launch grid_y == 2**16 (larger than maximum y-dimension limit 65535)
    oversized = torch.rand(1, 1, 2**15, 2**8, device=device)
    upsample(oversized)
@onlyCUDA
@skipCUDAIfNotRocm
def test_upsamplingNearest2d_launch_rocm(self, device):
    # test_upsamplingNearest2d_launch_fail should run OK on ROCm
    upsample = nn.Upsample(scale_factor=2)
    big_input = torch.rand(1, 1, 2**15, 2**8, device=device)
    upsample(big_input)
@onlyCUDA
@skipCUDAIfCudnnVersionLessThan(7600)
def test_CTCLoss_cudnn(self, device):
    """Compare the cuDNN CTC loss (int targets) against the native CUDA
    implementation (long targets) and a Python reference, including the
    gradients, with zero_infinity both on and off."""
    def _helper(zero_infinity):
        target_lengths = [30, 25, 20]
        input_lengths = [50, 50, 50]
        # int (not long) CPU targets select the cuDNN CTC path
        targets = torch.randint(1, 15, (sum(target_lengths),), dtype=torch.int)
        log_probs = torch.randn(50, 3, 15, dtype=torch.float, device=device).log_softmax(2).requires_grad_()
        log_probs_ref = log_probs.detach().clone().requires_grad_()

        with torch.backends.cudnn.flags(enabled=True):
            res = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, zero_infinity=zero_infinity)
            res.backward()

        expected = ctcloss_reference(log_probs, targets.cuda(), input_lengths, target_lengths).float()

        with torch.backends.cudnn.flags(enabled=False):
            res2 = torch.nn.functional.ctc_loss(log_probs_ref, targets.cuda().long(), input_lengths, target_lengths,
                                                zero_infinity=zero_infinity)
            res2.backward()

        self.assertEqual(res, expected)
        self.assertEqual(res2, res)
        self.assertEqual(log_probs.grad, log_probs_ref.grad)

    _helper(zero_infinity=True)
    _helper(zero_infinity=False)
def _CTCLoss_gen_losses(self, device, input_length, vocab_size, target_length, reduction, use_module_form):
    """Compute CTC losses over identical data in batched and unbatched form.

    Returns ``(losses, losses_no_bd, log_probs_refs, log_probs_no_bd_refs)``;
    the ``*_refs`` lists hold the leaf log-prob clones whose ``.grad`` has been
    populated by the backward calls below. The index positions in the returned
    lists are relied on by test_CTCLoss_no_batch_dim.
    """
    batch_size = 1
    log_probs = torch.randn(input_length, batch_size, vocab_size, dtype=torch.float, device=device) \
        .log_softmax(2).requires_grad_()
    targets = torch.randint(low=1, high=vocab_size - 1, size=(batch_size, target_length),
                            dtype=torch.int, device=device)
    input_lengths = batch_size * [input_length]
    target_lengths = batch_size * [target_length]

    # unbatched variants of the same data (no N dimension)
    log_probs_no_bd = log_probs.squeeze(1).detach().clone().requires_grad_()
    targets_no_bd = targets.squeeze(0).detach().clone()
    input_lengths_no_bd = torch.tensor(input_length)
    target_lengths_no_bd = torch.tensor(target_length)

    # currently only length 2 and 1 right now, but left flexible for additional potential cases
    log_probs_refs = [log_probs.detach().clone().requires_grad_() for _ in range(2)]
    log_probs_no_bd_refs = [log_probs_no_bd.detach().clone().requires_grad_() for _ in range(1)]

    losses = []
    losses_no_bd = []

    has_cuda = torch.cuda.is_available()
    has_cudnn = has_cuda and 'cuda' in device and self.has_cudnn()
    # cudnn requires a cpu target
    if has_cuda and has_cudnn:
        targets = targets.cpu()
        targets_no_bd = targets_no_bd.cpu()

    ctc_loss = (
        nn.CTCLoss(reduction=reduction, zero_infinity=True)
        if use_module_form
        else partial(torch.nn.functional.ctc_loss, reduction=reduction, zero_infinity=True)
    )

    with torch.backends.cudnn.flags(enabled=has_cudnn):
        # batched case. log_probs.shape = (T, N, C), targets = (N, S), input_lengths/target_lengths = (N,)
        losses.append(ctc_loss(log_probs_refs[0], targets, input_lengths, target_lengths))
        # batched case. input.shape = (T, N, C), targets = (S,), input_lengths/target_lengths = (N,)
        losses.append(ctc_loss(log_probs_refs[1], targets_no_bd, input_lengths, target_lengths))
        # unbatched case. input.shape = (T, C), targets = (S,), input_lengths/target_lengths = (N,)
        losses_no_bd.append(ctc_loss(log_probs_no_bd_refs[0], targets_no_bd,
                                     input_lengths_no_bd, target_lengths_no_bd))
        for loss in losses + losses_no_bd:
            loss.backward()

    return losses, losses_no_bd, log_probs_refs, log_probs_no_bd_refs
def _assertEqual_list(self, expected, list_to_compare, atol=None, rtol=None):
for ele in list_to_compare:
self.assertEqual(expected, ele, atol=atol, rtol=rtol)
@parametrize_test("reduction", ['none', 'mean', 'sum'])
@parametrize_test("use_module_form", [True, False])
def test_CTCLoss_no_batch_dim(self, device, reduction, use_module_form):
    """CTC loss must produce identical values/grads with and without the batch dim."""
    input_length = 40
    vocab_size = 3
    target_length = 12

    # losses[0] is the (N, S)-target batched run; losses[1] uses the (S,) target;
    # losses_no_bd is the fully unbatched run (see _CTCLoss_gen_losses)
    args = self._CTCLoss_gen_losses(device, input_length, vocab_size, target_length, reduction, use_module_form)
    losses, losses_no_bd, log_probs_refs, log_probs_no_bd_refs = args

    # test output values
    self._assertEqual_list(losses[0], losses[1:], atol=1e-4, rtol=0)
    self._assertEqual_list(losses[0].squeeze(0), losses_no_bd, atol=1e-4, rtol=0)

    # test gradient values
    self._assertEqual_list(log_probs_refs[0].grad, [t.grad for t in log_probs_refs[1:]], atol=1e-4, rtol=0)
    self._assertEqual_list(
        log_probs_refs[0].grad.squeeze(1),
        [t.grad for t in log_probs_no_bd_refs],
        atol=1e-4,
        rtol=0,
    )

    # checking the output's shape
    # batch dim case should be (N,). no batch dim case should be ()
    self._assertEqual_list((1,) if reduction == 'none' else (), [loss.shape for loss in losses])
    self._assertEqual_list((), [loss.shape for loss in losses_no_bd])

    # checking the gradient's shape
    # batch dim case should have shape (T, N, C). no batch dim case should have shape (T, C)
    self._assertEqual_list((input_length, 1, vocab_size), [t.grad.shape for t in log_probs_refs])
    self._assertEqual_list((input_length, vocab_size), [t.grad.shape for t in log_probs_no_bd_refs])
@onlyCUDA
@skipCUDAIfNoCudnn
def test_contig_wrong_stride_cudnn(self, device):
    """Convs must accept a contiguous tensor with an unusual dim-0 stride.

    The input has batch_size 1 so contiguity holds regardless of stride[0].
    """
    x = torch.randn(1, 16, 5, 5, device=device)
    odd_strides = list(x.stride())
    odd_strides[0] = 20
    # change the stride in dimension 0; still contiguous because size[0] is 1
    x.set_(x.storage(), 0, x.size(), odd_strides)
    self.assertTrue(x.is_contiguous())
    F.conv_transpose2d(x, torch.randn(16, 1, 1, 1, device=device))
    F.conv2d(x, torch.randn(1, 16, 1, 1, device=device))
@onlyCUDA
def test_Conv2d_size_1_kernel(self, device):
    """1x1 Conv2d on GPU (cudnn disabled) must match the CPU output and grads."""
    inp_cpu = torch.randn(2, 3, 5, 5)
    conv_ref = torch.nn.Conv2d(3, 3, kernel_size=1)
    out_ref = conv_ref(inp_cpu)
    grad = torch.rand_like(out_ref)
    out_ref.backward(grad)
    with cudnn.flags(enabled=False):
        conv_dev = torch.nn.Conv2d(3, 3, kernel_size=1).to(device)
        conv_dev.bias.data.copy_(conv_ref.bias.data)
        conv_dev.weight.data.copy_(conv_ref.weight.data)
        out_dev = conv_dev(inp_cpu.to(device))
        out_dev.backward(grad.to(device))
    self.assertEqual(out_ref, out_dev, atol=1e-5, rtol=0, exact_device=False)
    self.assertEqual(conv_ref.bias.grad.data, conv_dev.bias.grad.data, atol=1e-5, rtol=0, exact_device=False)
    self.assertEqual(conv_ref.weight.grad.data, conv_dev.weight.grad.data, atol=1e-5, rtol=0, exact_device=False)
@onlyCUDA
def test_ConvTranspose2d_size_1_kernel(self, device):
    """1x1 ConvTranspose2d on GPU (cudnn disabled) must match the CPU output and grads."""
    inp_cpu = torch.randn(2, 3, 5, 5)
    conv_ref = torch.nn.ConvTranspose2d(3, 3, kernel_size=1)
    out_ref = conv_ref(inp_cpu)
    grad = torch.rand_like(out_ref)
    out_ref.backward(grad)
    with cudnn.flags(enabled=False):
        conv_dev = torch.nn.ConvTranspose2d(3, 3, kernel_size=1).to(device)
        conv_dev.bias.data.copy_(conv_ref.bias.data)
        conv_dev.weight.data.copy_(conv_ref.weight.data)
        out_dev = conv_dev(inp_cpu.to(device))
        out_dev.backward(grad.to(device))
    self.assertEqual(out_ref, out_dev, atol=1e-5, rtol=0, exact_device=False)
    self.assertEqual(conv_ref.bias.grad.data, conv_dev.bias.grad.data, atol=1e-5, rtol=0, exact_device=False)
    self.assertEqual(conv_ref.weight.grad.data, conv_dev.weight.grad.data, atol=1e-5, rtol=0, exact_device=False)
@onlyCUDA
def test_ConvTranspose3d_size_1_kernel(self, device):
    """1x1x1 ConvTranspose3d on GPU (cudnn disabled) must match the CPU output and grads."""
    inp_cpu = torch.randn(2, 3, 3, 5, 5)
    conv_ref = torch.nn.ConvTranspose3d(3, 3, kernel_size=1)
    out_ref = conv_ref(inp_cpu)
    grad = torch.rand_like(out_ref)
    out_ref.backward(grad)
    with cudnn.flags(enabled=False):
        conv_dev = torch.nn.ConvTranspose3d(3, 3, kernel_size=1).to(device)
        conv_dev.bias.data.copy_(conv_ref.bias.data)
        conv_dev.weight.data.copy_(conv_ref.weight.data)
        out_dev = conv_dev(inp_cpu.to(device))
        out_dev.backward(grad.to(device))
    self.assertEqual(out_ref, out_dev, atol=1e-5, rtol=0, exact_device=False)
    self.assertEqual(conv_ref.bias.grad.data, conv_dev.bias.grad.data, atol=1e-5, rtol=0, exact_device=False)
    self.assertEqual(conv_ref.weight.grad.data, conv_dev.weight.grad.data, atol=1e-5, rtol=0, exact_device=False)
def _ordered_sequence(self, device, dtype):
"""Create ordered list of random sequences"""
seqs = [torch.empty(random.randint(1, 6), device=device, dtype=dtype)
for _ in range(5)]
seqs = [s.random_(-128, 128) for s in seqs]
ordered = sorted(seqs, key=len, reverse=True)
return ordered
def _padded_sequence(self, device, dtype):
    """Pad the ordered random sequences into one tensor; also return their lengths."""
    ordered = self._ordered_sequence(device, dtype)
    lengths = [len(seq) for seq in ordered]
    return rnn_utils.pad_sequence(ordered), lengths
@onlyCUDA
def test_device_mask(self, device):
    """A PackedSequence built on CPU must move to CUDA and unpack there, keeping dtype."""
    for enforce_sorted in (True, False):
        seq_padded, seq_lens = self._padded_sequence('cpu', torch.float)
        packed_seq = rnn_utils.pack_padded_sequence(
            seq_padded, seq_lens, enforce_sorted=enforce_sorted)
        self.assertFalse(packed_seq.is_cuda)
        packed_seq = packed_seq.to(device)
        self.assertTrue(packed_seq.is_cuda)
        unpacked_seq, _ = rnn_utils.pad_packed_sequence(packed_seq)
        self.assertTrue(unpacked_seq.is_cuda)
        self.assertEqual(unpacked_seq.dtype, torch.float)
@onlyCUDA
def test_overwrite_module_params_on_conversion_cpu_device(self, device):
    """Parameter views across Module.to(device) under both settings of the
    torch.__future__.overwrite_module_params_on_conversion flag.

    The flag is process-global, so the True case is wrapped in try/finally
    to restore the default even if an assertion fails.
    """
    # Test that under the current default settings
    # (`torch.__future__.get_overwrite_module_params_on_conversion() == False`),
    # a view to a module's parameters is not pointing to the same storage as
    # its base variable after converting the module to a different device.
    m = nn.Linear(20, 10)
    mw = m.weight[:]
    m.to(device)
    with torch.no_grad():
        # Without using `torch.no_grad()`, this will leak CUDA memory.
        # (Issue is filed at https://github.com/pytorch/pytorch/issues/21875)
        mw[0][0] = 5
        self.assertTrue(mw[0][0].device.type == "cpu")
        self.assertTrue(mw._base[0][0].device.type == "cuda")

    try:
        torch.__future__.set_overwrite_module_params_on_conversion(True)

        # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
        # a view to a module's parameters is still pointing to the same storage as
        # its base variable after converting the module to a different device.
        m = nn.Linear(20, 10)
        mw = m.weight[:]
        m.to(device)
        with torch.no_grad():
            mw[0][0] = 5
        self.assertTrue(mw[0][0] == mw._base[0][0])

        # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
        # `cpu_module.to("cuda")` doesn't preserve previous references to
        # `cpu_module`'s parameters or gradients.
        m = nn.Linear(20, 10)
        m.weight.grad = torch.randn(10, 20)
        weight_ref = m.weight
        weight_grad_ref = m.weight.grad
        m.to(device)
        self.assertNotEqual(weight_ref.device, m.weight.device)
        self.assertNotEqual(weight_grad_ref.device, m.weight.grad.device)
    finally:
        torch.__future__.set_overwrite_module_params_on_conversion(False)
@onlyCUDA
@dtypes(*((torch.float, torch.double, torch.bfloat16, torch.half)
          if TEST_WITH_ROCM else (torch.float, torch.double, torch.half)))
def test_embedding_max_norm_device(self, device, dtype):
    """With max_norm set, repeated indices yield identical rows and every row norm <= 1."""
    emb = nn.Embedding(22, 5, max_norm=1.0).to(device, dtype=dtype)
    # nn.Embedding only takes LongTensor as input
    idx = torch.tensor([2, 8, 8, 6], device=device, dtype=torch.long)
    out = emb(idx)
    self.assertEqual(out[1], out[2])
    self.assertTrue(out.data.norm(p=2, dim=1).le(1).all())
@onlyCUDA
@dtypes(torch.half, torch.float)
def test_softmax(self, device, dtype):
    """softmax(dtype=float) on low-precision input must be bitwise equal to
    softmax on a float copy, both forward and backward."""
    x = torch.rand(32, 100, device=device, dtype=dtype, requires_grad=True)
    x_float = x.to(torch.float).detach().requires_grad_(True)
    y = F.softmax(x, dim=-1, dtype=torch.float)
    y_float = F.softmax(x_float, dim=-1)
    # should be bitwise equal
    self.assertEqual(y, y_float, atol=0, rtol=0)
    grad_out = torch.empty_like(y_float).uniform_()
    y.backward(grad_out)
    y_float.backward(grad_out)
    # should be bitwise equal
    self.assertEqual(x.grad, x_float.grad.to(dtype), atol=0, rtol=0)
@onlyCUDA
def test_pool3d_size_one_feature_dim(self, device):
    """3d pooling must tolerate arbitrary strides on a size-1 feature dimension."""
    base = torch.randn(7, 1, 5, 3, 2, device=device)
    weird_strides = [30, 1234, 6, 2, 1]
    gpu_view = base.as_strided(base.size(), weird_strides)
    cpu_view = base.cpu().as_strided(base.size(), weird_strides)
    cases = {
        'max_pool3d': lambda t: F.max_pool3d(t, (5, 1, 1), stride=(5, 1, 1)),
        'avg_pool3d': lambda t: F.avg_pool3d(t, (5, 1, 1), stride=(5, 1, 1)),
    }
    for case_name, pool_fn in cases.items():
        # should not crash
        gpu_out = pool_fn(gpu_view)
        cpu_out = pool_fn(cpu_view)
        self.assertEqual(gpu_out, cpu_out.to(device), msg=case_name)
@onlyCUDA
@largeTensorTest('18GB')
@largeTensorTest('180GB', 'cpu')
def test_pool3d_large_size_int64(self, device):
    """max_pool3d forward/backward on a tensor whose numel exceeds int32 range.

    Regression test for https://github.com/pytorch/pytorch/issues/52822.
    """
    x = torch.randn(70, 32, 100, 100, 100, dtype=torch.half, device=device, requires_grad=True)
    y = torch.nn.functional.max_pool3d(x, 5)
    g = torch.randn_like(y, dtype=torch.half)
    torch.cuda.synchronize()
    y.backward(g)
    torch.cuda.synchronize()

    ref_x = x.detach().cpu().float()  # max_pool3d_cpu is not implemented for half
    ref_x.requires_grad = True
    ref_g = g.cpu().float()
    ref_y = torch.nn.functional.max_pool3d(ref_x, 5)
    ref_y.backward(ref_g)

    # exact_dtype=False: CUDA path is half, CPU reference is float
    self.assertEqual(y, ref_y, exact_dtype=False)
    self.assertEqual(x.grad, ref_x.grad, exact_dtype=False)
@onlyCUDA
def test_AvgPool3d_backward_after_cat_dim1_device(self, device):
    """avg_pool3d backward must accept a contiguous grad with an enlarged dim-0 stride.

    The input has batch_size 1 so the grad stays contiguous even after the
    dim-0 stride is doubled (size-1 dims are ignored by contiguity checks).
    """
    x = torch.randn(1, 3, 4, 4, 4, device=device, requires_grad=True)
    y = F.avg_pool3d(x, kernel_size=3, padding=1, stride=2)

    grad = torch.randn(y.size(), device=device)
    # increase the stride in dimension 0; the tensor is still contiguous because size[0] is 1
    stride = list(grad.stride())
    stride[0] = stride[0] * 2
    grad.set_(grad.storage(), 0, grad.size(), stride)
    # Fix: use a real test assertion — a bare `assert` is stripped under `python -O`
    self.assertTrue(grad.is_contiguous())

    y.backward(grad)
def test_pooling_size_empty(self, device):
    """Adaptive pooling with an empty output-size list must raise RuntimeError."""
    t = torch.rand([1, 2, 3, 4], device=device)
    adaptive_pools = (
        F.adaptive_avg_pool1d, F.adaptive_avg_pool2d, F.adaptive_avg_pool3d,
        F.adaptive_max_pool1d, F.adaptive_max_pool2d, F.adaptive_max_pool3d,
    )
    for pool_fn in adaptive_pools:
        self.assertRaises(RuntimeError, lambda fn=pool_fn: fn(t, []))
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long)))
def test_embedding_bag_empty_input(self, device, dtypes):
    """EmbeddingBag on an empty index tensor must return all-zero bags."""
    num_embeddings, embedding_dim = 4, 3
    empty_idx = torch.tensor([], device=device, dtype=dtypes[0])
    for sparse in (True, False):
        bag = torch.nn.EmbeddingBag(num_embeddings, embedding_dim, sparse=sparse)
        bag.to(device)
        # one empty bag, then two empty bags
        for offset_values in ([0], [0, 0]):
            offs = torch.tensor(offset_values, device=device, dtype=dtypes[1])
            out = bag(input=empty_idx, offsets=offs)
            self.assertEqual(out, torch.zeros_like(out))
@skipCUDAIf(True, "no out-of-bounds check on CUDA for perf.")
@dtypes(*itertools.product((torch.float, torch.double), (torch.int, torch.long)))
@parametrize_test("padding_idx", [None, 0])
@parametrize_test("mode", ["sum", "mean", "max"])
def test_embedding_bag_out_of_bounds_idx(self, device, dtypes, padding_idx, mode):
    """Out-of-bound indices (negative or >= num_embeddings) must raise on CPU.

    Fix: the old body unconditionally reassigned ``padding_idx = 0``, which
    discarded the parametrized value and ran the ``None`` case as 0 twice.
    """
    w_dtype, idx_dtype = dtypes
    # negative out-of-bound
    idx1 = torch.tensor([[-1, 1]], device=device, dtype=idx_dtype)
    # positive out-of-bound
    idx2 = torch.tensor([[11, 8]], device=device, dtype=idx_dtype)
    weight = torch.randn(10, 2, device=device, dtype=w_dtype)
    if mode == 'sum':
        # Only `sum` supports per_sample_weights
        per_sample_weights = (None, torch.randn_like(idx1, device=device, dtype=w_dtype))
    else:
        per_sample_weights = (None,)

    for p_s_weights, idx in itertools.product(per_sample_weights, (idx1, idx2)):
        msg = "Expected idx >= 0 && idx < num_embeddings"
        with self.assertRaisesRegex(RuntimeError, msg):
            torch.nn.functional.embedding_bag(idx, weight,
                                              per_sample_weights=p_s_weights, padding_idx=padding_idx,
                                              mode=mode)
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long)))
def test_EmbeddingBag_per_sample_weights_failures(self, device, dtypes):
    """per_sample_weights must be rejected on dtype mismatch, shape mismatch,
    and for any mode other than 'sum'."""
    # Failure 1: mismatched embeddings / per_sample_weights dtype
    es = nn.EmbeddingBag(5, 2, mode='sum').to(dtype=torch.float, device=device)
    input = torch.tensor([3, 1, 1, 1, 4, 0], dtype=dtypes[0], device=device)
    offsets = torch.tensor([0, 0, 3, 3, 6], dtype=dtypes[1], device=device)
    per_sample_weights = torch.randn_like(input, dtype=torch.double, device=device)
    # the CPU and CUDA kernels raise differently worded type errors
    if device == 'cpu':
        with self.assertRaisesRegex(RuntimeError, 'have the same type as'):
            es(input, offsets, per_sample_weights)
    else:
        with self.assertRaisesRegex(RuntimeError, 'expected scalar type'):
            es(input, offsets, per_sample_weights)

    # Failure 2.1: input/per_sample_weights have different sizes (1d input)
    input = torch.tensor([3, 1, 1, 1, 4, 0], dtype=dtypes[0], device=device)
    offsets = torch.tensor([0, 0, 3, 3, 6], dtype=dtypes[1], device=device)
    per_sample_weights = torch.randn(5, dtype=torch.float, device=device)
    with self.assertRaisesRegex(ValueError, 'same shape as the input'):
        es(input, offsets, per_sample_weights)

    # Failure 2.2: input/per_sample_weights have different sizes (2d input)
    input = torch.randint(5, (7, 3), dtype=dtypes[0], device=device)
    offsets = None
    per_sample_weights = torch.randn(7 * 3, dtype=torch.float, device=device)
    with self.assertRaisesRegex(ValueError, 'same shape as the input'):
        es(input, offsets, per_sample_weights)

    # Failure 3: Unsupported per_sample_weights and mode=('max', 'mean')
    for unsupported_mode in ('max', 'mean'):
        es = nn.EmbeddingBag(5, 2, mode=unsupported_mode).to(
            dtype=torch.float, device=device)
        input = torch.randint(5, (7, 3), dtype=dtypes[0], device=device)
        offsets = None
        per_sample_weights = torch.randn(7, 3, dtype=torch.float, device=device)
        with self.assertRaisesRegex(NotImplementedError,
                                    "only supported for mode='sum'"):
            es(input, offsets, per_sample_weights)
def _embedding_bag_reference_impl(self, input, weight, offsets=None, mode='sum',
per_sample_weights=None, include_last_offset=False):
assert mode == 'sum' or per_sample_weights is None
assert offsets is not None
if per_sample_weights is None:
per_sample_weights = torch.ones(input.size()).to(
dtype=weight.dtype, device=weight.device
)
assert input.numel() == per_sample_weights.numel()
bags = []
long_input = input.to(torch.long)
embeddings = weight.index_select(0, long_input) * per_sample_weights.unsqueeze(1)
if include_last_offset:
for index in range(len(offsets) - 1):
offset = offsets[index]
next_offset = offsets[index + 1]
length = next_offset - offset
if length == 0:
bags.append(
torch.tensor([0] * weight.size(1)).to(
dtype=embeddings.dtype, device=embeddings.device
)
)
else:
if mode == 'sum':
bags.append(embeddings.narrow(0, offset, length).sum(0))
elif mode == 'mean':
bags.append(embeddings.narrow(0, offset, length).sum(0).div(length))
else:
assert mode == 'max'
bags.append(embeddings.narrow(0, offset, length).max(0)[0])
else:
for index, offset in enumerate(offsets):
if index + 1 < len(offsets):
next_offset = offsets[index + 1]
else:
next_offset = len(long_input)
length = next_offset - offset
if length == 0:
bags.append(
torch.tensor([0] * weight.size(1)).to(
dtype=embeddings.dtype, device=embeddings.device
)
)
else:
if mode == 'sum':
bags.append(embeddings.narrow(0, offset, length).sum(0))
elif mode == 'mean':
bags.append(embeddings.narrow(0, offset, length).sum(0).div(length))
else:
assert mode == 'max'
bags.append(embeddings.narrow(0, offset, length).max(0)[0])
return torch.stack(bags)
@skipMeta
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.half, torch.float, torch.double)))
def test_EmbeddingBag_empty_per_sample_weights_and_offsets(self, device, dtypes):
    """Empty input with per-sample weights: forward matches the reference and
    backward yields zero gradients.

    Regression test for a CUDA invalid configuration bug (see #46572).
    Fix: the per-sample-weights reference grad was built with
    ``torch.empty_like`` (uninitialized memory) — harmless only because the
    tensor has zero elements; ``zeros_like`` states the documented intent.
    """
    def test_per_sample_weights(mode, trainable_scale):
        es = nn.EmbeddingBag(5, 2, mode=mode).to(dtype=dtypes[2], device=device)
        es.weight.data.copy_(
            torch.arange(1, 11, device=device).view_as(es.weight).to(dtypes[2]))
        input = torch.tensor([], device=device, dtype=dtypes[0])
        offsets = torch.tensor([0, 0, 0, 0, 0], device=device, dtype=dtypes[1])
        per_sample_weights = torch.randn_like(input, dtype=dtypes[2]) \
            .requires_grad_(trainable_scale)
        ref_per_sample_weights = \
            per_sample_weights.detach().requires_grad_(trainable_scale)
        reference_weights = es.weight.detach().requires_grad_()

        expected = self._embedding_bag_reference_impl(
            input, reference_weights, offsets, mode, ref_per_sample_weights)
        result = es(input, offsets, per_sample_weights)
        self.assertEqual(result, expected, atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)

        grad = torch.randn_like(expected)
        result.backward(grad)
        # the reference impl doesn't have grad fn for empty input; but the grad should
        # simply be a zero tensor
        ref_weights_grad = torch.zeros_like(es.weight)
        self.assertEqual(es.weight.grad, ref_weights_grad,
                         atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
        if trainable_scale:
            ref_per_sample_weights_grad = torch.zeros_like(per_sample_weights)
            self.assertEqual(per_sample_weights.grad, ref_per_sample_weights_grad,
                             atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)

    modes = ('sum',)
    trainable_scale = (True, False)
    for mode, trainable in itertools.product(modes, trainable_scale):
        test_per_sample_weights(mode, trainable)
@skipMeta
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))
def test_EmbeddingBag_per_sample_weights_and_offsets(self, device, dtypes):
    """EmbeddingBag with per-sample weights must match the reference impl in
    forward and backward (weight grad, and scale grad when trainable)."""
    def test_per_sample_weights(mode, trainable_scale):
        es = nn.EmbeddingBag(5, 2, mode=mode).to(dtype=dtypes[2], device=device)
        es.weight.data.copy_(
            torch.arange(1, 11, device=device).view_as(es.weight).to(dtypes[2]))
        input = torch.tensor([3, 1, 1, 1, 4, 0], device=device, dtype=dtypes[0])
        offsets = torch.tensor([0, 0, 3, 3, 6], device=device, dtype=dtypes[1])
        per_sample_weights = torch.randn_like(input, dtype=dtypes[2]) \
            .requires_grad_(trainable_scale)
        ref_per_sample_weights = \
            per_sample_weights.detach().requires_grad_(trainable_scale)
        # separate leaf so reference grads don't mix with the module's grads
        reference_weights = es.weight.detach().requires_grad_()

        expected = self._embedding_bag_reference_impl(
            input, reference_weights, offsets, mode, ref_per_sample_weights)
        result = es(input, offsets, per_sample_weights)
        self.assertEqual(result, expected, atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)

        grad = torch.randn_like(expected).to(dtype=dtypes[2], device=device)
        result.backward(grad)
        expected.backward(grad)
        self.assertEqual(es.weight.grad, reference_weights.grad,
                         atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
        if trainable_scale:
            self.assertEqual(per_sample_weights.grad, ref_per_sample_weights.grad,
                             atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)

    modes = ('sum',)
    trainable_scale = (True, False)
    for mode, trainable in itertools.product(modes, trainable_scale):
        test_per_sample_weights(mode, trainable)
@skipMeta
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))
def test_EmbeddingBag_per_sample_weights_and_new_offsets(self, device, dtypes):
    """EmbeddingBag vs the reference impl across modes, trainable scales, and
    both offset layouts (with and without include_last_offset)."""
    def test_per_sample_weights_new_offsets(mode, trainable_scale, include_last_offset, has_weight=True):
        es = nn.EmbeddingBag(5, 2, mode=mode, include_last_offset=include_last_offset).to(dtype=dtypes[2], device=device)
        es.weight.data.copy_(
            torch.arange(1, 11, device=device).view_as(es.weight).to(dtypes[2]))
        input = torch.tensor([3, 1, 1, 1, 4, 0], device=device, dtype=dtypes[0])
        offsets = torch.tensor([0, 0, 3, 3, 6], device=device, dtype=dtypes[1])

        if include_last_offset:
            # append the exclusive end of the last bag, as the layout requires
            offsets = torch.cat((offsets, torch.tensor([input.size(0)], device=device, dtype=dtypes[1])), 0)

        if has_weight:
            per_sample_weights = torch.randn_like(input, device=device, dtype=dtypes[2]) \
                .requires_grad_(trainable_scale)
            ref_per_sample_weights = \
                per_sample_weights.detach().requires_grad_(trainable_scale)
        else:
            per_sample_weights = None
            ref_per_sample_weights = None

        # separate leaf so reference grads don't mix with the module's grads
        reference_weights = es.weight.detach().requires_grad_()

        expected = self._embedding_bag_reference_impl(
            input, reference_weights, offsets, mode, ref_per_sample_weights, include_last_offset)
        result = es(input, offsets, per_sample_weights)
        self.assertEqual(result, expected, atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)

        grad = torch.randn_like(expected)
        result.backward(grad)
        expected.backward(grad)
        self.assertEqual(es.weight.grad, reference_weights.grad,
                         atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
        if has_weight and trainable_scale:
            self.assertEqual(per_sample_weights.grad, ref_per_sample_weights.grad,
                             atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)

    trainable_scale = (True, False)
    include_last_offset = (True, False)
    # (mode, has_weight): per-sample weights only exist for 'sum'
    modes = (('sum', False), ('sum', True), ('max', False), ('mean', False))
    for (mode, has_weight), trainable, include_last_offset in itertools.product(
        modes, trainable_scale, include_last_offset
    ):
        test_per_sample_weights_new_offsets(
            mode, trainable, include_last_offset, has_weight
        )
def _test_EmbeddingBag_vs_Embedding(self, N, D, B, L, max_norm=None,
                                    mode='mean',
                                    device='cpu',
                                    wdtype=torch.float,
                                    dtype=torch.long,
                                    test_per_sample_weights=False,
                                    trainable_per_sample_weights=False,
                                    sparse=False,
                                    test_backward=True,
                                    backward_prec=None):
    """Compare EmbeddingBag against Embedding followed by the equivalent reduce.

    N: num embeddings, D: embedding dim, B: batch (number of bags),
    L: constant bag length. Both modules share the same weights so outputs
    and gradients must agree within dtype-dependent tolerance.
    """
    es = nn.EmbeddingBag(N, D, mode=mode, sparse=sparse, max_norm=max_norm).to(device, wdtype)
    e = nn.Embedding(N, D, max_norm=max_norm).to(device, wdtype)
    e.weight.data.copy_(es.weight)
    input = torch.randint(N, (B, L), device=device, dtype=dtype)
    # constant bag length L -> offsets are just multiples of L
    offsets = torch.arange(0, B, device=device, dtype=dtype).mul_(L)
    grad_output = torch.rand(B, D, device=device, dtype=wdtype)

    if test_per_sample_weights:
        # To prevent large gradients, weights should sum to 1 for each bag
        per_sample_weights = \
            torch.randn(B, L, device=device, dtype=wdtype).softmax(dim=-1)
        per_sample_weights_reference = \
            per_sample_weights.clone().requires_grad_(trainable_per_sample_weights)
        per_sample_weights.requires_grad_(trainable_per_sample_weights)
        output = es(input.view(-1), offsets, per_sample_weights.view(-1))
    else:
        output = es(input.view(-1), offsets)
        per_sample_weights = None
        per_sample_weights_reference = None

    if mode == 'sum':
        if test_per_sample_weights:
            ref_output = (e(input) * per_sample_weights_reference.unsqueeze(-1)).sum(1)
        else:
            ref_output = e(input).sum(1)
    elif mode == 'mean':
        assert not test_per_sample_weights
        ref_output = e(input).mean(1)
    elif mode == 'max':
        assert not test_per_sample_weights
        ref_output = e(input).max(1)[0]

    self.assertEqual(output, ref_output, atol=dtype2prec_DONTUSE[wdtype], rtol=0)

    if not test_backward:
        return

    output.backward(grad_output)
    ref_output.backward(grad_output)
    es_weight_grad = es.weight.grad.data
    if sparse:
        es_weight_grad = es.weight.grad.data.to_dense()

    # We have more floating point error here because we are dealing with larger numbers
    if backward_prec is None:
        needed_prec = dtype2prec_DONTUSE[wdtype] * 5
    else:
        needed_prec = backward_prec

    self.assertEqual(es_weight_grad, e.weight.grad, atol=needed_prec, rtol=0)

    if test_per_sample_weights and trainable_per_sample_weights:
        self.assertEqual(per_sample_weights.grad, per_sample_weights_reference.grad,
                         atol=dtype2prec_DONTUSE[wdtype], rtol=0)
@skipCUDAIf(True, "Temporarily disabled. See t54369166")
@dtypesIfCUDA(*itertools.product((torch.int, torch.long), (torch.half, torch.float, torch.double)))
@dtypes(*itertools.product((torch.int, torch.long), (torch.float, torch.double)))
def test_EmbeddingBag_per_sample_weights_and_no_offsets(self, device, dtypes):
    """EmbeddingBag with per-sample weights vs Embedding across several shapes."""
    def run_tests(mode, sparse, trainable_per_sample_weights):
        common_kwargs = dict(test_per_sample_weights=True, device=device,
                             mode=mode, wdtype=dtypes[1], dtype=dtypes[0], sparse=sparse,
                             trainable_per_sample_weights=trainable_per_sample_weights)

        # Simple case
        self._test_EmbeddingBag_vs_Embedding(2, 3, 5, 7, **common_kwargs)
        # B * L > 1000
        self._test_EmbeddingBag_vs_Embedding(2, 5, 53, 23, **common_kwargs)
        # Large num_embedding
        self._test_EmbeddingBag_vs_Embedding(101, 5, 3, 7, **common_kwargs)
        # Large embedding_dim
        self._test_EmbeddingBag_vs_Embedding(2, 101, 3, 7, **common_kwargs)

    for sparse, trainable in itertools.product((True, False), (True, False)):
        run_tests('sum', sparse, trainable)

    # Test CUDA Dense on half precision
    # NOTE(review): with dtypesIfCUDA already including half, this second loop
    # repeats sparse=False cases covered above; kept to preserve behavior
    if device == 'cuda':
        for sparse, trainable in itertools.product((False,), (True, False)):
            run_tests('sum', sparse, trainable)
def _test_EmbeddingBag(
    self,
    device,
    mode,
    sparse,
    wdtype=torch.double,
    dtype=torch.long,
    odtype=torch.long,
    test_backward=True,
):
    """Core EmbeddingBag checks for one (mode, sparse) combination.

    Covers: a hand-computed 1-D example with offsets (including empty bags),
    the same data as a 2-D batch, all-empty bags, randomized comparison
    against Embedding + reduce, and rejection of illegal input combinations.
    """
    # check a known test example
    es = nn.EmbeddingBag(5, 2, mode=mode, sparse=sparse).to(device, wdtype)
    es.weight.data.copy_(torch.arange(1, 11, device=device).view_as(es.weight).to(wdtype))
    input = torch.tensor([3, 1, 1, 1, 4, 0], device=device, dtype=dtype)
    offsets = torch.tensor([0, 0, 3, 3, 6], device=device, dtype=odtype)
    grad_output = torch.tensor(
        [1, 2,
         3, 4], device=device, dtype=wdtype).view(2, 2)
    # rows of 99s correspond to empty bags and must receive no gradient
    grad_output_with_empty = torch.tensor(
        [99, 99,
         1, 2,
         99, 99,
         3, 4,
         99, 99], device=device, dtype=wdtype).view(5, 2)

    if mode == "sum" or mode == "mean":
        # mean differs from sum only by dividing by the bag length (3)
        denominator = 1 if mode == "sum" else 3
        expected_output = torch.tensor(
            [[13, 16],
             [13, 16]], device=device, dtype=wdtype) / denominator
        expected_output_with_empty = torch.tensor(
            [[0, 0],
             [13, 16],
             [0, 0],
             [13, 16],
             [0, 0]], device=device, dtype=wdtype) / denominator
        expected_grad_weight = torch.tensor(
            [[3, 4],
             [5, 8],
             [0, 0],
             [1, 2],
             [3, 4]], device=device, dtype=wdtype) / denominator
    elif mode == "max":
        expected_output = torch.tensor(
            [[7, 8],
             [9, 10]], device=device, dtype=wdtype)
        expected_output_with_empty = torch.tensor(
            [[0, 0],
             [7, 8],
             [0, 0],
             [9, 10],
             [0, 0]], device=device, dtype=wdtype)
        # only the argmax rows receive gradient in max mode
        expected_grad_weight = torch.tensor(
            [[0, 0],
             [0, 0],
             [0, 0],
             [1, 2],
             [3, 4]], device=device, dtype=wdtype)
    output = es(input, offsets)
    output.backward(grad_output_with_empty)

    es_weight_grad = es.weight.grad.data
    if sparse:
        es_weight_grad = es.weight.grad.to_dense()
    self.assertEqual(output, expected_output_with_empty)
    self.assertEqual(es_weight_grad, expected_grad_weight, atol=dtype2prec_DONTUSE[wdtype], rtol=0)

    # check same example except as 2D (2 x 3)
    input = input.view(2, -1)
    es.zero_grad()
    output = es(input)
    output.backward(grad_output)

    es_weight_grad = es.weight.grad
    if sparse:
        es_weight_grad = es.weight.grad.to_dense()
    self.assertEqual(output, expected_output)
    self.assertEqual(es_weight_grad, expected_grad_weight, atol=dtype2prec_DONTUSE[wdtype], rtol=0)

    # test all empty bags
    es.zero_grad()
    inputs = torch.tensor([], dtype=dtype, device=device)
    offsets = torch.tensor([0, 0, 0, 0], dtype=odtype, device=device)
    es(inputs, offsets).sum().backward()
    dense_grad = es.weight.grad
    if dense_grad.is_sparse:
        dense_grad = dense_grad.to_dense()
    self.assertEqual(dense_grad, torch.zeros_like(es.weight))

    # now compare EmbeddingBag vs Embedding + Sum/Mean, for constant bag length
    N, D, B, L = random.randint(1, 100), random.randint(1, 100), random.randint(1, 50), random.randint(1, 50)
    kwargs = dict(mode=mode, sparse=sparse, device=device, wdtype=wdtype, dtype=dtype, test_backward=test_backward)
    self._test_EmbeddingBag_vs_Embedding(N, D, B, L, **kwargs)
    for max_norm in (None, 3):
        for p in itertools.product([1, 2], repeat=4):
            self._test_EmbeddingBag_vs_Embedding(*p, max_norm=max_norm, **kwargs)

    # check that giving illegal input combos raises error
    es = nn.EmbeddingBag(10, 20, mode=mode, sparse=sparse)
    input = torch.ones(3, 4, dtype=dtype)
    offset = torch.arange(0, 3, dtype=odtype)
    self.assertRaises(ValueError, lambda: es(input, offset))
    self.assertRaises(ValueError, lambda: es(input.view(-1)))
    offset[0] = 1
    if self.device_type == "cpu":
        self.assertRaises(RuntimeError, lambda: es(input.view(-1), offset))
        offset[0] = 0
        offset[-1] = 100
        self.assertRaises(RuntimeError, lambda: es(input.view(-1), offset))
@skipMeta
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))
def test_embedding_bag_device(self, device, dtypes):
    """Run the EmbeddingBag checks: dense for every mode, then sparse where
    gradients can be trusted for the weight dtype."""
    idx_dtype, off_dtype, w_dtype = dtypes[0], dtypes[1], dtypes[2]
    for mode in ('sum', 'mean', 'max'):
        self._test_EmbeddingBag(device, mode, False, wdtype=w_dtype, dtype=idx_dtype, odtype=off_dtype)

    if self.device_type == 'cuda':
        # see 'todo' in test_embedding_bag.
        test_backward = w_dtype is not torch.float16
    elif self.device_type == 'cpu':
        # TODO: figure out why precision on sparse embeddings isn't the
        # same as for dense.
        test_backward = w_dtype is not torch.float and w_dtype is not torch.float16
    else:
        test_backward = False

    for mode in ('sum', 'mean'):
        self._test_EmbeddingBag(
            device,
            mode,
            True,
            wdtype=w_dtype,
            dtype=idx_dtype,
            odtype=off_dtype,
            test_backward=test_backward,
        )
@skipMeta
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))
def test_embedding_bag_non_contiguous_weight(self, device, dtypes):
weight_tensor = torch.randn(3, 4, dtype=dtypes[2], device=device)
weight_tensor_non_contig = weight_tensor[:, :3] # This is non-contiguous strided.
weight_tensor_contig = weight_tensor_non_contig.clone().contiguous() # Contig-strided.
index = torch.tensor([0, 1, 2], dtype=dtypes[0], device=device)
offsets = torch.tensor([0, 2], dtype=dtypes[1], device=device)
for mode in ['sum', 'mean', 'max']:
output_non_contig = F.embedding_bag(
input=index,
weight=weight_tensor_non_contig,
offsets=offsets,
mode=mode,
)
output_contig = F.embedding_bag(
input=index,
weight=weight_tensor_contig,
offsets=offsets,
mode=mode,
)
self.assertEqual(output_non_contig, output_contig)
@onlyCUDA
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long)))
def test_embedding_bag_bfloat16(self, device, dtypes):
self._test_EmbeddingBag(device, 'sum', True, wdtype=torch.bfloat16, dtype=dtypes[0], odtype=dtypes[1], test_backward=True)
self._test_EmbeddingBag(device, 'mean', True, wdtype=torch.bfloat16, dtype=dtypes[0], odtype=dtypes[1], test_backward=True)
@onlyNativeDeviceTypes # currently fails on XLA
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long)))
def test_embedding_bag_half(self, device, dtypes):
self._test_EmbeddingBag(device, 'sum', True, wdtype=torch.float16, dtype=dtypes[0], odtype=dtypes[1], test_backward=True)
@onlyCUDA
@dtypes(torch.half, torch.float, torch.double)
def test_multihead_attention_dtype(self, device, dtype):
embed_dim = 128
num_heads = 8
sl = 10
bs = 8
model = nn.MultiheadAttention(embed_dim, num_heads).cuda().to(dtype)
q = torch.randn(sl, bs, embed_dim, device=device, dtype=dtype)
k = torch.randn(sl, bs, embed_dim, device=device, dtype=dtype)
v = torch.randn(sl, bs, embed_dim, device=device, dtype=dtype)
out = model(q, k, v)
self.assertEqual(q.size(), out[0].size())
self.assertEqual(dtype, out[0].dtype)
@onlyCUDA
@dtypes(torch.half, torch.float, torch.double)
def test_multihead_attention_dtype_batch_first(self, device, dtype):
embed_dim = 128
num_heads = 8
sl = 10
bs = 8
# With batch_first=True, we have the possibility of hitting
# the native fast path if we call .eval() and enable inference
# mode. Test both paths.
for training in (True, False):
model = nn.MultiheadAttention(embed_dim, num_heads, batch_first=True).cuda().to(dtype)
if not training:
model = model.eval()
cm = torch.no_grad()
else:
cm = contextlib.nullcontext()
with cm:
q = torch.randn(bs, sl, embed_dim, device=device, dtype=dtype)
k = torch.randn(bs, sl, embed_dim, device=device, dtype=dtype)
v = torch.randn(bs, sl, embed_dim, device=device, dtype=dtype)
# fast path currently doesn't support weights
out = model(q, k, v, need_weights=False)
self.assertEqual(q.size(), out[0].size())
self.assertEqual(dtype, out[0].dtype)
    @dtypesIfCUDA(*floating_types_and(torch.half, *[torch.bfloat16] if AMPERE_OR_ROCM else []))
    @dtypes(torch.float)
    @torch.backends.cudnn.flags(enabled=True, benchmark=False)
    def test_Conv2d_naive_groups(self, device, dtype):
        """A Conv2d with groups=2 must equal two independent half convolutions.

        The grouped module's weights and biases are split channel-wise into
        two ungrouped 2->2 Conv2d modules; the forward output and all
        gradients (input, weight, bias) must agree within dtype precision.
        """
        # Check that grouped convolutions matches two half convolutions
        m = nn.Conv2d(4, 4, kernel_size=3, groups=2).to(device, dtype)
        i = torch.randn(2, 4, 6, 6, device=device, dtype=dtype, requires_grad=True)
        output = m(i)
        grad_output = torch.randn(2, 4, 4, 4, device=device, dtype=dtype)
        output.backward(grad_output)
        # First group: channels 0-1 of weight/bias/input/grad_output.
        m1 = nn.Conv2d(2, 2, kernel_size=3).to(device, dtype)
        m1.weight.data.copy_(m.weight.data[:2])
        m1.bias.data.copy_(m.bias.data[:2])
        i1 = i.data[:, :2].contiguous().requires_grad_(True)
        output1 = m1(i1)
        output1.backward(grad_output[:, :2].contiguous())
        # Second group: channels 2-3.
        m2 = nn.Conv2d(2, 2, kernel_size=3).to(device, dtype)
        m2.weight.data.copy_(m.weight.data[2:])
        m2.bias.data.copy_(m.bias.data[2:])
        i2 = i.data[:, 2:].contiguous().requires_grad_(True)
        output2 = m2(i2)
        output2.backward(grad_output[:, 2:].contiguous())
        # Grouped result must equal the channel-wise concatenation of the two
        # half convolutions — forward output exactly, gradients within atol.
        self.assertEqual(output, torch.cat([output1, output2], 1))
        self.assertEqual(i.grad.data,
                         torch.cat([i1.grad.data, i2.grad.data], 1),
                         atol=dtype2prec_DONTUSE[dtype], rtol=0)
        self.assertEqual(m.bias.grad.data,
                         torch.cat([m1.bias.grad.data, m2.bias.grad.data], 0),
                         atol=dtype2prec_DONTUSE[dtype], rtol=0)
        self.assertEqual(m.weight.grad.data,
                         torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
                         atol=dtype2prec_DONTUSE[dtype], rtol=0)
@dtypes(torch.double, torch.cdouble)
def test_Conv2d_backward_depthwise(self, device, dtype):
x = torch.randn(2, 2, 4, 20, device=device, dtype=dtype, requires_grad=True)
weight = torch.randn(2, 1, 3, 5, device=device, dtype=dtype, requires_grad=True)
def conv2d_depthwise(x, weight):
return torch.nn.functional.conv2d(
x, weight, bias=None, stride=(1, 10), groups=2)
for cudnn_enabled in [False, True]:
with torch.backends.cudnn.flags(enabled=cudnn_enabled):
torch.autograd.gradcheck(conv2d_depthwise, (x, weight))
def _test_batchnorm_grad(self, device, dtype=torch.double):
bs, n_feat, size_feat = 4, 5, 6
input = torch.arange(bs * n_feat * size_feat, device=device,
requires_grad=True, dtype=dtype).view(bs, n_feat, size_feat)
weight = torch.arange(1, n_feat + 1, device=device, requires_grad=True, dtype=dtype)
bias = torch.arange(n_feat, device=device, requires_grad=True, dtype=dtype)
running_mean = 1 - torch.arange(n_feat, device=device, dtype=dtype)
running_var = 2 * torch.arange(n_feat, device=device, dtype=dtype)
for training in [False, True]:
_assertGradAndGradgradChecks(self, F.batch_norm, (input, running_mean, running_var, weight, bias,
training, 0.1, 0.0001))
def test_batchnorm_grad(self, device):
self._test_batchnorm_grad(device)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_batchnorm_grad(device)
@onlyCUDA
def test_layernorm_half_precision(self):
width = 128
input = torch.rand(1, 5, width, device="cuda", dtype=torch.half) * 0.1
normalized_shape = (width,)
weight = torch.ones(width, device="cuda", dtype=torch.half)
bias = torch.zeros(width, device="cuda", dtype=torch.half)
eps = 1e-5
output_fp16 = torch.layer_norm(input, normalized_shape, weight, bias, eps)
output_fp32 = torch.layer_norm(input.float(), normalized_shape, weight.float(), bias.float(), eps).half()
self.assertEqual(output_fp16, output_fp32, atol=0, rtol=0)
@onlyCUDA
def test_layernorm_weight_bias(self):
width = 128
input = torch.rand(1, 5, width, device="cuda", dtype=torch.float32) * 0.1
normalized_shape = (width,)
data = torch.randn(width, device="cuda", dtype=torch.float32)
weight = torch.ones(width, device="cuda", dtype=torch.float32)
bias = torch.zeros(width, device="cuda", dtype=torch.float32)
eps = 1e-5
out_none_weight = torch.layer_norm(input, normalized_shape, None, data, eps)
out_one_weight = torch.layer_norm(input, normalized_shape, weight, data, eps)
self.assertEqual(out_none_weight, out_one_weight)
out_none_bias = torch.layer_norm(input, normalized_shape, data, None, eps)
out_zero_bias = torch.layer_norm(input, normalized_shape, data, bias, eps)
self.assertEqual(out_none_bias, out_zero_bias)
def test_hardsigmoid_grad(self, device):
inputs = (torch.randn(4, 16, 16, device=device) - 0.5) * 10
inputs.requires_grad = True
self.assertTrue(gradcheck(F.hardsigmoid, (inputs,)))
# currently fails on XLA
@onlyNativeDeviceTypes
def test_hardswish_grad(self, device):
inputs = (torch.randn(4, 16, 16, device=device) - 0.5) * 10
inputs.requires_grad = True
self.assertTrue(gradcheck(F.hardswish, (inputs,)))
    def _test_batchnorm_eval(self, ndim, device, dtype, module_dtype=None):
        """Eval-mode batch norm must be deterministic across repeated passes.

        Two identical forward/backward passes must produce identical outputs
        and input gradients, both for an eval() BatchNorm1d with running
        stats and for one built with track_running_stats=False that is
        switched to eval() between the passes.

        Args:
            ndim: dimensionality of the first input ([3] * ndim).
            device, dtype: placement/dtype of the input tensors.
            module_dtype: optional separate dtype for the module parameters
                (defaults to ``dtype``) to exercise mixed precision.
        """
        module_dtype = module_dtype or dtype
        module = nn.BatchNorm1d(3).to(device, module_dtype)
        module.eval()
        data = torch.rand([3] * ndim, device=device, dtype=dtype, requires_grad=True)
        grad = torch.rand([3] * ndim, device=device, dtype=dtype)
        # 1st pass
        res1 = module(data)
        res1.backward(grad)
        grad1 = data.grad.clone()
        # 2nd pass
        if data.grad is not None:
            data.grad.data.zero_()
        res2 = module(data)
        res2.backward(grad)
        grad2 = data.grad.clone()
        self.assertEqual(res1, res2)
        self.assertEqual(grad1, grad2)
        # track_running_stats=False
        # NOTE(review): with no running stats the module presumably keeps
        # using batch statistics in eval mode, which is why both passes
        # are expected to match — confirm against nn.BatchNorm1d docs.
        module = nn.BatchNorm1d(3, track_running_stats=False).to(device, module_dtype)
        data = torch.rand(4, 3, device=device, dtype=dtype, requires_grad=True)
        grad = torch.rand(4, 3, device=device, dtype=dtype)
        # 1st pass
        res1 = module(data)
        res1.backward(grad)
        grad1 = data.grad.clone()
        # set eval
        module.eval()
        # 2nd pass
        if data.grad is not None:
            data.grad.data.zero_()
        res2 = module(data)
        res2.backward(grad)
        grad2 = data.grad.clone()
        self.assertEqual(res1, res2)
        self.assertEqual(grad1, grad2)
@dtypes(torch.float)
@dtypesIfCUDA(torch.float, torch.bfloat16)
def test_batchnorm_eval(self, device, dtype):
self._test_batchnorm_eval(2, device, dtype)
self._test_batchnorm_eval(3, device, dtype)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_batchnorm_eval(2, device, dtype)
self._test_batchnorm_eval(3, device, dtype)
@onlyCUDA
@dtypes(torch.bfloat16, torch.half)
def test_batchnorm_eval_mixed(self, device, dtype):
# Test bfloat16 input with float module
self._test_batchnorm_eval(2, device, dtype, torch.float)
self._test_batchnorm_eval(3, device, dtype, torch.float)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_batchnorm_eval(2, device, dtype, torch.float)
self._test_batchnorm_eval(3, device, dtype, torch.float)
def _test_batchnorm_affine(self, ndim, device, dtype, module_dtype=None):
# Compare affine against no-op weights and bias
module_dtype = module_dtype or dtype
module = nn.BatchNorm1d(3, affine=False).to(device, module_dtype)
module_affine = nn.BatchNorm1d(3, affine=True).to(device, module_dtype)
with torch.no_grad():
module_affine.weight.fill_(1.0)
module_affine.bias.zero_()
data = torch.rand([3] * ndim, device=device, dtype=dtype, requires_grad=True)
grad = torch.ones_like(data, requires_grad=False)
# With weights all ones and bias all zeros
res1 = module_affine(data)
res1.backward(grad)
grad1 = data.grad.clone()
data.grad.zero_()
# Without any weights or bias
res2 = module(data)
res2.backward(grad)
grad2 = data.grad
self.assertEqual(res1, res2)
self.assertEqual(grad1, grad2)
@dtypes(torch.float)
@dtypesIfCUDA(torch.float, torch.bfloat16)
def test_batchnorm_affine(self, device, dtype):
self._test_batchnorm_affine(2, device, dtype)
self._test_batchnorm_affine(3, device, dtype)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_batchnorm_affine(2, device, dtype)
self._test_batchnorm_affine(3, device, dtype)
@onlyCUDA
@dtypes(torch.bfloat16, torch.half)
def test_batchnorm_affine_mixed(self, device, dtype):
cudnn_enabled = [False]
if self.device_type == 'cuda' and self.has_cudnn():
# TODO: Test fails with cudnn, see gh-62034
# cudnn_enabled = [False, True]
pass
# Test bfloat16 input with float module
for enabled in cudnn_enabled:
with torch.backends.cudnn.flags(enabled=enabled):
self._test_batchnorm_affine(2, device, dtype, torch.float)
self._test_batchnorm_affine(3, device, dtype, torch.float)
    def _test_batchnorm_simple_average(self, device, dtype, module_dtype=None):
        """With momentum=None, running stats accumulate as simple averages.

        Two batches are fed in separate passes (with a stats reset in
        between) and then again in one combined run; the combined run must
        reproduce the per-batch outputs and end with running stats equal to
        the mean of the two single-batch stats.
        """
        module_dtype = module_dtype or dtype
        module = nn.BatchNorm1d(3, momentum=None).to(dtype=module_dtype, device=device)
        zeros = torch.zeros(3, dtype=module_dtype, device=device)
        ones = torch.ones(3, dtype=module_dtype, device=device)
        # A fresh module starts from running mean 0 / running var 1.
        self.assertEqual(module.running_mean, zeros)
        self.assertEqual(module.running_var, ones)
        data1 = torch.rand(4, 3, dtype=dtype, device=device)
        data2 = torch.rand(4, 3, dtype=dtype, device=device)
        # 1st pass
        res1 = module(data1)
        running_mean1 = module.running_mean.clone()
        running_var1 = module.running_var.clone()
        self.assertNotEqual(running_mean1, zeros)
        self.assertNotEqual(running_var1, ones)
        # reset stats
        module.reset_running_stats()
        self.assertEqual(module.running_mean, zeros)
        self.assertEqual(module.running_var, ones)
        # 2nd pass
        res2 = module(data2)
        running_mean2 = module.running_mean.clone()
        running_var2 = module.running_var.clone()
        self.assertNotEqual(running_mean2, zeros)
        self.assertNotEqual(running_var2, ones)
        # reset stats
        module.reset_running_stats()
        self.assertEqual(module.running_mean, zeros)
        self.assertEqual(module.running_var, ones)
        # 3rd (combined) pass
        res3 = module(data1)
        res4 = module(data2)
        self.assertEqual(res3, res1)
        self.assertEqual(res4, res2)
        # Cumulative average over two batches == mean of the per-batch stats.
        self.assertEqual(module.running_mean, (running_mean1 + running_mean2) / 2)
        self.assertEqual(module.running_var, (running_var1 + running_var2) / 2)
@dtypes(torch.float)
@dtypesIfCUDA(torch.float, torch.bfloat16)
def test_batchnorm_simple_average(self, device, dtype):
self._test_batchnorm_simple_average(device, dtype)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_batchnorm_simple_average(device, dtype)
@onlyCUDA
@dtypes(torch.bfloat16, torch.half)
def test_batchnorm_simple_average_mixed(self, device, dtype):
self._test_batchnorm_simple_average(device, dtype, torch.float)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_batchnorm_simple_average(device, dtype, torch.float)
    def _test_maxpool_indices(self, num_dim, adaptive=False, device="cpu", dtype=torch.float):
        """Shared checks for (Adaptive)MaxPool{1,2,3}d with return_indices.

        Verifies forward outputs/indices against closed-form expectations
        (1d/2d only), checks backward against expected gradients, checks
        that mutating indices invalidates the saved graph, and checks that
        all-(-inf) inputs pool to -inf with index 0.
        """
        def expected_indices(dim):
            # Expected flat argmax positions for the arange input below.
            # Built as double and compared via assertEqualIgnoreType.
            if dim == 1:
                return torch.tensor([1, 3], dtype=torch.double).repeat(2, 2, 1)
            if dim == 2:
                return torch.tensor([[5, 7], [13, 15]], dtype=torch.double).repeat(2, 2, 1, 1)
        def expected_grad(dim):
            # Gradient pattern: ones at the max positions, built recursively
            # by interleaving zero planes along a new dimension.
            if dim == 1:
                return torch.tensor([0, 1, 0, 1], dtype=torch.double).repeat(2, 2, 1)
            grad = expected_grad(dim - 1)
            zero = torch.zeros(grad.size())
            return torch.stack((zero, grad, zero, grad), 2)
        def expected_output(dim):
            # Expected pooled values for the arange input below (1d/2d only).
            if dim == 1:
                return torch.arange(2, 17, 2).view(2, 2, 2)
            if dim == 2:
                col = torch.arange(6, 63, 8)
                return torch.stack([col, col + 2], 1).view(2, 2, 2, 2)
        if adaptive:
            cls_name = 'AdaptiveMaxPool{}d'.format(num_dim)
        else:
            cls_name = 'MaxPool{}d'.format(num_dim)
        module_cls = getattr(nn, cls_name)
        module = module_cls(2, return_indices=True).to(device, dtype=dtype)
        numel = 4 ** (num_dim + 1)
        # Input is 1..numel reshaped to (2, 2, 4, ..., 4) so every window's
        # maximum (and its index) is known in advance.
        input = torch.arange(1, numel + 1).view(2, 2, *repeat(4, num_dim)).to(device, dtype=dtype)
        input_var = input.clone().detach().requires_grad_()
        # Check forward
        output, indices = module(input_var)
        if num_dim != 3:
            # NOTE: the helper names are intentionally rebound to their
            # results here; the functions are not used again afterwards.
            expected_indices = expected_indices(num_dim)
            expected_output = expected_output(num_dim)
            self.assertEqual(indices.dim(), input.dim())
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(indices.data.squeeze(), expected_indices)
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(output.data.squeeze(), expected_output)
            self.assertTrue(output.requires_grad)
            self.assertFalse(indices.requires_grad)
        # Make sure backward works
        grad_output = torch.ones(output.size(), device=device, dtype=dtype)
        output.backward(grad_output, retain_graph=True)
        expected_grad = expected_grad(num_dim)
        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
        self.assertEqualIgnoreType(input_var.grad.data, expected_grad.view_as(input))
        # Make sure backward after changing indices will result in an error
        indices.add_(1)
        self.assertRaises(RuntimeError, lambda: output.backward(grad_output))
        # Make sure -Infinity is handled correctly
        t = torch.tensor([[[float("-inf")]]])
        m = nn.MaxPool1d(kernel_size=1, return_indices=True)
        output, indices = m(t)
        self.assertEqual(output[0, 0, 0], float("-inf"))
        self.assertEqual(indices[0, 0, 0], 0)
        t = torch.tensor([[[float("-inf")]]])
        m = nn.MaxPool2d(kernel_size=1, return_indices=True)
        output, indices = m(t)
        self.assertEqual(output[0, 0, 0], float("-inf"))
        self.assertEqual(indices[0, 0, 0], 0)
        t = torch.tensor([[[[float("-inf")]]]])
        m = nn.MaxPool3d(kernel_size=1, return_indices=True)
        output, indices = m(t)
        self.assertEqual(output[0, 0, 0, 0], float("-inf"))
        self.assertEqual(indices[0, 0, 0, 0], 0)
@dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
@dtypes(torch.float)
def test_MaxPool1d_indices(self, device, dtype):
self._test_maxpool_indices(1, device=device, dtype=dtype)
@dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
@dtypes(torch.float)
def test_MaxPool2d_indices(self, device, dtype):
self._test_maxpool_indices(2, device=device, dtype=dtype)
@skipIfMps
@dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
@dtypes(torch.float)
def test_MaxPool3d_indices(self, device, dtype):
self._test_maxpool_indices(3, device=device, dtype=dtype)
@skipIfMps
@dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
@dtypes(torch.float)
def test_AdaptiveMaxPool1d_indices(self, device, dtype):
self._test_maxpool_indices(1, adaptive=True, device=device, dtype=dtype)
@dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
@skipIfMps
@dtypes(torch.float)
def test_AdaptiveMaxPool2d_indices(self, device, dtype):
self._test_maxpool_indices(2, adaptive=True, device=device, dtype=dtype)
@dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
@skipIfMps
@dtypes(torch.float)
def test_AdaptiveMaxPool3d_indices(self, device, dtype):
self._test_maxpool_indices(3, adaptive=True, device=device, dtype=dtype)
@dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
@skipIfMps
@dtypes(torch.float)
def test_maxpool_indices_no_batch_dim(self, device, dtype):
"""Check that indices with no batch dim is consistent with a single batch."""
max_pool_cases = [
(nn.MaxPool1d(3, return_indices=True),
torch.randn(3, 5, device=device, dtype=dtype)),
(nn.MaxPool2d(3, return_indices=True),
torch.randn(3, 5, 6, device=device, dtype=dtype)),
(nn.MaxPool3d(3, return_indices=True),
torch.randn(3, 5, 6, 7, device=device, dtype=dtype)),
(nn.AdaptiveMaxPool1d(3, return_indices=True),
torch.randn(3, 5, device=device, dtype=dtype)),
(nn.AdaptiveMaxPool2d(3, return_indices=True),
torch.randn(3, 5, 6, device=device, dtype=dtype)),
(nn.AdaptiveMaxPool3d(3, return_indices=True),
torch.randn(3, 5, 6, 7, device=device, dtype=dtype))]
for module, input in max_pool_cases:
_, indices_no_batch = module(input)
_, indicies_single_batch = module(input.unsqueeze(0))
self.assertEqual(indices_no_batch, indicies_single_batch.squeeze(0))
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float)
@onlyNativeDeviceTypes # TODO: Fails on XLA
def test_max_pool_nan_inf(self, device, dtype):
for adaptive in ['', 'adaptive_']:
for num_dim in [1, 2, 3]:
fn_name = '{}max_pool{}d'.format(adaptive, num_dim)
fn = getattr(F, fn_name)
x = torch.full([1, 1] + num_dim * [3], nan, device=device, dtype=dtype, requires_grad=True)
res = fn(x, 1 if adaptive else 3)
res.backward(torch.randn_like(res))
self.assertTrue(math.isnan(res.item()))
x.requires_grad_(False)
res = fn(x, 1 if adaptive else 3)
self.assertTrue(math.isnan(res.item()))
x2 = torch.full([1, 1] + num_dim * [3], -inf, device=device, dtype=dtype, requires_grad=True)
res2 = fn(x2, 1 if adaptive else 3)
res2.backward(torch.randn_like(res2))
self.assertTrue(math.isinf(res2.item()))
x2.requires_grad_(False)
res2 = fn(x2, 1 if adaptive else 3)
self.assertTrue(math.isinf(res2.item()))
@onlyNativeDeviceTypes
@dtypes(torch.float, torch.double)
def test_grid_sample_nan_inf(self, device, dtype):
input = torch.zeros([1, 1, 3, 3], device=device, dtype=dtype)
grid = torch.tensor([[[[nan, 0], [0, inf]]]], device=device, dtype=dtype)
for padding_mode in ('reflection', 'border', 'zeros'):
sample = torch.nn.functional.grid_sample(input=input, grid=grid, mode='nearest',
padding_mode=padding_mode, align_corners=False)
self.assertEqual(sample, torch.zeros([1, 1, 1, 2], device=device, dtype=dtype))
    @expectedFailureMeta  # RuntimeError: Unrecognized tensor type ID: Meta
    @onlyNativeDeviceTypes
    def test_fractional_max_pool2d(self, device):
        """Shape, gradcheck and error handling of F.fractional_max_pool2d."""
        x = torch.randn(1, 2, 7, 7, requires_grad=True, device=device)
        # Fixed _random_samples make the pooling deterministic for gradcheck.
        samples = x.new(1, 2, 2).uniform_()
        def func(x):
            return F.fractional_max_pool2d(
                x, (2, 2), output_size=(3, 3), _random_samples=samples)
        self.assertEqual(func(x).shape, (1, 2, 3, 3))
        gradcheck(func, [x])
        gradgradcheck(func, [x])
        # Unbatched (3-d) input.
        x = torch.randn(2, 7, 7, requires_grad=True, device=device)
        self.assertEqual(func(x).shape, (2, 3, 3))
        if self.device_type != 'cuda':
            # Reference: https://github.com/pytorch/pytorch/issues/52427
            # Raises -> RuntimeError: TensorAccessor expected 4 dims but tensor has 3
            # on CUDA in gradcheck
            gradcheck(func, [x])
            gradgradcheck(func, [x])
        # Illegal kernel sizes must raise with the documented message.
        for kernel_size in [(), (1,)]:
            with self.assertRaisesRegex(RuntimeError, "kernel_size must either"):
                # Incorrect kernel_size
                F.fractional_max_pool2d(x, kernel_size=kernel_size, output_size=(3, 3), _random_samples=samples)
        err_large_msg = "too large relative to input "
        err_out_size_msg = "output_size must either"
        # Illegal output sizes: too large per dimension, or wrong arity.
        for output_size, msg in [((9, 3), err_large_msg + "height"),
                                 ((3, 9), err_large_msg + "width"),
                                 ((3,), err_out_size_msg),
                                 ((), err_out_size_msg)]:
            with self.assertRaisesRegex(RuntimeError, msg):
                # Incorrect output_size
                F.fractional_max_pool2d(x, (2, 2), output_size=output_size, _random_samples=samples)
    @expectedFailureMeta  # RuntimeError: Unrecognized tensor type ID: Meta
    @onlyNativeDeviceTypes
    def test_fractional_max_pool3d(self, device):
        """Shape, gradcheck and error handling of F.fractional_max_pool3d."""
        x = torch.randn(1, 2, 7, 7, 7, requires_grad=True, device=device)
        # Fixed _random_samples make the pooling deterministic for gradcheck.
        samples = x.new(1, 2, 3).uniform_()
        def func(x):
            return F.fractional_max_pool3d(
                x, (2, 2, 2), output_size=(3, 3, 3), _random_samples=samples)
        self.assertEqual(func(x).shape, (1, 2, 3, 3, 3))
        gradcheck(func, [x])
        gradgradcheck(func, [x])
        # Unbatched (4-d) input.
        x = torch.randn(2, 7, 7, 7, requires_grad=True, device=device)
        self.assertEqual(func(x).shape, (2, 3, 3, 3))
        gradcheck(func, [x])
        gradgradcheck(func, [x])
        # Illegal kernel sizes must raise with the documented message.
        for kernel_size in [(), (1,), (1, 1)]:
            with self.assertRaisesRegex(RuntimeError, "kernel_size must either"):
                # Incorrect kernel_size
                F.fractional_max_pool3d(x, kernel_size=kernel_size, output_size=(3, 3, 3), _random_samples=samples)
        err_large_msg = "too large relative to input "
        err_out_size_msg = "output_size must either"
        # Illegal output sizes: too large per dimension, or wrong arity.
        for output_size, msg in [((9, 3, 3), err_large_msg + "time"),
                                 ((3, 9, 3), err_large_msg + "height"),
                                 ((3, 3, 9), err_large_msg + "width"),
                                 ((3, 3), err_out_size_msg),
                                 ((3,), err_out_size_msg),
                                 ((), err_out_size_msg)]:
            with self.assertRaisesRegex(RuntimeError, msg):
                # Incorrect output_size
                F.fractional_max_pool3d(x, (2, 2, 2), output_size=output_size, _random_samples=samples)
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float)
@onlyNativeDeviceTypes # TODO: Fails on XLA
def test_fractional_max_pool_nan_inf(self, device, dtype):
for num_dim in [2, 3]:
fn_name = 'FractionalMaxPool{}d'.format(num_dim)
fn = getattr(nn, fn_name)(kernel_size=2, output_size=1)
x = torch.full([1, 1] + num_dim * [3], nan, device=device, dtype=dtype, requires_grad=True)
res = fn(x)
res.backward(torch.randn_like(res))
self.assertTrue(math.isnan(res.item()))
x2 = torch.full([1, 1] + num_dim * [3], -inf, device=device, dtype=dtype, requires_grad=True)
res2 = fn(x2)
res2.backward(torch.randn_like(res2))
self.assertTrue(math.isinf(res2.item()))
@onlyNativeDeviceTypes # TODO: RuntimeError message different on XLA
def test_pooling_zero_stride(self, device):
for op in ('max', 'avg'):
for num_dim in [1, 2, 3]:
fn_name = '{}_pool{}d'.format(op, num_dim)
fn = getattr(F, fn_name)
x = torch.ones([1, 2] + num_dim * [4], device=device, dtype=torch.float)
self.assertRaisesRegex(RuntimeError, r"stride should not be zero|stride must be greater than zero",
lambda: fn(x, kernel_size=2, stride=0))
fn_module_name = '{}Pool{}d'.format(op.title(), num_dim)
fn_module = getattr(nn, fn_module_name)(kernel_size=2, stride=0)
self.assertRaisesRegex(RuntimeError, r"stride should not be zero|stride must be greater than zero",
lambda: fn_module(x))
@dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
@skipIfMps
@dtypes(torch.float)
def test_pool_large_size(self, device, dtype):
for op in ('max', 'avg'):
for num_dim in [1, 2, 3]:
fn_name = '{}_pool{}d'.format(op, num_dim)
fn = getattr(F, fn_name)
# 16777217 is the smallest integer not expressible in float32
x = torch.ones([1, 1, 16777217] + (num_dim - 1) * [1],
device=device, dtype=dtype)
res = fn(x, 1, stride=1, padding=0)
# check if the output shape was still computed correctly
self.assertEqual(x.shape[2], res.shape[2])
@dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
@skipIfMps
@dtypes(torch.float)
def test_pool_invalid_size(self, device, dtype):
for op in ('max', 'avg'):
for num_dim in [1, 2, 3]:
fn_name = '{}_pool{}d'.format(op, num_dim)
if op == 'max':
# New implementation without indices supports empty tensors
# TODO(Heitor) change once with_indices code is updated
fn_name += '_with_indices'
fn = getattr(F, fn_name)
# use a configuration that gives zero outputs only
# when doing a correct floor division by the stride
x = torch.ones([1, 1] + num_dim * [4],
device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, r"too small|smaller than"):
try:
res = fn(x, 3, stride=2, padding=0, dilation=2)
except TypeError:
# some implementations do not support dilation
res = fn(x, 6, stride=2, padding=0)
def test_CTCLoss_empty_target(self, device):
target_lengths = [0, 0, 0]
input_lengths = [50, 50, 50]
targets = torch.randint(1, 15, (0,), dtype=torch.long, device=device)
log_probs = torch.randn(50, 3, 15, dtype=torch.double, device=device).log_softmax(2)
loss = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none')
self.assertTrue((loss >= 0).all().item())
self.assertEqual(-log_probs.sum(0)[:, 0], loss)
target_lengths = [0, 9, 0]
input_lengths = [50, 50, 50]
targets = torch.randint(1, 15, (9,), dtype=torch.long, device=device)
log_probs = torch.randn(50, 3, 15, dtype=torch.double, device=device).log_softmax(2)
loss = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none')
self.assertTrue((loss >= 0).all().item())
self.assertEqual(-log_probs.sum(0)[[0, 2], 0], loss[[0, 2]])
    # Merge into OpInfo?
    @skipCUDAIf(True, """Test is flaky on Linux and Windows, typical error message:
                      https://github.com/pytorch/pytorch/issues/34870""")
    def test_ctc_loss(self, device):
        """gradcheck ctc_loss over varying input/target length patterns.

        To keep gradcheck affordable, the full (T, N, C) log-prob tensor is
        generated from a small 10-element parameter vector via an outer
        product with fixed tile factors; gradcheck then only perturbs those
        10 entries.  Length patterns cover fixed/varying input lengths and
        none/some/all empty target sequences.
        """
        batch_size = 64
        num_labels = 101
        target_length = 15
        gradcheck_input_size = 10
        # Modes for zeroing out target lengths.
        ZERO_NONE = 0
        ZERO_SOME = 1
        ZERO_ALL = 2
        # input_length, vary_lengths, zero_lengths
        tests = [(150, False, ZERO_NONE),
                 (150, True, ZERO_NONE),
                 (50, True, ZERO_SOME),
                 (50, True, ZERO_ALL)]
        if 'cuda' in device:
            tests += [(50, False, ZERO_NONE),
                      (50, True, ZERO_NONE),
                      (150, True, ZERO_SOME),
                      (150, True, ZERO_ALL)]
        for input_length, vary_lengths, zero_mode in tests:
            targets = torch.randint(1, num_labels, (batch_size, target_length),
                                    device=device, dtype=torch.long)
            x = torch.randn(gradcheck_input_size, dtype=torch.double, device=device, requires_grad=True)
            tile_factors = torch.randn(input_length * batch_size * num_labels // gradcheck_input_size + 1,
                                       device=device)
            input_lengths = [(torch.randint(input_length // 2, input_length + 1, ()).item()
                              if vary_lengths or i == 0 else input_length) for i in range(batch_size)]
            if zero_mode == ZERO_ALL:
                target_lengths = [0 for _ in range(batch_size)]
            else:
                target_lengths = [(torch.randint(target_length // 2, target_length + 1, ()).item()
                                   if vary_lengths else target_length) for _ in range(batch_size)]
            if zero_mode == ZERO_SOME:
                # Zero out ~10 randomly chosen target lengths.
                idxes = torch.randint(0, batch_size, (10,))
                for i in idxes:
                    target_lengths[i] = 0
            def ctc_after_softmax(x):
                # Tile the small parameter vector up to (T, N, C) log-probs.
                x_full = ((x[:, None] * tile_factors[None, :]).view(-1)[:input_length * batch_size * num_labels]
                          .view(input_length, batch_size, num_labels))
                log_probs = torch.log_softmax(x_full, 2)
                return torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)
            gradcheck(ctc_after_softmax, [x])
    @onlyCUDA
    @skipCUDAIfRocm
    @skipCUDAIfCudnnVersionLessThan(7600)
    def test_ctc_loss_cudnn(self, device):
        """cuDNN CTC gradients must match the native CUDA implementation.

        The native reference is computed with cuDNN globally disabled; the
        cuDNN path is then triggered (int32 targets on CPU) and the grad_fn
        name is asserted to confirm the cuDNN kernel really was dispatched
        before comparing gradients.
        """
        batch_size = 16
        input_length = 30
        num_labels = 101
        target_length = 15
        targets = torch.randint(1, num_labels, (batch_size * target_length,),
                                device='cuda', dtype=torch.long)
        log_probs = torch.log_softmax(torch.randn(input_length, batch_size, num_labels, device='cuda', dtype=torch.float), 2)
        log_probs.requires_grad_()
        input_lengths = batch_size * [input_length]
        target_lengths = batch_size * [target_length]
        grad_out = torch.randn(batch_size, device='cuda', dtype=torch.float)
        # Native reference path (cuDNN disabled for this scope only).
        with torch.backends.cudnn.flags(enabled=False):
            loss_native = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none')
            grad_native, = torch.autograd.grad(loss_native, log_probs, grad_out)
        loss_cudnn = torch.nn.functional.ctc_loss(log_probs, targets.to('cpu', torch.int32),
                                                  input_lengths, target_lengths, reduction='none')
        # Sanity check: the cuDNN kernel must actually have been used.
        self.assertTrue("Cudnn" in str(loss_cudnn.grad_fn))
        grad_cudnn, = torch.autograd.grad(loss_cudnn, log_probs, grad_out)
        self.assertEqual(grad_cudnn, grad_native, atol=1e-4, rtol=0)
def test_empty_dropout(self, device):
x = torch.tensor([]).to(device)
out = torch.nn.functional.dropout(x)
self.assertEqual(out.size(), x.size())
    @dtypesIfCUDA(torch.half, torch.float, torch.double)
    @dtypes(torch.float)
    @tf32_on_and_off(0.005)
    def test_variable_sequence(self, device, dtype):
        """Packed variable-length LSTM input must match per-sequence runs.

        Each sequence is run separately through a deepcopy of the LSTM,
        padded back to max length and concatenated; running the original
        LSTM once on the packed batch must reproduce the outputs, final
        hidden states, input gradient and parameter gradients within dtype
        precision, for various length patterns, with/without custom initial
        hiddens, and with/without output projections.
        """
        def pad(var, length):
            # Zero-pad `var` along time to `length` (no-op if already there).
            if var.size(0) == length:
                return var
            return torch.cat([var, var.new_zeros(length - var.size(0), *var.size()[1:])])
        def maybe_index_tuple(maybe_tuple_of_tensors, index):
            # Slice batch `index` out of an (h0, c0) pair, keeping a batch dim.
            if maybe_tuple_of_tensors is None:
                return None
            return tuple(maybe_tuple_of_tensors[j][:, index:index + 1, :].contiguous()
                         for j in range(2))
        def check_lengths(lengths, enforce_sorted, use_default_hiddens, proj_size):
            input_size = 3
            hidden_size = 4
            num_layers = 2
            bidirectional = True
            max_length = max(lengths)
            x_leaf = torch.randn(max_length, len(lengths), input_size, device=device,
                                 dtype=dtype, requires_grad=True)
            num_directions = 2 if bidirectional else 1
            lstm = nn.LSTM(input_size, hidden_size, bidirectional=bidirectional,
                           num_layers=num_layers, proj_size=proj_size).to(device, dtype)
            lstm2 = deepcopy(lstm).to(device, dtype)
            x = x_leaf
            hidden0 = None
            if not use_default_hiddens:
                # With a projection, h has proj_size features while c keeps
                # hidden_size.
                real_hidden_size = hidden_size if proj_size == 0 else proj_size
                hidden0 = (torch.randn(num_directions * num_layers, len(lengths), real_hidden_size,
                                       device=device, dtype=dtype),
                           torch.randn(num_directions * num_layers, len(lengths), hidden_size,
                                       device=device, dtype=dtype))
            # Compute sequences separately
            seq_outs = []
            seq_hiddens = []
            for i, l in enumerate(lengths):
                hidden_i = maybe_index_tuple(hidden0, i)
                out, hid = lstm2(x[:l, i:i + 1], hidden_i)
                out_pad = pad(out, max_length)
                seq_outs.append(out_pad)
                seq_hiddens.append(hid)
            seq_out = torch.cat(seq_outs, 1)
            seq_hidden = tuple(torch.cat(hids, 1) for hids in zip(*seq_hiddens))
            # Use packed format
            packed = rnn_utils.pack_padded_sequence(x, lengths, enforce_sorted=enforce_sorted)
            packed_out, packed_hidden = lstm(packed, hidden0)
            unpacked, unpacked_len = rnn_utils.pad_packed_sequence(packed_out)
            # Check forward
            prec = dtype2prec_DONTUSE[dtype]
            self.assertEqual(packed_hidden, seq_hidden, atol=prec, rtol=0)
            self.assertEqual(unpacked, seq_out, atol=prec, rtol=0)
            self.assertEqual(unpacked_len, lengths, atol=prec, rtol=0)
            # Check backward
            seq_out.sum().backward()
            grad_x = x_leaf.grad.data.clone()
            x_leaf.grad.data.zero_()
            unpacked.sum().backward()
            self.assertEqual(x_leaf.grad, grad_x, atol=dtype2prec_DONTUSE[dtype], rtol=0)
            for p1, p2 in zip(lstm.parameters(), lstm2.parameters()):
                prec = dtype2prec_DONTUSE[dtype]
                if dtype == torch.float16:
                    # fp16 parameter gradients need a looser tolerance.
                    prec = 4e-2
                self.assertEqual(p1.grad, p2.grad, atol=prec, rtol=0)
        tests = [
            # enforce_sorted, lengths
            [True, [5]],
            [False, [5]],
            [True, [10, 10, 6, 2, 2, 1, 1]],
            [False, [10, 10, 6, 2, 2, 1, 1]],
            [False, [2, 1, 3, 2, 10, 5, 3]],
        ]
        for enforce_sorted, seq_lens, in tests:
            for use_default_hiddens in (True, False):
                for proj_size in [0, 2]:
                    check_lengths(seq_lens, enforce_sorted, use_default_hiddens, proj_size)
def _test_batchnorm_update_stats(self, device, dtype=torch.float):
    """BatchNorm1d must update running stats in train mode and freeze them in eval mode."""
    bn = nn.BatchNorm1d(3).to(device, dtype)
    batch = torch.rand(4, 3, device=device, dtype=dtype)

    def snapshot():
        # Clone so subsequent forward passes cannot mutate the captured values.
        return (bn.running_mean.clone(),
                bn.running_var.clone(),
                bn.num_batches_tracked.clone())

    # Training pass: mean/var move, the batch counter increments.
    mean_before, var_before, count_before = snapshot()
    bn(batch)
    self.assertNotEqual(mean_before, bn.running_mean)
    self.assertNotEqual(var_before, bn.running_var)
    self.assertEqual(count_before + 1, bn.num_batches_tracked)

    # Eval pass: all three buffers stay untouched.
    bn.eval()
    mean_before, var_before, count_before = snapshot()
    bn(batch)
    self.assertEqual(mean_before, bn.running_mean)
    self.assertEqual(var_before, bn.running_var)
    self.assertEqual(count_before, bn.num_batches_tracked)
def test_batchnorm_update_stats(self, device):
    """Run the running-stats check; on CUDA with cuDNN, repeat with cuDNN disabled."""
    self._test_batchnorm_update_stats(device)
    cudnn_usable = self.device_type == 'cuda' and self.has_cudnn()
    if cudnn_usable:
        # Exercise the native (non-cuDNN) batch-norm kernel as well.
        with torch.backends.cudnn.flags(enabled=False):
            self._test_batchnorm_update_stats(device)
def test_multi_margin_loss_errors(self, device):
    """multi_margin_loss must reject a target whose batch size disagrees with the input."""
    scores = torch.randn(5, device=device)
    bad_target = torch.zeros(3, device=device)  # 3 targets for 5 inputs
    with self.assertRaises(RuntimeError):
        nn.functional.multi_margin_loss(scores, bad_target)
@onlyCPU
def test_activations_bfloat16_cpu(self, device):
    """Forward/backward of several activations in bfloat16 must track an fp32 run."""
    def check(fn, inp_dims, prec):
        # bfloat16 pass
        x_bf16 = torch.randn(inp_dims, dtype=torch.bfloat16, device=device, requires_grad=True)
        y_bf16 = fn(x_bf16)
        g_bf16 = torch.randn_like(y_bf16, dtype=torch.bfloat16, device=device)
        y_bf16.backward(g_bf16)
        # fp32 reference pass starting from the exact same values
        x_fp32 = x_bf16.detach().clone().float().requires_grad_(True)
        y_fp32 = fn(x_fp32)
        g_fp32 = g_bf16.detach().clone().float()
        y_fp32.backward(g_fp32)
        self.assertEqual(y_bf16.dtype, torch.bfloat16)
        self.assertEqual(x_bf16.grad.dtype, torch.bfloat16)
        self.assertEqual(y_bf16, y_fp32, atol=prec, rtol=0, exact_dtype=False)
        self.assertEqual(x_bf16.grad.data, x_fp32.grad.data, atol=prec, rtol=0, exact_dtype=False)

    # (activation, tolerance) pairs; all modules here are stateless.
    cases = (
        (torch.nn.LogSigmoid(), 2e-2),
        (torch.nn.Hardsigmoid(), 1e-2),
        (torch.nn.Hardshrink(), 1e-2),
        (torch.nn.Softshrink(), 1e-2),
        (torch.nn.Hardswish(), 2e-2),
        (torch.nn.Softplus(), 1e-2),
    )
    for shape in ([1, 3, 1, 6], [1, 3, 1, 128], [1, 3, 256, 256]):
        for fn, prec in cases:
            check(fn, shape, prec)
def _test_bfloat16_ops(self, op, device, inp_dims=(), prec=1e-2, scale_factor=None):
    """Check that ``op`` run in bfloat16 matches its fp32 forward/backward within ``prec``.

    When ``scale_factor`` is given, the input is drawn in bfloat16 first and then
    converted to fp32, so both runs start from values exactly representable in bfloat16.
    """
    # fp32 compute
    input1 = torch.randn(inp_dims, dtype=torch.float32, device=device, requires_grad=True)
    if scale_factor is not None:
        input1 = (torch.rand(inp_dims, dtype=torch.bfloat16, device=device) * scale_factor).float().requires_grad_()
    out1 = op(input1)
    grad_input1 = torch.randn_like(out1, device=device)
    out1.backward(grad_input1)
    # bfloat16 compute: same values, module and tensors cast down
    op_bfp16 = op.bfloat16()
    input2 = input1.detach().bfloat16().requires_grad_()
    grad_input2 = grad_input1.bfloat16()
    out2 = op_bfp16(input2)
    out2.backward(grad_input2)
    # exact_dtype=False: comparing bfloat16 results against fp32 references
    self.assertEqual(out1, out2, atol=prec, rtol=prec, exact_dtype=False)
    self.assertEqual(input1.grad.data, input2.grad.data, atol=prec, rtol=prec, exact_dtype=False)
@onlyCUDA
def test_activations_bfloat16(self, device):
    """bfloat16 vs fp32 parity for elementwise activations on CUDA.

    Fix: the shape was written as ``inp_dims=(5)``, which is just the int 5, not
    a 1-tuple — spelled here as ``(5,)`` so the value matches the parameter's
    intent (torch.randn accepts both, so behavior is unchanged).
    """
    ops = (
        torch.nn.ReLU(),
        torch.nn.Threshold(0.1, 20),
        torch.nn.ELU(),
        torch.nn.Softplus(),
        torch.nn.Hardshrink(),
        torch.nn.Softshrink(),
        torch.nn.LeakyReLU(),
    )
    for op in ops:
        self._test_bfloat16_ops(op, device, inp_dims=(5,), prec=1e-2)
@onlyCUDA
def test_pooling_bfloat16(self, device):
    """bfloat16 vs fp32 parity for average and adaptive-average pooling in 1/2/3 dims."""
    cases = (
        (torch.nn.AvgPool1d(3, stride=2), (8, 4, 16)),
        (torch.nn.AvgPool2d(3, stride=2), (8, 4, 16, 16)),
        (torch.nn.AvgPool3d(3, stride=2), (8, 4, 16, 16, 16)),
        (torch.nn.AdaptiveAvgPool1d(3), (8, 4, 16)),
        (torch.nn.AdaptiveAvgPool2d((3, 5)), (8, 4, 16, 16)),
        (torch.nn.AdaptiveAvgPool3d((3, 5, 7)), (8, 4, 16, 16, 16)),
    )
    for op, dims in cases:
        self._test_bfloat16_ops(op, device, inp_dims=dims, prec=0.05)
@onlyNativeDeviceTypes
def test_softmax_bfloat16(self, device):
    # Softmax over each dim of a 4-D input, bfloat16 vs fp32.
    for dim in [0, 1, 2, 3]:
        self._test_bfloat16_ops(torch.nn.Softmax(dim=dim), device, inp_dims=(16, 33, 15, 16), prec=1e-2)
        # test softmax with large input value which causes exp() to overflow
        self._test_bfloat16_ops(torch.nn.Softmax(dim=dim), device, inp_dims=(16, 33, 15, 16), prec=0.05, scale_factor=1000.0)
@onlyCPU
@dtypes(torch.float, torch.double)
def test_conv_thnn_nhwc(self, device, dtype):
    """THNN Conv2d with mixed input/weight memory formats must match a contiguous
    reference built from the exact same (integer) parameters."""
    def helper(n, c, h, w, out_channels, kernel_size, dilation, groups, input_format, weight_format):
        # Small integer inputs/weights keep results exactly comparable across layouts.
        input = torch.randint(-3, 3, (n, c, h, w), dtype=dtype, device=device)\
            .to(memory_format=input_format)
        input.requires_grad_()
        conv = nn.Conv2d(c, out_channels, kernel_size, dilation=dilation, groups=groups)\
            .to(device='cpu', dtype=dtype, memory_format=weight_format)
        for p in conv.parameters():
            p.data = torch.randint_like(p, -3, 3)
        # Contiguous reference sharing the same parameter values.
        ref_input = input.detach().clone().contiguous().requires_grad_()
        ref_conv = nn.Conv2d(c, out_channels, kernel_size, dilation=dilation, groups=groups)
        # load_state_dict will restore the stride & memory_layout on ref_conv.weight.
        ref_conv.load_state_dict(conv.state_dict())
        ref_conv = ref_conv.to(device='cpu', dtype=dtype, memory_format=torch.contiguous_format)
        out = conv(input)
        ref_out = ref_conv(ref_input)
        grad = torch.randint_like(out, -3, 3)
        ref_grad = grad.detach().clone().contiguous()
        out.backward(grad)
        ref_out.backward(ref_grad)
        # Tested path stays channels-last; reference stays contiguous.
        self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(ref_out.is_contiguous())
        self.assertEqual(out, ref_out, exact_dtype=False)
        self.assertEqual(conv.weight.grad, ref_conv.weight.grad, exact_dtype=False)
        self.assertEqual(conv.bias.grad, ref_conv.bias.grad, exact_dtype=False)
        self.assertEqual(input.grad, ref_input.grad, exact_dtype=False)
    # Disable MKLDNN so the THNN implementation is actually exercised.
    with torch.backends.mkldnn.flags(enabled=False):
        formats = [[torch.channels_last, torch.channels_last],
                   [torch.channels_last, torch.contiguous_format],
                   [torch.contiguous_format, torch.channels_last]]
        for input_format, weight_format in formats:
            # non-dilated conv: thnn_conv2d normal path (with im2col)
            helper(2, 8, 4, 4, out_channels=4, kernel_size=3, dilation=1, groups=1,
                   input_format=input_format, weight_format=weight_format)
            helper(2, 8, 4, 4, out_channels=8, kernel_size=3, dilation=1, groups=8,
                   input_format=input_format, weight_format=weight_format)
            # test when input channels is 1 and not converted to channels last
            helper(2, 1, 10, 10, out_channels=8, kernel_size=3, dilation=1, groups=1,
                   input_format=torch.contiguous_format, weight_format=torch.channels_last)
            # non-dilated conv: thnn_conv2d fast path (skip im2col)
            helper(1, 16, 56, 56, out_channels=16, kernel_size=1, dilation=1, groups=1,
                   input_format=input_format, weight_format=weight_format)
            # ic == oc == 1 here, so need to stick input to CL to activate channels last
            helper(1, 16, 56, 56, out_channels=16, kernel_size=1, dilation=1, groups=16,
                   input_format=torch.channels_last, weight_format=weight_format)
            # dilated conv: slow_conv_dilated2d
            helper(2, 8, 11, 13, out_channels=16, kernel_size=3, dilation=2, groups=1,
                   input_format=input_format, weight_format=weight_format)
            helper(2, 16, 11, 13, out_channels=32, kernel_size=3, dilation=2, groups=16,
                   input_format=input_format, weight_format=weight_format)
@onlyCUDA
@skipCUDAIfRocmVersionLessThan((4, 3))
@skipCUDAIfNotMiopenSuggestNHWC
@skipCUDAIfCudnnVersionLessThan(7603)
@dtypes(torch.half, torch.float, torch.cfloat)
def test_conv_cudnn_nhwc(self, device, dtype):
    """Channels-last cuDNN Conv2d must match an FP64 channels-first reference.

    Fix: module placement previously used the hard-coded string 'cuda' instead
    of the ``device`` fixture, which targets the wrong GPU when the test is run
    against a non-default device ordinal (e.g. 'cuda:1').
    """
    def helper(n, c, h, w, out_channels, kernel_size, groups):
        # Integer values keep the comparison exact up to dtype rounding.
        input = torch.randint(-3, 3, (n, c, h, w), dtype=dtype, device=device)\
            .to(memory_format=torch.channels_last)
        input.requires_grad_()
        conv = nn.Conv2d(c, out_channels, kernel_size, groups=groups)\
            .to(device=device, dtype=dtype, memory_format=torch.channels_last)
        for p in conv.parameters():
            p.data = torch.randint_like(p, -3, 3)
        # use FP64 channels-first conv as reference
        ref_input = input.detach().clone().contiguous().double().requires_grad_()
        ref_conv = nn.Conv2d(c, out_channels, kernel_size, groups=groups)
        # load_state_dict will restore the stride & memory_layout on ref_conv.weight.
        ref_conv.load_state_dict(conv.state_dict())
        ref_conv = ref_conv.to(device=device, dtype=torch.double, memory_format=torch.contiguous_format)
        out = conv(input)
        ref_out = ref_conv(ref_input)
        grad = torch.randint_like(out, -3, 3)
        ref_grad = grad.detach().clone().double().contiguous()
        out.backward(grad)
        ref_out.backward(ref_grad)
        # Tested path must stay channels-last end to end ...
        self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(input.grad.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(conv.weight.grad.is_contiguous(memory_format=torch.channels_last))
        # ... while the reference stays contiguous.
        self.assertTrue(ref_out.is_contiguous())
        self.assertTrue(ref_input.grad.is_contiguous())
        self.assertTrue(ref_conv.weight.grad.is_contiguous())
        self.assertEqual(out, ref_out, exact_dtype=False)
        self.assertEqual(conv.weight.grad, ref_conv.weight.grad, exact_dtype=False)
        self.assertEqual(conv.bias.grad, ref_conv.bias.grad, exact_dtype=False)
        self.assertEqual(input.grad, ref_input.grad, exact_dtype=False)
    helper(2, 8, 4, 4, out_channels=4, kernel_size=3, groups=1)
    helper(2, 8, 4, 4, out_channels=8, kernel_size=3, groups=8)
    helper(1, 16, 56, 56, out_channels=16, kernel_size=3, groups=1)
    helper(1, 16, 56, 56, out_channels=16, kernel_size=3, groups=16)
@onlyCUDA
@skipCUDAIfRocm
@skipCUDAIfCudnnVersionLessThan(8005)
@dtypes(torch.half, torch.float)
def test_conv_cudnn_ndhwc(self, device, dtype):
    """Channels-last-3d cuDNN Conv3d must match an FP64 channels-first reference.

    Fix: module placement previously used the hard-coded string 'cuda' instead
    of the ``device`` fixture, which targets the wrong GPU when the test is run
    against a non-default device ordinal.
    """
    def helper(n, c, d, h, w, out_channels, kernel_size, groups):
        # Integer values keep the comparison exact up to dtype rounding.
        input = torch.randint(-2, 2, (n, c, d, h, w), dtype=dtype, device=device)\
            .to(memory_format=torch.channels_last_3d)
        input.requires_grad_()
        conv = nn.Conv3d(c, out_channels, kernel_size, groups=groups)\
            .to(device=device, dtype=dtype, memory_format=torch.channels_last_3d)
        for p in conv.parameters():
            p.data = torch.randint_like(p, -2, 2)
        # use FP64 channels-first conv as reference
        ref_input = input.detach().clone().contiguous().double().requires_grad_()
        ref_conv = nn.Conv3d(c, out_channels, kernel_size, groups=groups)
        # load_state_dict will restore the stride & memory_layout on ref_conv.weight.
        ref_conv.load_state_dict(conv.state_dict())
        ref_conv = ref_conv.to(device=device, dtype=torch.double, memory_format=torch.contiguous_format)
        out = conv(input)
        ref_out = ref_conv(ref_input)
        grad = torch.randint_like(out, -2, 2)
        ref_grad = grad.detach().clone().double().contiguous()
        out.backward(grad)
        ref_out.backward(ref_grad)
        # Tested path must stay channels-last-3d end to end ...
        self.assertTrue(out.is_contiguous(memory_format=torch.channels_last_3d))
        self.assertTrue(input.grad.is_contiguous(memory_format=torch.channels_last_3d))
        self.assertTrue(conv.weight.grad.is_contiguous(memory_format=torch.channels_last_3d))
        # ... while the reference stays contiguous.
        self.assertTrue(ref_out.is_contiguous())
        self.assertTrue(ref_input.grad.is_contiguous())
        self.assertTrue(ref_conv.weight.grad.is_contiguous())
        self.assertEqual(out, ref_out, exact_dtype=False)
        self.assertEqual(conv.weight.grad, ref_conv.weight.grad, exact_dtype=False)
        self.assertEqual(conv.bias.grad, ref_conv.bias.grad, exact_dtype=False)
        self.assertEqual(input.grad, ref_input.grad, exact_dtype=False)
    helper(2, 8, 4, 4, 4, out_channels=4, kernel_size=3, groups=1)
    helper(2, 8, 4, 4, 4, out_channels=8, kernel_size=3, groups=8)
    helper(1, 16, 18, 18, 18, out_channels=16, kernel_size=3, groups=1)
    helper(1, 16, 18, 18, 18, out_channels=16, kernel_size=3, groups=16)
def _run_conv(self, layer, device, inp, grad, ref_conv, ref_input, ref_out,
              input_format, weight_format, grad_format, output_format):
    # Build a fresh conv with ref_conv's shapes/weights, then coerce weight,
    # input and grad into the requested memory formats before running, and
    # compare everything against the precomputed reference run.
    conv = layer(inp.size(1), grad.size(1),
                 ref_conv.weight.size(2)).float().to(device)
    # load_state_dict will restore the stride & memory_layout on ref_conv.weight.
    conv.load_state_dict(ref_conv.state_dict())
    # resize_ with a memory_format pins the desired layout even for sizes
    # (e.g. N111) where .contiguous() alone leaves the layout ambiguous.
    weight_data = conv.weight.detach().clone().contiguous(memory_format=weight_format)
    conv.weight.data = weight_data.resize_(weight_data.size(), memory_format=weight_format)
    input = inp.clone().contiguous(memory_format=input_format)
    input.resize_(input.size(), memory_format=input_format)
    input = input.requires_grad_()
    grad = grad.contiguous(memory_format=grad_format)
    grad.resize_(grad.size(), memory_format=grad_format)
    out = conv(input)
    out.backward(grad)
    # Output layout and all values/grads must match the reference run.
    self.assertTrue(out.is_contiguous(memory_format=output_format))
    self.assertEqual(out, ref_out)
    self.assertEqual(conv.weight.grad, ref_conv.weight.grad)
    self.assertEqual(conv.bias.grad, ref_conv.bias.grad)
    self.assertEqual(input.grad, ref_input.grad)
def _test_conv_cudnn_nhwc_nchw(self, layer, n, c, h, w, k, filter_size, device):
    """Drive _run_conv over every weight/grad/input memory-format combination.

    Fix: the gradient tensor was created on the literal "cuda" string rather
    than the ``device`` argument, breaking runs pinned to a non-default GPU.
    """
    data = torch.randint(1, 10, (n, c, h, w), dtype=torch.float32, device=device)
    ref_input = data.clone().contiguous().requires_grad_(True)
    ref_conv = layer(c, k, filter_size).float().to(device)
    ref_out = ref_conv(ref_input)
    grad = torch.randint(1, 10, ref_out.size(), dtype=torch.float32, device=device)
    ref_out.backward(grad)
    for w_f in [torch.contiguous_format, torch.channels_last]:
        for g_f in [torch.contiguous_format, torch.channels_last]:
            for input_format in [torch.contiguous_format, torch.channels_last]:
                output_format = torch.contiguous_format
                # Older versions of CudNN have Channels Last support disabled
                if torch.backends.cudnn.version() >= 7603:
                    if input_format == torch.channels_last:
                        output_format = torch.channels_last
                    # This is because we have N111 weight that cannot handle
                    # the ambiguous memory_format
                    if w_f == torch.channels_last:
                        if layer == nn.Conv2d and filter_size * c != 1:
                            output_format = torch.channels_last
                        if layer == nn.ConvTranspose2d and filter_size * k != 1:
                            output_format = torch.channels_last
                self._run_conv(layer, device, data, grad, ref_conv, ref_input,
                               ref_out, input_format, w_f, g_f, output_format)
@onlyCUDA
@skipCUDAIfRocmVersionLessThan((4, 3))
@skipCUDAIfNotMiopenSuggestNHWC
@skipCUDAIfCudnnVersionLessThan(7603)
@tf32_on_and_off(0.05)
def test_conv_cudnn_mismatch_memory_format(self, device):
    """Mixed contiguous/channels-last tensors for conv and transposed conv."""
    # Each entry is (n, c, h, w, k, filter_size).
    configs = (
        (4, 2, 8, 8, 4, 2),
        (4, 1, 8, 8, 4, 2),
        (1, 1, 8, 8, 4, 2),
        (4, 2, 2, 8, 4, 1),
        (4, 2, 1, 8, 4, 1),
        (4, 2, 8, 8, 4, 1),
        (4, 1, 8, 8, 4, 1),
    )
    for cfg in configs:
        for layer in (nn.Conv2d, nn.ConvTranspose2d):
            self._test_conv_cudnn_nhwc_nchw(layer, *cfg, device)
# torch.half is erroring out on Windows with CUDA 10.1 + cuDNN 7.6.4
# returning CUDNN_STATUS_BAD_PARAM
# Disabling that specific test for now [see issue # 33918]
@onlyCUDA
@skipCUDAIfNoCudnn
@dtypes(torch.float, torch.double)
def test_conv_cudnn_nhwc_support(self, device, dtype):
    """A strided conv2d with a channels-last weight must emit a channels-last
    output, and its backward must succeed.

    Fix: tensors were created on the literal 'cuda' string instead of the
    ``device`` fixture, targeting the wrong GPU on non-default ordinals.
    """
    input = torch.randn((1, 16, 1, 1), dtype=dtype, device=device, requires_grad=True)
    weight = torch.randn((8, 16, 3, 3), dtype=dtype, device=device, requires_grad=True)
    weight = weight.to(memory_format=torch.channels_last)
    o = torch.conv2d(input, weight, None, (2, 1), (1, 1), (1, 1), 1)
    self.assertTrue(o.is_contiguous(memory_format=torch.channels_last))
    o.sum().backward()
# Test that faster algorithms used for inference produce the same results
# Validates depthwise3x3 bug reported in https://github.com/pytorch/pytorch/issues/60176
@onlyCPU
@dtypes(torch.float)
def test_conv2d_no_grad(self, device, dtype):
    for batch, groups in product((1, 2, 3), (1, 2, 4)):
        x = torch.rand(batch, groups, 8, 8, dtype=dtype, device=device)
        conv = nn.Conv2d(groups, 8, kernel_size=(3, 3), groups=groups, dtype=dtype, device=device)
        # Inference-mode result first, then the autograd-enabled result.
        with torch.no_grad():
            out_inference = conv(x)
        out_training = conv(x)
        self.assertEqual(out_training, out_inference, rtol=1e-2, atol=1e-5)
@onlyCUDA
@skipCUDAIfRocm
@skipCUDAIfNoCudnn
@dtypes(torch.float, torch.float16)
@precisionOverride({torch.half: 0.002, torch.float: 1e-4})
def test_cudnn_convolution_relu(self, device, dtype):
    # Fused cudnn_convolution_relu must equal conv2d followed by relu and must
    # preserve the requested memory format.
    for batch, groups, image_size, kernel_size, memory_format in \
            product((1, 2, 3),
                    (1, 2, 4),
                    ((1, 1), (8, 8)),
                    ((1, 1), (3, 3)),
                    (torch.channels_last, torch.contiguous_format)):
        # Skip combinations where the kernel is larger than the image.
        if image_size[0] < kernel_size[0]:
            continue
        inp = torch.rand(batch, groups, *image_size, dtype=dtype, device=device)
        w = torch.randn(8, groups, *kernel_size, dtype=dtype, device=device)
        # Reference is computed before the layout conversion.
        conv2d_out = torch.conv2d(inp, w, None, (1, 1), (0, 0), (1, 1), 1)
        inp = inp.to(memory_format=memory_format)
        w = w.to(memory_format=memory_format)
        cudnn_out = torch.cudnn_convolution_relu(inp, w, None, (1, 1), (0, 0), (1, 1), 1)
        self.assertTrue(cudnn_out.is_contiguous(memory_format=memory_format))
        # TF32 accumulation is less precise, so tolerances are loosened there.
        if tf32_is_not_fp32() and dtype == torch.float:
            self.assertEqual(conv2d_out.relu(), cudnn_out, atol=2e-4, rtol=0.006)
        else:
            self.assertEqual(conv2d_out.relu(), cudnn_out)
@onlyCUDA
@skipCUDAIfRocm
@skipCUDAIfNoCudnn
@dtypes(torch.float, torch.float16)
@precisionOverride({torch.half: 0.002, torch.float: 1e-4})
def test_cudnn_convolution_add_relu(self, device, dtype):
    # Fused cudnn_convolution_add_relu must equal relu(conv2d(x) + alpha * z)
    # and preserve the requested memory format.
    for batch, groups, image_size, kernel_size, memory_format in \
            product((1, 2, 3),
                    (1, 2, 4),
                    ((1, 1), (8, 8)),
                    ((1, 1), (3, 3)),
                    (torch.channels_last, torch.contiguous_format)):
        # Skip combinations where the kernel is larger than the image.
        if image_size[0] < kernel_size[0]:
            continue
        inp = torch.rand(batch, groups, *image_size, dtype=dtype, device=device)
        w = torch.randn(8, groups, *kernel_size, dtype=dtype, device=device)
        # Reference is computed before the layout conversion.
        conv2d_out = torch.conv2d(inp, w, None, (1, 1), (0, 0), (1, 1), 1)
        alpha = 2.0
        z = torch.randn_like(conv2d_out)
        inp = inp.to(memory_format=memory_format)
        w = w.to(memory_format=memory_format)
        z = z.to(memory_format=memory_format)
        cudnn_out = torch.cudnn_convolution_add_relu(inp, w, z, alpha, None, (1, 1), (0, 0), (1, 1), 1)
        self.assertTrue(cudnn_out.is_contiguous(memory_format=memory_format))
        # TF32 accumulation is less precise, so tolerances are loosened there.
        if tf32_is_not_fp32() and dtype == torch.float:
            self.assertEqual(F.relu(conv2d_out + alpha * z), cudnn_out, atol=3e-4, rtol=0.006)
        else:
            self.assertEqual(F.relu(conv2d_out + alpha * z), cudnn_out)
@onlyCUDA
@skipCUDAIfRocm
@skipCUDAIfCudnnVersionLessThan(7603)
def test_convert_conv2d_weight_memory_format(self, device):
    """convert_conv2d_weight_memory_format must drive the output layout for
    both Conv2d and ConvTranspose2d (each followed by BatchNorm2d)."""
    input = torch.randint(1, 10, (2, 8, 4, 4), dtype=torch.float32, device=device)
    builders = (
        lambda: nn.Sequential(nn.Conv2d(8, 4, 3), nn.BatchNorm2d(4)).to(device).float(),
        lambda: nn.Sequential(nn.ConvTranspose2d(8, 4, 3), nn.BatchNorm2d(4)).to(device).float(),
    )
    for build in builders:
        model = build()
        for memory_format in (torch.channels_last, torch.contiguous_format):
            model = nn.utils.convert_conv2d_weight_memory_format(model, memory_format)
            out = model(input)
            self.assertTrue(out.is_contiguous(memory_format=memory_format))
def test_conv_double_backward_strided_with_3D_input_and_weight(self, device):
    # Test that _convolution_double_backward() outputs the correct grad shapes
    # for 3D input / weight when stride > 1. This is an ad-hoc regression test for a
    # specific case that was uncovered during the convolution consolidation effort.
    # The test can be safely deleted if _convolution_double_backward() is removed.
    input = torch.randn(2, 3, 6, device=device)
    weight = torch.randn(3, 3, 3, device=device)
    bias = torch.randn(3, device=device)
    stride = (2,)
    padding = (1,)
    dilation = (1,)
    transposed = False
    output_padding = (0,)
    groups = 1
    # Forward pass only establishes the output shape used for gO below.
    output = torch.ops.aten.convolution(input, weight, bias, stride, padding, dilation, transposed,
                                        output_padding, groups)
    # Random tangents for the double-backward: grads of input/weight/bias grads
    # (ggI/ggW/ggB) plus the upstream grad of the output (gO).
    ggI = torch.randn(input.shape, device=device)
    ggW = torch.randn(weight.shape, device=device)
    ggB = torch.randn(bias.shape, device=device)
    gO = torch.randn(output.shape, device=device)
    output_mask = [True, True, True]
    grad_grad_output, grad_input, grad_weight = torch.ops.aten._convolution_double_backward(
        ggI, ggW, ggB, gO, weight, input, stride, padding, dilation, transposed,
        output_padding, groups, output_mask)
    # Make sure the correct shapes are computed.
    self.assertEqual(grad_grad_output.shape, gO.shape)
    self.assertEqual(grad_input.shape, input.shape)
    self.assertEqual(grad_weight.shape, weight.shape)
def test_nll_loss_mismatched_batch(self, device):
    """nll_loss must reject a target batch that disagrees with the input batch."""
    logits = torch.randn((10, 3), requires_grad=True, device=device)
    # Deliberately wrong: 3 targets for a batch of 10 (should be size (10,)).
    bad_target = torch.zeros((3,), dtype=torch.int64, device=device)
    with self.assertRaisesRegex(ValueError, 'Expected.*batch_size'):
        F.nll_loss(logits, bad_target)
def test_nll_loss_out_of_bounds_ignore_index(self, device):
    """A target equal to ignore_index may lie outside [0, C); forward and backward must still work."""
    logits = torch.randn(6, 3, requires_grad=True, device=device)
    target = torch.tensor([0, 1, 255, 0, 1, 2], dtype=torch.int64, device=device)
    for reduction in ('mean', 'none'):
        loss = F.nll_loss(logits, target, ignore_index=255, reduction=reduction)
        loss.sum().backward()
def test_nll_loss_invalid_target_dim(self, device):
    """nll_loss on a (N, C) input must reject a 2-D target."""
    logits = torch.randn((10, 3), device=device)
    bad_target = torch.zeros((10, 2), dtype=torch.int64, device=device)  # must be 1-D
    with self.assertRaisesRegex(RuntimeError, "1D target tensor expected"):
        F.nll_loss(logits, bad_target)
def test_nll_loss_invalid_weights(self, device):
    """The class-weight vector must be 1-D with exactly C entries."""
    logits = torch.randn((10, 3), device=device)
    target = torch.empty(10, dtype=torch.int64, device=device).random_(0, 3)
    msg = "weight tensor should be defined either for all 3 classes or no classes"
    bad_weights = (
        torch.randn(4, device=device),     # wrong length
        torch.randn(1, 3, device=device),  # wrong rank
    )
    for bad_weight in bad_weights:
        with self.assertRaisesRegex(RuntimeError, msg):
            F.nll_loss(logits, target, weight=bad_weight)
def _nll_loss_helper(self, input_size, reduction, expected, device):
    # Shared driver: random input of shape ``input_size`` = (N, C, d1, ...),
    # random class targets, then check nll_loss output equals ``expected``
    # and that backward yields a gradient of the input's shape.
    input = torch.rand(input_size, requires_grad=True, device=device)
    num_channels = input_size[1]
    target_size = (input_size[0], ) + tuple(input_size[2:])
    target = torch.randint(num_channels, target_size, device=device)
    output = F.nll_loss(input, target, reduction=reduction)
    # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
    self.assertEqualIgnoreType(output, expected)
    output.sum().backward()
    self.assertEqual(input.grad.size(), input.size())
def test_nll_loss_empty_tensor_reduction_none(self, device):
    """With reduction='none', empty batch/spatial dims yield an empty loss of matching shape."""
    cases = (
        ([0, 3], [0]),
        ([0, 3, 5, 7], [0, 5, 7]),
        ([2, 3, 0, 7], [2, 0, 7]),
        ([2, 3, 5, 0], [2, 5, 0]),
        ([2, 3, 5, 7, 0], [2, 5, 7, 0]),
    )
    for input_size, expected_shape in cases:
        self._nll_loss_helper(input_size, "none", torch.empty(expected_shape, device=device), device)
@unittest.skipIf(TEST_WITH_UBSAN, "division-by-zero error with UBSAN")
def test_nll_loss_empty_tensor_reduction_mean(self, device):
    """Mean over zero elements is 0/0, i.e. NaN, for every empty-dim position."""
    nan = torch.tensor(float('nan'), device=device)
    for input_size in ([0, 3], [0, 3, 5, 7], [2, 3, 0, 7], [2, 3, 5, 0], [2, 3, 5, 7, 0]):
        self._nll_loss_helper(input_size, "mean", nan, device)
def test_nll_loss_empty_tensor_reduction_sum(self, device):
    """Sum over zero elements is exactly zero, for every empty-dim position."""
    zero = torch.tensor(0, device=device)
    for input_size in ([0, 3], [0, 3, 5, 7], [2, 3, 0, 7], [2, 3, 5, 0], [2, 3, 5, 7, 0]):
        self._nll_loss_helper(input_size, "sum", zero, device)
@unittest.skipIf(TEST_WITH_UBSAN, "division-by-zero error with UBSAN")
def test_nll_loss_total_weight_is_zero(self, device):
    """All-zero class weights: sum is 0, mean is NaN (0/0), none is all zeros."""
    def check(input_size):
        x = torch.ones(input_size, requires_grad=True, device=device)
        num_channels = input_size[1]
        target_shape = (input_size[0], ) + tuple(input_size[2:])
        target = torch.zeros(target_shape, dtype=torch.long, device=device)
        zero_weight = torch.zeros([num_channels], device=device)
        self.assertEqual(F.nll_loss(x, target, zero_weight, reduction="sum").item(), 0.)
        self.assertEqual(F.nll_loss(x, target, zero_weight, reduction="mean").item(), float("nan"))
        self.assertEqual(F.nll_loss(x, target, zero_weight, reduction="none"), torch.zeros(target.shape, device=device))

    for size in ([2, 3], [2, 3, 5, 7], [2, 3, 5, 7, 9]):
        check(size)
@unittest.skipIf(TEST_WITH_UBSAN, "division-by-zero error with UBSAN")
def test_nll_loss_all_ignored(self, device):
    """Every target equals ignore_index: sum is 0, mean is NaN (0/0), none is all zeros."""
    def check(input_size):
        x = torch.ones(input_size, device=device)
        target_shape = (input_size[0], ) + tuple(input_size[2:])
        target = torch.zeros(target_shape, dtype=torch.long, device=device)
        self.assertEqual(F.nll_loss(x, target, ignore_index=0, reduction="sum").item(), 0)
        self.assertEqual(F.nll_loss(x, target, ignore_index=0, reduction="mean").item(), float("nan"))
        self.assertEqual(F.nll_loss(x, target, ignore_index=0, reduction="none"), torch.zeros(target.shape, device=device))

    for size in ([2, 3], [2, 3, 5, 7], [2, 3, 5, 7, 9]):
        check(size)
def test_nll_loss_byte_target_matches_long(self, device):
    """NLLLoss with uint8 targets must equal NLLLoss with int64 targets, values and grads."""
    N, C = 10, 4
    input = torch.randn(N, C, device=device, requires_grad=True)
    target = torch.empty(N, dtype=torch.long, device=device).random_(0, C)

    def run(reduction, target_dtype):
        # Fresh leaf so each run accumulates its own gradient.
        logits = input.detach()
        logits.requires_grad_()
        log_probs = F.log_softmax(logits, dim=-1)
        result = nn.NLLLoss(reduction=reduction)(log_probs, target.to(target_dtype))
        result.sum().backward()
        return result, logits.grad

    for reduction in ("none", "mean", "sum"):
        result_long, grad_long = run(reduction, torch.long)
        result_byte, grad_byte = run(reduction, torch.uint8)
        self.assertEqual(result_long, result_byte)
        self.assertEqual(grad_long, grad_byte)
def test_cross_entropy_loss_prob_target_all_reductions(self, device):
    """k-dimensional CE with probability (soft) targets, weighted and unweighted,
    must match the reference implementation for every reduction."""
    for k in range(5):
        N, C = 5, 4
        extra_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
        input = torch.randn(N, C, *extra_dims, device=device, requires_grad=True)
        target = torch.randn(N, C, *extra_dims, device=device, requires_grad=True)
        weight = torch.randn(C, device=device).abs()
        for reduction, w in product(['none', 'mean', 'sum'], [None, weight]):
            criterion = torch.nn.CrossEntropyLoss(weight=w, reduction=reduction)
            actual = criterion(input, target)
            expected = loss_reference_fns['CrossEntropyLoss'](
                input, target, reduction=reduction, weight=w)
            self.assertEqual(actual, expected)
def test_cross_entropy_loss_prob_target_unit_weights(self, device):
    """An all-ones weight vector must be a no-op for soft-target CE."""
    for k in range(5):
        N, C = 5, 4
        extra_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
        input = torch.randn(N, C, *extra_dims, device=device, requires_grad=True)
        target = torch.randn(N, C, *extra_dims, device=device, requires_grad=True)
        for reduction in ('none', 'mean', 'sum'):
            plain = torch.nn.CrossEntropyLoss(reduction=reduction)
            ones = torch.ones(C, device=device, dtype=target.dtype)
            weighted = torch.nn.CrossEntropyLoss(weight=ones, reduction=reduction)
            self.assertEqual(plain(input, target), weighted(input, target))
@parametrize_test('reduction', ['none', 'mean', 'sum'])
@parametrize_test('weighted', [False, True])
def test_cross_entropy_loss_prob_target_no_batch_dim(self, device, reduction, weighted):
    """An unbatched (C,) sample must behave like the same sample with batch dim 1."""
    C = 5
    input = torch.randn(C, device=device).log_softmax(dim=-1)
    target = torch.randn(C, device=device).softmax(dim=-1)
    weight = torch.randn(C, device=device) if weighted else None
    criterion = nn.CrossEntropyLoss(reduction=reduction, weight=weight)
    unbatched = criterion(input, target)
    batched = criterion(input.unsqueeze(0), target.unsqueeze(0))
    if reduction == 'none':
        # Drop the artificial batch dim for a like-for-like comparison.
        batched = batched.squeeze(0)
    self.assertEqual(unbatched, batched)
def test_cross_entropy_loss_index_target_unit_weights(self, device):
    """An all-ones weight vector must be a no-op for hard (class-index) targets too."""
    for k in range(5):
        N, C = 5, 4
        extra_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
        input = torch.randn(N, C, *extra_dims, device=device, requires_grad=True)
        target = torch.empty(N, *extra_dims, dtype=torch.long, device=device).random_(0, C)
        for reduction in ('none', 'mean', 'sum'):
            plain = torch.nn.CrossEntropyLoss(reduction=reduction)
            ones = torch.ones(C, device=device, dtype=input.dtype)
            weighted = torch.nn.CrossEntropyLoss(weight=ones, reduction=reduction)
            self.assertEqual(plain(input, target), weighted(input, target))
def test_cross_entropy_loss_one_hot_target(self, device):
    """CE with class-index targets must match CE with the equivalent one-hot
    probability targets.

    Fix: the skip condition tested ``weight is not None`` — but ``weight`` is the
    always-defined outer tensor, so every 'mean' case was skipped, including the
    unweighted one the linked issue does not affect. It now tests the loop
    variable ``w`` (the weight actually in use).
    """
    # Test with k-dimensional loss.
    for k in range(5):
        N, C = 5, 4
        other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
        input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
        target = torch.empty(N, *other_dims, dtype=torch.long, device=device).random_(0, C)
        weight = torch.randn(C, device=device).abs()
        # Get one-hot representation of the target.
        target_one_hot = F.one_hot(target, num_classes=C).to(input.dtype)
        # Need to put the C dim at index 1.
        target_one_hot = target_one_hot.permute(0, -1, *range(1, target_one_hot.dim() - 1))
        for reduction, w in product(['none', 'mean', 'sum'], [None, weight]):
            # Skip this case for now because soft and hard label CE are not consistent
            # in the way they apply class weights (see issue #61309).
            if reduction == 'mean' and w is not None:
                continue
            # Ensure loss computed with class indices matches loss
            # computed with one-hot class probs.
            m = torch.nn.CrossEntropyLoss(weight=w, reduction=reduction)
            output = m(input, target)
            output_one_hot = m(input, target_one_hot)
            self.assertEqual(output, output_one_hot)
def test_cross_entropy_label_smoothing_errors(self, device):
    """label_smoothing outside [0.0, 1.0] must raise, for index and prob targets alike."""
    N, C = 3, 4
    arg_pairs = [
        (torch.randn((N, C), device=device), torch.arange(0, C, device=device)),
        (torch.randn((N, C), device=device), torch.randn(N, C, device=device)),
    ]
    for inp, tgt in arg_pairs:
        bad_loss = nn.CrossEntropyLoss(label_smoothing=1.2)
        with self.assertRaisesRegex(RuntimeError,
                                    r"label_smoothing must be between 0\.0"):
            bad_loss(inp, tgt)
def test_cross_entropy_label_smoothing_consistent_index_target_and_probs(self, device):
    # CE(label_smoothing=s) on index targets must equal plain CE on the
    # manually smoothed one-hot distribution.
    N, C = 10, 4
    ks = range(5)
    reductions = ['none', 'mean', 'sum']
    label_smoothings = [0.05, 0.15]
    for k, reduction, label_smoothing in product(ks, reductions, label_smoothings):
        other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
        input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
        target = torch.empty(N, *other_dims, dtype=torch.long, device=device).random_(0, C)
        # construct target probability that should have the same result as label_smoothing
        target_proba = F.one_hot(target, num_classes=C)
        # Need to put the C dim at index 1.
        target_proba = target_proba.permute(0, -1, *range(1, target_proba.dim() - 1))
        target_mask = (target_proba == 1)
        target_proba = target_proba.to(dtype=input.dtype)
        # y_k^ls = y_k * (1 - label_smoothing) + label_smoothing / n_classes
        # Get one-hot representation of the target.
        target_proba.masked_fill_(target_mask, 1 - label_smoothing + label_smoothing / C)
        target_proba.masked_fill_(~target_mask, label_smoothing / C)
        loss = nn.CrossEntropyLoss(reduction=reduction)
        output_with_prob = loss(input, target_proba)
        loss = nn.CrossEntropyLoss(
            reduction=reduction, label_smoothing=label_smoothing)
        output_with_index = loss(input, target)
        self.assertEqual(output_with_prob, output_with_index,
                         rtol=1e-07, atol=1e-05)
def test_cross_entropy_label_smoothing_with_probs(self, device):
    # CE(label_smoothing=s) on a probability target must equal plain CE on the
    # manually smoothed target: p * (1 - s) + s / C.
    N, C = 10, 4
    ks = range(5)
    reductions = ['none', 'mean', 'sum']
    label_smoothings = [0.05, 0.15]
    # Test with k-dimensional loss.
    for k, label_smoothing in product(ks, label_smoothings):
        other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
        input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
        # NOTE(review): log_softmax produces log-probabilities (non-positive),
        # not probabilities. Both sides of the comparison use it identically, so
        # the parity check still holds, but confirm this is intentional.
        target = F.log_softmax(torch.randn(N, C, *other_dims, device=device), dim=1)
        for reduction in reductions:
            # use with label_smoothing
            loss = nn.CrossEntropyLoss(reduction=reduction, label_smoothing=label_smoothing)
            output_with_smoothing = loss(input, target)
            # manually smoothing target
            # class_proba^ls = class_proba * (1 - label_smoothing) +
            # label_smoothing / n_classes
            target_with_smoothing = target * (1 - label_smoothing) + label_smoothing / C
            loss = nn.CrossEntropyLoss(reduction=reduction)
            output_with_manual_smoothing = loss(input, target_with_smoothing)
            self.assertEqual(output_with_smoothing, output_with_manual_smoothing)
def test_cross_entropy_label_smoothing_weight_ignore_indices(self, device):
    """Cross entropy with label smoothing must skip `ignore_index` targets
    entirely — including in the `mean` denominator — for the default,
    a negative and a positive ignore index, with and without class weights.

    The two inputs differ only in the row whose target is ignored, so all
    compared losses must be identical.
    """
    reductions = ['none', 'sum', 'mean']
    label_smoothings = [0.05, 0.15]

    # Optional per-class weights; the product() below runs each config
    # both with and without them.
    weight = torch.tensor([0.3, 0.6], device=device)

    # inp1 and inp2 differ only in row 0 — the row that is ignored below.
    inp1 = torch.tensor([[0.3, 0.4], [1, 2]], device=device)
    inp2 = torch.tensor([[0.3, 0.6], [1, 2]], device=device)

    targ_default_ignore_index = torch.tensor([-100, 1], device=device)
    targ_negative_ignore_index = torch.tensor([-2, 1], device=device)
    targ_positive_ignore_index = torch.tensor([2, 1], device=device)

    # NOTE: `weight` is rebound by the loop variable, but product() has
    # already captured the original tensor, so this is safe.
    for reduction, label_smoothing, weight in product(reductions, label_smoothings, (None, weight)):
        def check_equal(loss, inp_targ_1, inp_targ_2):
            # Both (input, target) pairs must produce identical losses.
            inp1, targ1 = inp_targ_1
            inp2, targ2 = inp_targ_2
            l1 = loss(inp1, targ1)
            l2 = loss(inp2, targ2)
            self.assertEqual(l1, l2)

        # Default ignore_index
        loss = nn.CrossEntropyLoss(reduction=reduction,
                                   label_smoothing=label_smoothing,
                                   weight=weight)
        check_equal(loss, (inp1, targ_default_ignore_index), (inp2, targ_default_ignore_index))
        if reduction != 'none':
            # Check that we correctly tally the denominator for `mean`
            # i.e. we don't count the ignored_idx at all.
            check_equal(loss, (inp1, targ_default_ignore_index), (inp2[1:], targ_default_ignore_index[1:]))

        # negative ignore_index
        loss = nn.CrossEntropyLoss(reduction=reduction,
                                   label_smoothing=label_smoothing,
                                   ignore_index=-2,
                                   weight=weight)
        check_equal(loss, (inp1, targ_negative_ignore_index), (inp2, targ_negative_ignore_index))
        if reduction != 'none':
            # Check that we correctly tally the denominator for `mean`
            # i.e. we don't count the ignored_idx at all.
            check_equal(loss, (inp1, targ_negative_ignore_index), (inp2[1:], targ_negative_ignore_index[1:]))

        # positive ignore_index
        loss = nn.CrossEntropyLoss(reduction=reduction,
                                   label_smoothing=label_smoothing,
                                   ignore_index=2,
                                   weight=weight)
        check_equal(loss, (inp1, targ_positive_ignore_index), (inp2, targ_positive_ignore_index))
        if reduction != 'none':
            # Check that we correctly tally the denominator for `mean`
            # i.e. we don't count the ignored_idx at all.
            check_equal(loss, (inp1, targ_positive_ignore_index), (inp2[1:], targ_positive_ignore_index[1:]))
def test_softshrink_negative(self, device):
input = torch.randn(5, device=device, requires_grad=True)
m = torch.nn.Softshrink(-1)
with self.assertRaisesRegex(RuntimeError,
r'lambda must be greater or equal to 0, but found to be -1\.'):
m(input)
def test_fold(self, device):
    """Check F.fold's autograd (incl. forward-mode AD and fwd-over-rev
    double backward) and, on CPU, its bfloat16 forward/backward path."""
    def _check_dtype(fn, inp, dtype):
        # Run fn in `dtype` and in float32, compare outputs and gradients.
        low = inp.detach().clone().to(dtype=dtype).requires_grad_(True)
        ref = inp.detach().clone().float().requires_grad_(True)
        out_low = fn(low)
        out_low.sum().backward()
        out_ref = fn(ref)
        out_ref.sum().backward()
        self.assertEqual(out_low.dtype, dtype)
        self.assertEqual(low.grad.dtype, dtype)
        # Reduced precision gets a loose absolute tolerance.
        self.assertEqual(out_low, out_ref.to(dtype=dtype), atol=0.05, rtol=0)
        self.assertEqual(low.grad, ref.grad.to(dtype=dtype))

    def _fold(x):
        return F.fold(x, output_size=(4, 5), kernel_size=(2, 2))

    # Several seeds to vary the random input.
    for seed in (44, 83, 71, 25, 999):
        torch.manual_seed(seed)
        x = torch.randn(1, 12, 12, device=device, requires_grad=True)
        gradcheck(_fold, [x], check_forward_ad=True)
        gradgradcheck(_fold, [x], check_fwd_over_rev=True)
        if device == 'cpu':
            _check_dtype(_fold, x, torch.bfloat16)
def test_logsigmoid_out(self, device):
# this isn't actually documented, but was broken previously:
# https://github.com/pytorch/pytorch/issues/36499
x = torch.randn(2, 3, device=device).t()
empty_out = torch.randn(0, device=device)
self.assertEqual(F.logsigmoid(x), F.logsigmoid(x, out=empty_out))
noncontig_out = torch.randn(2, 3, device=device).t()
self.assertEqual(F.logsigmoid(x), F.logsigmoid(x, out=noncontig_out))
def test_maxpool3d_non_square_backward(self, device):
# previous CUDA routine of this backward calculates kernel launch grid size
# with last two dimensions interchanged, so the tailing along the longer dim
# get ignored. Here we test whether every position gets gradient.
for dim in (2, 3, 4):
shape = tuple(32 if i != dim else 256 for i in range(4))
x = torch.randn(shape, device=device, requires_grad=True)
F.max_pool3d(x, kernel_size=(1, 1, 1)).sum().backward()
self.assertEqual(x.grad, torch.ones_like(x.grad))
# Check that clip_grad_norm_ raises an error if the total norm of the
# parameters' gradients is non-finite
@skipIfTorchDynamo("TorchDynamo fails here for unknown reasons")
def test_clip_grad_norm_error_if_nonfinite(self, device):
norms_pos = [0.1, 1, 2, 3.5, inf]
norms_neg = [-0.1, -1, -2, -3.5]
norms_except_0 = norms_pos + norms_neg
norms_all = norms_except_0 + [0]
# Each entry in test_cases has the following values, in this order:
#
# grad_only_one_elem If True, only one element of the parameter's
# gradient is set to the scalar grad, and the
# rest of the elements are 0. If False, all grad
# elements are equal to the scalar.
#
# prefix_finite_grad_param If True, prefix a parameter that has a grad
# of 1.
#
# scalars Scalars to use as the parameter's grad, through
# multiplication
#
# norms_nonfinite Norm types that should produce nonfinite total norm
#
# norms_finite Norm types that should produce finite total norm
test_cases = [
# Test errors from an infinite grad
(False, False, [inf, -inf], norms_except_0, [0]),
(False, True, [inf, -inf], norms_pos, norms_neg + [0]),
(True, False, [inf, -inf], norms_pos, norms_neg + [0]),
(True, True, [inf, -inf], norms_pos, norms_neg + [0]),
# Test errors from a NaN grad
(False, False, [nan], norms_except_0, [0]),
(False, True, [nan], norms_except_0, [0]),
(True, False, [nan], norms_except_0, [0]),
(True, True, [nan], norms_except_0, [0]),
# Test a grad that should never error
(False, False, [2e22, -2e22], [], norms_all),
(False, True, [2e22, -2e22], [], norms_all),
(True, False, [2e22, -2e22], [], norms_all),
(True, True, [2e22, -2e22], [], norms_all),
# Test a grad that will overflow to inf for only some norm orders
(False, False, [2e200, -2e200], [3.5, 2, -2, -3.5], [inf, 1, 0.1, 0, -1, -0.1]),
(False, True, [2e200, -2e200], [3.5, 2], norms_neg + [inf, 1, 0.1, 0]),
(True, False, [2e200, -2e200], [3.5, 2], norms_neg + [inf, 1, 0.1, 0]),
(True, True, [2e200, -2e200], [3.5, 2], norms_neg + [inf, 1, 0.1, 0]),
]
def gen_parameters(scalar, grad_only_one_elem, prefix_finite_grad_param):
param = torch.ones(10, dtype=torch.float64, device=device, requires_grad=True)
if grad_only_one_elem:
param[1].mul(scalar).sum().backward()
else:
param.mul(scalar).sum().backward()
if prefix_finite_grad_param:
prefix_param = torch.ones(1, dtype=torch.float64, device=device, requires_grad=True)
prefix_param.mul(1).sum().backward()
parameters = [prefix_param, param]
else:
parameters = [param]
return parameters
def run_test_case(norm_type, error_if_nonfinite, scalar, grad_only_one_elem, prefix_finite_grad_param, is_norm_nonfinite):
msg = (
f'norm_type: {norm_type}, ',
f'error_if_nonfinite: {error_if_nonfinite}, '
f'scalar: {scalar}, '
f'grad_only_one_elem: {grad_only_one_elem}, '
f'prefix_finite_grad_param: {prefix_finite_grad_param}, '
f'is_norm_nonfinite: {is_norm_nonfinite}')
parameters = gen_parameters(scalar, grad_only_one_elem, prefix_finite_grad_param)
# Should only throw an error if the total norm is expected to be
# nonfinite and `error_if_nonfinite=True`
if is_norm_nonfinite and error_if_nonfinite:
error_msg = f'The total norm of order {float(norm_type)} for gradients'
grads_before = [p.grad.clone() for p in parameters]
with self.assertRaisesRegex(RuntimeError, error_msg, msg=msg):
clip_grad_norm_(parameters, 1, norm_type=norm_type, error_if_nonfinite=True)
# Grad should not change if error is thrown
grads_after = [p.grad for p in parameters]
self.assertEqual(grads_before, grads_after, msg=msg)
else:
clip_grad_norm_(parameters, 1, norm_type=norm_type, error_if_nonfinite=error_if_nonfinite)
for grad_only_one_elem, prefix_finite_grad_param, scalars, norms_nonfinite, norms_finite in test_cases:
for error_if_nonfinite in [False, True]:
for norm_type, scalar in product(norms_nonfinite, scalars):
run_test_case(norm_type, error_if_nonfinite, scalar, grad_only_one_elem, prefix_finite_grad_param, True)
for norm_type, scalar in product(norms_finite, scalars):
run_test_case(norm_type, error_if_nonfinite, scalar, grad_only_one_elem, prefix_finite_grad_param, False)
@onlyCUDA
@deviceCountAtLeast(2)
def test_clip_grad_norm_multi_device(self, devices):
    """clip_grad_norm_ must agree whether parameters live on one GPU or are
    split across two."""
    class TestModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.layer1 = nn.Linear(10, 10)
            self.layer2 = nn.Linear(10, 10)

    sharded = TestModel()
    sharded.layer1.to(devices[0])
    sharded.layer2.to(devices[1])
    reference = TestModel().to(devices[0])

    for norm_type in (2., math.inf):
        # Give every parameter an all-ones gradient.
        for model in (sharded, reference):
            for p in model.parameters():
                p.grad = torch.ones_like(p)
        actual_norm = clip_grad_norm_(sharded.parameters(), 0.5, norm_type=norm_type)
        expected_norm = clip_grad_norm_(reference.parameters(), 0.5, norm_type=norm_type)
        self.assertEqual(actual_norm, expected_norm)
        # Clipped gradients must match elementwise across devices.
        for p_actual, p_expected in zip(sharded.parameters(), reference.parameters()):
            self.assertEqual(p_actual.grad.to(devices[0]), p_expected.grad)
def test_elu_inplace_overlap(self, device):
x = torch.randn((1, 6), dtype=torch.bfloat16, device=device).expand((6, 6))
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
F.elu(x, inplace=True)
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
F.elu_(x)
# Merge into OpInfo?
@onlyNativeDeviceTypes
def test_elu_inplace_with_neg_alpha(self, device):
    """Backward through in-place elu_/celu_ with negative alpha must error:
    the saved output alone can no longer determine the gradient."""
    for inplace_op in (torch.nn.functional.elu_, torch.nn.functional.celu_):
        leaf = torch.tensor([-1., 1.], device=device, requires_grad=True)
        result = inplace_op(leaf.clone(), alpha=-2)
        with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
            result.backward(torch.ones(2, device=device))
@expectedFailureMeta  # https://github.com/pytorch/pytorch/issues/54897
def test_hardswish_inplace_overlap(self, device):
    """In-place hardswish on self-overlapping memory must raise."""
    expanded = torch.randn((1, 6), device=device).expand((6, 6))
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        F.hardswish(expanded, inplace=True)
def test_silu_inplace_overlap(self, device):
x = torch.randn((1, 6), device=device).expand((6, 6))
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
F.silu(x, inplace=True)
@onlyNativeDeviceTypes
def test_mish_inplace_overlap(self, device):
    """In-place Mish on self-overlapping memory must raise."""
    expanded = torch.randn((1, 6), device=device).expand((6, 6))
    with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
        F.mish(expanded, inplace=True)
def test_softplus_inplace_overlap(self, device):
x = torch.randn((1, 6), device=device).expand((6, 6))
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
F.softplus(x, out=x)
def test_softplus_low_threshold(self, device):
# Ensure gradients are computed correctly with a low threshold.
model = torch.nn.Softplus(threshold=1).double()
input = torch.tensor(0.9, device=device, dtype=torch.double,
requires_grad=True)
output = model(input)
torch.autograd.gradcheck(model, input)
def test_softshrink_inplace_overlap(self, device):
x = torch.randn((1, 6), device=device).expand((6, 6))
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
F.softshrink(x, out=x)
def test_leaky_relu_inplace_overlap(self, device):
x = torch.randn((1, 6), device=device).expand((6, 6))
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
F.leaky_relu(x, inplace=True)
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
F.leaky_relu_(x)
# Merge into OpInfo?
def test_leaky_relu_inplace_with_neg_slope(self, device):
a = torch.tensor([-1., 1.], device=device, requires_grad=True)
b = torch.nn.functional.leaky_relu_(a.clone(), -2)
with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
b.backward(torch.ones(2, device=device))
a = torch.tensor([-1., 1.], device=device, requires_grad=True)
b = torch.nn.functional.rrelu_(a.clone(), -5.0, 1.0)
with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
b.backward(torch.ones(2, device=device))
# Merge into OpInfo?
def test_leaky_relu_inplace_with_zero_slope(self, device):
a = torch.tensor([-2., 0., 2.], device=device, requires_grad=True)
b = torch.nn.functional.leaky_relu_(a.clone(), 0.0)
b.backward(torch.ones(3, device=device))
expected = torch.tensor([0., 0., 1.], device=device)
self.assertEqual(a.grad, expected)
a_bf16 = torch.tensor([-2., 0., 2.], device=device, dtype=torch.bfloat16, requires_grad=True)
b_bf16 = torch.nn.functional.leaky_relu_(a_bf16.clone(), 0.0)
b_bf16.backward(torch.ones(3, device=device))
expected_bf16 = torch.tensor([0., 0., 1.], device=device, dtype=torch.bfloat16)
self.assertEqual(a_bf16.grad, expected_bf16)
@onlyCPU
def test_softshrink(self, device):
    """Softshrink with the default lambda (0.5) against precomputed values.

    Expected is sign(x) * (|x| - 0.5) where |x| > 0.5, else 0; checked with
    a loose atol because the inputs/expectations are short decimals.
    """
    x = torch.tensor([[1.21, 0.56, 0.5001, 0.4999, 1.2357, -0.4999, -0.5001, -1.154,
                       0.254, -0.24, -0.225, 0.104, 0.002, -0.001, 0.0574, 1.2344,
                       0.1748, -0.1797, -0.8125, 0.2051, -1.1328, 1.2344, -0.1562, 2.3554,
                       -0.1953, 0.0304, -0.3613, -1.3047, 1.0312, 0.1436, -0.6953, 0.5664,
                       -0.5820, -0.3301, 0.8203, 0.6133, 0.5938],
                      [-0.8203, -1.2344, -0.5234, 2.5312, -0.4551, -0.6875, -1.5547, -0.2217,
                       -0.3027, 2.6406, 1.3047, 0.2344, -1.6719, 0.2773, -1.3516, 3.4575,
                       0.4414, 0.2656, 2.1094, -1.5156, 1.2344, -0.4336, 0.6797, -3.5486,
                       0.9766, -0.4062, 1.4844, 0.7500, -1.7578, 0.7461, 1.6094, 8.5458,
                       0.3730, -0.3477, -1.0625, 0.3848, 0.0557]], device=device)
    # NOTE: expected is intentionally created without device=; assertEqual
    # compares across devices (and this test is @onlyCPU anyway).
    expected = torch.tensor([[0.71, 0.06, 0.0001, 0., 0.7357, 0., -0.0001, -0.654,
                              0., 0., 0., 0., 0., 0., 0., 0.7344,
                              0., 0., -0.3125, 0., -0.6328, 0.7344, 0., 1.8554,
                              0., 0., 0., -0.8047, 0.5312, 0., -0.1953, 0.0664,
                              -0.0820, 0.0, 0.3203, 0.1133, 0.0938],
                             [-0.3203, -0.7344, -0.0234, 2.0312, 0.0, -0.1875, -1.0547, 0.,
                              0.0, 2.1406, 0.8047, 0., -1.1719, 0., -0.8516, 2.9575,
                              0., 0., 1.6094, -1.0156, 0.7344, 0., 0.1797, -3.0486,
                              0.4766, 0., 0.9844, 0.2500, -1.2578, 0.2461, 1.1094, 8.0458,
                              0., 0., -0.5625, 0., 0.]])
    softshrink = torch.nn.Softshrink()
    out = softshrink(x)
    self.assertEqual(out, expected, atol=1e-2, rtol=0)
def test_threshold_inplace_overlap(self, device):
# Inplace threshold is okay, because it is idempotent
x = torch.randn((1, 6), device=device).expand((6, 6))
F.threshold(x, 0.5, 0.5, inplace=True)
F.threshold_(x, 0.5, 0.5)
@onlyNativeDeviceTypes
def test_triplet_margin_with_distance_loss_default_parity(self, device):
    """With the default distance function, TripletMarginWithDistanceLoss
    (module and functional forms) must match the non-distance-agnostic
    TripletMarginLoss under default args, and both must pass gradcheck."""
    for margin, swap, reduction in \
            itertools.product((0.5, 1, 1.5), (True, False), ('none', 'mean', 'sum')):
        kwargs = {'margin': margin, 'swap': swap, 'reduction': reduction}
        anchor = torch.randn(5, 10, device=device, requires_grad=True)
        positive = torch.randn(5, 10, device=device, requires_grad=True)
        negative = torch.randn(5, 10, device=device, requires_grad=True)

        # Functional forward parity.
        reference = F.triplet_margin_loss(anchor, positive, negative, **kwargs)
        candidate = F.triplet_margin_with_distance_loss(anchor, positive, negative, **kwargs)
        self.assertEqual(candidate, reference, rtol=1e-6, atol=1e-6)

        # Module forward parity.
        ref_module = nn.TripletMarginLoss(**kwargs)
        new_module = nn.TripletMarginWithDistanceLoss(**kwargs)
        self.assertEqual(new_module(anchor, positive, negative),
                         ref_module(anchor, positive, negative),
                         rtol=1e-6, atol=1e-6)

        # Differentiability of both forms.
        self.assertTrue(gradcheck(lambda a, p, n: F.triplet_margin_with_distance_loss(
            a, p, n, **kwargs), (anchor, positive, negative)))
        self.assertTrue(gradcheck(lambda a, p, n: new_module(a, p, n),
                                  (anchor, positive, negative)))
@onlyNativeDeviceTypes
def test_triplet_margin_with_distance_loss(self, device):
    # Test for parity between `nn.TripletMarginWithDistanceLoss` and
    # `F.triplet_margin_with_distance_loss`.
    """Parity between module, functional and jit-traced forms of
    TripletMarginWithDistanceLoss across custom distance functions,
    reductions, margins and swap settings, plus gradcheck of each form."""
    pairwise_distance = nn.PairwiseDistance()

    def cosine_distance(x, y):
        # Distance derived from cosine similarity (not a true metric,
        # but any callable distance is allowed by the API).
        return 1.0 - F.cosine_similarity(x, y)

    # A module, a named function and an equivalent lambda are all accepted.
    distance_functions = (pairwise_distance, cosine_distance,
                          lambda x, y: 1.0 - F.cosine_similarity(x, y))

    reductions = ('mean', 'none', 'sum')
    margins = (1.0, 1.5, 0.5)
    swaps = (True, False)

    for distance_fn, reduction, margin, swap \
            in itertools.product(distance_functions, reductions, margins, swaps):
        anchor = torch.randn(5, 10, device=device, requires_grad=True)
        positive = torch.randn(5, 10, device=device, requires_grad=True)
        negative = torch.randn(5, 10, device=device, requires_grad=True)

        # Test backward
        self.assertTrue(gradcheck(lambda a, p, n: F.triplet_margin_with_distance_loss(
            a, p, n, distance_function=distance_fn, reduction=reduction, margin=margin, swap=swap),
            (anchor, positive, negative)))
        loss_op = nn.TripletMarginWithDistanceLoss(distance_function=distance_fn,
                                                   reduction=reduction, margin=margin, swap=swap)
        self.assertTrue(gradcheck(lambda a, p, n: loss_op(
            a, p, n), (anchor, positive, negative)))
        # The traced module must also remain differentiable.
        traced_loss_op = torch.jit.trace(loss_op, (anchor, positive, negative))
        self.assertTrue(gradcheck(lambda a, p, n: traced_loss_op(
            a, p, n), (anchor, positive, negative)))

        # Test forward parity
        functional = F.triplet_margin_with_distance_loss(anchor, positive, negative,
                                                         distance_function=distance_fn,
                                                         reduction=reduction, margin=margin, swap=swap)
        modular = loss_op(anchor, positive, negative)
        traced = traced_loss_op(anchor, positive, negative)
        self.assertEqual(functional, modular, atol=1e-6, rtol=1e-6)
        self.assertEqual(traced, modular, atol=1e-6, rtol=1e-6)
def test_to_complex(self, device):
m = nn.Linear(3, 5).to(device)
self.assertIs(m, m.to(device))
m.to(torch.cfloat)
self.assertIs(m.weight.dtype, torch.cfloat)
m.to(torch.cdouble)
self.assertIs(m.weight.dtype, torch.cdouble)
m.to(torch.float)
self.assertIs(m.weight.dtype, torch.float)
with warnings.catch_warnings(record=True) as w:
# Trigger warning
m.to(torch.cfloat)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("Complex modules are a new feature" in str(w[-1].message))
@skipMeta
@dtypes(torch.float32, torch.float64)
def test_module_to_empty(self, device, dtype):
    """Modules built on the meta device must be materializable on a real
    device via to_empty(), and convertible back to meta again."""
    class MyModule(nn.Module):
        def __init__(self, in_features, out_features, device=None, dtype=None):
            super().__init__()
            factory_kwargs = {"device": device, "dtype": dtype}
            self.weight = nn.Parameter(torch.randn(in_features, out_features, **factory_kwargs))

        def forward(self, x):
            return x @ self.weight

    batch = torch.randn(5, 10, device=device, dtype=dtype)

    # Meta module instantiation: forward must run on meta weights.
    module = MyModule(10, 1, device='meta', dtype=dtype)
    module(batch)

    # Materialize on the real device (params are uninitialized memory).
    module.to_empty(device=device)
    module(batch)
    with torch.no_grad():
        torch.nn.init.kaiming_uniform_(module.weight)
    module(batch)

    # Create a meta module from an already-materialized one.
    module.to_empty(device='meta')
    module(batch)
@skipMeta
def test_skip_init(self, device):
    """skip_init must put parameters on the requested device while skipping
    the (seeded) initialization a normal constructor would perform."""
    torch.manual_seed(1)
    initialized = torch.nn.Linear(5, 1)
    initialized.to(device)

    torch.manual_seed(1)
    uninitialized = torch.nn.utils.skip_init(torch.nn.Linear, 5, 1, device=device)

    self.assertEqual(initialized.weight.device, uninitialized.weight.device)
    # Same seed, but skip_init must NOT have consumed it for init.
    self.assertFalse(torch.allclose(initialized.weight, uninitialized.weight))
def test_adaptive_pool_invalid(self, device):
inp_1d = (torch.randn(1, 1, 1, device=device), (-1,))
inp_2d = (torch.randn(1, 1, 1, 1, device=device), (-1, 0))
inp_3d = (torch.randn(1, 1, 1, 1, 1, device=device), (-1, 0, 2))
module_input_dict = {torch.nn.AdaptiveAvgPool1d : inp_1d,
torch.nn.AdaptiveAvgPool2d : inp_2d,
torch.nn.AdaptiveAvgPool3d : inp_3d}
for m, inp in module_input_dict.items():
with self.assertRaisesRegex(RuntimeError,
r"elements of output_size must be greater than or equal to 0"):
t, output_size = inp
m(output_size)(t)
@dtypes(torch.float)
@dtypesIfCUDA(torch.double, torch.float, torch.half)
def test_transformerencoderlayer(self, device, dtype):
    # this is a deterministic test for TransformerEncoderLayer
    """Deterministic TransformerEncoderLayer test: constant (cosine-ramp)
    weights plus fixed inputs are compared against hard-coded reference
    outputs, with and without padding masks, for both batch_first settings
    and train/eval, including the NestedTensor fast path.

    `device`/`dtype` are injected by the device-generic test framework
    (see the @dtypes decorators above).
    """
    d_model = 4
    nhead = 2
    dim_feedforward = 16
    dropout = 0.0  # must stay 0 so eval/train outputs are deterministic
    bsz = 2

    atol = 1e-5
    rtol = 1e-7
    if "cuda" in device:
        # CUDA kernels (especially half precision) need looser tolerances.
        atol = 1e-3
        rtol = 1e-2

    def _test(training, batch_first, atol, rtol):
        def perm_fn(x):
            # Reference literals below are seq-first; transpose them when
            # the layer is batch_first.
            return x.transpose(1, 0) if batch_first else x

        model = nn.TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout,
                                           batch_first=batch_first, device=device, dtype=dtype)

        if not training:
            assert dropout == 0
            model = model.eval()

        # set constant weights of the model
        for idx, p in enumerate(model.parameters()):
            x = p.data
            sz = x.view(-1).size(0)
            shape = x.shape
            x = torch.cos(torch.arange(0, sz).float().view(shape))
            p.data.copy_(x)

        # deterministic input
        encoder_input = torch.tensor([[[20., 30., 40., 50.]]], device=device, dtype=dtype)
        result = model(encoder_input)
        ref_output = torch.tensor([[[2.258703, 0.127985, -0.697881, 0.170862]]], device=device, dtype=dtype)
        self.assertEqual(result.shape, ref_output.shape)
        torch.testing.assert_close(result, ref_output, atol=atol, rtol=rtol)
        # 0 values are NOT masked. This shouldn't mask anything.
        mask = torch.tensor([[0]], device=device) == 1
        # TODO: enable fast path for calls with a mask!
        result = model(encoder_input, src_key_padding_mask=mask)
        self.assertEqual(result.shape, ref_output.shape)
        torch.testing.assert_close(result, ref_output, atol=atol, rtol=rtol)
        # 1 values are masked. Since there is only 1 input embedding this
        # will result in nan.
        mask = torch.tensor([[1]], device=device) == 1
        result = model(encoder_input, src_key_padding_mask=mask)
        result = result.cpu().detach().numpy()
        self.assertTrue(np.isnan(result).all())

        # deterministic input
        encoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
                                              [[5., 6., 7., 8.]]], device=device, dtype=dtype))
        result = model(encoder_input)
        ref_output = perm_fn(torch.tensor([[[2.272644, 0.119035, -0.691669, 0.153486]],
                                           [[2.272644, 0.119035, -0.691669, 0.153486]]], device=device, dtype=dtype))
        self.assertEqual(result.shape, ref_output.shape)
        torch.testing.assert_close(result, ref_output, atol=atol, rtol=rtol)
        # all 0 which is no masking
        mask = torch.tensor([[0, 0]], device=device) == 1
        result = model(encoder_input, src_key_padding_mask=mask)
        self.assertEqual(result.shape, ref_output.shape)
        torch.testing.assert_close(result, ref_output, atol=atol, rtol=rtol)
        mask = torch.tensor([[1, 0]], device=device) == 1
        result = model(encoder_input, src_key_padding_mask=mask)
        ref_output = perm_fn(torch.tensor([[[2.301516, 0.092249, -0.679101, 0.103088]],
                                           [[2.301516, 0.092249, -0.679101, 0.103088]]], device=device, dtype=dtype))
        self.assertEqual(result.shape, ref_output.shape)
        torch.testing.assert_close(result, ref_output, atol=atol, rtol=rtol)

        # deterministic input
        encoder_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                               [0.5387, 0.1655, 0.3565, 0.0471]],
                                              [[0.8335, 0.2799, 0.5031, 0.2947],
                                               [0.1402, 0.0318, 0.7636, 0.1346]],
                                              [[0.6333, 0.9344, 0.1376, 0.9938],
                                               [0.8924, 0.2872, 0.6692, 0.2944]],
                                              [[0.9897, 0.6915, 0.3154, 0.1733],
                                               [0.8645, 0.3513, 0.3064, 0.0767]],
                                              [[0.8117, 0.2366, 0.4838, 0.7881],
                                               [0.3718, 0.4945, 0.9511, 0.0864]]], device=device, dtype=dtype))
        result = model(encoder_input)
        ref_output = perm_fn(torch.tensor([[[2.428589, 0.020835, -0.602055, -0.085249],
                                            [2.427987, 0.021213, -0.602496, -0.084103]],
                                           [[2.424689, 0.019155, -0.604793, -0.085672],
                                            [2.413863, 0.022211, -0.612486, -0.072490]],
                                           [[2.433774, 0.021598, -0.598343, -0.087548],
                                            [2.425104, 0.019748, -0.604515, -0.084839]],
                                           [[2.436185, 0.022682, -0.596625, -0.087261],
                                            [2.433556, 0.021891, -0.598509, -0.086832]],
                                           [[2.416246, 0.017512, -0.610712, -0.082961],
                                            [2.422901, 0.024187, -0.606178, -0.074929]]], device=device, dtype=dtype))
        self.assertEqual(result.shape, ref_output.shape)
        torch.testing.assert_close(result, ref_output, atol=atol, rtol=rtol)

        # all 0
        mask = torch.zeros([2, 5], device=device) == 1
        result = model(encoder_input, src_key_padding_mask=mask)
        self.assertEqual(result.shape, ref_output.shape)
        torch.testing.assert_close(result, ref_output, atol=atol, rtol=rtol)
        # Partially mask a few positions and compare against new references.
        mask[0, 1] = 1
        mask[1, 3] = 1
        mask[1, 4] = 1
        result = model(encoder_input, src_key_padding_mask=mask)
        ref_output = perm_fn(torch.tensor([[[2.429026, 0.020793, -0.601741, -0.085642],
                                            [2.428811, 0.021445, -0.601912, -0.084252]],
                                           [[2.425009, 0.019155, -0.604566, -0.085899],
                                            [2.415408, 0.02249 , -0.611415, -0.073]],
                                           [[2.434199, 0.021682, -0.598039, -0.087699],
                                            [2.42598, 0.019941, -0.603896, -0.085091]],
                                           [[2.436457, 0.022736, -0.59643 , -0.08736],
                                            [2.434021, 0.022093, -0.598179, -0.08679]],
                                           [[2.416531, 0.017498, -0.610513, -0.083181],
                                            [2.4242, 0.024653, -0.605266, -0.074959]]], device=device, dtype=dtype))
        self.assertEqual(result.shape, ref_output.shape)
        torch.testing.assert_close(result, ref_output, atol=atol, rtol=rtol)

        # NestedTensor is only supported for the fast path
        # currently, which won't be used if training.
        if (batch_first and not training and
                ('cuda' in str(device) or 'cpu' in str(device)) and not TEST_WITH_CROSSREF):
            encoder_input[0][-1] = torch.zeros_like(encoder_input[0][1])
            mask = torch.zeros(encoder_input.shape[:-1], device=device, dtype=torch.bool)
            mask[0][-1] = True

            nt = torch.nested_tensor([encoder_input[0][:-1], encoder_input[1]], device=device)
            result = model(nt)
            ref_output = torch.tensor(
                [
                    [
                        [2.4268184, 0.02042419, -0.603311, -0.08476824],
                        [2.423306, 0.01889652, -0.6057701, -0.08519465],
                        [2.431538, 0.02078694, -0.5999354, -0.08746159],
                        [2.4348664, 0.02212971, -0.5975677, -0.08733892],
                        [2.423133, 0.02097577, -0.60594773, -0.08113337],
                    ],
                    [
                        [2.4279876, 0.02121329, -0.60249615, -0.08410317],
                        [2.4138637, 0.02221113, -0.6124869, -0.07249016],
                        [2.4251041, 0.01974815, -0.6045152, -0.08483928],
                        [2.4335563, 0.0218913, -0.59850943, -0.08683228],
                        [2.4229012, 0.02418739, -0.6061784, -0.07492948],
                    ],
                ],
                device=device, dtype=dtype
            )
            result = result.to_padded_tensor(0)
            # Zero out the padded (masked) position in both tensors before
            # comparing, since its contents are undefined.
            ref_output[0][-1] = torch.zeros_like(
                ref_output[0][-1], device=device, dtype=dtype
            )
            result[0][-1] = torch.zeros_like(
                result[0][-1], device=device, dtype=dtype
            )
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            if 'cuda' in device:
                if dtype == torch.float:
                    atol = 2e-4
                    rtol = 4e-3
                else:
                    atol = 7e-4
                    rtol = 2e-2
                torch.testing.assert_close(result, ref_output, atol=atol, rtol=rtol)
            else:
                torch.testing.assert_close(result, ref_output)

    for batch_first in (True, False):
        for training in (True, False):
            if training:
                cm = contextlib.nullcontext()
            else:
                # Fast path requires inference mode.
                cm = torch.no_grad()
            with cm:
                _test(batch_first=batch_first, training=training, atol=atol, rtol=rtol)
@dtypes(torch.double)
@torch.no_grad()
def test_multihead_attn_fast_path_query_and_bias_have_different_dtypes(self, device, dtype):
    """Smoke test: the MHA fast path must not crash when in_proj_bias has a
    different dtype (half) than the query."""
    attn = torch.nn.MultiheadAttention(3, 3, batch_first=True, dtype=dtype, device=device).eval()
    attn.in_proj_bias = torch.nn.Parameter(attn.in_proj_bias.to(torch.half).to(device))
    q = torch.randn(3, 3, 3, dtype=dtype, device=device)
    attn(q, q, q)
@dtypes(torch.double)
@torch.no_grad()
def test_multihead_attn_fast_path_small_test(self, device, dtype):
    """Smoke test: the MHA fast path (eval + no_grad + batch_first) runs."""
    attn = torch.nn.MultiheadAttention(3, 3, batch_first=True, dtype=dtype, device=device).eval()
    q = torch.randn(3, 3, 3, dtype=dtype, device=device)
    attn(q, q, q)
@dtypes(torch.double)
@torch.no_grad()
def test_multihead_attn_in_proj_bias_none(self, device, dtype):
    """Smoke test: MHA constructed with bias=False (in_proj_bias is None)
    must still run its forward."""
    attn = torch.nn.MultiheadAttention(1, 1, bias=False, dtype=dtype, device=device)
    q = torch.rand(3, 2, 1, dtype=dtype, device=device)
    attn(q, q, q)
@dtypes(torch.double)
@torch.no_grad()
def test_multihead_attn_in_proj_weight_none(self, device, dtype):
    """Smoke test for the separate-projection-weights MHA path.

    Setting kdim == vdim == 2 (!= embed_dim) makes MHA use per-input
    projection weights, leaving in_proj_weight as None; the forward must
    still run.
    """
    attn = torch.nn.MultiheadAttention(4, 4, vdim=2, kdim=2, dtype=dtype, device=device)
    q = torch.rand(4, 4, 4, dtype=dtype, device=device)
    kv = torch.rand(4, 4, 2, dtype=dtype, device=device)
    attn(q, kv, kv)
@onlyCPU
@dtypes(torch.double)
def test_transformerencoderlayer_fast_path(self, device, dtype):
    """Smoke test for the TransformerEncoderLayer fast path
    (eval + no_grad + batch_first) with an all-False attention mask.

    Bug fix: `src` and `src_mask` were created with the default dtype and
    device, which mismatches the model when `dtype` is not float32 (here it
    is torch.double); they are now created with the test's device/dtype.
    """
    model = torch.nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True, device=device, dtype=dtype)
    src = torch.rand(32, 10, 512, device=device, dtype=dtype)
    # All-False mask: nothing is actually masked.
    src_mask = torch.zeros(10, 10, device=device).to(torch.bool)

    model.eval()
    with torch.no_grad():
        model(src, src_mask)
@dtypes(torch.float)
@dtypesIfCUDA(torch.half, torch.float)
def test_transformerencoderlayer_gelu(self, device, dtype):
    # this is a deterministic test for TransformerEncoderLayer with gelu activation
    """Deterministic TransformerEncoderLayer test with GELU activation:
    constant (cosine-ramp) weights and fixed inputs vs hard-coded reference
    outputs, for every way of spelling the activation ('gelu', F.gelu,
    nn.GELU()), both batch_first settings and train/eval.

    `device`/`dtype` are injected by the device-generic test framework.
    """
    d_model = 4
    nhead = 2
    dim_feedforward = 16
    dropout = 0.0  # must stay 0 so outputs are deterministic
    bsz = 2

    atol = 0
    rtol = 1e-5
    if "cuda" in device:
        # CUDA kernels (especially half precision) need looser tolerances.
        atol = 1e-3
        rtol = 1e-2

    def _test(activation, batch_first, training):
        def perm_fn(x):
            # Reference literals below are seq-first; transpose them when
            # the layer is batch_first.
            return x.transpose(1, 0) if batch_first else x

        model = nn.TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout,
                                           activation, batch_first=batch_first, device=device, dtype=dtype)
        if not training:
            assert dropout == 0
            model = model.eval()

        # set constant weights of the model
        for idx, p in enumerate(model.parameters()):
            x = p.data
            sz = x.view(-1).size(0)
            shape = x.shape
            x = torch.cos(torch.arange(0, sz).float().view(shape))
            p.data.copy_(x)

        # deterministic input
        encoder_input = torch.tensor([[[20., 30., 40., 50.]]], device=device, dtype=dtype)
        result = model(encoder_input)
        ref_output = torch.tensor([[[2.249815, 0.131006, -0.702199, 0.177868]]], device=device, dtype=dtype)
        torch.testing.assert_close(result, ref_output, rtol=rtol, atol=atol)

        # deterministic input
        encoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
                                              [[5., 6., 7., 8.]]], device=device, dtype=dtype))
        result = model(encoder_input)
        ref_output = perm_fn(torch.tensor([[[2.264103, 0.121417, -0.696012, 0.159724]],
                                           [[2.264103, 0.121417, -0.696012, 0.159724]]], device=device, dtype=dtype))
        torch.testing.assert_close(result, ref_output, rtol=rtol, atol=atol)

        # deterministic input
        encoder_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                               [0.5387, 0.1655, 0.3565, 0.0471]],
                                              [[0.8335, 0.2799, 0.5031, 0.2947],
                                               [0.1402, 0.0318, 0.7636, 0.1346]],
                                              [[0.6333, 0.9344, 0.1376, 0.9938],
                                               [0.8924, 0.2872, 0.6692, 0.2944]],
                                              [[0.9897, 0.6915, 0.3154, 0.1733],
                                               [0.8645, 0.3513, 0.3064, 0.0767]],
                                              [[0.8117, 0.2366, 0.4838, 0.7881],
                                               [0.3718, 0.4945, 0.9511, 0.0864]]], device=device, dtype=dtype))
        result = model(encoder_input)
        ref_output = perm_fn(torch.tensor([[[2.42163188, 0.03227153, -0.60714219, -0.05908082],
                                            [2.42151276, 0.03302179, -0.60722523, -0.05762651]],
                                           [[2.41926761, 0.02974034, -0.60879519, -0.0621269],
                                            [2.41626395, 0.03539356, -0.61087842, -0.04978623]],
                                           [[2.42382808, 0.03218872, -0.6055963, -0.06073591],
                                            [2.41983477, 0.03085259, -0.60840145, -0.06046414]],
                                           [[2.42500749, 0.03328855, -0.60476388, -0.0595334],
                                            [2.4237977, 0.03290575, -0.60561789, -0.05940082]],
                                           [[2.41383916, 0.02686345, -0.61256377, -0.06380707],
                                            [2.42000277, 0.03800944, -0.60824798, -0.04754947]]], device=device, dtype=dtype))
        torch.testing.assert_close(result, ref_output, rtol=rtol, atol=atol)

    for activation, batch_first, training in product(('gelu', F.gelu, nn.GELU()), (True, False), (True, False)):
        # Fast path requires inference mode.
        if training:
            cm = contextlib.nullcontext()
        else:
            cm = torch.no_grad()
        with cm:
            _test(activation=activation, batch_first=batch_first, training=training)
class TestModuleGlobalHooks(TestCase):
def tearDown(self):
    """Reset every global module-hook registry so hooks registered by one
    test cannot leak into the next."""
    for registry_name in ('_global_backward_hooks',
                          '_global_forward_hooks',
                          '_global_forward_pre_hooks'):
        setattr(nn.modules.module, registry_name, OrderedDict())
@skipIfTorchDynamo("TorchDynamo does not work well with hooks")
def test_module_global_hooks(self):
    """Global forward/backward hooks must fire for every module instance,
    accumulate per registered hook (each hook adds its `inc`), and stop
    firing once removed."""
    module = nn.Sigmoid

    module_1 = module()
    module_2 = module()
    module_3 = module()

    input = torch.ones(5, 5, requires_grad=True)

    counter = {
        'forwards': 0,
        'backwards': 0
    }

    def fw_hook(inc, h_module, input, output):
        self.assertIsInstance(input, tuple)
        self.assertTrue(isinstance(output, torch.Tensor))
        self.assertTrue(isinstance(h_module, module))
        self.assertEqual(input[0], torch.ones(5, 5))
        # sigmoid(1) == 1 / (1 + 1/e)
        self.assertEqual(output, torch.empty(5, 5).fill_(1 / (1 + 1 / math.e)))
        counter['forwards'] += inc

    def bw_hook(inc, h_module, grad_input, grad_output):
        self.assertIsInstance(grad_input, tuple)
        self.assertIsInstance(grad_output, tuple)
        self.assertTrue(isinstance(h_module, module))
        self.assertEqual(grad_output[0], torch.ones(5, 5) * 2)
        counter['backwards'] += inc

    # One global forward hook: each forward adds 1.
    test_fwd = nn.modules.module.register_module_forward_hook(lambda *args: fw_hook(1, *args))

    module_1(input)
    module_2(input)
    module_3(input)
    self.assertEqual(counter['forwards'], 3)
    self.assertEqual(counter['backwards'], 0)

    test_bwd = nn.modules.module.register_module_backward_hook(
        lambda *args: bw_hook(1, *args))

    output_1 = module_1(input)
    output_2 = module_2(input)
    output_3 = module_3(input)
    self.assertEqual(counter['forwards'], 6)
    self.assertEqual(counter['backwards'], 0)

    output_1.backward(torch.ones(5, 5) * 2, retain_graph=True)
    output_2.backward(torch.ones(5, 5) * 2, retain_graph=False)
    output_3.backward(torch.ones(5, 5) * 2, retain_graph=False)
    self.assertEqual(counter['forwards'], 6)
    self.assertEqual(counter['backwards'], 3)

    # Backward again on the retained graph: one more backward-hook hit.
    output_1.backward(torch.ones(5, 5) * 2, retain_graph=True)
    self.assertEqual(counter['forwards'], 6)
    self.assertEqual(counter['backwards'], 4)

    # Second forward hook adds 2 per call: 3 forwards * (1 + 2) = +9.
    test2_fwd = nn.modules.module.register_module_forward_hook(lambda *args: fw_hook(2, *args))

    output = module_1(input)
    output = module_2(input)
    output = module_3(input)
    self.assertEqual(counter['forwards'], 15)
    self.assertEqual(counter['backwards'], 4)

    test2_bwd = nn.modules.module.register_module_backward_hook(lambda *args: bw_hook(2, *args))

    module_1(input).backward(torch.ones(5, 5) * 2)
    self.assertEqual(counter['forwards'], 18)
    self.assertEqual(counter['backwards'], 7)

    test2_bwd.remove()

    # With the 2x backward hook removed, backward adds only 1 again.
    module_2(input).backward(torch.ones(5, 5) * 2)
    self.assertEqual(counter['forwards'], 21)
    self.assertEqual(counter['backwards'], 8)

    test2_fwd.remove()

    # With the 2x forward hook removed, forward adds only 1 again.
    module_3(input).backward(torch.ones(5, 5) * 2)
    self.assertEqual(counter['forwards'], 22)
    self.assertEqual(counter['backwards'], 9)

    test_fwd.remove()
    test_bwd.remove()
def test_module_global_hook_invalid_outputs(self):
module = nn.Sigmoid()
input = torch.randn(5, 5, requires_grad=True)
def bw_fail1(self, grad_input, grad_output):
return grad_input[:-1]
def bw_fail2(self, grad_input, grad_output):
return grad_input + (torch.randn(2, 2),)
with nn.modules.module.register_module_backward_hook(bw_fail1):
with self.assertRaisesRegex(RuntimeError, 'got 0, but expected 1'):
module(input).sum().backward()
with nn.modules.module.register_module_backward_hook(bw_fail2):
with self.assertRaisesRegex(RuntimeError, 'got 2, but expected 1'):
module(input).sum().backward()
    def test_module_backward_global_hook_writeable(self):
        """A global backward hook may rewrite grad_input; here it doubles it,
        so the parameter gradient becomes 2 * sigmoid'(x)."""
        module = nn.Sigmoid()
        input = torch.randn(5, 5, requires_grad=True)
        sig_x = torch.sigmoid(input)
        def bw_hook(module, grad_input, grad_output):
            for grad in grad_input:
                self.assertTrue(isinstance(grad, torch.Tensor))
            for grad in grad_output:
                self.assertTrue(isinstance(grad, torch.Tensor))
            # Returning a new tuple replaces the incoming gradients.
            return tuple(gi * 2 for gi in grad_input)
        # Handle deliberately not kept/removed: tearDown resets the registry.
        nn.modules.module.register_module_backward_hook(bw_hook)
        module(input).backward(torch.ones(5, 5))
        # d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)); hook doubles it.
        expected_grad = sig_x * (1 - sig_x) * 2
        self.assertEqual(input.grad, expected_grad)
    @skipIfTorchDynamo("TorchDynamo does not work well with hooks")
    def test_module_global_forward_preforward_hook_writeable(self):
        """Global pre-forward and forward hooks may rewrite the input and
        output respectively; the effective computation becomes
        -sigmoid(relu(x)), and the gradient is checked against it."""
        module = nn.Sigmoid()
        input = torch.randn(5, 5, requires_grad=True)
        sig_x = torch.sigmoid(input)
        def forward_pre_hook(m, input):
            # Replaces the module input before forward runs.
            return torch.nn.functional.relu(input[0])
        def forward_hook(m, input, output):
            # Replaces the module output after forward runs.
            return -output
        nn.modules.module.register_module_forward_pre_hook(forward_pre_hook)
        nn.modules.module.register_module_forward_hook(forward_hook)
        output = module(input)
        expected_res = -torch.sigmoid(torch.nn.functional.relu(input))
        self.assertEqual(output, expected_res)
        output.backward(torch.ones(5, 5) * 2, retain_graph=True)
        # relu zeroes the gradient where x <= 0; where x > 0, relu(x) == x so
        # sig_x matches the actual forward value there.
        # NOTE(review): .double() presumably matches a file-level default dtype
        # of torch.double -- confirm against the top of the file.
        mask = (input > 0).double()
        expected_grad = -sig_x * (1 - sig_x) * 2 * mask
        self.assertEqual(input.grad, expected_grad)
    @skipIfTorchDynamo("TorchDynamo does not work well with hooks")
    def test_module_forward_preforward_hook_removable(self):
        """
        This test is to test when multiple pre-forward hook functions can be
        registered successfully and used correctly, if the handle can be removable
        during the pre-forward hook function call.
        """
        module = nn.Sigmoid()
        # Each hook removes its own handle while the hook machinery is still
        # iterating the hooks dict -- this must not break dispatch.
        def removable_hook(m, input):
            nonlocal handle
            handle.remove()
            return input
        def removable_hook_2(m, input):
            nonlocal handle_2
            handle_2.remove()
            return input
        handle = module.register_forward_pre_hook(removable_hook)
        handle_2 = module.register_forward_pre_hook(removable_hook_2)
        # make sure hook register is successful
        self.assertEqual(len(handle.hooks_dict_ref()), 2)
        self.assertEqual(len(handle_2.hooks_dict_ref()), 2)
        input = torch.randn(2, 2)
        output = module(input)
        # Hooks returned the input unchanged, so output is plain sigmoid.
        self.assertEqual(torch.sigmoid(input), output)
        # make sure hook removal is successful
        self.assertFalse(handle.id in handle.hooks_dict_ref())
        self.assertFalse(handle_2.id in handle.hooks_dict_ref())
        self.assertEqual(len(handle.hooks_dict_ref()), 0)
        self.assertEqual(len(handle_2.hooks_dict_ref()), 0)
    @skipIfTorchDynamo("TorchDynamo does not work well with hooks")
    def test_module_forward_forward_hook_removable(self):
        """
        This test is to test when multiple forward hook functions can be registered
        successfully and used correctly, if the handle can be removable during the
        forward hook function call.
        """
        module = nn.Sigmoid()
        # Each hook removes its own handle mid-dispatch; see the pre-forward
        # variant of this test above for the mirrored scenario.
        def removable_hook(m, input, output):
            nonlocal handle
            handle.remove()
            return output
        def removable_hook_2(m, input, output):
            nonlocal handle_2
            handle_2.remove()
            return output
        handle = module.register_forward_hook(removable_hook)
        handle_2 = module.register_forward_hook(removable_hook_2)
        # make sure hook register is successful
        self.assertEqual(len(handle.hooks_dict_ref()), 2)
        self.assertEqual(len(handle_2.hooks_dict_ref()), 2)
        input = torch.randn(2, 2)
        output = module(input)
        # Hooks returned the output unchanged, so output is plain sigmoid.
        self.assertEqual(torch.sigmoid(input), output)
        # make sure hook removal is successful
        self.assertFalse(handle.id in handle.hooks_dict_ref())
        self.assertFalse(handle_2.id in handle.hooks_dict_ref())
        self.assertEqual(len(handle.hooks_dict_ref()), 0)
        self.assertEqual(len(handle_2.hooks_dict_ref()), 0)
    @skipIfTorchDynamo("TorchDynamo does not work well with hooks")
    def test_global_and_local_hooks_order(self):
        """Global hooks must run before module-local hooks of the same kind:
        each local hook asserts its global counterpart has already fired, and
        each global hook asserts its local counterpart has not."""
        module = nn.Sigmoid()
        global_forward_pre_called = False
        local_forward_pre_called = False
        global_forward_called = False
        local_forward_called = False
        global_backward_called = False
        local_backward_called = False
        def global_forward_pre_hook(m, input):
            nonlocal global_forward_pre_called
            self.assertTrue(not local_forward_pre_called)
            global_forward_pre_called = True
            return input
        def local_forward_pre_hook(m, input):
            nonlocal local_forward_pre_called
            self.assertTrue(global_forward_pre_called)
            local_forward_pre_called = True
            return input
        def global_forward_hook(m, input, output):
            nonlocal global_forward_called
            self.assertTrue(not local_forward_called)
            global_forward_called = True
            return output
        def local_forward_hook(m, input, output):
            nonlocal local_forward_called
            self.assertTrue(global_forward_called)
            local_forward_called = True
            return output
        def global_backward_hook(m, input, output):
            nonlocal global_backward_called
            self.assertTrue(not local_backward_called)
            global_backward_called = True
            return input
        def local_backward_hook(m, input, output):
            nonlocal local_backward_called
            self.assertTrue(global_backward_called)
            local_backward_called = True
            return input
        input = torch.randn(5, 5, requires_grad=True)
        nn.modules.module.register_module_forward_pre_hook(global_forward_pre_hook)
        module.register_forward_pre_hook(local_forward_pre_hook)
        nn.modules.module.register_module_forward_hook(global_forward_hook)
        module.register_forward_hook(local_forward_hook)
        nn.modules.module.register_module_backward_hook(global_backward_hook)
        module.register_backward_hook(local_backward_hook)
        output = module(input)
        self.assertTrue(local_forward_called and local_forward_pre_called and global_forward_called and global_forward_pre_called)
        output.backward(torch.ones(5, 5), retain_graph=True)
        self.assertTrue(local_backward_called and global_backward_called)
class LazyModule(torch.nn.modules.lazy.LazyModuleMixin, torch.nn.Module):
    # Minimal lazy-module fixture: defines no parameters of its own; tests
    # attach UninitializedParameter / UninitializedBuffer instances to it.
    pass
class TestLazyModules(TestCase):
    @suppress_warnings
    def test_lazy_module_parameter(self):
        """state_dict round-trips of a module holding an UninitializedParameter:
        saving preserves the uninitialized marker, loading it over an
        initialized parameter fails, and loading a real tensor materializes it."""
        module = LazyModule()
        module.register_parameter('test_param', UninitializedParameter())
        self.assertTrue(module.has_uninitialized_params())
        state_dict = module.state_dict()
        self.assertIsInstance(state_dict['test_param'], UninitializedParameter)
        new_module = LazyModule()
        # An error is raised when there is an attempt to replace an existing parameter
        # with an uninitialized one
        new_module.register_parameter('test_param', nn.Parameter(torch.ones(5, 5)))
        with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
            new_module.load_state_dict(state_dict)
        # Uninitialized parameters are overriden when the state dict to be loaded contains a valid one
        new_module = LazyModule()
        new_module.register_parameter('test_param', nn.Parameter(torch.ones(5, 5)))
        module.load_state_dict(new_module.state_dict())
        self.assertEqual(module.test_param, torch.ones((5, 5)))
        # Uninitialized parameters are left unchanged
        module = LazyModule()
        module.register_parameter('test_param', UninitializedParameter())
        self.assertTrue(module.has_uninitialized_params())
        new_module = LazyModule()
        new_module.register_parameter('test_param', UninitializedParameter())
        module.load_state_dict(new_module.state_dict())
        self.assertTrue(module.has_uninitialized_params())
@suppress_warnings
def test_lazy_module_buffer(self):
module = LazyModule()
module.register_buffer('test_buffer', UninitializedBuffer())
self.assertTrue(module.has_uninitialized_params())
state_dict = module.state_dict()
self.assertIsInstance(state_dict['test_buffer'], UninitializedBuffer)
new_module = LazyModule()
# An error is raised when there is an attempt to replace an existing parameter
# with an uninitialized one
new_module.register_buffer('test_buffer', torch.ones(5, 5))
with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
new_module.load_state_dict(state_dict)
# Uninitialized parameters are overriden when the state dict to be loaded contains a valid one
new_module = LazyModule()
new_module.register_buffer('test_buffer', torch.ones(5, 5))
module.load_state_dict(new_module.state_dict())
self.assertEqual(module.test_buffer, torch.ones((5, 5)))
# Uninitialized parameters are left unchanged
module = LazyModule()
module.register_buffer('test_buffer', UninitializedBuffer())
self.assertTrue(module.has_uninitialized_params())
new_module = LazyModule()
new_module.register_buffer('test_buffer', UninitializedBuffer())
module.load_state_dict(new_module.state_dict())
module.load_state_dict(new_module.state_dict())
self.assertTrue(module.has_uninitialized_params())
@suppress_warnings
def test_lazy_module_jit_param(self):
module = LazyModule()
module.register_parameter('test_param', UninitializedParameter())
self.assertTrue(module.has_uninitialized_params())
with self.assertRaisesRegex(RuntimeError, 'run a forward pass'):
torch.jit.script(module)
@suppress_warnings
def test_lazy_module_jit_buffer(self):
module = LazyModule()
module.register_buffer('test_buffer', UninitializedBuffer())
self.assertTrue(module.has_uninitialized_params())
with self.assertRaisesRegex(RuntimeError, 'run a forward pass'):
torch.jit.script(module)
@suppress_warnings
def test_lazy_share_memory_param(self):
module = LazyModule()
module.register_parameter('test_param', UninitializedParameter())
self.assertTrue(module.has_uninitialized_params())
with self.assertRaisesRegex(RuntimeError, 'share memory on an uninitialized'):
module.share_memory()
@suppress_warnings
def test_lazy_share_memory_buffer(self):
module = LazyModule()
module.register_buffer('test_buffer', UninitializedBuffer())
self.assertTrue(module.has_uninitialized_params())
with self.assertRaisesRegex(RuntimeError, 'share memory on an uninitialized'):
module.share_memory()
@suppress_warnings
def test_linear(self):
module = nn.LazyLinear(10)
self.assertIsInstance(module.weight, UninitializedParameter)
self.assertIsInstance(module.bias, UninitializedParameter)
input = torch.ones(5, 5)
module(input)
self.assertIsInstance(module, nn.Linear)
self.assertNotIsInstance(module, nn.LazyLinear)
self.assertTrue(module.weight.shape == (10, 5))
self.assertTrue(module.bias.shape == (10,))
y = module(input)
self.assertTrue(torch.equal(torch.nn.functional.linear(input, module.weight, module.bias), y))
    @suppress_warnings
    def test_lazy_linear_pickle(self):
        """Pickling preserves the lazy/uninitialized state before the first
        forward and the materialized Linear state after it."""
        module = nn.LazyLinear(10)
        self.assertIsInstance(module.weight, UninitializedParameter)
        self.assertIsInstance(module.bias, UninitializedParameter)
        # Round-trip while still lazy: stays a LazyLinear with lazy params.
        module = pickle.loads(pickle.dumps(module))
        self.assertIsInstance(module, nn.LazyLinear)
        self.assertIsInstance(module.weight, UninitializedParameter)
        self.assertIsInstance(module.bias, UninitializedParameter)
        input = torch.ones(5, 5)
        module(input)  # fully materialized
        # Round-trip after materialization: comes back as a plain Linear.
        new_module = pickle.loads(pickle.dumps(module))
        self.assertIsInstance(new_module, nn.Linear)
        self.assertNotIsInstance(new_module, nn.LazyLinear)
        self.assertTrue(new_module.weight.shape == (10, 5))
        self.assertNotIsInstance(new_module.weight, UninitializedParameter)
        self.assertTrue(new_module.bias.shape == (10,))
        self.assertNotIsInstance(new_module.bias, UninitializedParameter)
    @suppress_warnings
    def test_linear_state(self):
        """Loading a regular Linear state dict initializes a LazyLinear's
        parameters; loading a lazy state dict into a Linear fails."""
        module = nn.Linear(5, 10)
        lazy_module = nn.LazyLinear(10)
        lazy_module.load_state_dict(module.state_dict())
        # Parameters have been initialized but the module won't become a full
        # Linear one until the first iteration. This is due to
        # limitations on the state_dict loading logic
        self.assertFalse(lazy_module.has_uninitialized_params())
        self.assertTrue(lazy_module.weight.shape == (10, 5))
        self.assertTrue(lazy_module.bias.shape == (10,))
        # The reverse direction must be rejected.
        module = nn.Linear(5, 10)
        lazy_module = nn.LazyLinear(10)
        with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
            module.load_state_dict(lazy_module.state_dict())
    def _check_lazy_conv(self, cls, lazy_cls, func, init_args, input_shape,
                         expected_weight_shape, expected_bias_shape):
        """Shared checker for lazy conv variants: the lazy module starts with
        uninitialized weight/bias, becomes `cls` after the first forward with
        the expected parameter shapes, and then matches the functional `func`.

        init_args: positional args for the lazy constructor (out_channels, kernel).
        """
        module = lazy_cls(*init_args)
        self.assertIsInstance(module.weight, UninitializedParameter)
        if module.bias is not None:
            self.assertIsInstance(module.bias, UninitializedParameter)
        input = torch.ones(*input_shape)
        module(input)
        # First forward materialized the module in place.
        self.assertIsInstance(module, cls)
        self.assertNotIsInstance(module, lazy_cls)
        self.assertEqual(module.weight.shape, expected_weight_shape)
        if module.bias is not None:
            self.assertEqual(module.bias.shape, expected_bias_shape)
        y = module(input)
        self.assertTrue(torch.equal(func(input, module.weight, module.bias), y))
    def _check_lazy_conv_pickle(self, cls, lazy_cls, init_args, input_shape,
                                expected_weight_shape, expected_bias_shape):
        """Shared checker: pickling a lazy conv keeps it lazy before the first
        forward and yields a fully-materialized `cls` afterwards."""
        module = lazy_cls(*init_args)
        self.assertIsInstance(module.weight, UninitializedParameter)
        if module.bias is not None:
            self.assertIsInstance(module.bias, UninitializedParameter)
        # Round-trip while still lazy.
        module = pickle.loads(pickle.dumps(module))
        self.assertIsInstance(module, lazy_cls)
        self.assertIsInstance(module.weight, UninitializedParameter)
        if module.bias is not None:
            self.assertIsInstance(module.bias, UninitializedParameter)
        input = torch.ones(*input_shape)
        module(input)  # fully materialized
        # Round-trip after materialization.
        new_module = pickle.loads(pickle.dumps(module))
        self.assertIsInstance(new_module, cls)
        self.assertNotIsInstance(new_module, lazy_cls)
        self.assertEqual(new_module.weight.shape, expected_weight_shape)
        self.assertNotIsInstance(new_module.weight, UninitializedParameter)
        if new_module.bias is not None:
            self.assertEqual(new_module.bias.shape, expected_bias_shape)
            self.assertNotIsInstance(new_module.bias, UninitializedParameter)
    def _check_lazy_conv_state(self, gen_module, gen_lazy_module,
                               expected_weight_shape, expected_bias_shape):
        """Shared checker: loading a materialized conv state dict initializes
        the lazy module's parameters; the reverse direction must fail."""
        module = gen_module()
        lazy_module = gen_lazy_module()
        lazy_module.load_state_dict(module.state_dict())
        # Parameters have been initialized but the module won't become a full
        # Conv one until the first iteration. This is due to
        # limitations on the state_dict loading logic
        self.assertFalse(lazy_module.has_uninitialized_params())
        self.assertEqual(lazy_module.weight.shape, expected_weight_shape)
        if lazy_module.bias is not None:
            self.assertEqual(lazy_module.bias.shape, expected_bias_shape)
        # A lazy (uninitialized) state dict cannot be loaded into a real conv.
        module = gen_module()
        lazy_module = gen_lazy_module()
        with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
            module.load_state_dict(lazy_module.state_dict())
def test_lazy_pre_forward_hook(self):
"""
This test is to test whether lazymodule can register other pre-forward hook
functions successfully.
"""
class TestModule(torch.nn.modules.lazy.LazyModuleMixin, torch.nn.Module):
def __init__(self):
super().__init__()
def initialize_parameters(self, input):
return None
def forward(self, input):
return input
def hook_function(module, input):
return input[0] + 1
module = TestModule()
module.register_forward_pre_hook(hook_function)
output = module(torch.zeros(2, 2))
self.assertEqual(output, torch.ones(2, 2))
def test_lazy_forward_hook(self):
"""
This test is to test whether lazymodule can register other forward hook
functions successfully.
"""
class TestModule(torch.nn.modules.lazy.LazyModuleMixin, torch.nn.Module):
def __init__(self):
super().__init__()
def initialize_parameters(self, input):
return None
def forward(self, input):
return input
def hook_function(module, input, output):
return input[0] + 1
module = TestModule()
module.register_forward_hook(hook_function)
output = module(torch.zeros(2, 2))
self.assertEqual(output, torch.ones(2, 2))
    # Thin wrappers delegating to the shared checkers above. Argument order:
    # (init_args), (input_shape), (expected_weight_shape), (expected_bias_shape).
    # Note conv transpose weights are (in_channels, out_channels, *kernel),
    # the reverse of regular convs.
    @suppress_warnings
    def test_lazy_conv1d(self):
        self._check_lazy_conv(nn.Conv1d, nn.LazyConv1d, torch.nn.functional.conv1d,
                              (32, 2), (192, 16, 50), (32, 16, 2), (32,))
    @suppress_warnings
    def test_lazy_conv1d_pickle(self):
        self._check_lazy_conv_pickle(nn.Conv1d, nn.LazyConv1d, (32, 2), (192, 16, 50),
                                     (32, 16, 2), (32,))
    @suppress_warnings
    def test_lazy_conv1d_state(self):
        self._check_lazy_conv_state(lambda: nn.Conv1d(16, 32, 2),
                                    lambda: nn.LazyConv1d(32, 2),
                                    (32, 16, 2), (32,))
    @suppress_warnings
    def test_lazy_conv2d(self):
        self._check_lazy_conv(nn.Conv2d, nn.LazyConv2d, torch.nn.functional.conv2d,
                              (32, 2), (192, 16, 8, 6), (32, 16, 2, 2), (32,))
    @suppress_warnings
    def test_lazy_conv2d_pickle(self):
        self._check_lazy_conv_pickle(nn.Conv2d, nn.LazyConv2d, (32, 2), (192, 16, 8, 6),
                                     (32, 16, 2, 2), (32,))
    @suppress_warnings
    def test_lazy_conv2d_state(self):
        self._check_lazy_conv_state(lambda: nn.Conv2d(16, 32, 2),
                                    lambda: nn.LazyConv2d(32, 2),
                                    (32, 16, 2, 2), (32,))
    @suppress_warnings
    def test_lazy_conv3d(self):
        self._check_lazy_conv(nn.Conv3d, nn.LazyConv3d, torch.nn.functional.conv3d,
                              (32, 2), (192, 16, 8, 7, 6), (32, 16, 2, 2, 2), (32,))
    @suppress_warnings
    def test_lazy_conv3d_pickle(self):
        self._check_lazy_conv_pickle(nn.Conv3d, nn.LazyConv3d, (32, 2), (192, 16, 8, 7, 6),
                                     (32, 16, 2, 2, 2), (32,))
    @suppress_warnings
    def test_lazy_conv3d_state(self):
        self._check_lazy_conv_state(lambda: nn.Conv3d(16, 32, 2),
                                    lambda: nn.LazyConv3d(32, 2),
                                    (32, 16, 2, 2, 2), (32,))
    @suppress_warnings
    def test_lazy_conv_transposed1d(self):
        self._check_lazy_conv(nn.ConvTranspose1d, nn.LazyConvTranspose1d, torch.nn.functional.conv_transpose1d,
                              (32, 2), (192, 16, 50), (16, 32, 2), (32,))
    @suppress_warnings
    def test_lazy_conv_transpose1d_pickle(self):
        self._check_lazy_conv_pickle(nn.ConvTranspose1d, nn.LazyConvTranspose1d, (32, 2),
                                     (192, 16, 50), (16, 32, 2), (32,))
    @suppress_warnings
    def test_lazy_conv_transpose1d_state(self):
        self._check_lazy_conv_state(lambda: nn.ConvTranspose1d(16, 32, 2),
                                    lambda: nn.LazyConvTranspose1d(32, 2),
                                    (16, 32, 2), (32,))
    @suppress_warnings
    def test_lazy_conv_transpose2d(self):
        self._check_lazy_conv(nn.ConvTranspose2d, nn.LazyConvTranspose2d, torch.nn.functional.conv_transpose2d,
                              (32, 2), (192, 16, 8, 6), (16, 32, 2, 2), (32,))
    @suppress_warnings
    def test_lazy_conv_transpose2d_pickle(self):
        self._check_lazy_conv_pickle(nn.ConvTranspose2d, nn.LazyConvTranspose2d, (32, 2),
                                     (192, 16, 8, 6), (16, 32, 2, 2), (32,))
    @suppress_warnings
    def test_lazy_conv_transpose2d_state(self):
        self._check_lazy_conv_state(lambda: nn.ConvTranspose2d(16, 32, 2),
                                    lambda: nn.LazyConvTranspose2d(32, 2),
                                    (16, 32, 2, 2), (32,))
    @suppress_warnings
    def test_lazy_conv_transpose3d(self):
        self._check_lazy_conv(nn.ConvTranspose3d, nn.LazyConvTranspose3d, torch.nn.functional.conv_transpose3d,
                              (32, 2), (192, 16, 8, 7, 6), (16, 32, 2, 2, 2), (32,))
    @suppress_warnings
    def test_lazy_conv_transpose3d_pickle(self):
        self._check_lazy_conv_pickle(nn.ConvTranspose3d, nn.LazyConvTranspose3d, (32, 2),
                                     (192, 16, 8, 7, 6), (16, 32, 2, 2, 2), (32,))
    @suppress_warnings
    def test_lazy_conv_transpose3d_state(self):
        self._check_lazy_conv_state(lambda: nn.ConvTranspose3d(16, 32, 2),
                                    lambda: nn.LazyConvTranspose3d(32, 2),
                                    (16, 32, 2, 2, 2), (32,))
    def _check_lazy_norm(self, cls, lazy_cls, input_shape):
        """Shared checker for lazy norm layers across every combination of
        affine / track_running_stats: the lazy module starts with
        uninitialized params/buffers, materializes into `cls` on the first
        forward, and matches a freshly-built `cls` (same default init)."""
        for affine in [False, True]:
            for track_running_stats in [False, True]:
                lazy_module = lazy_cls(affine=affine, track_running_stats=track_running_stats)
                if affine:
                    self.assertIsInstance(lazy_module.weight, UninitializedParameter)
                    self.assertIsInstance(lazy_module.bias, UninitializedParameter)
                if track_running_stats:
                    self.assertIsInstance(lazy_module.running_mean, UninitializedBuffer)
                    self.assertIsInstance(lazy_module.running_var, UninitializedBuffer)
                input = torch.ones(*input_shape)
                lazy_output = lazy_module(input)
                self.assertIsInstance(lazy_module, cls)
                self.assertNotIsInstance(lazy_module, lazy_cls)
                # Norm layers infer num_features from the channel dimension.
                num_features = input_shape[1]
                module = cls(num_features, affine=affine, track_running_stats=track_running_stats)
                expected_output = module(input)
                self.assertEqual(lazy_output, expected_output)
                if module.weight is not None:
                    self.assertEqual(lazy_module.weight.shape, module.weight.shape)
                    self.assertEqual(lazy_module.weight, module.weight)
                if module.bias is not None:
                    self.assertEqual(lazy_module.bias.shape, module.bias.shape)
                    self.assertEqual(lazy_module.bias, module.bias)
                if module.running_mean is not None:
                    self.assertEqual(lazy_module.running_mean.shape, module.running_mean.shape)
                    self.assertEqual(lazy_module.running_mean, module.running_mean)
                if module.running_var is not None:
                    self.assertEqual(lazy_module.running_var.shape, module.running_var.shape)
                    self.assertEqual(lazy_module.running_var, module.running_var)
                if module.num_batches_tracked is not None:
                    self.assertEqual(lazy_module.num_batches_tracked.shape, module.num_batches_tracked.shape)
                    self.assertEqual(lazy_module.num_batches_tracked, module.num_batches_tracked)
    def _check_lazy_norm_pickle(self, cls, lazy_cls, input_shape):
        """Shared checker: a lazy norm layer pickles as lazy before the first
        forward and as a fully-materialized `cls` afterwards, for every
        affine / track_running_stats combination."""
        for affine in [False, True]:
            for track_running_stats in [False, True]:
                module = lazy_cls(affine=affine, track_running_stats=track_running_stats)
                # Round-trip while still lazy.
                module = pickle.loads(pickle.dumps(module))
                self.assertIsInstance(module, lazy_cls)
                if affine:
                    self.assertIsInstance(module.weight, UninitializedParameter)
                    self.assertIsInstance(module.bias, UninitializedParameter)
                if track_running_stats:
                    self.assertIsInstance(module.running_mean, UninitializedBuffer)
                    self.assertIsInstance(module.running_var, UninitializedBuffer)
                input = torch.ones(*input_shape)
                module(input)  # fully materialized
                # Round-trip after materialization.
                module = pickle.loads(pickle.dumps(module))
                self.assertNotIsInstance(module, lazy_cls)
                self.assertIsInstance(module, cls)
                if affine:
                    self.assertNotIsInstance(module.weight, UninitializedParameter)
                    self.assertNotIsInstance(module.bias, UninitializedParameter)
                if track_running_stats:
                    self.assertNotIsInstance(module.running_mean, UninitializedBuffer)
                    self.assertNotIsInstance(module.running_var, UninitializedBuffer)
    def _check_lazy_batchnorm_state(self, cls, lazy_cls):
        """Shared checker: loading a materialized batchnorm state dict
        initializes the lazy module (params and running stats); loading a
        lazy state dict into a real batchnorm fails."""
        module = cls(10)
        lazy_module = lazy_cls(affine=True, track_running_stats=True)
        lazy_module.load_state_dict(module.state_dict())
        # Parameters have been initialized but the module won't become a full
        # Conv one until the first iteration. This is due to
        # limitations on the state_dict loading logic
        self.assertFalse(lazy_module.has_uninitialized_params())
        self.assertEqual(lazy_module.weight.shape, (10,))
        self.assertEqual(lazy_module.bias.shape, (10,))
        self.assertEqual(lazy_module.running_mean.shape, (10,))
        self.assertEqual(lazy_module.running_var.shape, (10,))
        # The reverse direction must be rejected.
        module = cls(10)
        lazy_module = lazy_cls()
        with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
            module.load_state_dict(lazy_module.state_dict())
    def _check_lazy_instancenorm_state(self, cls, lazy_cls):
        """Shared checker: like _check_lazy_batchnorm_state but sweeps every
        affine / track_running_stats combination for instance norm."""
        for affine in [False, True]:
            for track_running_stats in [False, True]:
                module = cls(10, affine=affine, track_running_stats=track_running_stats)
                lazy_module = lazy_cls(affine=affine, track_running_stats=track_running_stats)
                lazy_module.load_state_dict(module.state_dict())
                # Parameters have been initialized but the module won't become a full
                # InstanceNorm one until the first iteration. This is due to
                # limitations on the state_dict loading logic
                self.assertFalse(lazy_module.has_uninitialized_params())
                if affine:
                    self.assertEqual(lazy_module.weight.shape, (10,))
                    self.assertEqual(lazy_module.bias.shape, (10,))
                if track_running_stats:
                    self.assertEqual(lazy_module.running_mean.shape, (10,))
                    self.assertEqual(lazy_module.running_var.shape, (10,))
        # The reverse direction must be rejected.
        module = cls(10, affine=True, track_running_stats=True)
        lazy_module = lazy_cls(affine=True, track_running_stats=True)
        with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
            module.load_state_dict(lazy_module.state_dict())
    def test_lazy_batchnorm1d(self):
        # Exercise both input forms BatchNorm1d accepts: (N, C, L) and (N, C).
        self._check_lazy_norm(nn.BatchNorm1d, nn.LazyBatchNorm1d, (16, 3, 6))
        self._check_lazy_norm(nn.BatchNorm1d, nn.LazyBatchNorm1d, (16, 6))
    def test_lazy_batchnorm1d_pickle(self):
        self._check_lazy_norm_pickle(nn.BatchNorm1d, nn.LazyBatchNorm1d, (16, 3, 6))
        self._check_lazy_norm_pickle(nn.BatchNorm1d, nn.LazyBatchNorm1d, (16, 6))
def test_lazy_batchnorm1d_state(self):
self._check_lazy_batchnorm_state(nn.BatchNorm1d, nn.LazyBatchNorm1d)
self._check_lazy_batchnorm_state(nn.BatchNorm1d, nn.LazyBatchNorm1d)
    def test_lazy_batchnorm2d(self):
        # Input shape (N, C, H, W); num_features is inferred from C == 3.
        self._check_lazy_norm(nn.BatchNorm2d, nn.LazyBatchNorm2d, (16, 3, 6, 7))
    def test_lazy_batchnorm2d_pickle(self):
        self._check_lazy_norm_pickle(nn.BatchNorm2d, nn.LazyBatchNorm2d, (16, 3, 6, 7))
def test_lazy_batchnorm2d_state(self):
self._check_lazy_batchnorm_state(nn.BatchNorm2d, nn.LazyBatchNorm2d)
self._check_lazy_batchnorm_state(nn.BatchNorm2d, nn.LazyBatchNorm2d)
    def test_lazy_batchnorm3d(self):
        # Input shape (N, C, D, H, W); num_features is inferred from C == 3.
        self._check_lazy_norm(nn.BatchNorm3d, nn.LazyBatchNorm3d, (16, 3, 6, 7, 8))
    def test_lazy_batchnorm3d_pickle(self):
        self._check_lazy_norm_pickle(nn.BatchNorm3d, nn.LazyBatchNorm3d, (16, 3, 6, 7, 8))
def test_lazy_batchnorm3d_state(self):
self._check_lazy_batchnorm_state(nn.BatchNorm3d, nn.LazyBatchNorm3d)
self._check_lazy_batchnorm_state(nn.BatchNorm3d, nn.LazyBatchNorm3d)
    def test_lazy_instancenorm1d(self):
        # Input shape (N, C, L); num_features is inferred from C == 3.
        self._check_lazy_norm(nn.InstanceNorm1d, nn.LazyInstanceNorm1d, (16, 3, 6))
    def test_lazy_instancenorm1d_pickle(self):
        self._check_lazy_norm_pickle(nn.InstanceNorm1d, nn.LazyInstanceNorm1d, (16, 3, 6))
def test_lazy_instancenorm1d_state(self):
self._check_lazy_instancenorm_state(nn.InstanceNorm1d, nn.LazyInstanceNorm1d)
self._check_lazy_instancenorm_state(nn.InstanceNorm1d, nn.LazyInstanceNorm1d)
    def test_lazy_instancenorm2d(self):
        # Input shape (N, C, H, W); num_features is inferred from C == 3.
        self._check_lazy_norm(nn.InstanceNorm2d, nn.LazyInstanceNorm2d, (16, 3, 6, 7))
    def test_lazy_instancenorm2d_pickle(self):
        self._check_lazy_norm_pickle(nn.InstanceNorm2d, nn.LazyInstanceNorm2d, (16, 3, 6, 7))
def test_lazy_instancenorm2d_state(self):
self._check_lazy_instancenorm_state(nn.InstanceNorm2d, nn.LazyInstanceNorm2d)
self._check_lazy_instancenorm_state(nn.InstanceNorm2d, nn.LazyInstanceNorm2d)
    def test_lazy_instancenorm3d(self):
        # Input shape (N, C, D, H, W); num_features is inferred from C == 3.
        self._check_lazy_norm(nn.InstanceNorm3d, nn.LazyInstanceNorm3d, (16, 3, 6, 7, 8))
    def test_lazy_instancenorm3d_pickle(self):
        self._check_lazy_norm_pickle(nn.InstanceNorm3d, nn.LazyInstanceNorm3d, (16, 3, 6, 7, 8))
def test_lazy_instancenorm3d_state(self):
self._check_lazy_instancenorm_state(nn.InstanceNorm3d, nn.LazyInstanceNorm3d)
self._check_lazy_instancenorm_state(nn.InstanceNorm3d, nn.LazyInstanceNorm3d)
    @suppress_warnings
    def test_materialize_dtype(self):
        """materialize() honors the module's current dtype (e.g. after half())."""
        # NOTE(review): the float64 expectation presumably relies on this test
        # file calling torch.set_default_dtype(torch.double) at import time --
        # confirm at the top of the file before running this standalone.
        module = LazyModule()
        module.register_parameter('test_param', UninitializedParameter())
        module.test_param.materialize(10)
        self.assertTrue(module.test_param.dtype == torch.float64)
        module = LazyModule()
        module.register_parameter('test_param', UninitializedParameter())
        # Casting the module before materialization must carry over.
        module.half()
        module.test_param.materialize(10)
        self.assertTrue(module.test_param.dtype == torch.float16)
    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    @suppress_warnings
    def test_materialize_device(self):
        """materialize() honors the module's current device (cpu by default,
        cuda after .cuda())."""
        module = LazyModule()
        module.register_parameter('test_param', UninitializedParameter())
        module.test_param.materialize(10)
        self.assertTrue(module.test_param.device.type == 'cpu')
        module = LazyModule()
        module.register_parameter('test_param', UninitializedParameter())
        # Moving the module before materialization must carry over.
        module.cuda()
        module.test_param.materialize(10)
        self.assertTrue(module.test_param.device.type == 'cuda')
@suppress_warnings
def test_chained_initialization(self):
class MyNetwork(torch.nn.Module):
def __init__(self):
super(MyNetwork, self).__init__()
self.linear_1 = torch.nn.LazyLinear(15)
self.linear_2 = torch.nn.LazyLinear(10)
def forward(self, x):
y = self.linear_1(x)
return self.linear_2(y)
net = MyNetwork()
net(torch.ones(5, 10))
self.assertTrue(net.linear_1.weight.shape == (15, 10))
self.assertTrue(net.linear_1.bias.shape == (15,))
self.assertTrue(net.linear_2.weight.shape == (10, 15))
self.assertTrue(net.linear_2.bias.shape == (10,))
    @suppress_warnings
    def test_optimizer_pass(self):
        """Optimizers built over an UninitializedParameter must see the
        materialized Parameter after materialize() and step successfully;
        Adagrad is expected to reject uninitialized parameters outright."""
        optimizers = [torch.optim.Adadelta, torch.optim.Adagrad, torch.optim.Adam,
                      torch.optim.AdamW, torch.optim.Adamax,
                      torch.optim.ASGD, torch.optim.SGD, torch.optim.Rprop,
                      torch.optim.RMSprop, torch.optim.LBFGS]
        def run_step(module, optim):
            # The optimizer holds the very same object, so materialization
            # must be visible through param_groups.
            self.assertIsInstance(optim.param_groups[0]['params'][0], UninitializedParameter)
            module.test_param.materialize(10)
            self.assertIsInstance(optim.param_groups[0]['params'][0], Parameter)
            self.assertNotIsInstance(optim.param_groups[0]['params'][0], UninitializedParameter)
            for p in module.parameters():
                p.grad = torch.rand_like(p)
            if isinstance(optim, torch.optim.LBFGS):
                # LBFGS requires a closure returning the loss.
                optim.step(lambda: 1.0)
            else:
                optim.step()
        for optim_cls in optimizers:
            module = LazyModule()
            module.register_parameter('test_param', UninitializedParameter())
            if optim_cls is torch.optim.SGD:
                # SGD has no default lr, so it must be supplied explicitly.
                optim = optim_cls(module.parameters(), lr=0.0)
            elif optim_cls is torch.optim.Adagrad:
                with self.assertRaisesRegex(ValueError, 'uninitialized parameter'):
                    optim = optim_cls(module.parameters())
                continue
            else:
                optim = optim_cls(module.parameters())
            run_step(module, optim)
@suppress_warnings
def test_weight_norm(self):
m = nn.LazyLinear(7)
with self.assertRaisesRegex(ValueError, 'have uninitialized parameters.'):
m = torch.nn.utils.weight_norm(m)
@suppress_warnings
def test_spectral_norm(self):
m = nn.LazyLinear(7)
with self.assertRaisesRegex(ValueError, 'have uninitialized parameters.'):
m = torch.nn.utils.spectral_norm(m)
@suppress_warnings
def test_invalid_functions(self):
param = torch.nn.parameter.UninitializedParameter()
with self.assertRaisesRegex(ValueError, 'uninitialized parameter'):
torch.empty_like(param)
with self.assertRaisesRegex(ValueError, 'uninitialized parameter'):
torch.add(param, param)
with self.assertRaisesRegex(ValueError, 'uninitialized parameter'):
param + param
class TestFunctionalPickle(TestCase):
    # issue gh-38137
    def test_pickle_softsign(self):
        """Pickling F.softsign must not raise (regression test for gh-38137)."""
        payload = pickle.dumps(F.softsign)
def _hook_to_pickle(*args, **kwargs):
pass
class TestStateDictHooks(TestCase):
    def test_load_state_dict_pre_hook(self):
        """Pre-load-state-dict hooks fire on load_state_dict, both in the
        legacy no-module form and the with-module form."""
        m = nn.Linear(10, 10)
        m_state_dict = m.state_dict()
        m_load = nn.Linear(10, 10)
        hook_called = 0
        def hook_without_module(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
            self.assertEqual(m_state_dict, state_dict)
            nonlocal hook_called
            hook_called += 1
        def hook_with_module(module, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
            self.assertEqual(m_state_dict, state_dict)
            # with_module=True passes the owning module as the first argument.
            self.assertTrue(m_load is module)
            nonlocal hook_called
            hook_called += 1
        hook_called = 0
        m_load._register_load_state_dict_pre_hook(hook_without_module)
        m_load.load_state_dict(m_state_dict)
        self.assertEqual(1, hook_called)
        hook_called = 0
        # Second registration: both hooks now fire on every load.
        m_load._register_load_state_dict_pre_hook(hook_with_module, True)
        m_load.load_state_dict(m_state_dict)
        self.assertEqual(2, hook_called)
def test_no_extra_ref_to_module(self):
try:
gc.disable()
m = nn.Linear(10, 10)
m._register_load_state_dict_pre_hook(_hook_to_pickle, True)
weak_m = weakref.ref(m)
del m
self.assertEqual(weak_m(), None)
finally:
gc.enable()
def test_pickled_hook(self):
m = nn.Linear(10, 10)
m._register_load_state_dict_pre_hook(_hook_to_pickle, True)
pickle.loads(pickle.dumps(m))
def test_load_state_dict_module_pre_hook(self):
hook_called = 0
# Test with module instance method as hook
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.foo = torch.nn.Parameter(torch.rand(10))
def my_pre_load_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
assert [] == error_msgs
assert [] == unexpected_keys
assert [] == missing_keys
assert strict
nonlocal hook_called
hook_called += 1
def my_pre_load_hook_with_module(
self,
module,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
assert [] == error_msgs
assert [] == unexpected_keys
assert [] == missing_keys
assert strict
assert self is module
nonlocal hook_called
hook_called += 1
# Test that hooks registered on a submodule are also called
# appropriately, i.e. with the submodule as module argument in
# my_pre_load_hook_with_module.
class MyModuleContainer(nn.Module):
def __init__(self, mod):
super().__init__()
self.mod = mod
for ctor in [MyModuleContainer, lambda x: x]:
m = ctor(MyModule())
state_dict = m.state_dict()
if isinstance(m, MyModuleContainer):
mod = m.mod
else:
mod = m
hook_called = 0
mod._register_load_state_dict_pre_hook(
mod.my_pre_load_hook
)
m.load_state_dict(state_dict)
self.assertEqual(1, hook_called)
hook_called = 0
mod._register_load_state_dict_pre_hook(
mod.my_pre_load_hook_with_module, True
)
m.load_state_dict(state_dict)
self.assertEqual(2, hook_called)
def test_load_state_dict_post_hook(self):
hook_called = 0
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.foo = torch.nn.Parameter(torch.rand(10))
def my_post_load_hook(self, module, incompatible_keys):
assert module is self
nonlocal hook_called
incompatible_keys.missing_keys.append("foo")
incompatible_keys.unexpected_keys.append("bar")
hook_called += 1
nested = MyModule()
wrapped = nn.ModuleList([nested])
handle = nested.register_load_state_dict_post_hook(
nested.my_post_load_hook,
)
# Hook must be called even if it is wrapped
ret = wrapped.load_state_dict(wrapped.state_dict(), strict=False)
self.assertEqual(hook_called, 1)
# Ensure that the hook modified missing_keys and unexpected_keys
missing = ret.missing_keys
unexpected = ret.unexpected_keys
self.assertEqual(missing, ["foo"])
self.assertEqual(unexpected, ["bar"])
# When called with strict=True, the error raised should mention the
# missing and unexpected keys the hook added.
with self.assertRaisesRegex(RuntimeError, "foo.*\n.*bar"):
wrapped.load_state_dict(wrapped.state_dict(), strict=True)
self.assertEqual(hook_called, 2)
# Removing the hook via handle.remove() should cause it not to
# fire anymore.
handle.remove()
# Hook did not run so it should not have added any keys
ret = wrapped.load_state_dict(wrapped.state_dict(), strict=False)
self.assertEqual(ret.missing_keys, [])
self.assertEqual(ret.unexpected_keys, [])
# hook_called should not have been incremented
self.assertEqual(hook_called, 2)
def load_hook_clear_incompatible(module, incompatible_keys):
incompatible_keys.missing_keys.clear()
incompatible_keys.unexpected_keys.clear()
nested.register_load_state_dict_post_hook(load_hook_clear_incompatible)
state_dict = wrapped.state_dict()
state_dict["extra"] = torch.ones(1)
# load state_dict with strict=True should not throw.
ret = wrapped.load_state_dict(state_dict, strict=True)
# explicitly ensure that the post hook clearned out incompatible_keys
self.assertEqual([], ret.missing_keys)
self.assertEqual([], ret.unexpected_keys)
@unittest.skipIf(IS_WINDOWS, "Tempfile permission issue on windows")
def test_load_state_dict_post_hook_backward_compatibility(self):
def my_post_load_hook(mod, _):
nonlocal called
called = True
for m in [nn.Softmin(10), nn.Softmax(10), nn.LogSoftmax(10)]:
called = False
sd = deepcopy(m.state_dict())
self.assertTrue(hasattr(m, '_load_state_dict_post_hooks'))
# Simulate an older model that did not have this attr
delattr(m, '_load_state_dict_post_hooks')
# Save and load, and ensure that load_state_dict works (without proper
# BC we would run into errors because this attribute would be expected).
# In particular, Softmax runs into the issue described here:
# https://github.com/pytorch/pytorch/issues/77280
with NamedTemporaryFile() as f:
# Note that torch.save / torch.load is not recommended to save/load
# modules.
torch.save(m, f.name)
m = torch.load(f.name)
m.load_state_dict(sd)
self.assertFalse(called)
# Ensure hooks can be registered and called.
m.register_load_state_dict_post_hook(my_post_load_hook)
m.load_state_dict(sd)
self.assertTrue(called)
# Generate per-device variants of TestNNDeviceType and the parametrized TestNN cases.
instantiate_device_type_tests(TestNNDeviceType, globals())
instantiate_parametrized_tests(TestNN)
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_nn.py |
# Owner(s): ["oncall: mobile"]
import unittest
import io
import tempfile
import torch
import torch.utils.show_pickle
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS
class TestShowPickle(TestCase):
    """Smoke test for the torch.utils.show_pickle helper."""
    @unittest.skipIf(IS_WINDOWS, "Can't re-open temp file on Windows")
    def test_scripted_model(self):
        """show_pickle on a saved scripted module prints its class and attributes."""
        class MyCoolModule(torch.nn.Module):
            def __init__(self, weight):
                super().__init__()
                self.weight = weight
            def forward(self, x):
                return x * self.weight
        scripted = torch.jit.script(MyCoolModule(torch.tensor([2.0])))
        with tempfile.NamedTemporaryFile() as tmp:
            torch.jit.save(scripted, tmp)
            tmp.flush()
            stream = io.StringIO()
            torch.utils.show_pickle.main(["", tmp.name + "@*/data.pkl"], output_stream=stream)
            printed = stream.getvalue()
            # The dumped pickle should mention both the module class and its attribute.
            self.assertRegex(printed, "MyCoolModule")
            self.assertRegex(printed, "weight")
# Standard test-runner entry point.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_show_pickle.py |
# Owner(s): ["module: unknown"]
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.check_kernel_launches import (
check_cuda_kernel_launches, check_code_for_cuda_kernel_launches
)
class AlwaysCheckCudaLaunchTest(TestCase):
    """Exercises the source-scanning regex that flags CUDA kernel launches
    not followed by C10_CUDA_KERNEL_LAUNCH_CHECK()."""
    def test_check_code(self):
        """Verifies that the regex works for a few different situations"""
        # Try some different spacings
        # Expect 2 unsafe launches: the two launches with no CHECK after them.
        self.assertEqual(2, check_code_for_cuda_kernel_launches("""
some_function_call<TemplateArg><<<1,2,0,stream>>>(arg1,arg2,arg3);
C10_CUDA_KERNEL_LAUNCH_CHECK();
some_function_call<TemplateArg><<<1,2,0,stream>>>(arg1,arg2,arg3);
some_function_call<TemplateArg><<<1,2,0,stream>>>(arg1,arg2,arg3);
C10_CUDA_KERNEL_LAUNCH_CHECK();
some_function_call<TemplateArg><<<1,2,0,stream>>>(arg1,arg2,arg3);
some_other_stuff;
some_function_call<TemplateArg><<<1,2,0,stream>>>(arg1,arg2,arg3);
C10_CUDA_KERNEL_LAUNCH_CHECK();
some_function_call<TemplateArg><<<1,2,0,stream>>> (arg1,arg2,arg3);
C10_CUDA_KERNEL_LAUNCH_CHECK();
some_function_call<TemplateArg><<<1,2,0,stream>>> ( arg1 , arg2 , arg3 ) ;
C10_CUDA_KERNEL_LAUNCH_CHECK();
"""))
        # Does it work for macros?
        # Both macro bodies include the CHECK on a continuation line, so 0 hits.
        self.assertEqual(0, check_code_for_cuda_kernel_launches(r"""
#define SOME_MACRO(x) some_function_call<<<1,2>>> ( x ) ; \
C10_CUDA_KERNEL_LAUNCH_CHECK();
#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM) \
indexAddSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
selfInfo, sourceInfo, indexInfo, \
selfAddDim, sourceAddDim, sliceSize, selfAddDimSize); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
"""))
        # Does it work for lambdas?
        # The second launch has `uh oh;` between it and the CHECK, so 1 hit.
        self.assertEqual(1, check_code_for_cuda_kernel_launches(r"""
rrelu_with_noise_cuda_kernel<scalar_t, 2><<<grid, block, 0, stream>>>(
numel,
rng_engine_inputs,
output_data,
input_data,
noise_data,
lower,
upper,
[] __device__ (curandStatePhilox4_32_10_t* state) {
return curand_uniform2_double(state);
});
C10_CUDA_KERNEL_LAUNCH_CHECK();
rrelu_with_noise_cuda_kernel<scalar_t, 2><<<grid, block, 0, stream>>>(
numel,
rng_engine_inputs,
output_data,
input_data,
noise_data,
lower,
upper,
[] __device__ (curandStatePhilox4_32_10_t* state) {
return curand_uniform2_double(state);
});
uh oh;
C10_CUDA_KERNEL_LAUNCH_CHECK();
"""))
    def test_check_cuda_launches(self):
        """Scans the actual repository sources and asserts no unsafe launches exist."""
        unsafeLaunchesCount = check_cuda_kernel_launches()
        self.assertTrue(unsafeLaunchesCount == 0)
# Standard test-runner entry point.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_kernel_launch_checks.py |
# Owner(s): ["module: tests"]
import torch
import numpy as np
from itertools import product, combinations, permutations, chain
from functools import partial
import random
import warnings
from torch._six import nan
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfTorchDynamo, torch_to_numpy_dtype_dict)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCPU, onlyCUDA, dtypes, onlyNativeDeviceTypes,
dtypesIfCUDA, largeTensorTest)
from torch.testing._internal.common_dtype import all_types_and_complex_and, all_types, all_types_and
# TODO: replace with make_tensor
def _generate_input(shape, dtype, device, with_extremal):
if shape == ():
x = torch.tensor((), dtype=dtype, device=device)
else:
if dtype.is_floating_point or dtype.is_complex:
# work around torch.randn not being implemented for bfloat16
if dtype == torch.bfloat16:
x = torch.randn(*shape, device=device) * random.randint(30, 100)
x = x.to(torch.bfloat16)
else:
x = torch.randn(*shape, dtype=dtype, device=device) * random.randint(30, 100)
x[torch.randn(*shape) > 0.5] = 0
if with_extremal and dtype.is_floating_point:
# Use extremal values
x[torch.randn(*shape) > 0.5] = float('nan')
x[torch.randn(*shape) > 0.5] = float('inf')
x[torch.randn(*shape) > 0.5] = float('-inf')
elif with_extremal and dtype.is_complex:
x[torch.randn(*shape) > 0.5] = complex('nan')
x[torch.randn(*shape) > 0.5] = complex('inf')
x[torch.randn(*shape) > 0.5] = complex('-inf')
elif dtype == torch.bool:
x = torch.zeros(shape, dtype=dtype, device=device)
x[torch.randn(*shape) > 0.5] = True
else:
x = torch.randint(15, 100, shape, dtype=dtype, device=device)
return x
class TestShapeOps(TestCase):
# TODO: update to work on CUDA, too
@onlyCPU
def test_unbind(self, device):
x = torch.rand(2, 3, 4, 5)
for dim in range(4):
res = torch.unbind(x, dim)
res2 = x.unbind(dim)
self.assertEqual(x.size(dim), len(res))
self.assertEqual(x.size(dim), len(res2))
for i in range(dim):
self.assertEqual(x.select(dim, i), res[i])
self.assertEqual(x.select(dim, i), res2[i])
    # TODO: update to work on CUDA, too?
    @skipIfTorchDynamo("TorchDynamo fails with an unknown error")
    @onlyCPU
    def test_tolist(self, device):
        """Tensor.tolist / Storage.tolist round-trip 0-D through 3-D data,
        including a non-contiguous view."""
        list0D = []
        tensor0D = torch.tensor(list0D)
        self.assertEqual(tensor0D.tolist(), list0D)
        table1D = [1., 2., 3.]
        tensor1D = torch.tensor(table1D)
        storage = torch.Storage(table1D)
        self.assertEqual(tensor1D.tolist(), table1D)
        self.assertEqual(storage.tolist(), table1D)
        # NOTE(review): the two checks below duplicate the two above; retained as-is.
        self.assertEqual(tensor1D.tolist(), table1D)
        self.assertEqual(storage.tolist(), table1D)
        table2D = [[1, 2], [3, 4]]
        tensor2D = torch.tensor(table2D)
        self.assertEqual(tensor2D.tolist(), table2D)
        tensor3D = torch.tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
        tensorNonContig = tensor3D.select(1, 1)
        self.assertFalse(tensorNonContig.is_contiguous())
        # tolist() must follow strides, not raw storage order.
        self.assertEqual(tensorNonContig.tolist(), [[3, 4], [7, 8]])
    @dtypes(torch.int64, torch.float, torch.complex128)
    def test_movedim_invalid(self, device, dtype):
        """movedim/moveaxis reject out-of-range, mismatched, and repeated dims."""
        shape = self._rand_shape(4, min_size=5, max_size=10)
        x = _generate_input(shape, dtype, device, False)
        for fn in [torch.movedim, torch.moveaxis]:
            # Invalid `source` and `destination` dimension
            with self.assertRaisesRegex(IndexError, "Dimension out of range"):
                fn(x, 5, 0)
            with self.assertRaisesRegex(IndexError, "Dimension out of range"):
                fn(x, 0, 5)
            # Mismatch in size of `source` and `destination`
            with self.assertRaisesRegex(RuntimeError, "movedim: Invalid source or destination dims:"):
                fn(x, (1, 0), (0, ))
            with self.assertRaisesRegex(RuntimeError, "movedim: repeated dim in `source`"):
                fn(x, (0, 0), (0, 1))
            with self.assertRaisesRegex(RuntimeError, "movedim: repeated dim in `source`"):
                fn(x, (0, 1, 0), (0, 1, 2))
            with self.assertRaisesRegex(RuntimeError, "movedim: repeated dim in `destination`"):
                fn(x, (0, 1), (1, 1))
            with self.assertRaisesRegex(RuntimeError, "movedim: repeated dim in `destination`"):
                fn(x, (0, 1, 2), (1, 0, 1))
@dtypes(torch.int64, torch.float, torch.complex128)
def test_movedim(self, device, dtype):
for fn in [torch.moveaxis, torch.movedim]:
for nd in range(5):
shape = self._rand_shape(nd, min_size=5, max_size=10)
x = _generate_input(shape, dtype, device, with_extremal=False)
for random_negative in [True, False]:
for src_dim, dst_dim in permutations(range(nd), r=2):
random_prob = random.random()
if random_negative and random_prob > 0.66:
src_dim = src_dim - nd
elif random_negative and random_prob > 0.33:
dst_dim = dst_dim - nd
elif random_negative:
src_dim = src_dim - nd
dst_dim = dst_dim - nd
# Integer `source` and `destination`
torch_fn = partial(fn, source=src_dim, destination=dst_dim)
np_fn = partial(np.moveaxis, source=src_dim, destination=dst_dim)
self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)
if nd == 0:
continue
def make_index_negative(sequence, idx):
sequence = list(sequence)
sequence[random_idx] = sequence[random_idx] - nd
return tuple(src_sequence)
for src_sequence in permutations(range(nd), r=random.randint(1, nd)):
# Sequence `source` and `destination`
dst_sequence = tuple(random.sample(range(nd), len(src_sequence)))
# Randomly change a dim to a negative dim representation of itself.
random_prob = random.random()
if random_negative and random_prob > 0.66:
random_idx = random.randint(0, len(src_sequence) - 1)
src_sequence = make_index_negative(src_sequence, random_idx)
elif random_negative and random_prob > 0.33:
random_idx = random.randint(0, len(src_sequence) - 1)
dst_sequence = make_index_negative(dst_sequence, random_idx)
elif random_negative:
random_idx = random.randint(0, len(src_sequence) - 1)
dst_sequence = make_index_negative(dst_sequence, random_idx)
random_idx = random.randint(0, len(src_sequence) - 1)
src_sequence = make_index_negative(src_sequence, random_idx)
torch_fn = partial(fn, source=src_sequence, destination=dst_sequence)
np_fn = partial(np.moveaxis, source=src_sequence, destination=dst_sequence)
self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)
# Move dim to same position
x = torch.randn(2, 3, 5, 7, 11)
torch_fn = partial(fn, source=(0, 1), destination=(0, 1))
np_fn = partial(np.moveaxis, source=(0, 1), destination=(0, 1))
self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)
torch_fn = partial(fn, source=1, destination=1)
np_fn = partial(np.moveaxis, source=1, destination=1)
self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)
# Empty Sequence
torch_fn = partial(fn, source=(), destination=())
np_fn = partial(np.moveaxis, source=(), destination=())
self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)
@dtypes(torch.float, torch.bool)
def test_diag(self, device, dtype):
if dtype is torch.bool:
x = torch.rand(100, 100, device=device) >= 0.5
else:
x = torch.rand(100, 100, dtype=dtype, device=device)
res1 = torch.diag(x)
res2 = torch.tensor((), dtype=dtype, device=device)
torch.diag(x, out=res2)
self.assertEqual(res1, res2)
def test_diagonal(self, device):
x = torch.randn((100, 100), device=device)
result = torch.diagonal(x)
expected = torch.diag(x)
self.assertEqual(result, expected)
x = torch.randn((100, 100), device=device)
result = torch.diagonal(x, 17)
expected = torch.diag(x, 17)
self.assertEqual(result, expected)
    @onlyCPU
    @dtypes(torch.float)
    def test_diagonal_multidim(self, device, dtype):
        """torch.diagonal on >2-D tensors matches numpy's diagonal, incl. negative dims."""
        x = torch.randn(10, 11, 12, 13, dtype=dtype, device=device)
        xn = x.numpy()
        for args in [(2, 2, 3),
                     (2,),
                     (-2, 1, 2),
                     (0, -2, -1)]:
            result = torch.diagonal(x, *args)
            expected = xn.diagonal(*args)
            self.assertEqual(expected.shape, result.shape)
            self.assertEqual(expected, result)
        # test non-continguous
        xp = x.permute(1, 2, 3, 0)
        result = torch.diagonal(xp, 0, -2, -1)
        expected = xp.numpy().diagonal(0, -2, -1)
        self.assertEqual(expected.shape, result.shape)
        self.assertEqual(expected, result)
    @onlyNativeDeviceTypes
    @dtypes(*all_types())
    @dtypesIfCUDA(*all_types_and(torch.half))
    def test_trace(self, device, dtype):
        """Tensor.trace matches np.trace (accumulated in the sum() dtype)."""
        def test(shape):
            tensor = make_tensor(shape, dtype=dtype, device=device, low=-9, high=9)
            # trace accumulates in the same dtype sum() promotes to.
            expected_dtype = tensor.sum().dtype
            expected_dtype = torch_to_numpy_dtype_dict[expected_dtype]
            result = np.trace(tensor.cpu().numpy(), dtype=expected_dtype)
            expected = torch.tensor(result, device=device)
            self.assertEqual(tensor.trace(), expected)
        shapes = (
            [10, 1],
            [1, 10],
            [100, 100],
            [20, 100],
            [100, 20],
        )
        for shape in shapes:
            test(shape)
    def generate_clamp_baseline(self, device, dtype, *, min_vals, max_vals, with_nans):
        """
        Creates a random tensor for a given device and dtype, and computes the expected clamped
        values given the min_vals and/or max_vals.
        If with_nans is provided, then some values are randomly set to nan.

        Returns a (input, expected_clamped) tensor pair; the expectation is
        computed with np.clip as the reference implementation.
        """
        X = torch.rand(100, device=device).mul(50).add(-25)  # uniform in [-25, 25]
        X = X.to(dtype)
        if with_nans:
            mask = torch.randint(0, 2, X.shape, dtype=torch.bool, device=device)
            X[mask] = nan
        # np.clip needs plain arrays/scalars for its bounds.
        if isinstance(min_vals, torch.Tensor):
            min_vals = min_vals.cpu().numpy()
        if isinstance(max_vals, torch.Tensor):
            max_vals = max_vals.cpu().numpy()
        # Use NumPy implementation as reference
        X_clamped = torch.tensor(np.clip(X.cpu().numpy(), a_min=min_vals, a_max=max_vals), device=device)
        return X, X_clamped
    # Tests clamp and its alias, clip
    @dtypes(torch.int64, torch.float32)
    def test_clamp(self, device, dtype):
        """clamp/clip (function, method, in-place, out=) match the np.clip baseline."""
        op_list = (torch.clamp, torch.Tensor.clamp, torch.Tensor.clamp_,
                   torch.clip, torch.Tensor.clip, torch.Tensor.clip_)
        # min/max argument product
        args = product((-10, None), (10, None))
        for op in op_list:
            for min_val, max_val in args:
                if min_val is None and max_val is None:
                    # Both-None is an error case, covered elsewhere.
                    continue
                X, Y_expected = self.generate_clamp_baseline(device, dtype,
                                                             min_vals=min_val,
                                                             max_vals=max_val,
                                                             with_nans=False)
                # Test op
                X1 = X.clone()  # So that the in-place ops do not change X
                Y_actual = op(X1, min_val, max_val)
                self.assertEqual(Y_expected, Y_actual)
                # Test op-out behavior (out does not exist for method versions)
                if op in (torch.clamp, torch.clip):
                    Y_out = torch.empty_like(X)
                    op(X, min=min_val, max=max_val, out=Y_out)
                    self.assertEqual(Y_expected, Y_out)
    def test_clamp_propagates_nans(self, device):
        """NaNs in the input must survive clamp/clip in all call forms."""
        op_list = (torch.clamp, torch.Tensor.clamp, torch.Tensor.clamp_,
                   torch.clip, torch.Tensor.clip, torch.Tensor.clip_)
        # min/max argument product
        args = product((-10, None), (10, None))
        for op in op_list:
            for min_val, max_val in args:
                if min_val is None and max_val is None:
                    continue
                X, Y_expected = self.generate_clamp_baseline(device, torch.float,
                                                             min_vals=min_val,
                                                             max_vals=max_val,
                                                             with_nans=True)
                # Only the NaN placement is compared, not the clamped values.
                Y_expected = torch.isnan(Y_expected)
                # Test op
                X1 = X.clone()  # So that the in-place ops do not change X
                Y_actual = op(X1, min_val, max_val)
                self.assertEqual(Y_expected, torch.isnan(Y_actual))
                # Test op-out behavior (out does not exist for method versions)
                if op in (torch.clamp, torch.clip):
                    Y_out = torch.empty_like(X)
                    op(X, min_val, max_val, out=Y_out)
                    self.assertEqual(Y_expected, torch.isnan(Y_out))
def test_clamp_raises_arg_errors(self, device):
X = torch.randn(100, dtype=torch.float, device=device)
error_msg = 'At least one of \'min\' or \'max\' must not be None'
with self.assertRaisesRegex(RuntimeError, error_msg):
X.clamp()
with self.assertRaisesRegex(RuntimeError, error_msg):
X.clamp_()
with self.assertRaisesRegex(RuntimeError, error_msg):
torch.clamp(X)
    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
    def test_flip(self, device, dtype):
        """Tensor.flip over many layouts (contiguous, noncontiguous, expanded,
        transposed, empty) matches hand-computed expectations; float inputs are
        also checked through their quantized variants."""
        make_from_data = partial(torch.tensor, device=device, dtype=dtype)
        make_from_size = partial(make_tensor, device=device, dtype=dtype)
        def test_flip_impl(input_t, dims, output_t):
            def all_t():
                yield input_t, output_t
                if dtype is torch.float:
                    # We generate quantized versions as well
                    for qdtype in (torch.quint8, torch.qint8, torch.qint32):
                        qinput_t = torch.quantize_per_tensor(input_t, 0.1, 5, qdtype)
                        qoutput_t = torch.quantize_per_tensor(output_t, 0.1, 5, qdtype)
                        yield qinput_t, qoutput_t
            for in_t, out_t in all_t():
                self.assertEqual(in_t.flip(dims), out_t)
                n = in_t.ndim
                if not isinstance(dims, tuple):
                    # Wrap dim
                    self.assertEqual(in_t.flip(-n + dims), out_t)
                else:
                    # Permute dimensions
                    for p_dims in permutations(dims):
                        self.assertEqual(in_t.flip(p_dims), out_t)
                        if len(p_dims) > 0:
                            # Wrap 1st dim
                            self.assertEqual(in_t.flip((-n + p_dims[0],) + p_dims[1:]), out_t)
        def gen_data():
            # Basic tests
            data = make_from_data([1, 2, 3, 4, 5, 6, 7, 8]).view(2, 2, 2)
            nonctg = make_from_size((2, 2, 2), noncontiguous=True).copy_(data)
            dims_result = ((0, make_from_data([5, 6, 7, 8, 1, 2, 3, 4]).view(2, 2, 2)),
                           (1, make_from_data([3, 4, 1, 2, 7, 8, 5, 6]).view(2, 2, 2)),
                           (2, make_from_data([2, 1, 4, 3, 6, 5, 8, 7]).view(2, 2, 2)),
                           ((0, 1), make_from_data([7, 8, 5, 6, 3, 4, 1, 2]).view(2, 2, 2)),
                           ((0, 1, 2), make_from_data([8, 7, 6, 5, 4, 3, 2, 1]).view(2, 2, 2)))
            for in_tensor, (dims, out_tensor) in product((data, nonctg), dims_result):
                yield in_tensor, dims, out_tensor
            # Expanded
            in_t = make_from_data([1, 2, 3]).view(3, 1).expand(3, 2)
            dims = 0
            out_t = make_from_data([3, 3, 2, 2, 1, 1]).view(3, 2)
            yield in_t, dims, out_t
            # Noop on expanded dimension
            yield in_t, 1, in_t
            # Transposed
            in_t = make_from_data([1, 2, 3, 4, 5, 6, 7, 8]).view(2, 2, 2).transpose(0, 1)
            dims = (0, 1, 2)
            out_t = make_from_data([8, 7, 4, 3, 6, 5, 2, 1]).view(2, 2, 2)
            yield in_t, dims, out_t
            # Rectangular case
            in_t = make_from_data([1, 2, 3, 4, 5, 6]).view(2, 3)
            dims = 0
            out_t = make_from_data([[4, 5, 6], [1, 2, 3]])
            yield in_t, dims, out_t
            dims = 1
            out_t = make_from_data([[3, 2, 1], [6, 5, 4]])
            yield in_t, dims, out_t
            # Noops (edge cases)
            # Size 0
            in_t = make_from_data(())
            yield in_t, 0, in_t
            yield in_t, (), in_t
            # dims = ()
            in_t = make_from_size((3, 2, 1))
            yield in_t, (), in_t
            # Zero elements, non-zero size
            in_t = make_from_size((3, 0, 2))
            for i in range(in_t.ndim):
                yield in_t, i, in_t
            # Size 1
            in_t = make_from_size(())
            yield in_t, 0, in_t
            in_t = make_from_size((1,))
            yield in_t, 0, in_t
        for in_tensor, dims, out_tensor in gen_data():
            test_flip_impl(in_tensor, dims, out_tensor)
        # test for shape
        size = [2, 3, 4]
        data = make_from_size(size)
        possible_dims = range(len(size))
        test_dims = chain(combinations(possible_dims, 1), combinations(possible_dims, 2))
        for dims in test_dims:
            self.assertEqual(size, list(data.flip(dims).size()))
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_flip_errors(self, device, dtype):
make_arg = partial(make_tensor, dtype=dtype, device=device)
data = make_arg((2, 2, 2))
# not allow flip on the same dim more than once
self.assertRaises(RuntimeError, lambda: data.flip(0, 1, 1))
# not allow empty list as input
self.assertRaises(TypeError, lambda: data.flip())
# not allow dim > max dim
self.assertRaises(IndexError, lambda: data.flip(0, 1, 2, 3))
self.assertRaises(IndexError, lambda: data.flip(3))
def _rand_shape(self, dim, min_size, max_size):
return tuple(torch.randint(min_size, max_size + 1, (dim,)))
    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
    def test_flip_numpy(self, device, dtype):
        """torch.flip matches np.flip for every axis combination on 3-D/4-D inputs."""
        make_arg = partial(make_tensor, dtype=dtype, device=device)
        for ndim in [3, 4]:
            shape = self._rand_shape(ndim, 5, 10)
            data = make_arg(shape)
            # Axis to sample for given shape.
            for i in range(1, ndim + 1):
                # Check all combinations of `i` axis.
                for flip_dim in combinations(range(ndim), i):
                    torch_fn = partial(torch.flip, dims=flip_dim)
                    np_fn = partial(np.flip, axis=flip_dim)
                    self.compare_with_numpy(torch_fn, np_fn, data)
    @onlyCUDA  # CPU is too slow
    @largeTensorTest('17GB')  # 4 tensors of 4GB (in, out) x (torch, numpy) + 1GB
    @largeTensorTest("81GB", "cpu")  # even for CUDA test, sufficient system memory is required
    def test_flip_large_tensor(self, device):
        """flip works on tensors with more than 2**32 elements (index overflow check)."""
        t_in = torch.empty(2**32 + 1, dtype=torch.uint8).random_()
        torch_fn = partial(torch.flip, dims=(0,))
        np_fn = partial(np.flip, axis=0)
        self.compare_with_numpy(torch_fn, np_fn, t_in)
        # Free the huge buffer eagerly to keep peak memory down.
        del t_in
    def _test_fliplr_flipud(self, torch_fn, np_fn, min_dim, max_dim, device, dtype):
        """Shared driver: compare torch_fn to np_fn on random inputs of each rank
        in [min_dim, max_dim]."""
        for dim in range(min_dim, max_dim + 1):
            shape = self._rand_shape(dim, 5, 10)
            # Randomly scale the input
            if dtype.is_floating_point or dtype.is_complex:
                data = torch.randn(*shape, device=device, dtype=dtype)
            else:
                data = torch.randint(0, 10, shape, device=device, dtype=dtype)
            self.compare_with_numpy(torch_fn, np_fn, data)
    @dtypes(torch.int64, torch.double, torch.cdouble)
    def test_fliplr(self, device, dtype):
        """torch.fliplr matches np.fliplr on 2-D through 4-D inputs."""
        self._test_fliplr_flipud(torch.fliplr, np.fliplr, 2, 4, device, dtype)
@dtypes(torch.int64, torch.double, torch.cdouble)
def test_fliplr_invalid(self, device, dtype):
x = torch.randn(42).to(dtype)
with self.assertRaisesRegex(RuntimeError, "Input must be >= 2-d."):
torch.fliplr(x)
with self.assertRaisesRegex(RuntimeError, "Input must be >= 2-d."):
torch.fliplr(torch.tensor(42, device=device, dtype=dtype))
    @dtypes(torch.int64, torch.double, torch.cdouble)
    def test_flipud(self, device, dtype):
        """torch.flipud matches np.flipud on 1-D through 4-D inputs."""
        self._test_fliplr_flipud(torch.flipud, np.flipud, 1, 4, device, dtype)
@dtypes(torch.int64, torch.double, torch.cdouble)
def test_flipud_invalid(self, device, dtype):
with self.assertRaisesRegex(RuntimeError, "Input must be >= 1-d."):
torch.flipud(torch.tensor(42, device=device, dtype=dtype))
    def test_rot90(self, device):
        """rot90: rotation counts, defaults, k-modulo behavior, >2-D inputs,
        and dim validation."""
        data = torch.arange(1, 5, device=device).view(2, 2)
        self.assertEqual(torch.tensor([1, 2, 3, 4]).view(2, 2), data.rot90(0, [0, 1]))
        self.assertEqual(torch.tensor([2, 4, 1, 3]).view(2, 2), data.rot90(1, [0, 1]))
        self.assertEqual(torch.tensor([4, 3, 2, 1]).view(2, 2), data.rot90(2, [0, 1]))
        self.assertEqual(torch.tensor([3, 1, 4, 2]).view(2, 2), data.rot90(3, [0, 1]))
        # test for default args k=1, dims=[0, 1]
        self.assertEqual(data.rot90(), data.rot90(1, [0, 1]))
        # test for reversed order of dims
        self.assertEqual(data.rot90(3, [0, 1]), data.rot90(1, [1, 0]))
        # test for modulo of k
        self.assertEqual(data.rot90(5, [0, 1]), data.rot90(1, [0, 1]))
        self.assertEqual(data.rot90(3, [0, 1]), data.rot90(-1, [0, 1]))
        self.assertEqual(data.rot90(-5, [0, 1]), data.rot90(-1, [0, 1]))
        # test for dims out-of-range error
        self.assertRaises(RuntimeError, lambda: data.rot90(1, [0, -3]))
        self.assertRaises(RuntimeError, lambda: data.rot90(1, [0, 2]))
        # test tensor with more than 2D
        data = torch.arange(1, 9, device=device).view(2, 2, 2)
        self.assertEqual(torch.tensor([2, 4, 1, 3, 6, 8, 5, 7]).view(2, 2, 2), data.rot90(1, [1, 2]))
        self.assertEqual(data.rot90(1, [1, -1]), data.rot90(1, [1, 2]))
        # test for errors
        self.assertRaises(RuntimeError, lambda: data.rot90(1, [0, 3]))
        self.assertRaises(RuntimeError, lambda: data.rot90(1, [1, 1]))
        self.assertRaises(RuntimeError, lambda: data.rot90(1, [0, 1, 2]))
        self.assertRaises(RuntimeError, lambda: data.rot90(1, [0]))
@skipIfTorchDynamo("TorchDynamo fails with an unknown error")
@dtypes(torch.cfloat, torch.cdouble)
def test_complex_rot90(self, device, dtype):
shape = self._rand_shape(random.randint(2, 4), 5, 10)
for rot_times in range(4):
data = torch.randn(*shape, device=device, dtype=dtype)
torch_fn = partial(torch.rot90, k=rot_times, dims=[0, 1])
np_fn = partial(np.rot90, k=rot_times, axes=[0, 1])
self.compare_with_numpy(torch_fn, np_fn, data)
# TODO: update once warning flag is available to always trigger ONCE warnings
# Ensures nonzero does not throw a warning, even when the as_tuple argument
# is not provided
def test_nonzero_no_warning(self, device):
t = torch.randn((2, 2), device=device)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
torch.nonzero(t)
t.nonzero()
self.assertEqual(len(w), 0)
    @dtypes(*all_types_and(torch.half, torch.bool, torch.bfloat16))
    def test_nonzero(self, device, dtype):
        """nonzero (function, method, out=, as_tuple) matches np.nonzero and
        validates out= dtype/device errors."""
        shapes = [
            torch.Size((12,)),
            torch.Size((12, 1)),
            torch.Size((1, 12)),
            torch.Size((6, 2)),
            torch.Size((3, 2, 2)),
            torch.Size((5, 5, 5)),
        ]
        def gen_nontrivial_input(shape, dtype, device):
            if dtype != torch.bfloat16:
                return torch.randint(2, shape, device=device, dtype=dtype)
            else:
                # windows does not work for bfloat16 randing
                return torch.randint(2, shape, device=device, dtype=torch.float).to(dtype)
        for shape in shapes:
            tensor = gen_nontrivial_input(shape, dtype, device)
            dst1 = torch.nonzero(tensor, as_tuple=False)
            dst2 = tensor.nonzero(as_tuple=False)
            dst3 = torch.empty([], dtype=torch.long, device=device)
            torch.nonzero(tensor, out=dst3)
            if self.device_type != 'xla':
                # xla does not raise runtime error
                self.assertRaisesRegex(
                    RuntimeError,
                    "scalar type Long",
                    lambda: torch.nonzero(tensor, out=torch.empty([], dtype=torch.float, device=device))
                )
            if self.device_type == 'cuda':
                self.assertRaisesRegex(
                    RuntimeError,
                    "on the same device",
                    lambda: torch.nonzero(tensor, out=torch.empty([], dtype=torch.long))
                )
            np_array = tensor.cpu().numpy() if dtype != torch.bfloat16 else tensor.float().cpu().numpy()
            # np.nonzero returns a tuple of per-dim index arrays; stack+t matches torch's layout.
            np_result = torch.from_numpy(np.stack(np_array.nonzero())).t()
            self.assertEqual(dst1.cpu(), np_result, atol=0, rtol=0)
            self.assertEqual(dst2.cpu(), np_result, atol=0, rtol=0)
            self.assertEqual(dst3.cpu(), np_result, atol=0, rtol=0)
            tup1 = torch.nonzero(tensor, as_tuple=True)
            tup2 = tensor.nonzero(as_tuple=True)
            tup1 = torch.stack(tup1).t().cpu()
            tup2 = torch.stack(tup2).t().cpu()
            self.assertEqual(tup1, np_result, atol=0, rtol=0)
            self.assertEqual(tup2, np_result, atol=0, rtol=0)
    def test_nonzero_astuple_out(self, device):
        """as_tuple=True cannot be combined with out=, and JIT script rejects
        the as_tuple kwarg while tracing still works."""
        t = torch.randn((3, 3, 3), device=device)
        out = torch.empty_like(t, dtype=torch.long)
        with self.assertRaises(RuntimeError):
            torch.nonzero(t, as_tuple=True, out=out)
        self.assertEqual(torch.nonzero(t, as_tuple=False, out=out), torch.nonzero(t, out=out))
        # Verifies that JIT script cannot handle the as_tuple kwarg
        # See Issue https://github.com/pytorch/pytorch/issues/45499.
        def _foo(t):
            tuple_result = torch.nonzero(t, as_tuple=True)
            nontuple_result = torch.nonzero(t, as_tuple=False)
            out = torch.empty_like(nontuple_result)
            torch.nonzero(t, as_tuple=False, out=out)
            return tuple_result, nontuple_result, out
        with self.assertRaises(RuntimeError):
            scripted_foo = torch.jit.script(_foo)
        # Verifies that JIT tracing works fine
        traced_foo = torch.jit.trace(_foo, t)
        traced_tuple, traced_nontuple, traced_out = traced_foo(t)
        expected_tuple = torch.nonzero(t, as_tuple=True)
        expected_nontuple = torch.nonzero(t)
        self.assertEqual(traced_tuple, expected_tuple)
        self.assertEqual(traced_nontuple, expected_nontuple)
        self.assertEqual(traced_out, expected_nontuple)
    @onlyNativeDeviceTypes
    def test_nonzero_discontiguous(self, device):
        """nonzero handles discontiguous inputs and reuses/respects
        preallocated (possibly discontiguous) out tensors."""
        shape = (4, 4)
        tensor = torch.randint(2, shape, device=device)
        # Strided copy: same values, non-contiguous layout.
        tensor_nc = torch.empty(shape[0], shape[1] * 2, device=device)[:, ::2].copy_(tensor)
        dst1 = tensor.nonzero(as_tuple=False)
        dst2 = tensor_nc.nonzero(as_tuple=False)
        self.assertEqual(dst1, dst2, atol=0, rtol=0)
        dst3 = torch.empty_like(dst1)
        data_ptr = dst3.data_ptr()
        # expect dst3 storage to be reused
        torch.nonzero(tensor, out=dst3)
        self.assertEqual(data_ptr, dst3.data_ptr())
        self.assertEqual(dst1, dst3, atol=0, rtol=0)
        # discontiguous out
        dst4 = torch.empty(dst1.size(0), dst1.size(1) * 2, dtype=torch.long, device=device)[:, ::2]
        data_ptr = dst4.data_ptr()
        strides = dst4.stride()
        torch.nonzero(tensor, out=dst4)
        self.assertEqual(data_ptr, dst4.data_ptr())
        self.assertEqual(dst1, dst4, atol=0, rtol=0)
        self.assertEqual(strides, dst4.stride())
def test_nonzero_non_diff(self, device):
x = torch.randn(10, requires_grad=True)
nz = x.nonzero()
self.assertFalse(nz.requires_grad)
instantiate_device_type_tests(TestShapeOps, globals())
if __name__ == '__main__':
run_tests()
| pytorch-master | test/test_shape_ops.py |
#!/usr/bin/env python3
# Owner(s): ["oncall: mobile"]
import os
import ctypes
import torch
import unittest
from typing import Tuple
from torch.backends._nnapi.prepare import convert_model_to_nnapi
from torch.testing._internal.common_quantized import supported_qengines
from torch.testing._internal.common_utils import TestCase, run_tests
def qpt(t, scale, zero_point, dtype=torch.quint8):
    """Quantize-per-tensor helper: wraps any tensor-like `t` and quantizes it
    with the given scale/zero_point (quint8 by default)."""
    return torch.quantize_per_tensor(torch.tensor(t), scale, zero_point, dtype)
def nhwc(t):
    """Return a channels-last copy of `t`, flagged for NNAPI NHWC handling."""
    copy = t.clone().contiguous(memory_format=torch.channels_last)
    # The NNAPI converter reads this attribute to treat the tensor as NHWC.
    copy.nnapi_nhwc = True
    return copy
@unittest.skipUnless('qnnpack' in supported_qengines,
"This Pytorch Build has not been built with or does not support QNNPACK")
class TestNNAPI(TestCase):
    def setUp(self):
        # Avoid saturation in fbgemm
        torch.backends.quantized.engine = 'qnnpack'
        # If LIBNEURALNETWORKS_PATH points at an NNAPI driver, load it and run
        # the converted models end-to-end; otherwise only conversion is tested.
        libneuralnetworks_path = os.environ.get("LIBNEURALNETWORKS_PATH")
        if libneuralnetworks_path:
            ctypes.cdll.LoadLibrary(libneuralnetworks_path)
            print("Will attempt to run NNAPI models.")
            self.can_run_nnapi = True
        else:
            self.can_run_nnapi = False
# Created for easy override by subclasses (eg TestNnapiBackend)
def call_lowering_to_nnapi(self, traced_module, args):
return convert_model_to_nnapi(traced_module, args)
    # Created for subclasses to set can_run_nnapi (eg TestNnapiBackend)
    def set_can_run_nnapi(self, can_run):
        # Toggle whether check() actually executes converted models.
        self.can_run_nnapi = can_run
    def check(
        self,
        module,
        arg_or_args,
        *,
        trace_args=None,
        convert_args=None,
        atol_rtol=None,
        limit=None,
        expected_memory_format=None
    ):
        """Trace `module`, lower it to NNAPI, and (when a driver is loaded)
        compare NNAPI execution against eager execution.

        Args:
            module: eager module under test.
            arg_or_args: a single input tensor or a list of inputs.
            trace_args: inputs used for tracing when different from the
                execution inputs (e.g. to exercise flexible shapes).
            convert_args: inputs used for NNAPI conversion.
            atol_rtol: optional (atol, rtol) pair for the output comparison.
            limit: for quantized outputs, the max number of elementwise
                mismatches tolerated before re-checking with zero tolerance.
            expected_memory_format: if set, assert the NNAPI output is
                contiguous in this memory format.
        """
        with torch.no_grad():
            # Normalize to a list of inputs.
            if isinstance(arg_or_args, torch.Tensor):
                args = [arg_or_args]
            else:
                args = arg_or_args
            module.eval()
            traced = torch.jit.trace(module, trace_args or args)
            nnapi_module = self.call_lowering_to_nnapi(traced, convert_args or args)
            if not self.can_run_nnapi:
                # Only test that the model was converted successfully.
                return
            eager_output = module(*args)
            nnapi_output = nnapi_module(*args)
            kwargs = {}
            if atol_rtol is not None:
                kwargs["atol"] = atol_rtol[0]
                kwargs["rtol"] = atol_rtol[1]
            self.assertEqual(eager_output, nnapi_output, **kwargs)
            if limit is not None:
                # Count elementwise mismatches on the integer representation;
                # only fail when they exceed `limit`.
                mismatches = \
                    eager_output.int_repr().to(torch.int32) - \
                    nnapi_output.int_repr().to(torch.int32)
                if mismatches.count_nonzero() > limit:
                    # Too many mismatches. Re-run the check with no tolerance
                    # to get a nice message.
                    self.assertEqual(eager_output, nnapi_output, atol=0, rtol=0)
            if expected_memory_format:
                self.assertTrue(nnapi_output.is_contiguous(memory_format=expected_memory_format))
    def float_and_quant_and_nhwc(self, inp_float, scale, zero_point):
        """Return (name, input) variants of `inp_float`: float and quantized,
        each in contiguous and NHWC layout.

        NOTE(review): `scale` and `zero_point` are accepted but unused — the
        quantized variant is hard-coded to qpt(inp_float, 0.03, 128) even
        though callers pass 0.3/128.  Confirm whether that is intentional
        before relying on these parameters.
        """
        torch.manual_seed(29)
        inp_quant = qpt(inp_float, 0.03, 128)
        return [
            ("float", inp_float),
            ("float-nhwc", nhwc(inp_float)),
            ("quant", inp_quant),
            ("quant-nhwc", nhwc(inp_quant)),
        ]
def test_prelu(self):
arg = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
single_a = torch.nn.PReLU()
self.check(single_a, arg)
multi_a = torch.nn.PReLU(4)
with torch.no_grad():
multi_a.weight.copy_(torch.tensor([.1, .2, .3, .4]))
self.check(multi_a, nhwc(arg))
# Test flexible size
self.check(
multi_a,
arg,
trace_args=[torch.zeros(1, 4, 3, 3)],
convert_args=[nhwc(torch.zeros(1, 4, 0, 0))],
)
def test_quantize(self):
self.check(
torch.nn.quantized.Quantize(0.25, 2, torch.quint8),
nhwc(torch.tensor([[[[1.0]], [[2.0]]]])))
def test_dequantize(self):
self.check(
torch.nn.quantized.DeQuantize(),
nhwc(qpt([[[[1.0]], [[2.0]]]], 0.25, 2)))
def test_unsqueeze(self):
class UnsqueezeModule(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, arg):
return arg.unsqueeze(self.dim)
self.check(UnsqueezeModule(-2), torch.randn(4, 2, 2))
self.check(UnsqueezeModule(-1), torch.randn(4, 2, 2))
self.check(UnsqueezeModule(0), torch.randn(4, 2, 2))
self.check(UnsqueezeModule(1), torch.randn(4, 2, 2))
self.check(UnsqueezeModule(2), torch.randn(4, 2, 2))
def test_reshape(self):
class ReshapeModule(torch.nn.Module):
def __init__(self, shape):
super().__init__()
self.shape = shape
def forward(self, arg):
return arg.reshape(self.shape)
self.check(
ReshapeModule((2, 4)),
torch.randn(4, 2, 1, 1))
self.check(
ReshapeModule((8, -1)),
nhwc(torch.randn(4, 2, 1, 1)))
with self.assertRaisesRegex(Exception, "target size"):
self.check(
ReshapeModule((2, 4)),
nhwc(torch.randn(4, 2, 1, 1)))
def test_flatten(self):
for mod in [
torch.nn.Flatten(),
torch.nn.Flatten(start_dim=2, end_dim=3),
torch.nn.Flatten(start_dim=2, end_dim=4),
torch.nn.Flatten(start_dim=0, end_dim=-2),
torch.nn.Flatten(start_dim=0, end_dim=4)
]:
self.check(mod, torch.randn(4, 2, 1, 3, 7))
# flex inputs
self.check(
torch.nn.Flatten(),
torch.randn(4, 2, 1, 3, 7),
convert_args=[torch.zeros(0, 2, 1, 3, 7)]
)
# channels last
self.check(
torch.nn.Flatten(),
nhwc(torch.randn(2, 1, 4, 7))
)
self.check(
torch.nn.Flatten(),
nhwc(torch.randn(2, 3, 1, 1))
)
# Exceptions
with self.assertRaisesRegex(Exception, "not supported on NHWC"):
self.check(
torch.nn.Flatten(),
nhwc(torch.randn(1, 3, 4, 4))
)
with self.assertRaisesRegex(Exception, "Flattening flexible dims is not supported yet"):
self.check(torch.nn.Flatten(), torch.randn(4, 2, 0, 0, 7))
with self.assertRaisesRegex(Exception, "Only 1 dim"):
self.check(
torch.nn.Flatten(start_dim=1, end_dim=-2),
torch.randn(0, 2, 1, 3, 0))
def test_slice(self):
class SliceModule(torch.nn.Module):
def __init__(self, start, stop, step):
super().__init__()
self.start = start
self.stop = stop
self.step = step
def forward(self, t):
return t[1:, self.start:self.stop:self.step, :]
class SliceModule2(torch.nn.Module):
def forward(self, t):
return t[3:]
self.check(
SliceModule(1, 5, 2),
torch.randn(4, 6, 2)
)
self.check(
SliceModule2(),
torch.randn(5)
)
# flex inputs
self.check(
SliceModule(1, 5, 2),
torch.randn(4, 6, 2),
convert_args=[torch.zeros(4, 6, 0)]
)
with self.assertRaisesRegex(Exception, "slice with flexible shape"):
self.check(
SliceModule(1, 5, 2),
torch.randn(4, 6, 2),
convert_args=[torch.zeros(0, 0, 0)]
)
def test_cat(self):
class CatModule(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, t1, t2):
return torch.cat([t1, t2], self.dim)
self.check(
CatModule(0),
[
torch.randn(1, 2, 3, 3),
torch.randn(2, 2, 3, 3),
])
self.check(
CatModule(1),
[
torch.randn(1, 2, 3, 3),
torch.randn(1, 4, 3, 3),
])
self.check(
CatModule(1),
[
nhwc(torch.randn(1, 2, 3, 3)),
nhwc(torch.randn(1, 4, 3, 3)),
])
self.check(
CatModule(1),
[
torch.randn(1, 2, 3, 3),
torch.randn(1, 4, 3, 3),
],
convert_args=[
torch.zeros(0, 0, 0, 0),
torch.zeros(0, 0, 0, 0)
])
def test_pointwise_unary(self):
for op in ["relu", "sigmoid"]:
with self.subTest(op):
class UnaryModule(torch.nn.Module):
def forward(self, arg):
if op == "relu":
return torch.nn.functional.relu(arg)
if op == "sigmoid":
return torch.sigmoid(arg)
raise Exception("Bad op")
self.check(UnaryModule(), torch.tensor([-1.0, 1.0]))
self.check(
UnaryModule(),
qpt(torch.tensor([-1.0, 1.0]), 1. / 256, 0),
)
def test_pointwise_binary(self):
for op in ["add", "sub", "mul", "div"]:
with self.subTest(op):
class BinaryModule(torch.nn.Module):
def forward(self, lhs, rhs):
if op == "add":
return lhs + rhs
if op == "sub":
return lhs - rhs
if op == "mul":
return lhs * rhs
if op == "div":
return lhs / rhs
raise Exception("Bad op")
self.check(
BinaryModule(),
[
torch.tensor([1.0, 2.0]),
torch.tensor([3.0, 4.0]),
])
self.check(
BinaryModule(),
[
torch.tensor([[1.0, 2.0]]),
torch.tensor([[3.0, 4.0], [5.0, 6.0]]),
])
with self.assertRaisesRegex(Exception, "Non-equal-rank broadcast"):
self.check(
BinaryModule(),
[
torch.tensor([1.0, 2.0]),
torch.tensor([[3.0, 4.0], [5.0, 6.0]]),
])
def test_pointwise_binary_const(self):
const = torch.randn(1, 4, 6, 6)
class ArgPlusConst(torch.nn.Module):
def forward(self, arg):
return arg + const
class ConstPlusArg(torch.nn.Module):
def forward(self, arg):
return const + arg
arg_contig = torch.randn(2, 4, 6, 6)
arg_nhwc = nhwc(torch.randn(2, 4, 6, 6))
for mod_class in [ArgPlusConst, ConstPlusArg]:
for use_nhwc in [False, True]:
with self.subTest(mod_class=mod_class.__name__, use_nhwc=use_nhwc):
arg = arg_nhwc if use_nhwc else arg_contig
memory_format = torch.channels_last if use_nhwc else torch.contiguous_format
self.check(mod_class(), arg,
expected_memory_format=memory_format)
def test_hardtanh(self):
inp = torch.tensor([-2.0, -0.5, 0.5, 2.0, 7.0])
self.check(torch.nn.Hardtanh(), inp)
self.check(torch.nn.Hardtanh(0.0, 6.0), inp)
with self.assertRaisesRegex(Exception, "hardtanh with args"):
self.check(torch.nn.Hardtanh(0.0, 5.0), inp)
def test_softmax(self):
inp = torch.tensor([[-2.0, -0.5], [0.5, 2.0]])
self.check(torch.nn.Softmax(), inp)
self.check(torch.nn.Softmax(dim=0), inp)
# Test flexible size
self.check(
torch.nn.Softmax(),
inp,
convert_args=[torch.zeros(0, 0)],
)
def test_to(self):
class ToCPU(torch.nn.Module):
def __init__(self):
super().__init__()
self.prelu = torch.nn.PReLU()
def forward(self, x):
y = x.to("cpu")
# add prelu since input operand can't be output
return self.prelu(y)
arg = torch.randn(1, 2, 3, 3)
self.check(ToCPU(), arg)
# Test flexible size
self.check(
ToCPU(),
arg,
convert_args=[torch.zeros(1, 2, 0, 0)],
)
def test_detach(self):
class DetachModule(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
y = x.detach()
return torch.nn.functional.relu(y)
self.check(DetachModule(), torch.randn(1, 2, 3, 3))
self.check(
DetachModule(), torch.randn(1, 2, 3, 3),
convert_args=[torch.zeros(1, 2, 0, 0)])
def test_log_softmax(self):
inp = torch.randn(3, 10)
self.check(torch.nn.LogSoftmax(), inp)
self.check(torch.nn.LogSoftmax(0), inp)
def test_mean(self):
class MeanModule(torch.nn.Module):
def __init__(self, dim, keep=False):
super().__init__()
self.dim = dim
self.keep = keep
def forward(self, t):
return torch.mean(t, dim=self.dim, keepdim=self.keep)
self.check(MeanModule(0), torch.randn(2, 3))
self.check(MeanModule(1), torch.randn(2, 3))
self.check(MeanModule([2, 3]), torch.randn(2, 3, 6, 6))
self.check(MeanModule([2, 3]), nhwc(torch.randn(2, 3, 6, 6)))
self.check(MeanModule([-1, -2]), nhwc(torch.randn(2, 3, 6, 6)))
self.check(MeanModule([-1, -2], keep=True), nhwc(torch.randn(2, 3, 6, 6)))
def test_max_pool2d(self):
for (name, inp) in self.float_and_quant_and_nhwc(torch.randn(2, 3, 12, 16), 0.3, 128):
with self.subTest(name):
self.check(torch.nn.MaxPool2d(2), inp)
self.check(torch.nn.MaxPool2d((3, 4)), inp)
self.check(torch.nn.MaxPool2d((3, 4), (1, 2)), inp)
    def test_avg_pool2d(self):
        """AvgPool2d across kernel/stride configs for every float/quant x
        layout variant, with fixed and flexible-shape conversion."""
        for (name, inp) in self.float_and_quant_and_nhwc(torch.randn(2, 3, 12, 16), 0.3, 128):
            with self.subTest(name):
                atol_rtol = None
                limit = None
                # Zero spatial dims mark them flexible at conversion time.
                convert_dims = (2, 3, 0, 0)
                convert_arg = torch.zeros(*convert_dims)
                for model in (
                        torch.nn.AvgPool2d(2),
                        torch.nn.AvgPool2d((3, 4)),
                        torch.nn.AvgPool2d((3, 4), (1, 2))):
                    if "quant" in name:
                        # Allow up to 1 quantum of error on every element.
                        atol_rtol = (1, 0)
                        limit = model(inp).numel()
                        convert_arg = qpt(torch.zeros(*convert_dims), 1.0 / 16, 128)
                    if "nhwc" in name:
                        convert_arg = nhwc(convert_arg)
                    self.check(model, inp, atol_rtol=atol_rtol, limit=limit)
                    self.check(
                        model,
                        inp,
                        convert_args=[convert_arg],
                        atol_rtol=atol_rtol,
                        limit=limit
                    )
def test_adaptive_avg_pool2d(self):
for (name, inp) in self.float_and_quant_and_nhwc(torch.randn(2, 3, 12, 16), 0.3, 128):
with self.subTest(name):
self.check(torch.nn.AdaptiveAvgPool2d((1, 1)), inp)
with self.assertRaisesRegex(Exception, "with output size"):
self.check(torch.nn.AdaptiveAvgPool2d((2, 2)), inp)
def test_upsample_nearest2d(self):
convert_args = dict(self.float_and_quant_and_nhwc(torch.randn(2, 3, 0, 0), 0.3, 128))
for (name, inp) in self.float_and_quant_and_nhwc(torch.randn(2, 3, 12, 16), 0.3, 128):
with self.subTest(name):
self.check(torch.nn.UpsamplingNearest2d(size=(16, 20)), inp)
self.check(torch.nn.UpsamplingNearest2d(size=(24, 32)), inp)
self.check(torch.nn.UpsamplingNearest2d(size=(36, 48)), inp)
self.check(torch.nn.UpsamplingNearest2d(scale_factor=(1.5, 1.5)), inp)
self.check(torch.nn.UpsamplingNearest2d(scale_factor=(2.0, 2.0)), inp)
self.check(torch.nn.UpsamplingNearest2d(scale_factor=(3.0, 3.0)), inp)
self.check(
torch.nn.UpsamplingNearest2d(size=(24, 32)), inp,
convert_args=[convert_args[name]]
)
self.check(
torch.nn.UpsamplingNearest2d(scale_factor=(2.0, 2.0)), inp,
convert_args=[convert_args[name]]
)
def test_linear(self):
torch.manual_seed(29)
self.check(torch.nn.Linear(16, 32), torch.randn(2, 16))
self.check(
torch.nn.Linear(16, 32), torch.randn(2, 16),
convert_args=[torch.zeros(0, 16)])
def test_conv2d(self):
cases = [
# in_ch, out_ch, kernel, stride, padding, groups, bias, input_dim, name
( 4, 8, (3, 3), 1, 0, 1, 1, (2, 4, 16, 16), "3x3"), # noqa: E201,E241
( 4, 8, (3, 3), 1, 0, 1, 0, (2, 4, 16, 16), "3x3nobias"), # noqa: E201,E241
( 4, 16, (3, 3), 1, 1, 1, 1, (2, 4, 16, 16), "3x3p1"), # noqa: E201,E241
( 8, 8, (3, 3), 2, 0, 1, 1, (2, 8, 16, 16), "3x3s2"), # noqa: E201,E241
( 4, 8, (5, 5), 1, 0, 1, 1, (2, 4, 16, 16), "5x5"), # noqa: E201,E241
( 4, 4, (3, 3), 1, 0, 4, 1, (2, 4, 16, 16), "3x3dw"), # noqa: E201,E241
( 8, 4, (1, 1), 1, 0, 1, 1, (2, 8, 16, 16), "1x1"), # noqa: E201,E241
]
for kind in ["float", "float-nhwc", "quant", "quant-nhwc"]:
for case in cases:
in_ch, out_ch, kernel, stride, padding, groups, bias, input_dim, name = case
with self.subTest("{}-{}".format(kind, name)):
inp = torch.randn(input_dim)
model = torch.nn.Conv2d(in_ch, out_ch, kernel, stride, padding, groups=groups, bias=bool(bias))
output_size = model(inp).numel()
atol_rtol = None
limit = None
convert_dims = (0, in_ch, 0, 0)
convert_arg = torch.zeros(*convert_dims)
if "quant" in kind:
model = torch.nn.Sequential(model)
model.eval()
model.qconfig = torch.ao.quantization.get_default_qconfig('qnnpack')
model = torch.ao.quantization.prepare(model)
model(inp)
model = torch.ao.quantization.convert(model)
inp = qpt(inp, 1.0 / 16, 128)
# I've seen numerical differences between QNNPACK and NNAPI,
# but never more than 1 quantum, and never more than ~1% of
# the output in this test.
atol_rtol = (1, 0)
limit = output_size * 0.03
convert_arg = qpt(torch.zeros(*convert_dims), 1.0 / 16, 128)
if "nhwc" in kind:
inp = nhwc(inp)
convert_arg = nhwc(convert_arg)
self.check(model, inp, atol_rtol=atol_rtol, limit=limit)
self.check(
model,
inp,
convert_args=[convert_arg],
atol_rtol=atol_rtol,
limit=limit
)
    def test_conv2d_transpose(self):
        """ConvTranspose2d in float/quant x contiguous/NHWC combinations,
        with fixed and flexible-shape conversion."""
        torch.manual_seed(29)
        in_ch, out_ch, kernel = (5, 7, (2, 2))
        input_dim = (4, 5, 3, 3)
        # Zero spatial dims mark them flexible at conversion time.
        convert_dims = input_dim[:2] + (0, 0)
        for kind in ["float", "float-nhwc", "quant", "quant-nhwc"]:
            with self.subTest(kind):
                inp = torch.randn(input_dim)
                model = torch.nn.ConvTranspose2d(in_ch, out_ch, kernel)
                output_size = model(inp).numel()
                atol_rtol = (0.0002, 0)
                limit = None
                convert_arg = torch.zeros(*convert_dims)
                if "quant" in kind:
                    model = torch.nn.quantized.ConvTranspose2d(in_ch, out_ch, kernel)
                    model.qconfig = torch.ao.quantization.get_default_qconfig('qnnpack')
                    inp = qpt(inp, 1.0 / 16, 128)
                    # I've seen numerical differences between QNNPACK and NNAPI,
                    # but never more than 1 quantum, and never more than ~10% of
                    # the output in this test.
                    atol_rtol = (1, 0)
                    limit = output_size * 0.1
                    convert_arg = qpt(convert_arg, 1.0 / 16, 128)
                if "nhwc" in kind:
                    inp = nhwc(inp)
                    convert_arg = nhwc(convert_arg)
                self.check(model, inp, atol_rtol=atol_rtol, limit=limit)
                self.check(
                    model,
                    inp,
                    convert_args=[convert_arg],
                    atol_rtol=atol_rtol,
                    limit=limit
                )
    def test_qadd(self):
        """Quantized add / add_relu / mul through QFunctional, with several
        conversion-argument combinations (zeros tensors in each position)."""
        func = torch.nn.quantized.QFunctional()
        func.scale = 0.5
        func.zero_point = 120

        class AddMod(torch.nn.Module):
            def forward(self, lhs, rhs):
                return func.add(lhs, rhs)

        class AddReluMod(torch.nn.Module):
            def forward(self, lhs, rhs):
                return func.add_relu(lhs, rhs)

        class MulMod(torch.nn.Module):
            def forward(self, lhs, rhs):
                return func.mul(lhs, rhs)

        for (name, mod) in [("add", AddMod), ("add_relu", AddReluMod), ("mul", MulMod)]:
            with self.subTest(name):
                # 1-D inputs.
                self.check(
                    mod(),
                    [
                        qpt([1.0, 2.0], 0.25, 128),
                        qpt([3.0, 4.0], 0.25, 128),
                    ])
                # 2-D inputs, converting with zeros in the second position.
                self.check(
                    mod(),
                    [
                        qpt([[1.0, 2.0]], 0.25, 128),
                        qpt([[3.0, 4.0]], 0.25, 128),
                    ],
                    convert_args=[
                        qpt([[1.0, 2.0]], 0.25, 128),
                        qpt(torch.zeros((1, 2)), 0.25, 128),
                    ]
                )
                # Zeros in the first position.
                self.check(
                    mod(),
                    [
                        qpt([[1.0, 2.0]], 0.25, 128),
                        qpt([[3.0, 4.0]], 0.25, 128),
                    ],
                    convert_args=[
                        qpt(torch.zeros((1, 2)), 0.25, 128),
                        qpt([[3.0, 4.0]], 0.25, 128),
                    ]
                )
                # Zeros in both positions.
                self.check(
                    mod(),
                    [
                        qpt([[1.0, 2.0]], 0.25, 128),
                        qpt([[3.0, 4.0]], 0.25, 128),
                    ],
                    convert_args=[
                        qpt(torch.zeros((1, 2)), 0.25, 128),
                        qpt(torch.zeros((1, 2)), 0.25, 128),
                    ]
                )
        # NOTE: NNAPI qadd supports broadcast, but PT does not.
def test_qlinear(self):
torch.manual_seed(29)
weight = qpt(torch.randn(16, 32), 0.125, 0, torch.qint8)
bias = torch.randn(16)
mod = torch.nn.quantized.Linear(32, 16)
mod.set_weight_bias(weight, bias)
inp = qpt(torch.randn(2, 32), 0.05, 130, torch.quint8)
self.check(mod, inp)
def test_seblock_mul(self):
class MulModel(torch.nn.Module):
def forward(self, lhs, rhs):
return lhs * rhs
self.check(
MulModel(),
[
nhwc(torch.randn(2, 3, 4, 4)),
torch.randn(1, 3, 1, 1),
])
def test_multi_output(self):
class MultiModel(torch.nn.Module):
def forward(self, lhs, rhs) -> Tuple[torch.Tensor, torch.Tensor]:
the_sum = lhs + rhs
the_diff = lhs - rhs
return the_sum, the_diff
self.check(MultiModel(), [torch.tensor([1.0, 2.0]), torch.tensor([1.0, 3.0])])
if __name__ == '__main__':
run_tests()
| pytorch-master | test/test_nnapi.py |
# Owner(s): ["module: nn"]
import tempfile
import torch
from copy import deepcopy
from functools import partial
from torch import nn
from torch.nn.utils.parametrize import register_parametrization, remove_parametrizations
from torch.nn.modules.lazy import LazyModuleMixin
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest, instantiate_parametrized_tests)
from torch.testing._internal.common_subclass import subclass_db, DiagTensorBelow
from torch.testing._internal.logging_tensor import LoggingTensor
from torch.utils._pytree import tree_map
from unittest import expectedFailure
# The current test methodology in this file is to test a variety of real use cases
# with a set of fully-fledged tensor subclasses. In the future, this may change
# to more narrowly specify toy subclasses for each of the specific invariants under
# test, avoiding the need to maintain the set of fully-fledged tensor subclasses.
# Decorator for parametrizing tests across the various tensor classes.
parametrize_tensor_cls = parametrize("tensor_cls", [
subtest(tensor_cls, name=info.name) for tensor_cls, info in subclass_db.items()])
class TestSubclass(TestCase):
def _create_tensor(self, tensor_cls):
return subclass_db[tensor_cls].create_fn(3)
@parametrize_tensor_cls
@parametrize("tensor_requires_grad", [False, True])
def test_param_invariants(self, tensor_cls, tensor_requires_grad):
x = self._create_tensor(tensor_cls).requires_grad_(tensor_requires_grad)
param = nn.Parameter(x, requires_grad=(not tensor_requires_grad))
self.assertIsInstance(param, nn.Parameter)
# Ensure requires_grad passed to Parameter's constructor takes precedence.
self.assertEqual(param.requires_grad, not tensor_requires_grad)
# Ensure original tensor is not mutated by Parameter construction.
self.assertNotIsInstance(x, nn.Parameter)
self.assertEqual(x.requires_grad, tensor_requires_grad)
@parametrize_tensor_cls
@parametrize("as_param", [False, True])
def test_deepcopy(self, tensor_cls, as_param):
x = self._create_tensor(tensor_cls)
if as_param:
x = nn.Parameter(x)
x_copy = deepcopy(x)
self.assertEqual(x, x_copy)
self.assertEqual(x.__class__, x_copy.__class__)
self.assertIsNot(x, x_copy)
self.assertIsInstance(x_copy, tensor_cls)
if as_param:
# Deepcopy should preserve both custom type and "parameter-ness".
self.assertIsInstance(x_copy, nn.Parameter)
@parametrize_tensor_cls
@parametrize("as_param", [False, True])
def test_serialization(self, tensor_cls, as_param):
with tempfile.TemporaryFile() as f:
x = self._create_tensor(tensor_cls)
if as_param:
x = nn.Parameter(x)
torch.save(x, f)
f.seek(0)
x_loaded = torch.load(f)
self.assertEqual(x, x_loaded)
self.assertIsNot(x, x_loaded)
self.assertIsInstance(x_loaded, tensor_cls)
if as_param:
# Serialization should preserve both custom type and "parameter-ness".
self.assertIsInstance(x_loaded, nn.Parameter)
@parametrize_tensor_cls
@parametrize("as_param", [False, True])
def test_repr(self, tensor_cls, as_param):
x = self._create_tensor(tensor_cls)
if as_param:
x = nn.Parameter(x)
str_repr = x.__repr__()
if tensor_cls is not torch.Tensor:
self.assertEqual(str_repr.count(f"{tensor_cls.__name__}("), 1)
self.assertEqual(str_repr.count("Parameter"), 1 if as_param else 0)
@parametrize_tensor_cls
@parametrize("as_param", [False, True])
def test_type_propagation(self, tensor_cls, as_param):
x = self._create_tensor(tensor_cls)
if as_param:
x = nn.Parameter(x)
# Call the add operator to produce an output tensor.
output = x + self._create_tensor(torch.Tensor)
# Custom type should be propagated across operations if closed under the op, but
# "parameter-ness" should not be.
if subclass_db[tensor_cls].closed_under_ops:
self.assertIsInstance(output, tensor_cls)
else:
self.assertIsInstance(output, torch.Tensor)
self.assertNotIsInstance(output, nn.Parameter)
    @parametrize_tensor_cls
    def test_module_optimization(self, tensor_cls):
        """Subclass tensors survive the full module lifecycle: Parameter /
        ParameterList / ParameterDict containers, in-place init, state_dict,
        backward, and an SGD step."""
        create_fn = partial(self._create_tensor, tensor_cls)

        class MyModule(nn.Module):
            def __init__(self):
                super().__init__()
                self.p1 = nn.Parameter(create_fn())
                self.p_list = nn.ParameterList([create_fn() for _ in range(3)])
                self.p_list.append(create_fn())
                self.p_dict = nn.ParameterDict({
                    'foo': create_fn(),
                    'bar': create_fn(),
                })
                self.p_dict['baz'] = create_fn()
                # In-place initializers must work on the subclass tensors.
                with torch.no_grad():
                    nn.init.normal_(self.p1)
                    for p in self.p_list:
                        nn.init.uniform_(p)
                    for _, p in self.p_dict.items():
                        nn.init.uniform_(p)

            def forward(self, x):
                out = self.p1 + x
                for p in self.p_list:
                    out = p + out
                for _, v in self.p_dict.items():
                    out = v + out
                return out

        m = MyModule()
        # 1 (p1) + 4 (list) + 3 (dict) parameters in the state dict.
        self.assertEqual(len(m.state_dict()), 8)
        optimizer = torch.optim.SGD(m.parameters(), lr=0.1)
        m(create_fn()).sum().backward(torch.tensor(1))
        optimizer.step()
    @parametrize_tensor_cls
    @parametrize("leave_parametrized", [False, True])
    def test_parametrization(self, tensor_cls, leave_parametrized):
        """register/remove_parametrizations works on modules whose weight is
        a tensor subclass, preserving the subclass type through forward."""
        # TODO: Either implement set_() properly for these tensor subclasses or apply a
        # more general fix to avoid the need for special set_() handling. For now, skip
        # testing these as they're expected to fail.
        if tensor_cls in [LoggingTensor, DiagTensorBelow]:
            return
        create_fn = partial(self._create_tensor, tensor_cls)

        class MyModule(nn.Module):
            def __init__(self):
                super().__init__()
                self.weight = nn.Parameter(create_fn())

            def forward(self, x):
                return self.weight + x

        class MyParametrization(nn.Module):
            def forward(self, X):
                return -X

        m = MyModule()
        self.assertEqual(len(m.state_dict()), 1)
        register_parametrization(m, 'weight', MyParametrization())
        # Accessing the parametrized weight still yields the subclass type.
        self.assertIsInstance(m.weight, tensor_cls)
        output = m(self._create_tensor(torch.Tensor))
        self.assertIsInstance(output, tensor_cls)
        remove_parametrizations(m, 'weight', leave_parametrized=leave_parametrized)
    # Lazy modules with custom tensors are not supported yet.
    @expectedFailure
    @parametrize_tensor_cls
    def test_lazy_module(self, tensor_cls):
        """Materializing an UninitializedParameter from a subclass input
        should yield the subclass type (currently expected to fail)."""
        if tensor_cls is torch.Tensor:
            self.fail('dummy fail for base tensor until the test passes for subclasses')

        class MyLazyModule(LazyModuleMixin, nn.Module):
            def __init__(self):
                super().__init__()
                self.param = nn.UninitializedParameter()

            def initialize_parameters(self, input) -> None:  # type: ignore[override]
                # Materialize the parameter on first forward, using the
                # input's shape.
                if self.has_uninitialized_params():
                    with torch.no_grad():
                        self.param.materialize(input.shape)
                        nn.init.uniform_(self.param)

            def forward(self, x):
                return self.param + x

        m = MyLazyModule()
        self.assertTrue(m.has_uninitialized_params())
        output = m(self._create_tensor(tensor_cls))
        self.assertFalse(m.has_uninitialized_params())
        self.assertIsInstance(m.param, tensor_cls)
def test_non_rewrapping_torch_dispatch_subclass_as_parameter_throws_for_detach(self):
# Define a subclass that does not rewrap for any function in its __torch_dispatch__ impl.
class NonRewrappingTensor(torch.Tensor):
@staticmethod
def __new__(
cls, t: torch.Tensor
):
r = super(NonRewrappingTensor, cls)._make_wrapper_subclass(
cls, t.shape, dtype=t.dtype, requires_grad=t.requires_grad, device=t.device)
return r
def __init__(self, t) -> None:
self.tensor: torch.Tensor = t
__torch_function__ = torch._C._disabled_torch_function_impl
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
def unwrap(e) -> torch.Tensor:
if isinstance(e, NonRewrappingTensor):
t = e.tensor
return t
else:
return e
r = func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs))
# Return an unwrapped tensor no longer of original subclass type.
return r
with self.assertRaisesRegex(RuntimeError, r"requires that detach\(\) returns an instance of the same type"):
param = nn.Parameter(NonRewrappingTensor(torch.randn(3)))
instantiate_parametrized_tests(TestSubclass)
if __name__ == '__main__':
run_tests()
| pytorch-master | test/test_subclass.py |
# -*- coding: utf-8 -*-
# Owner(s): ["oncall: jit"]
import torch
# This is how we include tests located in test/jit/...
# They are included here so that they are invoked when you call `test_jit.py`,
# do not run these test files directly.
from jit.test_tracer import TestTracer, TestMixTracingScripting # noqa: F401
from jit.test_recursive_script import TestRecursiveScript # noqa: F401
from jit.test_type_sharing import TestTypeSharing # noqa: F401
from jit.test_logging import TestLogging # noqa: F401
from jit.test_backends import TestBackends, TestBackendsWithCompiler # noqa: F401
from jit.test_backend_nnapi import TestNnapiBackend # noqa: F401
from jit.test_list_dict import TestList, TestDict, TestNamedTuple, TestScriptDict, TestScriptList # noqa: F401
from jit.test_async import TestAsync # noqa: F401
from jit.test_data_parallel import TestDataParallel # noqa: F401
from jit.test_models import TestModels # noqa: F401
from jit.test_modules import TestModules # noqa: F401
from jit.test_autodiff import TestAutodiffJit # noqa: F401
from jit.test_autodiff_subgraph_slicing import TestAutodiffSubgraphSlicing # noqa: F401
from jit.test_custom_operators import TestCustomOperators # noqa: F401
from jit.test_export_modes import TestExportModes # noqa: F401
from jit.test_graph_rewrite_passes import TestGraphRewritePasses # noqa: F401
from jit.test_class_type import TestClassType # noqa: F401
from jit.test_builtins import TestBuiltins, TestTensorBuiltins # noqa: F401
from jit.test_ignore_context_manager import TestIgnoreContextManager # noqa: F401
from jit.test_symbolic_shape_analysis import TestSymbolicShapeAnalysis # noqa: F401
from jit.test_op_decompositions import TestOpDecompositions # noqa: F401
from jit.test_unsupported_ops import TestUnsupportedOps # noqa: F401
from jit.test_freezing import TestFreezing, TestFrozenOptimizations, TestMKLDNNReinplacing # noqa: F401
from jit.test_peephole import TestPeephole # noqa: F401
from jit.test_alias_analysis import TestAliasAnalysis # noqa: F401
from jit.test_save_load import TestSaveLoad, TestSaveLoadFlatbuffer # noqa: F401
from jit.test_save_load_for_op_version import TestSaveLoadForOpVersion # noqa: F401
from jit.test_module_containers import TestModuleContainers # noqa: F401
from jit.test_python_bindings import TestPythonBindings # noqa: F401
from jit.test_python_ir import TestPythonIr # noqa: F401
from jit.test_functional_blocks import TestFunctionalBlocks # noqa: F401
from jit.test_remove_mutation import TestRemoveMutation # noqa: F401
from jit.test_torchbind import TestTorchbind # noqa: F401
from jit.test_module_interface import TestModuleInterface # noqa: F401 # noqa: F401
from jit.test_with import TestWith # noqa: F401
from jit.test_enum import TestEnum # noqa: F401
from jit.test_string_formatting import TestStringFormatting # noqa: F401
from jit.test_profiler import TestProfiler # noqa: F401
from jit.test_slice import TestSlice # noqa: F401
from jit.test_ignorable_args import TestIgnorableArgs # noqa: F401
from jit.test_hooks import TestHooks # noqa: F401
from jit.test_warn import TestWarn # noqa: F401
from jit.test_isinstance import TestIsinstance # noqa: F401
from jit.test_cuda import TestCUDA # noqa: F401
from jit.test_python_builtins import TestPythonBuiltinOP # noqa: F401
from jit.test_typing import TestTyping # noqa: F401
from jit.test_hash import TestHash # noqa: F401
from jit.test_complex import TestComplex # noqa: F401
from jit.test_jit_utils import TestJitUtils # noqa: F401
from jit.test_scriptmod_ann import TestScriptModuleInstanceAttributeTypeAnnotation # noqa: F401
from jit.test_types import TestTypesAndAnnotation # noqa: F401
from jit.test_misc import TestMisc # noqa: F401
from jit.test_upgraders import TestUpgraders # noqa: F401
from jit.test_pdt import TestPDT # noqa: F401
from jit.test_tensor_creation_ops import TestTensorCreationOps # noqa: F401
from jit.test_module_apis import TestModuleAPIs # noqa: F401
from jit.test_script_profile import TestScriptProfile # noqa: F401
from jit.test_convert_activation import TestFunctionalToInplaceActivation, TestInplaceToFunctionalActivation # noqa: F401
from jit.test_parametrization import TestParametrization # noqa: F401
from jit.test_attr import TestGetDefaultAttr # noqa: F401
from jit.test_aten_pow import TestAtenPow # noqa: F401
from jit.test_optimize_for_mobile_preserve_debug_info import TestOptimizeForMobilePreserveDebugInfo # noqa: F401
from jit.test_union import TestUnion # noqa: F401
from jit.test_batch_mm import TestBatchMM # noqa: F401
from jit.test_dtype_analysis import TestDtypeAnalysis, TestDtypeCustomRulesCPU # noqa: F401
from jit.test_device_analysis import TestDeviceAnalysis # noqa: F401
from jit.test_dce import TestDCE # noqa: F401
from jit.test_sparse import TestSparse # noqa: F401
from jit.test_tensor_methods import TestTensorMethods # noqa: F401
from jit.test_dataclasses import TestDataclasses # noqa: F401
# Torch
from torch import Tensor
from torch._C import TensorType, BoolType, parse_ir, _propagate_shapes
from torch.autograd import Variable
from torch.jit.annotations import BroadcastingList2, BroadcastingList3, Any # noqa: F401
from torch.nn.utils.rnn import PackedSequence
from torch.testing import FileCheck, make_tensor
import torch.autograd.profiler
import torch.cuda
import torch.jit
import torch.jit._logging
import torch.jit.frontend
import torch.nn as nn
import torch.nn.functional as F
# Testing utils
from torch.testing._internal import jit_utils
from torch.testing._internal.common_jit import check_against_reference
from torch.testing._internal.common_utils import run_tests, IS_WINDOWS, TEST_WITH_UBSAN, \
suppress_warnings, BUILD_WITH_CAFFE2, IS_SANDCASTLE, GRAPH_EXECUTOR, ProfilingMode, TestCase, \
freeze_rng_state, slowTest, TemporaryFileName, skipIfCompiledWithoutNumpy, \
enable_profiling_mode_for_profiling_tests, TEST_MKL, set_default_dtype, num_profiled_runs, \
skipIfCrossRef, IS_MACOS, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase, enable_cpu_fuser, disable_autodiff_subgraph_inlining, \
_trace, do_input_map, get_execution_plan, make_global, \
execWrapper, _inline_everything, _tmp_donotuse_dont_inline_everything, \
RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import (
get_script_args,
create_input, unpack_variables,
additional_module_tests, EXCLUDE_SCRIPT_MODULES,
get_nn_module_name_from_kwargs, get_nn_mod_test_name, script_method_template)
from torch.testing._internal.common_nn import module_tests, new_module_tests, criterion_tests
# For testing truediv in python 2
from torch.testing._internal.test_module.future_div import div_int_future, div_float_future
from torch.testing._internal.test_module.no_future_div import div_int_nofuture, div_float_nofuture
# Standard library
from collections import defaultdict, namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from textwrap import dedent
from typing import List, Dict, NamedTuple, Optional, Tuple, Union
import copy
import functools
import inspect
import io
import itertools
import math
import numpy as np
import os
import pickle
import pickletools
import random
import re
import shutil
import string
import sys
import tempfile
import types
import typing
import unittest
import warnings
import zipfile
def canonical(graph):
    """Return the canonicalized textual form of a JIT graph (without source ranges)."""
    canonical_graph = torch._C._jit_pass_canonicalize(graph)
    return canonical_graph.str(False)
def LSTMCellF(input, hx, cx, *params):
    """Flattened-argument wrapper around LSTMCell (hx and cx passed separately)."""
    hidden = (hx, cx)
    return LSTMCell(input, hidden, *params)
def doAutodiffCheck(testname):
    """Decide whether autodiff (differentiable-graph) checking applies to `testname`.

    Returns False for tests known to interact badly with the current graph
    executor mode, True otherwise.
    """
    # TODO: setting false on test itself is not working
    if "test_t_" in testname or testname == "test_t":
        return False
    if GRAPH_EXECUTOR == ProfilingMode.SIMPLE:
        return False
    if GRAPH_EXECUTOR == ProfilingMode.LEGACY:
        return True
    # these tests are disabled because BailOut nodes
    # inserted by ProfilingExecutor interfere with
    # subgraph slicing of Differentiable Graphs
    # (stored as a set for O(1) membership; a duplicated
    # 'test_nn_max_pool2d_with_indices' entry was removed)
    test_exceptions = {
        # functional
        'test_nn_dropout',
        'test_nn_log_softmax',
        'test_nn_relu',
        'test_nn_softmax',
        'test_nn_threshold',
        'test_nn_lp_pool2d',
        'test_nn_lp_pool1d',
        'test_nn_gumbel_softmax_hard',
        'test_nn_gumbel_softmax',
        'test_nn_multilabel_soft_margin_loss',
        'test_nn_batch_norm',
        'test_nn_max_pool2d_with_indices',
        # AutogradJitGenerated
        'test___rdiv___constant',
        'test___rdiv___scalar_constant',
        'test_split',
        'test_split_dim',
        'test_split_dim_neg0',
        'test_split_size_list',
        'test_split_size_list_dim',
        'test_split_size_list_dim_neg0',
        'test_split_with_sizes',
        'test_split_with_sizes_dim',
        'test_split_with_sizes_dim_neg0',
        'test_split_with_sizes_size_0',
    }
    return testname not in test_exceptions
# TODO: enable TE in PE when all tests are fixed
# The tensor-expression fuser is only enabled when running under the profiling executor.
torch._C._jit_set_texpr_fuser_enabled(GRAPH_EXECUTOR == ProfilingMode.PROFILING)
# Every mode except LEGACY runs on the profiling executor.
torch._C._jit_set_profiling_executor(GRAPH_EXECUTOR != ProfilingMode.LEGACY)
def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
    """Reference LSTM cell: returns (hy, cy) for input and hidden state (hx, cx)."""
    hx, cx = hidden
    gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)
    in_g, forget_g, cell_g, out_g = gates.chunk(4, 1)
    in_g = torch.sigmoid(in_g)
    forget_g = torch.sigmoid(forget_g)
    cell_g = torch.tanh(cell_g)
    out_g = torch.sigmoid(out_g)
    cy = forget_g * cx + in_g * cell_g
    hy = out_g * torch.tanh(cy)
    return hy, cy
def LSTMCellC(*args, **kwargs):
    """Like LSTMCellF, but with hy and cy concatenated into a single tensor."""
    return torch.cat(LSTMCellF(*args, **kwargs))
def LSTMCellS(x, hx, cx, w_ih, w_hh, b_ih, b_hh):
    """Scriptable LSTM cell using explicit matmuls (bias terms required)."""
    gates = x.mm(w_ih.t()) + hx.mm(w_hh.t()) + b_ih + b_hh
    in_g, forget_g, cell_g, out_g = gates.chunk(4, 1)
    in_g = torch.sigmoid(in_g)
    forget_g = torch.sigmoid(forget_g)
    cell_g = torch.tanh(cell_g)
    out_g = torch.sigmoid(out_g)
    cy = forget_g * cx + in_g * cell_g
    hy = out_g * torch.tanh(cy)
    return hy, cy
# Code reference: https://github.com/pytorch/translate/blob/master/pytorch_translate/rnn_cell.py#L27:44
def MiLSTMCell(x, hx, cx, w_ih, w_hh, alpha, beta_i, beta_h, bias):
    """Multiplicative-integration LSTM cell (Section 2.1, https://arxiv.org/pdf/1606.06630.pdf)."""
    Wx = x.mm(w_ih.t())
    Uz = hx.mm(w_hh.t())
    # Multiplicative integration of the input and hidden projections.
    gates = alpha * Wx * Uz + beta_i * Wx + beta_h * Uz + bias
    # Same gating as a plain LSTM cell from here on.
    in_g, forget_g, cell_g, out_g = gates.chunk(4, 1)
    cy = forget_g.sigmoid() * cx + in_g.sigmoid() * cell_g.tanh()
    hy = out_g.sigmoid() * cy.tanh()
    return hy, cy
def get_lstm_inputs(device, training=False, seq_length=None):
    """Create (input, hx, cx, w_ih, w_hh, b_ih, b_hh) tensors for LSTM-cell tests.

    Gradients are enabled on the returned tensors only when `training` is True.
    """
    shape = (3, 10) if seq_length is None else (seq_length, 3, 10)
    inp = torch.randn(*shape, dtype=torch.float, device=device, requires_grad=training)
    hx = torch.randn(3, 20, dtype=torch.float, device=device, requires_grad=training)
    cx = torch.randn(3, 20, dtype=torch.float, device=device, requires_grad=training)
    # Just to allocate weights with correct sizes
    cell = nn.LSTMCell(10, 20).to(device, torch.float)
    if training:
        params = tuple(cell.parameters())
    else:
        params = tuple(p.requires_grad_(False) for p in cell.parameters())
    return (inp, hx, cx) + params
def get_milstm_inputs(device, training=False):
    """Create (x, hx, cx, ih, hh, alpha, ibeta, hbeta, bias) tensors for MiLSTMCell tests."""
    batch, in_size, hid = 3, 10, 20

    def param(*shape):
        # Only the learnable quantities track gradients (when training).
        return torch.randn(*shape, device=device, dtype=torch.float, requires_grad=training)

    x = torch.randn(batch, in_size, device=device, dtype=torch.float)
    hx = torch.randn(batch, hid, device=device, dtype=torch.float)
    cx = torch.randn(batch, hid, device=device, dtype=torch.float)
    ih = param(4 * hid, in_size)
    hh = param(4 * hid, hid)
    alpha = param(4 * hid)
    ibeta = param(4 * hid)
    hbeta = param(4 * hid)
    bias = param(4 * hid)
    return x, hx, cx, ih, hh, alpha, ibeta, hbeta, bias
def get_fn(file_name, script_path):
    """Dynamically load the module at `script_path` and return its `fn` attribute."""
    import importlib.util
    spec = importlib.util.spec_from_file_location(file_name, script_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module.fn
def get_grad_executor(plan_state, diff_graph_idx=None, skip_check=False):
    """Fetch a grad-executor state from an execution plan.

    When `diff_graph_idx` is None (and `skip_check` is False), validate that the
    forward graph looks like a single differentiable graph, optionally followed
    by a TupleConstruct, or a RequiresGradCheck/If pair.
    """
    if diff_graph_idx is None:
        nodes = list(plan_state.graph.nodes())
        if not skip_check:
            # Profiling-executor bookkeeping nodes do not count.
            nodes = [n for n in nodes
                     if n.kind() not in ("prim::BailOut", "prim::BailoutTemplate")]
            count = len(nodes)
            ok = (
                count == 1
                or (count == 2 and nodes[1].kind() == "prim::TupleConstruct")
                or (count == 2
                    and nodes[0].kind() == "prim::RequiresGradCheck"
                    and nodes[1].kind() == "prim::If")
            )
            if not ok:
                raise RuntimeError("Can't get a grad_executor for a non-differentiable graph")
    grad_executors = list(plan_state.code.grad_executor_states())
    return grad_executors[diff_graph_idx or 0]
def all_backward_graphs(script_module, diff_graph_idx=None):
    """Return copies of every backward graph recorded for the module's grad executor."""
    # Note: for Python 2 the order seems to be unstable
    debug_state = script_module.get_debug_state()
    fwd_plan = get_execution_plan(debug_state)
    grad_state = get_grad_executor(fwd_plan, diff_graph_idx=diff_graph_idx)
    return [plan.graph.copy() for plan in grad_state.execution_plans.values()]
def backward_graph(script_module, diff_graph_idx=None, skip_check=False):
    """Return a copy of the backward graph for the module's grad executor."""
    debug_state = script_module.get_debug_state()
    fwd_plan = get_execution_plan(debug_state)
    grad_state = get_grad_executor(fwd_plan, diff_graph_idx=diff_graph_idx, skip_check=skip_check)
    bwd_plan = get_execution_plan(grad_state)
    # Running JIT passes requires that we own the graph (with a shared_ptr).
    # The debug state struct does not own its graph so we make a copy of it.
    return bwd_plan.graph.copy()
# helper function to get sum of List[Tensor]
def _sum_of_list(tensorlist):
s = 0
for t in tensorlist:
s += t.sum()
return s
# has to be at top level or Pickle complains
class FooToPickle(torch.nn.Module):
    """Minimal module holding a ScriptModule, defined at module scope so pickle can locate it."""

    def __init__(self):
        super().__init__()
        self.bar = torch.jit.ScriptModule()
class TestJit(JitTestCase):
    @unittest.skip("Requires a lot of RAM")
    def test_big(self):
        """Round-trip (export/import) a module whose tensors straddle the 4GB
        archive offset boundary and check parameters survive unchanged."""
        m = torch.jit.ScriptModule()
        gig = int(1024 * 1024 * 1024 / 4)
        # a small tensor in the first 4GB
        m.v0 = nn.Parameter(torch.full((2,), 1, dtype=torch.float))
        # a large tensor in the first 4GB that ends outside of it
        m.v1 = nn.Parameter(torch.full((5, gig), 2, dtype=torch.float))
        # a small tensor in >4GB space
        m.v2 = nn.Parameter(torch.full((2,), 3, dtype=torch.float))
        # a large tensor in the > 4GB space
        m.v3 = nn.Parameter(torch.full((5, gig), 4, dtype=torch.float))
        m2 = self.getExportImportCopy(m)
        self.assertEqual(tuple(m.parameters()), tuple(m2.parameters()))
    def test_inferred_as_tensor(self):
        """An unannotated parameter is inferred as Tensor; using it where an int
        is required must raise with a helpful message."""
        with self.assertRaisesRegex(RuntimeError, "Inferred the value for argument 'dim' to be of type 'Tensor' "
                                                  "because it was not annotated with an explicit type"):
            @torch.jit.script
            def dot(points, query, dim):
                return (points * query).sum(dim)
def test_constants_pkl(self):
# This test asserts that the serialization archive includes a `constants.pkl`
# file. This file is used by `torch.load` to determine whether a zip file
# is a normal eager-mode serialization zip or a jit serialization zip. If
# you are deleting `constants.pkl`, make sure to update `torch.serialization.load`
# so it is still able to figure out which is which.
@torch.jit.script
def fn(x):
return x
buf = io.BytesIO()
torch.jit.save(fn, buf)
buf.seek(0)
files = zipfile.ZipFile(buf).filelist
self.assertTrue(any(['archive/constants.pkl' == f.filename for f in files]))
    def test_script_fn_pkl(self):
        """Pickling a ScriptFunction is unsupported and must raise PickleError."""
        with self.assertRaisesRegex(pickle.PickleError, "ScriptFunction cannot be pickled"):
            @torch.jit.script
            def fn(x: torch.Tensor) -> torch.Tensor:
                return x
            pkl_fn = pickle.dumps(fn, protocol=0)
def test_restore_device(self):
class M(torch.jit.ScriptModule):
def __init__(self, cpu_device_str):
super(M, self).__init__()
self.p0 = nn.Parameter(torch.tensor([0.3], dtype=torch.float,
device=cpu_device_str))
self.b0 = torch.tensor([0.9], dtype=torch.float,
device=cpu_device_str)
# main purpose is checking map_location works
m = M("cpu")
m2 = self.getExportImportCopy(m)
self.assertEqual(tuple(m.parameters()), tuple(m2.parameters()))
self.assertEqual(tuple(m.buffers()), tuple(m2.buffers()))
self.assertFalse(m2.p0.is_cuda)
self.assertFalse(m2.b0.is_cuda)
    @unittest.skipIf(not RUN_CUDA, "restore device requires CUDA")
    def test_restore_device_cuda(self):
        """Export/import restores saved CUDA placement; map_location can remap
        params/buffers to CPU or another GPU, and results stay identical."""
        class MyModule(torch.jit.ScriptModule):
            def __init__(self):
                super(MyModule, self).__init__()
                self.register_buffer('b0', torch.randn(1, 3))
                self.p0 = nn.Parameter(torch.randn(2, 3))
            @torch.jit.script_method
            def forward(self, x):
                return x + self.b0 + self.p0
        m = MyModule()
        # place the module on the *last* visible device
        m.cuda(torch.cuda.device_count() - 1)
        cuda_device_str = 'cuda:' + str(torch.cuda.device_count() - 1)
        self.assertTrue(m.p0.is_cuda)
        self.assertTrue(m.b0.is_cuda)
        # restore to the saved devices
        m2 = self.getExportImportCopy(m)
        self.assertEqual(tuple(m.parameters()), tuple(m2.parameters()))
        self.assertEqual(tuple(m.buffers()), tuple(m2.buffers()))
        self.assertEqual(str(m2.p0.device), cuda_device_str)
        self.assertEqual(str(m2.b0.device), cuda_device_str)
        # restore all to cpu using string
        cpu_device_str = 'cpu'
        m3 = self.getExportImportCopy(m, map_location=cpu_device_str)
        self.assertEqual(str(m3.p0.device), cpu_device_str)
        self.assertEqual(str(m3.b0.device), cpu_device_str)
        # restore all to first gpu using device
        m4 = self.getExportImportCopy(
            m3, map_location=torch.device('cuda:0'))
        self.assertEqual(str(m4.p0.device), 'cuda:0')
        self.assertEqual(str(m4.b0.device), 'cuda:0')
        # compute and compare the results
        input = torch.rand(2, 3).cuda(torch.cuda.device_count() - 1)
        origin_result = m(input)
        self.assertEqual(origin_result, m2(input))
        self.assertEqual(origin_result, m3(input.cpu()))
        self.assertEqual(origin_result, m4(input.cuda(0)))
def test_trace_retains_train(self):
class M(torch.nn.Module):
def forward(self, x):
return x
m = M()
m.eval()
tm = torch.jit.trace(m, (torch.rand(3)))
self.assertEqual(tm.training, m.training)
    @unittest.skipIf(not RUN_CUDA, "restore device requires CUDA")
    def test_restore_shared_storage_on_cuda(self):
        """Views carved from one shared storage must still share storage after
        being moved to CUDA via map_location on import."""
        class Foo(torch.jit.ScriptModule):
            def __init__(self):
                super(Foo, self).__init__()
                whole_tensor = torch.randn(4, 5, dtype=torch.float, device='cpu')
                self.p0 = nn.Parameter(whole_tensor.narrow(0, 0, 1))
                self.register_buffer('b0', whole_tensor.narrow(0, 3, 1))
        m = Foo()
        m2 = self.getExportImportCopy(m, map_location=torch.device('cuda:0'))
        self.assertEqual(tuple(m.parameters()), tuple(m2.parameters()))
        self.assertEqual(tuple(m.buffers()), tuple(m2.buffers()))
        self.assertTrue(m2.p0.is_cuda)
        self.assertTrue(m2.b0.is_cuda)
        self.assertTrue(m2.p0.is_shared())
        self.assertTrue(m2.b0.is_shared())
        # p0 and b0 were narrowed from one tensor; the copy must preserve that aliasing
        self.assertEqual(m2.b0.storage().data_ptr(), m2.p0.storage().data_ptr())
def test_add_relu_fusion(self):
class M(torch.nn.Module):
def __init__(self, relu_op):
super(M, self).__init__()
self.relu_op = relu_op
def forward(self, a, b, c):
tmp = torch.add(a, b)
x = self.relu_op(tmp)
d = torch.add(a, c)
return x + d
a = torch.rand((7, 11))
a = a * -10
a = a + 5
b = torch.rand((7, 11))
c = torch.rand((7, 11))
m = torch.jit.script(M(torch.relu))
orig_res = m(a, b, c)
torch._C._jit_pass_fuse_add_relu(m.graph)
buffer = io.BytesIO()
torch.jit.save(m, buffer)
buffer.seek(0)
m = torch.jit.load(buffer)
new_res = m(a, b, c)
FileCheck().check_not("aten::relu(") \
.check("aten::_add_relu(") \
.run(m.graph)
torch.testing.assert_close(orig_res, new_res)
# add, relu_
a = torch.rand((7, 11))
a = a * -10
a = a + 5
b = torch.rand((7, 11))
c = torch.rand((7, 11))
m = torch.jit.script(M(torch.relu_))
orig_res = m(a, b, c)
torch._C._jit_pass_fuse_add_relu(m.graph)
buffer = io.BytesIO()
torch.jit.save(m, buffer)
buffer.seek(0)
m = torch.jit.load(buffer)
new_res = m(a, b, c)
FileCheck().check_not("aten::relu_(") \
.check("aten::_add_relu(") \
.run(m.graph)
torch.testing.assert_close(orig_res, new_res)
class Madd_(torch.nn.Module):
def __init__(self, relu_op):
super(Madd_, self).__init__()
self.relu_op = relu_op
def forward(self, a, b):
x = a.add_(b)
x = self.relu_op(x)
return x
# add_, relu_
a = torch.rand((7, 11))
a = a * -10
a = a + 5
b = torch.rand((7, 11))
# Because in place add_ will overwrite a
a_copy = a.clone()
m = torch.jit.script(Madd_(torch.relu_))
orig_res = m(a, b)
torch._C._jit_pass_fuse_add_relu(m.graph)
buffer = io.BytesIO()
torch.jit.save(m, buffer)
buffer.seek(0)
m = torch.jit.load(buffer)
new_res = m(a_copy, b)
FileCheck().check_not("aten::add_(") \
.check_not("aten::relu_(") \
.check("aten::_add_relu_(") \
.run(m.graph)
torch.testing.assert_close(orig_res, new_res)
# Since _add_relu_ does inplace mutation ensure
# a_copy is modified
torch.testing.assert_close(orig_res, a_copy)
class Madd_out(torch.nn.Module):
def __init__(self, relu_op):
super(Madd_out, self).__init__()
self.relu_op = relu_op
def forward(self, a, b):
x = torch.add(a, b, out=a)
x = self.relu_op(x)
return x
a = torch.rand((7, 11))
a = a * -10
a = a + 5
b = torch.rand((7, 11))
# add_out, relu_
a = torch.rand((7, 11))
a = a * -10
a = a + 5
b = torch.rand((7, 11))
# Because in place add_ will overwrite a
a_copy = a.clone()
m = torch.jit.script(Madd_out(torch.relu_))
orig_res = m(a, b)
torch._C._jit_pass_fuse_add_relu(m.graph)
buffer = io.BytesIO()
torch.jit.save(m, buffer)
buffer.seek(0)
m = torch.jit.load(buffer)
new_res = m(a_copy, b)
FileCheck().check_not("aten::add(") \
.check_not("aten::relu_(") \
.check("aten::_add_relu(") \
.run(m.graph)
torch.testing.assert_close(orig_res, new_res)
# Since _add_relu_ with out=a does inplace mutation ensure
# a_copy is modified
torch.testing.assert_close(orig_res, a_copy)
def test_repeat_interleave_script(self):
def fn(input: torch.Tensor, repeats: torch.Tensor) -> torch.Tensor:
output = input.repeat_interleave(repeats)
return output
fn_scripted = torch.jit.script(fn)
input = torch.tensor([5, 7], dtype=torch.int64)
repeats = torch.tensor([3, 6], dtype=torch.int64)
output = fn(input, repeats)
output_scripted = fn_scripted(input, repeats)
self.assertEqual(output_scripted, output)
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "Simple executor doesn't have shape information")
def test_peephole_optimize_shape_ops(self):
def test_input(func, input, result):
# if result == 2 we will trigger a bailout and
# the unprofiled graph should return the correct result
self.assertEqual(func(input, profile_and_replay=True), result)
gre = func.graph_for(input)
FileCheck().check_not("prim::If").run(gre)
def test_dim():
@torch.jit.script
def func(x):
if x.dim() == 1:
return 1
else:
return 2
test_input(func, torch.tensor([0.5]), 1)
test_input(func, torch.tensor([[0.5]]), 2)
test_dim()
def test_size_index():
@torch.jit.script
def func(x):
if x.size(0) == 1:
return 1
else:
return 2
test_input(func, torch.rand([1, 2]), 1)
test_input(func, torch.rand([1, 3]), 1)
@torch.jit.script
def neg_index(x):
if x.size(-2) == 1:
return 1
else:
return 2
test_input(neg_index, torch.rand([1, 2]), 1)
test_input(neg_index, torch.rand([1, 3]), 1)
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
test_size_index()
def test_dtype():
@torch.jit.script
def func(x):
if x.dtype == torch.float32:
return 1
else:
return 2
test_input(func, torch.tensor(0.5, dtype=torch.float32), 1)
test_input(func, torch.tensor(0.5, dtype=torch.int64), 2)
test_dtype()
def test_is_floating_poiint():
@torch.jit.script
def func(x):
if x.is_floating_point():
return 1
else:
return 2
test_input(func, torch.tensor(0.5, dtype=torch.float32), 1)
test_input(func, torch.tensor(0.5, dtype=torch.int64), 2)
test_is_floating_poiint()
def test_device():
@torch.jit.script
def func_1(x):
if x.device == torch.device('cuda:0'):
a = 0
else:
a = 1
return a
@torch.jit.script
def func_2(x):
if x.is_cuda:
a = 0
else:
a = 1
return a
test_input(func_1, torch.tensor(0.5), 1)
test_input(func_2, torch.tensor(0.5), 1)
if RUN_CUDA:
test_input(func_1, torch.tensor(0.5, device="cuda:0"), 0)
test_input(func_2, torch.tensor(0.5, device="cuda:0"), 0)
test_device()
def test_attrs(self):
def foo(x):
return (
# x.dtype, TODO: dtype long -> instance conversion
x.device,
x.shape,
x.is_cuda,
x.is_mkldnn,
x.is_quantized,
x.requires_grad,
x.T,
x.mT,
x.H,
x.mH
# x.layout TODO: layout long -> instance conversion
)
scripted = torch.jit.script(foo)
x = torch.rand(3, 4)
self.assertEqual(scripted(x), foo(x))
def test_layout(self):
@torch.jit.script
def check(x, y):
return x.layout == y.layout
x = torch.rand(3, 4)
y = torch.rand(3, 4)
self.assertTrue(check(x, y))
def test_matrix_transpose(self):
@torch.jit.script
def check(x):
return torch.equal(x.mT, x.transpose(-2, -1))
x = torch.rand(3, 4)
self.assertTrue(check(x))
def test_transpose(self):
@torch.jit.script
def check(x):
return torch.equal(x.T, x.t())
x = torch.rand(3, 4)
self.assertTrue(check(x))
def test_matrix_conj_transpose(self):
@torch.jit.script
def check(x):
return torch.equal(x.mH, x.transpose(-2, -1).conj())
x = torch.rand(3, 4)
self.assertTrue(check(x))
x = make_tensor((3, 4), device="cpu", dtype=torch.complex64)
self.assertTrue(check(x))
def test_conj_transpose(self):
@torch.jit.script
def check(x):
return torch.equal(x.H, x.t().conj())
x = torch.rand(3, 4)
self.assertTrue(check(x))
x = make_tensor((3, 4), device="cpu", dtype=torch.complex64)
self.assertTrue(check(x))
def test_T_mT_H_mH(self):
def T(x):
return x.mT
def mT(x):
return x.mT
def H(x):
return x.H
def mH(x):
return x.mH
x = torch.rand(3, 4)
y = make_tensor((3, 4), device="cpu", dtype=torch.complex64)
self.checkScript(T, (x, ))
self.checkScript(mT, (x, ))
self.checkScript(H, (x, ))
self.checkScript(mH, (x, ))
self.checkScript(T, (y, ))
self.checkScript(mT, (y, ))
self.checkScript(H, (y, ))
self.checkScript(mH, (y, ))
def test_nn_conv(self):
class Mod(nn.Module):
def __init__(self, conv):
super().__init__()
self.conv = conv
def forward(self, input):
return self.conv(input)
inputs = [
# Conv
(Mod(nn.Conv1d(16, 33, 3, stride=2)), torch.randn(20, 16, 5)),
(Mod(nn.Conv2d(16, 33, 3, stride=2)), torch.randn(20, 16, 5, 10)),
(Mod(nn.Conv3d(16, 33, 3, stride=2)), torch.randn(20, 16, 3, 5, 4)),
# ConvTransposed
(Mod(nn.ConvTranspose1d(16, 33, 3, stride=2)), torch.randn(20, 16, 5)),
(Mod(nn.ConvTranspose2d(16, 33, 3, stride=2)), torch.randn(20, 16, 5, 10)),
(Mod(nn.ConvTranspose3d(16, 33, 3, stride=2)), torch.randn(20, 16, 3, 5, 4)),
]
for m, inp in inputs:
self.checkModule(m, (inp,))
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, 'Not implemented for Simple or Legacy')
    def test_debug_flush_compilation_cache(self):
        """_debug_flush_compilation_cache drops optimized plans, making
        get_debug_state fail with an internal assert afterwards."""
        def foo(x):
            return x + 2
        class Mod(nn.Module):
            def __init__(self):
                super(Mod, self).__init__()
            def forward(self, t):
                return t + 2
        m = torch.jit.script(Mod())
        x = torch.rand(1, 10)
        with enable_profiling_mode_for_profiling_tests():
            jitted = self.checkScript(foo, (x,))
            # shouldn't throw
            states = jitted.get_debug_state()
            # after flushing there should no longer be an optimized plan
            jitted._debug_flush_compilation_cache()
            with self.assertRaisesRegex(RuntimeError, "INTERNAL ASSERT FAILED"):
                states = jitted.get_debug_state()
            NUM_RUNS = 1
            with num_profiled_runs(NUM_RUNS):
                m(x)
                m(x)
                fwd = m._c._get_method("forward")
                states = m.get_debug_state()
                # after flushing there should no longer be an optimized plan
                fwd._debug_flush_compilation_cache()
                with self.assertRaisesRegex(RuntimeError, "INTERNAL ASSERT FAILED"):
                    states = m.get_debug_state()
def test_numel(self):
@torch.jit.script
def get_numel_script(x):
return x.numel()
x = torch.rand(3, 4)
numel = get_numel_script(x)
self.assertEqual(numel, x.numel())
def test_element_size(self):
@torch.jit.script
def get_element_size_script(x):
return x.element_size()
x = torch.rand(3, 4)
element_size = get_element_size_script(x)
self.assertEqual(element_size, x.element_size())
def test_Sequential(self):
class Seq(nn.Module):
def __init__(self):
super(Seq, self).__init__()
self.seq = nn.Sequential(nn.Linear(10, 20), nn.Linear(20, 30))
@torch.jit.script_method
def forward(self, x):
for l in self.seq:
x = l(x)
return x
m = torch.jit.script(Seq())
assert m.graph # ensure jit was able to compile
def test_ModuleList(self):
class Mod(nn.Module):
def __init__(self):
super(Mod, self).__init__()
self.model = nn.ModuleList([nn.Linear(10, 10) for _ in range(10)])
self.model += (nn.Linear(10, 20),)
self.model.append(nn.Linear(20, 30))
self.model.extend([nn.Linear(30, 40), nn.Linear(40, 50)])
def forward(self, v):
for m in self.model:
v = m(v)
return v
m = torch.jit.script(Mod())
assert m.graph # ensure jit was able to compile
    def test_disabled(self):
        """With scripting globally disabled, trace/script are no-ops that return
        the original callable, and script_method behaves like a plain method."""
        torch.jit._state.disable()
        try:
            def f(x, y):
                return x + y
            self.assertIs(torch.jit.trace(f, (torch.randn(2, 2), torch.randn(2, 2))), f)
            self.assertIs(torch.jit.script(f), f)
            class MyModule(torch.jit.ScriptModule):
                @torch.jit.script_method
                def method(self, x):
                    return x
            # XXX: Unfortunately ScriptModule won't simply become Module now,
            # because that requires disabling the JIT at startup time, which
            # we can't do in here.
            # We need to or those two conditions to make it work with all versions of Python
            self.assertTrue(inspect.ismethod(MyModule.method) or inspect.isfunction(MyModule.method))
        finally:
            # re-enable so later tests are unaffected
            torch.jit._state.enable()
    def test_train_eval(self):
        """self.training is respected by scripted submodules and toggles with
        eval(); also checks batchnorm and dropout parity in eval mode."""
        class Sub(nn.Module):
            def forward(self, input):
                if self.training:
                    return input
                else:
                    return -input
        class MyModule(torch.jit.ScriptModule):
            def __init__(self, module):
                super(MyModule, self).__init__()
                self.module = module
            @torch.jit.script_method
            def forward(self, input):
                return self.module(input) + 1
        m = MyModule(Sub())
        input = torch.rand(3, 4)
        self.assertEqual(input + 1, m(input))
        m.eval()
        self.assertEqual(-input + 1, m(input))
        # test batchnorm and dropout train/eval
        input = torch.randn(6, 10)
        batchnorm = nn.BatchNorm1d(10)
        dropout = nn.Dropout(p=0.2)
        m_batchnorm = MyModule(batchnorm)
        self.assertEqual(batchnorm(input) + 1, m_batchnorm(input))
        batchnorm.eval()
        m_batchnorm.eval()
        self.assertEqual(batchnorm(input) + 1, m_batchnorm(input))
        # dropout is only deterministic (identity) in eval mode
        m_dropout = MyModule(dropout)
        dropout.eval()
        m_dropout.eval()
        self.assertEqual(dropout(input) + 1, m_dropout(input))
def test_nn_lp_pool2d(self):
class Mod(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = torch.nn.LPPool2d(2, 3)
self.n = torch.nn.LPPool2d(2, (7, 1))
def forward(self, x):
return (self.l(x),
self.n(x),
torch.nn.functional.lp_pool2d(x, float(2), 3),
torch.nn.functional.lp_pool2d(x, 2, 3),
torch.nn.functional.lp_pool2d(x, float(2), (7, 1)))
self.checkModule(Mod(), (torch.rand(1, 3, 7, 7),))
def test_nn_lp_pool1d(self):
class Mod(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = torch.nn.LPPool1d(2, 3)
self.n = torch.nn.LPPool1d(2, 7)
def forward(self, x):
return (self.l(x),
self.n(x),
torch.nn.functional.lp_pool1d(x, float(2), 3),
torch.nn.functional.lp_pool1d(x, 2, 3),
torch.nn.functional.lp_pool1d(x, float(2), 7))
self.checkModule(Mod(), (torch.rand(1, 3, 7),))
def test_nn_padding_functional(self):
class Mod(nn.Module):
def __init__(self, *pad):
super().__init__()
self.pad = pad
def forward(self, x):
return F.pad(x, self.pad, mode='constant', value=3.5)
inputs = [
(Mod(1, 2), torch.randn(1, 3, 4)), # 1D
(Mod(1, 2, 3, 4), torch.randn(1, 3, 4)), # 2D
(Mod(1, 2, 3, 4, 5, 6), torch.randn(1, 3, 4)), # 3D
]
for m, inp in inputs:
self.checkModule(m, (inp,))
def test_nn_padding(self):
class Mod(nn.Module):
def __init__(self, padding):
super().__init__()
self.padding = padding
def forward(self, input):
return self.padding(input)
inputs = [
(Mod(nn.ConstantPad1d(2, 3.5)), torch.randn(1, 2, 4)),
(Mod(nn.ConstantPad2d(2, 3.5)), torch.randn(1, 2, 2)),
(Mod(nn.ConstantPad3d(3, 3.5)), torch.randn(16, 3, 10, 20, 30)),
(Mod(nn.ReflectionPad1d(2)), torch.arange(8, dtype=torch.float).reshape(1, 2, 4)),
(Mod(nn.ReflectionPad2d(2)), torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)),
(Mod(nn.ReflectionPad3d(3)), torch.randn(16, 3, 8, 32, 48)),
(Mod(nn.ReplicationPad1d(2)), torch.arange(8, dtype=torch.float).reshape(1, 2, 4)),
(Mod(nn.ReplicationPad2d(2)), torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)),
(Mod(nn.ReplicationPad3d(3)), torch.randn(16, 3, 8, 32, 48)),
(Mod(nn.ZeroPad2d(2)), torch.randn(1, 1, 3, 3))
]
for m, inp in inputs:
self.checkModule(m, (inp,))
    def test_script_autograd_grad(self):
        """torch.autograd.grad (incl. grad_outputs, allow_unused and retain_graph)
        is scriptable and matches eager results via checkScript."""
        def test_simple_grad(x, y):
            # type: (Tensor, Tensor) -> List[Optional[Tensor]]
            z = x + 2 * y + x * y
            return torch.autograd.grad((z.sum(), ), (x, y))
        def test_simple_grad_with_grad_outputs(x, y):
            # type: (Tensor, Tensor) -> List[Optional[Tensor]]
            z = x + 2 * y + x * y
            grad_outputs = torch.jit.annotate(List[Optional[torch.Tensor]], [torch.ones((2, 2)), ])
            return torch.autograd.grad((z, ), (x, y), grad_outputs)
        def test_one_output_not_requires_grad(x, y):
            # type: (Tensor, Tensor) -> List[Optional[Tensor]]
            # z does not depend on x, so grad for x is only legal with allow_unused=True
            z = 2 * y + y
            return torch.autograd.grad((z.sum(),), (x, y), allow_unused=True)
        def test_retain_graph(x, y):
            # type: (Tensor, Tensor) -> None
            z = x + 2 * y + x * y
            torch.autograd.grad((z.sum(), ), (x, y), retain_graph=True)
            torch.autograd.grad((z.sum(), ), (x, y))
        x = torch.randn(2, 2, requires_grad=True)
        y = torch.randn(2, 2, requires_grad=True)
        self.checkScript(test_simple_grad, (x, y), inputs_requires_grad=True)
        self.checkScript(test_simple_grad_with_grad_outputs, (x, y), inputs_requires_grad=True)
        self.checkScript(test_one_output_not_requires_grad, (x, y), inputs_requires_grad=True)
        self.checkScript(test_retain_graph, (x, y), inputs_requires_grad=True)
    def test_script_backward(self):
        """Scripted Tensor.backward / torch.autograd.backward produce the same
        gradients as the eager versions."""
        def checkBackwardScript(fn, inputs):
            scripted_fn = torch.jit.script(fn)
            FileCheck().check("torch.autograd.backward").run(scripted_fn.code)
            # fresh leaves so eager and scripted runs accumulate grads independently
            recording_inputs = do_input_map(lambda t: t.detach().requires_grad_(), inputs)
            fn(*inputs)
            scripted_fn(*recording_inputs)
            for inp1, inp2 in zip(inputs, recording_inputs):
                self.assertEqual(inp1.grad, inp2.grad)
        def test_tensor_backward(input):
            # type: (Tensor) -> None
            output = torch.relu(input)
            output = output.softmax(0)
            sum_out = output.sum()
            sum_out.backward()
        def test_torch_autograd_backward(input):
            # type: (Tensor) -> None
            output = torch.relu(input)
            output = output.softmax(0)
            torch.autograd.backward(output.sum())
        def test_torch_autograd_backward_with_grad_tensors(input):
            # type: (Tensor) -> None
            output = torch.relu(input)
            output = output.softmax(0)
            grad_outputs = torch.jit.annotate(List[Optional[torch.Tensor]], [torch.ones((2, 2)), ])
            torch.autograd.backward((output,), grad_outputs)
        inp = torch.randn(2, 2, requires_grad=True)
        checkBackwardScript(test_tensor_backward, (inp,))
        checkBackwardScript(test_torch_autograd_backward, (inp,))
        checkBackwardScript(test_torch_autograd_backward_with_grad_tensors, (inp,))
    def test_script_backward_twice(self):
        """Backward through a DifferentiableGraph a second time requires
        retain_graph=True, mirroring eager autograd semantics."""
        def checkBackwardTwiceScript(fn, inputs, retain_graph_=False):
            # NOTE(review): this disables the profiling executor globally and never
            # restores it -- later tests in this process run on the legacy executor.
            torch._C._jit_set_profiling_executor(False)
            with torch.jit.optimized_execution(True):
                scripted_fn = torch.jit.script(fn, inputs)
                FileCheck().check("prim::DifferentiableGraph").run(scripted_fn.graph_for(*inputs))
                result = scripted_fn(*inputs)
                result.sum().backward(retain_graph=retain_graph_)
                if not retain_graph_:
                    self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True',
                                           lambda: result.sum().backward())
                else:
                    result.sum().backward()
        def test_script_backward_twice_with_saved_values(input1, input2):
            # type: (Tensor, Tensor) -> Tensor
            tmp1 = torch.mul(input1, input2)
            tmp2 = torch.abs(tmp1)
            if torch.equal(input1, input2):
                tmp2 = torch.acos(tmp2)
            else:
                tmp2 = torch.atan(tmp2)
            result = torch.add(tmp2, input2)
            return result
        inp1 = torch.randn(2, 2, requires_grad=True)
        inp2 = torch.randn(2, 2, requires_grad=True)
        checkBackwardTwiceScript(test_script_backward_twice_with_saved_values, (inp1, inp2), False)
        checkBackwardTwiceScript(test_script_backward_twice_with_saved_values, (inp1, inp2), True)
    def test_diff_subgraph_clones_constants(self):
        """create_autodiff_subgraphs must clone a shared constant into the
        differentiable subgraph instead of referencing the outer graph's node."""
        @torch.jit.script
        def f(x, y):
            return x + x + y + x + y + x + y + x + y + x
        def count_constants(graph):
            return sum(node.kind() == 'prim::Constant' for node in graph.nodes())
        graph = f.graph.copy()
        self.run_pass('cse', graph)
        self.run_pass('create_autodiff_subgraphs', graph)
        nodes = list(graph.nodes())
        # one constant in the outer graph, and an independent clone inside the subgraph
        self.assertEqual(count_constants(graph), 1)
        self.assertEqual(count_constants(nodes[1].g('Subgraph')), 1)
    # TODO: adapt this test to check that GraphExecutor treats them differently
    @unittest.skip("Need to be adjusted to Graph Executor")
    def test_arg_configurations(self):
        """Different arg configurations should trigger different traces"""
        x = Variable(torch.FloatTensor(4, 4).uniform_())
        x_double = Variable(x.data.double())
        x_grad = Variable(x.data.clone(), requires_grad=True)
        y = Variable(torch.randn(4))
        # each entry differs in dtype, requires_grad, shape or nesting
        configurations = [
            (x,),
            (x_double,),
            (x_grad,),
            (y,),
            ([x, x],),
            ([x, y],),
        ]
        if torch.cuda.is_available():
            x_cuda = Variable(x.data.cuda())
            configurations += [
                (x_cuda,),
                ([x, x_cuda],),
                ([x_cuda, x],),
                ([[x_cuda, x]],),
            ]
            if torch.cuda.device_count() > 1:
                x_cuda_1 = Variable(x.data.cuda(1))
                configurations += [
                    (x_cuda_1,),
                    ([x_cuda, x_cuda_1],),
                ]
        @torch.jit.compile(nderivs=0)
        def fn(*args):
            in_vars, _ = torch._C._jit_flatten(args)
            return in_vars[0] + 1
        for i, config in enumerate(configurations):
            self.assertFalse(fn.has_trace_for(*config))
            fn(*config)
            self.assertTrue(fn.has_trace_for(*config))
            for unk_config in configurations[i + 1:]:
                self.assertFalse(fn.has_trace_for(*unk_config))
        self.assertEqual(fn.hits, 0)
def test_torch_sum(self):
def fn(x):
return torch.sum(x)
def fn1(x, dim: int):
return torch.sum(x, dim)
x = torch.randn(3, 4)
self.checkScript(fn, (x, ))
self.checkScript(fn1, (x, 1, ))
self.checkScript(fn1, (x, 0, ))
    def test_cse(self):
        """Common-subexpression elimination dedupes the repeated (x + y) products
        and tanh calls in a traced graph."""
        x = torch.tensor([0.4, 0.3], requires_grad=True)
        y = torch.tensor([0.7, 0.5], requires_grad=True)
        def fn(x, y):
            # (x + y) and tanh(w) are each computed multiple times on purpose
            w = (x + y) * (x + y) * (x + y)
            t = torch.tanh(w) + torch.tanh(w)
            z = (x + y) * (x + y) * (x + y) + t
            return z
        g, _ = torch.jit._get_trace_graph(fn, (x, y))
        self.run_pass('cse', g)
        do_exactly = True
        FileCheck().check_count("add", 1).check_count("mul", 2, do_exactly) \
            .check_count("tanh", 1, do_exactly).check_count("add", 2, do_exactly).check_next("return") \
            .run(str(g))
        self.assertExportImport(g, (x, y))
    def test_cse_not_introduce_aliasing(self):
        """CSE must not merge tensor expressions when doing so would alias graph
        outputs, but may merge non-aliasing types like int."""
        @torch.jit.script
        def tensor_alias_outputs(x):
            return x + x, x + x
        self.run_pass('cse', tensor_alias_outputs.graph)
        # both adds must survive: merging them would alias the two tensor outputs
        FileCheck().check_count("aten::add", 2).run(tensor_alias_outputs.graph)
        @torch.jit.script
        def ints_alias_outputs(x):
            # type: (int) -> Tuple[int, int]
            return x + x, x + x
        # non-aliasing types can be CSEd
        self.run_pass('cse', ints_alias_outputs.graph)
        FileCheck().check_count("aten::add", 1, exactly=True).run(ints_alias_outputs.graph)
    def test_recursive_cse(self):
        """CSE recurses into sub-blocks: the add inside block0 is replaced by the
        identical add already computed in the outer block."""
        input_str = """
graph(%x : Tensor,
      %y : Tensor,
      %20 : int):
  %2 : int = prim::Constant[value=1]()
  %3 : Tensor = aten::add(%x, %y, %2)
  %4 : int = aten::add(%2, %20)
  %5 : bool = aten::Bool(%4)
  %z : int = prim::If(%5)
    # CHECK: block
    block0():
      # CHECK-NOT: aten::add
      %z.1 : int = aten::add(%2, %20)
      -> (%z.1)
    block1():
      -> (%2)
  return (%z)
"""
        graph = parse_ir(input_str)
        self.run_pass('cse', graph)
        # the CHECK directives embedded in the IR drive this FileCheck run
        FileCheck().run(input_str, graph)
    def test_pattern_based_rewrite(self):
        """Exercises _jit_pass_custom_pattern_based_rewrite_graph: chained
        fusion, overlapping matches, multi-op patterns, one-to-many
        replacement, and replacing a node with one of its own inputs."""
        # mul(mul(mul(mul(x,y),z),x),y) --> mul(mul(mulmul(x,y,z), x), y) -->
        # --> mulmul(mulmul(x,y,z), x, y)
        input_str = """
graph(%x, %y, %z):
    # CHECK-NOT: aten::mul
    # CHECK: my::fused_mulmul
    %t = aten::mul(%x, %y)
    %p = aten::mul(%t, %z)
    # CHECK: my::fused_mulmul
    %u = aten::mul(%p, %x)
    %o = aten::mul(%u, %y)
    return (%o)"""
        graph = parse_ir(input_str)
        torch._C._jit_pass_custom_pattern_based_rewrite_graph("""
graph(%a, %b, %c):
  %q = aten::mul(%a, %b)
  %r = aten::mul(%q, %c)
  return (%r)""", """
graph(%a, %b, %c):
  %r = my::fused_mulmul(%a, %b, %c)
  return (%r)""", graph)
        FileCheck().run(input_str, graph)

        # Check that overlapping matches are handled correctly
        # mul(mul(mul(x,y),z),x) --> mul(mulmul(x,y,z), x)
        input_str = """
graph(%x, %y, %z):
    # CHECK-NOT: aten::mul
    # CHECK: my::fused_mulmul
    %t = aten::mul(%x, %y)
    %p = aten::mul(%t, %z)
    # CHECK-NEXT: aten::mul
    %u = aten::mul(%p, %x)
    return (%u)"""
        graph = parse_ir(input_str)
        torch._C._jit_pass_custom_pattern_based_rewrite_graph("""
graph(%a, %b, %c):
  %q = aten::mul(%a, %b)
  %r = aten::mul(%q, %c)
  return (%r)""", """
graph(%a, %b, %c):
  %r = my::fused_mulmul(%a, %b, %c)
  return (%r)""", graph)
        FileCheck().run(input_str, graph)

        # Check add(mul(x,y),z) --> muladd(x,y,z) replacement
        input_str = """
graph(%x, %y, %z):
    # CHECK-NOT: aten::mul
    # CHECK-NOT: aten::add
    %c = prim::Const[value=1]()
    %t = aten::mul(%x, %y)
    %p = aten::add(%t, %z, %c)
    # CHECK: my::muladd
    # CHECK-NEXT: return
    return (%p)"""
        graph = parse_ir(input_str)
        torch._C._jit_pass_custom_pattern_based_rewrite_graph("""
graph(%a, %b, %c, %d):
  %q = aten::mul(%a, %b)
  %r = aten::add(%q, %c, %d)
  return (%r)""", """
graph(%a, %b, %c, %d):
  %r = my::muladd(%a, %b, %c, %d)
  return (%r)""", graph)
        FileCheck().run(input_str, graph)

        # Check add(mul(x,y),z) --> sub(add(x,y),z) replacement
        input_str = """
graph(%x, %y, %z):
    # CHECK-NOT: aten::mul
    %c = prim::Const[value=1]()
    # CHECK: aten::add
    %t = aten::mul(%x, %y)
    # CHECK-NEXT: aten::sub
    %p = aten::add(%t, %z, %c)
    # CHECK-NOT: aten::add
    # CHECK-NEXT: return
    return (%p)"""
        graph = parse_ir(input_str)
        torch._C._jit_pass_custom_pattern_based_rewrite_graph("""
graph(%a, %b, %c, %d):
  %q = aten::mul(%a, %b)
  %r = aten::add(%q, %c, %d)
  return (%r)""", """
graph(%a, %b, %c, %d):
  %q = aten::add(%a, %b, %d)
  %r = aten::sub(%q, %c, %d)
  return (%r)""", graph)
        FileCheck().run(input_str, graph)

        # Check mul(x,y) --> x replacement
        input_str = """
graph(%x, %y, %z):
    %c = prim::Const[value=1]()
    # CHECK-NOT: aten::mul
    %t = aten::mul(%x, %y)
    # CHECK: aten::add(%x, %z
    %p = aten::add(%t, %z, %c)
    # CHECK-NEXT: return
    return (%p)"""
        graph = parse_ir(input_str)
        torch._C._jit_pass_custom_pattern_based_rewrite_graph("""
graph(%Pa, %Pb):
  %Pq = aten::mul(%Pa, %Pb)
  return (%Pq)""", """
graph(%Ra, %Rb):
  return (%Ra)""", graph)
        FileCheck().run(input_str, graph)
    @_tmp_donotuse_dont_inline_everything
    def test_pattern_based_module_rewrite(self):
        """match::module patterns can match submodule CallMethod pairs and
        rewrite a Conv2d->BatchNorm2d call sequence into one fused op."""
        # Check match::module behavior
        class Test(torch.nn.Module):
            def __init__(self):
                super(Test, self).__init__()
                self.conv = torch.nn.Conv2d(1, 20, 5, 1)
                self.bn = torch.nn.BatchNorm2d(num_features=20)

            def forward(self, x):
                x = self.conv(x)
                x = self.bn(x)
                return x

        m = torch.jit.script(Test())
        torch._C._jit_pass_custom_pattern_based_rewrite_graph("""
graph(%self, %x):
        %conv = match::module[name="Conv2d"](%self)
        %y = prim::CallMethod[name="forward"](%conv, %x)
        %bn = match::module[name="BatchNorm2d"](%self)
        %z = prim::CallMethod[name="forward"](%bn, %y)
        return (%z)""", """
graph(%self, %x):
        %z = my::matched_conv_bn(%self, %x)
        return (%z)""", m._c._get_method("forward").graph)
        FileCheck().check("my::matched_conv_bn").run(m._c._get_method("forward").graph)
    def test_pattern_based_rewrite_with_source_range_preserved(self):
        """Pattern rewriting copies source ranges from matched nodes onto
        replacement nodes when value_name_pairs maps replacement values to
        pattern values; covered for 1->1, fuse (2->1) and split (1->2)
        transforms, plus the negative case without value_name_pairs."""
        class TestModule1(torch.nn.Module):
            def __init__(self):
                super(TestModule1, self).__init__()

            def forward(self, x, y, z, w):
                x = x + y
                x = x * z
                return w - x

        input_pattern = """
graph(%x, %y, %z, %const):
    %t = aten::add(%x, %y, %const)
    %o = aten::mul(%t, %z)
    return (%o)"""
        replacement_pattern = """
graph(%x, %y, %z, %const):
    %o = my::add_mul(%x, %y, %z, %const)
    return (%o)"""
        scripted_model = torch.jit.script(TestModule1())
        graph = scripted_model.graph
        # Map replacement output "o" to the pattern's "t" (the add node).
        value_mappings = [("o", "t")]
        for node in graph.nodes():
            if node.kind() == "aten::add":
                source_range_1 = node.sourceRange()
        torch._C._jit_pass_custom_pattern_based_rewrite_graph(
            input_pattern, replacement_pattern, scripted_model.graph, value_name_pairs=value_mappings)
        graph = scripted_model.graph
        for node in graph.nodes():
            if node.kind() == "my::add_mul":
                source_range_2 = node.sourceRange()
        self.assertTrue(source_range_1 == source_range_2)

        class TestModule2(torch.nn.Module):
            def __init__(self):
                super(TestModule2, self).__init__()

            def forward(self, x, y, z, w):
                x = x + y
                x = x + z
                x = x * z
                x = x * w
                return x - 2

        # Check source range preservation for two node transforms add -> my_add
        input_pattern = """
graph(%x, %y, %const):
    %o = aten::add(%x, %y, %const)
    return (%o)"""
        replacement_pattern = """
graph(%x, %y, %const):
    %o = my::add(%x, %y, %const)
    return (%o)"""
        scripted_model = copy.deepcopy(torch.jit.script(TestModule2()))
        graph_copy = scripted_model.graph.copy()
        value_mappings = [("o", "o")]
        # Capture the ranges of the first and second aten::add before rewrite.
        source_range_add_1 = None
        for node in graph_copy.nodes():
            if source_range_add_1 is None and node.kind() == "aten::add":
                source_range_add_1 = node.sourceRange()
            if source_range_add_1 is not None and node.kind() == "aten::add":
                source_range_add_2 = node.sourceRange()
        torch._C._jit_pass_custom_pattern_based_rewrite_graph(
            input_pattern, replacement_pattern, graph_copy, value_name_pairs=value_mappings)
        source_range_my_add_1 = None
        for node in graph_copy.nodes():
            if source_range_my_add_1 is None and node.kind() == "my::add":
                source_range_my_add_1 = node.sourceRange()
            if source_range_my_add_1 is not None and node.kind() == "my::add":
                source_range_my_add_2 = node.sourceRange()
        self.assertTrue(source_range_add_1 == source_range_my_add_1)
        self.assertTrue(source_range_add_2 == source_range_my_add_2)

        # Check source range preservation for add-add -> double_add transform
        # fuse nodes
        input_pattern = """
graph(%x, %y, %z, %const):
    %t = aten::add(%x, %y, %const)
    %o = aten::add(%t, %z, %const)
    return (%o)"""
        replacement_pattern = """
graph(%x, %y, %z, %const):
    %o = my::double_add(%x, %y, %z, %const)
    return (%o)"""
        scripted_model = torch.jit.script(TestModule2())
        graph_copy = scripted_model.graph.copy()
        value_mappings = [("o", "t")]
        source_range_1 = None
        source_range_2 = None
        for node in graph_copy.nodes():
            if node.kind() == "aten::add":
                source_range_1 = node.sourceRange()
                break
        torch._C._jit_pass_custom_pattern_based_rewrite_graph(
            input_pattern, replacement_pattern, graph_copy, value_name_pairs=value_mappings)
        for node in graph_copy.nodes():
            if node.kind() == "my::double_add":
                source_range_2 = node.sourceRange()
        self.assertTrue(source_range_1 == source_range_2)

        # Check source range preservation for mul -> add + add transform
        # split node
        input_pattern = """
graph(%x, %y):
    %t = aten::mul(%x, %y)
    return (%t)"""
        replacement_pattern = """
graph(%x, %y):
    %t = my::add(%x, %y)
    %o = my::add(%t, %y)
    return (%o)"""
        scripted_model = torch.jit.script(TestModule2())
        graph_copy = scripted_model.graph.copy()
        # Both replacement values map to the single matched mul's range.
        value_mappings = [("t", "t"), ("o", "t")]
        source_range_mul_1 = None
        for node in graph_copy.nodes():
            if source_range_mul_1 is None and node.kind() == "aten::mul":
                source_range_mul_1 = node.sourceRange()
            if source_range_mul_1 is not None and node.kind() == "aten::mul":
                source_range_mul_2 = node.sourceRange()
        torch._C._jit_pass_custom_pattern_based_rewrite_graph(
            input_pattern, replacement_pattern, graph_copy, value_name_pairs=value_mappings)
        source_range_add_1 = None
        for node in graph_copy.nodes():
            if source_range_add_1 is None and node.kind() == "my::add":
                source_range_add_1 = node.sourceRange()
            if source_range_add_1 is not None and node.kind() == "my::add":
                source_range_add_2 = node.sourceRange()
        self.assertTrue(source_range_mul_1 == source_range_add_1)
        self.assertTrue(source_range_mul_2 == source_range_add_2)

        # Check lack of source range preservation for mul-mul-> double_mul transform
        input_pattern = """
graph(%x, %y, %z):
    %t = aten::mul(%x, %y)
    %o = aten::mul(%t, %z)
    return (%o)"""
        replacement_pattern = """
graph(%x, %y, %z):
    %o = my::double_mul(%x, %y, %z)
    return (%o)"""
        scripted_model = torch.jit.script(TestModule2())
        graph_copy = scripted_model.graph.copy()
        for node in graph_copy.nodes():
            if node.kind() == "aten::mul":
                source_range_1 = node.sourceRange()
        # No value_name_pairs: the replacement node gets a fresh source range.
        torch._C._jit_pass_custom_pattern_based_rewrite_graph(input_pattern, replacement_pattern, graph_copy)
        for node in graph_copy.nodes():
            if node.kind() == "my::double_mul":
                source_range_2 = node.sourceRange()
        self.assertFalse(source_range_1 == source_range_2)
    def test_expand_quantlint(self):
        # Placeholder: quantlint expansion coverage is not implemented yet.
        pass
    def test_expand_fold_quant_inputs(self):
        # Placeholder: fold-quant-inputs coverage is not implemented yet.
        pass
    def test_shape_analysis_broadcast(self):
        """Complete shape analysis annotates a broadcasted add with the
        broadcast output shape and contiguous strides."""
        def broadcast(a, b):
            return a + b

        # (3,1,5) + (4,1,8,5) broadcasts to (4,3,8,5).
        x = torch.randn(3, 1, 5, requires_grad=True)
        y = torch.randn(4, 1, 8, 5, requires_grad=True)
        graph = torch.jit.script(broadcast).graph
        torch._C._jit_pass_complete_shape_analysis(graph, (x, y), False)
        FileCheck().check("Double(4, 3, 8, 5, strides=[120, 40, 5, 1], device=cpu)").run(str(graph))
    def test_shape_analysis_unsqueeze_in_loop(self):
        """A loop body that unsqueezes its carried tensor changes rank every
        iteration, so shape analysis must settle on an unsized FloatTensor
        type rather than any concrete shape."""
        input_str = """graph(%x.1 : Tensor):
  %4 : bool = prim::Constant[value=1]()
  %1 : int = prim::Constant[value=2]()
  %7 : int = prim::Constant[value=0]()
  # CHECK: FloatTensor(requires_grad=0, device=cpu) = prim::Loop
  %x : Tensor = prim::Loop(%1, %4, %x.1)
    # CHECK: : FloatTensor(requires_grad=0, device=cpu)):
    block0(%i : int, %x.6 : Tensor):
      # CHECK: FloatTensor(requires_grad=0, device=cpu) = aten::unsqueeze
      %x.3 : Tensor = aten::unsqueeze(%x.6, %7)
      -> (%4, %x.3)
  return (%x)"""
        graph = parse_ir(input_str)
        torch._C._jit_pass_complete_shape_analysis(graph, (torch.zeros(2, 2, dtype=torch.float32),), False)
        FileCheck().run(input_str, graph)
def test_script_tensor_type(self):
def foo(x, t: torch.dtype):
return x.type(t)
scr = torch.jit.script(foo)
x = torch.rand(3, 4)
for t in [torch.int8, torch.float64, torch.float32,
torch.bfloat16, torch.complex64, torch.complex128, torch.bool]:
self.assertEqual(scr(x, t), foo(x, t))
    def test_shape_analysis_masked_select(self):
        """masked_select yields a 1-D tensor of data-dependent length, so
        shape analysis records Float(*) instead of a concrete size."""
        input_str = """graph(%0 : Float(),
      %1 : Bool()):
  # CHECK: Float(*, requires_grad=0, device=cpu) = aten::masked_select
  %2 : Tensor = aten::masked_select(%0, %1) # test/test_jit.py:15261:0
  return (%2)"""
        graph = parse_ir(input_str)
        # 0-dim float input and 0-dim bool mask match the graph signature.
        x = torch.ones(1, dtype=torch.float32)[0]
        mask = x.ge(0.5)
        torch._C._jit_pass_complete_shape_analysis(graph, (x, mask), False)
        FileCheck().run(input_str, graph)
    # TODO: update verify to work with GraphExecutors
    @unittest.skip("verify needs to be updated to work with GraphExecutors")
    def test_verify(self):
        """(Skipped) torch.jit.verify should confirm that a compiled function
        and its eager counterpart agree on outputs and loss gradients."""
        x = torch.tensor([0.4], requires_grad=True)
        y = torch.tensor([0.7], requires_grad=True)

        @torch.jit.compile
        def f(x, y):
            z = torch.sigmoid(x * (x + y))
            w = torch.abs(x * x * x + y) + Variable(torch.ones(1))
            return z, w

        torch.jit.verify(f, (x, y), loss_fn=lambda z, w: z * w, devices=[])
    # TODO: adapt to a GraphExecutor test
    @unittest.skip("Need to instrument GraphExecutors a bit more")
    def test_flags(self):
        """(Skipped) traces should be keyed on the requires_grad flags of the
        inputs, and gradients must match across flag combinations."""
        x, y = torch.randn(2, 2)
        y = Variable(torch.randn(2, 2))

        @torch.jit.compile
        def fn(x, y):
            return (x * x + y * y + x * y).sum()

        grads = {}
        for rx, ry in product((True, False), repeat=2):
            x.requires_grad = rx
            y.requires_grad = ry

            self.assertFalse(fn.has_trace_for(x, y))
            out = fn(x, y)

            self.assertFalse(fn.has_trace_for(x, y))
            for v, name, compute in [(x, 'x', rx), (y, 'y', ry)]:
                if not compute:
                    continue
                grad_v, = torch.autograd.grad(out, v, retain_graph=True)
                # First combination seeds the expected gradient for this input.
                expected_grad = grads.setdefault(name, grad_v)
                self.assertEqual(grad_v, expected_grad)
            self.assertEqual(fn.has_trace_for(x, y), rx or ry)
    def test_python_ir(self):
        """Graph nodes can be cloned into a fresh Graph through the Python IR
        bindings, and tensor attributes round-trip through t_/t."""
        x = torch.tensor([0.4], requires_grad=True)
        y = torch.tensor([0.7], requires_grad=True)

        def doit(x, y):
            return torch.sigmoid(torch.tanh(x * (x + y)))

        g, _ = torch.jit._get_trace_graph(doit, (x, y))
        self.run_pass('dce', g)
        self.run_pass('canonicalize', g)
        g2 = torch._C.Graph()
        # Maps each value in g to its clone in g2 so operands resolve.
        g_to_g2 = {}
        for node in g.inputs():
            g_to_g2[node] = g2.addInput()
        for node in g.nodes():
            n_ = g2.createClone(node, lambda x: g_to_g2[x])
            g2.appendNode(n_)
            for o, no in zip(node.outputs(), n_.outputs()):
                g_to_g2[o] = no

        for node in g.outputs():
            g2.registerOutput(g_to_g2[node])

        # Tensor attribute setter (t_) and getter (t) round-trip.
        t_node = g2.create("prim::TensorTest").t_("a", torch.ones([2, 2]))
        self.assertEqual(t_node.attributeNames(), ["a"])
        g2.appendNode(t_node)
        self.assertTrue(torch.equal(torch.ones(2, 2), t_node.t("a")))

        # Every original node kind must exist in the cloned graph.
        for node in g.nodes():
            self.assertTrue(g2.findNode(node.kind()) is not None)
def test_permute_inputs_binding(self):
@torch.jit.script
def foo(i, j, k):
pass
g = foo.graph
idxs = []
for i, inp in enumerate(g.inputs()):
inp.setDebugName(f"inp{i}")
idxs.append(i)
permuted_idxs = list(np.random.permutation(idxs))
g.permuteInputs(permuted_idxs)
for i, inp in enumerate(g.inputs()):
self.assertEqual(f"inp{permuted_idxs[i]}", inp.debugName())
    @unittest.skipIf(IS_MACOS, "Failing on MacOS only")
    def test_python_ir_utils(self):
        """insert_point_guard nests correctly (constants land in insertion
        order) and node.matches validates against schemas."""
        @torch.jit.script
        def foo(inp):
            x = inp + 1
            y = x / 2
            z = y * y
            return z

        add_node = foo.graph.findNode("aten::add")
        div_node = foo.graph.findNode("aten::div")

        with foo.graph.insert_point_guard(add_node):
            with foo.graph.insert_point_guard(div_node):
                foo.graph.insertConstant("goodbye")
            foo.graph.insertConstant("hello")
        with foo.graph.insert_point_guard(foo.graph.findNode("aten::mul")):
            foo.graph.insertConstant("hello")

        # Inner guard inserted before div, outer before... order: hello, goodbye, hello.
        FileCheck().check("hello").check("goodbye").check("hello").run(foo.graph)

        self.assertTrue(add_node.matches(add_node.schema()))
        self.assertFalse(add_node.matches(div_node.schema()))
    def test_python_ir_utils_graph(self):
        """graph.insertGraph splices another graph in place of a node:
        here x * 4 is unrolled into repeated adds."""
        @torch.jit.script
        def unrolled_mul(x: torch.Tensor, y: int):
            out = x
            for _ in range(y - 1):
                out = out + x
            return out

        @torch.jit.script
        def foo(x):
            return x * 4

        g = foo.graph
        # Select only tensor-scalar muls whose scalar operand is a constant int.
        muls = g.findAllNodes("aten::mul")
        scalar_muls = filter(lambda x: x.matches("aten::mul(Tensor self, Scalar other) -> Tensor"), muls)
        mul_constant_int = filter(lambda x: isinstance(list(x.inputs())[1].toIValue(), int), scalar_muls)
        for mul in mul_constant_int:
            with g.insert_point_guard(mul):
                outputs = g.insertGraph(unrolled_mul.graph, list(mul.inputs()))
                assert len(outputs) == len(list(mul.outputs()))
                for new_out, old_out in zip(outputs, g.outputs()):
                    old_out.replaceAllUsesWith(new_out)
                mul.destroy()

        FileCheck().check_not("aten::mul").check("aten::add").run(foo.graph)
        self.assertEqual(foo(torch.ones([2, 2])), torch.ones([2, 2]) * 4)
    @unittest.skipIf(IS_SANDCASTLE, "gtest runs these in sandcastle")
    @unittest.skipIf(RUN_CUDA, "covered by test_cpp_cuda")
    @unittest.skipIf(not torch._C._jit_has_cpp_tests(), "Tests were not built, use BUILD_TEST=1")
    def test_cpp(self):
        """Run the JIT C++ gtest suite when it was compiled into the binary."""
        from cpp.jit import tests_setup
        tests_setup.setup()
        torch._C._jit_run_cpp_tests()
        tests_setup.shutdown()
def test_batchnorm(self):
x = torch.ones(2, 2, 2, 2)
g, outputs, inputs = torch.jit._get_trace_graph(nn.BatchNorm2d(2), x,
_force_outplace=True, return_inputs=True)
m = self.createFunctionFromGraph(g)
self.assertEqual(outputs, m(*inputs))
    def test_dropout(self):
        """A traced Dropout graph replays the same random mask when the RNG
        state is forked identically for trace and re-execution."""
        x = torch.ones(2, 2)
        with torch.random.fork_rng(devices=[]):
            g, outputs, inputs = torch.jit._get_trace_graph(nn.Dropout(0.6), x, return_inputs=True)
        # Same forked RNG state => the rebuilt function samples the same mask.
        with torch.random.fork_rng(devices=[]):
            m = self.createFunctionFromGraph(g)
            self.assertEqual(outputs, m(*inputs))
@unittest.skipIf(not RUN_CUDA, "test requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, "skip if profiling isn't enabled")
def test_native_dropout_corner_case(self):
with disable_autodiff_subgraph_inlining():
def t(x, p: float, t: bool):
o = torch.dropout(x, p, t)
return o
jit_t = torch.jit.script(t)
x = torch.randn(5).requires_grad_()
FileCheck().check("prim::DifferentiableGraph").run(jit_t.graph_for(x, 1.0, True, profile_and_replay=True))
for train in [True, False]:
for p in [0.0, 1.0]:
for device in ["cuda", "cpu"]:
x = torch.randn(5).to(device=device).requires_grad_()
x_ref = x.detach().requires_grad_()
o = jit_t(x, p, train)
o_ref = t(x_ref, p, train)
o.sum().backward()
o_ref.sum().backward()
assert(o.equal(o_ref))
assert(x.grad.equal(x_ref.grad))
    @slowTest
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, 'Testing differentiable graph')
    def test_dropout_module_requires_grad(self):
        """Dropout in a module is symbolically differentiated only when the
        input requires grad, and bernoulli_ runs only in training mode."""
        with enable_profiling_mode_for_profiling_tests():
            class MyModule(torch.nn.Module):
                def __init__(self, M):
                    super(MyModule, self).__init__()
                    self.dropout = torch.nn.Dropout(0.5)
                    self.linear = torch.nn.Linear(M, M)

                def forward(self, input):
                    input = self.dropout(input)
                    output = self.linear(input)
                    return output

            def profile(func, X):
                # Collects the names of ops actually executed by func(X).
                with torch.autograd.profiler.profile() as prof:
                    func(X)
                return [e.name for e in prof.function_events]

            M = 1000
            scripted = torch.jit.script(MyModule(M))
            # To reduce confusion about expected behaviors:
            # requires_grad controls whether dropout is symbolically differentiated.
            # training controls whether bernoulli_ is called inside symbolic differentiation of dropout.
            # * When requires_grad == training, the expected behaviors are obvious.
            # * When requires_grad=True and training=False, bernoulli_ might still show up in the graph.
            #   But it's in a branch that's not called. That's why we have separate checks for autograd
            #   profiler to make sure it's not run.
            # * When requires_grad=False and training=True, bernoulli_ must be run since it's the expected
            #   behavior for the dropout layer in training mode. It's independent of whether graph requires
            #   gradient. In fact bernoulli_ comes from autograd instead of autodiff in this case.
            for training in (True, False):
                if training:
                    scripted.train()
                else:
                    scripted.eval()
                for requires_grad in (True, False):
                    X = torch.randn(M, M, requires_grad=requires_grad)
                    if requires_grad:
                        FileCheck().check("aten::native_dropout").run(scripted.graph_for(X, profile_and_replay=True))
                    self.assertEqual(training, 'aten::bernoulli_' in profile(scripted, X))
    @unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.SIMPLE, 'Testing differentiable graph')
    def test_dropout_func_requires_grad(self):
        """Functional dropout: bernoulli_ executes only for the
        training=True variant, regardless of requires_grad."""
        def dropout_training(input):
            return F.dropout(input, 0.5, training=True)

        def dropout_eval(input):
            return F.dropout(input, 0.5, training=False)

        def profile(func, X):
            # Collects the names of ops actually executed by func(X).
            with torch.autograd.profiler.profile() as prof:
                func(X)
            return [e.name for e in prof.function_events]

        M = 1000
        scripted_training = torch.jit.script(dropout_training)
        scripted_eval = torch.jit.script(dropout_eval)
        # See comments in test_dropout_module_requires_grad.
        with disable_autodiff_subgraph_inlining():
            for requires_grad in (True, False):
                X = torch.randn(M, M, requires_grad=requires_grad)
                if requires_grad:
                    FileCheck().check("aten::native_dropout").run(scripted_training.graph_for(X, profile_and_replay=True))
                self.assertIn('aten::bernoulli_', profile(scripted_training, X))
                self.assertNotIn('aten::bernoulli_', profile(scripted_eval, X))
    @unittest.skipIf(not RUN_CUDA, "test_dropout_cuda require CUDA")
    def test_dropout_cuda(self):
        """JIT dropout on CUDA is statistically equivalent to eager dropout
        (compared by zero-rate, not exact values)."""
        # Dropout AD is dispatched to _fused_dropout in CUDA case,
        # which is not included in TestJitGeneratedFunctional
        def _zero_rate(t):
            # Fraction of elements zeroed out by dropout.
            return torch.true_divide((t == 0).sum(), t.numel())

        x = torch.ones(1000, 1000).cuda().requires_grad_()

        with enable_profiling_mode_for_profiling_tests():
            @torch.jit.script
            def func(x):
                return torch.nn.functional.dropout(x)

            with freeze_rng_state():
                out_ref = torch.nn.functional.dropout(x)
                grad_ref = torch.autograd.grad(out_ref.sum(), x)

            with freeze_rng_state():
                out = func(x)
                grad = torch.autograd.grad(out.sum(), x)

        # TODO(#40882): previously we assert exact matches between eager and JIT result:
        # self.assertEqual(out, out_ref)
        # self.assertEqual(grad, grad_ref)
        # This test was disabled during legacy -> profiling executor transition.
        # Currently JIT fused results doesn't match eager result exactly due to some changes merged in between.
        # We temporarily only check statstical difference but it should be reverted once the issue is fixed.
        self.assertEqual(_zero_rate(out), _zero_rate(out_ref), rtol=1e-3, atol=1e-4)
        self.assertEqual(_zero_rate(grad[0]), _zero_rate(grad_ref[0]), rtol=1e-3, atol=1e-4)
def test_torch_ops_overloaded(self):
with self.assertRaisesRegex(RuntimeError, "failed to many any schema"):
torch.ops.aten.add("a", 1)
self.assertEqual("ab", torch.ops.aten.add("a", "b"))
a, b = torch.rand(3, 4), torch.rand(3, 4)
self.assertEqual(a + b, torch.ops.aten.add(a, b))
self.assertEqual(a + 1, torch.ops.aten.add(a, 1))
def test_torch_ops_kwonly(self):
a, b = torch.rand(3, 4), torch.rand(3, 4)
with self.assertRaisesRegex(RuntimeError, "positional argument"):
torch.ops.aten.add(a, b, 2)
# h/t Chillee for this ambiguous case
self.assertEqual(a.prod(1), torch.ops.aten.prod(a, 1))
def test_torch_complex(self):
def fn(real, img):
return torch.complex(real, img)
def fn_out(real, img, out):
return torch.complex(real, img, out=out)
self.checkScript(fn, (torch.rand(3, 4), torch.rand(3, 4), ))
self.checkScript(fn, (torch.ones(5, 1, 4), torch.ones(5, 1, 4), ))
self.checkScript(fn, (torch.zeros(1, 6), torch.ones(6, 1), ))
self.checkScript(fn, (torch.zeros(1, 6), torch.zeros(6, 1), ))
self.checkScript(fn, (torch.empty(3, 4), torch.empty(3, 4), ))
real = torch.tensor([1, 2], dtype=torch.float32)
img = torch.tensor([3, 4], dtype=torch.float32)
out = torch.empty([3, 4], dtype=torch.complex64)
self.checkScript(fn_out, (real, img, out, ))
real = torch.tensor([5, 2], dtype=torch.float64)
img = torch.tensor([3, 4], dtype=torch.float64)
out = torch.empty([5, 2], dtype=torch.complex128)
self.checkScript(fn_out, (real, img, out, ))
real = torch.ones([1, 2])
img = torch.ones([1, 2])
out = torch.empty([1, 2], dtype=torch.complex128)
self.checkScript(fn_out, (real, img, out, ))
real = torch.ones([3, 8, 7])
img = torch.ones([3, 8, 7])
out = torch.empty([3, 8, 7], dtype=torch.complex128)
self.checkScript(fn_out, (real, img, out, ))
real = torch.empty([3, 2, 6])
img = torch.empty([3, 2, 6])
out = torch.empty([3, 2, 6], dtype=torch.complex128)
self.checkScript(fn_out, (real, img, out, ))
real = torch.zeros([1, 3])
img = torch.empty([3, 1])
out = torch.empty([3, 3], dtype=torch.complex128)
self.checkScript(fn_out, (real, img, out, ))
real = torch.ones([2, 5])
img = torch.empty([2, 1])
out = torch.empty([2, 5], dtype=torch.complex128)
self.checkScript(fn_out, (real, img, out, ))
real = torch.ones([2, 5])
img = torch.zeros([2, 1])
out = torch.empty([2, 5], dtype=torch.complex128)
self.checkScript(fn_out, (real, img, out, ))
def test_einsum(self):
def check(fn, jitted, *args):
self.assertGraphContains(jitted.graph, kind='aten::einsum')
self.assertEqual(fn(*args), jitted(*args))
def equation_format(x, y):
return torch.einsum('i,j->ij', (x, y))
def equation_format_varargs(x, y):
return torch.einsum('i,j->ij', x, y)
def sublist_format(x, y):
return torch.einsum(x, [0], y, [1], [0, 1])
x = make_tensor((5,), dtype=torch.float32, device="cpu")
y = make_tensor((10,), dtype=torch.float32, device="cpu")
for fn in [equation_format, equation_format_varargs, sublist_format]:
check(fn, torch.jit.script(fn), x, y)
check(fn, torch.jit.trace(fn, (x, y)), x, y)
    @skipIfTorchDynamo("TorchDynamo fails with unknown reason")
    def test_python_ivalue(self):
        """PyObject <-> IValue conversion preserves equality and balances
        refcounts (no leaks, no dangling references)."""
        # Test if pure python object can be hold as IValue and conversion
        # between IValue and PyObject are correct
        # test for numpy object
        py_array = np.arange(15)
        ret_py_obj = torch._C._ivalue_debug_python_object(py_array)
        self.assertEqual(py_array, ret_py_obj)

        # test for function object
        ret_py_obj = torch._C._ivalue_debug_python_object(F.relu)
        self.assertEqual(F.relu, ret_py_obj)

        # test for memory management
        # we need to ensure IValue correctly call incref/decref to avoid
        # dangling behavior and potential memory leaks during conversions
        def test_func_scope_helper(inp):
            # create a scope and do the conversion -> ivalue -> pyobject
            # this func return a new pyobject that refcount + 1
            inp_refcount = sys.getrefcount(inp)
            ivalue_holder = torch._C._ivalue_debug_python_object(inp)
            self.assertEqual(inp_refcount + 1, sys.getrefcount(ivalue_holder))
            return ivalue_holder + 1

        # 2200 is outside CPython's small-int cache, so its refcount is meaningful.
        test_input = 2200
        before_count = sys.getrefcount(test_input)
        test_func_scope_helper(test_input)
        after_count = sys.getrefcount(test_input)
        # after the test_func_scope_helper_call, the refcount of
        # test_input should be equal to the original refcount
        # otherwise we get either dangling pointer or memory leak!
        self.assertEqual(before_count, after_count)
    def test_decompose_addmm(self):
        """decompose_ops splits addmm with default/unit alpha-beta into
        mm + add, but leaves non-default or dynamic scaling factors alone."""
        def does_decompose():
            @torch.jit.script
            def addmm(mat, mat1, mat2):
                a = mat.addmm(mat1, mat2)
                b = mat.addmm(mat1, mat2, alpha=1.0, beta=1.0)
                return a + b

            mat = torch.randn(2, 2)
            mat1 = torch.randn(2, 4)
            mat2 = torch.randn(4, 2)

            out_ref = addmm(mat, mat1, mat2)
            self.run_pass('decompose_ops', addmm.graph)
            out_test = addmm(mat, mat1, mat2)
            # Decomposition must not change numerics, and no addmm remains.
            self.assertEqual(out_ref, out_test)
            FileCheck().check_not("addmm").run(str(addmm.graph))

        def doesnt_decompose():
            @torch.jit.script
            def addmm(mat, mat1, mat2, alpha, beta):
                a = mat.addmm(mat1, mat2, alpha=4.20, beta=2.0)
                b = mat.addmm(mat1, mat2, alpha=int(alpha), beta=int(beta))
                return a + b

            # Non-default constants and runtime alpha/beta: graph unchanged.
            orig = str(addmm.graph)
            self.run_pass('decompose_ops', addmm.graph)
            self.assertTrue(orig == str(addmm.graph))

        does_decompose()
        doesnt_decompose()
    @suppress_warnings
    def test_sparse_tensors(self):
        """Sparse COO tensors flow through TorchScript: is_sparse,
        pass-through returns, and sparse.sum/mm/addmm all work scripted."""
        @torch.jit.ignore
        def get_sparse():
            return torch.sparse.FloatTensor(2, 3)

        @torch.jit.script
        def test_is_sparse(input):
            # type: (Tensor) -> bool
            return input.is_sparse

        script_out_is_sparse = test_is_sparse(get_sparse())
        script_out_is_dense = test_is_sparse(torch.randn(2, 3))
        self.assertEqual(script_out_is_sparse, True)
        self.assertEqual(script_out_is_dense, False)

        def test_basic_sparse(input):
            output = get_sparse()
            return output, input

        self.checkScript(test_basic_sparse, (get_sparse(),))
        self.checkScript(test_basic_sparse, (torch.tensor([1]),))

        def test_sparse_sum(input):
            return torch.sparse.sum(input)

        self.checkScript(test_sparse_sum, (get_sparse(),))

        def test_sparse_mm(input1, input2):
            return torch.sparse.mm(input1, input2)

        self.checkScript(test_sparse_mm, (get_sparse(), torch.randn(3, 4)))

        def test_sparse_addmm(input, input1, input2):
            return torch.sparse.addmm(input, input1, input2)

        def test_sparse_addmm_alpha_beta(input, input1, input2):
            return torch.sparse.addmm(input, input1, input2, alpha=1.3, beta=1.5)

        self.checkScript(test_sparse_addmm, (torch.randn(2, 4), get_sparse(), torch.randn(3, 4)))
        self.checkScript(test_sparse_addmm_alpha_beta, (torch.randn(2, 4), get_sparse(), torch.randn(3, 4)))
    @suppress_warnings
    def test_sparse_csr_tensors(self):
        """is_sparse_csr is scriptable and distinguishes CSR from dense."""
        @torch.jit.ignore
        def get_sparse_csr():
            return torch.randn(3, 3).to_sparse_csr()

        @torch.jit.script
        def test_is_sparse_csr(input):
            # type: (Tensor) -> bool
            return input.is_sparse_csr

        script_out_is_sparse_csr = test_is_sparse_csr(get_sparse_csr())
        script_out_is_dense_csr = test_is_sparse_csr(torch.randn(3, 3))

        self.assertEqual(script_out_is_sparse_csr, True)
        self.assertEqual(script_out_is_dense_csr, False)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
def test_device_not_equal(self):
def compare_device(x: torch.device):
return x != torch.device("cuda:0")
def compare_two_device(x: torch.device, y: torch.device):
return x != y
self.checkScript(compare_device, (torch.device("cuda:0"),))
self.checkScript(compare_two_device, (torch.device("cuda:0"), torch.device("cuda:1"), ))
    def test_constant_prop_simple(self):
        """Pure constant arithmetic folds to a single prim::Constant (8)."""
        @torch.jit.script
        def constant_prop(input_int):
            # type: (int) -> int
            a = 2 * 3
            b = a + 2
            return b - input_int

        out_ref = constant_prop(2)
        self.run_pass('constant_propagation', constant_prop.graph)
        out_test = constant_prop(2)
        # Folding must not change the result, and no arithmetic ops remain.
        self.assertEqual(out_ref, out_test)
        graph_str = str(constant_prop.graph)
        self.assertTrue("aten::add" not in graph_str and "aten::mul" not in graph_str)
        const = constant_prop.graph.findNode("prim::Constant").output().toIValue()
        self.assertEqual(const, 8)
    def test_constant_prop_nested(self):
        """Constant propagation folds the arithmetic inside both branches of
        a data-dependent If down to constants."""
        @torch.jit.script
        def constant_prop(a):
            b = 2 + 1
            if bool(a < 2):
                c = b + 2
            else:
                c = b - 2
            return c

        out_ref = constant_prop(torch.tensor(2))
        self.run_pass('constant_propagation', constant_prop.graph)
        out_test = constant_prop(torch.tensor(2))
        self.assertEqual(out_ref, out_test)
        # The If itself survives (condition is dynamic) but each branch body
        # is reduced to a single constant.
        if_node = constant_prop.graph.findNode("prim::If")
        for block in if_node.blocks():
            for node in block.nodes():
                self.assertTrue(node.kind() == "prim::Constant")
    def test_constant_prop_print(self):
        """print() has a side effect: its constant input folds (to 6) but the
        prim::Print node itself is retained."""
        @torch.jit.script
        def constant_prop(input_tensor):
            a = 2 * 3
            print(a)
            b = a + 2
            return b + input_tensor

        self.run_pass('constant_propagation', constant_prop.graph)
        graph = constant_prop.graph
        print_node = graph.findNode("prim::Print")
        self.assertTrue(print_node.input().toIValue() == 6)
    def test_constant_prop_rand(self):
        """Nondeterministic ops such as randn must not be constant-folded."""
        @torch.jit.script
        def constant_prop():
            a = torch.randn([3])
            b = a + 2
            return b

        self.run_pass('constant_propagation', constant_prop.graph)
        self.assertTrue("aten::randn" in str(constant_prop.graph))
    def test_constant_prop_none(self):
        """An Optional[int] function returning None propagates, so the
        is-None check folds to a constant."""
        @torch.jit.script
        def typed_none():
            # type: () -> Optional[int]
            return None

        @torch.jit.script
        def constant_prop():
            a = typed_none()
            b = typed_none()
            if (a is None and b is None):
                a = 2
            else:
                a = 1
            return a

        self.run_pass('constant_propagation', constant_prop.graph)
        FileCheck().check("prim::Constant").run(constant_prop.graph)
    def test_constant_prop_if_inline(self):
        """Folding a constant-condition If must not evaluate the dead branch
        (which would divide by zero here)."""
        @torch.jit.script
        def constant_prop():
            cond = True
            a = 1
            if cond:
                a = 1 * 2
            else:
                a = 1 // 0
            return a

        # testing that 1 // 0 error is not thrown
        self.run_pass('constant_propagation', constant_prop.graph)
    def test_constant_prop_exception(self):
        """Out-of-bounds indexing inside an unexecuted branch must not raise
        during constant propagation / scripting."""
        # checking y = a[4] does not error in constant propagation
        def bad_index(x):
            # type: (bool)
            y = 0
            if x:
                a = [1, 2, 3]
                y = a[4]
            return y

        self.checkScript(bad_index, (False,))
    def test_constant_prop_aliasing_type(self):
        """len() over a tensor/list (aliasing inputs) is not folded, while a
        trivially-true If over plain ints folds away entirely."""
        @torch.jit.script
        def foo():
            return len([1]), len(torch.tensor([2]))

        FileCheck().check_dag("aten::tensor").check_dag("aten::len").run(foo.graph)

        @torch.jit.script
        def fn():
            if 1 == 1:
                return 1
            else:
                return 2

        FileCheck().check_not("prim::If").run(fn.graph)
    def test_unchecked_cast(self):
        """Optional refinement (b is not None) inserts an unchecked cast;
        mutation through the refined alias must still affect the original."""
        def test(cond):
            # type: (bool)
            a = torch.tensor([10])
            if cond:
                b = None
            else:
                b = a
            if b is not None:
                b[0] = 5
            return a.int()

        self.checkScript(test, (True,))
        self.checkScript(test, (False,))
    def test_constant_prop_if_constant(self):
        """Constant propagation through nested Ifs: constant-condition inner
        Ifs are inlined, and If outputs that stay constant are pruned."""
        @torch.jit.script
        def constant_prop(a, b):
            c0 = 1
            c1 = 1
            c2 = 1
            if bool(a):  # -> c0, c1
                if bool(b):  # -> c0
                    if 1 == 1:  # -> c0
                        c0 = c0 + 1
                        if 1 == 2:
                            c1 = c1 + 1
                            c2 = c2 + 1
            else:  # -> c0, c1
                c1 = c1 + 1

            if 1 == 1:  # inlined
                c0 = c0 + 1  # dynamic
                c2 = c2 + 4  # set to 5
            return a + c0 + c1 + c2

        graph = constant_prop.graph
        self.run_pass('constant_propagation', graph)
        # Only the outer data-dependent If remains at the top level.
        ifs = graph.findAllNodes("prim::If", recurse=False)
        snd_if_inlined = len(ifs) == 1
        self.assertTrue(snd_if_inlined)
        first_if = ifs[0]
        self.assertTrue(first_if.outputsSize() == 2)
        second_if = first_if.findNode("prim::If", recurse=False)
        self.assertTrue(second_if.outputsSize() == 1)
        self.assertTrue(second_if.findNode("prim::If") is None)
    def test_constant_prop_loop_constant(self):
        """Loops with statically-false conditions or non-positive trip counts
        are removed; loops that may run keep their bodies."""
        @torch.jit.script
        def constant_prop(cond, iter):
            # type: (bool, int) -> int
            b = 0
            while True:
                print("stays")
            for _ in range(2):
                print("stays")
            for _ in range(iter):
                print("stays")
            while cond:
                print("stays")
            while False:
                print("removed")
            for _i in range(0):
                print("removed")
            for _i in range(-4):
                print("removed")
            return b

        self.run_pass('constant_propagation', constant_prop.graph)
        graph = canonical(constant_prop.graph)
        self.assertTrue(graph.count("removed") == 0)
        self.assertTrue(graph.count("stays") == 1)  # constant gets pooled
        self.assertTrue(graph.count("prim::Print") == 4)
    def test_constant_prop_remove_output(self):
        """Loop-carried values that provably never change (a: its assignment
        sits under 1 == 2) are pruned from the loop's outputs; b and c stay."""
        @torch.jit.script
        def constant_prop(iter):
            # type: (int) -> None
            a = 1
            b = 1
            c = 1
            for i in range(iter):
                if 1 == 2:
                    a = 10
                if i == 5:
                    b = 2
                    c = 3
            print(a, b, c)

        graph = constant_prop.graph
        self.run_pass('constant_propagation', graph)
        self.assertTrue(graph.findNode("prim::Loop").outputsSize() == 2)
# TODO(gmagogsfm): Refactor this test to reduce complexity.
def test_constant_insertion(self):
    """Constant insertion/pooling and node hashing for every supported
    constant kind: primitives, strings, tensors, lists, tuples and dicts.

    For each constant literal we check that (a) constant propagation folds
    it into exactly one prim::Constant, (b) the scripted result matches
    eager eval, and (c) it survives an export/import round trip. The tail
    of the test additionally checks CSE's node hashing on tuples/dicts.
    """
    funcs_template = dedent('''
    def func():
        return {constant_constructor}
    ''')

    # constants: primitives: int, double, bool, str, lists of primitives,
    # and tuples
    def check_constant(constant_constructor):
        scope = {}
        funcs_str = funcs_template.format(constant_constructor=constant_constructor)
        execWrapper(funcs_str, globals(), scope)
        cu = torch.jit.CompilationUnit(funcs_str)
        f_script = cu.func
        self.run_pass('constant_propagation', f_script.graph)
        FileCheck().check_count("prim::Constant", 1, exactly=True).run(f_script.graph)
        self.assertEqual(scope['func'](), f_script())
        imported = self.getExportImportCopy(f_script)
        self.assertEqual(imported(), f_script())

    constants = ["None", "-.5", "0", "1", "True", "False", "''", "'a'", "'b'", "torch.tensor(1)",
                 "[True, False]", "[0., .5]", "[torch.tensor(4), torch.tensor(2)]", "[0, 1]", "['0', '1']",
                 "[True, None]", "[.5, None, .2]"]

    for type in ["Tensor", "str", "int", "float", "bool"]:
        constants.append("torch.jit.annotate(List[ " + type + "], [])")

    for constant in constants:
        check_constant(constant)

    for key_type in ["str", "int", "float"]:
        for value_type in ["Tensor", "bool", "str", "int", "float"]:
            check_constant("torch.jit.annotate(Dict[ " + key_type + ", " + value_type + "], {})")
            check_constant("torch.jit.annotate(Dict[ " + key_type + ", Optional[" + value_type + "]], {})")

    # every unordered pair of constants as a tuple literal
    for i in range(len(constants)):
        for j in range(i + 1, len(constants)):
            tup_constant = constants[i] + ", " + constants[j]
            check_constant(tup_constant)

    dict_constants = []
    for i in range(len(constants)):
        # check_constant constructs the second dict with another Tensor
        # which fails the comparison
        if not isinstance(eval(constants[i]), (str, int, float)):
            continue
        for j in range(len(constants)):
            dict_constant = "{ " + constants[i] + ": " + constants[j] + "}"
            check_constant(dict_constant)
            dict_constants.append(dict_constant)
    constants = constants + dict_constants

    # testing node hashing
    funcs_template = dedent('''
    def func():
        print({constant_constructor})
    ''')
    single_elem_tuples = ("(" + x + ",)" for x in constants)
    input_arg = ", ".join(single_elem_tuples)
    scope = {}
    funcs_str = funcs_template.format(constant_constructor=input_arg)
    execWrapper(funcs_str, globals(), scope)
    cu = torch.jit.CompilationUnit(funcs_str)
    f_script = cu.func
    self.run_pass('constant_propagation', f_script.graph)
    # prim::None return adds one constant
    self.assertEqual(len(constants) + 1, str(f_script.graph).count("prim::Constant"))
    self.run_pass('cse', f_script.graph)
    # node hashing correctly working, no CSE occurs
    self.assertEqual(len(constants) + 1, str(f_script.graph).count("prim::Constant"))

    funcs_template = dedent('''
    def func():
        a = {constant_constructor}
        print(a)
        b = {constant_constructor}
        print(b)
    ''')

    # generate dicts with built-in types (excluding torch.Tensor)
    # NOTE(review): xprod is never used below — looks like dead code; verify
    xprod = itertools.product(constants, constants)

    # test that equal tuples and dicts correctly work with node hashing
    for tup in ("(" + x + ",)" for x in constants):
        funcs_str = funcs_template.format(constant_constructor=tup)
        scope = {}
        execWrapper(funcs_str, globals(), scope)
        cu = torch.jit.CompilationUnit(funcs_str)
        f_script = cu.func
        self.run_pass('constant_propagation_immutable_types', f_script.graph)
        num_constants = str(f_script.graph).count("prim::Constant")
        self.run_pass('cse', f_script.graph)
        FileCheck().check_count("prim::Constant", num_constants, exactly=True).run(f_script.graph)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
def test_cuda_export_restore(self):
    """A nested ScriptModule with a parameter round-trips through
    export/import and still matches the original when both live on CUDA."""
    class Sub(torch.jit.ScriptModule):
        def __init__(self):
            super(Sub, self).__init__()
            self.weight = nn.Parameter(torch.randn(3, 4))

        @torch.jit.script_method
        def forward(self, thing):
            return self.weight + thing

    class M(torch.jit.ScriptModule):
        def __init__(self):
            super(M, self).__init__()
            self.mod = Sub()

        @torch.jit.script_method
        def forward(self, v):
            return self.mod(v)
    m = M()
    m.cuda()
    m2 = self.getExportImportCopy(m)
    m2.cuda()
    input = torch.rand(3, 4).cuda()
    self.assertEqual(m(input), m2(input))
@slowTest
def test_export_batchnorm(self):
    """Traced BatchNorm1d/2d (affine and non-affine, train and eval mode)
    matches its export/import copy on a fresh random input."""
    for mode in ['eval', 'train']:
        for clazz in [
                torch.nn.BatchNorm1d(100),
                torch.nn.BatchNorm1d(100, affine=False),
                torch.nn.BatchNorm2d(100),
                torch.nn.BatchNorm2d(100, affine=False)]:
            getattr(clazz, mode)()  # switch module to 'eval' or 'train'
            input = torch.randn(20, 100) if isinstance(clazz, torch.nn.BatchNorm1d) else \
                torch.randn(20, 100, 35, 45)
            traced = torch.jit.trace(clazz, (input,))
            imported = self.getExportImportCopy(traced)
            x = torch.randn(20, 100) if isinstance(clazz, torch.nn.BatchNorm1d) else \
                torch.randn(20, 100, 35, 45)
            self.assertEqual(traced(x), imported(x))
def test_export_rnn(self):
    """Traced RNN/GRU over packed sequences survives export/import, even
    when called with a different max sequence length afterwards."""
    for clazz in [nn.RNN(10, 20, 2), nn.GRU(10, 20, 2)]:
        class RNNTest(torch.nn.Module):
            def __init__(self):
                super(RNNTest, self).__init__()
                self.rnn = clazz

            def forward(self, x, lengths, h0):
                packed = torch.nn.utils.rnn.pack_padded_sequence(x, lengths)
                out, h = self.rnn(packed, h0)
                padded_outs, _ = torch.nn.utils.rnn.pad_packed_sequence(out)
                return padded_outs

        test = RNNTest()

        traced = torch.jit.trace(test, (torch.randn(5, 3, 10), torch.LongTensor([3, 2, 1]), torch.randn(2, 3, 20)))
        imported = self.getExportImportCopy(traced)
        # NB: We make sure to pass in a batch with a different max sequence
        # length to ensure that the argument stashing for pad_packed works
        # properly.
        x, lengths, h0 = torch.randn(7, 4, 10), torch.LongTensor([7, 3, 2, 1]), torch.randn(2, 4, 20)
        self.assertEqual(traced(x, lengths, h0), imported(x, lengths, h0))
def test_export_lstm(self):
    """Traced LSTM over packed sequences (with (h0, c0) tuple hidden state)
    survives export/import with a different max sequence length."""
    class LSTMTest(torch.nn.Module):
        def __init__(self):
            super(LSTMTest, self).__init__()
            self.rnn = nn.LSTM(10, 20, 2)

        def forward(self, x, lengths, hiddens):
            h0, c0 = hiddens
            packed = torch.nn.utils.rnn.pack_padded_sequence(x, lengths)
            out, (h, c) = self.rnn(packed, (h0, c0))
            padded_outs, _ = torch.nn.utils.rnn.pad_packed_sequence(out)
            return padded_outs

    test = LSTMTest()

    traced = torch.jit.trace(test, (torch.randn(5, 3, 10),
                                    torch.LongTensor([3, 2, 1]),
                                    (torch.randn(2, 3, 20), torch.randn(2, 3, 20))))
    imported = self.getExportImportCopy(traced)
    x, lengths, h0, c0 = \
        torch.randn(7, 3, 10), torch.LongTensor([7, 5, 2]), torch.randn(2, 3, 20), torch.randn(2, 3, 20)
    self.assertEqual(traced(x, lengths, (h0, c0)), imported(x, lengths, (h0, c0)))
def test_unique_state_dict(self):
    """_unique_state_dict deduplicates parameters registered under two
    names but sharing one storage (w1 and w2 here count as one entry)."""
    class MyModule(torch.nn.Module):
        def __init__(self):
            super(MyModule, self).__init__()
            shared_param = torch.nn.Parameter(torch.ones(1))
            self.register_parameter('w1', shared_param)
            self.register_parameter('w2', shared_param)

        def forward(self, input):
            return input + self.w1 + self.w2

    model = MyModule()
    # call the unittest base implementation directly — presumably to bypass
    # this TestCase's overridden assertEqual; TODO confirm
    unittest.TestCase.assertEqual(
        self, len(torch.jit._unique_state_dict(model, keep_vars=False)), 1)
    unittest.TestCase.assertEqual(
        self, len(torch.jit._unique_state_dict(model, keep_vars=True)), 1)
def test_export_dropout(self):
    """An eval-mode Dropout module survives a trace -> export -> import
    round trip and matches the traced module on a fresh input."""
    dropout = torch.nn.Dropout()
    dropout.eval()
    # check_trace=False: re-running the trace check is unnecessary since
    # eval-mode dropout is the identity.
    traced_mod = torch.jit.trace(dropout, (torch.rand(3, 4),), check_trace=False)
    restored_mod = self.getExportImportCopy(traced_mod)
    sample = torch.randn(3, 4)
    self.assertEqual(traced_mod(sample), restored_mod(sample))
def test_pretty_printer(self):
    """Golden-file checks for the TorchScript pretty printer: each scripted
    function's `.code` is compared against an expect file via
    assertExpected, so the exact source of each function matters."""
    @torch.jit.script
    def if_test(a, b):
        # FIXME: use 0 instead of a.
        # c = 0
        c = a
        if bool(a < b):
            c = b
        else:
            c = a
        return c

    @torch.jit.script
    def if_one(a, b):
        c = b
        if bool(a < b):
            c = a
        return c

    @torch.jit.script
    def while_test(a, i):
        while bool(i < 3):
            a *= a
            i += 1
        return a

    @torch.jit.script
    def while_if_test(a, b):
        c = 0
        while bool(a < 10):
            a = a + 1
            b = b + 1
            if bool(a > b):
                c = 2
            else:
                c = 3
        return a + 1 + c

    @torch.jit.script
    def loop_use_test(y):
        x = y + 1
        z = x + 5
        while bool(y < 8):
            y += 1
            z = x
        return x, z

    @torch.jit.ignore
    def python_fn(x):
        return x + 10

    @torch.jit.script
    def python_op_name_test(y):
        return python_fn(y)

    @torch.jit.script
    def empty_int_list_test(y):
        x = torch.jit.annotate(List[int], [])
        return x[0]

    @torch.jit.script
    def empty_float_list_test(y):
        return [1.0, 2.0, 3.0]

    @torch.jit.script
    def print_weird_test(y):
        print("hi\016")

    self.assertExpected(if_test.code, "if_test")
    self.assertExpected(if_one.code, "if_one")
    self.assertExpected(while_test.code, "while_test")
    self.assertExpected(while_if_test.code, "while_if_test")
    self.assertExpected(loop_use_test.code, "loop_use_test")
    self.assertExpected(python_op_name_test.code, "python_op_name_test")
    self.assertExpected(empty_int_list_test.code, "empty_int_list_test")
    self.assertExpected(empty_float_list_test.code, "empty_float_list_test")
    self.assertExpected(print_weird_test.code, "print_weird_test")
def test_cu_escaped_number(self):
    """An escaped control character ("\\016") in a string literal survives
    the CompilationUnit pretty printer (checked against an expect file)."""
    cu = torch.jit.CompilationUnit('''
def foo(a):
    print("hi\016")
''')
    self.assertExpected(cu.foo.code)
def test_import_method(self):
    """A ScriptModule method still pretty-prints after save/load, with
    emit hooks disabled during definition."""
    with torch._jit_internal._disable_emit_hooks():
        class Foo(torch.jit.ScriptModule):
            def __init__(self):
                super(Foo, self).__init__()

            @torch.jit.script_method
            def forward(self, x, y):
                return 2 * x + y

        foo = Foo()
        buffer = io.BytesIO()
        torch.jit.save(foo, buffer)
        buffer.seek(0)
        foo_loaded = torch.jit.load(buffer)
        self.assertExpected(foo_loaded.forward.code)
@unittest.skip("temporarily disable the test for fwd compatibility")
def test_non_ascii_string(self):
    """Non-ASCII string attributes and literals survive save/load and
    pretty-print correctly (currently skipped for forward compatibility)."""
    class Foo(torch.jit.ScriptModule):
        def __init__(self):
            super(Foo, self).__init__()
            self.a = "Over \u0e55\u0e57 57"

        @torch.jit.script_method
        def forward(self, x, y):
            return self.a + "hi\xA1"

    foo = Foo()
    buffer = io.BytesIO()
    torch.jit.save(foo, buffer)
    buffer.seek(0)
    foo_loaded = torch.jit.load(buffer)
    self.assertExpected(foo_loaded.forward.code)
def test_function_default_values(self):
    """Default arguments in scripted functions: tensor defaults captured at
    script time, bool/None defaults, type-comment coercion of scalar
    defaults, and errors when defaults don't match the annotated types."""
    outer_var = torch.tensor(20)
    outer_var2 = torch.tensor(30)
    a = torch.tensor(0.5)
    b = torch.tensor(10)

    @torch.jit.script
    def simple_fn(x, a=a, b=b, c=outer_var + outer_var2):
        return x + a + b + c

    self.assertEqual(
        simple_fn(torch.ones(1)),
        torch.ones(1) + 0.5 + 10 + (20 + 30))
    self.assertEqual(
        simple_fn(torch.ones(1), torch.tensor(1), torch.tensor(3), torch.tensor(4)),
        torch.ones(1) + 1 + 3 + 4)

    outer_c = torch.tensor(9)
    outer_flag = torch.tensor(False)

    @torch.jit.script
    def bool_fn(x, a=outer_c, flag=outer_flag):
        if bool(flag):
            result = x
        else:
            result = x + a
        return result

    self.assertEqual(bool_fn(torch.ones(1)), torch.ones(1) + 9)
    self.assertEqual(
        bool_fn(torch.ones(1), torch.tensor(1), torch.tensor(True)),
        torch.ones(1))

    @torch.jit.script
    def none_fn(x=None):
        # type: (Optional[int]) -> Optional[int]
        return x

    self.assertEqual(none_fn(), None)
    self.assertEqual(none_fn(1), 1)

    @torch.jit.script
    def hints(x, a=0.5, b=10):
        # type: (Tensor, float, int) -> Tensor
        return x + a + b

    self.assertEqual(hints(torch.ones(1)), torch.ones(1) + 0.5 + 10)

    # defaults that contradict the type comment must be rejected
    with self.assertRaisesRegex(RuntimeError, "Expected a default value"):
        @torch.jit.script
        def hints_bad_types(x, a=10, b=0.5):  # noqa: T484
            # type: (Tensor, float, int) -> Tensor
            return x + a + b
    with self.assertRaisesRegex(RuntimeError, "Expected a default value"):
        @torch.jit.script
        def bad_no_optional(x=None):
            # type: (Dict[str, int]) -> Dict[str, int]
            return x
def test_module_default_values(self):
    """A tensor default argument on a script_method is captured and used
    when the caller omits the argument."""
    four = torch.tensor(4)

    class Test(torch.jit.ScriptModule):
        def __init__(self):
            super(Test, self).__init__()

        @torch.jit.script_method
        def forward(self, input, other=four):
            return input + other

    t = Test()
    self.assertEqual(t(torch.ones(1)), torch.ones(1) + 4)
def test_mutable_default_values(self):
    """Mutable default parameter values (lists, tuples containing lists)
    are rejected by the scripter for both functions and modules."""
    with self.assertRaisesRegex(Exception, "Mutable default parameters"):
        @torch.jit.script
        def foo(x=(1, [])):
            # type: (Tuple[int, List[Tensor]])
            return x

    class Test(torch.nn.Module):
        def forward(self, input=[]):  # noqa: B006
            return input

    with self.assertRaisesRegex(Exception, "Mutable default parameters"):
        torch.jit.script(Test())
@skipIfTorchDynamo("TorchDynamo fails with unknown reason")
def test_warnings(self):
    """warnings.warn fires from scripted functions and scripted modules
    the same way it does from eager code, exactly once per call."""
    import warnings

    def fn(x):
        if bool(x < 2):
            warnings.warn("x is less than 2")
        return x

    class M(torch.nn.Module):
        def forward(self, x):
            if bool(x < 2):
                warnings.warn("x is less than 2")
            return x

    scripted_mod = torch.jit.script(M())
    scripted_fn = torch.jit.script(fn)

    with warnings.catch_warnings(record=True) as warns:
        fn(torch.ones(1))

    with warnings.catch_warnings(record=True) as script_warns:
        scripted_fn(torch.ones(1))

    with warnings.catch_warnings(record=True) as script_mod_warns:
        scripted_mod(torch.ones(1))

    self.assertEqual(str(warns[0]), str(script_warns[0]))
    self.assertEqual(len(script_mod_warns), 1)
    self.assertEqual(str(warns[0].message), str(script_mod_warns[0].message))
def test_no_erroneous_warnings(self):
    """Neither scripting itself nor running a call whose warn-branch is
    not taken should emit any warnings."""
    import warnings

    def fn(x):
        if bool(x > 0):
            warnings.warn('This should NOT be printed')
            x += 1
        return x

    with warnings.catch_warnings(record=True) as warns:
        fn_script = torch.jit.script(fn)
        fn_script(torch.tensor(0))  # condition false -> no warning expected
    warns = [str(w.message) for w in warns]
    self.assertEqual(len(warns), 0)
@unittest.skipIf(True, "TODO: re-enable with https://github.com/pytorch/pytorch/pull/29339")
def test_torch_load_error(self):
    """torch.load on a jit-saved archive should raise with an 'is a zip'
    hint pointing the user at torch.jit.load (currently skipped)."""
    class J(torch.jit.ScriptModule):
        def __init__(self):
            super(J, self).__init__()

        @torch.jit.script_method
        def forward(self, input):
            return input + 100

    j = J()
    with TemporaryFileName() as fname:
        j.save(fname)
        with self.assertRaisesRegex(RuntimeError, "is a zip"):
            torch.load(fname)
def test_torch_load_zipfile_check(self):
    """Files written by ScriptFunction.save are recognized as zip archives
    by torch.serialization._is_zipfile."""
    @torch.jit.script
    def fn(x):
        return x + 10

    with TemporaryFileName() as fname:
        fn.save(fname)
        with io.open(fname, 'rb') as f:
            self.assertTrue(torch.serialization._is_zipfile(f))
def test_python_bindings(self):
    """Smoke-test the Graph/Node/Block python bindings on an optimized
    scripted-LSTM graph: every value exposes type(), and blocks expose
    inputs/outputs plus param/return nodes."""
    lstm_cell = torch.jit.script(LSTMCellS)

    def lstm(x, hx, cx, w_ih, w_hh, b_ih, b_hh):
        for i in range(x.size(0)):
            hx, cx = lstm_cell(x[i], hx, cx, w_ih, w_hh, b_ih, b_hh)
        return hx

    slstm = torch.jit.script(lstm)
    inputs = get_lstm_inputs('cpu', training=True, seq_length=10)
    slstm(*inputs).sum().backward()
    global fw_graph  # kept global; presumably for interactive debugging — TODO confirm
    fw_graph = slstm.graph_for(*inputs)
    nodes = list(fw_graph.nodes())
    tested_blocks = False
    for node in nodes:
        for output in node.outputs():
            self.assertTrue(hasattr(output, 'type'))
            self.assertTrue(output.type() is not None)
        for input in node.inputs():
            self.assertTrue(hasattr(input, 'type'))
            self.assertTrue(input.type() is not None)
        for block in node.blocks():
            tested_blocks = True
            self.assertTrue(hasattr(block, 'inputs'))
            self.assertTrue(hasattr(block, 'outputs'))
            for output in block.outputs():
                self.assertTrue(hasattr(output, 'type'))
                self.assertTrue(output.type() is not None)
            for input in block.inputs():
                self.assertTrue(hasattr(input, 'type'))
                self.assertTrue(input.type() is not None)
            self.assertTrue(hasattr(block, 'returnNode'))
            self.assertTrue(type(block.returnNode()) == torch._C.Node)
            self.assertTrue(hasattr(block, 'paramNode'))
            self.assertTrue(type(block.paramNode()) == torch._C.Node)
    # guard against the loop silently never seeing a block
    self.assertTrue(tested_blocks)
def test_export_opnames(self):
    """export_opnames collects operator names across nested ScriptModules,
    including ops reached only through non-forward methods."""
    class Foo(torch.jit.ScriptModule):
        def __init__(self):
            super(Foo, self).__init__()

        def one(self, x, y):
            # type: (Tensor, Tensor) -> Tensor
            return x + y

        def two(self, x):
            # type: (Tensor) -> Tensor
            return 2 * x

        @torch.jit.script_method
        def forward(self, x):
            # type: (Tensor) -> Tensor
            return self.one(self.two(x), x)

    class Bar(torch.jit.ScriptModule):
        def __init__(self):
            super(Bar, self).__init__()
            self.sub = Foo()

        @torch.jit.script_method
        def forward(self, x):
            # type: (Tensor) -> Tensor
            return self.sub.forward(x)

    bar = Bar()
    ops = torch.jit.export_opnames(bar)
    expected = ['aten::add.Tensor', 'aten::mul.Scalar']
    # subset, not equality: the exporter may record additional ops
    self.assertTrue(set(expected).issubset(set(ops)))
def test_pytorch_jit_env_off(self):
    """torch must stay importable when scripting is disabled via the
    PYTORCH_JIT=0 environment variable."""
    import subprocess
    child_env = dict(os.environ, PYTORCH_JIT='0')
    try:
        subprocess.check_output(
            [sys.executable, '-c', 'import torch'], env=child_env)
    except subprocess.CalledProcessError as e:
        raise RuntimeError("Could not 'import torch' with PYTORCH_JIT=0") from e
def test_print_op_module(self):
    """The repr of torch.ops mentions 'ops'.

    Issue #19351: python2 and python3 went through different code paths —
    python2 returned '<module 'torch.ops' (built-in)>' while python3 uses
    __file__ and returns
    '<module 'torch.ops' from '/scratch/ailzhang/pytorch/torch/_ops.py'>'.
    """
    self.assertRegex(str(torch.ops), r'ops')
def test_print_classes_module(self):
    """The repr of torch.classes mentions 'classes'."""
    self.assertRegex(str(torch.classes), r'classes')
def test_print_torch_ops_modules(self):
    """The reprs of torch._ops.ops namespaces mention 'torch.ops'."""
    for namespace in (torch._ops.ops.quantized, torch._ops.ops.atan):
        self.assertRegex(str(namespace), r'torch.ops')
@unittest.skipIf(IS_WINDOWS, 'TODO: fix occasional windows failure')
def test_profiler(self):
    """The autograd profiler attributes events from a traced function and
    its forked copy to two distinct threads, each with an `other_fn`
    entry whose time covers the contained aten::mul."""
    prev_opt = torch._C._get_graph_executor_optimize()
    torch._C._set_graph_executor_optimize(False)

    def other_fn(x):
        return x * 2

    x = torch.rand(3, 4)
    traced_other_fn = torch.jit.trace(other_fn, x)

    def fn(x):
        y = traced_other_fn(x)
        fut = torch.jit._fork(traced_other_fn, x)
        y = torch.jit._wait(fut)
        return y

    traced_fn = torch.jit.trace(fn, x)
    with torch.autograd.profiler.profile() as prof:
        traced_fn(x)

    # expecting to see other_fn TS function call
    # with cpu time >= mul cpu time and
    # a forked other_fn
    mul_events = defaultdict(int)
    other_fn_events = defaultdict(int)
    for e in prof.function_events:
        if e.name == "aten::mul":
            self.assertTrue(e.thread not in mul_events)
            mul_events[e.thread] = e.time_range.elapsed_us()
        elif e.name == "other_fn":
            self.assertTrue(e.thread not in other_fn_events)
            other_fn_events[e.thread] = e.time_range.elapsed_us()

    # one direct call and one forked call -> two threads each
    self.assertTrue(len(mul_events) == 2)
    self.assertTrue(len(other_fn_events) == 2)

    for thread, mul_time in mul_events.items():
        self.assertTrue(thread in other_fn_events)
        self.assertTrue(other_fn_events[thread] >= mul_time)

    # restore the global executor setting changed at the top
    torch._C._set_graph_executor_optimize(prev_opt)
def test_hide_source_ranges_context_manager(self):
    """_hide_source_ranges suppresses '# file.py' source comments in graph
    reprs only inside the context; an explicit print_source_ranges=True
    still overrides it."""
    @torch.jit.script
    def foo(x):
        return torch.add(x, x)

    graph = foo.graph
    source_range_regex = "# .*\\.py"
    self.assertRegex(graph.__repr__(), source_range_regex)
    with torch.jit._hide_source_ranges():
        self.assertNotRegex(graph.__repr__(), source_range_regex)
        self.assertRegex(graph.str(print_source_ranges=True), source_range_regex)
    # ranges reappear once the context exits
    self.assertRegex(graph.__repr__(), source_range_regex)
class TestFrontend(JitTestCase):
    """Tests for TorchScript frontend error reporting."""

    def test_instancing_error(self):
        """Scripting a module that instantiates an @torch.jit.ignore'd class
        raises a FrontendError whose message names the class and points at
        the offending forward()."""
        @torch.jit.ignore
        class MyScriptClass(object):
            def unscriptable(self):
                return "a" + 200  # invalid, but ignored classes aren't compiled

        class TestModule(torch.nn.Module):
            def __init__(self):
                super(TestModule, self).__init__()

            def forward(self, x):
                return MyScriptClass()

        with self.assertRaises(torch.jit.frontend.FrontendError) as cm:
            torch.jit.script(TestModule())

        checker = FileCheck()
        checker.check("Cannot instantiate class")
        checker.check("def forward")
        checker.run(str(cm.exception))
class TestScript(JitTestCase):
# Tests that calling torch.jit.script repeatedly on a function is allowed
# (both as stacked decorators and as nested calls) and is a no-op.
def test_repeated_script_on_function(self):
    @torch.jit.script
    @torch.jit.script
    def fn(x):
        return x

    torch.jit.script(torch.jit.script(fn))
def test_pretty_print_function(self):
    """Pretty-printed `.code` of a scripted function names the functional
    op it calls."""
    @torch.jit.script
    def foo(x):
        return torch.nn.functional.interpolate(x)

    FileCheck().check("interpolate").run(foo.code)
def test_inlined_graph(self):
    """
    Check that the `inlined_graph` property correctly returns an inlined
    graph, both through function calls and method calls: the sub/add/mul
    ops from the nested module, the free function, and forward itself all
    appear in one graph.
    """
    @torch.jit.script
    def foo(x):
        return torch.add(x, x)

    class MyNestedMod(torch.nn.Module):
        def __init__(self):
            super(MyNestedMod, self).__init__()

        def forward(self, x):
            return torch.sub(x, x)

    class MyMod(torch.nn.Module):
        def __init__(self):
            super(MyMod, self).__init__()
            self.nested = MyNestedMod()

        def forward(self, x):
            x = self.nested(x)  # sub
            x = foo(x)  # add
            return torch.mul(x, x)

    m = torch.jit.script(MyMod())
    FileCheck().check("aten::sub") \
        .check("aten::add") \
        .check("aten::mul") \
        .run(m.inlined_graph)
def test_static_method_on_module(self):
    """
    Check that the `@staticmethod` annotation on a function on a module
    works: the scripted call through `self.do_it` matches the eager
    staticmethod call.
    """
    class MyCell(torch.nn.Module):
        def __init__(self):
            super(MyCell, self).__init__()

        @staticmethod
        def do_it(x, h):
            new_h = torch.tanh(x + h)
            return new_h, new_h

        def forward(self, x, h):
            return self.do_it(x, h)

    my_cell = torch.jit.script(MyCell())
    x = torch.rand(3, 4)
    h = torch.rand(3, 4)
    jitted_cell = my_cell(x, h)
    non_jitted_cell = MyCell().do_it(x, h)

    self.assertEqual(jitted_cell, non_jitted_cell)
def test_code_with_constants(self):
    """
    Check that the `code_with_constants` property correctly returns graph CONSTANTS in the
    CONSTANTS.cN format used in the output of the `code` property.
    """
    @torch.jit.script
    def foo(x=torch.ones(1)):
        return x

    class Moddy(torch.nn.Module):
        def __init__(self):
            super(Moddy, self).__init__()

        def forward(self, x):
            return foo()

    m = torch.jit.script(Moddy())
    src, CONSTANTS = m.code_with_constants

    # the captured tensor default becomes constant c0
    self.assertEqual(CONSTANTS.c0, torch.ones(1))
    self.assertEqual(src, m.code)
def test_code_with_constants_restore(self):
    """
    Check that the `code_with_constants` property correctly works on restoration after save() + load()
    """
    @torch.jit.script
    def foo(x=torch.ones(1)):
        return x

    class Moddy(torch.nn.Module):
        def __init__(self):
            super(Moddy, self).__init__()

        def forward(self, x):
            return foo()

    m = torch.jit.script(Moddy())
    src, CONSTANTS = m.code_with_constants
    eic = self.getExportImportCopy(m)

    src_eic, CONSTANTS_eic = eic.code_with_constants

    # source and constant table must survive the round trip unchanged
    self.assertEqual(src, src_eic)
    self.assertEqual(CONSTANTS.c0, CONSTANTS_eic.c0)
def test_oneline_func(self):
    # the entire def on one line exercises the frontend's handling of
    # single-line function bodies — do not reformat
    def fn(x): return x  # noqa: E704

    self.checkScript(fn, (torch.ones(2, 2), ))
def test_request_bailout(self):
    """Forcing each recorded bailout point in turn still produces the
    correct result from the profiled-and-optimized execution plan."""
    with enable_profiling_mode_for_profiling_tests():

        def fct_loop(x):
            for i in range(3):
                x = torch.cat((x, x), 0)
            return x

        x = torch.ones(2, 3, 4, dtype=torch.float32)
        expected = fct_loop(x)
        jitted = torch.jit.script(fct_loop)
        # profile
        jitted(x)
        # optimize
        jitted(x)
        dstate = jitted.get_debug_state()
        eplan = get_execution_plan(dstate)
        num_bailouts = eplan.code.num_bailouts()
        for i in range(0, num_bailouts):
            eplan.code.request_bailout(i)
            self.assertEqual(jitted(x), expected)
@unittest.skip("bailouts are being deprecated")
def test_dominated_bailout(self):
    """Bailout placement: guards dominated by an earlier guard are elided
    for functional values, but kept across mutation (add_) and across
    grad-mode toggles (currently skipped: bailouts being deprecated)."""
    with enable_profiling_mode_for_profiling_tests():
        # functional dominated guard
        @torch.jit.script
        def foo(x):
            dim = x.dim()
            if dim == 0:
                y = int(x)
            else:
                y = x.size()[dim - 1]
            return y

        x = torch.zeros(2)
        self.assertEqual(foo(x), 2)
        self.assertEqual(foo(x), 2)
        g = torch.jit.last_executed_optimized_graph()
        g_s = str(g)
        g_s = g_s[0:g_s.find("return")]
        # only a single guard before the return
        FileCheck().check_count("prim::BailOut[", 1, exactly=True).run(g_s)

        # dominated guard of non-functional value
        @torch.jit.script
        def foo(x):
            dim = x.dim()
            x.add_(3)
            if dim == 0:
                return 0
            else:
                return x.size()[dim - 1]

        x = torch.zeros(2)
        self.assertEqual(foo(x), 2)
        self.assertEqual(foo(x), 2)
        g = torch.jit.last_executed_optimized_graph()
        # the in-place add_ forces a second guard after it
        FileCheck().check("prim::BailOut[").check("aten::add_").check_next("prim::BailOut[").check("return").run(g)

        with torch.enable_grad():
            @torch.jit.ignore
            def disable_grad():
                torch.set_grad_enabled(False)

            @torch.jit.ignore
            def enable_grad():
                torch.set_grad_enabled(True)

            @torch.jit.script
            def foo(x):
                x = x + 1
                dim = x.dim()
                disable_grad()
                if dim == 0:
                    y = int(x)
                else:
                    y = x.size()[dim - 1]
                enable_grad()
                return y

            x = torch.zeros(2, requires_grad=True)
            self.assertEqual(foo(x), 2)
            self.assertEqual(foo(x), 2)
            g = torch.jit.last_executed_optimized_graph()
            # there should still be a Bailout after disable_grad call
            FileCheck().check("disable_grad").check("BailOut[").check("BailoutTemplate").run(g)
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, "skip if profiling isn't enabled")
def test_profiling_merge(self):
    """Shape profiles from two runs with different first dimensions merge
    into a wildcard ('*') rather than keeping the first concrete size."""
    @torch.jit.script
    def test_not_const(x):
        if x.size(0) == 1:
            return 1
        else:
            return 2

    with enable_profiling_mode_for_profiling_tests():
        with num_profiled_runs(2):
            test_not_const(torch.rand([1, 2]))
            test_not_const(torch.rand([2, 2]))

            graph_str = torch.jit.last_executed_optimized_graph()
            FileCheck().check("profiled_type=Double(*, 2, strides=[2, 1], requires_grad=0, device=cpu").run(graph_str)
            FileCheck().check_not("profiled_type=Double(1, 2, strides=[2, 1], requires_grad=0, device=cpu").run(graph_str)
def test_nested_bailouts(self):
    """Smoke test: tracing through an already-scripted loop function (which
    may contain bailout nodes) succeeds and runs."""
    @torch.jit.script
    def fct_loop(x):
        for i in range(3):
            x = torch.cat((x, x), 0)
        return x

    x = torch.ones(2, 3, 4, dtype=torch.float32)
    out = fct_loop(x)
    jit_trace = torch.jit.trace(fct_loop, x)
    out_trace = jit_trace(x)
def test_no_self_arg_ignore_function(self):
    """An @torch.jit.ignore'd method without a `self` parameter is rejected
    with a clear error when the module is scripted."""
    class MyModule(nn.Module):
        @torch.jit.ignore  # noqa: B902
        def call_np():  # noqa: B902
            # type: () -> int
            return np.random.choice(2, p=[.95, .05])

        def forward(self):
            return self.call_np()

    with self.assertRaisesRegex(Exception, "does not have a self argument"):
        torch.jit.script(MyModule())
def test_loop_liveness(self):
    """Regression smoke test: re-running a scripted loop with a different
    argument under profiling must not miscompile value liveness."""
    with enable_profiling_mode_for_profiling_tests():
        @torch.jit.script
        def f(i):
            # type: (int) -> Tensor
            l = []
            for n in [2, 1]:
                l.append(torch.zeros(n, i))

            return l[0]

        f(2)
        f(1)
def test_bailout_loop_carried_deps_name_clash(self):
    """Loop-carried dependencies keep correct values across bailouts even
    when names clash: counters[i] == i + j == 2*i since j tracks i."""
    with enable_profiling_mode_for_profiling_tests():
        NUM_ITERATIONS = 10

        @torch.jit.script
        def fct_loop(z, size):
            # type: (int, int) -> Tuple[Tensor, List[int]]
            counters = torch.jit.annotate(List[int], [])
            j = 0
            y = torch.ones(2)
            for i in range(size):
                counters.append(i + j)
                y = torch.cat((y, torch.ones(z)), 0)
                j = j + 1
            return y, counters

        inputs = [1, 2, 3, 4]
        expected = [x * 2 for x in range(NUM_ITERATIONS)]

        for inp in inputs:
            results = fct_loop(inp, NUM_ITERATIONS)
            self.assertEqual(results[1], expected)
def test_bailout_loop_counter_transition(self):
    """The loop induction variable transitions correctly across bailout
    points: counters must be exactly range(NUM_ITERATIONS)."""
    with enable_profiling_mode_for_profiling_tests():
        NUM_ITERATIONS = 10

        @torch.jit.script
        def fct_loop(z, size):
            # type: (int, int) -> Tuple[Tensor, List[int]]
            counters = torch.jit.annotate(List[int], [])
            y = torch.ones(2)
            for i in range(size):
                counters.append(i)
                y = torch.cat((y, torch.ones(z)), 0)
            return y, counters

        inputs = [1, 2, 3, 4]
        expected = list(range(NUM_ITERATIONS))

        for inp in inputs:
            results = fct_loop(inp, NUM_ITERATIONS)
            self.assertEqual(results[1], expected)
def test_ignored_method_binding(self):
    """@torch.jit.export'd and @torch.jit.ignore'd methods on a scripted
    module both see attribute updates made through exported setters."""
    class Bar(torch.nn.Module):
        def __init__(self):
            super(Bar, self).__init__()
            self.x : int = 0

        @torch.jit.export
        def setx(self, x : int):
            self.x = x

        @torch.jit.export
        def getx(self):
            return self.x

        @torch.jit.ignore
        def ignored_getx(self):
            return self.x

    b = Bar()
    b.setx(123)
    sb = torch.jit.script(b)
    self.assertEqual(sb.getx(), 123)
    self.assertEqual(sb.ignored_getx(), 123)

    sb.setx(456)
    self.assertEqual(sb.getx(), 456)
    self.assertEqual(sb.ignored_getx(), 456)
def test_set_attribute_through_optional(self):
    """An @torch.jit.ignore'd method can assign through an Optional[Tensor]
    attribute, and the new value is visible on the scripted module."""
    class A(torch.nn.Module):
        __annotations__ = {"x": Optional[torch.Tensor]}

        def __init__(self):
            super(A, self).__init__()
            self.x = None

        @torch.jit.ignore
        def foo(self):
            if self.x is None:
                self.x = torch.tensor([3])
            return self.x

        def forward(self, x):
            a = self.foo()
            return x + 1

    m = torch.jit.script(A())
    self.assertEqual(m.x, None)
    m(torch.rand(1))  # forward triggers foo(), which sets self.x
    self.assertEqual(m.x, torch.tensor([3]))
def test_mutate_constant(self):
    """Assigning to a __constants__ attribute of a ScriptModule raises."""
    class M(torch.jit.ScriptModule):
        __constants__ = ["foo"]

        def __init__(self, foo):
            super(M, self).__init__()
            self.foo = foo

    m = M(5)
    # m has a constant attribute, but we can't
    # assign to it
    with self.assertRaises(RuntimeError):
        m.foo = 6
def test_class_attribute(self):
    """A class-level attribute is readable from __init__ (python-side) on a
    ScriptModule."""
    class M(torch.jit.ScriptModule):
        FOO = 0

        def __init__(self):
            super(M, self).__init__()
            self.foo = self.FOO
    m = M()
    self.assertEqual(m.foo, M.FOO)
def test_class_attribute_in_script(self):
    """Referencing a bare class-level attribute from a script_method is not
    supported and raises at module construction."""
    class M(torch.jit.ScriptModule):
        FOO = 0

        def __init__(self):
            super(M, self).__init__()

        @torch.jit.script_method
        def forward(self):
            return self.FOO
    with self.assertRaises(RuntimeError):
        M()
def test_not_initialized_err(self):
    """Setting attributes on a ScriptModule whose __init__ never called
    super().__init__() raises."""
    class M(torch.jit.ScriptModule):
        def __init__(self):
            # deliberately missing super().__init__()
            self.foo = torch.rand(2, 3)
    with self.assertRaises(RuntimeError):
        M()
def test_attribute_in_init(self):
    """A torch.jit.Attribute assigned in __init__ behaves as its wrapped
    python value immediately (usable as a float here)."""
    class M(torch.jit.ScriptModule):
        def __init__(self):
            super(M, self).__init__()
            self.foo = torch.jit.Attribute(0.1, float)
            # we should be able to use self.foo as a float here
            assert 0.0 < self.foo
    M()
def test_scriptable_fn_as_attr(self):
    """A builtin torch function stored as a module attribute is callable
    from a scripted forward (checked against eager via checkModule)."""
    class M(torch.nn.Module):
        def __init__(self, fn):
            super(M, self).__init__()
            self.fn = fn

        def forward(self, x):
            return self.fn(x)

    m = M(torch.sigmoid)
    inp = torch.rand(2, 3)
    self.checkModule(m, (inp, ))
def test_sequence_parsing(self):
    """Frontend parsing of comma-separated sequences: trailing commas in
    lists/tuples/calls/assignments parse; malformed sequences produce the
    expected error messages (str result == expected error regex)."""
    tests = [
        ("return [x, x,]", True),
        ("return [x x]", "expected ]"),
        ("return x, x,", True),
        ("return bar(x, x,)", True),
        ("return bar()", "Argument x not provided"),
        ("for a, b, in x, x,:\n        pass", "List of iterables"),
        ("a, b, = x, x,\n        return a + b", True)
    ]
    for exp, result in tests:
        cu = torch.jit.CompilationUnit()
        full = """
def bar(x, y):
    return x + y

def foo(x):
    {}
""".format(exp)
        if isinstance(result, str):
            with self.assertRaisesRegex(RuntimeError, result):
                cu.define(full)
        else:
            cu.define(full)
def test_namedtuple_python(self):
    """NamedTuple return types work across @torch.jit.unused boundaries:
    the scripted caller compiles against the python-declared namedtuple."""
    global MyTuple, MyMod  # see [local resolution in python]
    MyTuple = namedtuple('MyTuple', ['a'])

    @torch.jit.unused
    def fn():
        # type: () -> MyTuple
        return MyTuple(1)

    # Only check compilation
    @torch.jit.script
    def fn2():
        # type: () -> MyTuple
        return fn()

    FileCheck().check("NamedTuple").run(fn2.graph)

    class MyMod(torch.nn.Module):
        def __init__(self):
            super(MyMod, self).__init__()

        @torch.jit.unused
        def fn(self):
            # type: () -> MyTuple
            return MyTuple(1)

        def forward(self, x):
            if 1 == 1:
                return MyTuple(torch.rand(2, 3))
            else:
                return self.fn()

    # shouldn't throw a type error
    torch.jit.script(MyMod())
def test_unused_decorator(self):
    """@torch.jit.unused stacked on another decorator still marks the
    method unused, so its invalid body is never compiled."""
    class MyMod(torch.nn.Module):
        def __init__(self):
            super(MyMod, self).__init__()

        @torch.jit.unused
        @torch.no_grad()
        def fn(self, x):
            # type: (Tensor) -> int
            return next(x)  # invalid, but should be ignored

        def forward(self, x):
            return self.fn(x)

    torch.jit.script(MyMod())
@_inline_everything
def test_lazy_script(self):
    """torch.jit.script_if_tracing defers scripting until trace time.

    Without it, tracing through data-dependent control flow bakes in one
    branch and loses the prints; with it, the wrapped function is scripted
    during the trace and both branches survive. Also checks that an
    `script_if_tracing` function can be scripted directly and called from
    other scripted code.
    """
    def untraceable(x):
        if x.ndim > 2:
            print("hello")
        else:
            print("goodbye")
        return x + 2

    # Non-working example: a plain trace drops the control flow entirely.
    def fn(x):
        return untraceable(x)

    with self.capture_stdout():
        traced_bad = torch.jit.trace(fn, [torch.ones(2, 2)])

    FileCheck().check_not("goodbye").check_not("hello").run(traced_bad.graph)

    # Working example: wrap in script_if_tracing so the call gets scripted.
    untraceable = torch.jit.script_if_tracing(untraceable)

    def fn2(x):
        return untraceable(x)

    with self.capture_stdout():
        # BUG FIX: trace fn2 (the wrapper defined for the working example)
        # instead of fn — the original traced fn and left fn2 dead; it only
        # passed because fn closed over the rebound local `untraceable`.
        traced = torch.jit.trace(fn2, [torch.ones(2, 2)])

    FileCheck().check("goodbye").run(traced.graph)

    def foo(x: int):
        return x + 1

    @torch.jit.script_if_tracing
    def fee(x: int = 2):
        return foo(1) + x

    # test directly compiling function
    fee_compiled = torch.jit.script(fee)
    self.assertEqual(fee_compiled(), fee())

    # test compiling it within another function
    @torch.jit.script
    def hum():
        return fee(x=3)

    self.assertEqual(hum(), 5)
def test_big_int_literals(self):
    """Integer literals up to int64 max compile; anything larger raises an
    'out of range' error."""
    def ok():
        # signed 64 bit max
        a = 9223372036854775807
        return a

    def toobig():
        a = 9223372036854775808  # int64 max + 1
        return a

    def waytoobig():
        a = 99999999999999999999
        return a

    self.checkScript(ok, [])

    with self.assertRaisesRegex(RuntimeError, "out of range"):
        torch.jit.script(toobig)

    with self.assertRaisesRegex(RuntimeError, "out of range"):
        torch.jit.script(waytoobig)
def test_hex_literals(self):
    """Hex integer literals (including negated ones) compile; hex values
    beyond int64 range raise 'out of range'."""
    def test1():
        return 0xaaaaaa

    # NOTE(review): test2 is identical to test1 — possibly meant to use
    # uppercase hex digits; verify intent
    def test2():
        return 0xaaaaaa

    def test3():
        return -0xaaaaaa

    self.checkScript(test1, [])
    self.checkScript(test2, [])
    self.checkScript(test3, [])

    def ok():
        a = 0x7FFFFFFFFFFFFFFF  # int64 max
        return a

    def toobig():
        a = 0xFFFFFFFFFFFFFFFF
        return a

    def waytoobig():
        a = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
        return a

    self.checkScript(ok, [])

    with self.assertRaisesRegex(RuntimeError, "out of range"):
        torch.jit.script(toobig)

    with self.assertRaisesRegex(RuntimeError, "out of range"):
        torch.jit.script(waytoobig)
def test_big_float_literals(self):
    """Float literals beyond double range parse to inf in both the script
    decorator path and the CompilationUnit path, matching eager python."""
    def ok():
        # Python interprets this as inf
        a = 1.2E400
        return a

    def check(fn):
        self.assertTrue(fn() == ok())

    # checkScript doesn't work since assertEqual doesn't consider
    # `inf` == `inf`
    check(torch.jit.script(ok))

    cu = torch.jit.CompilationUnit()
    cu.define(dedent(inspect.getsource(ok)))
    check(cu.ok)
def _test_device_type(self, dest):
    """Shared helper: check Device.type/.index access in TorchScript for
    the device obtained by moving a tensor to `dest`."""
    def fn(x):
        # type: (Device) -> Tuple[str, Optional[int]]
        return x.type, x.index

    device = torch.ones(2).to(dest).device
    self.checkScript(fn, [device])
def test_device_type(self):
    """Device.type/.index for a CPU device."""
    self._test_device_type('cpu')
@unittest.skipIf(not RUN_CUDA, "Requires CUDA")
def test_device_type_cuda(self):
    """Device.type/.index for a CUDA device."""
    self._test_device_type('cuda')
def test_string_device_implicit_conversion(self):
    """A string argument is implicitly converted to torch.device by a
    scripted function; invalid device strings raise."""
    @torch.jit.script
    def fn(x: torch.device):
        return x

    self.assertEqual(fn("cpu"), torch.device("cpu"))
    with self.assertRaisesRegex(RuntimeError, "Expected one of"):
        fn("invalid_device")
def test_eval_python(self):
    """.eval() flips the training flag on both python- and C++-side state,
    for scripted nn.Modules and legacy ScriptModules, and the eval state
    survives save/load."""
    def _test(m):
        self.assertTrue(m(torch.ones(2, 2)))
        self.assertTrue(m.training)
        self.assertTrue(m._c.getattr('training'))

        m.eval()

        self.assertFalse(m.training)
        self.assertFalse(m._c.getattr('training'))
        self.assertFalse(m(torch.ones(2, 2)))

        buffer = io.BytesIO()
        torch.jit.save(m, buffer)
        buffer.seek(0)

        loaded = torch.jit.load(buffer)

        self.assertFalse(loaded.training)
        self.assertFalse(loaded._c.getattr('training'))

    class M(nn.Module):
        def __init__(self):
            super(M, self).__init__()

        def forward(self, x):
            return self.training

    class OldM(torch.jit.ScriptModule):
        def __init__(self):
            super(OldM, self).__init__()

        @torch.jit.script_method
        def forward(self, x):
            return self.training

    _test(torch.jit.script(M()))
    _test(OldM())
    def test_inherit_method(self):
        """script_methods are resolved through Python inheritance, including the MRO for multiple bases."""
        class A(torch.jit.ScriptModule):
            def __init__(self):
                super(A, self).__init__()

            @torch.jit.script_method
            def forward(self, x):
                return x + self.bar(x)

        class B(A):
            def __init__(self):
                super(B, self).__init__()

            @torch.jit.script_method
            def bar(self, x):
                return x * x

        with self.assertRaisesRegex(RuntimeError, 'attribute'):
            A()  # cannot use because bar is not defined

        v = torch.rand(3, 4)
        b = B()
        self.assertEqual(b(v), v + v * v)

        class C(torch.jit.ScriptModule):
            def __init__(self):
                super(C, self).__init__()

            @torch.jit.script_method
            def bar(self, x):
                return x

        class D(C, B):
            def __init__(self):
                super(D, self).__init__()

        # C.bar wins over B.bar in D's MRO, so forward computes x + x
        self.assertEqual(D()(v), v + v)
    def test_tensor_subclasses(self):
        """Legacy tensor-subclass annotations (torch.LongTensor etc.) script as plain Tensor and warn."""
        def check_subclass(x, tensor):
            template = dedent("""
            def func(input: {}) -> {}:
                return torch.zeros((input.shape[0], 1), dtype=input.dtype)
            """)

            self._check_code(template.format(x, x), "func", [tensor])

        check_subclass("torch.LongTensor", torch.LongTensor([[1, 2], [3, 4]]))
        check_subclass("torch.DoubleTensor", torch.DoubleTensor([[1.2, 2.3], [3.4, 4.5]]))
        check_subclass("torch.IntTensor", torch.IntTensor([[1, 2], [3, 4]]))
        check_subclass("torch.BoolTensor", torch.BoolTensor([[False, True], [True, False]]))

        def check_subclass_warn(input: torch.LongTensor) -> torch.LongTensor:
            return torch.zeros((input.shape[0], 1), dtype=input.dtype)

        # scripting a subclass-annotated function must emit the treat-as-Tensor warning
        with warnings.catch_warnings(record=True) as warns:
            scripted = torch.jit.script(check_subclass_warn)
        FileCheck().check("TorchScript will treat type annotations of Tensor").run(str(warns[0]))
    def test_first_class_module(self):
        """A script_method may reassign a Parameter attribute; the new value is observable from Python."""
        class Foo(torch.jit.ScriptModule):
            def __init__(self):
                super(Foo, self).__init__()
                self.foo = nn.Parameter(torch.rand(3, 4))

            @torch.jit.script_method
            def forward(self, input):
                self.foo = input
                return self.foo

        foo = Foo()
        input = torch.rand(3, 4)
        foo.forward(input)
        # the attribute write inside forward must be reflected on the module
        self.assertEqual(input, foo.foo)
    @_tmp_donotuse_dont_inline_everything
    def test_first_class_calls(self):
        """Script functions can construct script classes and call through them without inlining."""
        @torch.jit.script
        class Foo(object):
            def __init__(self, x):
                self.bar = x

            def stuff(self, x):
                return self.bar + x

        @torch.jit.script
        def foo(x):
            return x * x + Foo(x).stuff(2 * x)

        @torch.jit.script
        def bar(x):
            return foo(x) * foo(x)

        x = torch.rand(3, 4)
        # foo(x) == x*x + (x + 2x) == x*x + 3x
        self.assertEqual(bar(x), (x * x + 3 * x) * (x * x + 3 * x))
    def test_static_methods(self):
        """@staticmethod calls, including cross-class ones, script the same as eager."""
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()

            @staticmethod
            def my_method(x):
                return x + 100

            def forward(self, x):
                return x + M.my_method(x)

        class N(nn.Module):
            def __init__(self):
                super(N, self).__init__()

            @staticmethod
            def my_method(x):
                return x * 100

            def forward(self, x):
                # N.forward also calls M's static method across classes
                return x - M.my_method(x) + N.my_method(x)

        self.checkModule(M(), (torch.ones(2, 2),))
        self.checkModule(N(), (torch.ones(2, 2),))
    def test_invalid_prefix_annotation(self):
        """Malformed `# type:` comment prefixes must produce a parse-time error naming the line."""
        # Each inner function intentionally carries a broken annotation comment;
        # the exact comment text is the test input, so nothing may be added inside them.
        with self.assertRaisesRegex(RuntimeError, "annotation prefix in line"):
            with self.capture_stdout() as captured:
                @torch.jit.script
                def invalid_prefix_annotation1(a):
                    #type: (Int) -> Int # noqa: E265
                    return a + 2

        with self.assertRaisesRegex(RuntimeError, "annotation prefix in line"):
            with self.capture_stdout() as captured:
                @torch.jit.script
                def invalid_prefix_annotation2(a):
                    #type : (Int) -> Int # noqa: E265
                    return a + 2

        with self.assertRaisesRegex(RuntimeError, "annotation prefix in line"):
            with self.capture_stdout() as captured:
                @torch.jit.script
                def invalid_prefix_annotation3(a):
                    #     type: (Int) -> Int
                    return a + 2
    def test_builtin_function_attributes(self):
        """A builtin (torch.add) stored as a module attribute is callable from a scripted forward."""
        class Add(nn.Module):
            def __init__(self):
                super(Add, self).__init__()
                self.add = torch.add

            def forward(self, input):
                return self.add(input, input)

        self.checkModule(Add(), [torch.randn(2, 2)])
def test_pybind_type_comparisons(self):
@torch.jit.script
def f():
return None
node = list(f.graph.nodes())[0]
t = node.outputsAt(0).type()
self.assertIsNotNone(t)
    @unittest.skipIf(IS_WINDOWS and sys.version_info >= (3, 8), 'TODO: need to fix the test case')
    def test_unmatched_type_annotation(self):
        """A `# type:` comment with more entries than parameters errors, with a caret pointing at the def."""
        # message2/message3 regexes encode the exact source layout of the inner
        # functions, so their bodies must not be altered or commented.
        message1 = re.escape("Number of type annotations (2) did not match the number of function parameters (1):")
        message2 = 'def invalid2\\(a\\):\n\\s*~+\\.*\\s+<--- HERE\n\\s+# type: \\(Int, Int\\) -> Int\n\\s+return a \\+ 2'
        message3 = 'def invalid4\\(a\\):\n\\s*~+\\.*\\s+<--- HERE\n\\s+# type: \\(Int, Int\\) -> Int\n\\s+return a \\+ 2'

        with self.assertRaisesRegex(RuntimeError, message1):
            @torch.jit.script
            def invalid1(a):
                # type: (Int, Int) -> Int
                return a + 2

        with self.assertRaisesRegex(RuntimeError, message2):
            @torch.jit.script
            def invalid2(a):
                # type: (Int, Int) -> Int
                return a + 2

        with self.assertRaisesRegex(RuntimeError, message1):
            def invalid3(a):
                # type: (Int, Int) -> Int
                return a + 2
            torch.jit.script(invalid3)

        with self.assertRaisesRegex(RuntimeError, message3):
            def invalid4(a):
                # type: (Int, Int) -> Int
                return a + 2
            torch.jit.script(invalid4)
def test_is_optional(self):
ann = Union[List[int], List[float]]
torch._jit_internal.is_optional(ann)
    def test_interpreter_fuzz(self):
        """Fuzz the interpreter with 100 randomly generated programs and compare against eager exec."""
        import builtins
        # This test generates random tree-like programs to fuzz test
        # that the interpreter does not have a bug in its stack manipulation
        # code. An assert in that code ensures individual operators are
        # not reordered.
        templates = [
            "torch.rand(3, 4)",
            "({} + {})",
            "-{}",
            "({} * {})",
            "torch.tanh({})",
            "VAR {}",
        ]

        def gen_code():
            src_lines = ['def f():']
            exprs = []
            n_variables = 0

            # swap-and-pop removal so consumed sub-expressions are used exactly once
            def get_expr(idx):
                elem = exprs[idx]
                exprs[idx] = exprs[-1]
                exprs.pop()
                return elem

            def select_expr_or_var():
                idx = random.randrange(0, len(exprs) + n_variables)
                if idx < len(exprs):
                    return get_expr(idx)
                else:
                    return 'v{}'.format(idx - len(exprs))

            for i in range(50):
                n = None
                # re-draw until the template's arity fits the available operands
                while n is None or n > len(exprs) + n_variables:
                    template = random.choice(templates)
                    n = template.count('{}')

                if 'VAR' in template:
                    src_lines.append('  v{} = {}'.format(n_variables, select_expr_or_var()))
                    n_variables += 1
                else:
                    exprs.append(template.format(*(select_expr_or_var() for _ in range(n))))

            src_lines.append('  return ({})\n'.format(''.join('v{},'.format(i) for i in range(n_variables))))
            return '\n'.join(src_lines)

        for i in range(100):
            g = {'torch': torch}
            code = gen_code()
            builtins.exec(code, g, None)
            cu = torch.jit.CompilationUnit(code)
            # identical RNG state for both executions so torch.rand agrees
            with freeze_rng_state():
                o1 = g['f']()
            with freeze_rng_state():
                o2 = cu.f()
            self.assertEqual(o1, o2)
    @skipIfTorchDynamo("TorchDynamo fails with unknown reason")
    def test_cpp_module_iterator(self):
        """The C++ debug module iterators enumerate params/buffers/children/attributes, flat and recursive."""
        a = nn.Module()
        a.name = 'a'
        a.p = nn.Parameter(torch.rand(3, 4))
        a.foo = nn.Module()
        a.foo.name = 'foo'
        a.foo.register_buffer('b', torch.rand(1, 1))
        a.foo.bar = nn.Module()
        a.foo.bar.name = 'bar'
        a.foo.bar.an_int = 4
        a.another = nn.Module()
        a.another.name = 'another'
        sa = torch.jit.script(a)
        result = torch._C._jit_debug_module_iterators(sa._c)

        # map tensors and submodules to stable labels so `expected` below is readable
        def replace(e):
            if e is a.p:
                return 'P'
            elif e is a.foo.b:
                return 'B'
            elif isinstance(e, torch._C.ScriptModule):
                return e.getattr('name')

            return e
        for k, v in result.items():
            for i in range(len(v)):
                if isinstance(v[i], tuple):
                    n, v2 = v[i]
                    v[i] = (n, replace(v2))
                else:
                    v[i] = replace(v[i])
            # module type creation is not deterministic, so we have to sort
            # the result
            v.sort()
        expected = {'buffers': [],
                    'buffers_r': ['B'],
                    'children': ['another', 'foo'],
                    'modules': ['a', 'another', 'bar', 'foo'],
                    'named_attributes': [('_is_full_backward_hook', None),
                                         ('another', 'another'),
                                         ('foo', 'foo'),
                                         ('name', 'a'),
                                         ('p', 'P'),
                                         ('training', True)],
                    'named_attributes_r': [('_is_full_backward_hook', None),
                                           ('another', 'another'),
                                           ('another._is_full_backward_hook', None),
                                           ('another.name', 'another'),
                                           ('another.training', True),
                                           ('foo', 'foo'),
                                           ('foo._is_full_backward_hook', None),
                                           ('foo.b', 'B'),
                                           ('foo.bar', 'bar'),
                                           ('foo.bar._is_full_backward_hook', None),
                                           ('foo.bar.an_int', 4),
                                           ('foo.bar.name', 'bar'),
                                           ('foo.bar.training', True),
                                           ('foo.name', 'foo'),
                                           ('foo.training', True),
                                           ('name', 'a'),
                                           ('p', 'P'),
                                           ('training', True)],
                    'named_buffers': [],
                    'named_buffers_r': [('foo.b', 'B')],
                    'named_children': [('another', 'another'), ('foo', 'foo')],
                    'named_modules': [('', 'a'),
                                      ('another', 'another'),
                                      ('foo', 'foo'),
                                      ('foo.bar', 'bar')],
                    'named_parameters': [('p', 'P')],
                    'named_parameters_r': [('p', 'P')],
                    'parameters': ['P'],
                    'parameters_r': ['P']}
        self.assertEqual(expected, result)
def test_parameter_order(self):
m = nn.Module()
for i, name in enumerate(string.ascii_letters):
setattr(m, name, nn.Parameter(torch.tensor([float(i)])))
ms = torch.jit.script(m)
print(torch.cat(list(m.parameters())))
print(torch.cat(list(ms.parameters())))
self.assertEqual(list(m.parameters()), list(ms.parameters()))
    def test_python_op_builtins(self):
        """A script function may reference an @torch.jit.unused Python function that uses builtins."""
        @torch.jit.unused
        def fn(x):
            # type: (List[int]) -> int
            return sum(x)

        # scripting must succeed even though fn's body is never compiled
        @torch.jit.script
        def script_fn(x):
            # type: (List[int]) -> int
            return fn(x)
def test_submodule_twice(self):
@torch.jit.script
def foo(x):
return x * x
class What(torch.jit.ScriptModule):
def __init__(self, x):
super(What, self).__init__()
self.foo = x
a = What(foo)
c = What(foo)
    def test_training_param(self):
        """`self.training` is usable (twice) in a script_method and is not serialized in state_dict."""
        class What(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                # type: (int) -> int
                if self.training:
                    r = x
                else:
                    r = x + 4
                # check double use of training
                if self.training:
                    r = r + 1
                return r

        w = What()
        self.assertEqual(4, w(3))
        w.train(False)
        self.assertEqual(7, w(3))
        # training is a flag, not a persistent buffer
        self.assertFalse("training" in w.state_dict())
    def test_class_as_attribute(self):
        """A script-class instance stored as a module attribute survives scripting and export/import."""
        @torch.jit.script
        class Foo321(object):
            def __init__(self):
                self.x = 3

        class FooBar1234(torch.nn.Module):
            def __init__(self):
                super(FooBar1234, self).__init__()
                self.f = Foo321()

            def forward(self, x):
                return x + self.f.x

        scripted = torch.jit.script(FooBar1234())
        eic = self.getExportImportCopy(scripted)
        x = torch.rand(3, 4)
        self.assertEqual(scripted(x), eic(x))
def test_module_str(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
f = torch.jit.script(Foo())
self.assertEqual('ScriptObject', str(f._c))
    def test_jitter_bug(self):
        """Regression test: calling a script fn whose branches assign a list from literal vs argument compiles."""
        @torch.jit.script
        def fn2(input, kernel_size):
            # type: (Tensor, List[int]) -> Tensor
            if kernel_size[0] > 1:
                _stride = [2]
            else:
                _stride = kernel_size
            print(_stride, kernel_size)
            return input

        @torch.jit.script
        def fn(input):
            # type: (Tensor) -> Tensor
            return fn2(input, [1])
    def test_parser_kwargonly(self):
        """Keyword-only params (`*`) parse into the schema; positional use of them is rejected."""
        cu = torch.jit.CompilationUnit('''
            def foo(x, *, y) -> Tuple[Tensor, Tensor]:
                return x, x
            def bar(x):
                return foo(x, y=x)
        ''')
        self.assertTrue('*' in str(cu.foo.schema))
        with self.assertRaisesRegex(RuntimeError, "not provided"):
            torch.jit.CompilationUnit('''
                def foo(x, *, y) -> Tuple[Tensor, Tensor]:
                    return x, x
                def bar(x):
                    return foo(x, x)
            ''')
    def test_annoying_doubles(self):
        """Edge-case doubles (pi, inf, -inf, denormal, nan) round-trip bit-exactly through save/load."""
        mod = types.ModuleType("temp")
        mod.inf = float("inf")
        mod.ninf = float("-inf")
        mod.nan = float("nan")

        with torch._jit_internal._disable_emit_hooks():
            class Foo(torch.jit.ScriptModule):
                @torch.jit.script_method
                def forward(self):
                    return math.pi, 0.1, mod.inf, mod.ninf, 2.225073858507201e-308, mod.nan

            foo = Foo()
            buffer = io.BytesIO()
            torch.jit.save(foo, buffer)
            buffer.seek(0)
            foo_loaded = torch.jit.load(buffer)

            r = foo()
            r2 = foo_loaded()
            # use precise assert, we are checking floating point details
            self.assertTrue(r[:-1] == r2[:-1])
            # nan != nan, so the last element is compared via isnan
            self.assertTrue(math.isnan(r[-1]) and math.isnan(r2[-1]))
def test_type_annotate(self):
def foo(a):
return torch.jit.annotate(torch.Tensor, a)
self.checkScript(foo, (torch.rand(3),))
def bar():
a = torch.jit.annotate(List[int], [])
for _ in range(10):
a.append(4)
return a
self.checkScript(bar, ())
def baz(a):
return torch.jit.annotate(float, a)
self.checkScript(baz, (torch.rand(()),))
# test annotate none types
def annotate_none():
return torch.jit.annotate(Optional[torch.Tensor], None)
self.checkScript(annotate_none, ())
def test_robust_op_resolution(self):
neg = torch.add # misleading name to make sure we resolve by function
def stuff(x):
return neg(x, x)
a = (torch.rand(3),)
self.checkScript(stuff, a)
    def test_nested_aug_assign(self):
        """Augmented assignment on nested module attributes works for __iadd__, __add__ fallback, and lists."""
        @torch.jit.script
        class SomeClass(object):
            def __init__(self):
                self.num = 99

            def __iadd__(self, x):
                # type: (int)
                self.num += x
                return self

            def __eq__(self, other):
                # type: (SomeClass) -> bool
                return self.num == other.num

        # no __iadd__, so += must fall back to __add__
        @torch.jit.script
        class SomeOutOfPlaceClass(object):
            def __init__(self):
                self.num = 99

            def __add__(self, x):
                # type: (int)
                self.num = x
                return self

            def __eq__(self, other):
                # type: (SomeClass) -> bool
                return self.num == other.num

        class Child(nn.Module):
            def __init__(self):
                super().__init__()
                self.x = 2
                self.o = SomeClass()
                self.oop = SomeOutOfPlaceClass()
                self.list = [1, 2, 3]

        class A(nn.Module):
            def __init__(self):
                super().__init__()
                self.child = Child()

            def forward(self):
                self.child.x += 1
                self.child.o += 5
                self.child.oop += 5
                some_list = [1, 2]
                self.child.list += some_list
                self.child.list *= 2
                return self.child.x, self.child.o, self.child.list, self.child.oop

        a = A()
        sa = torch.jit.script(A())
        eager_result = a()
        script_result = sa()
        self.assertEqual(eager_result, script_result)
        self.assertEqual(a.child.x, sa.child.x)
        self.assertEqual(a.child.o, sa.child.o)
        self.assertEqual(a.child.list, sa.child.list)

        # neither __iadd__ nor __add__: += on an attribute must be a compile error
        @torch.jit.script
        class SomeNonAddableClass(object):
            def __init__(self):
                self.num = 99

            def __eq__(self, other):
                # type: (SomeClass) -> bool
                return self.num == other.num

        # with self.assertRaisesRegex(RuntimeError, "")
        class A(nn.Module):
            def __init__(self):
                super().__init__()
                self.x = SomeNonAddableClass()

            def forward(self):
                self.x += SomeNonAddableClass()
                return self.x

        with self.assertRaisesRegex(RuntimeError, "Cannot emit inplace op"):
            torch.jit.script(A())
    def test_var_aug_assign(self):
        """Augmented assignment on locals: identity is preserved for __iadd__/__add__-returning-self/lists/tensors."""
        # no __iadd__/__add__: += on a local must be a compile error
        @torch.jit.script
        class SomeNonAddableClass(object):
            def __init__(self):
                self.num = 99

            def __eq__(self, other):
                # type: (SomeNonAddableClass) -> bool
                return self.num == other.num

        with self.assertRaisesRegex(RuntimeError, "Cannot emit inplace op"):
            @torch.jit.script
            def fn():
                a = SomeNonAddableClass()
                a += SomeNonAddableClass()
                return a

        @torch.jit.script
        class SomeClass(object):
            def __init__(self):
                self.num = 99

            def __iadd__(self, x):
                # type: (int)
                self.num += x
                return self

            def __eq__(self, other):
                # type: (SomeClass) -> bool
                return self.num == other.num

        # __add__ that mutates and returns self, exercising the fallback path
        @torch.jit.script
        class SomeOutOfPlaceClass(object):
            def __init__(self):
                self.num = 99

            def __add__(self, x):
                # type: (int)
                self.num = x
                return self

            def __eq__(self, other):
                # type: (SomeClass) -> bool
                return self.num == other.num

        def fn2():
            a = SomeClass()
            a_copy = a
            a += 20
            assert a is a_copy
            b = SomeOutOfPlaceClass()
            b_copy = b
            b += 99
            assert b is b_copy
            c = [1, 2, 3]
            c_copy = c
            c *= 2
            assert c is c_copy
            c += [4, 5, 6]
            d = torch.ones(2, 2)
            d_copy = d
            d += torch.ones(2, 2)
            assert d is d_copy
            return a, b, c, d

        self.checkScript(fn2, [])
def test_nested_list_construct(self):
def foo():
return [[4]] + [[4, 5]]
self.checkScript(foo, ())
    def test_file_line_error(self):
        """Script compile errors must report the file name and the exact offending line."""
        # NOTE: nothing may be added inside foobar — the assertion depends on the
        # return statement being exactly one line after the def.
        def foobar(xyz):
            return torch.blargh(xyz)

        _, lineno = inspect.getsourcelines(foobar)
        with self.assertRaisesRegex(RuntimeError, "test_jit.py\", line {}".format(lineno + 1)):
            scripted = torch.jit.script(foobar)
    def test_file_line_error_class_defn(self):
        """Compile errors inside a class method report the method body's file/line."""
        # NOTE: nothing may be added inside FooBar — the assertion depends on the
        # failing call being exactly two lines after the class statement.
        class FooBar(object):
            def baz(self, xyz):
                return torch.blargh(xyz)

        _, lineno = inspect.getsourcelines(FooBar)
        with self.assertRaisesRegex(RuntimeError, "test_jit.py\", line {}".format(lineno + 2)):
            torch.jit.script(FooBar)
    def test_file_line_graph(self):
        """Graph nodes carry file:line:col source ranges for scripted functions."""
        # NOTE: nothing may be added inside foobar — the checked line AND column
        # offsets depend on its exact source layout.
        def foobar(xyz):
            return torch.neg(xyz)

        scripted = torch.jit.script(foobar)

        _, lineno = inspect.getsourcelines(foobar)
        fc = FileCheck().check('test_jit.py:{}:19'.format(lineno + 1))
        fc.run(scripted.graph)
        fc.run(str(scripted.graph))
    def test_file_line_save_load(self):
        """Source ranges survive a full save/load round trip of a ScriptModule."""
        # NOTE: nothing may be added inside Scripted — the checked line offset
        # (class line + 3) depends on its exact layout.
        class Scripted(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, xyz):
                return torch.neg(xyz)

        scripted = Scripted()

        # NB: not using getExportImportCopy because that takes a different
        # code path that calls CompilationUnit._import rather than
        # going through the full save/load pathway
        buffer = scripted.save_to_buffer()
        bytesio = io.BytesIO(buffer)
        scripted = torch.jit.load(bytesio)

        _, lineno = inspect.getsourcelines(Scripted)
        fc = FileCheck().check(':{}'.format(lineno + 3))
        fc.run(scripted.graph)
        fc.run(str(scripted.graph))
    def test_file_line_string(self):
        """Functions compiled from strings report '<string>' plus line:col in their graphs."""
        # the CU source layout below is the test input (<string>:3:11); do not reformat it
        scripted = torch.jit.CompilationUnit('''
            def foo(xyz):
                return torch.neg(xyz)
        ''')

        fc = FileCheck().check('<string>:3:11')
        fc.run(scripted.foo.graph)
        fc.run(str(scripted.foo.graph))
    @skipIfCrossRef
    def test_file_line_trace(self):
        """Traced graphs also carry file:line:col source ranges."""
        # NOTE: nothing may be added inside foobar — line/col offsets are checked.
        def foobar(xyz):
            return torch.neg(xyz)

        scripted = torch.jit.trace(foobar, (torch.rand(3, 4)))

        _, lineno = inspect.getsourcelines(foobar)
        fc = FileCheck().check('test_jit.py:{}:0'.format(lineno + 1))
        fc.run(scripted.graph)
        fc.run(str(scripted.graph))
    def test_serialized_source_ranges(self):
        """Runtime errors in a loaded module point back at the original source line."""
        # NOTE: nothing may be added inside FooTest — the assertion depends on the
        # mm call being exactly three lines after the class statement.
        class FooTest(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x, w):
                return torch.mm(x, w.t())

        ft = FooTest()
        loaded = self.getExportImportCopy(ft)
        _, lineno = inspect.getsourcelines(FooTest)

        # mismatched shapes trigger the error inside the deserialized forward
        with self.assertRaisesRegex(RuntimeError, 'test_jit.py\", line {}'.format(lineno + 3)):
            loaded(torch.rand(3, 4), torch.rand(30, 40))
    def test_serialized_source_ranges_graph(self):
        """The graph of a deserialized module retains file/line source ranges."""
        # NOTE: nothing may be added inside FooTest3 — line offsets are checked.
        class FooTest3(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x, w):
                return torch.mm(x, w.t())

        ft = FooTest3()
        loaded = self.getExportImportCopy(ft)
        _, lineno = inspect.getsourcelines(FooTest3)

        fc = FileCheck().check('test_jit.py:{}'.format(lineno + 3))
        fc.run(loaded.graph)
    def test_serialized_source_ranges2(self):
        """torch.jit.Error raised by a loaded module reports the original raise site."""
        # NOTE: nothing may be added inside FooTest2 — line offsets are checked.
        class FooTest2(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self):
                raise RuntimeError('foo')

        _, lineno = inspect.getsourcelines(FooTest2)

        with self.assertRaisesRegex(torch.jit.Error, 'test_jit.py\", line {}'.format(lineno + 3)):
            ft = FooTest2()
            loaded = self.getExportImportCopy(ft)
            loaded()
    def test_serialized_source_ranges_dont_jitter(self):
        """Serialized debug (source-range) records must be identical across repeated save/load cycles."""
        class FooTest3(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, lim):
                first = 1
                second = 1
                i = 1
                somenum = 5
                dontmutateme = 3
                third = 0
                while bool(i < lim):
                    third = first + second
                    first = second
                    second = third
                    j = 0
                    while j < 10:
                        somenum = somenum * 2
                        j = j + 1
                    i = i + j
                    i = i + dontmutateme
                st = second + third
                fs = first + second
                return third, st, fs

        ft3 = FooTest3()

        # NOTE(review): `mod` is ignored — torch.jit.save always serializes the
        # closed-over ft3, which makes records2/records3 trivially come from the
        # same module; presumably `mod` was intended here. TODO confirm upstream.
        def debug_records_from_mod(self, mod):
            buffer = io.BytesIO()
            torch.jit.save(ft3, buffer)
            buffer.seek(0)
            archive = zipfile.ZipFile(buffer)
            files = filter(lambda x: x.startswith('archive/code/'), archive.namelist())
            debug_files = list(filter(lambda f: f.endswith('.debug_pkl'), files))
            self.assertEqual(len(debug_files), 1)
            debug_file = archive.open(debug_files[0])
            return pickle.load(debug_file), buffer

        records1, buffer = debug_records_from_mod(self, ft3)

        buffer.seek(0)
        loaded = torch.jit.load(buffer)
        records2, buffer = debug_records_from_mod(self, loaded)

        buffer.seek(0)
        loaded2 = torch.jit.load(buffer)
        records3, _ = debug_records_from_mod(self, loaded2)

        self.assertEqual(records1, records2)
        self.assertEqual(records2, records3)
    def test_serialized_source_ranges_no_dups(self):
        """Adjacent serialized debug records must not repeat the same source range."""
        class FooTest3(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, lim):
                first = 1
                second = 1
                i = 1
                somenum = 5
                dontmutateme = 3
                third = 0
                while bool(i < lim):
                    third = first + second
                    first = second
                    second = third
                    j = 0
                    while j < 10:
                        somenum = somenum * 2
                        j = j + 1
                    i = i + j
                    i = i + dontmutateme
                st = second + third
                fs = first + second
                return third, st, fs

        ft3 = FooTest3()

        # NOTE(review): `mod` is unused — the helper always serializes the
        # closed-over ft3; presumably `mod` was intended. TODO confirm upstream.
        def debug_records_from_mod(mod):
            buffer = io.BytesIO()
            torch.jit.save(ft3, buffer)
            buffer.seek(0)
            archive = zipfile.ZipFile(buffer)
            files = list(filter(lambda x: x.startswith('archive/code/'), archive.namelist()))
            debug_files = filter(lambda f: f.endswith('.debug_pkl'), files)
            debug_files = (archive.open(f) for f in debug_files)
            debug_files = (pickle.load(f) for f in debug_files)
            # keep only the source-range table from each debug record
            debug_files = (f[2] for f in debug_files)
            return list(debug_files)

        debug_files = debug_records_from_mod(ft3)
        for debug_file in debug_files:
            for i in range(len(debug_file) - 1):
                offset, source_range_tag, source_range = debug_file[i]
                offset2, source_range_tag2, source_range2 = debug_file[i + 1]
                self.assertNotEqual(source_range, source_range2)
    def test_circular_dependency(self):
        """
        https://github.com/pytorch/pytorch/issues/25871

        Export/import of nested containers (ModuleList inside Sequential)
        must not trip the circular-dependency bug from the linked issue.
        """
        class A(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                return x

        class B(torch.jit.ScriptModule):
            def __init__(self):
                super(B, self).__init__()
                self.foo = torch.nn.ModuleList([A()])

            @torch.jit.script_method
            def forward(self, x):
                for f in self.foo:
                    x = f(x)
                return x

        class C(torch.jit.ScriptModule):
            def __init__(self):
                super(C, self).__init__()
                self.foo = torch.nn.Sequential(B())

            @torch.jit.script_method
            def forward(self, x):
                for f in self.foo:
                    x = f(x)
                return x
        self.getExportImportCopy(C())
    def test_serialize_long_lines(self):
        """Serialization must not reorder subexpressions when splitting long lines."""
        class OrderModuleLong(torch.nn.Module):
            def forward(self, long_arg_name: List[torch.Tensor]):
                return [(long_arg_name[1],), (long_arg_name[0].argmax(),)]
        src = str(torch.jit.script(OrderModuleLong()).code)
        # make long_arg_name[1] does not get reordered after the argmax
        FileCheck().check("long_arg_name[1]").check("argmax").run(src)
def test_tensor_shape(self):
x = torch.empty(34, 56, 78)
def f(x):
return x.shape
self.checkScript(f, (x,))
    def test_block_input_grad_in_loop(self):
        """The loop-carried x must be marked requires_grad inside the loop block (it becomes y @ x)."""
        x = torch.randn(3, 3, requires_grad=False)
        y = torch.randn(3, 3, requires_grad=True)

        def grad_in_loop(x, y):
            for i in range(100):
                x = y @ x
            return x

        scripted = torch.jit.script(grad_in_loop)
        outer = scripted.graph_for(x, y)
        loop = outer.findNode("prim::Loop")
        loop_block = next(loop.blocks())
        param_node = loop_block.paramNode()
        # block inputs are (iteration, x, ...); index 1 is the carried x
        x_value = list(param_node.outputs())[1]
        self.assertTrue(x_value.requires_grad())
def test_tensor_grad(self):
x = torch.randn(3, 4, requires_grad=True)
y = torch.randn(3, 4, requires_grad=False)
def f_requires_grad(x):
return x.requires_grad
self.checkScript(f_requires_grad, (x,))
self.checkScript(f_requires_grad, (y,))
def f_grad(x):
return x.grad
x.sum().backward()
self.checkScript(f_grad, (x,))
self.checkScript(f_grad, (y,))
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "shape analysis is only enabled in Legacy")
    def test_prim_grad_undefined(self):
        """Shape analysis leaves the definedness of prim::grad's output unknown (None)."""
        x = torch.ones(2)

        def f_grad(x):
            return x.grad

        scripted = self.checkScript(f_grad, (x,))
        g = scripted.graph_for(x)

        prim_grad_node = g.findNode("prim::grad")
        # undefined() is a tri-state; None means "not determined by analysis"
        self.assertTrue(next(prim_grad_node.outputs()).type().undefined() is None)
def test_tensor_data(self):
x = torch.randn(3, 4, requires_grad=True)
y = torch.randn(4, 5)
def f_data(x):
return x.data
scripted_f_data = torch.jit.script(f_data)
scripted_x = scripted_f_data(x)
self.assertEqual(scripted_x, f_data(x))
self.assertEqual(scripted_x.requires_grad, False)
scripted_y = scripted_f_data(y)
self.assertEqual(scripted_y, f_data(y))
self.assertEqual(scripted_x.requires_grad, False)
def test_tensor_dtype(self):
x_byte = torch.empty(34, 56, 78, dtype=torch.uint8)
x_long = torch.empty(34, 56, 78, dtype=torch.long)
x_float32 = torch.empty(34, 56, 78, dtype=torch.float32)
@torch.jit.script
def byte(x):
return x.dtype == torch.uint8
@torch.jit.script
def long(x):
return x.dtype == torch.long
@torch.jit.script
def float32(x):
return x.dtype == torch.float32
self.assertTrue(byte(x_byte))
self.assertFalse(byte(x_long))
self.assertFalse(byte(x_float32))
self.assertFalse(long(x_byte))
self.assertTrue(long(x_long))
self.assertFalse(long(x_float32))
self.assertFalse(float32(x_byte))
self.assertFalse(float32(x_long))
self.assertTrue(float32(x_float32))
@unittest.skipIf(not RUN_CUDA, "device tests require CUDA")
def test_tensor_device(self):
cpu = torch.empty(34, 56, 78, device='cpu')
gpu = torch.empty(34, 56, 78, device='cuda')
@torch.jit.script
def same_device(x, y):
return x.device == y.device
self.assertTrue(same_device(cpu, cpu))
self.assertTrue(same_device(gpu, gpu))
self.assertFalse(same_device(cpu, gpu))
@unittest.skipIf(not RUN_CUDA, "device tests require CUDA")
def test_tensor_to_device(self):
def to_device(x):
return x.to(device="cuda").to(device=torch.device("cpu"))
self.checkScript(to_device, (torch.ones(3, 4),))
def test_tensor_to_cpu(self):
def to_cpu(x):
return x.cpu()
x = torch.ones(3, 4)
script_fn = torch.jit.script(to_cpu)
self.assertEqual(to_cpu(x).device, script_fn(x).device)
self.checkScript(to_cpu, (x,))
@unittest.skipIf(not RUN_CUDA, "device tests require CUDA")
def test_tensor_to_cuda(self):
def to_cuda(x):
return x.cuda()
x = torch.ones(3, 4)
script_fn = torch.jit.script(to_cuda)
self.assertEqual(to_cuda(x).device, script_fn(x).device)
self.checkScript(to_cuda, (x,))
    def test_generic_list_errors(self):
        """Concatenating list literals with conflicting element types is a compile error."""
        with self.assertRaisesRegex(RuntimeError, "previously matched to type"):
            @torch.jit.script
            def foo(x):
                # List[List[Tensor]] + List[List[int]] cannot unify
                return [[x]] + [[1]]
def test_script_cu(self):
cu = torch.jit.CompilationUnit('''
def foo(a):
b = a
return b
''')
a = Variable(torch.rand(1))
self.assertEqual(a, cu.foo(a))
    # because the compilation unit ingests python strings
    # to use an escape sequence escape the backslash (\\n = \n)
    def test_string_cu(self):
        """String literals with escapes and a backslash line-continuation survive the CU parser."""
        # the literal's exact bytes (including the continuation) are the test input
        cu = torch.jit.CompilationUnit('''
            def foo(a):
                print(a, """a\\n\tb\\n""", 2, "a\
a")
                return a
        ''')
        FileCheck().check("aa").check("a\\n\\tb\\n").run(str(cu.foo.graph))
    def test_function_compilation_caching(self):
        """Re-scripting the same function reuses its graph; a new function with the same qualname does not."""
        def fun():
            return 1 + 2
        fun_compiled = torch.jit.script(fun)
        # python wrapper around the script function is a different pointer,
        # but the underlying script function graph is the same
        self.assertIs(fun_compiled.graph, torch.jit.script(fun).graph)

        def fun():
            return 3 + 4
        num_ref_counts = sys.getrefcount(fun)
        # caching doesn't get tripped up by same qualname
        fun_compiled_2 = torch.jit.script(fun)
        self.assertIsNot(fun_compiled, fun_compiled_2)
        self.assertEqual(fun_compiled_2(), 7)

        # caching doesnt increase refcounts to function (holds weak reference)
        # NOTE(review): assertTrue(x, y) treats y as the failure *message*, so this
        # only checks getrefcount(fun) is truthy — presumably assertEqual was
        # intended; left as-is since tightening it could change test outcomes.
        self.assertTrue(sys.getrefcount(fun), num_ref_counts)
def test_string_ops(self):
def foo():
a = "a" + "b"
return a + a, "ab" == "b", "ab" != "b", "ab" == "ab", "ab" != "ab"
self.checkScript(foo, ())
    def test_string_sorted(self):
        """sorted() on List[str] lowers to aten::sorted and matches eager ordering."""
        def foo(strs: List[str]):
            return sorted(strs)

        # the graph must contain exactly the aten::sorted op followed by the return
        FileCheck() \
            .check("graph") \
            .check_next("str[] = aten::sorted") \
            .check_next("return") \
            .run(str(torch.jit.script(foo).graph))

        inputs = ["str3", "str2", "str1"]
        self.checkScript(foo, (inputs,))
def test_string_sort(self):
def foo(strs: List[str]):
strs.sort()
return strs
inputs = ["str3", "str2", "str1"]
self.checkScript(foo, (inputs,))
def test_tuple_sorted(self):
def foo(tups: List[Tuple[int, int]]):
return sorted(tups)
inputs = [(1, 2), (0, 2), (1, 3)]
self.checkScript(foo, (inputs,))
def test_tuple_sort(self):
def foo(tups: List[Tuple[int, int]]):
tups.sort()
return tups
inputs = [(1, 2), (0, 2), (1, 3)]
self.checkScript(foo, (inputs,))
def test_tuple_sort_reverse(self):
def foo(tups: List[Tuple[int, int]]):
tups.sort(reverse=True)
return tups
inputs = [(1, 2), (0, 2), (1, 3)]
self.checkScript(foo, (inputs,))
    def test_tuple_unsortable_element_type(self):
        """Sorting tuples whose elements (dicts) define no ordering raises with a source highlight."""
        @torch.jit.script
        def foo():
            tups = [({1: 2}, {2: 3})]
            tups.sort()
            return tups

        # the "tups.sort" highlight couples the assertion to foo's exact source
        with self.assertRaisesRegexWithHighlight(RuntimeError, "are not sortable", "tups.sort"):
            foo()
    def test_tuple_unsortable_diff_type(self):
        """Sorting a heterogeneous List[Any] of differently-typed tuples raises at runtime."""
        @torch.jit.script
        def foo(inputs: List[Any]):
            inputs.sort()
            return inputs

        inputs = [(1, 2), ("foo", "bar")]
        # the "inputs.sort" highlight couples the assertion to foo's exact source
        with self.assertRaisesRegexWithHighlight(RuntimeError, "Only values of same type can be compared", "inputs.sort"):
            foo(inputs)
def test_tuple_nested_sort(self):
def foo(inputs: List[Tuple[int, Tuple[int, str]]]):
inputs.sort()
return inputs
inputs = [(1, (2, "foo")), (1, (2, "bar")), (1, (0, "bar"))]
self.checkScript(foo, (inputs,))
    def test_tuple_unsortable_nested_diff_type(self):
        """Type mismatches nested inside tuple elements are also rejected when sorting List[Any]."""
        @torch.jit.script
        def foo(inputs: List[Any]):
            inputs.sort()
            return inputs

        inputs = [(1, (2, 3)), (2, ("foo", "bar"))]
        # the "inputs.sort" highlight couples the assertion to foo's exact source
        with self.assertRaisesRegexWithHighlight(RuntimeError, "Only values of same type can be compared", "inputs.sort"):
            foo(inputs)
    def test_string_new_line(self):
        """A raw newline inside a single-quoted string literal is a lexer error."""
        with self.assertRaisesRegex(RuntimeError, "expected a valid token*"):
            torch.jit.CompilationUnit('''
            def test_while(a):
                print("
                a")
                return a
            ''')
    def test_string_single_escape(self):
        """A lone backslash that escapes the closing quote is a lexer error."""
        with self.assertRaisesRegex(RuntimeError, "expected a valid token*"):
            torch.jit.CompilationUnit('''
            def test_while(a):
                print("\\")
                return a
            ''')
def test_script_annotation(self):
@torch.jit.script
def foo(a):
return a + a + a
s = Variable(torch.rand(2))
self.assertEqual(s + s + s, foo(s))
def test_torch_pow(self):
def func(a, b):
return pow(a, b)
def func2(a, b, c, d):
return pow(pow(c + a, b), d)
def func3(a : int, b : float):
# type: (int, float) -> float
return pow(a, b)
def func4():
# type: () -> float
return pow(2, -2)
def func5(x, y):
return pow(x.item(), y.item())
def func6(a : int, b : int):
# type: (int, int) -> float
return pow(a, b)
a = torch.rand(1)
b = torch.rand(1)
c = torch.rand(1)
d = torch.rand(1)
self.checkScript(func, (a, b))
self.checkScript(func2, (a, b, c, d))
self.checkScript(func3, (4, -0.5))
self.checkScript(func4, ())
self.checkScript(func6, (2, 4))
inputs = [torch.tensor(2), torch.tensor(-2), torch.tensor(.5), torch.tensor(.2)]
for x in inputs:
for y in inputs:
if x < 0:
continue
else:
self.checkScript(func5, (x, y))
    @unittest.skipIf(not RUN_CUDA, "device tests require CUDA")
    def test_pow_scalar_backward_cuda(self):
        # see that scalar exponent works with cuda base (#19253)
        """Backward through pow with a scalar on either side of a CUDA tensor, float and double."""
        with enable_profiling_mode_for_profiling_tests():
            for dtype in [torch.float, torch.double]:
                # scalar exponent
                @torch.jit.script
                def func(a, b):
                    # type: (Tensor, float) -> Tensor
                    return (a * 2) ** b

                a = torch.rand(1, requires_grad=True, device='cuda', dtype=dtype)
                func(a, 1, profile_and_replay=True).backward()

                # scalar base
                @torch.jit.script
                def func(a, b):
                    # type: (float, Tensor) -> Tensor
                    return a ** (b * 2 + 1)

                a = torch.rand(1, requires_grad=True, device='cuda', dtype=dtype)
                func(2, a, profile_and_replay=True).backward()
def _check_code(self, code_str, fn_name, inputs):
scope = {}
exec(code_str, globals(), scope)
cu = torch.jit.CompilationUnit(code_str)
self.assertEqual(cu.func(*inputs), scope[fn_name](*inputs))
    @unittest.skipIf(not RUN_CUDA, 'no CUDA')
    def test_scriptmodule_releases_tensors_cuda(self):
        """Executing (and backprop-ing) a scripted fn must not leak CUDA tensors."""
        with enable_profiling_mode_for_profiling_tests():
            @torch.jit.script
            def fn(x, y):
                return x.sigmoid() * y.tanh()

            def test(backward=False):
                x = torch.randn(3, 3, dtype=torch.double, device='cuda', requires_grad=True)
                y = torch.randn(3, 3, dtype=torch.double, device='cuda', requires_grad=True)
                out = fn(x, y, profile_and_replay=True)
                if backward:
                    out.sum().backward()

            # repeated runs so caches/profiles stabilize before the leak check
            with self.assertLeaksNoCudaTensors():
                test()
                test()
                test()

            if GRAPH_EXECUTOR != ProfilingMode.SIMPLE:
                with self.assertLeaksNoCudaTensors():
                    test(backward=True)
                    test(backward=True)
                    test(backward=True)
def test_index(self):
    """Exercise TorchScript indexing/slicing against eager Python on
    tensors, ``List[int]`` and ``str``: basic/multi-dim slices, ``None``
    (new-dim) indexing, dynamic index expressions, positive and negative
    strides, and the out-of-bounds / zero-step error cases."""
    def consec(size, start=0):
        # Tensor of consecutive values reshaped to `size`.
        numel = torch.tensor(size).prod().item()
        return torch.arange(numel).view(size)

    def consec_list(size):
        return list(range(size))

    def random_string(size):
        letters = string.ascii_lowercase
        return "".join(random.choice(letters) for i in range(size))

    # Each checker builds a tiny function source with the indexing expression
    # spliced in and compares scripted vs eager via _check_code.
    def check_indexing(indexing, tensor):
        template = dedent("""
        def func(x):
            return x{}
        """)
        self._check_code(template.format(indexing), "func", [tensor])

    def check_dynamic_indexing(indexing, tensor, value1, value2):
        value1 = torch.tensor(value1)
        value2 = torch.tensor(value2)
        template = dedent("""
        def func(x, value1, value2):
            i = int(value1)
            j = int(value2)
            return x{}
        """)
        self._check_code(template.format(indexing), "func", [tensor, value1, value2])

    # Torchscript assumes type Tensor by default, so we need this explicit
    # declaration.
    def check_indexing_list_int(indexing, list):
        template = dedent("""
        def func(x):
            # type: (List[int]) -> Any
            return x{}
        """)
        self._check_code(template.format(indexing), "func", [list])

    def check_indexing_str(indexing, str):
        template = dedent("""
        def func(x):
            # type: (str) -> Any
            return x{}
        """)
        self._check_code(template.format(indexing), "func", [str])

    # basic slices
    check_indexing('[0]', consec((3, 3)))
    check_indexing('[1]', consec((3, 3), 10))
    check_indexing('[2]', consec((3, 3), 19))
    check_indexing('[2]', consec((3,)))
    check_indexing('[-1]', consec((3, 3), 19))
    check_indexing('[0:2]', consec((3, 3, 3)))
    check_indexing('[1:-1]', consec((3, 3, 3)))
    check_indexing('[-3:-1]', consec((6, 3)))
    check_indexing('[1:]', consec((3, 3)))
    check_indexing('[:1]', consec((3, 3)))
    check_indexing('[:]', consec((3, 2)))

    # multi-dim: indexes
    check_indexing('[0, 1]', consec((3, 3)))
    check_indexing('[0, 1]', consec((3, 3, 2)))
    check_indexing('[1, 0, 2]', consec((3, 3, 3)))
    check_indexing('[2, -1]', consec((3, 3)))

    # multi-dim: mixed slicing and indexing
    check_indexing('[0, 1:2]', consec((3, 3)))
    check_indexing('[0, :1]', consec((3, 3, 2)))
    check_indexing('[1, 2:]', consec((3, 3, 3)))
    check_indexing('[-1, 1:, 0]', consec((3, 3, 3, 3)))
    check_indexing('[1:, -1, 0]', consec((3, 3, 3, 3)))
    check_indexing('[-1, 2:, 1:2]', consec((3, 3, 3, 3)))
    check_indexing('[-1, 1:, 0]', consec((3, 3, 3, 3)))
    check_indexing('[-1, :, 0, 2]', consec((3, 3, 3, 3)))

    # zero-sized slices
    check_indexing('[0:0]', consec((2, 2)))
    check_indexing('[0:0, 1]', consec((3, 3)))

    # trivial expression usage
    check_indexing('[1+1]', consec((3, 3)))
    check_indexing('[1:(0 + 2)]', consec((3, 3, 3)))

    # None for new dimensions
    check_indexing('[None, 0]', consec((3, 3)))
    check_indexing('[1, None]', consec((3, 3), 10))
    check_indexing('[None, None, 2]', consec((3, 3), 19))
    check_indexing('[None, 2, None]', consec((3,)))
    check_indexing('[0:2, None]', consec((3, 3, 3)))
    check_indexing('[None, 1:-1]', consec((3, 3, 3)))
    check_indexing('[None, -3:-1, None]', consec((6, 3)))
    check_indexing('[-1, None, 2:, None, 1:2]', consec((3, 3, 3, 3)))
    check_indexing('[None, -1, None, 2:, None, 1:2, None]', consec((3, 3, 3, 3)))

    # dynamic expression usage
    check_dynamic_indexing("[i + j]", consec((3, 3)), 0, 1)
    check_dynamic_indexing("[i:j, i]", consec((3, 3, 2)), 0, 2)

    # positive striding
    check_indexing_list_int('[0]', consec_list(6))
    check_indexing_list_int('[1]', consec_list(7))
    check_indexing_list_int('[2]', consec_list(8))
    check_indexing_list_int('[2]', consec_list(9))
    check_indexing_list_int('[-1]', consec_list(10))
    check_indexing_list_int('[0:2]', consec_list(11))
    check_indexing_list_int('[1:-1]', consec_list(12))
    check_indexing_list_int('[-3:-1]', consec_list(13))
    check_indexing_list_int('[1:]', consec_list(15))
    check_indexing_list_int('[:1]', consec_list(16))
    check_indexing_list_int('[:]', consec_list(17))
    check_indexing_list_int('[::]', consec_list(0))
    check_indexing_list_int('[1000::]', consec_list(0))
    check_indexing_list_int('[:1000:]', consec_list(0))

    # negative striding
    check_indexing_list_int('[::-1]', consec_list(7))
    check_indexing_list_int('[:3:-1]', consec_list(7))
    check_indexing_list_int('[3::-1]', consec_list(7))
    check_indexing_list_int('[1000::-1]', consec_list(7))
    check_indexing_list_int('[3:0:-1]', consec_list(7))
    check_indexing_list_int('[3:-1000:-1]', consec_list(7))
    check_indexing_list_int('[0:0:-1]', consec_list(7))
    check_indexing_list_int('[0:-1000:-1]', consec_list(7))

    # only step is specified
    check_indexing_list_int('[::-1]', consec_list(0))
    check_indexing_list_int('[::-1]', consec_list(7))
    check_indexing_list_int('[::-2]', consec_list(7))
    check_indexing_list_int('[::2]', consec_list(7))
    check_indexing_list_int('[::42]', consec_list(7))
    check_indexing_list_int('[::-42]', consec_list(7))
    check_indexing_list_int('[::42]', consec_list(0))
    check_indexing_list_int('[::-42]', consec_list(0))
    # INT64_MAX-magnitude steps are legal; INT64_MIN is not representable.
    check_indexing_list_int('[::9223372036854775807]', consec_list(42))
    check_indexing_list_int('[::-9223372036854775807]', consec_list(42))
    with self.assertRaisesRegex(RuntimeError, "out of bounds"):
        check_indexing_list_int('[::-9223372036854775808]', consec_list(42))
    with self.assertRaisesRegex(RuntimeError, "should have non-zero step"):
        check_indexing_list_int('[::0]', consec_list(42))

    # striding strings
    check_indexing_str('[0]', random_string(6))
    check_indexing_str('[1]', random_string(7))
    check_indexing_str('[2]', random_string(8))
    check_indexing_str('[2]', random_string(9))
    check_indexing_str('[-1]', random_string(10))
    check_indexing_str('[0:2]', random_string(11))
    check_indexing_str('[1:-1]', random_string(12))
    check_indexing_str('[-3:-1]', random_string(13))
    check_indexing_str('[1:]', random_string(15))
    check_indexing_str('[:1]', random_string(16))
    check_indexing_str('[:]', random_string(17))
    check_indexing_str('[::]', random_string(0))
    check_indexing_str('[1000::]', random_string(0))
    check_indexing_str('[:1000:]', random_string(0))
    check_indexing_str('[::-1]', random_string(7))
    check_indexing_str('[:3:-1]', random_string(7))
    check_indexing_str('[3::-1]', random_string(7))
    check_indexing_str('[1000::-1]', random_string(7))
    check_indexing_str('[3:0:-1]', random_string(7))
    check_indexing_str('[3:-1000:-1]', random_string(7))
    check_indexing_str('[0:0:-1]', random_string(7))
    check_indexing_str('[0:-1000:-1]', random_string(7))
    check_indexing_str('[::-1]', random_string(0))
    check_indexing_str('[::-1]', random_string(7))
    check_indexing_str('[::-2]', random_string(7))
    check_indexing_str('[::2]', random_string(7))
    check_indexing_str('[::42]', random_string(7))
    check_indexing_str('[::-42]', random_string(7))
    check_indexing_str('[::42]', random_string(0))
    check_indexing_str('[::-42]', random_string(0))
    check_indexing_str('[::9223372036854775807]', random_string(42))
    check_indexing_str('[::-9223372036854775807]', random_string(42))
    with self.assertRaisesRegex(RuntimeError, "out of bounds"):
        check_indexing_str('[::-9223372036854775808]', random_string(42))
    with self.assertRaisesRegex(RuntimeError, "should have non-zero step"):
        check_indexing_str('[::0]', random_string(42))
def test_module_copy_with_attributes(self):
    """``__copy__`` on a ScriptModule carrying ``torch.jit.Attribute``
    values (list, int, dict) must not raise."""
    class Vocabulary(torch.jit.ScriptModule):
        def __init__(self, vocab_list):
            super(Vocabulary, self).__init__()
            self._vocab = torch.jit.Attribute(vocab_list, List[str])
            self.some_idx = torch.jit.Attribute(2, int)
            self.idx = torch.jit.Attribute(
                {word: i for i, word in enumerate(vocab_list)}, Dict[str, int]
            )

        @torch.jit.script_method
        def lookup_indices_1d(self, values):
            # type: (List[str]) -> List[int]
            result = torch.jit.annotate(List[int], [])
            # Direct list iteration not supported
            for i in range(len(values)):
                value = values[i]
                result.append(self.idx.get(value, self.some_idx))
            return result

        @torch.jit.script_method
        def forward(self, values):
            # type: (List[List[str]]) -> List[List[int]]
            result = torch.jit.annotate(List[List[int]], [])
            # Direct list iteration not supported
            for i in range(len(values)):
                result.append(self.lookup_indices_1d(values[i]))
            return result

    v = Vocabulary(list('uabcdefg'))
    v.__copy__()
def test_tuple_to_opt_list(self):
    """A tuple literal must be implicitly convertible to an
    ``Optional[List[int]]`` argument when scripting."""
    @torch.jit.script
    def foo(x):
        # type: (Optional[List[int]]) -> int
        return 1

    @torch.jit.script
    def tuple_call():
        return foo((1, 2))
def test_keyword(self):
    """Keyword arguments (``dim=0``) in a scripted call must match eager."""
    @torch.jit.script
    def func(x):
        return torch.sum(x, dim=0)

    x = torch.rand(10, dtype=torch.float, requires_grad=True)
    y = func(x)
    y2 = torch.sum(x, dim=0)
    self.assertEqual(y, y2)
def test_constant_pooling_none(self):
    """NoneType constants used in both branches of an ``if`` should be
    pooled into a single ``prim::Constant`` node."""
    @torch.jit.script
    def typed_nones(a=None, b=None, c=None):
        # type: (Optional[int], Optional[bool], Optional[Tensor]) -> Tuple[Optional[int], Optional[bool], Optional[Tensor]]
        return a, b, c

    @torch.jit.script
    def test(a):
        # type: (bool) -> None
        if a:
            print(typed_nones())
        else:
            print(typed_nones())

    graph_str = str(test.graph)
    self.assertTrue(graph_str.count("NoneType = prim::Constant") == 1)
def test_constant_pooling_same_identity(self):
    """Two escaping values that are the *same object* can still be pooled
    into one constant by the constant-pooling pass."""
    def foo():
        a = torch.tensor([4])
        b = (a,)
        index = len(a) - 1
        c = b[index]
        d = b[index]
        return c, d

    foo_script = torch.jit.script(foo)
    self.run_pass('constant_propagation', foo_script.graph)
    self.run_pass('constant_pooling', foo_script.graph)
    # even though the c & d escape scope, we are still able
    # pool them into one constant because they are the same object
    FileCheck().check_count("prim::Constant", 1, exactly=True).run(foo_script.graph)
    self.assertEqual(foo(), foo_script())
def test_constant_pooling_introduce_aliasing(self):
    """Constant pooling must NOT merge two distinct tensor constants that
    are both returned — that would create an observable alias."""
    @torch.jit.script
    def foo():
        a = torch.tensor(1)
        b = torch.tensor(1)
        return a, b

    self.run_pass('constant_propagation', foo.graph)
    self.run_pass('constant_pooling', foo.graph)
    # dont pool constants bc it would introduce observable alias relationship changing
    a, b = foo()
    self.assertIsNot(a, b)
def test_literal(self):
    """Tuple literals, nested tuple unpacking, and tuple reassignment
    inside a loop must script correctly."""
    def func1(a, b):
        c = a, b
        d, e = c
        return d + e

    def func2(a, b):
        c = a, (a, b)
        d, e = c
        f, g = e
        return d + f + g

    def func3(a, b):
        # type: (float, float) -> float
        c = 0., (0., 0.)
        x = True
        while x:
            x = False
            c = a, (a, b)
        d, e = c
        f, g = e
        return d + f + g

    a = torch.rand(1, requires_grad=True)
    b = torch.rand(1, requires_grad=True)
    self.checkScript(func1, (a, b), optimize=True)
    self.checkScript(func2, (a, b), optimize=True)
    self.checkScript(func3, (a.item(), b.item()), optimize=True)
def test_expand(self):
    """Broadcasting add in a scripted function: forward matches eager and
    the broadcasted operand's gradient is summed over the expanded dim."""
    @torch.jit.script
    def func(x, y):
        return x + y

    x = torch.rand(2, 3, dtype=torch.float, requires_grad=True)
    y = torch.rand(3, dtype=torch.float, requires_grad=True)
    out = func(x, y)
    self.assertEqual(func(x, y), x + y)

    grad = torch.randn(2, 3, dtype=torch.float)
    out.backward(grad)
    self.assertEqual(x.grad, grad)
    # y was broadcast over dim 0, so its grad is reduced over that dim.
    self.assertEqual(y.grad, grad.sum(dim=0))
def test_sum(self):
    """Shape propagation must handle ``sum`` with both a list dim ([4])
    and a scalar dim (4) argument."""
    @torch.jit.script
    def func(x):
        return x.sum(dim=[4])

    @torch.jit.script
    def func2(x):
        return x.sum(dim=4)

    # test that shape analysis is written correctly for sum with OptionalIntArrayRef[1] dim argument
    self.run_pass('constant_propagation', func.graph)
    self.run_pass('constant_propagation', func2.graph)
    g = _propagate_shapes(func.graph, (torch.zeros(1, 1, 1, 1, 4),), False)
    g2 = _propagate_shapes(func2.graph, (torch.zeros(1, 1, 1, 1, 4),), False)
def test_cat(self):
    """`torch.cat` in a scripted function: forward equality, autodiff node
    creation (on non-simple executors), and gradient equality."""
    with enable_profiling_mode_for_profiling_tests():
        @torch.jit.script
        def func(x):
            return torch.cat((x, x), dim=0)

        x = torch.rand(10, dtype=torch.float, requires_grad=True)
        self.assertEqual(func(x, profile_and_replay=True), torch.cat((x, x), dim=0))

        @torch.jit.script
        def func2(x, y):
            return torch.cat((x, x), y)

        with disable_autodiff_subgraph_inlining():
            # include an empty-dim-0 case
            for sizes in ((2, 2), (0, 2)):
                x = torch.rand(sizes).requires_grad_()
                y = torch.tensor(1)

                output = func2(x, y, profile_and_replay=True)
                output_ref = torch.cat((x, x), y)
                self.assertEqual(output, output_ref)

                if GRAPH_EXECUTOR != ProfilingMode.SIMPLE:
                    self.assertAutodiffNode(func2.graph_for(x, y), True, ['aten::cat'], [])

                    grad = torch.autograd.grad(output.sum(), x)
                    grad_ref = torch.autograd.grad(output_ref.sum(), x)
                    self.assertEqual(grad, grad_ref)
def test_cat_lifts(self):
    """`torch.cat` over a list literal (including an empty one) must lower
    through a ListConstruct feeding aten::cat."""
    @torch.jit.script
    def foo(x):
        return torch.cat([x, x], dim=1)

    @torch.jit.script
    def foo2(x):
        return torch.cat([], dim=1)

    @torch.jit.script
    def foo3(x):
        return torch.cat([x], dim=1)

    for g in [foo.graph, foo2.graph, foo3.graph]:
        FileCheck().check("int =").check("ListConstruct").check("aten::cat").run(str(g))
def test_stack(self):
    """`torch.stack` in a scripted function: forward equality, autodiff
    node creation, and gradient equality against eager."""
    with enable_profiling_mode_for_profiling_tests():
        @torch.jit.script
        def func(x):
            return torch.stack((x, x), dim=1)

        x = torch.rand(10, 10)
        self.assertEqual(func(x, profile_and_replay=True), torch.stack((x, x), dim=1))

        @torch.jit.script
        def func2(x, y):
            return torch.stack((x, y), dim=0)

        with disable_autodiff_subgraph_inlining():
            x = torch.randn([2, 2]).requires_grad_()
            y = torch.randn([2, 2]).requires_grad_()

            output = func2(x, y, profile_and_replay=True)
            output_ref = torch.stack((x, y), 0)
            self.assertEqual(output, output_ref)
            if GRAPH_EXECUTOR != ProfilingMode.SIMPLE:
                self.assertAutodiffNode(func2.graph_for(x, y), True, ['aten::stack'], [])

                grads = torch.autograd.grad(output.sum(), (x, y))
                grads_ref = torch.autograd.grad(output_ref.sum(), (x, y))
                self.assertEqual(grads, grads_ref)
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY,
                 "Profiling executor will be using different heuristics for constructing differentiable graphs")
def test_unbind(self):
    """`torch.unbind` in a scripted function (legacy executor only):
    outputs and gradients must match eager."""
    with enable_profiling_mode_for_profiling_tests():
        @torch.jit.script
        def func(x, y):
            # type: (Tensor, int) -> List[Tensor]
            return torch.unbind(x, y)

        with disable_autodiff_subgraph_inlining():
            x = torch.rand([2, 2]).requires_grad_()
            y = 0
            outputs = func(x, y, profile_and_replay=True)
            outputs_ref = torch.unbind(x, dim=y)
            self.assertEqual(outputs, outputs_ref)
            self.assertAutodiffNode(func.graph_for(x, y), True, [], [])

            grad = torch.autograd.grad(_sum_of_list(outputs), x)
            grad_ref = torch.autograd.grad(_sum_of_list(outputs_ref), x)
            self.assertEqual(grad, grad_ref)
@unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.PROFILING,
                 "Profiling executor fails to recognize that tensors in a list require gradients")
def test_meshgrid(self):
    """`torch.meshgrid` over a List[Tensor] argument: outputs and
    gradients must match eager."""
    with enable_profiling_mode_for_profiling_tests():
        @torch.jit.script
        def func(a):
            # type: (List[Tensor]) -> List[Tensor]
            return torch.meshgrid(a)
        with disable_autodiff_subgraph_inlining():
            a = torch.tensor([1.0, 2, 3]).requires_grad_()
            b = torch.tensor([1.0, 2, 3, 4]).requires_grad_()
            inputs = [a, b]

            outputs_ref = torch.meshgrid(inputs)
            outputs = func(inputs, profile_and_replay=True)
            self.assertEqual(outputs, outputs_ref)

            if GRAPH_EXECUTOR != ProfilingMode.SIMPLE:
                self.assertAutodiffNode(func.graph_for(inputs), True, [], [])

                grads = torch.autograd.grad(_sum_of_list(outputs), inputs)
                grads_ref = torch.autograd.grad(_sum_of_list(outputs_ref), inputs)
                self.assertEqual(grads, grads_ref)
def test_tensor_len(self):
    """``len(tensor)`` (size of dim 0) must script correctly."""
    def func(x):
        return len(x)

    self.checkScript(func, [torch.ones(4, 5, 6)])
def test_func_call(self):
    """Nested calls to plain Python functions must script correctly."""
    def add(a, b):
        return a + b

    def mul(a, x):
        return a * x

    def func(alpha, beta, x, y):
        return add(mul(alpha, x), mul(beta, y))

    alpha = torch.rand(1, dtype=torch.float, requires_grad=True)
    beta = torch.rand(1, dtype=torch.float, requires_grad=True)
    x = torch.rand(3, dtype=torch.float, requires_grad=True)
    y = torch.rand(3, dtype=torch.float, requires_grad=True)

    # NOTE: cannot optimize yet because broadcasts are not inserted before the fuser runs
    self.checkScript(func, [alpha, beta, x, y], optimize=False)
@unittest.skip("bailouts are being deprecated")
def test_profiling_graph_executor(self):
    """Profiling executor: prim::profile nodes are inserted on first run,
    and a shape change triggers prim::BailOut nodes."""
    @torch.jit.script
    def def_in_one_branch(x, z):
        # type: (Tensor, bool) -> float
        y = x
        if z is False:
            y = x + 1

        return y.sum()

    a = torch.rand(2, 3)

    with enable_profiling_mode_for_profiling_tests():
        # check prim::profile are inserted
        profiled_graph_str = str(def_in_one_branch.graph_for(a, True))
        FileCheck().check_count("prim::profile", 4).run(profiled_graph_str)
        # this call is optimized for
        # the given shape of (2, 3)
        def_in_one_branch(a, False)
        # change shape to (3)
        # so we go down a bailout path
        a = torch.ones(3)
        # check prim::BailOuts are inserted
        bailout_graph_str = str(def_in_one_branch.graph_for(a, True))
        FileCheck().check_count("prim::BailOut", 3).run(bailout_graph_str)
        # this triggers all 3 bailouts
        self.assertEqual(def_in_one_branch(a, False), 6.0)
        # this triggers 2 bailouts
        self.assertEqual(def_in_one_branch(a, True), 3.0)
@unittest.skip("bailouts are being deprecated")
def test_maxpool_guard_elimination(self):
    """Guard elimination should leave exactly one BailOut around max_pool1d."""
    @torch.jit.script
    def my_maxpool(x):
        return F.max_pool1d(x, kernel_size=[1]) + torch.ones([32, 32, 32])

    a = torch.rand(32, 32, 32)

    with enable_profiling_mode_for_profiling_tests():
        my_maxpool(a)
        bailout_graph_str = str(my_maxpool.graph_for(a))
        FileCheck().check_count("prim::BailOut", 1).run(bailout_graph_str)
@unittest.skip("bailouts are being deprecated")
def test_slice_guard_elimination(self):
    """Guard elimination should leave exactly one BailOut for repeated slices."""
    @torch.jit.script
    def my_slice(x):
        return x[0:16:2] + x[0:16:2]

    a = torch.rand(32, 4)

    with enable_profiling_mode_for_profiling_tests():
        my_slice(a)
        bailout_graph_str = str(my_slice.graph_for(a))
        FileCheck().check_count("prim::BailOut", 1).run(bailout_graph_str)
@unittest.skip("bailouts are being deprecated")
def test_unsqueeze_guard_elimination(self):
    """Guard elimination should leave two BailOuts for repeated unsqueeze."""
    @torch.jit.script
    def my_unsqueeze(x):
        return torch.unsqueeze(x, 0) + torch.unsqueeze(x, 0)

    a = torch.rand(32, 4)

    with enable_profiling_mode_for_profiling_tests():
        my_unsqueeze(a)
        bailout_graph_str = str(my_unsqueeze.graph_for(a))
        FileCheck().check_count("prim::BailOut", 2).run(bailout_graph_str)
def test_resize_input_ops(self):
    # resize_ and resize_as resize the input tensor. because our shape analysis
    # is flow invariant, we set any Tensor that can alias a resized Tensor
    # to the base Tensor Type, without size information.

    # testing that value which is an input of a graph gets handled
    def out_op_graph_input():
        @torch.jit.script
        def test(x, y, z):
            torch.mul(x, y, out=z)
            return z

        graph = _propagate_shapes(test.graph,
                                  (torch.zeros(2, 1), torch.zeros(1, 2), torch.zeros(1, 1, 1)), False)
        self.assertTrue(next(graph.outputs()).type() == TensorType.get())
    out_op_graph_input()

    def test_resize():
        @torch.jit.script
        def test(x):
            after_resize_alias = torch.zeros([2])
            for _i in range(5):
                b = x + 1
                f = [1]
                before_resize_alias = b.sub_(1)
                # for i in range(10):
                f.append(1)
                b.resize_(f)
                after_resize_alias = b.add_(1)
            return after_resize_alias

        self.run_pass('constant_propagation', test.graph)
        g = _propagate_shapes(test.graph, (torch.zeros(1, 1),), False)
        resize_node = g.findNode("aten::resize_")
        # first input and output of b.resize_ is b
        self.assertTrue(next(resize_node.inputs()).type() == TensorType.get())
        self.assertTrue(next(resize_node.outputs()).type() == TensorType.get())

        # correctly propagates to b alias set
        before_resize = g.findNode("aten::sub_")
        self.assertTrue(next(before_resize.outputs()).type() == TensorType.get())

        after_resize = g.findNode("aten::add_")
        self.assertTrue(next(after_resize.outputs()).type() == TensorType.get())
    test_resize()

    def test_resize_as():
        @torch.jit.script
        def test(x):
            b = torch.zeros([2, 2])
            b.resize_as_(x)
            return b

        g = test.graph
        self.run_pass('constant_propagation', g)
        g = _propagate_shapes(test.graph, (torch.zeros(1, 1),), False)

        # x doesn't alias a resized op so it shouldn't be set to base Tensor type
        self.assertTrue(next(g.inputs()).type() != TensorType.get())
        # return is resized
        self.assertTrue(next(g.outputs()).type() == TensorType.get())
    test_resize_as()
def test_uninitialized(self):
    """A graph reading a prim::Uninitialized int must survive export/import
    round-trip and fail with an isInt error when actually executed."""
    graph_str = """graph():
      %1 : int = prim::Uninitialized()
      %2 : int = prim::Constant[value=1]()
      %3 : int = aten::add(%1, %2)
      return (%3)
    """
    g = parse_ir(graph_str)
    m = self.createFunctionFromGraph(g)
    self.getExportImportCopy(m)
    with self.assertRaisesRegex(RuntimeError, "isInt"):
        m()
@unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.SIMPLE, "Simple Executor doesn't use requires_grad information")
@unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.PROFILING, "Peeling is now disabled")
def test_requires_grad_loop(self):
    """requires_grad analysis across a loop: the loop carries a
    grad-requiring input but a non-grad value out."""
    @torch.jit.script
    def test(x, y, z):
        # type: (Tensor, Tensor, int) -> Tensor
        for _ in range(z):
            x = y
        return x

    # x requires grad, y does not
    # testing that requires grad analysis correctly exits, with its input
    # to the loop (x) requiring grad and its output to the loop not requiring grad
    # and the output of the node conservatively setting grad to true
    inps = (torch.tensor(1.0, requires_grad=True), torch.tensor(1), 10)
    test(*inps, profile_and_replay=True)

    graph = test.graph_for(*inps)
    loop = graph.findNode("prim::Loop")
    loop_body = next(loop.blocks())
    loop_inputs = list(loop_body.inputs())
    loop_outputs = list(loop_body.outputs())

    if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
        # TODO: simplify this test as it's very sensitive
        # the optimized graph will have 3 loops
        # the original loop is peeled
        # peeled loop also gets unrolled
        index_of_x_in_peeled_unrolled_loop = -2
        self.assertTrue(loop_inputs[index_of_x_in_peeled_unrolled_loop].requires_grad())
        bailouts_in_outer_block = graph.findAllNodes("prim::BailOut", False)
        last_bailout_index_on_loops_output = -1
        self.assertFalse(bailouts_in_outer_block[last_bailout_index_on_loops_output].output().requires_grad())
    else:
        self.assertTrue(loop_inputs[1].requires_grad())
        self.assertTrue(loop.output().requires_grad())
        self.assertFalse(loop_outputs[1].requires_grad())
def test_view_shape_prop(self):
    """``view(size=[-1])`` through a CompilationUnit must flatten correctly."""
    cu = torch.jit.CompilationUnit('''
    def test_view_shape_prop(a):
        return a.view(size=[-1])
    ''')
    inputs = [torch.zeros(10, 10)]
    outputs = torch.zeros(100)

    real_outs = cu.test_view_shape_prop(*inputs)
    self.assertEqual(real_outs, outputs)
def test_view_listconstruct_shape_prop(self):
    """Shape propagation through ``view`` with a runtime-built size list."""
    def fn(x):
        B = x.size(0)
        C = x.size(1)
        T = x.size(2)
        return x.view(T, B, C)

    x = torch.randn(3, 1, 5, requires_grad=True)
    fn = torch.jit.script(fn)
    graph = _propagate_shapes(fn.graph, (x,), False)
    # NOTE(review): asserts 'Double' although the input is created with
    # default-dtype randn — confirm the intended dtype against the original file.
    self.assertTrue(next(graph.outputs()).type().scalarType() == 'Double')
def test_shape_prop_promotion(self):
    """float + double must shape-propagate to a Double output."""
    @torch.jit.script
    def fn(x, y):
        return x + y

    x, y = torch.rand(3, 4, dtype=torch.float), torch.rand(3, 4, dtype=torch.double)
    graph = _propagate_shapes(fn.graph, (x, y), False)
    FileCheck().check('Double(*, *, device=cpu) = aten::add').run(graph)
def test_shape_prop_promote_scalar_arg(self):
    """Python float scalar + long tensor promotes to the default dtype."""
    @torch.jit.script
    def fn(x):
        return math.pi + x

    x = torch.zeros(3, 4, dtype=torch.long)
    graph = _propagate_shapes(fn.graph, (x,), False)
    default = torch.get_default_dtype()
    if(default == torch.float):
        FileCheck().check('Float(*, *, requires_grad=0, device=cpu) = aten::add').run(graph)
    else:
        FileCheck().check('Double(*, *, requires_grad=0, device=cpu) = aten::add').run(graph)
def test_integral_shape_inference(self):
    """Integer tensor arithmetic through a CompilationUnit keeps dtype long."""
    cu = torch.jit.CompilationUnit('''
    def test_integral_shape_inference(a):
        return a * a
    ''')
    inputs = [torch.ones(10, 10, dtype=torch.long)]
    outputs = torch.ones(10, 10, dtype=torch.long)

    self.assertEqual(cu.test_integral_shape_inference(*inputs), outputs)
@unittest.skipIf(RUN_CUDA, 'This tests the CPU fuser')
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser support for Sandcastle")
@enable_cpu_fuser
def test_batchnorm_fuser_cpu(self):
    """CPU fuser codegen for a batchnorm-like IR graph must emit the float
    variant ``sqrtf`` in the generated kernel."""
    code = '''
    graph(%3 : Tensor,
          %7 : Tensor,
          %12 : Float(*, *),
          %13 : Tensor,
          %25 : Tensor):
      %23 : int = prim::Constant[value=1]()
      %22 : float = prim::Constant[value=1e-05]()
      %26 : Tensor = aten::sqrt(%25)
      %24 : Tensor = aten::add(%26, %22, %23)
      %20 : Tensor = aten::reciprocal(%24)
      %norm_invstd : Tensor = aten::mul(%20, %23)
      %15 : Tensor = aten::sub(%12, %13, %23)
      %11 : Tensor = aten::mul(%15, %norm_invstd)
      %8 : Tensor = aten::mul(%11, %7)
      %5 : Tensor = aten::add(%8, %3, %23)
      %1 : Float(*, *) = aten::relu(%5)
      return (%1)
    '''

    graph = parse_ir(code)
    inputs = 5 * [torch.rand(26, 2048, dtype=torch.float)]
    code = torch._C._jit_fuser_get_fused_kernel_code(graph, inputs)
    FileCheck().check('sqrtf').run(code)
@slowTest
@unittest.skipIf(RUN_CUDA, 'This tests the CPU fuser')
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser support for Sandcastle")
@enable_cpu_fuser
def test_fuser_double_float_codegen(self):
    """CPU fuser must emit the double C function (e.g. ``log(``) for Double
    graphs and the float variant (``logf(``) for Float graphs, for every
    supported unary math op plus binary ``pow``."""
    fns = ['log', 'log10', 'log1p', 'log2', 'lgamma', 'exp', 'expm1', 'erf',
           'erfc', 'cos', 'acos', 'cosh', 'sin', 'asin', 'sinh', 'tan',
           'atan', 'tanh', 'sqrt', 'ceil', 'floor', 'round', 'trunc',
           'frac']

    def lookup_c_equivalent_fn(aten_fn):
        # aten op names happen to match the C math function names here.
        return aten_fn

    def test_dispatch(op, expects, dtype, binary=False):
        if dtype == torch.double:
            dtype_str = 'Double'
        elif dtype == torch.float:
            dtype_str = 'Float'
        else:
            raise RuntimeError('Unknown dtype')

        if binary:
            code = '''
            graph(%3 : Tensor, %4 : Tensor):
              %2 : {dtype}(*, *) = aten::{op}(%3, %4)
              %1 : {dtype}(*, *) = aten::relu(%2)
              return (%1)
            '''.format(op=op, dtype=dtype_str)
        else:
            code = '''
            graph(%3 : Tensor):
              %2 : {dtype}(*, *) = aten::{op}(%3)
              %1 : {dtype}(*, *) = aten::relu(%2)
              return (%1)
            '''.format(op=op, dtype=dtype_str)

        graph = parse_ir(code)
        inputs = (2 if binary else 1) * [torch.rand(26, 2048, dtype=dtype)]
        code = torch._C._jit_fuser_get_fused_kernel_code(graph, inputs)
        FileCheck().check(expects).run(code)

    for fn in fns:
        test_dispatch(fn, lookup_c_equivalent_fn(fn) + '(', torch.double)
        test_dispatch(fn, lookup_c_equivalent_fn(fn) + 'f(', torch.float)

    # 'min', 'max' were previously tested but are now replaced with ternary expressions
    # instead of fmin() and fmax()
    binary_fns = ['pow']
    for fn in binary_fns:
        test_dispatch(fn, lookup_c_equivalent_fn(fn) + '(', torch.double, binary=True)
        test_dispatch(fn, lookup_c_equivalent_fn(fn) + 'f(', torch.float, binary=True)
@unittest.skipIf(RUN_CUDA, 'This tests the CPU fuser')
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser support for Sandcastle")
@enable_cpu_fuser
def test_fuser_double_literal_precision(self):
    """A double literal constant must survive fuser codegen at full
    precision (no truncation to float)."""
    code = '''
    graph(%2 : Float(*, *)):
      %4 : int = prim::Constant[value=1]()
      %3 : float = prim::Constant[value=1.282549830161864]()
      %5 : Float(*, *) = aten::add(%2, %3, %4)
      %1 : Float(*, *) = aten::relu(%5)
      return (%1)
    '''

    graph = parse_ir(code)
    code = torch._C._jit_fuser_get_fused_kernel_code(graph, [torch.rand(3, 4)])
    FileCheck().check('1.282549830161864').run(code)
def test_fuser_multiple_blocks(self):
    """Repeated cat inside a while loop (multiple blocks) must produce
    correct results through the CompilationUnit."""
    cu = torch.jit.CompilationUnit('''
    def test_fuser_multiple_blocks(this, that, theother, meme):
        i = 0
        while i < 20:
            this = torch.cat([this, meme], dim=0)
            that = torch.cat([that, meme], dim=0)
            theother = torch.cat([theother, meme], dim=0)
            i = i + 1
        return this, that, theother
    ''')

    inputs = [torch.ones(0, 10, 10)] * 3
    inputs += [torch.ones(1, 10, 10)]
    outputs = [torch.ones(20, 10, 10)] * 3

    self.assertEqual(cu.test_fuser_multiple_blocks(*inputs), outputs)
def test_dropout_script(self):
    """A traced function containing dropout must export to ONNX."""
    eg = torch.zeros(1, 2, 3, requires_grad=True)

    @_trace(eg)
    def foo(x):
        x = torch.neg(x)
        return F.dropout(x)

    class MyDrop(nn.Module):
        def forward(self, x):
            return foo(x)

    f = io.BytesIO()
    with warnings.catch_warnings(record=True):
        torch.onnx.export(MyDrop(), (eg,), f, verbose=False)
@unittest.skip("RuntimeError: VariableType::ID() not implemented")
def test_cast(self):
    """int() cast of a float tensor (currently skipped upstream)."""
    script = '''
    def to_int(x):
        return int(x)
    '''
    x = Variable(torch.FloatTensor([1.1, 2.3]), requires_grad=True)
    out = Variable(torch.IntTensor([1, 2]), requires_grad=True)
    self.checkScript(script, [x], optimize=True, outputs=[out], func='to_int')
def test_str_cast(self):
    """``str()`` of a tuple inside script matches Python formatting."""
    @torch.jit.script
    def to_str(x):
        # type: (int) -> str
        return str((x, x))

    self.assertEqual("(1, 1)", to_str(1))
def test_int_cast(self):
    """``int(str)`` in script: decimal strings parse, non-decimal
    literals (hex/binary) raise like Python's int()."""
    @torch.jit.script
    def to_int(x):
        # type: (str) -> int
        return int(x)

    self.assertEqual(5, to_int('5'))
    self.assertEqual(-5, to_int('-5'))
    self.assertEqual(2147483647, to_int('2147483647'))
    self.assertEqual(-2147483648, to_int('-2147483648'))

    with self.assertRaisesRegex(RuntimeError, "invalid literal for int()"):
        to_int('0x20')

    with self.assertRaisesRegex(RuntimeError, "invalid literal for int()"):
        to_int('0b0001')
def test_python_frontend(self):
    """The Python frontend AST for a function exercising many statement
    kinds must match the expected-output snapshot."""
    def fn(x, y, z):
        q = None
        q = x + y - z.sigmoid()
        print(q)
        w = -z
        if not x and not y and z:
            m = x if not z else y
        while x < y > z:
            q = x
        assert 1 == 1, "hello"
        return x

    ast = torch.jit.frontend.get_jit_def(fn, fn.__name__)
    self.assertExpected(str(ast))
def test_python_frontend_source_range(self):
    """The AST's SourceRange must point at the raising line with a
    HERE caret."""
    def fn():
        raise Exception("hello")
    ast = torch.jit.frontend.get_jit_def(fn, fn.__name__)
    FileCheck().check("SourceRange at:") \
               .check("def fn():") \
               .check("~~~~~~~~~") \
               .check('raise Exception("hello")') \
               .check('~~~~~~~~~~~~~~~~~ <--- HERE') \
               .run(str(ast.range()))
def test_python_frontend_py3(self):
    """Python-3-only frontend AST snapshot for a raising function."""
    def fn():
        raise Exception("hello")
    ast = torch.jit.frontend.get_jit_def(fn, fn.__name__)
    self.assertExpected(str(ast))
def _make_scalar_vars(self, arr, dtype):
return [torch.tensor(val, dtype=dtype) for val in arr]
def test_string_print(self):
    """print() with adjacent string literal concatenation and mixed args."""
    def func(a):
        print(a, "a" 'b' '''c''' """d""", 2, 1.5)
        return a

    inputs = self._make_scalar_vars([1], torch.int64)
    self.checkScript(func, inputs, capture_output=True)
def test_while(self):
    """Basic while loop with multiple loop-carried variables."""
    def func(a, b, max):
        while bool(a < max):
            a = a + 1
            b = b + 1
        c = a + b
        return c

    inputs = self._make_scalar_vars([1, 1, 10], torch.int64)
    self.checkScript(func, inputs, optimize=True)
def test_fibb(self):
    """Nested while loops with several loop-carried variables (Fibonacci)."""
    def func(lim):
        first = 1
        second = 1
        i = 1
        somenum = 5
        dontmutateme = 3
        third = 0
        while bool(i < lim):
            third = first + second
            first = second
            second = third
            j = 0
            while j < 10:
                somenum = somenum * 2
                j = j + 1
            i = i + j
            i = i + dontmutateme

        st = second + third
        fs = first + second
        return third, st, fs

    inputs = self._make_scalar_vars([10], torch.int64)
    self.checkScript(func, inputs, optimize=True)
def test_fibb_totally_better(self):
    """Iterative Fibonacci with an int signature via checkScript."""
    def fib(x):
        # type: (int) -> int
        prev = 1
        v = 1
        for i in range(0, x):
            save = v
            v = v + prev
            prev = save
        return v

    self.checkScript(fib, (10,))
def test_if(self):
    """if/else where a variable (d) is reassigned only in one branch."""
    def func(a, b):
        # type: (int, int) -> int
        d = 3
        if bool(a > 10):
            a = 3 + d
        else:
            b = 3 + d
            d = 4
        c = a + b
        return c

    inputs = self._make_scalar_vars([1, -1], torch.int64)
    self.checkScript(func, inputs, optimize=True)
def test_if_for_in_range(self):
    """if/else inside a for-range loop, returning the branch-mutated var."""
    def func(a, b):
        # type: (int, int) -> int
        d = 3
        for _ in range(20):
            if bool(a > 10):
                a = 3 + d
            else:
                b = 3 + d
                d = 4
            c = a + b
        return d

    inputs = self._make_scalar_vars([1, -1], torch.int64)
    self.checkScript(func, inputs, optimize=True)
def test_if_noelse(self):
    """if without an else branch."""
    def func(a, b):
        if bool(a > 10):
            a = 3 + b
        c = a + b
        return c

    inputs = self._make_scalar_vars([-1, 1], torch.int64)
    self.checkScript(func, inputs, optimize=True)
def test_if_is_none_dispatch(self):
    """`x is None` dispatch during scripting: when the None-ness of a value
    is statically known (always/never None), the whole if/elif/else folds to
    a single branch (one prim::Constant); when it is only *maybe* None
    (Optional), a real if statement with all three constants is emitted.
    Statically decidable ``and``/``or``/``not`` over None-checks also folds
    away the ``if`` entirely."""
    @torch.jit.script
    def test_lhs_none_rhs_none():
        # LHS, RHS both alwaysNone, dispatch always_none_branch
        # only emit one prim::Constant
        if None is None:
            return 1
        elif None is not None:
            return 2
        else:
            return 3

    self.assertTrue(str(test_lhs_none_rhs_none.graph).count(': int = prim::Constant') == 1)

    @torch.jit.script
    def test_lhs_opt_rhs_none(lhs=None):
        # type: (Optional[Tensor]) -> int
        # LHS maybeNone: emit normal if stmt that contains 3 constants
        if lhs is not None:
            return 2
        elif lhs is None:
            return 1
        else:
            return 3

    self.assertTrue(str(test_lhs_opt_rhs_none.graph).count(': int = prim::Constant') == 3)

    @torch.jit.script
    def test_lhs_none_rhs_opt(rhs=None):
        # type: (Optional[Tensor]) -> int
        # RHS maybeNone, emit normal if stmt that contains 3 constants
        if None is rhs:
            return 1
        elif None is not rhs:
            return 2
        else:
            return 3

    # Fix: this previously re-checked test_lhs_opt_rhs_none.graph (copy-paste),
    # leaving test_lhs_none_rhs_opt's graph unverified.
    self.assertTrue(str(test_lhs_none_rhs_opt.graph).count(': int = prim::Constant') == 3)

    @torch.jit.script
    def test_lhs_never_rhs_none(lhs):
        # LHS neverNone, RHS alwaysNone dispatch never_none_branch
        # only emit one prim::Constant
        if lhs is None:
            return 1
        elif lhs is not None:
            return 2
        else:
            return 3

    self.assertTrue(str(test_lhs_never_rhs_none.graph).count(': int = prim::Constant') == 1)

    @torch.jit.script
    def test_lhs_none_rhs_never(rhs):
        # LHS alwaysNone, RHS neverNone dispatch never_none_branch
        # only emit one prim::Constant
        if None is rhs:
            return 1
        elif None is not rhs:
            return 2
        else:
            return 3

    self.assertTrue(str(test_lhs_none_rhs_never.graph).count(': int = prim::Constant') == 1)

    @torch.jit.script
    def test_bool_arith_and(lhs):
        # `lhs is None and lhs is not None` is statically False -> no if node.
        if lhs is None and lhs is not None:
            return 1
        else:
            return 2
    self.assertEqual(test_bool_arith_and(torch.zeros(3)), 2)
    self.assertTrue(str(test_bool_arith_and.graph).count('if') == 0)

    @torch.jit.script
    def test_bool_arith_or(lhs):
        # `lhs is None or lhs is not None` is statically True -> no if node.
        if lhs is None or lhs is not None:
            return 1
        else:
            return 2
    self.assertEqual(test_bool_arith_or(torch.zeros(3)), 1)
    self.assertTrue(str(test_bool_arith_or.graph).count('if') == 0)

    @torch.jit.script
    def test_bool_arith_not(lhs):
        # `not (lhs is None)` is statically True for a Tensor arg -> no if node.
        if not (lhs is None):
            return 1
        else:
            return 2
    self.assertEqual(test_bool_arith_not(torch.zeros(3)), 1)
    self.assertTrue(str(test_bool_arith_not.graph).count('if') == 0)
def test_conditional_casting(self):
    """Implicit bool casts in conditions: tensors (single element only),
    ``not`` of a tensor, int, float — and the errors for tuple conditions."""
    def test_bool_cast_tensor(x):
        if x:
            return 1
        else:
            return 0

    for make_one_dim in [True, False]:
        for inp_val in [0.1, 0.0, -0.0, -0.1, -1, 0, 1]:
            inp_val = [inp_val] if make_one_dim else inp_val
            self.checkScript(test_bool_cast_tensor, (torch.tensor(inp_val),))

    self.checkScriptRaisesRegex(test_bool_cast_tensor, (torch.tensor([1, 1]),), Exception,
                                "Boolean value of Tensor with more than one value")

    def test_not_cast(x):
        if not x:
            return 1
        else:
            return 0

    self.checkScript(test_not_cast, (torch.tensor(1),))
    self.checkScript(test_not_cast, (torch.tensor(0),))

    with self.assertRaisesRegex(RuntimeError, r"Could not cast value of type Tuple\[Tensor, Tensor\]"):  # noqa: W605
        @torch.jit.script
        def test_mult(x, y):
            return not(x, y)

    def test_cast_int(x):
        # type: (int) -> int
        if x:
            return 1
        else:
            return 0
    self.checkScript(test_cast_int, (1,))
    self.checkScript(test_cast_int, (0,))
    self.checkScript(test_cast_int, (-1,))

    def test_cast_float(x):
        # type: (float) -> int
        if x:
            return 1
        else:
            return 0
    self.checkScript(test_cast_float, (1.,))
    self.checkScript(test_cast_float, (0.,))
    self.checkScript(test_cast_float, (-1.,))

    with self.assertRaisesRegex(RuntimeError, r"Could not cast value of type Tuple\[int, int\] to bool"):  # noqa: W605
        @torch.jit.script
        def test_bad_conditional(x):
            if (1, 2):  # noqa: F634
                return
            else:
                return 0
# An undefined name used inside a while-loop body must be a compile error.
def test_while_nonexistent_value(self):
with self.assertRaisesRegex(RuntimeError, "undefined value x"):
torch.jit.CompilationUnit('''
def test_while(a, b):
while bool(a < 10):
a = a + x
b = b + 1
return a + b
''')
# An undefined name used in the while-loop *condition* must also be a
# compile error.
def test_while_nonexistent_cond_value(self):
with self.assertRaisesRegex(RuntimeError, "undefined value x"):
torch.jit.CompilationUnit('''
def test_while(a, b):
while a < x:
a = a + 1
b = b + 1
return a + b
''')
# NOTE(review): the statements below reference `self` but no enclosing
# `def` line is visible in this chunk -- the method header (plausibly
# `test_optional_refinement`) appears to be missing from this view;
# verify against the original file before editing.
#
# The scripted functions below check Optional[int] *type refinement*:
# after an `is not None` test (directly, via `and`/`or`, or under `not`),
# the value may be used as a plain int. The assertRaisesRegex cases pin
# the situations where refinement must NOT happen.
@torch.jit.script
def test_ternary(x):
# type: (Optional[int]) -> int
x = x if x is not None else 2
return x
@torch.jit.script
def test_not_none(x):
# type: (Optional[int]) -> None
if x is not None:
print(x + 1)
@torch.jit.script
def test_and(x, y):
# type: (Optional[int], Optional[int]) -> None
if x is not None and y is not None:
print(x + y)
@torch.jit.script
def test_not(x, y):
# type: (Optional[int], Optional[int]) -> None
if not (x is not None and y is not None):
pass
else:
print(x + y)
@torch.jit.script
def test_bool_expression(x):
# type: (Optional[int]) -> None
if x is not None and x < 2:
print(x + 1)
@torch.jit.script
def test_nested_bool_expression(x, y):
# type: (Optional[int], Optional[int]) -> int
if x is not None and x < 2 and y is not None:
x = x + y
else:
x = 5
return x + 2
@torch.jit.script
def test_or(x, y):
# type: (Optional[int], Optional[int]) -> None
if y is None or x is None:
pass
else:
print(x + y)
# backwards compatibility
@torch.jit.script
def test_manual_unwrap_opt(x):
# type: (Optional[int]) -> int
if x is None:
x = 1
else:
x = torch.jit._unwrap_optional(x)
return x # noqa: T484
# Negative cases: refinement must not leak into branches where the
# value can still be None.
with self.assertRaisesRegex(RuntimeError, "Arguments for call are not valid"):
@torch.jit.script
def or_error(x, y):
# type: (Optional[int], Optional[int]) -> None
if x is None or y is None:
print(x + y) # noqa: T484
with self.assertRaisesRegex(RuntimeError, "Arguments for call are not valid"):
@torch.jit.script
def and_error(x, y):
# type: (Optional[int], Optional[int]) -> None
if x is None and y is None:
pass
else:
print(x + y) # noqa: T484
# Refinement does not flow through a named boolean variable.
with self.assertRaisesRegex(RuntimeError, "Arguments for call are not valid"):
@torch.jit.script
def named_var(x):
# type: (Optional[int]) -> None
x_none = x is not None
if x_none:
print(x + 1) # noqa: T484
with self.assertRaisesRegex(RuntimeError, "Arguments for call are not valid"):
@torch.jit.script
def named_var_and(x, y):
# type: (Optional[int], Optional[int]) -> None
x_none = x is not None
if y is not None and x_none:
print(x + y) # noqa: T484
# `assert x is not None` must refine Optional[int] to int for the rest of
# the function, and still raise at runtime when the assertion fails.
def test_assertion_optional_refinement(self):
@torch.jit.script
def test(x, y):
# type: (Optional[int], Optional[int]) -> int
assert x is not None and y is not None
return x + y
self.assertEqual(test(2, 2), 4)
with self.assertRaisesRegex(Exception, ""):
test(1, None)
# Legacy-executor-only: checks that a None-specialized Optional[Tensor]
# input is dead-code-eliminated from the optimized graph, and that a
# Tensor argument specializes the input to TensorType.
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "the current version of Profiler doesn't profile/specialize Optionals")
def test_optional_tensor(self):
@torch.jit.script
def fn(x, y):
# type: (Optional[Tensor], int) -> int
if x is None:
return y
else:
return 0
res = fn(None, 1)
self.assertEqual(res, 1)
g = torch.jit.last_executed_optimized_graph()
first_input = next(g.inputs())
# check if input is disconnected
self.assertEqual(first_input.type().kind(), 'OptionalType')
self.assertEqual(first_input.uses(), [])
t = torch.ones(1)
res = fn(t, 1)
self.assertEqual(res, 0)
g = torch.jit.last_executed_optimized_graph()
self.assertEqual(next(g.inputs()).type().kind(), 'TensorType')
# _unwrap_optional must fail only when the optional is actually None
# on the taken branch.
@torch.jit.script
def fn(x, y, b):
# type: (Optional[Tensor], Tensor, bool) -> Tensor
if b:
res = y
else:
res = torch.jit._unwrap_optional(x)
return res
t2 = torch.zeros(1)
res = fn(t, t2, True)
self.assertEqual(res, t2)
with self.assertRaisesRegex(RuntimeError, "Unwrapping null optional"):
res = fn(None, t2, False)
res = fn(None, t2, True)
g = torch.jit.last_executed_optimized_graph()
self.assertIn(next(g.outputs()).type().str(), ("Tensor", "Tensor(requires_grad=1)"))
# Legacy-executor-only analogue of test_optional_tensor for
# Optional[List[int]]: None specialization disconnects the input; a real
# list specializes it to ListType.
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "the current version of Profiler doesn't profile/specialize Optionals")
def test_optional_list(self):
@torch.jit.script
def fn(x, y):
# type: (Optional[List[int]], int) -> int
if x is None:
return y
else:
res = 0
for d in x:
res += d
return res
res = fn(None, 1)
self.assertEqual(res, 1)
g = torch.jit.last_executed_optimized_graph()
first_input = next(g.inputs())
# check if input is disconnected
self.assertEqual(first_input.type().kind(), 'OptionalType')
self.assertEqual(first_input.uses(), [])
l = [2, 3]
res = fn(l, 1)
self.assertEqual(res, 5)
g = torch.jit.last_executed_optimized_graph()
self.assertEqual(next(g.inputs()).type().kind(), 'ListType')
# Unwrapping a null optional list must raise at runtime.
@torch.jit.script
def fn(x, y, b):
# type: (Optional[List[int]], List[int], bool) -> List[int]
if b:
l = torch.jit._unwrap_optional(x)
else:
l = y
return l
l2 = [0, 1]
res = fn(l, l2, True)
self.assertEqual(res, l)
with self.assertRaisesRegex(RuntimeError, "Unwrapping null optional"):
res = fn(None, l2, True)
res = fn(None, l2, False)
g = torch.jit.last_executed_optimized_graph()
self.assertEqual(next(g.outputs()).type().str(), "int[]")
# Branches assigning (None,) and ([],) to the same variable must unify to
# a covariant tuple-of-optional-list type without a compile error.
def test_alias_covariant_type_containers(self):
@torch.jit.script
def foo(x):
# type: (bool)
if x:
a = (None,)
else:
a = ([],)
return a
@torch.jit.script
def foo2(x, li):
# type: (bool, Tuple[Optional[List[Tensor]]])
if x:
li = (None,)
return li
# A variable written inside a while body and read after the loop must see
# the loop-carried value.
def test_while_write_outer_then_read(self):
def func(a, b):
while bool(a < 10):
a = a + 1
b = a + 1
return a + b
inputs = self._make_scalar_vars([42, 1337], torch.int64)
self.checkScript(func, inputs, optimize=True)
# An if-statement nested inside a while loop: the post-loop value of `c`
# must reflect the branch taken on the final iteration.
@skipIfTorchDynamo("TorchDynamo fails with unknown reason")
def test_while_nest_if(self):
def func(a, b):
# type: (int, int) -> int
c = 0
while a < 10:
a = a + 1
b = b + 1
if a > b:
c = -a
else:
c = -b
return c + 1
inputs = self._make_scalar_vars([-1234, 4321], torch.int64)
self.checkScript(func, inputs, optimize=True)
def test_divmod(self):
    """divmod() in TorchScript must match Python for every int/float
    argument combination, and must raise ZeroDivisionError for a zero
    divisor."""
    def func_int(a, b):
        # type: (int, int) -> Tuple[int, int]
        return divmod(a, b)

    def func_float(a, b):
        # type: (float, float) -> Tuple[float, float]
        return divmod(a, b)

    def func_int_float(a, b):
        # type: (int, float) -> Tuple[float, float]
        return divmod(a, b)

    def func_float_int(a, b):
        # type: (float, int) -> Tuple[float, float]
        return divmod(a, b)

    def divmod_test_iterator(func, num, den):
        # Cross-product of numerators/denominators; frames_up=2 so
        # checkScript resolves annotations in this test's frame.
        for i in num:
            for j in den:
                self.checkScript(func, (i, j), frames_up=2)

    num_int = [1024, -1024]
    den_int = [10, -10]
    num_float = [5.3, -5.3]
    den_float = [2.0, -2.0]
    divmod_test_iterator(func_int, num_int, den_int)
    divmod_test_iterator(func_float, num_float, den_float)
    divmod_test_iterator(func_int_float, num_int, den_float)
    divmod_test_iterator(func_float_int, num_float, den_int)

    # Zero divisors must raise. BUGFIX: the float patterns previously read
    # "float divmod()" with unescaped parens -- an empty regex group that
    # matched only "float divmod" by accident; the parens are now escaped
    # so the pattern matches the literal error message.
    with self.assertRaisesRegex(RuntimeError, "ZeroDivisionError: integer division or modulo by zero"):
        cu = torch.jit.CompilationUnit(dedent(inspect.getsource(func_int)))
        cu.func_int(1024, 0)
    with self.assertRaisesRegex(RuntimeError, r"ZeroDivisionError: float divmod\(\)"):
        cu = torch.jit.CompilationUnit(dedent(inspect.getsource(func_float)))
        cu.func_float(5.3, 0.0)
    with self.assertRaisesRegex(RuntimeError, r"ZeroDivisionError: float divmod\(\)"):
        cu = torch.jit.CompilationUnit(dedent(inspect.getsource(func_int_float)))
        cu.func_int_float(1024, 0.0)
    with self.assertRaisesRegex(RuntimeError, r"ZeroDivisionError: float divmod\(\)"):
        cu = torch.jit.CompilationUnit(dedent(inspect.getsource(func_float_int)))
        cu.func_float_int(5.3, 0)
def test_math_ops(self):
    """Compare TorchScript's `math.*` bindings against CPython's over a grid
    of float/int inputs (including inf, nan, and values outside int32 range).

    Mismatches are tolerated when Python raised (TorchScript may return nan
    instead, e.g. for math.sqrt(-5)) or when both sides are nan.
    """
    def checkMathWrap(func_name, num_args=1, is_float=True, **args):
        # For float-capable ops, exercise both the float and int overloads.
        if is_float:
            checkMath(func_name, num_args, True, **args)
            checkMath(func_name, num_args, False, **args)
        else:
            checkMath(func_name, num_args, is_float, **args)

    inf = float("inf")
    NaN = float("nan")
    mx_int = 2**31 - 1
    mn_int = -2**31
    float_vals = ([inf, NaN, 0.0, 1.0, 2.2, -1.0, -0.0, -2.2, -inf, 1, 0, 2] +
                  [10.0 ** i for i in range(5)] + [-(10.0 ** i) for i in range(5)])
    int_vals = list(range(-5, 5, 1)) + [mx_int + 5, mx_int * 2, mn_int - 5, mn_int * 2]

    def checkMath(func_name, num_args, is_float=True, ret_type="float", debug=False, vals=None, args_type=None):
        # Build a scripted `func(a, b)` forwarding to the requested math.*
        # op, then compare the scripted result against eager Python.
        funcs_template = dedent('''
        def func(a, b):
            # type: {args_type} -> {ret_type}
            return math.{func}({args})
        ''')
        if num_args == 1:
            args = "a"
        elif num_args == 2:
            args = "a, b"
        else:
            raise RuntimeError("Test doesn't support more than 2 arguments")
        if args_type is None:
            args_type = "(float, float)" if is_float else "(int, int)"
        funcs_str = funcs_template.format(func=func_name, args=args, args_type=args_type, ret_type=ret_type)
        scope = {}
        execWrapper(funcs_str, globals(), scope)
        cu = torch.jit.CompilationUnit(funcs_str)
        f_script = cu.func
        f = scope['func']
        if vals is None:
            vals = float_vals if is_float else int_vals
            vals = [(i, j) for i in vals for j in vals]
        for a, b in vals:
            res_python = None
            res_script = None
            try:
                res_python = f(a, b)
            except Exception as e:
                res_python = e
            try:
                res_script = f_script(a, b)
            except Exception as e:
                res_script = e
            if debug:
                print("in: ", a, b)
                print("out: ", res_python, res_script)
            # We can't use assertEqual because of a couple of differences:
            # 1. nan == nan should return true
            # 2. When python functions throw an exception, we usually want to silently ignore them.
            # (ie: We want to return `nan` for math.sqrt(-5))
            if res_python != res_script:
                if isinstance(res_python, Exception):
                    continue
                if type(res_python) == type(res_script):
                    if isinstance(res_python, tuple) and (math.isnan(res_python[0]) == math.isnan(res_script[0])):
                        continue
                    if isinstance(res_python, float) and math.isnan(res_python) and math.isnan(res_script):
                        continue
                msg = ("Failed on {func_name} with inputs {a} {b}. Python: {res_python}, Script: {res_script}"
                       .format(func_name=func_name, a=a, b=b, res_python=res_python, res_script=res_script))
                # BUGFIX: the tolerance previously used the *signed* script
                # result -- max(abs(res_python), res_script) -- which could
                # yield a negative or asymmetric atol. Use abs() of both.
                self.assertEqual(res_python, res_script, msg=msg,
                                 atol=(1e-4) * max(abs(res_python), abs(res_script)), rtol=0)

    unary_float_ops = ["log", "log1p", "log10", "exp", "sqrt", "gamma", "lgamma", "erf",
                       "erfc", "expm1", "fabs", "acos", "asin", "atan", "cos", "sin", "tan",
                       "asinh", "atanh", "acosh", "sinh", "cosh", "tanh", "degrees", "radians"]
    binary_float_ops = ["atan2", "fmod", "copysign"]
    for op in unary_float_ops:
        checkMathWrap(op, 1)
    for op in binary_float_ops:
        checkMathWrap(op, 2)
    checkMath("modf", 1, ret_type="Tuple[float, float]")
    checkMath("frexp", 1, ret_type="Tuple[float, int]")
    checkMath("isnan", 1, ret_type="bool")
    checkMath("isinf", 1, ret_type="bool")
    checkMath("ldexp", 2, is_float=False, ret_type="float", args_type="(float, int)",
              vals=[(i, j) for i in float_vals for j in range(-10, 10)])
    checkMath("pow", 2, is_float=False, ret_type="float")
    checkMath("pow", 2, is_float=True, ret_type="float")
    checkMathWrap("floor", ret_type="int")
    checkMathWrap("ceil", ret_type="int")
    checkMathWrap("gcd", 2, is_float=False, ret_type="int")
    checkMath("isfinite", 1, ret_type="bool")
    checkMathWrap("remainder", 2)
    checkMathWrap("factorial", 1, is_float=False, ret_type="int", vals=[(i, 0) for i in range(-2, 10)])
# A while loop nested inside an if-branch; with a > b the loop runs until
# b catches up and `c` holds the last -b written.
@skipIfTorchDynamo("TorchDynamo fails with unknown reason")
def test_if_nest_while(self):
def func(a, b):
# type: (int, int) -> int
c = 0
if a > b:
while a > b:
b = b + 1
c = -b
return c
inputs = self._make_scalar_vars([4321, 1234], torch.int64)
self.checkScript(func, inputs)
# None handling in scripted functions: None-initialized locals, returning
# None for an Optional return type, and None as a default argument for
# both Tensor and non-Tensor optionals.
def test_script_optional_none(self):
def none_stmt(x):
output = None
output = x
return output
def none_args(x):
# type: (Optional[Tensor]) -> Optional[Tensor]
return None
self.checkScript(none_stmt, [torch.arange(0, 2)], optimize=True)
self.checkScript(none_args, [None], optimize=True)
# test undefined tensor None as default param
def test_script_optional_tensor_none(x=None):
# type: (Optional[Tensor]) -> Tensor
res = torch.zeros(1, dtype=torch.int8)
if x is None:
res = res + 1
else:
res = x
return res
fn = test_script_optional_tensor_none
scripted_fn = torch.jit.script(fn)
self.assertEqual(fn(), scripted_fn())
self.assertEqual(fn(torch.zeros(1)), scripted_fn(torch.zeros(1)))
# test typical None as default param
def test_script_optional_other_none(x=None):
# type: (Optional[float]) -> float
res = 2.0
if x is None:
res = res + 1.0
else:
res = x
return res
fn = test_script_optional_other_none
scripted_fn = torch.jit.script(fn)
self.assertEqual(fn(), scripted_fn())
self.assertEqual(fn(1.0), scripted_fn(1.0))
# torch.clamp with an explicit None min/max (and with the bound simply
# omitted) must script correctly.
def test_script_clamp_none(self):
def test_script_clamp_max_none(x):
return torch.clamp(x, min=2, max=None)
def test_script_clamp_max(x):
return torch.clamp(x, max=2)
def test_script_clamp_min_none(x):
return torch.clamp(x, min=None, max=2)
def test_script_clamp_min(x):
return torch.clamp(x, min=2)
input = [torch.arange(0, 3)]
self.checkScript(test_script_clamp_max_none, input, optimize=True)
self.checkScript(test_script_clamp_max, input, optimize=True)
self.checkScript(test_script_clamp_min_none, input, optimize=True)
self.checkScript(test_script_clamp_min, input, optimize=True)
# A bare bool constant assignment must script and round-trip.
def test_script_bool_constant(self):
def test_script_bool_constant():
a = True
return a
self.checkScript(test_script_bool_constant, [])
# Conditional expression (`x if cond else y`) with a Tensor condition,
# exercising both the true and false arms.
def test_ternary(self):
def func(a, b):
c = 3
c = a + b if bool(a > 3) else b
return c
inputs_true = self._make_scalar_vars([5, 2], torch.int64)
inputs_false = self._make_scalar_vars([1, 0], torch.int64)
self.checkScript(func, inputs_true, optimize=True)
self.checkScript(func, inputs_false, optimize=True)
# Ternaries whose arms have different types ('out' vs {}) must unify under
# an Any / Optional annotation, both in a return and a local assignment.
def test_ternary_module_type_hint(self):
class M1(torch.nn.Module):
def forward(self) -> Any:
return 'out' if self.training else {}
class M2(torch.nn.Module):
def forward(self) -> Any:
out: Any = 'out' if self.training else {}
return out
class M3(torch.nn.Module):
def forward(self) -> Optional[int]:
return None if self.training else 1
for module in [M1, M2, M3]:
# Exercise both truth values of self.training.
self.checkModule(module().train(), ())
self.checkModule(module().eval(), ())
# With the condition annotated torch.jit.Final, the ternary is resolved
# statically, so the dead arm (`{}`, which would not type-check against
# the Tensor return) is never compiled.
def test_ternary_static_if(self):
# Test for True branch when condition variable
# is annotated as Final
class M1(torch.nn.Module):
flag: torch.jit.Final[bool]
def __init__(self):
super().__init__()
self.flag = True
def forward(self) -> torch.Tensor:
return torch.ones(3) if self.flag else {}
# Test for the False branch when condition variable
# is annotated as Final (flag is False, so the else arm is taken)
class M2(torch.nn.Module):
flag: torch.jit.Final[bool]
def __init__(self):
super().__init__()
self.flag = False
def forward(self) -> torch.Tensor:
return {} if self.flag else torch.ones(3)
model1 = M1()
model2 = M2()
script_model_1 = torch.jit.script(model1)
script_model_2 = torch.jit.script(model2)
self.assertEqual(model1.forward(), script_model_1.forward())
self.assertEqual(model2.forward(), script_model_2.forward())
# Chained ternaries must parse right-associatively, as in Python.
def test_ternary_right_associative(self):
def plus_123(x: int):
return x + 1 if x == 1 else x + 2 if x == 2 else x + 3
self.checkScript(plus_123, (1,))
self.checkScript(plus_123, (2,))
self.checkScript(plus_123, (3,))
# print() with mixed tensor, int, and list arguments must script;
# capture_output=True swallows the printed text during checkScript.
def test_print(self):
def func(x, y):
q = (x + y).sigmoid()
print(q, 1, 2, [1, 2], [1.0, 2.0])
w = -q
return w * w
x = torch.arange(4., requires_grad=True)
y = torch.arange(0., 8, 2, requires_grad=True)
self.checkScript(func, [x, y], optimize=True, capture_output=True)
# str.format() with zero or one positional placeholder must script.
def test_format(self):
def func(x):
print("{}, I'm a {}".format("Hello", "test"))
print("format blank".format())
print("stuff before {}".format("hi"))
print("{} stuff after".format("hi"))
return x + 1
x = torch.arange(4., requires_grad=True)
self.checkScript(func, [x], optimize=True, capture_output=True)
# Short-circuit evaluation: constant-foldable and/or must eliminate the
# prim::If entirely (and never evaluate the out-of-range t[1]), while
# non-constant operands must still evaluate and raise.
def test_logical_short_circuit(self):
@torch.jit.script
def testNoThrows(t):
c1 = 1
if (False and bool(t[1])) or (True or bool(t[1])):
c1 = 0
return c1
FileCheck().check_not("prim::If").run(testNoThrows.graph)
self.assertEqual(0, testNoThrows(torch.randn(0)))
self.assertEqual(0, testNoThrows(torch.randn([2, 3])))
@torch.jit.script
def throwsOr(t):
c0 = False or bool(t[1])
print(c0)
@torch.jit.script
def throwsAnd(t):
c0 = True and bool(t[1])
print(c0)
t = torch.randn(0)
with self.assertRaisesRegex(RuntimeError, "index 1 out of range for tensor of size"):
throwsOr(t)
with self.assertRaisesRegex(RuntimeError, "index 1 out of range for tensor of size"):
throwsAnd(t)
def test_type_cast(self):
    """Scripted `{to_type}(v)` conversions between int, float and bool
    must match Python semantics."""
    template = dedent('''
    def func(v):
        # type: ({from_type}) -> {to_type}
        return {to_type}(v)
    ''')

    def check_cast(from_type, to_type, value):
        # CLEANUP: the original helper took a `raises=False` parameter
        # that was never read; it has been removed.
        code = template.format(from_type=from_type, to_type=to_type)
        self.checkScript(code, (value,))

    check_cast('int', 'float', 1)
    check_cast('int', 'bool', 1)
    check_cast('int', 'bool', 0)
    check_cast('float', 'int', 1.)
    check_cast('float', 'bool', 1.)
    check_cast('float', 'bool', 0.)
    check_cast('bool', 'int', True)
    check_cast('bool', 'float', True)
# Tuple-unpacking the result of a (Python, non-scripted) function called
# from a scripted function.
def test_multiple_assignment(self):
def outer_func(x):
return x * 2, x + 2
@torch.jit.script
def func(x):
y, z = outer_func(x)
return y + z
x = torch.arange(4)
self.assertEqual(func(x), x * 2 + x + 2)
# A list literal passed as a keyword argument (view(size=[...])) must script.
def test_literals(self):
def func(a):
return a.view(size=[1, 2, 3])
a = torch.randn(6)
self.checkScript(func, [a], optimize=True)
# Return statement shapes: implicit None, bare `return`, single value,
# tuple of values; an annotated non-None return with no return statement
# must be a compile error.
def test_return(self):
def no_return(a):
a + 1
def void_return(a):
return
def one_return(a):
return a + 1.
def multiple_returns(a):
return a * 1., a * 2., a * 3.
a = torch.randn(1, dtype=torch.float)
self.checkScript(no_return, [a], optimize=True)
self.checkScript(void_return, [a], optimize=True)
self.checkScript(one_return, [a], optimize=True)
self.checkScript(multiple_returns, [a], optimize=True)
with self.assertRaisesRegex(RuntimeError, "does not return along all paths"):
torch.jit.CompilationUnit('''
def no_return_bad_annotation(a):
# type: (Tensor) -> Tensor
a + 1
''')
# Shape/op errors surface at interpreter time (not compile time) with the
# "failed in the TorchScript interpreter" prefix.
def test_error(self):
@torch.jit.script
def foo(a):
return a.t()
s = Variable(torch.rand(5, 5, 5))
# XXX: this should stay quiet in stay propagation and only fail in the interpreter
with self.assertRaisesRegex(RuntimeError, "failed in the TorchScript interpreter"):
foo(s)
@torch.jit.script
def bar(c, b):
return c + b
with self.assertRaisesRegex(RuntimeError, "failed in the TorchScript interpreter"):
bar(Variable(torch.rand(10), requires_grad=True), Variable(torch.rand(9), requires_grad=True))
# A runtime error through nested scripted calls must carry a traceback
# naming each frame (bar -> foo -> baz).
def test_error_stacktrace(self):
@torch.jit.script
def baz(c, b):
return c + b
@torch.jit.script
def foo(c, b):
return baz(c, b)
@torch.jit.script
def bar(c, b):
return foo(c, b)
with self.assertRaises(RuntimeError) as cm:
bar(torch.rand(10), torch.rand(9))
FileCheck().check("The following operation failed in the TorchScript interpreter") \
.check("Traceback") \
.check("in foo").check("in baz").run(str(cm.exception))
# Same stacktrace check as test_error_stacktrace, but with the failing
# call routed through a torch.jit.interface-typed object.
def test_error_stacktrace_interface(self):
@torch.jit.script
def baz(c, b):
return c + b
@torch.jit.script
def foo(c, b):
return baz(c, b)
@torch.jit.script
def bar(c, b):
return foo(c, b)
@torch.jit.script
class Bar(object):
def one(self, x, y):
return bar(x, y)
@torch.jit.interface
class IFace(object):
def one(self, x, y):
# type: (Tensor, Tensor) -> Tensor
pass
make_global(IFace)
@torch.jit.script
def as_interface(x):
# type: (IFace) -> IFace
return x
f = as_interface(Bar())
with self.assertRaises(RuntimeError) as cm:
x = f.one(torch.rand(10), torch.rand(9))
# NOTE(review): if f.one raises as this test expects, the call below
# never executes -- it looks like leftover code; verify upstream.
bar(torch.rand(10), torch.rand(9))
FileCheck().check("The following operation failed in the TorchScript interpreter") \
.check("Traceback") \
.check("in foo").check("in baz").run(str(cm.exception))
# One big arithmetic/bitwise/shift expression whose result depends on
# TorchScript honoring Python's operator precedence exactly.
def test_operator_precedence(self):
def double(x):
# type: (int) -> int
return 2 * x
def complicated_arithmetic_operation():
# TODO we need to test exponent operator '**' and bitwise not
# operator '~' once they are properly supported.
# (the local name `list` shadows the builtin; harmless inside
# this scripted function)
list = [0, 1, 2, 3]
result = list[1:3][0] + double(4) + (-3 + 8) * 6 // 2 % 4 << 2 + 1 >> 1 | 23 & 16 + 3 ^ 4
return result
self.checkScript(complicated_arithmetic_operation, ())
# Substring membership (`in`) between two string literals must script.
def test_in_operator_with_two_strings(self):
def fn() -> bool:
return "a" in "abcd"
self.checkScript(fn, ())
# Bitwise &, ^, |, <<, >> and ~ across int, bool and Tensor operands.
def test_bitwise_ops(self):
def int_test():
return 2 & 3, 2 ^ 3, 2 | 3, 2 << 3, 2 >> 3
self.checkScript(int_test, ())
def bool_test(x, y):
# type: (bool, bool) -> Tuple[bool, bool, bool]
return x & y, x ^ y, x | y
self.checkScript(bool_test, (True, False))
self.checkScript(bool_test, (True, True))
def tensor_test(x, y):
return x & y, x ^ y, x | y
def tensor_with_int_test(x, y):
# type: (Tensor, int) -> Tuple[Tensor, Tensor]
return x << y, x >> y
x = torch.tensor(2)
y = torch.tensor(3)
self.checkScript(tensor_test, (x, y))
self.checkScript(tensor_with_int_test, (x, 2))
def not_test(x):
return ~x
self.checkScript(not_test, (torch.tensor([2, 4]), ))
# Builtin all() over a Tensor and over List[bool]/List[int]/List[float],
# including the vacuous-truth case all([]) == True.
def test_all(self):
@torch.jit.script
def test_all_tensor(x):
return all(x)
self.assertFalse(test_all_tensor(torch.tensor([1, 0, 3], dtype=torch.uint8)))
self.assertTrue(test_all_tensor(torch.tensor([3.14, 3, 99], dtype=torch.uint8)))
self.assertTrue(test_all_tensor(torch.tensor([True, True], dtype=torch.uint8)))
self.assertFalse(test_all_tensor(torch.tensor([True, False], dtype=torch.uint8)))
@torch.jit.script
def test_all_bool_list(x):
# type: (List[bool]) -> bool
return all(x)
self.assertTrue(test_all_bool_list([True, True]))
self.assertTrue(test_all_bool_list([True, 1]))
self.assertFalse(test_all_bool_list([True, False]))
self.assertFalse(test_all_bool_list([True, 0]))
self.assertFalse(test_all_bool_list([False, 0]))
self.assertTrue(test_all_bool_list([]))
@torch.jit.script
def test_all_int_list(x):
# type: (List[int]) -> bool
return all(x)
self.assertTrue(test_all_int_list([3, 6]))
self.assertFalse(test_all_int_list([2, 0]))
@torch.jit.script
def test_all_float_list(x):
# type: (List[float]) -> bool
return all(x)
self.assertTrue(test_all_float_list([3.14, 8.1]))
self.assertFalse(test_all_float_list([3.14, 0, 8.9]))
# Scalar arithmetic/comparison operators and min/max over every pair of
# int/float literals, plus the same pairs routed through
# torch.tensor(..).item() to exercise the Scalar overloads.
def test_number_math(self):
ops_template = dedent('''
def func():
return {scalar1} {op} {scalar2}
''')
ops = ['+', '-', '*', '%', '<', '<=', '>', '>=', '==', '!=', '//']
funcs_template = dedent('''
def func():
return {func}({scalar1}, {scalar2})
''')
funcs = ['min', 'max']
scalars = ['7', '2', '3', '-3', '3.14', '0.125', '-0.5', '2.0', '-2.0']
scalar_pairs = [(scalar1, scalar2) for scalar1 in scalars for scalar2 in scalars]
# Compile the generated source with both exec (eager) and TorchScript
# and compare the two results.
def run_test(code):
scope = {}
execWrapper(code, globals(), scope)
cu = torch.jit.CompilationUnit(code)
self.assertEqual(cu.func(), scope['func']())
for scalar1, scalar2 in scalar_pairs:
for op in ops:
code = ops_template.format(op=op, scalar1=scalar1, scalar2=scalar2)
run_test(code)
for func in funcs:
code = funcs_template.format(func=func, scalar1=scalar1, scalar2=scalar2)
run_test(code)
# test Scalar overloads
for scalar1, scalar2 in scalar_pairs:
item1 = 'torch.tensor(' + scalar1 + ').item()'
item2 = 'torch.tensor(' + scalar2 + ').item()'
for op in ops:
code = ops_template.format(op=op, scalar1=item1, scalar2=scalar2)
run_test(code)
code = ops_template.format(op=op, scalar1=scalar1, scalar2=item2)
run_test(code)
code = ops_template.format(op=op, scalar1=item1, scalar2=item2)
run_test(code)
for func in funcs:
code = funcs_template.format(func=func, scalar1=item1, scalar2=scalar2)
run_test(code)
code = funcs_template.format(func=func, scalar1=scalar1, scalar2=item2)
run_test(code)
code = funcs_template.format(func=func, scalar1=item1, scalar2=item2)
run_test(code)
# abs() on float, int and Tensor inputs, positive and negative.
def test_number_abs(self):
def func1(x):
# type: (float) -> float
return abs(x)
def func2(x):
# type: (int) -> int
return abs(x)
def func3(x):
return abs(x)
self.checkScript(func1, (-3.14,))
self.checkScript(func1, (3.14,))
self.checkScript(func2, (-10,))
self.checkScript(func2, (10,))
self.checkScript(func3, (torch.tensor([-5, -10, -20]),))
self.checkScript(func3, (torch.tensor([5, 10, 20]),))
self.checkScript(func3, (torch.tensor([-5, 10, -20]),))
# Division semantics with and without `from __future__ import division`
# (the div_* helpers are defined elsewhere in this file).
def test_number_div(self):
self.assertEqual(div_int_future(), torch.jit.script(div_int_future)())
self.checkScript(div_float_future, ())
self.checkScript(div_int_nofuture, ())
self.checkScript(div_float_nofuture, ())
# Testing bitwise shorthand aug assignment
# `|=` on bools over all four input combinations.
def test_bool_augassign_bitwise_or(self):
def func(a: bool, b: bool) -> bool:
a |= b
return a
self.checkScript(func, (True, False), optimize=True)
self.checkScript(func, (True, True), optimize=True)
self.checkScript(func, (False, False), optimize=True)
self.checkScript(func, (False, True), optimize=True)
# `&=` on bools over all four input combinations.
def test_bool_augassign_bitwise_and(self):
def func(a: bool, b: bool) -> bool:
a &= b
return a
self.checkScript(func, (True, False), optimize=True)
self.checkScript(func, (True, True), optimize=True)
self.checkScript(func, (False, False), optimize=True)
self.checkScript(func, (False, True), optimize=True)
# `^=` on bools over all four input combinations.
def test_bool_augassign_bitwise_xor(self):
def func(a: bool, b: bool) -> bool:
a ^= b
return a
self.checkScript(func, (True, False), optimize=True)
self.checkScript(func, (True, True), optimize=True)
self.checkScript(func, (False, False), optimize=True)
self.checkScript(func, (False, True), optimize=True)
# `<<=` on an int.
def test_number_augassign_bitwise_lshift(self):
def func() -> int:
z = 8
z <<= 2
return z
self.checkScript(func, (), optimize=True)
# `>>=` on an int.
def test_number_augassign_bitwise_rshift(self):
def func() -> int:
z = 8
z >>= 2
return z
self.checkScript(func, (), optimize=True)
# `**=` on an int-initialized variable; annotated to return float.
# NOTE(review): eager Python returns int 64 here while the annotation
# says float -- presumably relying on assertEqual's int/float
# comparison; confirm intended.
def test_number_augassign_bitwise_pow(self):
def func() -> float:
z = 8
z **= 2
return z
self.checkScript(func, (), optimize=True)
# `+=` on an int.
def test_number_augassign(self):
def func():
z = 1
z += 2
return z
self.checkScript(func, (), optimize=True)
# Assigning through nested submodule attribute chains
# (self.sub.nested.abc = ...) inside forward must script and the reads
# must observe the new values.
def test_nested_select_assign(self):
class SubSubModule(torch.nn.Module):
def __init__(self):
super(SubSubModule, self).__init__()
self.abc = 11
def forward(self, x):
return self.abc
class SubModule(torch.nn.Module):
def __init__(self):
super(SubModule, self).__init__()
self.a = 11
self.nested = SubSubModule()
def forward(self, x):
return self.a
class TestModule(torch.nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.sub = SubModule()
self.hi = 1
def forward(self):
self.hi = 5
self.sub.a = 1
self.sub.nested.abc = 5
return self.sub.a * 20 + self.sub.nested.abc * 3 + self.hi
self.checkModule(TestModule(), ())
# Unary minus on int and float literals.
def test_number_neg(self):
# int -> int
def func1():
return -8
# float -> float
def func2():
return -3.14
self.checkScript(func1, (), optimize=True)
self.checkScript(func2, (), optimize=True)
def test_compare_two_bool_inputs(self):
    """Scripted == and != on two bool arguments must agree with eager
    Python for every input combination."""
    def compare_eq(a: bool, b: bool):
        return a == b

    def compare_ne(a: bool, b: bool):
        return a != b

    eq_scripted = torch.jit.script(compare_eq)
    ne_scripted = torch.jit.script(compare_ne)
    # Exhaust the 2x2 bool cross-product for both operators.
    for lhs in (True, False):
        for rhs in (True, False):
            self.assertEqual(eq_scripted(lhs, rhs), compare_eq(lhs, rhs))
            self.assertEqual(ne_scripted(lhs, rhs), compare_ne(lhs, rhs))
# Shared helper: every arithmetic/comparison op between a tensor and a
# Python scalar, in both operand orders, compared (value and dtype)
# against eager execution. Known-broken combos are skipped below.
def _test_tensor_number_math(self, device='cpu'):
template = dedent('''
def func(t):
return {lhs} {op} {rhs}
''')
def test(op, tensor, const, swap_args, template=template):
args = ('t', const)
if swap_args:
args = (const, 't')
code = template.format(lhs=args[0], rhs=args[1], op=op)
scope = {}
execWrapper(code, globals(), scope)
cu = torch.jit.CompilationUnit(code)
message = 'with code `{} {} {}` and t={}'.format(args[0], op, args[1], tensor)
res1 = cu.func(tensor)
res2 = scope['func'](tensor)
self.assertEqual(res1, res2, msg=message + "\nres1=" + str(res1) + "\nres2=" + str(res2))
self.assertEqual(res1.dtype, res2.dtype, msg=message + "\nres1=" + str(res1) + "\nres2=" + str(res2))
var_int = [2, -2]
var_float = [1.4321, -1.2]
ops = ['+', '-', '*', '%', '<', '<=', '>', '>=', '==', '!=', '/']
float_tensor = torch.randn(5, 5, device=device)
double_tensor = torch.randn(5, 5, dtype=torch.double, device=device)
long_tensor = torch.randint(-5, 5, (5, 5), dtype=torch.long, device=device)
# Avoid zeros so % and / never divide by zero.
long_tensor[long_tensor == 0] = 2
tensors = [float_tensor, double_tensor, long_tensor]
consts = var_int + var_float
for op, tensor, const, swap_args in product(ops, tensors, consts, [True, False]):
# FIXME: things like 2 / long_tensor are not implemented correctly
# Look in torch/_tensor.py to see how pytorch implements it.
if op == '/' and tensor.data_ptr() == long_tensor.data_ptr():
continue
# % operator does not take: const % tensor
if op == '%' and swap_args is True:
continue
test(op, tensor, const, swap_args)
# CPU entry point for the shared tensor/scalar math helper above.
def test_tensor_number_math(self):
self._test_tensor_number_math()
# torch.tensor() with invalid literals: None elements, an untyped empty
# list, and a ragged nested list must all raise with clear messages.
def test_torch_tensor_bad_input(self):
with self.assertRaisesRegex(RuntimeError, "must be of ints, floats, "
"or bools, got None"):
@torch.jit.script
def test():
return torch.tensor([None])
test()
with self.assertRaisesRegex(RuntimeError, r"Empty lists default to List\[Tensor\]"):
@torch.jit.script
def tmp():
return torch.tensor([])
tmp()
# Ragged nesting compiles but fails at runtime.
@torch.jit.script
def foo():
return torch.tensor([[2, 2], [1]])
with self.assertRaisesRegex(RuntimeError, "Expected sequence of length"):
foo()
# torch.tensor / torch.as_tensor from an annotated empty List[int]
# (also nested): TorchScript honors the int annotation while eager
# Python defaults to float, so dtypes intentionally differ.
@suppress_warnings
def test_torch_tensor_as_tensor_empty_list(self):
tensor_template = dedent('''
def func():
empty_list = torch.jit.annotate(List[int], [])
ten1 = torch.{tensor_op}({input})
return ten1
''')
ops = ['tensor', 'as_tensor']
inputs = ['empty_list', '[empty_list, empty_list]', '[[[empty_list]]]']
for op in ops:
for inp in inputs:
code = tensor_template.format(tensor_op=op, input=inp)
scope = {}
exec(code, globals(), scope)
cu = torch.jit.CompilationUnit(code)
t1 = cu.func()
t2 = scope['func']()
if inp == 'empty_list':
# torchscript returns int tensor, python returns float tensor
self.assertNotEqual(t1.dtype, t2.dtype)
self.assertEqual(t1, t2, exact_dtype=False)
self.assertEqual(t1.device, t2.device)
# Legacy-executor shape propagation through torch.tensor/as_tensor:
# complete-shape analysis must infer the dtype/rank printed by FileCheck.
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "Simple Executor doesn't have any shapes to propagate")
def test_tensor_as_tensor_shape_prop(self):
tensor_template = dedent('''
def func():
return torch.{tensor_op}({input})
''')
ops = ['tensor', 'as_tensor']
inputs = ['[1]', '[False]', '[2.5]', '0.5', '1', 'False', '[[1]]', 'torch.jit.annotate(List[List[int]], [])']
# NOTE(review): `inputs` has 8 entries but `expected_shape` only 7, so
# zip() silently drops the final annotate(...) input -- it is never
# tested; verify whether an eighth expectation should be added.
expected_shape = ["Long(*, device=cpu)", "Bool(*, device=cpu)",
"Double(*, device=cpu)", "Double(device=cpu)",
"Long(device=cpu)", "Bool(device=cpu)", "Long(*, *, device=cpu)"]
for op in ops:
for inp, expect in zip(inputs, expected_shape):
code = tensor_template.format(tensor_op=op, input=inp)
scope = {}
exec(code, globals(), scope)
cu = torch.jit.CompilationUnit(code)
torch._C._jit_pass_complete_shape_analysis(cu.func.graph, (), False)
FileCheck().check(expect).check("aten::{tensor_op}".format(tensor_op=op)).run(cu.func.graph)
# dtype provided as a runtime argument vs. a constant: only the
# constant case can be shape-propagated in the legacy executor.
@torch.jit.script
def test_dtype(inp_dtype: torch.dtype):
a = torch.tensor(1.0, dtype=torch.float, requires_grad=True)
return a, torch.tensor(1.0, dtype=inp_dtype)
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
g = test_dtype.graph_for(5, profile_and_replay=True)
# both should have completed shapes
FileCheck().check("Tensor = aten::tensor").check("Float(device=cpu) = prim::BailOut") \
.check("Tensor = aten::tensor").check("Half(device=cpu) = prim::BailOut").run(g)
else:
g = test_dtype.graph_for(5)
# first should have type set second should not
FileCheck().check("Float(requires_grad=1, device=cpu) = aten::tensor") \
.check("Tensor(requires_grad=0) = aten::tensor").run(g)
@torch.jit.script
def test_as_tensor_tensor_input(input):
a = torch.as_tensor(input, dtype=input.dtype)
return a, torch.as_tensor(input, dtype=torch.float)
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
g = test_as_tensor_tensor_input.graph_for(torch.ones(3, 4), profile_and_replay=True)
FileCheck().check("Tensor = aten::as_tensor").check("Float(3, 4) = prim::BailOut") \
.check("Tensor = aten::as_tensor").check("Float(3, 4) = prim::BailOut").run(g)
else:
g = test_as_tensor_tensor_input.graph_for(torch.ones(3, 4))
FileCheck().check("Tensor = aten::as_tensor").check("Float(*, *, requires_grad=0, device=cpu) = aten::as_tensor").run(g)
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "testing legacy behavior")
    def test_tensor_requires_grad(self):
        """Under the legacy executor, requires_grad from torch.tensor(...) calls
        must be reflected on the corresponding graph values."""
        @torch.jit.script
        def test(b):
            # type: (bool) -> Tuple[Tensor, Tensor, Tensor]
            a = torch.tensor(1., requires_grad=b)
            b = torch.tensor(1., requires_grad=True)
            c = torch.tensor(1., requires_grad=False)
            return a, b, c
        g = test.graph_for(True)
        # The graph output is a tuple-construct node; its inputs are (a, b, c).
        out = next(g.outputs())
        out_inp = list(out.node().inputs())
        # a was built with requires_grad=b and b=True was passed at graph_for time
        self.assertTrue(out_inp[0].requires_grad())
        self.assertTrue(out_inp[1].requires_grad())
        self.assertFalse(out_inp[2].requires_grad())
def test_grad_from_script(self):
def test():
a = torch.tensor(2.5, requires_grad=True)
b = a * 2
return a, b
a, b = test()
b.backward()
a_script, b_script = torch.jit.script(test)()
b_script.backward()
self.assertEqual(a.grad, a_script.grad)
    def test_torch_tensor_as_tensor(self):
        """Run torch.tensor / torch.as_tensor over a grid of literal inputs,
        dtype options and devices; scripted and eager results must agree in
        value, dtype and device."""
        tensor_template = dedent('''
        def func():
            li = {list_create}
            ten1 = torch.{tensor_op}(li {options})
            return ten1
        ''')
        lists = ["2.5", "4", "True", "False", "[2]", "[-.5]", "[False, True, False]", "[2, 2]", "(1, 1)",
                 "torch.jit.annotate(List[List[int]], [])",
                 "torch.jit.annotate(List[int], [])", "[2.5, 2.5]", "[[2], [2]]", "[[-.5], [2.2]]", "[[False], [True]]"]
        dtypes = ["", ", dtype=torch.float", ", dtype=torch.double", ", dtype=torch.half",
                  ", dtype=torch.uint8", ", dtype=torch.int8", ", dtype=torch.short",
                  ", dtype=torch.int", ", dtype=torch.long", ", dtype=torch.cfloat",
                  ", dtype=torch.cdouble"]
        ops = ['tensor', 'as_tensor']
        devices = ['', ", device='cpu'"]
        if RUN_CUDA:
            devices.append(", device='cuda'")
        option_pairs = [dtype + device for dtype in dtypes for device in devices]
        for op in ops:
            for li in lists:
                for option in option_pairs:
                    # tensor from empty list is type float in python and annotated type in torchscript
                    if "annotate" in li and "dtype" not in option:
                        continue
                    # Skip unsigned tensor initialization for signed values on 3.10
                    if sys.version_info[:2] >= (3, 10) and "torch.uint8" in option and "-" in li:
                        continue
                    code = tensor_template.format(list_create=li, tensor_op=op, options=option)
                    scope = {}
                    # run the generated source both as plain Python and as TorchScript
                    exec(code, globals(), scope)
                    cu = torch.jit.CompilationUnit(code)
                    t1 = cu.func()
                    t2 = scope['func']()
                    if t1.dtype == torch.float16:  # equality NYI for half tensor
                        self.assertTrue(str(t1) == str(t2))
                    else:
                        self.assertEqual(t1, t2)
                    self.assertEqual(t1.dtype, t2.dtype)
                    self.assertEqual(t1.device, t2.device)
        # as_tensor applied to an existing tensor with an explicit dtype
        def test_as_tensor_tensor_input(input):
            # type: (Tensor) -> Tuple[Tensor, Tensor, Tensor]
            return torch.as_tensor(input, dtype=torch.cfloat), torch.as_tensor(input, dtype=torch.float), \
                torch.as_tensor(input, dtype=torch.int32)
        inp = torch.randn(3, 4, dtype=torch.cfloat)
        self.checkScript(test_as_tensor_tensor_input, (inp,))
    def test_torch_tensor_dtype(self):
        """torch.tensor inside script must honor the ambient default dtype."""
        def foo(s: float):
            return torch.tensor(s), torch.tensor([s, s])
        # need to clear function cache so we re run shape analysis
        with set_default_dtype(torch.double):
            self.assertEqual(torch.jit.script(foo)(1.), foo(1.), exact_dtype=True)
            if GRAPH_EXECUTOR == ProfilingMode.LEGACY:
                FileCheck().check("Double").check_same("aten::tensor").run(torch.jit.last_executed_optimized_graph())
        with set_default_dtype(torch.float):
            # evict the previous compilation so the default-dtype change is observed
            del torch.jit._state._jit_caching_layer[foo]
            self.assertEqual(torch.jit.script(foo)(1.), foo(1.), exact_dtype=True)
            if GRAPH_EXECUTOR == ProfilingMode.LEGACY:
                FileCheck().check("Float").check_same("aten::tensor").run(torch.jit.last_executed_optimized_graph())
        with set_default_dtype(torch.half):
            del torch.jit._state._jit_caching_layer[foo]
            self.assertEqual(torch.jit.script(foo)(1.), foo(1.), exact_dtype=True)
            if GRAPH_EXECUTOR == ProfilingMode.LEGACY:
                FileCheck().check("Half").check_same("aten::tensor").run(torch.jit.last_executed_optimized_graph())
def test_shape_analysis_grad_property(self):
@torch.jit.script
def foo(x):
return torch.sub(x, torch.tanh(x))
torch._C._jit_pass_complete_shape_analysis(foo.graph, (torch.tensor([0.39]),), False)
# requires_grad property shouldn't be accidentally set by shape analysis
self.assertTrue(foo.graph.findNode("aten::sub").output().requiresGrad() is None)
def test_empty_like_memory_format_bc(self):
def f(x):
# type: (Tensor) -> Tensor
return torch.zeros_like(x, memory_format=None)
scripted_f = torch.jit.script(f)
x = torch.rand(3, 4)
self.assertEqual(scripted_f(x), f(x))
    def test_multiline_string_dedents(self):
        """Multiline string literals whose continuation lines sit at a lower
        indentation than the surrounding code must script correctly.
        NOTE: the exact (odd-looking) whitespace inside the literals is the
        point of this test — do not reformat."""
        def foo() -> None:
            multiline_string_dedent_1 = """
This is a string dedent """
            multiline_string_dedent_2 = """ This is a
  string dedent """
            multiline_string_dedent_3 = """
    This is a string
dedent """
            multiline_string_dedent_4 = """ This is a string dedent """
        scripted_foo = torch.jit.script(foo)
        self.assertEqual(scripted_foo(), foo())
    def test_class_with_comment_at_lower_indentation(self):
        """Scripting must tolerate a comment indented below the surrounding
        block (the mis-indented comment below is intentional)."""
        class Foo(torch.nn.Module):
            def forward(self, x):
                x = torch.neg(x)
            # This comment is at the wrong indent
                return x
        torch.jit.script(Foo())
# adapted from test in test_torch
def test_tensor_to(self):
template = dedent('''
def func(t):
cuda = "{cuda}"
device = "{device}"
non_blocking = {non_blocking}
return {to_str}
''')
def s(t, to_str, non_blocking=None, device=None, cuda=None):
device = device if device is not None else str(t.device)
non_blocking = non_blocking if non_blocking is not None else False
cuda = "cuda" if cuda is None else cuda
code = template.format(to_str=to_str, device=device, non_blocking=non_blocking, cuda=cuda)
scope = {}
cu = torch.jit.CompilationUnit(code)
return cu.func(t, profile_and_replay=True)
def test_copy_behavior(t, non_blocking=False):
self.assertIs(t, s(t, 't.to(t, non_blocking=non_blocking)', non_blocking))
self.assertIs(t, s(t, 't.to(t.dtype, non_blocking=non_blocking)', non_blocking))
self.assertIs(t, s(t, 't.to(torch.empty_like(t), non_blocking=non_blocking)', non_blocking))
self.assertIsNot(t, s(t, 't.to(t, non_blocking=non_blocking, copy=True)', non_blocking))
self.assertIsNot(t, s(t, 't.to(t.dtype, non_blocking=non_blocking, copy=True)', non_blocking))
self.assertIsNot(t, s(t, 't.to(torch.empty_like(t), non_blocking=non_blocking, copy=True)', non_blocking))
devices = [t.device]
if t.device.type == 'cuda':
if t.device.index == -1:
devices.append('cuda:{}'.format(torch.cuda.current_device()))
elif t.device.index == torch.cuda.current_device():
devices.append('cuda')
for device in devices:
self.assertIs(t, s(t, 't.to(device, non_blocking=non_blocking)', non_blocking, device))
self.assertIs(t, s(t, 't.to(device, t.dtype, non_blocking=non_blocking)', non_blocking, device))
self.assertIsNot(t, s(t, 't.to(device, non_blocking=non_blocking, copy=True)', non_blocking, device))
self.assertIsNot(t, s(t, 't.to(device, t.dtype, non_blocking=non_blocking, copy=True)',
non_blocking, device))
t = torch.tensor(5)
test_copy_behavior(t)
self.assertEqual(t.device, s(t, "t.to('cpu')").device)
self.assertEqual(t.device, s(t, "t.to('cpu', dtype=torch.float32)").device)
self.assertIs(torch.float32, s(t, "t.to('cpu', dtype=torch.float32)").dtype)
self.assertEqual(t.device, s(t, "t.to(torch.float32)").device)
self.assertIs(torch.float32, s(t, "t.to(dtype=torch.float32)").dtype)
self.assertEqual(t.data_ptr(), s(t, "t.to('cpu')").data_ptr())
self.assertEqual(t.data_ptr(), s(t, "t.to(dtype=t.dtype, device=t.device, copy=False)").data_ptr())
self.assertEqual(t.data_ptr(), s(t, "t.to('cpu', copy=False)").data_ptr())
self.assertNotEqual(t.data_ptr(), s(t, "t.to('cpu', copy=True)").data_ptr())
a = torch.tensor(5)
if torch.cuda.is_available():
for non_blocking in [True, False]:
for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']:
b = torch.tensor(5., device=cuda)
test_copy_behavior(b, non_blocking)
self.assertEqual(b.device, s(b, "t.to(cuda, non_blocking=non_blocking).device", cuda=cuda))
self.assertEqual(a.device, s(b, "t.to('cpu', non_blocking=non_blocking).device"))
self.assertEqual(b.device, s(b, "t.to(cuda, non_blocking=non_blocking).device", cuda=cuda))
self.assertIs(torch.int32, s(b, "t.to('cpu', dtype=torch.int32, non_blocking=non_blocking)").dtype)
self.assertEqual(a.device, s(b, "t.to('cpu', dtype=torch.int32, non_blocking=non_blocking)").device)
self.assertIs(torch.int32, s(b, "t.to(dtype=torch.int32)").dtype)
self.assertEqual(b.device, s(b, "t.to(dtype=torch.int32)").device)
# Test AD: aten::to(Tensor self, int dtype, bool non_blocking, bool copy) -> Tensor
t = torch.tensor(5).float().requires_grad_()
out_ref = t.to(torch.float32)
out = s(t, "t.to(torch.float32)")
self.assertEqual(out_ref, out)
grad_ref = torch.autograd.grad(out_ref.sum(), t)
grad = torch.autograd.grad(out.sum(), t)
self.assertEqual(grad_ref, grad)
# Test AD: aten::to(Tensor self, Device? device, int? dtype, bool non_blocking, bool copy) -> Tensor
out_ref = t.to('cpu')
out = s(t, "t.to('cpu')")
self.assertEqual(out_ref, out)
grad_ref = torch.autograd.grad(out_ref.sum(), t)
grad = torch.autograd.grad(out.sum(), t)
self.assertEqual(grad_ref, grad)
# Test AD: aten::to(Tensor self, Tensor other, bool non_blocking, bool copy) -> Tensor
@torch.jit.script
def func2(t, t_ref):
return t.to(t_ref)
with disable_autodiff_subgraph_inlining():
t_ref = torch.tensor(4).double()
out_ref = t.to(t_ref)
out = func2(t, t_ref)
grad_ref = torch.autograd.grad(out_ref.sum(), t)
grad = torch.autograd.grad(out.sum(), t)
self.assertEqual(grad_ref, grad)
    @unittest.skipIf(not RUN_CUDA, "No CUDA")
    def test_tensor_number_math_cuda(self):
        # Re-run the shared tensor/scalar math checks on the CUDA backend.
        self._test_tensor_number_math(device='cuda')
def test_not(self):
# test not operator in python
# TODO: add more tests when bool conversions ready
def test_not_op(a):
return not bool(a > 1)
self.checkScript(test_not_op, (torch.tensor(2), ), optimize=True)
def test_is_isnot(self):
# test is and is not operator in python
template = dedent('''
def func():
# type: () -> bool
return {lhs} {op} {rhs}
''')
def test(op, args):
code = template.format(lhs=args[0], rhs=args[1], op=op)
scope = {}
execWrapper(code, globals(), scope)
cu = torch.jit.CompilationUnit(code)
self.assertEqual(
cu.func(),
scope['func'](),
msg="Failed with op: {}, lhs: {}, rhs: {}"
.format(op, args[0], args[1])
)
ops = ['is', 'is not']
type_literals = [True, False, None, [1, 1], 1, 2, .5, 1.5]
# do literals product to try any types combinations
for op, lhs, rhs in product(ops, type_literals, type_literals):
test(op, [lhs, rhs])
    def test_isinstance_refinement(self):
        """isinstance checks must narrow Optional/Any types inside script."""
        @torch.jit.script
        def foo(a):
            # type: (Optional[int]) -> int
            if isinstance(a, int):
                return a + 3
            else:
                return 4
        self.assertEqual(foo(4), 7)
        self.assertEqual(foo(None), 4)
        # refinement through boolean combinations of isinstance checks
        @torch.jit.script
        def foo2(a, b):
            # type: (Optional[int], Optional[int]) -> int
            if not isinstance(a, int) or not isinstance(b, int):
                return 0
            else:
                return a + b
        self.assertEqual(foo2(3, 4), 7)
        self.assertEqual(foo2(None, 4), 0)
        self.assertEqual(foo2(4, None), 0)
        # refinement from Any down to a concrete type
        @torch.jit.script
        def any_refinement(a, b):
            # type: (Any, Any) -> int
            if isinstance(a, int) and isinstance(b, int):
                return a + b
            return 0
        self.assertEqual(any_refinement(3, 4), 7)
        self.assertEqual(any_refinement(3, "hi"), 0)
        @torch.jit.script
        def any_refinement2(a):
            # type: (Any) -> Tensor
            if isinstance(a, Tensor):
                return a
            return torch.tensor(3)
        self.assertEqual(any_refinement2(3), torch.tensor(3))
        self.assertEqual(any_refinement2(torch.tensor(5)), torch.tensor(5))
    @unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.LEGACY, "bug persists in deprecated executor")
    def test_unspecialized_any_binding(self):
        # any binding will infer the type, if it infers
        # a specialized tensor type `x` Dict type will fail isinstance check
        @torch.jit.script
        def foo(x: Any):
            assert isinstance(x, Dict[str, torch.Tensor])
        foo({"1": torch.tensor(3)})
        # a non-dict argument must fail the isinstance assertion
        with self.assertRaises(Exception):
            foo(2)
    def test_isinstance(self):
        # test isinstance operator for static type checking
        template = dedent('''
        def func(x):
            # type: ({type_hint}) -> bool
            return isinstance(x, {typ})
        ''')
        def test(inp, typ, type_hint):
            # Build the templated function, then compare scripted vs eager.
            code = template.format(typ=typ, type_hint=type_hint)
            scope = {}
            execWrapper(code, globals(), scope)
            cu = torch.jit.CompilationUnit(code)
            self.assertEqual(
                cu.func(inp),
                scope['func'](inp),
                msg="Failed with typ: {}"
                .format(typ)
            )
        # inputs, checked type expressions, and the matching annotations are
        # aligned element-wise and zipped below
        inputs = [True, 1, 1.0, torch.tensor(1), [1, 2], (1.0,), [1, 2], 1]
        type_literals = ['bool', 'int', 'float', 'torch.Tensor', 'list', 'tuple',
                         '(list, tuple)', '(int, float, bool)']
        type_annotations = ['bool', 'int', 'float', 'Tensor', 'List[int]', 'Tuple[float]',
                            'List[int]', 'int']
        # do zipping to try different types
        for inp, typ, type_hint in zip(inputs, type_literals, type_annotations):
            test(inp, typ, type_hint)
        # test optional isinstance check
        @torch.jit.script
        def opt_func(x):
            # type: (Optional[int]) -> bool
            return isinstance(x, int)
        self.assertTrue(opt_func(3))
        self.assertFalse(opt_func(None))
    def test_dropout_eval(self):
        """Dropout in scripted modules must match eager modules in both eval
        (no-op) and train (random masking) modes, under identical RNG state."""
        class ScriptedConv2d(torch.jit.ScriptModule):
            def __init__(self, in_channels, out_channels, **kwargs):
                super(ScriptedConv2d, self).__init__()
                self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
                self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
            @torch.jit.script_method
            def forward(self, x):
                x = self.conv(x)
                x = self.bn(x)
                return F.relu(x, inplace=True)
        class ScriptMod(torch.jit.ScriptModule):
            def __init__(self):
                super(ScriptMod, self).__init__()
                self.Conv2d_1a_3x3 = ScriptedConv2d(3, 32, kernel_size=3, stride=2)
            @torch.jit.script_method
            def forward(self, x):
                x = self.Conv2d_1a_3x3(x)
                return F.dropout(x, training=self.training)
        # Eager twins of the scripted modules above.
        class EagerConv2d(torch.nn.Module):
            def __init__(self, in_channels, out_channels, **kwargs):
                super(EagerConv2d, self).__init__()
                self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
                self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
            def forward(self, x):
                x = self.conv(x)
                x = self.bn(x)
                return F.relu(x, inplace=True)
        class EagerMod(torch.nn.Module):
            def __init__(self):
                super(EagerMod, self).__init__()
                self.Conv2d_1a_3x3 = EagerConv2d(3, 32, kernel_size=3, stride=2)
            def forward(self, x):
                x = self.Conv2d_1a_3x3(x)
                return F.dropout(x, training=self.training)
        script_input = torch.rand(4, 3, 299, 299)
        eager_input = script_input.clone()
        # eval mode: dropout is a no-op, outputs must match exactly
        with freeze_rng_state():
            script_mod = ScriptMod()
            script_mod.eval()
            script_output = script_mod(script_input)
        with freeze_rng_state():
            eager_mod = EagerMod()
            eager_mod.eval()
            eager_output = eager_mod(eager_input)
        self.assertEqual(script_output, eager_output)
        # train mode: dropout samples a mask, so RNG state is frozen so that
        # both runs draw the same mask
        with freeze_rng_state():
            script_mod = ScriptMod()
            script_mod.train()
            script_output = script_mod(script_input)
        with freeze_rng_state():
            eager_mod = EagerMod()
            eager_mod.train()
            eager_output = eager_mod(eager_input)
        self.assertEqual(script_output, eager_output)
    def test_nested_breaks(self):
        """break/continue in nested loops must compile correctly, and the
        'did exit' bookkeeping values must not leak into loop outputs."""
        def no_bool_loop_outputs(g):
            # testing that the "did exit" transform values are not loop block
            # outputs (and thus not affecting one loop from another)
            loops = g.findAllNodes("prim::Loop")
            for loop in loops:
                for out in loop.outputs():
                    self.assertTrue(out.type() != BoolType.get())
        def test(y):
            # type: (int)
            ret = 0
            tensor = torch.tensor(0)
            while int(tensor.add_(1)) < 4:
                if y == 1:
                    continue
                for i in range(y):
                    continue
                    ret += 1
                ret += 1
            return ret, int(tensor)
        self.assertEqual(torch.jit.script(test)(1), test(1))
        self.assertEqual(torch.jit.script(test)(2), test(2))
        no_bool_loop_outputs(torch.jit.script(test).graph)
        # break/continue interleaved across a while loop and a nested for loop
        def foo():
            y = torch.tensor(0)
            z = 0
            while int(y.add_(1)) < 20:
                if int(y) < 10:
                    for i in range(6):
                        if i == 3:
                            continue
                        else:
                            if i > 3:
                                break
                        z += 2
                if int(y) == 18:
                    break
                if int(y) == 15:
                    continue
                z += 1
            return int(y), z
        no_bool_loop_outputs(torch.jit.script(foo).graph)
        self.checkScript(foo, ())
        def test_nested_two():
            i = 0
            k = 0
            while i < 5:
                for j in range(5):
                    k += 1
                    if j == 3:
                        continue
                i += 1
                k += 1
                if i == 4:
                    break
            return i, k
        self.checkScript(test_nested_two, ())
        no_bool_loop_outputs(torch.jit.script(test_nested_two).graph)
    def test_breaks_continues(self):
        """A battery of break/continue shapes (early exit, dead code after
        break, refinement across loop edges) checked against eager Python."""
        def foo_continue(cond):
            # type: (int)
            j = 1
            for i in range(5):
                if i == cond:
                    continue
                j += 1
            return j
        def foo_break(cond):
            # type: (int)
            j = 1
            for i in range(5):
                if i == cond:
                    break
                j += 1
            return j
        for i in range(1, 4):
            self.checkScript(foo_continue, (i,))
            self.checkScript(foo_break, (i,))
        # Optional refinement must survive continue edges back to the loop header.
        def test_refine_outside_loop():
            if 1 == 1:
                x = None
            else:
                x = 1
            i = 0
            j = 0
            while (x is None or torch.jit._unwrap_optional(x) > 3):
                if i < 3:
                    if i < 3:
                        x = torch.jit.annotate(Optional[int], None)
                        i += 1
                        continue
                    x = 1
                else:
                    x = 1 if x is None else x
                x = x + 1
                j = x + x
            return x, j
        self.checkScript(test_refine_outside_loop, ())
        def assign_after_break(y):
            # type: (int)
            x = 0
            for i in range(y):
                x = y * 2 + i
                break
                # dead store below the break must not clobber x
                x = 4
            return x
        self.checkScript(assign_after_break, (1,))
        self.checkScript(assign_after_break, (2,))
        self.checkScript(assign_after_break, (3,))
        def assign_after_break_nested(y):
            # type: (int)
            x = 0
            for i in range(y):
                if y == 1:
                    x = 5
                    break
                    assert 1 == 2
                else:
                    x = x + 1
                    break
                    assert 1 == 2
                x = -30
                assert 1 == 2
            return x
        self.checkScript(assign_after_break_nested, (1,))
        self.checkScript(assign_after_break_nested, (2,))
        self.checkScript(assign_after_break_nested, (3,))
        def may_break(y):
            # type: (int)
            x = 0
            for i in range(y):
                if y == 1:
                    x = 5
                else:
                    x = x + 1
                    break
                x = -30
            return x
        self.checkScript(may_break, (1,))
        self.checkScript(may_break, (2,))
        self.checkScript(may_break, (3,))
        def test(x, y):
            # type: (int, int)
            a = 1
            while (x > 0):
                if y == 3:
                    for i in range(y):
                        a += (1 % (i + 1))
                        x -= 1
                if x == 3:
                    a = x * 3
                    break
                if x < 3:
                    if x == 1:
                        a -= 2
                        x -= 1
                        break
                a -= 1
                x -= 3
            return a, x
        self.checkScript(test, (10, 3))
        self.checkScript(test, (10, 2))
        self.checkScript(test, (3, 2))
        self.checkScript(test, (5, 3))
        self.checkScript(test, (2, 3))
        def test_delete_after_break(x):
            # type: (int)
            a = 1
            b = 1
            for i in range(x):
                a = i * 3
                break
                b = i * 5
            return a, b
        self.checkScript(test_delete_after_break, (0,))
        self.checkScript(test_delete_after_break, (1,))
        def test_will_break_after_guard(x):
            # type: (int)
            a = 1
            for i in range(x):
                if i == 4:
                    a = 3
                    break
                a -= 1
                break
                assert 1 == 2
                a -= -100
            return a
        self.checkScript(test_will_break_after_guard, (0,))
        self.checkScript(test_will_break_after_guard, (2,))
        self.checkScript(test_will_break_after_guard, (4,))
        def test_varexit(cond):
            # type: (int)
            m = 0
            for i in range(3):
                if cond == 2:
                    if cond == 2:
                        m = 2
                        break
                    k = 1
                else:
                    k = 2
                m += k
            return m
        # use of k tests the pathway where we have to insert unitialized
        self.checkScript(test_varexit, (3,))
        self.checkScript(test_varexit, (2,))
        def test_break_true():
            i = 0
            while True:
                i += 1
                if i == 3:
                    break
            while False:
                i += 1
            return i
        self.checkScript(test_break_true, ())
    def test_break_continue_error(self):
        """break/continue outside a loop (or crossing a function boundary)
        must be rejected with a syntax error."""
        with self.assertRaisesRegex(RuntimeError, "Syntax"):
            cu = torch.jit.CompilationUnit('''
            def other_func(a):
                break
                ''')
        with self.assertRaisesRegex(RuntimeError, "Syntax"):
            cu = torch.jit.CompilationUnit('''
            def other_func(a):
                for i in range(5):
                    def foo():
                        break
                ''')
        # break inside a loop over a heterogeneous tuple (module-list style
        # iteration) is explicitly unsupported
        with self.assertRaisesRegex(RuntimeError, "do not support break or continue inside"):
            @torch.jit.script
            def foo(x):
                i = 0
                for a in (1, "2", 1.5):
                    b = a
                    if x:
                        break
                return b
    def test_python_call(self):
        """A CompilationUnit can call back into a plain Python function that
        is resolved from the enclosing scope."""
        def pyfunc(a):
            return a * 3.0
        cu = torch.jit.CompilationUnit('''
        def other_func(a):
            return a + a
        def test_call_python(a):
            b = pyfunc(a)
            b = other_func(b)
            i = 0
            step = 1
            while i < 10:
                b = pyfunc(b)
                if bool(b > 3.0):
                    b = pyfunc(b)
                    i = 11
            return b
        ''')
        inputs = self._make_scalar_vars([1], torch.float)
        outputs = self._make_scalar_vars([54], torch.float)
        self.assertEqual(cu.test_call_python(*inputs), outputs[0])
def test_python_call_failure(self):
with self.assertRaisesRegex(RuntimeError, "undefined value pyfunc2"):
def pyfunc(a):
return a * 3.0
cu = torch.jit.CompilationUnit('''
def other_func(a):
return a + a
def test_call_python(a):
b = pyfunc(a)
b = other_func(b)
i = 0
step = 1
while i < 10:
b = pyfunc2(b)
if b > 3.0:
b = pyfunc(b)
i = 11
return b
''')
inputs = self._make_scalar_vars([1], torch.float)
outputs = self._make_scalar_vars([54], torch.float)
self.assertEqual(cu.test_call_python(*inputs), outputs)
def test_type_call_in_script(self):
@torch.jit.script
def fn(x):
return type(x)
with self.assertRaisesRegex(RuntimeError, "value of type _TensorMeta"):
fn(torch.tensor(.5))
def test_python_call_annotation(self):
def pyfunc(a):
return a * 3.0
@torch.jit.script
def foo(a):
return pyfunc(a) + pyfunc(a)
inputs = self._make_scalar_vars([1], torch.float)
outputs = self._make_scalar_vars([6], torch.float)
self.assertEqual(foo(*inputs), outputs[0])
    # NOTE(review): "annoytation" is a typo for "annotation"; the name is kept
    # as-is because test methods are discovered by name.
    def test_python_call_annoytation_failure(self):
        """A scripted function referencing an undefined Python name must fail
        to compile with an "undefined value" error."""
        with self.assertRaisesRegex(RuntimeError, "undefined value pyfunc2"):
            def pyfunc(a):
                return a * 3.0
            @torch.jit.script
            def foo(a):
                return pyfunc2(a) + pyfunc(a)
            # unreachable: the decorator above raises during compilation
            inputs = self._make_scalar_vars([1], torch.float)
            outputs = self._make_scalar_vars([6], torch.float)
            self.assertEqual(foo(*inputs), outputs[0])
def test_desugar_module(self):
import torch.nn.functional as F
def fn(x, slope):
a = torch.abs(x)
b = torch.nn.functional.prelu(x, slope)
c = F.prelu(x, slope)
return a, b, c
x = torch.arange(-3., 4)
slope = torch.tensor([0.5])
self.checkScript(fn, [x, slope], optimize=True)
    def test_script_docstring(self):
        """A scripted function keeps its docstring; interior bare strings are
        treated as statements, not documentation."""
        @torch.jit.script
        def with_docstring(x):
            """test str"""
            y = x
            """y is the same as x"""
            return y
        self.assertEqual(with_docstring.__doc__, 'test str')
    def test_script_method_docstring(self):
        """A script method keeps its docstring after compilation, just like a
        scripted free function."""
        class A(torch.jit.ScriptModule):
            @torch.jit.script_method
            def with_docstring(self, x):
                """test str"""
                y = x
                """y is the same as x"""
                return y
        a = A()
        self.assertEqual(a.with_docstring.__doc__, 'test str')
    def test_script_module(self):
        """End-to-end ScriptModule test: submodules (scripted and eager),
        parameters, string-defined methods, and parameter reassignment."""
        class M1(torch.jit.ScriptModule):
            def __init__(self):
                super(M1, self).__init__()
                self.weight = nn.Parameter(torch.randn(2))
            @torch.jit.script_method
            def forward(self, thing):
                return self.weight + thing
        class PModule(nn.Module):
            def __init__(self):
                super(PModule, self).__init__()
                self.a = nn.Parameter(torch.randn(2, 3))
            def forward(self, a):
                return self.a.mm(a)
        class M2(torch.jit.ScriptModule):
            def __init__(self):
                super(M2, self).__init__()
                # test submodule
                self.sub = M1()
                self.sub2 = PModule()
                # test parameters
                self.weight = nn.Parameter(torch.randn(2, 3))
                self.bias = nn.Parameter(torch.randn(2))
                # test defining a method from a string
                self.define("""
                    def hi(self, a):
                        return self.weight.mm(a)
                """)
            # test script methods
            @torch.jit.script_method
            def doit(self, input):
                # test use of parameter
                return self.weight.mm(input)
            @torch.jit.script_method
            def doit2(self, input):
                return self.weight.mm(input)
            @torch.jit.script_method
            def forward(self, input):
                a = self.doit(input)
                b = self.doit2(input)
                c = self.hi(input)
                d = self.sub2(input)
                return a + b + self.bias + self.sub(a) + c + d
        with torch.jit.optimized_execution(False):
            m2 = M2()
            input = torch.randn(3, 2)
            # eager reference computation mirroring M2.forward
            a = m2.weight.mm(input)
            b = m2.weight.mm(input)
            c = m2.weight.mm(input)
            d = m2.sub2.a.mm(input)
            ref = a + b + m2.bias + m2.sub.weight + a + c + d
            self.assertEqual(ref, m2.forward(input))
            # zeroing every parameter must zero the module output
            m2.weight = nn.Parameter(torch.zeros_like(m2.weight))
            m2.bias = nn.Parameter(torch.zeros_like(m2.bias))
            m2.sub.weight = nn.Parameter(torch.zeros_like(m2.sub.weight))
            m2.sub2.a.data.zero_()
            self.assertEqual(torch.zeros(2, 2), m2.forward(torch.randn(3, 2)))
    def test_irparser(self):
        """parse_ir must round-trip a simple graph; the embedded CHECK
        directive is consumed by FileCheck against the parsed graph."""
        graph_str = """graph(%0 : Double(5, 5)):
          # CHECK: aten::relu
          %1 : Double(5, 5) = aten::relu(%0)
          return (%1)
        """
        FileCheck().run(graph_str, parse_ir(graph_str))
def test_parse_tensor_constants(self):
def foo():
return torch.zeros([4, 4])
foo_s = torch.jit.script(foo)
torch._C._jit_pass_constant_propagation(foo_s.graph)
g = str(foo_s.graph)
g_parsed = parse_ir(g, parse_tensor_constants=True)
self.assertEqual(str(canonical(g_parsed)), str(canonical(foo_s.graph)))
func = torch._C._create_function_from_graph("forward", g_parsed)
out_parsed = func()
out_func = foo()
# not checking data, just dtype, size etc
out_parsed[:] = 0
out_func[:] = 0
self.assertEqual(out_func, out_parsed)
with self.assertRaises(RuntimeError):
parse_ir(g, parse_tensor_constants=False)
    def test_parse_nested_names(self):
        """Dotted value names (e.g. %hi.submod.value.5) must survive an IR
        parse and a parse of the re-printed graph."""
        g_str = """
    graph(%x.1 : Tensor):
        %3 : int = prim::Constant[value=1]()
        %2 : int = prim::Constant[value=2]()
        %hi.submod.value.5 : Tensor = aten::add(%x.1, %2, %3)
        return (%hi.submod.value.5)
    """
        g = parse_ir(g_str)
        round_trip_g = parse_ir(str(g))
        self.assertEqual(canonical(g), canonical(round_trip_g))
        # both parsed graphs must also execute identically
        func1 = torch._C._create_function_from_graph("forward", g)
        func2 = torch._C._create_function_from_graph("forward", round_trip_g)
        self.assertEqual(func1(torch.ones([2])), func2(torch.ones([2])))
    def test_is_after_use(self):
        """Use.isAfter must give a total order over the uses of a value,
        including uses inside nested if-blocks."""
        def sorted_input_use(g):
            # sort all uses of the first graph input by topological position
            uses = list(next(g.inputs()).uses())
            return sorted(uses, key=functools.cmp_to_key(type(uses[0]).isAfter))
        @torch.jit.script
        def foo(x):
            a = x + 1
            return (x, x, a)
        uses_sorted = sorted_input_use(foo.graph)
        # sorts last use to the end
        self.assertFalse(uses_sorted[0].isAfter(uses_sorted[1]))
        self.assertTrue(uses_sorted[0].user.kind() == "aten::add")
        self.assertEqual(uses_sorted[1].offset, 0)
        @torch.jit.script
        def foo(x, cond: bool):
            if cond:
                return x + 3
            else:
                return x - 3
        uses_sorted = sorted_input_use(foo.graph)
        # the add in the then-branch precedes the sub in the else-branch
        self.assertTrue(uses_sorted[0].user.kind() == "aten::add")
        self.assertTrue(uses_sorted[1].user.kind() == "aten::sub")
        @torch.jit.script
        def foo(x, cond: bool, cond2: bool):
            if cond:
                return x + 3
            elif cond2 :
                return x - 3
            return x / 3
        graph1 = foo.graph
        @torch.jit.script
        def foo(x, cond: bool, cond2: bool):
            if cond:
                return x + 3
            else:
                if cond2 :
                    return x - 3
                return x / 3
        graph2 = foo.graph
        # elif and nested-else spellings produce the same use ordering
        for graph in [graph1, graph2]:
            uses_sorted = sorted_input_use(graph)
            self.assertTrue(uses_sorted[0].user.kind() == "aten::add")
            self.assertTrue(uses_sorted[1].user.kind() == "aten::sub")
            self.assertTrue(uses_sorted[2].user.kind() == "aten::div")
    def test_canonicalize_control_outputs(self):
        """Outputs of If/Loop nodes must be canonicalized to the order in
        which they first appear (textually) after the node."""
        def test_all_outputs(g):
            ifs = g.findAllNodes("prim::If")
            loops = g.findAllNodes("prim::Loop")
            def contained_blocks(node):
                # number of nested blocks to skip past in the text dump
                return len(node.findAllNodes("prim::If")) * 2 + len(node.findAllNodes("prim::Loop"))
            for node in ifs + loops:
                outs = list(node.outputs())
                out_name = [x.debugName() for x in outs]
                if len(out_name) == 0:
                    continue
                fc = FileCheck()
                # find the last output, then all subsequent uses
                fc.check(out_name[-1] + " : ")
                # skip past node body
                for i in range(contained_blocks(node)):
                    fc.check("->")
                if (node.kind() == "prim::If"):
                    fc.check("->").check("->").check("\n")
                else:
                    fc.check("->").check("\n")
                # the canonical order is the same order as the first use
                # appears in text
                for name in out_name:
                    fc.check(name)
                fc.run(g)
        @torch.jit.script
        def test(x):
            # type: (bool) -> Tuple[int, int]
            b = 2
            a = 1
            if x:
                a = 1
                b = 2
                x = False
            if x:
                b = a
            else:
                a = b
            return a, b
        test_all_outputs(test.graph)
        @torch.jit.script
        def test2(x):
            # type: (bool) -> Tuple[int, int]
            b = 2
            a = 1
            if x:
                a = 1
                b = 2
                x = False
            if x:
                print(a)
            else:
                if x:
                    print(b)
            return a, b
        test_all_outputs(test2.graph)
        @torch.jit.script
        def test_loop(x, iter):
            # type: (bool, int) -> (None)
            a = 1
            b = 2
            c = 3
            for i in range(iter):
                a = 4
                b = 5
                c = 6
                x = True
            print(c)
            if x:
                print(a, b)
        test_all_outputs(test_loop.graph)
        @torch.jit.script
        def loop_unused(iter):
            # type: (int) -> (None)
            a = 1
            b = 2
            c = 3
            for i in range(iter):
                c = c + 1
                b = b + 1
                a = a + 1
                print(a, b)
            print(c)
        # c is used, then unused should be ordered by alphabetical
        FileCheck().check(r"%c : int, %a : int, %b : int").run(loop_unused.graph)
    def test_filecheck(self):
        """Self-test of the FileCheck utility: check, check_count, check_same,
        check_next, check_dag and check_not, including failure messages."""
        def test_check():
            file = "232"
            FileCheck().check("2").check("3").check("2").run(file)
            FileCheck().check("232").run(file)
            with self.assertRaisesRegex(RuntimeError, 'Expected to find "22"'):
                FileCheck().check("22").run(file)
            with self.assertRaisesRegex(RuntimeError, "CHECK: 3"):
                FileCheck().check("3").check("3").run(file)
        test_check()
        def test_check_count():
            file = "22222"
            FileCheck().check_count("2", 5).run(file)
            FileCheck().check_count("22", 2).run(file)
            FileCheck().check_count("222", 1).run(file)
            with self.assertRaisesRegex(RuntimeError, 'Expected to not find'):
                FileCheck().check_count("2", 4, exactly=True).run(file)
            with self.assertRaisesRegex(RuntimeError, 'Expected to find "22"'):
                FileCheck().check_count("22", 3).run(file)
            with self.assertRaisesRegex(RuntimeError, "CHECK-COUNT-6: 2"):
                FileCheck().check_count("2", 6).run(file)
        test_check_count()
        def test_check_same():
            file = "22\n33"
            FileCheck().check_same("22").run(file)
            with self.assertRaisesRegex(RuntimeError, "Expected to not find"):
                FileCheck().check_same("33").run(file)
            file = "22 1 3"
            FileCheck().check("2").check_same("3").run(file)
            FileCheck().check_count("2", 2).check_same("3").run(file)
        test_check_same()
        def test_check_next():
            file = "\n1\n2\n3"
            FileCheck().check("1").check_next("2").check_next("3").run(file)
            FileCheck().check_next("1").check_next("2").check_next("3").run(file)
            with self.assertRaisesRegex(RuntimeError, "Expected to find"):
                FileCheck().check("1").check_next("2").run("12")
            with self.assertRaisesRegex(RuntimeError, "Expected to not find"):
                FileCheck().check("1").check_next("2").run("1\n\n2")
        test_check_next()
        def test_check_dag():
            # check_dag matches in any order within its region
            fc = FileCheck().check_dag("1").check_dag("2").check_not("2")
            fc.run("12")
            fc.run("21")
            fc = FileCheck()
            fc.check_not("3").check_dag("1").check_dag("2").check_not("3")
            fc.run("1 3 2")
            fc.run("2 3 1")
            fc = FileCheck().check_dag("1").check_dag("2").check("3")
            with self.assertRaisesRegex(RuntimeError, 'Expected to find "3" but did not find it'):
                fc.run("1 3 2")
        test_check_dag()
        def test_check_not():
            FileCheck().check_not("2").check("1").run("12")
            FileCheck().check("2").check_not("2").run("12")
            with self.assertRaisesRegex(RuntimeError, 'Expected to not find "2"'):
                FileCheck().check_not("2").check("1").run("21")
            with self.assertRaisesRegex(RuntimeError, 'Expected to not find "1"'):
                FileCheck().check("2").check_not("1").run("21")
            # checks with distinct range matchings
            fb = FileCheck().check_count("2", 2).check_count("2", 2).check_not("2")
            with self.assertRaisesRegex(RuntimeError, 'Expected to not find "2"'):
                fb.run("22 2 22")
            fb = FileCheck().check_count("2", 2).check_not("1").check_count("2", 2)
            with self.assertRaisesRegex(RuntimeError, 'Expected to not find "1"'):
                fb.run("22 1 22")
def _dtype_to_jit_name(self, dtype):
if(dtype == torch.float32):
return "Float"
if(dtype == torch.float64):
return "Double"
if(dtype == torch.int64):
return "Long"
if(dtype == torch.int32):
return "Int"
if(dtype == torch.bool):
return "Bool"
raise RuntimeError('dtype not handled')
def _dtype_to_expect(self, dtype, dim=0):
param = ', '.join(['*'] * dim + ['device=cpu'])
param = '(' + param + ')'
jit_type = self._dtype_to_jit_name(dtype)
if dim >= 0:
return jit_type + param
# special case representing wrapped number
else:
return jit_type.lower()
    def _test_dtype_op_shape(self, ops, args, input_dims=1):
        """Shared driver: for each op and (input dtype, dtype kwarg) pair,
        check that complete shape analysis infers the dtype/shape the eager
        op actually produces."""
        if input_dims < 1:
            raise RuntimeError("input dims must be at least 1")
        dtypes = [torch.float32, torch.float64, torch.int64, torch.int32]
        str_args = ', '.join([str(arg) for arg in args]) + (', ' if len(args) else '')
        # nested-list literal of the requested dimensionality, e.g. [[1, 2, 3]]
        tensor_data = ('[' * input_dims) + '1, 2, 3' + (input_dims * ']')
        template = dedent('''
        def func():
            return {return_line}
        ''')
        for op in ops:
            for dtype in (dtypes + [None]):
                for tensor_type in dtypes:
                    # a couple of ops aren't implemented for non-floating types
                    if(not tensor_type.is_floating_point or (dtype is not None and not dtype.is_floating_point)):
                        if op in ['mean', 'softmax', 'log_softmax']:
                            continue
                    return_line = "torch.tensor({}, dtype={}).{}({}dtype={})".format(tensor_data, tensor_type, op, str_args, dtype)
                    # uncomment for debugging a failed test:
                    # print("testing {}".format(return_line))
                    code = template.format(return_line=return_line)
                    scope = {}
                    exec(code, globals(), scope)
                    cu = torch.jit.CompilationUnit(code)
                    graph = cu.func.graph
                    torch._C._jit_pass_complete_shape_analysis(graph, (), False)
                    # compute the eager result to know the expected dtype/dim
                    input_array = [1, 2, 3]
                    for _ in range(1, input_dims):
                        input_array = [input_array]
                    t = torch.tensor(input_array, dtype=tensor_type)
                    attr = getattr(t, op)
                    kwargs = {'dtype': dtype}
                    result = attr(*args, **kwargs)
                    expect = self._dtype_to_expect(result.dtype, result.dim())
                    FileCheck().check("aten::tensor").check(expect).run(graph)
def test_dtype_op_shape(self):
ops = ['prod']
self._test_dtype_op_shape(ops, args=[])
self._test_dtype_op_shape(ops, args=[0, False])
self._test_dtype_op_shape(ops, args=[0, False])
self._test_dtype_op_shape(ops, args=[0, True])
def test_dtype_op_shape2(self):
    """Dtype propagation through dim-taking ops at rank 1 and rank 4."""
    dim_ops = ['cumprod', 'cumsum', 'softmax', 'log_softmax']
    for op_args, rank in (([0], 1), ([1], 4)):
        self._test_dtype_op_shape(dim_ops, args=op_args, input_dims=rank)
def _test_binary_op_shape(self, ops, input_dims=1):
    """Check complete shape analysis of binary ops over every pair of
    operands drawn from tensors of several dtypes plus python scalars.

    The eager (non-JIT) result supplies the expected dtype; a negative dim
    is used to mark a python/jit scalar operand pairing.
    """
    dtypes = [torch.float32, torch.float64, torch.int64, torch.int32, torch.bool]

    # build a rank-`input_dims` nested-list literal (or '1' for a scalar tensor)
    if input_dims == 0:
        shape = '1'
    else:
        shape = '[' + ('1,' * 4) + ']'
    for _ in range(1, input_dims):
        shape = '[' + ",".join([shape] * 4) + ']'

    template = dedent('''
    def func():
        arg1 = {}
        arg2 = {}
        return torch.{}(arg1, arg2)
    ''')

    # operand pool: one tensor literal per dtype, plus int and float scalars
    args = []
    for dtype in dtypes:
        args = args + ["torch.tensor({}, dtype={})".format(shape, dtype)]
    args = args + [1, 1.5]

    def isBool(arg):
        return type(arg) == bool or (type(arg) == str and "torch.bool" in arg)

    for op in ops:
        for first_arg in args:
            for second_arg in args:
                # subtract not supported for bool
                if (op == 'sub' or op == 'div') and (isBool(first_arg) or isBool(second_arg)):
                    continue
                # div is not implemented correctly for mixed-type or int params
                if (op == 'div' and (type(first_arg) != type(second_arg) or
                                     isinstance(first_arg, int) or
                                     (isinstance(first_arg, str) and 'int' in first_arg))):
                    continue
                return_line = "torch.{}({}, {})".format(op, first_arg, second_arg)
                # uncomment for debugging a failed test:
                # print("testing {}".format(return_line))
                code = template.format(first_arg, second_arg, op)
                scope = {}
                exec(code, globals(), scope)
                non_jit_result = scope['func']()

                cu = torch.jit.CompilationUnit(code)
                graph = cu.func.graph
                torch._C._jit_pass_complete_shape_analysis(graph, (), False)
                # use dim=-1 to represent a python/jit scalar.
                dim = -1 if type(first_arg) != str and type(second_arg) != str else non_jit_result.dim()
                dtype = non_jit_result.dtype
                # jit only supports int/float scalars.
                if dim < 0:
                    if dtype == torch.int64:
                        dtype = torch.int32
                    if dtype == torch.float64:
                        dtype = torch.float32
                expect = self._dtype_to_expect(dtype, dim)
                jit_output = next(graph.outputs())
                check = FileCheck()
                check.check(expect).run(str(jit_output))
def test_binary_op_shape(self):
    """Binary-op shape propagation for scalar (rank-0) and rank-3 operands."""
    binary_ops = ['mul', 'div', 'add', 'sub']
    for rank in (0, 3):
        self._test_binary_op_shape(binary_ops, rank)
def test_no_dtype_shape(self):
    """Graphs whose output mixes in a dynamic python scalar (from .item())
    should keep an unrefined Tensor output type."""
    @torch.jit.script
    def foo(x):
        scalar_number = x.item()
        return x.add(scalar_number)

    @torch.jit.script
    def foo2(x):
        scalar_number = x.item()
        return torch.tensor(1).add(scalar_number)

    t = torch.tensor(5)
    g = foo.graph_for(t)
    out_type = next(g.outputs())
    self.assertTrue(out_type.type() == torch._C.TensorType.get())

    g2 = foo2.graph_for(t)
    # Bug fix: this previously inspected `g` again instead of `g2`, so
    # foo2's graph output type was never actually checked. Also renamed the
    # local from `type` to avoid shadowing the builtin.
    out_type2 = next(g2.outputs())
    self.assertTrue(out_type2.type() == torch._C.TensorType.get())
def test_filecheck_parse(self):
    """Exercise FileCheck's inline-annotation parsing: CHECK, CHECK-COUNT,
    CHECK-COUNT-EXACTLY, CHECK-SAME, and malformed-input errors."""
    def test_check():
        # annotations and test text can live in the same string
        file = """
            # CHECK: 2
            # CHECK: 3
            # CHECK: 2
            232
            """
        FileCheck().run(checks_file=file, test_file=file)
        file = """
            # CHECK: 232
            232
            """
        FileCheck().run(file, "232")
        with self.assertRaisesRegex(RuntimeError, 'Expected to find "232"'):
            FileCheck().run(file, "22")
        with self.assertRaisesRegex(RuntimeError, 'Expected to find "22"'):
            FileCheck().run("# CHECK: 22", "23")
    test_check()

    def test_check_count():
        file = "22222"
        FileCheck().run("# CHECK-COUNT-5: 2", file)
        # EXACTLY-5 matches; plain COUNT allows extra occurrences
        FileCheck().run("# CHECK-COUNT-EXACTLY-5: 2", file)
        FileCheck().run("# CHECK-COUNT-2: 22", file)
        FileCheck().run("# CHECK-COUNT-1: 222", file)
        with self.assertRaisesRegex(RuntimeError, 'Expected to not find'):
            FileCheck().run("# CHECK-COUNT-EXACTLY-2: 2", file)
    test_check_count()

    def test_check_same():
        # CHECK-SAME requires the match to be on the same line as the previous one
        file = "22\n33"
        FileCheck().run("# CHECK-SAME: 22", file)
        with self.assertRaisesRegex(RuntimeError, "Expected to not find"):
            FileCheck().run("# CHECK-SAME: 33", file)
        file = "22 1 3"
        FileCheck().run("# CHECK: 2\n # CHECK-SAME: 3", file)
        FileCheck().run("# CHECK-COUNT-2: 2\n # CHECK-SAME: 3", file)
    test_check_same()

    def test_bad_input():
        with self.assertRaisesRegex(RuntimeError, "Check for bad input"):
            FileCheck().run("", "1")
        with self.assertRaisesRegex(RuntimeError, "Could not parse check"):
            FileCheck().run("# CHECK1", "")
    test_bad_input()
def test_script_module_call_noscript(self):
    """Script methods can call an @torch.jit.ignore'd python method, and
    later changes to plain python attributes are seen on the next call."""
    class M(torch.jit.ScriptModule):
        def __init__(self):
            super(M, self).__init__()
            # plain python attribute (not a parameter/buffer/constant)
            self.value = 1

        @torch.jit.ignore
        def foo(self):
            return torch.ones(2, 2) + self.value

        @torch.jit.script_method
        def forward(self, input):
            return input + self.foo()

    with torch.jit.optimized_execution(False):
        m = M()
        input = torch.randn(2, 2)
        o = m(input)
        self.assertEqual(o, input + torch.ones(2, 2) + 1)
        # check that we can change python attributes
        # and that those changes are picked up in script methods
        m.value = 2
        o = m(input)
        self.assertEqual(o, input + torch.ones(2, 2) + 2)
def test_script_module_nochange_submodule(self):
    """Submodules of a compiled ScriptModule cannot be re-assigned."""
    class M(torch.jit.ScriptModule):
        def __init__(self):
            super(M, self).__init__()
            self.sub = nn.Linear(5, 5)

        @torch.jit.script_method
        def forward(self, input):
            return self.sub(input)

    with torch.jit.optimized_execution(False):
        m = M()
        input = torch.randn(1, 5, 5)
        o = m(input)
        self.assertEqual(o, m.sub(input))
        # re-assigning a submodule after compilation must fail
        with self.assertRaisesRegex(RuntimeError, "Cannot re-assign"):
            m.sub = nn.Linear(5, 5)
def test_module_apis(self):
    """named_modules/named_children/modules/children iteration works in
    TorchScript and agrees with eager execution."""
    class Sub(torch.nn.Module):
        def __init__(self):
            super(Sub, self).__init__()

        def forward(self, thing):
            return thing - 2

    class Double(torch.nn.Module):
        def __init__(self):
            super(Double, self).__init__()

        def forward(self, thing):
            return thing * 2

    class MyMod(torch.nn.Module):
        def __init__(self):
            super(MyMod, self).__init__()
            self.mod = (Sub())
            self.mod2 = (Sub())
            self.mod3 = nn.Sequential(nn.Sequential(Sub()))
            self.mod4 = nn.Sequential(Sub(), Double())

        @torch.jit.export
        def method(self, x, x1, y, y1):
            # accumulate names while applying each visited module
            mod_names = ""
            for name, mod in self.named_modules():
                mod_names = mod_names + " " + name
                x = mod(x)

            children_names = ""
            for name, mod in self.named_children():
                children_names = children_names + " " + name
                x1 = mod(x1)

            for mod in self.modules():
                y = mod(y)

            for mod in self.children():
                y1 = mod(y1)

            return mod_names, children_names, x, x1, y, y1

        def forward(self, x):
            return x + 2

    mod = torch.jit.script(MyMod())
    inps = tuple([torch.tensor(i) for i in range(1, 5)])
    self.assertEqual(mod.method(*inps), MyMod().method(*inps))
def test_script_module_const(self):
    """__constants__ attributes (bool/int/float/list) are readable from a
    script method."""
    class M(torch.jit.ScriptModule):

        __constants__ = ['b', 'i', 'c', 's']

        def __init__(self):
            super(M, self).__init__()
            self.b = False
            self.i = 1
            self.c = 3.5
            self.s = ["hello"]

        @torch.jit.script_method
        def forward(self):
            return self.b, self.i, self.c

    with torch.jit.optimized_execution(False):
        m = M()
        o0, o1, o2 = m()
    # False compares equal to 0
    self.assertEqual(o0, 0)
    self.assertEqual(o1, 1)
    self.assertEqual(o2, 3.5)
def test_script_module_fail_exist(self):
    """Referencing an attribute that was never defined fails at compile time."""
    class M(torch.jit.ScriptModule):
        def __init__(self):
            super(M, self).__init__()

        @torch.jit.script_method
        def forward(self, x):
            # 'whatisgoingon' is never assigned anywhere
            return x + self.whatisgoingon
    with self.assertRaisesRegex(RuntimeError, "Module 'M' has no attribute"):
        M()
@unittest.skip("[module dedupe] currently NoneType refinement on optional attributes doesn't work.")
def test_script_module_none_exist_fail(self):
    """A None-valued optional attribute should fail attribute lookup (skipped:
    NoneType refinement on optional attributes is currently broken)."""
    class M(torch.jit.ScriptModule):
        def __init__(self, my_optional):
            super(M, self).__init__()
            self.my_optional = my_optional

        @torch.jit.script_method
        def forward(self, x):
            if self.my_optional is not None:
                return torch.neg(x) + self.my_optional
            return torch.neg(x)
    with self.assertRaisesRegex(RuntimeError, "has no attribute 'my_optional'"):
        x = torch.rand(3, 4)
        fb = M(None)
        fb(x)
def test_script_module_invalid_consts(self):
    """Values that cannot be TorchScript constants (modules, types, dicts,
    numpy scalars) raise a TypeError naming the offending class."""
    class Foo(torch.jit.ScriptModule):
        __constants__ = ['invalid']

        def __init__(self):
            super(Foo, self).__init__()
            # an nn.Module is not a valid constant
            self.invalid = [nn.Linear(3, 4)]

    with self.assertRaisesRegex(
            TypeError,
            "Linear' object in attribute 'Foo.invalid' is not a valid constant"):
        Foo()

    class Foo2(torch.jit.ScriptModule):
        __constants__ = ['invalid']

        def __init__(self):
            super(Foo2, self).__init__()
            # a python type object is not a valid constant
            self.invalid = type(1)

    with self.assertRaisesRegex(TypeError, "not a valid constant"):
        Foo2()

    class Foo3(torch.jit.ScriptModule):
        __constants__ = ['invalid']

        def __init__(self):
            super(Foo3, self).__init__()
            # a dict nested inside a tuple is not a valid constant
            self.invalid = (3, 4, {})

    with self.assertRaisesRegex(TypeError, "not a valid constant"):
        Foo3()

    class Foo4(torch.jit.ScriptModule):
        __constants__ = ['invalid']

        def __init__(self):
            super(Foo4, self).__init__()
            self.invalid = np.int64(5)

    # verify that we capture human understandable class name
    with self.assertRaisesRegex(TypeError, "numpy.int64"):
        Foo4()
def test_script_module_param_buffer_mutation(self):
    """Buffers mutated inside a script method keep their value across calls,
    and mutation is gated on self.training."""
    # TODO: add param mutation test case after JIT support it
    class ModuleBufferMutate(torch.jit.ScriptModule):
        def __init__(self):
            super(ModuleBufferMutate, self).__init__()
            self.register_buffer('running_var', torch.tensor(0, dtype=torch.long))

        @torch.jit.script_method
        def forward(self):
            if self.training:
                self.running_var += 1
            return self.running_var

    with torch.jit.optimized_execution(False):
        m = ModuleBufferMutate()
        self.assertEqual(m(), 1)
        # in eval mode the buffer must not be incremented again
        m.eval()
        self.assertEqual(m(), 1)
def test_script_module_for(self):
    """Iterating over a constant list attribute inside a script method."""
    class M(torch.jit.ScriptModule):
        __constants__ = ['b']

        def __init__(self):
            super(M, self).__init__()
            self.b = [1, 2, 3, 4]

        @torch.jit.script_method
        def forward(self):
            sum = 0
            for i in self.b:
                sum += i
            return sum

    with torch.jit.optimized_execution(False):
        m = M()
        self.assertEqual(m(), 10)
def test_override_magic(self):
    """An @torch.jit.export'ed __len__ override survives scripting, for both
    a plain nn.Module and an nn.Sequential subclass."""
    class OverrideMagic(nn.Module):
        def __init__(self):
            super(OverrideMagic, self).__init__()

        @torch.jit.export
        def __len__(self):
            return 10

    mod = OverrideMagic()
    self.assertEqual(len(mod), len(torch.jit.script(mod)))

    class OverrideMagicSeq(nn.Sequential):
        def __init__(self):
            super(OverrideMagicSeq, self).__init__()

        @torch.jit.export
        def __len__(self):
            return 10

    mod = OverrideMagicSeq()
    self.assertEqual(len(mod), len(torch.jit.script(mod)))
    # a nonzero __len__ makes the scripted module truthy
    self.assertTrue(torch.jit.script(mod))
def test_script_module_for2(self):
    """Iterating an nn.ModuleList inside a script method matches applying the
    submodules manually; a ScriptModule itself is not iterable."""
    class Sub(torch.jit.ScriptModule):
        def __init__(self):
            super(Sub, self).__init__()
            self.weight = nn.Parameter(torch.randn(2))

        @torch.jit.script_method
        def forward(self, thing):
            return self.weight + thing

    class M(torch.jit.ScriptModule):
        def __init__(self):
            super(M, self).__init__()
            self.mods = nn.ModuleList([Sub() for i in range(10)])

        @torch.jit.script_method
        def forward(self, v):
            for m in self.mods:
                v = m(v)
            return v

    with torch.jit.optimized_execution(False):
        i = torch.empty(2)
        m = M()
        o = m(i)
        # reference: apply each submodule in order, eagerly
        v = i
        for sub in m.mods:
            v = sub(v)
        self.assertEqual(o, v)

    with self.assertRaisesRegex(Exception, "object is not iterable"):
        print(list(m))
def test_attr_qscheme_script(self):
    """A torch.qscheme attribute can be compared inside a scripted forward."""
    class Foo(torch.nn.Module):
        def __init__(self):
            super(Foo, self).__init__()
            self.qscheme = torch.per_tensor_affine

        def forward(self):
            if self.qscheme == torch.per_tensor_symmetric:
                return 3
            else:
                return 4

    f = Foo()
    scripted = torch.jit.script(f)
    self.assertEqual(f(), scripted())
def test_script_module_const_submodule_fail(self):
    """A plain python list of submodules (not a ModuleList) is not accessible
    from a script method and fails at compile time."""
    class Sub(torch.jit.ScriptModule):
        def __init__(self):
            super(Sub, self).__init__()
            self.weight = nn.Parameter(torch.randn(2))

        @torch.jit.script_method
        def forward(self, thing):
            return self.weight + thing

    class M(torch.jit.ScriptModule):
        def __init__(self):
            super(M, self).__init__()
            # plain list, not nn.ModuleList -> invisible to TorchScript
            self.mods = [Sub() for _ in range(10)]

        @torch.jit.script_method
        def forward(self):
            for _ in self.mods:
                print(1)
            return 4

    with self.assertRaisesRegex(RuntimeError, "has no attribute 'mods'"):
        M()
class DerivedStateModule(torch.jit.ScriptModule):
    """ScriptModule with a buffer derived from a parameter, plus _pack/_unpack
    hooks used by the serialization tests; two flag buffers record whether
    each hook was invoked."""
    def __init__(self):
        super(TestScript.DerivedStateModule, self).__init__()
        self.param = torch.nn.Parameter(torch.ones(3, 4, dtype=torch.float))
        self.register_buffer('derived', torch.neg(self.param).detach().clone())

        # This is a flag so we can test that the pack method was called
        self.register_buffer('pack_called', torch.zeros(1, dtype=torch.long))
        # This is a flag so we can test that the unpack method was called
        self.register_buffer('unpack_called', torch.zeros(1, dtype=torch.long))

    @torch.jit.script_method
    def _pack(self):
        self.pack_called.set_(torch.ones(1, dtype=torch.long))
        # replace the derived buffer with junk so unpack must rebuild it
        self.derived.set_(torch.rand(1, dtype=torch.float).detach())

    @torch.jit.script_method
    def _unpack(self):
        self.unpack_called.set_(torch.ones(1, dtype=torch.long))
        # rebuild the derived buffer from the parameter
        self.derived.set_(torch.neg(self.param).detach())

    @torch.jit.script_method
    def forward(self, x):
        return x + self.derived
def test_pack_unpack_state(self):
    """_pack runs before serialization and _unpack after (and again on load),
    leaving the module's derived buffer consistent with its parameter."""
    sm = TestScript.DerivedStateModule()
    x = torch.rand(3, 4, dtype=torch.float)
    torch.testing.assert_close(sm(x), x + torch.neg(torch.ones(3, 4, dtype=torch.float)))

    # Test save path
    self.assertFalse(sm.pack_called.item())
    self.assertFalse(sm.unpack_called.item())
    imported = self.getExportImportCopyWithPacking(sm)
    # ensure pack was called before serialization
    self.assertTrue(sm.pack_called.item())
    # ensure unpack was called after serialization so as to leave the module in an initialized state
    self.assertTrue(sm.unpack_called.item())

    torch.testing.assert_close(sm.derived, torch.neg(sm.param))

    # Test load paths
    self.assertTrue(imported.unpack_called.item())
    torch.testing.assert_close(imported(x), x + torch.neg(torch.ones(3, 4, dtype=torch.float)))
@unittest.skipIf(not TEST_MKL, "PyTorch is built without MKL support")
@unittest.skipIf(True, "Skipping while landing PR stack")
def test_torch_functional(self):
    """Scripting of torch.functional wrappers: stft/istft, lu_unpack, cdist,
    norm, unique and unique_consecutive (currently skipped)."""
    def stft(input, n_fft):
        # type: (Tensor, int) -> Tensor
        return torch.stft(input, n_fft, return_complex=True)

    inps = (torch.randn(10), 7)
    self.assertEqual(stft(*inps), torch.jit.script(stft)(*inps))

    def istft(input, n_fft):
        # type: (Tensor, int) -> Tensor
        return torch.istft(input, n_fft)

    inps2 = (stft(*inps), inps[1])
    self.assertEqual(istft(*inps2), torch.jit.script(istft)(*inps2))

    def lu_unpack(x):
        A_LU, pivots = torch.linalg.lu_factor(x)
        return torch.lu_unpack(A_LU, pivots)

    # batched and unbatched square inputs
    for shape in ((3, 3), (5, 3, 3), (7, 3, 5, 5), (7, 5, 3, 3, 3)):
        a = torch.randn(*shape)
        self.checkScript(lu_unpack, (a,))

    def cdist_fn():
        a = torch.tensor([[0.9041, 0.0196], [-0.3108, -2.4423], [-0.4821, 1.059]])
        b = torch.tensor([[-2.1763, -0.4713], [-0.6986, 1.3702]])
        return torch.cdist(a, b, compute_mode="use_mm_for_euclid_dist")
    self.checkScript(cdist_fn, ())

    def norm():
        c = torch.tensor([[1, 2, 3], [-1, 1, 4]], dtype=torch.float)
        return torch.norm(c, p="fro"), torch.norm(c, p="nuc"), torch.norm(c), torch.norm(c, p=.5)
    self.checkScript(norm, ())

    def torch_unique(dim: Optional[int]):
        ten = torch.unique(torch.tensor([[1, 3], [2, 3]], dtype=torch.long))
        a = torch.unique(ten, dim=dim)
        b = torch.unique(ten, return_counts=True, dim=dim)
        c = torch.unique(ten, return_inverse=True, dim=dim)
        d = torch.unique(ten, return_counts=True, return_inverse=True, dim=dim)
        return a, b, c, d
    self.checkScript(torch_unique, (None,))
    self.checkScript(torch_unique, (0,))

    def torch_unique_consecutive(dim: Optional[int]):
        ten = torch.unique(torch.tensor([[1, 3], [3, 2], [3, 2], [2, 3]], dtype=torch.long))
        a = torch.unique_consecutive(ten, dim=dim)
        b = torch.unique_consecutive(ten, return_counts=True, dim=dim)
        c = torch.unique_consecutive(ten, return_inverse=True, dim=dim)
        d = torch.unique_consecutive(ten, return_counts=True, return_inverse=True, dim=dim)
        return a, b, c, d
    self.checkScript(torch_unique_consecutive, (None,))
    self.checkScript(torch_unique_consecutive, (0,))
def test_torch_functional_tensordot_int(self):
    """torch.tensordot scripts correctly when `dims` is a plain int."""
    def tensordot_dims_int(a: torch.Tensor, b: torch.Tensor, dims: int):
        return torch.tensordot(a, b, dims=dims)

    lhs = torch.arange(120.).reshape(2, 3, 4, 5)
    rhs = torch.arange(840.).reshape(4, 5, 6, 7)
    self.checkScript(tensordot_dims_int, (lhs, rhs, 2))
def test_torch_functional_tensordot_tensor(self):
    """torch.tensordot scripts correctly when `dims` is a tensor: a 1-element
    tensor (contraction count) and a 2-row tensor (explicit dim pairs)."""
    def tensordot_dims_tensor(a: torch.Tensor, b: torch.Tensor, dims: torch.Tensor):
        return torch.tensordot(a, b, dims=dims)

    a = torch.arange(120.).reshape(2, 3, 4, 5)
    b = torch.arange(840.).reshape(4, 5, 6, 7)
    dims = torch.tensor([2])
    self.checkScript(tensordot_dims_tensor, (a, b, dims))

    a = torch.arange(60.).reshape(3, 4, 5)
    b = torch.arange(24.).reshape(4, 3, 2)
    dims = torch.tensor([[1, 0], [0, 1]], dtype=torch.long)
    self.checkScript(tensordot_dims_tensor, (a, b, dims))
def test_torch_functional_tensordot_list(self):
    """torch.tensordot scripts correctly when `dims` is a list of dim lists."""
    def tensordot_dims_list(a: torch.Tensor, b: torch.Tensor, dims: List[List[int]]):
        return torch.tensordot(a, b, dims=dims)

    lhs = torch.arange(60.).reshape(3, 4, 5)
    rhs = torch.arange(24.).reshape(4, 3, 2)
    self.checkScript(tensordot_dims_list, (lhs, rhs, [[1, 0], [0, 1]]))
def test_torch_functional_tensordot_tuple(self):
    """torch.tensordot scripts correctly when `dims` is a tuple of dim lists."""
    def tensordot_dims_tuple(a: torch.Tensor, b: torch.Tensor, dims: Tuple[List[int], List[int]]):
        return torch.tensordot(a, b, dims=dims)

    lhs = torch.arange(60.).reshape(3, 4, 5)
    rhs = torch.arange(24.).reshape(4, 3, 2)
    self.checkScript(tensordot_dims_tuple, (lhs, rhs, ([1, 0], [0, 1])))
def test_missing_getstate(self):
    """Defining __setstate__ without a matching __getstate__ is a script
    compile error."""
    class Foo(torch.nn.Module):
        def __init__(self):
            super(Foo, self).__init__()
            self.x = 1

        def forward(self, x):
            return x * self.x

        @torch.jit.export
        def __setstate__(self, state):
            self.x = state[0]
            self.training = state[1]

    with self.assertRaisesRegex(RuntimeError, "getstate"):
        scripted = torch.jit.script(Foo())
def test_inlining_cleanup(self):
    """Inlining F.linear into a caller should fold away its internal
    prim::If dispatch branch."""
    def foo(x):
        return F.linear(x, x)

    @torch.jit.script
    def fee(x):
        return foo(x)

    # inlining optimizations should have cleaned up linear if statement
    self.run_pass("inline", fee.graph)
    FileCheck().check_not("prim::If").run(fee.graph)
def test_pack_unpack_nested(self):
    """module.apply() of _pack/_unpack recurses through nested submodules;
    packing corrupts the buffers and unpacking restores them."""
    class SubSubMod(torch.jit.ScriptModule):
        def __init__(self):
            super(SubSubMod, self).__init__()
            self.register_buffer('buf', torch.ones(3, 4) * 3)

        @torch.jit.script_method
        def _pack(self):
            self.buf.set_(torch.zeros(1, dtype=torch.double))

        @torch.jit.script_method
        def _unpack(self):
            self.buf.set_(torch.ones(3, 4, dtype=torch.double) * 3)

        @torch.jit.script_method
        def forward(self, x):
            return x + self.buf

    class SubMod(torch.jit.ScriptModule):
        def __init__(self):
            super(SubMod, self).__init__()
            self.register_buffer('buf', torch.ones(3, 4) * 2)
            self.ssm = SubSubMod()

        @torch.jit.script_method
        def _pack(self):
            self.buf.set_(torch.zeros(1, dtype=torch.double))

        @torch.jit.script_method
        def _unpack(self):
            self.buf.set_(torch.ones(3, 4, dtype=torch.double) * 2)

        @torch.jit.script_method
        def forward(self, x):
            return self.ssm(x + self.buf)

    class Mod(torch.jit.ScriptModule):
        def __init__(self):
            super(Mod, self).__init__()
            self.submod = SubMod()
            self.register_buffer('buf', torch.ones(3, 4) * 1)

        @torch.jit.script_method
        def _pack(self):
            self.buf.set_(torch.zeros(1, dtype=torch.double))

        @torch.jit.script_method
        def _unpack(self):
            self.buf.set_(torch.ones(3, 4, dtype=torch.double))

        @torch.jit.script_method
        def forward(self, x):
            return self.submod(x + self.buf)

    m = Mod()
    # buffers sum to 1 + 2 + 3 = 6 across the three nesting levels
    torch.testing.assert_close(m(torch.zeros(3, 4)), torch.ones(3, 4) * 6)
    m.apply(lambda s: s._pack())
    torch.testing.assert_close(m(torch.zeros(3, 4)), torch.zeros(3, 4))
    m.apply(lambda s: s._unpack())
    torch.testing.assert_close(m(torch.zeros(3, 4)), torch.ones(3, 4) * 6)
def test_torch_any(self):
    """torch.any scripts correctly, with and without an explicit dim."""
    def fn(x):
        return torch.any(x)

    def fn1(x, dim: int):
        return torch.any(x, dim)

    self.checkScript(fn, (torch.randn(3, 4), ))
    self.checkScript(fn, (torch.empty(3), ))
    self.checkScript(fn, (torch.empty(1), ))
    self.checkScript(fn, (torch.ones(3, 4),))
    self.checkScript(fn, (torch.zeros(5, 7, 1),))
    # negative and positive dim values
    self.checkScript(fn1, (torch.empty(3, 4), -2))
    self.checkScript(fn1, (torch.randn(3, 8), 1))
    self.checkScript(fn1, (torch.zeros(3, 6, 9), -3))
    self.checkScript(fn1, (torch.empty(5), 0))
def test_any(self):
    """Builtin any() scripts correctly over int, float, bool and str lists,
    including empty lists."""
    def fn(x: List[int]):
        return any(x)

    def fn1(x: List[float]):
        return any(x)

    def fn2(x: List[bool]):
        return any(x)

    def fn3(x: List[str]):
        return any(x)

    self.checkScript(fn, ([0, 0, 0, 0], ))
    self.checkScript(fn, ([0, 3, 0], ))
    self.checkScript(fn, ([], ))
    self.checkScript(fn1, ([1.0, 2.0, 3.0], ))
    self.checkScript(fn1, ([0.0, 0.0, 0.0], ))
    self.checkScript(fn1, ([0, 0, 0], ))
    self.checkScript(fn1, ([], ))
    self.checkScript(fn2, ([True, False, False], ))
    self.checkScript(fn2, ([False, False, False], ))
    self.checkScript(fn2, ([True, True, True, True], ))
    self.checkScript(fn2, ([], ))
    self.checkScript(fn3, (["", "", ""], ))
    self.checkScript(fn3, (["", "", "", "-1"], ))
    self.checkScript(fn3, ([], ))
def test_script_module_not_tuple(self):
    """Iterating a non-iterable constant attribute is a compile-time error."""
    class M(torch.jit.ScriptModule):

        __constants__ = ['mods']

        def __init__(self):
            super(M, self).__init__()
            # an int constant cannot be iterated in forward
            self.mods = 1

        @torch.jit.script_method
        def forward(self, v):
            for m in self.mods:
                print(m)
            return v
    with self.assertRaisesRegex(RuntimeError, "'int' object is not iterable"):
        M()
def test_attr_module_constants(self):
    """A submodule assigned as a plain attribute is usable from a script
    method and survives export/import."""
    class M2(torch.jit.ScriptModule):
        def __init__(self, mod_list):
            super(M2, self).__init__()
            self.mods = mod_list

        @torch.jit.script_method
        def forward(self, x):
            return self.mods.forward(x)

    with torch.jit.optimized_execution(False):
        m = M2(nn.Sequential(nn.ReLU()))
        self.assertExportImportModule(m, (torch.randn(2, 2),))
def test_script_sequential_for(self):
    """An nn.Sequential submodule supports both explicit iteration and a
    direct call inside script methods, and both match eager application."""
    class Sub(torch.jit.ScriptModule):
        def __init__(self):
            super(Sub, self).__init__()
            self.weight = nn.Parameter(torch.randn(2))

        @torch.jit.script_method
        def forward(self, thing):
            return self.weight + thing

    class M(torch.jit.ScriptModule):
        def __init__(self):
            super(M, self).__init__()
            self.mods = nn.Sequential(Sub(), Sub(), Sub())

        @torch.jit.script_method
        def forward(self, v):
            for m in self.mods:
                v = m(v)
            return v

        @torch.jit.script_method
        def forward2(self, v):
            return self.mods(v)

    with torch.jit.optimized_execution(False):
        i = torch.empty(2)
        m = M()
        o = m(i)
        # reference: apply each stage of the Sequential eagerly
        v = i
        for sub in m.mods._modules.values():
            v = sub(v)
        self.assertEqual(o, v)

        o2 = m.forward2(i)
        self.assertEqual(o2, v)
def test_script_sequential_sliced_iteration(self):
    """Slices of an nn.Sequential can be iterated inside a scripted forward."""
    class seq_mod(nn.Module):
        def __init__(self):
            super(seq_mod, self).__init__()
            self.layers = [nn.ReLU(), nn.ReLU(), nn.ReLU()]
            self.layers = nn.Sequential(*self.layers)

        def forward(self, input):
            x = self.layers[0].forward(input)
            # both bounded and open-ended slices
            for layer in self.layers[1:3]:
                x = layer.forward(x)
            for layer in self.layers[2:]:
                x = layer.forward(x)
            return x

    seq = seq_mod()
    self.checkModule(seq, [torch.tensor([-2, 1, -1, 2])])
def test_script_sequential_orderdict(self):
    """An OrderedDict-constructed nn.Sequential keeps its named entries in
    the scripted module's state_dict."""
    class M(torch.jit.ScriptModule):
        def __init__(self):
            super(M, self).__init__()
            self.mods = nn.Sequential(OrderedDict([
                ("conv", nn.Conv2d(1, 20, 5)),
                ("relu", nn.ReLU())
            ]))

        @torch.jit.script_method
        def forward(self, input):
            return self.mods(input)

    m = M()
    self.assertTrue('mods.conv.weight' in m.state_dict().keys())
def test_script_sequential_multi_output_fail(self):
    """A Sequential stage returning a tuple where the next stage expects a
    Tensor is a compile-time type error mentioning the tuple type."""
    class Sub(torch.jit.ScriptModule):
        def __init__(self):
            super(Sub, self).__init__()
            self.weight = nn.Parameter(torch.randn(2))

        @torch.jit.script_method
        def forward(self, thing):
            return self.weight + thing

    class ReturnMulti(torch.jit.ScriptModule):
        def __init__(self):
            super(ReturnMulti, self).__init__()

        @torch.jit.script_method
        def forward(self, x):
            # three outputs cannot feed the next stage's single Tensor input
            return x, x, x

    class HaveSequential(torch.jit.ScriptModule):
        def __init__(self):
            super(HaveSequential, self).__init__()
            self.someseq = nn.Sequential(
                Sub(),
                ReturnMulti(),
                Sub()
            )

        @torch.jit.script_method
        def forward(self, x):
            return self.someseq(x)

    with self.assertRaisesRegex(RuntimeError, "(Tensor, Tensor, Tensor)"):
        with torch.jit.optimized_execution(False):
            hs = HaveSequential()
            i = torch.empty(2)
            hs(i)
@_tmp_donotuse_dont_inline_everything
def test_script_sequential_in_mod_list(self):
    """A Sequential nested inside a ModuleList compiles to submodule calls
    (prim::CallMethod) with no python fallback ops in the graph."""
    class Sub(torch.jit.ScriptModule):
        def __init__(self):
            super(Sub, self).__init__()
            self.weight = nn.Parameter(torch.randn(2))

        @torch.jit.script_method
        def forward(self, thing):
            return self.weight + thing

    class M(torch.jit.ScriptModule):
        def __init__(self):
            super(M, self).__init__()
            self.mods = nn.ModuleList([Sub(), nn.Sequential(Sub(), nn.Sequential(Sub(), Sub()), Sub())])

        @torch.jit.script_method
        def forward(self, v):
            for mod in self.mods:
                v = mod(v)
            return v

    m = M()
    graph = str(m.graph)
    # one call per top-level ModuleList entry
    self.assertTrue(graph.count("prim::CallMethod") == 2)
    self.assertTrue("python" not in graph)
@_tmp_donotuse_dont_inline_everything
def test_script_nested_mod_list(self):
    """Nested ModuleList/Sequential containers are iterable in script methods;
    each leaf module compiles to one prim::CallMethod."""
    class Sub(torch.jit.ScriptModule):
        def __init__(self):
            super(Sub, self).__init__()
            self.weight = nn.Parameter(torch.randn(2))

        @torch.jit.script_method
        def forward(self, thing):
            return self.weight + thing

    class M(torch.jit.ScriptModule):
        def __init__(self):
            super(M, self).__init__()
            self.mods = nn.ModuleList([nn.ModuleList([Sub()]), nn.Sequential(Sub()), nn.ModuleList([Sub(), Sub()])])

        @torch.jit.script_method
        def forward(self, v):
            for mod in self.mods:
                for m in mod:
                    v = m(v)
            return v

    m = M()
    graph = str(m.graph)
    # four Sub leaves total -> four submodule calls
    self.assertTrue(graph.count("prim::CallMethod") == 4)
    self.assertTrue("python" not in graph)
def test_constant_as_attr(self):
class M(torch.jit.ScriptModule):
__constants__ = ['dim']
def __init__(self):
super(M, self).__init__()
self.dim = 1
@torch.jit.script_method
def forward(self, v):
return torch.cat([v, v, v], dim=self.dim)
v = torch.zeros(1, 1)
with torch.jit.optimized_execution(False):
self.assertEqual(torch.cat([v, v, v], dim=1), M()(v))
class StarTestSumStarred(torch.nn.Module):
    """Varargs module summing all of its inputs; used by the star-expression
    tracing tests."""
    def __init__(self):
        super(TestScript.StarTestSumStarred, self).__init__()

    def forward(self, *inputs):
        output = inputs[0]
        for i in range(1, len(inputs)):
            output += inputs[i]
        return output
class StarTestReturnThree(torch.nn.Module):
    """Module returning its input three times; used by the star-expression
    tracing tests."""
    def __init__(self):
        super(TestScript.StarTestReturnThree, self).__init__()

    def forward(self, rep):
        return rep, rep, rep
def test_script_star_expr(self):
    """A traced module's tuple output can be splatted (*tup) into another
    traced module's varargs forward, inside a script method."""
    class M2(torch.jit.ScriptModule):
        def __init__(self):
            super(M2, self).__init__()
            self.m = torch.jit.trace(TestScript.StarTestSumStarred(),
                                     (torch.ones(4, 3), torch.ones(4, 3), torch.ones(4, 3)))
            self.g = torch.jit.trace(TestScript.StarTestReturnThree(), torch.ones(4, 3))

        @torch.jit.script_method
        def forward(self, rep):
            tup = self.g(rep)
            return self.m(*tup)

    m = M2()
    self.assertEqual(m(torch.zeros(4, 3)), 3 * torch.zeros(4, 3))
def test_script_star_expr_string(self):
    """Same star-splat pattern as test_script_star_expr, but the forward is
    compiled from a string via self.define."""
    class M2(torch.jit.ScriptModule):
        def __init__(self):
            super(M2, self).__init__()
            self.m = torch.jit.trace(TestScript.StarTestSumStarred(),
                                     (torch.ones(4, 3), torch.ones(4, 3), torch.ones(4, 3)))
            self.g = torch.jit.trace(TestScript.StarTestReturnThree(), torch.ones(4, 3))

            self.define('''
        def forward(self, rep):
            tup = self.g(rep)
            return self.m(*tup)
                ''')

    m = M2()
    self.assertEqual(m(torch.zeros(4, 3)), 3 * torch.zeros(4, 3))
class StarTestSumAndReturnThree(torch.nn.Module):
    """Varargs module summing its inputs and returning the sum three times;
    used by the star-assignment tests."""
    def __init__(self):
        super(TestScript.StarTestSumAndReturnThree, self).__init__()

    def forward(self, *inputs):
        output = inputs[0]
        for i in range(1, len(inputs)):
            output += inputs[i]
        return output, output, output
def test_script_star_assign(self):
    """Starred unpacking (head, *tail = ...) of a traced module's tuple
    output works in string-defined script methods."""
    class M2(torch.jit.ScriptModule):
        def __init__(self):
            super(M2, self).__init__()
            self.g = torch.jit.trace(TestScript.StarTestSumAndReturnThree(), torch.ones(4, 3))
            self.define('''
        def forward(self, rep):
            head, *tail = self.g(rep)
            return head
                ''')

    m = M2()
    self.assertEqual(m(torch.zeros(4, 3)), 3 * torch.zeros(4, 3))
def test_script_module_star_assign2(self):
    """Starred unpacking with the star on the left (*head, tail = ...), using
    an out-of-place trace of the summing module."""
    class M2(torch.jit.ScriptModule):
        def __init__(self):
            super(M2, self).__init__()
            self.g = torch.jit.trace(
                TestScript.StarTestSumAndReturnThree(),
                (torch.ones(4, 3), torch.ones(4, 3), torch.ones(4, 3)),
                _force_outplace=True)
            self.define('''
        def forward(self, rep):
            *head, tail = self.g(rep, rep, rep)
            return tail
                ''')

    m = M2()
    self.assertEqual(m(torch.ones(4, 3)), 3 * torch.ones(4, 3))
def test_script_module_star_assign2_inplace(self):
    """Same as test_script_module_star_assign2 but traced in-place, which
    changes the result because the three args alias the same tensor."""
    class M2(torch.jit.ScriptModule):
        def __init__(self):
            super(M2, self).__init__()
            self.g = torch.jit.trace(
                TestScript.StarTestSumAndReturnThree(),
                (torch.ones(4, 3), torch.ones(4, 3), torch.ones(4, 3)),
                _force_outplace=False)
            self.define('''
        def forward(self, rep):
            *head, tail = self.g(rep, rep, rep)
            return tail
                ''')

    m = M2()
    # since forward() makes three aliases to the input `rep` before passing
    # it to StarTestSumAndReturnThree(), in-place behavior will be different
    # than the above out of place.
    self.assertEqual(m(torch.ones(4, 3)), 4 * torch.ones(4, 3))
def test_script_module_star_assign_fail_pythonop(self):
    """Starred unpacking of an ignored python op's result is a compile error
    (its return type is opaque to TorchScript)."""
    with self.assertRaisesRegex(RuntimeError, "cannot be used as a tuple"):
        class M2(torch.jit.ScriptModule):
            def __init__(self):
                super(M2, self).__init__()

                @torch.jit.ignore
                def myfunc():
                    return torch.zeros(1, 2, 3), torch.zeros(1, 2, 3)

                self.define('''
        def forward(self, rep):
            a, *b = myfunc()
            return a
                    ''')

        m = M2()
        m(torch.zeros(4, 3))
def test_script_module_star_assign_fail_builtin(self):
    """Starred unpacking of a single-Tensor builtin result is a compile error."""
    with self.assertRaisesRegex(RuntimeError, "cannot be used as a tuple"):
        class M2(torch.jit.ScriptModule):
            def __init__(self):
                super(M2, self).__init__()

                self.define('''
        def forward(self, rep):
            a, *b = torch.neg(rep)
            return a
                    ''')

        m = M2()
        m(torch.zeros(4, 3))
@skipIfCompiledWithoutNumpy
def test_pack_padded_pad_packed_trace(self):
    """Tracing a pack_padded_sequence/pad_packed_sequence round-trip preserves
    forward values and gradients, and the trace is ONNX-exportable."""
    from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
    T, B, C = 3, 5, 7

    class PadPackedWrapper(torch.nn.Module):
        def __init__(self):
            super(PadPackedWrapper, self).__init__()

        def forward(self, x, seq_lens):
            x = pack_padded_sequence(x, seq_lens)
            x, _ = pad_packed_sequence(x)
            return x

    x = np.ones((T, B, C))
    seq_lens = np.array([3, 3, 2, 2, 1], dtype=np.int32)
    # set padding value so we can test equivalence
    for b in range(B):
        if seq_lens[b] < T:
            x[seq_lens[b]:, b, :] = 0

    seq_lens = torch.from_numpy(seq_lens)
    x = torch.autograd.Variable(torch.from_numpy(x), requires_grad=True)

    m = PadPackedWrapper()
    m_traced = torch.jit.trace(m, (x, seq_lens,))

    y = m(x, seq_lens)
    loss = torch.sum(y)
    loss.backward()
    grad = x.grad.clone()
    x.grad.zero_()

    y_traced = m_traced(x, seq_lens)
    loss_traced = torch.sum(y_traced)
    loss_traced.backward()
    grad_traced = x.grad.clone()

    # the round-trip is an identity on already-zero-padded input
    self.assertEqual(y_traced, x)
    self.assertEqual(y_traced, y)
    self.assertEqual(grad, grad_traced)

    f = io.BytesIO()
    torch.onnx._export(m, (x, seq_lens), f, verbose=False)
def test_script_pack_padded_sequence(self):
    """Scripting pack_padded_sequence/pad_packed_sequence matches eager, for
    both a free function and a module using enforce_sorted=False."""
    from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

    def pack_padded_pad_packed_script(x, seq_lens):
        x = pack_padded_sequence(x, seq_lens)
        x, lengths = pad_packed_sequence(x)
        return x, lengths

    T, B, C = 3, 5, 7
    x = torch.ones((T, B, C))
    seq_lens = torch.tensor([3, 3, 2, 2, 1])
    # set padding value so we can test equivalence
    for b in range(B):
        if seq_lens[b] < T:
            x[seq_lens[b]:, b, :] = 0

    eager_seq, eager_lengths = pack_padded_pad_packed_script(x, seq_lens)
    with torch._jit_internal._disable_emit_hooks():
        scripted_pack_padded_seq = torch.jit.script(pack_padded_pad_packed_script)
    script_seq, script_lengths = scripted_pack_padded_seq(x, seq_lens)
    self.assertEqual(eager_seq, script_seq)
    self.assertEqual(eager_lengths, script_lengths)

    class ExperimentalLSTM(torch.nn.Module):
        def __init__(self, input_dim, hidden_dim):
            super().__init__()

        def forward(self, input):
            # type: (Tensor)
            packed = pack_padded_sequence(
                input=input, lengths=torch.tensor([1, 2]), enforce_sorted=False
            )
            output, lengths = pad_packed_sequence(
                sequence=packed, total_length=2
            )
            # lengths is flipped, so is output
            return output[0]

    lstm = ExperimentalLSTM(input_dim=2, hidden_dim=2)
    with torch._jit_internal._disable_emit_hooks():
        self.checkModule(lstm, [torch.ones(2, 2)])
def test_script_pad_sequence_pack_sequence(self):
    """pad_sequence and pack_sequence script correctly across batch_first,
    padding_value and enforce_sorted variants."""
    from torch.nn.utils.rnn import pad_sequence, pack_sequence, pad_packed_sequence

    def pad_sequence_func(tensor_list, batch_first=False, padding_value=0.0):
        # type: (List[Tensor], bool, float) -> Tensor
        return pad_sequence(tensor_list, batch_first, padding_value)

    def pack_sequence_func(tensor_list, enforce_sorted=True):
        # type: (List[Tensor], bool) -> Tensor
        return pad_packed_sequence(pack_sequence(tensor_list, enforce_sorted))[0]

    ones3 = torch.ones(3, 5)
    ones4 = torch.ones(4, 5)
    ones5 = torch.ones(5, 5)
    tensor1 = torch.tensor([1, 2, 3])
    tensor2 = torch.tensor([4, 5])
    tensor3 = torch.tensor([6])
    with torch._jit_internal._disable_emit_hooks():
        self.checkScript(pad_sequence_func,
                         ([ones3, ones4, ones5],))
        self.checkScript(pad_sequence_func,
                         ([ones3, ones4, ones5], True))
        self.checkScript(pad_sequence_func,
                         ([ones3, ones4, ones5], True, 2.5))
        self.checkScript(pack_sequence_func,
                         ([tensor1, tensor2, tensor3],))
        self.checkScript(pack_sequence_func,
                         ([tensor1, tensor2, tensor3], False))
def test_script_get_tracing_state(self):
    """torch._C._get_tracing_state() is scriptable and is falsy when not
    tracing, so the subtraction branch runs."""
    def test_if_tracing(x):
        if torch._C._get_tracing_state():
            return x + 1
        else:
            return x - 1

    inp = torch.randn(3, 3)
    self.checkScript(test_if_tracing, (inp,))
def test_script_is_tracing(self):
    """torch.jit.is_tracing() is scriptable and False under scripting, so the
    subtraction branch runs."""
    def test_is_tracing(x):
        if torch.jit.is_tracing():
            return x + 1
        else:
            return x - 1

    inp = torch.randn(3, 3)
    self.checkScript(test_is_tracing, (inp,))
def test_is_scripting(self):
def foo():
return torch.jit.is_scripting()
self.assertFalse(foo())
scripted = torch.jit.script(foo)
self.assertTrue(scripted())
def test_comment_ignore_indent(self):
class Model(torch.nn.Module):
def __init__(self):
# useless comment that is not indented correctly # noqa: E115
super().__init__()
def forward(self):
return 5
# should compile without an error
self.checkModule(Model(), ())
def test_script_outputs(self):
with self.assertRaisesRegex(RuntimeError, "cannot be used as a tuple"):
@torch.jit.script
def foo(a):
c, d = a + a
return c + d
@torch.jit.script
def return3():
return 1, 2, 3
with self.assertRaisesRegex(RuntimeError, "too many values to unpack"):
@torch.jit.script
def bind2():
a, b = return3()
print(a)
print(b)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
def test_script_get_device_cuda(self):
@torch.jit.script
def foo(a):
return a.get_device()
v = torch.randn(1, device='cuda')
self.assertEqual(foo(v), 0)
def test_script_chunk(self):
@torch.jit.script
def foo(a):
b, c = torch.chunk(a, dim=0, chunks=2)
return b
v = torch.rand(10, 3)
self.assertEqual(torch.chunk(v, dim=0, chunks=2)[0], foo(v))
def test_script_copy(self):
class M(torch.nn.Module):
__annotations__ = {
"val": Optional[torch.Tensor]
}
def __init__(self):
super(M, self).__init__()
self.val = None
def some_method(self):
return 3
def forward(self, x):
# type: (Tensor) -> Tensor
self.val = x + self.some_method()
return x
m = torch.jit.script(M())
# test copy
copy.copy(m)
copy.deepcopy(m)
def test_script_forward_method_replacement(self):
# We want to support the use case of attaching a different `forward` method
class LowLevelModule(torch.nn.Module):
def __init__(self):
super(LowLevelModule, self).__init__()
def forward(self, input: torch.Tensor):
# Generic forward dispatch
return self.forward_pytorch(input) * 2
class TestModule(LowLevelModule):
def __init__(self):
super(TestModule, self).__init__()
# Replace the forward method
self.forward = types.MethodType(LowLevelModule.forward, self)
def forward_pytorch(self, input: torch.Tensor):
return torch.tensor(123)
def forward(self, input: torch.Tensor):
# Should not use this forward method
raise AssertionError("This method should not be used")
return self.forward_pytorch(input)
m = TestModule()
self.assertEqual(m(torch.tensor(1)), torch.tensor(246))
m_scripted = torch.jit.script(m)
self.assertEqual(m_scripted(torch.tensor(1)), torch.tensor(246))
# Suppression: ONNX warns when exporting RNNs because of potential batch size mismatch.
@suppress_warnings
@skipIfCompiledWithoutNumpy
def test_rnn_trace_override(self):
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
num_layers = 3
T, B, C = 11, 5, 7
class RNNTraceWrapper(torch.nn.Module):
def __init__(self, cell_type):
super(RNNTraceWrapper, self).__init__()
if cell_type == 'RNN':
self.rnn = torch.nn.RNN(input_size=C, hidden_size=C, num_layers=num_layers)
elif cell_type == 'LSTM':
self.rnn = torch.nn.LSTM(input_size=C, hidden_size=C, num_layers=num_layers)
elif cell_type == 'GRU':
self.rnn = torch.nn.GRU(input_size=C, hidden_size=C, num_layers=num_layers)
def forward(self, x, seq_lens):
x = pack_padded_sequence(x, seq_lens)
x, _ = self.rnn(x)
x, _ = pad_packed_sequence(x)
return x
for cell_type in ['RNN', 'LSTM', 'GRU']:
x = torch.ones(T, B, C, requires_grad=True)
seq_lens = torch.from_numpy(np.array([11, 3, 2, 2, 1], dtype=np.int32))
m = RNNTraceWrapper(cell_type)
m_traced = torch.jit.trace(m, (x, seq_lens,))
y = m(x, seq_lens)
loss = torch.sum(y)
loss.backward()
grad = x.grad.clone()
x.grad.zero_()
y_traced = m_traced(x, seq_lens)
loss_traced = torch.sum(y_traced)
loss_traced.backward()
grad_traced = x.grad.clone()
self.assertEqual(y_traced, y)
self.assertEqual(grad, grad_traced)
f = io.BytesIO()
torch.onnx._export(m, (x, seq_lens), f, verbose=False)
def test_python_call_non_tensor(self):
def foo(a, b, c):
# type: (Tensor, int, Tuple[Tensor, int]) -> Tuple[int, Tensor]
d, e = c
return b + e, a + d
@torch.jit.script
def bar():
x = torch.ones(3, 4)
a, b = foo(x, 3, (x, 3))
return a, b
self.assertEqual((6, torch.ones(3, 4) + 1), bar())
def test_python_call_non_tensor_wrong(self):
with self.assertRaisesRegex(RuntimeError, r"but instead got value of type tuple"):
@torch.jit.ignore
def foo():
# type: () -> Tensor
return ((3, 4),) # noqa: T484
@torch.jit.script
def bar():
return foo()
bar()
def test_if_different_type(self):
with self.assertRaisesRegex(RuntimeError, "c0 is set to type "
"int in the true branch and type "
"float in the false branch"):
@torch.jit.script
def diff_type_used():
if 1 == 2:
c0 = 1
else:
c0 = 1.0
return c0
with self.assertRaisesRegex(RuntimeError, "Variable 'c0' previously had type float"):
@torch.jit.script
def diff_existing_type(x):
c0 = 1.0
if 1 == 2:
c0 = 1
print(x)
return x
@torch.jit.script
def diff_type_unused():
if 1 == 1:
c0 = 1
print(c0)
else:
c0 = 1.0
print(c0)
return 1
def test_if_not_defined_error(self):
with self.assertRaisesRegex(RuntimeError, "c0 is not defined in the false branch"):
@torch.jit.script
def test():
if 1 == 1:
c0 = 1
return c0
with self.assertRaisesRegex(RuntimeError, "c0 is not defined in the true branch"):
@torch.jit.script
def test2():
if 1 == 1:
pass
else:
c0 = 1
return c0
def test_if_list_cat(self):
# testing that different length lists don't throw error on cat in shape prop
@torch.jit.script
def test_list(x):
if bool(x.sum() < 1):
c = [x, x]
else:
c = [x, x, x]
return torch.cat(c)
b = torch.zeros(2, 4)
_propagate_shapes(test_list.graph, (b,), False)
def test_if_supertype(self):
@torch.jit.script
def tensor_unifying(x, y, z):
# testing dynamic is appropriately set for y and z
if bool(x):
x, y, z = x + 1, y, z
else:
x, y, z = x + 1, x, y
return x, y, z
a = torch.zeros(2, 2, dtype=torch.float)
b = torch.zeros(2, 4, dtype=torch.long)
c = torch.zeros(2, 4, dtype=torch.float)
graph = _propagate_shapes(tensor_unifying.graph, (a, b, c), False)
if_outputs = list(graph.findNode("prim::If").outputs())
self.assertTrue(if_outputs[0].type().str() == "Float(*, *, requires_grad=0, device=cpu)")
self.assertTrue(if_outputs[1].type().str() == "Tensor(*, *, requires_grad=0, device=cpu)")
self.assertTrue(if_outputs[2].type().str() == "Tensor(*, *, requires_grad=0, device=cpu)")
def test_list_unify(self):
# allowing a unififed int?[] would cause a runtime error b/c
# the index operation expects int?[] to be a generic list,
# but in the true branch the IValue will be a int list
with self.assertRaisesRegex(RuntimeError, "int[] in the true branch and type None[]"):
@torch.jit.script
def list_optional_fails(x):
# type: (bool) -> Optional[int]
if x:
y = [1]
else:
y = [None] # noqa: T484
return y[0]
@torch.jit.script
def list_tensors(x):
# type: (bool) -> Tuple[Tensor, List[Tensor]]
if x:
a = torch.zeros([1, 1])
y = [a]
else:
a = torch.zeros([1, 2])
y = [a]
return a, y
self.run_pass('constant_propagation', list_tensors.graph)
m = self.createFunctionFromGraph(list_tensors.graph)
# testing that tensor type of lists is unified
self.getExportImportCopy(m)
@_inline_everything
def test_import_constants_not_specialized(self):
class Mod(torch.nn.Module):
def forward(self, x):
return torch.cat(2 * [x], dim=0)
class ScriptMod(torch.jit.ScriptModule):
def __init__(self, mod):
super(ScriptMod, self).__init__()
x = torch.zeros(1, 3)
mod_fn = lambda : mod(x) # noqa: E731
self.mod = torch.jit.trace(mod_fn, tuple())
@torch.jit.script_method
def forward(self):
return self.mod()
cm = ScriptMod(Mod())
# specialized tensor in graph
FileCheck().check("Double(1, 3, strides=[3, 1], requires_grad=0, device=cpu)").run(cm.forward.graph)
buffer = io.BytesIO()
torch.jit.save(cm, buffer)
buffer.seek(0)
# when tensor is loaded as constant it isnt specialized
cm_load = torch.jit.load(buffer)
FileCheck().check_not("Double(1, 3)").run(cm_load.forward.graph)
@skipIfTorchDynamo("TorchDynamo fails with unknown reason")
def test_type_annotations_repeated_list(self):
@torch.jit.script
def float_fn(x, y):
# type: (float, BroadcastingList3[float]) -> List[float]
return y
self.assertEqual(float_fn(2.0, 1.0), float_fn(2.0, [1.0, 1.0, 1.0]))
self.assertEqual(float_fn(2.0, 1.0), float_fn(2.0, (1.0, 1.0, 1.0)))
@torch.jit.script
def float_fn_call():
print(float_fn(1.0, 1.0))
print(float_fn(1.0, (1.0, 1.0, 1.0)))
@torch.jit.script
def int_fn(x):
# type: (BroadcastingList3[int]) -> List[int]
return x
self.assertEqual(int_fn(1), int_fn([1, 1, 1]))
self.assertEqual(int_fn(1), int_fn((1, 1, 1)))
@torch.jit.script
def int_fn_call():
print(int_fn(1))
print(int_fn((1, 1, 1)))
with self.assertRaisesRegex(RuntimeError, "must be a positive integer:"):
@torch.jit.script # noqa: T484
def fn(x):
# type: (BroadcastingListx[int]) -> List[int] # noqa: T484
return x
# using CU so that flake8 error on int[2] is not raised (noqa not working)
with self.assertRaisesRegex(RuntimeError, "Unknown type constructor"):
cu = torch.jit.CompilationUnit('''
def nested(x, y):
# type: (int, Tuple[int, int[2]]) -> List[int]
return x # noqa: T484
''')
@torch.jit.script
def f(x: BroadcastingList2[int]):
return x
out = f(1)
self.assertTrue(isinstance(out[0], int))
self.assertEqual(out, [1, 1])
def test_ntuple_builtins(self):
from torch.nn.modules.utils import _single, _pair, _triple, _quadruple
def test_ints():
return _single(1), _pair(2), _triple(3), _quadruple(4)
def test_floats():
return _single(1), _pair(2.1), _triple(3.1), _quadruple(4.1)
self.checkScript(test_ints, ())
self.checkScript(test_floats, ())
def test_embedding_renorm_grad_error(self):
# Testing that the builtin call to embedding_renorm_ correctly throws
# Error when .backward() is called on its input
def embedding_norm(input, embedding_matrix, max_norm):
F.embedding(input, embedding_matrix, max_norm=0.01)
@torch.jit.script
def embedding_norm_script(input, embedding_matrix, max_norm):
# type: (Tensor, Tensor, float) -> None
F.embedding(input, embedding_matrix, max_norm=0.01)
for _ in [embedding_norm, embedding_norm_script]:
input = torch.tensor([[1, 2, 4, 5], [4, 3, 2, 9]])
embedding_matrix = torch.randn(10, 3)
var1 = torch.randn(10, 3, requires_grad=True)
var2 = var1.detach().requires_grad_()
output1 = var1 * embedding_matrix
output2 = var2 * embedding_matrix
output1.sum().backward()
ignore = F.embedding(input, embedding_matrix, max_norm=0.01)
with self.assertRaisesRegex(RuntimeError, "modified"):
output2.sum().backward()
def test_type_annotations(self):
def fn(x, y):
# type: (Tensor, Tensor) -> Tuple[Tensor, Tensor, Tensor]
return x, x * 2, x * 3
with self.assertRaisesRegex(RuntimeError, r"need 4 values .* found only 3"):
@torch.jit.script
def script_fn(x):
x, y, z, w = fn(x, x)
with self.assertRaisesRegex(RuntimeError, r"too many values .* need 2 but found 3"):
@torch.jit.script
def script_fn2(x):
x, y = fn(x, x)
def fn_unpack(x):
y, z, w = fn(x, x)
return y
def fn_index(x):
q = fn(x, x)
return x
def fn_string(str, strpair):
# type: (str, Tuple[str, str]) -> Tuple[str, int, str, str]
str1, str2 = strpair
return str, 2, str1, str2
x = torch.ones(2, 2)
self.checkScript(fn_unpack, (x,), optimize=True)
self.checkScript(fn_index, (x,), optimize=True)
self.checkScript(fn_string, ("1", ("3", "4")), optimize=True)
def test_type_annotations_varargs(self):
@torch.jit.ignore
def fn_varargs(x, *args):
return args[0] if args else x
def fn1(x, y, z):
return fn_varargs(x)
def fn2(x, y, z):
return fn_varargs(x, y)
def fn3(x, y, z):
return fn_varargs(x, y, z)
x, y, z = [torch.randn(2, 2) for _ in range(3)]
self.checkScript(fn1, (x, y, z), optimize=True)
self.checkScript(fn2, (x, y, z), optimize=True)
self.checkScript(fn3, (x, y, z), optimize=True)
def test_type_annotation_py3(self):
code = dedent("""
import torch
from torch import Tensor
from typing import Tuple
def fn(x : torch.Tensor, y : Tensor, z) -> Tuple[Tensor, Tensor, Tensor]:
return (x, y + z, z)
""")
with tempfile.TemporaryDirectory() as tmp_dir:
script_path = os.path.join(tmp_dir, 'script.py')
with open(script_path, 'w') as f:
f.write(code)
fn = get_fn('test_type_annotation_py3', script_path)
fn = torch.jit.ignore(fn)
with self.assertRaisesRegex(RuntimeError, r"Expected a value of type 'Tensor' for argument"
r" 'x' but instead found type 'Tuple\[Tensor,"):
@torch.jit.script
def bad_fn(x):
x, y = fn((x, x), x, x)
return y
with self.assertRaisesRegex(RuntimeError, r"too many values .* need 2 but found 3"):
@torch.jit.script
def bad_fn2(x):
x, y = fn(x, x, x)
return y
with self.assertRaisesRegex(RuntimeError, r"need 4 values .* found only 3"):
@torch.jit.script
def bad_fn3(x):
x, y, z, w = fn(x, x, x)
return y
def good_fn(x):
y, z, w = fn(x, x, x)
return y, z, w
self.checkScript(good_fn, (torch.ones(2, 2),), optimize=True)
def test_type_annotation_module(self):
class BaseModule(torch.jit.ScriptModule):
@torch.jit.ignore
def foo(self, x):
# type: (Tensor) -> Tensor
return x + 1
@torch.jit.ignore
def bar(self, x, y):
# type: (Tensor, Tensor) -> Tuple[Tensor, Tensor]
return x + y, y
@torch.jit.ignore
def baz(self, x, y):
return x
class ModuleTooMany(BaseModule):
@torch.jit.script_method
def method(self, x):
return self.foo(x, x)
class ModuleTooFew(BaseModule):
@torch.jit.script_method
def method(self, x):
return self.bar(x)
class ModuleTooManyAssign(BaseModule):
@torch.jit.script_method
def method(self, x):
y, z, w = self.bar(x, x)
return x
class ModuleDefault(BaseModule):
@torch.jit.script_method
def method(self, x):
y = self.baz(x)
return x
with self.assertRaisesRegex(RuntimeError, "Expected at most 2 arguments but found 3"):
ModuleTooMany()
with self.assertRaisesRegex(RuntimeError, "Argument y not provided"):
ModuleTooFew()
with self.assertRaisesRegex(RuntimeError, "need 3 values .* found only 2"):
ModuleTooManyAssign()
with self.assertRaisesRegex(RuntimeError, "Argument y not provided."):
ModuleDefault()
def test_type_inferred_from_empty_annotation(self):
"""
Test that the type inferred from an empty or missing annotation is Torch.Tensor wtih `inferred=true`
"""
@torch.jit.script
def fn(x):
return x
graph = fn.graph
n = next(graph.inputs())
self.assertTrue(n.type() == torch._C.TensorType.getInferred())
with self.assertRaisesRegex(RuntimeError, "Inferred \'x\' to be of type \'Tensor"):
fn("1")
def test_script_define_order(self):
class M(torch.jit.ScriptModule):
@torch.jit.script_method
def call_foo(self, input):
return self.foo(input)
@torch.jit.script_method
def foo(self, input):
return input + 1
m = M()
self.assertEqual(2, m.call_foo(torch.ones((), dtype=torch.int64)))
def test_script_define_order_recursive_fail(self):
class M(torch.jit.ScriptModule):
@torch.jit.script_method
def call_foo(self, input):
return self.foo(input)
@torch.jit.script_method
def foo(self, input):
self.call_foo(input)
with self.assertRaisesRegex(RuntimeError, 'called recursively'):
M()
def test_script_kwargs_fn_call(self):
class M(torch.jit.ScriptModule):
@torch.jit.script_method
def call_foo(self, input):
return self.foo(input=input, bar=1)
@torch.jit.script_method
def foo(self, bar, input):
# type: (int, Tensor) -> Tensor
return input + bar
m = M()
self.assertEqual(2, m.call_foo(torch.ones((), dtype=torch.int64)))
def test_if_define(self):
@torch.jit.script
def foo(a):
if bool(a == 0):
b = 1
else:
b = 0
return b + 1
@torch.jit.script
def foo2(a):
b = 0
if bool(a == 0):
b = 1
return b + 1
@torch.jit.script
def foo3(a):
b = 1
if bool(a == 0):
c = 4
else:
b = 0
return b + 1
a = torch.ones(1, dtype=torch.long)
b = torch.zeros(1, dtype=torch.long)
self.assertEqual(1, foo(a))
self.assertEqual(2, foo(b))
self.assertEqual(1, foo2(a))
self.assertEqual(2, foo2(b))
self.assertEqual(1, foo3(a))
self.assertEqual(2, foo3(b))
def test_script_module_export_submodule(self):
class M1(torch.jit.ScriptModule):
def __init__(self):
super(M1, self).__init__()
self.weight = nn.Parameter(torch.randn(2))
@torch.jit.script_method
def forward(self, thing):
return self.weight + thing
class M2(torch.jit.ScriptModule):
def __init__(self):
super(M2, self).__init__()
# test submodule
self.sub = M1()
self.weight = nn.Parameter(torch.randn(2, 3))
self.bias = nn.Parameter(torch.randn(2))
self.define("""
def hi(self, a):
return self.weight.mm(a)
""")
@torch.jit.script_method
def doit(self, input):
return self.weight.mm(input)
@torch.jit.script_method
def doit2(self, input):
return self.weight.mm(input)
@torch.jit.script_method
def doit3(self, input):
return input + torch.ones([1], dtype=torch.double)
@torch.jit.script_method
def forward(self, input):
a = self.doit(input)
b = self.doit2(input)
c = self.hi(input)
return a + b + self.bias + c
with torch.jit.optimized_execution(False):
m_orig = M2()
m_import = self.getExportImportCopy(m_orig)
input = torch.randn(3, 2)
self.assertEqual(m_orig.doit(input), m_import.doit(input))
self.assertEqual(m_orig.hi(input), m_import.hi(input))
self.assertEqual(m_orig.doit3(input), m_import.doit3(input))
self.assertEqual(m_orig.forward(input), m_import.forward(input))
@slowTest
def test_compile_module_with_constant(self):
class Double(nn.Module):
def __init__(self, downsample=None):
super(Double, self).__init__()
def forward(self, input):
return input * 2
class Mod(nn.Module):
__constants__ = ['downsample']
def __init__(self, downsample=None):
super(Mod, self).__init__()
self.downsample = downsample
def forward(self, input):
if self.downsample is not None:
return self.downsample(input)
return input
none_mod = torch.jit.script(Mod(None))
double_mod = torch.jit.script(Mod(Double()))
self.assertEqual(none_mod(torch.tensor(1)), torch.tensor(1))
self.assertEqual(double_mod(torch.tensor(1)), torch.tensor(1) * 2)
def test_device_kwarg(self):
from torch import device
def f():
return device(type='cuda'), torch.device(type='cpu')
self.checkScript(f, ())
def test_script_module_export_tensor_type(self):
class M(torch.jit.ScriptModule):
def __init__(self, type):
super(M, self).__init__()
self.param = torch.nn.Parameter(torch.zeros((5, 5), dtype=type).random_())
@torch.jit.script_method
def foo(self):
return self.param
with torch.jit.optimized_execution(False):
for type in [torch.float, torch.double]:
m_orig = M(type)
m_import = self.getExportImportCopy(m_orig)
# check to make sure the storage wasn't resized
self.assertTrue(m_orig.param.storage().size() == 25)
self.assertEqual(m_orig.foo(), m_import.foo())
self.assertTrue(m_orig.foo().dtype == m_import.foo().dtype)
@unittest.skipIf(not RUN_CUDA, "testing cuda tensors require CUDA")
def test_script_module_export_tensor_cuda(self):
class M(torch.jit.ScriptModule):
def __init__(self):
super(M, self).__init__()
self.param = torch.nn.Parameter(torch.zeros((5, 5), device='cuda:0').random_())
@torch.jit.script_method
def foo(self):
return self.param
m_orig = M()
m_import = self.getExportImportCopy(m_orig)
# check to make sure the storage wasn't resized
self.assertTrue(m_orig.param.storage().size() == 25)
self.assertTrue(m_import.foo().device == torch.device('cuda:0'))
self.assertEqual(m_orig.foo(), m_import.foo())
self.assertTrue(m_orig.foo().dtype == m_import.foo().dtype)
def test_script_module_export_blocks(self):
class M(torch.jit.ScriptModule):
def __init__(self, n, m):
super(M, self).__init__()
self.weight = torch.nn.Parameter(torch.rand(n, m))
@torch.jit.script_method
def forward(self, input):
if bool(input.sum() > 0):
output = self.weight.mv(input)
else:
output = self.weight + input
return output
m_orig = M(200, 200)
m_import = self.getExportImportCopy(m_orig)
t = torch.rand(200)
self.assertEqual(m_orig(t), m_import(t))
def test_script_module_export_shared_storage(self):
class M(torch.jit.ScriptModule):
def __init__(self):
super(M, self).__init__()
self.param1 = torch.nn.Parameter(torch.rand(5, 5))
self.param2 = torch.nn.Parameter(self.param1[3])
self.param3 = torch.nn.Parameter(torch.rand(5, 5))
self.param4 = torch.nn.Parameter(torch.rand(11, 5)[1:6])
@torch.jit.script_method
def foo(self):
return self.param1 + self.param2 + self.param3 + self.param4
with torch.jit.optimized_execution(False):
m_orig = M()
m_import = self.getExportImportCopy(m_orig)
self.assertEqual(m_orig.foo(), m_import.foo())
self.assertTrue(m_import.param1.storage().data_ptr() == m_import.param2.storage().data_ptr())
self.assertTrue(m_import.param1.storage().data_ptr() != m_import.param3.storage().data_ptr())
def test_sequential_intermediary_types(self):
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
def forward(self, x):
return x + 3
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
def forward(self, x):
return {"1": x}
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
self.foo = torch.nn.Sequential(A(), B())
def forward(self, x):
return self.foo(x)
self.checkModule(C(), (torch.tensor(1),))
def test_ellipsis_const_mid(self):
def ellipsize(x):
# type: (Tensor) -> List[int]
return x[2, Ellipsis, 0:4, 4:8].size()
dummy = torch.zeros(8, 8, 8, 8, 8)
self.checkScript(ellipsize, (dummy,), optimize=True)
def test_ellipsis_const_mid_select(self):
def ellipsize(x):
# type: (Tensor) -> List[int]
return x[2, Ellipsis, 4, 4, 4:8, 2].size()
dummy = torch.zeros(8, 8, 8, 8, 8, 8, 8)
self.checkScript(ellipsize, (dummy,), optimize=True)
def test_ellipsis_const_start(self):
def ellipsize(x):
# type: (Tensor) -> List[int]
return x[Ellipsis, 0:4, 4:8].size()
dummy = torch.zeros(8, 8, 8, 8, 8)
self.checkScript(ellipsize, (dummy,), optimize=True)
def test_ellipsis_const_end(self):
def ellipsize(x):
# type: (Tensor) -> List[int]
return x[0:4, 2, Ellipsis].size()
dummy = torch.zeros(8, 8, 8, 8, 8)
self.checkScript(ellipsize, (dummy,), optimize=True)
def test_ellipsis_mid(self):
def ellipsize(x):
# type: (Tensor) -> List[int]
return x[2, ..., 0:4, 4:8].size()
dummy = torch.zeros(8, 8, 8, 8, 8)
self.checkScript(ellipsize, (dummy,), optimize=True)
def test_ellipsis_mid_select(self):
def ellipsize(x):
# type: (Tensor) -> List[int]
return x[2, ..., 4, 4, 4:8, 2].size()
dummy = torch.zeros(8, 8, 8, 8, 8, 8, 8)
self.checkScript(ellipsize, (dummy,), optimize=True)
def test_ellipsis_start(self):
def ellipsize(x):
# type: (Tensor) -> List[int]
return x[..., 0:4, 4:8].size()
dummy = torch.zeros(8, 8, 8, 8, 8)
self.checkScript(ellipsize, (dummy,), optimize=True)
def test_ellipsis_end(self):
def ellipsize(x):
# type: (Tensor) -> List[int]
return x[0:4, 2, ...].size()
dummy = torch.zeros(8, 8, 8, 8, 8)
self.checkScript(ellipsize, (dummy,), optimize=True)
def test_torch_manual_seed(self):
with freeze_rng_state():
def test():
torch.manual_seed(2)
return torch.rand(1)
script = torch.jit.script(test)
self.assertEqual(test(), script())
graph = script.graph_for()
FileCheck().check("aten::manual_seed").run(graph)
def test_index_select_shape_prop(self):
@torch.jit.script
def foo(x, y):
return torch.index_select(x, index=y, dim=1)
a = torch.zeros(2, 2)
b = torch.zeros(4, dtype=torch.long)
torch._C._jit_pass_complete_shape_analysis(foo.graph, (a, b), False)
FileCheck().check("Double(2, 4, strides=[4, 1], requires_grad=0, device=cpu)").run(str(foo.graph))
def test_shape_analysis_loop(self):
def foo(a, b, x):
c = a
# on the first iteration of the loop it appears that
# c should have a expand to the size of b
# but on the second+ iterations, there is no broadcast and the
# sizes are different.
# previously this would cause the compiler to (1) enter an infinite
# loop trying to compute the shape, and (2) insert invalid
# broadcasts.
# this test ensure we don't regress on these issues
for _ in range(2):
a = c + b
c = x
b = x
return a
self.checkScript(foo, (torch.zeros(1), torch.zeros(4), torch.zeros(5)), optimize=False)
def test_intlist_args(self):
def func_1(x):
return torch.nn.functional.adaptive_avg_pool1d(x, 1)
def func_2(x):
return torch.nn.functional.adaptive_avg_pool1d(x, output_size=1)
def func_3(x):
return torch.nn.functional.adaptive_avg_pool1d(x, output_size=[1])
x = torch.randn(8, 8, 8)
self.checkScript(func_1, [x], optimize=True)
self.checkScript(func_2, [x], optimize=True)
self.checkScript(func_3, [x], optimize=True)
def test_wrong_implicit_expand(self):
@_trace(torch.zeros(3), torch.zeros(1))
def foo(a, b):
return a + b
a = torch.rand(4)
b = torch.rand(4)
self.assertEqual(a + b, foo(a, b))
def test_builtin_args_fails(self):
with self.assertRaisesRegex(RuntimeError, 'Argument self not provided'):
@torch.jit.script
def f1(a):
torch.sum(foo=4)
with self.assertRaisesRegex(RuntimeError, 'specified twice'):
@torch.jit.script
def f2(a):
torch.sum(a, self=a)
with self.assertRaisesRegex(RuntimeError, 'not provided'):
@torch.jit.script
def f3(a):
torch.sum(dim=4)
with self.assertRaisesRegex(RuntimeError, 'for argument \'tensors\' but instead found type \'Tensor'):
@torch.jit.script
def f4(a):
torch.cat(a)
with self.assertRaisesRegex(RuntimeError, r'argument \'tensors\' but instead found type \'List\[int\]'):
@torch.jit.script
def f5(a):
torch.cat([3])
with self.assertRaisesRegex(RuntimeError, r'Expected a value of'
r' type \'List\[int\]\' for argument'
r' \'size\' but instead found type '
r'\'List\[Union\[List\[int\], int\]\]'):
@torch.jit.script
def f6(a):
a.expand(size=[3, [4]])
def test_builtin_args(self):
def t0(a):
# default arg dim
return torch.cat([a, a])
self.checkScript(t0, (torch.zeros(1, 1),))
def t1(a):
# keywords out of order
return torch.cat(dim=1, tensors=[a, a])
self.checkScript(t1, (torch.zeros(1, 1, 2),))
def t2(a):
# mix const/non-const attributes
if 1 == 1:
b = 1
else:
b = 0
return torch.sum(a, dim=b, keepdim=False)
self.checkScript(t2, (torch.zeros(1, 1, 2),))
def test_parser_type_annotations(self):
cu = torch.jit.CompilationUnit('''
def foo(x : Tensor, y : Tuple[Tuple[Tensor, Tensor], Tensor]) -> Tuple[Tensor, Tensor]:
return x, x
''')
self.assertExpected(str(cu.foo.schema))
def test_parser_type_annotations_comment(self):
cu = torch.jit.CompilationUnit('''
def foo(x, y):
# type: (Tensor, Tuple[Tuple[Tensor, Tensor], Tensor]) -> Tuple[Tensor, Tensor]
return x, x
''')
self.assertExpected(str(cu.foo.schema))
def test_parser_type_annotations_unknown_type(self):
with self.assertRaisesRegex(RuntimeError, "Unknown type name 'Foo'"):
cu = torch.jit.CompilationUnit('''
def foo(x : Tensor, y : Tuple[Tuple[Foo, Tensor], Tensor]) -> Tuple[Tensor, Tensor]:
return x, x
''')
def test_parser_type_annotations_subscript_non_ident(self):
with self.assertRaisesRegex(RuntimeError, r'Subscripted type must be a type identifier'):
cu = torch.jit.CompilationUnit('''
def foo(x : Tensor, y : Tuple[Tensor, Tensor][Tensor]) -> Tuple[Tensor, Tensor]:
return x, x
''')
def test_parser_type_annotations_subscript_tensor(self):
with self.assertRaisesRegex(RuntimeError, r'Unknown type constructor Tensor'):
cu = torch.jit.CompilationUnit('''
def foo(x : Tensor, y : Tensor[Tensor, Tensor]) -> Tuple[Tensor, Tensor]:
return x, x
''')
def test_parser_type_annotations_incompatible_expression(self):
with self.assertRaisesRegex(RuntimeError, r'Expression of type \+ cannot be used in a type expression'):
cu = torch.jit.CompilationUnit('''
def foo(x : Tensor, y : Tuple[3 + 4, Tensor]) -> Tuple[Tensor, Tensor]:
return x, x
''')
def test_gather_dynamic_index(self):
def t(x):
gather1 = x[0]
idx = 0 + 1
gather2 = x[idx]
return gather1 + gather2
self.checkScript(t, (torch.zeros(3, 2, 3),))
def test_torch_ignore_conversion_to_none(self):
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
@torch.jit.ignore
def ignored(self, a: int) -> None:
l: int = len([2 for i in range(a) if i > 2])
return
def forward(self) -> int:
a: int = 4
b: int = 5
self.ignored(a)
return a + b
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
@torch.jit.ignore
def ignored(self, a: int):
l: int = len([2 for i in range(a) if i > 2])
return
def forward(self) -> int:
a: int = 4
b: int = 5
self.ignored(a)
return a + b
modelA = torch.jit.script(A())
self.assertEqual(modelA(), 9)
modelB = torch.jit.script(B())
self.assertEqual(modelB(), 9)
def test_addmm_grad(self):
""" This test checks several things:
1. An expand node was inserted before the addmm operating on the
bias term.
2. The fused form of addmm appears in the ultimate graph that's
executed.
3. A sum op was emitted for accumulating gradients along the 0th
(expanded) dimension of the bias term.
4. The correct symbolic representation for the backward pass of the
mm operator was emitted (x.t() -> mm)
TODO: we should actually check these conditions once we have a way
to dump the GraphExecutor state. Namely the processed forward graph
and the backward graph.
"""
@torch.jit.script
def addmm_grad_test(b, x, w):
return torch.addmm(b, x, w)
# Initialize param and input values
w_init = torch.rand(2, 5)
b_init = torch.rand(5)
x = torch.rand(3, 2)
# Clone trainable params
b = b_init.clone()
b.requires_grad_()
w = w_init.clone()
w.requires_grad_()
# Test symbolic differentiation
y = addmm_grad_test(b, x, w)
y.sum().backward()
# clone params for autograd reference
b_ref = b_init.clone()
b_ref.requires_grad_()
w_ref = w_init.clone()
w_ref.requires_grad_()
y_ref = torch.addmm(b_ref, x, w_ref)
y_ref.sum().backward()
self.assertEqual(w.grad, w_ref.grad)
self.assertEqual(b.grad, b_ref.grad)
@unittest.skipIf(not RUN_CUDA, "running tests on cuda to verify cudnn fix")
def test_batch_norm_inference_backward_cuda(self):
with enable_profiling_mode_for_profiling_tests():
class MyBatchNorm(torch.nn.Module):
def __init__(self, num_features, affine, track_running_stats):
super(MyBatchNorm, self).__init__()
self.bn = torch.nn.BatchNorm2d(
num_features, 1e-5, affine=affine, track_running_stats=track_running_stats).float()
def forward(self, x: torch.Tensor):
o = self.bn(x)
o = torch.nn.functional.relu(o)
return o
batch = 4
c = 2
hw = 3
# Initialize param and input values
x_init = torch.randn(batch, c, hw, hw, dtype=torch.float).cuda()
grad = torch.randn(batch, c, hw, hw, dtype=torch.float).cuda()
training = False
affine = True
track_running_stats = True
module = torch.jit.script(MyBatchNorm(c, affine, track_running_stats)).cuda()
ref_module = MyBatchNorm(c, affine, track_running_stats).cuda()
module.eval()
ref_module.eval()
jit_module = torch.jit.script(module)
ref_module.load_state_dict(module.state_dict())
x = x_init.detach().clone()
x.requires_grad_()
x_ref = x_init.detach().clone()
x_ref.requires_grad_()
# Test symbolic differentiation
# Run Forward and Backward thrice to trigger autodiff graph
for i in range(0, 3):
y = jit_module(x)
y.backward(grad)
x.grad.zero_()
module.bn.running_mean.zero_()
module.bn.running_var.fill_(1.0)
ref_module.bn.running_mean.zero_()
ref_module.bn.running_var.fill_(1.0)
# run jitted module
y = jit_module(x)
y.backward(grad)
# reference computation
y_ref = ref_module(x_ref)
y_ref.backward(grad)
self.assertEqual(y_ref, y)
self.assertEqual(x.grad, x_ref.grad)
self.assertEqual(module.bn.running_mean, ref_module.bn.running_mean)
self.assertEqual(module.bn.running_var, ref_module.bn.running_var)
def test_zeros(self):
class M(torch.jit.ScriptModule):
__constants__ = ['d']
def __init__(self):
super(M, self).__init__()
self.d = torch.device('cpu')
@torch.jit.script_method
def create(self):
return torch.zeros([1, 1, 2], dtype=torch.float, device=self.d, layout=torch.strided)
r = M().create()
self.assertEqual(r.dtype, torch.float)
self.assertEqual(torch.zeros([1, 1, 2], dtype=torch.float), r)
def fn():
return torch.zeros((1, 2, 3))
self.checkScript(fn, ())
def test_vararg_zeros(self):
def foo():
return torch.zeros(3, 4, 5, dtype=torch.int)
self.checkScript(foo, ())
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "the original version of test_rand")
    def test_rand(self):
        # Legacy-executor version: checks that shape analysis propagates the
        # default dtype (double) through torch.rand and pins torch.randint to Long.
        def test_rand():
            a = torch.rand([3, 4])
            return a + 1.0 - a
        self.checkScript(test_rand, ())
        fn = torch.jit.script(test_rand)
        out = fn()
        self.assertEqual(out.dtype, torch.double)
        g = fn.graph_for()
        # Testing shape analysis correctly setting type
        if GRAPH_EXECUTOR != ProfilingMode.SIMPLE:
            FileCheck().check("Double(*, *, requires_grad=0, device=cpu)") \
                       .check_not("Float(*, *, requires_grad=0, device=cpu)").run(g)
        @torch.jit.script
        def randint():
            return torch.randint(0, 5, [1, 2])
        out = randint()
        self.assertEqual(out.dtype, torch.int64)
        if GRAPH_EXECUTOR != ProfilingMode.SIMPLE:
            FileCheck().check("Long(*, *, requires_grad=0, device=cpu)") \
                       .check_not("Float(*, *, requires_grad=0, device=cpu)") \
                       .check_not("Double(*, *, requires_grad=0, device=cpu)") \
                       .run(randint.graph_for())
    @unittest.skipIf(not RUN_CUDA, "no CUDA")
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, "skip if profiling isn't enabled")
    def test_autodiff_complex(self):
        # Eager and scripted versions of a complex-valued computation should agree
        # on whether the output requires grad (grad_fn presence) across profiling runs.
        def foo(x: torch.Tensor, y: torch.Tensor, W: torch.Tensor):
            return torch.exp(torch.mm(torch.complex(x, y), W.cfloat()))
        @torch.jit.script
        def jitted_foo(x: torch.Tensor, y: torch.Tensor, W: torch.Tensor):
            return torch.exp(torch.mm(torch.complex(x, y), W.cfloat()))
        x = torch.randn(128, 16, dtype=torch.float32, device='cuda:0')
        y = torch.randn(128, 16, dtype=torch.float32, device='cuda:0')
        W = torch.randn(16, 1, dtype=torch.float32, device='cuda:0', requires_grad=True)
        W.data /= 4
        with enable_profiling_mode_for_profiling_tests():
            # multiple iterations so the profiling executor reaches the optimized plan
            for i in range(4):
                self.assertTrue((foo(x, y, W).grad_fn is None) == (jitted_foo(x, y, W).grad_fn is None))
    def test_linear_grad(self):
        # Gradients of scripted F.linear must match eager, both with and
        # without a bias tensor.  The repeated forward/backward calls are
        # deliberate: they warm up the profiling executor before comparing.
        with enable_profiling_mode_for_profiling_tests():
            def t(x: torch.Tensor, w: torch.Tensor, b: Optional[torch.Tensor]):
                return torch.nn.functional.linear(x, w, b)
            x_init = torch.randn(4, 2)
            w_init = torch.randn(3, 2)
            b_init = torch.randn(3)
            grad = torch.randn(4, 3)
            with disable_autodiff_subgraph_inlining():
                # script module
                jit_t = torch.jit.script(t)
                x = x_init.detach().requires_grad_()
                w = w_init.detach().requires_grad_()
                b = b_init.detach().requires_grad_()
                x_ref = x_init.detach().requires_grad_()
                w_ref = w_init.detach().requires_grad_()
                b_ref = b_init.detach().requires_grad_()
                # profiling/optimization runs
                jit_o = jit_t(x, w, b)
                jit_o.backward(grad)
                jit_o = jit_t(x, w, b)
                jit_o.backward(grad)
                # clear warm-up gradients before the measured run
                x.grad.zero_()
                w.grad.zero_()
                b.grad.zero_()
                jit_o = jit_t(x, w, b)
                jit_o.backward(grad)
                o = t(x_ref, w_ref, b_ref)
                o.backward(grad)
                self.assertEqual(jit_o, o)
                self.assertEqual(x.grad, x_ref.grad)
                self.assertEqual(w.grad, w_ref.grad)
                self.assertEqual(b.grad, b_ref.grad)
                # repeat with bias=None to exercise the Optional path
                x.grad.zero_()
                w.grad.zero_()
                x_ref.grad.zero_()
                w_ref.grad.zero_()
                jit_o = jit_t(x, w, None)
                jit_o.backward(grad)
                o = t(x_ref, w_ref, None)
                o.backward(grad)
                self.assertEqual(jit_o, o)
                self.assertEqual(x.grad, x_ref.grad)
                self.assertEqual(w.grad, w_ref.grad)
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, "the profiling version of test_rand")
    def test_rand_profiling(self):
        # Profiling-executor version of test_rand: the optimized graph should
        # carry concrete profiled types (Double for rand, Long for randint).
        def test_rand():
            a = torch.rand([3, 4])
            return a + 1.0 - a
        # Testing shape analysis correctly setting type
        with enable_profiling_mode_for_profiling_tests():
            with num_profiled_runs(1):
                fn = torch.jit.script(test_rand)
                out = fn()
                graph_str = torch.jit.last_executed_optimized_graph()
                self.assertEqual(out.dtype, torch.double)
                FileCheck().check("Double(3, 4, strides=[4, 1], requires_grad=0, device=cpu)") \
                           .check_not("Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu)").run(graph_str)
        # fn = self.checkScript(test_rand, ())
        # out = fn()
        # self.assertEqual(out.dtype, torch.double)
        @torch.jit.script
        def randint():
            return torch.randint(0, 5, [1, 2])
        with enable_profiling_mode_for_profiling_tests():
            with num_profiled_runs(1):
                out = randint()
                graph_str = torch.jit.last_executed_optimized_graph()
                self.assertEqual(out.dtype, torch.int64)
                FileCheck().check("profiled_type=Long(1, 2, strides=[2, 1], requires_grad=0, device=cpu)").run(graph_str)
    def test_erase_number_types(self):
        # The erase_number_types pass should remove int prim::Constant nodes.
        def func(a):
            b = 7 + 1 + 3
            c = a + b
            c += b
            return c
        graph = torch.jit.script(func).graph
        FileCheck().check("int = prim::Constant").check("aten::add_").run(str(graph))
        self.run_pass("erase_number_types", graph)
        FileCheck().check_not("int = prim::Constant").run(str(graph))
    def test_refine_tuple_types(self):
        # TupleConstruct output type is not correct here.
        graph_str = """
        graph(%a : Float(123), %b : Float(4, 5, 6)):
          %c : (Tensor, Tensor) = prim::TupleConstruct(%a, %b)
          return (%c)
        """
        graph = parse_ir(graph_str)
        torch._C._jit_pass_refine_tuple_types(graph)
        # After the pass, the output type should've been updated.
        self.assertTrue('(Float(123), Float(4, 5, 6))' in str(graph.findNode('prim::TupleConstruct').output()))
        # TODO(henrytu): Add test for RefineTypes for NamedTuple when it's supported by IR parser.
    def test_remove_dropout(self):
        # The remove_dropout pass must refuse to run on a training-mode module,
        # and in eval mode must strip aten::dropout without changing results.
        weight_0_shape = (20, 5)
        weight_1_shape = (20, 20)
        input_shape = (10, 5)
        class M(torch.nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.weight_0 = torch.nn.Parameter(torch.rand(weight_0_shape))
                self.weight_1 = torch.nn.Parameter(torch.rand(weight_1_shape))
            def forward(self, x):
                o = F.linear(x, self.weight_0)
                o = F.dropout(o, training=self.training)
                o = F.linear(o, self.weight_1)
                return o
        data = torch.rand(input_shape)
        m = M()
        m = torch.jit.script(m)
        with self.assertRaisesRegex(RuntimeError, r'Dropout removal module in training mode is not yet supported'):
            torch._C._jit_pass_remove_dropout(m._c)
        m.eval()
        ref_res = m(data)
        # Need to inline otherwise we see instances of Function.
        # We would have to use torch.linear/dropout to get around it otherwise.
        from torch.jit._recursive import wrap_cpp_module
        m = wrap_cpp_module(torch._C._freeze_module(m._c))
        torch._C._jit_pass_remove_dropout(m._c)
        res = m(data)
        FileCheck().check_not("aten::dropout").run(str(m.graph))
        torch.testing.assert_close(ref_res, res, rtol=1e-2, atol=1e-3)
    def test_unfold_zero_dim(self):
        # Shape analysis should give aten::unfold on a 0-d tensor the same
        # output rank as eager execution.
        def fn(x):
            return x.unfold(0, 1, 1)
        graph = torch.jit.script(fn).graph
        torch._C._jit_pass_complete_shape_analysis(graph, (torch.tensor(0.39),), False)
        out_dims = fn(torch.tensor(0.3923)).ndim
        self.assertEqual(graph.findNode("aten::unfold").output().type().dim(), out_dims)
    def test_mm_batching(self):
        # An unrolled LSTM loop should get its matmuls batched/tree-reduced by
        # the optimizer, while still matching the unscripted reference (values
        # and gradients).
        with enable_profiling_mode_for_profiling_tests():
            lstm_cell = torch.jit.script(LSTMCellS)
            def lstm(x, hx, cx, w_ih, w_hh, b_ih, b_hh):
                for i in range(x.size(0)):
                    hx, cx = lstm_cell(x[i], hx, cx, w_ih, w_hh, b_ih, b_hh)
                return hx
            slstm = torch.jit.script(lstm)
            inputs = get_lstm_inputs('cpu', training=True, seq_length=10)
            slstm(*inputs, profile_and_replay=True).sum().backward(retain_graph=True)
            if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
                slstm(*inputs, profile_and_replay=True).sum().backward()
            fw_graph = slstm.graph_for(*inputs)
            if GRAPH_EXECUTOR == ProfilingMode.LEGACY:
                bw_graph = backward_graph(slstm, diff_graph_idx=0)
                self.assertTrue('prim::MMBatchSide' in str(fw_graph))
                self.assertTrue('prim::MMTreeReduce' in str(bw_graph))
            sout = slstm(*inputs)
            out = lstm(*inputs)
            self.assertEqual(sout, out)
            self.assertEqual(torch.autograd.grad(sout.sum(), inputs),
                             torch.autograd.grad(out.sum(), inputs))
    def test_loop_unrolling(self):
        # Dynamic-trip-count loop: unrolled by a factor of 8 with an epilogue loop.
        def fn(x):
            y = 0
            for i in range(int(x)):
                y -= i
            return y
        graph = torch.jit.script(fn).graph
        self.run_pass('loop_unrolling', graph)
        unroll_factor = 8
        FileCheck().check("prim::Loop").check_count("aten::sub", unroll_factor) \
                   .check("prim::Loop").check("aten::sub").run(str(graph))
        self.checkScript(fn, (torch.tensor(10),))
    def test_loop_unrolling_const(self):
        # Constant-trip-count loops should be unrolled away entirely.
        def fn():
            y = 0
            for _ in range(10):
                y -= 1
            return y
        def fn2():
            y = 0
            for i in range(10):
                y -= i
            return y
        def check(fn, name):
            graph = torch.jit.script(fn).graph
            self.run_pass('loop_unrolling', graph)
            # entirely unrolled
            FileCheck().check_not("prim::Loop'").run(str(graph))
            self.checkScript(fn, ())
        check(fn, 'add_const')
        check(fn2, 'add_iter')
    def test_loop_unrolling_nested(self):
        # Only the inner dynamic loop gets the factor-8 unroll treatment.
        def fn(x):
            y = 0
            for _ in range(10):
                for j in range(int(x)):
                    y -= j
            return y
        graph = torch.jit.script(fn).graph
        self.run_pass('loop_unrolling', graph)
        # inner loop with 8 subs followed by loop epilogue
        unroll_factor = 8
        FileCheck().check("prim::Loop").check("prim::Loop").check_count('aten::sub', unroll_factor) \
                   .check("prim::Loop").check("aten::sub").run(str(graph))
        self.checkScript(fn, (torch.tensor(10),))
    def test_loop_unroll_unused_counter(self):
        # When the induction variable is unused, no counter update (aten::add)
        # should survive unrolling.
        def fn(x):
            y = 0
            for _ in range(int(x)):
                y -= 1
            return y
        graph = torch.jit.script(fn).graph
        self.run_pass('loop_unrolling', graph)
        FileCheck().check("prim::Loop").check_not("aten::add").check("return") \
                   .run(str(graph))
    def test_loop_unroll_negative(self):
        # range() with non-positive trip counts must behave like Python (zero iterations).
        def fn(x):
            y = 0
            for _ in range(int(x)):
                y += 1
            return y
        self.checkScript(fn, (torch.tensor(-20),))
        self.checkScript(fn, (torch.tensor(-2),))
        self.checkScript(fn, (torch.tensor(-1),))
        self.checkScript(fn, (torch.tensor(0),))
        self.checkScript(fn, (torch.tensor(1),))
        self.checkScript(fn, (torch.tensor(2),))
    def test_where(self):
        # Functional form: torch.where(cond, x, y) scripts and matches eager.
        def fn(x, y):
            return torch.where(x > 0.0, x, y)
        self.checkScript(fn, (torch.randn(3, 2, dtype=torch.float), torch.ones(3, 2, dtype=torch.float)))
    def test_where_method(self):
        # Method form: Tensor.where(cond, y).
        def fn(x, y):
            return x.where(x > 0.0, y)
        self.checkScript(fn, (torch.randn(3, 2, dtype=torch.float), torch.ones(3, 2, dtype=torch.float)))
    def test_union_to_number(self):
        # Union[int, complex, float] parameters should collapse to Scalar in the graph.
        @torch.jit.script
        def fn(x: Union[int, complex, float], y: Union[int, complex, float]):
            return x + y
        FileCheck().check(": Scalar):").run(fn.graph)
    def test_reassign_module_lhs(self):
        # Assigning to `self` inside a script method is a compile-time error.
        with self.assertRaisesRegex(RuntimeError, 'Cannot re-assign \'self\''):
            class ReassignSelfLHS(torch.jit.ScriptModule):
                @torch.jit.script_method
                def forward(self, x):
                    for _ in range(20):
                        self = x
                    return self
            ReassignSelfLHS()
    def test_reassign_module_rhs(self):
        # Rebinding a parameter name to a module value is also rejected.
        with self.assertRaisesRegex(RuntimeError, 'Cannot re-assign \'x\' to a value of type module'):
            class ReassignSelfRHS(torch.jit.ScriptModule):
                @torch.jit.script_method
                def forward(self, x):
                    for _ in range(20):
                        x = self
                    return self
            ReassignSelfRHS()
    def test_unknown_builtin(self):
        # Calling a nonexistent tensor method should fail with a clear message.
        with self.assertRaisesRegex(RuntimeError, 'object has no attribute or method'):
            @torch.jit.script
            def unknown_builtin(x):
                return x.splork(3)
    def test_return_tuple(self):
        # Returning a nested tuple (tuple, tensor) should round-trip through scripting.
        def return_tuple(x):
            a = (x, x)
            return a, x
        self.checkScript(return_tuple, (torch.rand(4),))
    def test_add_tuple_optional(self):
        # Tuple concatenation with Optional element types keeps the annotated type.
        def foo(input: Tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]]) -> Optional[torch.Tensor]:
            changed_input = input[0] + 1
            value: Tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]] = (changed_input,) + input[1:]
            return value[2]
        inp: Tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]] = (torch.rand(4), None, None)
        self.checkScript(foo, (inp,))
    def test_add_tuple_non_optional(self):
        # Same concatenation pattern with all-Tensor element types.
        def foo(input: Tuple[torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
            changed_input = input[0] + 1
            value: Tuple[torch.Tensor, torch.Tensor, torch.Tensor] = (changed_input,) + input[1:]
            return torch.sum(value[2]) + 4
        inp: Tuple[torch.Tensor, torch.Tensor, torch.Tensor] = (torch.rand(4), torch.rand(4), torch.rand(4))
        self.checkScript(foo, (inp,))
    def test_add_tuple_different_types(self):
        # Concatenating heterogeneous tuples produces the element-wise merged type.
        def foo(a: Tuple[int, float], b: Tuple[int]) -> int:
            c: Tuple[int, float, int] = a + b
            d: Tuple[int, float, int, int] = c + b
            return d[3] + 1
        a = (1, 2.0)
        b = (3,)
        self.checkScript(foo, (a, b))
    def test_add_tuple_same_types(self):
        # Concatenating homogeneous int tuples.
        def foo(a: Tuple[int, int], b: Tuple[int, int, int]) -> int:
            c: Tuple[int, int, int, int, int] = a + b
            d: Tuple[int, int, int, int, int, int, int, int] = c + b
            return d[6] - 2
        a = (1, 2)
        b = (3, 4, 5)
        self.checkScript(foo, (a, b))
    def test_method_no_self(self):
        # A script method without a `self` parameter must be rejected at compile time.
        with self.assertRaisesRegex(RuntimeError, 'methods must have a self argument'):
            class MethodNoSelf(torch.jit.ScriptModule):
                @torch.jit.script_method  # noqa: B902
                def forward():  # noqa: B902
                    return torch.zeros(3, 4)
            MethodNoSelf()
    def test_return_stmt_not_at_end(self):
        # Early returns inside if/else branches should script correctly.
        def return_stmt(x):
            if bool(x > 3):
                return x + 3
            else:
                return x
        self.checkScript(return_stmt, (torch.rand(1),))
    def test_for_in_range(self):
        # Basic range() iteration.
        def fn():
            c = 0
            for i in range(100):
                c += i
            return c
        self.checkScript(fn, ())
    def test_for_in_range_dynamic(self):
        # Nested range() where the inner bound depends on the outer index.
        def fn():
            c = 0
            for i in range(100):
                acc = 0
                for j in range(i):
                    acc += j
                c += acc
            return c
        self.checkScript(fn, (), optimize=False)
    def test_for_in_range_ast(self):
        # Same nested-range shape, compiled through the frontend AST path.
        def test_script_for_in_range_ast():
            c = 0
            for i in range(100):
                acc = 0
                for j in range(i):
                    acc += j
                c += acc
            return c
        self.checkScript(test_script_for_in_range_ast, ())
    def test_for_in_range_if_ast(self):
        # A loop-carried variable whose shape changes across if/else branches
        # (unsqueeze on first iteration, cat afterwards) should script.
        @torch.jit.script
        def test_script_for_in_range_if_ast(x):
            output = x
            for i in range(20):
                if i == 0:
                    output = x.unsqueeze(0)
                else:
                    output = torch.cat((output, x.unsqueeze(0)), dim=0)
            return output
        inputs = self._make_scalar_vars([0], torch.int64)
        self.assertEqual(test_script_for_in_range_if_ast(*inputs).shape[0], 20)
    def test_for_in_range_start_end(self):
        # Two-argument range(start, end).
        def fn():
            x = 0
            for i in range(7, 100):
                x += i
            return x
        self.checkScript(fn, ())
    def test_for_in_range_start_end_step(self):
        # Three-argument range with positive and negative steps, empty ranges included.
        def fn(start, end, step):
            # type: (int, int, int) -> int
            x = 0
            for i in range(start, end, step):
                x += i
            return x
        self.checkScript(fn, (7, 100, 7))
        self.checkScript(fn, (7, 100, -7))
        self.checkScript(fn, (2, -11, -3))
        self.checkScript(fn, (2, -11, 3))
        self.checkScript(fn, (2, 10, 3))
        self.checkScript(fn, (-2, -10, -10))
    def test_for_in_range_zero_step(self):
        # range() with step 0 must raise at runtime, matching Python semantics.
        @torch.jit.script
        def fn():
            x = 0
            for i in range(2, -11, 0):
                x += i
            return x
        with self.assertRaisesRegex(RuntimeError, "must not be zero"):
            fn()
    def test_range_args(self):
        # range() argument validation: no args, and non-int args, are compile errors.
        with self.assertRaisesRegex(RuntimeError, r'range expected at least 1 arguments, got 0'):
            @torch.jit.script
            def range_no_arg(x):
                for _ in range():
                    x += 1
                return x
        with self.assertRaisesRegex(RuntimeError, r'found float'):
            @torch.jit.script
            def range_non_float():
                for i in range(.5):
                    print(i)
    def test_parse_empty_tuple_annotation(self):
        # Tuple[()] (the empty tuple type) should parse and round-trip through code printing.
        cu = torch.jit.CompilationUnit('''
            def foo(x : Tuple[()]) -> Tuple[()]:
                return x
        ''')
        foo_code = cu.find_function('foo').code
        FileCheck().check("Tuple[()]").check("Tuple[()]").run(foo_code)
    def test_parse_empty_tuple_annotation_element_error(self):
        # A tuple *literal* with elements inside a Tuple annotation is invalid.
        with self.assertRaisesRegex(
                RuntimeError, 'Tuple literal in Tuple type annotation must not have any elements'):
            cu = torch.jit.CompilationUnit('''
                def foo(x : Tuple[(int,)]) -> Tuple[(int,)]:
                    return x
            ''')
    def test_parse_none_type_annotation(self):
        # NoneType annotations should parse and print back as NoneType.
        cu = torch.jit.CompilationUnit('''
            def foo(x : NoneType) -> NoneType:
                return x
        ''')
        foo_code = cu.find_function('foo').code
        FileCheck().check(": NoneType").check("-> NoneType").run(foo_code)
    def test_empty_tuple_str(self):
        # annotation_str of the empty TupleType must eval back to typing.Tuple[()].
        empty_tuple_type = torch._C.TupleType([])
        g = {'Tuple' : typing.Tuple}
        python_type = eval(empty_tuple_type.annotation_str, g)
        assert python_type is typing.Tuple[()]
    def test_none_type_str(self):
        # annotation_str of NoneType must eval back to type(None).
        none_type = torch._C.NoneType.get()
        g = {'NoneType' : type(None)}
        python_type = eval(none_type.annotation_str, g)
        assert python_type is type(None)
    @skipIfTorchDynamo("TorchDynamo fails with unknown reason")
    def test_zip_enumerate_modulelist(self):
        # zip() over ModuleLists (and over ModuleLists mixed with constant lists)
        # must stop at the shortest sequence, for all length combinations.
        class Sub(torch.nn.Module):
            def __init__(self):
                super(Sub, self).__init__()
            def forward(self, thing):
                return thing - 2
        class Double(torch.nn.Module):
            def __init__(self):
                super(Double, self).__init__()
            def forward(self, thing):
                return thing * 2
        # zipping over two
        class ZipModLists(torch.nn.Module):
            def __init__(self, mods, mods2):
                super(ZipModLists, self).__init__()
                self.mods = mods
                self.mods2 = mods2
            def forward(self, x):
                iter = 0
                for mod1, mod2 in zip(self.mods, self.mods2):
                    x = mod2(mod1(x))
                    iter += 1
                return x, iter
        class ZipWithValues(torch.nn.Module):
            __constants__ = ['tup_larger', 'tup_smaller']
            def __init__(self, mods, mods2):
                super(ZipWithValues, self).__init__()
                self.mods = mods
                self.mods2 = mods2
                self.tup_larger = list(range(len(mods2) + 1))
                self.tup_smaller = list(range(max(len(mods2) + 1, 1)))
            def forward(self, x):
                iter = 0
                x2 = x
                for val, mod1, mod2 in zip(self.tup_larger, self.mods, self.mods2):
                    x = mod2(mod1(x)) + val
                    iter += 1
                for val, mod1, mod2 in zip(self.tup_smaller, self.mods, self.mods2):
                    x2 = mod2(mod1(x2)) + val
                    iter += 1
                return x, iter
        mods = nn.ModuleList([Double()]), nn.ModuleList([Double(), Sub(), Sub()]), nn.ModuleList([Sub(), Double()])
        # exercise every pairing of list lengths (1, 3, 2)
        for i in range(len(mods)):
            for j in range(len(mods)):
                mod = ZipModLists(mods[i], mods[j])
                self.checkModule(mod, (torch.tensor(.5),))
                mod2 = ZipWithValues(mods[i], mods[j])
                self.checkModule(mod2, (torch.tensor(.5),))
    def test_enumerate_modlist_range(self):
        # enumerate() over a ModuleList works; zipping a ModuleList against a
        # dynamically sized range() is rejected (no static length).
        class Double(torch.nn.Module):
            def forward(self, thing):
                return thing * 2
        class Mod(torch.nn.Module):
            def __init__(self):
                super(Mod, self).__init__()
                self.mods = nn.ModuleList([Double(), Double()])
            def forward(self, x):
                x2 = x
                iter = 0
                for val, mod in enumerate(self.mods):
                    x2 = mod(x2) * val
                    iter += 1
                return iter, x, x2
        self.checkModule(Mod(), (torch.tensor(.5),))
        # variable length, modulelist
        class Mod2(Mod):
            def forward(self, x):
                for val, mod in zip(range(int(x)), self.mods):
                    x = mod(x) * val
                return x
        with self.assertRaisesRegex(Exception, "that does not have a statically determinable length"):
            torch.jit.script(Mod2())
        # modulelist, variable length
        class Mod3(Mod):
            def forward(self, x):
                for val, mod in zip(self.mods, range(int(x))):
                    x = mod(x) * val
                return x
        with self.assertRaisesRegex(Exception, "that does not have a statically determinable length"):
            torch.jit.script(Mod3())
    def test_for_in_enumerate(self):
        # enumerate() support: default start, positional/keyword start, nesting,
        # and arity validation (0 or >2 args are compile errors).
        def fn(x):
            # type: (List[int]) -> int
            sum = 0
            for (i, v) in enumerate(x):
                sum += i * v
            return sum
        self.checkScript(fn, ([1, 2, 3, 4, 5],))
        def fn_enumerate_start_arg(x):
            # type: (List[int]) -> int
            sum = 0
            for (i, v) in enumerate(x, 1):
                sum += i * v
            return sum
        self.checkScript(fn_enumerate_start_arg, ([1, 2, 3, 4, 5],))
        def fn_enumerate_start_kwarg(x):
            # type: (List[int]) -> int
            sum = 0
            for (i, v) in enumerate(x, start=1):
                sum += i * v
            return sum
        self.checkScript(fn_enumerate_start_kwarg, ([1, 2, 3, 4, 5],))
        def fn_nested_enumerate(x):
            # type: (List[int]) -> int
            sum = 0
            for (i, (j, v)) in enumerate(enumerate(x)):
                sum += i * j * v
            return sum
        self.checkScript(fn_nested_enumerate, ([1, 2, 3, 4, 5],))
        with self.assertRaisesRegex(RuntimeError, r'enumerate expected at least 1 arguments, got 0'):
            @torch.jit.script
            def enumerate_no_arg(x):
                # type: (List[int]) -> int
                sum = 0
                for _ in enumerate():
                    sum += 1
                return sum
        with self.assertRaisesRegex(RuntimeError, r'enumerate expected at most 2 arguments, got 3'):
            @torch.jit.script
            def enumerate_too_many_args(x):
                # type: (List[int]) -> int
                sum = 0
                for _ in enumerate(x, x, x):
                    sum += 1
                return sum
    def test_list_comprehension_modulelist(self):
        # List comprehensions over ModuleLists: annotated and unannotated forms,
        # empty lists, and the error for a non-List annotation.
        class Inner(torch.nn.Module):
            def forward(self, x):
                return x + 10
        class M(torch.nn.Module):
            def __init__(self, mod_list):
                super(M, self).__init__()
                self.module_list = mod_list
            def forward(self, x):
                out = torch.jit.annotate(List[Tensor], [mod(x) for mod in self.module_list])
                return out
        mod = M(nn.ModuleList([Inner(), Inner()]))
        self.checkModule(mod, (torch.tensor(3),))
        mod = M(nn.ModuleList([]))
        torch.jit.script(mod)
        class M2(M):
            def __init__(self, mod_list):
                super(M2, self).__init__(mod_list)
            def forward(self, x):
                out = [mod(x) for mod in self.module_list]
                return out
        mod = M2(nn.ModuleList([Inner(), Inner()]))
        self.checkModule(mod, (torch.tensor(3),))
        mod = M2(nn.ModuleList([]))
        # defaults to List of Tensor for empty modulelist
        self.assertEqual(torch.jit.script(mod)(torch.tensor(.5)), [])
        def bad_type_annotation():
            out = torch.jit.annotate(int, [x for x in [1, 2, 3]])  # noqa: C416
            return out
        with self.assertRaisesRegex(Exception, "Expected an annotation"
                                    " of type List"):
            torch.jit.script(bad_type_annotation)
    def test_list_comprehension_variable_write(self):
        # i in comprehension doesn't write to function scope
        def foo():
            i = 1
            x = [i if i != 5 else 3 for i in range(7)]  # noqa: C416
            return i, x
        self.assertEqual(foo(), torch.jit.script(foo)())
    def test_for_in_zip(self):
        # zip() support: two lists, three lists, nested zip, and arity/unpack errors.
        def fn(x, y):
            # type: (List[int], List[int]) -> int
            sum = 0
            for (i, j) in zip(x, y):
                sum += i * j
            return sum
        self.checkScript(fn, ([1, 2, 3, 4, 5], [2, 3, 4, 5, 6]))
        def fn_multi_inputs(x, y, z):
            # type: (List[int], List[int], List[int]) -> int
            sum = 0
            for (i, j, k) in zip(x, y, z):
                sum += i * j * k
            return sum
        self.checkScript(fn_multi_inputs, ([1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6]))
        def fn_nested_zip(x, y, z):
            # type: (List[int], List[int], List[int]) -> int
            sum = 0
            for (i, (j, k)) in zip(x, zip(y, z)):
                sum += i * j * k
            return sum
        self.checkScript(fn_multi_inputs, ([1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6]))
        with self.assertRaisesRegex(RuntimeError, r'zip expected at least 1 arguments, got 0'):
            @torch.jit.script
            def zip_no_arg(x):
                # type: (List[int]) -> int
                sum = 0
                for _ in zip():
                    sum += 1
                return sum
        with self.assertRaisesRegex(RuntimeError, r'too many values to unpack: need 2 but found 3'):
            @torch.jit.script
            def fn_nested_zip_wrong_target_assign(x, y, z):
                # type: (List[int], List[int], List[int]) -> int
                sum = 0
                for (i, (j, k)) in zip(x, y, z):
                    sum += i * j * k
                return sum
    def test_for_in_zip_enumerate(self):
        # zip() composed with enumerate() and range(), in both nesting orders.
        def fn_zip_enumerate(x, y):
            # type: (List[int], List[int]) -> int
            sum = 0
            for (i, (j, v), k) in zip(x, enumerate(y), range(0, 100)):
                sum += i * j * v * k
            return sum
        self.checkScript(fn_zip_enumerate, ([1, 2, 3, 4], [2, 3, 4, 5]))
        def fn_enumerate_zip(x, y):
            # type: (List[int], List[int]) -> int
            sum = 0
            for (i, (j, v)) in enumerate(zip(x, y)):
                sum += i * j * v
            return sum
        self.checkScript(fn_enumerate_zip, ([1, 2, 3, 4], [2, 3, 4, 5]))
    def test_for_in_tensors(self):
        # Iterating a tensor yields its dim-0 slices; empty tensors iterate zero times.
        def test_sizes(x):
            sumz = 0
            for s in x:
                sumz += 1
            return sumz
        self.checkScript(test_sizes, (torch.rand(5, 4, 3, 2, 1),))
        self.checkScript(test_sizes, (torch.rand(777),))
        self.checkScript(test_sizes, (torch.rand(0),))
    def test_for_in_tensors_rank0(self):
        # Iterating a 0-d tensor is a runtime error.
        with self.assertRaisesRegex(RuntimeError, "of a 0-d tensor"):
            @torch.jit.script
            def test_sizes(x):
                sumz = 0
                for s in x:
                    sumz += 1
                return sumz
            test_sizes(torch.tensor(1))
    def test_for_in_tensors_fail_scalar(self):
        # Iterating a plain float is rejected, matching Python's TypeError wording.
        with self.assertRaisesRegex(RuntimeError, "'float' object is not iterable"):
            @torch.jit.script
            def test_sizes(x):
                # type: (float) -> int
                sumz = 0
                for s in x:
                    sumz += 1
                return sumz
            test_sizes(0.0)
    def test_for_in_tensors_nested(self):
        # Nested iteration peels off one dimension per loop level.
        def test_sizes(x):
            sumz = 0
            for n in x:
                for t in n:
                    sumz += 1
            return sumz
        self.checkScript(test_sizes, (torch.rand(5, 4, 3, 2, 1),))
# to avoid defining sum_list in multiple tests
def get_sum_list_fn(self):
def sum_list(a):
# type: (List[int]) -> int
sum = 0
for i in a:
sum += i
return sum
return sum_list
    def test_sum_list_diff_elms(self):
        # Multi-element list.
        self.checkScript(self.get_sum_list_fn(), ([1, 2, 3, 4, 5],))
    def test_sum_list_empty(self):
        # Empty list sums to zero.
        self.checkScript(self.get_sum_list_fn(), ([],))
    def test_sum_list_one(self):
        # Single-element list.
        self.checkScript(self.get_sum_list_fn(), ([1],))
    def test_sum_list_literal(self):
        # Iterating a list literal directly.
        def sum_list():
            # type: () -> int
            sum = 0
            for i in [1, 2, 3, 4, 5]:
                sum += i
            return sum
        self.checkScript(sum_list, ())
    def test_sum_list_wrong_type(self):
        # Iterating an int is rejected with Python's TypeError wording.
        with self.assertRaisesRegex(RuntimeError, "'int' object is not iterable"):
            @torch.jit.script
            def sum_list(a):
                # type: (int) -> int
                sum = 0
                for i in a:  # noqa: T484
                    sum += i
                return sum
            sum_list(1)
    def test_list_iterables(self):
        # Iterating several list literals in one for statement is unsupported.
        with self.assertRaisesRegex(RuntimeError, 'List of iterables is not supported currently'):
            cu = torch.jit.CompilationUnit('''
            def list_iterables(x):
                for i, j in [2, 3, 4], [5, 6, 7]:
                    x += i
                    x += j
                return x
            ''')
    def test_for_in_string(self):
        # Iterating a string yields its characters; also covers List[str] iteration.
        def test_strings(x):
            # type: (str) -> str
            reverse = ""
            for c in x:
                reverse = c + reverse
            return reverse
        self.checkScript(test_strings, ("hello",))
        self.checkScript(test_strings, ("",))
        def test_list_strings(x):
            # type: (List[str]) -> str
            result = ""
            for sub_str in x:
                result += sub_str
            return result
        self.checkScript(test_list_strings, (["hello", "world"],))
        self.checkScript(test_list_strings, (["hello", " ", "world", ""],))
def test_for_in_dict(self):
def test_dicts(x):
# type: (Dict[str, int]) -> int
sum = 0
for key in x:
sum += x[key]
return sum
self.checkScript(test_dicts, ({"a": 1, "b": 2, "c": 3},))
def test_dict_keys_values(x):
# type: (Dict[str, int]) -> Tuple[str, int]
key_str = ""
sum = 0
for key in x.keys():
key_str += key
for val in x.values():
sum += val
return key_str, sum
self.checkScript(test_dicts, ({"a": 1, "b": 2, "c": 3},))
    def test_for_tuple_unpack(self):
        # Unpacking pairs from a list-of-lists, and triple unpacking from
        # zip(x, enumerate(x), y).
        def for_tuple_unpack(x, y):
            for i, j in [[3, 4], [5, 6], [7, 8]]:
                x += i
                y += j
            return x, y
        self.checkScript(for_tuple_unpack, (torch.tensor(3), torch.tensor(5)))
        def nested_tuple_unpack(x, y):
            # type: (List[int], List[int]) -> int
            sum = 0
            for i, (j, k), v in zip(x, enumerate(x), y):
                sum += i + j + k + v
            return sum
        self.checkScript(nested_tuple_unpack, ([1, 3, 5], [2, 4, 6]))
    def test_for_tuple_assign(self):
        # Iterating a heterogeneous tuple, and a tuple of tuples.
        def test_simple_assign(x):
            # type: (Tuple[int, float]) -> float
            sum = 0.0
            for a in x:
                sum += float(a)
            return sum
        self.checkScript(test_simple_assign, ((1, 2.5),))
        def test_tuple_assign(x):
            # type: (Tuple[Tuple[int, int], Tuple[int, int]]) -> int
            sum = 0
            for a in x:
                sum += a[0]
                sum += a[1]
            return sum
        self.checkScript(test_tuple_assign, (((1, 2), (4, 7)), ))
    def test_single_starred_lhs(self):
        # A lone starred target (`*b, = a`) is rejected; a star must accompany
        # at least one non-starred target.
        with self.assertRaisesRegex(RuntimeError, 'A Starred expression may only appear on the lhs within the presence'
                                                  ' of another non-starred expression'):
            cu = torch.jit.CompilationUnit('''
            def single_starred_lhs(x):
                a = (x, x, x)
                *b, = a
                return b
            ''')
    def test_singleton_tuple_unpack(self):
        # Unpacking a one-element tuple with the `b, =` form.
        def foo(a):
            b, = (a,)
            return b + 1
        self.checkScript(foo, (torch.rand(3),))
    def test_tuple_assignments(self):
        # Tuple assignment variants: nested targets, subscript targets, starred
        # targets, the (unsupported) tuple augmented-assign, and attribute targets.
        def var_tuple_assign(x, y):
            # type: (Tuple[Tensor, Tensor], Tensor) -> Tensor
            (a, b), c = x, y
            return a + b + c
        tuple_inputs = (torch.randn(1, 4), torch.randn(3, 4))
        self.checkScript(var_tuple_assign, (tuple_inputs, torch.randn(3, 4)))
        def nested_tuple_assign(x, y, z):
            # type: (int, Tuple[int, Tuple[int, int]], Tuple[int, int]) -> int
            a, (b, (c, d)), (e, f) = x, y, z
            return a + b + c + d + e + f
        self.checkScript(nested_tuple_assign, ((1, (2, (3, 4)), (5, 6))))
        def subscript_tuple_assign(a, x, i):
            # type: (List[int], Tensor, int) -> Tuple[int, Tensor, int]
            a[i], (x[i], b) = 1, (2, 3)
            return a[i] + 1, x + 5, b
        self.checkScript(subscript_tuple_assign, ([12, 7, 9, 11], torch.tensor((3, 13, 17)), 0))
        def star_tuple_assign():
            # type: () -> Tuple[int, int, Tuple[int, int], Tuple[int, int]]
            a, (b, *c), *d = 1, (2, 3, 4), 5, 6
            return a, b, c, d
        self.checkScript(star_tuple_assign, ())
        def subscript_tuple_augmented_assign(a):
            # type: (Tuple[int, int]) -> Tuple[int, int]
            a[0] += 1
            return a
        with self.assertRaisesRegex(RuntimeError, 'does not support augmented assign'):
            scripted_aug_assign = torch.jit.script(subscript_tuple_augmented_assign)
        class AttrTupleAssignmentTestClass:
            def __init__(self, a: int, b: int):
                self.a = a
                self.b = b
            def set_ab(self, a: int, b: int):
                self.a, self.b = (a, b)
            def get(self) -> Tuple[int, int]:
                return (self.a, self.b)
        make_global(AttrTupleAssignmentTestClass)
        @torch.jit.script
        def attr_tuple_assignment(o: AttrTupleAssignmentTestClass, a: int, b: int):
            o.set_ab(a, b)
            return o
        o = AttrTupleAssignmentTestClass(1, 2)
        self.assertEqual(attr_tuple_assignment(o, 3, 4).get(), (3, 4))
    def test_multiple_assign(self):
        # Chained assignment (a = b, c = ...), shared in-place RHS, and
        # right-hand-side evaluation order for swaps.
        def test():
            a = b, c = d, f = (1, 1)
            # side effect
            ten = torch.tensor(1)
            ten1 = ten2 = ten.add_(1)
            # ordering
            x = 1
            y = 3
            x, y = y, x + y
            return a, b, c, d, f, ten, ten1, ten2, x, y
        self.checkScript(test, ())
    def test_multi_reduction(self):
        # Augmented assignment to a tuple target is a compile error.
        with self.assertRaisesRegex(
                RuntimeError,
                'augmented assignment can only have one LHS expression'):
            cu = torch.jit.CompilationUnit('''
            def multi_reduction(x):
                a, b += x
                return a, b
            ''')
    def test_invalid_call_arguments(self):
        # Passing too many / mistyped arguments to an overloaded op reports types.
        with self.assertRaisesRegex(RuntimeError, 'but instead found type '):
            @torch.jit.script
            def invalid_call_arguments(x):
                return torch.unsqueeze(3, 4, 5, 6, 7, 8)
    def test_invalid_lhs_assignment(self):
        # An arbitrary expression on the LHS of `=` is rejected.
        with self.assertRaisesRegex(RuntimeError, 'unexpected expression'):
            cu = torch.jit.CompilationUnit('''
            def invalid_lhs_assignment(x):
                x + 1 = x
                return x
            ''')
    def test_multi_starred_expr_lhs(self):
        # At most one starred target is allowed per assignment.
        with self.assertRaisesRegex(RuntimeError, 'Only one starred expression is allowed on the lhs'):
            cu = torch.jit.CompilationUnit('''
            def multi_starred_expr_lhs():
                a, *b, *c = [1, 2, 3, 4, 5, 6]
                return a
            ''')
    def test_pack_tuple_into_non_var(self):
        # A starred non-variable target (e.g. *1) is rejected.
        with self.assertRaisesRegex(RuntimeError, 'Cannot pack a tuple into a non-variable'):
            cu = torch.jit.CompilationUnit('''
            def pack_tuple_into_non_var(x):
                a, *1 = (3, 4, 5)
                return x
            ''')
    def test_print_kwargs(self):
        # print() in TorchScript takes no keyword arguments.
        with self.assertRaisesRegex(RuntimeError, 'print doesn\'t accept any keyword arguments'):
            cu = torch.jit.CompilationUnit('''
            def print_kwargs(x):
                print(x, flush=True)
                return x
            ''')
    def test_builtin_use_as_value(self):
        # A builtin method reference cannot be returned as a first-class value.
        with self.assertRaisesRegex(RuntimeError, 'builtin cannot be used as a value'):
            @torch.jit.script
            def builtin_use_as_value(x):
                return x.unsqueeze
    def test_wrong_use_as_tuple(self):
        # A Python function object cannot be unpacked like a tuple.
        with self.assertRaisesRegex(RuntimeError, 'cannot be used as a tuple'):
            def test_fn():
                return 3
            @torch.jit.script
            def wrong_use_as_tuple(self):
                a, b = test_fn
                return a
    def test_wrong_attr_lookup(self):
        # Attribute access on a builtin method reference is rejected.
        with self.assertRaisesRegex(RuntimeError, 'attribute lookup is not defined on builtin'):
            @torch.jit.script
            def wrong_attr_lookup(self, x):
                a = x.unsqueeze.myattr
                return a
    def test_wrong_use_as_callable(self):
        # Calling a tensor value like a function is rejected.
        with self.assertRaisesRegex(RuntimeError, 'cannot call a value'):
            @torch.jit.script
            def wrong_use_as_callable(x):
                return x(3, 4, 5)
    def test_python_val_doesnt_have_attr(self):
        # Missing attributes on captured Python modules report a clear error.
        with self.assertRaisesRegex(RuntimeError, 'object has no attribute abcd'):
            @torch.jit.script
            def python_val_doesnt_have_attr():
                # this has to be a module otherwise attr lookup would not be
                # allowed in the first place
                return shutil.abcd
    def test_wrong_module_attr_lookup(self):
        # A Python class object (io.BytesIO) cannot be used as a TorchScript value.
        with self.assertRaisesRegex(RuntimeError, 'python value of type \'type\' cannot be used as a value'):
            import io
            @torch.jit.script
            def wrong_module_attr_lookup():
                return io.BytesIO
    def test_wrong_method_call_inputs(self):
        # Calling a script method with a missing argument names the argument.
        with self.assertRaisesRegex(RuntimeError, 'Argument y not provided'):
            class SomeModule(torch.jit.ScriptModule):
                @torch.jit.script_method
                def foo(self, x, y):
                    return x
                @torch.jit.script_method
                def forward(self, x, y):
                    return self.foo(x)
            SomeModule()
    def test_single_starred_expr_for_loop(self):
        # A lone starred target in a for-loop header is rejected.
        with self.assertRaisesRegex(RuntimeError, 'A Starred expression may only appear'):
            cu = torch.jit.CompilationUnit('''
            def test():
                x = 0
                for *a in [1, 2, 3]:
                    x = x + 1
                return x
            ''')
    def test_call_ge(self):
        # Calling a traced function with the wrong arity reports the expected count.
        with self.assertRaisesRegex(RuntimeError, 'Expected at most 1 arguments but found 3'):
            @_trace(torch.zeros(1, 2, 3))
            def foo(x):
                return x
            @torch.jit.script
            def test_fn():
                return foo(torch.full([1], 1), torch.full([1], 2), torch.full([1], 3))
    def test_wrong_return_type(self):
        # A value whose runtime type contradicts the annotated return type errors out.
        with self.assertRaisesRegex(RuntimeError, 'but instead got value of type tuple'):
            @torch.jit.ignore
            def somefunc():
                # type: () -> Tuple[Tuple[Tensor, Tensor]]
                return torch.zeros(3, 4), torch.zeros(4, 5)  # noqa: T484
            @torch.jit.script
            def wrong_return_type():
                return somefunc()
            wrong_return_type()
# Tests for calling between different front-end modes
def test_call_python_fn_from_tracing_fn(self):
def python_fn(x):
return torch.neg(x)
@_trace(torch.rand(3, 4))
def traced_fn(x):
return python_fn(x) + 1
# The neg op in the python function should be properly inlined to the
# graph
FileCheck().check("aten::neg").run(str(traced_fn.graph))
def test_call_python_mod_from_tracing_fn(self):
class PythonMod(torch.nn.Module):
def __init__(self):
super(PythonMod, self).__init__()
self.param = torch.nn.Parameter(torch.rand(4, 3), requires_grad=False)
def forward(self, x):
return torch.mm(x, self.param)
pm = PythonMod()
@_trace(torch.rand(3, 4))
def traced_fn(x):
return pm(x) + 1.0
# Note: the parameter self.param from the Python module is inlined
# into the graph
self.assertTrue(len(list(traced_fn.graph.inputs())) == 1)
FileCheck().check("aten::mm").check("aten::add").run(str(traced_fn.graph))
@_tmp_donotuse_dont_inline_everything
def test_call_traced_fn_from_tracing_fn(self):
@_trace(torch.rand(3, 4))
def traced_fn1(x):
return torch.neg(x)
@_trace(torch.rand(3, 4))
def traced_fn(x):
return traced_fn1(x) + 1
FileCheck().check("traced_fn").check("prim::CallFunction").check("aten::add") \
.run(str(traced_fn.graph))
@unittest.skip("error in first class mode")
def test_call_traced_mod_from_tracing_fn(self):
    """Calling a traced module that is not a submodule of the caller errors."""
    class TracedModule(torch.nn.Module):
        def __init__(self):
            super(TracedModule, self).__init__()
            self.param = torch.nn.Parameter(torch.rand(4, 3), requires_grad=False)

        def forward(self, x):
            return torch.mm(x, self.param)

    tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))

    with self.assertRaisesRegex(RuntimeError, "must be registered as submodules"):
        @_trace(torch.rand(3, 4))
        def traced_fn(x):
            return tm(x) + 1.0
@_tmp_donotuse_dont_inline_everything
def test_call_script_fn_from_tracing_fn(self):
    """A scripted fn called from a traced fn appears as a prim::CallFunction."""
    @torch.jit.script
    def script_fn(x):
        return torch.neg(x)

    @_trace(torch.rand(3, 4))
    def traced_fn(x):
        return script_fn(x) + 1

    FileCheck().check("prim::CallFunction").check("aten::add").run(str(traced_fn.graph))
@unittest.skip("error in first class mode")
def test_call_script_mod_from_tracing_fn(self):
    """Calling a ScriptModule that is not a registered submodule errors."""
    with self.assertRaisesRegex(RuntimeError, "must be registered as submodules"):
        class ScriptMod(torch.jit.ScriptModule):
            def __init__(self):
                super(ScriptMod, self).__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4), requires_grad=False)

            @torch.jit.script_method
            def forward(self, x):
                for _i in range(4):
                    x += self.param
                return x

        sm = ScriptMod()

        @_trace(torch.rand(3, 4))
        def traced_fn(x):
            return sm(x) + 1.0
def test_call_python_fn_from_traced_module(self):
    """Tracing a module that calls a Python fn inlines the fn's ops."""
    def python_fn(x):
        return torch.neg(x)

    class TracedModule(torch.nn.Module):
        def __init__(self):
            super(TracedModule, self).__init__()
            self.param = torch.nn.Parameter(torch.rand(4, 3))

        def forward(self, x):
            return torch.mm(python_fn(x), self.param)

    tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))

    # Note: parameter self.param from the traced module should appear as
    # an input to the graph and the neg op from the Python function should
    # be properly inlined
    self.assertTrue(len(list(tm.graph.inputs())) == 2)
    FileCheck().check("aten::neg").check("aten::mm").run(str(tm.graph))
def test_call_python_mod_from_traced_module(self):
    """A Python submodule of a traced module becomes a prim::CallMethod."""
    class PythonModule(torch.nn.Module):
        def __init__(self):
            super(PythonModule, self).__init__()
            self.param = torch.nn.Parameter(torch.rand(5, 7))

        def forward(self, x):
            return torch.mm(x, self.param)

    class TracedModule(torch.nn.Module):
        def __init__(self):
            super(TracedModule, self).__init__()
            self.param = torch.nn.Parameter(torch.rand(4, 5))
            self.mod = PythonModule()

        def forward(self, x):
            return self.mod(torch.mm(x, self.param)) + 1.0

    tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))

    # Parameters are not inlined as constants; the submodule is invoked
    # through a method call.
    FileCheck().check_not("value=<Tensor>").check("aten::mm")\
        .check("prim::CallMethod[name=\"forward\"]").check("aten::add") \
        .run(str(tm.graph))
    FileCheck().check("aten::mm").run(str(tm.mod.graph))
def test_op_dtype(self):
    """Scripted torch.arange variants must match eager in value AND dtype."""
    def check_equal_and_dtype(a, b):
        self.assertEqual(a, b)
        self.assertEqual(a.dtype, b.dtype)

    def fn():
        # Cover integer/float endpoints with and without explicit dtype,
        # since arange's inferred dtype depends on the argument types.
        a = torch.arange(10)
        b = torch.arange(10, dtype=torch.float)
        c = torch.arange(1, 10, 2)
        d = torch.arange(1, 10, 2, dtype=torch.float)
        e = torch.arange(1, 10., 2)
        f = torch.arange(1, 10., 2, dtype=torch.float)
        return a, b, c, d, e, f

    scripted_fn = torch.jit.script(fn)
    eager_out = fn()
    script_out = scripted_fn()
    for a, b in zip(eager_out, script_out):
        check_equal_and_dtype(a, b)
def test_floor_div(self):
    """Scripted int // int must match Python floor division, incl. negatives."""
    @torch.jit.script
    def foo(a, b):
        # type: (int, int) -> int
        return a // b

    for i in range(-8, 8):
        for j in range(-8, 8):
            if j != 0:
                self.assertEqual(foo(i, j), i // j)
def test_floordiv(self):
    """Tensor // tensor-or-scalar agrees between string-frontend script and eager."""
    funcs_template = dedent('''
    def fn():
        ten = {a_construct}
        ten_or_scalar = {b_construct}
        return ten // ten_or_scalar, torch.floor_divide(ten, ten_or_scalar)
    ''')

    # Cross float/int tensors against scalar and tensor divisors.
    lhs = ["torch.tensor([5.5, 3.2])", "torch.tensor([2, 2])", "torch.tensor([3, 2])"]
    rhs = ["1.5", "2", "4", "1.1"] + lhs
    for tensor in lhs:
        for tensor_or_scalar in rhs:
            funcs_str = funcs_template.format(a_construct=tensor, b_construct=tensor_or_scalar)
            scope = {}
            execWrapper(funcs_str, globals(), scope)
            cu = torch.jit.CompilationUnit(funcs_str)
            f_script = cu.fn
            f = scope['fn']
            self.assertEqual(f_script(), f())
def test_call_python_fn_from_script_fn(self):
    """An @ignore'd Python fn called from script becomes a PythonOp."""
    @torch.jit.ignore
    def python_fn(x):
        return torch.neg(x)

    @torch.jit.script
    def script_fn(x):
        return python_fn(x) + 1

    # Note: the call to python_fn appears as `^python_fn()` and is called
    # as a PythonOp in the interpreter
    a = torch.tensor(1)
    self.assertEqual(script_fn(a), torch.tensor(0))
    FileCheck().check("python_fn").run(str(script_fn.graph))
def test_call_python_mod_from_script_fn(self):
    """A Python nn.Module called from a scripted fn appears as a python_value."""
    class PythonModule(torch.nn.Module):
        def __init__(self):
            super(PythonModule, self).__init__()
            self.param = torch.nn.Parameter(torch.rand(5, 7))

        def forward(self, x):
            return torch.mm(x, self.param)

    pm = PythonModule()

    @torch.jit.script
    def script_fn(x):
        return pm(x) + 1

    # Note: call to pm(x) appears as ^<python_value>() in the trace.
    # Parameters are NOT inlined.
    FileCheck().check("python_value").check("aten::add").run(str(script_fn.graph))
@_tmp_donotuse_dont_inline_everything
def test_call_script_fn_from_script_fn(self):
    """Script-to-script calls are emitted as prim::CallFunction."""
    @torch.jit.script
    def script_fn1(x):
        return torch.neg(x)

    @torch.jit.script
    def script_fn(x):
        return script_fn1(x) + 1

    FileCheck().check("prim::CallFunction").run(str(script_fn.graph))
def test_call_script_mod_from_script_fn(self):
    """A free-standing ScriptModule cannot be called from a scripted fn."""
    with self.assertRaisesRegex(RuntimeError, "Cannot call a ScriptModule that is not a submodule of the caller"):
        class ScriptMod(torch.jit.ScriptModule):
            def __init__(self):
                super(ScriptMod, self).__init__()

            @torch.jit.script_method
            def forward(self, x):
                return torch.mm(x, torch.zeros([4, 3]))

        sm = ScriptMod()

        @torch.jit.script
        def script_fn(x):
            return sm(x) + 1
def test_call_python_fn_from_script_module(self):
    """An @ignore'd Python fn called from a script_method stays a PythonOp."""
    @torch.jit.ignore
    def python_fn(x):
        return torch.neg(x)

    class ScriptMod(torch.jit.ScriptModule):
        def __init__(self):
            super(ScriptMod, self).__init__()
            self.param = torch.nn.Parameter(torch.rand(4, 3))

        @torch.jit.script_method
        def forward(self, x):
            return python_fn(torch.mm(x, self.param))

    sm = ScriptMod()
    FileCheck().check("aten::mm").check("python_fn") \
        .run(str(sm.forward.graph))
def test_call_python_mod_from_script_module(self):
    """A Python submodule (ignored forward) is invoked via ^forward()."""
    class PythonMod(torch.nn.Module):
        def __init__(self):
            super(PythonMod, self).__init__()
            self.param = torch.nn.Parameter(torch.rand(3, 5))

        @torch.jit.ignore
        def forward(self, x):
            return torch.mm(x, self.param)

    class ScriptMod(torch.jit.ScriptModule):
        def __init__(self):
            super(ScriptMod, self).__init__()
            self.param = torch.nn.Parameter(torch.rand(4, 3))
            self.pm = PythonMod()

        @torch.jit.script_method
        def forward(self, x):
            return self.pm(torch.mm(x, self.param))

    sm = ScriptMod()
    # Note: the call into PythonMod appears as ^forward(). Parameters
    # are NOT inlined
    FileCheck().check("aten::mm").check("forward").run(str(sm.graph))
@_tmp_donotuse_dont_inline_everything
def test_call_script_fn_from_script_module(self):
    """A scripted fn called from a script_method is a prim::CallFunction."""
    @torch.jit.script
    def script_fn(x):
        return torch.neg(x)

    class ScriptMod(torch.jit.ScriptModule):
        def __init__(self):
            super(ScriptMod, self).__init__()
            self.param = torch.nn.Parameter(torch.rand(4, 3))

        @torch.jit.script_method
        def forward(self, x):
            return script_fn(torch.mm(x, self.param))

    sm = ScriptMod()
    graph = (sm.forward.graph)
    FileCheck().check("aten::mm").check("prim::CallFunction").run(str(graph))
@_tmp_donotuse_dont_inline_everything
def test_call_script_mod_from_script_module(self):
    """A ScriptModule submodule is invoked via prim::CallMethod."""
    class ScriptMod1(torch.jit.ScriptModule):
        def __init__(self):
            super(ScriptMod1, self).__init__()
            self.param = torch.nn.Parameter(torch.rand(3, 5))

        @torch.jit.script_method
        def forward(self, x):
            return torch.mm(x, self.param)

    class ScriptMod(torch.jit.ScriptModule):
        def __init__(self):
            super(ScriptMod, self).__init__()
            self.param = torch.nn.Parameter(torch.rand(4, 3))
            self.tm = ScriptMod1()

        @torch.jit.script_method
        def forward(self, x):
            return self.tm(torch.mm(x, self.param))

    sm = ScriptMod()
    # Note: the parameters from both modules should appear in the flattened
    # input list to the graph. The mm op from ScriptMod1 should be properly
    # inlined
    # 3 % values in graph input lists, two mms in body
    FileCheck().check_count('%', 3).check(":").check_count("mm", 1).check("prim::CallMethod").run(str(sm.graph))
def test_module_with_params_called_fails(self):
    """A parameterized ScriptModule cannot be closed over by a scripted fn."""
    with self.assertRaisesRegex(RuntimeError, "Cannot call a ScriptModule that is not a submodule of the caller"):
        class ScriptMod(torch.jit.ScriptModule):
            def __init__(self):
                super(ScriptMod, self).__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 3))

            @torch.jit.script_method
            def forward(self, x):
                return torch.mm(x, self.param)

        sm = ScriptMod()

        @torch.jit.script
        def some_func(x):
            return sm(x)
def test_tuple_index_to_list(self):
    """Dynamic tuple indexing works for uniform element types and fails
    when the element types differ (output type cannot be resolved)."""
    def test_non_constant_input(a):
        # type: (bool) -> int
        if a:
            b = 1
        else:
            b = 0
        c = (0, 1)
        return c[b]

    self.checkScript(test_non_constant_input, (True,))
    self.checkScript(test_non_constant_input, (False,))

    with self.assertRaisesRegex(RuntimeError, "because we cannot resolve the output type"):
        @torch.jit.script
        def test_non_constant_input(a):
            # type: (bool) -> None
            if a:
                b = 1
            else:
                b = 0
            c = (0, 1.1)
            print(c[b])
def test_tuple_indexing(self):
    """Tuple indexing in TorchScript: constant/negative indices, float-index
    rejection, out-of-range errors, and (out-of-range) slicing."""
    def tuple_index(a):
        if bool(a):
            b = (1, 2)
        else:
            b = (0, 2)
        return b[-2], b[1]

    self.checkScript(tuple_index, (torch.tensor([0]),))
    self.checkScript(tuple_index, (torch.tensor([1]),))
    self.checkScript(tuple_index, (torch.tensor([1]),), optimize=True)
    tuple_comp = torch.jit.script(tuple_index)
    FileCheck().check_count("TupleIndex", 2, exactly=True).run(str(tuple_comp.graph))

    with self.assertRaisesRegex(RuntimeError, "index must be an integer"):
        @torch.jit.script
        def test_indexing_float():
            c = (1, 2)
            return c[0.1]

    def test_indexing_out_of_bounds_pos():
        c = (1, 2)
        return c[2]

    self.checkScriptRaisesRegex(test_indexing_out_of_bounds_pos, (), Exception,
                                "out of range")

    def test_indexing_out_of_bounds_neg():
        c = (1, 2)
        return c[-3]

    # BUG FIX: this previously re-checked test_indexing_out_of_bounds_pos,
    # so the negative out-of-bounds case was never exercised.
    self.checkScriptRaisesRegex(test_indexing_out_of_bounds_neg, (), Exception,
                                "out of range")

    def negative_index():
        tup = (1, 2, 3, 4)
        return tup[-1]

    self.checkScript(negative_index, [])

    def really_negative_index():
        tup = (1, 2, 3, 4)
        return tup[-100]

    self.checkScriptRaisesRegex(really_negative_index, [], Exception, "index out of range")

    def negative_slice():
        tup = (1, 2, 3, 4)
        return tup[-3:4]

    self.checkScript(negative_slice, [])

    def really_slice_out_of_bounds():
        # Out-of-range slice bounds are clamped, not an error.
        tup = (1, 2, 3, 4)
        return tup[-300:4000]

    self.checkScript(really_slice_out_of_bounds, [])
def test_namedtuple_attr(self):
    """Named-tuple field access (.indices of max) works in script; unknown
    attributes and tuple dunders produce a clear error."""
    def f(x):
        return x.max(dim=1).indices + torch.max(x, dim=1).indices

    self.checkScript(f, (torch.rand(20, 20, 20),), optimize=True)

    with self.assertRaisesRegex(RuntimeError, "object has no attribute or method"):
        @torch.jit.script
        def g1(x):
            return x.max(dim=1).unknown_symbol

    with self.assertRaisesRegex(RuntimeError, "object has no attribute or method"):
        @torch.jit.script
        def g2(x):
            print((x, x, x).__doc__)
            return x
def test_tuple_len(self):
    """len() of a heterogeneous tuple, and clamped out-of-range tuple slicing."""
    @torch.jit.script
    def foo():
        return len((1, "str", None))

    self.assertEqual(foo(), 3)

    @torch.jit.script
    def test_indexing_end_out_of_bounds():
        c = (1, 2)
        return c[2:10]

    # Slice bounds beyond the tuple are clamped, yielding an empty tuple.
    self.assertEqual(test_indexing_end_out_of_bounds(), ())
def test_lower_nested_tuples(self):
    """Constant propagation folds nested tuple constants so that the
    lower_all_tuples pass can run without error."""
    @torch.jit.script
    def test():
        return ((1, 2), 3)

    self.run_pass('constant_propagation', test.graph)
    FileCheck().check("prim::Constant").check_not("TupleConstruct").run(test.graph)
    # fails if a tuple can't be lowered
    self.run_pass('lower_all_tuples', test.graph)
def test_unwrap_optional_builtin(self):
    """torch.jit._unwrap_optional: unwraps present values, raises on None
    (eager and scripted), and rejects a literal None whose type can't be inferred."""
    def test(x):
        # type: (Optional[int]) -> int
        x = torch.jit._unwrap_optional(x)
        x = x + x  # noqa: T484
        return x

    self.checkScript(test, (3,))

    with self.assertRaisesRegex(AssertionError, "Unwrapping null optional"):
        test(None)

    test_script = torch.jit.script(test)
    with self.assertRaisesRegex(RuntimeError, "Unwrapping null optional"):
        test_script(None)

    # Unwrapping a non-optional value is also allowed and should compile.
    @torch.jit.script
    def test_test():
        return torch.jit._unwrap_optional(1)

    with self.assertRaisesRegex(RuntimeError, r"could not be inferred from actual type None"):
        @torch.jit.script
        def test_no_type():
            # type: () -> int
            return torch.jit._unwrap_optional(None)
def test_indexing_error(self):
    """Indexing a non-subscriptable type (int) is a compile-time error."""
    with self.assertRaisesRegex(RuntimeError, "'int' object is not subscriptable"):
        @torch.jit.script
        def test_wrong_type():
            a = 8
            return a[0]
def test_unsupported_builtin_error(self):
    """An unsupported Python builtin (math.hypot) gives a clear script error."""
    with self.assertRaisesRegex(RuntimeError,
                                "Python builtin <built-in function hypot> is currently"):
        @torch.jit.script
        def test_unsupported(a):
            return math.hypot(a, 2.0)
def test_annotated_script_fn(self):
    """Nested Tuple type comments are reflected in the fn's schema (expect test)."""
    @torch.jit.script
    def foo(x, y, z):
        # type: (Tensor, Tuple[Tensor, Tensor, Tensor], Tuple[Tensor, Tuple[Tensor, Tensor]]) -> Tensor
        return x

    self.assertExpected(str(foo.schema))
def test_annotated_script_method(self):
    """Type comments on a script_method are reflected in its schema."""
    class SM(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x, y):
            # type: (Tuple[Tensor, Tensor], Tensor) -> Tuple[Tensor, Tensor, Tensor]
            return y, y, y

    sm = SM()

    self.assertExpectedStripMangled(str(sm.forward.schema))
def test_annotated_script_fn_return_mismatch(self):
    """A return value that doesn't match the annotated return type errors."""
    with self.assertRaisesRegex(RuntimeError, "but is actually of type"):
        @torch.jit.script
        def return_tup(x):
            # type: (Tensor) -> Tuple[Tuple[Tensor, Tensor], Tensor]
            return x, x  # noqa: T484
def test_annotated_script_fn_arg_mismatch(self):
    """Using a Tuple-typed arg where a Tensor is required errors at compile."""
    with self.assertRaisesRegex(RuntimeError, r"Arguments for call are not valid"):
        @torch.jit.script
        def tuple_arg(x):
            # type: (Tuple[Tensor, Tensor]) -> Tensor
            return x + 1  # noqa: T484
def test_script_non_tensor_args_outputs(self):
    """Scripted fns accept and return non-tensor (float) values."""
    @torch.jit.script
    def fn(x, y):
        # type: (Tensor, float) -> float
        return float((x + y).sum())

    x = torch.ones(2, 2)
    z = fn(x, 1)
    self.assertIsInstance(z, float)
    self.assertEqual(z, 8.)
@unittest.skip('https://github.com/pytorch/pytorch/issues/9595')
def test_inline_and_run_annotated_script_fn(self):
    """Inlining a scripted fn with tuple-typed args (currently skipped)."""
    @torch.jit.script
    def to_inline(x, y):
        # type: (Tuple[Tensor, Tensor], Tensor) -> Tensor
        return y

    @torch.jit.script
    def some_func(x):
        return to_inline((x, x), x)

    x = torch.rand(3, 4)
    self.assertEqual(some_func(x), x)
def test_file_format_serialization(self):
filename = tempfile.mktemp()
writer = torch._C.PyTorchFileWriter(filename)
buffers = [os.urandom(size) for size in [random.randint(1, 100) for i in range(20)]]
offsets = []
for i, buf in enumerate(buffers):
writer.write_record(str(i), buf, len(buf))
offsets.append(i)
serialized_offsets = pickle.dumps(offsets)
writer.write_record("meta", serialized_offsets, len(serialized_offsets))
writer.write_end_of_file()
reader = torch._C.PyTorchFileReader(filename)
serialized_offsets_read = reader.get_record("meta")
parsed_serialized_offsets = pickle.loads(serialized_offsets)
for i, offset in enumerate(parsed_serialized_offsets):
data = reader.get_record(str(offset))
assert(data == buffers[i])
# for each type, the input type annotation and corresponding return type annotation
def type_input_return_pairs(self):
    """Return (input annotation, expected schema return annotation) pairs
    covering tensors, primitives, broadcasting lists, and optionals."""
    inputs = [
        'Tensor',
        'torch.Tensor',
        'str',
        'int',
        'bool',
        'BroadcastingList3[float]',
        'BroadcastingList2[int]',
        'List[int]',
        'Optional[int]',
    ]
    # BroadcastingListN[T] normalizes to List[T]; torch.Tensor to Tensor;
    # everything else maps to itself in the printed schema.
    outputs = [
        'Tensor',
        'Tensor',
        'str',
        'int',
        'bool',
        'List[float]',
        'List[int]',
        'List[int]',
        'Optional[int]',
    ]
    return list(zip(inputs, outputs))
# replacing code input & return type pair
def format_code(self, code, pair):
    """Fill the {input}/{output} placeholders of a code template from a
    (input annotation, output annotation) pair."""
    input_type, output_type = pair
    return code.format(input=input_type, output=output_type)
# ***** Type annotation tests ****
# Test combinations of:
# {String frontend, Python AST Frontend}
# {Python 3-style type annotations, MyPy-style type comments}
# {Script method, Script function}
# String frontend , Python 3-style type annotations , Script function
def test_annot_string_py3_fn(self):
    """String frontend + Python-3-style annotations, script function."""
    code = '''
        def foo(x : {input}, y : Tuple[Tensor, Tensor]) -> Tuple[{output}, {output}]:
            return x, x
    '''
    test_str = []
    for pair in self.type_input_return_pairs():
        cu = torch.jit.CompilationUnit(self.format_code(code, pair))
        test_str.append(str(cu.foo.schema))
    self.assertExpected("\n".join(test_str) + "\n")
# String frontend , Python 3-style type annotations , Script method
def test_annot_string_py3_method(self):
    """String frontend + Python-3-style annotations, script method."""
    class TestModule(torch.jit.ScriptModule):
        def __init__(self):
            super(TestModule, self).__init__()

    code = '''
        def foo(self, x : {input}, y : Tuple[Tensor, Tensor]) -> Tuple[{output}, {output}]:
            return x, x
    '''
    test_str = []
    for pair in self.type_input_return_pairs():
        # clear the class registry as we will be defining foo multiple times
        jit_utils.clear_class_registry()
        tm = TestModule()
        tm.define(self.format_code(code, pair))
        test_str.append(str(tm.foo.schema))
    self.assertExpectedStripMangled("\n".join(test_str) + "\n")
# String frontend , MyPy-style type comments , Script function
def test_annot_string_mypy_fn(self):
    """String frontend + MyPy-style type comments, script function."""
    code = '''
        def foo(x, y):
            # type: ({input}, Tuple[Tensor, Tensor]) -> Tuple[{output}, {output}]
            return x, x
    '''
    test_str = []
    for pair in self.type_input_return_pairs():
        cu = torch.jit.CompilationUnit(self.format_code(code, pair))
        test_str.append(str(cu.foo.schema))
    self.assertExpectedStripMangled("\n".join(test_str) + "\n")
# String frontend , MyPy-style type comments , Script method
def test_annot_string_mypy_method(self):
    """String frontend + MyPy-style type comments, script method."""
    class TestModule(torch.jit.ScriptModule):
        def __init__(self):
            super(TestModule, self).__init__()

    code = '''
        def foo(self, x, y):
            # type: ({input}, Tuple[Tensor, Tensor]) -> Tuple[{output}, {output}]
            return x, x
    '''
    test_str = []
    for pair in self.type_input_return_pairs():
        # clear the class registry as we will be defining foo multiple times
        jit_utils.clear_class_registry()
        tm = TestModule()
        tm.define(self.format_code(code, pair))
        test_str.append(str(tm.foo.schema))
    self.assertExpectedStripMangled("\n".join(test_str) + "\n")
# Python AST Frontend , Python 3-style type annotations , Script function
def test_annot_ast_py3_fn(self):
    """Python AST frontend + Python-3-style annotations, script function."""
    code = dedent('''
    from typing import Tuple, List, Optional
    from torch import Tensor
    from torch.jit.annotations import BroadcastingList2, BroadcastingList3
    import torch
    @torch.jit.script
    def foo(x : {input}, y : Tuple[Tensor, Tensor]) -> Tuple[{output}, {output}]:
        return x, x
    ''')
    test_str = []
    for pair in self.type_input_return_pairs():
        fn = jit_utils._get_py3_code(self.format_code(code, pair), 'foo')
        test_str.append(str(fn.schema))
    self.assertExpectedStripMangled("\n".join(test_str) + "\n")
def test_multiline_annot_ast_py3_fn(self):
    """Per-argument type comments spread over multiple lines are parsed
    into the correct schema argument and return types."""
    code = dedent('''
    from typing import Tuple, List, Optional
    from torch import Tensor
    from torch.jit.annotations import BroadcastingList2, BroadcastingList3
    import torch
    @torch.jit.script
    def foo(x, # type: {input}
            y # type: Tuple[Tensor, Tensor]
            ):
        # type: (...) -> Tuple[{output}, {output}]
        return x, x
    ''')
    test_str = []
    for pair in self.type_input_return_pairs():
        fn = jit_utils._get_py3_code(self.format_code(code, pair), 'foo')
        args = fn.schema.arguments
        returns = fn.schema.returns
        self.assertEqual(str(args[0].type), pair[1])
        self.assertEqual(str(args[1].type), "Tuple[Tensor, Tensor]")
        self.assertEqual(str(returns[0].type), "Tuple[{}, {}]".format(pair[1], pair[1]))
def test_bad_multiline_annotations(self):
    """Malformed multi-line type comments produce clear compile errors."""
    with self.assertRaisesRegex(RuntimeError, "Return type line"):
        @torch.jit.script
        def bad_type_line(a,  # type: Tensor
                          b,  # type: Tensor
                          c   # type: Tensor
                          ):
            # type: (int, int, int) -> Tensor
            # type: bad type line  # noqa: F723
            return a + b + c

    with self.assertRaisesRegex(RuntimeError, "Return type line"):
        @torch.jit.script
        def bad_return_line(a,  # type: Tensor
                            b,
                            c   # type: Tensor
                            ):
            # type: (int, int, int) -> Tensor
            return a + b + c

    # TODO: this should be supported but is difficult to parse
    with self.assertRaisesRegex(RuntimeError, "Number of type annotations"):
        @torch.jit.script
        def missing_type(a,  # type: Tensor
                         b,
                         c   # type: Tensor
                         ):
            # type: (...) -> Tensor
            return a + b + c
# Python AST Frontend , Python 3-style type annotations , Script method
def test_annot_ast_py3_method(self):
    """Python AST frontend + Python-3-style annotations, script method."""
    code = dedent('''
    from typing import Tuple, List, Optional
    from torch import Tensor
    from torch.jit.annotations import BroadcastingList2, \\
        BroadcastingList3
    import torch
    class FooModule(torch.jit.ScriptModule):
        @torch.jit.script_method
        def foo(self, x : {input}, y : Tuple[Tensor, Tensor]) -> Tuple[{output}, {output}]:
            return x, x
    instance = FooModule()
    ''')
    test_str = []
    for pair in self.type_input_return_pairs():
        fn = jit_utils._get_py3_code(self.format_code(code, pair), 'instance')
        test_str.append(str(fn.foo.schema))
    self.assertExpectedStripMangled("\n".join(test_str) + "\n")
# Python AST Frontend , MyPy-style type comments , Script function
def test_annot_ast_mypy_fn(self):
    """Python AST frontend + MyPy-style type comments, script function."""
    code = dedent('''
    import torch
    @torch.jit.script
    def foo(x, y):
        # type: ({input}, Tuple[Tensor, Tensor]) -> Tuple[{output}, {output}]
        return x, x
    ''')
    test_str = []
    for pair in self.type_input_return_pairs():
        fn = jit_utils._get_py3_code(self.format_code(code, pair), 'foo')
        test_str.append(str(fn.schema))
    self.assertExpected("\n".join(test_str) + "\n")
# Python AST Frontend , MyPy-style type comments , Script method
def test_annot_ast_mypy_method(self):
    """Python AST frontend + MyPy-style type comments, script method."""
    code = dedent('''
    import torch
    class FooModule(torch.jit.ScriptModule):
        @torch.jit.script_method
        def foo(self, x, y):
            # type: ({input}, Tuple[Tensor, Tensor]) -> Tuple[{output}, {output}]
            return x, x
    instance = FooModule()
    ''')
    test_str = []
    for pair in self.type_input_return_pairs():
        fn = jit_utils._get_py3_code(self.format_code(code, pair), 'instance')
        test_str.append(str(fn.foo.schema))
    self.assertExpectedStripMangled("\n".join(test_str) + "\n")
# Tests that "# type: ignore[*]" is supported in type lines and is
# properly ignored.
def test_mypy_type_ignore(self):
    """'# type: ignore' (with or without error codes) is accepted and ignored."""
    @torch.jit.script
    def foo(x):  # type: ignore
        return x

    @torch.jit.script
    def bar(x):  # type: ignore[no-redef]
        return x
def test_method_casts_script(self):
    """Tensor dtype-cast methods (.byte(), .float(), ...) match eager results."""
    cast_types = [
        'byte', 'char', 'double', 'float', 'int', 'long', 'short'
    ]

    for cast_type in cast_types:
        cu = torch.jit.CompilationUnit('''
        def cast_to(x):
            return x.{cast_type}()
        '''.format(cast_type=cast_type))

        # Scale up so integer casts produce varied values.
        x = torch.rand(3, 4, 5) * 128
        cu_result = cu.cast_to(x)
        reference = getattr(x, cast_type)()
        self.assertEqual(cu_result, reference)
def test_string_frontend_elif(self):
    """elif chains parse and run correctly in the string frontend (fizzbuzz)."""
    code = '''
    def func(niter):
        # type: (int)
        rv = 0
        for i in range(niter):
            if i % 3 == 0 and i % 5 == 0:
                rv += 35
            elif i % 3 == 0:
                rv += 3
            elif i % 5 == 0:
                rv += 5
            else:
                rv += i
        return rv
    '''

    self.checkScript(dedent(code), (101,))
def test_module_parameters_and_buffers(self):
    """Parameters and registered buffers of Python submodules used from a
    ScriptModule behave like an equivalent eager computation, and the
    module survives export/import."""
    weights = torch.randn(10, 10)
    bias = torch.randn(10)
    weights2 = torch.randn(10, 10)
    bias2 = torch.randn(10)

    class TestLinear(torch.nn.Module):
        def __init__(self, in_features, out_features):
            super(TestLinear, self).__init__()
            self.in_features = in_features
            self.out_features = out_features
            self.weight = torch.nn.Parameter(torch.empty(out_features, in_features))
            self.bias = torch.nn.Parameter(torch.empty(out_features))
            # Non-parameter state: exercised to verify buffers are carried
            # into the scripted module.
            self.register_buffer('counter', torch.ones(out_features))
            self.reset_parameters()

        def reset_parameters(self):
            torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
            if self.bias is not None:
                fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight)
                bound = 1 / math.sqrt(fan_in)
                torch.nn.init.uniform_(self.bias, -bound, bound)

        def forward(self, input):
            return F.linear(input, self.weight, self.bias) + self.counter

    # Initialize a ScriptModule that uses the weak module above multiple times
    class Strong(torch.jit.ScriptModule):
        def __init__(self):
            super(Strong, self).__init__()
            self.fc1 = TestLinear(10, 10)
            self.fc1.weight = torch.nn.Parameter(weights)
            self.fc1.bias = torch.nn.Parameter(bias)
            self.fc2 = TestLinear(10, 10)
            self.fc2.weight = torch.nn.Parameter(weights2)
            self.fc2.bias = torch.nn.Parameter(bias2)

        @torch.jit.script_method
        def forward(self, x):
            return x + self.fc1(x) + self.fc1(x) + self.fc2(x)

    strong_mod = Strong()

    # Run same calculation as module
    inp = torch.ones(10)
    lin = torch.nn.Linear(10, 10)
    lin.weight = torch.nn.Parameter(weights)
    lin.bias = torch.nn.Parameter(bias)
    lin2 = torch.nn.Linear(10, 10)
    lin2.weight = torch.nn.Parameter(weights2)
    lin2.bias = torch.nn.Parameter(bias2)
    # Each TestLinear call adds its `counter` buffer (ones), hence the
    # extra torch.ones(10) terms.
    expected_result = inp + (lin(inp) + torch.ones(10)) * 2 + lin2(inp) + torch.ones(10)

    self.assertEqual(strong_mod(inp), expected_result)
    self.assertExportImportModule(strong_mod, (inp,))
def test_module_copying(self):
    """Recursively scripting a Python module shares parameters/buffers with
    the original (in-place updates are seen) but not re-assignments."""
    class Submodule(torch.nn.Module):
        def __init__(self):
            super(Submodule, self).__init__()

        def forward(self, x):
            return x + 100

    class Weak(torch.nn.Module):
        def __init__(self, in_features, out_features):
            super(Weak, self).__init__()
            self.weight = torch.nn.Parameter(torch.ones(out_features, in_features))
            self.bias = torch.nn.Parameter(torch.ones(out_features))
            self.register_buffer("buffer", torch.ones(out_features))
            self.submodule = Submodule()

        def forward(self, x):
            return F.linear(x, self.weight, self.bias) \
                + self.buffer + self.submodule(x)

    class Strong(torch.jit.ScriptModule):
        def __init__(self, weak):
            super(Strong, self).__init__()
            self.weak = weak

        @torch.jit.script_method
        def forward(self, x):
            return self.weak(x)

    inp = torch.ones(5, 5) * 5
    weak_mod = Weak(5, 5)
    strong_mod = Strong(weak_mod)

    self.assertTrue(isinstance(strong_mod.weak, torch.jit.ScriptModule))
    self.assertFalse(isinstance(weak_mod, torch.jit.ScriptModule))

    self.assertIs(strong_mod.weak.weight, weak_mod.weight)
    self.assertIs(strong_mod.weak.buffer, weak_mod.buffer)
    # strong_mod.weak.submodule has been recursively scripted
    self.assertIsNot(strong_mod.weak.submodule, weak_mod.submodule)

    weak_mod.weight.data += torch.ones(5, 5) * 100
    self.assertTrue(strong_mod(inp).allclose(weak_mod(inp)))

    # Re-assignment is not tracked
    weak_mod.weight = torch.nn.Parameter(torch.ones(5, 5) * 100)
    self.assertFalse(strong_mod(inp).allclose(weak_mod(inp)))
def test_backend_cudnn_enabled(self):
    """torch.backends.cudnn.enabled is accessible from script (compile only)."""
    # Only test that this compiles
    @torch.jit.script
    def fn(x):
        if torch.backends.cudnn.enabled:
            x = x + 2
        else:
            x = x + 3
        return x
def test_inplace_add(self):
    """In-place Tensor.add_ inside a scripted fn matches eager."""
    def foo(a, b):
        c = a + b
        c.add_(b)
        return c

    self.checkScript(foo, (torch.rand(3), torch.rand(3)))
def test_add_out(self):
    """torch.add with an out= tensor inside a scripted fn matches eager."""
    def foo(a, b):
        c = a + b
        e = 2 * a
        torch.add(c, b, out=e)
        return e

    self.checkScript(foo, (torch.rand(3), torch.rand(3)))
def test_tuple_error_msg(self):
    """Destructuring an Any refined only to 'tuple' gives a helpful error."""
    def fn(t: Any):
        if isinstance(t, tuple):
            a, b = t
        return a + b

    with self.assertRaisesRegexWithHighlight(RuntimeError, "Provided tuple is not fully defined/refined", "t"):
        s = torch.jit.script(fn)
def test_augmented_assign(self):
    """Augmented assignment ops (+=, -=, /=, *=) on tensors match eager."""
    def foo(a, b):
        a += b
        a -= b
        a /= b
        a *= b
        return a, b

    self.checkScript(foo, (torch.rand(3), torch.rand(3)))
def test_ignored_props(self):
    """Properties in __jit_ignored_attributes__ are not compiled; accessing
    one from an @ignore'd method returns the raw property object."""
    class A(nn.Module):
        __jit_ignored_attributes__ = ["ignored", "ignored_return_val"]

        def __init__(self):
            super().__init__()

        @property
        def ignored(self):
            raise ValueError("shouldn't be called")

        @property
        def ignored_return_val(self):
            return 1

        @torch.jit.ignore
        def call(self):
            return self.ignored_return_val

    f = torch.jit.script(A())
    # jank way to test if there is no error
    self.assertTrue(isinstance(f, torch.jit.ScriptModule))
    self.assertTrue(isinstance(f.call(), property))
def test_pass(self):
    """`pass` statements compile in loop and both branches of an if."""
    def foo(x):
        # type: (bool) -> int
        for _i in range(3):
            pass
        if x:
            pass
        else:
            pass
        return 3

    self.checkScript(foo, (True,))
def test_lhs_indexing(self):
    """Assignment to an indexed tensor (a[0] = b) matches eager."""
    def foo(a, b):
        a = a.clone()
        a[0] = b
        return a

    self.checkScript(foo, (torch.rand(2, 3), torch.rand(3)))
def test_lhs_advanced_indexing_assignment(self):
    """Boolean-mask assignment (a[b] = y[b]) matches eager."""
    def foo(x, y):
        a = torch.exp(x)
        b = x == 1
        a[b] = y[b]
        return a

    self.checkScript(foo, (torch.ones(4, 3), torch.ones(4, 3)))
def test_lhs_advanced_indexing_augmented_assignment(self):
    """Boolean-mask augmented assignment (a[b] += y[b]) matches eager."""
    def foo(x, y):
        a = torch.exp(x)
        b = x == 1
        a[b] += y[b]
        return a

    self.checkScript(foo, (torch.ones(4, 3), torch.ones(4, 3)))
def test_lhs_indexing_list(self):
    """Assignment to a list element (ls[0] = b) matches eager."""
    def foo(a, b):
        ls = [a]
        ls[0] = b
        return ls

    self.checkScript(foo, (torch.rand(2, 3), torch.rand(3)))
def test_inplace_copy_script(self):
    """Tensor.copy_ inside a scripted fn matches eager."""
    def foo(x):
        a = torch.rand(3, 4)
        a.copy_(x)
        return a

    self.checkScript(foo, (torch.rand(3, 4),))
def test_lhs_indexing_increment(self):
    """Augmented assignment through tensor indexing (a[0] += b) matches eager."""
    def foo(a, b):
        a[0] += b
        return a

    self.checkScript(foo, (torch.rand(2, 3), torch.rand(3)))
def test_lhs_indexing_increment_list(self):
    """Augmented assignment through list indexing of tensors matches eager."""
    def foo(a, b):
        a = a.clone()
        ls = [a, b]
        ls[0] += b
        return ls

    self.checkScript(foo, (torch.rand(2, 3), torch.rand(3)))
def test_lhs_indexing_increment_list_prim(self):
    """Augmented assignment through list indexing of ints matches eager."""
    def foo():
        ls = [1, 2, 3]
        ls[0] += 5
        return ls

    self.checkScript(foo, ())
def test_lhs_indexing_multi(self):
    """Tuple-unpacking assignment with an indexed target (foo, a[0], bar = ...)."""
    def foo(a, b):
        a = a.clone()
        foo, a[0], bar = (1, b, 3)
        return foo, a, bar

    self.checkScript(foo, (torch.rand(2, 3), torch.rand(3)))
def test_bool_dispatch(self):
    """boolean_dispatch'd ops (max_pool1d's return_indices) resolve to the
    right overload for keyword, full-keyword, default, and positional forms."""
    with torch._jit_internal._disable_emit_hooks():  # TODO: Python print broadcasting list
        def kwarg_false(x):
            # type: (Tensor) -> Tensor
            return F.max_pool1d(x, 1, 1, return_indices=False)
        self.checkScript(kwarg_false, (torch.randn(3, 3, 3),))

        def kwarg_true(x):
            # type: (Tensor) -> Tuple[Tensor, Tensor]
            return F.max_pool1d(x, 1, 1, return_indices=True)
        self.checkScript(kwarg_true, (torch.randn(3, 3, 3),))

        def full_kwarg_false(x):
            # type: (Tensor) -> Tensor
            return F.max_pool1d(x, 1, 1, ceil_mode=False, return_indices=False)
        self.checkScript(full_kwarg_false, (torch.randn(3, 3, 3),))

        def full_kwarg_true(x):
            # type: (Tensor) -> Tuple[Tensor, Tensor]
            return F.max_pool1d(x, 1, 1, ceil_mode=False, return_indices=True)
        self.checkScript(full_kwarg_true, (torch.randn(3, 3, 3),))

        def use_default(x):
            # type: (Tensor) -> Tensor
            return F.max_pool1d(x, 1, 1)
        self.checkScript(use_default, (torch.randn(3, 3, 3),))

        def arg_false(x):
            # type: (Tensor) -> Tensor
            return F.max_pool1d(x, 1, 1, 0, 1, False, False)
        self.checkScript(arg_false, (torch.randn(3, 3, 3),))

        def arg_true(x):
            # type: (Tensor) -> Tuple[Tensor, Tensor]
            return F.max_pool1d(x, 1, 1, 0, 1, False, True)
        self.checkScript(arg_true, (torch.randn(3, 3, 3),))
def test_infer_size(self):
    """torch._C._infer_size (broadcast shape inference) is scriptable."""
    from torch._C import _infer_size

    def fn(x, y):
        # type: (Tensor, Tensor) -> List[int]
        return _infer_size(x.size(), y.size())

    self.checkScript(fn, (torch.ones(2, 4, 2), torch.ones(2, 4, 2)))
def test_hash(self):
    """Scripted hash() on int/float/str is consistent: equal inputs hash
    equal, distinct inputs (in these samples) hash differently."""
    def tester(fn, inputs):
        for x in inputs:
            for y in inputs:
                if x == y:
                    self.assertEqual(fn(x), fn(y))
                else:
                    self.assertNotEqual(fn(x), fn(y))

    @torch.jit.script
    def int_hash(x):
        # type: (int) -> int
        return hash(x)

    @torch.jit.script
    def float_hash(x):
        # type: (float) -> int
        return hash(x)

    @torch.jit.script
    def str_hash(x):
        # type: (str) -> int
        return hash(x)

    tester(int_hash, (20, 21, 22))
    tester(float_hash, (20.0, 21.00001, 22.443))
    tester(str_hash, ("", "hello", "a"))
def test_id(self):
    """id() is rejected on scalar values but works on class-type objects."""
    with self.assertRaisesRegex(RuntimeError, "Expected a value"):
        @torch.jit.script
        def test_id_scalars():
            return id(2) == id(None)

    @torch.jit.script
    class FooTest(object):
        def __init__(self, x):
            self.foo = x

        def getFooTest(self):
            return self.foo

    @torch.jit.script
    def test_id_class_types():
        obj1 = FooTest(torch.tensor(3))
        obj2 = FooTest(torch.tensor(2))
        assert obj1 is not obj2
        assert id(obj1) != id(obj2)
        assert id(obj1) != id(None)
        return True

    self.assertTrue(test_id_class_types())
def test_mutable_dce(self):
    """DCE removes a dead mutated value (b) but keeps the returned one (a)."""
    @torch.jit.script
    def foo():
        a = torch.rand(2, 3)
        a += torch.rand(2, 3)
        b = torch.rand(2, 3)
        b += torch.rand(2, 3)
        # b should be cleaned up but not a
        return a

    FileCheck().check_count("aten::rand", 2, exactly=True) \
        .check_count("aten::add", 1, exactly=True).run(str(foo.graph))
def test_mutable_dce_block(self):
    """DCE across an if-block: dead mutation of a is removed, b is kept."""
    @torch.jit.script
    def foo():
        a = torch.rand(2, 3)
        a += torch.rand(2, 3)
        b = torch.rand(2, 3)
        if bool(a > torch.zeros(2, 3)):
            b += torch.rand(2, 3)
            a += torch.rand(2, 3)
        # a should be cleaned up but not b
        return b

    FileCheck().check("prim::If").check_count("aten::rand", 1, exactly=True) \
        .run(str(foo.graph))
def test_mutable_dce_graph_input(self):
    """A mutation of a graph input must survive DCE even if unused after."""
    @torch.jit.script
    def foo(a):
        a += torch.rand(2, 3)
        # shouldn't clean up `a` even though it's not used in the output

    FileCheck().check("aten::rand").check("aten::add").run(str(foo.graph))
def test_mutable_dce_list(self):
    """A value aliased through a list (wildcard) and mutated is not DCE'd."""
    @torch.jit.script
    def foo(a):
        l = []
        l.append(a)
        c = l[0]
        b = torch.rand(2, 3)
        c += torch.rand(2, 3)
        return b

    # c does not get cleaned up because there is a wildcard + mutation
    FileCheck().check_count("aten::rand", 2, exactly=True).run(str(foo.graph))
def test_mutable_dce_loop(self):
    """Inside a loop, a truly dead value is DCE'd while a wildcard-aliased
    mutated value is preserved."""
    @torch.jit.script
    def foo(a):
        l = []
        l.append(a)
        i = 0
        b = torch.rand(2, 3)
        while i < 1:
            dead = torch.rand(2, 3)
            c = l[0]
            c += torch.rand(2, 3)
            i += 1
        return b

    FileCheck().check("prim::Loop").check_not("aten::rand").check("aten::__getitem__") \
        .check_count("aten::rand", 1, exactly=True).run(str(foo.graph))
def test_mutable_dce_indirect_wildcards(self):
    """A view retrieved through a container observes a later in-place write."""
    def fn():
        x = torch.ones(2, 3)
        x_1 = x.view(-1)
        l = []
        l.append(x_1)
        x_view = l[0]
        x.add_(torch.ones(2, 3))
        return x_view

    self.checkScript(fn, ())
def test_mutable_dce_indirect_wildcard_write(self):
    """An indexed write to a tensor later appended to a list must not be eliminated."""
    def fn():
        indexes = torch.jit.annotate(List[Tensor], [])
        word_ids = torch.zeros(10, dtype=torch.int32)
        word_ids[1] = 1
        indexes.append(word_ids)
        return word_ids

    self.checkScript(fn, ())
def test_mutable_dce_wildcards(self):
    """A value retrieved from a list aliases the original; in-place add must be visible."""
    def fn():
        x = torch.ones(2, 3)
        l = []
        l.append(x)
        x_view = l[0]
        x.add_(torch.ones(2, 3))
        return x_view

    self.checkScript(fn, (), profiling=ProfilingMode.SIMPLE)
def test_cpp_function_tensor_str(self):
    """Printing a tensor produced by a scripted function must not crash (smoke test)."""
    x = torch.randn(2, 2)
    scale = torch.randn(2, 2, requires_grad=True)
    shift = torch.randn(2, 2, requires_grad=True)

    @torch.jit.script
    def fn(x, scale, shift):
        return scale * x + shift

    # Output is captured rather than asserted on; the test only checks printing works.
    with self.capture_stdout() as captured:
        print(fn(x, scale, shift))
def test_string_index(self):
    """Positive and negative string indexing matches eager behavior under scripting."""
    def index_fn(s):
        # type: (str)
        return s[2], s[-1]

    self.checkScript(index_fn, ("abcde",))
def test_ord(self):
    """ord() on one-character strings, and building a tensor from a code point."""
    def fn(x):
        # type: (str) -> int
        return ord(x)

    # Fix: the args must be 1-element tuples. `("h")` is just the string "h";
    # it only worked because unpacking a 1-char string yields ('h',) by accident.
    self.checkScript(fn, ("h",))
    self.checkScript(fn, ("y",))

    def index_str_to_tensor(s):
        # type: (str) -> Tensor
        return torch.tensor(ord(s))  # noqa: T484

    # A single-byte slice of a multi-byte UTF-8 character.
    s = u'\u00a3'.encode('utf8')[:1]
    self.checkScript(index_str_to_tensor, (s,))
def test_chr(self):
    """chr() converts int code points to one-character strings in TorchScript."""
    def to_char(code):
        # type: (int) -> str
        return chr(code)

    for codepoint in (1, 97):
        self.checkScript(to_char, (codepoint,))
def test_round(self):
    """round() works on both float and int inputs in TorchScript."""
    def round_float(x):
        # type: (float) -> float
        return round(x)

    def round_int(x):
        # type: (int) -> float
        return round(x)

    self.checkScript(round_float, (1.5,))
    self.checkScript(round_int, (2,))
def test_convert_base(self):
    """hex()/oct()/bin() conversions match Python for negative, zero and positive ints."""
    def test_hex(x):
        # type: (int) -> str
        return hex(x)

    def test_oct(x):
        # type: (int) -> str
        return oct(x)

    def test_bin(x):
        # type: (int) -> str
        return bin(x)

    numbers = [-1000, -10, 0, 1, 10, 2343]
    for n in numbers:
        self.checkScript(test_bin, (n,))
        self.checkScript(test_oct, (n,))
        self.checkScript(test_hex, (n,))
@unittest.skipIf(IS_WINDOWS or IS_SANDCASTLE, "NYI: TemporaryFileName support for Windows or Sandcastle")
def test_get_set_state(self):
    """__getstate__/__setstate__ round-trip through save/load for script modules."""
    class Root(torch.jit.ScriptModule):
        __constants__ = ['number']

        def __init__(self, number):
            super(Root, self).__init__()
            self.register_buffer('buffer1', torch.ones(2, 2))
            self.register_buffer('buffer2', torch.ones(2, 2))
            self.number = number

        @torch.jit.script_method
        def __getstate__(self):
            # 74 is arbitrary extra payload: state need not be buffers only.
            return (self.buffer1, self.buffer2, 74, self.training)

        @torch.jit.script_method
        def __setstate__(self, state):
            # +10 makes it observable that __setstate__ (not a plain copy) ran.
            self.buffer1 = state[0] + 10
            self.buffer2 = state[1] + 10
            self.training = state[3]

    class M(torch.jit.ScriptModule):
        __constants__ = ['number']

        def __init__(self, number, submodule):
            super(M, self).__init__()
            self.register_buffer('buffer1', torch.ones(2, 2))
            self.register_buffer('buffer2', torch.ones(2, 2))
            self.number = number
            self.submodule = submodule

        @torch.jit.script_method
        def __getstate__(self):
            return (self.buffer1, self.buffer2, 74, self.submodule, self.training)

        @torch.jit.script_method
        def __setstate__(self, state):
            self.buffer1 = state[0] + 10
            self.buffer2 = state[1] + 10
            self.submodule = state[3]
            self.training = state[4]

    with TemporaryFileName() as fname:
        m = M(23, submodule=Root(99))
        m.save(fname)
        loaded = torch.jit.load(fname)

        # Check original module
        self.assertEqual(m.buffer1, torch.ones(2, 2))
        self.assertEqual(m.buffer2, torch.ones(2, 2))

        # Check top level module
        self.assertEqual(loaded.buffer1, torch.ones(2, 2) + 10)
        self.assertEqual(loaded.buffer2, torch.ones(2, 2) + 10)

        # Check submodule
        self.assertEqual(loaded.submodule.buffer1, torch.ones(2, 2) + 10)
        self.assertEqual(loaded.submodule.buffer2, torch.ones(2, 2) + 10)

    # Check simpler module
    class NoArgState(torch.nn.Module):
        def __init__(self):
            super(NoArgState, self).__init__()
            self.register_buffer('buffer1', torch.ones(2, 2))
            self.register_buffer('buffer2', torch.ones(2, 2))

        def forward(self):
            pass

        @torch.jit.export
        def __getstate__(self):
            return 5, self.training

        @torch.jit.export
        def __setstate__(self, state):
            self.buffer1 = torch.ones(2, 2) + state[0]
            self.buffer2 = torch.ones(2, 2) + 10
            self.training = state[1]

    with TemporaryFileName() as fname:
        m = torch.jit.script(NoArgState())
        m.save(fname)
        loaded = torch.jit.load(fname)
        self.assertEqual(loaded.buffer1, torch.ones(2, 2) + 5)
        self.assertEqual(loaded.buffer2, torch.ones(2, 2) + 10)
def test_string_slicing(self):
    """String slicing: normal, negative-start, empty (start > stop), and out-of-range stop."""
    def fn1(x):
        # type: (str) -> str
        return x[1:3]

    def fn2(x):
        # type: (str) -> str
        return x[-1:3]

    def fn3(x):
        # type: (str) -> str
        return x[3:1]

    def fn4(x):
        # type: (str) -> str
        return x[3:100]

    self.checkScript(fn1, ("abcdefghi",))
    self.checkScript(fn2, ("abcdefghi",))
    self.checkScript(fn3, ("abcdefghi",))
    self.checkScript(fn4, ("abcdefghi",))
def test_early_return_closure(self):
    """Early returns inside closures: graph shape after the exit transform, and
    the error when a closure returns mismatched types along different paths."""
    code = dedent('''
        def tanh(self):
            output = torch.tanh(self)
            def backward(grad_output):
                pass
            return output, backward
    ''')
    cu = torch.jit.CompilationUnit(code)
    g = cu.tanh.graph
    FileCheck().check_count("prim::Closure_0", 2).check("NoneType = prim::Constant") \
        .check_next("return").run(g)

    code = dedent('''
        def tanh(self):
            output = torch.tanh(self)
            def backward(grad_output):
                a = 1
                if output:
                    return 1
                else:
                    a = 2
                return a
            return output, backward
    ''')
    cu = torch.jit.CompilationUnit(code)
    g = cu.tanh.graph
    FileCheck().check_count("prim::Closure_0", 2).check("int = prim::If") \
        .run(g)

    code = dedent('''
        def loop_in_closure(self):
            output = torch.tanh(self)
            def backward(grad_output):
                for i in range(3):
                    return 1
                return 4
            return output, backward
    ''')
    cu = torch.jit.CompilationUnit(code)
    fc = FileCheck()
    fc.check("prim::Closure").check("(Tensor, NoneType) = prim::TupleConstruct")
    # Loop then two if's added in exit transform
    fc.check("prim::Closure").check("prim::Loop").check_count("prim::If", 2)
    fc.run(cu.loop_in_closure.graph)

    code = dedent('''
        def tanh(self):
            output = torch.tanh(self)
            def backward(grad_output):
                if 1 == 1:
                    return 1
                else:
                    return 1.
            return output, backward
    ''')
    # int on one path, float on the other -> compile error.
    with self.assertRaisesRegex(RuntimeError, "returned a value of type int but"):
        cu = torch.jit.CompilationUnit(code)
@_inline_everything
def test_early_return_fork_join(self):
    """A function with early returns can be forked; fork body keeps the If structure."""
    @torch.jit.script
    def foo(x):
        if x.dim() == 2:
            return torch.neg(x), x
        else:
            return torch.neg(x), x + 1

    x = torch.rand(3, 4)

    @torch.jit.script
    def wait_script(x):
        fut = torch.jit._fork(foo, x)
        y_hat = foo(x)
        y = torch.jit._wait(fut)
        return y, y_hat

    FileCheck().check("with prim::fork").check("prim::If").check("return")\
        .run(wait_script.graph)
def test_early_return_type_refinement(self):
    """An early return on the None branch refines Optional[int] to int afterwards."""
    @torch.jit.script
    def test(x):
        # type: (Optional[int]) -> int
        if x is None:
            return 1
        else:
            return x

    self.assertEqual(test(None), 1)
    self.assertEqual(test(2), 2)
def test_exceptions_with_control_flow(self):
    """Raise statements interact with the early-return transform; each case pins
    the exact number of prim::If nodes the compiler should emit."""
    def test_num_ifs(func, num_ifs):
        g = torch.jit.script(func).graph
        FileCheck().check_count("prim::If", num_ifs, exactly=True).run(g)

    def no_guard_ifs_added(x):
        # type: (int) -> int
        if x == 1:
            return 1
        else:
            if x == 2:
                raise RuntimeError("hi")
            else:
                raise RuntimeError("hi")

    self.checkScript(no_guard_ifs_added, (1,))
    self.checkScriptRaisesRegex(no_guard_ifs_added, (2,), Exception, "")
    test_num_ifs(no_guard_ifs_added, 2)

    # FUNCTION LOOKS LIKE:
    # graph(%x.1 : int):
    #   %7 : str = prim::Constant[value="Exception"]()
    #   %2 : int = prim::Constant[value=1]()
    #   %5 : int = prim::Constant[value=2]()
    #   %19 : int = prim::Uninitialized()
    #   %3 : bool = aten::eq(%x.1, %2)
    #   %20 : int = prim::If(%3)
    #     block0():
    #       -> (%2)
    #     block1():
    #       %6 : bool = aten::eq(%x.1, %5)
    #        = prim::If(%6)
    #         block0():
    #            = prim::RaiseException(%7)
    #           -> ()
    #         block1():
    #            = prim::RaiseException(%7)
    #           -> ()
    #       -> (%19)
    #   return (%20)
    def no_ifs_added(x):
        # type: (int) -> int
        if x < 0:
            raise RuntimeError("hi")
        return x

    self.checkScript(no_ifs_added, (1,))
    self.checkScriptRaisesRegex(no_ifs_added, (-2,), Exception, "")
    test_num_ifs(no_ifs_added, 1)

    def test_if_might(x):
        # type: (int)
        if x > 0:
            if x == 1:
                return 1
            else:
                a = 2
        else:
            raise RuntimeError("hi")
        return a + 2

    self.checkScript(test_if_might, (1,))
    self.checkScript(test_if_might, (3,))
    # NOTE(review): this re-tests no_ifs_added rather than test_if_might —
    # looks like a copy/paste slip upstream; confirm intent before changing.
    self.checkScriptRaisesRegex(no_ifs_added, (-2,), Exception, "")
    test_num_ifs(test_if_might, 3)  # one if added to guard a + 2

    def test_loop_no_escape(x):
        # type: (int)
        if x >= 0:
            for i in range(x):
                raise RuntimeError("hi")
        else:
            return 5
        return x + 3

    self.checkScript(test_loop_no_escape, (0,))
    self.checkScript(test_loop_no_escape, (-1,))
    self.checkScriptRaisesRegex(test_loop_no_escape, (1,), Exception, "")
    # if guard gets optimized away
    test_num_ifs(test_loop_no_escape, 1)

    def test_loop_exception_with_continue(x):
        # type: (int)
        i = 0
        for i in range(5):
            if i == x:
                raise RuntimeError("hi")
            else:
                continue
            print(i)
        return i + 5

    self.checkScript(test_loop_exception_with_continue, (-1,))
    self.checkScriptRaisesRegex(test_loop_exception_with_continue, (1,), Exception, "")
    test_num_ifs(test_loop_exception_with_continue, 1)  # no ifs added to guard print
def test_exception_exits_closure(self):
    """A closure that only raises does not count as returning; and raise/return
    exit pairs reset correctly across nested closures."""
    code = dedent('''
        def no_return_func(self):
            # type: (Tensor) -> Tensor
            output = torch.tanh(self)
            def backward(grad_output):
                raise RuntimeError("Hi")
    ''')
    # Outer function never returns a Tensor -> compile error.
    with self.assertRaisesRegex(RuntimeError, "does not return along all"):
        cu = torch.jit.CompilationUnit(code)

    code = dedent('''
        def test_exit_pair_reset(x):
            # type: (int) -> int
            if x > 0:
                a = 0
                def backward(grad_output):
                    raise RuntimeError("Hi")
                a = a + 1
            else:
                return x
            return a + 1
    ''')
    func = torch.jit.CompilationUnit(code).test_exit_pair_reset
    self.assertEqual(func(1,), 2)
    self.assertEqual(func(-1,), -1)
    # final a + 1 gets inlined into the first branch and optimized away
    FileCheck().check_count("prim::If", 1, exactly=True).run(func.graph)
def test_non_final_return(self):
    """Returns in non-final positions (early returns, partial returns, dead code
    after return) all script to the same results as eager execution."""
    def simple(x):
        if bool(x > 3):
            return x + 1
        else:
            return x + 2
        raise RuntimeError("nope")

    def nest(x):
        x = x + 1
        if bool(x > 3):
            if bool(x > 4):
                x += 1
            return x + 1
        else:
            return x + 2

    def early_ret(x):
        x = x + 1
        if bool(x > 3):
            return x + 1
        x = x + 1
        return x + 2

    def nest_early_ret(x):
        x = x + 1
        if bool(x > 3):
            if bool(x > 4):
                return x + 2
            return x + 1
        x = x + 1
        return x + 2

    def not_early_ret(x):
        s = ""
        if bool(x > 3):
            if bool(x > 4):
                return 1, s
            s += "foo"
        else:
            s += "5"
        s += "hi"
        return 7, s

    def not_total_ret(x):
        s = ""
        if bool(x > 3):
            if bool(x > 4):
                return 1, s
            else:
                return 2, s
        else:
            s += "5"
        return 7, s

    # 2.5, 3.5, 4.5 exercise the different branch combinations above.
    for i in range(3):
        for func in [simple, nest, early_ret, nest_early_ret, not_early_ret,
                     not_total_ret]:
            self.checkScript(func, (torch.tensor(2.5 + i),))

    def vars_used_after_ret(x):
        # type: (int) -> int
        if x == 0:
            return x
        else:
            y = 2
            z = 3
        return x + y * z

    self.checkScript(vars_used_after_ret, (1,))
    self.checkScript(vars_used_after_ret, (0,))

    def complicated(x):
        # type: (int) -> int
        if x:
            if x == 2:
                return 1
                assert 1 == 2
            else:
                if x == 3:
                    return 2
                    assert 1 == 2
                else:
                    a = 2
                    b = 3
        else:
            a = 4
            b = 1
        return a + b
        assert 1 == 2

    for i in range(4):
        self.checkScript(complicated, (i,))
def test_partial_returns(self):
    """Functions with a declared return type must return on every path; an
    unannotated function may implicitly return None."""
    with self.assertRaisesRegex(RuntimeError, "does not return along all"):
        @torch.jit.script
        def no_ret():
            # type: () -> int
            pass

    with self.assertRaisesRegex(RuntimeError, "does not return along all"):
        @torch.jit.script
        def partial(x):
            # type: (Tensor) -> int
            if x:
                return 1

    with self.assertRaisesRegex(RuntimeError, "does not return along all"):
        @torch.jit.script
        def typed_none():
            # type: () -> Optional[int]
            pass

    @torch.jit.script
    def none_ret():
        pass

    self.assertIs(none_ret(), None)
    FileCheck().check(": None").run(none_ret.graph)
def test_early_returns_loops(self):
    """Early returns from inside while/for loops (including nested loops) match eager."""
    def nest_while_ret(x):
        # type: (int) -> int
        y = 4
        while x < 4:
            if x < 3:
                return y
            else:
                y = y + 1
                break
            y = y + 2
        y = y + 1
        return y

    self.checkScript(nest_while_ret, (2,))
    self.checkScript(nest_while_ret, (3,))
    self.checkScript(nest_while_ret, (4,))

    def loop_ret(x, y):
        # type: (int, int) -> (int)
        i = 0
        for i in range(x):
            if x == y:
                return x + y
            i = i + y
        i = i - 1
        return i

    self.checkScript(loop_ret, (3, 3))
    self.checkScript(loop_ret, (2, 3))
    self.checkScript(loop_ret, (3, 1))

    def test_will_ret(y):
        # type: (int) -> int
        for i in range(y):
            return 2
        return 1

    self.checkScript(test_will_ret, (0,))
    self.checkScript(test_will_ret, (1,))

    def test_loop_nest_ret(y):
        # type: (int) -> int
        for i in range(y):
            for i in range(y - 2):
                return 10
            return 5
        return 0

    self.checkScript(test_loop_nest_ret, (0,))
    self.checkScript(test_loop_nest_ret, (1,))
    self.checkScript(test_loop_nest_ret, (2,))
def test_nn_init(self):
    """torch.nn.init functions script natively (no PythonOp fallback) and match
    eager results under a saved RNG state."""
    tests = (
        ('constant_', (lambda: (torch.ones(2, 2), 2.5)), "Tensor, float"),
        ('ones_', (lambda: (torch.ones(2, 2),)), "Tensor"),
        ('zeros_', (lambda: (torch.ones(2, 2),)), "Tensor"),
        ('uniform_', (lambda: (torch.ones(2, 2),)), "Tensor"),
        ('normal_', (lambda: (torch.ones(2, 2),)), "Tensor"),
        ('xavier_normal_', (lambda: (torch.ones(2, 2),)), "Tensor"),
        ('xavier_uniform_', (lambda: (torch.ones(2, 2),)), "Tensor"),
    )

    for name, args_fn, type_str in tests:
        # Build test code
        arg_str = ', '.join([chr(i + ord('a')) for i in range(len(args_fn()))])

        code = dedent('''
            def test({arg_str}):
                # type: ({type_str})
                return torch.nn.init.{name}({arg_str})
        ''').format(arg_str=arg_str, type_str=type_str, name=name)
        cu = torch.jit.CompilationUnit(code)

        # Compare functions
        init_fn = getattr(torch.nn.init, name)
        script_out = self.runAndSaveRNG(cu.test, args_fn())
        eager_out = self.runAndSaveRNG(init_fn, args_fn())
        self.assertEqual(script_out, eager_out)

        FileCheck().check_not("prim::PythonOp").run(cu.test.graph)
def test_early_return_rewrite(self):
    """The early-return rewrite produces the expected number of prim::If nodes."""
    def test_foo(x: bool):
        if x:
            return 1
        return 2

    self.checkScript(test_foo, (True,))
    self.checkScript(test_foo, (False,))
    FileCheck().check_count("prim::If", 1, exactly=True).run(torch.jit.script(test_foo).graph)

    def test_multiple(x: int):
        if x == 5:
            return x * x
        else:
            y = 2 * x

        z = y * 2
        if z == 8:
            return 1

        if z != 16:
            z = z - 2
            abc = 4
        else:
            return 3

        z = z * abc
        return z * z * z

    self.checkScript(test_multiple, (5,))
    self.checkScript(test_multiple, (2,))
    self.checkScript(test_multiple, (4,))
    self.checkScript(test_multiple, (3,))
    self.checkScript(test_multiple, (10,))

    graph = torch.jit.script(test_multiple).graph
    FileCheck().check_count("prim::If", 3, exactly=True).run(graph)
def test_is_scripting_metacompile(self):
    """The branch where is_scripting() is False is dropped at compile time, so
    code there need not be compilable."""
    @torch.jit.script
    def foo():
        if torch.jit.is_scripting():
            return 1
        else:
            print("hello") + 2  # will not be compiled

    self.assertEqual(foo(), 1)
def test_boolean_literal_constant_metacompile(self):
    """A constant boolean condition (module constant or literal) selects one branch
    at compile time, so mismatched branch return types are allowed."""
    class Mod(torch.nn.Module):
        __constants__ = ['val']

        def __init__(self, val):
            super(Mod, self).__init__()
            self.val = val

        def forward(self):
            if self.val:
                return 1
            else:
                return "2"

    self.checkModule(Mod(True), ())
    self.checkModule(Mod(False), ())

    @torch.jit.script
    def foo():
        if True:
            return 1
        else:
            return "2"

    self.assertEqual(foo(), 1)
def test_assert_is_scripting_metacompile(self):
    """`assert not is_scripting()` makes everything after it dead in script mode,
    and the scripted function raises the assert's message when called."""
    def foo():
        assert not torch.jit.is_scripting(), "TestErrorMsg"
        print("hello") + 2  # will not be compiled

    f = torch.jit.script(foo)
    with self.assertRaisesRegex(torch.jit.Error, "TestErrorMsg"):
        f()
def test_isinstance_metacompile(self):
    """isinstance checks are resolved at compile time for both primitive and
    NamedTuple types; statically-false branches become dead code."""
    @torch.jit.script
    def test_primitive_type(x):
        # type: (int) -> int
        if isinstance(x, int):
            return x + 1
        else:
            return x - 1

    self.assertEqual(test_primitive_type(1), 2)
    with self.assertRaisesRegex(Exception, "Expected a value of type"):
        test_primitive_type(1.5)

    _MyNamedTuple = namedtuple('_MyNamedTuple', ['value'])

    @torch.jit.script
    def test_non_primitive_types(x):
        # type: (_MyNamedTuple) -> Tensor
        if isinstance(1, _MyNamedTuple):
            return 10

        if isinstance(x, _MyNamedTuple):
            return x.value + 1
        else:
            return 1

    out = test_non_primitive_types(_MyNamedTuple(value=torch.tensor(5.0)))
    self.assertEqual(out, torch.tensor(6.0))
def test_namedtuple_type_inference(self):
    """Annotated NamedTuple fields script correctly; unannotated fields default to
    Tensor (inferred) and reject an int."""
    _AnnotatedNamedTuple = NamedTuple('_NamedTupleAnnotated', [('value', int)])
    _UnannotatedNamedTuple = namedtuple('_NamedTupleUnAnnotated', ['value'])

    def test_check_named_tuple_value():
        named_tuple = _AnnotatedNamedTuple(1)
        return named_tuple.value

    self.checkScript(test_check_named_tuple_value, ())

    def test_error():
        return _UnannotatedNamedTuple(1)

    with self.assertRaisesRegex(RuntimeError, r"Expected a value of type \'Tensor \(inferred\)\' "
                                              r"for argument \'value\' but instead found type \'int\'."):
        torch.jit.script(test_error)
def test_namedtuple_default_values_simple_type(self):
    """NamedTuple defaults of simple types are preserved in the scripted schema."""
    class Point(NamedTuple):
        x: Optional[int] = None
        y: int = 2

    make_global(Point)

    class M(torch.nn.Module):
        def __init__(self):
            super(M, self).__init__()

        def forward(self, point: Point):
            return point

    p = Point(x=3, y=2)

    self.checkModule(M(), (p,))
    self.checkModule(M(), (Point(),))

    m = torch.jit.script(M())

    FileCheck().check(r"NamedTuple(x : int? = None, y : int = 2))") \
        .run(m.graph)
def test_namedtuple_default_values_missing(self):
    """NamedTuple with only trailing defaults: missing defaults stay required."""
    class Point(NamedTuple):
        x: Optional[int]
        y: int
        z: int = 3

    make_global(Point)

    class M(torch.nn.Module):
        def __init__(self):
            super(M, self).__init__()

        def forward(self, point: Point):
            return point

    p1 = Point(x=3, y=2)
    p2 = Point(x=3, y=2, z=1)

    self.checkModule(M(), (p1,))
    self.checkModule(M(), (p2,))

    m = torch.jit.script(M())

    FileCheck().check(r"NamedTuple(x : int?, y : int, z : int = 3))") \
        .run(m.graph)
def test_namedtuple_default_values_container_type(self):
    """NamedTuple defaults of container types (List, Dict, Optional) survive scripting."""
    class Point(NamedTuple):
        x: Optional[List[int]] = None
        y: List[int] = [1, 2, 3]
        z: Optional[Dict[str, int]] = {"a": 1}

    make_global(Point)

    class M(torch.nn.Module):
        def __init__(self):
            super(M, self).__init__()

        def forward(self, point: Point):
            return point

    p = Point(x=[4, 5, 6], y=[3, 2, 1], z={"b": 2})

    self.checkModule(M(), (p,))
    self.checkModule(M(), (Point(),))

    m = torch.jit.script(M())

    first_line = r"NamedTuple(x : int[]? = None, y : int[] = " \
                 r"[1, 2, 3], z : Dict(str, int)? = {a: 1}))"

    FileCheck().check(first_line) \
        .run(m.graph)
def test_namedtuple_default_values_Tensor_type(self):
    """Tensor defaults in NamedTuple fields are rejected by the compiler."""
    class Point(NamedTuple):
        x: torch.Tensor = torch.rand(2, 3)

    make_global(Point)

    class M(torch.nn.Module):
        def __init__(self):
            super(M, self).__init__()

        def forward(self, point: Point):
            return point

    p = Point(x=torch.rand(2, 3))

    with self.assertRaisesRegex(RuntimeError, "Tensors are not "
                                              "supported as default NamedTuple "
                                              "fields"):
        m = torch.jit.script(M())
        m(p)
@unittest.skipIf(sys.version_info < (3, 7, 0), "defaults keyword added in Python 3.7")
def test_namedtuple_default_values_using_factory_constructor(self):
    """Defaults given via the namedtuple() factory's `defaults` kwarg show up in
    the scripted schema.

    Fix: the skip message previously claimed Python 3.8, but the guard checks
    (3, 7, 0) and collections.namedtuple gained `defaults` in Python 3.7.
    """
    Pair = namedtuple("Pair", ["x", "y"], defaults=(1, 2))

    make_global(Pair)

    @torch.jit.script
    def fn(x: Pair) -> Pair:
        return x

    # TODO: We can't use `checkScript` with the NamedTuple factory
    # constructor. Using the factory constructor with TorchScript
    # TorchScript creates an anonymous `NamedTuple` class instead of
    # preserving the actual name. For example, the actual generated
    # signature in this case is:
    #   graph(%x.1 : NamedTuple(x : Tensor, y : Tensor))
    # It looks like similar test cases have had this issue as well
    # (see: `test_namedtuple_python`).
    FileCheck().check(r"NamedTuple(x : Tensor = 1, y : Tensor = 2))") \
        .check_next(r"return (%x.1)") \
        .run(fn.graph)
def test_isinstance_dynamic(self):
    """isinstance against tuples of types (including nested tuples and List[int])
    is evaluated statically on an Optional input."""
    @torch.jit.script
    def foo(a):
        # type: (Optional[List[int]]) -> int
        b = 0
        if isinstance(a, (int, (float,), list, str)):
            b += 1
        if isinstance(a, (int, str)):
            b += 1
        if isinstance(a, List[int]):
            b += 1
        return b

    self.assertEqual(foo([3, 4]), 2)
    self.assertEqual(foo(None), 0)
def test_function_overloads(self):
    """torch.jit._overload free functions: dispatch by type, overload caching,
    redefinition under the same name, default-value rules, and error cases."""
    # TODO: pyflakes currently does not compose @overload annotation with other
    # decorators. This is fixed on master but not on version 2.1.1.
    # Next version update remove noqa and add @typing.overload annotation
    @torch.jit._overload  # noqa: F811
    def test_simple(x1):  # noqa: F811
        # type: (int) -> int
        pass

    @torch.jit._overload  # noqa: F811
    def test_simple(x1):  # noqa: F811
        # type: (float) -> float
        pass

    def test_simple(x1):  # noqa: F811
        return x1

    def invoke_function():
        return test_simple(1.0), test_simple(.5)

    self.checkScript(invoke_function, ())

    # testing that the functions are cached
    compiled_fns_1 = torch.jit._script._get_overloads(test_simple)
    compiled_fns_2 = torch.jit._script._get_overloads(test_simple)
    for a, b in zip(compiled_fns_1, compiled_fns_2):
        self.assertIs(a.graph, b.graph)

    old_func = test_simple

    # testing that new functions added work with caching
    @torch.jit._overload  # noqa: F811
    def test_simple(x1):  # noqa: F811
        # type: (str) -> str
        pass

    @torch.jit.script
    def my_func():
        return old_func("hi")

    # testing new function same qualified name
    @torch.jit._overload  # noqa: F811
    def test_simple(a, b):  # noqa: F811
        # type: (int, int) -> int
        pass

    def test_simple(a, b):
        return a + b

    @torch.jit.script
    def fn():
        return test_simple(3, 4)

    self.assertEqual(fn(), 7)

    # currently we take the default values have to be specified in the
    # overload as well - TODO take them from implementation and apply
    # where the type is valid.
    @torch.jit._overload  # noqa: F811
    def identity(x1):  # noqa: F811
        # type: (str) -> str
        pass

    @torch.jit._overload  # noqa: F811
    def identity(x1):  # noqa: F811
        # type: (float) -> float
        pass

    def identity(x1=1.0):  # noqa: F811
        return x1

    def invoke():
        return identity(), identity(.5), identity("hi")

    self.checkScript(invoke, ())

    def schema_match_failure():
        return identity((1, 2))

    thrown = False
    try:
        torch.jit.script(schema_match_failure)
    except Exception as e:
        thrown = True
        # The error should mention both candidate overload types.
        self.assertTrue(r"of type 'str'" in str(e) and r"of type 'float" in str(e))
    self.assertTrue(thrown)

    with self.assertRaisesRegex(Exception, "cannot be directly compiled"):
        torch.jit.script(identity)

    @torch.jit._overload  # noqa: F811
    def impl_compile_failure(x, y):  # noqa: F811
        # type: (str, str) -> (str)
        pass

    @torch.jit._overload  # noqa: F811
    def impl_compile_failure(x, y):  # noqa: F811
        # type: (int, int) -> (int)
        pass

    def impl_compile_failure(x, y):  # noqa: F811
        return x - y

    def test():
        impl_compile_failure("one", "two")

    # str - str is invalid, so compiling the str overload's body must fail.
    with self.assertRaisesRegex(Exception, "Arguments for call are not valid"):
        torch.jit.script(test)

    @torch.jit._overload  # noqa: F811
    def good_overload(x=1):  # noqa: F811
        # type: (int) -> (int)
        pass

    def good_overload(x=1):  # noqa: F811
        return x

    @torch.jit.script
    def foo():
        return good_overload()

    self.assertEqual(foo(), 1)

    # Defaults in the overload must match the implementation's defaults.
    with self.assertRaisesRegex(Exception, "must equal to the default parameter"):
        @torch.jit._overload  # noqa: F811
        def bad_default_on_overload(x, y=2):  # noqa: F811
            # type: (int, int) -> (int)
            pass

        def bad_default_on_overload(x, y=1):  # noqa: F811
            # type: (int, int) -> (int)
            pass

        @torch.jit.script
        def test():
            return bad_default_on_overload(1, 2)

    @torch.jit._overload  # noqa: F811
    def diff_default(x):  # noqa: F811
        # type: (int) -> int
        pass

    @torch.jit._overload  # noqa: F811
    def diff_default(x):  # noqa: F811
        # type: (str) -> str
        pass

    def diff_default(x="hi"):  # noqa: F811
        return x

    def test():
        return diff_default(), diff_default(2), diff_default("abc")

    self.assertEqual(test(), torch.jit.script(test)())

    @torch.jit._overload  # noqa: F811
    def diff_num_params(x):  # noqa: F811
        # type: (float) -> float
        pass

    @torch.jit._overload  # noqa: F811
    def diff_num_params(x, y):  # noqa: F811
        # type: (int, int) -> int
        pass

    def diff_num_params(x, y=2, z=3):  # noqa: F811
        # type: (Union[float, int], int, int)
        return x + y + z

    def test():
        return diff_num_params(1.0), diff_num_params(1, 2), diff_num_params(1), diff_num_params(1, 2, 3)

    self.assertEqual(test(), torch.jit.script(test)())

    @torch.jit._overload  # noqa: F811
    def diff_num_params_no_annot():
        # type: () -> int
        pass

    def diff_num_params_no_annot(x=1):  # noqa: F811
        return x

    def test():
        return diff_num_params_no_annot(1.0)

    with self.assertRaisesRegex(Exception, "Parameters not specified"):
        torch.jit.script(test)
def test_function_overload_misuse(self):
    """Misuse of @_overload / @_overload_method: non-empty bodies and missing
    implementations produce clear errors."""
    with self.assertRaisesRegex(RuntimeError, "Only `pass` statement or `...` can be the body"):
        @torch.jit._overload
        def wrong_decl_body(x: str) -> str:
            return x + "0"

    with self.assertRaisesRegex(RuntimeError, "Only `pass` statement or `...` can be the body"):
        class MyClass:
            @torch.jit._overload_method
            def method(self):
                return 0

    # `...` is an acceptable overload body, just like `pass`.
    @torch.jit._overload
    def null_overload(x: int) -> int: ...  # noqa: E704

    @torch.jit._overload  # noqa: F811
    def null_overload(x: str) -> str:  # noqa: F811
        pass

    def null_overload_driver():
        return null_overload(0)

    with self.assertRaisesRegex(RuntimeError, 'Implementation for the function ".+" is missing.'):
        torch.jit.script(null_overload_driver)

    class OverloadMisuse(torch.nn.Module):
        def __init__(self):
            super().__init__()

        @torch.jit._overload_method
        def forward(self, x: int):
            pass

        @torch.jit._overload_method  # noqa: F811
        def forward(self, x: Tensor):  # noqa: F811
            pass

    with self.assertRaisesRegex(RuntimeError, 'Implementation for the method ".+" is missing.'):
        m = torch.jit.script(OverloadMisuse())
def test_script_method_torch_function_overload(self):
    """A Tensor subclass passes through a scripted module (both __call__ and
    explicit .forward) and produces the same result as a plain Tensor."""
    class MyCustomTensor(torch.Tensor):
        pass

    class MyCustomModule(torch.nn.Module):
        def forward(self, x):
            return torch.relu(x)

    scripted_mod = torch.jit.script(MyCustomModule())
    t = torch.tensor([3.0])
    ref_out = scripted_mod(t)

    t_custom = MyCustomTensor([3.0])
    out1 = scripted_mod(t_custom)
    self.assertEqual(out1, ref_out)

    out2 = scripted_mod.forward(t_custom)
    self.assertEqual(out2, ref_out)
def test_function_overloading_isinstance(self):
    """Overloads whose shared implementation branches on isinstance dispatch
    correctly for each annotated signature."""
    @torch.jit._overload  # noqa: F811
    def my_conv(x, y):  # noqa: F811
        # type: (float, str) -> (float)
        pass

    @torch.jit._overload  # noqa: F811
    def my_conv(x, y):  # noqa: F811
        # type: (float, float) -> (float)
        pass

    def my_conv(x, y=2.0):  # noqa: F811
        if isinstance(y, str):
            if y == "hi":
                return 4.0 - x
            else:
                return 5.0 - x
        else:
            return 2.0 + x

    def test_uses():
        return my_conv(1.5), my_conv(1.5, "hi"), my_conv(1.5, 5.0)

    self.checkScript(test_uses, ())
def test_method_overloading(self):
    """@_overload_method on nn.Module methods: dispatch by argument type, the
    requirement for annotations, and the restriction that overloads cannot
    coexist with already-compiled module types."""
    class Over(torch.nn.Module):
        def __init__(self):
            super(Over, self).__init__()

        @torch.jit._overload_method  # noqa: F811
        def forward(self, x):  # noqa: F811
            # type: (Tuple[Tensor, Tensor]) -> Tensor
            pass

        @torch.jit._overload_method  # noqa: F811
        def forward(self, x):  # noqa: F811
            # type: (Tensor) -> Tensor
            pass

        def forward(self, x):  # noqa: F811
            if isinstance(x, Tensor):
                return x + 20
            else:
                return x[0] + 5

    class S(torch.jit.ScriptModule):
        def __init__(self):
            super(S, self).__init__()
            self.weak = Over()

        @torch.jit.script_method
        def forward(self, x):
            return self.weak(x) + self.weak((x, x))

    s_mod = S()
    x = torch.ones(1)
    self.assertEqual(s_mod(x), x + 20 + 5 + x)

    over = Over()
    self.assertEqual(over((x, x)), x + 5)
    self.assertEqual(over((x)), x + 20)

    class Unannotated(torch.nn.Module):
        def __init__(self):
            super(Unannotated, self).__init__()

        @torch.jit._overload_method  # noqa: F811
        def hello(self, x):  # noqa: F811
            pass

        @torch.jit._overload_method  # noqa: F811
        def hello(self, x):  # noqa: F811
            # type: (int) -> (int)
            pass

        def hello(self, x):  # noqa: F811
            return x + 3

        def forward(self):
            return self.hello(1), self.hello(.5)

    w = Unannotated()
    with self.assertRaisesRegex(Exception, "explicitly add type annotations to overloaded functions"):
        torch.jit.script(w)

    class CompileOverloadError(torch.nn.Module):
        def __init__(self):
            super(CompileOverloadError, self).__init__()

        @torch.jit._overload_method  # noqa: F811
        def hello(self, x):  # noqa: F811
            # type: (str) -> (int)
            pass

        @torch.jit._overload_method  # noqa: F811
        def hello(self, x):  # noqa: F811
            # type: (int) -> (int)
            pass

        def hello(self, x):  # noqa: F811
            return x + 1

        def forward(self):
            return self.hello("hi"), self.hello(.5)

    w = CompileOverloadError()
    with self.assertRaisesRegex(Exception, "but instead found type \'str\'"):
        torch.jit.script(w)

    # testing overload declared first, then non-overload
    with self.assertRaisesRegex(Exception, "Overloads are not useable when a module"):
        class W3(torch.nn.Module):
            def __init__(self):
                super(W3, self).__init__()

            @torch.jit._overload_method  # noqa: F811
            def forward(self, x):  # noqa: F811
                # type: (int) -> int
                pass

            @torch.jit._overload_method  # noqa: F811
            def forward(self, x):  # noqa: F811
                # type: (Tensor) -> Tensor
                pass

            def forward(self, x):  # noqa: F811
                return x + 5

        a = W3()
        b = torch.jit.script(a)

        class W3(torch.nn.Module):
            def __init__(self):
                super(W3, self).__init__()

            def forward(self, x):  # noqa: F811
                return x + 5 + 10

        a = W3()
        b = torch.jit.script(a)

    # testing non-overload declared first, then overload
    class W2(torch.nn.Module):
        def __init__(self):
            super(W2, self).__init__()

        def hello(self, x1, x2):
            return x1 + x2

        def forward(self, x):
            return self.hello(x, x)

    a = torch.jit.script(W2())
    self.assertEqual(a(torch.tensor(1)), torch.tensor(2))

    class W2(torch.nn.Module):
        def __init__(self):
            super(W2, self).__init__()

        @torch.jit._overload_method  # noqa: F811
        def hello(self, x):  # noqa: F811
            pass

        @torch.jit._overload_method  # noqa: F811
        def hello(self, x):  # noqa: F811
            # type: (int) -> (int)
            pass

        def hello(self, x):  # noqa: F811
            return x + 5 + 10

        def forward(self, x):
            return self.hello(1), self.hello(x)

    with self.assertRaisesRegex(Exception, "Overloads are not useable when a module"):
        a = torch.jit.script(W2())
def test_narrow_copy(self):
    """Tensor.narrow_copy scripts to the same result as eager."""
    def narrow_fn(t):
        return t.narrow_copy(0, 0, 5)

    self.checkScript(narrow_fn, [torch.rand(10)])
def test_select_after_chunk(self):
    """An in-place add on a chunk view must be reflected in the source tensor."""
    def chunk_then_mutate(t):
        pieces = torch.chunk(t, 1)
        first = pieces[0]
        first.add_(5)
        return t

    self.checkScript(chunk_then_mutate, [torch.rand(2, 3)])
def test_nn_LSTM_with_layers(self):
    """A multi-layer nn.LSTM produces identical output scripted vs eager
    (RNG state saved around both runs)."""
    class M(torch.jit.ScriptModule):
        def __init__(self):
            super(M, self).__init__()
            self.rnn = nn.LSTM(2, 3, 2, dropout=0)

        @torch.jit.script_method
        def forward(self, x, lengths, h0, c0):
            return self.rnn(x, (h0, c0))[0]

    class Eager(torch.nn.Module):
        def __init__(self):
            super(Eager, self).__init__()
            self.rnn = nn.LSTM(2, 3, 2, dropout=0)

        def forward(self, x, lengths, h0, c0):
            return self.rnn(x, (h0, c0))[0]

    inputs = (torch.randn(1, 1, 2), torch.LongTensor([7]), torch.randn(2, 1, 3), torch.randn(2, 1, 3))
    eager_out = self.runAndSaveRNG(lambda: Eager()(*inputs), ())[0]
    script_out = self.runAndSaveRNG(lambda: M()(*inputs), ())[0]

    self.assertEqual(eager_out, script_out)
def test_nn_LSTM(self):
    """nn.LSTM over a PackedSequence input matches eager when scripted."""
    input = torch.nn.utils.rnn.pack_sequence([torch.randn(5, 5)])

    class S(torch.jit.ScriptModule):
        def __init__(self):
            super(S, self).__init__()
            self.x = torch.nn.LSTM(5, 5)

        @torch.jit.script_method
        def forward(self, input: PackedSequence) -> Tuple[PackedSequence, Tuple[torch.Tensor, torch.Tensor]]:
            return self.x(input)

    eager_out = self.runAndSaveRNG(lambda x: torch.nn.LSTM(5, 5)(x), (input,))[0]
    script_out = self.runAndSaveRNG(lambda x: S()(x), (input,))[0]

    self.assertEqual(eager_out, script_out)
def test_nn_GRU(self):
    """nn.GRU matches eager for both PackedSequence and plain Tensor inputs."""
    seq_input = torch.nn.utils.rnn.pack_sequence([torch.randn(5, 5)])
    tensor_input = torch.randn(5, 5, 5)

    class SeqLengthGRU(torch.jit.ScriptModule):
        def __init__(self):
            super(SeqLengthGRU, self).__init__()
            self.x = torch.nn.GRU(5, 5)

        @torch.jit.script_method
        def forward(self, input: PackedSequence) -> Tuple[PackedSequence, torch.Tensor]:
            return self.x(input)

    class TensorGRU(torch.jit.ScriptModule):
        def __init__(self):
            super(TensorGRU, self).__init__()
            self.x = torch.nn.GRU(5, 5)

        @torch.jit.script_method
        def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
            return self.x(input)

    seq_eager_out = self.runAndSaveRNG(lambda x: torch.nn.GRU(5, 5)(x), (seq_input,))[0]
    seq_script_out = self.runAndSaveRNG(lambda x: SeqLengthGRU()(x), (seq_input,))[0]
    tensor_eager_out = self.runAndSaveRNG(lambda x: torch.nn.GRU(5, 5)(x), (tensor_input,))[0]
    tensor_script_out = self.runAndSaveRNG(lambda x: TensorGRU()(x), (tensor_input,))[0]

    self.assertEqual(seq_eager_out, seq_script_out)
    self.assertEqual(tensor_eager_out, tensor_script_out)
def test_torchscript_memoryformat(self):
    """contiguous(memory_format=...) is honored inside TorchScript."""
    @torch.jit.script
    def fn(x):
        return x.contiguous(memory_format=torch.channels_last)

    inp = torch.randn(4, 3, 6, 6)
    out = fn(inp)
    self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
    def test_torchscript_multi_head_attn(self):
        """Scripted multi_head_attention_forward matches the eager functional call."""
        @torch.jit.script
        def jit_multihead_attn_forward(query,                   # type: Tensor
                                       key,                     # type: Tensor
                                       value,                   # type: Tensor
                                       embed_dim_to_check,      # type: int
                                       num_heads,               # type: int
                                       in_proj_weight,          # type: Tensor
                                       in_proj_bias,            # type: Tensor
                                       bias_k,                  # type: Optional[Tensor]
                                       bias_v,                  # type: Optional[Tensor]
                                       add_zero_attn,           # type: bool
                                       dropout,                 # type: float
                                       out_proj_weight,         # type: Tensor
                                       out_proj_bias,           # type: Tensor
                                       training=True,           # type: bool
                                       key_padding_mask=None,   # type: Optional[Tensor]
                                       need_weights=True,       # type: bool
                                       attn_mask=None           # type: Optional[Tensor]
                                       ):
            # type: (...) -> Tuple[Tensor, Optional[Tensor]]
            return torch.nn.functional.multi_head_attention_forward(query, key, value,
                                                                    embed_dim_to_check, num_heads,
                                                                    in_proj_weight, in_proj_bias,
                                                                    bias_k, bias_v,
                                                                    add_zero_attn, dropout,
                                                                    out_proj_weight, out_proj_bias,
                                                                    training, key_padding_mask,
                                                                    need_weights, attn_mask)

        src_l = 3
        bsz = 5
        embed_size = 8
        nhead = 2
        # An eager MultiheadAttention module supplies the weights/biases shared
        # by both the scripted and the eager functional call below.
        multi_head_attn = torch.nn.MultiheadAttention(embed_size, nhead)
        query = torch.rand((src_l, bsz, embed_size))
        key = torch.rand((src_l, bsz, embed_size))
        value = torch.rand((src_l, bsz, embed_size))
        # Causal-style additive mask: -inf above the diagonal, 0 elsewhere.
        mask = (torch.triu(torch.ones(src_l, src_l)) == 1).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)).double()

        jit_out = jit_multihead_attn_forward(query, key, value,
                                             embed_size, nhead,
                                             multi_head_attn.in_proj_weight,
                                             multi_head_attn.in_proj_bias,
                                             multi_head_attn.bias_k, multi_head_attn.bias_v,
                                             multi_head_attn.add_zero_attn, multi_head_attn.dropout,
                                             multi_head_attn.out_proj.weight,
                                             multi_head_attn.out_proj.bias, attn_mask=mask)[0]

        py_out = torch.nn.functional.multi_head_attention_forward(query, key, value,
                                                                  embed_size, nhead,
                                                                  multi_head_attn.in_proj_weight,
                                                                  multi_head_attn.in_proj_bias,
                                                                  multi_head_attn.bias_k,
                                                                  multi_head_attn.bias_v,
                                                                  multi_head_attn.add_zero_attn,
                                                                  multi_head_attn.dropout,
                                                                  multi_head_attn.out_proj.weight,
                                                                  multi_head_attn.out_proj.bias,
                                                                  attn_mask=mask)[0]
        # print("rel. error: ")
        # print(jit_out / py_out - 1)
        self.assertEqual(jit_out, py_out, atol=5e-4, rtol=1e-4)
    def test_torchscript_multi_head_attn_fast_path(self):
        """MultiheadAttention's inference fast path (eval + no_grad) matches after scripting."""
        src_l = 3
        bsz = 5
        embed_size = 8
        nhead = 2
        multi_head_attn = torch.nn.MultiheadAttention(embed_size, nhead, batch_first=True)
        # eval() + no_grad() below is what enables the fast-path code.
        multi_head_attn = multi_head_attn.eval()

        query = key = value = torch.rand((bsz, src_l, embed_size))

        with torch.no_grad():
            py_out = multi_head_attn(query, key, value)
            mha = torch.jit.script(multi_head_attn)
            jit_out = mha(query, key, value)
        torch.testing.assert_close(jit_out, py_out)
    @unittest.skipIf(not RUN_CUDA, "no CUDA")
    def test_scriptmodule_multi_head_attn_cuda(self):
        """A traced MultiheadAttention wrapped in a ScriptModule matches the functional path on CUDA."""
        class MyModule(torch.jit.ScriptModule):
            def __init__(self, embed_dim, num_heads):
                super(MyModule, self).__init__()
                sample_q = torch.randn(3, 2, embed_dim)
                sample_kv = torch.randn(3, 2, embed_dim)
                attention = nn.MultiheadAttention(embed_dim, num_heads)
                attention.eval()

                self.mod = torch.jit.trace(attention,
                                           (sample_q, sample_kv, sample_kv))

            @torch.jit.script_method
            def forward(self, q, k, v):
                return self.mod(q, k, v)

        embed_dim = 8
        num_heads = 2
        sl = 3
        bs = 2
        model = MyModule(embed_dim, num_heads).cuda()
        q = torch.randn(sl, bs, embed_dim, device="cuda")
        kv = torch.randn(sl, bs, embed_dim, device="cuda")

        jit_out = model(q, kv, kv)[0]
        # Reference uses the traced module's own weights so both paths share parameters.
        py_out = torch.nn.functional.multi_head_attention_forward(q, kv, kv,
                                                                  embed_dim, num_heads,
                                                                  model.mod.in_proj_weight,
                                                                  model.mod.in_proj_bias,
                                                                  None, None, None, 0.0,
                                                                  model.mod.out_proj.weight,
                                                                  model.mod.out_proj.bias)[0]
        self.assertEqual(jit_out, py_out, atol=5e-4, rtol=1e-4)
    @unittest.skipIf(not RUN_CUDA, "no CUDA")
    def test_scriptmodule_transformer_cuda(self):
        """A traced nn.Transformer wrapped in a ScriptModule matches the eager module."""
        class MyModule(torch.jit.ScriptModule):
            def __init__(self, transformer, sample_q, sample_kv):
                super(MyModule, self).__init__()
                transformer.eval()

                self.mod = torch.jit.trace(transformer,
                                           (sample_q, sample_kv))

            @torch.jit.script_method
            def forward(self, q, k):
                return self.mod(q, k)

        d_model = 8
        nhead = 2
        num_encoder_layers = 2
        num_decoder_layers = 2
        dim_feedforward = 16
        bsz = 2
        seq_length = 5
        tgt_length = 3

        src = torch.randn(seq_length, bsz, d_model)
        tgt = torch.randn(tgt_length, bsz, d_model)
        # dropout=0.0 so eager and scripted runs are deterministic and comparable.
        transformer = nn.Transformer(d_model, nhead, num_encoder_layers,
                                     num_decoder_layers, dim_feedforward, dropout=0.0)
        model = MyModule(transformer, tgt, src)

        # Fresh inputs (different from the trace examples) exercise the traced graph.
        src = torch.randn(seq_length, bsz, d_model)
        tgt = torch.randn(tgt_length, bsz, d_model)
        jit_out = model(tgt, src)
        py_out = transformer(tgt, src)

        # print(jit_out/py_out-1)
        # print(torch.allclose(jit_out, py_out, atol=5e-4, rtol=1e-4))
        self.assertEqual(jit_out, py_out, atol=5e-4, rtol=1e-4)
def test_list_python_op(self):
def python_list_op(lst):
# type: (List[Tensor]) -> Tensor
return lst[0]
def fn(lst):
# type: (List[Tensor]) -> Tensor
return python_list_op(lst)
self.checkScript(fn, ([torch.ones(2) + 2, torch.ones(2)],))
    @unittest.skipIf(not RUN_CUDA, "no CUDA")
    def test_weak_cuda(self):
        """A ScriptModule holding a CUDA LSTM submodule produces CUDA outputs."""
        class M(torch.jit.ScriptModule):
            def __init__(self):
                super(M, self).__init__()
                self.lstm = torch.nn.LSTM(5, 5)
                self.lstm.cuda()

            @torch.jit.script_method
            def forward(self, x):
                return self.lstm(x)

        m = M()
        m.cuda()
        out = m(torch.ones(5, 5, 5).cuda())
        self.assertTrue(out[0].is_cuda)
    def test_ignore_decorator(self):
        """@torch.jit.ignore(drop_on_export=True) warns, drops the method on export,
        and the reloaded module raises if the dropped method is invoked."""
        with warnings.catch_warnings(record=True) as warns:
            class M(torch.jit.ScriptModule):
                def __init__(self):
                    super(M, self).__init__()
                    tensor = torch.zeros(1, requires_grad=False)
                    self.register_buffer('some_state', torch.nn.Parameter(tensor))

                @torch.jit.script_method
                def forward(self, x):
                    self.ignored_code(x)
                    return x

                @torch.jit.ignore(drop_on_export=True)
                def ignored_code(self, x):
                    self.some_state = torch.tensor((100,))

        # Compiling M should have emitted the deprecation/drop warning.
        FileCheck().check("TorchScript will now drop the function").run(str(warns[0]))

        # Assert ignored code is run
        m = M()

        m2 = self.getExportImportCopy(m)
        pp = str(m2.forward.code)
        self.assertNotIn('ignored_code', pp)

        with self.assertRaisesRegex(torch.jit.Error, "annotated to be ignored and cannot be run"):
            m2.forward(torch.ones(1))
    def test_ignored_as_value(self):
        """@torch.jit.unused methods may appear in dead branches; calling one on a
        loaded module raises a torch.jit.Error."""
        class Model(nn.Module):
            def __init__(self):
                super(Model, self).__init__()

            @torch.jit.unused
            def tuple_ignored(self, x):
                # type: (Tensor) -> Tuple[Tensor, Tensor]
                return x, x

            @torch.jit.unused
            def single_val_ignored(self, x, y):
                # type: (Tensor, Tensor) -> Tensor
                return x

            def forward(self, x, use_ignore_path):
                # type: (Tensor, bool) -> Tuple[Tensor, Tensor]
                # `1 == 2` is statically false, so tuple_ignored is never reached.
                if 1 == 2:
                    return self.tuple_ignored(x)
                if use_ignore_path:
                    return self.single_val_ignored(x, x), self.single_val_ignored(x, x)
                return x, x

        original = Model()
        scripted = torch.jit.script(original)
        self.assertEqual(scripted(torch.tensor(.5), False), (torch.tensor(.5), torch.tensor(.5)))

        buffer = io.BytesIO()
        torch.jit.save(scripted, buffer)
        buffer.seek(0)
        loaded = torch.jit.load(buffer)

        with self.assertRaisesRegex(torch.jit.Error, "annotated to be ignored and cannot be run"):
            loaded(torch.tensor(.5), True)
def test_module_error(self):
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
def forward(self, foo):
return foo
with self.assertRaisesRegex(RuntimeError, "cannot be compiled since it inherits from nn.Module"):
torch.jit.script(MyModule)
def test_view_write(self):
def fn(x, y):
l = []
l.append(x)
x_view = l[0]
a = x + x
x_view.add_(y)
b = x + x
return a == b
self.checkScript(fn, (torch.rand(2, 3), torch.rand(2, 3)))
    def test_module_attrs(self):
        """A Dict[str, Tensor] Attribute plus a Parameter are usable from a script method."""
        class M(torch.jit.ScriptModule):
            def __init__(self, table):
                super(M, self).__init__()
                self.table = torch.jit.Attribute(table, Dict[str, torch.Tensor])
                self.x = torch.nn.Parameter(torch.tensor([100.0]))

            @torch.jit.script_method
            def forward(self, key):
                # type: (str) -> Tensor
                return self.table[key] + self.x

        with torch._jit_internal._disable_emit_hooks():
            # TODO: re-enable module hook when Python printing of attributes is
            # supported
            m = M({char : torch.ones(1) + ord(char) - ord("a") for char in "abcdefg"})
            # 'c' maps to 1 + 2 = 3, plus the parameter 100 -> 103.
            self.assertEqual(m("c"), torch.tensor([103.]))
    def test_module_none_attrs(self):
        """The peephole pass folds GetAttr on an attribute that is statically None."""
        class MyMod(torch.jit.ScriptModule):
            def __init__(self):
                super(MyMod, self).__init__()
                self.optional_value = None

            @torch.jit.script_method
            def forward(self):
                return self.optional_value

        graph = MyMod().forward.graph
        # Before the pass the attribute read is present ...
        FileCheck().check("prim::GetAttr").run(graph)
        self.run_pass('peephole', graph)
        # ... and after it the constant-None access has been eliminated.
        FileCheck().check_not("prim::GetAttr").run(graph)
    def test_tensor_import_export(self):
        """Constant tensors in a graph survive constant propagation and export/import."""
        @torch.jit.script
        def foo(x):
            a = torch.tensor(1)
            b = torch.tensor([1, 2])
            c = [a, b]
            return c

        self.run_pass('constant_propagation', foo.graph)
        m = self.createFunctionFromGraph(foo.graph)
        # Round-trip through serialization must not fail on the embedded constants.
        self.getExportImportCopy(m)
def get_pickle_values(self):
return (('dict', {"I": "am", "a test": "test"}, Dict[str, str]),
('float', 2.3, float),
('int', 99, int),
('bool', False, bool),
('tuple', (1, 2, 3, 4), Tuple[int, int, int, int]),
('list', [(1, 2), (3, 4)], List[Tuple[int, int]]),
('tensor', torch.randn(2, 2), torch.Tensor),
('int_list', [1, 2, 3, 4], List[int]),
('tensor_list', [torch.ones(2, 2) + i for i in range(4)], List[torch.Tensor]),
('bool_list', [True, True, False, True], List[bool]),
('float_list', [1., 2., 3., 4.], List[float]),
('str_list', ['hello', 'bye'], List[str]),
('none', None, Optional[int]),
('a_device', torch.device('cpu'), torch.device),
('another_device', torch.device('cuda:1'), torch.device))
    def test_attribute_serialization(self):
        """All pickle-able attribute types round-trip through module export/import."""
        tester = self

        class M(torch.jit.ScriptModule):
            def __init__(self):
                super(M, self).__init__()
                # Attach one attribute per (name, value, type) triple.
                for name, value, the_type in tester.get_pickle_values():
                    setattr(self, name, torch.jit.Attribute(value, the_type))

            @torch.jit.script_method
            def forward(self):
                return (self.dict, self.float, self.int, self.bool, self.tuple,
                        self.list, self.int_list, self.tensor_list, self.bool_list,
                        self.float_list, self.str_list, self.none)

        m = M()
        imported_m = self.getExportImportCopy(m)
        self.assertEqual(m(), imported_m())
def test_string_len(self):
def fn(x):
# type: (str) -> int
return len(x)
self.checkScript(fn, ("",))
self.checkScript(fn, ("h",))
self.checkScript(fn, ("hello",))
    def test_multiline_optional_future_refinement(self):
        """A multiline Optional[Future[...]] annotation parses and compiles."""
        @torch.jit.script
        def fun() -> int:
            future: Optional[
                torch.jit.Future[Tuple[torch.Tensor]]
            ] = None
            return 1
        self.assertEqual(fun(), 1)
    @unittest.skipIf(IS_WINDOWS or IS_SANDCASTLE, "NYI: TemporaryFileName support for Windows or Sandcastle")
    def test_attribute_unpickling(self):
        """Non-tensor attributes restored by torch.jit.load compare equal to the originals."""
        tensor = torch.randn(2, 2)
        tester = self

        class M(torch.jit.ScriptModule):
            def __init__(self):
                super(M, self).__init__()
                for name, value, the_type in tester.get_pickle_values():
                    setattr(self, "_" + name, torch.jit.Attribute(value, the_type))

            @torch.jit.script_method
            def forward(self):
                return (self._dict, self._float, self._int, self._bool, self._tuple,
                        self._list, self._int_list, self._tensor_list, self._bool_list,
                        self._float_list, self._str_list, self._none)

        with TemporaryFileName() as fname:
            M().save(fname)
            loaded = torch.jit.load(fname)

            def is_tensor_value(item):
                # Random tensors can't be compared against re-generated values,
                # so tensor-valued entries (or lists of them) are skipped below.
                if isinstance(item, torch.Tensor):
                    return True
                if isinstance(item, list):
                    return is_tensor_value(item[0])
                return False
            for name, value, the_type in self.get_pickle_values():
                if is_tensor_value(value):
                    continue
                self.assertEqual(value, getattr(loaded, "_" + name))
    @unittest.skipIf(IS_WINDOWS or IS_SANDCASTLE, "NYI: TemporaryFileName support for Windows or Sandcastle")
    @unittest.skipIf(not BUILD_WITH_CAFFE2, "PyTorch is build without Caffe2 support")
    def test_old_models_bc(self):
        """Backward compatibility: a hand-built legacy (model.json-era) archive still loads and runs."""
        # Raw bytes of each member of the legacy zip archive format.
        model = {
            'archive/version': b'1',
            'archive/code/archive.py':
                b'''
                op_version_set = 0
                def forward(self,
                    _0: Tensor) -> Tensor:
                  _1 = torch.zeros([10], dtype=6, layout=0, device=torch.device("cpu"))
                  result = torch.to(torch.fill_(_1, 5), dtype=6, layout=0, device=torch.device("cpu"),
                                    non_blocking=False, copy=False)
                  result2 = torch.rand([10], dtype=6, layout=0, device=torch.device("cpu"))
                  result3 = torch.rand_like(result2, dtype=6, layout=0, device=torch.device("cpu"))
                  _2 = torch.add(torch.add(result, result2, alpha=1), result3, alpha=1)
                  return _2
                ''',
            'archive/attributes.pkl': b'\x80\x02](e.',
            'archive/libs.py': b'op_version_set = 0\n',
            'archive/model.json':
                b'''
                {
                   "protoVersion":"2",
                   "mainModule":{
                      "torchscriptArena":{
                         "key":"code/archive.py"
                      },
                      "name":"archive",
                      "optimize":true
                   },
                   "producerName":"pytorch",
                   "producerVersion":"1.0",
                   "libs":{
                      "torchscriptArena":{
                         "key":"libs.py"
                      }
                   }
                }'''}
        with TemporaryFileName() as fname:
            archive_name = os.path.basename(os.path.normpath(fname))
            with zipfile.ZipFile(fname, 'w') as archive:
                for k, v in model.items():
                    archive.writestr(k, v)
            with open(fname, "rb") as f:
                fn = torch.jit.load(f)
        x = torch.zeros(10)
        fn(x)
    def test_submodule_attribute_serialization(self):
        """Attributes on nested submodules round-trip through export/import."""
        class S(torch.jit.ScriptModule):
            def __init__(self, list_data):
                super(S, self).__init__()
                self.table = torch.jit.Attribute({"I": "am", "a test": "test"}, Dict[str, str])
                self.list = torch.jit.Attribute(list_data, List[Tuple[int, int]])

            @torch.jit.script_method
            def forward(self):
                return (self.table, self.list)

        class M(torch.jit.ScriptModule):
            def __init__(self):
                super(M, self).__init__()
                self.table = torch.jit.Attribute({"this": "is", "a different": "dict"}, Dict[str, str])
                self.tensor = torch.jit.Attribute(torch.randn(2, 2), torch.Tensor)
                self.s1 = S([(1, 2)])
                self.s2 = S([(4, 5)])

            @torch.jit.script_method
            def forward(self):
                return (self.table, self.tensor, self.s1.table, self.s2.list, self.s1.list)

        m = M()
        imported_m = self.getExportImportCopy(m)
        self.assertEqual(m(), imported_m())
    def test_serialization_big_ints(self):
        """Int attributes at the int32/int64 extremes (and beyond int32) survive serialization."""
        class M(torch.jit.ScriptModule):
            def __init__(self):
                super(M, self).__init__()
                self.int32_max = torch.jit.Attribute(2**31 - 1, int)
                self.int32_min = torch.jit.Attribute(-2**31, int)
                # 2**32 deliberately does not fit in a signed 32-bit int.
                self.uint32_max = torch.jit.Attribute(2**32, int)

                self.int64_max = torch.jit.Attribute(2**63 - 1, int)
                self.int64_min = torch.jit.Attribute(-2**63, int)

                self.tensor = torch.nn.Parameter(torch.ones(2, 2))

            @torch.jit.script_method
            def forward(self, x):
                # type: (int) -> (int)
                return x + (self.int32_max + self.int32_min) + (self.int64_max + self.int64_min)

        m = M()
        imported = self.getExportImportCopy(m)
        self.assertEqual(m(10), imported(10))

        self.assertEqual(m.int32_max, imported.int32_max)
        self.assertEqual(m.int32_min, imported.int32_min)
        self.assertEqual(m.uint32_max, imported.uint32_max)
        self.assertEqual(m.int64_max, imported.int64_max)
        self.assertEqual(m.int64_min, imported.int64_min)
def test_script_scope(self):
scripted = torch.jit.script(torch.nn.functional.triplet_margin_loss)
@unittest.skipIf(IS_WINDOWS, "NYI: TemporaryFileName on Windows")
def test_serialization_sharing(self):
class M(torch.jit.ScriptModule):
def __init__(self):
super(M, self).__init__()
self.list = torch.jit.Attribute([], List[str])
@torch.jit.script_method
def forward(self, key):
# type: (str) -> List[str]
self.list.append(key)
self.list.append(key)
self.list.append(key)
return self.list
# the text of the string should only appear once in the pickling
m = M()
s1 = "a long string"
s2 = "a different, even longer string"
self.assertEqual(m(s1), [s1] * 3)
self.assertEqual(m(s2), [s1] * 3 + [s2] * 3)
with TemporaryFileName() as fname:
m.save(fname)
archive_name = os.path.basename(os.path.normpath(fname))
archive = zipfile.ZipFile(fname, 'r')
pickled_data = archive.read(os.path.join(archive_name, 'data.pkl'))
out = io.StringIO()
pickletools.dis(pickled_data, out=out)
disassembled = out.getvalue()
FileCheck().check_count(s1, 1, exactly=True) \
.check_count("BINGET", 2, exactly=True) \
.check_count(s2, 1, exactly=True) \
.check_count("BINGET", 2, exactly=True).run(out.getvalue())
def test_sys_stdout_override(self):
@torch.jit.script
def foo():
print('foo')
class Redirect(object):
def __init__(self):
self.s = ''
def write(self, s):
self.s += s
old_stdout = sys.stdout
redirect = Redirect()
try:
sys.stdout = redirect
foo()
finally:
sys.stdout = old_stdout
FileCheck().check('foo').run(redirect.s)
def test_dtype_attr(self):
class Foo(torch.nn.Module):
def __init__(self):
super(Foo, self).__init__()
self.dtype = torch.zeros([]).dtype
def forward(self):
return torch.zeros(3, 4, dtype=self.dtype)
f = Foo()
torch.jit.script(f)
    def test_named_buffers_are_iterable(self):
        """named_buffers() is iterable from an exported script method and matches eager;
        the plain attribute `z` (not a registered buffer) must never appear."""
        class MyMod(torch.nn.Module):
            def __init__(self):
                super(MyMod, self).__init__()
                self.mod = (torch.nn.ReLU())
                self.mod2 = (torch.nn.ReLU())
                self.mod3 = torch.nn.Sequential(torch.nn.Sequential(torch.nn.ReLU()))
                self.register_buffer('x', torch.zeros(3))
                self.register_buffer('y', torch.zeros(3))
                self.z = torch.zeros(3)

            def bleh(self):
                return self.z + 4

            @torch.jit.export
            def method(self):
                names = [""]
                vals = []
                for name, buffer in self.named_buffers():
                    names.append(name)
                    vals.append(buffer + 2)

                return names, vals

            def forward(self, x):
                return x

        model = MyMod()
        x = torch.jit.script(model)
        z = self.getExportImportCopy(x)

        self.assertEqual(z.method(), x.method())
        self.assertEqual(z.method(), model.method())
        self.assertEqual(x.method(), model.method())
        # NOTE(review): method() returns a (names, vals) pair, so this loop
        # iterates over the two lists, not over the buffer names — presumably
        # `names, _ = x.method()` was intended; verify before tightening.
        names = x.method()
        for name in names:
            self.assertNotEqual('z', name)
    def test_static_if_prop(self):
        """hasattr() on module attributes is resolved statically by the compiler,
        including inside and/or refinements, for methods, and for script classes."""
        class MaybeHasAttr(torch.nn.Module):
            def __init__(self, add_attr):
                super(MaybeHasAttr, self).__init__()
                if add_attr:
                    self.maybe_attr = 1

            def forward(self):
                if hasattr(self, "maybe_attr") and True:
                    return self.maybe_attr
                else:
                    return 0

        class MaybeHasAttr2(torch.nn.Module):
            def __init__(self, add_attr):
                super(MaybeHasAttr2, self).__init__()
                if add_attr:
                    self.maybe_attr = 1

            def forward(self):
                if not hasattr(self, "maybe_attr") or False:
                    return 0
                else:
                    return self.maybe_attr

        # Both attribute-present and attribute-absent instances must compile.
        torch.jit.script(MaybeHasAttr(True))
        torch.jit.script(MaybeHasAttr(False))
        torch.jit.script(MaybeHasAttr2(True))
        torch.jit.script(MaybeHasAttr2(False))

        class MyMod(torch.nn.Module):
            def forward(self):
                if hasattr(self, "foo"):
                    return 1
                else:
                    return 0

            @torch.jit.export
            def fee(self):
                return 1

        self.checkModule(MyMod(), ())

        class HasAttrMod(torch.nn.Module):
            __constants__ = ["fee"]

            def __init__(self):
                super().__init__()
                self.fee = 3

            def forward(self):
                a = hasattr(self, "fee")
                b = hasattr(self, "foo")
                c = hasattr(self, "hi")
                d = hasattr(self, "nonexistant")
                return (a, b, c, d)

            def foo(self):
                return 1

            @torch.jit._overload_method
            def hi(self, x: Tensor): ...  # noqa: E704

            def hi(self, x):  # noqa: F811
                return 2

        self.checkModule(HasAttrMod(), ())

        @torch.jit.script
        class FooTest(object):
            def __init__(self):
                self.x = 1

            def foo(self, y):
                return self.x + y

        def foo():
            a = FooTest()
            val1 = hasattr(a, "foo"), hasattr(a, "x"), hasattr(a, "bla")
            val2 = hasattr(FooTest, "foo"), hasattr(FooTest, "a")
            return val1, val2

        self.assertEqual(foo(), torch.jit.script(foo)())
def _test_pickle_checkpoint(self, device):
with TemporaryFileName() as fname:
class M(torch.jit.ScriptModule):
__constants__ = ['fname']
def __init__(self, tensor):
super(M, self).__init__()
self.fname = fname
self.tensor = torch.nn.Parameter(tensor)
@torch.jit.script_method
def forward(self, x):
y = self.tensor + x
torch.save(y, self.fname)
return y
param = torch.randn(2, 2).to(device)
input = torch.randn(2, 2).to(device)
m = M(param)
m(input)
with open(fname, "rb") as handle:
loaded_tensor = torch.load(fname)
self.assertEqual(loaded_tensor, input + param)
def _test_pickle_checkpoint_views(self, device):
with TemporaryFileName() as fname:
class M(torch.jit.ScriptModule):
__constants__ = ['fname']
def __init__(self, tensor):
super(M, self).__init__()
self.fname = fname
self.tensor = torch.nn.Parameter(tensor)
@torch.jit.script_method
def forward(self, x):
y = self.tensor + x
y_view = y.view(4)
torch.save((y, y_view, y), self.fname)
return y
param = torch.randn(2, 2).to(device)
input = torch.randn(2, 2).to(device)
m = M(param)
m(input)
with open(fname, "rb") as handle:
loaded_y, loaded_y_view, loaded_y_2 = torch.load(fname)
self.assertEqual(loaded_y, input + param)
with torch.no_grad():
loaded_y_view[1] += 20
# assert that loaded_y changed as well
self.assertEqual(loaded_y.view(4), loaded_y_view)
self.assertEqual(loaded_y_2.view(4), loaded_y_view)
@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_pickle_checkpoint_cuda(self):
self._test_pickle_checkpoint('cuda')
self._test_pickle_checkpoint_views('cuda')
def test_pickle_checkpoint(self):
self._test_pickle_checkpoint('cpu')
self._test_pickle_checkpoint_views('cpu')
    def test_pickle_checkpoint_tup(self):
        """torch.save of a tuple from inside scripted code round-trips via torch.load."""
        @torch.jit.script
        def foo(fname):
            # type: (str) -> None
            torch.save((3, 4), fname)

        with TemporaryFileName() as name:
            foo(name)
            self.assertEqual(torch.load(name), (3, 4))
def test_string_list(self):
def fn(string):
# type: (str) -> List[str]
return list(string)
self.checkScript(fn, ("abcdefgh",))
    def test_unicode_comments(self):
        """The script frontend must tolerate non-ASCII (emoji) source comments."""
        @torch.jit.script
        def test(self, a):
            # 🤷🤷🤷🤷
            return torch.nn.functional.relu(a)
    def test_get_set_state_with_tensors(self):
        """Exported __getstate__/__setstate__ carrying a tensor survive save/load."""
        class M(torch.nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.tensor = torch.randn(2, 2)

            @torch.jit.export
            def __getstate__(self):
                return (self.tensor, self.training)

            @torch.jit.export
            def __setstate__(self, state):
                self.tensor = state[0]
                self.training = state[1]

            def forward(self, x):
                return x + self.tensor

        with TemporaryFileName() as fname:
            m = torch.jit.script(M())
            m.save(fname)
            loaded = torch.jit.load(fname)
            self.assertEqual(loaded.tensor, m.tensor)
def test_in_for_and_comp_expr(self):
def fn(d):
# type: (Dict[str, int]) -> List[int]
out = [1]
for i in range(d["hi"] if "hi" in d else 6):
out.append(i)
return out
self.checkScript(fn, ({'hi': 2, 'bye': 3},))
self.checkScript(fn, ({'bye': 3},))
    def test_for_else(self):
        """for...else is unsupported in TorchScript and must raise NotSupportedError."""
        def fn():
            c = 0
            for i in range(4):
                c += 10
            else:
                print("In else block of for...else")

        with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError, "else branches of for loops aren't supported"):
            torch.jit.script(fn)
def test_split(self):
def split_two(tensor):
a, b, c = torch.split(tensor, 2, dim=1)
return a, b, c
x = torch.randn(3, 6)
y = torch.randn(3, 6)
self.checkScript(split_two, [(x + y)])
def test_conv_error(self):
@torch.jit.script
def fn(x, y):
return F.conv2d(x, y)
try:
fn(torch.ones(2, 2), torch.ones(4, 4))
except RuntimeError as e:
self.assertFalse('frame' in str(e))
    def test_python_op_name(self):
        """The error for an uncompilable Python call names the offending function."""
        import random

        with self.assertRaisesRegex(RuntimeError, "randint"):
            @torch.jit.script
            def fn():
                return random.randint()
def test_dir(self):
class M(torch.jit.ScriptModule):
def forward(self, t):
return t
self.assertTrue('forward' in dir(M()))
    def test_kwarg_expansion_error(self):
        """Calling a function with **kwargs expansion is rejected by the frontend."""
        @torch.jit.ignore
        def something_else(h, i):
            pass

        def fn(x):
            something_else(**x)

        with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError, "keyword-arg expansion is not supported"):
            torch.jit.script(fn)
    def test_kwargs_error_msg(self):
        """Calling into **kwargs / *args callees produces a 'variable number' error."""
        def other(**kwargs):
            print(kwargs)

        def fn():
            return other()

        with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError, 'variable number'):
            torch.jit.script(fn)

        def another_other(*args):
            print(args)

        def another_fn():
            return another_other()

        with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError, 'variable number'):
            torch.jit.script(another_fn)
    def test_inferred_error_msg(self):
        """
        Test that when we get a type mismatch on a function where we inferred
        the type to be tensor, a good error message is given.
        """
        @torch.jit.script
        def foo(a):
            return a

        with self.assertRaisesRegex(RuntimeError, (r"Expected a value of type \'Tensor \(inferred\)\'"
                                                   r"[\S\s]*Inferred \'a\' to be of type \'Tensor\'")):
            foo("1")
    def test_type_comments_in_body(self):
        """Stray `# type:` comments inside a body or __init__ must not confuse parsing."""
        @torch.jit.script
        def foo(a,  # type: int
                b,  # type: int
                ):
            # type: (...) -> int
            # type: int
            return a + b

        class M(torch.nn.Module):
            def __init__(self,
                         a,  # type: int
                         b   # type: int
                         ):
                # type: (...) -> None
                super(M, self).__init__()
                self.a = a  # type: int
                self.b = b  # type: int

        torch.jit.script(M(2, 3))
def test_input_keyword_in_schema(self):
def f(x):
return torch.ceil(input=x)
inp = torch.randn(10)
self.checkScript(f, (inp, ))
    def test_module_method_reassignment(self):
        """A forward assigned via class-level aliasing (forward = _forward) still scripts."""
        class Foo(torch.nn.Module):
            def __init__(self):
                super().__init__()

            def _forward(self, x):
                return x

            forward = _forward

        sm = torch.jit.script(Foo())
        input = torch.ones(2, 2)
        self.assertEqual(input, sm(input))
# Tests the case where a torch.Tensor subclass (like Parameter) is used as
# input.
    def test_script_module_tensor_subclass_argument(self):
        """A parameter annotated as torch.nn.Parameter accepts a plain Tensor argument."""
        @torch.jit.script
        def parameter_script(x: torch.nn.Parameter):
            return x

        input = torch.ones(2, 2)
        self.assertEqual(input, parameter_script(input))
    def test_save_load_attr_error(self):
        """Accessing a nonexistent attribute on a loaded submodule fails with a clear error."""
        class Inner(nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, x):
                return x

        class Wrapper(nn.Module):
            def __init__(self, inner):
                super().__init__()
                self.inner = inner

            def forward(self, x):
                # this attribute doesn't exist on `Inner`
                return self.inner.b(x)

        inner_module = torch.jit.script(Inner())
        inner_module = self.getExportImportCopy(inner_module)
        wrapped = Wrapper(inner_module)
        # This should properly complain that `self.inner` doesn't have the attribute `b`
        with self.assertRaisesRegex(RuntimeError, 'has no attribute'):
            torch.jit.script(wrapped)
    def test_rescripting_loaded_modules(self):
        """A loaded script module can be re-scripted as a submodule; its buffers,
        parameters, plain attributes and constants remain accessible."""
        class InnerSubmod(nn.Module):
            __constants__ = ['my_constant']

            def __init__(self):
                super().__init__()
                self.register_buffer("foo", torch.ones(1))
                self.register_parameter("bar", torch.nn.Parameter(torch.ones(1)))
                self.baz = torch.ones(1)
                self.my_constant = 1

            def forward(self, x):
                return x + x

        class Inner(nn.Module):
            def __init__(self):
                super().__init__()
                self.submod = InnerSubmod()

            def forward(self, x):
                return self.submod(x)

        class Wrapper(nn.Module):
            def __init__(self, inner):
                super().__init__()
                self.inner = inner

            def forward(self, x):
                # access inner elements
                ret = self.inner.submod(x) + self.inner.submod.foo + self.inner.submod.bar + self.inner.submod.baz
                ret = ret + self.inner.submod.my_constant
                return ret

        inner_module = torch.jit.script(Inner())
        wrapped = Wrapper(inner_module)
        self.checkModule(wrapped, torch.ones(1))

        inner_module_loaded = self.getExportImportCopy(inner_module)
        wrapped_loaded = Wrapper(inner_module_loaded)
        self.assertEqual(wrapped(torch.ones(1)), wrapped_loaded(torch.ones(1)))
def test_interpret_graph(self):
def fn(x):
return x.unfold(0, 1, 1)
graph_str = """
graph(%a : Tensor, %b : Tensor):
%c : Tensor = aten::mul(%a, %b)
return (%c)
"""
graph = parse_ir(graph_str)
a = torch.rand(10)
b = torch.rand(10)
test = torch._C._jit_interpret_graph(graph, (a, b))
ref = a * b
self.assertEqual(test, ref)
def test_signed_float_zero(self):
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
def forward(self, x):
return torch.div(x, -0.)
inp = torch.ones(1)
self.checkModule(MyModule(), inp)
def test_index_with_tuple(self):
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
def forward(self, x):
return x[(1,)]
self.checkModule(MyModule(), (torch.ones(2, 3),))
def test_context_manager(self):
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
def forward(self, x, y):
p = x + y
q = p + 2.0
return q
x = torch.randn(3, 2, dtype=torch.float)
y = torch.randn(3, 2, dtype=torch.float)
for fuser_name in ['fuser0', 'fuser1', 'none']:
with torch.jit.fuser(fuser_name):
self.checkModule(MyModule(), (x, y))
# known to be failing in tracer
# Test names listed here are skipped when run through torch.jit.trace.
EXCLUDE_TRACED = {
    # The following fail due to #12024.
    # A prim::ListConstruct is involved and the indices get traced as TensorType,
    # which always require_grad. This causes a crash in autodiff.
    'test___getitem___adv_index',
    'test___getitem___adv_index_beg',
    'test___getitem___adv_index_comb',
    'test___getitem___adv_index_dup',
    'test___getitem___adv_index_sub',
    'test___getitem___adv_index_sub_2',
    'test___getitem___adv_index_sub_3',
    'test___getitem___adv_index_var',

    # jit doesn't support sparse tensors.
    'test_to_sparse',
    'test_to_sparse_dim',
}
# Test names whose outputs are not type-checked against reference outputs.
EXCLUDE_TYPE_CHECK = {
    # slogdet tests use itemgetter to select its only differentiable output,
    # but this happens outside of the graph we handle, so there are fewer
    # reference outputs than graph outputs.
    'test_slogdet_1x1_neg_det',
    'test_slogdet_1x1_pos_det',
    'test_slogdet_distinct_singular_values',
    'test_slogdet_neg_det',
    'test_slogdet_pos_det',
    'test_slogdet_symmetric',
    'test_slogdet_symmetric_pd',
    'test_slogdet_batched_1x1_neg_det',
    'test_slogdet_batched_pos_det',
    'test_slogdet_batched_symmetric',
    'test_slogdet_batched_symmetric_pd',
    'test_slogdet_batched_distinct_singular_values'
}
# chunk returns a list in scripting and we don't unpack the list,
# Thus it won't be replaced by ConstantChunk and run AD.
# It's explicitly checked in test_chunk_constant_script_ad
# Similary for split, it's replaced by split_with_sizes in tracing,
# but we don't have AD formula for aten::split(Tensor, int[], int),
# an op registered in JIT so AD is not triggered in scripting.
EXCLUDE_SCRIPT_AD_CHECK = {
    'test_chunk',
    'test_chunk_dim',
    'test_chunk_dim_neg0',
    'test_split_size_list',
    'test_split_size_list_dim',
    'test_split_size_list_dim_neg0',
    'test_tensor_indices_sections',
    'test_tensor_indices_sections_dim',
    'test_tensor_indices_sections_dim_neg0',
    'test_tensor_split_sections',
    'test_tensor_split_sections_dim',
    'test_tensor_split_sections_dim_neg0'
}
# Test names skipped for the Python-printing (code round-trip) check.
EXCLUDE_PYTHON_PRINT = {
    # no support for BroadcastingList in python printer
    'test_nn_max_unpool1d',
    'test_nn_max_unpool2d',
    'test_nn_max_unpool3d',
    'test_nn_max_pool1d',
    'test_nn_max_pool2d',
    'test_nn_max_pool3d',
    'test_nn_max_pool1d_with_indices',
}
# Operator names (not test names) excluded from generated alias tests.
EXCLUDE_ALIAS = {
    # aliases, which may appear in method_tests but are tested elsewhere
    'true_divide',

    # Disable tests for lu from common_methods_invocations.py
    # TODO(@nikitaved) Enable jit tests once autograd.Function does support scripting
    'lu'
}
class TestJitGeneratedModule(JitTestCase):
    """Empty shell: test methods are attached dynamically by add_nn_module_test()."""
    pass
class TestJitGeneratedFunctional(JitTestCase):
    """Empty shell: functional test methods are attached dynamically at import time."""
    pass
# UBSAN per-function exclusions don't seem to work with OpenMP pragmas,
# and we have to disable the failing tests here instead.
UBSAN_DISABLED_TESTS = [
    "test___rdiv___constant",
    "test___rdiv___scalar_constant",
    "test_addcdiv",
    "test_addcdiv_broadcast_all",
    "test_addcdiv_broadcast_rhs",
    "test_addcdiv_scalar",
    "test_addcdiv_scalar_broadcast_lhs",
    "test_addcdiv_scalar_broadcast_rhs",
    "test_addcdiv_scalar_scale",
    "test_addcdiv_scalar_scale_broadcast_lhs",
    "test_addcdiv_scalar_scale_broadcast_rhs",
    "test_addcdiv_scale",
    "test_addcdiv_scale_broadcast_all",
    "test_addcdiv_scale_broadcast_rhs",
    "test_add_broadcast_all",
    "test_add_broadcast_lhs",
    "test_add_broadcast_rhs",
    "test_add_constant",
    "test_add_scalar",
    "test_add_scalar_broadcast_lhs",
    "test_add_scalar_broadcast_rhs",
    "test_div",
    "test_div_broadcast_all",
    "test_div_broadcast_lhs",
    "test_div_broadcast_rhs",
    "test_div_scalar",
    "test_div_scalar_broadcast_lhs",
    "test_div_scalar_broadcast_rhs",
    "test_rsqrt",
    "test_rsqrt_scalar",
    "test_add",
    "test_reciprocal",
    "test_reciprocal_scalar",
]
# Canonical tensor dimension sizes (large / medium / small) used by the
# generated module and functional tests below.
L = 20
M = 10
S = 5
def add_nn_module_test(*args, **kwargs):
    """Generate one JIT test for an nn module described by common_nn-style kwargs
    and attach it to TestJitGeneratedModule.

    The generated test builds the module both as a plain Python nn.Module and
    wrapped in a ScriptModule, then compares them via check_against_reference().
    """
    no_grad = False if 'no_grad' not in kwargs else kwargs['no_grad']

    if 'desc' in kwargs and 'eval' in kwargs['desc']:
        # eval() is not supported, so skip these tests
        return

    test_name = get_nn_mod_test_name(**kwargs)

    @suppress_warnings
    def do_test(self):
        if test_name in EXCLUDE_SCRIPT_MODULES:
            return
        if not kwargs.get('check_jit', True):
            raise unittest.SkipTest('module test skipped on JIT')

        module_name = get_nn_module_name_from_kwargs(**kwargs)

        if 'constructor' in kwargs:
            nn_module = kwargs['constructor']
        else:
            nn_module = getattr(torch.nn, module_name)

        if "FunctionalModule" in str(nn_module):
            return

        if 'constructor_args_fn' in kwargs:
            constructor_args = kwargs['constructor_args_fn']()
        else:
            constructor_args = kwargs.get('constructor_args', ())

        def create_script_module(*args, **kwargs):
            """Construct a script module that passes arguments through to self.submodule"""
            formals, tensors, actuals = get_script_args(args)

            method_args = ', '.join(['self'] + actuals)
            call_args_str = ', '.join(actuals)
            call = "self.submodule({})".format(call_args_str)
            script = script_method_template.format(method_args, call)

            submodule_constants = []
            if kwargs.get('is_constant'):
                submodule_constants = ['submodule']

            # Create module to use the script method
            class TheModule(torch.jit.ScriptModule):
                __constants__ = submodule_constants

                def __init__(self):
                    super(TheModule, self).__init__()
                    self.submodule = nn_module(*constructor_args)

            def make_module(script):
                module = TheModule()
                # check __repr__
                str(module)
                module.define(script)
                return module

            module = make_module(script)
            self.assertExportImportModule(module, tensors)
            create_script_module.last_graph = module.graph
            mod = module(*args)
            return mod

        # Construct a normal nn module to stay consistent with create_script_module
        # and make use of a single global rng_state in module initialization
        def create_nn_module(*args, **kwargs):
            module = nn_module(*constructor_args)
            return module(*args)

        # Set up inputs from tuple of sizes or constructor fn
        dtype = torch.double
        if 'input_fn' in kwargs:
            input = kwargs['input_fn']()
            if isinstance(input, Tensor):
                input = (input,)

            if all(tensor.is_complex() for tensor in input):
                dtype = torch.cdouble
        else:
            input = (kwargs['input_size'],)

        # Extra target tensors appended after the primary input, depending on
        # how the test description specifies them.
        if 'target_size' in kwargs:
            input = input + (kwargs['target_size'],)
        elif 'target_fn' in kwargs:
            if torch.is_tensor(input):
                input = (input,)
            input = input + (kwargs['target_fn'](),)
        elif 'target' in kwargs:
            input = input + (kwargs['target'],)

        # Extra parameters to forward()
        if 'extra_args' in kwargs:
            input = input + kwargs['extra_args']

        args_variable, kwargs_variable = create_input(input, dtype=dtype)
        f_args_variable = deepcopy(unpack_variables(args_variable))

        # TODO(issue#52052) Neither this nor no_grad should be required
        # if check_against_reference() is updated to check gradients
        # w.r.t. weights and then only check w.r.t. inputs if any
        # inputs require it.
        any_requires_grad = any(input.requires_grad for input in f_args_variable)

        # Check against Python module as reference
        check_against_reference(self, create_script_module, create_nn_module,
                                lambda x: x, f_args_variable,
                                no_grad=no_grad or not any_requires_grad)

    if 'slowTest' in kwargs:
        do_test = slowTest(do_test)

    post_add_test(test_name, (), do_test, TestJitGeneratedModule)
def post_add_test(test_name, skipTestIf, do_test, test_class):
    """Attach `do_test` to `test_class` under `test_name`.

    Each decorator in `skipTestIf` is applied to the test first; UBSAN-disabled
    tests are dropped entirely when running under UBSAN.
    """
    assert not hasattr(test_class, test_name), 'Two tests have the same name: ' + test_name
    wrapped = do_test
    for skip_decorator in skipTestIf:
        wrapped = skip_decorator(wrapped)
    if TEST_WITH_UBSAN and test_name in UBSAN_DISABLED_TESTS:
        return
    setattr(test_class, test_name, wrapped)
def normalize_check_ad(check_ad, name):
    """Normalize a `check_ad` spec into a 3-element list.

    The result is `[should_check (bool), fwd_ops (List[str]), bwd_ops (List[str])]`.
    Missing trailing elements default to `False`, `['aten::' + name]`, and `[]`
    respectively; bare strings in the op positions are wrapped into one-element
    lists.

    Args:
        check_ad: a sequence of 0 to 3 elements (bool, str|List[str], str|List[str]).
        name: operator name used to build the default 'aten::<name>' entry.

    Raises:
        ValueError: if `check_ad` has more than three elements.
        (ValueError subclasses Exception, so existing `except Exception` callers
        are unaffected.)
    """
    if len(check_ad) > 3:
        raise ValueError('Invalid check_ad, requires (bool, str|List[str], str|List[str])')
    # Fill in defaults for whichever trailing elements were omitted.
    defaults = [False, ['aten::' + name], []]
    check_ad = list(check_ad) + defaults[len(check_ad):]
    # Bare strings become single-element lists; everything else passes through.
    return [[t] if isinstance(t, str) else t for t in check_ad]
class TestProducerVersion(TestCase):
    """Sanity check linking torch's version string to the ONNX producer version."""

    def test_version(self):
        # gh-32561: the ONNX producer version must be a prefix of torch.__version__.
        producer = torch.onnx.producer_version
        full_version = torch.__version__
        self.assertTrue(full_version.startswith(producer))
# Register a generated JIT test for every nn.Module test specification.
all_module_specs = module_tests + new_module_tests + additional_module_tests
for spec in all_module_specs:
    add_nn_module_test(**spec)
# Criterion tests never check gradients, so force no_grad on each spec.
for spec in criterion_tests:
    spec['no_grad'] = True
    add_nn_module_test(**spec)
if __name__ == '__main__':
    run_tests()
    # NOTE(review): run_tests() typically dispatches to unittest.main(), which
    # raises SystemExit -- confirm the module-interface suite below ever runs.
    import jit.test_module_interface
    suite = unittest.findTestCases(jit.test_module_interface)
    unittest.TextTestRunner().run(suite)
| pytorch-master | test/test_jit.py |
# Owner(s): ["module: unknown"]
from torch.testing._internal.common_utils import TestCase, run_tests
import os
import subprocess
import sys
class TestMKLDNNVerbose(TestCase):
    """Checks that oneDNN (MKLDNN) verbose logging can be toggled on and off
    via the companion mkldnn_verbose.py script.

    The two tests previously duplicated the whole subprocess-scanning loop;
    that logic now lives in a single private helper.
    """

    def _count_onednn_verbose_lines(self, verbose_level):
        """Run mkldnn_verbose.py at `verbose_level`; return (count, failed).

        `count` is the number of output lines starting with "onednn_verbose".
        `failed` is True when the script reports that verbose mode could not
        be enabled, in which case callers should skip their assertions.
        """
        num = 0
        failed = False
        loc = os.path.dirname(os.path.abspath(__file__))
        with subprocess.Popen(f'{sys.executable} -u {loc}/mkldnn_verbose.py --verbose-level={verbose_level}',
                              shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as p:
            for raw_line in p.stdout.readlines():
                line = str(raw_line, 'utf-8').strip()
                if line.startswith("onednn_verbose"):
                    num = num + 1
                elif line == 'Failed to set MKLDNN into verbose mode. Please consider to disable this verbose scope.':
                    failed = True
        return num, failed

    def test_verbose_on(self):
        # With verbosity enabled, at least one onednn_verbose line must appear
        # (unless this build cannot enable verbose mode at all).
        num, failed = self._count_onednn_verbose_lines(1)
        if failed:
            return
        self.assertTrue(num > 0, 'oneDNN verbose messages not found.')

    def test_verbose_off(self):
        # With verbosity disabled, no onednn_verbose lines may appear.
        num, _ = self._count_onednn_verbose_lines(0)
        self.assertEqual(num, 0, 'unexpected oneDNN verbose messages found.')
if __name__ == '__main__':
    # Standard PyTorch test-suite entry point.
    run_tests()
| pytorch-master | test/test_mkldnn_verbose.py |
# Owner(s): ["module: nn"]
import math
import torch
from torch.testing._internal.common_device_type import (
dtypes,
dtypesIfCUDA,
instantiate_device_type_tests,
onlyCUDA,
skipMeta,
)
from torch.testing._internal.common_utils import run_tests, TestCase
class TestMHADeviceType(TestCase):
    """Device-parameterized tests comparing torch's private fused multi-head
    attention primitives against straightforward reference implementations."""

    @torch.no_grad()
    def _test_transform_bias_rescale_qkv_impl(
        self, device, dtype, use_nt, use_padding=False
    ):
        """Compare torch._transform_bias_rescale_qkv against a manual
        split / bias-add / rescale reference.

        Args:
            device, dtype: supplied by instantiate_device_type_tests.
            use_nt: feed the input as a nested tensor instead of dense.
            use_padding: poison one row with -Inf (dense) or drop it (nested)
                to exercise padding handling.
        """
        # Each entry is (embed_dim, num_heads, batch size, sequence length).
        tests = [
            (64, 4, 16, 8),
            # dim_per_head = 12 does not divide evenly by CPU vectorization length of 8
            (24, 2, 4, 2),
            # Make sure CUDA can handle small input sizes
            (2, 2, 2, 2),
            # dim_per_head = 6 does not divide evenly by CUDA vectorization length of 4,
            # causes alignment issues
            (24, 4, 4, 2),
            (48, 4, 16, 8),
        ]
        for (embed_dim, num_heads, bs, sl) in tests:
            with self.subTest(embed_dim=embed_dim, num_heads=num_heads, bs=bs, sl=sl):
                # Fixed seed so dense and nested runs see identical data.
                torch.manual_seed(9343)
                dense_x = x = (
                    torch.randn(bs, sl, 3 * embed_dim, device=device, dtype=dtype) * 10
                )
                if use_padding:
                    x[0][-1] = torch.full(x[0][-1].shape, float("-Inf"))
                if use_nt:
                    xs = list(torch.unbind(x))
                    if use_padding:
                        xs[0] = xs[0][:-1]
                    x = torch.nested_tensor(xs, device=device, dtype=dtype)
                qkv = torch.nn.Linear(embed_dim, 3 * embed_dim, device=device, dtype=dtype)
                # We have to use inference_mode here because q/k/v are
                # all views of the same Tensor, which autograd doesn't
                # like. This is fine because this function is only
                # exposed to Python for purposes of writing this test.
                with torch.inference_mode():
                    (q, k, v) = torch._transform_bias_rescale_qkv(
                        x, qkv.bias, num_heads=num_heads
                    )
                def simple_transform_bias_rescale_qkv(qkv, bias):
                    """Reference: split into q/k/v, add bias, scale q, reshape per head."""
                    (q, k, v) = torch.split(qkv, embed_dim, dim=-1)
                    (q_bias, k_bias, v_bias) = torch.split(bias, embed_dim, dim=-1)
                    def embiggen(x):
                        # For nested inputs the fused op pads the sequence
                        # length up to a multiple of 8; mirror that here.
                        if not use_nt:
                            return x
                        b, t, d = x.size()
                        t = t + (8 - t % 8) % 8
                        newsize = (b, t, d)
                        new_x = torch.zeros(newsize, device=device, dtype=dtype)
                        new_x[:x.size()[0], :x.size()[1], :x.size()[2]] = x
                        return new_x
                    return tuple(
                        embiggen(x).reshape(
                            (bs, -1, num_heads, embed_dim // num_heads)
                        ).transpose(2, 1)
                        for x in (
                            (q + q_bias) / math.sqrt(embed_dim // num_heads),
                            (k + k_bias),
                            (v + v_bias),
                        )
                    )
                correct_q, correct_k, correct_v = simple_transform_bias_rescale_qkv(
                    dense_x, qkv.bias
                )
                if use_nt and use_padding:
                    # The dropped (padded) row carried -Inf; zero it so the
                    # comparison against the nested-tensor result lines up.
                    for t in (correct_q, correct_k, correct_v):
                        t[t == float("-Inf")] = 0
                self.assertEqual(q.size(), correct_q.size())
                torch.testing.assert_close(q, correct_q)
                torch.testing.assert_close(k, correct_k)
                torch.testing.assert_close(v, correct_v)

    @dtypesIfCUDA(torch.float)
    @dtypes(torch.float)
    @skipMeta
    def test_transform_bias_rescale_qkv(self, device, dtype):
        """Dense-input variant, with and without padding."""
        for use_padding in (False, True):
            with self.subTest(use_padding=use_padding):
                self._test_transform_bias_rescale_qkv_impl(
                    device, dtype, use_nt=False, use_padding=use_padding
                )

    @dtypesIfCUDA(torch.float)
    @dtypes(torch.float)
    @skipMeta
    @onlyCUDA
    def test_transform_bias_rescale_qkv_nested(self, device, dtype):
        """Nested-tensor variant (CUDA only), with and without padding."""
        for use_padding in (False, True):
            with self.subTest(use_padding=use_padding):
                self._test_transform_bias_rescale_qkv_impl(
                    device, dtype, use_nt=True, use_padding=use_padding
                )

    def _test_multihead_attention_impl(
        self, device, dtype, mode, use_nt, need_weights, average_attn_weights, use_padding=False, pad_all=False
    ):
        """Compare torch._native_multi_head_attention against nn.MultiheadAttention.

        Args:
            mode: "self" (k = v = q), "encdec" (v = k != q), or "generic"
                (independent q/k/v).
            use_nt: feed the native path nested tensors built from q/k/v.
            need_weights / average_attn_weights: forwarded to both paths.
            use_padding: zero one (or, with pad_all, every) trailing row and
                mask it via key_padding_mask.
        """
        embed_dim = 64
        num_heads = 4
        bs = 16
        sl = 8
        q = torch.randn(bs, sl, embed_dim, device=device, dtype=dtype) * 10
        if use_padding:
            if pad_all:
                # Pad the last position of every batch element.
                for q_i in q:
                    q_i[-1] = torch.zeros_like(q[0][-1], device=device, dtype=dtype)
                mask = torch.zeros(q.shape[:-1], device=device, dtype=torch.bool)
                for mask_i in mask:
                    mask_i[-1] = True
            else:
                # Pad only the last position of the first batch element.
                q[0][-1] = torch.zeros_like(q[0][-1], device=device, dtype=dtype)
                mask = torch.zeros(q.shape[:-1], device=device, dtype=torch.bool)
                mask[0][-1] = True
        if mode == "self":
            k = q
            v = q
        elif mode == "encdec":
            k = torch.randn(bs, sl, embed_dim, device=device, dtype=dtype) * 10
            v = k
        elif mode == "generic":
            k = torch.randn(bs, sl, embed_dim, device=device, dtype=dtype) * 10
            v = torch.randn(bs, sl, embed_dim, device=device, dtype=dtype) * 10
        else:
            self.fail(f"invalid mode `{mode}`!")
        # Shared projection weights so both implementations compute the same math.
        qkv = torch.nn.Linear(embed_dim, 3 * embed_dim, device=device, dtype=dtype)
        proj = torch.nn.Linear(embed_dim, embed_dim, device=device, dtype=dtype)
        pt = torch.nn.MultiheadAttention(
            embed_dim, num_heads, batch_first=True, device=device, dtype=dtype
        )
        pt.in_proj_weight = qkv.weight
        pt.in_proj_bias = qkv.bias
        pt.out_proj.weight = proj.weight
        pt.out_proj.bias = proj.bias
        class NativeMHA(torch.nn.Module):
            """Thin module wrapper around torch._native_multi_head_attention."""
            def __init__(self, embed_dim, num_heads, qkv, proj):
                super().__init__()
                self.qkv = qkv
                self.proj = proj
                self.embed_dim = embed_dim
                self.num_heads = num_heads
            def forward(self, q, k, v, key_padding_mask):
                return torch._native_multi_head_attention(
                    q,
                    k,
                    v,
                    self.embed_dim,
                    self.num_heads,
                    self.qkv.weight,
                    self.qkv.bias,
                    self.proj.weight,
                    self.proj.bias,
                    key_padding_mask,
                    need_weights=need_weights,
                    average_attn_weights=average_attn_weights,
                    mask_type=1,  # mask_type = 1 => src_key_padding_mask, mask_type = 0 => src_mask
                )
        npt = NativeMHA(
            embed_dim=embed_dim, num_heads=num_heads, qkv=qkv, proj=proj
        ).to(dtype)
        if device == "cuda":
            pt = pt.cuda()
            npt = npt.cuda()
        ypt, weight_pt = pt(
            q,
            k,
            v,
            need_weights=need_weights,
            average_attn_weights=average_attn_weights,
            key_padding_mask=mask if use_padding else None,
        )
        if use_nt:
            # Rebuild q (and k/v per mode) as nested tensors, dropping padded rows.
            qs = list(torch.unbind(q))
            if use_padding:
                if pad_all:
                    qs = [x[:-1] for x in qs]
                else:
                    qs[0] = qs[0][:-1]
            q = torch.nested_tensor(qs, device=device, dtype=dtype)
            if mode == "self":
                k = v = q
            elif mode == "encdec":
                k = torch.nested_tensor(torch.unbind(k), device=device, dtype=dtype)
                v = k
            else:
                k = torch.nested_tensor(torch.unbind(k), device=device, dtype=dtype)
                v = torch.nested_tensor(torch.unbind(v), device=device, dtype=dtype)
        ynpt, weight_npt = npt(
            q, k, v, key_padding_mask=mask if use_padding and not use_nt else None
        )
        if use_nt:
            ynpt = ynpt.to_padded_tensor(0)
            if pad_all:
                # Re-pad the nested result back to the dense result's shape.
                ynpt_final = torch.zeros_like(ypt)
                ynpt_final[:, :ynpt.shape[1], :] = ynpt
                ynpt = ynpt_final
        def do_pad_all(tensors):
            # Zero the last position of every batch element in each tensor.
            for t in tensors:
                for t_i in t:
                    t_i[-1] = torch.zeros_like(t_i[-1], device=device, dtype=dtype)
        # PyTorch implementation returns non-zero junk in the padding
        # locations; overwrite it so that the comparison works out.
        if use_padding:
            ypt[0][-1] = torch.zeros_like(ypt[0][-1], device=device, dtype=dtype)
            ynpt[0][-1] = torch.zeros_like(ynpt[0][-1], device=device, dtype=dtype)
            if pad_all:
                do_pad_all((ypt, ynpt))
            # Zero the last row of each TxT weight matrix
            if need_weights:
                if average_attn_weights:
                    weight_pt[0][-1] = torch.zeros_like(weight_pt[0][-1], device=device, dtype=dtype)
                    weight_npt[0][-1] = torch.zeros_like(weight_npt[0][-1], device=device, dtype=dtype)
                    if pad_all:
                        do_pad_all((weight_pt, weight_npt))
                else:
                    for nh in range(num_heads):
                        weight_pt[0][nh][-1] = torch.zeros_like(weight_pt[0][nh][-1], device=device, dtype=dtype)
                        weight_npt[0][nh][-1] = torch.zeros_like(weight_npt[0][nh][-1], device=device, dtype=dtype)
        if dtype == torch.half:
            torch.testing.assert_close(ypt, ynpt, atol=1e-3, rtol=1e-3)
        else:
            # High rtol seems necessary for
            # test_native_multihead_attention_cpu_float32 on Windows,
            # otherwise 2e-4 would likely be fine.
            torch.testing.assert_close(ypt, ynpt, atol=2e-5, rtol=2e-3)
        if need_weights:
            torch.testing.assert_close(weight_pt, weight_npt)
        else:
            # Both paths return no weights; assertEqual covers the None case.
            self.assertEqual(weight_pt, weight_npt)

    @dtypesIfCUDA(torch.float, torch.half)
    @dtypes(torch.float)
    @skipMeta
    @torch.no_grad()
    def test_native_multihead_self_attention(self, device, dtype):
        """Self-attention (k = v = q) across padding/nested/weight combinations."""
        for (use_padding, pad_all) in ((False, False), (True, False), (True, True)):
            for use_nt in (False, True):
                # Figuring out exactly which elements of the weights are garbage in this
                # case eludes me, and it's not particularly enlightening to test anyway
                # because padding doesn't especially affect the intermediate weights.
                for need_weights in (False, not pad_all):
                    for average_attn_weights in (False, True):
                        with self.subTest(use_padding=use_padding, pad_all=pad_all,
                                          use_nt=use_nt, need_weights=need_weights,
                                          average_attn_weights=average_attn_weights):
                            self._test_multihead_attention_impl(
                                device,
                                dtype,
                                "self",
                                use_nt=use_nt,
                                use_padding=use_padding,
                                pad_all=pad_all,
                                need_weights=need_weights,
                                average_attn_weights=average_attn_weights,
                            )

    @dtypesIfCUDA(torch.float, torch.half)
    @dtypes(torch.float)
    @skipMeta
    @torch.no_grad()
    def test_native_multihead_encoder_decoder_attention(self, device, dtype):
        """Encoder-decoder attention (v = k != q)."""
        self._test_multihead_attention_impl(
            device,
            dtype,
            "encdec",
            use_nt=False,
            need_weights=False,
            average_attn_weights=False,
        )

    @dtypesIfCUDA(torch.float, torch.half)
    @dtypes(torch.float)
    @skipMeta
    @torch.no_grad()
    def test_native_multihead_attention(self, device, dtype):
        """Generic attention with independent q/k/v."""
        self._test_multihead_attention_impl(
            device,
            dtype,
            "generic",
            use_nt=False,
            need_weights=False,
            average_attn_weights=False,
        )
# Generate per-device (CPU/CUDA/...) variants of the TestMHADeviceType tests.
instantiate_device_type_tests(TestMHADeviceType, globals())
if __name__ == "__main__":
    # Standard PyTorch test-suite entry point.
    run_tests()
| pytorch-master | test/test_native_mha.py |
# -*- coding: utf-8 -*-
# Owner(s): ["module: autograd"]
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS
import pkgutil
import torch
import sys
from typing import Callable
import inspect
import json
import os
import unittest
class TestPublicBindings(TestCase):
    """Guards torch's public API surface: C++ bindings exposed as torch.XXX
    and the `__module__`/`__all__` hygiene of every public submodule."""

    def test_no_new_bindings(self):
        """
        This test aims to stop the introduction of new JIT bindings into torch._C
        whose names do not start with _. Such bindings are made available as
        torch.XXX, which may not be desirable.
        If your change causes this test to fail, add your new binding to a relevant
        submodule of torch._C, such as torch._C._jit (or other relevant submodule of
        torch._C). If your binding really needs to be available as torch.XXX, add it
        to torch._C and add it to the allowlist below.
        If you have removed a binding, remove it from the allowlist as well.
        """
        # This allowlist contains every binding in torch._C that is copied into torch at
        # the time of writing. It was generated with
        #
        # {elem for elem in dir(torch._C) if not elem.startswith("_")}
        #
        # NOTE: a few names appear twice below; duplicates in a set literal are
        # harmless (the set keeps a single copy).
        torch_C_allowlist_superset = {
            "AggregationType",
            "AliasDb",
            "AnyType",
            "Argument",
            "ArgumentSpec",
            "autocast_decrement_nesting",
            "autocast_increment_nesting",
            "AVG",
            "BenchmarkConfig",
            "BenchmarkExecutionStats",
            "Block",
            "BoolType",
            "BufferDict",
            "StorageBase",
            "CallStack",
            "Capsule",
            "ClassType",
            "clear_autocast_cache",
            "Code",
            "CompilationUnit",
            "CompleteArgumentSpec",
            "ComplexType",
            "ConcreteModuleType",
            "ConcreteModuleTypeBuilder",
            "CONV_BN_FUSION",
            "cpp",
            "CudaBFloat16TensorBase",
            "CudaBFloat16TensorBase",
            "CudaBoolTensorBase",
            "CudaBoolTensorBase",
            "CudaByteTensorBase",
            "CudaByteTensorBase",
            "CudaCharTensorBase",
            "CudaCharTensorBase",
            "CudaComplexDoubleTensorBase",
            "CudaComplexDoubleTensorBase",
            "CudaComplexFloatTensorBase",
            "CudaComplexFloatTensorBase",
            "CudaDoubleTensorBase",
            "CudaDoubleTensorBase",
            "CudaFloatTensorBase",
            "CudaHalfTensorBase",
            "CudaIntTensorBase",
            "CudaIntTensorBase",
            "CudaLongTensorBase",
            "CudaLongTensorBase",
            "CudaShortTensorBase",
            "CudaShortTensorBase",
            "DeepCopyMemoTable",
            "default_generator",
            "DeserializationStorageContext",
            "device",
            "DeviceObjType",
            "DictType",
            "DisableTorchFunction",
            "dtype",
            "EnumType",
            "ErrorReport",
            "ExecutionPlan",
            "FatalError",
            "FileCheck",
            "finfo",
            "FloatType",
            "fork",
            "FunctionSchema",
            "FUSE_ADD_RELU",
            "Future",
            "FutureType",
            "Generator",
            "get_autocast_cpu_dtype",
            "get_default_dtype",
            "get_num_interop_threads",
            "get_num_threads",
            "Gradient",
            "Graph",
            "GraphExecutorState",
            "has_cuda",
            "has_cudnn",
            "has_lapack",
            "has_mkl",
            "has_mkldnn",
            "has_mps",
            "has_openmp",
            "has_spectral",
            "HOIST_CONV_PACKED_PARAMS",
            "iinfo",
            "import_ir_module_from_buffer",
            "import_ir_module",
            "InferredType",
            "init_num_threads",
            "INSERT_FOLD_PREPACK_OPS",
            "InterfaceType",
            "IntType",
            "SymIntType",
            "IODescriptor",
            "is_anomaly_enabled",
            "is_autocast_cache_enabled",
            "is_autocast_cpu_enabled",
            "is_autocast_enabled",
            "is_grad_enabled",
            "is_inference_mode_enabled",
            "JITException",
            "layout",
            "ListType",
            "LiteScriptModule",
            "LockingLogger",
            "LoggerBase",
            "memory_format",
            "merge_type_from_type_comment",
            "MobileOptimizerType",
            "ModuleDict",
            "Node",
            "NoneType",
            "NoopLogger",
            "NumberType",
            "OperatorInfo",
            "OptionalType",
            "ParameterDict",
            "parse_ir",
            "parse_schema",
            "parse_type_comment",
            "PyObjectType",
            "PyTorchFileReader",
            "PyTorchFileWriter",
            "qscheme",
            "read_vitals",
            "REMOVE_DROPOUT",
            "RRefType",
            "ScriptClass",
            "ScriptClassFunction",
            "ScriptDict",
            "ScriptDictIterator",
            "ScriptDictKeyIterator",
            "ScriptList",
            "ScriptListIterator",
            "ScriptFunction",
            "ScriptMethod",
            "ScriptModule",
            "ScriptModuleSerializer",
            "ScriptObject",
            "ScriptObjectProperty",
            "SerializationStorageContext",
            "set_anomaly_enabled",
            "set_autocast_cache_enabled",
            "set_autocast_cpu_dtype",
            "set_autocast_cpu_enabled",
            "set_autocast_enabled",
            "set_flush_denormal",
            "set_num_interop_threads",
            "set_num_threads",
            "set_vital",
            "Size",
            "StaticModule",
            "Stream",
            "StreamObjType",
            "StringType",
            "SUM",
            "SymIntNode",
            "TensorType",
            "ThroughputBenchmark",
            "TracingState",
            "TupleType",
            "Type",
            "unify_type_list",
            "UnionType",
            "Use",
            "Value",
            "autocast_decrement_nesting",
            "autocast_increment_nesting",
            "clear_autocast_cache",
            "cpp",
            "default_generator",
            "device",
            "dtype",
            "finfo",
            "fork",
            "get_default_dtype",
            "get_num_interop_threads",
            "get_num_threads",
            "has_cuda",
            "has_cudnn",
            "has_lapack",
            "has_mkl",
            "has_mkldnn",
            "has_mps",
            "has_openmp",
            "iinfo",
            "import_ir_module",
            "import_ir_module_from_buffer",
            "init_num_threads",
            "is_anomaly_enabled",
            "is_autocast_enabled",
            "is_grad_enabled",
            "layout",
            "memory_format",
            "merge_type_from_type_comment",
            "parse_ir",
            "parse_schema",
            "parse_type_comment",
            "qscheme",
            "set_anomaly_enabled",
            "set_autocast_enabled",
            'set_autocast_gpu_dtype',
            'get_autocast_gpu_dtype',
            "set_flush_denormal",
            "set_num_interop_threads",
            "set_num_threads",
            "unify_type_list",
            "vitals_enabled",
            "wait",
            "Tag",
        }
        torch_C_bindings = {elem for elem in dir(torch._C) if not elem.startswith("_")}

        # Check that the torch._C bindings are all in the allowlist. Since
        # bindings can change based on how PyTorch was compiled (e.g. with/without
        # CUDA), the two may not be an exact match but the bindings should be
        # a subset of the allowlist.
        difference = torch_C_bindings.difference(torch_C_allowlist_superset)
        msg = f"torch._C had bindings that are not present in the allowlist:\n{difference}"
        self.assertTrue(torch_C_bindings.issubset(torch_C_allowlist_superset), msg)

    # AttributeError: module 'torch.distributed' has no attribute '_shard'
    @unittest.skipIf(IS_WINDOWS, "Distributed Attribute Error")
    def test_correct_module_names(self):
        '''
        An API is considered public, if its `__module__` starts with `torch.`
        and there is no name in `__module__` or the object itself that starts with "_".
        Each public package should either:
        - (preferred) Define `__all__` and all callables and classes in there must have their
          `__module__` start with the current submodule's path. Things not in `__all__` should
          NOT have their `__module__` start with the current submodule.
        - (for simple python-only modules) Not define `__all__` and all the elements in `dir(submod)` must have their
          `__module__` that start with the current submodule.
        '''
        failure_list = []
        with open(os.path.join(os.path.dirname(__file__), 'allowlist_for_publicAPI.json')) as json_file:
            # no new entries should be added to this allow_dict.
            # New APIs must follow the public API guidelines.
            allow_dict = json.load(json_file)
            # Because we want minimal modifications to the `allowlist_for_publicAPI.json`,
            # we are adding the entries for the migrated modules here from the original
            # locations.
            for modname in allow_dict["being_migrated"]:
                if modname in allow_dict:
                    allow_dict[allow_dict["being_migrated"][modname]] = allow_dict[modname]

        def test_module(modname):
            """Check every public element of one (already imported) module."""
            split_strs = modname.split('.')
            # NOTE(review): mod may be None if modname was never imported --
            # presumably walk_packages only yields imported ones; confirm.
            mod = sys.modules.get(modname)
            # Skip private modules (any path component starting with "_").
            for elem in split_strs:
                if elem.startswith("_"):
                    return

            # verifies that each public API has the correct module name and naming semantics
            def check_one_element(elem, modname, mod, *, is_public, is_all):
                obj = getattr(mod, elem)
                if not (isinstance(obj, Callable) or inspect.isclass(obj)):
                    return
                elem_module = getattr(obj, '__module__', None)
                # Only used for nice error message below
                why_not_looks_public = ""
                if elem_module is None:
                    why_not_looks_public = "because it does not have a `__module__` attribute"
                # If a module is being migrated from foo.a to bar.a (that is entry {"foo": "bar"}),
                # the module's starting package would be referred to as the new location even
                # if there is a "from foo import a" inside the "bar.py".
                modname = allow_dict["being_migrated"].get(modname, modname)
                elem_modname_starts_with_mod = elem_module is not None and \
                    elem_module.startswith(modname) and \
                    '._' not in elem_module
                if not why_not_looks_public and not elem_modname_starts_with_mod:
                    why_not_looks_public = f"because its `__module__` attribute (`{elem_module}`) is not within the " \
                        f"torch library or does not start with the submodule where it is defined (`{modname}`)"
                # elem's name must NOT begin with an `_` and it's module name
                # SHOULD start with it's current module since it's a public API
                looks_public = not elem.startswith('_') and elem_modname_starts_with_mod
                if not why_not_looks_public and not looks_public:
                    why_not_looks_public = f"because it starts with `_` (`{elem}`)"
                if is_public != looks_public:
                    # Explicitly allowlisted mismatches are fine.
                    if modname in allow_dict and elem in allow_dict[modname]:
                        return
                    if is_public:
                        why_is_public = f"it is inside the module's (`{modname}`) `__all__`" if is_all else \
                            "it is an attribute that does not start with `_` on a module that " \
                            "does not have `__all__` defined"
                        fix_is_public = f"remove it from the modules's (`{modname}`) `__all__`" if is_all else \
                            f"either define a `__all__` for `{modname}` or add a `_` at the beginning of the name"
                    else:
                        assert is_all
                        why_is_public = f"it is not inside the module's (`{modname}`) `__all__`"
                        fix_is_public = f"add it from the modules's (`{modname}`) `__all__`"
                    if looks_public:
                        why_looks_public = "it does look public because it follows the rules from the doc above " \
                            "(does not start with `_` and has a proper `__module__`)."
                        fix_looks_public = "make its name start with `_`"
                    else:
                        why_looks_public = why_not_looks_public
                        if not elem_modname_starts_with_mod:
                            fix_looks_public = "make sure the `__module__` is properly set and points to a submodule "\
                                f"of `{modname}`"
                        else:
                            fix_looks_public = "remove the `_` at the beginning of the name"
                    failure_list.append(f"# {modname}.{elem}:")
                    is_public_str = "" if is_public else " NOT"
                    failure_list.append(f"  - Is{is_public_str} public: {why_is_public}")
                    looks_public_str = "" if looks_public else " NOT"
                    failure_list.append(f"  - Does{looks_public_str} look public: {why_looks_public}")
                    # Swap the str below to avoid having to create the NOT again
                    failure_list.append("  - You can do either of these two things to fix this problem:")
                    failure_list.append(f"    - To make it{looks_public_str} public: {fix_is_public}")
                    failure_list.append(f"    - To make it{is_public_str} look public: {fix_looks_public}")

            if hasattr(mod, '__all__'):
                public_api = mod.__all__
                all_api = dir(mod)
                for elem in all_api:
                    check_one_element(elem, modname, mod, is_public=elem in public_api, is_all=True)
            else:
                all_api = dir(mod)
                for elem in all_api:
                    if not elem.startswith('_'):
                        check_one_element(elem, modname, mod, is_public=True, is_all=False)

        for _, modname, ispkg in pkgutil.walk_packages(path=torch.__path__, prefix=torch.__name__ + '.'):
            test_module(modname)
        test_module('torch')

        msg = "All the APIs below do not meet our guidelines for public API from " \
            "https://github.com/pytorch/pytorch/wiki/Public-API-definition-and-documentation.\n"
        msg += "Make sure that everything that is public is expected (in particular that the module " \
            "has a properly populated `__all__` attribute) and that everything that is supposed to be public " \
            "does look public (it does not start with `_` and has a `__module__` that is properly populated)."
        msg += "\n\nFull list:\n"
        msg += "\n".join(map(str, failure_list))

        # empty lists are considered false in python
        self.assertTrue(not failure_list, msg)
if __name__ == '__main__':
    # Standard PyTorch test-suite entry point.
    run_tests()
| pytorch-master | test/test_public_bindings.py |
# Owner(s): ["module: pytree"]
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.utils._pytree import tree_flatten, tree_map, tree_unflatten, TreeSpec, LeafSpec
from torch.utils._pytree import _broadcast_to_and_flatten, tree_map_only, tree_all
from torch.utils._pytree import tree_any, tree_all_only, tree_any_only
from collections import namedtuple, OrderedDict
from torch.testing._internal.common_utils import parametrize, subtest, instantiate_parametrized_tests
class TestPytree(TestCase):
    """Tests for the torch.utils._pytree flatten/unflatten/map utilities.

    Two fixes relative to the previous revision:
    - test_treemap: `map` was called as map(iterable, fn) (arguments swapped,
      which raises TypeError), a scalar sum was compared against a list, and
      the inverse round-trip went through tree_flatten (which takes no
      function) instead of tree_map.
    - test_flatten_unflatten_nested: the `cases` list was built but never run.
    """

    def test_treespec_equality(self):
        self.assertTrue(LeafSpec() == LeafSpec())
        self.assertTrue(TreeSpec(list, None, []) == TreeSpec(list, None, []))
        self.assertTrue(TreeSpec(list, None, [LeafSpec()]) == TreeSpec(list, None, [LeafSpec()]))
        self.assertFalse(TreeSpec(tuple, None, []) == TreeSpec(list, None, []))
        self.assertTrue(TreeSpec(tuple, None, []) != TreeSpec(list, None, []))

    def test_flatten_unflatten_leaf(self):
        """A leaf flattens to a one-element list plus a LeafSpec and round-trips."""
        def run_test_with_leaf(leaf):
            values, treespec = tree_flatten(leaf)
            self.assertEqual(values, [leaf])
            self.assertEqual(treespec, LeafSpec())
            unflattened = tree_unflatten(values, treespec)
            self.assertEqual(unflattened, leaf)
        run_test_with_leaf(1)
        run_test_with_leaf(1.)
        run_test_with_leaf(None)
        run_test_with_leaf(bool)
        run_test_with_leaf(torch.randn(3, 3))

    def test_flatten_unflatten_list(self):
        """Lists flatten elementwise and round-trip back to lists."""
        def run_test(lst):
            expected_spec = TreeSpec(list, None, [LeafSpec() for _ in lst])
            values, treespec = tree_flatten(lst)
            self.assertTrue(isinstance(values, list))
            self.assertEqual(values, lst)
            self.assertEqual(treespec, expected_spec)
            unflattened = tree_unflatten(values, treespec)
            self.assertEqual(unflattened, lst)
            self.assertTrue(isinstance(unflattened, list))
        run_test([])
        run_test([1., 2])
        run_test([torch.tensor([1., 2]), 2, 10, 9, 11])

    def test_flatten_unflatten_tuple(self):
        """Tuples flatten elementwise and round-trip back to tuples."""
        def run_test(tup):
            expected_spec = TreeSpec(tuple, None, [LeafSpec() for _ in tup])
            values, treespec = tree_flatten(tup)
            self.assertTrue(isinstance(values, list))
            self.assertEqual(values, list(tup))
            self.assertEqual(treespec, expected_spec)
            unflattened = tree_unflatten(values, treespec)
            self.assertEqual(unflattened, tup)
            self.assertTrue(isinstance(unflattened, tuple))
        run_test(())
        run_test((1.,))
        run_test((1., 2))
        run_test((torch.tensor([1., 2]), 2, 10, 9, 11))

    def test_flatten_unflatten_odict(self):
        """OrderedDicts flatten to their values (keys kept in the spec context)."""
        def run_test(odict):
            expected_spec = TreeSpec(
                OrderedDict,
                list(odict.keys()),
                [LeafSpec() for _ in odict.values()])
            values, treespec = tree_flatten(odict)
            self.assertTrue(isinstance(values, list))
            self.assertEqual(values, list(odict.values()))
            self.assertEqual(treespec, expected_spec)
            unflattened = tree_unflatten(values, treespec)
            self.assertEqual(unflattened, odict)
            self.assertTrue(isinstance(unflattened, OrderedDict))
        od = OrderedDict()
        run_test(od)
        od['b'] = 1
        od['a'] = torch.tensor(3.14)
        run_test(od)

    def test_flatten_unflatten_namedtuple(self):
        """Namedtuples flatten like tuples but round-trip to the same subtype."""
        Point = namedtuple('Point', ['x', 'y'])
        def run_test(tup):
            expected_spec = TreeSpec(namedtuple, Point, [LeafSpec() for _ in tup])
            values, treespec = tree_flatten(tup)
            self.assertTrue(isinstance(values, list))
            self.assertEqual(values, list(tup))
            self.assertEqual(treespec, expected_spec)
            unflattened = tree_unflatten(values, treespec)
            self.assertEqual(unflattened, tup)
            self.assertTrue(isinstance(unflattened, Point))
        run_test(Point(1., 2))
        run_test(Point(torch.tensor(1.), 2))

    @parametrize("op", [
        subtest(torch.max, name='max'),
        subtest(torch.min, name='min'),
    ])
    def test_flatten_unflatten_return_type(self, op):
        """torch.return_types (e.g. max/min results) survive a flatten round-trip."""
        x = torch.randn(3, 3)
        expected = op(x, dim=0)
        values, spec = tree_flatten(expected)
        # Check that values is actually List[Tensor] and not (ReturnType(...),)
        for value in values:
            self.assertTrue(isinstance(value, torch.Tensor))
        result = tree_unflatten(values, spec)
        self.assertEqual(type(result), type(expected))
        self.assertEqual(result, expected)

    def test_flatten_unflatten_dict(self):
        """Plain dicts flatten to their values (keys kept in the spec context)."""
        def run_test(tup):
            expected_spec = TreeSpec(dict, list(tup.keys()),
                                     [LeafSpec() for _ in tup.values()])
            values, treespec = tree_flatten(tup)
            self.assertTrue(isinstance(values, list))
            self.assertEqual(values, list(tup.values()))
            self.assertEqual(treespec, expected_spec)
            unflattened = tree_unflatten(values, treespec)
            self.assertEqual(unflattened, tup)
            self.assertTrue(isinstance(unflattened, dict))
        run_test({})
        run_test({'a': 1})
        run_test({'abcdefg': torch.randn(2, 3)})
        run_test({1: torch.randn(2, 3)})
        run_test({'a': 1, 'b': 2, 'c': torch.randn(2, 3)})

    def test_flatten_unflatten_nested(self):
        """Arbitrarily nested containers round-trip through flatten/unflatten."""
        def run_test(pytree):
            values, treespec = tree_flatten(pytree)
            self.assertTrue(isinstance(values, list))
            self.assertEqual(len(values), treespec.num_leaves)
            # NB: python basic data structures (dict list tuple) all have
            # contents equality defined on them, so the following works for them.
            unflattened = tree_unflatten(values, treespec)
            self.assertEqual(unflattened, pytree)
        cases = [
            [()],
            ([],),
            {'a': ()},
            {'a': 0, 'b': [{'c': 1}]},
            {'a': 0, 'b': [1, {'c': 2}, torch.randn(3)], 'c': (torch.randn(2, 3), 1)},
        ]
        # BUG FIX: the original defined `cases` but never executed them.
        for case in cases:
            run_test(case)

    def test_treemap(self):
        """tree_map applies a function to every leaf, preserving structure."""
        def run_test(pytree):
            def f(x):
                return x * 3
            # BUG FIX: map's arguments were swapped (map(iterable, f)), and a
            # scalar sum was compared against a raw leaf list.
            sm1 = sum(map(f, tree_flatten(pytree)[0]))
            sm2 = sum(tree_flatten(tree_map(f, pytree))[0])
            self.assertEqual(sm1, sm2)

            def invf(x):
                return x // 3

            # BUG FIX: the round-trip must go through tree_map, not tree_flatten
            # (which takes no mapping function).
            self.assertEqual(tree_map(invf, tree_map(f, pytree)), pytree)

        cases = [
            [()],
            ([],),
            {'a': ()},
            {'a': 1, 'b': [{'c': 2}]},
            {'a': 0, 'b': [2, {'c': 3}, 4], 'c': (5, 6)},
        ]
        for case in cases:
            run_test(case)

    def test_tree_only(self):
        """tree_map_only applies the function only to leaves of the given type."""
        self.assertEqual(tree_map_only(int, lambda x: x + 2, [0, "a"]), [2, "a"])

    def test_tree_all_any(self):
        """tree_all/tree_any (and their *_only variants) reduce over leaves."""
        self.assertTrue(tree_all(lambda x: x % 2, [1, 3]))
        self.assertFalse(tree_all(lambda x: x % 2, [0, 1]))
        self.assertTrue(tree_any(lambda x: x % 2, [0, 1]))
        self.assertFalse(tree_any(lambda x: x % 2, [0, 2]))
        self.assertTrue(tree_all_only(int, lambda x: x % 2, [1, 3, "a"]))
        self.assertFalse(tree_all_only(int, lambda x: x % 2, [0, 1, "a"]))
        self.assertTrue(tree_any_only(int, lambda x: x % 2, [0, 1, "a"]))
        self.assertFalse(tree_any_only(int, lambda x: x % 2, [0, 2, "a"]))

    def test_treespec_repr(self):
        # Check that it looks sane
        pytree = (0, [0, 0, 0])
        _, spec = tree_flatten(pytree)
        self.assertEqual(
            repr(spec), 'TreeSpec(tuple, None, [*, TreeSpec(list, None, [*, *, *])])')

    def test_broadcast_to_and_flatten(self):
        """_broadcast_to_and_flatten broadcasts a pytree to a target spec (or None)."""
        cases = [
            (1, (), []),
            # Same (flat) structures
            ((1,), (0,), [1]),
            ([1], [0], [1]),
            ((1, 2, 3), (0, 0, 0), [1, 2, 3]),
            ({'a': 1, 'b': 2}, {'a': 0, 'b': 0}, [1, 2]),
            # Mismatched (flat) structures
            ([1], (0,), None),
            ([1], (0,), None),
            ((1,), [0], None),
            ((1, 2, 3), (0, 0), None),
            ({'a': 1, 'b': 2}, {'a': 0}, None),
            ({'a': 1, 'b': 2}, {'a': 0, 'c': 0}, None),
            ({'a': 1, 'b': 2}, {'a': 0, 'b': 0, 'c': 0}, None),
            # Same (nested) structures
            ((1, [2, 3]), (0, [0, 0]), [1, 2, 3]),
            ((1, [(2, 3), 4]), (0, [(0, 0), 0]), [1, 2, 3, 4]),
            # Mismatched (nested) structures
            ((1, [2, 3]), (0, (0, 0)), None),
            ((1, [2, 3]), (0, [0, 0, 0]), None),
            # Broadcasting single value
            (1, (0, 0, 0), [1, 1, 1]),
            (1, [0, 0, 0], [1, 1, 1]),
            (1, {'a': 0, 'b': 0}, [1, 1]),
            (1, (0, [0, [0]], 0), [1, 1, 1, 1]),
            (1, (0, [0, [0, [], [[[0]]]]], 0), [1, 1, 1, 1, 1]),
            # Broadcast multiple things
            ((1, 2), ([0, 0, 0], [0, 0]), [1, 1, 1, 2, 2]),
            ((1, 2), ([0, [0, 0], 0], [0, 0]), [1, 1, 1, 1, 2, 2]),
            (([1, 2, 3], 4), ([0, [0, 0], 0], [0, 0]), [1, 2, 2, 3, 4, 4]),
        ]
        for pytree, to_pytree, expected in cases:
            _, to_spec = tree_flatten(to_pytree)
            result = _broadcast_to_and_flatten(pytree, to_spec)
            self.assertEqual(result, expected, msg=str([pytree, to_spec, expected]))
# Materialize the @parametrize'd test variants on TestPytree.
instantiate_parametrized_tests(TestPytree)
if __name__ == '__main__':
    # Standard PyTorch test-suite entry point.
    run_tests()
| pytorch-master | test/test_pytree.py |
# Owner(s): ["module: tests"]
import torch
import numpy as np
import unittest
from itertools import product, permutations, combinations
from functools import partial
import random
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TestCase, run_tests, suppress_warnings, gradcheck, gradgradcheck,
numpy_to_torch_dtype_dict, skipIfTorchDynamo
)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, onlyCPU, dtypes, onlyNativeDeviceTypes, skipMeta)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, complex_types, all_types_and, floating_and_complex_types_and,
)
# TODO: replace this with make_tensor() in common_utils.py
def _generate_input(shape, dtype, device, with_extremal):
    """Return a random tensor of `shape`/`dtype` on `device`, with roughly half
    of its entries zeroed; if `with_extremal`, additionally sprinkle
    nan/inf/-inf (float) or complex nan/inf/-inf (complex) values.

    NOTE(review): for shape == () this returns an *empty* 1-D tensor
    (torch.tensor(()) has shape (0,)), not a scalar — presumably intentional
    for these legacy tests; confirm before reusing elsewhere.
    """
    if shape == ():
        x = torch.tensor((), dtype=dtype, device=device)
    else:
        if dtype.is_floating_point or dtype.is_complex:
            # work around torch.randn not being implemented for bfloat16
            if dtype == torch.bfloat16:
                x = torch.randn(*shape, device=device) * random.randint(30, 100)
                x = x.to(torch.bfloat16)
            else:
                x = torch.randn(*shape, dtype=dtype, device=device) * random.randint(30, 100)
            # Zero out ~half of the entries (applies to the bfloat16 path too).
            x[torch.randn(*shape) > 0.5] = 0
            if with_extremal and dtype.is_floating_point:
                # Use extremal values
                x[torch.randn(*shape) > 0.5] = float('nan')
                x[torch.randn(*shape) > 0.5] = float('inf')
                x[torch.randn(*shape) > 0.5] = float('-inf')
            elif with_extremal and dtype.is_complex:
                x[torch.randn(*shape) > 0.5] = complex('nan')
                x[torch.randn(*shape) > 0.5] = complex('inf')
                x[torch.randn(*shape) > 0.5] = complex('-inf')
        elif dtype == torch.bool:
            x = torch.zeros(shape, dtype=dtype, device=device)
            x[torch.randn(*shape) > 0.5] = True
        else:
            # Integral dtypes: uniform random ints in [15, 100).
            x = torch.randint(15, 100, shape, dtype=dtype, device=device)

    return x
# TODO: replace this with make_tensor() in common_utils.py
def _rand_shape(dim, min_size, max_size):
shape = []
for i in range(dim):
shape.append(random.randint(min_size, max_size))
return tuple(shape)
# TODO: refactor tests to avoid this function
# Converts half/bfloat16 dtype to float when device is cpu
def _convert_t(dtype, device):
if device == 'cpu' and dtype in {torch.half, torch.bfloat16}:
return torch.float
return dtype
# TODO: replace this with make_tensor() in common_utils.py
# Returns a tensor of the requested shape, dtype, and device
# Requesting a half CPU tensor returns a float CPU tensor with
# values representable by a half.
# Initialization uses randint for non-float types and randn for float types.
def _make_tensor(shape, dtype, device, fill_ones=False) -> torch.Tensor:
# Returns a tensor filled with ones
if fill_ones:
return torch.ones(*shape, dtype=_convert_t(dtype, device), device=device)
# Returns a tensor with random integer values
if not (dtype.is_floating_point or dtype.is_complex):
t = torch.randint(0, 10, shape, device=device)
if dtype != torch.uint8:
t = t - 5 # generate negative values also
return t.to(_convert_t(dtype, device))
# Populates the CPU tensor with floats representable as half/bfloat16
if dtype == torch.half and device == 'cpu':
return torch.randn(*shape, dtype=torch.float, device=device).half().float()
if dtype == torch.bfloat16 and device == 'cpu':
return torch.randn(*shape, dtype=torch.float, device=device).bfloat16().float()
# Default: returns a tensor with random float values
return torch.randn(shape, dtype=dtype, device=device).to(dtype=dtype)
# Tests ops and indexing to ensure they return views (and new tensors) as
# appropriate.
class TestViewOps(TestCase):
    """Verifies that view-producing ops share storage with their base tensor
    (and that non-view-producing ops do not)."""
    # Comparisons in these tests must not coerce dtypes.
    exact_dtype = True
def is_view_of(self, base, other):
if (not other._is_view() or
other is base or
other._base is not base or
base.device != other.device):
return False
# Note: only validates storage on native device types
# because some accelerators, like XLA, do not expose storage
if base.device.type == 'cpu' or base.device.type == 'cuda':
if base.storage().data_ptr() != other.storage().data_ptr():
return False
return True
# Returns true if v1 and v2 are views of the same base
def is_view_of_same_base(self, v1, v2):
if (not v1._is_view() or v1 is v2):
return False
return self.is_view_of(v1._base, v2)
# Performs transpose if contiguous=True, else returns the input tensor as is
def _do_transpose(self, x, contiguous=False, dim0=0, dim1=1):
if contiguous:
return x
else:
return x.transpose(dim0, dim1)
@dtypes(*all_types_and(torch.half, torch.bfloat16))
def test_conj_self(self, device, dtype):
t = torch.ones(5, 5, device=device)
s = t.conj()
self.assertTrue(s is t)
    @skipIfTorchDynamo("TorchDynamo fails with unknown reason")
    @onlyNativeDeviceTypes
    @dtypes(*all_types_and_complex_and(torch.half, torch.bool))
    def test_view_dtype_new(self, device, dtype):
        """view(dtype) reinterprets the underlying storage: checks the resulting
        size/stride math, round-tripping, error cases, and parity with NumPy."""
        dtypes = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()}
        del dtypes[torch.bool]

        def generate_inputs():
            # Contiguous, permuted, expanded, strided, empty and 0-dim inputs.
            yield make_tensor((4, 4, 64), dtype=dtype, device=device, low=-5, high=5)
            yield make_tensor((4, 4, 64), dtype=dtype, device=device, low=-5, high=5).permute(1, 0, 2)
            yield make_tensor((4, 64, 4), dtype=dtype, device=device, low=-5, high=5).permute(2, 0, 1)
            yield make_tensor((1, 5, 1), dtype=dtype, device=device, low=-5, high=5).expand(5, 5, 64)
            yield make_tensor((2, 5, 256), dtype=dtype, device=device, low=-5, high=5)[1::2, 1:, ::2]
            yield make_tensor((0, 5, 64), dtype=dtype, device=device, low=-5, high=5)
            yield make_tensor((), dtype=dtype, device=device, low=-5, high=5)

        def calc_expected_size_and_stride(a, view_dtype):
            # The last dimension scales by the ratio of the element sizes.
            dtype_size = torch._utils._element_size(a.dtype)
            view_dtype_size = torch._utils._element_size(view_dtype)

            if dtype_size == view_dtype_size:
                return a.size(), a.stride()

            elif dtype_size > view_dtype_size:
                size_ratio = dtype_size // view_dtype_size
                view_size = list(a.size())
                view_size[-1] = view_size[-1] * size_ratio
                view_stride = [stride * size_ratio for stride in a.stride()]
                view_stride[-1] = 1
                return torch.Size(view_size), tuple(view_stride)

            else:
                size_ratio = view_dtype_size // dtype_size
                view_size = list(a.size())
                view_size[-1] = view_size[-1] // size_ratio
                view_stride = [stride // size_ratio for stride in a.stride()]
                view_stride[-1] = 1
                return torch.Size(view_size), tuple(view_stride)

        for a in generate_inputs():
            a_np = a.cpu().numpy()
            a_np_contiguous = a.cpu().contiguous().numpy()
            for view_dtype, np_view_dtype in dtypes.items():
                equal_element_size = torch._utils._element_size(dtype) == torch._utils._element_size(view_dtype)
                if not equal_element_size and a.dim() == 0:
                    with self.assertRaisesRegex(RuntimeError, r"self.dim\(\) cannot be 0"):
                        a.view(view_dtype)
                    continue
                if not equal_element_size and a.stride(-1) != 1:
                    with self.assertRaisesRegex(RuntimeError, r"self.stride\(-1\) must be 1"):
                        a.view(view_dtype)
                    continue
                a_view = a.view(view_dtype)
                self.assertEqual(a_view.dtype, view_dtype)
                self.assertEqual(a.data_ptr(), a_view.data_ptr())
                expected_size, expected_stride = calc_expected_size_and_stride(a, view_dtype)
                self.assertEqual(a_view.size(), expected_size)
                self.assertEqual(a_view.stride(), expected_stride)
                # Viewing back must round-trip bit-exactly.
                self.assertEqual(a_view.view(dtype), a, rtol=0, atol=0)
                # NumPy's dtype view requires contiguous input if target
                # dtype is a different size
                if equal_element_size:
                    a_np_view = a_np.view(np_view_dtype)
                else:
                    a_np_view = a_np_contiguous.view(np_view_dtype)
                self.assertEqual(a_view, a_np_view)

        # Test that requires_grad is dropped for floating point casts,
        # because view(dtype) does not support backward yet
        # TODO: Remove this when autograd support is added
        if dtype.is_floating_point or dtype.is_complex:
            for view_dtype in floating_and_complex_types_and(torch.half, torch.bfloat16):
                t = make_tensor((5, 5, 64), dtype=dtype, device=device, low=-5, high=5, requires_grad=True)
                self.assertFalse(t.view(view_dtype).requires_grad)
    # Test the extra error checks that happen when the view dtype
    # has a greater element size than the original dtype
    @onlyNativeDeviceTypes
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
    def test_view_dtype_upsize_errors(self, device, dtype):
        """Upsizing view(dtype) requires size/offset/stride divisibility; check
        each of the three error messages."""
        dtype_size = torch._utils._element_size(dtype)

        for view_dtype in all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool):
            view_dtype_size = torch._utils._element_size(view_dtype)
            if view_dtype_size <= dtype_size:
                continue

            size_ratio = view_dtype_size // dtype_size
            # Last-dim size not divisible by the element-size ratio.
            a = make_tensor((4, 4, size_ratio + 1), dtype=dtype, device=device, low=-5, high=5)
            with self.assertRaisesRegex(
                    RuntimeError,
                    rf"self.size\(-1\) must be divisible by {size_ratio}"):
                a.view(view_dtype)

            # Storage offset not divisible by the ratio.
            with self.assertRaisesRegex(
                    RuntimeError,
                    rf"self.storage_offset\(\) must be divisible by {size_ratio}"):
                a[:, :, 1:].view(view_dtype)

            # A non-last stride not divisible by the ratio.
            a = make_tensor((4, 4, size_ratio), dtype=dtype, device=device, low=-5, high=5)
            a = a.as_strided((4, 4, size_ratio), (size_ratio, 1, 1))
            with self.assertRaisesRegex(
                    RuntimeError,
                    rf"self.stride\(1\) must be divisible by {size_ratio}"):
                a.view(view_dtype)
    @onlyNativeDeviceTypes
    def test_view_as_complex(self, device):
        """view_as_complex returns a view pairing the last (size-2, stride-1)
        dim into complex values; invalid layouts must raise."""
        def fn(contiguous_input=True, dim0=0, dim1=1):
            t = torch.randn(3, 2, 2, device=device)
            c_t = t[:, :, 0] + 1j * t[:, :, 1]

            input = self._do_transpose(t, contiguous_input, dim0, dim1)

            if input.size()[-1] != 2:
                self.assertRaisesRegex(
                    RuntimeError, "Tensor must have a last dimension of size 2",
                    lambda: torch.view_as_complex(input))
                return

            if input.stride()[-1] != 1:
                self.assertRaisesRegex(
                    RuntimeError, "Tensor must have a last dimension with stride 1",
                    lambda: torch.view_as_complex(input))
                return

            res = torch.view_as_complex(input)
            self.assertEqual(res, self._do_transpose(c_t, contiguous_input, dim0, dim1))
            self.assertTrue(self.is_view_of(t, res))

        fn()
        fn(contiguous_input=False)
        # RuntimeError since in this case the last dim of input would not be of size 2
        fn(contiguous_input=False, dim0=0, dim1=2)
        # RuntimeError since in this case the last dim of input would not have stride 1
        fn(contiguous_input=False, dim0=1, dim1=2)

        # RuntimeError since in this case the stride of non-last dim of input would not be of size 2
        x = torch.randn(3, 3, device=device)
        t = torch.as_strided(x, (2, 2), (1, 1))
        self.assertRaisesRegex(
            RuntimeError, "Tensor must have a stride divisible by 2 for all but last dimension",
            lambda: torch.view_as_complex(t))

        # tensor with zero elements
        x = torch.tensor([], device=device)  # torch.Size([0])
        self.assertRaisesRegex(
            RuntimeError, "Tensor must have a last dimension of size 2",
            lambda: torch.view_as_complex(x))

        # zero dimension tensor
        z = torch.tensor(2.0)
        self.assertRaisesRegex(
            RuntimeError, "Input tensor must have one or more dimensions",
            lambda: torch.view_as_complex(z))

        y = x.reshape(0, 2)  # torch.Size([0, 2])
        res = torch.view_as_complex(y)
        self.assertTrue(self.is_view_of(x, res))
        self.assertEqual(res.shape, torch.Size([0]))
    @onlyNativeDeviceTypes
    @dtypes(*complex_types(), torch.complex32)
    def test_view_as_real(self, device, dtype):
        """view_as_real returns a view exposing real/imag as a trailing size-2 dim."""
        def fn(contiguous_input=True):
            t = torch.randn(3, 4, dtype=dtype, device=device)
            input = self._do_transpose(t, contiguous_input)
            res = torch.view_as_real(input)
            self.assertEqual(res[:, :, 0], input.real)
            self.assertEqual(res[:, :, 1], input.imag)
            self.assertTrue(self.is_view_of(t, res))

        fn()
        fn(contiguous_input=False)

        # tensor with zero elements
        x = torch.tensor([], dtype=dtype, device=device)
        res = torch.view_as_real(x)
        self.assertTrue(self.is_view_of(x, res))
        self.assertEqual(res.shape, torch.Size([0, 2]))

        # tensor with zero dim
        x = torch.tensor(2 + 3j, dtype=dtype, device=device)
        res = torch.view_as_real(x)
        self.assertTrue(self.is_view_of(x, res))
        self.assertEqual(res.shape, torch.Size([2]))
    @onlyNativeDeviceTypes
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
    def test_view_tensor_split(self, device, dtype):
        """tensor_split must return views of the input along both dims."""
        a = make_tensor((40, 30), dtype=dtype, device=device, low=-9, high=9)
        a_split_dim0 = a.tensor_split(7, 0)
        for a_split_dim0_tensor in a_split_dim0:
            self.assertTrue(self.is_view_of(a, a_split_dim0_tensor))
        a_split_dim1 = a.tensor_split(7, 1)
        for a_split_dim1_tensor in a_split_dim1:
            self.assertTrue(self.is_view_of(a, a_split_dim1_tensor))
    @onlyNativeDeviceTypes
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
    def test_view_tensor_hsplit(self, device, dtype):
        """hsplit must return views; a write to the base shows up in the piece."""
        t = make_tensor((4, 4, 4), dtype=dtype, device=device, low=-9, high=9)
        t_hsplit = torch.hsplit(t, 2)
        for t_hsplit_tensor in t_hsplit:
            self.assertTrue(self.is_view_of(t, t_hsplit_tensor))
        t[2, 2, 2] = 7
        self.assertEqual(t_hsplit[1][2, 0, 2], t[2, 2, 2])
    @onlyNativeDeviceTypes
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
    def test_view_tensor_vsplit(self, device, dtype):
        """vsplit must return views; a write to the base shows up in the piece."""
        t = make_tensor((4, 4, 4), dtype=dtype, device=device, low=-9, high=9)
        t_vsplit = torch.vsplit(t, 2)
        for t_vsplit_tensor in t_vsplit:
            self.assertTrue(self.is_view_of(t, t_vsplit_tensor))
        t[2, 2, 2] = 7
        self.assertEqual(t_vsplit[1][0, 2, 2], t[2, 2, 2])
    @onlyNativeDeviceTypes
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
    def test_view_tensor_dsplit(self, device, dtype):
        """dsplit must return views; a write to the base shows up in the piece."""
        t = make_tensor((4, 4, 4), dtype=dtype, device=device, low=-9, high=9)
        t_dsplit = torch.dsplit(t, 2)
        for t_dsplit_tensor in t_dsplit:
            self.assertTrue(self.is_view_of(t, t_dsplit_tensor))
        t[2, 2, 2] = 7
        self.assertEqual(t_dsplit[1][2, 2, 0], t[2, 2, 2])
    @onlyNativeDeviceTypes
    @dtypes(*all_types_and(torch.half, torch.bfloat16))
    def test_imag_noncomplex(self, device, dtype):
        """torch.imag on a non-complex tensor must raise."""
        t = torch.ones((5, 5), dtype=dtype, device=device)

        with self.assertRaises(RuntimeError):
            torch.imag(t)
    @onlyNativeDeviceTypes
    @dtypes(*complex_types())
    def test_real_imag_view(self, device, dtype):
        """.real/.imag are views of the complex base; values match NumPy."""
        def compare_with_numpy(contiguous_input=True):
            t = torch.randn(3, 3, dtype=dtype, device=device)
            if not contiguous_input:
                u = t.T
            else:
                u = t

            re = u.real
            exp = torch.from_numpy(u.cpu().numpy().real).to(device=device)
            self.assertEqual(re, exp)
            # for the case of contiguous_input, t=u
            # for the case of non contiguous_input, the base still remains
            # t since we are performing a view operation to make the input non-contiguous
            self.assertTrue(self.is_view_of(t, re))

            im = u.imag
            exp = torch.from_numpy(u.cpu().numpy().imag).to(device=device)
            self.assertEqual(im, exp)
            self.assertTrue(self.is_view_of(t, im))

        compare_with_numpy()
        compare_with_numpy(contiguous_input=False)

        # ensure storage offset is being correctly set
        a = torch.randn(10, dtype=dtype)
        self.assertEqual(a[5:].real, a.real[5:])
        self.assertEqual(a[5:].imag, a.imag[5:])
    @onlyNativeDeviceTypes
    @dtypes(*complex_types())
    def test_conj_imag_view(self, device, dtype) -> None:
        """conj() is a lazy view; its .imag is a negated view (is_neg())."""
        t = _make_tensor((4, 5,), dtype, device)
        t_numpy_conj = torch.from_numpy(t.cpu().numpy().conj()).to(device=device)
        v = t.conj()
        self.assertTrue(self.is_view_of(t, v))
        self.assertEqual(v, t_numpy_conj)

        # Always true here since @dtypes only lists complex dtypes.
        if (t.is_complex()):
            v_imag = v.imag
            self.assertTrue(self.is_view_of(t, v_imag))
            self.assertEqual(v_imag, t_numpy_conj.imag)
            self.assertTrue(v_imag.is_neg())
    @onlyNativeDeviceTypes
    def test_conj_view_with_shared_memory(self, device) -> None:
        """Ops mixing a tensor with conj views of itself (including in-place and
        out= overlapping the inputs) must agree with the functional result."""
        a = _make_tensor((4, 5,), torch.cfloat, device)
        b = a.conj()
        c = a.conj()

        self.assertEqual(torch.add(a, b), a.add_(b))
        self.assertEqual(torch.add(b, c), torch.add(b, c, out=a))
        self.assertEqual(torch.add(b, c), b.add_(c))
    @onlyNativeDeviceTypes
    @dtypes(*product(complex_types(), all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool)))
    @suppress_warnings
    def test_set_real_imag(self, device, dtypes):
        """Assigning through .real/.imag writes into the complex tensor; complex
        sources contribute only their real parts."""
        x = torch.randn(10, dtype=dtypes[0], device=device)

        new_real = _make_tensor((10,), dtypes[1], device)
        new_imag = _make_tensor((10,), dtypes[1], device)

        x.real = new_real
        x.imag = new_imag

        if dtypes[1].is_complex:
            self.assertEqual(x.real, new_real.real, exact_dtype=False)
            self.assertEqual(x.imag, new_imag.real, exact_dtype=False)

        else:
            self.assertEqual(x.real, new_real, exact_dtype=False)
            self.assertEqual(x.imag, new_imag, exact_dtype=False)
def test_diagonal_view(self, device) -> None:
t = torch.ones((5, 5), device=device)
v = torch.diagonal(t)
self.assertTrue(self.is_view_of(t, v))
v[0] = 0
self.assertEqual(t[0, 0], v[0])
t = torch.ones((3, 3, 3), device=device)
v = torch.diagonal(t, offset=1, dim1=1, dim2=2)
self.assertTrue(self.is_view_of(t, v))
v[0, 0] = 0
self.assertEqual(t[0, 0, 1], v[0, 0])
def test_select_view(self, device) -> None:
t = torch.ones((5, 5), device=device)
v = t.select(0, 2)
self.assertTrue(self.is_view_of(t, v))
v[0] = 0
self.assertEqual(t[2, 0], v[0])
    # Lazy hasn't implemented unbind yet.
    @onlyNativeDeviceTypes
    def test_unbind_view(self, device) -> None:
        """unbind() returns views; writes through each piece hit the base."""
        t = torch.zeros((5, 5), device=device)
        tup = torch.unbind(t)

        for idx, v in enumerate(tup):
            self.assertTrue(self.is_view_of(t, v))

            v[0] = idx + 1
            self.assertEqual(t[idx, 0], v[0])
    # TODO: opinfo this or move to unbind's test suite
    def test_unbind(self):
        """Autograd through unbind: full backward, single-output grad, gradcheck."""
        stacked = torch.randn(3, 10, 10, requires_grad=True)
        x, y, z = stacked.unbind()
        grad = torch.randn(3, 10, 10)
        torch.autograd.backward([x, y, z], grad.unbind())
        self.assertEqual(stacked.grad, grad)
        # check that it works with only one gradient provided (#9977)
        for i in range(3):
            stacked = torch.randn(3, 10, 10, requires_grad=True)
            outs = stacked.unbind()
            gi = grad.unbind()[i]
            g, = torch.autograd.grad(outs[i], stacked, gi)
            # Gradient is zero everywhere except the unbound slice i.
            g_expected = torch.stack([gi if j == i else torch.zeros_like(gi)
                                      for j in range(3)], dim=0)
            self.assertEqual(g, g_expected)

        # Check with gradcheck
        stacked = torch.randn(3, 10, 10, dtype=torch.double, requires_grad=True)
        gradcheck(lambda x: x.unbind(), (stacked,), check_forward_ad=True)
    # TODO: Fix this test for LTC. There is an interaction with dynamic shapes here that is broken,
    # causing asserts to trigger.
    @onlyNativeDeviceTypes
    def test_expand_view(self, device) -> None:
        """expand() returns a view; a write through it hits the (broadcast) base."""
        t = torch.ones((5, 1), device=device)
        v = t.expand(5, 5)
        self.assertTrue(self.is_view_of(t, v))

        v[2, 2] = 0
        self.assertEqual(t[2, 0], v[2, 2])
    def test_expand_as_view(self, device):
        """expand_as() returns a view, like expand()."""
        t = torch.ones((5, 1), device=device)
        e = torch.empty((5, 5), device=device)
        v = t.expand_as(e)
        self.assertTrue(self.is_view_of(t, v))

        v[2, 2] = 0
        self.assertEqual(t[2, 0], v[2, 2])
def test_narrow_view(self, device):
t = torch.ones((5, 5), device=device)
v = torch.narrow(t, 1, 2, 2)
self.assertTrue(self.is_view_of(t, v))
v[0, 0] = 0
self.assertEqual(t[0, 2], v[0, 0])
def test_permute_view(self, device) -> None:
t = torch.ones((5, 5), device=device)
v = t.permute(1, 0)
self.assertTrue(self.is_view_of(t, v))
v[0, 1] = 0
self.assertEqual(t[1, 0], v[0, 1])
    def test_transpose_view(self, device):
        """swapdims/swapaxes/transpose all return views."""
        for fn in (torch.swapdims, torch.swapaxes, torch.transpose):
            t = torch.ones((5, 5), device=device)
            v = fn(t, 0, 1)
            self.assertTrue(self.is_view_of(t, v))

            v[0, 1] = 0
            self.assertEqual(t[1, 0], v[0, 1])
    def test_transpose_inplace_view(self, device):
        """In-place swapdims_/swapaxes_/transpose_ on a view keep it a view."""
        t = torch.ones(5, 5, device=device)
        v = t.view_as(t)
        v = v.swapdims_(0, 1)
        self.assertTrue(self.is_view_of(t, v))
        v[0, 1] = 0
        self.assertEqual(t[1, 0], v[0, 1])

        t = torch.ones(5, 5, device=device)
        v = t.view_as(t)
        v = v.swapaxes_(0, 1)
        self.assertTrue(self.is_view_of(t, v))
        v[0, 1] = 0
        self.assertEqual(t[1, 0], v[0, 1])

        t = torch.ones(5, 5, device=device)
        v = t.view_as(t)
        v = v.transpose_(0, 1)
        self.assertTrue(self.is_view_of(t, v))
        v[0, 1] = 0
        self.assertEqual(t[1, 0], v[0, 1])
    def test_t_view(self, device):
        """t() returns a view; writes propagate back to the base."""
        t = torch.ones((5, 5), device=device)
        v = t.t()
        self.assertTrue(self.is_view_of(t, v))

        v[0, 1] = 0
        self.assertEqual(t[1, 0], v[0, 1])
    def test_t_inplace_view(self, device):
        """In-place t_() on a view keeps it a view of the original base."""
        t = torch.ones(5, 5, device=device)
        v = t.view_as(t)
        v = v.t_()
        self.assertTrue(self.is_view_of(t, v))
        v[0, 1] = 0
        self.assertEqual(t[1, 0], v[0, 1])
    def test_T_view(self, device):
        """The .T/.H/.mT/.mH attribute accessors all return views."""
        for op in ("T", "H", "mT", "mH"):
            t = torch.ones((5, 5), device=device)
            v = getattr(t, op)
            self.assertTrue(self.is_view_of(t, v))

            v[0, 1] = 0
            self.assertEqual(t[1, 0], v[0, 1])
    def test_unfold_view(self, device):
        """unfold() returns a view; writes propagate back to the base."""
        t = torch.ones(10, device=device)
        v = t.unfold(0, 3, 2)
        self.assertTrue(self.is_view_of(t, v))

        v[1, 0] = 0
        self.assertEqual(t[2], v[1, 0])
    def test_squeeze_view(self, device):
        """squeeze() returns a view whose base is the original tensor."""
        t = torch.ones(5, 1, 5, device=device)
        v = torch.squeeze(t)
        self.assertTrue(self.is_view_of(t, v))
        v[0, 1] = 0
        self.assertEqual(t, v._base)
    def test_squeeze_inplace_view(self, device):
        """In-place squeeze_() on a view keeps it a view of the original base."""
        t = torch.ones(5, 5, device=device)
        v = t.view_as(t)
        v = v.squeeze_()
        self.assertTrue(self.is_view_of(t, v))
        v[0, 1] = 0
        self.assertEqual(t, v._base)
    def test_unsqueeze_view(self, device):
        """unsqueeze() returns a view; writes propagate back to the base."""
        t = torch.ones(5, 5, device=device)
        v = torch.unsqueeze(t, 1)
        self.assertTrue(self.is_view_of(t, v))

        v[0, 0, 1] = 0
        self.assertEqual(t[0, 1], v[0, 0, 1])
    def test_unsqueeze_inplace_view(self, device):
        """In-place unsqueeze_() on a view keeps it a view of the original base."""
        t = torch.ones(5, 5, device=device)
        v = t.view_as(t)
        v = v.unsqueeze_(1)
        self.assertTrue(self.is_view_of(t, v))
        v[0, 0, 1] = 0
        self.assertEqual(t[0, 1], v[0, 0, 1])
    def test_as_strided_view(self, device):
        """as_strided() returns a view; writes propagate back to the base."""
        t = torch.ones(5, 5, device=device)
        v = torch.as_strided(t, (25,), (1,))
        self.assertTrue(self.is_view_of(t, v))

        v[6] = 0
        self.assertEqual(t[1, 1], v[6])
    def test_as_strided_inplace_view(self, device):
        """In-place as_strided_() on a view keeps it a view of the original base."""
        t = torch.ones(5, 5, device=device)
        v = t.view_as(t)
        v = v.as_strided_((25,), (1,))
        self.assertTrue(self.is_view_of(t, v))
        v[6] = 0
        self.assertEqual(t[1, 1], v[6])
    def test_as_strided_gradients(self):
        """gradcheck/gradgradcheck of as_strided over views, expands, overlapping
        and out-of-input layouts."""
        def test(x, prepro_fn, size, strides, offset=None):
            x = x.to(torch.double).detach().requires_grad_()

            # Check that forward will **not** resize storage because it may
            # cause NaN in output and fail numerical Jacobian check consequently
            with torch.no_grad():
                y = prepro_fn(x) if prepro_fn is not None else x
                max_offset = sum((si - 1) * st for si, st in zip(size, strides))
                max_offset += offset if offset is not None else y.storage_offset()
                assert max_offset < len(y.storage()), "test case resizes storage"

            def closure(x):
                if prepro_fn is not None:
                    x = prepro_fn(x)
                return x.as_strided(size, strides, offset)

            gradcheck(closure, [x], check_forward_ad=True)
            gradgradcheck(closure, [x])

        # test
        test(torch.arange(0, 25), lambda x: x.view(5, 5), [3, 3], [6, 2], 2)

        # test crazy stride at dim with size 1 case
        test(torch.randn(12), None, [1, 2, 1, 5], [0, 5, 100, 1], 2)

        # test expand case
        test(torch.randn(5), None, [3, 3, 3], [0, 1, 0], 2)
        test(torch.randn(5), None, [3, 3, 3], [0, 0, 0], 4)
        test(torch.randn(5), lambda x: x.expand(5, 5), [5, 5], [0, 1], 0)

        # test non-expand overlapping case
        test(torch.randn(35), None, [6, 6], [5, 1], 2)
        test(torch.randn(15), None, [3, 2], [3, 6], 2)

        # test transpose case
        test(torch.randn(3, 4), None, [4, 3], [1, 4])

        # test "getting things outside the input" case
        x = torch.randn(6, 2)
        test(x[3:], None, [3, 2], [2, 1], 0)  # should be all zeros
        self.assertEqual(x[3:].as_strided([3, 2], [2, 1], 0), x[:3])

        # test select on expanded input case
        test(torch.randn(2, 3), lambda x: x.expand(10, 2, 3), [2, 3], [3, 1], 0)
def test_view_view(self, device):
t = torch.ones(5, 5, device=device)
v = t.view(25)
self.assertTrue(self.is_view_of(t, v))
v[6] = 0
self.assertEqual(t[1, 1], v[6])
    def test_view_as_view(self, device):
        """view_as() returns a view, like view()."""
        t = torch.ones(5, 5, device=device)
        e = torch.empty((25,))
        v = t.view_as(e)
        self.assertTrue(self.is_view_of(t, v))

        v[6] = 0
        self.assertEqual(t[1, 1], v[6])
    def test_contiguous_self(self, device):
        """contiguous() on an already-contiguous tensor returns the tensor itself."""
        t = torch.ones(5, 5, device=device)
        s = t.contiguous()
        self.assertTrue(s is t)
    @skipMeta
    # self.is_view_of reports false positives for lazy
    @onlyNativeDeviceTypes
    def test_contiguous_nonview(self, device):
        """contiguous() on a non-contiguous tensor copies (is not a view)."""
        t = torch.ones(5, 5, device=device)
        nv = t.t().contiguous()
        self.assertTrue(not self.is_view_of(t, nv))

        nv[0, 0] = 0
        self.assertNotEqual(t[0, 0], nv[0, 0])
    def test_reshape_view(self, device):
        """reshape() on a contiguous tensor returns a view."""
        t = torch.ones(5, 5, device=device)
        v = torch.reshape(t, (25,))
        self.assertTrue(self.is_view_of(t, v))

        v[6] = 0
        self.assertEqual(t[1, 1], v[6])
    def test_reshape_as_view(self, device):
        """reshape_as() on a contiguous tensor returns a view."""
        t = torch.ones(5, 5, device=device)
        e = torch.empty((25,), device=device)
        v = t.reshape_as(e)
        self.assertTrue(self.is_view_of(t, v))

        v[6] = 0
        self.assertEqual(t[1, 1], v[6])
    @skipMeta
    # self.is_view_of reports false positives for lazy
    @onlyNativeDeviceTypes
    def test_reshape_nonview(self, device):
        """reshape() must copy when the input layout cannot be viewed."""
        t = torch.ones(5, 5, device=device)
        nv = torch.reshape(t.t(), (25,))
        self.assertTrue(not self.is_view_of(t, nv))

        nv[6] = 0
        self.assertNotEqual(t[1, 1], nv[6])
    # This test use as_strided to construct a tensor with overlapping memory,
    # which is not handled by the functionalization pass.
    @onlyNativeDeviceTypes
    def test_flatten_view(self, device):
        """flatten() returns a view whenever the flattened dims are mergeable."""
        def test_writes_propagate(t, v):
            idx_t = (0,) * t.ndim
            idx_v = (0,) * v.ndim
            v[idx_v] = 0
            self.assertEqual(t[idx_t], v[idx_v])

        t = torch.ones(1, 2, 3, 4, device=device)
        v = t.flatten()
        self.assertTrue(self.is_view_of(t, v))
        test_writes_propagate(t, v)

        # zero-dimensional tensor
        t = torch.tensor(1, device=device)
        v = t.flatten()
        test_writes_propagate(t, v)
        self.assertTrue(self.is_view_of(t, v))

        t = torch.ones(1, 2, 3, 4, device=device).transpose(2, 3)
        v = t.flatten(0, 1)
        test_writes_propagate(t, v)
        self.assertTrue(self.is_view_of_same_base(t, v))

        # stride[i] = stride[i + 1] * size[i + 1] is satisfied for 3 groups:
        t = torch.ones(720, device=device) \
            .as_strided((2, 3, 2, 3, 5, 4), (6, 2, 15, 5, 1, 0))
        #               [--1--|---2---|-3-] [--1--|----2---|-3-]
        v1 = t.flatten(0, 1)
        v2 = v1.flatten(1, 3)
        v3 = v2.flatten(2, 2)
        test_writes_propagate(t, v1)
        self.assertTrue(self.is_view_of_same_base(t, v1))
        test_writes_propagate(t, v2)
        self.assertTrue(self.is_view_of_same_base(t, v2))
        test_writes_propagate(t, v3)
        self.assertTrue(self.is_view_of_same_base(t, v3))
@onlyNativeDeviceTypes
def test_flatten_nonview(self, device):
def assert_is_nonview(t, nv):
idx_t = (0,) * t.ndim
idx_nv = (0,) * nv.ndim
self.assertTrue(not nv._is_view())
nv[idx_nv] = 0
if device != "meta":
self.assertNotEqual(t[idx_t], nv[idx_nv])
t = torch.ones(2, 3, 2, 3, device=device).transpose(2, 3)
nv = t.flatten(1, 3)
assert_is_nonview(t, nv)
t = torch.ones(2, 2, device=device).T
nv = t.flatten()
assert_is_nonview(t, nv)
# flatten returns the original object if start_dim=end_dim
t = t = torch.ones(2, 2, device=device)
nv = t.flatten(1, 1)
self.assertTrue(t is nv)
    def test_basic_indexing_slice_view(self, device):
        """Basic slice indexing returns a view."""
        t = torch.ones(5, 5, device=device)
        v = t[:2, :3]
        self.assertTrue(self.is_view_of(t, v))

        v[0, 0] = 0
        self.assertEqual(t[0, 0], v[0, 0])
    def test_basic_indexing_ellipses_view(self, device):
        """Ellipsis indexing returns a view."""
        t = torch.ones(5, 5, device=device)
        v = t[..., :2]
        self.assertTrue(self.is_view_of(t, v))

        v[0, 0] = 0
        self.assertEqual(t[0, 0], v[0, 0])
    def test_basic_indexing_newaxis_view(self, device):
        """Indexing with None (newaxis) still returns a view."""
        t = torch.ones(5, 5, device=device)
        v = t[None, :2, 3]
        self.assertTrue(self.is_view_of(t, v))

        v[0, 0] = 0
        self.assertEqual(t[0, 3], v[0, 0])
    def test_advanced_indexing_nonview(self, device):
        """Advanced (tensor) indexing produces a copy, not a view."""
        t = torch.ones(3, 3, device=device)
        rows = torch.tensor([[0, 0], [2, 2]], device=device)
        cols = torch.tensor([[0, 1], [2, 2]], device=device)
        nv = t[rows, cols]
        self.assertTrue(not self.is_view_of(t, nv))

        nv[1, 1] = 0
        self.assertNotEqual(t[2, 2], nv[1, 1])
def test_advanced_indexing_assignment(self, device):
t = torch.ones(3, 3, device=device)
rows = torch.tensor([[0, 0], [2, 2]], device=device)
cols = torch.tensor([[0, 1], [2, 2]], device=device)
t[rows, cols] = 0
self.assertEqual(t[2, 2], 0)
    @unittest.skip("See https://github.com/pytorch/pytorch/pull/32720")
    def test_chunk_view(self, device):
        """(skipped) chunk() should return views; see linked PR."""
        t = torch.zeros(3, 3, device=device)
        l = torch.chunk(t, 3)

        for idx, v in enumerate(l):
            self.assertTrue(self.is_view_of(t, v))

            v[0, 0] = idx + 1
            self.assertEqual(t[idx, 0], v[0, 0])
    @unittest.skip("See https://github.com/pytorch/pytorch/pull/32720")
    def test_split_view(self, device):
        """(skipped) split() should return views; see linked PR."""
        t = torch.zeros(3, 3, device=device)
        l = torch.split(t, [1, 1, 1])

        for idx, v in enumerate(l):
            self.assertTrue(self.is_view_of(t, v))

            v[0, 0] = idx + 1
            self.assertEqual(t[idx, 0], v[0, 0])
    def test_movedim_view(self, device):
        """movedim/moveaxis (tuple and scalar forms) return views."""
        def run_test(device, op):
            t = torch.zeros(3, 3, device=device)
            out = op(t)

            self.assertTrue(self.is_view_of(t, out))

            # Randomly change values in output
            # and verify that original is changed
            # as well.
            for _ in range(3):
                idx_1, idx_2 = random.randint(0, 2), random.randint(0, 2)
                out[idx_1, idx_2] = random.random()
                self.assertEqual(t[idx_2, idx_1], out[idx_1, idx_2])

        for fn in [torch.movedim, torch.moveaxis]:
            op = partial(fn, source=(0, 1), destination=(1, 0))
            run_test(device, op)

            op = partial(fn, source=0, destination=1)
            run_test(device, op)
    # Testing that the generated view_copy kernel and its derivative are implemented correctly
    def test_view_copy(self, device):
        """view_copy must not alias its input and must match view in value/grad."""
        a = torch.randn(4, device=device, requires_grad=True)
        a_ref = a.clone().detach().requires_grad_()
        a_view = a_ref.view(2, 2)
        a_view_copy = torch.view_copy(a, (2, 2))

        # view_copy ops don't preserve view relationship
        self.assertTrue(self.is_view_of(a_ref, a_view))
        self.assertFalse(self.is_view_of(a, a_view_copy))

        a_view_copy.sum().backward()
        a_view.sum().backward()

        # forward and backward give the same shape + result
        self.assertEqual(a_view_copy, a_view)
        self.assertEqual(a.grad, a_ref.grad)
    def test_view_copy_out(self, device):
        """out= variants of *_copy ops (single and multi-output) match the
        functional variants."""
        a = torch.randn(2, 2, device=device)
        out = torch.empty(2, device=device)

        torch.diagonal_copy(a, out=out)
        expected = torch.diagonal_copy(a)

        self.assertEqual(expected, out)

        a = torch.randn(4, device=device)
        out1 = torch.empty(2, device=device)
        out2 = torch.empty(2, device=device)

        torch.split_copy(a, 2, out=(out1, out2))
        expected1, expected2 = torch.split_copy(a, 2)

        self.assertEqual(expected1, out1)
        self.assertEqual(expected2, out2)
class TestOldViewOps(TestCase):
    """Older view/shape-op tests pending refactor into the view-ops suite."""
def test_ravel(self, device):
def _test_ravel(tensors, size, nc=False):
for src in tensors:
# Continuous Tensor -> View
flat = src.ravel()
self.assertEqual(flat.shape, torch.Size([size]))
self.assertEqual(src.view(-1), flat)
self.assertIs(flat._base, src)
self.assertTrue(flat.is_contiguous())
# Non-continuous Tensor -> Copy
if nc:
nc_src = src.t()
nc_flat = nc_src.ravel()
self.assertEqual(nc_flat.shape, torch.Size([size]))
self.assertEqual(nc_src.contiguous().view(-1), nc_flat)
self.assertIsNot(nc_flat._base, src)
self.assertTrue(nc_flat.is_contiguous())
# Test that flatten returns 1-dim tensor when given a 0-dim tensor
zero_dim_tensor = torch.tensor(123, device=device)
flat0 = zero_dim_tensor.ravel()
one_dim_tensor = torch.tensor([123], device=device)
flat1 = zero_dim_tensor.ravel()
nc_ones_tensor = torch.ones(10, device=device)[::2]
flat2 = nc_ones_tensor.ravel()
self.assertEqual(zero_dim_tensor.shape, torch.Size([]))
self.assertEqual(flat0.shape, torch.Size([1]))
self.assertEqual(one_dim_tensor.shape, torch.Size([1]))
self.assertEqual(flat1.shape, torch.Size([1]))
self.assertEqual(nc_ones_tensor.shape, torch.Size([5]))
self.assertEqual(flat2.shape, torch.Size([5]))
self.assertEqual(flat0, one_dim_tensor)
self.assertEqual(flat0, flat1)
self.assertEqual(flat0.shape, flat1.shape)
self.assertTrue(flat0.is_contiguous())
self.assertTrue(flat1.is_contiguous())
self.assertTrue(flat2.is_contiguous())
# Test both float tensor and quantized tensor
tensors = [torch.randn(5, 5, 5, 5, device=device),
torch._empty_affine_quantized([5, 5, 5, 5],
scale=2,
zero_point=3,
dtype=torch.quint8,
device=device)]
_test_ravel(tensors, 625)
tensors = [torch.randn(0, 2, 3, device=device),
torch.randn(3, 0, 2, device=device),
torch._empty_affine_quantized([0, 2, 3],
scale=2,
zero_point=3,
dtype=torch.quint8,
device=device),
torch._empty_affine_quantized([3, 0, 2],
scale=2,
zero_point=3,
dtype=torch.quint8,
device=device)]
_test_ravel(tensors, 0)
tensors = [torch.randn(5, 5, device=device),
torch._empty_affine_quantized([5, 5],
scale=2,
zero_point=3,
dtype=torch.quint8,
device=device)]
_test_ravel(tensors, 25, True)
    # TODO: this should be refactored into the view ops test suite
    def test_empty_reshape(self, device):
        """reshape of a zero-element tensor is viewable; -1 with 0 dims raises."""
        x = torch.randn(0, 6, device=device)
        self.assertEqual((1, 0, 6, 1, 1), x.reshape(1, 0, 6, 1, 1).shape)
        # should be viewable -- i.e. data_ptr is the same.
        self.assertEqual(x.data_ptr(), x.reshape(1, 0, 6, 1, 1).data_ptr())

        # match NumPy semantics -- don't infer the size of dimension with a degree of freedom
        self.assertRaises(RuntimeError, lambda: x.reshape(0, -1))
    @skipIfTorchDynamo("TorchDynamo fails with unknown reason")
    def test_expand(self, device):
        """expand/expand_as semantics: target sizes, double expand, non-contiguous
        inputs, unsqueeze compatibility, -1 sizes and empty-to-empty."""
        tensor = torch.rand(1, 8, 1, device=device)
        tensor2 = torch.rand(5, device=device)
        template = torch.rand(4, 8, 5, device=device)
        target = template.size()
        self.assertEqual(tensor.expand_as(template).size(), target)
        self.assertEqual(tensor.expand(4, 8, 5).size(), target)
        self.assertEqual(tensor.expand(target).size(), target)
        self.assertEqual(tensor2.expand_as(template).size(), target)
        self.assertEqual(tensor2.expand(4, 8, 5).size(), target)
        self.assertEqual(tensor2.expand(target).size(), target)

        # test double expand
        self.assertEqual(tensor2.expand(1, 5).expand(2, 2, 5), tensor2.repeat(2, 2, 1))

        # test non-contiguous
        noncontig = torch.randn(5, 2, 1, 3, device=device)[:, 0]
        self.assertFalse(noncontig.is_contiguous())
        self.assertEqual(noncontig.expand(2, 5, 4, 3), noncontig.contiguous().repeat(2, 1, 4, 1))

        # make sure it's compatible with unsqueeze
        expanded = tensor2.expand(1, 1, 5)
        unsqueezed = tensor2.unsqueeze(0).unsqueeze(1)
        self.assertEqual(expanded, unsqueezed)
        self.assertEqual(expanded.stride(), unsqueezed.stride())

        # test -1 as target size
        self.assertEqual(tensor.expand(4, -1, 5), tensor.expand(4, 8, 5))
        self.assertRaises(RuntimeError, lambda: tensor2.expand(-1, -1))

        # test expanding empty to empty
        self.assertEqual(torch.zeros(0, device=device).expand((0,)), torch.zeros(0, device=device))
# TODO: this should be refactored into the view ops test suite
def test_view_empty(self, device):
x = torch.randn(0, 6, device=device)
self.assertEqual((1, 0, 6, 1, 1), x.view(1, 0, 6, 1, 1).shape)
# TODO: this should be refactored into the view ops test suite
@onlyNativeDeviceTypes
def test_reshape(self, device):
    """reshape/reshape_as: view-vs-copy behavior, -1 inference, 0-dim
    scalars, and empty tensors."""
    x = torch.randn(3, 3, device=device)
    # contiguous input: reshape aliases the original storage
    self.assertEqual(x.data_ptr(), x.reshape(-1).data_ptr())
    self.assertEqual(x.data_ptr(), x.reshape(1, 9, 1).data_ptr())
    self.assertEqual(torch.reshape(x, (9,)), x.reshape(9))
    # at most one dimension may be inferred
    self.assertRaises(RuntimeError, lambda: x.reshape(-1, -1))
    y = torch.randn(4, 4, 4, device=device)[:, 0, :]  # non-contiguous slice
    # .data_ptr() on meta tensors is always 0 so they are equal regardless of the reshape
    if device != "meta":
        # non-contiguous input flattened with -1 must be copied
        self.assertNotEqual(y.data_ptr(), y.reshape(-1).data_ptr())
    self.assertEqual(y.contiguous().view(-1), y.reshape(-1))
    # this particular reshape of the slice is still viewable (no copy)
    self.assertEqual(y.reshape(2, 2, 4).data_ptr(), y.data_ptr())
    s = torch.randn((), device=device)  # 0-dim scalar
    self.assertEqual(s.data_ptr(), s.reshape(()).data_ptr())
    self.assertEqual(s.reshape(-1).shape, (1,))
    self.assertRaises(RuntimeError, lambda: s.reshape(2))
    empty = torch.tensor([], device=device)
    self.assertEqual(empty, empty.reshape(-1))
    self.assertEqual(empty, empty.reshape([0]))
    # TODO: fix these once we have multi-dimensional empty tensors
    self.assertEqual(empty.reshape([0, 1]).shape, (0, 1))
    self.assertEqual(empty.reshape([1, -1]).shape, (1, 0))
    self.assertRaises(RuntimeError, lambda: empty.reshape(1))
    x = torch.randn(3, 3, device=device)
    self.assertEqual(x.data_ptr(), x.reshape_as(torch.rand(9)).data_ptr())
    self.assertEqual(x.data_ptr(), x.reshape_as(torch.rand(1, 9, 1)).data_ptr())
    # element-count mismatch with the template tensor
    self.assertRaises(RuntimeError, lambda: x.reshape_as(torch.rand(10, device=device)))
def test_flatten(self, device):
# Test that flatten returns 1-dim tensor when given a 0-dim tensor
zero_dim_tensor = torch.tensor(123, device=device)
flat0 = zero_dim_tensor.flatten()
one_dim_tensor = torch.tensor([123], device=device)
flat1 = zero_dim_tensor.flatten()
self.assertEqual(zero_dim_tensor.shape, torch.Size([]))
self.assertEqual(flat0.shape, torch.Size([1]))
self.assertEqual(one_dim_tensor.shape, torch.Size([1]))
self.assertEqual(flat1.shape, torch.Size([1]))
self.assertEqual(flat0, one_dim_tensor)
self.assertEqual(flat0, flat1)
self.assertEqual(flat0.shape, flat1.shape)
# Test both float tensor and quantized tensor
tensors = [torch.randn(5, 5, 5, 5, device=device),
torch._empty_affine_quantized([5, 5, 5, 5],
scale=2,
zero_point=3,
dtype=torch.quint8,
device=device)]
for src in tensors:
flat = src.flatten(0, -1)
self.assertEqual(flat.shape, torch.Size([625]))
self.assertEqual(src.view(-1), flat.view(-1))
flat = src.flatten(0, 2)
self.assertEqual(flat.shape, torch.Size([125, 5]))
self.assertEqual(src.view(-1), flat.view(-1))
flat = src.flatten(0, 1)
self.assertEqual(flat.shape, torch.Size([25, 5, 5]))
self.assertEqual(src.view(-1), flat.view(-1))
flat = src.flatten(1, 2)
self.assertEqual(flat.shape, torch.Size([5, 25, 5]))
self.assertEqual(src.view(-1), flat.view(-1))
flat = src.flatten(2, 3)
self.assertEqual(flat.shape, torch.Size([5, 5, 25]))
self.assertEqual(src.view(-1), flat.view(-1))
flat = src.flatten(-2, -1)
self.assertEqual(flat.shape, torch.Size([5, 5, 25]))
self.assertEqual(src.view(-1), flat.view(-1))
flat = src.flatten(2, 2)
self.assertEqual(flat, src)
# out of bounds index
with self.assertRaisesRegex(IndexError, 'Dimension out of range'):
src.flatten(5, 10)
# invalid start and end
with self.assertRaisesRegex(RuntimeError, 'start_dim cannot come after end_dim'):
src.flatten(2, 0)
# TODO: update to work on CUDA, too
@onlyCPU
def test_narrow(self, device):
    """narrow() with positive and negative dims/starts on a 3x3 matrix."""
    base = torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
    cases = [
        ((0, 0, 1), [[0, 1, 2]]),
        ((0, 0, 2), [[0, 1, 2], [3, 4, 5]]),
        ((0, 1, 1), [[3, 4, 5]]),
        ((0, -1, 1), [[6, 7, 8]]),
        ((0, -2, 2), [[3, 4, 5], [6, 7, 8]]),
        ((0, -3, 3), [[0, 1, 2], [3, 4, 5], [6, 7, 8]]),
        ((-1, -1, 1), [[2], [5], [8]]),
        ((-2, -1, 1), [[6, 7, 8]]),
    ]
    for (dim, start, length), expected in cases:
        self.assertEqual(base.narrow(dim, start, length), torch.tensor(expected))
# TODO: update to work on CUDA, too
@onlyCPU
def test_narrow_tensor(self, device):
    """narrow() accepts a 0-dim integral tensor as `start`; float scalars
    and 1-D tensors are rejected."""
    base = torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
    self.assertEqual(base.narrow(0, torch.tensor(0), 1), torch.tensor([[0, 1, 2]]))
    for bad_start in (torch.tensor(0.), torch.tensor([0]), torch.tensor([0, 1])):
        with self.assertRaises(Exception):
            base.narrow(0, bad_start, 1)
# TODO: make work on CUDA, too
@onlyCPU
def test_t(self, device):
    """Tensor.t(): no-op for 0-D/1-D, transpose for 2-D, error for >2-D,
    on both strided and sparse (COO) layouts."""
    # Test 0D tensors
    x = torch.randn(())
    self.assertEqual(x, x.t())
    x = x.to_sparse()
    self.assertEqual(x, x.t())
    # Test 1D tensors
    x = torch.arange(4)
    self.assertEqual(x, x.t())
    x = x.to_sparse()
    self.assertEqual(x, x.t())
    # Test 2D tensors
    x = torch.rand((2, 2))
    self.assertEqual(x.t(), x.transpose(0, 1))
    x = x.to_sparse()
    self.assertEqual(x.t(), x.transpose(0, 1))
    # Test 3D tensor
    x = torch.rand((2, 2, 2))
    with self.assertRaisesRegex(RuntimeError, 'expects a tensor with <= 2 dimensions, but self is 3D'):
        x.t()
    x = x.to_sparse()
    # sparse tensors produce a different error message (sparse/dense dims)
    with self.assertRaisesRegex(RuntimeError, 'expects a tensor with <= 2 sparse and 0 dense dimensions'):
        x.t()
@onlyCPU
def test_split(self, device):
    """split() with a fixed split size and with explicit per-chunk sizes."""
    def _check(src, split_arg, dim, target_sizes):
        # every piece must match the corresponding narrow() of the source
        pieces = src.split(split_arg, dim)
        offset = 0
        for expected_size, piece in zip(target_sizes, pieces):
            self.assertEqual(piece.size(), expected_size)
            self.assertEqual(src.narrow(dim, offset, expected_size[dim]), piece,
                             atol=0, rtol=0)
            offset += expected_size[dim]

    # fixed split size; the last chunk is smaller
    _check(torch.rand(7, 4), 3, 0, ([3, 4], [3, 4], [1, 4]))
    # Variable sections split
    big = torch.randn(20, 10)
    _check(big, [5, 5, 10], 0, ([5, 10], [5, 10], [10, 10]))
    _check(big, [2, 2, 6], 1, ([20, 2], [20, 2], [20, 6]))
@onlyCPU
def test_chunk(self, device):
    """chunk() piece sizes along a dim, plus validation of the chunk count."""
    src = torch.rand(4, 7)
    expected_sizes = ([4, 3], [4, 3], [4, 1])
    offset = 0
    for expected_size, piece in zip(expected_sizes, src.chunk(3, 1)):
        self.assertEqual(piece.size(), expected_size)
        self.assertEqual(src.narrow(1, offset, expected_size[1]), piece,
                         atol=0, rtol=0)
        offset += expected_size[1]
    # Invalid chunk sizes
    error_regex = 'chunk expects.*greater than 0'
    for bad_count in (0, -2):
        with self.assertRaisesRegex(RuntimeError, error_regex):
            src.chunk(bad_count)
# TODO: make work on CUDA, too
@skipIfTorchDynamo("TorchDynamo fails with unknown reason")
@onlyCPU
def test_unsqueeze(self, device) -> None:
    """unsqueeze/unsqueeze_ on contiguous and non-contiguous inputs."""
    src = torch.randn(2, 3, 4)
    self.assertEqual(src.unsqueeze(1), src.view(2, 1, 3, 4))
    self.assertEqual(src.clone().unsqueeze_(2), src.view(2, 3, 1, 4))
    # slicing out a middle column makes the tensor non-contiguous
    src = src[:, 1]
    self.assertFalse(src.is_contiguous())
    self.assertEqual(src.unsqueeze(1), src.contiguous().view(2, 1, 4))
    self.assertEqual(src.clone().unsqueeze_(2), src.contiguous().view(2, 4, 1))
# unit test for special case transposed copy (see ATen/native/Copy.cpp for details)
def test_big_transpose(self, device):
t = torch.rand(456, 789, device=device)
t1 = t.t().contiguous()
t2 = torch.from_numpy(t.cpu().numpy().transpose())
self.assertEqual(t1, t2)
def test_T(self, device):
a = torch.randn(2, 3, 4, device=device)
t1 = a.T
t2 = a.permute(2, 1, 0)
self.assertEqual(t2, t1)
b = torch.randn(10, device=device)
self.assertEqual(b, b.T)
scalar = torch.tensor(5, device=device)
self.assertEqual(scalar, scalar.T)
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
def test_transposes(self, device, dtype):
    """T/H/mT/mH attributes and adjoint() equal a transpose of the last two
    dims (plus conj for H/mH/adjoint); 0-D inputs pass through unchanged."""
    for op in ("T", "H", "mT", "mH", "adjoint"):
        # matrix variants (mT/mH/adjoint) additionally accept batched 3-D inputs
        shapes = ((), (2, 3), (2, 3, 4)) if op[0] == "m" or op == "adjoint" else ((), (2, 3),)
        for shape in shapes:
            a = make_tensor(shape, device=device, dtype=dtype)
            t1 = getattr(a, op)
            if op == "adjoint":  # adjoint is a method, the rest are properties
                t1 = t1()
            # build the reference: swap the last two dims, conj for *H/adjoint
            t2 = a
            if a.ndim != 0:
                t2 = t2.transpose(-2, -1)
            if op[-1] == "H" or op == "adjoint":
                t2 = t2.conj()
            self.assertEqual(t2, t1)
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
def test_transposes_errors(self, device, dtype):
    """H/mT/mH/adjoint reject 1-D inputs; H additionally rejects 3-D ones."""
    for op in ("H", "mT", "mH", "adjoint"):
        bad_shapes = ((2,), (2, 3, 4)) if op == "H" else ((2,),)
        for shape in bad_shapes:
            a = make_tensor(shape, device=device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "only supported on matrices"):
                result = getattr(a, op)
                if op == "adjoint":
                    result = result()
def test_python_types(self, device):
a1 = torch.randn((1, 2), device=device, dtype=torch.float64)
a2 = torch.randn((1, 2), device=device, dtype=float)
self.assertEqual(a1.dtype, a2.dtype)
b1 = torch.arange(10, 20, dtype=torch.int64, device=device)
b2 = torch.arange(10, 20, dtype=int, device=device)
self.assertEqual(b1.dtype, b2.dtype)
c1 = torch.tensor([True, False], dtype=torch.bool, device=device)
c2 = torch.tensor([True, False], dtype=bool, device=device)
self.assertEqual(c1.dtype, c2.dtype)
# TODO: is resize best put in test_view_ops?
def test_resize_as_preserves_strides(self, device):
x = torch.empty(2, 3).t()
old_strides = x.stride()
x.resize_as_(x)
self.assertEqual(x.stride(), old_strides)
def test_memory_format_resize_as(self, device):
def test_helper(shape, memory_format, device):
xc = torch.randn(shape, device=device).contiguous(memory_format=memory_format)
flat = torch.randn(xc.numel(), device=device)
flat.resize_as_(xc, memory_format=torch.preserve_format)
self.assertTrue(flat.is_contiguous(memory_format=memory_format))
test_helper((10, 3, 32, 32), torch.channels_last, device)
test_helper((3, 10, 3, 32, 32), torch.channels_last_3d, device)
def test_memory_format_resize_(self, device):
def test_helper(shape, numel, memory_format, device):
flat = torch.randn(numel, device=device)
flat.resize_(shape, memory_format=memory_format)
self.assertTrue(flat.is_contiguous(memory_format=memory_format))
test_helper((10, 3, 32, 32), 10 * 3 * 32 * 32, torch.channels_last, device)
test_helper((3, 10, 3, 32, 32), 3 * 10 * 3 * 32 * 32, torch.channels_last_3d, device)
@onlyNativeDeviceTypes
@dtypes(torch.int64, torch.float, torch.complex128)
def test_transpose_invalid(self, device, dtype):
    """swapdims/swapaxes/transpose raise IndexError for out-of-range dims."""
    for fn in (torch.swapdims, torch.swapaxes, torch.transpose):
        shape = _rand_shape(4, min_size=5, max_size=10)
        x = _generate_input(shape, dtype, device, False)
        # either argument out of range must be rejected
        for dim0, dim1 in ((5, 0), (0, 5)):
            with self.assertRaisesRegex(IndexError, "Dimension out of range"):
                fn(x, dim0, dim1)
@dtypes(torch.int64, torch.float, torch.complex128)
def test_transpose_vs_numpy(self, device, dtype):
    """Compare swapdims/swapaxes/transpose against np.swapaxes for random
    shapes of rank 0..4, randomly negating dim indices; also check the
    dim0 == dim1 no-op case."""
    for fn in (torch.swapdims, torch.swapaxes, torch.transpose):
        for nd in range(5):
            shape = _rand_shape(nd, min_size=5, max_size=10)
            x = _generate_input(shape, dtype, device, with_extremal=False)
            for random_negative in [True, False]:
                for src_dim, dst_dim in permutations(range(nd), r=2):
                    random_prob = random.random()
                    # randomly express one or both dims as negative indices;
                    # results must be unchanged either way
                    if random_negative and random_prob > 0.66:
                        src_dim = src_dim - nd
                    elif random_negative and random_prob > 0.33:
                        dst_dim = dst_dim - nd
                    elif random_negative:
                        src_dim = src_dim - nd
                        dst_dim = dst_dim - nd
                    # bind this fn's keyword names for the chosen dims
                    partial_map = {
                        torch.swapdims: partial(torch.swapdims, dim0=src_dim, dim1=dst_dim),
                        torch.swapaxes: partial(torch.swapaxes, axis0=src_dim, axis1=dst_dim),
                        torch.transpose: partial(torch.transpose, dim0=src_dim, dim1=dst_dim),
                    }
                    torch_fn = partial_map[fn]
                    np_fn = partial(np.swapaxes, axis1=src_dim, axis2=dst_dim)
                    self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)
        # Move dim to same position
        x = torch.randn(2, 3, 5, 7, 11)
        partial_map = {
            torch.swapdims: partial(torch.swapdims, dim0=0, dim1=0),
            torch.swapaxes: partial(torch.swapaxes, axis0=0, axis1=0),
            torch.transpose: partial(torch.transpose, dim0=0, dim1=0),
        }
        torch_fn = partial_map[fn]
        np_fn = partial(np.swapaxes, axis1=0, axis2=0)
        self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)
def _test_atleast_dim(self, torch_fn, np_fn, device, dtype):
    """Helper: check a torch.atleast_* function against its NumPy
    counterpart on random inputs of rank 0..4, both as a single argument
    and as a tuple of repeated arguments."""
    for ndims in range(0, 5):
        shape = _rand_shape(ndims, min_size=5, max_size=10)
        for n in range(ndims + 1):
            for with_extremal in [False, True]:
                for contiguous in [False, True]:
                    # Generate Input.
                    x = _generate_input(shape, dtype, device, with_extremal)
                    if contiguous:
                        # NOTE(review): despite the flag name, .T generally
                        # yields a NON-contiguous tensor -- confirm intent
                        x = x.T
                    self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)
                    # Compare sequence input
                    torch_sequence_x = (x,) * random.randint(3, 10)
                    np_sequence_x = tuple(np.array(x.detach().cpu().numpy()) for x in torch_sequence_x)
                    torch_res = torch_fn(*torch_sequence_x)
                    np_res = np_fn(*np_sequence_x)
                    # normalize both result tuples to CPU torch tensors first
                    torch_res = tuple(x.cpu() for x in torch_res)
                    np_res = tuple(torch.from_numpy(x) for x in np_res)
                    self.assertEqual(np_res, torch_res)
# TODO: are these view ops?
@dtypes(*all_types_and_complex_and(torch.half))
def test_atleast(self, device, dtype):
    """torch.atleast_{1,2,3}d agree with their NumPy counterparts."""
    pairs = ((torch.atleast_1d, np.atleast_1d),
             (torch.atleast_2d, np.atleast_2d),
             (torch.atleast_3d, np.atleast_3d))
    for torch_fn, np_fn in pairs:
        self._test_atleast_dim(torch_fn, np_fn, device, dtype)
# TODO: OpInfo this
def _test_atleast(self, device, torch_fn):
    """Helper: gradcheck/gradgradcheck torch_fn on 0-D through 4-D double
    inputs, individually and as a single multi-argument call."""
    # 0-dim
    s = torch.tensor(0.5, dtype=torch.double, requires_grad=True)
    gradcheck(lambda x: torch_fn(x), s)
    gradgradcheck(lambda x: torch_fn(x), s)
    # 1-dim
    a = torch.rand(4, dtype=torch.double, requires_grad=True)
    gradcheck(lambda x: torch_fn(x), a)
    gradgradcheck(lambda x: torch_fn(x), a)
    # 2,3,4-dim
    b = torch.rand(4, 3, dtype=torch.double, requires_grad=True)
    c = torch.rand(4, 3, 2, dtype=torch.double, requires_grad=True)
    d = torch.rand(4, 3, 2, 1, dtype=torch.double, requires_grad=True)
    # all ranks at once: atleast_* accepts multiple tensors
    input_tuple = (s, a, b, c, d)
    gradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)
    gradgradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)
def test_atleast_gradient(self, device):
    """atleast_{1,2,3}d must be differentiable (via the gradcheck helper)."""
    for fn in (torch.atleast_1d, torch.atleast_2d, torch.atleast_3d):
        self._test_atleast(device, fn)
@onlyCPU
@dtypes(torch.float)
def test_broadcast_tensors(self, device, dtype):
    """broadcast_tensors expands every input to the common shape."""
    inputs = (torch.randn(2, 1, 3, dtype=dtype, device=device),
              torch.randn(3, dtype=dtype, device=device),
              torch.randn(3, 1, dtype=dtype, device=device))
    expected_size = (2, 3, 3)
    for out in torch.broadcast_tensors(*inputs):
        self.assertTrue(out.size() == expected_size)
@onlyCPU
def test_broadcast_shapes(self, device):
    """torch.broadcast_shapes agrees with broadcast_tensors on valid inputs
    and rejects negative sizes, mismatched shapes, and float inputs."""
    examples = [(), (1,), (2,), (1, 1), (3, 1), (3, 2), (4, 1, 1), (4, 3, 2)]
    for s0 in examples:
        x0 = torch.randn(s0)
        expected = torch.broadcast_tensors(x0)[0].shape
        actual = torch.broadcast_shapes(s0)
        self.assertEqual(expected, actual)
        for s1 in examples:
            x1 = torch.randn(s1)
            expected = torch.broadcast_tensors(x0, x1)[0].shape
            actual = torch.broadcast_shapes(s0, s1)
            self.assertEqual(expected, actual)
    # bare ints are accepted as single-dim shapes
    inputs_list = [[1, 4], [4, 1], [1, 1, 3]]
    for integral_inputs in inputs_list:
        res1 = torch.broadcast_shapes(*integral_inputs)
        res2 = torch.broadcast_tensors(*map(torch.empty, integral_inputs))[0].shape
        self.assertEqual(res1, res2)
    # negative sizes are rejected (int-argument form)
    inputs_with_neg_vals = [[1, 1, -12], [-1, 1], [-11, ]]
    for integral_inputs_with_neg_vals in inputs_with_neg_vals:
        with self.assertRaisesRegex(RuntimeError, "Trying to create tensor with negative dimension"):
            torch.broadcast_shapes(*integral_inputs_with_neg_vals)
    # incompatible shapes are rejected
    integral_inputs_error_case = [(3, 5), (2, 4, 1)]
    for error_input in integral_inputs_error_case:
        with self.assertRaisesRegex(RuntimeError, "Shape mismatch: objects cannot be broadcast to a single shape"):
            torch.broadcast_shapes(*error_input)
    # negative sizes are rejected (tuple form), singly and pairwise
    negative_inputs = [(-1,), (1, -12), (4, -11), (-4, 1), (1, 1, -2)]
    for s0 in negative_inputs:
        with self.assertRaisesRegex(RuntimeError, "Trying to create tensor with negative dimension"):
            torch.broadcast_shapes(s0)
        for s1 in negative_inputs:
            with self.assertRaisesRegex(RuntimeError, "Trying to create tensor with negative dimension"):
                torch.broadcast_shapes(s0, s1)
    # float arguments are rejected outright
    float_inputs_error_case = [(1.1, 2.0), (1.1, 1.0)]
    for error_case in float_inputs_error_case:
        for float_input in error_case:
            with self.assertRaisesRegex(RuntimeError, "Input shapes "
                                        "should be of type ints, a tuple of ints, or a list of ints"):
                torch.broadcast_shapes(float_input)
    # mixed int / tuple arguments
    diff_input_types = [(1, (5,)), (3, (1,)), (1, (3, 4))]
    for s0 in diff_input_types:
        res1 = torch.broadcast_shapes(*s0)
        res2 = torch.broadcast_tensors(*map(torch.empty, s0))[0].shape
        self.assertEqual(res1, res2)
# Skip BFloat16 since numpy does not support it
@dtypes(*all_types_and_complex_and(torch.half, torch.bool))
def test_broadcast_to(self, device, dtype):
    """torch.broadcast_to matches np.broadcast_to for every ordered pair of
    sample shapes, and raises when broadcasting is impossible."""
    def can_broadcast(s0, s1):
        # s0.dim() <= s1.dim(), reverse s0 and s1 to compare trailing dimension
        s0 = tuple(reversed(s0))
        s1 = tuple(reversed(s1))
        for i in range(len(s0)):
            if s0[i] != 1 and s0[i] != s1[i]:
                return False
        return True
    # sizes are ordered by rank, so combinations() always yields s0 with
    # rank <= s1's rank (the precondition of can_broadcast)
    sizes = (
        (), (1,), (2,), (1, 1), (3, 1), (3, 2), (4, 1, 1), (4, 3, 2)
    )
    for s0, s1 in combinations(sizes, r=2):
        t = make_tensor(s0, dtype=dtype, device=device, low=-9, high=9)
        t_np = t.cpu().numpy()
        if can_broadcast(s0, s1):
            res = torch.broadcast_to(t, s1)
            np_res = np.broadcast_to(t_np, s1)
            self.assertEqual(res, np_res)
        else:
            with self.assertRaisesRegex(RuntimeError,
                                        r"The expanded size of the tensor \(\d\) "
                                        r"must match the existing size \(\d\)"):
                torch.broadcast_to(t, s1)
def test_view(self, device):
    """view(): size matching, -1 inference, empty tensors, non-contiguous
    inputs where only contiguous dim-chunks are regrouped, and stride-0
    (expanded) inputs."""
    tensor = torch.rand(15, device=device)
    template = torch.rand(3, 5, device=device)
    empty = torch.empty(0, device=device)
    target = template.size()
    self.assertEqual(tensor.view_as(template).size(), target)
    self.assertEqual(tensor.view(3, 5).size(), target)
    self.assertEqual(tensor.view(torch.Size([3, 5])).size(), target)
    self.assertEqual(tensor.view(-1, 5).size(), target)
    self.assertEqual(tensor.view(3, -1).size(), target)
    tensor_view = tensor.view(5, 3)
    tensor_view.fill_(random.uniform(0, 1))
    self.assertEqual(empty.view_as(empty), empty)
    self.assertEqual(empty.view(0), empty)
    self.assertEqual(empty.view(0, 3, 0, 1).size(), torch.Size([0, 3, 0, 1]))
    self.assertEqual(empty.view(0, 3, 0, 1).view(0), empty)
    # test size inference with empty tensors
    self.assertEqual(empty.view(-1).size(), torch.Size([0]))
    self.assertEqual(empty.view(10, 3, -1).size(), torch.Size([10, 3, 0]))
    # -1 is ambiguous when the remaining dims multiply to zero
    with self.assertRaisesRegex(RuntimeError, r"because the unspecified dimension size -1 can be any value"):
        empty.view(-1, 0)
    with self.assertRaisesRegex(RuntimeError, r"because the unspecified dimension size -1 can be any value"):
        empty.view(3, 0, -1, 0)
    # element-count mismatches
    self.assertRaises(RuntimeError, lambda: tensor.view(15, 0))
    self.assertRaises(RuntimeError, lambda: tensor.view(7, -1))
    self.assertRaises(RuntimeError, lambda: tensor.view(15, -1, -1))
    # test view when tensor is not contiguous in every dimension, but only
    # contiguous dimensions are touched.
    tensor = torch.rand(4, 2, 5, 1, 6, 2, 9, 3, device=device).transpose(-1, 2).transpose(-2, 3)
    # size: [ 4, 2, 3, 9, 6, 2, 1, 5]
    # stride: [3840, 1620, 1, 3, 54, 27, 324, 324]
    # contiguous dim chunks: [__________, ____, ____, __________, ____, ____]
    # merging 1 to chunk after: [__________, ____, ____, __________, __________]
    contig_tensor = tensor.clone()
    # a view must agree with the same view of a contiguous clone
    # [4, 2] => [8, 1]
    # [3] => [3]
    # [9] => [3, 3]
    # [6, 2] => [4, 1, 3]
    # [1, 5] => [5]
    view_size = [8, 1, 3, 3, 3, 4, 1, 3, 5]
    self.assertEqual(tensor.view(*view_size), contig_tensor.view(*view_size))
    # [4, 2] => [2, 4]
    # [3] => [3]
    # [9] => [1, 9]
    # [6, 2] => [2, 2, 3]
    # [1, 5] => [5, 1]
    view_size = [2, 4, 3, 1, 9, 2, 2, 3, 5, 1]
    self.assertEqual(tensor.view(*view_size), contig_tensor.view(*view_size))
    # adding size 1 dims
    view_size = [1, 1, 2, 1, 4, 3, 1, 1, 9, 1, 2, 1, 2, 3, 1, 5, 1, 1]
    self.assertEqual(tensor.view(*view_size), contig_tensor.view(*view_size))
    # invalid views
    self.assertRaises(RuntimeError, lambda: tensor.view(-1))
    # crossing [4, 2], [3]
    self.assertRaises(RuntimeError, lambda: tensor.view(24, 9, 6, 2, 1, 5))
    # crossing [6, 2], [1, 5]
    self.assertRaises(RuntimeError, lambda: tensor.view(8, 3, 9, 6, 10))
    # crossing [9], [6, 2]
    self.assertRaises(RuntimeError, lambda: tensor.view(8, 3, 54, 2, 1, 5))
    # view with stride 0 dims
    tensor = torch.empty(1, 1, device=device).expand(3, 4)  # all dims are contiguous
    contig_tensor = tensor.clone()
    self.assertEqual(tensor.view(-1), contig_tensor.view(-1))
    self.assertEqual(tensor.view(1, -1, 1), contig_tensor.view(1, -1, 1))
    self.assertEqual(tensor.view(-1, 1), contig_tensor.view(-1, 1))
    self.assertEqual(tensor.view(6, 2, 1), contig_tensor.view(6, 2, 1))
    self.assertEqual(tensor.view(1, 6, 2, 1), contig_tensor.view(1, 6, 2, 1))
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
def test_reshape_view_semantics(self, device, dtype):
    """reshape() returns a view when possible and a copy otherwise."""
    src = make_tensor((15, 4), dtype=dtype, device=device)
    target = (20, 3)
    # Contiguous source: reshape aliases the same storage.
    viewed = src.reshape(target)
    self.assertEqual((viewed.size()), target)
    self.assertEqual(src.storage().data_ptr(), viewed.storage().data_ptr())
    # Transposed (non-contiguous) source: reshape is forced to copy.
    copied = src.transpose(0, 1).reshape(target)
    self.assertEqual(copied.size(), target)
    self.assertNotEqual(src.storage().data_ptr(), copied.storage().data_ptr())
def test_contiguous(self, device):
x = torch.randn(1, 16, 5, 5, device=device)
self.assertTrue(x.is_contiguous())
stride = list(x.stride())
stride[0] = 20
# change the stride in dimension 0. the tensor is still contiguous because size[0] is 1
x.set_(x.storage(), 0, x.size(), stride)
self.assertTrue(x.is_contiguous())
@onlyNativeDeviceTypes
# Skip BFloat16 since numpy does not support it
@dtypes(*all_types_and_complex_and(torch.half, torch.bool))
def test_tensor_split_sections(self, device, dtype):
    """tensor_split by section count matches np.array_split for every dim
    and count, with the count given as an int or as a 0-D int64 tensor."""
    input_sizes = [
        (0,),
        (10,),
        (10, 0),
        (0, 10),
        (4, 10),
        (12, 3),
    ]
    for input_size in input_sizes:
        a_base = make_tensor(input_size, dtype=dtype, device=device, low=-9, high=9)
        # Run tests on transposed input if it has at least 2 dims
        # NOTE(review): the check is dim() > 2 (>=3 dims) while the comment
        # says "at least 2"; with these input_sizes the transposed branch
        # never triggers -- confirm which was intended
        for a in [a_base, a_base.t()] if a_base.dim() > 2 else [a_base]:
            a_n = a.cpu().numpy()
            for dim in range(-a.dim(), a.dim()):
                for sections in range(1, 2 * a.size(dim)):
                    msg = f'input_size {input_size}, sections {sections}, dim {dim}'
                    result1 = torch.tensor_split(a, sections, dim)
                    result2 = torch.tensor_split(a, torch.tensor(sections, dtype=torch.int64), dim)
                    # every piece keeps the input's device and dtype
                    for r1, r2 in zip(result1, result2):
                        self.assertEqual(r1.device, torch.device(device), msg=msg)
                        self.assertEqual(r1.dtype, dtype, msg=msg)
                        self.assertEqual(r2.device, torch.device(device), msg=msg)
                        self.assertEqual(r2.dtype, dtype, msg=msg)
                    result_n = np.array_split(a_n, sections, dim)
                    self.assertEqual(result_n, result1, msg=msg)
                    self.assertEqual(result_n, result2, msg=msg)
@onlyNativeDeviceTypes
# Skip BFloat16 since numpy does not support it
@dtypes(*all_types_and_complex_and(torch.half, torch.bool))
def test_tensor_split_indices(self, device, dtype):
    """tensor_split by explicit split indices matches np.array_split, with
    the indices given as a Python tuple or as a 1-D int64 tensor."""
    input_sizes = [
        (0,),
        (10,),
        (10, 0),
        (0, 10),
        (4, 10),
        (12, 3),
    ]
    # includes empty, negative, out-of-range, and unsorted index tuples
    indices_args = [
        (),
        (0,),
        (3,),
        (10,),
        (-1,),
        (-10,),
        (2, -1),
        (3, 4, 10),
        (0, -1, 0, 10),
        (1, 5, 2, 8),
    ]
    for input_size in input_sizes:
        a_base = make_tensor(input_size, dtype=dtype, device=device, low=-9, high=9)
        # Run tests on transposed input if it has at least 2 dims
        # NOTE(review): the check is dim() > 2 (>=3 dims); with these
        # input_sizes the transposed branch never triggers -- confirm intent
        for a in [a_base, a_base.t()] if a_base.dim() > 2 else [a_base]:
            a_n = a.cpu().numpy()
            for dim in range(-a.dim(), a.dim()):
                for indices in indices_args:
                    result_1 = torch.tensor_split(a, indices, dim)
                    result_2 = torch.tensor_split(a, torch.tensor(indices, dtype=torch.int64), dim)
                    msg = f'input_size {input_size}, indices {indices}, dim {dim}'
                    # every piece keeps the input's device and dtype
                    for r1, r2 in zip(result_1, result_2):
                        self.assertEqual(r1.device, torch.device(device), msg=msg)
                        self.assertEqual(r1.dtype, dtype, msg=msg)
                        self.assertEqual(r2.device, torch.device(device), msg=msg)
                        self.assertEqual(r2.dtype, dtype, msg=msg)
                    result_n = np.array_split(a_n, indices, dim)
                    self.assertEqual(result_n, result_1, msg=msg)
                    self.assertEqual(result_n, result_2, msg=msg)
@onlyNativeDeviceTypes
def test_tensor_split_errors(self, device):
    """tensor_split error cases parallel np.array_split's failures (with
    torch-specific error types/messages), plus tensor-typed
    indices_or_sections validation."""
    S = 10
    test_cases = [
        # input size, sections or indices, dim, error type, error message, numpy error type
        [(S,), 10, 1, IndexError, r'Dimension out of range', IndexError],
        [(), 10, 0, RuntimeError, r'tensor_split expected at least a 1-dimensional tensor, '
            + 'but got a tensor with 0 dims', IndexError],
        [(S,), (10,), 1, IndexError, r'Dimension out of range', IndexError],
        [(), (10,), 0, RuntimeError, r'tensor_split expected at least a 1-dimensional tensor, '
            + 'but got a tensor with 0 dims', IndexError],
        [(S,), 0, 0, RuntimeError, r'number of sections must be larger than 0, got 0', ValueError],
        [(S,), -1, 0, RuntimeError, r'number of sections must be larger than 0, got -1', ValueError],
    ]
    for input_size, sections_or_indices, dim, err, err_msg, numpy_err in test_cases:
        a = torch.randn(input_size, device=device)
        msg = f'input_size {input_size}, sections_or_indices {sections_or_indices}, dim {dim}'
        # both the int/tuple form and the tensor form must raise the same error
        with self.assertRaisesRegex(err, err_msg, msg=msg):
            torch.tensor_split(a, sections_or_indices, dim)
        with self.assertRaisesRegex(err, err_msg, msg=msg):
            torch.tensor_split(a, torch.tensor(sections_or_indices), dim)
        with self.assertRaises(numpy_err, msg=msg):
            np.array_split(a.cpu().numpy(), sections_or_indices, dim)
    # additional tests for tensor_split with tensor_indices_or_sections
    # (note: `a` and `dim` intentionally carry over from the last loop iteration)
    with self.assertRaisesRegex(RuntimeError,
                                r'tensor_split expected tensor_indices_or_sections to have dtype of long, but got Float'):
        torch.tensor_split(a, torch.tensor(1.1), dim)
    with self.assertRaisesRegex(RuntimeError,
                                r'tensor_split expected tensor_indices_or_sections to be a'
                                + ' zero-dimensional or one-dimensional tensor, but got a tensor with 2 dims'):
        torch.tensor_split(torch.rand(S, device=device), torch.tensor(((1,),)), 0)
def test_resize_all_dtypes_and_devices(self, device):
    """resize_ reaches the requested shape for every dtype."""
    target_shape = (2, 2)
    for dt in all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool):
        t = torch.tensor([[1, 2], [3, 4], [5, 6]], dtype=dt, device=device)
        t.resize_(target_shape)
        self.assertEqual(target_shape, t.shape)
def test_resize_as_all_dtypes_and_devices(self, device):
    """resize_as_ adopts the template tensor's shape for every dtype."""
    for dt in all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool):
        src = torch.tensor([[1, 2], [3, 4], [5, 6]], dtype=dt, device=device)
        template = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=dt, device=device)
        src.resize_as_(template)
        self.assertEqual(template.shape, src.shape)
@onlyNativeDeviceTypes
def test_resize_overflow(self, device):
    """resize_ must detect storage-size and shape overflow."""
    scalar = torch.empty((), dtype=torch.float64)
    # 8-byte elements make the byte count overflow first
    with self.assertRaisesRegex(RuntimeError, 'Storage size calculation overflowed'):
        scalar.resize_([2, 4, 2**29, 2**29])
    with self.assertRaisesRegex(RuntimeError, 'overflow'):
        scalar.resize_([8, 8, 2**29, 2**29])
def test_view_all_dtypes_and_devices(self, device):
    """view() flattens correctly for every dtype."""
    for dt in all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool):
        t = torch.tensor([[1, 2], [3, 4], [5, 6]], dtype=dt, device=device)
        self.assertEqual(t.view(6).shape, [6])
@onlyCPU
def test_conj_neg_view_numpy_error(self, device):
    """Conjugate / negative-bit views cannot be exported to NumPy or
    reinterpreted via dtype-changing view()."""
    conj_view = torch.tensor([1 + 2j]).conj()
    neg_view = torch.tensor([1 + 2j]).conj().imag
    self.assertRaisesRegex(RuntimeError, "has conjugate bit set",
                           lambda: conj_view.numpy())
    self.assertRaisesRegex(RuntimeError, "has negative bit set",
                           lambda: neg_view.numpy())
    self.assertRaisesRegex(RuntimeError, "not supported for conjugate view tensors",
                           lambda: conj_view.view(torch.float64))
    self.assertRaisesRegex(RuntimeError, "not supported for tensors with negative bit set",
                           lambda: neg_view.view(torch.int32))
@onlyCPU
def test_crow_col_indices(self, device):
    """crow_indices()/col_indices() must behave as view ops on CSR tensors."""
    csr = torch.sparse_csr_tensor((0, 1, 2), (1, 0), (1, 2), size=(2, 2))
    # This is the test. If crow_indices is not a view op it'll
    # trigger an internal assert due to use count greater than 1
    # in debug build.
    csr.crow_indices()
    csr.col_indices()
# Generate device-specific variants of the test classes defined above.
instantiate_device_type_tests(TestViewOps, globals(), include_lazy=True)
instantiate_device_type_tests(TestOldViewOps, globals())
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_view_ops.py |
# Owner(s): ["module: tests"]
import torch
from torch import tensor
import unittest
import warnings
import random
from functools import reduce
import numpy as np
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCUDA, dtypes, dtypesIfCPU, dtypesIfCUDA,
onlyNativeDeviceTypes)
class TestIndexing(TestCase):
def test_index(self, device):
    """Smoke-test basic __getitem__: ints, slices, Ellipsis, None, LongTensor
    indices, stepped slices, and the failure modes of invalid indices."""
    def consec(size, start=1):
        # Consecutive values 1..numel (offset by start-1) shaped to `size`.
        sequence = torch.ones(torch.tensor(size).prod(0)).cumsum(0)
        sequence.add_(start - 1)
        return sequence.view(*size)

    reference = consec((3, 3, 3)).to(device)

    # empty tensor indexing
    self.assertEqual(reference[torch.LongTensor().to(device)], reference.new(0, 3, 3))

    self.assertEqual(reference[0], consec((3, 3)), atol=0, rtol=0)
    self.assertEqual(reference[1], consec((3, 3), 10), atol=0, rtol=0)
    self.assertEqual(reference[2], consec((3, 3), 19), atol=0, rtol=0)
    self.assertEqual(reference[0, 1], consec((3,), 4), atol=0, rtol=0)
    self.assertEqual(reference[0:2], consec((2, 3, 3)), atol=0, rtol=0)
    self.assertEqual(reference[2, 2, 2], 27, atol=0, rtol=0)
    self.assertEqual(reference[:], consec((3, 3, 3)), atol=0, rtol=0)

    # indexing with Ellipsis
    self.assertEqual(reference[..., 2], torch.tensor([[3., 6., 9.],
                                                      [12., 15., 18.],
                                                      [21., 24., 27.]]), atol=0, rtol=0)
    self.assertEqual(reference[0, ..., 2], torch.tensor([3., 6., 9.]), atol=0, rtol=0)
    self.assertEqual(reference[..., 2], reference[:, :, 2], atol=0, rtol=0)
    self.assertEqual(reference[0, ..., 2], reference[0, :, 2], atol=0, rtol=0)
    self.assertEqual(reference[0, 2, ...], reference[0, 2], atol=0, rtol=0)
    self.assertEqual(reference[..., 2, 2, 2], 27, atol=0, rtol=0)
    self.assertEqual(reference[2, ..., 2, 2], 27, atol=0, rtol=0)
    self.assertEqual(reference[2, 2, ..., 2], 27, atol=0, rtol=0)
    self.assertEqual(reference[2, 2, 2, ...], 27, atol=0, rtol=0)
    self.assertEqual(reference[...], reference, atol=0, rtol=0)

    reference_5d = consec((3, 3, 3, 3, 3)).to(device)
    self.assertEqual(reference_5d[..., 1, 0], reference_5d[:, :, :, 1, 0], atol=0, rtol=0)
    self.assertEqual(reference_5d[2, ..., 1, 0], reference_5d[2, :, :, 1, 0], atol=0, rtol=0)
    self.assertEqual(reference_5d[2, 1, 0, ..., 1], reference_5d[2, 1, 0, :, 1], atol=0, rtol=0)
    self.assertEqual(reference_5d[...], reference_5d, atol=0, rtol=0)

    # LongTensor indexing
    reference = consec((5, 5, 5)).to(device)
    idx = torch.LongTensor([2, 4]).to(device)
    self.assertEqual(reference[idx], torch.stack([reference[2], reference[4]]))
    # TODO: enable one indexing is implemented like in numpy
    # self.assertEqual(reference[2, idx], torch.stack([reference[2, 2], reference[2, 4]]))
    # self.assertEqual(reference[3, idx, 1], torch.stack([reference[3, 2], reference[3, 4]])[:, 1])

    # None indexing
    self.assertEqual(reference[2, None], reference[2].unsqueeze(0))
    self.assertEqual(reference[2, None, None], reference[2].unsqueeze(0).unsqueeze(0))
    self.assertEqual(reference[2:4, None], reference[2:4].unsqueeze(1))
    self.assertEqual(reference[None, 2, None, None], reference.unsqueeze(0)[:, 2].unsqueeze(0).unsqueeze(0))
    self.assertEqual(reference[None, 2:5, None, None], reference.unsqueeze(0)[:, 2:5].unsqueeze(2).unsqueeze(2))

    # indexing 0-length slice
    self.assertEqual(torch.empty(0, 5, 5), reference[slice(0)])
    self.assertEqual(torch.empty(0, 5), reference[slice(0), 2])
    self.assertEqual(torch.empty(0, 5), reference[2, slice(0)])
    self.assertEqual(torch.tensor([]), reference[2, 1:1, 2])

    # indexing with step
    reference = consec((10, 10, 10)).to(device)
    self.assertEqual(reference[1:5:2], torch.stack([reference[1], reference[3]], 0))
    self.assertEqual(reference[1:6:2], torch.stack([reference[1], reference[3], reference[5]], 0))
    self.assertEqual(reference[1:9:4], torch.stack([reference[1], reference[5]], 0))
    self.assertEqual(reference[2:4, 1:5:2], torch.stack([reference[2:4, 1], reference[2:4, 3]], 1))
    self.assertEqual(reference[3, 1:6:2], torch.stack([reference[3, 1], reference[3, 3], reference[3, 5]], 0))
    self.assertEqual(reference[None, 2, 1:9:4], torch.stack([reference[2, 1], reference[2, 5]], 0).unsqueeze(0))
    self.assertEqual(reference[:, 2, 1:6:2],
                     torch.stack([reference[:, 2, 1], reference[:, 2, 3], reference[:, 2, 5]], 1))

    lst = [list(range(i, i + 10)) for i in range(0, 100, 10)]
    tensor = torch.DoubleTensor(lst).to(device)
    # Fuzz random stepped slices against equivalent Python-list slicing.
    for _i in range(100):
        idx1_start = random.randrange(10)
        idx1_end = idx1_start + random.randrange(1, 10 - idx1_start + 1)
        idx1_step = random.randrange(1, 8)
        idx1 = slice(idx1_start, idx1_end, idx1_step)
        if random.randrange(2) == 0:
            idx2_start = random.randrange(10)
            idx2_end = idx2_start + random.randrange(1, 10 - idx2_start + 1)
            idx2_step = random.randrange(1, 8)
            idx2 = slice(idx2_start, idx2_end, idx2_step)
            lst_indexed = [l[idx2] for l in lst[idx1]]
            tensor_indexed = tensor[idx1, idx2]
        else:
            lst_indexed = lst[idx1]
            tensor_indexed = tensor[idx1]
        self.assertEqual(torch.DoubleTensor(lst_indexed), tensor_indexed)

    # Invalid slices/indices must raise the right exception type.
    self.assertRaises(ValueError, lambda: reference[1:9:0])
    self.assertRaises(ValueError, lambda: reference[1:9:-1])

    self.assertRaises(IndexError, lambda: reference[1, 1, 1, 1])
    self.assertRaises(IndexError, lambda: reference[1, 1, 1, 1:1])
    self.assertRaises(IndexError, lambda: reference[3, 3, 3, 3, 3, 3, 3, 3])

    self.assertRaises(IndexError, lambda: reference[0.0])
    self.assertRaises(TypeError, lambda: reference[0.0:2.0])
    self.assertRaises(IndexError, lambda: reference[0.0, 0.0:2.0])
    self.assertRaises(IndexError, lambda: reference[0.0, :, 0.0:2.0])
    self.assertRaises(IndexError, lambda: reference[0.0, ..., 0.0:2.0])
    self.assertRaises(IndexError, lambda: reference[0.0, :, 0.0])

    def delitem():
        del reference[0]

    # Tensors do not support item deletion.
    self.assertRaises(TypeError, delitem)
@onlyNativeDeviceTypes
@dtypes(torch.half, torch.double)
def test_advancedindex(self, device, dtype):
    """Integer-array ("advanced") indexing, for gets and sets, with each index
    given as a LongTensor, list, or tuple (chosen at random by `ri`), on
    contiguous, strided, and transposed tensors; mixed basic/advanced cases
    are cross-checked against NumPy via the assert_*_eq helpers."""
    # Tests for Integer Array Indexing, Part I - Purely integer array
    # indexing

    def consec(size, start=1):
        # Creates the sequence in float since CPU half doesn't support the
        # needed operations. Converts to dtype before returning.
        numel = reduce(lambda x, y: x * y, size, 1)
        sequence = torch.ones(numel, dtype=torch.float, device=device).cumsum(0)
        sequence.add_(start - 1)
        return sequence.view(*size).to(dtype=dtype)

    # pick a random valid indexer type
    def ri(indices):
        choice = random.randint(0, 2)
        if choice == 0:
            return torch.LongTensor(indices).to(device)
        elif choice == 1:
            return list(indices)
        else:
            return tuple(indices)

    def validate_indexing(x):
        self.assertEqual(x[[0]], consec((1,)))
        self.assertEqual(x[ri([0]), ], consec((1,)))
        self.assertEqual(x[ri([3]), ], consec((1,), 4))
        self.assertEqual(x[[2, 3, 4]], consec((3,), 3))
        self.assertEqual(x[ri([2, 3, 4]), ], consec((3,), 3))
        self.assertEqual(x[ri([0, 2, 4]), ], torch.tensor([1, 3, 5], dtype=dtype, device=device))

    def validate_setting(x):
        x[[0]] = -2
        self.assertEqual(x[[0]], torch.tensor([-2], dtype=dtype, device=device))
        x[[0]] = -1
        self.assertEqual(x[ri([0]), ], torch.tensor([-1], dtype=dtype, device=device))
        x[[2, 3, 4]] = 4
        self.assertEqual(x[[2, 3, 4]], torch.tensor([4, 4, 4], dtype=dtype, device=device))
        x[ri([2, 3, 4]), ] = 3
        self.assertEqual(x[ri([2, 3, 4]), ], torch.tensor([3, 3, 3], dtype=dtype, device=device))
        x[ri([0, 2, 4]), ] = torch.tensor([5, 4, 3], dtype=dtype, device=device)
        self.assertEqual(x[ri([0, 2, 4]), ], torch.tensor([5, 4, 3], dtype=dtype, device=device))

    # Only validates indexing and setting for halfs
    if dtype == torch.half:
        reference = consec((10,))
        validate_indexing(reference)
        validate_setting(reference)
        return

    # Case 1: Purely Integer Array Indexing
    reference = consec((10,))
    validate_indexing(reference)

    # setting values
    validate_setting(reference)

    # Tensor with stride != 1
    # strided is [1, 3, 5, 7]
    reference = consec((10,))
    strided = torch.tensor((), dtype=dtype, device=device)
    strided.set_(reference.storage(), storage_offset=0,
                 size=torch.Size([4]), stride=[2])

    self.assertEqual(strided[[0]], torch.tensor([1], dtype=dtype, device=device))
    self.assertEqual(strided[ri([0]), ], torch.tensor([1], dtype=dtype, device=device))
    self.assertEqual(strided[ri([3]), ], torch.tensor([7], dtype=dtype, device=device))
    self.assertEqual(strided[[1, 2]], torch.tensor([3, 5], dtype=dtype, device=device))
    self.assertEqual(strided[ri([1, 2]), ], torch.tensor([3, 5], dtype=dtype, device=device))
    self.assertEqual(strided[ri([[2, 1], [0, 3]]), ],
                     torch.tensor([[5, 3], [1, 7]], dtype=dtype, device=device))

    # stride is [4, 8]  (storage offsets of the two elements; strided is [5, 9])
    strided = torch.tensor((), dtype=dtype, device=device)
    strided.set_(reference.storage(), storage_offset=4,
                 size=torch.Size([2]), stride=[4])
    self.assertEqual(strided[[0]], torch.tensor([5], dtype=dtype, device=device))
    self.assertEqual(strided[ri([0]), ], torch.tensor([5], dtype=dtype, device=device))
    self.assertEqual(strided[ri([1]), ], torch.tensor([9], dtype=dtype, device=device))
    self.assertEqual(strided[[0, 1]], torch.tensor([5, 9], dtype=dtype, device=device))
    self.assertEqual(strided[ri([0, 1]), ], torch.tensor([5, 9], dtype=dtype, device=device))
    self.assertEqual(strided[ri([[0, 1], [1, 0]]), ],
                     torch.tensor([[5, 9], [9, 5]], dtype=dtype, device=device))

    # reference is 1 2
    #              3 4
    #              5 6
    reference = consec((3, 2))
    self.assertEqual(reference[ri([0, 1, 2]), ri([0])], torch.tensor([1, 3, 5], dtype=dtype, device=device))
    self.assertEqual(reference[ri([0, 1, 2]), ri([1])], torch.tensor([2, 4, 6], dtype=dtype, device=device))
    self.assertEqual(reference[ri([0]), ri([0])], consec((1,)))
    self.assertEqual(reference[ri([2]), ri([1])], consec((1,), 6))
    self.assertEqual(reference[[ri([0, 0]), ri([0, 1])]], torch.tensor([1, 2], dtype=dtype, device=device))
    self.assertEqual(reference[[ri([0, 1, 1, 0, 2]), ri([1])]],
                     torch.tensor([2, 4, 4, 2, 6], dtype=dtype, device=device))
    self.assertEqual(reference[[ri([0, 0, 1, 1]), ri([0, 1, 0, 0])]],
                     torch.tensor([1, 2, 3, 3], dtype=dtype, device=device))

    rows = ri([[0, 0],
               [1, 2]])
    columns = [0],
    self.assertEqual(reference[rows, columns], torch.tensor([[1, 1],
                                                             [3, 5]], dtype=dtype, device=device))

    rows = ri([[0, 0],
               [1, 2]])
    columns = ri([1, 0])
    self.assertEqual(reference[rows, columns], torch.tensor([[2, 1],
                                                             [4, 5]], dtype=dtype, device=device))
    rows = ri([[0, 0],
               [1, 2]])
    columns = ri([[0, 1],
                  [1, 0]])
    self.assertEqual(reference[rows, columns], torch.tensor([[1, 2],
                                                             [4, 5]], dtype=dtype, device=device))

    # setting values
    reference[ri([0]), ri([1])] = -1
    self.assertEqual(reference[ri([0]), ri([1])], torch.tensor([-1], dtype=dtype, device=device))
    reference[ri([0, 1, 2]), ri([0])] = torch.tensor([-1, 2, -4], dtype=dtype, device=device)
    self.assertEqual(reference[ri([0, 1, 2]), ri([0])],
                     torch.tensor([-1, 2, -4], dtype=dtype, device=device))
    reference[rows, columns] = torch.tensor([[4, 6], [2, 3]], dtype=dtype, device=device)
    self.assertEqual(reference[rows, columns],
                     torch.tensor([[4, 6], [2, 3]], dtype=dtype, device=device))

    # Verify still works with Transposed (i.e. non-contiguous) Tensors
    reference = torch.tensor([[0, 1, 2, 3],
                              [4, 5, 6, 7],
                              [8, 9, 10, 11]], dtype=dtype, device=device).t_()

    # Transposed: [[0, 4, 8],
    #              [1, 5, 9],
    #              [2, 6, 10],
    #              [3, 7, 11]]
    self.assertEqual(reference[ri([0, 1, 2]), ri([0])],
                     torch.tensor([0, 1, 2], dtype=dtype, device=device))
    self.assertEqual(reference[ri([0, 1, 2]), ri([1])],
                     torch.tensor([4, 5, 6], dtype=dtype, device=device))
    self.assertEqual(reference[ri([0]), ri([0])],
                     torch.tensor([0], dtype=dtype, device=device))
    self.assertEqual(reference[ri([2]), ri([1])],
                     torch.tensor([6], dtype=dtype, device=device))
    self.assertEqual(reference[[ri([0, 0]), ri([0, 1])]],
                     torch.tensor([0, 4], dtype=dtype, device=device))
    self.assertEqual(reference[[ri([0, 1, 1, 0, 3]), ri([1])]],
                     torch.tensor([4, 5, 5, 4, 7], dtype=dtype, device=device))
    self.assertEqual(reference[[ri([0, 0, 1, 1]), ri([0, 1, 0, 0])]],
                     torch.tensor([0, 4, 1, 1], dtype=dtype, device=device))

    rows = ri([[0, 0],
               [1, 2]])
    columns = [0],
    self.assertEqual(reference[rows, columns],
                     torch.tensor([[0, 0], [1, 2]], dtype=dtype, device=device))

    rows = ri([[0, 0],
               [1, 2]])
    columns = ri([1, 0])
    self.assertEqual(reference[rows, columns],
                     torch.tensor([[4, 0], [5, 2]], dtype=dtype, device=device))
    rows = ri([[0, 0],
               [1, 3]])
    columns = ri([[0, 1],
                  [1, 2]])
    self.assertEqual(reference[rows, columns],
                     torch.tensor([[0, 4], [5, 11]], dtype=dtype, device=device))

    # setting values
    reference[ri([0]), ri([1])] = -1
    self.assertEqual(reference[ri([0]), ri([1])],
                     torch.tensor([-1], dtype=dtype, device=device))
    reference[ri([0, 1, 2]), ri([0])] = torch.tensor([-1, 2, -4], dtype=dtype, device=device)
    self.assertEqual(reference[ri([0, 1, 2]), ri([0])],
                     torch.tensor([-1, 2, -4], dtype=dtype, device=device))
    reference[rows, columns] = torch.tensor([[4, 6], [2, 3]], dtype=dtype, device=device)
    self.assertEqual(reference[rows, columns],
                     torch.tensor([[4, 6], [2, 3]], dtype=dtype, device=device))

    # stride != 1
    # strided is [[1 3 5 7],
    #             [9 11 13 15]]
    reference = torch.arange(0., 24, dtype=dtype, device=device).view(3, 8)
    strided = torch.tensor((), dtype=dtype, device=device)
    strided.set_(reference.storage(), 1, size=torch.Size([2, 4]),
                 stride=[8, 2])

    self.assertEqual(strided[ri([0, 1]), ri([0])],
                     torch.tensor([1, 9], dtype=dtype, device=device))
    self.assertEqual(strided[ri([0, 1]), ri([1])],
                     torch.tensor([3, 11], dtype=dtype, device=device))
    self.assertEqual(strided[ri([0]), ri([0])],
                     torch.tensor([1], dtype=dtype, device=device))
    self.assertEqual(strided[ri([1]), ri([3])],
                     torch.tensor([15], dtype=dtype, device=device))
    self.assertEqual(strided[[ri([0, 0]), ri([0, 3])]],
                     torch.tensor([1, 7], dtype=dtype, device=device))
    self.assertEqual(strided[[ri([1]), ri([0, 1, 1, 0, 3])]],
                     torch.tensor([9, 11, 11, 9, 15], dtype=dtype, device=device))
    self.assertEqual(strided[[ri([0, 0, 1, 1]), ri([0, 1, 0, 0])]],
                     torch.tensor([1, 3, 9, 9], dtype=dtype, device=device))

    rows = ri([[0, 0],
               [1, 1]])
    columns = [0],
    self.assertEqual(strided[rows, columns],
                     torch.tensor([[1, 1], [9, 9]], dtype=dtype, device=device))

    rows = ri([[0, 1],
               [1, 0]])
    columns = ri([1, 2])
    self.assertEqual(strided[rows, columns],
                     torch.tensor([[3, 13], [11, 5]], dtype=dtype, device=device))
    rows = ri([[0, 0],
               [1, 1]])
    columns = ri([[0, 1],
                  [1, 2]])
    self.assertEqual(strided[rows, columns],
                     torch.tensor([[1, 3], [11, 13]], dtype=dtype, device=device))

    # setting values

    # strided is [[10, 11],
    #             [17, 18]]
    reference = torch.arange(0., 24, dtype=dtype, device=device).view(3, 8)
    strided = torch.tensor((), dtype=dtype, device=device)
    strided.set_(reference.storage(), 10, size=torch.Size([2, 2]),
                 stride=[7, 1])
    self.assertEqual(strided[ri([0]), ri([1])],
                     torch.tensor([11], dtype=dtype, device=device))
    strided[ri([0]), ri([1])] = -1
    self.assertEqual(strided[ri([0]), ri([1])],
                     torch.tensor([-1], dtype=dtype, device=device))

    reference = torch.arange(0., 24, dtype=dtype, device=device).view(3, 8)
    strided = torch.tensor((), dtype=dtype, device=device)
    strided.set_(reference.storage(), 10, size=torch.Size([2, 2]),
                 stride=[7, 1])
    self.assertEqual(strided[ri([0, 1]), ri([1, 0])],
                     torch.tensor([11, 17], dtype=dtype, device=device))
    strided[ri([0, 1]), ri([1, 0])] = torch.tensor([-1, 2], dtype=dtype, device=device)
    self.assertEqual(strided[ri([0, 1]), ri([1, 0])],
                     torch.tensor([-1, 2], dtype=dtype, device=device))

    reference = torch.arange(0., 24, dtype=dtype, device=device).view(3, 8)
    strided = torch.tensor((), dtype=dtype, device=device)
    strided.set_(reference.storage(), 10, size=torch.Size([2, 2]),
                 stride=[7, 1])

    rows = ri([[0],
               [1]])
    columns = ri([[0, 1],
                  [0, 1]])
    self.assertEqual(strided[rows, columns],
                     torch.tensor([[10, 11], [17, 18]], dtype=dtype, device=device))
    strided[rows, columns] = torch.tensor([[4, 6], [2, 3]], dtype=dtype, device=device)
    self.assertEqual(strided[rows, columns],
                     torch.tensor([[4, 6], [2, 3]], dtype=dtype, device=device))

    # Tests using less than the number of dims, and ellipsis

    # reference is 1 2
    #              3 4
    #              5 6
    reference = consec((3, 2))
    self.assertEqual(reference[ri([0, 2]), ],
                     torch.tensor([[1, 2], [5, 6]], dtype=dtype, device=device))
    self.assertEqual(reference[ri([1]), ...],
                     torch.tensor([[3, 4]], dtype=dtype, device=device))
    self.assertEqual(reference[..., ri([1])],
                     torch.tensor([[2], [4], [6]], dtype=dtype, device=device))

    # verify too many indices fails
    with self.assertRaises(IndexError):
        reference[ri([1]), ri([0, 2]), ri([3])]

    # test invalid index fails
    reference = torch.empty(10, dtype=dtype, device=device)
    # can't test cuda because it is a device assert
    if not reference.is_cuda:
        for err_idx in (10, -11):
            with self.assertRaisesRegex(IndexError, r'out of'):
                reference[err_idx]
            with self.assertRaisesRegex(IndexError, r'out of'):
                reference[torch.LongTensor([err_idx]).to(device)]
            with self.assertRaisesRegex(IndexError, r'out of'):
                reference[[err_idx]]

    def tensor_indices_to_np(tensor, indices):
        # convert the Torch Tensor to a numpy array
        tensor = tensor.to(device='cpu')
        npt = tensor.numpy()

        # convert indices
        idxs = tuple(i.tolist() if isinstance(i, torch.LongTensor) else
                     i for i in indices)

        return npt, idxs

    def get_numpy(tensor, indices):
        npt, idxs = tensor_indices_to_np(tensor, indices)

        # index and return as a Torch Tensor
        return torch.tensor(npt[idxs], dtype=dtype, device=device)

    def set_numpy(tensor, indices, value):
        if not isinstance(value, int):
            if self.device_type != 'cpu':
                value = value.cpu()
            value = value.numpy()

        npt, idxs = tensor_indices_to_np(tensor, indices)
        npt[idxs] = value
        return npt

    def assert_get_eq(tensor, indexer):
        self.assertEqual(tensor[indexer], get_numpy(tensor, indexer))

    def assert_set_eq(tensor, indexer, val):
        pyt = tensor.clone()
        numt = tensor.clone()
        pyt[indexer] = val
        numt = torch.tensor(set_numpy(numt, indexer, val), dtype=dtype, device=device)
        self.assertEqual(pyt, numt)

    def assert_backward_eq(tensor, indexer):
        # Gradient of indexing on `device` must match the CPU gradient.
        cpu = tensor.float().clone().detach().requires_grad_(True)
        outcpu = cpu[indexer]
        gOcpu = torch.rand_like(outcpu)
        outcpu.backward(gOcpu)
        dev = cpu.to(device).detach().requires_grad_(True)
        outdev = dev[indexer]
        outdev.backward(gOcpu.to(device))
        self.assertEqual(cpu.grad, dev.grad)

    def get_set_tensor(indexed, indexer):
        set_size = indexed[indexer].size()
        set_count = indexed[indexer].numel()
        set_tensor = torch.randperm(set_count).view(set_size).double().to(device)
        return set_tensor

    # Tensor is  0  1  2  3  4
    #            5  6  7  8  9
    #           10 11 12 13 14
    #           15 16 17 18 19
    reference = torch.arange(0., 20, dtype=dtype, device=device).view(4, 5)

    indices_to_test = [
        # grab the second, fourth columns
        [slice(None), [1, 3]],

        # first, third rows,
        [[0, 2], slice(None)],

        # weird shape
        [slice(None), [[0, 1],
                       [2, 3]]],
        # negatives
        [[-1], [0]],
        [[0, 2], [-1]],
        [slice(None), [-1]],
    ]

    # only test dupes on gets
    get_indices_to_test = indices_to_test + [[slice(None), [0, 1, 1, 2, 2]]]

    for indexer in get_indices_to_test:
        assert_get_eq(reference, indexer)
        if self.device_type != 'cpu':
            assert_backward_eq(reference, indexer)

    for indexer in indices_to_test:
        assert_set_eq(reference, indexer, 44)
        assert_set_eq(reference,
                      indexer,
                      get_set_tensor(reference, indexer))

    reference = torch.arange(0., 160, dtype=dtype, device=device).view(4, 8, 5)

    indices_to_test = [
        [slice(None), slice(None), [0, 3, 4]],
        [slice(None), [2, 4, 5, 7], slice(None)],
        [[2, 3], slice(None), slice(None)],
        [slice(None), [0, 2, 3], [1, 3, 4]],
        [slice(None), [0], [1, 2, 4]],
        [slice(None), [0, 1, 3], [4]],
        [slice(None), [[0, 1], [1, 0]], [[2, 3]]],
        [slice(None), [[0, 1], [2, 3]], [[0]]],
        [slice(None), [[5, 6]], [[0, 3], [4, 4]]],
        [[0, 2, 3], [1, 3, 4], slice(None)],
        [[0], [1, 2, 4], slice(None)],
        [[0, 1, 3], [4], slice(None)],
        [[[0, 1], [1, 0]], [[2, 1], [3, 5]], slice(None)],
        [[[0, 1], [1, 0]], [[2, 3]], slice(None)],
        [[[0, 1], [2, 3]], [[0]], slice(None)],
        [[[2, 1]], [[0, 3], [4, 4]], slice(None)],
        [[[2]], [[0, 3], [4, 1]], slice(None)],

        # non-contiguous indexing subspace
        [[0, 2, 3], slice(None), [1, 3, 4]],

        # less dim, ellipsis
        [[0, 2], ],
        [[0, 2], slice(None)],
        [[0, 2], Ellipsis],
        [[0, 2], slice(None), Ellipsis],
        [[0, 2], Ellipsis, slice(None)],
        [[0, 2], [1, 3]],
        [[0, 2], [1, 3], Ellipsis],
        [Ellipsis, [1, 3], [2, 3]],
        [Ellipsis, [2, 3, 4]],
        [Ellipsis, slice(None), [2, 3, 4]],
        [slice(None), Ellipsis, [2, 3, 4]],

        # ellipsis counts for nothing
        [Ellipsis, slice(None), slice(None), [0, 3, 4]],
        [slice(None), Ellipsis, slice(None), [0, 3, 4]],
        [slice(None), slice(None), Ellipsis, [0, 3, 4]],
        [slice(None), slice(None), [0, 3, 4], Ellipsis],
        [Ellipsis, [[0, 1], [1, 0]], [[2, 1], [3, 5]], slice(None)],
        [[[0, 1], [1, 0]], [[2, 1], [3, 5]], Ellipsis, slice(None)],
        [[[0, 1], [1, 0]], [[2, 1], [3, 5]], slice(None), Ellipsis],
    ]

    for indexer in indices_to_test:
        assert_get_eq(reference, indexer)
        assert_set_eq(reference, indexer, 212)
        assert_set_eq(reference, indexer, get_set_tensor(reference, indexer))
        if torch.cuda.is_available():
            assert_backward_eq(reference, indexer)

    reference = torch.arange(0., 1296, dtype=dtype, device=device).view(3, 9, 8, 6)

    indices_to_test = [
        [slice(None), slice(None), slice(None), [0, 3, 4]],
        [slice(None), slice(None), [2, 4, 5, 7], slice(None)],
        [slice(None), [2, 3], slice(None), slice(None)],
        [[1, 2], slice(None), slice(None), slice(None)],
        [slice(None), slice(None), [0, 2, 3], [1, 3, 4]],
        [slice(None), slice(None), [0], [1, 2, 4]],
        [slice(None), slice(None), [0, 1, 3], [4]],
        [slice(None), slice(None), [[0, 1], [1, 0]], [[2, 3]]],
        [slice(None), slice(None), [[0, 1], [2, 3]], [[0]]],
        [slice(None), slice(None), [[5, 6]], [[0, 3], [4, 4]]],
        [slice(None), [0, 2, 3], [1, 3, 4], slice(None)],
        [slice(None), [0], [1, 2, 4], slice(None)],
        [slice(None), [0, 1, 3], [4], slice(None)],
        [slice(None), [[0, 1], [3, 4]], [[2, 3], [0, 1]], slice(None)],
        [slice(None), [[0, 1], [3, 4]], [[2, 3]], slice(None)],
        [slice(None), [[0, 1], [3, 2]], [[0]], slice(None)],
        [slice(None), [[2, 1]], [[0, 3], [6, 4]], slice(None)],
        [slice(None), [[2]], [[0, 3], [4, 2]], slice(None)],
        [[0, 1, 2], [1, 3, 4], slice(None), slice(None)],
        [[0], [1, 2, 4], slice(None), slice(None)],
        [[0, 1, 2], [4], slice(None), slice(None)],
        [[[0, 1], [0, 2]], [[2, 4], [1, 5]], slice(None), slice(None)],
        [[[0, 1], [1, 2]], [[2, 0]], slice(None), slice(None)],
        [[[2, 2]], [[0, 3], [4, 5]], slice(None), slice(None)],
        [[[2]], [[0, 3], [4, 5]], slice(None), slice(None)],
        [slice(None), [3, 4, 6], [0, 2, 3], [1, 3, 4]],
        [slice(None), [2, 3, 4], [1, 3, 4], [4]],
        [slice(None), [0, 1, 3], [4], [1, 3, 4]],
        [slice(None), [6], [0, 2, 3], [1, 3, 4]],
        [slice(None), [2, 3, 5], [3], [4]],
        [slice(None), [0], [4], [1, 3, 4]],
        [slice(None), [6], [0, 2, 3], [1]],
        [slice(None), [[0, 3], [3, 6]], [[0, 1], [1, 3]], [[5, 3], [1, 2]]],
        [[2, 2, 1], [0, 2, 3], [1, 3, 4], slice(None)],
        [[2, 0, 1], [1, 2, 3], [4], slice(None)],
        [[0, 1, 2], [4], [1, 3, 4], slice(None)],
        [[0], [0, 2, 3], [1, 3, 4], slice(None)],
        [[0, 2, 1], [3], [4], slice(None)],
        [[0], [4], [1, 3, 4], slice(None)],
        [[1], [0, 2, 3], [1], slice(None)],
        [[[1, 2], [1, 2]], [[0, 1], [2, 3]], [[2, 3], [3, 5]], slice(None)],

        # less dim, ellipsis
        [Ellipsis, [0, 3, 4]],
        [Ellipsis, slice(None), [0, 3, 4]],
        [Ellipsis, slice(None), slice(None), [0, 3, 4]],
        [slice(None), Ellipsis, [0, 3, 4]],
        [slice(None), slice(None), Ellipsis, [0, 3, 4]],
        [slice(None), [0, 2, 3], [1, 3, 4]],
        [slice(None), [0, 2, 3], [1, 3, 4], Ellipsis],
        [Ellipsis, [0, 2, 3], [1, 3, 4], slice(None)],
        [[0], [1, 2, 4]],
        [[0], [1, 2, 4], slice(None)],
        [[0], [1, 2, 4], Ellipsis],
        [[0], [1, 2, 4], Ellipsis, slice(None)],
        [[1], ],
        [[0, 2, 1], [3], [4]],
        [[0, 2, 1], [3], [4], slice(None)],
        [[0, 2, 1], [3], [4], Ellipsis],
        [Ellipsis, [0, 2, 1], [3], [4]],
    ]

    for indexer in indices_to_test:
        assert_get_eq(reference, indexer)
        assert_set_eq(reference, indexer, 1333)
        assert_set_eq(reference, indexer, get_set_tensor(reference, indexer))
    indices_to_test += [
        [slice(None), slice(None), [[0, 1], [1, 0]], [[2, 3], [3, 0]]],
        [slice(None), slice(None), [[2]], [[0, 3], [4, 4]]],
    ]
    for indexer in indices_to_test:
        assert_get_eq(reference, indexer)
        assert_set_eq(reference, indexer, 1333)
        if self.device_type != 'cpu':
            assert_backward_eq(reference, indexer)
def test_advancedindex_big(self, device):
reference = torch.arange(0, 123344, dtype=torch.int, device=device)
self.assertEqual(reference[[0, 123, 44488, 68807, 123343], ],
torch.tensor([0, 123, 44488, 68807, 123343], dtype=torch.int))
def test_set_item_to_scalar_tensor(self, device):
m = random.randint(1, 10)
n = random.randint(1, 10)
z = torch.randn([m, n], device=device)
a = 1.0
w = torch.tensor(a, requires_grad=True, device=device)
z[:, 0] = w
z.sum().backward()
self.assertEqual(w.grad, m * a)
def test_single_int(self, device):
v = torch.randn(5, 7, 3, device=device)
self.assertEqual(v[4].shape, (7, 3))
def test_multiple_int(self, device):
v = torch.randn(5, 7, 3, device=device)
self.assertEqual(v[4].shape, (7, 3))
self.assertEqual(v[4, :, 1].shape, (7,))
def test_none(self, device):
v = torch.randn(5, 7, 3, device=device)
self.assertEqual(v[None].shape, (1, 5, 7, 3))
self.assertEqual(v[:, None].shape, (5, 1, 7, 3))
self.assertEqual(v[:, None, None].shape, (5, 1, 1, 7, 3))
self.assertEqual(v[..., None].shape, (5, 7, 3, 1))
def test_step(self, device):
v = torch.arange(10, device=device)
self.assertEqual(v[::1], v)
self.assertEqual(v[::2].tolist(), [0, 2, 4, 6, 8])
self.assertEqual(v[::3].tolist(), [0, 3, 6, 9])
self.assertEqual(v[::11].tolist(), [0])
self.assertEqual(v[1:6:2].tolist(), [1, 3, 5])
def test_step_assignment(self, device):
v = torch.zeros(4, 4, device=device)
v[0, 1::2] = torch.tensor([3., 4.], device=device)
self.assertEqual(v[0].tolist(), [0, 3, 0, 4])
self.assertEqual(v[1:].sum(), 0)
def test_bool_indices(self, device):
v = torch.randn(5, 7, 3, device=device)
boolIndices = torch.tensor([True, False, True, True, False], dtype=torch.bool, device=device)
self.assertEqual(v[boolIndices].shape, (3, 7, 3))
self.assertEqual(v[boolIndices], torch.stack([v[0], v[2], v[3]]))
v = torch.tensor([True, False, True], dtype=torch.bool, device=device)
boolIndices = torch.tensor([True, False, False], dtype=torch.bool, device=device)
uint8Indices = torch.tensor([1, 0, 0], dtype=torch.uint8, device=device)
with warnings.catch_warnings(record=True) as w:
self.assertEqual(v[boolIndices].shape, v[uint8Indices].shape)
self.assertEqual(v[boolIndices], v[uint8Indices])
self.assertEqual(v[boolIndices], tensor([True], dtype=torch.bool, device=device))
self.assertEqual(len(w), 2)
def test_bool_indices_accumulate(self, device):
mask = torch.zeros(size=(10, ), dtype=torch.bool, device=device)
y = torch.ones(size=(10, 10), device=device)
y.index_put_((mask, ), y[mask], accumulate=True)
self.assertEqual(y, torch.ones(size=(10, 10), device=device))
def test_multiple_bool_indices(self, device):
v = torch.randn(5, 7, 3, device=device)
# note: these broadcast together and are transposed to the first dim
mask1 = torch.tensor([1, 0, 1, 1, 0], dtype=torch.bool, device=device)
mask2 = torch.tensor([1, 1, 1], dtype=torch.bool, device=device)
self.assertEqual(v[mask1, :, mask2].shape, (3, 7))
def test_byte_mask(self, device):
v = torch.randn(5, 7, 3, device=device)
mask = torch.ByteTensor([1, 0, 1, 1, 0]).to(device)
with warnings.catch_warnings(record=True) as w:
self.assertEqual(v[mask].shape, (3, 7, 3))
self.assertEqual(v[mask], torch.stack([v[0], v[2], v[3]]))
self.assertEqual(len(w), 2)
v = torch.tensor([1.], device=device)
self.assertEqual(v[v == 0], torch.tensor([], device=device))
def test_byte_mask_accumulate(self, device):
mask = torch.zeros(size=(10, ), dtype=torch.uint8, device=device)
y = torch.ones(size=(10, 10), device=device)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
y.index_put_((mask, ), y[mask], accumulate=True)
self.assertEqual(y, torch.ones(size=(10, 10), device=device))
self.assertEqual(len(w), 2)
def test_index_put_accumulate_large_tensor(self, device):
    """index_put_(accumulate=True) must be correct on tensors whose numel
    exceeds INT_MAX (2^31 - 1), in both 1-D and 2-D layouts, including
    negative and duplicated indices."""
    numel = (1 << 31) + 5
    scalar_dtype = torch.int8

    huge = torch.ones(numel, dtype=scalar_dtype, device=device)
    positions = torch.tensor([-2, 0, -2, -1, 0, -1, 1], device=device, dtype=torch.long)
    additions = torch.tensor([6, 5, 6, 6, 5, 7, 11], dtype=scalar_dtype, device=device)
    huge.index_put_((positions, ), additions, accumulate=True)
    # Duplicated positions accumulate: e.g. index 0 gets 5 + 5 on top of 1.
    for position, expected in ((0, 11), (1, 12), (2, 1),
                               (-3, 1), (-2, 13), (-1, 14)):
        self.assertEqual(huge[position], expected)

    huge = torch.ones((2, numel), dtype=scalar_dtype, device=device)
    row_idx = torch.tensor([0, -1, 0, 1], device=device, dtype=torch.long)
    col_idx = torch.tensor([-2, -1, 0, 1], device=device, dtype=torch.long)
    additions = torch.tensor([12, 13, 10, 11], dtype=scalar_dtype, device=device)
    huge.index_put_((row_idx, col_idx), additions, accumulate=True)
    self.assertEqual(huge[0, 0], 11)
    self.assertEqual(huge[0, 1], 1)
    self.assertEqual(huge[1, 0], 1)
    self.assertEqual(huge[1, 1], 12)
    self.assertEqual(huge[:, 2], torch.ones(2, dtype=torch.int8))
    self.assertEqual(huge[:, -3], torch.ones(2, dtype=torch.int8))
    self.assertEqual(huge[0, -2], 13)
    self.assertEqual(huge[1, -2], 1)
    self.assertEqual(huge[-1, -1], 14)
    self.assertEqual(huge[0, -1], 1)
@onlyNativeDeviceTypes
def test_index_put_accumulate_expanded_values(self, device):
# checks the issue with cuda: https://github.com/pytorch/pytorch/issues/39227
# and verifies consistency with CPU result
t = torch.zeros((5, 2))
t_dev = t.to(device)
indices = [
torch.tensor([0, 1, 2, 3]),
torch.tensor([1, ]),
]
indices_dev = [i.to(device) for i in indices]
values0d = torch.tensor(1.0)
values1d = torch.tensor([1.0, ])
out_cuda = t_dev.index_put_(indices_dev, values0d.to(device), accumulate=True)
out_cpu = t.index_put_(indices, values0d, accumulate=True)
self.assertEqual(out_cuda.cpu(), out_cpu)
out_cuda = t_dev.index_put_(indices_dev, values1d.to(device), accumulate=True)
out_cpu = t.index_put_(indices, values1d, accumulate=True)
self.assertEqual(out_cuda.cpu(), out_cpu)
t = torch.zeros(4, 3, 2)
t_dev = t.to(device)
indices = [
torch.tensor([0, ]),
torch.arange(3)[:, None],
torch.arange(2)[None, :],
]
indices_dev = [i.to(device) for i in indices]
values1d = torch.tensor([-1.0, -2.0])
values2d = torch.tensor([[-1.0, -2.0], ])
out_cuda = t_dev.index_put_(indices_dev, values1d.to(device), accumulate=True)
out_cpu = t.index_put_(indices, values1d, accumulate=True)
self.assertEqual(out_cuda.cpu(), out_cpu)
out_cuda = t_dev.index_put_(indices_dev, values2d.to(device), accumulate=True)
out_cpu = t.index_put_(indices, values2d, accumulate=True)
self.assertEqual(out_cuda.cpu(), out_cpu)
@onlyCUDA
def test_index_put_accumulate_non_contiguous(self, device):
    """accumulate=True must work on non-contiguous views, leave them
    non-contiguous, and agree with the CPU result."""
    base = torch.zeros((5, 2, 2))
    base_dev = base.to(device)
    view_dev = base_dev[:, 0, :]
    view_cpu = base[:, 0, :]
    self.assertTrue(not view_dev.is_contiguous())
    self.assertTrue(not view_cpu.is_contiguous())

    put_indices = [torch.tensor([0, 1]), ]
    put_indices_dev = [i.to(device) for i in put_indices]
    value = torch.randn(2, 2)
    out_dev = view_dev.index_put_(put_indices_dev, value.to(device), accumulate=True)
    out_cpu = view_cpu.index_put_(put_indices, value, accumulate=True)
    self.assertTrue(not view_dev.is_contiguous())
    self.assertTrue(not view_cpu.is_contiguous())

    self.assertEqual(out_dev.cpu(), out_cpu)
@onlyCUDA
def test_index_put_accumulate_with_optional_tensors(self, device):
    """index_put_ with a None entry in `indices` (an undefined/optional
    tensor on the C++ side) must accumulate correctly and match CPU."""
    # TODO: replace with a better solution.
    # Currently, here using torchscript to put None into indices.
    # on C++ it gives indices as a list of 2 optional tensors: first is null and
    # the second is a valid tensor.
    @torch.jit.script
    def func(x, i, v):
        idx = [None, i]
        x.index_put_(idx, v, accumulate=True)
        return x

    n = 4
    t = torch.arange(n * 2, dtype=torch.float32).reshape(n, 2)
    t_dev = t.to(device)
    indices = torch.tensor([1, 0])
    indices_dev = indices.to(device)
    value0d = torch.tensor(10.0)
    value1d = torch.tensor([1.0, 2.0])

    # 0-d value broadcasts across the selected column entries.
    out_cuda = func(t_dev, indices_dev, value0d.cuda())
    out_cpu = func(t, indices, value0d)
    self.assertEqual(out_cuda.cpu(), out_cpu)

    # 1-d value is applied elementwise to the selected entries.
    out_cuda = func(t_dev, indices_dev, value1d.cuda())
    out_cpu = func(t, indices, value1d)
    self.assertEqual(out_cuda.cpu(), out_cpu)
@onlyNativeDeviceTypes
def test_index_put_accumulate_duplicate_indices(self, device):
for i in range(1, 512):
# generate indices by random walk, this will create indices with
# lots of duplicates interleaved with each other
delta = torch.empty(i, dtype=torch.double, device=device).uniform_(-1, 1)
indices = delta.cumsum(0).long()
input = torch.randn(indices.abs().max() + 1, device=device)
values = torch.randn(indices.size(0), device=device)
output = input.index_put((indices,), values, accumulate=True)
input_list = input.tolist()
indices_list = indices.tolist()
values_list = values.tolist()
for i, v in zip(indices_list, values_list):
input_list[i] += v
self.assertEqual(output, input_list)
def test_multiple_byte_mask(self, device):
    """Two byte (uint8) masks broadcast together and move the masked dims to
    the front; each deprecated byte-mask use emits one warning."""
    v = torch.randn(5, 7, 3, device=device)
    # note: these broadcast together and are transposed to the first dim
    mask1 = torch.ByteTensor([1, 0, 1, 1, 0]).to(device)
    mask2 = torch.ByteTensor([1, 1, 1]).to(device)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        self.assertEqual(v[mask1, :, mask2].shape, (3, 7))
        # one warning per byte-tensor mask
        self.assertEqual(len(w), 2)
def test_byte_mask2d(self, device):
    """A 2-D boolean mask selects elements and flattens the masked dims into
    one dimension whose length is the number of True entries."""
    v = torch.randn(5, 7, 3, device=device)
    c = torch.randn(5, 7, device=device)
    num_ones = (c > 0).sum()
    r = v[c > 0]
    self.assertEqual(r.shape, (num_ones, 3))
def test_jit_indexing(self, device):
    """Masked assignment and slice assignment give the same result when the
    functions are compiled with torch.jit.script."""
    def fn1(x):
        x[x < 50] = 1.0
        return x

    def fn2(x):
        x[0:50] = 1.0
        return x

    scripted_fn1 = torch.jit.script(fn1)
    scripted_fn2 = torch.jit.script(fn2)
    data = torch.arange(100, device=device, dtype=torch.float)
    out = scripted_fn1(data.detach().clone())
    # Both functions set exactly the first 50 entries to 1.0.
    ref = torch.tensor(np.concatenate((np.ones(50), np.arange(50, 100))), device=device, dtype=torch.float)
    self.assertEqual(out, ref)
    out = scripted_fn2(data.detach().clone())
    self.assertEqual(out, ref)
def test_int_indices(self, device):
v = torch.randn(5, 7, 3, device=device)
self.assertEqual(v[[0, 4, 2]].shape, (3, 7, 3))
self.assertEqual(v[:, [0, 4, 2]].shape, (5, 3, 3))
self.assertEqual(v[:, [[0, 1], [4, 3]]].shape, (5, 2, 2, 3))
@dtypes(torch.cfloat, torch.cdouble, torch.float, torch.bfloat16, torch.long, torch.bool)
@dtypesIfCPU(torch.cfloat, torch.cdouble, torch.float, torch.long, torch.bool, torch.bfloat16)
@dtypesIfCUDA(torch.cfloat, torch.cdouble, torch.half, torch.long, torch.bool, torch.bfloat16)
def test_index_put_src_datatype(self, device, dtype):
    """index_put_ with accumulate=True runs and preserves shape across the
    parametrized dtypes."""
    src = torch.ones(3, 2, 4, device=device, dtype=dtype)
    vals = torch.ones(3, 2, 4, device=device, dtype=dtype)
    indices = (torch.tensor([0, 2, 1]),)
    res = src.index_put_(indices, vals, accumulate=True)
    self.assertEqual(res.shape, src.shape)
@dtypes(torch.float, torch.bfloat16, torch.long, torch.bool)
@dtypesIfCPU(torch.float, torch.long, torch.bfloat16, torch.bool)
@dtypesIfCUDA(torch.half, torch.long, torch.bfloat16, torch.bool)
def test_index_src_datatype(self, device, dtype):
    """Advanced-index reads and non-accumulating writes preserve shape across
    the parametrized dtypes."""
    src = torch.ones(3, 2, 4, device=device, dtype=dtype)
    # test index
    res = src[[0, 2, 1], :, :]
    self.assertEqual(res.shape, src.shape)
    # test index_put, no accum
    src[[0, 2, 1], :, :] = res
    self.assertEqual(res.shape, src.shape)
def test_int_indices2d(self, device):
# From the NumPy indexing example
x = torch.arange(0, 12, device=device).view(4, 3)
rows = torch.tensor([[0, 0], [3, 3]], device=device)
columns = torch.tensor([[0, 2], [0, 2]], device=device)
self.assertEqual(x[rows, columns].tolist(), [[0, 2], [9, 11]])
def test_int_indices_broadcast(self, device):
# From the NumPy indexing example
x = torch.arange(0, 12, device=device).view(4, 3)
rows = torch.tensor([0, 3], device=device)
columns = torch.tensor([0, 2], device=device)
result = x[rows[:, None], columns]
self.assertEqual(result.tolist(), [[0, 2], [9, 11]])
def test_empty_index(self, device):
    """An empty index tensor selects nothing, and assignment through an empty
    index or an all-False mask is a silent no-op."""
    x = torch.arange(0, 12, device=device).view(4, 3)
    idx = torch.tensor([], dtype=torch.long, device=device)
    self.assertEqual(x[idx].numel(), 0)

    # empty assignment should have no effect but not throw an exception
    y = x.clone()
    y[idx] = -1
    self.assertEqual(x, y)

    mask = torch.zeros(4, 3, device=device).bool()
    y[mask] = -1
    self.assertEqual(x, y)
def test_empty_ndim_index(self, device):
    """Multi-dimensional empty index tensors produce correspondingly-shaped
    empty outputs; a non-empty index into a size-0 dim is an error."""
    x = torch.randn(5, device=device)
    self.assertEqual(torch.empty(0, 2, device=device), x[torch.empty(0, 2, dtype=torch.int64, device=device)])

    x = torch.randn(2, 3, 4, 5, device=device)
    self.assertEqual(torch.empty(2, 0, 6, 4, 5, device=device),
                     x[:, torch.empty(0, 6, dtype=torch.int64, device=device)])

    x = torch.empty(10, 0, device=device)
    self.assertEqual(x[[1, 2]].shape, (2, 0))
    self.assertEqual(x[[], []].shape, (0,))
    # indexing a size-0 dimension with real indices must fail
    with self.assertRaisesRegex(IndexError, 'for dimension with size 0'):
        x[:, [0, 1]]
def test_empty_ndim_index_bool(self, device):
    """An empty uint8 mask whose shape does not match the tensor raises
    IndexError (unlike an empty integer index)."""
    x = torch.randn(5, device=device)
    self.assertRaises(IndexError, lambda: x[torch.empty(0, 2, dtype=torch.uint8, device=device)])
def test_empty_slice(self, device):
    """A zero-length slice keeps NumPy-compatible strides and reports
    contiguous."""
    x = torch.randn(2, 3, 4, 5, device=device)
    y = x[:, :, :, 1]
    z = y[:, 1:1, :]
    self.assertEqual((2, 0, 4), z.shape)
    # this isn't technically necessary, but matches NumPy stride calculations.
    self.assertEqual((60, 20, 5), z.stride())
    self.assertTrue(z.is_contiguous())
def test_index_getitem_copy_bools_slices(self, device):
    """Scalar boolean indices copy data, while None and Ellipsis return views
    sharing the original storage."""
    true = torch.tensor(1, dtype=torch.uint8, device=device)
    false = torch.tensor(0, dtype=torch.uint8, device=device)

    tensors = [torch.randn(2, 3, device=device), torch.tensor(3., device=device)]

    for a in tensors:
        # True/true copy: the result has its own storage.
        self.assertNotEqual(a.data_ptr(), a[True].data_ptr())
        # False/false yield an empty (0, *shape) result.
        self.assertEqual(torch.empty(0, *a.shape), a[False])
        self.assertNotEqual(a.data_ptr(), a[true].data_ptr())
        self.assertEqual(torch.empty(0, *a.shape), a[false])
        # None and Ellipsis alias the original storage.
        self.assertEqual(a.data_ptr(), a[None].data_ptr())
        self.assertEqual(a.data_ptr(), a[...].data_ptr())
def test_index_setitem_bools_slices(self, device):
    """Assignment through scalar booleans: True/None/Ellipsis write through,
    False is a no-op; full-slice assignment on a 0-d tensor raises."""
    true = torch.tensor(1, dtype=torch.uint8, device=device)
    false = torch.tensor(0, dtype=torch.uint8, device=device)

    tensors = [torch.randn(2, 3, device=device), torch.tensor(3, device=device)]

    for a in tensors:
        # prefix with a 1,1, to ensure we are compatible with numpy which cuts off prefix 1s
        # (some of these ops already prefix a 1 to the size)
        neg_ones = torch.ones_like(a) * -1
        neg_ones_expanded = neg_ones.unsqueeze(0).unsqueeze(0)
        a[True] = neg_ones_expanded
        self.assertEqual(a, neg_ones)
        # False index: the write is dropped, the tensor is unchanged.
        a[False] = 5
        self.assertEqual(a, neg_ones)
        a[true] = neg_ones_expanded * 2
        self.assertEqual(a, neg_ones * 2)
        a[false] = 5
        self.assertEqual(a, neg_ones * 2)
        a[None] = neg_ones_expanded * 3
        self.assertEqual(a, neg_ones * 3)
        a[...] = neg_ones_expanded * 4
        self.assertEqual(a, neg_ones * 4)
        if a.dim() == 0:
            # `a[:]` is invalid for 0-dim tensors
            with self.assertRaises(IndexError):
                a[:] = neg_ones_expanded * 5
def test_index_scalar_with_bool_mask(self, device):
    """uint8 and bool scalar masks index a 0-dim tensor identically, in both
    value and result dtype."""
    a = torch.tensor(1, device=device)
    uintMask = torch.tensor(True, dtype=torch.uint8, device=device)
    boolMask = torch.tensor(True, dtype=torch.bool, device=device)
    self.assertEqual(a[uintMask], a[boolMask])
    self.assertEqual(a[uintMask].dtype, a[boolMask].dtype)

    # Same check with a bool-valued source tensor.
    a = torch.tensor(True, dtype=torch.bool, device=device)
    self.assertEqual(a[uintMask], a[boolMask])
    self.assertEqual(a[uintMask].dtype, a[boolMask].dtype)
def test_setitem_expansion_error(self, device):
    """Assigning a value whose extra leading dims are not all 1 must fail."""
    true = torch.tensor(True, device=device)
    a = torch.randn(2, 3, device=device)
    # check prefix with non-1s doesn't work
    a_expanded = a.expand(torch.Size([5, 1]) + a.size())
    # NumPy: ValueError
    with self.assertRaises(RuntimeError):
        a[True] = a_expanded
    with self.assertRaises(RuntimeError):
        a[true] = a_expanded
def test_getitem_scalars(self, device):
    """0-dim integer tensors index like Python ints and produce views, for
    int64/int32/int16 index dtypes alike."""
    zero = torch.tensor(0, dtype=torch.int64, device=device)
    one = torch.tensor(1, dtype=torch.int64, device=device)

    # non-scalar indexed with scalars
    a = torch.randn(2, 3, device=device)
    self.assertEqual(a[0], a[zero])
    self.assertEqual(a[0][1], a[zero][one])
    self.assertEqual(a[0, 1], a[zero, one])
    self.assertEqual(a[0, one], a[zero, 1])

    # indexing by a scalar should slice (not copy)
    self.assertEqual(a[0, 1].data_ptr(), a[zero, one].data_ptr())
    self.assertEqual(a[1].data_ptr(), a[one.int()].data_ptr())
    self.assertEqual(a[1].data_ptr(), a[one.short()].data_ptr())

    # scalar indexed with scalar
    r = torch.randn((), device=device)
    with self.assertRaises(IndexError):
        r[:]
    with self.assertRaises(IndexError):
        r[zero]
    self.assertEqual(r, r[...])
def test_setitem_scalars(self, device):
    """Assignment through 0-dim integer-tensor indices matches Python-int
    indexing; 0-dim targets accept only `...` on the left-hand side."""
    zero = torch.tensor(0, dtype=torch.int64)

    # non-scalar indexed with scalars
    a = torch.randn(2, 3, device=device)
    a_set_with_number = a.clone()
    a_set_with_scalar = a.clone()
    b = torch.randn(3, device=device)

    a_set_with_number[0] = b
    a_set_with_scalar[zero] = b
    self.assertEqual(a_set_with_number, a_set_with_scalar)
    a[1, zero] = 7.7
    self.assertEqual(7.7, a[1, 0])

    # scalar indexed with scalars
    r = torch.randn((), device=device)
    with self.assertRaises(IndexError):
        r[:] = 8.8
    with self.assertRaises(IndexError):
        r[zero] = 8.8
    r[...] = 9.9
    self.assertEqual(9.9, r)
def test_basic_advanced_combined(self, device):
    """Mixed basic/advanced indexing copies on read, but assignment through
    the same subscript writes into the original tensor."""
    # From the NumPy indexing example
    x = torch.arange(0, 12, device=device).view(4, 3)
    self.assertEqual(x[1:2, 1:3], x[1:2, [1, 2]])
    self.assertEqual(x[1:2, 1:3].tolist(), [[4, 5]])

    # Check that it is a copy
    unmodified = x.clone()
    x[1:2, [1, 2]].zero_()
    self.assertEqual(x, unmodified)

    # But assignment should modify the original
    unmodified = x.clone()
    x[1:2, [1, 2]] = 0
    self.assertNotEqual(x, unmodified)
def test_int_assignment(self, device):
x = torch.arange(0, 4, device=device).view(2, 2)
x[1] = 5
self.assertEqual(x.tolist(), [[0, 1], [5, 5]])
x = torch.arange(0, 4, device=device).view(2, 2)
x[1] = torch.arange(5, 7, device=device)
self.assertEqual(x.tolist(), [[0, 1], [5, 6]])
def test_byte_tensor_assignment(self, device):
    """Byte-mask row assignment writes the selected rows and emits exactly
    one deprecation warning for the uint8 mask."""
    x = torch.arange(0., 16, device=device).view(4, 4)
    b = torch.ByteTensor([True, False, True, False]).to(device)
    value = torch.tensor([3., 4., 5., 6.], device=device)

    with warnings.catch_warnings(record=True) as w:
        x[b] = value
        self.assertEqual(len(w), 1)

    # rows 0 and 2 were overwritten; rows 1 and 3 keep their arange values
    self.assertEqual(x[0], value)
    self.assertEqual(x[1], torch.arange(4., 8, device=device))
    self.assertEqual(x[2], value)
    self.assertEqual(x[3], torch.arange(12., 16, device=device))
def test_variable_slicing(self, device):
    """0-dim integer tensors can be used as slice bounds."""
    x = torch.arange(0, 16, device=device).view(4, 4)
    indices = torch.IntTensor([0, 1]).to(device)
    i, j = indices  # unpacks into two 0-dim tensors
    self.assertEqual(x[i:j], x[0:1])
def test_ellipsis_tensor(self, device):
x = torch.arange(0, 9, device=device).view(3, 3)
idx = torch.tensor([0, 2], device=device)
self.assertEqual(x[..., idx].tolist(), [[0, 2],
[3, 5],
[6, 8]])
self.assertEqual(x[idx, ...].tolist(), [[0, 1, 2],
[6, 7, 8]])
def test_invalid_index(self, device):
x = torch.arange(0, 16, device=device).view(4, 4)
self.assertRaisesRegex(TypeError, 'slice indices', lambda: x["0":"1"])
def test_out_of_bound_index(self, device):
x = torch.arange(0, 100, device=device).view(2, 5, 10)
self.assertRaisesRegex(IndexError, 'index 5 is out of bounds for dimension 1 with size 5', lambda: x[0, 5])
self.assertRaisesRegex(IndexError, 'index 4 is out of bounds for dimension 0 with size 2', lambda: x[4, 5])
self.assertRaisesRegex(IndexError, 'index 15 is out of bounds for dimension 2 with size 10',
lambda: x[0, 1, 15])
self.assertRaisesRegex(IndexError, 'index 12 is out of bounds for dimension 2 with size 10',
lambda: x[:, :, 12])
def test_zero_dim_index(self, device):
    """A 0-dim tensor compares equal to its item(), and integer-indexing it
    raises IndexError."""
    x = torch.tensor(10, device=device)
    self.assertEqual(x, x.item())

    def runner():
        # print forces evaluation before returning
        print(x[0])
        return x[0]

    self.assertRaisesRegex(IndexError, 'invalid index', runner)
@onlyCUDA
def test_invalid_device(self, device):
    """index_put_ rejects a value tensor on a different device, with and
    without accumulation."""
    idx = torch.tensor([0, 1])
    b = torch.zeros(5, device=device)
    c = torch.tensor([1., 2.], device="cpu")

    for accumulate in [True, False]:
        self.assertRaises(RuntimeError, lambda: torch.index_put_(b, (idx,), c, accumulate=accumulate))
@onlyCUDA
def test_cpu_indices(self, device):
    """CPU index tensors are accepted when reading from and writing to a
    CUDA tensor."""
    idx = torch.tensor([0, 1])
    b = torch.zeros(2, device=device)
    x = torch.ones(10, device=device)
    x[idx] = b  # index_put_
    ref = torch.ones(10, device=device)
    ref[:2] = 0
    self.assertEqual(x, ref, atol=0, rtol=0)
    out = x[idx]  # index
    self.assertEqual(out, torch.zeros(2, device=device), atol=0, rtol=0)
@dtypes(torch.long, torch.float32)
def test_take_along_dim(self, device, dtype):
    """torch.take_along_dim matches np.take_along_axis across shapes,
    contiguity, dim=None flattening, broadcasting and empty indices."""
    def _test_against_numpy(t, indices, dim):
        # Compare the torch result with the NumPy reference exactly.
        actual = torch.take_along_dim(t, indices, dim=dim)
        t_np = t.cpu().numpy()
        indices_np = indices.cpu().numpy()
        expected = np.take_along_axis(t_np, indices_np, axis=dim)
        self.assertEqual(actual, expected, atol=0, rtol=0)

    for shape in [(3, 2), (2, 3, 5), (2, 4, 0), (2, 3, 1, 4)]:
        for noncontiguous in [True, False]:
            t = make_tensor(shape, device=device, dtype=dtype, noncontiguous=noncontiguous)
            for dim in list(range(t.ndim)) + [None]:
                if dim is None:
                    # dim=None operates on the flattened tensor
                    indices = torch.argsort(t.view(-1))
                else:
                    indices = torch.argsort(t, dim=dim)

                _test_against_numpy(t, indices, dim)

    # test broadcasting
    t = torch.ones((3, 4, 1), device=device)
    indices = torch.ones((1, 2, 5), dtype=torch.long, device=device)

    _test_against_numpy(t, indices, 1)

    # test empty indices
    t = torch.ones((3, 4, 5), device=device)
    indices = torch.ones((3, 0, 5), dtype=torch.long, device=device)

    _test_against_numpy(t, indices, 1)
@dtypes(torch.long, torch.float)
def test_take_along_dim_invalid(self, device, dtype):
    """take_along_dim rejects rank mismatches, non-Long index dtypes and
    out-of-range dims."""
    shape = (2, 3, 1, 4)
    dim = 0
    t = make_tensor(shape, device=device, dtype=dtype)
    indices = torch.argsort(t, dim=dim)

    # dim of `t` and `indices` does not match
    with self.assertRaisesRegex(RuntimeError,
                                "input and indices should have the same number of dimensions"):
        torch.take_along_dim(t, indices[0], dim=0)

    # invalid `indices` dtype
    with self.assertRaisesRegex(RuntimeError, r"dtype of indices should be Long"):
        torch.take_along_dim(t, indices.to(torch.bool), dim=0)

    with self.assertRaisesRegex(RuntimeError, r"dtype of indices should be Long"):
        torch.take_along_dim(t, indices.to(torch.float), dim=0)

    with self.assertRaisesRegex(RuntimeError, r"dtype of indices should be Long"):
        torch.take_along_dim(t, indices.to(torch.int32), dim=0)

    # invalid axis
    with self.assertRaisesRegex(IndexError, "Dimension out of range"):
        torch.take_along_dim(t, indices, dim=-7)

    with self.assertRaisesRegex(IndexError, "Dimension out of range"):
        torch.take_along_dim(t, indices, dim=7)
@onlyCUDA
@dtypes(torch.float)
def test_gather_take_along_dim_cross_device(self, device, dtype):
    """gather and take_along_dim reject input/index tensors that live on
    different devices, in both directions."""
    shape = (2, 3, 1, 4)
    dim = 0
    t = make_tensor(shape, device=device, dtype=dtype)
    indices = torch.argsort(t, dim=dim)

    # CUDA input, CPU indices
    with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
        torch.gather(t, 0, indices.cpu())

    with self.assertRaisesRegex(RuntimeError,
                                r"Expected tensor to have .* but got tensor with .* torch.take_along_dim()"):
        torch.take_along_dim(t, indices.cpu(), dim=0)

    # CPU input, CUDA indices
    with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
        torch.gather(t.cpu(), 0, indices)

    with self.assertRaisesRegex(RuntimeError,
                                r"Expected tensor to have .* but got tensor with .* torch.take_along_dim()"):
        torch.take_along_dim(t.cpu(), indices, dim=0)
# The tests below are from NumPy test_indexing.py with some modifications to
# make them compatible with PyTorch. It's licensed under the BDS license below:
#
# Copyright (c) 2005-2017, NumPy Developers.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NumPy Developers nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class NumpyTests(TestCase):
def test_index_no_floats(self, device):
a = torch.tensor([[[5.]]], device=device)
self.assertRaises(IndexError, lambda: a[0.0])
self.assertRaises(IndexError, lambda: a[0, 0.0])
self.assertRaises(IndexError, lambda: a[0.0, 0])
self.assertRaises(IndexError, lambda: a[0.0, :])
self.assertRaises(IndexError, lambda: a[:, 0.0])
self.assertRaises(IndexError, lambda: a[:, 0.0, :])
self.assertRaises(IndexError, lambda: a[0.0, :, :])
self.assertRaises(IndexError, lambda: a[0, 0, 0.0])
self.assertRaises(IndexError, lambda: a[0.0, 0, 0])
self.assertRaises(IndexError, lambda: a[0, 0.0, 0])
self.assertRaises(IndexError, lambda: a[-1.4])
self.assertRaises(IndexError, lambda: a[0, -1.4])
self.assertRaises(IndexError, lambda: a[-1.4, 0])
self.assertRaises(IndexError, lambda: a[-1.4, :])
self.assertRaises(IndexError, lambda: a[:, -1.4])
self.assertRaises(IndexError, lambda: a[:, -1.4, :])
self.assertRaises(IndexError, lambda: a[-1.4, :, :])
self.assertRaises(IndexError, lambda: a[0, 0, -1.4])
self.assertRaises(IndexError, lambda: a[-1.4, 0, 0])
self.assertRaises(IndexError, lambda: a[0, -1.4, 0])
# self.assertRaises(IndexError, lambda: a[0.0:, 0.0])
# self.assertRaises(IndexError, lambda: a[0.0:, 0.0,:])
def test_none_index(self, device):
# `None` index adds newaxis
a = tensor([1, 2, 3], device=device)
self.assertEqual(a[None].dim(), a.dim() + 1)
def test_empty_tuple_index(self, device):
# Empty tuple index creates a view
a = tensor([1, 2, 3], device=device)
self.assertEqual(a[()], a)
self.assertEqual(a[()].data_ptr(), a.data_ptr())
def test_empty_fancy_index(self, device):
# Empty list index creates an empty array
a = tensor([1, 2, 3], device=device)
self.assertEqual(a[[]], torch.tensor([], dtype=torch.long, device=device))
b = tensor([], device=device).long()
self.assertEqual(a[[]], torch.tensor([], dtype=torch.long, device=device))
b = tensor([], device=device).float()
self.assertRaises(IndexError, lambda: a[b])
def test_ellipsis_index(self, device):
a = tensor([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], device=device)
self.assertIsNot(a[...], a)
self.assertEqual(a[...], a)
# `a[...]` was `a` in numpy <1.9.
self.assertEqual(a[...].data_ptr(), a.data_ptr())
# Slicing with ellipsis can skip an
# arbitrary number of dimensions
self.assertEqual(a[0, ...], a[0])
self.assertEqual(a[0, ...], a[0, :])
self.assertEqual(a[..., 0], a[:, 0])
# In NumPy, slicing with ellipsis results in a 0-dim array. In PyTorch
# we don't have separate 0-dim arrays and scalars.
self.assertEqual(a[0, ..., 1], torch.tensor(2, device=device))
# Assignment with `(Ellipsis,)` on 0-d arrays
b = torch.tensor(1)
b[(Ellipsis,)] = 2
self.assertEqual(b, 2)
def test_single_int_index(self, device):
# Single integer index selects one row
a = tensor([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], device=device)
self.assertEqual(a[0], [1, 2, 3])
self.assertEqual(a[-1], [7, 8, 9])
# Index out of bounds produces IndexError
self.assertRaises(IndexError, a.__getitem__, 1 << 30)
# Index overflow produces Exception NB: different exception type
self.assertRaises(Exception, a.__getitem__, 1 << 64)
def test_single_bool_index(self, device):
# Single boolean index
a = tensor([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], device=device)
self.assertEqual(a[True], a[None])
self.assertEqual(a[False], a[None][0:0])
def test_boolean_shape_mismatch(self, device):
arr = torch.ones((5, 4, 3), device=device)
index = tensor([True], device=device)
self.assertRaisesRegex(IndexError, 'mask', lambda: arr[index])
index = tensor([False] * 6, device=device)
self.assertRaisesRegex(IndexError, 'mask', lambda: arr[index])
index = torch.ByteTensor(4, 4).to(device).zero_()
self.assertRaisesRegex(IndexError, 'mask', lambda: arr[index])
self.assertRaisesRegex(IndexError, 'mask', lambda: arr[(slice(None), index)])
def test_boolean_indexing_onedim(self, device):
# Indexing a 2-dimensional array with
# boolean array of length one
a = tensor([[0., 0., 0.]], device=device)
b = tensor([True], device=device)
self.assertEqual(a[b], a)
# boolean assignment
a[b] = 1.
self.assertEqual(a, tensor([[1., 1., 1.]], device=device))
def test_boolean_assignment_value_mismatch(self, device):
# A boolean assignment should fail when the shape of the values
# cannot be broadcast to the subscription. (see also gh-3458)
a = torch.arange(0, 4, device=device)
def f(a, v):
a[a > -1] = tensor(v).to(device)
self.assertRaisesRegex(Exception, 'shape mismatch', f, a, [])
self.assertRaisesRegex(Exception, 'shape mismatch', f, a, [1, 2, 3])
self.assertRaisesRegex(Exception, 'shape mismatch', f, a[:1], [1, 2, 3])
def test_boolean_indexing_twodim(self, device):
# Indexing a 2-dimensional array with
# 2-dimensional boolean array
a = tensor([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], device=device)
b = tensor([[True, False, True],
[False, True, False],
[True, False, True]], device=device)
self.assertEqual(a[b], tensor([1, 3, 5, 7, 9], device=device))
self.assertEqual(a[b[1]], tensor([[4, 5, 6]], device=device))
self.assertEqual(a[b[0]], a[b[2]])
# boolean assignment
a[b] = 0
self.assertEqual(a, tensor([[0, 2, 0],
[4, 0, 6],
[0, 8, 0]], device=device))
def test_boolean_indexing_weirdness(self, device):
# Weird boolean indexing things
a = torch.ones((2, 3, 4), device=device)
self.assertEqual((0, 2, 3, 4), a[False, True, ...].shape)
self.assertEqual(torch.ones(1, 2, device=device), a[True, [0, 1], True, True, [1], [[2]]])
self.assertRaises(IndexError, lambda: a[False, [0, 1], ...])
def test_boolean_indexing_weirdness_tensors(self, device):
# Weird boolean indexing things
false = torch.tensor(False, device=device)
true = torch.tensor(True, device=device)
a = torch.ones((2, 3, 4), device=device)
self.assertEqual((0, 2, 3, 4), a[False, True, ...].shape)
self.assertEqual(torch.ones(1, 2, device=device), a[true, [0, 1], true, true, [1], [[2]]])
self.assertRaises(IndexError, lambda: a[false, [0, 1], ...])
def test_boolean_indexing_alldims(self, device):
true = torch.tensor(True, device=device)
a = torch.ones((2, 3), device=device)
self.assertEqual((1, 2, 3), a[True, True].shape)
self.assertEqual((1, 2, 3), a[true, true].shape)
def test_boolean_list_indexing(self, device):
# Indexing a 2-dimensional array with
# boolean lists
a = tensor([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], device=device)
b = [True, False, False]
c = [True, True, False]
self.assertEqual(a[b], tensor([[1, 2, 3]], device=device))
self.assertEqual(a[b, b], tensor([1], device=device))
self.assertEqual(a[c], tensor([[1, 2, 3], [4, 5, 6]], device=device))
self.assertEqual(a[c, c], tensor([1, 5], device=device))
def test_everything_returns_views(self, device):
# Before `...` would return a itself.
a = tensor([5], device=device)
self.assertIsNot(a, a[()])
self.assertIsNot(a, a[...])
self.assertIsNot(a, a[:])
def test_broaderrors_indexing(self, device):
a = torch.zeros(5, 5, device=device)
self.assertRaisesRegex(IndexError, 'shape mismatch', a.__getitem__, ([0, 1], [0, 1, 2]))
self.assertRaisesRegex(IndexError, 'shape mismatch', a.__setitem__, ([0, 1], [0, 1, 2]), 0)
def test_trivial_fancy_out_of_bounds(self, device):
a = torch.zeros(5, device=device)
ind = torch.ones(20, dtype=torch.int64, device=device)
if a.is_cuda:
raise unittest.SkipTest('CUDA asserts instead of raising an exception')
ind[-1] = 10
self.assertRaises(IndexError, a.__getitem__, ind)
self.assertRaises(IndexError, a.__setitem__, ind, 0)
ind = torch.ones(20, dtype=torch.int64, device=device)
ind[0] = 11
self.assertRaises(IndexError, a.__getitem__, ind)
self.assertRaises(IndexError, a.__setitem__, ind, 0)
def test_index_is_larger(self, device):
# Simple case of fancy index broadcasting of the index.
a = torch.zeros((5, 5), device=device)
a[[[0], [1], [2]], [0, 1, 2]] = tensor([2., 3., 4.], device=device)
self.assertTrue((a[:3, :3] == tensor([2., 3., 4.], device=device)).all())
def test_broadcast_subspace(self, device):
a = torch.zeros((100, 100), device=device)
v = torch.arange(0., 100, device=device)[:, None]
b = torch.arange(99, -1, -1, device=device).long()
a[b] = v
expected = b.float().unsqueeze(1).expand(100, 100)
self.assertEqual(a, expected)
# Generate per-device (CPU/CUDA/...) variants of the test classes, skipping
# the meta device.
instantiate_device_type_tests(TestIndexing, globals(), except_for='meta')
instantiate_device_type_tests(NumpyTests, globals(), except_for='meta')

if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_indexing.py |
# Owner(s): ["oncall: jit"]

# Re-run the entire test_jit_fuser suite under the legacy executor: the flag
# must be appended to argv *before* the wildcard import triggers the suite's
# module-level setup.
import sys
sys.argv.append("--jit_executor=legacy")
from test_jit_fuser import *  # noqa: F403

if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_jit_fuser_legacy.py |
# Owner(s): ["module: cuda"]
import torch
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocmVersionLessThan
import sys
import unittest
# NOTE: this needs to be run in a brand new process
# We cannot import TEST_CUDA and TEST_MULTIGPU from torch.testing._internal.common_cuda here,
# because if we do that, the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed
# multiple times as well during the execution of this test suite, and it will
# cause CUDA OOM error on Windows.
# Probe CUDA availability directly rather than via common_cuda (see the
# comment above about avoiding repeated TEST_CUDNN evaluation).
TEST_CUDA = torch.cuda.is_available()
TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2

if not TEST_CUDA:
    print('CUDA not available, skipping tests', file=sys.stderr)
    # Rebinding TestCase to `object` keeps the class below from being
    # collected as a test case.
    TestCase = object  # noqa: F811
class TestCudaPrimaryCtx(TestCase):
    """Verifies that common operations on a cuda:1 tensor never create a CUDA
    primary context on device 0.  Must run in a fresh process (see setUp)."""

    # Message shown when the no-preexisting-context precondition is violated.
    CTX_ALREADY_CREATED_ERR_MSG = (
        "Tests defined in test_cuda_primary_ctx.py must be run in a process "
        "where CUDA contexts are never created. Use either run_test.py or add "
        "--subprocess to run each test in a different subprocess.")

    @skipIfRocmVersionLessThan((4, 4, 21504))
    def setUp(self):
        for device in range(torch.cuda.device_count()):
            # Ensure context has not been created beforehand
            self.assertFalse(torch._C._cuda_hasPrimaryContext(device), TestCudaPrimaryCtx.CTX_ALREADY_CREATED_ERR_MSG)

    @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
    def test_str_repr(self):
        """str()/repr() of a cuda:1 tensor must not touch device 0."""
        x = torch.randn(1, device='cuda:1')

        # We should have only created context on 'cuda:1'
        self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
        self.assertTrue(torch._C._cuda_hasPrimaryContext(1))

        str(x)
        repr(x)

        # We should still have only created context on 'cuda:1'
        self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
        self.assertTrue(torch._C._cuda_hasPrimaryContext(1))

    @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
    def test_copy(self):
        """Copying a cuda:1 tensor to CPU must not touch device 0."""
        x = torch.randn(1, device='cuda:1')

        # We should have only created context on 'cuda:1'
        self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
        self.assertTrue(torch._C._cuda_hasPrimaryContext(1))

        y = torch.randn(1, device='cpu')
        y.copy_(x)

        # We should still have only created context on 'cuda:1'
        self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
        self.assertTrue(torch._C._cuda_hasPrimaryContext(1))

    @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
    def test_pin_memory(self):
        """is_pinned checks and pinned CPU allocations must not touch device 0."""
        x = torch.randn(1, device='cuda:1')

        # We should have only created context on 'cuda:1'
        self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
        self.assertTrue(torch._C._cuda_hasPrimaryContext(1))

        self.assertFalse(x.is_pinned())

        # We should still have only created context on 'cuda:1'
        self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
        self.assertTrue(torch._C._cuda_hasPrimaryContext(1))

        x = torch.randn(3, device='cpu').pin_memory()

        # We should still have only created context on 'cuda:1'
        self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
        self.assertTrue(torch._C._cuda_hasPrimaryContext(1))

        self.assertTrue(x.is_pinned())

        # We should still have only created context on 'cuda:1'
        self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
        self.assertTrue(torch._C._cuda_hasPrimaryContext(1))

        x = torch.randn(3, device='cpu', pin_memory=True)

        # We should still have only created context on 'cuda:1'
        self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
        self.assertTrue(torch._C._cuda_hasPrimaryContext(1))

        x = torch.zeros(3, device='cpu', pin_memory=True)

        # We should still have only created context on 'cuda:1'
        self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
        self.assertTrue(torch._C._cuda_hasPrimaryContext(1))

        x = torch.empty(3, device='cpu', pin_memory=True)

        # We should still have only created context on 'cuda:1'
        self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
        self.assertTrue(torch._C._cuda_hasPrimaryContext(1))

        x = x.pin_memory()

        # We should still have only created context on 'cuda:1'
        self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
        self.assertTrue(torch._C._cuda_hasPrimaryContext(1))
# Run the primary-context tests when executed directly.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_cuda_primary_ctx.py |
# Owner(s): ["module: cuda"]
import torch
from torch.cuda.jiterator import _create_jit_fn as create_jit_fn
from torch.cuda.jiterator import _create_multi_output_jit_fn as create_multi_output_jit_fn
import sys
from itertools import product
from torch.testing._internal.common_utils import TestCase, parametrize, run_tests, TEST_CUDA
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_device_type import (
skipCUDAIfRocm, skipCUDAIf, instantiate_device_type_tests, dtypes, toleranceOverride, tol)
from torch.testing._internal.common_cuda import _get_torch_cuda_version
if not TEST_CUDA:
    print('CUDA not available, skipping tests', file=sys.stderr)
    # Rebinding TestCase to `object` keeps the class below from being
    # collected as a test case.
    TestCase = object  # noqa: F811
# Fused elementwise kernel (alpha*x + beta*y) shared by most tests below;
# alpha and beta default to 1.
code_string = "template <typename T> T my_fused_kernel(T x, T y, T alpha, T beta) { return alpha * x + beta * y; }"
jitted_fn = create_jit_fn(code_string, alpha=1, beta=1)
def ref_fn(x, y, alpha=1, beta=1):
    """Eager-mode reference for the jiterator kernel: alpha*x + beta*y."""
    scaled_x = alpha * x
    scaled_y = beta * y
    return scaled_x + scaled_y
class TestPythonJiterator(TestCase):
@parametrize("shape_strides", [
    (([3, 3], [3, 1]), ([3, 3], [3, 1])),  # contiguous
])
@dtypes(*product(all_types_and_complex_and(torch.half, torch.bfloat16),
                 all_types_and_complex_and(torch.half, torch.bfloat16)))
def test_all_dtype_contiguous(self, device, dtypes, shape_strides):
    """Jiterator kernel matches the eager reference for contiguous inputs
    across all dtype pairs."""
    a_buffer = torch.rand(9, device=device).mul(10).type(dtypes[0])
    b_buffer = torch.rand(9, device=device).mul(10).type(dtypes[1])

    a = a_buffer.as_strided(*shape_strides[0])
    b = b_buffer.as_strided(*shape_strides[1])

    expected = ref_fn(a, b)
    result = jitted_fn(a, b)

    self.assertEqual(expected, result)
@skipCUDAIfRocm
# See https://github.com/pytorch/pytorch/pull/76394#issuecomment-1118018287 for details
@skipCUDAIf(_get_torch_cuda_version() < (11, 6), "On cuda 11.3, nvrtcCompileProgram is taking too long to "
            "compile jiterator generated kernels for non-contiguous input that requires dynamic-casting.")
@parametrize("shape_strides", [
    (([3, 3], [1, 3]), ([3, 1], [1, 3])),  # non-contiguous
])
@dtypes(*product(all_types_and_complex_and(torch.half, torch.bfloat16),
                 all_types_and_complex_and(torch.half, torch.bfloat16)))
def test_all_dtype_noncontiguous(self, device, dtypes, shape_strides):
    """Jiterator kernel matches the eager reference for non-contiguous
    inputs across all dtype pairs."""
    a_buffer = torch.rand(9, device=device).mul(10).type(dtypes[0])
    b_buffer = torch.rand(9, device=device).mul(10).type(dtypes[1])

    a = a_buffer.as_strided(*shape_strides[0])
    b = b_buffer.as_strided(*shape_strides[1])

    expected = ref_fn(a, b)
    result = jitted_fn(a, b)

    self.assertEqual(expected, result)
@dtypes(torch.float, torch.double, torch.float16, torch.bfloat16)
@parametrize("alpha", [-1, 2.0, None])
@parametrize("beta", [3, -4.2, None])
@toleranceOverride({torch.float16 : tol(atol=1e-2, rtol=1e-3)})
def test_extra_args(self, device, dtype, alpha, beta):
    """Scalar keyword args (alpha/beta) override the kernel's defaults; None
    means 'use the default'."""
    a = torch.rand(3, device=device).mul(10).type(dtype)
    b = torch.rand(3, device=device).mul(10).type(dtype)

    extra_args = {}
    if alpha is not None:
        extra_args["alpha"] = alpha
    if beta is not None:
        extra_args["beta"] = beta

    expected = ref_fn(a, b, **extra_args)
    result = jitted_fn(a, b, **extra_args)

    self.assertEqual(expected, result)
@parametrize("is_train", [True, False])
def test_bool_extra_args(self, device, is_train):
    """Boolean extra args are passed through to the generated kernel."""
    code_string = "template <typename T> T conditional(T x, T mask, bool is_train) { return is_train ? x * mask : x; }"
    jitted_fn = create_jit_fn(code_string, is_train=False)

    def ref_fn(x, mask, is_train):
        return x * mask if is_train else x

    a = torch.rand(3, device=device)
    b = torch.rand(3, device=device)

    expected = ref_fn(a, b, is_train=is_train)
    result = jitted_fn(a, b, is_train=is_train)
    self.assertEqual(expected, result)
def test_multiple_functors(self, device):
    """Kernel code may define helper functors; the last-defined functor is
    the entry point."""
    code_string = '''
    template <typename T> T fn(T x, T mask) { return x * mask; }
    template <typename T> T main_fn(T x, T mask, T y) { return fn(x, mask) + y; }
    '''
    jitted_fn = create_jit_fn(code_string)

    def ref_fn(x, mask, y):
        return x * mask + y

    a = torch.rand(3, device=device)
    b = torch.rand(3, device=device)
    c = torch.rand(3, device=device)

    expected = ref_fn(a, b, c)
    result = jitted_fn(a, b, c)
    self.assertEqual(expected, result)
@parametrize("num_inputs", [1, 5, 8])
def test_various_num_inputs(self, num_inputs):
inputs = []
for i in range(num_inputs):
inputs.append(torch.rand(3, device='cuda').mul(10))
input_string = ",".join([f"T i{i}" for i in range(num_inputs)])
function_body = "+".join([f"i{i}" for i in range(num_inputs)])
code_string = f"template <typename T> T my_kernel({input_string}) {{ return {function_body}; }}"
jitted_fn = create_jit_fn(code_string)
def ref_fn(*inputs):
return torch.sum(torch.stack(inputs), dim=0)
expected = ref_fn(*inputs)
result = jitted_fn(*inputs)
self.assertEqual(expected, result)
@parametrize("num_outputs", [1, 4, 8])
def test_various_num_outputs(self, num_outputs):
input = torch.rand(3, device='cuda')
output_string = ", ".join([f"T& out{i}" for i in range(num_outputs)])
function_body = ""
for i in range(num_outputs):
function_body += f"out{i} = input + {i};\n"
# NB: return type must be void, otherwise ROCm silently fails
code_string = f"template <typename T> void my_kernel(T input, {output_string}) {{ {function_body} }}"
jitted_fn = create_multi_output_jit_fn(code_string, num_outputs)
def ref_fn(input):
outputs = []
for i in range(num_outputs):
outputs.append(input + i)
if num_outputs == 1:
return outputs[0]
return tuple(outputs)
expected = ref_fn(input)
result = jitted_fn(input)
for i in range(num_outputs):
self.assertEqual(expected[i], result[i])
@parametrize("code_string", [
"template <typename T> T my _kernel(T x) { return x; }",
"template <typename T> Tmy_kernel(T x) { return x; }",
])
def test_invalid_function_name(self, code_string):
with self.assertRaises(Exception):
jitted_fn = create_jit_fn(code_string)
# Jiterator is CUDA-only, so only instantiate the device-parametrized tests
# for CUDA devices.
instantiate_device_type_tests(TestPythonJiterator, globals(), only_for="cuda")

if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_jiterator.py |
# -*- coding: utf-8 -*-
# Owner(s): ["oncall: quantization"]
from torch.testing._internal.common_utils import run_tests
# Quantization core tests. These include tests for
# - quantized kernels
# - quantized functional operators
# - quantized workflow modules
# - quantized workflow operators
# - quantized tensor
# 1. Quantized Kernels
# TODO: merge the different quantized op tests into one test class
from quantization.core.test_quantized_op import TestQuantizedOps # noqa: F401
from quantization.core.test_quantized_op import TestQNNPackOps # noqa: F401
from quantization.core.test_quantized_op import TestQuantizedLinear # noqa: F401
from quantization.core.test_quantized_op import TestQuantizedConv # noqa: F401
from quantization.core.test_quantized_op import TestDynamicQuantizedOps # noqa: F401
from quantization.core.test_quantized_op import TestComparatorOps # noqa: F401
from quantization.core.test_quantized_op import TestPadding # noqa: F401
from quantization.core.test_quantized_op import TestQuantizedEmbeddingOps # noqa: F401
# 2. Quantized Functional/Workflow Ops
from quantization.core.test_quantized_functional import TestQuantizedFunctionalOps # noqa: F401
from quantization.core.test_workflow_ops import TestFakeQuantizeOps # noqa: F401
from quantization.core.test_workflow_ops import TestFusedObsFakeQuant # noqa: F401
# 3. Quantized Tensor
from quantization.core.test_quantized_tensor import TestQuantizedTensor # noqa: F401
# 4. Modules
from quantization.core.test_workflow_module import TestFakeQuantize # noqa: F401
from quantization.core.test_workflow_module import TestObserver # noqa: F401
from quantization.core.test_quantized_module import TestStaticQuantizedModule # noqa: F401
from quantization.core.test_quantized_module import TestDynamicQuantizedModule # noqa: F401
from quantization.core.test_quantized_module import TestReferenceQuantizedModule # noqa: F401
from quantization.core.test_workflow_module import TestRecordHistogramObserver # noqa: F401
from quantization.core.test_workflow_module import TestHistogramObserver # noqa: F401
from quantization.core.test_workflow_module import TestDistributed # noqa: F401
from quantization.core.test_workflow_module import TestFusedObsFakeQuantModule # noqa: F401
from quantization.core.test_backend_config import TestBackendConfig # noqa: F401
from quantization.core.test_utils import TestUtils # noqa: F401
from quantization.core.test_docs import TestQuantizationDocs # noqa: F401
# Eager Mode Workflow. Tests for the functionality of APIs and different features implemented
# using eager mode.
# 1. Eager mode post training quantization
from quantization.eager.test_quantize_eager_ptq import TestQuantizeEagerPTQStatic # noqa: F401
from quantization.eager.test_quantize_eager_ptq import TestQuantizeEagerPTQDynamic # noqa: F401
from quantization.eager.test_quantize_eager_ptq import TestQuantizeEagerOps # noqa: F401
from quantization.eager.test_quantize_eager_ptq import TestQuantizeEagerONNXExport # noqa: F401
# 2. Eager mode quantization aware training
from quantization.eager.test_quantize_eager_qat import TestQuantizeEagerQAT # noqa: F401
from quantization.eager.test_quantize_eager_qat import TestQuantizeEagerQATNumerics # noqa: F401
# 3. Eager mode fusion passes
from quantization.eager.test_fuse_eager import TestFuseEager # noqa: F401
# 4. Testing model numerics between quantized and FP32 models
from quantization.eager.test_model_numerics import TestModelNumericsEager # noqa: F401
# 5. Tooling: numeric_suite
from quantization.eager.test_numeric_suite_eager import TestNumericSuiteEager # noqa: F401
# 6. Equalization and Bias Correction
from quantization.eager.test_equalize_eager import TestEqualizeEager # noqa: F401
from quantization.eager.test_bias_correction_eager import TestBiasCorrectionEager # noqa: F401
# FX GraphModule Graph Mode Quantization. Tests for the functionality of APIs and different features implemented
# using fx quantization.
try:
from quantization.fx.test_quantize_fx import TestFuseFx # noqa: F401
from quantization.fx.test_quantize_fx import TestQuantizeFx # noqa: F401
from quantization.fx.test_quantize_fx import TestQuantizeFxOps # noqa: F401
from quantization.fx.test_quantize_fx import TestQuantizeFxModels # noqa: F401
from quantization.fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
except ImportError:
# In FBCode we separate FX out into a separate target for the sake of dev
# velocity. These are covered by a separate test target `quantization_fx`
pass
try:
from quantization.fx.test_numeric_suite_fx import TestFXGraphMatcher # noqa: F401
from quantization.fx.test_numeric_suite_fx import TestFXGraphMatcherModels # noqa: F401
from quantization.fx.test_numeric_suite_fx import TestFXNumericSuiteCoreAPIs # noqa: F401
from quantization.fx.test_numeric_suite_fx import TestFXNumericSuiteCoreAPIsModels # noqa: F401
except ImportError:
pass
# Test the model report module
try:
from quantization.fx.test_model_report_fx import TestFxModelReportDetector # noqa: F401
from quantization.fx.test_model_report_fx import TestFxModelReportObserver # noqa: F401
from quantization.fx.test_model_report_fx import TestFxModelReportDetectDynamicStatic # noqa: F401
from quantization.fx.test_model_report_fx import TestFxModelReportClass # noqa: F401
from quantization.fx.test_model_report_fx import TestFxDetectInputWeightEqualization # noqa: F401
from quantization.fx.test_model_report_fx import TestFxDetectOutliers # noqa: F401
from quantization.fx.test_model_report_fx import TestFxModelReportVisualizer # noqa: F401
except ImportError:
pass
# Equalization for FX mode
try:
from quantization.fx.test_equalize_fx import TestEqualizeFx # noqa: F401
except ImportError:
pass
# Backward Compatibility. Tests serialization and BC for quantized modules.
try:
from quantization.bc.test_backward_compatibility import TestSerialization # noqa: F401
except ImportError:
pass
# JIT Graph Mode Quantization
from quantization.jit.test_quantize_jit import TestQuantizeJit # noqa: F401
from quantization.jit.test_quantize_jit import TestQuantizeJitPasses # noqa: F401
from quantization.jit.test_quantize_jit import TestQuantizeJitOps # noqa: F401
from quantization.jit.test_quantize_jit import TestQuantizeDynamicJitPasses # noqa: F401
from quantization.jit.test_quantize_jit import TestQuantizeDynamicJitOps # noqa: F401
# Quantization specific fusion passes
from quantization.jit.test_fusion_passes import TestFusionPasses # noqa: F401
from quantization.jit.test_deprecated_jit_quant import TestDeprecatedJitQuantized # noqa: F401
# AO Migration tests
from quantization.ao_migration.test_quantization import TestAOMigrationQuantization # noqa: F401
try:
from quantization.ao_migration.test_quantization_fx import TestAOMigrationQuantizationFx # noqa: F401
except ImportError:
pass
try:
from quantization.dbr.test_quantize_dbr import TestQuantizeDBR # noqa: F401
from quantization.dbr.test_quantize_dbr import TestQuantizeDBRIndividualOps # noqa: F401
from quantization.dbr.test_quantize_dbr import TestQuantizeDBRMultipleOps # noqa: F401
from quantization.dbr.test_quantize_dbr import TestQuantizeDBRModels # noqa: F401
except ImportError:
pass
# Entry point: runs every quantization test class imported above.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_quantization.py |
# Owner(s): ["module: nn"]
import unittest
import sys
import os
import subprocess
import torch
import torch.nn.utils.stateless as stateless
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_utils import run_tests, TestCase
class MockModule(torch.nn.Module):
    """Minimal module used by the stateless functional-call tests.

    Holds one 1x1 linear layer and one registered buffer of ones, so
    ``forward`` computes ``l1(x) + buffer``.
    """

    def __init__(self):
        super().__init__()
        self.l1 = torch.nn.Linear(1, 1)
        self.register_buffer('buffer', torch.ones(1))

    def forward(self, x):
        """Apply the linear layer and add the buffer."""
        out = self.l1(x)
        return out + self.buffer
class TestStatelessFunctionalAPI(TestCase):
    """Tests for ``torch.nn.utils.stateless.functional_call``.

    The common pattern: substitute a module's parameters/buffers with
    externally supplied tensors for the duration of one call, and verify
    that (a) the call uses the substitutes and (b) the module's own state
    is untouched afterwards.
    """

    def _run_call_with_mock_module(self, module, device='cpu', prefix=''):
        """Call ``module`` via functional_call with identity weights.

        ``prefix`` is the submodule path under which the MockModule lives
        (e.g. 'module' for DataParallel-wrapped modules); parameter keys
        are prefixed accordingly.
        """
        x = torch.rand((1, 1)).to(device)
        weight = torch.tensor([[1.0]], device=device)
        bias = torch.tensor([0.0], device=device)
        buffer = torch.tensor([0.0], device=device)
        if prefix != '':
            parameters = {f'{prefix}.l1.weight': weight,
                          f'{prefix}.l1.bias': bias,
                          f'{prefix}.buffer': buffer}
        else:
            parameters = {'l1.weight': weight,
                          'l1.bias': bias,
                          'buffer': buffer}
        to_check = module
        if prefix != '':
            to_check = getattr(module, prefix)
        prev_weight = to_check.l1.weight.clone()
        prev_buffer = to_check.buffer.clone()
        # the parameters represent an identity function contrary to the
        # existing params in module. So here we expect the result to be the
        # same as the input if the weight swapping went well.
        res = stateless.functional_call(module, parameters, x)
        self.assertEqual(x, res)
        # check that the weight remain unmodified
        cur_weight = to_check.l1.weight
        cur_buffer = to_check.buffer
        self.assertEqual(cur_weight, prev_weight)
        self.assertEqual(cur_buffer, prev_buffer)

    def test_functional_call(self):
        """functional_call works on a plain eager module."""
        module = MockModule()
        self._run_call_with_mock_module(module)

    def test_functional_call_with_jit(self):
        """functional_call rejects scripted and traced (JIT) modules."""
        module = MockModule()
        jit_module = torch.jit.script(module)
        with self.assertRaisesRegex(
            RuntimeError,
            r'used with Jitted modules'
        ):
            self._run_call_with_mock_module(jit_module)
        x = torch.rand((1, 1))
        traced_module = torch.jit.trace(module, x)
        with self.assertRaisesRegex(
            RuntimeError,
            r'used with Jitted modules'
        ):
            self._run_call_with_mock_module(traced_module)

    @unittest.skipIf(not TEST_MULTIGPU, 'multi-GPU not supported')
    @unittest.skip("This doesn't work right now")
    def test_functional_call_with_data_parallel(self):
        """functional_call through a DataParallel wrapper (currently broken)."""
        module = MockModule()
        module.cuda()
        dp_module = torch.nn.DataParallel(module, [0, 1])
        self._run_call_with_mock_module(dp_module, device='cuda', prefix='module')

    def test_functional_call_with_gradient(self):
        """Backward flows into the substituted tensors, not the module's own."""
        module = MockModule()
        x = torch.rand((1, 1))
        weight = torch.tensor([[1.0]], requires_grad=True)
        bias = torch.tensor([0.0], requires_grad=True)
        buffer = torch.tensor([0.0])
        parameters = {'l1.weight': weight,
                      'l1.bias': bias,
                      'buffer': buffer}
        res = stateless.functional_call(module, parameters, x)
        # Check that a backward step calculates the gradient of the supplied parameters
        res.backward()
        self.assertIsNotNone(weight.grad)
        self.assertIsNotNone(bias.grad)
        self.assertIsNone(buffer.grad)
        # Gradient was not calculated for the module stated and buffers
        self.assertIsNone(module.l1.weight.grad)
        self.assertIsNone(module.l1.bias.grad)
        self.assertIsNone(module.buffer.grad)

    def test_functional_batch_norm(self):
        """A substituted buffer receives in-place stats updates; the
        module's own buffer stays untouched until called without substitutes."""
        module = torch.nn.BatchNorm1d(10)
        module.train()  # Allow stats update
        # lets replace the running_mean buffer and check if its correctly updated
        x = torch.full((20, 10), 128.0)
        rm = torch.zeros(10)
        parameters = {'running_mean': rm}
        prev_rm = module.running_mean.clone()
        res = stateless.functional_call(module, parameters, x)
        cur_rm = module.running_mean
        self.assertEqual(cur_rm, prev_rm)
        # momentum 0.1 applied to mean 128 -> 12.8
        self.assertEqual(rm, torch.full((10,), 12.8))
        # Now run functional without reparametrization and check that the module has
        # been updated
        res = stateless.functional_call(module, {}, x)
        self.assertEqual(module.running_mean, torch.full((10,), 12.8))

    def test_circular_references(self):
        """Substitution works even when the module graph has a cycle and
        parameters are addressed through the cyclic path."""
        module = MockModule()
        # Add a circular reference
        module.l1.m = module
        x = torch.rand((1, 1))
        weight = torch.tensor([[1.0]])
        bias = torch.tensor([0.0])
        buffer = torch.tensor([0.0])
        parameters = {'l1.m.l1.weight': weight,
                      'l1.bias': bias,
                      'l1.m.buffer': buffer}
        prev_weight = module.l1.weight.clone()
        prev_buffer = module.buffer.clone()
        res = stateless.functional_call(module, parameters, x)
        self.assertEqual(x, res)
        # check that the weights remain unmodified and were correctly accessed
        cur_weight = module.l1.weight
        cur_buffer = module.buffer
        self.assertEqual(cur_weight, prev_weight)
        self.assertEqual(cur_buffer, prev_buffer)

    def test_reparametrized_module_change_parametrization_original(self):
        """Substituting the '.original' tensor of a parametrized weight
        still goes through the parametrization (spectral norm stays applied)."""
        module = MockModule()
        torch.nn.utils.parametrizations.spectral_norm(module.l1)
        self.assertTrue('l1.parametrizations.weight.original' in dict(module.named_parameters()))
        orig_sn_weight = module.l1.weight.clone()
        x = torch.rand((1, 1))
        # We substitute the parameter inside the parametrization
        # the parametrization itself is not overwritten so it will be applied with a different
        # value for the original tensor
        parameters = {'l1.parametrizations.weight.original': torch.nn.Parameter(torch.tensor([[1.0]])),
                      'l1.bias': torch.tensor([0.0]),
                      'buffer': torch.tensor([0.0])}
        res = stateless.functional_call(module, parameters, x)
        self.assertEqual(x, res)
        # verify that the spectral normalization is still applied
        self.assertTrue('l1.parametrizations.weight.original' in dict(module.named_parameters()))
        self.assertEqual(orig_sn_weight, module.l1.weight)

    def test_reparamertize_module_fail_reset_to_original(self):
        """Even when the functional call raises, the module's original
        parametrized state must be restored."""
        module = MockModule()
        torch.nn.utils.parametrizations.spectral_norm(module.l1)
        self.assertTrue('l1.parametrizations.weight.original' in dict(module.named_parameters()))
        orig_sn_weight = module.l1.weight.clone()
        # We substitute the parameter inside the parametrization
        # the parametrization itself is not overwritten so it will be applied with a different
        # value for the original tensor
        parameters = {'l1.parametrizations.weight.original': torch.nn.Parameter(torch.tensor([[1.0]])),
                      'l1.bias': torch.tensor([0.0]),
                      'buffer': torch.tensor([0.0])}
        with self.assertRaisesRegex(RuntimeError, "shapes cannot be multiplied"):
            x = torch.rand((4, 5))  # to work, it should be of size (1, 1)
            stateless.functional_call(module, parameters, x)  # this call will fail because x is the wrong size
        # verify that the spectral normalization is still applied
        self.assertTrue('l1.parametrizations.weight.original' in dict(module.named_parameters()))
        self.assertEqual(orig_sn_weight, module.l1.weight)

    def test_setattr(self):
        """In-call assignment to a substituted buffer mutates the supplied
        tensor dict, not the module's own buffer."""
        class Foo(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.register_buffer('foo', torch.zeros(()))

            def forward(self, x):
                self.foo = self.foo + 1
                return x + self.foo

        a = {'foo': torch.zeros(())}
        mod = Foo()
        stateless.functional_call(mod, a, torch.ones(()))
        self.assertEqual(mod.foo, torch.zeros(()))
        self.assertEqual(a['foo'], torch.ones(()))
class TestStatelessDeprecation(TestCase):
    def test_private_stateless_warns(self):
        """Importing the deprecated ``torch.nn.utils._stateless`` module must
        emit exactly one warning.

        The check runs in a subprocess because the deprecation warning is
        emitted only on first import; the child exits with the number of
        captured warnings, so a return code of 1 is the expected outcome.
        """
        # NOTE(review): the child script's indentation was mangled in this
        # paste; reconstructed so the `from` import sits inside the `with`.
        script = """
import torch
import warnings

with warnings.catch_warnings(record=True) as w:
    from torch.nn.utils import _stateless

exit(len(w))
"""
        try:
            subprocess.check_output(
                [sys.executable, '-W', 'all', '-c', script],
                stderr=subprocess.STDOUT,
                # On Windows, opening the subprocess with the default CWD makes `import torch`
                # fail, so just set CWD to this script's directory
                cwd=os.path.dirname(os.path.realpath(__file__)),)
        except subprocess.CalledProcessError as e:
            # Exactly one warning captured -> child exited with code 1.
            self.assertEqual(e.returncode, 1)
        else:
            self.assertTrue(False, "No warning was raised.")
class TestPythonOptimizeMode(TestCase):
    def test_runs_with_optimize_flag(self):
        """``import torch`` must succeed under ``python -OO`` (docstrings and
        asserts stripped), i.e. torch must not rely on them at import time."""
        script = """
import torch
"""
        try:
            subprocess.check_output(
                [sys.executable, '-OO', '-c', script],
                stderr=subprocess.STDOUT,
                # On Windows, opening the subprocess with the default CWD makes `import torch`
                # fail, so just set CWD to this script's directory
                cwd=os.path.dirname(os.path.realpath(__file__)),)
        except subprocess.CalledProcessError as e:
            self.assertFalse(e.returncode, "Import failed while running python in optimized mode")
# Entry point: run the stateless-API test classes defined above.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_stateless.py |
# Usage: python create_dummy_model.py <name_of_the_file>
import sys
import torch
from torch import nn
class NeuralNetwork(nn.Module):
    """Small MLP for 28x28 inputs: flatten -> 784 -> 512 -> 512 -> 10 logits."""

    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28 * 28, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 10),
        )

    def forward(self, x):
        """Flatten each sample and return the unnormalized class logits."""
        return self.linear_relu_stack(self.flatten(x))
# Script entry point: scripts a freshly-initialized model and serializes it
# to the path given as the first CLI argument (side effect: writes a file).
if __name__ == '__main__':
    jit_module = torch.jit.script(NeuralNetwork())
    torch.jit.save(jit_module, sys.argv[1])
| pytorch-master | test/create_dummy_torchscript_model.py |
# Owner(s): ["module: unknown"]
from functools import partial, wraps
from itertools import chain
import torch
from torch.testing._internal.common_utils import \
(TestCase, is_iterable_of_tensors, run_tests, gradcheck, gradgradcheck, is_slow_gradcheck_env)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, OpDTypes)
# TODO: fixme https://github.com/pytorch/pytorch/issues/68972
torch.set_default_dtype(torch.float32)

# gradcheck requires double precision
# `ops` decorator pre-configured to run only over double/cdouble OpInfos.
_gradcheck_ops = partial(ops, dtypes=OpDTypes.supported,
                         allowed_dtypes=[torch.double, torch.cdouble])
class TestGradients(TestCase):
    """Gradient correctness tests driven by the OpInfo database (op_db).

    Each test is instantiated per (device, dtype, op) via the
    ``_gradcheck_ops`` decorator and funnels into ``_check_helper``, which
    runs torch.autograd's gradcheck/gradgradcheck machinery over the op's
    sample inputs.
    """
    exact_dtype = True

    # Copies inputs to inplace operations to avoid inplace modifications
    # to leaves requiring gradient
    def _get_safe_inplace(self, inplace_variant):
        @wraps(inplace_variant)
        def _fn(t, *args, **kwargs):
            return inplace_variant(t.clone(), *args, **kwargs)

        return _fn

    def _check_helper(self, device, dtype, op, variant, check, *, check_forward_ad=False, check_backward_ad=True,
                      check_batched_grad=None, check_batched_forward_grad=False):
        """Run the requested check ('gradcheck', 'bwgrad_bwgrad' or
        'fwgrad_bwgrad') for ``variant`` over all of ``op``'s sample inputs."""
        assert check in ('gradcheck', 'bwgrad_bwgrad', 'fwgrad_bwgrad')
        # NB: check_backward_ad does not affect gradgradcheck (always True)
        if variant is None:
            self.skipTest("Skipped! Variant not implemented.")
        if not op.supports_dtype(dtype, torch.device(device).type):
            self.skipTest(f"Skipped! {op.name} does not support dtype {str(dtype)}")

        def is_inplace(variant):
            # Safe-inplace wrappers (from _get_safe_inplace) expose the real
            # variant via functools.wraps' __wrapped__.
            if hasattr(variant, "__wrapped__"):
                return variant.__wrapped__ is op.get_inplace()
            return variant is op.get_inplace()

        include_conjugated_inputs = op.test_conjugated_samples and dtype.is_complex
        samples = op.sample_inputs(device, dtype, requires_grad=True, include_conjugated_inputs=include_conjugated_inputs,
                                   small_inputs_only=is_slow_gradcheck_env())

        for sample in samples:
            # Inplace ops cannot write into a broadcasted (read-only-shaped) input.
            if sample.broadcasts_input and is_inplace(variant):
                continue

            # Gradcheck expects tensors as its input, but autograd actually supports tensorlists
            # and tensors passed as kwargs. The following creates a function that accepts just
            # the tensors that require grad as varargs, and then recomposes them back into the
            # original input.

            # Creates gradcheck inputs by identifying tensors requiring grad
            all_args = None
            if is_iterable_of_tensors(sample.input):
                all_args = chain(sample.input, sample.args, sample.kwargs.values())
            else:
                all_args = tuple(chain((sample.input,), sample.args, sample.kwargs.values()))
            gradcheck_args = tuple(x for x in all_args if (isinstance(x, torch.Tensor) and x.requires_grad))

            def _input_recomposition_helper(inputs, inp, input_idx):
                # Replaces the grad-requiring tensors inside ``inp`` with the
                # corresponding entries of ``inputs`` (gradcheck's flat args),
                # advancing ``input_idx`` past each consumed tensor.
                if is_iterable_of_tensors(inp):
                    tensor_list = []
                    for x in inp:
                        if isinstance(x, torch.Tensor) and x.requires_grad:
                            tensor_list.append(inputs[input_idx])
                            input_idx = input_idx + 1
                        else:
                            tensor_list.append(x)
                    return tensor_list, input_idx
                elif isinstance(inp, torch.Tensor) and inp.requires_grad:
                    return inputs[input_idx], input_idx + 1
                else:
                    return inp, input_idx

            def fn(*inputs):
                # Puts inputs back into sample properly
                positional_args = []
                input_idx = 0
                inp, input_idx = _input_recomposition_helper(inputs, sample.input, input_idx)
                positional_args.append(inp)
                for x in sample.args:
                    inp, input_idx = _input_recomposition_helper(inputs, x, input_idx)
                    positional_args.append(inp)

                # Recreates kwargs
                kwargs = {}
                for k, v in sample.kwargs.items():
                    inp, input_idx = _input_recomposition_helper(inputs, v, input_idx)
                    kwargs[k] = inp

                output = op.gradcheck_wrapper(variant, *positional_args, **kwargs)
                if sample.output_process_fn_grad is not None:
                    return sample.output_process_fn_grad(output)
                return output

            if check == 'gradcheck':
                if check_batched_grad is None:
                    check_batched_grad = op.check_batched_grad
                self.assertTrue(gradcheck(fn, gradcheck_args,
                                          check_batched_grad=check_batched_grad,
                                          check_grad_dtypes=True,
                                          nondet_tol=op.gradcheck_nondet_tol,
                                          fast_mode=op.gradcheck_fast_mode,
                                          check_forward_ad=check_forward_ad,
                                          check_backward_ad=check_backward_ad,
                                          check_undefined_grad=True,
                                          check_batched_forward_grad=check_batched_forward_grad))
            elif check in ('bwgrad_bwgrad', 'fwgrad_bwgrad'):  # gradgrad check
                self.assertFalse(check_forward_ad, msg="Cannot run forward AD check for gradgradcheck")
                for gen_non_contig_grad_outputs in (False, True):
                    kwargs = {
                        "gen_non_contig_grad_outputs": gen_non_contig_grad_outputs,
                        "check_batched_grad": op.check_batched_gradgrad,
                        "check_grad_dtypes": True,
                        "nondet_tol": op.gradcheck_nondet_tol,
                        "fast_mode": op.gradcheck_fast_mode
                    }
                    if check == "fwgrad_bwgrad":
                        kwargs["check_fwd_over_rev"] = True
                        kwargs["check_rev_over_rev"] = False
                        kwargs["check_batched_grad"] = False
                        kwargs["check_undefined_grad"] = False

                    self.assertTrue(gradgradcheck(fn, gradcheck_args, **kwargs))
            else:
                self.assertTrue(False, msg="Unknown check requested!")

    def _grad_test_helper(self, device, dtype, op, variant, *, check_forward_ad=False, check_backward_ad=True,
                          check_batched_grad=None, check_batched_forward_grad=False):
        """Convenience wrapper: _check_helper with check='gradcheck'."""
        return self._check_helper(device, dtype, op, variant, 'gradcheck', check_forward_ad=check_forward_ad,
                                  check_backward_ad=check_backward_ad, check_batched_grad=check_batched_grad,
                                  check_batched_forward_grad=check_batched_forward_grad)

    def _skip_helper(self, op, device, dtype):
        """Skip the test when the op cannot be differentiated for this dtype."""
        if dtype not in op.supported_backward_dtypes(torch.device(device).type):
            self.skipTest("Skipped! Op doesn't support autograd for this dtype.")
        if not op.supports_autograd and not op.supports_forward_ad:
            self.skipTest("Skipped! autograd not supported.")

    # Tests that gradients are computed correctly
    @_gradcheck_ops(op_db)
    def test_fn_grad(self, device, dtype, op):
        # This is verified by test_dtypes in test_ops.py
        if dtype not in op.supported_backward_dtypes(torch.device(device).type):
            self.skipTest("Skipped! Dtype is not in supported backward dtypes!")
        else:
            self._grad_test_helper(device, dtype, op, op.get_op())

    # Method grad (and gradgrad, see below) tests are disabled since they're
    # costly and redundant with function grad (and gradgrad) tests
    # @_gradcheck_ops(op_db)
    # def test_method_grad(self, device, dtype, op):
    #     self._skip_helper(op, device, dtype)
    #     self._grad_test_helper(device, dtype, op, op.get_method())

    @_gradcheck_ops(op_db)
    def test_inplace_grad(self, device, dtype, op):
        self._skip_helper(op, device, dtype)
        if not op.inplace_variant:
            self.skipTest("Op has no inplace variant!")

        # Verifies an operation doesn't support inplace autograd if it claims not to
        if not op.supports_inplace_autograd:
            inplace = self._get_safe_inplace(op.get_inplace())
            for sample in op.sample_inputs(device, dtype, requires_grad=True):
                if sample.broadcasts_input:
                    continue
                with self.assertRaises(Exception):
                    result = inplace(sample)
                    result.sum().backward()
        else:
            self._grad_test_helper(device, dtype, op, self._get_safe_inplace(op.get_inplace()))

    # Test that gradients of gradients are computed correctly
    @_gradcheck_ops(op_db)
    def test_fn_gradgrad(self, device, dtype, op):
        self._skip_helper(op, device, dtype)
        if not op.supports_gradgrad:
            self.skipTest("Op claims it doesn't support gradgrad. This is not verified.")
        else:
            self._check_helper(device, dtype, op, op.get_op(), 'bwgrad_bwgrad')

    # Test that forward-over-reverse gradgrad is computed correctly
    @_gradcheck_ops(op_db)
    def test_fn_fwgrad_bwgrad(self, device, dtype, op):
        self._skip_helper(op, device, dtype)

        if op.supports_fwgrad_bwgrad:
            self._check_helper(device, dtype, op, op.get_op(), "fwgrad_bwgrad")
        else:
            # Ops declaring no fwgrad_bwgrad support must actually raise.
            err_msg = r"Trying to use forward AD with .* that does not support it"
            hint_msg = ("Running forward-over-backward gradgrad for an OP that has does not support it did not "
                        "raise any error. If your op supports forward AD, you should set supports_fwgrad_bwgrad=True.")
            with self.assertRaisesRegex(NotImplementedError, err_msg, msg=hint_msg):
                self._check_helper(device, dtype, op, op.get_op(), "fwgrad_bwgrad")

    # Test that gradients of gradients are properly raising
    @_gradcheck_ops(op_db)
    def test_fn_fail_gradgrad(self, device, dtype, op):
        self._skip_helper(op, device, dtype)
        if op.supports_gradgrad:
            self.skipTest("Skipped! Operation does support gradgrad")

        err_msg = r"derivative for .* is not implemented"
        with self.assertRaisesRegex(RuntimeError, err_msg):
            self._check_helper(device, dtype, op, op.get_op(), 'bwgrad_bwgrad')

    # Method gradgrad (and grad, see above) tests are disabled since they're
    # costly and redundant with function gradgrad (and grad) tests
    # @_gradcheck_ops(op_db)
    # def test_method_gradgrad(self, device, dtype, op):
    #     self._skip_helper(op, device, dtype)
    #     self._gradgrad_test_helper(device, dtype, op, op.get_method())

    @_gradcheck_ops(op_db)
    def test_inplace_gradgrad(self, device, dtype, op):
        self._skip_helper(op, device, dtype)
        if not op.inplace_variant or not op.supports_inplace_autograd:
            self.skipTest("Skipped! Operation does not support inplace autograd.")
        self._check_helper(device, dtype, op, self._get_safe_inplace(op.get_inplace()), "bwgrad_bwgrad")

    def _forward_grad_helper(self, device, dtype, op, variant, is_inplace):
        """Run forward-mode-AD gradcheck, or assert NotImplementedError when
        the op declares no forward AD support."""
        # TODO: clean up how attributes are passed to gradcheck from OpInfos
        def call_grad_test_helper():
            check_batched_forward_grad = ((op.check_batched_forward_grad and not is_inplace) or
                                          (op.check_inplace_batched_forward_grad and is_inplace))
            self._grad_test_helper(device, dtype, op, variant, check_forward_ad=True, check_backward_ad=False,
                                   check_batched_grad=False, check_batched_forward_grad=check_batched_forward_grad)
        if op.supports_forward_ad:
            call_grad_test_helper()
        else:
            err_msg = r"Trying to use forward AD with .* that does not support it"
            hint_msg = ("Running forward AD for an OP that has does not support it did not "
                        "raise any error. If your op supports forward AD, you should set supports_forward_ad=True")
            with self.assertRaisesRegex(NotImplementedError, err_msg, msg=hint_msg):
                call_grad_test_helper()

    @_gradcheck_ops(op_db)
    def test_forward_mode_AD(self, device, dtype, op):
        self._skip_helper(op, device, dtype)

        self._forward_grad_helper(device, dtype, op, op.get_op(), is_inplace=False)

    @_gradcheck_ops(op_db)
    def test_inplace_forward_mode_AD(self, device, dtype, op):
        self._skip_helper(op, device, dtype)

        if not op.inplace_variant or not op.supports_inplace_autograd:
            self.skipTest("Skipped! Operation does not support inplace autograd.")

        self._forward_grad_helper(device, dtype, op, self._get_safe_inplace(op.get_inplace()), is_inplace=True)
# Instantiate the (device, dtype, op)-parametrized tests for all device types.
instantiate_device_type_tests(TestGradients, globals())

if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_ops_gradients.py |
# Owner(s): ["oncall: profiler"]
import collections
import expecttest
import gc
import io
import json
import os
import re
import tempfile
from typing import List, Optional
import unittest
from dataclasses import dataclass, field
import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
import torch.utils.data.datapipes as dp
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_utils import (
TestCase, run_tests, TEST_WITH_ASAN, TEST_WITH_ROCM, IS_WINDOWS,
TEST_WITH_CROSSREF, TemporaryFileName, TemporaryDirectoryName)
from torch.autograd import (_record_function_with_args_enter, _record_function_with_args_exit)
from torch.autograd.profiler import profile as _profile
from torch.autograd.profiler_legacy import profile as _profile_legacy
from torch.profiler import (
kineto_available, profile, record_function, supported_activities,
DeviceType, ProfilerAction, ProfilerActivity, ExecutionGraphObserver,
_utils
)
from torch.profiler._pattern_matcher import (Pattern, NamePattern,
ExtraCUDACopyPattern,
ForLoopIndexingPattern,
FP32MatMulPattern,
OptimizerSingleTensorPattern,
SynchronizedDataLoaderPattern,
GradNotSetToNonePattern,
Conv2dBiasFollowedByBatchNorm2dPattern,
MatMulDimInFP16Pattern,
report_all_anti_patterns)
from torch.testing._internal.common_device_type import skipCUDAVersionIn
try:
import psutil
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
import pickle
@unittest.skipIf(not HAS_PSUTIL, "Requires psutil to run")
@unittest.skipIf(TEST_WITH_ASAN, "Cannot test with ASAN")
@unittest.skipIf(IS_WINDOWS, "Test is flaky on Windows")
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
class TestProfilerCUDA(TestCase):
    @skipCUDAVersionIn([(11, 5)])  # https://github.com/pytorch/pytorch/issues/69023
    def test_mem_leak(self):
        """Checks that there's no memory leak when using profiler with CUDA.

        Samples process RSS after each of 10 profiled workloads and fails
        only if RSS grew monotonically across the last 5 samples AND some
        step grew by more than 100 KiB.
        """
        t = torch.rand(1, 1).cuda()
        p = psutil.Process()
        last_rss = collections.deque(maxlen=5)
        for outer_idx in range(10):
            with _profile(use_cuda=True):
                for _ in range(1024):
                    t = torch.mm(t, t)

            gc.collect()
            torch.cuda.empty_cache()
            last_rss.append(p.memory_info().rss)

        # with CUDA events leaking the increase in memory was ~7 MB between
        # profiler invocations above
        is_increasing = all(
            [last_rss[idx] > last_rss[idx - 1] for idx in range(1, len(last_rss))])
        max_diff = -1
        for idx in range(1, len(last_rss)):
            max_diff = max(max_diff, last_rss[idx] - last_rss[idx - 1])
        self.assertTrue(not (is_increasing and max_diff > 100 * 1024),
                        msg='memory usage is increasing, {}'.format(str(last_rss)))

    def test_custom_module_input_op_ids(self):
        """Smoke test: emit_nvtx(record_shapes=True) runs through a custom
        autograd.Function's forward and backward without error."""
        class MyFunc(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x):
                ctx.save_for_backward(x)
                return x

            @staticmethod
            def backward(ctx, gO):
                x, = ctx.saved_tensors
                return x

        def custom_layer(input_ten):
            return MyFunc.apply(input_ten)

        # Only testing that emit_nvtx runs when
        # record_shapes option is enabled.
        with torch.autograd.profiler.emit_nvtx(record_shapes=True) as prof:
            x = torch.randn(10, 10, requires_grad=True)
            y = torch.randn(10, 10, requires_grad=True)
            z = x + y
            s = custom_layer(z)
            q = s.sum()
            q.backward()
class TestRecordFunction(TestCase):
    """Tests for record_function scopes, including DataPipe integration."""

    def _record_function_with_param(self):
        # Profile record_function scopes with and without extra arguments.
        u = torch.randn(3, 4, 5, requires_grad=True)
        with _profile(with_stack=True, use_kineto=kineto_available(), record_shapes=True) as prof:
            with record_function("## TEST 1 ##", "1, 2, 3"):
                rf_handle = _record_function_with_args_enter("## TEST 2 ##", 1, False, 2.5, [u, u], "hello", u)
                _record_function_with_args_exit(rf_handle)
            with record_function("## TEST 3 ##"):
                rf_handle = _record_function_with_args_enter("## TEST 4 ##")
                _record_function_with_args_exit(rf_handle)
        return prof

    def test_record_function(self):
        """Each record_function scope appears with the expected input shapes."""
        prof_result = self._record_function_with_param()
        found_test_1 = False
        found_test_2 = False
        found_test_3 = False
        found_test_4 = False
        for e in prof_result.function_events:
            if "## TEST 1 ##" == e.name:
                found_test_1 = True
                self.assertTrue(e.input_shapes == [[]])
            elif "## TEST 2 ##" == e.name:
                found_test_2 = True
                # Non-tensor args record empty shapes; the tensor arg records [3, 4, 5].
                self.assertTrue(e.input_shapes == [[], [], [], [], [], [3, 4, 5]])
            elif "## TEST 3 ##" == e.name:
                found_test_3 = True
                self.assertTrue(e.input_shapes == [])
            elif "## TEST 4 ##" == e.name:
                found_test_4 = True
                self.assertTrue(e.input_shapes == [])
        self.assertTrue(found_test_1)
        self.assertTrue(found_test_2)
        self.assertTrue(found_test_3)
        self.assertTrue(found_test_4)

    def test_datapipe_with_record_function(self):
        """DataPipe iteration emits per-pipe record_function events."""
        with _profile(with_stack=True, use_kineto=kineto_available(), record_shapes=True) as prof:
            input_dp1 = dp.iter.IterableWrapper(range(4))
            input_dp2 = dp.iter.IterableWrapper(range(4, 8))
            input_dp3 = dp.iter.IterableWrapper(range(8, 12))
            output_dp = input_dp1.mux(input_dp2, input_dp3)
            output = list(output_dp)

        has_iter = False
        has_mux = False
        for e in prof.function_events:
            if has_iter and has_mux:
                break

            if not has_iter and e.name == "enumerate(DataPipe)#IterableWrapperIterDataPipe":
                has_iter = True
            if not has_mux and e.name == "enumerate(DataPipe)#MultiplexerIterDataPipe":
                has_mux = True
        self.assertTrue(has_iter)
        self.assertTrue(has_mux)

    def test_datapipe_delegation_with_profiler(self):
        """Iterator method delegation (get_value) works through DataPipes."""
        class IDPIterator(torch.utils.data.IterDataPipe):
            def __init__(self):
                self.data = list(range(10))
                self._idx = 0

            def __iter__(self):
                return self

            def __next__(self):
                if self._idx >= 10:
                    self._idx = 0
                    raise StopIteration
                self._idx += 1
                return self.data[self._idx - 1]

            def get_value(self, idx):
                return self.data[idx]

        dp1 = IDPIterator()  # The object itself is an iterator
        self.assertEqual(5, dp1.get_value(5))
        it_dp1 = iter(dp1)  # This creates the 1st iterator
        self.assertEqual(5, it_dp1.get_value(5))  # type: ignore[attr-defined]
        self.assertEqual(list(range(10)), list(it_dp1))

        class IDPDelegator(torch.utils.data.IterDataPipe):
            def __init__(self, datapipe):
                self.datapipe = datapipe

            def __iter__(self):
                return iter(self.datapipe)

        dp2 = IDPDelegator(dp1)
        it_dp2 = iter(dp2)
        self.assertEqual(5, it_dp2.get_value(5))
        self.assertEqual(list(range(10)), list(it_dp2))

    def test_datapipe_with_record_function_fork(self):
        """DataPipe fork() children emit record_function events."""
        with _profile(with_stack=True, use_kineto=kineto_available(), record_shapes=True) as prof:
            input_dp = dp.iter.IterableWrapper(range(10))
            dp1, dp2, dp3 = input_dp.fork(num_instances=3)
            output1 = list(dp1)
        has_iter = False
        has_child = False
        for e in prof.function_events:
            if has_iter and has_child:
                break

            if not has_iter and e.name == "enumerate(DataPipe)#IterableWrapperIterDataPipe":
                has_iter = True
            if not has_child and e.name == "enumerate(DataPipe)#_ChildDataPipe":
                has_child = True
        self.assertTrue(has_iter)
        self.assertTrue(has_child)
class TestExecutionGraph(TestCase):
    """Tests for ExecutionGraphObserver output (JSON execution graphs)."""

    def payload(self, use_cuda=False):
        # Workload wrapped in record_function scopes so the captured graph
        # contains known node names ("## TEST 1 ##" / "## TEST 2 ##").
        u = torch.randn(3, 4, 5, requires_grad=True)
        with record_function("## TEST 1 ##", "1, 2, 3"):
            inf_val = float("inf")
            neg_inf_val = float("-inf")
            nan_val = float("nan")
            # Pass a mix of scalars, lists, tuples, strings, tensors, and
            # special float values to exercise argument serialization.
            rf_handle = _record_function_with_args_enter("## TEST 2 ##", 1, False, 2.5, [u, u], (u, u),
                                                         "hello", u, inf_val, neg_inf_val, nan_val)
            x = torch.randn(10, 10, requires_grad=True)
            if use_cuda:
                x = x.cuda()
            y = torch.randn(10, 10, requires_grad=True)
            if use_cuda:
                y = y.cuda()
            z = x + y + x * y + x * y
            z.backward(z)
            gelu = nn.GELU()
            m = torch.randn(2)
            _ = gelu(m)
            if use_cuda:
                z = z.cpu()
            _record_function_with_args_exit(rf_handle)

    def get_execution_graph_root(self, output_file_name):
        """Load the saved execution-graph JSON file and return its "nodes" list."""
        nodes = []
        with open(output_file_name, 'r') as f:
            eg_graph = json.load(f)
            assert "nodes" in eg_graph
            nodes = eg_graph["nodes"]
        return nodes
@unittest.skipIf(not kineto_available(), "Kineto is required")
def test_execution_graph_with_kineto(self):
    """Execution graph capture composes with a scheduled Kineto profile."""
    trace_called_num = 0

    def trace_handler(p):
        nonlocal trace_called_num
        trace_called_num += 1

    use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities()
    # Create a temp file to save execution graph data.
    fp = tempfile.NamedTemporaryFile('w+t', suffix='.json', delete=False)
    fp.close()
    expected_loop_events = 0
    eg = ExecutionGraphObserver()
    eg.register_callback(fp.name)
    with profile(
        activities=supported_activities(),
        schedule=torch.profiler.schedule(
            skip_first=3,
            wait=1,
            warmup=1,
            active=2),
        on_trace_ready=trace_handler,
    ) as p:
        eg.start()
        for idx in range(10):
            expected_loop_events += 1
            with record_function(f"## LOOP {idx} ##"):
                self.payload(use_cuda=use_cuda)
            p.step()
        eg.stop()
    eg.unregister_callback()

    # skip 3 + (wait 1, warmup 1, active 2) cycles -> handler fires twice
    # over the 10 steps above.
    assert trace_called_num == 2
    assert fp.name == eg.get_output_file_path()
    nodes = self.get_execution_graph_root(fp.name)
    loop_count = 0
    found_root_node = False
    for n in nodes:
        assert "name" in n
        if "[pytorch|profiler|execution_graph|process]" in n["name"]:
            found_root_node = True
        if n["name"].startswith("## LOOP "):
            loop_count += 1
    assert found_root_node
    assert loop_count == expected_loop_events
def test_execution_graph_alone(self):
    """Execution graph capture works without a surrounding Kineto profile."""
    use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities()
    # Create a temp file to save execution graph data.
    fp = tempfile.NamedTemporaryFile('w+t', suffix='.json', delete=False)
    fp.close()
    expected_loop_events = 0
    eg = ExecutionGraphObserver()
    eg.register_callback(fp.name)
    eg.start()
    for idx in range(5):
        expected_loop_events += 1
        with record_function(f"## LOOP {idx} ##"):
            self.payload(use_cuda=use_cuda)
    eg.stop()
    eg.unregister_callback()

    assert fp.name == eg.get_output_file_path()
    nodes = self.get_execution_graph_root(fp.name)
    loop_count = 0
    # Expected tensor object tuple size, in th form of:
    # [tensor_id, storage_id, offset, numel, itemsize, device_str]
    tensor_tuple_size = 6
    found_root_node = False
    for n in nodes:
        assert "name" in n
        if "[pytorch|profiler|execution_graph|process]" in n["name"]:
            found_root_node = True
        if n["name"].startswith("## LOOP "):
            loop_count += 1
        # Check if tensor tuple representation size is correct.
        if n["name"] == "## TEST 2 ##":
            assert len(n["inputs"][3][0]) == tensor_tuple_size
    assert found_root_node
    assert loop_count == expected_loop_events
def test_execution_graph_start_stop(self):
    """Observer can be started/stopped repeatedly; only iterations that run
    while capture is active produce LOOP nodes."""
    use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities()
    # Create a temp file to save execution graph data.
    fp = tempfile.NamedTemporaryFile('w+t', suffix='.json', delete=False)
    fp.close()
    expected_loop_events = 0
    eg = ExecutionGraphObserver()
    eg.register_callback(fp.name)
    for idx in range(10):
        # Capture windows: [3, 5) and [8, 9); unregister after the second.
        if idx == 3:
            eg.start()
        elif idx == 5:
            eg.stop()
        elif idx == 8:
            eg.start()
        elif idx == 9:
            eg.stop()
            eg.unregister_callback()
        if eg._execution_graph_running:
            expected_loop_events += 1
        with record_function(f"## LOOP {idx} ##"):
            self.payload(use_cuda=use_cuda)

    assert fp.name == eg.get_output_file_path()
    nodes = self.get_execution_graph_root(fp.name)
    loop_count = 0
    found_root_node = False
    for n in nodes:
        assert "name" in n
        if "[pytorch|profiler|execution_graph|process]" in n["name"]:
            found_root_node = True
        if n["name"].startswith("## LOOP "):
            loop_count += 1
    assert found_root_node
    assert loop_count == expected_loop_events
def test_execution_graph_repeat_in_loop(self):
    """A fresh observer can be created, used, and torn down inside a loop;
    each output file contains exactly one LOOP node and the root node."""
    use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities()
    iter_list = {3, 4, 6, 8}
    expected_loop_events = len(iter_list)
    output_files = []
    for idx in range(10):
        if idx in iter_list:
            # Create a temp file to save execution graph data.
            fp = tempfile.NamedTemporaryFile('w+t', suffix='.json', delete=False)
            fp.close()
            output_files.append(fp.name)
            eg = ExecutionGraphObserver()
            eg.register_callback(fp.name)
            eg.start()
        with record_function(f"## LOOP {idx} ##"):
            self.payload(use_cuda=use_cuda)
        if idx in iter_list:
            eg.stop()
            eg.unregister_callback()

    event_count = 0
    for eg_file in output_files:
        nodes = self.get_execution_graph_root(eg_file)
        found_root_node = False
        for n in nodes:
            assert "name" in n
            if "[pytorch|profiler|execution_graph|process]" in n["name"]:
                # The root node always has id 1 in a fresh capture.
                assert n["id"] == 1
                found_root_node = True
            if n["name"].startswith("## LOOP "):
                event_count += 1
        assert found_root_node
    assert event_count == expected_loop_events
def test_execution_graph_no_capture(self):
    """Registering and immediately unregistering the observer (no start/stop)
    must still produce a valid output file containing the process root node."""
    fp = tempfile.NamedTemporaryFile('w+t', suffix='.json', delete=False)
    fp.close()
    eg = ExecutionGraphObserver()
    eg.register_callback(fp.name)
    eg.unregister_callback()

    assert fp.name == eg.get_output_file_path()
    nodes = self.get_execution_graph_root(fp.name)
    # Fix: initialize the flag before the loop. Previously, a trace with no
    # matching node (or an empty node list) raised NameError on the final
    # assert instead of failing it cleanly.
    found_root_node = False
    for n in nodes:
        assert "name" in n
        if "[pytorch|profiler|execution_graph|process]" in n["name"]:
            found_root_node = True
    assert found_root_node
class TestProfiler(TestCase):
    """Tests for the autograd/Kineto profilers."""

    @unittest.skipIf(TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite.")
    def test_source(self):
        """Checks that source code attribution works for eager, TS and autograd mode
        """
        # avoid automatic inlining
        prev_opt = torch._C._get_graph_executor_optimize()
        torch._C._set_graph_executor_optimize(False)

        @torch.jit.script
        def ts_method_2(x, y):
            return torch.matmul(x, y)

        @torch.jit.script
        def ts_method_1(x, y, z):
            a = x + z
            w = ts_method_2(x, y) + a
            return w.sum()

        class DummyModule(nn.Module):
            def __init__(self):
                super(DummyModule, self).__init__()
                self.conv = torch.nn.Conv2d(3, 2, kernel_size=1, stride=2, padding=3, bias=False)

            def forward(self, x):
                return self.conv(x)

        mod = DummyModule()

        def call_module(x):
            return mod(x)

        with _profile(with_stack=True, use_kineto=kineto_available()) as p:
            x = torch.randn(10, 10, requires_grad=True)
            y = torch.randn(10, 10, requires_grad=True)
            z = x + y
            w = ts_method_1(x, y, z)
            v = 2 * w
            v.backward()
            a = torch.randn(2, 3, 2, 2, requires_grad=True)
            b = call_module(a)
            c = b.sum()
            c.backward()

        # Every add op (fwd or bwd) must carry a stack that points back into
        # this file and into one of the functions defined above.
        for e in p.function_events:
            if "aten::add" in e.name or "AddBackward" in e.name:
                self.assertTrue(any(["test_profiler" in entry for entry in e.stack]))
                self.assertTrue(any([(
                    "test_source" in entry or
                    "ts_method_1" in entry or
                    "ts_method_2" in entry) for entry in e.stack]))

        # TODO: https://github.com/pytorch/kineto/issues/617
        if kineto_available() and not IS_WINDOWS:
            with TemporaryFileName(mode="w+") as fname:
                p.export_chrome_trace(fname)
                with io.open(fname, 'r') as f:
                    events = json.load(f)["traceEvents"]

                def extract(pattern: str):
                    matches = [e for e in events if re.search(pattern, e["name"])]
                    self.assertEqual(len(matches), 1, repr([e["name"] for e in matches]))
                    return matches[0]

                module_event = extract(r"DummyModule_0")
                wrapper_event = extract(r"call_module")
                self.assertEqual(module_event["args"]["Python parent id"], wrapper_event["args"]["Python id"])

        torch._C._set_graph_executor_optimize(prev_opt)
def payload(self, use_cuda=False):
    """Run a small matmul + add workload, optionally on the GPU (with a
    final copy back to CPU so the device work is synchronized)."""
    def _place(t):
        # Move to the GPU only when requested.
        return t.cuda() if use_cuda else t

    lhs = _place(torch.randn(10, 10))
    rhs = _place(torch.randn(10, 10))
    out = torch.mm(lhs, rhs)
    out = out + rhs
    if use_cuda:
        out = out.cpu()
@unittest.skipIf(not kineto_available(), "Kineto is required")
def test_kineto(self):
    """Kineto-backed profiling records mm (CPU) or gemm/memcpy (CUDA) events."""
    use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities()
    with _profile(use_cuda=use_cuda, use_kineto=True):
        self.payload(use_cuda=use_cuda)

    # rerun to avoid initial start overhead
    with _profile(use_cuda=use_cuda, use_kineto=True) as p:
        self.payload(use_cuda=use_cuda)
    output = p.key_averages().table(
        sort_by="self_cuda_time_total" if use_cuda else "self_cpu_time_total", row_limit=-1)
    # print(output)
    found_gemm = False
    found_memcpy = False
    found_mm = False
    for e in p.function_events:
        if "aten::mm" in e.name:
            found_mm = True
        if "gemm" in e.name:
            found_gemm = True
        if "Memcpy" in e.name or "memcpy" in e.name:
            found_memcpy = True
    if use_cuda:
        self.assertTrue(found_gemm)
        self.assertTrue(found_memcpy)
    else:
        self.assertTrue(found_mm)
    # p.export_chrome_trace("/tmp/test_trace.json")
@unittest.skipIf(not kineto_available(), "Kineto is required")
@unittest.skipIf(not TEST_MULTIGPU, "Multiple GPUs needed")
@unittest.skipIf(TEST_WITH_ROCM, "Not supported on ROCm")
def test_kineto_multigpu(self):
    """Profiling attributes gemm kernels to the correct device index."""
    with profile(
        activities=[
            ProfilerActivity.CPU,
            ProfilerActivity.CUDA]) as prof:
        for gpu_id in [0, 1]:
            x = torch.randn(10, 10).cuda(gpu_id)
            y = torch.randn(10, 10).cuda(gpu_id)
            z = x.matmul(y)

    found_gemm_0 = False
    found_gemm_1 = False
    found_cuda = False
    for evt in prof.events():
        if "gemm" in evt.name.lower() and evt.device_type == DeviceType.CUDA:
            if evt.device_index == 0:
                found_gemm_0 = True
            elif evt.device_index == 1:
                found_gemm_1 = True
        if "cuda" in evt.name.lower() and evt.device_type == DeviceType.CPU:
            found_cuda = True

    self.assertTrue(found_gemm_0)
    self.assertTrue(found_gemm_1)
    self.assertTrue(found_cuda)
def test_memory_profiler(self):
    """Allocations/deallocations are attributed to ops and user scopes for
    CPU, CUDA (if available) and MKLDNN (if built) tensors."""
    def run_profiler(tensor_creation_fn):
        # collecting allocs / deallocs
        with _profile(profile_memory=True, record_shapes=True, use_kineto=kineto_available()) as prof:
            x = None
            with record_function("test_user_scope_alloc"):
                x = tensor_creation_fn()
            with record_function("test_user_scope_dealloc"):
                del x
        return prof.key_averages(group_by_input_shape=True)

    def check_metrics(stats, metric, allocs=None, deallocs=None):
        # allocs must show a positive metric value, deallocs a negative one.
        stat_metrics = {}
        for stat in stats:
            stat_metrics[stat.key] = getattr(stat, metric)
        if allocs is not None:
            for alloc_fn in allocs:
                self.assertTrue(alloc_fn in stat_metrics)
                self.assertTrue(stat_metrics[alloc_fn] > 0)
        if deallocs is not None:
            for dealloc_fn in deallocs:
                self.assertTrue(dealloc_fn in stat_metrics)
                self.assertTrue(stat_metrics[dealloc_fn] < 0)

    def create_cpu_tensor():
        return torch.rand(10, 10)

    def create_cuda_tensor():
        return torch.rand(10, 10).cuda()

    def create_mkldnn_tensor():
        return torch.rand(10, 10, dtype=torch.float32).to_mkldnn()

    stats = run_profiler(create_cpu_tensor)
    check_metrics(
        stats,
        "cpu_memory_usage",
        allocs=[
            "aten::empty",
            "aten::rand",
            "test_user_scope_alloc",
        ],
        deallocs=[
            "test_user_scope_dealloc",
        ]
    )

    if kineto_available():
        # Memory events must also appear in the exported chrome trace.
        with TemporaryFileName(mode="w+") as fname:
            with profile(profile_memory=True) as prof:
                x = None
                with record_function("test_user_scope_alloc"):
                    x = create_cpu_tensor()
                with record_function("test_user_scope_dealloc"):
                    del x
            prof.export_chrome_trace(fname)
            with io.open(fname, 'r') as f:
                trace = json.load(f)
                assert "traceEvents" in trace
                events = trace["traceEvents"]
                found_memory_events = False
                for evt in events:
                    assert "name" in evt
                    if evt["name"] == "[memory]":
                        found_memory_events = True
                        assert "args" in evt
                        assert "Addr" in evt["args"]
                        assert "Device Type" in evt["args"]
                        assert "Device Id" in evt["args"]
                        assert "Bytes" in evt["args"]

                        # Memory should be an instantaneous event.
                        assert "dur" not in evt["args"]
                        assert "cat" not in evt["args"]
                assert found_memory_events

    if torch.cuda.is_available():
        create_cuda_tensor()
        stats = run_profiler(create_cuda_tensor)
        check_metrics(
            stats,
            "cuda_memory_usage",
            allocs=[
                "test_user_scope_alloc",
                "aten::to",
                "aten::empty_strided",
            ],
            deallocs=[
                "test_user_scope_dealloc",
            ]
        )
        check_metrics(
            stats,
            "cpu_memory_usage",
            allocs=[
                "aten::rand",
                "aten::empty",
            ]
        )

    if torch._C.has_mkldnn:
        create_mkldnn_tensor()
        stats = run_profiler(create_mkldnn_tensor)
        check_metrics(
            stats,
            "cpu_memory_usage",
            allocs=[
                "test_user_scope_alloc",
                "aten::rand",
                "aten::empty",
                "aten::to_mkldnn",
            ],
            deallocs=[
                "test_user_scope_dealloc",
            ]
        )

    # check top-level memory events
    with _profile(profile_memory=True, use_kineto=kineto_available()) as prof:
        x = torch.rand(10, 10)
        del x
        if torch.cuda.is_available():
            y = torch.rand(10, 10).cuda()
            del y
        gc.collect()
    stats = prof.key_averages(group_by_input_shape=True)
    check_metrics(
        stats,
        "cpu_memory_usage",
        allocs=[
            "aten::rand",
            "aten::empty"
        ],
        deallocs=[
            "[memory]"
        ]
    )
    if torch.cuda.is_available():
        check_metrics(
            stats,
            "cuda_memory_usage",
            deallocs=[
                "[memory]"
            ]
        )
def test_oom_tracing(self):
    """A CUDA OOM during profiling is recorded as an [OutOfMemory] event in
    the exported chrome trace."""
    def run_profiler(tensor_creation_fn):
        with _profile(profile_memory=True, record_shapes=True) as prof:
            with self.assertRaisesRegex(RuntimeError, ".*[tT]ried to allocate.*"):
                x = tensor_creation_fn()
            return prof

    def create_cuda_tensor_oom():
        device = torch.device("cuda:0")
        # ~80 GB allocation: expected to exceed available device memory.
        return torch.empty(1024, 1024, 1024, 20, dtype=torch.float32, device=device)

    def check_trace(fname):
        prof.export_chrome_trace(fname)
        with io.open(fname, 'r') as f:
            trace = json.load(f)
            self.assertTrue("traceEvents" in trace)
            events = trace["traceEvents"]
            found_out_of_memory_events = False
            for evt in events:
                self.assertTrue("name" in evt)
                if evt["name"] == "[OutOfMemory]":
                    found_out_of_memory_events = True
                    self.assertTrue("args" in evt)
                    self.assertTrue("Device Type" in evt["args"])
                    self.assertTrue("Device Id" in evt["args"])
                    self.assertTrue("Bytes" in evt["args"])

                    # Memory should be an instantaneous event.
                    self.assertTrue("dur" not in evt["args"])
                    self.assertTrue("cat" not in evt["args"])
            self.assertTrue(found_out_of_memory_events)

    if torch.cuda.is_available():
        with TemporaryFileName(mode="w+") as fname:
            prof = run_profiler(create_cuda_tensor_oom)
            check_trace(fname)
@unittest.skipIf(not kineto_available(), "Kineto is required")
def test_module_hierarchy(self):
    """with_modules=True records a module-hierarchy path for each op in a
    scripted model's chrome trace."""
    class A(nn.Module):
        def __init__(self):
            super(A, self).__init__()

        def my_new_method(self, x):
            return x * 3

        def forward_impl_(self, x, y):
            return self.my_new_method(x) + y

        def forward(self, x, y):
            y = y - 2
            return self.forward_impl_(x, y)

    class B(nn.Module):
        def __init__(self):
            super(B, self).__init__()

        def forward(self, x):
            return x + 2

    class C(nn.Module):
        def __init__(self):
            super(C, self).__init__()
            self.A0 = A()
            self.B0 = B()

        def call_b(self, x):
            return self.B0.forward(x)

        def forward(self, x, y):
            return self.A0.forward(x, y) + self.call_b(x)

    model = C()
    model = torch.jit.script(model)
    input_a = torch.rand(128, 128)
    input_b = torch.rand(128, 128)
    # Expected hierarchy strings per aten op emitted by the model above.
    op_to_module_hierarchy = {}
    op_to_module_hierarchy["aten::sub"] = ["TOP(C)::forward.A0(A)::forward."]
    op_to_module_hierarchy["aten::mul"] = [
        "TOP(C)::forward.A0(A)::forward.SELF(A)::forward_impl_.SELF(A)::my_new_method."]
    op_to_module_hierarchy["aten::add"] = [
        "TOP(C)::forward.A0(A)::forward.SELF(A)::forward_impl_.",
        "TOP(C)::forward.SELF(C)::call_b.B0(B)::forward.", "TOP(C)::forward."]
    with TemporaryFileName(mode="w+") as fname:
        with profile(activities=[torch.profiler.ProfilerActivity.CPU], with_modules=True,) as prof:
            model(input_a, input_b)
        prof.export_chrome_trace(fname)
        with io.open(fname, 'r') as f:
            trace = json.load(f)
            assert "traceEvents" in trace
            events = trace["traceEvents"]
            found_memory_events = False
            for evt in events:
                assert "name" in evt
                if "args" in evt:
                    op_name = evt["name"]
                    if "Module Hierarchy" in evt["args"]:
                        hierarchy = evt["args"]["Module Hierarchy"]
                        if op_name in op_to_module_hierarchy:
                            assert hierarchy in op_to_module_hierarchy[op_name]
def test_high_level_trace(self):
    """Checks that python side high level events are recorded.
    """
    class RepeatedDataset(torch.utils.data.Dataset):
        def __init__(self, N, D_in, D_out):
            self.N = N
            self.x = torch.randn(N, D_in)
            self.y = torch.randn(N, D_out)

        def __len__(self):
            return self.N

        def __getitem__(self, idx):
            return self.x, self.y

    class TwoLayerNet(torch.nn.Module):
        def __init__(self, D_in, H, D_out):
            super(TwoLayerNet, self).__init__()
            self.linear1 = torch.nn.Linear(D_in, H)
            self.linear2 = torch.nn.Linear(H, D_out)

        def forward(self, x):
            h_relu = self.linear1(x).clamp(min=0)
            y_pred = self.linear2(h_relu)
            return y_pred

    class CustomSGD(torch.optim.SGD):
        def __init__(self, *args, **kwargs):
            super(CustomSGD, self).__init__(*args, **kwargs)

    def train():
        for _, data in enumerate(dataloader):
            x, y = data[0], data[1]
            y_pred = model(x)
            loss = criterion(y_pred, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    N, D_in, H, D_out = 8, 10, 5, 2
    model = TwoLayerNet(D_in, H, D_out)
    criterion = torch.nn.MSELoss(reduction='sum')
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
    ds = RepeatedDataset(N, D_in, D_out)
    dataloader = torch.utils.data.DataLoader(ds, batch_size=1)

    try:
        train()
    except Exception:
        self.assertTrue(False, "Expected no exception without profiling.")

    # Create multiple instances, expect each func is hooked only one time.
    # Nested wrappers(repeated patching) will make following test fail.
    optimizer_duplicate = torch.optim.SGD(model.parameters(), lr=1e-4)
    dataloader_duplicate = torch.utils.data.DataLoader(ds, batch_size=1)

    def judge(expected_event_count, prof):
        # Count profiled high-level ("name#subname") events and compare
        # against the expected per-key counts.
        actual_event_count = {}
        for e in prof.function_events:
            if "#" in e.name:
                key = e.name
                if key in expected_event_count.keys():
                    actual_event_count[key] = actual_event_count.setdefault(key, 0) + 1
        for key, count in expected_event_count.items():
            self.assertTrue((key in actual_event_count.keys()) and (count == actual_event_count[key]))

    with _profile(use_kineto=kineto_available()) as prof:
        train()
    expected_event_count = {
        # "+1" because the final iteration will enter __next__ but skip the loop body.
        "enumerate(DataLoader)#_SingleProcessDataLoaderIter.__next__": (N + 1),
        "Optimizer.step#SGD.step": N,
        "Optimizer.zero_grad#SGD.zero_grad": N
    }
    judge(expected_event_count, prof)

    # Test on pickle/unpickle. Expect to work in multi-processing.
    optimizer = pickle.loads(pickle.dumps(optimizer))
    with _profile(use_kineto=kineto_available()) as prof:
        train()
    judge(expected_event_count, prof)

    # Test on customized optimizer.
    optimizer = CustomSGD(model.parameters(), lr=1e-4)
    with _profile(use_kineto=kineto_available()) as prof:
        train()
    expected_event_count = {
        "enumerate(DataLoader)#_SingleProcessDataLoaderIter.__next__": (N + 1),
        "Optimizer.step#CustomSGD.step": N,
        "Optimizer.zero_grad#CustomSGD.zero_grad": N
    }
    judge(expected_event_count, prof)
def test_flops(self):
    """with_flops=True produces a "Total MFLOPs" column in the summary table
    for both the legacy and (when available) Kineto profilers."""
    model = torch.nn.Sequential(
        nn.Conv2d(16, 33, 18),
        nn.ReLU(),
        nn.Linear(243, 243),
        nn.ReLU(),
    )
    inputs = torch.randn(40, 16, 18, 260)
    with _profile(record_shapes=True, with_flops=True, use_kineto=kineto_available()) as prof:
        model(inputs)
    profiler_output = prof.key_averages(group_by_input_shape=True).table(sort_by="cpu_time_total", row_limit=10)
    self.assertIn("Total MFLOPs", profiler_output)
    if not (kineto_available() and torch.cuda.is_available()):
        return

    with profile(activities=[
            torch.profiler.ProfilerActivity.CPU,
            torch.profiler.ProfilerActivity.CUDA],
            record_shapes=True,
            with_flops=True,
    ) as kineto_profiler:
        model(inputs)
    profiler_output = kineto_profiler.key_averages().table(
        sort_by="self_cuda_time_total", row_limit=-1)
    self.assertIn("Total MFLOPs", profiler_output)
def test_kineto_profiler_api(self):
    """The scheduled profiler calls the trace handler the expected number of
    times; also sanity-checks torch.profiler.schedule step mapping."""
    called_num = [0]

    use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities()
    with profile(activities=supported_activities()):
        self.payload(use_cuda=use_cuda)

    def trace_handler(p):
        output = p.key_averages().table(
            sort_by="self_cuda_time_total" if use_cuda else "self_cpu_time_total", row_limit=-1)
        # print(output)
        # p.export_chrome_trace("/tmp/test_trace_" + str(called_num[0]) + ".json")
        called_num[0] += 1

    with profile(
        activities=supported_activities(),
        schedule=torch.profiler.schedule(
            wait=1,
            warmup=1,
            active=2),
        on_trace_ready=trace_handler
    ) as p:
        for idx in range(8):
            self.payload(use_cuda=use_cuda)
            p.step()

    # (wait 1, warmup 1, active 2) = 4-step cycle -> 2 saves over 8 steps.
    self.assertEqual(called_num[0], 2)

    # case without schedule
    with profile(
        activities=supported_activities()
    ) as p:
        self.payload(use_cuda=use_cuda)
        self.payload(use_cuda=use_cuda)

    output = p.key_averages().table(
        sort_by="self_cuda_time_total" if use_cuda else "self_cpu_time_total", row_limit=-1)
    # print(output)

    test_schedule = torch.profiler.schedule(
        skip_first=2,
        wait=1,
        warmup=1,
        active=2,
        repeat=2)
    test_schedule_expected_outputs = [
        ProfilerAction.NONE,
        ProfilerAction.NONE,
        ProfilerAction.NONE,
        ProfilerAction.WARMUP,
        ProfilerAction.RECORD,
        ProfilerAction.RECORD_AND_SAVE,
        ProfilerAction.NONE,
        ProfilerAction.WARMUP,
        ProfilerAction.RECORD,
        ProfilerAction.RECORD_AND_SAVE,
        ProfilerAction.NONE,
        ProfilerAction.NONE,
        ProfilerAction.NONE,
        ProfilerAction.NONE,
    ]
    for step in range(len(test_schedule_expected_outputs)):
        self.assertEqual(test_schedule(step), test_schedule_expected_outputs[step])
def test_export_stacks(self):
    """export_stacks writes a non-empty file in which every record ends with
    a positive integer count."""
    with _profile(with_stack=True, use_kineto=kineto_available()) as p:
        x = torch.randn(10, 10)
        y = torch.randn(10, 10)
        z = torch.mm(x, y)
        z = z + y

    with TemporaryFileName(mode="w+") as fname:
        p.export_stacks(fname)
        with io.open(fname, 'r') as f:
            lines = f.readlines()
        assert len(lines) > 0, "Empty stacks file"
        for line in lines:
            parsed_ok = False
            try:
                # Last whitespace-separated token must parse as a positive int.
                count = int(line.split(" ")[-1])
                assert count > 0, "Invalid stacks record"
                parsed_ok = True
            except ValueError:
                pass
            assert parsed_ok, "Invalid stacks record"
@unittest.skipIf(not kineto_available(), "Kineto is required")
@unittest.skipIf(IS_WINDOWS, "Test is flaky on Windows")
def test_tensorboard_trace_handler(self):
    """tensorboard_trace_handler writes one correctly-named trace file per
    completed schedule cycle, both plain JSON and gzip."""
    use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities()
    with _profile(use_cuda=use_cuda, use_kineto=True):
        self.payload(use_cuda=use_cuda)

    with TemporaryDirectoryName() as dname:
        with profile(
            activities=[
                torch.profiler.ProfilerActivity.CPU
            ] + ([
                torch.profiler.ProfilerActivity.CUDA
            ] if use_cuda else []),
            schedule=torch.profiler.schedule(
                wait=1,
                warmup=1,
                active=2,
                repeat=3),
            on_trace_ready=torch.profiler.tensorboard_trace_handler(dname)
        ) as p:
            for _ in range(18):
                self.payload(use_cuda=use_cuda)
                p.step()

        self.assertTrue(os.path.exists(dname))
        file_num = 0
        for file_name in os.listdir(dname):
            parts = file_name.split('.')
            self.assertTrue(len(parts) > 4)
            self.assertTrue(parts[-4].isdigit() and int(parts[-4]) > 0, "Wrong tracing file name pattern")
            self.assertEqual(parts[-3:], ['pt', 'trace', 'json'])
            file_num += 1
        # repeat=3 -> exactly three trace files.
        self.assertEqual(file_num, 3)

    # test case for gzip file format
    with TemporaryDirectoryName() as dname:
        p = profile(
            activities=[
                torch.profiler.ProfilerActivity.CPU
            ] + ([
                torch.profiler.ProfilerActivity.CUDA
            ] if use_cuda else []),
            schedule=torch.profiler.schedule(
                wait=1,
                warmup=1,
                active=2,
                repeat=3),
            on_trace_ready=torch.profiler.tensorboard_trace_handler(dname, use_gzip=True)
        )
        p.start()
        for _ in range(18):
            self.payload(use_cuda=use_cuda)
            p.step()
        p.stop()

        self.assertTrue(os.path.exists(dname))
        file_num = 0
        for file_name in os.listdir(dname):
            parts = file_name.split('.')
            self.assertTrue(len(parts) > 4)
            self.assertTrue(parts[-5].isdigit() and int(parts[-5]) > 0, "Wrong tracing file name pattern")
            self.assertEqual(parts[-4:], ['pt', 'trace', 'json', 'gz'])
            file_num += 1
        self.assertEqual(file_num, 3)
@unittest.skipIf(not kineto_available(), "Kineto is required")
def test_profiler_metadata(self):
    """add_metadata / add_metadata_json entries appear in the exported trace."""
    t1, t2 = torch.ones(1), torch.ones(1)
    with profile() as prof:
        torch.add(t1, t2)
        prof.add_metadata("test_key1", "test_value1")
        prof.add_metadata_json("test_key2", "[1,2,3]")

    with TemporaryFileName(mode="w+") as fname:
        prof.export_chrome_trace(fname)
        with io.open(fname, 'r') as f:
            trace = json.load(f)
            assert "test_key1" in trace
            assert trace["test_key1"] == "test_value1"
            assert "test_key2" in trace
            # JSON metadata is parsed back into a list, not kept as a string.
            assert trace["test_key2"] == [1, 2, 3]
def _test_profiler_tracing(self, use_kineto):
    """Export chrome traces (normal, empty, and CUDA if available) and
    verify each one is valid JSON."""
    with _profile(use_kineto=use_kineto) as prof:
        t1, t2 = torch.ones(1), torch.ones(1)
        torch.add(t1, t2)

    with TemporaryFileName(mode="w+") as fname:
        prof.export_chrome_trace(fname)
        # read the trace and expect valid json
        # if the JSON generated by export_chrome_trace is not valid, this will throw and fail the test.
        with io.open(fname, 'r') as f:
            json.load(f)

    # test empty trace
    with _profile(use_kineto=use_kineto) as prof:
        pass
    # saving an empty trace
    with TemporaryFileName(mode="w+") as fname:
        prof.export_chrome_trace(fname)

    # Same test but for cuda.
    use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities()
    if not use_cuda:
        return

    device = torch.device("cuda:0")
    with _profile(use_cuda=True, use_kineto=use_kineto) as prof:
        t1, t2 = torch.ones(1, device=device), torch.ones(1, device=device)
        torch.add(t1, t2)

    with TemporaryFileName(mode="w+") as fname:
        prof.export_chrome_trace(fname)
        # Now validate the json
        with io.open(fname, 'r') as f:
            json.load(f)
def test_profiler_tracing(self):
    """Exercise trace export with the legacy backend and, when Kineto is
    built in, with Kineto as well."""
    backends = [False]
    if kineto_available():
        backends.append(True)
    for use_kineto in backends:
        self._test_profiler_tracing(use_kineto)
@unittest.skip("Disable forward->backward link to workaround profiler crash")
def test_profiler_fwd_bwd_link(self):
    """Forward and backward ops are linked by fwd_bwd flow events in the
    exported chrome trace (currently skipped, see decorator)."""
    with _profile(use_kineto=True) as prof:
        t1, t2 = torch.ones(1, requires_grad=True), torch.ones(1, requires_grad=True)
        z = torch.add(t1, t2)
        y = torch.ones(1)
        loss = torch.nn.functional.binary_cross_entropy_with_logits(z, y)
        loss.backward()
    with TemporaryFileName(mode="w+") as fname:
        prof.export_chrome_trace(fname)
        with io.open(fname, 'r') as f:
            j = json.load(f)
            events = j["traceEvents"]
            ts_to_name = {}
            flow_s_to_ts = {}
            flow_f_to_ts = {}
            for e in events:
                # "X" events are complete (duration) events; record their names.
                if e["ph"] == "X":
                    ts_to_name[e["ts"]] = e["name"]
                # "s"/"f" pairs are flow start/finish markers for fwd->bwd links.
                if "cat" in e and "name" in e and e["cat"] == "forward_backward" and e["name"] == "fwd_bwd":
                    if e["ph"] == "s":
                        flow_s_to_ts[e["id"]] = e["ts"]
                    elif e["ph"] == "f":
                        flow_f_to_ts[e["id"]] = e["ts"]
            self.assertTrue(len(flow_s_to_ts) == 2)
            self.assertTrue(len(flow_f_to_ts) == 2)
            self.assertTrue(1 in flow_s_to_ts.keys())
            self.assertTrue(1 in flow_f_to_ts.keys())
            self.assertTrue(2 in flow_s_to_ts.keys())
            self.assertTrue(2 in flow_f_to_ts.keys())
            s_ts_1 = flow_s_to_ts[1]
            f_ts_1 = flow_f_to_ts[1]
            s_ts_2 = flow_s_to_ts[2]
            f_ts_2 = flow_f_to_ts[2]
            self.assertTrue(all([ts in ts_to_name.keys() for ts in [s_ts_1, f_ts_1, s_ts_2, f_ts_2]]))
            self.assertTrue(ts_to_name[s_ts_1] == "aten::binary_cross_entropy_with_logits")
            self.assertTrue(ts_to_name[s_ts_2] == "aten::add")
def test_profiler_type(self):
    """_profiler_type reports which profiler backend (if any) is active."""
    query_type = torch._C._autograd._profiler_type
    ProfilerTypes = torch._C._autograd.ActiveProfilerType

    # Nothing running yet.
    self.assertEqual(query_type(), ProfilerTypes.NONE)

    # Legacy autograd profiler.
    with _profile_legacy():
        self.assertEqual(query_type(), ProfilerTypes.LEGACY)

    # Kineto profiler.
    with profile():
        self.assertEqual(query_type(), ProfilerTypes.KINETO)
def test_profiler_correlation_id(self):
    '''
    We expect the correlation_id to be unique across multiple invocations of the profiler,
    So we will reuse id_uniqueness_set.
    '''
    id_uniqueness_set = set()
    model = torch.nn.Sequential(
        nn.Conv2d(16, 33, 18),
        nn.ReLU(),
        nn.Linear(243, 243),
        nn.ReLU(),
    )
    inputs = torch.randn(40, 16, 18, 260)
    # Correlation ids are stored as 32-bit values on the kineto side.
    uint32_max = 2**32 - 1
    for i in range(5):
        with profile() as prof:
            model(inputs)
        for event in prof.profiler.kineto_results.events():
            corr_id = event.correlation_id()
            if (corr_id):
                self.assertTrue(corr_id not in id_uniqueness_set)
                id_uniqueness_set.add(corr_id)
                self.assertTrue(corr_id < uint32_max)
def test_nested_tensor_with_shapes(self):
    """record_shapes captures non-empty input shapes for ops on nested tensors."""
    a = torch.randn(4, 4)
    b = torch.randn(4, 4)
    c = torch.randn(4, 4)
    inp = torch.nested_tensor([a, b])
    with torch.profiler.profile(record_shapes=True) as prof:
        torch.nn.functional.linear(inp, c, None)
    for e in prof.events():
        if e.name in ("aten::mm", "aten::addmm"):
            # intentionally vague tests to protect against possible future changes
            # of mm to addmm or other impl, or changing internal order of args
            self.assertTrue(len(e.input_shapes) > 0)
            self.assertTrue(len(e.input_shapes[0]) > 0)
def find_node_with_name(nodes, name):
    """Return the first node (pre-order depth-first) whose ``name()`` equals
    *name*, searching ``nodes`` and their ``children`` recursively; ``None``
    when no node matches."""
    stack = list(nodes)
    stack.reverse()  # pop() from the end -> visit in original order
    while stack:
        current = stack.pop()
        if current.name() == name:
            return current
        # Push children reversed so the first child is visited next.
        stack.extend(reversed(list(current.children)))
    return None
class TestTorchTidyProfiler(TestCase):
    """Tests for the experimental profiler event tree and its extra fields."""

    def test_extra_fields(self):
        """Event-tree nodes expose typed extra_fields (TorchOp, PyCCall,
        Allocation) matching the op hierarchy of torch.ones."""
        with profile(with_stack=True, profile_memory=True) as p:
            _ = torch.ones((1,))

        nodes = p.profiler.kineto_results.experimental_event_tree()
        node = find_node_with_name(nodes, "aten::ones")
        self.assertIsNotNone(node)

        self.assertIsInstance(
            node.extra_fields,
            torch._C._autograd._ExtraFields_TorchOp)

        self.assertIsInstance(
            node.parent.extra_fields,
            torch._C._autograd._ExtraFields_PyCCall)

        self.assertEqual(node.children[0].name(), "aten::empty")
        self.assertEqual(node.children[0].children[0].name(), "[memory]")
        self.assertIsInstance(
            node.children[0].children[0].extra_fields,
            torch._C._autograd._ExtraFields_Allocation)

    def test_tensor_properties(self):
        """Recorded op inputs carry shape, dtype, layout, and device metadata."""
        x = torch.ones(10, 10).as_strided([4, 4], [12, 3])
        y = torch.ones(4, 1)

        with profile(with_stack=True, profile_memory=True, record_shapes=True) as p:
            _ = x + y

        nodes = p.profiler.kineto_results.experimental_event_tree()
        node = find_node_with_name(nodes, "aten::add")
        self.assertIsNotNone(node)

        self.assertIsInstance(
            node.extra_fields,
            torch._C._autograd._ExtraFields_TorchOp)

        self.assertEqual(node.extra_fields.inputs.shapes, [[4, 4], [4, 1], []])

        input_info = node.extra_fields.inputs
        self.assertEqual(input_info.dtypes, ['float', 'float', 'Scalar'])

        layout_info = [x.layout if x else None for x in input_info.tensor_metadata]
        self.assertEqual(layout_info, [torch.strided, torch.strided, None])
        device_info = [x.device if x else None for x in input_info.tensor_metadata]
        self.assertEqual(device_info, [torch.device("cpu"), torch.device("cpu"), None])

    def test_scalar_ins(self):
        """Python scalar arguments are recorded with their promoted dtype and
        scalar ivalues are captured."""
        x = torch.ones(5, 5)
        alpha = 0.9

        with profile(with_stack=True, profile_memory=True, record_shapes=True) as p:
            _ = torch.add(x, 9.1, alpha=alpha)

        nodes = p.profiler.kineto_results.experimental_event_tree()
        node = find_node_with_name(nodes, "aten::add")
        self.assertIsNotNone(node)

        # The second argument to the add gets promoted to a zerodim Tensor
        input_info = node.extra_fields.inputs
        self.assertEqual(input_info.dtypes, ['float', 'double', 'Scalar'])
        self.assertEqual(input_info.shapes, [[5, 5], [], []])
        self.assertEqual(input_info.ivalues, [None, None, alpha])
@dataclass(frozen=True)
class MockKinetoEvent:
    """Stand-in for a Kineto event.

    Mirrors the accessor API of the real C++ Kineto event object so it can
    be fed to ``torch.profiler._utils`` in tests. ``_device_type`` is stored
    as an int (1 == CUDA, anything else == CPU).
    """
    _name: str
    _start_us: int
    _duration_us: int
    _linked_correlation_id: int
    _device_type: int

    def name(self) -> str:
        return self._name

    def start_us(self) -> int:
        return self._start_us

    def duration_us(self) -> int:
        return self._duration_us

    def linked_correlation_id(self) -> int:
        return self._linked_correlation_id

    def device_type(self) -> DeviceType:
        # Decode the stored int flag back into the profiler enum.
        return DeviceType.CUDA if self._device_type == 1 else DeviceType.CPU
@dataclass(frozen=True)
class MockProfilerEvent:
    """Stand-in for a CPU-side profiler event-tree node.

    ``parent``/``children`` links cannot be supplied at construction time
    (events are built before the tree is wired up), so they are patched in
    afterwards via ``__post__init__``. NOTE: the double underscore is
    deliberate — this is NOT the dataclass ``__post_init__`` hook; callers
    invoke it manually once all events exist.
    """
    _name: str
    id: int
    start_time_ns: int
    duration_time_ns: int
    correlation_id: int = 0
    children: List["MockProfilerEvent"] = field(default_factory=list)
    parent: Optional["MockProfilerEvent"] = None

    @property
    def end_time_ns(self):
        # Convenience accessor matching the real profiler event API.
        return self.start_time_ns + self.duration_time_ns

    def name(self) -> str:
        return self._name

    def __post__init__(self, parent, children):
        # The dataclass is frozen, so bypass the immutability guard to wire
        # up the tree links after construction.
        object.__setattr__(self, "parent", parent)
        object.__setattr__(self, "children", children)
class TestExperimentalUtils(TestCase):
    """Tests for ``torch.profiler._utils`` built on mocked profiler output."""

    @staticmethod
    def generate_mock_profile():
        """Build a mock profiler with hand-crafted kineto and CPU events.

        Timestamps are chosen so queue-depth and idle-time computations have
        known answers. Kineto times are in microseconds, profiler (CPU)
        times in nanoseconds.
        """
        cuda_events = [
            MockKinetoEvent("cudaLaunchKernel", 400, 100, 1, 0),
            MockKinetoEvent("cudaLaunchKernel", 500, 100, 2, 0),
            MockKinetoEvent("cudaLaunchKernel", 600, 100, 3, 0),
            MockKinetoEvent("cudaLaunchKernel", 700, 100, 4, 0),
            MockKinetoEvent("cudaLaunchKernel", 800, 100, 5, 0),
            MockKinetoEvent("cudaLaunchKernel", 1500, 100, 6, 0),
            MockKinetoEvent("GPU", 900, 100, 1, 1),
            MockKinetoEvent("GPU", 1000, 100, 2, 1),
            MockKinetoEvent("GPU", 1100, 100, 3, 1),
            MockKinetoEvent("GPU", 1200, 100, 4, 1),
            MockKinetoEvent("GPU", 1300, 100, 5, 1),
            MockKinetoEvent("GPU", 1700, 100, 6, 1)
        ]
        cpu_events = [
            MockProfilerEvent("CPU (Before cudaLaunchKernel)", 1, 0, 100000),
            MockProfilerEvent("CPU (Before cudaLaunchKernel)", 2, 100000,
                              100000),
            MockProfilerEvent("CPU (Before cudaLaunchKernel)", 3, 200000,
                              100000),
            MockProfilerEvent("CPU (Before cudaLaunchKernel)", 4, 300000,
                              100000),
            MockProfilerEvent("CPU (After cudaLaunchKernel)", 5, 400000,
                              100000),
            MockProfilerEvent("CPU (After cudaLaunchKernel)", 6, 500000,
                              100000),
            MockProfilerEvent("CPU (After cudaLaunchKernel)", 7, 600000,
                              100000),
            MockProfilerEvent("CPU (After cudaLaunchKernel)", 8, 700000,
                              100000),
            MockProfilerEvent("CPU (After GPU)", 9, 800000, 100000),
            MockProfilerEvent("CPU (After GPU)", 10, 900000, 100000),
            MockProfilerEvent("CPU (After GPU)", 11, 1100000, 100000),
            MockProfilerEvent("CPU (After GPU)", 12, 1200000, 500000),
        ]
        # Only the two kineto_results accessors used by BasicEvaluation are
        # mocked out.
        profiler = unittest.mock.Mock()
        profiler.kineto_results = unittest.mock.Mock()
        profiler.kineto_results.events = unittest.mock.Mock(
            return_value=cuda_events)
        profiler.kineto_results.experimental_event_tree = unittest.mock.Mock(
            return_value=cpu_events)
        return profiler
@staticmethod
def load_mock_profile():
    """Load (or, under expecttest ACCEPT mode with CUDA, regenerate) a mock
    profiler from recorded JSON events.

    The JSON file holds two lists: kineto event dicts and profiler event
    dicts. Profiler events reference parents/children by id and are re-linked
    after construction via ``MockProfilerEvent.__post__init__``.
    """
    accept = expecttest.ACCEPT
    json_file_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        "profiler_utils_mock_events.json")
    if accept and torch.cuda.is_available():

        def garbage_code(x):
            # Slow per-element writes to create a visible CPU-bound gap.
            for i in range(5):
                x[0, i] = i

        x = torch.ones((4096, 4096), device="cuda")
        x = x @ x
        with profile(
                activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
                record_shapes=True,
                with_stack=True) as prof:
            for _ in range(5):
                x = x @ x
            garbage_code(x)
            for _ in range(5):
                x = x @ x

        # Serialize kineto events into the dict layout MockKinetoEvent uses.
        kineto_events = [{
            '_name': e.name(),
            '_start_us': e.start_us(),
            '_duration_us': e.duration_us(),
            '_linked_correlation_id': e.linked_correlation_id(),
            '_device_type': 1 if e.device_type() == DeviceType.CUDA else 0
        } for e in prof.profiler.kineto_results.events()]

        def EventTreeDFS(event_tree):
            # Stack-based traversal yielding every node in the tree.
            from collections import deque
            stack = deque(event_tree)
            while stack:
                curr_event = stack.pop()
                yield curr_event
                for child_event in curr_event.children:
                    stack.append(child_event)

        # Flatten the tree; parent/children are stored as ids so the tree
        # can be reconstructed on load.
        profiler_events = [{
            '_name': e.name(),
            'id': e.id,
            'start_time_ns': e.start_time_ns,
            'duration_time_ns': e.duration_time_ns,
            'correlation_id': e.correlation_id,
            'children': [child.id for child in e.children],
            'parent': e.parent.id if e.parent else None
        } for e in EventTreeDFS(
            prof.profiler.kineto_results.experimental_event_tree())]

        with open(json_file_path, "w") as f:
            json.dump([kineto_events, profiler_events], f)

    assert os.path.exists(json_file_path)
    with open(json_file_path, "r") as f:
        kineto_events, profiler_events = json.load(f)

    # Dict insertion order matches the serialization order above, so
    # positional construction is safe here (Python 3.7+ ordered dicts).
    cuda_events = [
        MockKinetoEvent(*event.values()) for event in kineto_events
    ]
    cpu_events = []
    id_map = {}
    for e in profiler_events:
        event = MockProfilerEvent(**e)
        id_map[event.id] = event
        cpu_events.append(event)

    # Re-link the tree: replace stored ids with the actual event objects.
    for event in cpu_events:
        parent = None if event.parent is None else id_map[event.parent]
        children = [id_map[child] for child in event.children]
        event.__post__init__(parent, children)
    # The event tree exposes only root events; children hang off of them.
    cpu_events = [event for event in cpu_events if event.parent is None]

    profiler = unittest.mock.Mock()
    profiler.kineto_results = unittest.mock.Mock()
    profiler.kineto_results.events = unittest.mock.Mock(
        return_value=cuda_events)
    profiler.kineto_results.experimental_event_tree = unittest.mock.Mock(
        return_value=cpu_events)
    return profiler
def test_utils_compute_self_time(self):
    """self_time of every event == its duration minus the durations of its
    direct children."""
    with profile() as prof:
        t1, t2 = torch.ones(1, requires_grad=True), torch.ones(
            1, requires_grad=True)
        z = torch.add(t1, t2)
        y = torch.ones(1)
        loss = torch.nn.functional.binary_cross_entropy_with_logits(z, y)
        loss.backward()
    basic_eval = _utils.BasicEvaluation(prof.profiler)
    metrics = basic_eval.metrics
    self.assertTrue(len(metrics) > 0)
    for event_key, event_metrics in metrics.items():
        self.assertEqual(
            event_metrics.self_time_ns,
            event_key.event.duration_time_ns - sum(
                child.duration_time_ns
                for child in event_key.event.children))

def test_utils_intervals_overlap(self):
    """An event spanning [5, 10) should have 5ns of overlap with the given
    interval set (overlapping intervals are only counted once)."""
    event = _utils.EventKey(MockProfilerEvent("Event 1", 1, 5, 5))
    intervals = [
        _utils.Interval(0, 9),
        _utils.Interval(1, 2),
        _utils.Interval(2, 3),
        _utils.Interval(3, 4),
        _utils.Interval(4, 5),
        _utils.Interval(8, 12),
    ]
    # (Removed a leftover debug print() of the overlap value.)
    self.assertEqual(event.intervals_overlap(intervals), 5)
def test_utils_compute_queue_depth(self):
    """Queue depth over the mocked trace must match the hand-computed trace
    (5 launches queue up, drain on GPU completion, one more launch)."""
    def format_queue_depth(queue_depth_list, events):
        res = ""
        for data, event in zip(queue_depth_list, events):
            res += f"{data.queue_depth} [{event.name()}]\n"
        return res

    # We have to use Mock because time series data is too flaky to test
    profiler = self.generate_mock_profile()
    basic_evaluation = _utils.BasicEvaluation(profiler)
    self.assertExpectedInline(
        format_queue_depth(basic_evaluation.queue_depth_list,
                           basic_evaluation.cuda_events), """\
1 [cudaLaunchKernel]
2 [cudaLaunchKernel]
3 [cudaLaunchKernel]
4 [cudaLaunchKernel]
5 [cudaLaunchKernel]
4 [GPU]
3 [GPU]
2 [GPU]
1 [GPU]
0 [GPU]
1 [cudaLaunchKernel]
0 [GPU]
""")
    self.assertExpectedInline(
        format_queue_depth([
            basic_evaluation.metrics[k]
            for k in basic_evaluation.event_keys
        ], basic_evaluation.events), """\
0 [CPU (Before cudaLaunchKernel)]
0 [CPU (Before cudaLaunchKernel)]
0 [CPU (Before cudaLaunchKernel)]
0 [CPU (Before cudaLaunchKernel)]
1 [CPU (After cudaLaunchKernel)]
2 [CPU (After cudaLaunchKernel)]
3 [CPU (After cudaLaunchKernel)]
4 [CPU (After cudaLaunchKernel)]
5 [CPU (After GPU)]
4 [CPU (After GPU)]
2 [CPU (After GPU)]
1 [CPU (After GPU)]
""")

def test_utils_compute_queue_depth_when_no_cuda_events(self):
    # For traces with only cpu events, we expect empty queue depth list
    x = torch.ones((1024, 1024))
    with profile() as prof:
        for _ in range(5):
            x = x @ x
    basic_evaluation = _utils.BasicEvaluation(prof.profiler)
    self.assertFalse(basic_evaluation.compute_queue_depth())
def test_utils_compute_idle_time(self):
    """Idle time is attributed to CPU events while the GPU queue is empty."""
    profiler = self.generate_mock_profile()
    basic_evaluation = _utils.BasicEvaluation(profiler)
    expected_output = "\n".join([
        f"{basic_evaluation.metrics[event_key].idle_time_ns} [{event_key.event.name()}]"
        for event_key in basic_evaluation.event_keys
    ])
    self.assertExpectedInline(
        expected_output, """\
100000 [CPU (Before cudaLaunchKernel)]
100000 [CPU (Before cudaLaunchKernel)]
100000 [CPU (Before cudaLaunchKernel)]
100000 [CPU (Before cudaLaunchKernel)]
0 [CPU (After cudaLaunchKernel)]
0 [CPU (After cudaLaunchKernel)]
0 [CPU (After cudaLaunchKernel)]
0 [CPU (After cudaLaunchKernel)]
0 [CPU (After GPU)]
0 [CPU (After GPU)]
0 [CPU (After GPU)]
100000 [CPU (After GPU)]""")

def test_utils_get_optimizable_events(self):
    """Top-2 optimizable events from the recorded mock profile are stable."""
    basic_evaluation = _utils.BasicEvaluation(self.load_mock_profile())
    optimizable_events = basic_evaluation.get_optimizable_events(
        2, print_enable=False)
    expected_output = "\n".join(
        [f"{event_key.event.name()}" for event_key in optimizable_events])
    self.assertExpectedInline(
        expected_output, """\
<built-in function _cuda_synchronize>
aten::copy_""")
def test_profiler_name_pattern(self):
    """NamePattern should match exactly the five aten::mm calls."""
    x = torch.ones((4096, 4096))
    with profile() as prof:
        for _ in range(5):
            x = x @ x
            x = x + x
    matched_events = NamePattern(prof, "aten::mm").matched_events()
    output = "\n".join([f"{event.name()}" for event in matched_events])
    self.assertExpectedInline(output, """\
aten::mm
aten::mm
aten::mm
aten::mm
aten::mm""")

def test_profiler_pattern_match_helper(self):
    """Exercise the tree-navigation helpers on Pattern (siblings/root/next/prev)."""
    x = torch.ones((100, 100))
    with profile() as prof:
        for _ in range(5):
            x = x @ x
            x = x + x
    event_tree = prof.profiler.kineto_results.experimental_event_tree()
    pattern = Pattern(prof)
    # siblings_of returns (events before, events after) the given node.
    self.assertEqual([], pattern.siblings_of(event_tree[0])[0])
    self.assertEqual(event_tree[1:], pattern.siblings_of(event_tree[0])[1])
    child_nodes = event_tree[0].children
    self.assertEqual([], pattern.siblings_of(child_nodes[0])[0])
    self.assertEqual(child_nodes[1:], pattern.siblings_of(child_nodes[0])[1])
    self.assertEqual(event_tree[0],
                     pattern.root_of(event_tree[0].children[0].children[0]))
    # The last root has no successor.
    self.assertEqual(None, pattern.next_of(event_tree[-1]))
    self.assertEqual(event_tree[1], pattern.next_of(event_tree[0]))
    self.assertEqual(event_tree[0], pattern.prev_of(event_tree[1]))
@unittest.skipIf(TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite.")
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
def test_profiler_extra_cuda_copy_pattern(self):
    """Each case pairs the expected match count with a callable; creating on
    CPU then moving to CUDA should match, creating directly on CUDA (or
    copies unrelated to device transfer) should not."""
    cases = (
        (0, lambda: torch.ones((100, 100), device="cuda")),
        (1, lambda: torch.ones((100, 100)).to("cuda")),
        (1, lambda: torch.zeros((100, 100)).to("cuda")),
        (1, lambda: torch.empty((100, 100)).fill_(5).to("cuda")),
        (1, lambda: torch.ones((100, 100)).cuda()),
        (1, lambda: torch.zeros((100, 100)).cuda()),
        (1, lambda: torch.empty((100, 100)).fill_(5).cuda()),
        (1, lambda: torch.rand((100, 100)).cuda()),
        (1, lambda: torch.randn((100, 100)).cuda()),
        (1, lambda: torch.full((100, 100), 10).cuda()),
        (0, lambda: torch.rand((100, 100)).to(dtype=torch.float16)),
        (0, lambda: torch.rand((100, 100)).half()),
        (0, lambda: torch.rand((100, 100), device="cuda").half()),
    )
    num_matched = []
    for _, fn in cases:
        with profile(with_stack=True, record_shapes=True) as prof:
            fn()
        pattern = ExtraCUDACopyPattern(prof)
        num_matched.append(len(pattern.matched_events()))
    self.assertEqual(num_matched, [i for i, _ in cases])

@unittest.skipIf(TEST_WITH_CROSSREF,
                 "crossref intercepts calls and changes the callsite.")
def test_profiler_for_loop_indexing_pattern(self):
    """Python-level per-index tensor access should match; a loop of whole-
    tensor ops (case4) should not."""
    x = torch.ones((100, 100))

    def case1():
        for i in range(100):
            x[i] = i

    def case2():
        y = 0
        for i in range(100):
            y += x[i]

    def case3():
        y = 1
        for i in range(100):
            y *= x[i]

    def case4():
        y = x
        for _ in range(100):
            y = y @ x

    def case5():
        for i in range(100):
            x[i, :] = torch.arange(100) + i

    cases = ((1, case1), (1, case2), (1, case3), (0, case4), (1, case5))
    num_matched = []
    for _, fn in cases:
        with profile(with_stack=True) as prof:
            fn()
        pattern = ForLoopIndexingPattern(prof)
        num_matched.append(len(pattern.matched_events()))
    self.assertEqual(num_matched, [i for i, _ in cases])
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
def test_profiler_fp32_matmul_pattern(self):
    """The FP32 matmul anti-pattern should fire exactly when the device
    supports TF32 (pattern.skip is set on unsupported hardware)."""
    x = torch.ones((100, 100), device="cuda")
    with profile(with_stack=True) as prof:
        x = x @ x
    pattern = FP32MatMulPattern(prof)
    has_tf32 = 0 if pattern.skip else 1
    num_matched = len(pattern.matched_events())
    self.assertEqual(num_matched, has_tf32)

@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
def test_profiler_extra_cuda_copy_pattern_benchmark(self):
    """benchmark() should produce one speedup-factor entry per distinct shape."""
    with profile(with_stack=True, record_shapes=True) as prof:
        x = torch.ones((100, 100)).to("cuda")
        x = torch.ones((50, 50)).to("cuda")
    pattern = ExtraCUDACopyPattern(prof)
    shapes_factor_map = pattern.benchmark(pattern.matched_events())
    # Two different input shapes -> two entries.
    self.assertEqual(len(shapes_factor_map), 2)
def test_profiler_optimizer_single_tensor_pattern(self):
    """Single-tensor optimizers should match; foreach=True variants should not.

    The lambdas reference ``model`` by late binding — ``model`` is (re)bound
    inside the loop before each lambda is invoked.
    """
    x = torch.ones((100, 100))
    cases = (
        (1, lambda: torch.optim.Adam(model.parameters())),
        (1, lambda: torch.optim.SGD(model.parameters(), lr=0.01)),
        (1, lambda: torch.optim.AdamW(model.parameters())),
        (0, lambda: torch.optim.Adam(model.parameters(), foreach=True)),
        (0, lambda: torch.optim.SGD(model.parameters(), lr=0.01, foreach=True)),
        (0, lambda: torch.optim.AdamW(model.parameters(), foreach=True)),
    )
    num_matched = []
    for _, fn in cases:
        with profile(with_stack=True) as prof:
            model = nn.Sequential(
                nn.Linear(100, 100),
                nn.ReLU(),
                nn.Linear(100, 10),
            )
            optimizer = fn()
            optimizer.zero_grad()
            y_hat = model(x)
            loss = torch.nn.functional.cross_entropy(
                y_hat, torch.randint(0, 10, (100,)))
            loss.backward()
            optimizer.step()
        pattern = OptimizerSingleTensorPattern(prof)
        num_matched.append(len(pattern.matched_events()))
    self.assertEqual(num_matched, [i for i, _ in cases])

def test_profiler_synchronized_dataloader_pattern(self):
    """Only the synchronous (num_workers=0) DataLoader should match."""
    dataset = torch.rand((100, 100))
    sync_dataloader = torch.utils.data.DataLoader(dataset, batch_size=10)
    async_dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=10, num_workers=4)
    with profile(with_stack=True) as prof:
        next(iter(sync_dataloader))
        next(iter(async_dataloader))
    pattern = SynchronizedDataLoaderPattern(prof)
    num_matched = len(pattern.matched_events())
    self.assertEqual(num_matched, 1)
def test_profiler_grad_not_set_to_none_pattern(self):
    """zero_grad() without set_to_none=True should be flagged."""
    x = torch.ones((100, 100))
    model = nn.Sequential(
        nn.Linear(100, 100),
        nn.ReLU(),
        nn.Linear(100, 10),
    )
    optimizer = torch.optim.Adam(model.parameters())
    cases = (
        (1, lambda: optimizer.zero_grad()),
        (1, lambda: model.zero_grad()),
        (0, lambda: optimizer.zero_grad(set_to_none=True)),
        (0, lambda: model.zero_grad(set_to_none=True))
    )
    num_matched = []
    for _, fn in cases:
        with profile(with_stack=True) as prof:
            y_hat = model(x)
            loss = torch.nn.functional.cross_entropy(
                y_hat, torch.randint(0, 10, (100,)))
            loss.backward()
            optimizer.step()
            fn()
        pattern = GradNotSetToNonePattern(prof)
        num_matched.append(len(pattern.matched_events()))
    self.assertEqual(num_matched, [i for i, _ in cases])

def test_profiler_conv2d_bias_followed_by_batchnorm2d_pattern(self):
    """Conv2d with bias feeding BatchNorm2d should match; bias=False or no
    following BatchNorm should not."""
    x = torch.randn((1, 3, 32, 32))
    cases = (
        (1, nn.Sequential(nn.Conv2d(3, 3, 3, 1, 1), nn.BatchNorm2d(3))),
        (0, nn.Sequential(nn.Conv2d(3, 3, 3, 1, 1, bias=False), nn.BatchNorm2d(3))),
        (0, nn.Sequential(nn.Conv2d(3, 3, 3, 1, 1)))
    )
    num_matched = []
    for _, model in cases:
        with profile(with_stack=True, record_shapes=True) as prof:
            model(x)
        pattern = Conv2dBiasFollowedByBatchNorm2dPattern(prof)
        num_matched.append(len(pattern.matched_events()))
    self.assertEqual(num_matched, [i for i, _ in cases])
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
def test_profiler_matmul_dim_fp16_pattern(self):
    """fp16 matmuls with dims not divisible by 8 should be flagged."""
    cases = (
        (1, torch.randn((201, 201), device='cuda', dtype=torch.float16)),
        (1, torch.randn((3, 97, 97), device='cuda', dtype=torch.float16)),
        (0, torch.randn((200, 200), device='cuda', dtype=torch.float16)),
        (0, torch.randn((3, 200, 200), device='cuda', dtype=torch.float16))
    )
    num_matched = []
    for _, x in cases:
        with profile(with_stack=True, record_shapes=True) as prof:
            x @ x
        pattern = MatMulDimInFP16Pattern(prof)
        num_matched.append(len(pattern.matched_events()))
    self.assertEqual(num_matched, [i for i, _ in cases])

def test_profiler_pattern_matcher_json_report(self):
    """The JSON report must group findings by file and carry the expected
    per-event fields."""
    x = torch.ones((100, 100))
    model = nn.Sequential(
        nn.Linear(100, 100),
        nn.ReLU(),
        nn.Linear(100, 10),
    )
    optimizer = torch.optim.Adam(model.parameters())
    with profile(with_stack=True, record_shapes=True) as prof:
        y_hat = model(x)
        loss = torch.nn.functional.cross_entropy(
            y_hat, torch.randint(0, 10, (100,)))
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
    report_all_anti_patterns(prof, json_report_dir=".", print_enable=False)
    try:
        with open("./torchtidy_report.json") as f:
            report = json.load(f)
        self.assertTrue("test_profiler.py" in report)
        self.assertTrue(len(report["test_profiler.py"]) > 0)
        expected_fields = sorted(["line_number", "name", "url", "message"])
        for event in report["test_profiler.py"]:
            actual_fields = sorted(event.keys())
            self.assertEqual(expected_fields, actual_fields)
    finally:
        # Always clean up the report file, even if an assertion fails.
        os.remove("torchtidy_report.json")
if __name__ == '__main__':
run_tests()
| pytorch-master | test/test_profiler.py |
# Owner(s): ["module: unknown"]
import glob
import io
import os
import unittest
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
try:
from third_party.build_bundled import create_bundled
except ImportError:
create_bundled = None
# Bundled third-party license file, relative to a source checkout.
license_file = 'third_party/LICENSES_BUNDLED.txt'
# Sentinel text expected near the top of the bundled-licenses document.
starting_txt = 'The Pytorch repository and source distributions bundle'
# site-packages directory containing the installed torch package (if any).
site_packages = os.path.dirname(os.path.dirname(torch.__file__))
# Wheel-install metadata directories; empty when torch is not pip-installed.
distinfo = glob.glob(os.path.join(site_packages, 'torch-*dist-info'))
class TestLicense(TestCase):
    """Checks that the bundled third-party license text is up to date and
    shipped in wheel metadata."""

    @unittest.skipIf(not create_bundled, "can only be run in a source tree")
    def test_license_for_wheel(self):
        """The checked-in LICENSES_BUNDLED.txt must match a fresh regeneration."""
        current = io.StringIO()
        create_bundled('third_party', current)
        with open(license_file) as fid:
            src_tree = fid.read()
        if src_tree != current.getvalue():
            raise AssertionError(
                f'the contents of "{license_file}" do not '
                'match the current state of the third_party files. Use '
                '"python third_party/build_bundled.py" to regenerate it')

    @unittest.skipIf(len(distinfo) == 0, "no installation in site-package to test")
    def test_distinfo_license(self):
        """If run when pytorch is installed via a wheel, the license will be in
        site-package/torch-*dist-info/LICENSE. Make sure it contains the third
        party bundle of licenses"""
        if len(distinfo) > 1:
            raise AssertionError('Found too many "torch-*dist-info" directories '
                                 f'in "{site_packages}, expected only one')
        # NOTE: the original wrapped this path in a redundant double
        # os.path.join; a single join is equivalent.
        with open(os.path.join(distinfo[0], 'LICENSE')) as fid:
            txt = fid.read()
        self.assertTrue(starting_txt in txt)
if __name__ == '__main__':
run_tests()
| pytorch-master | test/test_license.py |
# Owner(s): ["module: typing"]
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_NUMPY, load_tests
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
import torch
import unittest
if TEST_NUMPY:
import numpy as np
class TestDTypeInfo(TestCase):
    """Validate torch.iinfo/torch.finfo against NumPy's equivalents."""

    def test_invalid_input(self):
        """iinfo rejects non-integral dtypes; finfo rejects non-float dtypes."""
        for dtype in [torch.float16, torch.float32, torch.float64, torch.bfloat16, torch.complex64, torch.complex128, torch.bool]:
            with self.assertRaises(TypeError):
                _ = torch.iinfo(dtype)
        for dtype in [torch.int64, torch.int32, torch.int16, torch.int8, torch.uint8, torch.bool]:
            with self.assertRaises(TypeError):
                _ = torch.finfo(dtype)

    @unittest.skipIf(not TEST_NUMPY, "Numpy not found")
    def test_iinfo(self):
        """torch.iinfo fields must agree with np.iinfo for every int dtype."""
        for dtype in [torch.int64, torch.int32, torch.int16, torch.int8, torch.uint8]:
            x = torch.zeros((2, 2), dtype=dtype)
            xinfo = torch.iinfo(x.dtype)
            xn = x.cpu().numpy()
            xninfo = np.iinfo(xn.dtype)
            self.assertEqual(xinfo.bits, xninfo.bits)
            self.assertEqual(xinfo.max, xninfo.max)
            self.assertEqual(xinfo.min, xninfo.min)
            self.assertEqual(xinfo.dtype, xninfo.dtype)

    @unittest.skipIf(not TEST_NUMPY, "Numpy not found")
    def test_finfo(self):
        """torch.finfo fields must agree with np.finfo; finfo() with no
        argument follows the current default dtype."""
        initial_default_type = torch.get_default_dtype()
        # The loop mutates the global default dtype; restore it in a finally
        # block so a failing assertion cannot leak state into other tests.
        try:
            for dtype in [torch.float16, torch.float32, torch.float64, torch.complex64, torch.complex128]:
                x = torch.zeros((2, 2), dtype=dtype)
                xinfo = torch.finfo(x.dtype)
                xn = x.cpu().numpy()
                xninfo = np.finfo(xn.dtype)
                self.assertEqual(xinfo.bits, xninfo.bits)
                self.assertEqual(xinfo.max, xninfo.max)
                self.assertEqual(xinfo.min, xninfo.min)
                self.assertEqual(xinfo.eps, xninfo.eps)
                self.assertEqual(xinfo.tiny, xninfo.tiny)
                self.assertEqual(xinfo.resolution, xninfo.resolution)
                self.assertEqual(xinfo.dtype, xninfo.dtype)
                if not dtype.is_complex:
                    torch.set_default_dtype(dtype)
                    self.assertEqual(torch.finfo(dtype), torch.finfo())

            # Special test case for BFloat16 type (no NumPy counterpart).
            x = torch.zeros((2, 2), dtype=torch.bfloat16)
            xinfo = torch.finfo(x.dtype)
            self.assertEqual(xinfo.bits, 16)
            self.assertEqual(xinfo.max, 3.38953e+38)
            self.assertEqual(xinfo.min, -3.38953e+38)
            self.assertEqual(xinfo.eps, 0.0078125)
            self.assertEqual(xinfo.tiny, 1.17549e-38)
            self.assertEqual(xinfo.tiny, xinfo.smallest_normal)
            self.assertEqual(xinfo.resolution, 0.01)
            self.assertEqual(xinfo.dtype, "bfloat16")
            torch.set_default_dtype(x.dtype)
            self.assertEqual(torch.finfo(x.dtype), torch.finfo())
        finally:
            # Restore the default type to ensure that the test has no side effect
            torch.set_default_dtype(initial_default_type)
if __name__ == '__main__':
run_tests()
| pytorch-master | test/test_type_info.py |
import torch.distributed as c10d
import torch
import argparse
import os
import logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Simple script to simulate NCCL errors. The script is '
        'supposed to be run on multiple different nodes simultaneously with '
        'appropriate rank and world_size. The script run an allreduce() on '
        'the rank 0 node and aborts all the other nodes to simulate an error '
        'in NCCL')
    parser.add_argument('addr', help='address of the master node to connect to.')
    parser.add_argument('port', help='port of the master node to connect to.')
    parser.add_argument('rank', help='rank of this node')
    parser.add_argument('world_size', help='number of nodes in process group')
    args = parser.parse_args()
    rank = int(args.rank)
    world_size = int(args.world_size)
    port = int(args.port)

    # Rank 0 hosts the TCPStore used for rendezvous; all ranks connect to it.
    store = c10d.TCPStore(args.addr, port, world_size, rank == 0)
    process_group = c10d.ProcessGroupNCCL(store, rank, world_size)
    logging.info('Running first allreduce')
    process_group.allreduce(torch.rand(10).cuda(rank)).wait()
    if rank == 0:
        # Only rank 0 issues the second collective; the other ranks abort
        # below, so this allreduce can never complete normally — that is the
        # simulated NCCL failure.
        logging.info('Running second allreduce only on rank 0')
        work = process_group.allreduce(torch.rand(10).cuda(rank))
        logging.info('Waiting for allreduce to complete...')
        work.wait()
        logging.info('Second allreduce successful: {}'.format(work.is_success()))
    else:
        # os.abort() kills the process without cleanup, mimicking a hard crash.
        logging.info('Aborting all other ranks.')
        os.abort()
| pytorch-master | test/simulate_nccl_errors.py |
import argparse
import torch
class Module(torch.nn.Module):
    """Minimal single-Conv2d model used to trigger MKL-DNN verbose logging."""

    def __init__(self):
        # Modern zero-argument super() — equivalent to super(Module, self).
        super().__init__()
        self.conv = torch.nn.Conv2d(1, 10, 5, 1)

    def forward(self, x):
        """Apply the convolution; (N, 1, H, W) -> (N, 10, H-4, W-4)."""
        y = self.conv(x)
        return y
def run_model(level):
    """Run one eval-mode forward pass with MKL-DNN verbosity set to ``level``."""
    m = Module().eval()
    d = torch.rand(1, 1, 112, 112)
    # The context manager enables oneDNN verbose output for the duration of
    # the forward call only.
    with torch.backends.mkldnn.verbose(level):
        m(d)

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--verbose-level", default=0, type=int)
    args = parser.parse_args()
    try:
        run_model(args.verbose_level)
    except Exception as e:
        # The harness parses stdout, so report failures there instead of
        # letting the traceback escape.
        print(e)
| pytorch-master | test/mkldnn_verbose.py |
# -*- coding: utf-8 -*-
# Owner(s): ["module: unknown"]
from torch.testing._internal.common_utils import run_tests, IS_ARM64
# Kernels
from ao.sparsity.test_kernels import TestQuantizedSparseKernels # noqa: F401
from ao.sparsity.test_kernels import TestQuantizedSparseLayers # noqa: F401
# Parametrizations
from ao.sparsity.test_parametrization import TestFakeSparsity # noqa: F401
# Sparsifier
from ao.sparsity.test_sparsifier import TestBaseSparsifier # noqa: F401
from ao.sparsity.test_sparsifier import TestWeightNormSparsifier # noqa: F401
from ao.sparsity.test_sparsifier import TestNearlyDiagonalSparsifier # noqa: F401
# Pruner
from ao.sparsity.test_pruner import TestBasePruner # noqa: F401
# Scheduler
from ao.sparsity.test_scheduler import TestScheduler # noqa: F401
# Composability
if not IS_ARM64:
from ao.sparsity.test_composability import TestComposability # noqa: F401
from ao.sparsity.test_composability import TestFxComposability # noqa: F401
# Utilities
from ao.sparsity.test_sparsity_utils import TestSparsityUtilFunctions # noqa: F401
# Data Sparsifier
from ao.sparsity.test_data_sparsifier import TestBaseDataSparsifier # noqa: F401
from ao.sparsity.test_data_sparsifier import TestNormDataSparsifiers # noqa: F401
from ao.sparsity.test_data_sparsifier import TestQuantizationUtils # noqa: F401
# Data Scheduler
from ao.sparsity.test_data_scheduler import TestBaseDataScheduler # noqa: F401
# Activation Sparsifier
from ao.sparsity.test_activation_sparsifier import TestActivationSparsifier # noqa: F401
if __name__ == '__main__':
run_tests()
| pytorch-master | test/test_ao_sparsity.py |
# Owner(s): ["oncall: mobile"]
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
class TestSetDefaultMobileCPUAllocator(TestCase):
    """set/unset of the default mobile CPU allocator must pair up correctly."""

    def test_no_exception(self):
        # A balanced set/unset pair is valid.
        torch._C._set_default_mobile_cpu_allocator()
        torch._C._unset_default_mobile_cpu_allocator()

    def test_exception(self):
        # Unset without a prior set is an error.
        with self.assertRaises(Exception):
            torch._C._unset_default_mobile_cpu_allocator()

        # Setting twice in a row (without unsetting) is an error.
        with self.assertRaises(Exception):
            torch._C._set_default_mobile_cpu_allocator()
            torch._C._set_default_mobile_cpu_allocator()

        # Must reset to good state
        # For next test.
        torch._C._unset_default_mobile_cpu_allocator()

        # Unsetting twice after a single set is an error.
        with self.assertRaises(Exception):
            torch._C._set_default_mobile_cpu_allocator()
            torch._C._unset_default_mobile_cpu_allocator()
            torch._C._unset_default_mobile_cpu_allocator()
if __name__ == '__main__':
run_tests()
| pytorch-master | test/test_set_default_mobile_cpu_allocator.py |
# Owner(s): ["module: mkldnn"]
import torch
import unittest
import itertools
import torch.nn as nn
import torch.nn.functional as F
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import run_tests, TEST_SCIPY, IS_WINDOWS, IS_MACOS
LLGA_FUSION_GROUP = 'prim::oneDNNFusionGroup'
LLGA_NOT_ENABLED = not torch._C.has_mkldnn or IS_WINDOWS or IS_MACOS
def warmup_forward(f, *args, profiling_count=2):
    """Invoke ``f(*args)`` ``profiling_count`` times and return the last result.

    Used to warm up JIT profiling runs before inspecting the optimized graph.
    Returns ``None`` when ``profiling_count`` is not positive (the original
    raised UnboundLocalError in that case).
    """
    results = None
    for _ in range(profiling_count):
        results = f(*args)
    return results
class JitLlgaTestCase(JitTestCase):
    """Base class for oneDNN Graph (LLGA) fusion tests: enables the fuser for
    each test and provides tracing/graph-inspection helpers."""

    def setUp(self):
        torch.jit.enable_onednn_fusion(True)

    def tearDown(self):
        # Always disable again so other test files are unaffected.
        torch.jit.enable_onednn_fusion(False)

    def checkTrace(self, m, x, *args, **kwargs):
        """Trace (and freeze) ``m`` on inputs ``x``, check the traced output
        against eager, and return ``(traced_module, forward_graph)``."""
        if isinstance(m, torch.nn.Module):
            m.eval()
        with torch.no_grad(), \
                torch._jit_internal._disable_emit_hooks():
            traced = torch.jit.trace(m, x)
            if isinstance(m, torch.nn.Module):
                traced = torch.jit.freeze(traced)
            # Warm up so the profiling executor produces the optimized graph.
            warmup_forward(traced, *x)
            fwd_graph = traced.graph_for(*x)

            ref_o = m(*x)
            jit_o = traced(*x)
            self.assertEqual(jit_o, ref_o)
        return traced, fwd_graph

    def assertFused(self, graph, fused_patterns):
        """Assert that none of ``fused_patterns`` remain as standalone nodes
        (i.e. they were absorbed into fusion groups)."""
        for pat in fused_patterns:
            self.assertGraphContainsExactly(graph, pat, 0)
# torchvision is optional; some models used by these tests need it. A
# RuntimeError can also be raised on import when torchvision was built
# against an incompatible torch, so it is treated the same as "not installed".
try:
    import torchvision
    HAS_TORCHVISION = True
except ImportError:
    HAS_TORCHVISION = False
except RuntimeError:
    HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, 'no torchvision')

def get_eltwise_fn(name):
    """Resolve an elementwise op by name, preferring ``torch.<name>`` over
    ``torch.nn.functional.<name>``. Raises NameError if neither exists."""
    if hasattr(torch, name):
        return getattr(torch, name)
    elif hasattr(F, name):
        return getattr(F, name)
    else:
        raise NameError('Eltwise function %s not found' % name)
@unittest.skipIf(LLGA_NOT_ENABLED, "MKL-DNN build is disabled")
class TestOp(JitLlgaTestCase):
    """Per-operator oneDNN Graph (LLGA) partition-creation coverage."""

    def test_conv2d(self):
        """Conv2d should always form exactly one fusion group, across a sweep
        of spatial sizes, channels, kernel/padding/stride/dilation, groups
        and bias settings."""
        for [spatial, in_channels, out_channels, kernel, padding, stride, dilation, g, bias] in itertools.product(
                [7, 8],
                [8, 15],
                [7, 16],
                [3, 4],
                [0, 2],
                [1, 2],
                [1, 2],
                [1, 2],
                [True, False]):
            m = nn.Conv2d(in_channels=in_channels * g,
                          out_channels=out_channels * g,
                          kernel_size=kernel,
                          padding=padding,
                          stride=stride,
                          dilation=dilation,
                          groups=g,
                          bias=bias)
            x = torch.rand(1, in_channels * g, spatial, spatial)
            _, graph = self.checkTrace(m, [x])
            self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
def test_bn2d(self):
    m = nn.BatchNorm2d(32).eval()
    x = torch.rand(1, 32, 28, 28)
    _, graph = self.checkTrace(m, [x])
    # A single-op partition shouldn't be created for a lone batchnorm.
    # (The original comment said "softmax" — a copy-paste slip.)
    self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)

def test_eltwise(self):
    class M(nn.Module):
        def __init__(self, eltwise_fn):
            super().__init__()
            self.eltwise = eltwise_fn

        def forward(self, x):
            return self.eltwise(x)

    for eltwise in ['relu', 'gelu']:
        eltwise_fn = get_eltwise_fn(eltwise)
        m = M(eltwise_fn)
        x = torch.rand(1, 32, 28, 28)
        _, graph = self.checkTrace(m, [x])
        # single-op partition shouldn't be created.
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)

def test_max_pool2d(self):
    for [spatial, kernel, padding, stride, dilation, ceil_mode] in itertools.product(
            [15, 16, 17, 18, 19],
            [4, 5],
            [0, 1, 2],
            [1, 2],  # [1, 2, 4], TODO: fix issue in pad calculation
            [1],     # [1, 2], TODO: backend support for dilation
            [True, False]):
        m = nn.MaxPool2d(kernel_size=kernel,
                         stride=stride,
                         padding=padding,
                         dilation=dilation,
                         ceil_mode=ceil_mode)
        x = torch.rand(1, 4, spatial, spatial)
        _, graph = self.checkTrace(m, [x])
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
def test_avg_pool2d(self):
    for [spatial, kernel, padding, stride, ceil_mode, count_include_pad] in itertools.product(
            [15, 16, 17, 18, 19],
            [4, 5],
            [0, 1, 2],
            [1, 2, 4],
            [False],  # TODO: oneDNN Graph does not fully support ceil_mode=True
            [True, False]):
        m = nn.AvgPool2d(kernel_size=kernel,
                         stride=stride,
                         padding=padding,
                         ceil_mode=ceil_mode,
                         count_include_pad=count_include_pad)
        x = torch.rand(1, 4, spatial, spatial)
        _, graph = self.checkTrace(m, [x])
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)

def test_variable_kernel_avg_pool2d(self):
    class M(nn.Module):
        def forward(self, x):
            # Kernel size depends on the input's runtime shape.
            x = F.avg_pool2d(x, kernel_size=(x.size(2), x.size(3)),
                             padding=0, count_include_pad=False)
            return x

    x = torch.randn(1, 1000, 1, 1)
    m = M()
    _, graph = self.checkTrace(m, [x])
    # kernel_size is not Constant, shouldn't have any LLGA_FUSION_GROUP
    # TODO: with shape specialization, should have 1 LLGA_FUSION_GROUP
    self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)

def test_softmax(self):
    for dim in [-4, -3, -2, -1, 0, 1, 2, 3]:
        m = nn.Softmax(dim=dim)
        x = torch.rand(8, 12, 12, 12)
        _, graph = self.checkTrace(m, [x])
        # single-op partition shouldn't be created for softmax
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)
def test_linear(self):
for bias in [True, False]:
x = torch.rand(32, 28)
m = torch.nn.Linear(in_features=28, out_features=64, bias=bias)
_, graph = self.checkTrace(m, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ['aten::linear'])
def _gen_binary_inputs(self, gen_permute=True):
for xshape, yshape in [
[[1, 32, 28, 28], [1, 32, 28, 28]],
[[1, 32, 28, 28], [1, 1, 28, 28]],
[[1, 32, 28, 28], [28]],
[[1, 32, 28, 28], [1]],
]:
yield torch.rand(xshape), torch.rand(yshape)
if gen_permute and xshape != yshape:
yield torch.rand(yshape), torch.rand(xshape)
def test_add(self):
    """torch.add with an alpha multiplier fuses for every broadcastable
    input pair produced by _gen_binary_inputs."""
    def forward_add(x, y):
        return torch.add(x, y, alpha=2)

    for x, y in self._gen_binary_inputs():
        _, graph = self.checkTrace(forward_add, [x, y])
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
def test_add_scalar(self):
    """A chain of scalar additions around a tensor fuses into one group."""
    def add_scalar(t):
        return 42 + t + 3.14

    inp = torch.rand(32, 32)
    _, graph = self.checkTrace(add_scalar, [inp])
    self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
def test_addmm(self):
    """addmm (matmul + bias) should form a single-op LLGA partition."""
    def addmm(x, y, z):
        # alpha and beta are 1, by default
        return torch.addmm(z, x, y)

    x = torch.rand(64, 32)
    y = torch.rand(32, 32)
    z = torch.rand(64, 32)
    _, graph = self.checkTrace(addmm, [x, y, z])
    # single-op partition should be created for matmul with bias.
    self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
def test_mul(self):
    """Tensor mul followed by a scalar mul fuses into one LLGA group for
    every broadcastable input pair."""
    def forward_mul(x, y):
        return torch.mul(x, y) * 3

    for x, y in self._gen_binary_inputs():
        _, graph = self.checkTrace(forward_mul, [x, y])
        # single-op partitions shouldn't be created
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
def test_identity_binary(self):
    """Identity-like binary ops (mul by 1, add 0.0) must still be fused
    rather than eliminated before fusion."""
    def identity_chain(t):
        return t * 1 + 0.0

    inp = torch.rand(32)
    _, graph = self.checkTrace(identity_chain, [inp])
    self.assertFused(graph, ['aten::add', 'aten::mul'])
def test_layer_norm(self):
    """LayerNorm over the last dim fuses into one LLGA group."""
    # TODO: support more normalized_shape
    m = torch.nn.LayerNorm(10)
    x = torch.randn(2, 5, 10, 10)
    _, graph = self.checkTrace(m, [x])
    self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
def test_cat(self):
    """torch.cat of three identical tensors fuses for every rank/dim combo."""
    def make_cat(dim):
        # bind dim so each traced callable concatenates along a fixed axis
        def forward_cat(*tensors):
            return torch.cat(tensors, dim)
        return forward_cat

    shapes = [
        [8, 8, 8, 8],
        [64, 8, 32],
        [2048, 64],
    ]
    for shape in shapes:
        for dim in range(len(shape)):
            x = torch.rand(shape)
            _, graph = self.checkTrace(make_cat(dim), [x, x, x])
            self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
def test_typecheck(self):
    """A traced, fused graph must fall back gracefully (correct results)
    when the input shape changes after tracing."""
    x = torch.rand(32, 28)
    m = torch.nn.Linear(in_features=28, out_features=64, bias=True)
    traced, graph = self.checkTrace(m, [x])
    self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
    self.assertFused(graph, ['aten::linear'])
    # change the shape of the input, we should enter fallback graph
    x = torch.rand(5, 28)
    self.assertEqual(m(x), traced(x))
@unittest.skipIf(LLGA_NOT_ENABLED, "MKL-DNN build is disabled")
class TestFusionPattern(JitLlgaTestCase):
    """Pattern-level LLGA fusion tests: conv/bn/linear combined with
    elementwise ops, residual sums, and partition-boundary cases."""

    def test_conv2d_eltwise(self):
        """conv -> eltwise chains fuse into one group per conv; in-place
        eltwise variants are de-mutated before fusion."""
        class M(nn.Module):
            def __init__(self, eltwise_fn):
                super(M, self).__init__()
                self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
                self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=False)
                self.eltwise = eltwise_fn

            def forward(self, x):
                x = self.conv1(x)
                x = self.eltwise(x)
                x = self.conv2(x)
                x = self.eltwise(x)
                return x

        # for eltwise in ['relu', 'sigmoid', 'sqrt', 'abs', 'square', 'hardtanh']:
        for eltwise in ['relu']:
            for inplace in [True, False]:
                eltwise_fn_name = eltwise + '_' if inplace else eltwise
                eltwise_fn = get_eltwise_fn(eltwise_fn_name)
                m = M(eltwise_fn)
                x = torch.rand(1, 32, 28, 28)
                _, graph = self.checkTrace(m, [x])
                self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
                # test if relu_ is replace with relu by mutation removal pass
                self.assertFused(graph, ['aten::' + eltwise_fn_name])
                # test if relu is fused into the fusion group
                self.assertFused(graph, ['aten::' + eltwise])

    def test_conv2d_bn(self):
        """conv + batch_norm fuses into a single LLGA group in eval mode."""
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
                self.bn1 = nn.BatchNorm2d(32)

            def forward(self, x):
                x = self.conv1(x)
                x = self.bn1(x)
                return x

        m = M().eval()
        x = torch.rand(1, 32, 28, 28)
        _, graph = self.checkTrace(m, [x])
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
        self.assertFused(graph, ['aten::_convolution', 'aten::batch_norm'])

    def test_conv2d_bn_relu(self):
        """conv + batch_norm + relu fuses into one LLGA group in eval mode."""
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
                self.bn1 = nn.BatchNorm2d(32)

            def forward(self, x):
                x = self.conv1(x)
                x = self.bn1(x)
                x = F.relu(x)
                return x

        m = M().eval()
        x = torch.rand(1, 32, 28, 28)
        _, graph = self.checkTrace(m, [x])
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
        self.assertFused(graph, ['aten::_convolution', 'aten::batch_norm',
                                 'aten::relu'])

    def test_bn2d_eltwise(self):
        """batch_norm -> eltwise fuses into one LLGA group in eval mode."""
        class M(nn.Module):
            def __init__(self, eltwise_fn):
                super(M, self).__init__()
                self.eltwise = eltwise_fn
                self.bn = nn.BatchNorm2d(32)

            def forward(self, x):
                x = self.bn(x)
                x = self.eltwise(x)
                return x

        for eltwise in ['relu']:
            eltwise_fn = get_eltwise_fn(eltwise)
            m = M(eltwise_fn).eval()
            x = torch.rand(1, 32, 28, 28)
            _, graph = self.checkTrace(m, [x])
            self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
            self.assertFused(graph, ['aten::' + eltwise])

    def test_linear_eltwise(self):
        """linear -> eltwise fuses for several activations, with and
        without bias."""
        class M(nn.Module):
            def __init__(self, eltwise_fn, bias):
                super(M, self).__init__()
                self.linear = nn.Linear(28, 64, bias)
                self.eltwise = eltwise_fn

            def forward(self, x):
                x = self.linear(x)
                x = self.eltwise(x)
                return x

        for [has_bias, eltwise] in itertools.product(
                [True, False],
                ['relu', 'gelu', 'sigmoid', 'hardtanh', 'relu6', 'elu']):
            eltwise_fn = get_eltwise_fn(eltwise)
            m = M(eltwise_fn, has_bias)
            x = torch.rand(32, 28, requires_grad=False)
            _, graph = self.checkTrace(m, [x])
            self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
            self.assertFused(graph, ['aten::' + eltwise])

    def test_conv2d_sum(self):
        """Two conv+bn branches joined by add+relu, followed by a third
        conv+bn: expect three fusion groups."""
        class M(nn.Module):
            def __init__(self, bias=False):
                super(M, self).__init__()
                self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=bias)
                self.bn1 = nn.BatchNorm2d(32)
                self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=bias)
                self.bn2 = nn.BatchNorm2d(32)
                self.relu = nn.ReLU()
                self.conv3 = nn.Conv2d(32, 32, 3, padding=1, bias=bias)
                self.bn3 = nn.BatchNorm2d(32)

            def forward(self, x, y):
                x = self.conv1(x)
                x = self.bn1(x)
                y = self.conv2(y)
                y = self.bn2(y)
                z = self.relu(x + y)
                z = self.conv3(z)
                z = self.bn3(z)
                return z

        for bias in [True, False]:
            m = M(bias).eval()
            x = torch.rand(1, 32, 16, 16, requires_grad=False)
            y = torch.rand(1, 32, 16, 16, requires_grad=False)
            _, graph = self.checkTrace(m, [x, y])
            self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 3)

    def test_wildcard(self):
        """A conv output also consumed by ListConstruct (a wildcard op)
        keeps the following relu out of the conv's partition."""
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
                self.eltwise = nn.ReLU()

            def forward(self, x):
                x = self.conv1(x)
                y = self.eltwise(x)
                return [x, y]

        # The pattern is as the following:
        #       conv
        #      |    \
        # eltwise    \
        #      |      \
        #    ListConstruct
        #
        # The output of conv is used by a wildcard op: ListConstruct.
        # Thus conv-eltwise cannot be selected into the same Partition.
        m = M()
        x = torch.rand(1, 32, 28, 28)
        _, graph = self.checkTrace(m, [x])
        # conv can exist in a single-op oneDNN Graph partition but not relu
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
        self.assertFused(graph, ['aten::_convolution'])

    def test_rewrap_tensor_input_to_pytorch(self):
        """Partition outputs handed back to PyTorch ops must carry correct
        (channels-last) stride information."""
        class M(nn.Module):
            def __init__(self, eltwise_fn, data_type):
                super(M, self).__init__()
                self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True, dtype=data_type)
                self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=True, dtype=data_type)
                self.eltwise = eltwise_fn
                self.adaptive_avg_pool_2d = nn.AdaptiveAvgPool2d((5, 7))

            def forward(self, x, y):
                x = self.conv1(x)
                x = self.eltwise(x)
                x = self.conv2(x)
                x = self.eltwise(x)
                x = torch.add(x, y)
                x = self.adaptive_avg_pool_2d(x)
                return x

        eltwise_fn_name = 'relu'
        eltwise_fn = get_eltwise_fn(eltwise_fn_name)
        # Add bfloat16 later
        for data_type in [torch.float]:
            m = M(eltwise_fn, data_type)
            m = m.to(memory_format=torch.channels_last)
            x = torch.rand(1, 32, 28, 28, dtype=data_type).to(memory_format=torch.channels_last)
            y = torch.rand(1, 32, 28, 28, dtype=data_type).to(memory_format=torch.channels_last)
            # Simply test if the output is accurate
            # The output of the second partition is input to adaptive_avg_pool2d, which is
            # unsupported by LLGA, so it must be handled by PyTorch, which should receive
            # correct strides info of the channels-last tensor.
            graph, _ = self.checkTrace(m, [x, y])
@unittest.skipIf(LLGA_NOT_ENABLED, "MKL-DNN build is disabled")
class TestModel(JitLlgaTestCase):
    """End-to-end LLGA fusion checks over torchvision classification models
    (concrete test methods are generated by the loop below the class)."""

    @skipIfNoTorchVision
    def _test_vision(self, model_name):
        # instantiate the named torchvision architecture in eval mode and
        # require that the common ops all land inside fusion groups
        m = getattr(torchvision.models, model_name)().eval()
        x = torch.rand(1, 3, 224, 224) / 10
        _, graph = self.checkTrace(m, [x])
        self.assertFused(graph, ['aten::_convolution', 'aten::batch_norm',
                                 'aten::relu', 'aten::linear',
                                 'aten::avg_pool2d', 'aten::max_pool2d'])
# Dynamically attach one TestModel.test_vision_<name> method per torchvision
# architecture. `enabled` gates models whose extra dependency (e.g. scipy for
# googlenet) may be unavailable.
for model_name, enabled in [
    ['resnet50', True],
    ['resnext50_32x4d', True],
    ['resnext101_32x8d', True],
    ['densenet121', True],
    ['googlenet', TEST_SCIPY],
    ['mobilenet_v2', True],
    ['mnasnet1_0', True],
    ['squeezenet1_0', True],
    ['vgg16', True],
    ['alexnet', True],
    ['shufflenet_v2_x1_0', True],
    ['wide_resnet50_2', True],
]:
    def wrapper(mname):
        # bind the current model name via the `mname` parameter so each
        # generated test traces its own model (avoids the late-binding
        # closure pitfall); `enabled` is read when wrapper() is called,
        # which happens within the same loop iteration
        @unittest.skipIf(not enabled, 'Disabled')
        def test(self):
            return self._test_vision(mname)
        return test
    setattr(TestModel, 'test_vision_%s' % model_name, wrapper(model_name))
if __name__ == '__main__':
    # run all TestCase classes defined above via common_utils' runner
    run_tests()
| pytorch-master | test/test_jit_llga_fuser.py |
# Owner(s): ["module: mta"]
import itertools
from numbers import Number
import random
import re
import torch
import unittest
from torch.testing import make_tensor
from torch.testing._comparison import default_tolerances
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, TEST_WITH_SLOW
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, onlyCUDA, skipMeta, ops)
from torch.testing._internal.common_methods_invocations import (
foreach_unary_op_db, foreach_binary_op_db, foreach_pointwise_op_db, foreach_minmax_op_db,
foreach_reduce_op_db)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, all_types_and, integral_types, complex_types,
floating_types_and, floating_types, integral_types_and,
)
# Includes some values such that N * N won't be a multiple of 4,
# which should ensure we test the vectorized and non-vectorized
# kernel code paths.
# Larger tensor-list sizes are exercised only when slow tests are enabled.
N_values = [20, 23] if not TEST_WITH_SLOW else [23, 30, 300]
# One representative scalar per Python numeric kind (int, float, bool,
# complex), randomized where possible so reruns cover different values.
Scalars = (
    random.randint(1, 10),
    1.0 - random.random(),
    True,
    complex(1.0 - random.random(), 1.0 - random.random()),
)
def getScalarLists(N):
    """Return (type_name, scalar_list) pairs, each list of length N.

    Covers homogeneous int/float/complex/bool lists plus two mixed-type
    lists (one with a leading bool). Requires N >= 4.
    """
    int_list = [random.randint(0, 9) + 1 for _ in range(N)]
    float_list = [1.0 - random.random() for _ in range(N)]
    complex_list = [complex(1.0 - random.random(), 1.0 - random.random()) for _ in range(N)]
    bool_list = [True] * N
    mixed_list = [1, 2.0, 3.0 + 4.5j] + [3.0] * (N - 3)
    mixed_with_bool = [True, 1, 2.0, 3.0 + 4.5j] + [3.0] * (N - 4)
    return (
        ("int", int_list),
        ("float", float_list),
        ("complex", complex_list),
        ("bool", bool_list),
        ("mixed", mixed_list),
        ("mixed", mixed_with_bool),
    )
# Prefix of the error PyTorch raises for boolean subtraction; kept as a
# constant so tests can assert on it (via re.escape) in both foreach and
# reference paths.
_BOOL_SUB_ERR_MSG = "Subtraction, the `-` operator"
class RegularFuncWrapper:
    """Applies a regular (non-foreach) reference function element-wise.

    Mirrors the semantics of the ``torch._foreach_*`` APIs by mapping
    ``self.func`` over zipped per-tensor inputs, broadcasting a scalar
    second operand (or a scalar ``values``) to a per-element list.

    Fix vs. the original: a scalar second operand is now expanded into a
    local list instead of mutating the caller's ``inputs`` argument.
    """

    def __init__(self, func):
        self.func = func

    def __call__(self, inputs, values=None, **kwargs):
        if values is not None:
            # pointwise ops (addcmul/addcdiv-style): three tensor lists plus
            # one `value` per element; a single Number applies to all
            assert len(inputs) == 3
            if isinstance(values, Number):
                values = [values] * len(inputs[0])
            return [self.func(*i, value=values[idx], **kwargs)
                    for idx, i in enumerate(zip(*inputs))]
        if len(inputs) == 2 and isinstance(inputs[1], Number):
            # binary op with tensorlist and scalar: expand locally so the
            # caller's `inputs` list is left untouched
            inputs = [inputs[0], [inputs[1]] * len(inputs[0])]
        return [self.func(*i, **kwargs) for i in zip(*inputs)]
class ForeachFuncWrapper:
    """Invokes a foreach function, optionally validating CUDA launch counts.

    When kineto profiling is available on CUDA, asserts that the number of
    ``cudaLaunchKernel`` events equals the expectation on the fast path and
    exceeds it on the slow path. For in-place variants (name ending in
    ``_``), the mutated first input list is returned.
    """

    def __init__(self, func, n_expected_cudaLaunchKernels):
        self.func = func
        self.n_expected_cudaLaunchKernels = n_expected_cudaLaunchKernels
        # Some foreach functions don't have in-place implementations.
        self._is_inplace = func is not None and func.__name__.endswith('_')

    def __call__(self, inputs, is_cuda, is_fastpath, **kwargs):
        can_profile = (
            is_cuda
            and torch.autograd.kineto_available()
            and torch.profiler.ProfilerActivity.CUDA in torch.profiler.supported_activities()
        )
        if not can_profile:
            result = self.func(*inputs, **kwargs)
        else:
            with torch.profiler.profile(activities=(torch.profiler.ProfilerActivity.CPU,)) as prof:
                result = self.func(*inputs, **kwargs)
            for event in prof.key_averages():
                if event.key != 'cudaLaunchKernel':
                    continue
                if is_fastpath:
                    assert event.count == self.n_expected_cudaLaunchKernels
                else:
                    assert event.count > self.n_expected_cudaLaunchKernels
        # in-place foreach functions are void; report their mutated inputs
        return inputs[0] if self._is_inplace else result
class TestForeach(TestCase):
@property
def is_cuda(self):
    """True when this device-parameterized test runs on CUDA."""
    return self.device_type == 'cuda'
# note(mkozuki): It might be the case that the expected number of `cudaLaunchKernel`s
# is greater than 1 once foreach functions internally separate their input `TensorList`s by
# devices & dtypes into vectors of tensors.
def _get_funcs(self, op, n_expected_cudaLaunchKernels: int):
    """Return (foreach, ref, foreach_inplace, ref_inplace) wrappers for op."""
    return (
        ForeachFuncWrapper(op.method_variant, n_expected_cudaLaunchKernels),
        RegularFuncWrapper(op.ref),
        ForeachFuncWrapper(op.inplace_variant, n_expected_cudaLaunchKernels),
        RegularFuncWrapper(op.ref_inplace),
    )
def _binary_test(self, dtype, op, ref, inputs, is_fastpath, is_inplace, *, alpha=None):
    """Compare a foreach binary op against its per-tensor reference.

    If the foreach op raises, the reference must raise the same error;
    otherwise the results must match. With `alpha` given, the comparison
    is repeated passing the alpha keyword.
    """
    # clone the first list for in-place ops so the reference sees
    # unmutated tensors
    ref_inputs = [[t.clone().detach() for t in inputs[0]], inputs[1]] if is_inplace else inputs
    try:
        actual = op(inputs, self.is_cuda, is_fastpath)
    except RuntimeError as e:
        with self.assertRaisesRegex(type(e), re.escape(str(e))):
            ref(ref_inputs)
    else:
        expected = ref(ref_inputs)
        self.assertEqual(actual, expected)
    if alpha is not None:
        kwargs = {'alpha': alpha}
        ref_inputs = inputs
        try:
            actual = op(inputs, self.is_cuda, is_fastpath, **kwargs)
        except RuntimeError as e:
            with self.assertRaisesRegex(type(e), re.escape(str(e))):
                ref(ref_inputs, **kwargs)
        else:
            expected = ref(ref_inputs, **kwargs)
            # looser absolute tolerance for reduced precision on ROCm
            if dtype in (torch.float16, torch.bfloat16) and TEST_WITH_ROCM:
                self.assertEqual(expected, actual, atol=1.e-3, rtol=default_tolerances(dtype)[0])
            else:
                self.assertEqual(expected, actual)
def _test_binary_op_tensorlists(self, device, dtype, opinfo, N, is_fastpath, disable_fastpath):
    """Binary op over two tensor lists: out-of-place, in-place, the alpha
    variant when supported, and an implicit-broadcasting case."""
    n_expected_cudaLaunchKernels = N if disable_fastpath else 1
    op, ref, inplace_op, inplace_ref = self._get_funcs(opinfo, n_expected_cudaLaunchKernels)
    inputs = [
        opinfo.sample_inputs(device, dtype, N, noncontiguous=not is_fastpath),
        opinfo.sample_inputs(device, dtype, N, noncontiguous=not is_fastpath),
    ]
    self._binary_test(dtype, op, ref, inputs, is_fastpath, is_inplace=False)
    self._binary_test(dtype, inplace_op, inplace_ref, inputs, is_fastpath, is_inplace=True)
    if opinfo.supports_alpha_param:
        alpha = None
        # pick an alpha that matches the dtype family
        if dtype in integral_types():
            alpha = 3
        elif dtype.is_complex:
            alpha = complex(3, 3)
        else:
            alpha = 3.14
        self._binary_test(dtype, op, ref, inputs, is_fastpath, is_inplace=False, alpha=alpha)
        self._binary_test(dtype, inplace_op, inplace_ref, inputs, is_fastpath, is_inplace=True, alpha=alpha)
    # Tests of implicit broadcasting
    # When sizes of tensors don't match, foreach functions are supposed to choose slow path
    # even if this methods's argument `is_fastpath` is True.
    # `cudaLaunchKernel` will be equal to `N`. For assert in `ForeachFuncWrapper` to pass,
    # we pass `is_fastpath and disable_fastpath` to `_binary_test`'s argument of is_fastpath.
    # as n_expected_cudaLaunchKernels is N if disable_fastpath.
    inputs = [
        opinfo.sample_inputs(device, dtype, N, noncontiguous=not is_fastpath),
        [
            make_tensor((N - i , 1), device=device, dtype=dtype, noncontiguous=not is_fastpath) for i in range(N)
        ],
    ]
    self._binary_test(dtype, op, ref, inputs, is_fastpath and disable_fastpath, is_inplace=False)
    self._binary_test(
        dtype, inplace_op, inplace_ref, inputs, is_fastpath and disable_fastpath, is_inplace=True)
@skipMeta
@ops(foreach_binary_op_db)
def test_binary_op_tensorlists_fastpath(self, device, dtype, op):
    """Fast-path tensorlist-tensorlist binary ops for all sizes in N_values."""
    for N in N_values:
        # integral/bool div and bool add fall back to the slow path
        disable_fastpath = op.ref == torch.div and dtype in integral_types_and(torch.bool)
        if op.ref == torch.add and dtype == torch.bool:
            disable_fastpath = True
        self._test_binary_op_tensorlists(device, dtype, op, N, True, disable_fastpath)
@ops(foreach_binary_op_db)
def test_binary_op_tensorlists_slowpath(self, device, dtype, op):
    """Slow-path tensorlist-tensorlist binary ops for all sizes in N_values."""
    for n_tensors in N_values:
        self._test_binary_op_tensorlists(device, dtype, op, n_tensors, False, False)
def _test_binary_op_scalar(self, device, dtype, opinfo, N, scalar, is_fastpath, disable_fastpath):
    """Binary op between a tensor list and one scalar, out-of-place and in-place."""
    n_expected_cudaLaunchKernels = N if disable_fastpath else 1
    op, ref, inplace_op, inplace_ref = self._get_funcs(opinfo, n_expected_cudaLaunchKernels)
    inputs = [opinfo.sample_inputs(device, dtype, N, noncontiguous=not is_fastpath), scalar]
    self._binary_test(dtype, op, ref, inputs, is_fastpath, is_inplace=False)
    self._binary_test(dtype, inplace_op, inplace_ref, inputs, is_fastpath, is_inplace=True)
@skipMeta
@ops(foreach_binary_op_db)
def test_binary_op_scalar_fastpath(self, device, dtype, op):
    """Fast-path tensorlist-scalar ops; fastpath is disabled per
    scalar-kind/dtype combination that cannot be computed in-kernel."""
    for N, scalar in itertools.product(N_values, Scalars):
        # div on integral/bool dtypes produces non-integral results
        disable_fastpath = op.ref == torch.div and dtype in integral_types_and(torch.bool)
        if isinstance(scalar, int):
            disable_fastpath |= dtype == torch.bool
        if isinstance(scalar, float):
            disable_fastpath |= dtype in integral_types_and(torch.bool)
        if isinstance(scalar, bool):
            disable_fastpath |= dtype == torch.bool
            if op.ref in (torch.add, torch.mul):
                disable_fastpath = False
        if isinstance(scalar, complex):
            disable_fastpath |= dtype not in complex_types()
        self._test_binary_op_scalar(device, dtype, op, N, scalar, True, disable_fastpath)
@ops(foreach_binary_op_db)
def test_binary_op_scalar_slowpath(self, device, dtype, op):
    """Slow-path tensorlist-scalar binary ops over every (N, scalar) combo."""
    for n_tensors in N_values:
        for scalar in Scalars:
            self._test_binary_op_scalar(device, dtype, op, n_tensors, scalar, False, False)
def _test_binary_op_scalarlist(self, device, dtype, opinfo, N, scalarlist, is_fastpath, disable_fastpath):
    """Binary op between a tensor list and a scalar list, out-of-place and in-place."""
    n_expected_cudaLaunchKernels = N if disable_fastpath else 1
    op, ref, inplace_op, inplace_ref = self._get_funcs(opinfo, n_expected_cudaLaunchKernels)
    inputs = [opinfo.sample_inputs(device, dtype, N, noncontiguous=not is_fastpath), scalarlist]
    self._binary_test(dtype, op, ref, inputs, is_fastpath, is_inplace=False)
    self._binary_test(dtype, inplace_op, inplace_ref, inputs, is_fastpath, is_inplace=True)
# note(mkozuki): Why two functions depending on with/without bool?
# `foreach_sub` & `foreach_sub_` do `sub_check(tensors[i], scalars[i])` from i=1...N.
# So, if scalarlist has one or more bool values, `foreach_sub` and `foreach_sub_`
# raise bool subtraction error before doing any math.
# While regular `sub` and `sub_` do some math until they encounter bool.
# So, foreach sub's throw bool sub error first. However, regular sub's throw different
# errors depending on the order of scalarlist. To keep actual unit test impl simple,
# separating mixed scalarlist tests. By setting the first element of scalarlist to bool,
# they are expected to throw bool sub error even in inplace test.
@skipMeta
@ops(foreach_binary_op_db)
def test_binary_op_scalarlist_fastpath(self, device, dtype, op):
    """Fast-path tensorlist-scalarlist ops for each scalar-list flavor."""
    for N in N_values:
        for type_str, scalarlist in getScalarLists(N):
            bool_int_div = op.ref == torch.div and dtype in integral_types_and(torch.bool)
            disable_fastpath = bool_int_div
            if type_str == "int":
                disable_fastpath |= dtype == torch.bool
            if type_str == "float":
                disable_fastpath |= dtype in integral_types_and(torch.bool)
            if type_str == "complex":
                disable_fastpath |= dtype not in complex_types()
            if type_str == "mixed":
                disable_fastpath |= True and dtype not in complex_types()
            self._test_binary_op_scalarlist(device, dtype, op, N, scalarlist, True, disable_fastpath)
@ops(foreach_binary_op_db)
def test_binary_op_scalarlist_slowpath(self, device, dtype, op):
    """Slow-path tensorlist-scalarlist binary ops for every scalar-list kind."""
    for n_tensors in N_values:
        for _type_name, scalars in getScalarLists(n_tensors):
            self._test_binary_op_scalarlist(device, dtype, op, n_tensors, scalars, False, False)
def _pointwise_test(self, dtype, op, ref, inputs, is_fastpath, is_inplace, *, values=None):
    """Compare a foreach pointwise op (addcmul/addcdiv-style) against its
    per-tensor reference, optionally with an explicit `values` argument."""
    # clone the first tensor list for in-place ops so the reference sees
    # the original values
    ref_inputs = [[t.clone().detach() for t in inputs[0]], inputs[1], inputs[2]] if is_inplace else inputs
    try:
        actual = op(inputs, self.is_cuda, is_fastpath)
    except RuntimeError as e:
        with self.assertRaisesRegex(type(e), re.escape(str(e))):
            ref(ref_inputs)
    else:
        expected = ref(ref_inputs)
        self.assertEqual(expected, actual)
    if values is not None:
        # repeat with `values` (a scalar or a scalar list) appended
        try:
            actual = op(inputs + [values], self.is_cuda, is_fastpath)
        except RuntimeError as e:
            with self.assertRaisesRegex(type(e), re.escape(str(e))):
                ref(ref_inputs, values=values)
        else:
            expected = ref(ref_inputs, values=values)
            self.assertEqual(expected, actual)
def _test_pointwise_op(self, device, dtype, opinfo, N, is_fastpath, disable_fastpath, *, values=None):
    """Pointwise op over three tensor lists, out-of-place and in-place,
    plus an implicit-broadcasting case with mismatched per-tensor shapes."""
    n_expected_cudaLaunchKernels = N if disable_fastpath else 1
    op, ref, inplace_op, inplace_ref = self._get_funcs(opinfo, n_expected_cudaLaunchKernels)
    inputs = [
        opinfo.sample_inputs(device, dtype, N, noncontiguous=not is_fastpath),
        opinfo.sample_inputs(device, dtype, N, noncontiguous=not is_fastpath),
        opinfo.sample_inputs(device, dtype, N, noncontiguous=not is_fastpath),
    ]
    self._pointwise_test(dtype, op, ref, inputs, is_fastpath, is_inplace=False, values=values)
    self._pointwise_test(dtype, inplace_op, inplace_ref, inputs, is_fastpath, is_inplace=True, values=values)
    # Tests of implicit broadcasting
    # mismatched shapes force the slow path, hence `is_fastpath and
    # disable_fastpath` below (mirrors _test_binary_op_tensorlists)
    inputs = [
        opinfo.sample_inputs(device, dtype, N, noncontiguous=not is_fastpath, same_size=True),
        [
            make_tensor((N - i, 1), device=device, dtype=dtype, noncontiguous=not is_fastpath) for i in range(N)
        ],
        [
            make_tensor((1, N - i), device=device, dtype=dtype, noncontiguous=not is_fastpath) for i in range(N)
        ],
    ]
    self._pointwise_test(dtype, op, ref, inputs, is_fastpath and disable_fastpath, is_inplace=False, values=values)
    self._pointwise_test(
        dtype, inplace_op, inplace_ref, inputs, is_fastpath and disable_fastpath, is_inplace=True, values=values)
@skipMeta
@ops(foreach_pointwise_op_db)
def test_pointwise_op_fastpath(self, device, dtype, op):
    """Fast-path pointwise ops, plus scalar and scalar-list `values` variants."""
    # integral/bool dtypes fall back to the slow path for pointwise ops
    disable_fastpath = dtype in integral_types_and(torch.bool)
    # for N, scalar in itertools.product(N_values, Scalars):
    for N in N_values:
        self._test_pointwise_op(device, dtype, op, N, True, disable_fastpath)
        for scalar in Scalars:
            self._test_pointwise_op(device, dtype, op, N, True, disable_fastpath, values=scalar)
        for _, scalarlist in getScalarLists(N):
            self._test_pointwise_op(
                device, dtype, op, N, True, disable_fastpath, values=scalarlist)
@ops(foreach_pointwise_op_db)
def test_pointwise_op_slowpath(self, device, dtype, op):
    """Slow-path pointwise ops, plus scalar and scalar-list `values` variants."""
    for n_tensors in N_values:
        self._test_pointwise_op(device, dtype, op, n_tensors, False, False)
        for scalar in Scalars:
            self._test_pointwise_op(device, dtype, op, n_tensors, False, False, values=scalar)
        for _type_name, scalars in getScalarLists(n_tensors):
            self._test_pointwise_op(device, dtype, op, n_tensors, False, False, values=scalars)
# note(mkozuki): fastpath test uses dtypes which fastpath implementation supports.
# To confirm the dtypes of `OpInfo` cover the dtypes that the function support,
# this test does not use `try-except` for fastpath.
def _regular_unary_test(self, dtype, op, ref, inputs, is_fastpath):
    """Compare an out-of-place foreach unary op against its reference;
    on the fast path the op is required not to raise."""
    if is_fastpath:
        self.assertEqual(ref(inputs), op(inputs, self.is_cuda, is_fastpath))
        return
    try:
        actual = op(inputs, self.is_cuda, is_fastpath)
    except RuntimeError as e:
        # the reference must fail with the same error message
        with self.assertRaisesRegex(type(e), re.escape(str(e))):
            ref(inputs)
    else:
        expected = ref(inputs)
        self.assertEqual(actual, expected)
# note(mkozuki): why `try-except` for both fastpath?
# - inputs for fastpath can be integer tensors.
# - this is because opinfo dtypes are configured for the out-of-place implementation
# - for integer inputs, trigonometric functions and exponential function return float
#   outputs, which causes "result type Float can't be cast to the desired type" error.
# Thus, `try-except` is used even if `is_fastpath` is `True`.
def _inplace_unary_test(self, dtype, inplace, inplace_ref, inputs, is_fastpath):
    """Run an in-place foreach unary op against its in-place reference.

    The reference mutates cloned inputs; afterwards the foreach-mutated
    and reference-mutated tensor lists must match. If the foreach op
    raises, the reference must raise the same error.
    """
    copied_inputs = [[t.clone().detach() for t in tensors] for tensors in inputs]
    try:
        inplace(inputs, self.is_cuda, is_fastpath)
    except RuntimeError as e:
        with self.assertRaisesRegex(type(e), re.escape(str(e))):
            inplace_ref(copied_inputs)
    else:
        # fix: dropped the stray trailing comma that turned this call's
        # (discarded) result into a pointless 1-tuple
        inplace_ref(copied_inputs)
        self.assertEqual(copied_inputs, inputs)
def _test_unary(self, device, dtype, opinfo, N, is_fastpath):
    """Drive both out-of-place and in-place unary tests for one opinfo/N."""
    op, ref, inplace_op, inplace_ref = self._get_funcs(opinfo, 1)
    # the trailing comma wraps the sample list in a 1-tuple, the shape the
    # wrappers expect for unary ops
    inputs = opinfo.sample_inputs(device, dtype, N, noncontiguous=not is_fastpath),
    # note(mkozuki): Complex inputs for `_foreach_abs` go through slowpath.
    if opinfo.name == "_foreach_abs" and dtype in complex_types():
        is_fastpath = False
    self._regular_unary_test(dtype, op, ref, inputs, is_fastpath)
    self._inplace_unary_test(dtype, inplace_op, inplace_ref, inputs, is_fastpath)
@skipMeta
@ops(foreach_unary_op_db)
def test_unary_fastpath(self, device, dtype, op):
    """Fast-path unary foreach ops for every size in N_values."""
    for n_tensors in N_values:
        self._test_unary(device, dtype, op, n_tensors, is_fastpath=True)
@ops(foreach_unary_op_db, dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
def test_unary_slowpath(self, device, dtype, op):
    """Slow-path unary foreach ops for every size in N_values."""
    for N in N_values:
        self._test_unary(device, dtype, op, N, is_fastpath=False)
# note(crcrpar): `torch.maximum` and `torch.minimum` support `out` arg but there seem to be no inplace versions.
# So, compare `inplace_op` results with `ref`'s outputs.
def _minmax_test(self, opinfo, inputs, is_fastpath, n_expected_cudaLaunchKernels):
    """Check foreach max/min out-of-place and in-place against the reference."""
    op, ref, inplace_op, _ = self._get_funcs(opinfo, n_expected_cudaLaunchKernels)
    expected = ref(inputs)
    self.assertEqual(expected, op(inputs, self.is_cuda, is_fastpath))
    # clone the to-be-mutated list so the original inputs stay intact
    inplace_inputs = [[t.clone() for t in inputs[0]], inputs[1]]
    inplace_op(inplace_inputs, self.is_cuda, is_fastpath)
    self.assertEqual(expected, inplace_inputs[0])
@ops(foreach_minmax_op_db)
def test_minmax_fastpath(self, device, dtype, op):
    """Fast-path foreach max/min; bool inputs take N kernel launches."""
    for N in N_values:
        inputs = tuple(op.sample_inputs(device, dtype, N) for _ in range(2))
        self._minmax_test(op, inputs, True, N if dtype == torch.bool else 1)
@ops(foreach_minmax_op_db,
     dtypes=all_types_and(torch.half, torch.bfloat16, torch.bool))
def test_minmax_slowpath(self, device, dtype, op):
    """Slow-path foreach max/min on noncontiguous inputs."""
    for N in N_values:
        inputs = tuple(op.sample_inputs(device, dtype, N, noncontiguous=True) for _ in range(2))
        self._minmax_test(op, inputs, False, 1)
# note(mkozuki): ForeachFuncInfo's of both `_foreach_maximum` and `_foreach_minimum` include integer types.
# so, manually limit dtypes to fp types for inf&nan tests.
@ops(foreach_minmax_op_db, dtypes=floating_types_and(torch.half, torch.bfloat16))
def test_minmax_float_inf_nan(self, device, dtype, op):
    """foreach max/min must match torch semantics for inf/-inf/nan pairs."""
    inputs = (
        [
            torch.tensor([float('inf')], device=device, dtype=dtype),
            torch.tensor([-float('inf')], device=device, dtype=dtype),
            torch.tensor([float('nan')], device=device, dtype=dtype),
            torch.tensor([float('nan')], device=device, dtype=dtype)
        ],
        [
            torch.tensor([-float('inf')], device=device, dtype=dtype),
            torch.tensor([float('inf')], device=device, dtype=dtype),
            torch.tensor([float('inf')], device=device, dtype=dtype),
            torch.tensor([float('nan')], device=device, dtype=dtype)
        ],
    )
    self._minmax_test(op, inputs, True, 1)
def _reduce_test(self, opinfo, inputs, ord, is_fastpath, n_expected_cudaLaunchKernels):
    """Compare a foreach reduction (e.g. norm) with its reference for one ord."""
    op, ref, _, _ = self._get_funcs(opinfo, n_expected_cudaLaunchKernels)
    self.assertEqual(ref(inputs, ord=ord), op(inputs, self.is_cuda, is_fastpath, ord=ord))
@ops(foreach_reduce_op_db)
def test_reduce_fastpath(self, device, dtype, op):
    """Fast-path foreach reductions over several ord values."""
    for N, ord in itertools.product(N_values, (0, 1, 2, -1, -2)):
        # L1/L2 norms on floating dtypes use the multi-kernel fast path
        if ord in (1, 2) and dtype in floating_types_and(torch.half, torch.bfloat16):
            n_expected_cudaLaunchKernels = 3
        else:
            n_expected_cudaLaunchKernels = N
        # trailing comma wraps the sample list in a 1-tuple for the wrappers
        inputs = op.sample_inputs(device, dtype, N, noncontiguous=False),
        self._reduce_test(op, inputs, ord, True, n_expected_cudaLaunchKernels)
@ops(foreach_reduce_op_db)
def test_reduce_slowpath(self, device, dtype, op):
    """Slow-path foreach reductions on noncontiguous inputs."""
    for N, ord in itertools.product(N_values, (0, 1, 2, -1, -2)):
        # trailing comma wraps the sample list in a 1-tuple for the wrappers
        inputs = op.sample_inputs(device, dtype, N, noncontiguous=True),
        self._reduce_test(op, inputs, ord, False, 1)
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
def test_add_scalar_with_empty_list_and_empty_tensor(self, device, dtype):
    """Adding a scalar to a list containing an empty tensor is a no-op."""
    # TODO: enable empty list case
    for tensors in [[torch.randn([0])]]:
        res = torch._foreach_add(tensors, 1)
        self.assertEqual(res, tensors)
        # the in-place variant must also leave the empty tensors unchanged
        torch._foreach_add_(tensors, 1)
        self.assertEqual(res, tensors)
@ops(foreach_binary_op_db, dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
def test_binary_op_scalar_with_overlapping_tensors(self, device, dtype, op):
    """Expanded (memory-overlapping) tensors work; bool subtraction must
    raise the same error in foreach and reference paths."""
    foreach_op, ref = op.method_variant, op.ref
    tensors = [torch.ones(1, 1, device=device, dtype=dtype).expand(2, 1, 3)]
    if ref == torch.sub and dtype == torch.bool:
        with self.assertRaisesRegex(RuntimeError, re.escape(_BOOL_SUB_ERR_MSG)):
            [ref(t, 1) for t in tensors]
        with self.assertRaisesRegex(RuntimeError, re.escape(_BOOL_SUB_ERR_MSG)):
            foreach_op(tensors, 1)
        return
    expected = [ref(t, 1) for t in tensors]
    res = foreach_op(tensors, 1)
    self.assertEqual(res, expected)
# note(mkozuki): this test case fails with Meta at least in my local environment.
# The message was
# `AssertionError: NotImplementedError("Could not run 'aten::_foreach_add.Scalar' with arguments from the 'Meta' backend.`
@skipMeta
@ops(foreach_binary_op_db, allowed_dtypes=[torch.float])
def test_binary_op_scalar_with_different_tensor_dtypes(self, device, dtype, op):
    """A tensor list mixing dtypes must not raise for scalar binary ops."""
    foreach_op = op.method_variant
    tensors = [torch.tensor([1.1], dtype=torch.float, device=device),
               torch.tensor([1], dtype=torch.long, device=device)]
    runtime_error = None
    try:
        foreach_op(tensors, 1)
    except RuntimeError as e:
        runtime_error = e
    self.assertIsNone(runtime_error)
@ops(foreach_binary_op_db, dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
def test_binary_op_list_error_cases(self, device, dtype, op):
    """Error behavior for malformed tensor-list arguments: empty lists,
    length mismatches, incompatible shapes, and mixed devices."""
    foreach_op, foreach_op_, ref, ref_ = op.method_variant, op.inplace_variant, op.ref, op.ref_inplace
    tensors1 = []
    tensors2 = []
    # Empty lists
    with self.assertRaisesRegex(RuntimeError, "There were no tensor arguments to this function"):
        foreach_op(tensors1, tensors2)
    with self.assertRaisesRegex(RuntimeError, "There were no tensor arguments to this function"):
        foreach_op_(tensors1, tensors2)
    # One empty list
    tensors1.append(torch.tensor([1], device=device, dtype=dtype))
    with self.assertRaisesRegex(RuntimeError, "Tensor list must have same number of elements as scalar list."):
        foreach_op(tensors1, tensors2)
    with self.assertRaisesRegex(RuntimeError, "Tensor list must have same number of elements as scalar list."):
        foreach_op_(tensors1, tensors2)
    # Lists have different amount of tensors
    tensors2.append(torch.tensor([1], device=device))
    tensors2.append(torch.tensor([1], device=device))
    with self.assertRaisesRegex(RuntimeError, "Tensor lists must have the same number of tensors, got 1 and 2"):
        foreach_op(tensors1, tensors2)
    with self.assertRaisesRegex(RuntimeError, "Tensor lists must have the same number of tensors, got 1 and 2"):
        foreach_op_(tensors1, tensors2)
    # Corresponding tensors with different sizes that aren't compatible with broadcast
    # If sizes are different then foreach chooses slow path, thus error messages are expected
    # to be the same as torch regular function.
    tensors1 = [torch.zeros(10, 10, device=device, dtype=dtype) for _ in range(10)]
    tensors2 = [torch.ones(11, 11, device=device, dtype=dtype) for _ in range(10)]
    try:
        foreach_op(tensors1, tensors2)
    except RuntimeError as e:
        with self.assertRaisesRegex(type(e), re.escape(str(e))):
            [ref(t1, t2) for t1, t2 in zip(tensors1, tensors2)]
    try:
        foreach_op_(tensors1, tensors2)
    except RuntimeError as e:
        with self.assertRaisesRegex(type(e), re.escape(str(e))):
            [ref_(t1, t2) for t1, t2 in zip(tensors1, tensors2)]
    # different devices
    if self.device_type == "cuda" and torch.cuda.device_count() > 1:
        tensor1 = torch.zeros(10, 10, device="cuda:0", dtype=dtype)
        tensor2 = torch.ones(10, 10, device="cuda:1", dtype=dtype)
        if dtype == torch.bool and foreach_op == torch._foreach_sub:
            # bool sub raises before the device check
            with self.assertRaisesRegex(RuntimeError, re.escape(_BOOL_SUB_ERR_MSG)):
                foreach_op([tensor1], [tensor2])
            with self.assertRaisesRegex(RuntimeError, re.escape(_BOOL_SUB_ERR_MSG)):
                foreach_op_([tensor1], [tensor2])
            return
        with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
            foreach_op([tensor1], [tensor2])
        # in-place integral div reports a result-type error first
        if dtype in integral_types_and(torch.bool) and foreach_op == torch._foreach_div:
            with self.assertRaisesRegex(RuntimeError, "result type"):
                foreach_op_([tensor1], [tensor2])
        else:
            with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
                foreach_op_([tensor1], [tensor2])
    @skipMeta
    @unittest.skipIf(not torch.cuda.is_available(), "CUDA not found")
    @ops(foreach_binary_op_db, dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
    def test_binary_op_list_slow_path(self, device, dtype, op):
        """Exercise the foreach slow path with inputs that disqualify the fused
        fast path: 0-strided, differently-strided, non-contiguous, and sliced
        tensors. Each section rebuilds fresh inputs because the in-place variant
        mutates its first operand.
        """
        # note(mkozuki): why `n_expected_cudaLaunchKernels=0`?
        # In this test, foreach functions don't go through fast path,
        # but as there is only one tensor in each list of tensors,
        # `cudaLaunchKernel` is 1 so ForeachFuncWrapper internal assert fails.
        foreach_op, native_op, foreach_op_, native_op_ = self._get_funcs(op, n_expected_cudaLaunchKernels=0)

        # 0-strides: second operand is a (1,)-tensor expanded to the full shape.
        tensor1 = make_tensor((10, 10), dtype=dtype, device=device)
        tensor2 = make_tensor((1,), device=device, dtype=dtype).expand_as(tensor1)
        inputs = ([tensor1], [tensor2])
        self._binary_test(dtype, foreach_op, native_op, inputs, is_fastpath=False, is_inplace=False)
        self._binary_test(dtype, foreach_op_, native_op_, inputs, is_fastpath=False, is_inplace=True)

        # different strides: second operand is a transposed view.
        tensor1 = torch.zeros(10, 10, device=device, dtype=dtype)
        tensor2 = torch.ones(10, 10, device=device, dtype=dtype)
        inputs = ([tensor1], [tensor2.t()])
        self._binary_test(dtype, foreach_op, native_op, inputs, is_fastpath=False, is_inplace=False)
        self._binary_test(dtype, foreach_op_, native_op_, inputs, is_fastpath=False, is_inplace=True)

        # non contiguous
        tensor1 = make_tensor((5, 2, 1, 3), device=device, dtype=dtype, noncontiguous=True)
        tensor2 = make_tensor((5, 2, 1, 3), device=device, dtype=dtype, noncontiguous=True)
        self.assertFalse(tensor1.is_contiguous())
        self.assertFalse(tensor2.is_contiguous())
        inputs = ([tensor1], [tensor2])
        self._binary_test(dtype, foreach_op, native_op, inputs, is_fastpath=False, is_inplace=False)
        self._binary_test(dtype, foreach_op_, native_op_, inputs, is_fastpath=False, is_inplace=True)

        # sliced tensor: every-7th-column view gives a non-unit innermost stride.
        tensor1 = make_tensor((5, 2, 1, 3), device=device, dtype=dtype)
        tensor2 = make_tensor((5, 2, 1, 3 * 7), device=device, dtype=dtype)[:, :, :, ::7]
        inputs = ([tensor1], [tensor2])
        self._binary_test(dtype, foreach_op, native_op, inputs, is_fastpath=False, is_inplace=False)
        self._binary_test(dtype, foreach_op_, native_op_, inputs, is_fastpath=False, is_inplace=True)
    # note: The three tests below (postfixed with `_tensors_on_different_devices`)
    # check whether foreach works with lists of tensors on different devices,
    # where tensors of the same index are on the same device, e.g., ['cuda', 'cpu'].
    @onlyCUDA
    @ops(foreach_unary_op_db)
    def test_unary_op_tensors_on_different_devices(self, device, dtype, op):
        """Unary foreach ops on a list mixing a CUDA and a CPU tensor: the foreach
        result must match the native per-tensor op, or both must fail with the
        same error."""
        method, ref, inplace_method, ref_inplace = self._get_funcs(op, 1)
        # tensors: ['cuda', 'cpu']
        tensors = op.sample_inputs(device, dtype, 2)
        tensors[1] = tensors[1].to('cpu')
        try:
            actual = method((tensors,), False, False)
        except RuntimeError as e:
            # The native reference must fail with the same error message.
            with self.assertRaisesRegex(type(e), str(e)):
                ref((tensors,))
        else:
            expected = ref((tensors,))
            self.assertEqual(expected, actual)

        try:
            inplace_method((tensors,), False, False)
        except RuntimeError as e:
            with self.assertRaisesRegex(type(e), str(e)):
                ref_inplace((tensors,))
        else:
            # NOTE(review): `expected` is only bound when the out-of-place branch
            # above succeeded; if that branch raised while this one succeeds,
            # this line would NameError. Presumably both paths succeed or fail
            # together for every op in the db — confirm.
            self.assertEqual(expected, tensors)
    @onlyCUDA
    @ops(foreach_binary_op_db)
    def test_binary_op_tensors_on_different_devices(self, device, dtype, op):
        """Binary foreach ops where each input list mixes a CUDA and a CPU tensor,
        compared (or error-matched) against per-pair native ops."""
        # `tensors1`: ['cuda', 'cpu']
        # `tensors2`: ['cuda', 'cpu']
        _cuda_tensors = op.sample_inputs(device, dtype, 2, same_size=True)
        _cpu_tensors = op.sample_inputs('cpu', dtype, 2, same_size=True)
        # zip pairs cuda[i] with cpu[i]; unpacking yields the two mixed lists.
        tensors1, tensors2 = list(tensors for tensors in zip(_cuda_tensors, _cpu_tensors))

        foreach_op, foreach_op_ = op.method_variant, op.inplace_variant
        native_op, native_op_ = op.ref, op.ref_inplace
        try:
            actual = foreach_op(tensors1, tensors2)
        except RuntimeError as e:
            # The per-pair native ops must fail with the same error message.
            with self.assertRaisesRegex(type(e), re.escape(str(e))):
                [native_op(t1, t2) for t1, t2 in zip(tensors1, tensors2)]
        else:
            expected = [native_op(t1, t2) for t1, t2 in zip(tensors1, tensors2)]
            self.assertEqual(expected, actual)
        try:
            foreach_op_(tensors1, tensors2)
        except RuntimeError as e:
            with self.assertRaisesRegex(type(e), re.escape(str(e))):
                [native_op_(t1, t2) for t1, t2 in zip(tensors1, tensors2)]
        else:
            # NOTE(review): `actual` is only bound if the out-of-place call above
            # succeeded; if it raised while the in-place call succeeds, this
            # would NameError — presumably both succeed or fail together; confirm.
            self.assertEqual(actual, tensors1)
    @onlyCUDA
    @ops(foreach_pointwise_op_db, allowed_dtypes=floating_types())
    def test_pointwise_op_tensors_on_different_devices(self, device, dtype, op):
        """Ternary pointwise foreach ops with each input list mixing a CUDA and a
        CPU tensor; results must match the native op applied per device."""
        # tensors1: ['cuda', 'cpu']
        # tensors2: ['cuda', 'cpu']
        # tensors3: ['cuda', 'cpu']
        _cuda_tensors = op.sample_inputs(device, dtype, 3, same_size=True)
        _cpu_tensors = op.sample_inputs('cpu', dtype, 3, same_size=True)
        tensors1, tensors2, tensors3 = list(tensors for tensors in zip(_cuda_tensors, _cpu_tensors))

        foreach_op, foreach_op_, native_op = op.method_variant, op.inplace_variant, op.ref
        actual = foreach_op(tensors1, tensors2, tensors3)
        expected = [native_op(*_cuda_tensors), native_op(*_cpu_tensors)]
        self.assertEqual(expected, actual)

        # note(mkozuki): Limiting dtypes to FP32&FP64, we can safely run inplace ops.
        foreach_op_(tensors1, tensors2, tensors3)
        self.assertEqual(expected, tensors1)
    # note: BFloat16 has the same number of exponent bits as FP32
    # so if squared L2 norm overflows in BF16, then it also overflows in FP32.
    @onlyCUDA
    @ops(foreach_reduce_op_db, allowed_dtypes=(torch.half, torch.bfloat16))
    def test_foreach_l2_large_value_input(self, device, dtype, op):
        """L2-norm foreach reduction on inputs whose squares overflow the input
        dtype: FP16 references must stay finite while BF16 must overflow to inf."""
        ord, N = 2, 10
        max_value = torch.finfo(dtype).max
        # scaler ~ sqrt(max), so scaler**2 * N exceeds the dtype's max.
        scaler = torch.tensor([max_value]).sqrt().to(device=device, dtype=dtype)
        # NOTE(review): the trailing comma makes `inputs` a 1-tuple wrapping the
        # list, which the _get_funcs wrappers then unpack — presumably
        # intentional; confirm against _get_funcs.
        inputs = [t * scaler for t in op.sample_inputs(device, dtype, N, noncontiguous=False, low=1)],
        # make sure that the min. of squared L2 norm value per tensor is greater than the max value of `dtype`.
        self.assertTrue(scaler * scaler * N > max_value)
        fn, ref_fn, *_ = self._get_funcs(op, 3)
        actual = fn(inputs, is_cuda=True, is_fastpath=True, ord=ord)
        expect = ref_fn(inputs, ord=ord)
        if dtype == torch.float16:
            # making sure the reference L2 norm values are in the range of FP16.
            self.assertFalse(any(torch.isinf(e) for e in expect))
        else:
            self.assertTrue(all(torch.isinf(e) for e in expect))
        self.assertEqual(expect, actual, equal_nan=False)
# Generate per-device (CPU/CUDA/...) variants of TestForeach into this module.
instantiate_device_type_tests(TestForeach, globals())

if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_foreach.py |
# Owner(s): ["module: unknown"]
from typing import Optional, List
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
# End-to-end tests of features in native_functions.yaml
class FloatListWrapperModule(torch.nn.Module):
    """nn.Module wrapper around the optional-float-list test op so it can be
    exercised eagerly, scripted, and traced."""

    def forward(self, values, incr: Optional[List[float]]):
        # incr=None exercises the operator's nullopt path.
        return torch._C._nn._test_optional_floatlist(values, incr)
class IntListWrapperModule(torch.nn.Module):
    """nn.Module wrapper around the optional-int-list test op so it can be
    exercised eagerly, scripted, and traced."""

    def forward(self, values, incr: Optional[List[int]]):
        # incr=None exercises the operator's nullopt path.
        return torch._C._nn._test_optional_intlist(values, incr)
class TestNativeFunctions(TestCase):
    """End-to-end checks for optional-list and string-default argument handling
    of the `_test_*` operators declared in native_functions.yaml, in eager,
    scripted, and traced modes."""

    #
    # optional float list
    #

    def do_test_optional_floatlist_with_module(self, module):
        """Run the shared optional-float-list checks against `module` (eager,
        scripted, or a traced stand-in with the same call signature)."""
        values = torch.tensor([1.5, 2.5], dtype=torch.float)

        returned = module(values, None)
        self.assertEqual(values, returned)

        # Make sure that it's an alias, indicating that the operator saw a nullopt.
        values[0] = 3.5
        self.assertEqual(values, returned)

        # With a list supplied, the op returns a fresh tensor of values + incr.
        returned = module(values, [5.1, 4.1])
        self.assertEqual(values, torch.tensor([3.5, 2.5], dtype=torch.float))
        self.assertEqual(returned, torch.tensor([8.6, 6.6], dtype=torch.float))

    def trace_optional_floatlist(self, const):
        """Trace the test op with `const` baked in as the optional float list."""
        def wrapper(values):
            return torch._C._nn._test_optional_floatlist(values, const)
        return torch.jit.trace(wrapper, torch.tensor([1.5, 2.5], dtype=torch.float))

    def test_optional_floatlist(self):
        self.do_test_optional_floatlist_with_module(FloatListWrapperModule())
        self.do_test_optional_floatlist_with_module(torch.jit.script(FloatListWrapperModule()))

        traced_none = self.trace_optional_floatlist(None)
        traced_list = self.trace_optional_floatlist([5.1, 4.1])

        # Not really a module, just lets us use our two traced functions to handle
        # the specific cases of passing None and [5.1, 4.1].
        def fake_module(values, const):
            if const is None:
                return traced_none(values)
            if const == [5.1, 4.1]:
                return traced_list(values)
            raise Exception("Invalid argument")

        self.do_test_optional_floatlist_with_module(fake_module)

    def test_optional_floatlist_invalid(self):
        """Wrong element/argument types must raise in both eager and scripted modes."""
        with self.assertRaisesRegex(TypeError, "must be tuple of floats, not list"):
            FloatListWrapperModule()(torch.zeros(1), ["hi"])

        with self.assertRaisesRegex(RuntimeError, "value of type .* instead found type"):
            torch.jit.script(FloatListWrapperModule())(torch.zeros(1), ["hi"])

        with self.assertRaisesRegex(TypeError, "must be .* Tensor"):
            FloatListWrapperModule()(torch.zeros(1), torch.zeros(1))

        with self.assertRaisesRegex(RuntimeError, "value of type .* instead found type"):
            torch.jit.script(FloatListWrapperModule())(torch.zeros(1), torch.zeros(1))

    #
    # optional int list
    #

    def do_test_optional_intlist_with_module(self, module):
        """Int-list analogue of do_test_optional_floatlist_with_module."""
        values = torch.tensor([1, 2], dtype=torch.int)

        returned = module(values, None)
        self.assertEqual(values, returned)

        # Make sure that it's an alias, indicating that the operator saw a nullopt.
        values[0] = 3
        self.assertEqual(values, returned)

        returned = module(values, [5, 4])
        self.assertEqual(values, torch.tensor([3, 2], dtype=torch.int))
        self.assertEqual(returned, torch.tensor([8, 6], dtype=torch.int))

    def trace_optional_intlist(self, const):
        """Trace the test op with `const` baked in as the optional int list."""
        def wrapper(values):
            return torch._C._nn._test_optional_intlist(values, const)
        return torch.jit.trace(wrapper, torch.tensor([1, 2], dtype=torch.int))

    def test_optional_intlist(self):
        self.do_test_optional_intlist_with_module(IntListWrapperModule())
        self.do_test_optional_intlist_with_module(torch.jit.script(IntListWrapperModule()))

        traced_none = self.trace_optional_intlist(None)
        traced_list = self.trace_optional_intlist([5, 4])

        # Not really a module, just lets us use our two traced functions to handle
        # the specific cases of passing None and [5, 4].
        def fake_module(values, const):
            if const is None:
                return traced_none(values)
            if const == [5, 4]:
                return traced_list(values)
            raise Exception("Invalid argument")

        self.do_test_optional_intlist_with_module(fake_module)

    def test_optional_intlist_invalid(self):
        """Wrong element/argument types must raise in both eager and scripted modes."""
        with self.assertRaisesRegex(TypeError, "must be .* not"):
            IntListWrapperModule()(torch.zeros(1), [0.5])

        with self.assertRaisesRegex(RuntimeError, "value of type .* instead found type"):
            torch.jit.script(IntListWrapperModule())(torch.zeros(1), [0.5])

        with self.assertRaisesRegex(TypeError, "must be .* Tensor"):
            IntListWrapperModule()(torch.zeros(1), torch.zeros(1))

        with self.assertRaisesRegex(RuntimeError, "value of type .* instead found type"):
            torch.jit.script(IntListWrapperModule())(torch.zeros(1), torch.zeros(1))

    #
    # optional filled int list
    #

    def do_test_optional_filled_intlist_with_module(self, module):
        """Filled-int-list analogue: a single scalar int fills the whole list."""
        values = torch.tensor([1, 2], dtype=torch.int)

        returned = module(values, None)
        self.assertEqual(values, returned)

        # Make sure that it's an alias, indicating that the operator saw a nullopt.
        values[0] = 3
        self.assertEqual(values, returned)

        returned = module(values, 10)
        self.assertEqual(values, torch.tensor([3, 2], dtype=torch.int))
        self.assertEqual(returned, torch.tensor([13, 12], dtype=torch.int))

    def trace_optional_filled_intlist(self, const):
        """Trace the test op with `const` baked in as the filled int list."""
        def wrapper(values):
            return torch._C._nn._test_optional_filled_intlist(values, const)
        return torch.jit.trace(wrapper, torch.tensor([1, 2], dtype=torch.int))

    def test_optional_filled_intlist(self):

        def f(n: int):
            # Passing (n, n) explicitly and the scalar n must be equivalent.
            x = torch._C._nn._test_optional_filled_intlist(torch.tensor([1, 1], dtype=torch.int), (n, n))
            y = torch._C._nn._test_optional_filled_intlist(torch.tensor([1, 1], dtype=torch.int), n)
            return x, y

        # eager
        returned = f(10)
        self.assertEqual(returned[0], returned[1])

        # scripted
        s = torch.jit.script(f)
        returned = s(10)
        self.assertEqual(returned[0], returned[1])

        # traced
        traced_none = self.trace_optional_filled_intlist(None)
        traced_int = self.trace_optional_filled_intlist(10)

        # Not really a module, just lets us use our two traced functions to handle
        # the specific cases of passing None and 10.
        def fake_module(values, const):
            if const is None:
                return traced_none(values)
            if const == 10:
                return traced_int(values)
            raise Exception("Invalid argument")

        self.do_test_optional_filled_intlist_with_module(fake_module)

    def test_string_defaults(self):
        """The test op accepts its string defaults but rejects empty strings."""
        dummy = torch.rand(1)
        fn = torch._C._nn._test_string_default
        fn(dummy)

        with self.assertRaisesRegex(RuntimeError, "A"):
            fn(dummy, a="")

        with self.assertRaisesRegex(RuntimeError, "B"):
            fn(dummy, b="")

        def f(x):
            torch._C._nn._test_string_default(x)
        scripted_fn = torch.jit.script(f)
        scripted_fn(dummy)
# Standard PyTorch test entry point.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_native_functions.py |
# Owner(s): ["module: unknown"]
from torch.testing._internal.common_utils import TestCase, run_tests
import os
import subprocess
import sys
class TestMKLVerbose(TestCase):
    """Check that oneMKL verbose output is emitted (or suppressed) by the
    mkl_verbose.py helper script depending on the requested verbosity level."""

    def _collect_output_lines(self, verbose_level):
        """Run mkl_verbose.py at the given level and return its decoded,
        stripped output lines (stderr merged into stdout)."""
        script_dir = os.path.dirname(os.path.abspath(__file__))
        command = f'{sys.executable} -u {script_dir}/mkl_verbose.py --verbose-level={verbose_level}'
        with subprocess.Popen(command, shell=True,
                              stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as proc:
            return [str(raw_line, 'utf-8').strip() for raw_line in proc.stdout.readlines()]

    def test_verbose_on(self):
        verbose_count = 0
        for line in self._collect_output_lines(1):
            if line.startswith("MKL_VERBOSE"):
                verbose_count += 1
            elif line == 'Failed to set MKL into verbose mode. Please consider to disable this verbose scope.':
                # Verbose mode unsupported in this build; nothing to assert.
                return
        self.assertTrue(verbose_count > 0, 'oneMKL verbose messages not found.')

    def test_verbose_off(self):
        verbose_count = sum(
            1 for line in self._collect_output_lines(0) if line.startswith("MKL_VERBOSE"))
        self.assertEqual(verbose_count, 0, 'unexpected oneMKL verbose messages found.')
# Standard PyTorch test entry point.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_mkl_verbose.py |
# Owner(s): ["module: sparse"]
import copy
import torch
import random
import itertools
import unittest
import functools
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_ROCM, TEST_SCIPY, TEST_NUMPY, TEST_MKL, IS_WINDOWS, TestCase, run_tests, load_tests, coalescedonoff, parametrize,
subtest)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoCusparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCUDAIfRocm, skipCPUIfNoMklSparse)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
from torch.testing._internal.common_cuda import _get_torch_cuda_version, CUDA11OrLater, TEST_CUDA
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and
)
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED
if TEST_SCIPY:
import scipy.sparse as sp
if TEST_NUMPY:
import numpy as np
# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests

# MKL-backed sparse routines are unavailable on Windows builds or when MKL was not compiled in.
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
def _check_cusparse_triangular_solve_available():
version = _get_torch_cuda_version()
# cusparseSpSM was added in 11.3.1 but we don't have access to patch version
min_supported_version = (11, 4)
return version >= min_supported_version
def _check_cusparse_spgemm_available():
# cusparseSpGEMM was added in 11.0
version = _get_torch_cuda_version()
min_supported_version = (11, 0)
return version >= min_supported_version
def _check_cusparse_sddmm_available():
version = _get_torch_cuda_version()
# cusparseSDDMM was added in 11.2.1 but we don't have access to patch version
min_supported_version = (11, 3)
return version >= min_supported_version
# OpInfo subsets used by the parametrized tests below: ops that support CSR,
# and ops that support any of the four compressed layouts.
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
_sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
                                                 or op.supports_sparse_bsr or op.supports_sparse_bsc), op_db))
# Binary ops whose result is dense even with sparse inputs.
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))

# Unary elementwise CSR ops expected to support autograd.
UNARY_EWISE_CSR_ALLOW_AUTOGRAD = [
    'abs',
    'conj_physical',
    'neg',
]
# This should be just an import from test_linalg instead of code duplication
# but https://github.com/pytorch/pytorch/pull/63511#discussion_r733989701
def _test_addmm_addmv(
    test_case,
    f,
    t,
    m,
    v,
    *,
    alpha=None,
    beta=None,
    transpose_out=False,
    layout=torch.strided,
    mode=None
):
    """
    Unified test for checking `f(t, m, v, alpha=alpha, beta=beta)` computation,
    where f is `torch.addmv` or `torch.addmm`.
    `transpose_out` controls whether the out argument is in column-major order.
    `layout` controls whether `m` is converted to specified layout or not.
    Custom behaviour is implemented only for torch.sparse_csr layout.

    `mode` selects which operands are converted: "all_sparse" converts t, m and
    v; "dense_result" converts m and v; otherwise only m is converted.
    The result is checked against an `out=` variant and a NumPy reference.
    """
    dtype = t.dtype
    numpy_dtype = dtype
    # bfloat16 has no NumPy counterpart; compute the reference in float32.
    if dtype in {torch.bfloat16}:
        numpy_dtype = torch.float
    if dtype.is_complex:
        alpha = 0.9 + 0.3j if alpha is None else alpha
        beta = 0.5 + 0.6j if beta is None else beta
    else:
        alpha = 1.2 if alpha is None else alpha
        beta = 0.8 if beta is None else beta

    def convert_layout(mat):
        # Only torch.sparse_csr triggers an actual conversion; any other layout
        # is required to already match.
        if layout == torch.sparse_csr:
            return mat.to_sparse_csr()
        else:
            assert mat.layout == layout
            return mat

    if mode == "all_sparse":
        res1 = f(*map(convert_layout, (t, m, v)), alpha=alpha, beta=beta)
        res1 = res1.to_dense()
    elif mode == "dense_result":
        res1 = f(t, convert_layout(m), convert_layout(v), alpha=alpha, beta=beta)
    else:
        res1 = f(t, convert_layout(m), v, alpha=alpha, beta=beta)
    # NaN-prefill the out buffer so stale values cannot mask a failure.
    res2 = torch.full_like(res1, float('nan'))
    if transpose_out:
        res2 = res2.t().clone(memory_format=torch.contiguous_format).t()
    f(t, convert_layout(m), v, alpha=alpha, beta=beta, out=res2)
    # NumPy reference: alpha * (m @ v) + beta * t, computed in numpy_dtype.
    res3 = alpha * (m.to(numpy_dtype).cpu().numpy() @ v.to(numpy_dtype).cpu().numpy())
    if beta != 0:
        res3 += (beta * t).to(numpy_dtype).cpu().numpy()
    res3 = torch.from_numpy(res3).to(dtype)
    test_case.assertEqual(res1, res2)
    test_case.assertEqual(res1, res3)
class TestSparseCSRSampler(TestCase):
    """Sanity checks for the crow_indices sampler used by the CSR test helpers."""

    def test_make_crow_indices(self):
        # Here we test the correctness of the crow_indices algorithm
        # and testing it on CPU and with int32 dtype will be
        # sufficient.
        device = torch.device('cpu')
        index_dtype = torch.int32
        for n_rows, n_cols in itertools.product(range(1, 10), range(1, 10)):
            for nnz in range(n_rows * n_cols + 1):
                crow_indices = self._make_crow_indices(
                    n_rows, n_cols, nnz,
                    device=device, dtype=index_dtype)
                # One offset per row plus the trailing total.
                self.assertEqual(len(crow_indices), n_rows + 1)
                row_counts = crow_indices[1:] - crow_indices[:-1]
                # Per-row counts must sum to nnz and each fit within a row.
                self.assertEqual(row_counts.sum(), nnz)
                self.assertGreaterEqual(row_counts.min(), 0)
                self.assertLessEqual(row_counts.max(), n_cols)
def all_sparse_compressed_layouts(test_name='layout'):
return parametrize(test_name, [
subtest(torch.sparse_csr, name='SparseCSR'),
subtest(torch.sparse_csc, name='SparseCSC'),
subtest(torch.sparse_bsr, name='SparseBSR'),
subtest(torch.sparse_bsc, name='SparseBSC')])
def sparse_compressed_nonblock_layouts(test_name='layout'):
return parametrize(test_name, [
subtest(torch.sparse_csr, name='SparseCSR'),
subtest(torch.sparse_csc, name='SparseCSC')])
# Map each compressed layout to its (compressed_indices, plain_indices)
# accessor methods: row-compressed layouts use crow/col, column-compressed
# layouts use ccol/row.
sparse_compressed_indices_methods = {
    torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
    torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
    torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
    torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}
class TestSparseCompressed(TestCase):
"""Testing sparse compressed (CSR, CSC, BSR, BSC) tensor generic features.
"""
def genTensor(self, size, nnz, *, layout, device=None, dtype=torch.float, index_dtype=torch.int64):
if device is None:
device = self.device_type
return self.genSparseCompressedTensor(size, nnz, device=device, dtype=dtype, index_dtype=index_dtype, layout=layout)
    def _generate_small_inputs_utils(self, layout, device=None, dtype=None):
        """Return a pair of helpers `(shape, values)` that adapt canonical
        CSR-style shapes and values to the requested compressed `layout`
        (CSC swaps rows/columns; BSR/BSC additionally scale by blocksize)."""

        def shape(shape, basedim=0, blocksize=(1, 1), dense_shape=()):
            # Below, we define compressed and plain indices that
            # correspond to row compressed tensors. In order to reuse
            # the indices tensors for column compressed tensors, we
            # swap the row and columns in shape dims (basedim and
            # basedim + 1, respectively) to obtain the correct shape
            # for column compressed tensors. Batch and dense
            # dimensions remain as they are.
            #
            # Similarly, we reuse indices of non-block tensors for
            # block tensors, that means, we'll need to multiply the
            # base shape of the non-block tensor with blocksize to get
            # the base shape of a block tensor.
            if layout is torch.sparse_csc:
                shape = shape[:basedim] + (shape[basedim + 1], shape[basedim]) + shape[basedim + 2:]
            elif layout is torch.sparse_bsc:
                shape = shape[:basedim] + (shape[basedim + 1] * blocksize[1], shape[basedim] * blocksize[0]) + shape[basedim + 2:]
            elif layout is torch.sparse_bsr:
                shape = shape[:basedim] + (shape[basedim] * blocksize[0], shape[basedim + 1] * blocksize[1]) + shape[basedim + 2:]
            return shape

        def values(lst, basedim=0, blocksize=(1, 1), densesize=(), device=device, dtype=dtype):
            # Below, we define values for non-blocked and non-hybrid
            # tensors. To reuse these for blocked tensors, we replace
            # all values in lst with a double-list that "shape"
            # corresponds to blocksize.
            # To support hybrid tensors, the values in lst are further
            # replaced with a N-list where N==len(densesize) and the
            # shape corresponds to densesize.

            # Wrap-around bound for small integer dtypes so generated values
            # stay within the representable range.
            max_val = torch.iinfo(dtype).max if dtype in [torch.int16, torch.int8, torch.uint8] else None

            def list_add(lst, value):
                # recursively add a value to lst items
                if isinstance(lst, list):
                    return [list_add(item, value) for item in lst]
                rc = lst + value
                return rc if max_val is None else (rc % max_val)

            def stretch_values(value, bdim, values_item_shape):
                # replace a value with a new value that extends the
                # dimensionality of the value by
                # len(values_item_shape) from right. The left
                # dimensions up to bdim are considered as batch
                # dimensions.
                if not values_item_shape:
                    return value
                if isinstance(value, list) and bdim >= 0:
                    return [stretch_values(item, bdim - 1, values_item_shape) for item in value]
                # Build a nested-list skeleton of shape values_item_shape...
                new_value = functools.reduce(lambda x, dims: [copy.deepcopy(x) for _ in range(dims)],
                                             reversed(values_item_shape), None)
                # ...then fill each cell with the base value offset by a
                # position-dependent amount so entries are distinguishable.
                for p in itertools.product(*map(list, map(range, values_item_shape))):
                    row = functools.reduce(lambda x, i: x.__getitem__(i), p[:-1], new_value)
                    row[p[-1]] = list_add(value, sum([i * 10 ** d for d, i in enumerate(p)]))
                return new_value

            if layout is torch.sparse_bsr:
                values_item_shape = blocksize + densesize
            elif layout is torch.sparse_bsc:
                values_item_shape = tuple(reversed(blocksize)) + densesize
            else:
                values_item_shape = densesize

            if not lst:
                return torch.tensor(lst, device=device, dtype=dtype).reshape(0, *values_item_shape)

            lst = stretch_values(lst, basedim, values_item_shape)
            return torch.tensor(lst, device=device, dtype=dtype)

        return shape, values
    def _generate_small_inputs(self, layout, device=None, dtype=None, index_dtype=None,
                               enable_batched=True, enable_hybrid=True):
        """Generator of inputs to sparse compressed tensor factory functions.

        The input is defined as a 4-tuple:
          compressed_indices, plain_indices, values, expected_size_from_shape_inference

        `enable_batched`/`enable_hybrid` additionally yield inputs with batch
        dimensions and with trailing dense dimensions, respectively.
        """
        if index_dtype is None:
            index_dtype = torch.int64

        shape, values = self._generate_small_inputs_utils(layout, device, dtype)

        # a regular tensor
        yield (torch.tensor([0, 2, 4], device=device, dtype=index_dtype),
               torch.tensor([0, 1, 0, 2], device=device, dtype=index_dtype),
               values([1, 2, 3, 4], 0, (2, 1)),
               shape((2, 3), 0, (2, 1)))

        # a tensor with zero dimensions
        yield (torch.tensor([0, ], device=device, dtype=index_dtype),
               torch.tensor([], device=device, dtype=index_dtype),
               values([], 0, (2, 1)),
               shape((0, 0), 0, (2, 1)))

        if enable_batched:
            # a batched tensor with one batch dimension
            yield (torch.tensor([[0, 2, 4], [0, 3, 4]], device=device, dtype=index_dtype),
                   torch.tensor([[0, 1, 0, 1], [0, 1, 2, 0]], device=device, dtype=index_dtype),
                   values([[1, 2, 3, 4], [5, 6, 7, 8]], 1, (1, 2)),
                   shape((2, 2, 3), 1, (1, 2)))

            # a batched tensor with two batch dimensions
            yield (torch.tensor([[[0, 2, 4], [0, 3, 4], [0, 1, 4]],
                                 [[0, 1, 4], [0, 2, 4], [0, 3, 4]]],
                                device=device, dtype=index_dtype),
                   torch.tensor([[[0, 1, 0, 1], [0, 1, 2, 0], [0, 0, 1, 2]],
                                 [[1, 0, 1, 2], [0, 2, 0, 1], [0, 1, 2, 1]]],
                                device=device, dtype=index_dtype),
                   values([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
                           [[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]], 2, (2, 3)),
                   shape((2, 3, 2, 3), 2, (2, 3)))

        if enable_hybrid:
            # a tensor with one dense dimension
            yield (torch.tensor([0, 2, 4], device=device, dtype=index_dtype),
                   torch.tensor([0, 1, 0, 2], device=device, dtype=index_dtype),
                   values([1, 2, 3, 4], 0, (3, 2), (2,)),
                   shape((2, 3, 2), 0, (3, 2)))

            # a tensor with two dense dimensions
            yield (torch.tensor([0, 2, 4], device=device, dtype=index_dtype),
                   torch.tensor([0, 1, 0, 2], device=device, dtype=index_dtype),
                   values([1, 2, 3, 4], 0, (2, 3), (4, 2)),
                   shape((2, 3, 4, 2), 0, (2, 3)))

        if enable_batched and enable_hybrid:
            # a batched tensor with two batch dimensions and two dense dimensions
            yield (torch.tensor([[[0, 2, 4], [0, 3, 4], [0, 1, 4]],
                                 [[0, 1, 4], [0, 2, 4], [0, 3, 4]]],
                                device=device, dtype=index_dtype),
                   torch.tensor([[[0, 1, 0, 1], [0, 1, 2, 0], [0, 0, 1, 2]],
                                 [[1, 0, 1, 2], [0, 2, 0, 1], [0, 1, 2, 1]]],
                                device=device, dtype=index_dtype),
                   values([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
                           [[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]], 2, (3, 2), (2, 1)),
                   shape((2, 3, 2, 3, 2, 1), 2, (3, 2)))
@all_sparse_compressed_layouts()
@onlyCPU
def test_layout(self, layout):
self.assertIn(str(layout), {'torch.sparse_csr', 'torch.sparse_csc', 'torch.sparse_bsr', 'torch.sparse_bsc'})
self.assertEqual(type(layout), torch.layout)
@parametrize('shape_and_device_inference', [subtest(False, name='_'), subtest(False, name='shape_and_device_inference')])
@parametrize('use_factory_function', [subtest(False, name='_'), subtest(True, name='factory')])
@parametrize('input_kind', [subtest('tensor', name='from_tensor'), subtest('list', name='from_list')])
@all_sparse_compressed_layouts()
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_sparse_compressed_constructor(self, layout, device, dtype,
use_factory_function, shape_and_device_inference, input_kind):
factory_function = {
torch.sparse_csr: torch.sparse_csr_tensor,
torch.sparse_csc: torch.sparse_csc_tensor,
torch.sparse_bsr: torch.sparse_bsr_tensor,
torch.sparse_bsc: torch.sparse_bsc_tensor,
}[layout]
compressed_indices_mth, plain_indices_mth = sparse_compressed_indices_methods[layout]
for index_dtype in [torch.int32, torch.int64]:
for compressed_indices, plain_indices, values, size in self._generate_small_inputs(layout, device, dtype, index_dtype):
if input_kind == 'list':
if size == (0, 0):
# for this degenerate case, plain_indices must
# remain a tensor because
# tensor(plain_indices) results a float dtype
# when plain_indices is an empty list
if index_dtype == torch.int32:
# skip testing int32 case because
# tensor(compressed_indices) results a
# int64 dtype when compressed_indices is
# [0] (a list of single int zero).
continue
else:
plain_indices = plain_indices.tolist()
compressed_indices = compressed_indices.tolist()
values = values.tolist()
if size == (0, 0) and layout in {torch.sparse_bsr, torch.sparse_bsc}:
# in the block sparse case, values of type list needs to represent a 3-D tensor
values = [[[]]]
if use_factory_function:
if shape_and_device_inference:
sparse = factory_function(compressed_indices, plain_indices, values)
else:
sparse = factory_function(compressed_indices, plain_indices, values, size,
dtype=dtype, device=device)
else:
if shape_and_device_inference:
sparse = torch.sparse_compressed_tensor(compressed_indices, plain_indices, values, layout=layout)
else:
sparse = torch.sparse_compressed_tensor(compressed_indices, plain_indices, values, size,
dtype=dtype, layout=layout, device=device)
self.assertEqual(layout, sparse.layout)
self.assertEqual(size, sparse.shape)
self.assertEqual(compressed_indices, compressed_indices_mth(sparse))
self.assertEqual(plain_indices, plain_indices_mth(sparse))
self.assertEqual(values, sparse.values())
    @skipMeta
    @sparse_compressed_nonblock_layouts()
    @dtypes(*all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half))
    def test_empty(self, layout, device, dtype):
        """torch.empty for (batched) CSR/CSC tensors: verify shape, dtype,
        device, layout, and that the result has zero specified elements with
        int64 index tensors."""
        ns = [5, 2, 0]
        batch_shapes = [(), (2,), (2, 3)]
        # Dimension whose extent determines the compressed-indices length:
        # rows (-2) for CSR, columns (-1) for CSC.
        compressed_dim = {
            torch.sparse_csr: -2,
            torch.sparse_csc: -1,
        }[layout]
        compressed_indices_mth, plain_indices_mth = sparse_compressed_indices_methods[layout]
        for m, n, b in itertools.product(ns, ns, batch_shapes):
            shape = (*b, m, n)
            result = torch.empty(shape, dtype=dtype, device=device, layout=layout)
            self.assertEqual(result.shape, shape)
            self.assertEqual(result.dtype, dtype)
            self.assertEqual(result.device, torch.device(device))
            self.assertEqual(result.layout, layout)
            # Compressed indices have one entry per compressed slice plus one.
            self.assertEqual(compressed_indices_mth(result).shape, (*b, shape[compressed_dim] + 1,))
            self.assertEqual(plain_indices_mth(result).shape, (*b, 0,))
            self.assertEqual(result.values().shape, (*b, 0,))
            self.assertEqual(result._nnz(), 0)
            self.assertEqual(compressed_indices_mth(result).device, torch.device(device))
            self.assertEqual(plain_indices_mth(result).device, torch.device(device))
            self.assertEqual(result.values().device, torch.device(device))
            self.assertEqual(compressed_indices_mth(result).dtype, torch.int64)
            self.assertEqual(plain_indices_mth(result).dtype, torch.int64)
            self.assertEqual(result.values().dtype, dtype)
    @skipMeta
    @sparse_compressed_nonblock_layouts()
    @dtypes(*all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16))
    def test_empty_errors(self, layout, device, dtype):
        """torch.empty must reject sizes that are not 2-D (plus batch dims)
        for compressed layouts."""
        with self.assertRaisesRegex(RuntimeError,
                                    "torch.empty: Only batched sparse compressed \\(non-block\\) tensors are supported"
                                    ", but got size"):
            torch.empty((5,), dtype=dtype, device=device, layout=layout)
    @skipMeta
    @all_sparse_compressed_layouts()
    @dtypes(*all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16))
    def test_clone(self, layout, device, dtype):
        """clone() of a sparse compressed tensor must compare equal to its source."""
        for compressed_indices, plain_indices, values, size in self._generate_small_inputs(
                layout, device, dtype, index_dtype=torch.int32):
            sparse = torch.sparse_compressed_tensor(compressed_indices, plain_indices, values, size,
                                                    dtype=dtype, layout=layout, device=device)
            cloned_sparse = sparse.clone()
            self.assertEqual(sparse, cloned_sparse)
@all_sparse_compressed_layouts()
def test_print(self, layout, device):
compressed_indices_mth, plain_indices_mth = sparse_compressed_indices_methods[layout]
printed = []
for enable_hybrid in [False, True]:
for index_dtype in [torch.int32, torch.int64]:
for dtype in [torch.float32, torch.float64]:
for compressed_indices, plain_indices, values, size in self._generate_small_inputs(
layout, device, dtype, index_dtype, enable_hybrid=enable_hybrid):
block_ndim = 2 if layout in {torch.sparse_bsr, torch.sparse_bsc} else 0
base_ndim = 2
batch_ndim = compressed_indices.dim() - 1
dense_ndim = values.dim() - batch_ndim - block_ndim - 1
if enable_hybrid and dense_ndim == 0:
# non-hybrid cases are covered by the enable_hybrid==False loop
continue
batchsize = size[:batch_ndim]
basesize = size[batch_ndim:batch_ndim + base_ndim]
densesize = size[batch_ndim + base_ndim:]
assert len(densesize) == dense_ndim
printed.append("########## {}/{}/size={}+{}+{} ##########".format(
dtype, index_dtype, batchsize, basesize, densesize))
x = torch.sparse_compressed_tensor(compressed_indices,
plain_indices,
values, size, dtype=dtype, layout=layout, device=device)
printed.append("# sparse tensor")
printed.append(str(x))
printed.append(f"# _{compressed_indices_mth.__name__}")
printed.append(str(compressed_indices_mth(x)))
printed.append(f"# _{plain_indices_mth.__name__}")
printed.append(str(plain_indices_mth(x)))
printed.append("# _values")
printed.append(str(x.values()))
printed.append('')
printed.append('')
orig_maxDiff = self.maxDiff
self.maxDiff = None
try:
self.assertExpected('\n'.join(printed))
self.maxDiff = orig_maxDiff
except Exception:
self.maxDiff = orig_maxDiff
raise
@skipMeta
@all_sparse_compressed_layouts()
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_copy(self, layout, device, dtype):
def run_test(shape, blocksize, nnz, index_type):
a = self.genSparseCompressedTensor(shape, nnz, dtype=dtype, layout=layout, device=device,
index_dtype=index_dtype, blocksize=blocksize)
b = self.genSparseCompressedTensor(shape, nnz, dtype=dtype, layout=layout, device=device,
index_dtype=index_dtype, blocksize=blocksize)
a.copy_(b)
self.assertEqual(a, b)
ns = [(9, 3), (2, 1), (0, 0)] # (number of dimensions, the corresponding block size)
batch_shapes = [(), (2,), (2, 3)]
for ((m, bm), (n, bn), b), index_dtype in zip(itertools.product(ns, ns, batch_shapes), [torch.int32, torch.int64]):
blocksize = (bm, bn) if layout in {torch.sparse_bsr, torch.sparse_bsc} else ()
run_test((*b, m, n), blocksize, 0, index_dtype)
run_test((*b, m, n), blocksize, m * n, index_dtype)
    @skipMeta
    @all_sparse_compressed_layouts()
    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
    def test_copy_errors(self, layout, device, dtype):
        """copy_ between sparse compressed tensors must raise on layout,
        nnz, shape, and block-size mismatches."""
        # Block layouts need a 2D blocksize and shapes divisible by it;
        # plain CSR/CSC layouts use an empty blocksize.
        blocksize = (2, 3) if layout in {torch.sparse_bsr, torch.sparse_bsc} else ()
        nnz = 6 if layout in {torch.sparse_bsr, torch.sparse_bsc} else 1
        shape1 = (2 * 6, 3 * 6) if layout in {torch.sparse_bsr, torch.sparse_bsc} else (2, 3)
        for index_dtype in [torch.int32, torch.int64]:
            a = self.genSparseCompressedTensor(shape1, 0, dtype=dtype, layout=layout, device=device,
                                               index_dtype=index_dtype, blocksize=blocksize)
            # Copying from a strided (dense) tensor: layout mismatch.
            with self.assertRaisesRegex(RuntimeError,
                                        "copy of sparse compressed tensors having different layouts is not supported."):
                a.copy_(torch.empty(a.shape, dtype=dtype, device=device))
            b = self.genSparseCompressedTensor(shape1, nnz, dtype=dtype, layout=layout, device=device,
                                               index_dtype=index_dtype, blocksize=blocksize)
            # a has 0 specified elements, b has nnz > 0: nnz mismatch.
            assert a._nnz() != b._nnz(), (a._nnz(), b._nnz())
            with self.assertRaisesRegex(RuntimeError,
                                        "only sparse compressed tensors with the same number of specified elements are supported."):
                a.copy_(b)
            # Transposed shape: per-dimension size mismatch.
            shape2 = tuple(reversed(shape1))
            c = self.genSparseCompressedTensor(shape2, nnz, dtype=dtype, layout=layout, device=device,
                                               index_dtype=index_dtype, blocksize=blocksize)
            with self.assertRaisesRegex(
                    RuntimeError,
                    "expected shapes of self and src to match along dimension"):
                b.copy_(c)
            if blocksize:
                # Same overall shape but transposed block size: block mismatch.
                blocksize1 = tuple(reversed(blocksize))
                d = self.genSparseCompressedTensor(shape1, nnz, dtype=dtype, layout=layout, device=device,
                                                   index_dtype=index_dtype, blocksize=blocksize1)
                with self.assertRaisesRegex(RuntimeError,
                                            "copy of sparse compressed tensors having different block sizes is not supported"):
                    b.copy_(d)
def _smallest_divisor(self, n):
for i in range(2, int(n ** 0.5) + 1):
if n % i == 0:
return i
return n
    @all_sparse_compressed_layouts()
    @ops(_sparse_compressed_ops)
    def test_consistency(self, layout, device, dtype, op):
        """Applying ``op`` to a sparse compressed tensor must give the same
        result as applying it to the equivalent strided (dense) tensor."""
        # TODO: Normally, we should use DecorateInfo instead of
        # skipTest but this requires implementing OpInfo support for
        # layout as a test parameter (similar to device and dtype).
        if not (layout == torch.sparse_csr and op.supports_sparse_csr
                or layout == torch.sparse_csc and op.supports_sparse_csc
                or layout == torch.sparse_bsr and op.supports_sparse_bsr
                or layout == torch.sparse_bsc and op.supports_sparse_bsc):
            self.skipTest(f"{op.name} does not support input with {layout} layout")
        # FIXME: remove in followup once integer support is landed for segment_reduce
        if (layout == torch.sparse_csr and not dtype.is_floating_point
                and op.name in ('_masked.mean', '_masked.amax', '_masked.amin')):
            self.skipTest(f"{op.name} does not support input with {layout} layout")
        require_mask = isinstance(op, ReductionOpInfo) and '_masked.' in op.name
        if require_mask and layout in {torch.sparse_bsr, torch.sparse_bsc}:
            self.skipTest(f"{op.name} does not support input with {layout} layout")
        if layout is torch.sparse_bsc:
            self.skipTest(f"test requires conversion from Strided layout to {layout} layout")
        samples = list(op.sample_inputs(device, dtype))
        # Fail early to prevent silent success with this test
        ndims_equals_2d = (s.input.ndim == 2 for s in samples)
        if not any(ndims_equals_2d):
            raise ValueError("Expected at least one 2D tensor in samples.")
        count = 0
        for sample in samples:
            assert torch.is_tensor(sample.input)
            # Sparse CSR/CSC only supports 2D tensors as inputs
            if sample.input.ndim != 2:
                continue
            if isinstance(op, ReductionOpInfo):
                # Reductions on sparse compressed require keepdim=True
                if not sample.kwargs.get('keepdim'):
                    continue
                # Reductions on sparse compressed tensors require explicit mask
                if require_mask and sample.kwargs.get('mask') is None:
                    continue
            expected = op(sample.input, **sample.kwargs)
            assert torch.is_tensor(expected)
            # Use smallest non-trivial blocksize for the given input shape:
            blocksize = tuple(map(self._smallest_divisor, sample.input.shape[-2:]))
            if layout is torch.sparse_bsr:
                sparse = sample.input.to_sparse_bsr(blocksize)
            elif layout is torch.sparse_bsc:
                sparse = sample.input.to_sparse_bsc(blocksize)
            elif layout is torch.sparse_csr:
                sparse = sample.input.to_sparse_csr()
            elif layout is torch.sparse_csc:
                sparse = sample.input.to_sparse_csc()
            else:
                assert 0, layout
            assert torch.is_tensor(sparse)
            output = op(sparse, **sample.kwargs)
            assert torch.is_tensor(output)
            strided_output = output.to_dense()
            if require_mask:
                # Masked-out positions are unspecified in the sparse result;
                # zero them in the dense reference before comparing.
                output_mask = torch._masked._output_mask(op.op, sample.input, **sample.kwargs)
                expected.masked_fill_(~output_mask, 0)
            self.assertEqual(strided_output, expected)
            count += 1
        # Better fail late to prevent silent success with this test
        if not count:
            raise ValueError("Expected at least one sample with keepdim and/or explicit mask for reductions.")
@skipMeta
@all_sparse_compressed_layouts()
@all_sparse_compressed_layouts('layout2')
@dtypes(*all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16))
def test_empty_like(self, layout, layout2, device, dtype):
for compressed_indices, plain_indices, values, size in self._generate_small_inputs(layout):
sparse = torch.sparse_compressed_tensor(compressed_indices, plain_indices, values, size,
dtype=dtype, layout=layout, device=device)
if layout == layout2:
result = torch.empty_like(sparse, layout=layout2)
compressed_indices_mth, plain_indices_mth = sparse_compressed_indices_methods[result.layout]
torch._validate_sparse_compressed_tensor_args(compressed_indices_mth(result),
plain_indices_mth(result),
result.values(),
result.shape,
result.layout)
self.assertEqual(sparse.shape, result.shape)
else:
self.assertRaisesRegex(
RuntimeError,
"empty_like with different sparse layout is not supported",
lambda: torch.empty_like(sparse, layout=layout2)
)
@skipMeta
@all_sparse_compressed_layouts()
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_validate(self, layout, device, dtype):
for index_dtype in [torch.int32, torch.int64]:
for compressed_indices, plain_indices, values, size in self._generate_small_inputs(
layout, device, dtype, index_dtype, enable_batched=True, enable_hybrid=True):
torch._validate_sparse_compressed_tensor_args(compressed_indices, plain_indices, values, size, layout)
    def _generate_invalid_input(self, layout, device):
        """Yield tuples ``(label, compressed_indices, plain_indices, values,
        size, errmsg)`` of invalid sparse-compressed constructor arguments
        together with a regex for the error the invariant checker is
        expected to raise.

        ``errmsg`` uses the generic placeholder names compressed_indices/
        plain_indices/plain_dim/compressed_dim, which test_invalid_input
        rewrites into the layout-specific names before matching.
        """
        from functools import partial

        shape, values = self._generate_small_inputs_utils(layout, device=device)

        # Convenience constructors bound to the target device.
        tensor = partial(torch.tensor, device=device)
        values = partial(values, device=device)

        yield ('incontiguous compressed_indices',
               tensor([0, -1, 2, -1, 4, -1])[::2],
               tensor([0, 1, 0, 2]),
               values([1, 2, 3, 4]),
               shape((2, 3)),
               'expected compressed_indices to be a strided and contiguous tensor')

        yield ('incontiguous plain_indices',
               tensor([0, 2, 4]),
               tensor([0, -1, 1, -1, 0, -1, 2, -1])[::2],
               values([1, 2, 3, 4]),
               shape((2, 3)),
               'expected plain_indices to be a strided and contiguous tensor')

        yield ('incontiguous values',
               tensor([0, 2, 4]),
               tensor([0, 1, 0, 2]),
               values([1, 1, 2, 2, 3, 3, 4, 4])[::2],
               shape((2, 3)),
               'expected values to be a strided and contiguous tensor')

        yield ('0-D compressed_indices',
               tensor(0),
               tensor([0, 1, 0, 2]),
               values([1, 2, 3, 4]),
               shape((2, 3)),
               'compressed_indices must have dimensionality >= 1 but got 0')

        yield ('compressed/plain_indices mismatch of dimensionalites',
               tensor([[0, 2, 4]]),
               tensor([0, 1, 0, 2]),
               values([1, 2, 3, 4]),
               shape((2, 3)),
               'compressed_indices and plain_indices dimensionalities must be equal but got 2 and 1, respectively')

        # The block/batch dimensionality in the message differs per layout family.
        if layout in {torch.sparse_csr, torch.sparse_csc}:
            yield ('indices and values mismatch of dimensionalites',
                   tensor([[0, 2, 4]]),
                   tensor([[0, 1, 0, 2]]),
                   values([1, 2, 3, 4]),
                   shape((2, 3)),
                   r'values must have dimensionality > sum of batch and block dimensionalities \(=1 \+ 0\) but got 1')
        else:
            yield ('indices and values mismatch of dimensionalites',
                   tensor([[0, 2, 4]]),
                   tensor([[0, 1, 0, 2]]),
                   values([1, 2, 3, 4]),
                   shape((2, 3)),
                   r'values must have dimensionality > sum of batch and block dimensionalities \(=1 \+ 2\) but got 3')

        yield ('invalid size',
               tensor([0, 2, 4]),
               tensor([0, 1, 0, 2]),
               values([1, 2, 3, 4]),
               (2,),
               r'tensor dimensionality must be sum of batch, base, and dense dimensionalites \(=0 \+ 2 \+ 0\) but got 1')

        yield ('invalid batchsize',
               tensor([[0, 2, 4]]),
               tensor([[0, 1, 0, 2]]),
               values([[1, 2, 3, 4]]),
               shape((2, 2, 3), 1),
               r'all batch dimensions of compressed_indices \(=\[1\]\), plain_indices \(=\[1\]\), '
               r'and values \(=\[1\]\) must be equal to tensor batch dimensions \(=\[2\]\)')

        if layout is torch.sparse_bsr:
            yield ('invalid blocksize',
                   tensor([0, 2, 4]),
                   tensor([0, 1, 0, 2]),
                   tensor([[[1, 11]], [[2, 22]], [[3, 33]], [[4, 33]]]),
                   shape((2, 3)),
                   r'tensor shape\[1\] \(=3\) must be divisible with blocksize\[1\] \(=2\) as defined by values shape')

        if layout is torch.sparse_bsc:
            yield ('invalid blocksize',
                   tensor([0, 2, 4]),
                   tensor([0, 1, 0, 2]),
                   tensor([[[1, 11]], [[2, 22]], [[3, 33]], [[4, 33]]]),
                   shape((3, 2)),
                   r'tensor shape\[1\] \(=3\) must be divisible with blocksize\[1\] \(=2\) as defined by values shape')

        yield ('invalid compressed_indices shape',
               tensor([0, 2, 3, 4]),
               tensor([0, 1, 0, 2]),
               values([1, 2, 3, 4]),
               shape((2, 3)),
               r'compressed_indices.shape\[-1\] must be equal to the number of compressed_indices_names \+ 1 \(=3\), but got 4')

        yield ('invalid compressed_indices shape',
               tensor([0, 2, 4]),
               tensor([0, 1, 0, 1, 2]),
               values([1, 2, 3, 4]),
               shape((2, 3)),
               r'plain_indices.shape\[-1\] must be equal to nnz \(=4\) as defined by values.shape\[0\], but got 5')

        # NOTE(review): 'bot got' below reproduces the runtime error message
        # verbatim (typo included) — do not "fix" it here.
        yield ('compressed/plain_indices mismatch of dtype',
               tensor([0, 2, 4], dtype=torch.int32),
               tensor([0, 1, 0, 2], dtype=torch.int64),
               values([1, 2, 3, 4]),
               shape((2, 3)),
               r'compressed_indices and plain_indices must have the same dtype, bot got Int and Long, respectively')

        yield ('invalid compressed/plain_indices dtype',
               tensor([0, 2, 4], dtype=torch.int16),
               tensor([0, 1, 0, 2], dtype=torch.int16),
               values([1, 2, 3, 4]),
               shape((2, 3)),
               r'compressed_indices and plain_indices dtype must be Int or Long, but got Short')

        # CUDA kernel asserts are not recoverable, so we skip these for now
        if torch.device(device).type == 'cpu':
            yield ('invalid compressed_indices[0]',
                   tensor([1, 2, 4]),
                   tensor([0, 1, 0, 2]),
                   values([1, 2, 3, 4]),
                   shape((2, 3)),
                   r'`compressed_indices\[..., 0\] == 0` is not satisfied.')

            yield ('invalid compressed_indices[-1]',
                   tensor([0, 2, 5]),
                   tensor([0, 1, 0, 2]),
                   values([1, 2, 3, 4]),
                   shape((2, 3)),
                   r'`compressed_indices\[..., -1\] == nnz` is not satisfied.')

            yield ('invalid compressed_indices.diff(dim=-1)',
                   tensor([0, 0, 4]),
                   tensor([0, 1, 0, 2]),
                   values([1, 2, 3, 4]),
                   shape((2, 3)),
                   r'0 <= compressed_indices\[..., 1:\] - compressed_indices\[..., :\-1\] <= plain_dim` is not satisfied.')

            yield ('invalid compressed_indices.diff(dim=-1)',
                   tensor([0, 5, 4]),
                   tensor([0, 1, 0, 2]),
                   values([1, 2, 3, 4]),
                   shape((2, 3)),
                   r'0 <= compressed_indices\[..., 1:\] - compressed_indices\[..., :\-1\] <= plain_dim` is not satisfied.')

            yield ('invalid min(plain_indices)',
                   tensor([0, 2, 4]),
                   tensor([0, -1, 0, 3]),
                   values([1, 2, 3, 4]),
                   shape((2, 3)),
                   r'`0 <= plain_indices < plain_dim` is not satisfied.')

            yield ('invalid max(plain_indices)',
                   tensor([0, 2, 4]),
                   tensor([0, 1, 0, 3]),
                   values([1, 2, 3, 4]),
                   shape((2, 3)),
                   r'`0 <= plain_indices < plain_dim` is not satisfied.')

            yield ('non-coalesced',
                   tensor([0, 2, 4]),
                   tensor([1, 0, 0, 2]),
                   values([1, 2, 3, 4]),
                   shape((2, 3)),
                   r'`plain_indices\[..., compressed_indices\[..., i - 1\]:compressed_indices\[..., i\]\] '
                   'for all i = 1, ..., compressed_dim '
                   'are sorted and distinct along the last dimension values` is not satisfied.')

        if TEST_CUDA and torch.device(device).type == 'cpu':
            yield ('indices and values mismatch of device',
                   torch.tensor([0, 2, 4]),
                   torch.tensor([0, 1, 0, 1]),
                   values([1, 2, 3, 4], device='cuda'),
                   shape((2, 3)),
                   r'device of compressed_indices \(=cpu\) must match device of values \(=cuda:0\)')
            yield ('compressed_indices and values mismatch of device',
                   torch.tensor([0, 2, 4], device='cuda'),
                   torch.tensor([0, 1, 0, 1]),
                   values([1, 2, 3, 4]),
                   shape((2, 3)),
                   r'Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!')
            yield ('compressed/plain_indices mismatch of device',
                   torch.tensor([0, 2, 4], device='cuda'),
                   torch.tensor([0, 1, 0, 1]),
                   values([1, 2, 3, 4], device='cuda'),
                   shape((2, 3)),
                   r'Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!')
    @skipMeta
    @all_sparse_compressed_layouts()
    @parametrize('target', [subtest('validate_sparse_compressed_tensor_args'),
                            subtest('sparse_compressed_tensor'),
                            subtest('sparse_compressed_tensor_no_size')])
    def test_invalid_input(self, layout, device, target):
        """Feed every invalid input from _generate_invalid_input to the
        selected ``target`` entry point and check that the layout-specialized
        error message is raised."""
        for label, compressed_indices, plain_indices, values, size, errmsg in self._generate_invalid_input(layout, device):
            # Rewrite the generic placeholder names in errmsg into the
            # layout-specific terminology (row/column, block variants).
            if layout is torch.sparse_bsr:
                errmsg = errmsg.replace('compressed_indices_name', 'row block').replace('plain_indices_name', 'column block')
            elif layout is torch.sparse_bsc:
                errmsg = errmsg.replace('compressed_indices_name', 'column block').replace('plain_indices_name', 'row block')
            elif layout is torch.sparse_csr:
                errmsg = errmsg.replace('compressed_indices_name', 'row').replace('plain_indices_name', 'column')
            elif layout is torch.sparse_csc:
                errmsg = errmsg.replace('compressed_indices_name', 'column').replace('plain_indices_name', 'row')
            if layout in {torch.sparse_csr, torch.sparse_bsr}:
                errmsg = errmsg.replace('compressed_indices', 'crow_indices') \
                               .replace('plain_indices', 'col_indices') \
                               .replace('plain_dim', 'ncols') \
                               .replace('compressed_dim', 'nrows')
            else:
                errmsg = errmsg.replace('compressed_indices', 'ccol_indices') \
                               .replace('plain_indices', 'row_indices') \
                               .replace('plain_dim', 'nrows') \
                               .replace('compressed_dim', 'ncols')

            if target == 'sparse_compressed_tensor_no_size' and label in {
                    'invalid size', 'invalid batchsize', 'invalid compressed_indices shape', 'invalid max(plain_indices)',
                    'invalid blocksize'}:
                # Skip invalid size input as a valid size is estimated for other inputs
                continue

            with self.assertRaisesRegex(RuntimeError, errmsg):
                if target == 'validate_sparse_compressed_tensor_args':
                    torch._validate_sparse_compressed_tensor_args(compressed_indices, plain_indices, values, size, layout)
                elif target == 'sparse_compressed_tensor':
                    torch.sparse_compressed_tensor(compressed_indices, plain_indices, values, size, layout=layout)
                elif target == 'sparse_compressed_tensor_no_size':
                    torch.sparse_compressed_tensor(compressed_indices, plain_indices, values, layout=layout)
                else:
                    raise NotImplementedError(target)
@skipMeta
@onlyCPU
@all_sparse_compressed_layouts()
def test_dim(self, layout):
for compressed_indices, plain_indices, values, size in self._generate_small_inputs(layout):
batch_dim = compressed_indices.dim() - 1
sparse_dim = 2
block_dim = 2 if layout in {torch.sparse_bsr, torch.sparse_bsc} else 0
dense_dim = values.dim() - batch_dim - block_dim - 1
sparse = torch.sparse_compressed_tensor(compressed_indices, plain_indices, values, size, layout=layout)
self.assertEqual(sparse.sparse_dim(), sparse_dim)
self.assertEqual(sparse.dense_dim(), dense_dim)
class TestSparseCSR(TestCase):
def test_csr_stride(self):
a = self.genSparseCSRTensor((3, 3), 3, dtype=torch.float, device=self.device_type, index_dtype=torch.int64)
with self.assertRaisesRegex(RuntimeError, "Sparse CSR tensors do not have strides"):
a.stride()
with self.assertRaisesRegex(RuntimeError, "Sparse CSR tensors do not have strides"):
a.stride(-1)
def test_csr_storage(self):
a = self.genSparseCSRTensor((3, 3), 3, dtype=torch.float, device=self.device_type, index_dtype=torch.int64)
with self.assertRaisesRegex(RuntimeError, "Cannot access storage of SparseCsrTensorImpl"):
a.storage()
def test_csr_is_contiguous(self):
a = self.genSparseCSRTensor((3, 3), 3, dtype=torch.float, device=self.device_type, index_dtype=torch.int64)
with self.assertRaisesRegex(RuntimeError, "Tensors of type SparseCsrTensorImpl do not have is_contiguous"):
a.is_contiguous()
def test_csr_double_to_sparse_csr(self):
a = self.genSparseCSRTensor((3, 3), 3, dtype=torch.float, device=self.device_type, index_dtype=torch.int64)
a.to_sparse_csr().to_sparse_csr()
    @all_sparse_compressed_layouts()
    @parametrize("index_dtype", [torch.int32, torch.int64])
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
    def test_select(self, device, dtype, index_dtype, layout):
        """select() on batch and dense dimensions of sparse compressed
        tensors; sparse-dimension select is checked for CSR/CSC and must
        raise for block layouts and for batched inputs."""
        # Per-layout accessor/constructor tables.
        compressed_indices_mth = {
            torch.sparse_csr: torch.Tensor.crow_indices,
            torch.sparse_bsr: torch.Tensor.crow_indices,
            torch.sparse_csc: torch.Tensor.ccol_indices,
            torch.sparse_bsc: torch.Tensor.ccol_indices,
        }[layout]

        plain_indices_mth = {
            torch.sparse_csr: torch.Tensor.col_indices,
            torch.sparse_bsr: torch.Tensor.col_indices,
            torch.sparse_csc: torch.Tensor.row_indices,
            torch.sparse_bsc: torch.Tensor.row_indices,
        }[layout]
        create_tensor_mth = {
            torch.sparse_csr: torch.sparse_csr_tensor,
            torch.sparse_bsr: torch.sparse_bsr_tensor,
            torch.sparse_csc: torch.sparse_csc_tensor,
            torch.sparse_bsc: torch.sparse_bsc_tensor,
        }[layout]

        shape = (2, 3, 6, 10)
        nnz = 6
        blocksize = (2, 2) if layout in {torch.sparse_bsr, torch.sparse_bsc} else ()
        sparse = self.genSparseCompressedTensor(
            shape, nnz, device=device, layout=layout, dtype=dtype, index_dtype=index_dtype, blocksize=blocksize)
        comp_indices = compressed_indices_mth(sparse)
        plain_indices = plain_indices_mth(sparse)
        values = sparse.values()
        # select from batch dimensions
        sparse_selected12 = sparse.select(1, 2)
        # Batch select is equivalent to selecting the same slice of each component.
        expected_sparse_selected12 = create_tensor_mth(comp_indices.select(1, 2).contiguous(),
                                                       plain_indices.select(1, 2).contiguous(),
                                                       values.select(1, 2).contiguous(),
                                                       size=(2, 6, 10),
                                                       dtype=dtype,
                                                       device=device)
        self.assertEqual(expected_sparse_selected12, sparse_selected12)

        # Select from dense dimensions
        sparse_hybrid = self.genSparseCompressedTensor(shape + (4, 2),
                                                       nnz,
                                                       device=device,
                                                       layout=layout,
                                                       dtype=dtype,
                                                       index_dtype=index_dtype,
                                                       blocksize=blocksize,
                                                       dense_dims=2)
        sparse_hybrid_dense_selected = sparse_hybrid.select(4, 1)
        # Dense-dimension select maps directly onto the values tensor.
        expected_sparse_hybrid_dense_selected = sparse_hybrid.values().select(-2, 1)
        self.assertEqual(expected_sparse_hybrid_dense_selected, sparse_hybrid_dense_selected)

        # selecting rows/col with batch dims not allowed
        sparse_non_batched = sparse[0, 0]
        # select from sparse dimensions if layout supports it
        if layout in {torch.sparse_csr, torch.sparse_csc}:
            for select_args in [(0, 0), (1, 1)]:
                sparse_selected = sparse_non_batched.select(*select_args)
                dense_selected = sparse_non_batched.to_dense().select(*select_args)
                self.assertEqual(dense_selected, sparse_selected)

            self.assertEqual(sparse[0, 0, 0, 0], sparse.to_dense()[0, 0, 0, 0])
            # assigning to sparse through indexing is disabled, not tested generally because only layouts supporting
            # sparse dim select will get far enough to test
            with self.assertRaisesRegex(TypeError, "Cannot assign to a sparse tensor"):
                sparse[0, 0, 0, 0] = 99.0

            # select from sparse dimensions without removing batch dims, not tested generally because only layouts
            # supporting sparse dim select will get far enough
            msg = "selecting rows or columns is not implemented for batched sparse compressed tensors."
            with self.assertRaisesRegex(RuntimeError, msg):
                sparse.select(-2, 0)

            with self.assertRaisesRegex(RuntimeError, msg):
                sparse.select(-1, 0)
        # ensure raises if layout does not support
        else:
            msg = (
                "selecting non-batch dimensions is currently only supported for non-blocked sparse "
                "compressed layouts tensors.")
            with self.assertRaisesRegex(RuntimeError, msg):
                sparse_non_batched.select(0, 0)
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_resize(self, device, dtype):
batch_shapes = [(), (2,), (2, 3)]
for index_dtype, b in zip([torch.int32, torch.int64], batch_shapes):
shape = (*b, 2, 3)
nnz = 6
a = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)
new_shape = (*b, 4, 5)
a.resize_(new_shape)
self.assertEqual(a.shape, new_shape)
# resize to larger shape doesn't add specified elements
self.assertEqual(a._nnz(), nnz)
new_shape = (*b, 1, 5)
a.resize_(new_shape)
self.assertEqual(a.shape, new_shape)
# resize to smaller shape trims specified elements
self.assertEqual(a._nnz(), 5)
# trim batched dimensions
a.resize_(new_shape[-2], new_shape[-1])
self.assertEqual(a.shape, (new_shape[-2], new_shape[-1]))
self.assertEqual(a._nnz(), 5)
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_resize_errors(self, device, dtype):
for index_dtype in [torch.int32, torch.int64]:
shape = (2, 3)
nnz = 6
a = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)
with self.assertRaisesRegex(RuntimeError, "torch.resize_: Only batched sparse CSR matrices are supported"):
new_shape = (4,)
a.resize_(new_shape)
# resizing of columns to smaller size is not implemented
with self.assertRaisesRegex(
RuntimeError,
"torch.resize_: Resizing columns of sparse CSR tensors to a smaller value is not supported.",
):
new_shape = (2, 2)
a.resize_(new_shape)
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_sparse_csr_from_dense(self, device, dtype):
dense = torch.tensor([[4, 5, 0], [0, 0, 0], [1, 0, 0]], dtype=dtype, device=device)
sparse = dense.to_sparse_csr()
self.assertEqual(torch.tensor([0, 2, 2, 3], dtype=torch.int64), sparse.crow_indices())
self.assertEqual(torch.tensor([0, 1, 0], dtype=torch.int64), sparse.col_indices())
self.assertEqual(torch.tensor([4, 5, 1], dtype=dtype), sparse.values())
dense = torch.tensor([[0, 0, 0], [0, 0, 1], [1, 0, 0]], dtype=dtype, device=device)
sparse = dense.to_sparse_csr()
self.assertEqual(torch.tensor([0, 0, 1, 2], dtype=torch.int64), sparse.crow_indices())
self.assertEqual(torch.tensor([2, 0], dtype=torch.int64), sparse.col_indices())
self.assertEqual(torch.tensor([1, 1], dtype=dtype), sparse.values())
dense = torch.tensor([[2, 2, 2], [2, 2, 2], [2, 2, 2]], dtype=dtype, device=device)
sparse = dense.to_sparse_csr()
self.assertEqual(torch.tensor([0, 3, 6, 9], dtype=torch.int64), sparse.crow_indices())
self.assertEqual(torch.tensor([0, 1, 2] * 3, dtype=torch.int64), sparse.col_indices())
self.assertEqual(torch.tensor([2] * 9, dtype=dtype), sparse.values())
    def _test_sparse_compressed_to_dense(self, device, dtype, layout):
        """Shared driver for CSR/CSC to_dense(): round-trip plain and batched
        tensors through the compressed layout and compare against the dense
        original."""
        # 'csr' or 'csc', used to pick to_sparse_{csr,csc} / sparse_{csr,csc}_tensor.
        compressed_format_str = str(layout)[-3:]

        def to_compressed(t):
            # Convert a strided tensor to the layout under test.
            return getattr(t, f"to_sparse_{compressed_format_str}")()

        def compressed_constructor(*input, **kwargs):
            # Layout-specific tensor constructor.
            constructor = getattr(torch, f"sparse_{compressed_format_str}_tensor")
            return constructor(*input, **kwargs)

        def get_dense_shape(shape, batch_ndim):
            # For CSC the two compressed dims appear transposed relative to
            # the stored components, so reverse that slice of the shape.
            if layout is torch.sparse_csc:
                compressed_dims_slice = slice(batch_ndim + 1, batch_ndim - 1, -1)
            else:
                compressed_dims_slice = slice(batch_ndim, batch_ndim + 2)
            return shape[:batch_ndim] + shape[compressed_dims_slice] + shape[batch_ndim + 2:]

        def transpose(t, batch_ndim):
            # Undo the CSC transposition when building the dense reference.
            if layout is torch.sparse_csc:
                return t.transpose(batch_ndim, batch_ndim + 1)
            return t

        # Plain 2D round-trips, including empty edge sizes.
        mn = [5, 2, 0]
        for (m, n) in itertools.product(mn, mn):
            size = (m, n)
            dense = make_tensor(size, dtype=dtype, device=device)
            sparse = to_compressed(dense)
            self.assertEqual(sparse.to_dense(), dense)

        # Batched round-trip: same 2x3-sparse pattern repeated over a (2, 3) batch.
        batch_shape = (2, 3)
        compressed_indices = torch.tensor([0, 3, 5], device=device).repeat(6, 1).reshape(*batch_shape, -1)
        plain_indices = torch.tensor([0, 1, 2, 0, 1], device=device).repeat(6, 1).reshape(*batch_shape, -1)
        values = torch.tensor([1, 2, 1, 3, 4], device=device, dtype=dtype).repeat(6, 1).reshape(*batch_shape, -1)
        sparse = compressed_constructor(compressed_indices, plain_indices, values, dtype=dtype, device=device)
        dense_shape = get_dense_shape(sparse.shape, len(batch_shape))
        dense = torch.tensor([[1, 2, 1], [3, 4, 0]], dtype=dtype, device=device).repeat(6, 1).reshape(dense_shape)
        self.assertEqual(sparse.to_dense(), transpose(dense, len(batch_shape)))
    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
    def test_sparse_csr_to_dense(self, device, dtype):
        """CSR instantiation of _test_sparse_compressed_to_dense."""
        self._test_sparse_compressed_to_dense(device, dtype, torch.sparse_csr)
    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
    def test_sparse_csc_to_dense(self, device, dtype):
        """CSC instantiation of _test_sparse_compressed_to_dense."""
        self._test_sparse_compressed_to_dense(device, dtype, torch.sparse_csc)
    @skipMeta
    @skipCPUIfNoMklSparse
    @coalescedonoff
    @dtypes(torch.double)
    def test_coo_to_csr_convert(self, device, dtype, coalesced):
        """COO -> CSR conversion: invalid 2D index input raises; converted
        tensors agree with the COO original for to_dense and matmul, and the
        stored CSR components match hand-computed expectations."""
        with self.assertRaisesRegex(RuntimeError, "Input is supposed to be a vector"):
            torch._convert_indices_from_coo_to_csr(
                torch.randint(100, (5, 5), device=device),
                size=100)

        size = (5, 5)
        sparse_dim = 2
        nnz = 10
        sparse_coo, _, _ = self.genSparseTensor(size, sparse_dim, nnz, coalesced, device, dtype)
        sparse_csr = sparse_coo.to_sparse_csr()

        self.assertTrue(sparse_csr.is_sparse_csr)
        self.assertEqual(sparse_csr.to_dense(), sparse_coo.to_dense())

        vec = torch.randn((5, 1), dtype=dtype, device=device)
        coo_product = sparse_coo.matmul(vec)
        csr_product = sparse_csr.matmul(vec)

        self.assertEqual(coo_product, csr_product)

        vec = torch.randn((100, 1), dtype=dtype, device=device)
        # Fixed COO indices; conversion must sort columns within each row.
        index = torch.tensor([
            [1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
            [92, 31, 62, 50, 22, 65, 89, 74, 56, 34],
        ], dtype=torch.int32)
        values = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype, device=device)
        coo = torch.sparse_coo_tensor(index, values, torch.Size([100, 100]), dtype=dtype, device=device)
        csr = coo.to_sparse_csr()

        self.assertEqual(coo.matmul(vec), csr.matmul(vec))

        # Expected col_indices/values after row-major sorting of the COO entries.
        col_indices = torch.tensor([
            31, 92, 65, 50, 34, 62, 22, 56, 74, 89
        ], dtype=torch.int64, device=device)
        self.assertEqual(csr.col_indices(), col_indices)

        values = torch.tensor([2, 1, 6, 4, 10, 3, 5, 9, 8, 7], dtype=dtype, device=device)
        self.assertEqual(csr.values(), values)
@parametrize("blocksize", [2, 4])
@dtypes((torch.double, torch.int32), (torch.double, torch.int64))
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
@skipMeta
def test_csr_to_block_csr(self, device, dtypes, blocksize):
for shape in [(24, 24), (12, 24)]:
dtype, index_dtype = dtypes
m, k = shape
nnz = random.randint(0, m * k)
t = self.genSparseCSRTensor((m * blocksize, k * blocksize), nnz, dtype=dtype,
device=device, index_dtype=index_dtype)
st = sp.csr_matrix((t.values().cpu(), t.col_indices().cpu(), t.crow_indices().cpu()), shape=tuple(t.size()))
block_t = t.to_sparse_bsr((blocksize, blocksize))
self.assertEqual(block_t.values().dim(), 3)
self.assertTrue(block_t.layout == torch.sparse_bsr)
block_st = st.tobsr(blocksize=(blocksize, blocksize))
self.assertEqual(block_t.values().cpu(), block_st.data)
self.assertEqual(block_t.col_indices().cpu(), torch.tensor(block_st.indices).to(index_dtype))
self.assertEqual(block_t.crow_indices().cpu(), torch.tensor(block_st.indptr).to(index_dtype))
@dtypes(torch.double)
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
def test_csr_to_block_csr_errors(self, device, dtype):
for index_dtype in [torch.int32, torch.int64]:
nnz = 15
t = self.genSparseCSRTensor((16, 16), nnz, dtype=dtype,
device=device, index_dtype=index_dtype)
with self.assertRaisesRegex(RuntimeError, "must be square."):
block_t = t.to_sparse_bsr((2, 3))
with self.assertRaisesRegex(RuntimeError, r"size \(16, 16\) with block size \(5, 5\)"):
block_t = t.to_sparse_bsr((5, 5))
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_sparse_csr_from_dense_convert_error(self, device, dtype):
size = (4, 2, 4)
dense = make_tensor(size, dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, "Only 2D"):
sparse = dense.to_sparse_csr()
# TODO: Support auto generation of device check for sparse tensors
# See: https://github.com/pytorch/pytorch/issues/59058
@onlyCUDA
@dtypes(torch.double)
def test_matmul_device_mismatch(self, device, dtype):
cpu = torch.rand((10, 10))
cuda = cpu.cuda()
for s, m1, m2 in itertools.product((cpu, cuda), repeat=3):
csr = m1.to_sparse()
if s.device == csr.device == m2.device:
torch.addmm(s, csr, m2)
else:
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
torch.addmm(s, csr, m2)
@skipCPUIfNoMklSparse
@skipCUDAIfNoCusparseGeneric
@dtypes(*floating_and_complex_types())
@dtypesIfCUDA(*floating_and_complex_types_and(
*[torch.half] if SM53OrLater else [],
*[torch.bfloat16] if SM80OrLater else []))
def test_csr_matvec(self, device, dtype):
side = 100
for index_dtype in [torch.int32, torch.int64]:
csr = self.genSparseCSRTensor((side, side), 1000, device=device, dtype=dtype, index_dtype=index_dtype)
vec = torch.randn(side, dtype=dtype, device=device)
res = csr.matmul(vec)
expected = csr.to_dense().matmul(vec)
self.assertEqual(res, expected)
bad_vec = torch.randn(side + 10, dtype=dtype, device=device)
err_msg = "size mismatch, got"
with self.assertRaisesRegex(RuntimeError, err_msg):
csr.matmul(bad_vec)
    @onlyCUDA
    @unittest.skipIf(not CUDA11OrLater, "Only CUDA 11+ is supported")
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_baddbmm(self, device, dtype):
        """baddbmm with a batch-shaped CSR operand must match per-batch addmm."""

        def run_test(c, a, a_batched, b, op_b=False, op_out=False, *, dtype=None, device=None):
            # Random alpha/beta exercise the scaling paths (complex when needed).
            alpha = complex(random.random(), random.random()) if dtype.is_complex else random.random()
            beta = complex(random.random(), random.random()) if dtype.is_complex else random.random()
            b = b.mH if (op_b and a.shape == b.shape) else b

            actual = torch.baddbmm(c, a_batched, b, alpha=alpha, beta=beta)

            out = torch.empty_like(c.mH if op_out and a.shape == b.shape else c)
            torch.baddbmm(c, a_batched, b, alpha=alpha, beta=beta, out=out)

            # Reference: addmm applied independently to each batch slice.
            expected = [torch.addmm(c[i], a, b[i], alpha=alpha, beta=beta) for i in range(c.shape[0])]
            expected = torch.stack(expected, 0)

            self.assertEqual(actual, out)
            self.assertEqual(actual, expected)

        for index_dtype in [torch.int32, torch.int64]:
            for (m, n, k), batch_size, noncontiguous in zip(itertools.product([2, 5], repeat=3), [1, 3], [True, False]):
                nnz = random.randint(0, m * k)
                a = self.genSparseCSRTensor((m, k), nnz, dtype=dtype, device=device, index_dtype=index_dtype)

                # a_batched is a regular CSR tensor but with a batch dimension in the shape
                a_batched = torch._sparse_csr_tensor_unsafe(
                    a.crow_indices(), a.col_indices(), a.values(), (batch_size, m, k))

                b = make_tensor((batch_size, k, n), dtype=dtype, device=device, noncontiguous=noncontiguous)
                c = make_tensor((batch_size, m, n), dtype=dtype, device=device, noncontiguous=noncontiguous)
                for op_b, op_out in itertools.product([True, False], repeat=2):
                    run_test(c, a, a_batched, b, op_b, op_out, dtype=dtype, device=device)
    @onlyCUDA
    @unittest.skipIf(not CUDA11OrLater, "Only CUDA 11+ is supported")
    @skipCUDAIfNoCusparseGeneric
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_bmm(self, device, dtype):
        """bmm with a batch-shaped CSR operand must match per-batch mm."""

        def run_test(a, a_batched, b, op_b=False, op_out=False, *, dtype=None, device=None):
            b = b.mH if (op_b and a.shape == b.shape) else b

            actual = torch.bmm(a_batched, b)

            out = torch.empty_like(actual.mH if op_out and a.shape == b.shape else actual)
            torch.bmm(a_batched, b, out=out)

            # Reference: mm applied independently to each batch slice.
            expected = [torch.mm(a, b[i]) for i in range(b.shape[0])]
            expected = torch.stack(expected, 0)

            self.assertEqual(actual, out)
            self.assertEqual(actual, expected)

        for index_dtype in [torch.int32, torch.int64]:
            for (m, n, k), batch_size, noncontiguous in zip(itertools.product([2, 5], repeat=3), [1, 3], [True, False]):
                nnz = random.randint(0, m * k)
                a = self.genSparseCSRTensor((m, k), nnz, dtype=dtype, device=device, index_dtype=index_dtype)

                # a_batched is a regular CSR tensor but with a batch dimension in the shape
                a_batched = torch._sparse_csr_tensor_unsafe(
                    a.crow_indices(), a.col_indices(), a.values(), (batch_size, m, k))

                b = make_tensor((batch_size, k, n), dtype=dtype, device=device, noncontiguous=noncontiguous)
                for op_b, op_out in itertools.product([True, False], repeat=2):
                    run_test(a, a_batched, b, op_b, op_out, dtype=dtype, device=device)
    def run_test_block_addmm_addmv(self, addmv_addmm, c, a, b, op_b=False, op_out=False, *, dtype=None, device=None):
        """Shared checker for block-sparse (BSR) addmm/addmv.

        Runs ``addmv_addmm(c, a, b, alpha=?, beta=?)`` with random alpha/beta
        (complex when dtype is complex), both functional and out= variants, and
        compares against a SciPy ``bsr_matrix`` dense reference.
        """
        alpha = complex(random.random(), random.random()) if dtype.is_complex else random.random()
        beta = complex(random.random(), random.random()) if dtype.is_complex else random.random()
        # Optionally conjugate-transpose b (only when shapes allow).
        b = b.mH if (op_b and a.shape == b.shape) else b
        actual = addmv_addmm(c, a, b, alpha=alpha, beta=beta)
        # out= variant, possibly with transposed (non-contiguous) strides.
        out = torch.empty_like(c.mH if op_out and a.shape == b.shape else c)
        addmv_addmm(c, a, b, alpha=alpha, beta=beta, out=out)
        # Build a SciPy BSR view of `a` to compute the dense reference result.
        a_bsr = sp.bsr_matrix(
            (
                a.values().cpu().numpy(),
                a.col_indices().cpu().numpy(),
                a.crow_indices().cpu().numpy(),
            ),
            shape=a.shape,
        )
        # resolve_conj() because numpy() cannot export a conjugated view.
        expected = alpha * (a_bsr * b.cpu().resolve_conj().numpy()) + beta * c.cpu().numpy()
        self.assertEqual(actual, out)
        self.assertEqual(actual, expected)
    # TODO: block_size 1 is broken
    @parametrize("block_size", [2, 3])
    @parametrize("index_dtype", [torch.int32, torch.int64])
    @parametrize("noncontiguous", [True, False])
    @skipCPUIfNoMklSparse
    @unittest.skipIf(not TEST_SCIPY, "SciPy not found")
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-5, torch.complex128: 1e-5})
    def test_block_addmm(self, device, dtype, index_dtype, block_size, noncontiguous):
        """addmm with a BSR matrix, checked via run_test_block_addmm_addmv."""
        for (m, n, k) in itertools.product([2, 5], repeat=3):
            nnz = random.randint(0, m * k)
            if not noncontiguous:
                # Contiguous case: generate CSR and convert to BSR directly.
                a = self.genSparseCSRTensor((m * block_size, k * block_size), nnz,
                                            dtype=dtype, device=device, index_dtype=index_dtype)
                a = a.to_sparse_bsr((block_size, block_size))
            else:
                # Noncontiguous case: assemble BSR by hand from CSR indices and
                # transposed (column-major) value blocks.
                a = self.genSparseCSRTensor((m, k), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
                a_data = make_tensor((nnz, block_size, block_size), dtype=dtype, device=device)
                a_data = a_data.mT if noncontiguous else a_data  # Test column-major blocks
                a = torch._sparse_bsr_tensor_unsafe(a.crow_indices(), a.col_indices(),
                                                    a_data, (m * block_size, k * block_size))
            b = make_tensor((k * block_size, n * block_size), dtype=dtype, device=device, noncontiguous=noncontiguous)
            c = make_tensor((m * block_size, n * block_size), dtype=dtype, device=device, noncontiguous=noncontiguous)
            for op_b, op_out in itertools.product([True, False], repeat=2):
                self.run_test_block_addmm_addmv(torch.addmm, c, a, b, op_b, op_out, dtype=dtype, device=device)
    @parametrize("block_size", [2, 3])
    @parametrize("index_dtype", [torch.int32, torch.int64])
    @parametrize("noncontiguous", [True, False])
    @skipCPUIfNoMklSparse
    @unittest.skipIf(not TEST_SCIPY, "SciPy not found")
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_block_addmv(self, device, dtype, index_dtype, block_size, noncontiguous):
        """addmv with a BSR matrix, checked via run_test_block_addmm_addmv."""
        # TODO: Explicitly disable block size 1 support
        # if (TEST_WITH_ROCM or not TEST_CUSPARSE_GENERIC) and block_size == 1:
        #     return
        for (m, k) in itertools.product([2, 5], repeat=2):
            nnz = random.randint(0, m * k)
            if not noncontiguous:
                # Contiguous case: generate CSR and convert to BSR directly.
                a = self.genSparseCSRTensor((m * block_size, k * block_size), nnz,
                                            dtype=dtype, device=device, index_dtype=index_dtype)
                a = a.to_sparse_bsr((block_size, block_size))
            else:
                # Noncontiguous case: assemble BSR by hand from CSR indices and
                # transposed (column-major) value blocks.
                a = self.genSparseCSRTensor((m, k), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
                a_data = make_tensor((nnz, block_size, block_size), dtype=dtype, device=device)
                a_data = a_data.mT if noncontiguous else a_data  # Test column-major blocks
                a = torch._sparse_bsr_tensor_unsafe(a.crow_indices(), a.col_indices(),
                                                    a_data, (m * block_size, k * block_size))
            b = make_tensor((k * block_size,), dtype=dtype, device=device, noncontiguous=noncontiguous)
            c = make_tensor((m * block_size,), dtype=dtype, device=device, noncontiguous=noncontiguous)
            self.run_test_block_addmm_addmv(torch.addmv, c, a, b, dtype=dtype, device=device)
@parametrize("block_size", [2, 3])
@parametrize("index_dtype", [torch.int32, torch.int64])
@parametrize("noncontiguous", [True, False])
@skipCPUIfNoMklSparse
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_block_triangular_solve(self, device, dtype, index_dtype, block_size, noncontiguous):
def run_test(a, b, upper, transpose, unitriangular, op_out):
if unitriangular and self.device_type == 'cpu':
# TODO: When unitriangular=True results are not correct on CPU
return
if not upper and self.device_type == 'cpu':
# TODO: When upper=False some generated inputs might crash on CPU
return
actual = torch.triangular_solve(b, a, upper=upper, unitriangular=unitriangular, transpose=transpose)
actual_X = actual.solution
actual_A_clone = actual.cloned_coefficient
self.assertTrue(actual_A_clone.numel() == 0)
if a._nnz() == 0:
self.assertTrue(actual_X.isnan().all())
return
# TODO: replace with torch method when implemented to_dense() on block sparse tensor
a_bsr = sp.bsr_matrix(
(
a.values().cpu().numpy(),
a.col_indices().cpu().numpy(),
a.crow_indices().cpu().numpy(),
),
shape=a.shape,
)
expected_X, _ = torch.triangular_solve(
b,
torch.tensor(a_bsr.todense(), device=device),
transpose=transpose,
upper=upper,
unitriangular=unitriangular)
if expected_X.isnan().any():
# TODO: zeros on the diagonal are not handled for CPU path
# there's no way to query this info from MKL
if self.device_type == 'cuda' and not TEST_WITH_ROCM:
self.assertTrue(actual_X.isnan().any() or actual_X.isinf().any())
return
self.assertEqual(actual_X, expected_X)
out = torch.empty_like(b.mH if op_out and a.shape == b.shape else b)
torch.triangular_solve(
b, a,
upper=upper, unitriangular=unitriangular, transpose=transpose, out=(out, actual_A_clone)
)
self.assertEqual(out, actual_X)
self.assertEqual(out, expected_X)
for (m, k) in itertools.product([2, 3], [1, 3]):
nnz = random.randint(0, m * m)
if not noncontiguous:
a = self.genSparseCSRTensor((m * block_size, m * block_size), nnz,
dtype=dtype, device=device, index_dtype=index_dtype)
a = a.to_sparse_bsr((block_size, block_size))
else:
a = self.genSparseCSRTensor((m, m), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
a_data = make_tensor((nnz, block_size, block_size), dtype=dtype, device=device)
a_data = a_data.mT if noncontiguous else a_data # Test column-major blocks
a = torch._sparse_bsr_tensor_unsafe(a.crow_indices(), a.col_indices(),
a_data, (m * block_size, m * block_size))
b = make_tensor((m * block_size, k), dtype=dtype, device=device, noncontiguous=noncontiguous)
for (upper, unitriangular, transpose, op_out) in itertools.product([True, False], repeat=4):
run_test(a, b, upper, unitriangular, transpose, op_out)
    @skipCPUIfNoMklSparse
    @unittest.skipIf(not CUDA11OrLater, "Only CUDA 11+ is supported")
    @dtypes(torch.double)
    def test_mm(self, device, dtype):
        """mm/addmm with one or both operands sparse (CSR/CSC) vs. dense reference."""
        def test_shape(di, dj, dk, nnz0=None, nnz1=None):
            for index_dtype in [torch.int32, torch.int64]:
                alpha = random.random()
                beta = random.random()
                def _test_addmm(t, x, y):
                    # TODO: addmm doesn't support strided result for sparse inputs.
                    # res = beta * t + alpha * (x @ y)
                    res = torch.addmm(t, x, y, beta=beta, alpha=alpha)
                    expected = torch.addmm(t, x.to_dense(), y.to_dense(), beta=beta, alpha=alpha)
                    self.assertEqual(res, expected)
                    # Also exercise the default alpha=beta=1 path.
                    res = torch.addmm(t, x, y)
                    expected = torch.addmm(t, x.to_dense(), y.to_dense())
                    self.assertEqual(res, expected)
                def _test_mm(x, y):
                    res = torch.mm(x, y)
                    expected = torch.mm(x.to_dense(), y.to_dense())
                    # Result layout: strided if either operand is dense, else CSR.
                    if x.layout is torch.strided or y.layout is torch.strided:
                        self.assertEqual(res.layout, torch.strided)
                    else:
                        self.assertEqual(res.layout, torch.sparse_csr)
                    self.assertEqual(res.to_dense(), expected)
                def _test(t, x, y):
                    _test_addmm(t, x, y)
                    _test_mm(x, y)
                # Case 1: sparse x, dense y.
                if nnz0 is None:
                    nnz0 = random.randint(di * dk // 2, di * dk)
                t = torch.randn(di, dj, dtype=dtype, device=device)
                x = self.genSparseCSRTensor((di, dk), nnz0, device=device, dtype=dtype, index_dtype=index_dtype)
                y = torch.randn(dk, dj, dtype=dtype, device=device)
                _test(t, x, y)
                # Case 2: dense x, sparse y.
                if nnz1 is None:
                    nnz1 = random.randint(dk * dj // 2, dk * dj)
                t = torch.randn(di, dj, dtype=dtype, device=device)
                x = torch.randn(di, dk, dtype=dtype, device=device)
                y = self.genSparseCSRTensor((dk, dj), nnz1, device=device, dtype=dtype, index_dtype=index_dtype)
                _test(t, x, y)
                # Case 3: both operands sparse, every CSR/CSC combination.
                x_shape, y_shape = x.shape, y.shape
                gen_csr_csc = [self.genSparseCSRTensor, self.genSparseCSCTensor]
                # Test mm({CSR, CSC}, {CSR, CSC})
                for gen_x, gen_y in itertools.product(gen_csr_csc, gen_csr_csc):
                    x = gen_x(x_shape, nnz0, device=device, dtype=dtype, index_dtype=index_dtype)
                    y = gen_y(y_shape, nnz1, device=device, dtype=dtype, index_dtype=index_dtype)
                    _test_mm(x, y)
        for i in [2, 4]:
            for j in [2, 4, 7]:
                for k in [2, 3, 7]:
                    test_shape(i, j, k)
        # Degenerate case: fully-empty sparse operands (nnz=0).
        test_shape(4, 4, 4, 0, 0)
@skipCPUIfNoMklSparse
@dtypes(*floating_and_complex_types())
@dtypesIfCUDA(*floating_and_complex_types_and(
*[torch.half] if SM53OrLater and TEST_CUSPARSE_GENERIC else [],
*[torch.bfloat16] if SM80OrLater and TEST_CUSPARSE_GENERIC else []))
@precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2})
def test_sparse_mm(self, device, dtype):
def test_shape(d1, d2, d3, nnz, transposed, index_dtype):
if transposed:
D = torch.randn(d3, d2, dtype=dtype, device=device).t_()
else:
D = torch.randn(d2, d3, dtype=dtype, device=device)
S = self.genSparseCSRTensor((d1, d2), nnz, device=device, dtype=dtype, index_dtype=index_dtype)
S_dense = S.to_dense()
self.assertEqual(torch.sparse.mm(S, D), torch.mm(S_dense, D))
for index_dtype in [torch.int32, torch.int64]:
test_shape(7, 8, 9, 20, False, index_dtype)
test_shape(7, 8, 9, 20, True, index_dtype)
    @dtypes(*floating_and_complex_types())
    @dtypesIfCUDA(*floating_and_complex_types_and(
        *[torch.half] if SM53OrLater and TEST_CUSPARSE_GENERIC else [],
        *[torch.bfloat16] if SM80OrLater and TEST_CUSPARSE_GENERIC else []))
    @precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2})
    def test_sparse_addmm(self, device, dtype):
        """torch.sparse.addmm vs. dense torch.addmm, incl. a broadcast 0-dim self."""
        def test_shape(m, n, p, nnz, broadcast, index_dtype, alpha_beta=None):
            if alpha_beta is None:
                alpha = random.random()
                beta = random.random()
            else:
                alpha, beta = alpha_beta
            if broadcast:
                # 0-dim self broadcasts across the whole result.
                D1 = make_tensor((), dtype=dtype, device=device)
            else:
                D1 = make_tensor([n, p], dtype=dtype, device=device)
            D2 = make_tensor([m, p], dtype=dtype, device=device)
            S = self.genSparseCSRTensor([n, m], nnz, dtype=dtype, device=device, index_dtype=index_dtype)
            S_dense = S.to_dense()
            Y = torch.sparse.addmm(D1, S, D2, beta=beta, alpha=alpha)
            Y_dense = torch.addmm(D1, S_dense, D2, beta=beta, alpha=alpha)
            self.assertEqual(Y, Y_dense)
        for index_dtype in [torch.int32, torch.int64]:
            # Random alpha/beta, plus the special-cased (1, 0) and (1, 1) pairs.
            test_shape(7, 8, 9, 20, False, index_dtype, None)
            test_shape(7, 8, 9, 20, True, index_dtype, None)
            test_shape(7, 8, 9, 20, False, index_dtype, (1, 0))
            test_shape(7, 8, 9, 20, True, index_dtype, (1, 0))
            test_shape(7, 8, 9, 20, False, index_dtype, (1, 1))
            test_shape(7, 8, 9, 20, True, index_dtype, (1, 1))
    @skipCPUIfNoMklSparse
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 0.6,
                        torch.half: 1e-1, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
    @dtypesIfCUDA(*floating_types_and(torch.complex64,
                  *[torch.bfloat16] if SM80OrLater else [],
                  *[torch.half] if SM53OrLater else [],
                  *[torch.complex128] if CUSPARSE_SPMM_COMPLEX128_SUPPORTED else []))
    @skipCUDAIf(
        not _check_cusparse_spgemm_available(),
        "cuSparse Generic API SpGEMM is not available"
    )
    def test_addmm_all_sparse_csr(self, device, dtype):
        """addmm with all operands converted to CSR (mode="all_sparse"),
        covering plain, 0-strided, beta=0-with-NaN, and transposed inputs."""
        M = torch.randn(10, 25, device=device).to(dtype)
        m1 = torch.randn(10, 50, device=device).to(dtype)
        m2 = torch.randn(50, 25, device=device).to(dtype)
        _test_addmm_addmv(self, torch.addmm, M, m1, m2, layout=torch.sparse_csr, mode="all_sparse")
        # Test 0-strided
        M = torch.randn(10, 1, device=device).to(dtype).expand(10, 25)
        m1 = torch.randn(10, 1, device=device).to(dtype).expand(10, 50)
        m2 = torch.randn(50, 25, device=device).to(dtype)
        _test_addmm_addmv(self, torch.addmm, M, m1, m2, layout=torch.sparse_csr, mode="all_sparse")
        # Test beta=0, M=nan: with beta=0 the NaNs in M must not propagate.
        M = torch.full((10, 25), float('nan'), device=device).to(dtype)
        m1 = torch.randn(10, 50, device=device).to(dtype)
        m2 = torch.randn(50, 25, device=device).to(dtype)
        _test_addmm_addmv(self, torch.addmm, M, m1, m2, beta=0, layout=torch.sparse_csr, mode="all_sparse")
        # Test transpose: every combination of transposed operands/output.
        for t1, t2, t3, t4 in itertools.product([True, False], repeat=4):
            def maybe_transpose(cond, m):
                # Round-trip transpose yields a tensor with transposed strides.
                if not cond:
                    return m
                return m.t().clone(memory_format=torch.contiguous_format).t()
            M = maybe_transpose(t1, torch.randn(10, 25, device=device).to(dtype))
            m1 = maybe_transpose(t2, torch.randn(10, 50, device=device).to(dtype))
            m2 = maybe_transpose(t3, torch.randn(50, 25, device=device).to(dtype))
            _test_addmm_addmv(self, torch.addmm, M, m1, m2, transpose_out=t4, layout=torch.sparse_csr, mode="all_sparse")
    @onlyCPU
    @skipCPUIfNoMklSparse
    @dtypes(*floating_and_complex_types())
    def test_addmm_dense_result(self, device, dtype):
        """addmm with sparse inputs producing a dense result (mode="dense_result"),
        covering plain, 0-strided, beta=0-with-NaN, and transposed inputs."""
        M = torch.randn(10, 25, device=device).to(dtype)
        m1 = torch.randn(10, 50, device=device).to(dtype)
        m2 = torch.randn(50, 25, device=device).to(dtype)
        _test_addmm_addmv(self, torch.addmm, M, m1, m2, layout=torch.sparse_csr, mode="dense_result")
        # Test 0-strided
        M = torch.randn(10, 1, device=device).to(dtype).expand(10, 25)
        m1 = torch.randn(10, 1, device=device).to(dtype).expand(10, 50)
        m2 = torch.randn(50, 25, device=device).to(dtype)
        _test_addmm_addmv(self, torch.addmm, M, m1, m2, layout=torch.sparse_csr, mode="dense_result")
        # Test beta=0, M=nan: with beta=0 the NaNs in M must not propagate.
        M = torch.full((10, 25), float('nan'), device=device).to(dtype)
        m1 = torch.randn(10, 50, device=device).to(dtype)
        m2 = torch.randn(50, 25, device=device).to(dtype)
        _test_addmm_addmv(self, torch.addmm, M, m1, m2, beta=0, layout=torch.sparse_csr, mode="dense_result")
        # Test transpose: every combination of transposed operands/output.
        for t1, t2, t3, t4 in itertools.product([True, False], repeat=4):
            def maybe_transpose(cond, m):
                # Round-trip transpose yields a tensor with transposed strides.
                if not cond:
                    return m
                return m.t().clone(memory_format=torch.contiguous_format).t()
            M = maybe_transpose(t1, torch.randn(10, 25, device=device).to(dtype))
            m1 = maybe_transpose(t2, torch.randn(10, 50, device=device).to(dtype))
            m2 = maybe_transpose(t3, torch.randn(50, 25, device=device).to(dtype))
            _test_addmm_addmv(self, torch.addmm, M, m1, m2, transpose_out=t4, layout=torch.sparse_csr, mode="dense_result")
    @parametrize("k", [0, 1, 8])
    @parametrize("n", [0, 1, 10])
    @parametrize("m", [0, 1, 25])
    @skipCPUIfNoMklSparse
    @dtypes(*floating_and_complex_types())
    @dtypesIfCUDA(*floating_types_and(torch.complex64,
                  *[torch.bfloat16] if SM80OrLater else [],
                  *[torch.half] if SM53OrLater else [],
                  *[torch.complex128] if CUSPARSE_SPMM_COMPLEX128_SUPPORTED else []))
    @skipCUDAIf(
        not _check_cusparse_spgemm_available(),
        "cuSparse Generic API SpGEMM is not available"
    )
    @precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 0.6,
                        torch.half: 1e-1, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
    def test_addmm_sizes_all_sparse_csr(self, device, dtype, m, n, k):
        """all-sparse addmm over various sizes (incl. zero dims), plus shape-
        mismatch error checks for sparse addmm/mm."""
        M = torch.randn(n, m, device=device).to(dtype)
        m1 = torch.randn(n, k, device=device).to(dtype)
        m2 = torch.randn(k, m, device=device).to(dtype)
        _test_addmm_addmv(self, torch.addmm, M, m1, m2, layout=torch.sparse_csr, mode="all_sparse")
        # Incompatible inner dimensions (k+1 vs k) must raise with a message
        # reporting both operand shapes.
        M = torch.randn(n, m, device=device).to(dtype).to_sparse_csr()
        m1 = torch.randn(n, k + 1, device=device).to(dtype).to_sparse_csr()
        m2 = torch.randn(k, m, device=device).to(dtype).to_sparse_csr()
        self.assertRaisesRegex(RuntimeError, f"{n}x{k + 1}.*{k}x{m}", lambda: torch.addmm(M, m1, m2))
        self.assertRaisesRegex(RuntimeError, f"{n}x{k + 1}.*{k}x{m}", lambda: torch.mm(m1, m2))
@skipCPUIfNoMklSparse
@dtypes(torch.float)
def test_addmm_errors(self, device, dtype):
# test that the errors are the same for dense and sparse versions
import re
def test1(*, is_sparse):
# shapes must be compatible for matrix multiplication
a = make_tensor((2, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.addmm(a, a_sparse, a)
else:
return torch.addmm(a, a, a)
def test2(*, is_sparse):
# mat2 must be a matrix
a = make_tensor((2, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.addmm(a, a_sparse, a.unsqueeze(0))
else:
return torch.addmm(a, a, a.unsqueeze(0))
def test3(*, is_sparse):
# the first input needs to be 1D or 2D
a = make_tensor((3, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.addmm(a.unsqueeze(0), a_sparse, a)
else:
return torch.addmm(a.unsqueeze(0), a, a)
for test in (test1, test2, test3):
try:
test(is_sparse=False)
except RuntimeError as msg:
with self.assertRaisesRegex(RuntimeError, re.escape(str(msg))):
test(is_sparse=True)
@skipCPUIfNoMklSparse
@dtypes(torch.float)
def test_mm_errors(self, device, dtype):
# test that the errors are the same for dense and sparse versions
import re
def test1(*, is_sparse):
# shapes must be compatible for matrix multiplication
a = make_tensor((2, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.mm(a_sparse, a)
else:
return torch.mm(a, a)
def test2(*, is_sparse):
# mat2 must be a matrix
a = make_tensor((2, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.mm(a_sparse, a.unsqueeze(0))
else:
return torch.mm(a, a.unsqueeze(0))
for test in (test1, test2):
try:
test(is_sparse=False)
except RuntimeError as msg:
with self.assertRaisesRegex(RuntimeError, re.escape(str(msg))):
test(is_sparse=True)
@dtypes(torch.float, torch.double)
def test_add(self, device, dtype):
def _test_spadd_shape(nnz, shape):
# sparse.to_dense() uses torch.add internally so if torch.add is wrong,
# the dense tensor will be wrong but this test would still pass
# there's a separate test that checks for the correctness of the .to_dense() call
x = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=torch.int32)
y = torch.randn(*shape, dtype=dtype, device=device)
r = random.random()
res = torch.add(y, x, alpha=r)
expected = y + r * x.to_dense()
self.assertEqual(res, expected)
# Non contiguous dense tensor
s = list(shape)
s[0] = shape[-1]
s[-1] = shape[0]
y = torch.randn(*s, dtype=torch.double, device=device)
y.transpose_(0, len(s) - 1)
r = random.random()
res = torch.add(y, x, alpha=r)
expected = y + r * x.to_dense()
self.assertEqual(res, expected)
ns = [2, 5]
batch_shapes = [(), (2,), (2, 3)]
for b, m, n in itertools.product(batch_shapes, ns, ns):
_test_spadd_shape(0, (*b, m, n))
_test_spadd_shape(m * n // 2, (*b, m, n))
_test_spadd_shape(m * n, (*b, m, n))
    @dtypes(torch.float, torch.double)
    def test_mul(self, device, dtype):
        """Elementwise mul for CSR*CSR, dense*CSR, CSR*dense — forward results
        and gradient layouts/values."""
        # TODO: This whole test should be migrated to OpInfos
        def _test_spadd_shape(fn, nnz, shape):
            x = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=torch.int32)
            y = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=torch.int32)
            # Forward comparison
            res_sparse_sparse = fn(y, x)
            res_dense_sparse = fn(y.to_dense(), x)
            res_sparse_dense = fn(y, x.to_dense())
            expected = fn(y.to_dense(), x.to_dense()).to_sparse_csr()
            self.assertEqual(res_sparse_sparse, expected)
            # TODO: While result of mul(dense, csr) is csr, it is not fully compressed.
            # That means it may contain materialized zeros, since the dense argument
            # is converted according to the sparsity pattern of csr. In the future
            # we might require the result to be fully compressed.
            self.assertEqual(res_dense_sparse.to_dense(), expected.to_dense())
            self.assertEqual(res_sparse_dense.to_dense(), expected.to_dense())
            # Grad comparison
            x = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=torch.int32)
            y = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=torch.int32)
            z = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=torch.int32)
            # csr * csr -> csr with csr, csr gradients
            x_a = x.clone().requires_grad_()
            y_a = y.clone().requires_grad_()
            fn(y_a, x_a).backward(z)
            # Dense reference gradients for value comparison.
            x_dense_a = x.to_dense().requires_grad_()
            y_dense_a = y.to_dense().requires_grad_()
            fn(y_dense_a, x_dense_a).backward(z.to_dense())
            self.assertEqual(x_a.grad.layout, torch.sparse_csr)
            self.assertEqual(y_a.grad.layout, torch.sparse_csr)
            self.assertEqual(x_a.grad.to_dense(), x_dense_a.grad)
            self.assertEqual(y_a.grad.to_dense(), y_dense_a.grad)
            # TODO: Currently strided Tensors cannot have csr gradients
            # dense * csr -> csr with csr, dense gradients
            x_a = x.clone().requires_grad_()
            y_a = y.to_dense().clone().requires_grad_()
            err_msg = "Function MulBackward0 returned an invalid gradient at index 0 - expected layout Strided but got SparseCsr"
            with self.assertRaisesRegex(RuntimeError, err_msg):
                fn(y_a, x_a).backward(z)
            # csr * dense -> csr with dense, csr gradients
            x_a = x.to_dense().clone().requires_grad_()
            y_a = y.clone().requires_grad_()
            err_msg = "Function MulBackward0 returned an invalid gradient at index 1 - expected layout Strided but got SparseCsr"
            with self.assertRaisesRegex(RuntimeError, err_msg):
                fn(y_a, x_a).backward(z)
        # Full, empty, and degenerate (single-row/column) shapes.
        _test_spadd_shape(torch.mul, 100, [100, 100])
        _test_spadd_shape(torch.mul, 0, [100, 100])
        _test_spadd_shape(torch.mul, 100, [100, 1])
        _test_spadd_shape(torch.mul, 100, [1, 100])
    @skipCPUIfNoMklSparse
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_sparse_add(self, device, dtype):
        """CSR + CSR with out= (also CSR) must match the dense reference;
        both the returned value and the out tensor are checked."""
        def run_test(m, n, index_dtype):
            alpha = random.random()
            nnz1 = random.randint(0, m * n)
            nnz2 = random.randint(0, m * n)
            nnz3 = random.randint(0, m * n)
            if TEST_WITH_ROCM:
                # ROCm fails when nnz = 0
                nnz1, nnz2, nnz3 = max(1, nnz1), max(1, nnz2), max(1, nnz3)
            S1 = self.genSparseCSRTensor([m, n], nnz1, dtype=dtype, device=device, index_dtype=index_dtype)
            S2 = self.genSparseCSRTensor([m, n], nnz2, dtype=dtype, device=device, index_dtype=index_dtype)
            # S3 serves as the out= target; its original pattern is overwritten.
            S3 = self.genSparseCSRTensor([m, n], nnz3, dtype=dtype, device=device, index_dtype=index_dtype)
            expected = torch.add(S1.to_dense(), S2.to_dense(), alpha=alpha)
            actual = torch.add(S1, S2, alpha=alpha, out=S3)
            self.assertEqual(actual.to_dense(), expected)
            self.assertEqual(S3.to_dense(), expected)
        for index_dtype in [torch.int32, torch.int64]:
            for m, n in itertools.product([3, 5], [3, 5]):
                run_test(m, n, index_dtype)
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_sparse_add_errors(self, device, dtype):
def run_test(index_type):
a = self.genSparseCSRTensor((2, 2), 3, dtype=dtype, device=device, index_dtype=index_dtype)
b = self.genSparseCSRTensor((2, 1), 2, dtype=dtype, device=device, index_dtype=index_dtype)
with self.assertRaisesRegex(RuntimeError, "Expected input tensors to have the same shape"):
torch.add(a, b)
for index_dtype in [torch.int32, torch.int64]:
run_test(index_dtype)
    @skipCPUIfNoMklSparse
    @skipCUDAIf(
        not _check_cusparse_triangular_solve_available(),
        "cuSparse Generic API SpSV is not available"
    )
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_sparse_triangular_solve(self, device, dtype):
        """triangular_solve with a CSR coefficient matrix vs. the dense solver,
        including out= with C-contiguous, F-contiguous, and discontiguous strides."""
        def run_test(n, k, upper, unitriangular, transpose, zero):
            triangle_function = torch.triu if upper else torch.tril
            # `zero` forces an all-zero (empty) coefficient matrix.
            make_A = torch.zeros if zero else make_tensor
            A = make_A((n, n), dtype=dtype, device=device)
            A = triangle_function(A)
            A_sparse = A.to_sparse_csr()
            B = make_tensor((n, k), dtype=dtype, device=device)
            expected = torch.triangular_solve(B, A, upper=upper, unitriangular=unitriangular, transpose=transpose)
            expected_X = expected.solution
            actual = torch.triangular_solve(B, A_sparse, upper=upper, unitriangular=unitriangular, transpose=transpose)
            actual_X = actual.solution
            actual_A_clone = actual.cloned_coefficient
            # Sparse inputs never produce a cloned coefficient matrix.
            self.assertTrue(actual_A_clone.numel() == 0)
            if A_sparse._nnz() == 0:
                # An empty triangular system yields NaNs.
                self.assertTrue(actual_X.isnan().all())
                return
            self.assertEqual(actual_X, expected_X)
            # test out with C contiguous strides
            out = torch.empty_strided((n, k), (k, 1), dtype=dtype, device=device)
            torch.triangular_solve(
                B, A_sparse,
                upper=upper, unitriangular=unitriangular, transpose=transpose, out=(out, actual_A_clone)
            )
            self.assertEqual(out, expected_X)
            # test out with F contiguous strides
            out = torch.empty_strided((n, k), (1, n), dtype=dtype, device=device)
            torch.triangular_solve(
                B, A_sparse,
                upper=upper, unitriangular=unitriangular, transpose=transpose, out=(out, actual_A_clone)
            )
            self.assertEqual(out, expected_X)
            # The solver must not silently re-stride the provided out tensor.
            self.assertEqual(out.stride(), (1, n))
            # test out with discontiguous strides
            out = torch.empty_strided((2 * n, k), (1, 2 * n), dtype=dtype, device=device)[::2]
            if n > 0 and k > 0:
                self.assertFalse(out.is_contiguous())
                self.assertFalse(out.t().is_contiguous())
            before_stride = out.stride()
            torch.triangular_solve(
                B, A_sparse,
                upper=upper, unitriangular=unitriangular, transpose=transpose, out=(out, actual_A_clone)
            )
            self.assertEqual(out, expected_X)
            self.assertEqual(out.stride(), before_stride)
        ks = [0, 1, 3]
        ns = [5, 3, 0]
        # Every size combination (incl. zero-sized) crossed with every flag combination.
        for (k, n), (upper, unitriangular, transpose, zero) in itertools.product(itertools.product(ks, ns),
                                                                                 itertools.product([True, False], repeat=4)):
            run_test(n, k, upper, unitriangular, transpose, zero)
@skipCUDAIfRocm
@skipCUDAIf(
not _check_cusparse_sddmm_available(),
"cuSparse Generic API SDDMM is not available"
)
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_sampled_addmm(self, device, dtype):
def run_test(c, a, b, op_a, op_b, *, alpha=None, beta=None):
if dtype.is_complex:
alpha = random.random() + 0.3j if alpha is None else alpha
beta = random.random() + 0.6j if beta is None else beta
else:
alpha = random.random() if alpha is None else alpha
beta = random.random() if beta is None else beta
if op_a and a.shape == b.shape:
a = a.mH
if op_b and a.shape == b.shape:
b = b.mH
actual = torch.sparse.sampled_addmm(c, a, b, alpha=alpha, beta=beta)
out = torch.sparse_csr_tensor(
*map(torch.clone, (actual.crow_indices(), actual.col_indices())),
torch.empty_like(actual.values()),
size=actual.shape
)
torch.sparse.sampled_addmm(c, a, b, alpha=alpha, beta=beta, out=out)
spy_c = torch.sparse_csr_tensor(c.crow_indices(), c.col_indices(), torch.ones_like(c.values()), size=c.shape)
expected = alpha * (a @ b) * spy_c.to_dense() + beta * c.to_dense()
self.assertEqual(actual.to_dense(), out.to_dense())
self.assertEqual(actual.to_dense(), expected)
mnk = itertools.product([2, 5], repeat=3)
batch_shapes = [(), (2,), (2, 3)] if self.device_type == 'cuda' else [(), ]
tf = [True, False]
for index_dtype in [torch.int32, torch.int64]:
for (m, n, k), b, noncontiguous, bcast_c in itertools.product(mnk, batch_shapes, tf, tf):
if bcast_c and len(b) == 0:
continue
nnz = random.randint(0, m * n)
c_batch = () if bcast_c else b
c = self.genSparseCSRTensor((*c_batch, m, n), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
a = make_tensor((*b, m, k), dtype=dtype, device=device, noncontiguous=noncontiguous)
b = make_tensor((*b, k, n), dtype=dtype, device=device, noncontiguous=noncontiguous)
for op_a, op_b in itertools.product([True, False], repeat=2):
run_test(c, a, b, op_a, op_b)
    @skipCUDAIfRocm
    @skipCUDAIf(
        not _check_cusparse_sddmm_available(),
        "cuSparse Generic API SDDMM is not available"
    )
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_sampled_addmm_autograd(self, device, dtype):
        """Autograd through sampled_addmm: output and all three input gradients
        must match the dense-computed equivalent."""
        from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
        samples = list(sample_inputs_sparse_sampled_addmm(None, device, dtype, requires_grad=True))
        # NOTE(review): zip truncates to two iterations, so only the first two
        # samples are exercised (one with a dense covector, one sparse) —
        # presumably intentional, but worth confirming.
        for sample, dense_covector in zip(samples, [True, False]):
            c = sample.input
            a = sample.args[0]
            b = sample.args[1]
            # Compute sparse result
            output = torch.sparse.sampled_addmm(c, a, b, **sample.kwargs)
            covector = torch.randn_like(output).to_dense() if dense_covector else torch.randn_like(output)
            output.backward(covector)
            # Compute dense result and compare with sparse result
            c1, a1, b1 = map(lambda x: x.detach().to_dense().requires_grad_(True), [c, a, b])
            # ones_like(c).to_dense() reproduces c's sparsity mask densely.
            dense_output = sample.kwargs['alpha'] * (a1 @ b1) * torch.ones_like(c).to_dense() + sample.kwargs['beta'] * c1
            self.assertEqual(output, dense_output)
            dense_covector = covector.to_dense()
            dense_output.backward(dense_covector)
            self.assertEqual(c.grad, c1.grad)
            self.assertEqual(a.grad, a1.grad)
            self.assertEqual(b.grad, b1.grad)
@skipCUDAIfRocm
@onlyCUDA
@skipCUDAIf(True, "Causes CUDA memory exception, see https://github.com/pytorch/pytorch/issues/72177")
@skipCUDAIf(
not _check_cusparse_sddmm_available(),
"cuSparse Generic API SDDMM is not available"
)
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_sampled_addmm_zero_sized(self, device, dtype):
def run_test(c, a, b):
actual = torch.sparse.sampled_addmm(c, a, b)
self.assertEqual(actual.shape, c.shape)
for m, n, k in itertools.product([0, 5], repeat=3):
c = torch.empty(m, n, dtype=dtype, device=device, layout=torch.sparse_csr)
a = make_tensor((m, k), dtype=dtype, device=device)
b = make_tensor((k, n), dtype=dtype, device=device)
run_test(c, a, b)
@onlyCUDA
@skipCUDAIf(
not (TEST_WITH_ROCM or _check_cusparse_sddmm_available()),
"cuSparse Generic API SDDMM is not available"
)
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_sampled_addmm_errors(self, device, dtype):
# test that the errors are the same for dense and sparse sampled versions
# import re
# shapes must be compatible for matrix multiplication
a = make_tensor((2, 3), dtype=dtype, device=device)
a_sparse = a.to_sparse_csr()
with self.assertRaisesRegex(RuntimeError, r"cannot be multiplied"):
torch.sparse.sampled_addmm(a_sparse, a, a)
# mat1 must be a matrix
with self.assertRaisesRegex(RuntimeError, r"Expected mat1 to be a matrix"):
torch.sparse.sampled_addmm(a_sparse, a[..., 0, :], a)
# mat2 must be a matrix
with self.assertRaisesRegex(RuntimeError, r"Expected mat2 to be a matrix"):
torch.sparse.sampled_addmm(a_sparse, a, a[..., 0, :])
a = make_tensor((2, 2), dtype=dtype, device=device)
b = make_tensor((3, 3), dtype=dtype, device=device)
b_sparse = b.to_sparse_csr()
with self.assertRaisesRegex(RuntimeError, r"self.shape\[-2\] must match mat1.shape\[-2\]"):
torch.sparse.sampled_addmm(b_sparse, a, a)
b = make_tensor((2, 3), dtype=dtype, device=device)
b_sparse = b.to_sparse_csr()
with self.assertRaisesRegex(RuntimeError, r"self.shape\[-1\] must match mat2.shape\[-1\]"):
torch.sparse.sampled_addmm(b_sparse, a, a)
a = make_tensor((2, 2), dtype=dtype, device=device)
a_sparse = a.to_sparse_csr()
with self.assertRaisesRegex(RuntimeError, r"Expected mat1 to have strided layout"):
torch.sparse.sampled_addmm(a_sparse, a_sparse, a_sparse)
with self.assertRaisesRegex(RuntimeError, r"Expected mat2 to have strided layout"):
torch.sparse.sampled_addmm(a_sparse, a, a_sparse)
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_coo_csr_conversion(self, device, dtype):
for m, n in itertools.product([5, 2, 0], [5, 2, 0]):
size = (m, n)
dense = make_tensor(size, dtype=dtype, device=device)
coo_sparse = dense.to_sparse()
csr_sparse = coo_sparse.to_sparse_csr()
self.assertEqual(csr_sparse.to_dense(), dense)
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_csr_coo_conversion(self, device, dtype):
for m, n in itertools.product([5, 2, 0], [5, 2, 0]):
size = (m, n)
dense = make_tensor(size, dtype=dtype, device=device)
csr_sparse = dense.to_sparse_csr()
coo_sparse = csr_sparse.to_sparse()
self.assertEqual(coo_sparse.to_dense(), dense)
    # Currently, there is no rule in PyTorch for filling zeros in the outputs
    # from operations on Sparse CSR tensors. Hence only those operators are supported
    # which have 0->0 correspondence, example: sin(0) = 0, tan(0) = 0 but
    # cos(0) = 1 (and hence it's not supported).
    # Note: here, we do this test only for unary operators
    @ops(sparse_csr_unary_ufuncs)
    def test_zero_to_zero_correspondence_unary(self, device, dtype, op):
        """Every supported unary ufunc must map 0 -> 0 (both implicit and
        explicitly-materialized zeros) and preserve the sparsity pattern."""
        zero = torch.zeros((1, 2), dtype=dtype, device=device)
        # CSR tensor with one explicitly-stored zero value.
        tensor_explicit_zeros = torch.sparse_csr_tensor([0, 1], [1], [0], dtype=dtype, device=device)
        output_zero = op(zero)
        expected_zero = zero.to(output_zero.dtype)
        output_explicit_zeros = op(tensor_explicit_zeros).to_dense()
        expected_explicit_zeros = tensor_explicit_zeros.to_dense().to(output_explicit_zeros.dtype)
        for (output, expected) in [
                (output_zero, expected_zero),
                (output_explicit_zeros, expected_explicit_zeros)]:
            self.assertEqual(output, expected, f"This operator ({op.name}) should not be supported for "
                             "Sparse CSR as it breaks 0->0 correspondence.")
        # The number of stored values must not change under the op.
        for inp in [zero.to_sparse_csr(), tensor_explicit_zeros]:
            self.assertEqual(op(inp).values().numel(), inp.values().numel(),
                             f"{op.name} fails to preserve sparsity pattern.")
@ops(sparse_csr_unary_ufuncs)
def test_sparse_csr_unary_out(self, device, dtype, op):
    """Check that the ``out=`` variant of a sparse-CSR unary ufunc matches
    the functional variant for every sample input."""
    samples = op.sample_inputs(device, dtype)

    if not op.supports_out:
        self.skipTest("Skipped! Out not supported")

    for sample in samples:
        assert torch.is_tensor(sample.input)
        # Sparse CSR only supports 2D tensors as inputs
        # Fail early to prevent silent success with this test
        if sample.input.ndim != 2:
            # Fixed: the message was missing the f-prefix, so the ndim
            # placeholder was printed literally instead of interpolated.
            raise ValueError(f"Expected 2D tensor but got tensor with dimension: {sample.input.ndim}.")

        sample.input = sample.input.to_sparse_csr()
        expect = op(sample.input, *sample.args, **sample.kwargs)

        # Pre-allocate an output CSR tensor with a matching shape/nnz/index
        # dtype and verify the op fills it with the expected result.
        out = self.genSparseCSRTensor(sample.input.size(), sample.input._nnz(),
                                      device=sample.input.device, dtype=expect.dtype,
                                      index_dtype=sample.input.crow_indices().dtype)
        op(sample.input, *sample.args, **sample.kwargs, out=out)

        self.assertEqual(out, expect)
@ops(sparse_csr_unary_ufuncs)
def test_sparse_csr_unary_inplace(self, device, dtype, op):
    """Check that the in-place variant of a sparse-CSR unary ufunc matches
    the functional variant, and that unsupported dtype combinations raise."""
    samples = op.sample_inputs(device, dtype)

    if op.inplace_variant is None:
        self.skipTest("Skipped! Inplace variant not supported!")

    for sample in samples:
        assert torch.is_tensor(sample.input)
        # Sparse CSR only supports 2D tensors as inputs
        # Fail early to prevent silent success with this test
        if sample.input.ndim != 2:
            # Fixed: the message was missing the f-prefix, so the ndim
            # placeholder was printed literally instead of interpolated.
            raise ValueError(f"Expected 2D tensor but got tensor with dimension: {sample.input.ndim}.")

        sample.input = sample.input.to_sparse_csr()
        expect = op(sample.input, *sample.args, **sample.kwargs)

        # In-place ops cannot change the dtype: if the functional result's
        # dtype does not cast back to the input dtype, a RuntimeError is expected.
        if not torch.can_cast(expect.dtype, dtype):
            with self.assertRaisesRegex(RuntimeError, "result type"):
                op.inplace_variant(sample.input, *sample.args, **sample.kwargs)
            continue

        # abs_() on complex CSR input is explicitly unsupported.
        if sample.input.is_complex() and op.name == "abs":
            with self.assertRaisesRegex(RuntimeError, "not supported"):
                op.inplace_variant(sample.input, *sample.args, **sample.kwargs)
            continue

        actual = op.inplace_variant(sample.input, *sample.args, **sample.kwargs)

        # In-place variant must return the very same tensor object.
        self.assertIs(actual, sample.input)
        self.assertEqual(actual, expect)
@ops(sparse_csr_unary_ufuncs, dtypes=OpDTypes.supported, allowed_dtypes=[torch.double, torch.cdouble])
def test_autograd_sparse_csr_unary(self, device, dtype, op):
    """Compare autograd through a unary ufunc with a sparse-CSR input against
    the dense path: the gradient w.r.t. the CSR input must be a CSR tensor
    and must equal the gradient computed from an identical dense input."""
    if op.name not in UNARY_EWISE_CSR_ALLOW_AUTOGRAD:
        self.skipTest(f"Skipped! Unary op {op.name} not supported with CSR input and autograd")

    samples = list(op.sample_inputs(device, dtype))

    # Fail early to prevent silent success with this test
    ndims_equals_2d = (s.input.ndim == 2 for s in samples)
    if not any(ndims_equals_2d):
        raise ValueError("Expected at least one 2D tensor in samples.")

    for sample in samples:
        sparse_input = sample.input.to_sparse_csr().requires_grad_(True)

        def fn(input):
            # Apply the op through its gradcheck wrapper, then any
            # sample-specific post-processing of the output.
            output = op.gradcheck_wrapper(op.get_op(), input, *sample.args, **sample.kwargs)
            if sample.output_process_fn_grad is not None:
                return sample.output_process_fn_grad(output)
            return output

        # Compute sparse result
        output = fn(sparse_input)
        # Back-propagate a random covector so the full Jacobian is exercised.
        covector = torch.randn_like(output)
        output.backward(covector)
        self.assertTrue(torch.is_tensor(sparse_input.grad))
        self.assertTrue(sparse_input.grad.is_sparse_csr)

        # Compute dense result and compare with sparse result
        dense_input = sparse_input.detach().to_dense().requires_grad_(True)
        dense_output = fn(dense_input)
        dense_covector = covector.to_dense()
        dense_output.backward(dense_covector)
        self.assertEqual(sparse_input.grad, dense_input.grad)
@skipCUDAIfRocm
@skipCUDAIf(
    not _check_cusparse_sddmm_available(),
    "cuSparse Generic API SDDMM is not available"
)
@dtypes(torch.float64)
def test_autograd_dense_output_addmm(self, device, dtype):
    """Test autograd for addmm / torch.sparse.addmm with one sparse-CSR operand.

    Dense operands are verified with gradcheck; the gradient w.r.t. the
    sparse operand is verified by comparing against an equivalent dense run
    (gradcheck does not support sparse CSR inputs yet).
    """
    from torch.testing._internal.common_methods_invocations import sample_inputs_addmm

    samples = list(sample_inputs_addmm(None, device, dtype, requires_grad=True))

    # Fail early to prevent silent success with this test
    ndims_equals_2d = (s.args[0].ndim == 2 for s in samples)
    if not any(ndims_equals_2d):
        raise ValueError("Expected at least one 2D tensor in samples to convert to sparse.")

    for sample in samples:
        # relu() introduces zeros so the CSR conversion yields real sparsity.
        a = sample.args[0].relu().to_sparse_csr()

        # This path tests the autograd path wrt dense inputs
        for addmm in [torch.addmm, torch.sparse.addmm]:
            def fn(c, b):
                output = addmm(c, a, b, **sample.kwargs)
                if sample.output_process_fn_grad is not None:
                    return sample.output_process_fn_grad(output)
                return output

            self.assertTrue(torch.autograd.gradcheck(fn, [sample.input, sample.args[1]], fast_mode=True))

            # noncontiguous
            c = make_tensor(sample.input.shape, device=device, dtype=dtype, noncontiguous=True, requires_grad=True)
            b = make_tensor(sample.args[1].shape, device=device, dtype=dtype, noncontiguous=True, requires_grad=True)
            self.assertTrue(torch.autograd.gradcheck(fn, [c, b], fast_mode=True))

            # Now test the autograd path wrt sparse inputs
            # `reverse` places the sparse operand as mat2 instead of mat1.
            for reverse in [True, False]:
                c, b = sample.input, sample.args[1]
                if reverse and a.shape != b.shape:
                    continue

                def fn(a):
                    inputs = (c, b, a) if reverse else (c, a, b)
                    output = addmm(*inputs, **sample.kwargs)
                    if sample.output_process_fn_grad is not None:
                        return sample.output_process_fn_grad(output)
                    return output

                # gradcheck doesn't work for sparse CSR yet, compare against dense path
                # Compute sparse result
                a = a.detach().requires_grad_(True)
                output = fn(a)
                covector = torch.randn_like(output)
                output.backward(covector)
                self.assertTrue(torch.is_tensor(a.grad))
                # torch.sparse.addmm keeps the gradient sparse; plain addmm
                # densifies it.
                if addmm == torch.sparse.addmm:
                    self.assertTrue(a.grad.is_sparse_csr)
                else:
                    self.assertTrue(a.grad.layout == torch.strided)

                # Compute dense result and compare with sparse result
                dense_a = a.detach().to_dense().requires_grad_(True)
                dense_output = fn(dense_a)
                self.assertEqual(output, dense_output)
                dense_covector = covector.to_dense()
                dense_output.backward(dense_covector)

                if addmm == torch.sparse.addmm:
                    # Sparse gradient only covers the input's sparsity
                    # pattern, so mask the dense gradient before comparing.
                    self.assertEqual(a.grad, dense_a.grad.sparse_mask(a))
                else:
                    self.assertEqual(a.grad, dense_a.grad)
@skipCUDAIfRocm
@skipCPUIfNoMklSparse
@dtypes(torch.float64)
def test_autograd_dense_output_addmv(self, device, dtype):
    """Gradcheck torch.addmv where the matrix operand is sparse CSR.

    Only gradients w.r.t. the dense operands (c, b) are tested; the sparse
    matrix is detached because CSR autograd support is not available yet.
    """
    from torch.testing._internal.common_methods_invocations import sample_inputs_addmv

    samples = list(sample_inputs_addmv(None, device, dtype, requires_grad=True))

    # Fail early to prevent silent success with this test
    ndims_equals_2d = (s.args[0].ndim == 2 for s in samples)
    if not any(ndims_equals_2d):
        raise ValueError("Expected at least one 2D tensor in samples to convert to sparse.")

    for sample in samples:
        # TODO: Remove detach once we have autograd support for CSR input
        a = sample.args[0].to_sparse_csr().detach()

        def fn(c, b):
            output = torch.addmv(c, a, b, **sample.kwargs)
            if sample.output_process_fn_grad is not None:
                return sample.output_process_fn_grad(output)
            return output

        self.assertTrue(torch.autograd.gradcheck(fn, [sample.input, sample.args[1]], fast_mode=True))

        # noncontiguous
        c = make_tensor(sample.input.shape, device=device, dtype=dtype, noncontiguous=True, requires_grad=True)
        b = make_tensor(sample.args[1].shape, device=device, dtype=dtype, noncontiguous=True, requires_grad=True)
        self.assertTrue(torch.autograd.gradcheck(fn, [c, b], fast_mode=True))
@ops(binary_ops_with_dense_output, dtypes=OpDTypes.supported, allowed_dtypes=[torch.double, ])
def test_autograd_dense_output(self, device, dtype, op):
    """Gradcheck binary ops of the form op(sparse_csr, dense) -> dense.

    The sparse CSR operand is detached (no CSR autograd yet); gradients are
    verified w.r.t. the dense arguments only.
    """
    if op.name == "mv" and no_mkl_sparse and self.device_type == 'cpu':
        self.skipTest("MKL Sparse is not available")
    if op.name == "mv" and TEST_WITH_ROCM and self.device_type == 'cuda':
        # mv currently work only on CUDA
        self.skipTest("ROCm is not supported")

    samples = list(op.sample_inputs(device, dtype, requires_grad=True))

    # Fail early to prevent silent success with this test
    ndims_equals_2d = (s.input.ndim == 2 for s in samples)
    if not any(ndims_equals_2d):
        raise ValueError("Expected at least one 2D tensor in samples.")

    # Here we assume that the signature is op(sparse_input, dense_input) -> dense_output
    for sample in samples:
        # TODO: Remove detach once we have autograd support for CSR input
        sparse_input = sample.input.to_sparse_csr().detach()

        def fn(*args):
            output = op.gradcheck_wrapper(op.get_op(), sparse_input, *args, **sample.kwargs)
            if sample.output_process_fn_grad is not None:
                return sample.output_process_fn_grad(output)
            return output

        self.assertTrue(torch.autograd.gradcheck(fn, sample.args, fast_mode=True))

        # noncontiguous
        args = [make_tensor(a.shape, device=device, dtype=dtype, noncontiguous=True, requires_grad=True) for a in sample.args]
        self.assertTrue(torch.autograd.gradcheck(fn, args, fast_mode=True))
@dtypes(*all_types_and_complex())
def test_direct_coo_csr_conversion(self, device, dtype):
    """A COO -> CSR -> COO round-trip must reproduce the original COO tensor."""
    for rows, cols in itertools.product([5, 2, 0], [5, 2, 0]):
        dense = make_tensor((rows, cols), dtype=dtype, device=device)
        coo = dense.to_sparse_coo()
        self.assertEqual(coo.to_sparse_csr().to_sparse_coo(), coo)
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_sum(self, device, dtype):
    """Check that sum() over a sparse CSR tensor equals the sum of its
    stored values, and that its gradient is an all-ones dense tensor."""
    # Fixed: the parameter was named `index_type` while the body read
    # `index_dtype`, which only worked because the loop variable of the
    # caller happened to have that name (closure capture).
    def run_test(shape, nnz, index_dtype):
        a = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)
        self.assertEqual(a.sum(), a.values().sum())
        if dtype in floating_types():
            a.requires_grad_(True)
            a.sum().backward()
            # d(sum)/d(a) is 1 for every element, returned as a dense tensor.
            self.assertEqual(a.grad, torch.ones(shape, dtype=dtype, device=device))
    for shape, index_dtype in itertools.product(
            [(10, 5), (10, 10)],
            [torch.int32, torch.int64]):
        run_test(shape, 0, index_dtype)
        run_test(shape, max(shape), index_dtype)
        run_test(shape, shape[0] * shape[1], index_dtype)
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_transpose(self, device, dtype):
    """Check transpose on sparse CSR tensors: layout changes (CSR <-> CSC),
    view semantics, value preservation, and rejection of in-place transpose."""
    # Fixed: the parameter was named `index_type` while the body read
    # `index_dtype`, which only worked because the loop variable of the
    # caller happened to have that name (closure capture).
    def run_test(shape, nnz, index_dtype):
        # CSR
        a = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)
        self.assertEqual(a.layout, torch.sparse_csr)

        # CSC: swapping the two sparse dims flips the compressed dimension.
        a_t = a.transpose(0, 1)
        self.assertEqual(a_t.layout, torch.sparse_csc)

        # CSR: transposing a dim with itself is a no-op on the layout.
        a_v = a.transpose(0, 0)
        self.assertEqual(a_v.layout, torch.sparse_csr)

        # CSR again: double transpose restores the original layout.
        a_t_t = a_t.transpose(0, 1)
        self.assertEqual(a_t_t.layout, torch.sparse_csr)

        # TODO: Do we want to extend view properties to members as well?
        # These checks are based on is_view_of from test_view_ops.py
        self.assertTrue(a_t._is_view())
        self.assertTrue(a_v._is_view())
        self.assertTrue(a_t_t._is_view())

        self.assertTrue(a_t._base is a)
        self.assertTrue(a_v._base is a)
        self.assertTrue(a_t_t._base is a)

        self.assertFalse(a_t is a)
        self.assertFalse(a_v is a)
        self.assertFalse(a_t_t is a)

        self.assertEqual(a.to_dense().transpose(0, 1), a_t.to_dense())
        self.assertEqual(a.to_dense(), a_v.to_dense())
        self.assertEqual(a.to_dense(), a_t_t.to_dense())

        # In-place transpose is not implemented for sparse compressed layouts.
        with self.assertRaisesRegex(RuntimeError, "torch.transpose_: in-place transposition is not supported"):
            a.transpose_(0, 0)

        with self.assertRaisesRegex(RuntimeError, "torch.transpose_: in-place transposition is not supported"):
            a.transpose_(0, 1)

    for shape, index_dtype in itertools.product(
            [(10, 5), (10, 10)],
            [torch.int32, torch.int64]):
        run_test(shape, 0, index_dtype)
        run_test(shape, max(shape), index_dtype)
        run_test(shape, shape[0] * shape[1], index_dtype)
# TODO: This is a stopgap for a rigorous extension of our autograd tests
# to test the functionality of detach
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_exercise_detach(self, device, dtype):
    """Smoke-test that detach() on a sparse CSR tensor preserves its value."""
    shape, nnz = (3, 3), 4
    for index_dtype in (torch.int32, torch.int64):
        original = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)
        self.assertEqual(original, original.detach())
def _convert_to_layout(self, a, target_layout, blocksize=(2, 2)):
    """
    Helper function to call the correct layout conversion
    with reasonable defaults for the block size. Clearly there
    is a need for a to.layout overload.

    The converted result is validated against the sparse compressed
    tensor invariants before being returned.
    """
    if target_layout is torch.sparse_csr:
        result = a.to_sparse_csr()
    elif target_layout is torch.sparse_csc:
        result = a.to_sparse_csc()
    elif target_layout is torch.sparse_bsr:
        result = a.to_sparse_bsr(blocksize)
    elif target_layout is torch.sparse_bsc:
        result = a.to_sparse_bsc(blocksize)
    else:
        # Fixed: the error used to report repr(a) (the input tensor), but the
        # unsupported thing is the requested target layout.
        raise NotImplementedError(repr(target_layout))
    assert result.layout is target_layout
    # to_sparse_xyz methods use unsafe construction of sparse
    # compressed tensors. Here we explicitly validate the results
    # to make sure that the sparse tensors are consistent with the
    # corresponding sparse tensor invariants.
    compressed_indices_mth, plain_indices_mth = sparse_compressed_indices_methods[result.layout]
    compressed_indices, plain_indices = compressed_indices_mth(result), plain_indices_mth(result)
    torch._validate_sparse_compressed_tensor_args(compressed_indices, plain_indices, result.values(),
                                                  result.shape, result.layout)
    return result
def _construct_sp_matrix(self, tensor, layout, blocksize=(2, 2)):
if tensor.layout in [torch.sparse_coo, torch.sparse_csr, torch.sparse_csc, torch.strided]:
tensor = tensor.to_dense()
else:
raise NotImplementedError(repr(tensor))
if layout is torch.sparse_csr:
return sp.csr_matrix(tensor.cpu().numpy())
if layout is torch.sparse_csc:
return sp.csc_matrix(tensor.cpu().numpy())
if layout is torch.sparse_bsr:
return sp.bsr_matrix(tensor.cpu().numpy(), blocksize=blocksize).sorted_indices()
# No native scipy BSC support?
raise NotImplementedError(repr(tensor))
@skipMeta
@all_sparse_compressed_layouts('to_layout')
@all_sparse_compressed_layouts('from_layout')
def test_compressed_layout_conversions_coverage(self, device, from_layout, to_layout):
    """
    This test performs a smoke test for covered conversion and verifies
    that an exception is thrown for unsupported conversions.
    """
    def _to_from_layout(layout_a, layout_b):
        a = make_tensor((6, 10), dtype=torch.float, device=device)
        # Conversions touching CSC/BSC or starting from BSR are expected to
        # fail, except for the explicitly whitelisted CSC cases below.
        expect_error = (layout_a in [torch.sparse_csc, torch.sparse_bsc]
                        or layout_b in [torch.sparse_csc, torch.sparse_bsc])
        expect_error = expect_error or (layout_a, layout_b) == (torch.sparse_bsr, torch.sparse_bsr)
        expect_error = expect_error or (layout_a, layout_b) == (torch.sparse_bsr, torch.sparse_csr)
        # CSC to CSR conversion is supported
        if layout_a is torch.sparse_csc and layout_b is torch.sparse_csr:
            expect_error = False
        # CSC to CSC conversion is supported
        if layout_a is torch.sparse_csc and layout_b is torch.sparse_csc:
            expect_error = False
        if expect_error:
            # Either of the two conversions may raise; both run inside the
            # assertRaises scope.
            with self.assertRaises(RuntimeError):
                b = self._convert_to_layout(a, layout_a)
                self._convert_to_layout(b, layout_b)
        else:
            b = self._convert_to_layout(a, layout_a)
            c = self._convert_to_layout(b, layout_b)
            # BSR round-trips are not value-checked here (blocking may pad).
            if (layout_a is not torch.sparse_bsr and layout_b is not torch.sparse_bsr):
                self.assertEqual(a.to_dense(), c.to_dense())
    _to_from_layout(from_layout, to_layout)
@skipMeta
@all_sparse_compressed_layouts()
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
def test_dense_to_from_sparse_compressed(self, device, layout):
    """
    This test tests conversion from dense to/from CSR and CSC
    by comparing to SciPy's implementation.

    TODO: Eventually this is meant to be merged into test_compressed_layout_conversions_coverage
    """
    if layout is torch.sparse_bsc:
        # TODO: Remove this once support has been enabled
        return

    shapes = [(6, 10), (0, 10), (6, 0), (0, 0)]
    blocksizes = [(2, 2)]
    batch_sizes = [(3, )]

    if layout is torch.sparse_bsr:
        blocksizes += [(3, 5), (6, 10)]
        batch_sizes += [(2, 3), (1, 1, 1, 2)]

    def _test_matrix(pt_matrix, dense, layout, blocksize):
        # Compare a single PyTorch sparse matrix against the SciPy matrix
        # built from the same dense data: layout, shape, indices, values.
        sp_matrix = self._construct_sp_matrix(dense, layout, blocksize=blocksize)
        compressed_indices_mth, plain_indices_mth = sparse_compressed_indices_methods[layout]
        self.assertEqual(layout, pt_matrix.layout)
        self.assertEqual(sp_matrix.shape, pt_matrix.shape)
        self.assertEqual(torch.tensor(sp_matrix.indptr, dtype=torch.int64), compressed_indices_mth(pt_matrix))
        self.assertEqual(torch.tensor(sp_matrix.indices, dtype=torch.int64), plain_indices_mth(pt_matrix))
        self.assertEqual(torch.tensor(sp_matrix.data), pt_matrix.values())

    for shape, blocksize in itertools.product(shapes, blocksizes):
        dense = make_tensor(shape, dtype=torch.float, device=device)
        dense = dense.relu()  # Introduce some sparsity
        pt_matrix = self._convert_to_layout(dense, layout, blocksize=blocksize)
        _test_matrix(pt_matrix, dense, layout, blocksize)
        self.assertEqual(dense, pt_matrix.to_dense())

    if layout is not torch.sparse_bsr:
        # TODO: Remove this once support has been enabled
        return

    # Everything below runs only for the BSR layout (batched inputs).
    # Test batch shapes (ND inputs)

    # Case 1: Same sparsity pattern across matrices
    for shape, blocksize, batch_shape in itertools.product(shapes, blocksizes, batch_sizes):
        full_shape = batch_shape + shape
        batch_len = functools.reduce(lambda x, y: x * y, batch_shape, 1)
        dense = make_tensor(full_shape, dtype=torch.float, device=device)
        # select the first batch to create the mask
        mask = dense[tuple(np.unravel_index(0, batch_shape))].relu().bool()
        dense = dense * mask
        pt_tensor = self._convert_to_layout(dense, layout, blocksize=blocksize)
        for i in range(batch_len):
            batch_idx = tuple(np.unravel_index(i, batch_shape))
            _test_matrix(pt_tensor[batch_idx], dense[batch_idx], layout, blocksize)
        self.assertEqual(dense, pt_tensor.to_dense())

    # Verify exception when given 0 sized batch
    for shape, blocksize in itertools.product(shapes, blocksizes):
        dense = make_tensor((0,) + shape, dtype=torch.float, device=device)
        # TODO: Support zero sized batch dimensions
        with self.assertRaisesRegex(RuntimeError, "to_sparse_bsr: Expected product of batch dimensions to be non-zero."):
            self._convert_to_layout(dense, layout, blocksize=blocksize)

    # TODO: Case 2: Different sparsity pattern across matrices, but same number of zeros
    # NOTE: For blocksparse formats this applies at a per-block level,
    dense = make_tensor((2, 4, 4), dtype=torch.float, device=device)
    blocksize = (2, 2)
    mask = torch.tensor([
        [[True, True], [False, True]],
        [[True, False], [True, True]]],
        device=device).view((2, 2, 2, 1, 1))
    mask = mask.expand((2, 2, 2, 2, 2))
    mask = mask.transpose(2, 3)
    mask = mask.reshape_as(dense)
    dense = dense * mask
    # NOTE(review): given the early return above, layout is always
    # torch.sparse_bsr here, so the else-branches below look unreachable —
    # confirm whether they are kept for a future lift of the restriction.
    if layout == torch.sparse_bsr:
        # this is not an error as long as the nse is equal for bsr
        pt_tensor = self._convert_to_layout(dense, layout, blocksize=blocksize)
        for i in range(2):
            _test_matrix(pt_tensor[i], dense[i], layout, blocksize)
        self.assertEqual(dense, pt_tensor.to_dense())
    else:
        with self.assertRaisesRegex(RuntimeError, "Expect the same sparsity pattern across matrices for ND input."):
            self._convert_to_layout(dense, layout, blocksize=blocksize)

    # TODO: Case 3: Different sparsity pattern across matrices, but different number of zeros
    dense = make_tensor((2, 4, 4), dtype=torch.float, device=device)
    blocksize = (2, 2)
    mask = torch.tensor(
        [[[True, True], [False, False]],
         [[True, False], [True, True]]],
        device=device).view((2, 2, 2, 1, 1))
    mask = mask.expand((2, 2, 2, 2, 2))
    mask = mask.transpose(2, 3)
    mask = mask.reshape_as(dense)
    dense = dense * mask
    if layout == torch.sparse_bsr:
        msg = "Expect the same number of specified elements per batch."
    else:
        msg = "Expect the same sparsity pattern across matrices for ND input."
    with self.assertRaisesRegex(RuntimeError, msg):
        self._convert_to_layout(dense, layout, blocksize=blocksize)
@skipMeta
@all_sparse_compressed_layouts()
@coalescedonoff
@dtypes(torch.double)
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
def test_sparse_to_sparse_compressed(self, device, dtype, coalesced, layout):
    """
    This test tests conversion from COO to CSR and CSC and CSC to CSR and CSC
    by comparing to SciPy's implementation.

    TODO: Eventually this is meant to be merged into test_compressed_layout_conversions_coverage
    """
    if layout is torch.sparse_bsc:
        # TODO: Remove this once support has been enabled
        return
    if layout is torch.sparse_bsr:
        # TODO: Remove this once support has been enabled
        return

    for shape in [(0, 10), (6, 0), (6, 10), (0, 0)]:
        sparse_dim = 2
        nnz = shape[0] * shape[1] // 2
        sparse, _, _ = self.genSparseTensor(shape, sparse_dim, nnz, coalesced, device, dtype)
        sp_matrix = self._construct_sp_matrix(sparse, layout)
        pt_matrix = self._convert_to_layout(sparse, layout)

        # Accessor methods for the compressed/plain index tensors of the
        # target layout (CSR: crow/col, CSC: ccol/row).
        compressed_indices_mth = {
            torch.sparse_csr: torch.Tensor.crow_indices,
            torch.sparse_csc: torch.Tensor.ccol_indices,
        }[layout]

        plain_indices_mth = {
            torch.sparse_csr: torch.Tensor.col_indices,
            torch.sparse_csc: torch.Tensor.row_indices,
        }[layout]

        # COO -> {CSR, CSC}: indices and values must match SciPy's result.
        self.assertEqual(layout, pt_matrix.layout)
        self.assertEqual(sp_matrix.shape, pt_matrix.shape)
        self.assertEqual(torch.tensor(sp_matrix.indptr, dtype=torch.int64), compressed_indices_mth(pt_matrix))
        self.assertEqual(torch.tensor(sp_matrix.indices, dtype=torch.int64), plain_indices_mth(pt_matrix))
        self.assertEqual(torch.tensor(sp_matrix.data), pt_matrix.values())

        # CSC -> {CSR, CSC}: repeat the comparison starting from a CSC tensor.
        sparse_csc = sparse.to_sparse_csc()
        sp_matrix = self._construct_sp_matrix(sparse_csc, layout)
        pt_matrix = self._convert_to_layout(sparse_csc, layout)

        self.assertEqual(layout, pt_matrix.layout)
        self.assertEqual(sp_matrix.shape, pt_matrix.shape)
        self.assertEqual(torch.tensor(sp_matrix.indptr, dtype=torch.int64), compressed_indices_mth(pt_matrix))
        self.assertEqual(torch.tensor(sp_matrix.indices, dtype=torch.int64), plain_indices_mth(pt_matrix))
        self.assertEqual(torch.tensor(sp_matrix.data), pt_matrix.values())
# Instantiate per-device variants of the generic test classes,
# e.g., TestSparseCSRCPU and TestSparseCSRCUDA
instantiate_device_type_tests(TestSparseCSR, globals())
instantiate_device_type_tests(TestSparseCompressed, globals())

if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_sparse_csr.py |
# Owner(s): ["module: unknown"]
import io
import numpy as np
import os
import shutil
import sys
import unittest
import uuid
TEST_TENSORBOARD = True
try:
import tensorboard.summary.writer.event_file_writer # noqa: F401
from tensorboard.compat.proto.summary_pb2 import Summary
except ImportError:
TEST_TENSORBOARD = False
HAS_TORCHVISION = True
try:
import torchvision
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
TEST_CAFFE2 = True
try:
import caffe2.python.caffe2_pybind11_state as _caffe2_pybind11_state # noqa: F401
from caffe2.python import brew, cnn, core, workspace
from caffe2.python.model_helper import ModelHelper
except ImportError:
TEST_CAFFE2 = False
skipIfNoCaffe2 = unittest.skipIf(not TEST_CAFFE2, "no caffe2")
TEST_MATPLOTLIB = True
try:
import matplotlib
if os.environ.get('DISPLAY', '') == '':
matplotlib.use('Agg')
import matplotlib.pyplot as plt
except ImportError:
TEST_MATPLOTLIB = False
skipIfNoMatplotlib = unittest.skipIf(not TEST_MATPLOTLIB, "no matplotlib")
import torch
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ASAN, TEST_WITH_CROSSREF
def tensor_N(shape, dtype=float):
    """Return an ndarray of the given shape filled with 0..numel-1 in row-major order."""
    return np.arange(np.prod(shape), dtype=dtype).reshape(shape)
class BaseTestCase(TestCase):
    """ Base class used for all TensorBoard tests """
    def setUp(self):
        if not TEST_TENSORBOARD:
            return self.skipTest("Skip the test since TensorBoard is not installed")
        if TEST_WITH_CROSSREF:
            return self.skipTest("Don't run TensorBoard tests with crossref")
        # Directories created by createSummaryWriter; removed in tearDown.
        self.temp_dirs = []

    def createSummaryWriter(self):
        # Just to get the name of the directory in a writable place. Use a
        # random UUID so concurrent tests never collide on a directory name.
        temp_dir = str(uuid.uuid4())
        self.temp_dirs.append(temp_dir)
        return SummaryWriter(temp_dir)

    def tearDown(self):
        # Fixed: use the zero-argument super() form; the legacy
        # super(BaseTestCase, self) spelling is Python-2 era and this file
        # already requires Python 3 (it uses f-strings elsewhere).
        super().tearDown()
        # Remove directories created by SummaryWriter
        for temp_dir in self.temp_dirs:
            if os.path.exists(temp_dir):
                shutil.rmtree(temp_dir)
if TEST_TENSORBOARD:
from tensorboard.compat.proto.graph_pb2 import GraphDef
from torch.utils.tensorboard import summary, SummaryWriter
from torch.utils.tensorboard._utils import _prepare_video, convert_to_HWC
from torch.utils.tensorboard._convert_np import make_np
from torch.utils.tensorboard._pytorch_graph import graph
from google.protobuf import text_format
from PIL import Image
if TEST_TENSORBOARD and TEST_CAFFE2:
from torch.utils.tensorboard import _caffe2_graph as c2_graph
class TestTensorBoardPyTorchNumpy(BaseTestCase):
    """Tests for converting PyTorch tensors/values to numpy via make_np and
    logging them through SummaryWriter."""

    def test_pytorch_np(self):
        tensors = [torch.rand(3, 10, 10), torch.rand(1), torch.rand(1, 2, 3, 4, 5)]
        for tensor in tensors:
            # regular tensor
            self.assertIsInstance(make_np(tensor), np.ndarray)

            # CUDA tensor
            if torch.cuda.device_count() > 0:
                self.assertIsInstance(make_np(tensor.cuda()), np.ndarray)

            # regular variable
            self.assertIsInstance(make_np(torch.autograd.Variable(tensor)), np.ndarray)

            # CUDA variable
            if torch.cuda.device_count() > 0:
                self.assertIsInstance(make_np(torch.autograd.Variable(tensor).cuda()), np.ndarray)

        # python primitive type
        self.assertIsInstance(make_np(0), np.ndarray)
        self.assertIsInstance(make_np(0.1), np.ndarray)

    def test_pytorch_autograd_np(self):
        # An uninitialized Variable must still convert to an ndarray.
        x = torch.autograd.Variable(torch.empty(1))
        self.assertIsInstance(make_np(x), np.ndarray)

    def test_pytorch_write(self):
        # Smoke test: a tensor scalar can be written via add_scalar.
        with self.createSummaryWriter() as w:
            w.add_scalar('scalar', torch.autograd.Variable(torch.rand(1)), 0)

    def test_pytorch_histogram(self):
        # Smoke test: float and int tensors can be written via add_histogram.
        with self.createSummaryWriter() as w:
            w.add_histogram('float histogram', torch.rand((50,)))
            w.add_histogram('int histogram', torch.randint(0, 100, (50,)))

    def test_pytorch_histogram_raw(self):
        # Smoke test add_histogram_raw with histogram statistics computed
        # three different ways: np.histogram on floats, np.histogram on
        # ints, and torch.histc.
        with self.createSummaryWriter() as w:
            num = 50
            floats = make_np(torch.rand((num,)))
            bins = [0.0, 0.25, 0.5, 0.75, 1.0]
            counts, limits = np.histogram(floats, bins)
            sum_sq = floats.dot(floats).item()
            w.add_histogram_raw('float histogram raw',
                                min=floats.min().item(),
                                max=floats.max().item(),
                                num=num,
                                sum=floats.sum().item(),
                                sum_squares=sum_sq,
                                bucket_limits=limits[1:].tolist(),
                                bucket_counts=counts.tolist())

            ints = make_np(torch.randint(0, 100, (num,)))
            bins = [0, 25, 50, 75, 100]
            counts, limits = np.histogram(ints, bins)
            sum_sq = ints.dot(ints).item()
            w.add_histogram_raw('int histogram raw',
                                min=ints.min().item(),
                                max=ints.max().item(),
                                num=num,
                                sum=ints.sum().item(),
                                sum_squares=sum_sq,
                                bucket_limits=limits[1:].tolist(),
                                bucket_counts=counts.tolist())

            ints = torch.tensor(range(0, 100)).float()
            nbins = 100
            counts = torch.histc(ints, bins=nbins, min=0, max=99)
            limits = torch.tensor(range(nbins))
            sum_sq = ints.dot(ints).item()
            # NOTE(review): `num` is still 50 here while `ints` now holds 100
            # values — the recorded element count disagrees with the data;
            # confirm whether this should be len(ints).
            w.add_histogram_raw('int histogram raw',
                                min=ints.min().item(),
                                max=ints.max().item(),
                                num=num,
                                sum=ints.sum().item(),
                                sum_squares=sum_sq,
                                bucket_limits=limits.tolist(),
                                bucket_counts=counts.tolist())
class TestTensorBoardUtils(BaseTestCase):
    """Tests for the tensorboard helper utilities convert_to_HWC and _prepare_video."""

    def test_to_HWC(self):
        # CHW -> HWC
        test_image = np.random.randint(0, 256, size=(3, 32, 32), dtype=np.uint8)
        converted = convert_to_HWC(test_image, 'chw')
        self.assertEqual(converted.shape, (32, 32, 3))
        # NCHW (batch) -> single HWC grid image
        test_image = np.random.randint(0, 256, size=(16, 3, 32, 32), dtype=np.uint8)
        converted = convert_to_HWC(test_image, 'nchw')
        self.assertEqual(converted.shape, (64, 256, 3))
        # HW (grayscale) -> HWC with 3 replicated channels
        test_image = np.random.randint(0, 256, size=(32, 32), dtype=np.uint8)
        converted = convert_to_HWC(test_image, 'hw')
        self.assertEqual(converted.shape, (32, 32, 3))

    def test_convert_to_HWC_dtype_remains_same(self):
        # test to ensure convert_to_HWC restores the dtype of input np array and
        # thus the scale_factor calculated for the image is 1
        test_image = torch.tensor([[[[1, 2, 3], [4, 5, 6]]]], dtype=torch.uint8)
        tensor = make_np(test_image)
        tensor = convert_to_HWC(tensor, 'NCHW')
        scale_factor = summary._calc_scale_factor(tensor)
        self.assertEqual(scale_factor, 1, msg='Values are already in [0, 255], scale factor should be 1')

    def test_prepare_video(self):
        # At each timeframe, the sum over all other
        # dimensions of the video should be the same.
        shapes = [
            (16, 30, 3, 28, 28),
            (36, 30, 3, 28, 28),
            (19, 29, 3, 23, 19),
            (3, 3, 3, 3, 3)
        ]
        for s in shapes:
            V_input = np.random.random(s)
            V_after = _prepare_video(np.copy(V_input))
            total_frame = s[1]
            # Bring the time axis to the front so frames can be compared.
            V_input = np.swapaxes(V_input, 0, 1)
            for f in range(total_frame):
                x = np.reshape(V_input[f], newshape=(-1))
                y = np.reshape(V_after[f], newshape=(-1))
                np.testing.assert_array_almost_equal(np.sum(x), np.sum(y))

    def test_numpy_vid_uint8(self):
        # uint8 input is normalized by _prepare_video; multiplying by 255
        # restores the original scale so per-frame sums must match.
        V_input = np.random.randint(0, 256, (16, 30, 3, 28, 28)).astype(np.uint8)
        V_after = _prepare_video(np.copy(V_input)) * 255
        total_frame = V_input.shape[1]
        V_input = np.swapaxes(V_input, 0, 1)
        for f in range(total_frame):
            x = np.reshape(V_input[f], newshape=(-1))
            y = np.reshape(V_after[f], newshape=(-1))
            np.testing.assert_array_almost_equal(np.sum(x), np.sum(y))
# Shared fixture data.
# `freqs` is a sequence of tone frequencies (unused in the visible tests here;
# presumably for an audio test elsewhere in the file — TODO confirm).
freqs = [262, 294, 330, 349, 392, 440, 440, 440, 440, 440, 440]

# Precomputed PR-curve inputs consumed by TestTensorBoardWriter.test_writer's
# add_pr_curve_raw call below.
true_positive_counts = [75, 64, 21, 5, 0]
false_positive_counts = [150, 105, 18, 0, 0]
true_negative_counts = [0, 45, 132, 150, 150]
false_negative_counts = [0, 11, 54, 70, 75]
precision = [0.3333333, 0.3786982, 0.5384616, 1.0, 0.0]
recall = [1.0, 0.8533334, 0.28, 0.0666667, 0.0]
class TestTensorBoardWriter(BaseTestCase):
    def test_writer(self):
        """Smoke-test every major SummaryWriter.add_* entry point in one run."""
        with self.createSummaryWriter() as writer:
            sample_rate = 44100

            n_iter = 0
            # Hyper-parameters + metrics
            writer.add_hparams(
                {'lr': 0.1, 'bsize': 1},
                {'hparam/accuracy': 10, 'hparam/loss': 10}
            )
            # Scalars: default walltime, explicit walltime, new-style summary
            writer.add_scalar('data/scalar_systemtime', 0.1, n_iter)
            writer.add_scalar('data/scalar_customtime', 0.2, n_iter, walltime=n_iter)
            writer.add_scalar('data/new_style', 0.2, n_iter, new_style=True)
            writer.add_scalars('data/scalar_group', {
                "xsinx": n_iter * np.sin(n_iter),
                "xcosx": n_iter * np.cos(n_iter),
                "arctanx": np.arctan(n_iter)
            }, n_iter)
            x = np.zeros((32, 3, 64, 64))  # output from network
            writer.add_images('Image', x, n_iter)  # Tensor
            writer.add_image_with_boxes('imagebox',
                                        np.zeros((3, 64, 64)),
                                        np.array([[10, 10, 40, 40], [40, 40, 60, 60]]),
                                        n_iter)
            # Audio: two seconds of silence at 44.1 kHz
            x = np.zeros(sample_rate * 2)
            writer.add_audio('myAudio', x, n_iter)
            writer.add_video('myVideo', np.random.rand(16, 48, 1, 28, 28).astype(np.float32), n_iter)
            writer.add_text('Text', 'text logged at step:' + str(n_iter), n_iter)
            writer.add_text('markdown Text', '''a|b\n-|-\nc|d''', n_iter)
            writer.add_histogram('hist', np.random.rand(100, 100), n_iter)
            writer.add_pr_curve('xoxo', np.random.randint(2, size=100), np.random.rand(
                100), n_iter)  # needs tensorboard 0.4RC or later
            # Raw PR curve uses the module-level fixture lists defined above.
            writer.add_pr_curve_raw('prcurve with raw data', true_positive_counts,
                                    false_positive_counts,
                                    true_negative_counts,
                                    false_negative_counts,
                                    precision,
                                    recall, n_iter)
            # Mesh: a tetrahedron with per-vertex colors and faces
            v = np.array([[[1, 1, 1], [-1, -1, 1], [1, -1, -1], [-1, 1, -1]]], dtype=float)
            c = np.array([[[255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 0, 255]]], dtype=int)
            f = np.array([[[0, 2, 3], [0, 3, 1], [0, 1, 2], [1, 3, 2]]], dtype=int)
            writer.add_mesh('my_mesh', vertices=v, colors=c, faces=f)
class TestTensorBoardSummaryWriter(BaseTestCase):
    """Lifecycle tests for SummaryWriter: context-manager close, repeated
    open/close, and pathlib.Path log directories."""

    def test_summary_writer_ctx(self):
        # after using a SummaryWriter as a ctx it should be closed
        with self.createSummaryWriter() as writer:
            writer.add_scalar('test', 1)
        self.assertIs(writer.file_writer, None)

    def test_summary_writer_close(self):
        # Opening and closing SummaryWriter a lot should not run into
        # OSError: [Errno 24] Too many open files
        passed = True
        try:
            writer = self.createSummaryWriter()
            writer.close()
        except OSError:
            passed = False

        self.assertTrue(passed)

    def test_pathlib(self):
        import pathlib
        p = pathlib.Path('./pathlibtest' + str(uuid.uuid4()))
        with SummaryWriter(p) as writer:
            writer.add_scalar('test', 1)
        # Fixed: removed a redundant local `import shutil` — shutil is
        # already imported at module scope.
        shutil.rmtree(str(p))
class TestTensorBoardEmbedding(BaseTestCase):
    """Smoke tests for SummaryWriter.add_embedding with float32 and float64
    label images, with and without multi-column metadata."""

    def test_embedding(self):
        w = self.createSummaryWriter()
        all_features = torch.tensor([[1., 2., 3.], [5., 4., 1.], [3., 7., 7.]])
        all_labels = torch.tensor([33., 44., 55.])
        all_images = torch.zeros(3, 3, 5, 5)

        w.add_embedding(all_features,
                        metadata=all_labels,
                        label_img=all_images,
                        global_step=2)

        # Multi-column metadata: (label, dataset-split) pairs with a header.
        dataset_label = ['test'] * 2 + ['train'] * 2
        all_labels = list(zip(all_labels, dataset_label))
        w.add_embedding(all_features,
                        metadata=all_labels,
                        label_img=all_images,
                        metadata_header=['digit', 'dataset'],
                        global_step=2)
        # assert...

    def test_embedding_64(self):
        # Same as test_embedding but with float64 label images.
        w = self.createSummaryWriter()
        all_features = torch.tensor([[1., 2., 3.], [5., 4., 1.], [3., 7., 7.]])
        all_labels = torch.tensor([33., 44., 55.])
        all_images = torch.zeros((3, 3, 5, 5), dtype=torch.float64)

        w.add_embedding(all_features,
                        metadata=all_labels,
                        label_img=all_images,
                        global_step=2)

        dataset_label = ['test'] * 2 + ['train'] * 2
        all_labels = list(zip(all_labels, dataset_label))
        w.add_embedding(all_features,
                        metadata=all_labels,
                        label_img=all_images,
                        metadata_header=['digit', 'dataset'],
                        global_step=2)
class TestTensorBoardSummary(BaseTestCase):
    """Tests for the low-level summary.* proto builders (image, video, audio,
    histogram, hparams, mesh, scalar), mostly compared against golden
    .expect files via compare_proto/compare_image_proto."""

    def test_uint8_image(self):
        '''
        Tests that uint8 image (pixel values in [0, 255]) is not changed
        '''
        test_image = np.random.randint(0, 256, size=(3, 32, 32), dtype=np.uint8)
        scale_factor = summary._calc_scale_factor(test_image)
        self.assertEqual(scale_factor, 1, msg='Values are already in [0, 255], scale factor should be 1')

    def test_float32_image(self):
        '''
        Tests that float32 image (pixel values in [0, 1]) are scaled correctly
        to [0, 255]
        '''
        test_image = np.random.rand(3, 32, 32).astype(np.float32)
        scale_factor = summary._calc_scale_factor(test_image)
        self.assertEqual(scale_factor, 255, msg='Values are in [0, 1], scale factor should be 255')

    def test_list_input(self):
        # plain Python lists are rejected by histogram
        with self.assertRaises(Exception) as e_info:
            summary.histogram('dummy', [1, 3, 4, 5, 6], 'tensorflow')

    def test_empty_input(self):
        # empty arrays are rejected by histogram
        with self.assertRaises(Exception) as e_info:
            summary.histogram('dummy', np.ndarray(0), 'tensorflow')

    def test_image_with_boxes(self):
        self.assertTrue(compare_image_proto(summary.image_boxes('dummy',
                                            tensor_N(shape=(3, 32, 32)),
                                            np.array([[10, 10, 40, 40]])),
                                            self))

    def test_image_with_one_channel(self):
        self.assertTrue(compare_image_proto(
            summary.image('dummy',
                          tensor_N(shape=(1, 8, 8)),
                          dataformats='CHW'),
                          self))  # noqa: E131

    def test_image_with_one_channel_batched(self):
        self.assertTrue(compare_image_proto(
            summary.image('dummy',
                          tensor_N(shape=(2, 1, 8, 8)),
                          dataformats='NCHW'),
                          self))  # noqa: E131

    def test_image_with_3_channel_batched(self):
        self.assertTrue(compare_image_proto(
            summary.image('dummy',
                          tensor_N(shape=(2, 3, 8, 8)),
                          dataformats='NCHW'),
                          self))  # noqa: E131

    def test_image_without_channel(self):
        self.assertTrue(compare_image_proto(
            summary.image('dummy',
                          tensor_N(shape=(8, 8)),
                          dataformats='HW'),
                          self))  # noqa: E131

    def test_video(self):
        try:
            import moviepy  # noqa: F401
        except ImportError:
            # video summaries require moviepy; silently skip when absent
            return
        self.assertTrue(compare_proto(summary.video('dummy', tensor_N(shape=(4, 3, 1, 8, 8))), self))
        summary.video('dummy', np.random.rand(16, 48, 1, 28, 28))
        summary.video('dummy', np.random.rand(20, 7, 1, 8, 8))

    def test_audio(self):
        self.assertTrue(compare_proto(summary.audio('dummy', tensor_N(shape=(42,))), self))

    def test_text(self):
        self.assertTrue(compare_proto(summary.text('dummy', 'text 123'), self))

    def test_histogram_auto(self):
        self.assertTrue(compare_proto(summary.histogram('dummy', tensor_N(shape=(1024,)), bins='auto', max_bins=5), self))

    def test_histogram_fd(self):
        self.assertTrue(compare_proto(summary.histogram('dummy', tensor_N(shape=(1024,)), bins='fd', max_bins=5), self))

    def test_histogram_doane(self):
        self.assertTrue(compare_proto(summary.histogram('dummy', tensor_N(shape=(1024,)), bins='doane', max_bins=5), self))

    def test_custom_scalars(self):
        layout = {
            'Taiwan': {
                'twse': ['Multiline', ['twse/0050', 'twse/2330']]
            },
            'USA': {
                'dow': ['Margin', ['dow/aaa', 'dow/bbb', 'dow/ccc']],
                'nasdaq': ['Margin', ['nasdaq/aaa', 'nasdaq/bbb', 'nasdaq/ccc']]
            }
        }
        summary.custom_scalars(layout)  # only smoke test. Because protobuf in python2/3 serialize dictionary differently.

    def test_hparams_smoke(self):
        hp = {'lr': 0.1, 'bsize': 4}
        mt = {'accuracy': 0.1, 'loss': 10}
        summary.hparams(hp, mt)  # only smoke test. Because protobuf in python2/3 serialize dictionary differently.

        hp = {'use_magic': True, 'init_string': "42"}
        mt = {'accuracy': 0.1, 'loss': 10}
        summary.hparams(hp, mt)

        mt = {'accuracy': torch.zeros(1), 'loss': torch.zeros(1)}
        summary.hparams(hp, mt)

    def test_hparams_wrong_parameter(self):
        with self.assertRaises(TypeError):
            summary.hparams([], {})
        with self.assertRaises(TypeError):
            summary.hparams({}, [])
        with self.assertRaises(ValueError):
            res = summary.hparams({'pytorch': [1, 2]}, {'accuracy': 2.0})
        # metric data is used in writer.py so the code path is different, which leads to different exception type.
        with self.assertRaises(NotImplementedError):
            with self.createSummaryWriter() as writer:
                writer.add_hparams({'pytorch': 1.0}, {'accuracy': [1, 2]})

    def test_hparams_number(self):
        hp = {'lr': 0.1}
        mt = {'accuracy': 0.1}
        self.assertTrue(compare_proto(summary.hparams(hp, mt), self))

    def test_hparams_bool(self):
        hp = {'bool_var': True}
        mt = {'accuracy': 0.1}
        self.assertTrue(compare_proto(summary.hparams(hp, mt), self))

    def test_hparams_string(self):
        hp = {'string_var': "hi"}
        mt = {'accuracy': 0.1}
        self.assertTrue(compare_proto(summary.hparams(hp, mt), self))

    def test_hparams_domain_discrete(self):
        hp = {"lr": 0.1, "bool_var": True, "string_var": "hi"}
        mt = {"accuracy": 0.1}
        hp_domain = {"lr": [0.1], "bool_var": [True], "string_var": ["hi"]}

        # hparam_domain_discrete keys needs to be subset of hparam_dict keys
        with self.assertRaises(TypeError):
            summary.hparams(hp, mt, hparam_domain_discrete={"wrong_key": []})

        # hparam_domain_discrete values needs to be same type as hparam_dict values
        with self.assertRaises(TypeError):
            summary.hparams(hp, mt, hparam_domain_discrete={"lr": [True]})

        # only smoke test. Because protobuf map serialization is nondeterministic.
        summary.hparams(hp, mt, hparam_domain_discrete=hp_domain)

    def test_mesh(self):
        v = np.array([[[1, 1, 1], [-1, -1, 1], [1, -1, -1], [-1, 1, -1]]], dtype=float)
        c = np.array([[[255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 0, 255]]], dtype=int)
        f = np.array([[[0, 2, 3], [0, 3, 1], [0, 1, 2], [1, 3, 2]]], dtype=int)
        mesh = summary.mesh('my_mesh', vertices=v, colors=c, faces=f, config_dict=None)
        self.assertTrue(compare_proto(mesh, self))

    def test_scalar_new_style(self):
        scalar = summary.scalar('test_scalar', 1.0, new_style=True)
        self.assertTrue(compare_proto(scalar, self))
        # new-style scalars must be 0-dim; a 3-element tensor is rejected
        with self.assertRaises(AssertionError):
            summary.scalar('test_scalar2', torch.Tensor([1, 2, 3]), new_style=True)
def remove_whitespace(string):
    """Strip spaces, tabs and newlines from *string*.

    Used so that golden-file proto comparisons are insensitive to
    formatting differences. Other whitespace (e.g. '\r') is preserved,
    matching the original three chained replace() calls.
    """
    return string.translate(str.maketrans('', '', ' \t\n'))
def get_expected_file(function_ptr):
    """Return the path of the golden ``.expect`` file for a test instance.

    The file lives in an ``expect/`` directory next to this test module and
    is named ``TestTensorBoard.<test_method_name>.expect``.
    """
    module_name = function_ptr.__class__.__module__
    source_file = sys.modules[module_name].__file__
    # __file__ may point at a compiled .pyc; force the .py extension.
    source_file = ".".join(source_file.split('.')[:-1]) + '.py'
    # Use realpath to follow symlinks appropriately.
    expect_dir = os.path.dirname(os.path.realpath(source_file))
    test_name = function_ptr.id().split('.')[-1]
    return os.path.join(expect_dir,
                        "expect",
                        'TestTensorBoard.' + test_name + ".expect")
def read_expected_content(function_ptr):
    """Return the text stored in the golden expect file for *function_ptr*."""
    path = get_expected_file(function_ptr)
    assert os.path.exists(path)
    handle = open(path, "r")
    try:
        return handle.read()
    finally:
        handle.close()
def compare_image_proto(actual_proto, function_ptr):
    """Compare an image Summary proto against the golden expect file.

    Parses the expected proto from text format, decodes both encoded image
    payloads with PIL, and returns True when tag, dimensions, colorspace and
    decoded pixel data all match.
    """
    expected_str = read_expected_content(function_ptr)
    expected_proto = Summary()
    text_format.Parse(expected_str, expected_proto)

    # Only the first value entry of each Summary is compared.
    [actual, expected] = [actual_proto.value[0], expected_proto.value[0]]
    actual_img = Image.open(io.BytesIO(actual.image.encoded_image_string))
    expected_img = Image.open(io.BytesIO(expected.image.encoded_image_string))

    return (
        actual.tag == expected.tag and
        actual.image.height == expected.image.height and
        actual.image.width == expected.image.width and
        actual.image.colorspace == expected.image.colorspace and
        actual_img == expected_img
    )
def compare_proto(str_to_compare, function_ptr):
    """Whitespace-insensitively compare *str_to_compare* with the golden expect file."""
    expected_text = read_expected_content(function_ptr)
    actual_text = str(str_to_compare)
    return remove_whitespace(actual_text) == remove_whitespace(expected_text)
def write_proto(str_to_compare, function_ptr):
    """(Re)generate the golden expect file for *function_ptr* from *str_to_compare*."""
    target = get_expected_file(function_ptr)
    with open(target, 'w') as out:
        out.write(str(str_to_compare))
class TestTensorBoardPytorchGraph(BaseTestCase):
    """Tests exporting torch.nn modules as TensorBoard GraphDef protos via
    SummaryWriter.add_graph, compared node-by-node against golden files."""

    def test_pytorch_graph(self):
        dummy_input = (torch.zeros(1, 3),)

        class myLinear(torch.nn.Module):
            def __init__(self):
                super(myLinear, self).__init__()
                self.l = torch.nn.Linear(3, 5)

            def forward(self, x):
                return self.l(x)

        with self.createSummaryWriter() as w:
            w.add_graph(myLinear(), dummy_input)

        actual_proto, _ = graph(myLinear(), dummy_input)

        expected_str = read_expected_content(self)
        expected_proto = GraphDef()
        text_format.Parse(expected_str, expected_proto)

        # Compare structurally: attr *values* are not compared, only keys.
        self.assertEqual(len(expected_proto.node), len(actual_proto.node))
        for i in range(len(expected_proto.node)):
            expected_node = expected_proto.node[i]
            actual_node = actual_proto.node[i]
            self.assertEqual(expected_node.name, actual_node.name)
            self.assertEqual(expected_node.op, actual_node.op)
            self.assertEqual(expected_node.input, actual_node.input)
            self.assertEqual(expected_node.device, actual_node.device)
            self.assertEqual(
                sorted(expected_node.attr.keys()), sorted(actual_node.attr.keys()))

    def test_nested_nn_squential(self):
        dummy_input = torch.randn(2, 3)

        class InnerNNSquential(torch.nn.Module):
            def __init__(self, dim1, dim2):
                super().__init__()
                self.inner_nn_squential = torch.nn.Sequential(
                    torch.nn.Linear(dim1, dim2),
                    torch.nn.Linear(dim2, dim1),
                )

            def forward(self, x):
                x = self.inner_nn_squential(x)
                return x

        class OuterNNSquential(torch.nn.Module):
            def __init__(self, dim1=3, dim2=4, depth=2):
                super().__init__()
                layers = []
                for _ in range(depth):
                    layers.append(InnerNNSquential(dim1, dim2))
                self.outer_nn_squential = torch.nn.Sequential(*layers)

            def forward(self, x):
                x = self.outer_nn_squential(x)
                return x

        with self.createSummaryWriter() as w:
            w.add_graph(OuterNNSquential(), dummy_input)

        actual_proto, _ = graph(OuterNNSquential(), dummy_input)

        expected_str = read_expected_content(self)
        expected_proto = GraphDef()
        text_format.Parse(expected_str, expected_proto)

        # same structural comparison as test_pytorch_graph
        self.assertEqual(len(expected_proto.node), len(actual_proto.node))
        for i in range(len(expected_proto.node)):
            expected_node = expected_proto.node[i]
            actual_node = actual_proto.node[i]
            self.assertEqual(expected_node.name, actual_node.name)
            self.assertEqual(expected_node.op, actual_node.op)
            self.assertEqual(expected_node.input, actual_node.input)
            self.assertEqual(expected_node.device, actual_node.device)
            self.assertEqual(
                sorted(expected_node.attr.keys()), sorted(actual_node.attr.keys()))

    def test_pytorch_graph_dict_input(self):
        class Model(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.l = torch.nn.Linear(3, 5)

            def forward(self, x):
                return self.l(x)

        class ModelDict(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.l = torch.nn.Linear(3, 5)

            def forward(self, x):
                return {"out": self.l(x)}

        dummy_input = torch.zeros(1, 3)

        with self.createSummaryWriter() as w:
            w.add_graph(Model(), dummy_input)

        with self.createSummaryWriter() as w:
            w.add_graph(Model(), dummy_input, use_strict_trace=True)

        # expect error: Encountering a dict at the output of the tracer...
        with self.assertRaises(RuntimeError):
            with self.createSummaryWriter() as w:
                w.add_graph(ModelDict(), dummy_input, use_strict_trace=True)

        # non-strict tracing tolerates dict outputs
        with self.createSummaryWriter() as w:
            w.add_graph(ModelDict(), dummy_input, use_strict_trace=False)

    def test_mlp_graph(self):
        dummy_input = (torch.zeros(2, 1, 28, 28),)

        # This MLP class with the above input is expected
        # to fail JIT optimizations as seen at
        # https://github.com/pytorch/pytorch/issues/18903
        #
        # However, it should not raise an error during
        # the add_graph call and still continue.
        class myMLP(torch.nn.Module):
            def __init__(self):
                super(myMLP, self).__init__()
                self.input_len = 1 * 28 * 28
                self.fc1 = torch.nn.Linear(self.input_len, 1200)
                self.fc2 = torch.nn.Linear(1200, 1200)
                self.fc3 = torch.nn.Linear(1200, 10)

            def forward(self, x, update_batch_stats=True):
                h = torch.nn.functional.relu(
                    self.fc1(x.view(-1, self.input_len)))
                h = self.fc2(h)
                h = torch.nn.functional.relu(h)
                h = self.fc3(h)
                return h

        with self.createSummaryWriter() as w:
            w.add_graph(myMLP(), dummy_input)

    def test_wrong_input_size(self):
        # tracing a Linear(3, 5) with a (1, 9) input must fail
        with self.assertRaises(RuntimeError) as e_info:
            dummy_input = torch.rand(1, 9)
            model = torch.nn.Linear(3, 5)
            with self.createSummaryWriter() as w:
                w.add_graph(model, dummy_input)  # error

    @skipIfNoTorchVision
    def test_torchvision_smoke(self):
        # graph export should work for a range of torchvision models
        model_input_shapes = {
            'alexnet': (2, 3, 224, 224),
            'resnet34': (2, 3, 224, 224),
            'resnet152': (2, 3, 224, 224),
            'densenet121': (2, 3, 224, 224),
            'vgg16': (2, 3, 224, 224),
            'vgg19': (2, 3, 224, 224),
            'vgg16_bn': (2, 3, 224, 224),
            'vgg19_bn': (2, 3, 224, 224),
            'mobilenet_v2': (2, 3, 224, 224),
        }
        for model_name, input_shape in model_input_shapes.items():
            with self.createSummaryWriter() as w:
                model = getattr(torchvision.models, model_name)()
                w.add_graph(model, torch.zeros(input_shape))
class TestTensorBoardFigure(BaseTestCase):
    """Tests rendering matplotlib figures through SummaryWriter.add_figure,
    including the close=False keep-alive semantics."""

    @skipIfNoMatplotlib
    def test_figure(self):
        """add_figure with close=False keeps the figure alive; default closes it."""
        writer = self.createSummaryWriter()

        figure, axes = plt.figure(), plt.gca()
        circle1 = plt.Circle((0.2, 0.5), 0.2, color='r')
        circle2 = plt.Circle((0.8, 0.5), 0.2, color='g')
        axes.add_patch(circle1)
        axes.add_patch(circle2)
        plt.axis('scaled')
        plt.tight_layout()

        writer.add_figure("add_figure/figure", figure, 0, close=False)
        self.assertTrue(plt.fignum_exists(figure.number))

        writer.add_figure("add_figure/figure", figure, 1)
        if matplotlib.__version__ != '3.3.0':
            self.assertFalse(plt.fignum_exists(figure.number))
        else:
            print("Skipping fignum_exists, see https://github.com/matplotlib/matplotlib/issues/18163")

        writer.close()

    @skipIfNoMatplotlib
    def test_figure_list(self):
        """Same as test_figure but passing a list of figures at once."""
        writer = self.createSummaryWriter()

        figures = []
        for i in range(5):
            figure = plt.figure()
            plt.plot([i * 1, i * 2, i * 3], label="Plot " + str(i))
            plt.xlabel("X")
            # BUG FIX: the original called plt.xlabel("Y") here, labelling the
            # x-axis twice; the second call was clearly meant for the y-axis.
            plt.ylabel("Y")
            plt.legend()
            plt.tight_layout()
            figures.append(figure)

        writer.add_figure("add_figure/figure_list", figures, 0, close=False)
        self.assertTrue(all([plt.fignum_exists(figure.number) is True for figure in figures]))  # noqa: F812

        writer.add_figure("add_figure/figure_list", figures, 1)
        if matplotlib.__version__ != '3.3.0':
            self.assertTrue(all([plt.fignum_exists(figure.number) is False for figure in figures]))  # noqa: F812
        else:
            print("Skipping fignum_exists, see https://github.com/matplotlib/matplotlib/issues/18163")

        writer.close()
class TestTensorBoardNumpy(BaseTestCase):
    """Tests for the make_np conversion helper and caffe2 graph export."""

    def test_scalar(self):
        """Scalars of several Python/NumPy types convert to shape-(1,) arrays.

        BUG FIX: the original chained the two checks as
        ``self.assertIsInstance(...) and self.assertEqual(...)``; since
        ``assertIsInstance`` returns None the shape assertion was never
        executed. They are now separate statements so both actually run.
        """
        res = make_np(1.1)
        self.assertIsInstance(res, np.ndarray)
        self.assertEqual(res.shape, (1,))
        res = make_np(1 << 64 - 1)  # uint64_max
        self.assertIsInstance(res, np.ndarray)
        self.assertEqual(res.shape, (1,))
        res = make_np(np.float16(1.00000087))
        self.assertIsInstance(res, np.ndarray)
        self.assertEqual(res.shape, (1,))
        res = make_np(np.float128(1.00008 + 9))
        self.assertIsInstance(res, np.ndarray)
        self.assertEqual(res.shape, (1,))
        res = make_np(np.int64(100000000000))
        self.assertIsInstance(res, np.ndarray)
        self.assertEqual(res.shape, (1,))

    @skipIfNoCaffe2
    def test_caffe2_np(self):
        workspace.FeedBlob("testBlob", tensor_N(shape=(1, 3, 64, 64)))
        self.assertIsInstance(make_np('testBlob'), np.ndarray)

    @skipIfNoCaffe2
    def test_caffe2_np_expect_fail(self):
        with self.assertRaises(RuntimeError):
            res = make_np('This_blob_does_not_exist')

    def test_pytorch_np_expect_fail(self):
        with self.assertRaises(NotImplementedError):
            res = make_np({'pytorch': 1.0})

    @skipIfNoCaffe2
    @unittest.skipIf(TEST_WITH_ASAN, "Caffe2 failure with ASAN")
    def test_caffe2_simple_model(self):
        model = ModelHelper(name="mnist")
        # how come those inputs don't break the forward pass =.=a
        workspace.FeedBlob("data", np.random.randn(1, 3, 64, 64).astype(np.float32))
        # np.int was removed in NumPy 1.24; it was always an alias for builtin int.
        workspace.FeedBlob("label", np.random.randn(1, 1000).astype(int))

        with core.NameScope("conv1"):
            conv1 = brew.conv(model, "data", 'conv1', dim_in=1, dim_out=20, kernel=5)
            # Image size: 24 x 24 -> 12 x 12
            pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)
            # Image size: 12 x 12 -> 8 x 8
            conv2 = brew.conv(model, pool1, 'conv2', dim_in=20, dim_out=100, kernel=5)
            # Image size: 8 x 8 -> 4 x 4
            pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)
        with core.NameScope("classifier"):
            # 50 * 4 * 4 stands for dim_out from previous layer multiplied by the image size
            fc3 = brew.fc(model, pool2, 'fc3', dim_in=100 * 4 * 4, dim_out=500)
            relu = brew.relu(model, fc3, fc3)
            pred = brew.fc(model, relu, 'pred', 500, 10)
            softmax = brew.softmax(model, pred, 'softmax')
            xent = model.LabelCrossEntropy([softmax, "label"], 'xent')
            # compute the expected loss
            loss = model.AveragedLoss(xent, "loss")
        model.net.RunAllOnMKL()
        model.param_init_net.RunAllOnMKL()
        model.AddGradientOperators([loss], skip=1)
        blob_name_tracker = {}
        graph = c2_graph.model_to_graph_def(
            model,
            blob_name_tracker=blob_name_tracker,
            shapes={},
            show_simplified=False,
        )
        compare_proto(graph, self)

    @skipIfNoCaffe2
    def test_caffe2_simple_cnnmodel(self):
        model = cnn.CNNModelHelper("NCHW", name="overfeat")
        workspace.FeedBlob("data", np.random.randn(1, 3, 64, 64).astype(np.float32))
        # see note above: astype(int) replaces the removed np.int alias
        workspace.FeedBlob("label", np.random.randn(1, 1000).astype(int))
        with core.NameScope("conv1"):
            conv1 = model.Conv("data", "conv1", 3, 96, 11, stride=4)
            relu1 = model.Relu(conv1, conv1)
            pool1 = model.MaxPool(relu1, "pool1", kernel=2, stride=2)
        with core.NameScope("classifier"):
            fc = model.FC(pool1, "fc", 4096, 1000)
            pred = model.Softmax(fc, "pred")
            xent = model.LabelCrossEntropy([pred, "label"], "xent")
            loss = model.AveragedLoss(xent, "loss")

        blob_name_tracker = {}
        graph = c2_graph.model_to_graph_def(
            model,
            blob_name_tracker=blob_name_tracker,
            shapes={},
            show_simplified=False,
        )
        compare_proto(graph, self)
# Standard PyTorch test-suite entry point.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_tensorboard.py |
# Owner(s): ["module: cuda"]
from itertools import repeat, chain, product
from typing import NamedTuple
import collections
import contextlib
import ctypes
import gc
import io
import os
import pickle
import queue
import sys
import tempfile
import threading
import unittest
from random import randint
import torch
import torch.cuda
import torch.cuda.comm as comm
from torch.nn.parallel import scatter_gather
from torch.utils.checkpoint import checkpoint_sequential
from torch._six import inf, nan
from torch.testing._internal.common_methods_invocations import tri_tests_args, tri_large_tests_args, \
_compare_trilu_indices, _compare_large_trilu_indices
from torch.testing._internal.common_utils import TestCase, freeze_rng_state, run_tests, \
NO_MULTIPROCESSING_SPAWN, skipIfRocm, load_tests, IS_REMOTE_GPU, IS_SANDCASTLE, IS_WINDOWS, \
slowTest, skipCUDANonDefaultStreamIf, skipCUDAMemoryLeakCheckIf, TEST_WITH_ROCM, TEST_NUMPY, \
get_cycles_per_ms, parametrize, instantiate_parametrized_tests
from torch.testing._internal.autocast_test_lists import AutocastTestLists
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# We cannot import TEST_CUDA and TEST_MULTIGPU from torch.testing._internal.common_cuda here,
# because if we do that, the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed
# multiple times as well during the execution of this test suite, and it will
# cause CUDA OOM error on Windows.
# Capability flags computed once at import time; when CUDA is absent the whole
# TestCase class is swapped for `object` so every test silently no-ops.
TEST_CUDA = torch.cuda.is_available()
TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2

if not TEST_CUDA:
    print('CUDA not available, skipping tests', file=sys.stderr)
    TestCase = object  # noqa: F811

TEST_LARGE_TENSOR = TEST_CUDA
TEST_MEDIUM_TENSOR = TEST_CUDA
TEST_CUDNN = TEST_CUDA
TEST_BF16 = False
if TEST_CUDA:
    torch.ones(1).cuda()  # initialize cuda context
    TEST_CUDNN = TEST_CUDA and (TEST_WITH_ROCM or
                                torch.backends.cudnn.is_acceptable(torch.tensor(1., device=torch.device('cuda:0'))))
    # size-gated flags: "large" tests need >= 12 GB of device memory, "medium" >= 6 GB
    TEST_LARGE_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 12e9
    TEST_MEDIUM_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 6e9
    TEST_BF16 = torch.cuda.is_bf16_supported()
def make_sparse_tensor(t, n, *sizes):
assert t.is_sparse
tensor = t()
i = tensor._indices()
i = i.new(len(sizes), n).copy_(
torch.cat([torch.LongTensor(1, n).random_(s) for s in sizes], 0))
v = tensor._values()
v = v.new(n).copy_(torch.randn(n))
return t(i, v, torch.Size(sizes)).coalesce()
_cycles_per_ms = None
class TestCuda(TestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
FIFTY_MIL_CYCLES = 50000000
def setUp(self):
    """Per-test setup: build the autocast op lists on the primary CUDA device."""
    super(TestCuda, self).setUp()
    self.autocast_lists = AutocastTestLists(torch.device('cuda:0'))
def tearDown(self):
    """Release the autocast fixtures before generic teardown."""
    del self.autocast_lists
    super(TestCuda, self).tearDown()
def _check_memory_stat_consistency(self):
    """Recompute per-device allocator stats from a memory snapshot and assert
    they match what torch.cuda.memory_stats() reports."""
    snapshot = torch.cuda.memory_snapshot()

    # device -> stat name -> expected value, rebuilt from the raw segments
    expected_each_device = collections.defaultdict(lambda: collections.defaultdict(int))

    for segment in snapshot:
        expected = expected_each_device[segment["device"]]
        pool_str = segment["segment_type"] + "_pool"

        expected["segment.all.current"] += 1
        expected["segment." + pool_str + ".current"] += 1

        expected["allocated_bytes.all.current"] += segment["allocated_size"]
        expected["allocated_bytes." + pool_str + ".current"] += segment["allocated_size"]

        expected["reserved_bytes.all.current"] += segment["total_size"]
        expected["reserved_bytes." + pool_str + ".current"] += segment["total_size"]

        expected["active_bytes.all.current"] += segment["active_size"]
        expected["active_bytes." + pool_str + ".current"] += segment["active_size"]

        # a segment holding more than one block has been split by the allocator
        is_split = len(segment["blocks"]) > 1
        for block in segment["blocks"]:
            if block["state"] == "active_allocated":
                expected["allocation.all.current"] += 1
                expected["allocation." + pool_str + ".current"] += 1

            if block["state"].startswith("active_"):
                expected["active.all.current"] += 1
                expected["active." + pool_str + ".current"] += 1

            if block["state"] == "inactive" and is_split:
                expected["inactive_split.all.current"] += 1
                expected["inactive_split." + pool_str + ".current"] += 1
                expected["inactive_split_bytes.all.current"] += block["size"]
                expected["inactive_split_bytes." + pool_str + ".current"] += block["size"]

    for device, expected in expected_each_device.items():
        stats = torch.cuda.memory_stats(device)
        for k, v in expected.items():
            self.assertEqual(v, stats[k])
@staticmethod
def _test_memory_stats_generator(self, device=None, N=35):
    """Generator that allocates/frees CUDA tensors in phases and, after each
    step, asserts that memory_allocated / memory_reserved and their peak
    counterparts moved in the expected direction. Yields between phases so two
    generators on different devices can be interleaved (see
    test_memory_stats_multigpu). ``self`` is passed explicitly because this is
    a staticmethod shared across driver tests.
    """
    if device is None:
        device = torch.cuda.current_device()

    m0 = torch.cuda.memory_allocated(device)
    # single-element lists so the nested closures can rebind the values
    last_m_arr = [torch.cuda.memory_allocated(device)]
    max_m_arr = [torch.cuda.max_memory_allocated(device)]
    last_r_arr = [torch.cuda.memory_reserved(device)]
    max_r_arr = [torch.cuda.max_memory_reserved(device)]

    def alloc(*size):
        with torch.cuda.device(device):
            # NOTE: do **not** use methods that can have additional
            # memory overhead, e.g., inplace random sampling methods.
            # they can leave some memory occupied even after being
            # deallocated, e.g., initialized RNG state, causing some
            # memory checks below to fail.
            return torch.cuda.FloatTensor(*size)

    def assert_change(comp=1, empty_cache=False, reset_peak=False):
        # comp > 0: increased
        # comp = 0: equal
        # comp < 0: decreased
        new_m = torch.cuda.memory_allocated(device)
        new_max_m = torch.cuda.max_memory_allocated(device)
        if comp > 0:
            self.assertGreater(new_m, last_m_arr[0])
        elif comp < 0:
            self.assertLess(new_m, last_m_arr[0])
        else:
            self.assertEqual(new_m, last_m_arr[0])
        self.assertLessEqual(new_m, new_max_m)
        self.assertGreaterEqual(new_max_m, max_m_arr[0])
        last_m_arr[0] = new_m
        max_m_arr[0] = new_max_m

        new_r = torch.cuda.memory_reserved(device)
        new_max_r = torch.cuda.max_memory_reserved(device)
        # emptying cache may happen (due to allocation or empty_cache), so
        # we can't assert new_c >= last_c
        self.assertLessEqual(new_r, new_max_r)
        self.assertGreaterEqual(new_max_r, max_r_arr[0])
        last_r_arr[0] = new_r
        max_r_arr[0] = new_max_r

        if empty_cache:
            torch.cuda.empty_cache()
            new_r = torch.cuda.memory_reserved(device)
            new_max_r = torch.cuda.max_memory_reserved(device)
            self.assertLessEqual(new_r, last_r_arr[0])
            self.assertLessEqual(new_r, new_max_r)
            self.assertEqual(new_max_r, max_r_arr[0])
            last_r_arr[0] = new_r

        if reset_peak:
            torch.cuda.reset_peak_memory_stats(device)
            self.assertEqual(torch.cuda.memory_allocated(device), last_m_arr[0])
            self.assertEqual(torch.cuda.max_memory_allocated(device), last_m_arr[0])
            max_m_arr[0] = last_m_arr[0]
            self.assertEqual(torch.cuda.memory_reserved(device), last_r_arr[0])
            self.assertEqual(torch.cuda.max_memory_reserved(device), last_r_arr[0])
            max_r_arr[0] = last_r_arr[0]

    # baseline: no allocation means no stat movement
    assert_change(0)
    assert_change(0, reset_peak=True)
    assert_change(0, empty_cache=True)
    assert_change(0, reset_peak=True)
    assert_change(0)
    yield

    tensors1 = [alloc(1), alloc(10, 20), alloc(200, 300, 2000)]
    m1 = torch.cuda.memory_allocated(device)
    assert_change(1)
    yield

    tensors2 = []

    for i in range(1, int(N / 2) + 1):
        # small ones
        tensors2.append(alloc(i, i * 4))
        assert_change(1)
        yield

    for i in range(5, int(N / 2) + 5):
        # large ones
        tensors2.append(alloc(i, i * 7, i * 9, i * 11))
        assert_change(1, reset_peak=(i % 2 == 0))
        yield

    tensors2.append(alloc(0, 0, 0))
    assert_change(0)
    yield

    # reordering references must not change any stat
    permute = []
    for i in torch.randperm(len(tensors2)):
        permute.append(tensors2[i])
    assert_change(0)
    yield

    del tensors2
    assert_change(0)
    yield
    tensors2 = permute
    assert_change(0)
    yield
    del permute
    assert_change(0, reset_peak=True)
    yield

    for i in range(int(N / 2)):
        x = tensors2[i].numel()
        del tensors2[i]
        assert_change(-x)  # in case that tensors2[i] is empty
        yield

    for i in range(2, int(2 * N / 3) + 2):
        tensors2.append(alloc(i, i * 3, i * 8))
        assert_change(1)
        yield

    del tensors2
    assert_change(-1, reset_peak=True)
    assert_change(0)
    self.assertEqual(torch.cuda.memory_allocated(device), m1)
    yield True

    del tensors1
    assert_change(-1, reset_peak=True)
    self.assertEqual(torch.cuda.memory_allocated(device), m0)

    # test empty_cache and reset_peak
    assert_change(0, empty_cache=True)
    assert_change(0, reset_peak=True)
def test_cudart_register(self):
    """cudaHostRegister/Unregister should toggle a CPU tensor's pinned status."""
    t = torch.ones(20)
    self.assertFalse(t.is_pinned())
    cudart = torch.cuda.cudart()
    r = cudart.cudaHostRegister(t.data_ptr(), t.numel() * t.element_size(), 0)
    self.assertEqual(r, 0)  # 0 == cudaSuccess
    self.assertTrue(t.is_pinned())
    r = cudart.cudaHostUnregister(t.data_ptr())
    self.assertEqual(r, 0)
    self.assertFalse(t.is_pinned())
def test_memory_stats(self):
    """Run the phased allocation generator and check stat consistency at each step."""
    gc.collect()
    torch.cuda.empty_cache()
    for _ in self._test_memory_stats_generator(self):
        self._check_memory_stat_consistency()
def test_memory_allocation(self):
    """caching_allocator_alloc/delete should raise and restore memory_allocated."""
    gc.collect()
    torch.cuda.empty_cache()
    mem = None
    size = 1
    prev = 0
    try:
        prev = torch.cuda.memory_allocated()
        mem = torch.cuda.caching_allocator_alloc(size)
        self.assertGreater(torch.cuda.memory_allocated(), prev)
    finally:
        # always release the raw allocation, then verify we are back at baseline
        if mem is not None:
            torch.cuda.caching_allocator_delete(mem)
            self.assertEqual(torch.cuda.memory_allocated(), prev)
def test_check_error(self):
    """check_error(0) is a no-op; a non-zero code raises CudaError."""
    # Assert this call doesn't raise.
    torch.cuda.check_error(0)

    with self.assertRaisesRegex(torch.cuda.CudaError,
                                "out of memory|hipErrorOutOfMemory"):
        torch.cuda.check_error(2)
def test_cuda_get_device_name(self):
    """get_device_name(None) and get_device_name() both mean the current device."""
    # Testing the behaviour with None as an argument
    current_device = torch.cuda.current_device()
    current_device_name = torch.cuda.get_device_name(current_device)
    device_name_None = torch.cuda.get_device_name(None)
    self.assertEqual(current_device_name, device_name_None)

    # Testing the behaviour for No argument
    device_name_no_argument = torch.cuda.get_device_name()
    self.assertEqual(current_device_name, device_name_no_argument)
def test_cuda_get_device_capability(self):
    """get_device_capability(None) and get_device_capability() both mean the current device."""
    # Testing the behaviour with None as an argument
    current_device = torch.cuda.current_device()
    current_device_capability = torch.cuda.get_device_capability(current_device)
    device_capability_None = torch.cuda.get_device_capability(None)
    self.assertEqual(current_device_capability, device_capability_None)

    # Testing the behaviour for No argument
    device_capability_no_argument = torch.cuda.get_device_capability()
    self.assertEqual(current_device_capability, device_capability_no_argument)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_stats_multigpu(self):
    """Interleave the phased allocation generator on two devices, both in
    strict lockstep and in a semi-random order, to check per-device stats
    stay independent."""
    # advance a generator with a end flag
    def advance(gen, end):
        if not end:
            try:
                next(gen)
            except StopIteration:
                end = True
        return end

    # interlace
    torch.cuda.empty_cache()
    gen0 = self._test_memory_stats_generator(self, device='cuda:0', N=35)
    gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
    end0 = end1 = False
    while not (end0 and end1):
        end0 = advance(gen0, end0)
        end1 = advance(gen1, end1)

    # semi-random order
    torch.cuda.empty_cache()
    gen0 = self._test_memory_stats_generator(self, device=0, N=35)
    gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
    end0 = end1 = False

    while not (end0 and end1):
        end0 = advance(gen0, end0)
        if not end0:
            # advance gen1 a random 0-2 steps for every gen0 step
            gen1_max_times = torch.LongTensor(1).random_(0, 3)[0]
        else:
            # gen0 is done: drain gen1 to completion
            gen1_max_times = inf
        t = 0
        while t < gen1_max_times and not end1:
            end1 = advance(gen1, end1)
            t += 1
def test_out_of_memory(self):
    """Absurdly large allocations raise clear OOM errors without corrupting state."""
    tensor = torch.zeros(1024, device='cuda')

    with self.assertRaisesRegex(RuntimeError, "Tried to allocate 800000000.00 GiB"):
        torch.empty(1024 * 1024 * 1024 * 800000000, dtype=torch.int8, device='cuda')

    with self.assertRaisesRegex(RuntimeError, "Tried to allocate more than 1EB memory"):
        torch.empty(1024 * 1024 * 1024 * 8000000000, dtype=torch.int8, device='cuda')

    # ensure out of memory error doesn't disturb subsequent kernel
    tensor.fill_(1)
    self.assertTrue((tensor == 1).all())
def test_set_per_process_memory_fraction(self):
    """set_per_process_memory_fraction validates its argument and caps allocations."""
    # test invalid fraction value.
    with self.assertRaisesRegex(TypeError, "Invalid type"):
        torch.cuda.set_per_process_memory_fraction(int(1))
    with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
        torch.cuda.set_per_process_memory_fraction(-0.1)
    with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
        torch.cuda.set_per_process_memory_fraction(2.0)

    tensor = torch.zeros(1024, device='cuda')
    torch.cuda.empty_cache()
    total_memory = torch.cuda.get_device_properties(0).total_memory
    torch.cuda.set_per_process_memory_fraction(0.5, 0)

    # test 0.499 allocation is ok.
    application = int(total_memory * 0.499) - torch.cuda.max_memory_reserved()
    tmp_tensor = torch.empty(application, dtype=torch.int8, device='cuda')
    del tmp_tensor
    torch.cuda.empty_cache()

    application = int(total_memory * 0.5)
    # it will get OOM when try to allocate more than half memory.
    with self.assertRaisesRegex(RuntimeError, "out of memory"):
        torch.empty(application, dtype=torch.int8, device='cuda')

    # ensure out of memory error doesn't disturb subsequent kernel
    tensor.fill_(1)
    self.assertTrue((tensor == 1).all())
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_autogpu(self):
    """New tensors land on the active device; ops keep their operands' device."""
    x = torch.randn(5, 5).cuda()
    y = torch.randn(5, 5).cuda()
    self.assertEqual(x.get_device(), 0)
    self.assertEqual(x.get_device(), 0)
    with torch.cuda.device(1):
        z = torch.randn(5, 5).cuda()
        self.assertEqual(z.get_device(), 1)
        # an op on device-0 operands stays on device 0 even inside device(1)
        q = x.add(y)
        self.assertEqual(q.get_device(), 0)
        w = torch.randn(5, 5).cuda()
        self.assertEqual(w.get_device(), 1)
        self.assertEqual(y.cuda().get_device(), 1)
    # back outside the ctx, .cuda() targets the default device 0
    z = z.cuda()
    self.assertEqual(z.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_new(self):
    """tensor.new follows the source tensor's device unless device= overrides it."""
    x = torch.randn(3, 3).cuda()
    self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
    self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)

    with torch.cuda.device(1):
        # still device 0: new() tracks x, not the active device
        self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
        self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_device(self):
    """.cuda() is a no-op (same object) when already on the target device,
    and copies across devices otherwise — from both GPU and CPU sources."""
    x = torch.randn(5, 5).cuda()
    with torch.cuda.device(1):
        y = x.cuda()
        self.assertEqual(y.get_device(), 1)
        self.assertIs(y.cuda(), y)
        z = y.cuda(0)
        self.assertEqual(z.get_device(), 0)
        self.assertIs(z.cuda(0), z)

    x = torch.randn(5, 5)
    with torch.cuda.device(1):
        y = x.cuda()
        self.assertEqual(y.get_device(), 1)
        self.assertIs(y.cuda(), y)
        z = y.cuda(0)
        self.assertEqual(z.get_device(), 0)
        self.assertIs(z.cuda(0), z)
def _test_copy_sync_current_stream(self, x, y):
    """Helper: verify that Tensor.copy_ is synchronized on the *current*
    streams of both the source and destination devices, so a later copy
    issued on the same destination stream observes the earlier one."""
    x_plus_one = x + 1
    s0 = torch.cuda.Stream(device=x.device)
    s1 = torch.cuda.Stream(device=y.device)
    s2 = torch.cuda.Stream(device=x.device)
    s3 = torch.cuda.Stream(device=y.device)

    # same dst stream different src streams
    with torch.cuda.stream(s0):
        torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
        with torch.cuda.stream(s1):
            y.copy_(x_plus_one)

    with torch.cuda.stream(s2), torch.cuda.stream(s1):
        y.copy_(x)

    s1.synchronize()
    # The copy() is synchronized on the current streams of both src and dst.
    # In the above test, the _sleep() op on s0 will not block the copy() on
    # s2, but both copies are synchronized on s1 in the dst device. Hence,
    # x is copied to y after x_plus_one is copied to y. If x and y are on
    # the same device, both copy() ops are synchronized on s1.
    self.assertEqual(y, x)

    # same src stream different dst streams
    with torch.cuda.stream(s1):
        torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
        with torch.cuda.stream(s0):
            y.copy_(x_plus_one)

    with torch.cuda.stream(s3), torch.cuda.stream(s0):
        y.copy_(x)

    s0.synchronize()
    # Similarly, both copy() ops are synchronized on s0.
    self.assertEqual(y, x)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_streams(self):
    """Run the copy/stream-synchronization helper both across two devices
    and within a single device."""
    dev0 = torch.device('cuda:0')
    src = torch.zeros(5, 5, device=dev0)
    dev1 = torch.device('cuda:1')
    cross_dst = torch.zeros(5, 5, device=dev1)
    # cross-device copy (cuda:0 -> cuda:1)
    self._test_copy_sync_current_stream(src, cross_dst)

    # same-device copy (cuda:0 -> cuda:0)
    same_dst = torch.zeros(5, 5, device=dev0)
    self._test_copy_sync_current_stream(src, same_dst)
def test_copy_non_blocking(self):
    """Non-blocking copies between pinned CPU memory and CUDA must be
    correct once an event recorded after the copy has been synchronized,
    including when the pinned tensor is an offset view of its storage."""
    def _test_copy_non_blocking(a, b):
        event = torch.cuda.Event()
        a.copy_(b, non_blocking=True)
        # the event is recorded on the current stream after the copy, so
        # synchronizing it guarantees the copy has completed
        event.record()
        event.synchronize()
        self.assertEqual(a, b)

    # 10MB copies
    x = torch.ones(10000000, dtype=torch.uint8).cuda()
    y = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
    _test_copy_non_blocking(x, y)

    x = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
    y = torch.ones(10000000, dtype=torch.uint8).cuda()
    _test_copy_non_blocking(x, y)

    # Test the case where the pinned data_ptr is not equal to the storage data_ptr.
    x_base = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
    x = x_base[1:]
    self.assertTrue(x.is_pinned())
    self.assertTrue(x_base.is_pinned())
    self.assertNotEqual(x_base.data_ptr(), x.data_ptr())
    self.assertEqual(x_base.storage().data_ptr(), x.storage().data_ptr())
    y = torch.ones(10000000 - 1, dtype=torch.uint8).cuda()
    _test_copy_non_blocking(x, y)
def test_to_non_blocking(self):
    """.to(..., non_blocking=True) must return while the copy is still in
    flight (stream busy), and a blocking .to() must not; a non-blocking
    GPU->CPU copy should hand back pinned memory."""
    stream = torch.cuda.current_stream()

    def _test_to_non_blocking(a, non_blocking, dst):
        torch.cuda.synchronize()
        # Pushes an 0.1 second spin to stream so if the copy is non blocking,
        # stream will almost surely be active when we query().
        torch.cuda._sleep(int(100 * get_cycles_per_ms()))
        b = a.to(device=dst, non_blocking=non_blocking)
        # non-blocking => the stream should still be busy (query() False)
        self.assertEqual(stream.query(), not non_blocking)
        stream.synchronize()
        self.assertEqual(a, b)
        # only a non-blocking copy to CPU is expected to be pinned
        self.assertTrue(b.is_pinned() == (non_blocking and dst == "cpu"))

    for dst, try_non_blocking in product(("cuda", "cpu"), (True, False)):
        # Creates source on the opposite device from destination.
        src = torch.randn(1000000,
                          device="cuda" if dst == "cpu" else "cpu",
                          pin_memory=True if dst == "cuda" else False)
        _test_to_non_blocking(src, try_non_blocking, dst)
def test_to_cpu_blocking_by_default(self):
    """Without non_blocking, .to('cpu') must block: the stream is idle by
    the time it returns, and the result is not pinned."""
    src = torch.randn(1000000, device="cuda")
    torch.cuda.synchronize()
    # queue ~0.1s of GPU work; a blocking copy must wait it out
    torch.cuda._sleep(int(100 * get_cycles_per_ms()))
    dst = src.to(device="cpu")
    self.assertEqual(torch.cuda.current_stream().query(), True)
    self.assertEqual(src, dst)
    self.assertFalse(dst.is_pinned())
def test_serialization_array_with_storage(self):
    """Round-trip a list of CUDA tensors (with aliasing) plus a raw
    storage through torch.save/torch.load, preserving types and sharing."""
    x = torch.randn(5, 5).cuda()
    y = torch.IntTensor(2, 5).fill_(0).cuda()
    # x appears twice so we can verify aliasing survives serialization
    q = [x, y, x, y.storage()]
    with tempfile.NamedTemporaryFile() as f:
        torch.save(q, f)
        f.seek(0)
        q_copy = torch.load(f)
    self.assertEqual(q_copy, q, atol=0, rtol=0)
    # writing through one alias must be visible through the other
    q_copy[0].fill_(5)
    self.assertEqual(q_copy[0], q_copy[2], atol=0, rtol=0)
    self.assertTrue(isinstance(q_copy[0], torch.cuda.FloatTensor))
    self.assertTrue(isinstance(q_copy[1], torch.cuda.IntTensor))
    self.assertTrue(isinstance(q_copy[2], torch.cuda.FloatTensor))
    self.assertTrue(isinstance(q_copy[3], torch.storage.TypedStorage))
    self.assertTrue(isinstance(q_copy[3]._storage, torch.UntypedStorage))
    # filling the tensor must be observable via its deserialized storage
    q_copy[1].fill_(10)
    self.assertEqual(q_copy[3], torch.cuda.IntStorage(10).fill_(10))
def test_cublas_allow_tf32_get_set(self):
    """The Python allow_tf32 flag and the C++ getter must stay in sync;
    skipped when the env override pins the flag on."""
    skip_tf32_cublas = 'TORCH_ALLOW_TF32_CUBLAS_OVERRIDE' in os.environ and\
        int(os.environ['TORCH_ALLOW_TF32_CUBLAS_OVERRIDE'])
    if skip_tf32_cublas:
        # with the override active the flag is forced true; nothing to toggle
        self.assertTrue(torch.backends.cuda.matmul.allow_tf32)
        return

    orig = torch.backends.cuda.matmul.allow_tf32
    self.assertEqual(torch._C._get_cublas_allow_tf32(), orig)
    # toggle, check the C++ side follows, then restore the original value
    torch.backends.cuda.matmul.allow_tf32 = not orig
    self.assertEqual(torch._C._get_cublas_allow_tf32(), not orig)
    torch.backends.cuda.matmul.allow_tf32 = orig
def test_float32_matmul_precision_get_set(self):
    """set/get_float32_matmul_precision round-trips, and tracks the cuBLAS
    allow_tf32 flag unless the env override pins that flag."""
    self.assertEqual(torch.get_float32_matmul_precision(), 'highest')
    # when the override is set, allow_tf32 is forced, so skip those checks
    skip_tf32_cublas = 'TORCH_ALLOW_TF32_CUBLAS_OVERRIDE' in os.environ and\
        int(os.environ['TORCH_ALLOW_TF32_CUBLAS_OVERRIDE'])
    if not skip_tf32_cublas:
        self.assertFalse(torch.backends.cuda.matmul.allow_tf32)
    for p in ('medium', 'high'):
        torch.set_float32_matmul_precision(p)
        self.assertEqual(torch.get_float32_matmul_precision(), p)
        # lowering matmul precision is expected to enable TF32
        if not skip_tf32_cublas:
            self.assertTrue(torch.backends.cuda.matmul.allow_tf32)
    torch.set_float32_matmul_precision('highest')
    self.assertEqual(torch.get_float32_matmul_precision(), 'highest')
    if not skip_tf32_cublas:
        self.assertFalse(torch.backends.cuda.matmul.allow_tf32)
def test_cublas_allow_fp16_reduced_precision_reduction_get_set(self):
    """The Python-level matmul flag and the C++ getter must agree when the
    flag is flipped and when it is restored."""
    initial = torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction
    getter = torch._C._get_cublas_allow_fp16_reduced_precision_reduction
    self.assertEqual(getter(), initial)
    # flip the flag, confirm the binding reflects it, then restore it
    torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = not initial
    self.assertEqual(getter(), not initial)
    torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = initial
def test_cudnn_allow_tf32_get_set(self):
    """torch.backends.cudnn.flags must apply and report allow_tf32 inside
    its context (None leaves the other flags untouched)."""
    with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=False):
        self.assertFalse(torch.backends.cudnn.allow_tf32)
    with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=True):
        self.assertTrue(torch.backends.cudnn.allow_tf32)
def test_type_conversions(self):
    """dtype/device conversion methods must return the matching legacy
    tensor and storage classes on CPU and CUDA."""
    x = torch.randn(5, 5)
    self.assertIsInstance(x.float(), torch.FloatTensor)
    self.assertIsInstance(x.cuda().double(), torch.cuda.DoubleTensor)
    self.assertIsInstance(x.cuda().float(), torch.cuda.FloatTensor)
    self.assertIsInstance(x.cuda().float().cpu(), torch.FloatTensor)
    self.assertIsInstance(x.cuda().float().cpu().int(), torch.IntTensor)

    # the same round-trips at the storage level
    y = x.storage()
    self.assertIsInstance(y.float(), torch.FloatStorage)
    self.assertIsInstance(y.cuda().double(), torch.cuda.DoubleStorage)
    self.assertIsInstance(y.cuda().float(), torch.cuda.FloatStorage)
    self.assertIsInstance(y.cuda().float().cpu(), torch.FloatStorage)
    self.assertIsInstance(y.cuda().float().cpu().int(), torch.IntStorage)
@unittest.skip("was disabled due to not enough memory, but actually it always fail")
def test_arithmetic_large_tensor(self):
    """In-place arithmetic on a 2**30-element CUDA tensor; each sum checks
    the op applied uniformly. Currently skipped (see decorator)."""
    x = torch.empty(2**30, device='cuda')

    x.fill_(1)
    self.assertEqual(x.sum(), 2**30)

    x += 1
    self.assertEqual(x.sum(), 2**31)

    x.fill_(1)
    x -= 0.5
    self.assertEqual(x.sum(), 2**29)

    x.fill_(1)
    x *= 2
    self.assertEqual(x.sum(), 2**31)

    x.fill_(1)
    x /= 2
    self.assertEqual(x.sum(), 2**29)
def test_gather_bool(self):
    """torch.gather must work on bool tensors on CUDA."""
    src = torch.tensor([[False, True], [True, True]], device='cuda')
    index = torch.tensor([[0, 0], [1, 0]], device='cuda')
    expected = torch.tensor([[False, False], [True, True]], device='cuda')
    self.assertEqual(torch.gather(src, 1, index), expected)
def test_torch_manual_seed_seeds_cuda_devices(self):
    """torch.manual_seed (the global API) must also seed the CUDA RNG, so
    re-seeding reproduces the same uniform_ draw."""
    with freeze_rng_state():
        x = torch.zeros(4, 4).float().cuda()
        torch.manual_seed(2)
        self.assertEqual(torch.cuda.initial_seed(), 2)
        x.uniform_()
        # re-seeding must make the next draw identical to the first
        torch.manual_seed(2)
        y = x.clone().uniform_()
        self.assertEqual(x, y)
        self.assertEqual(torch.cuda.initial_seed(), 2)
def test_manual_seed(self):
    """torch.cuda.manual_seed must make both uniform_ and bernoulli draws
    reproducible across re-seeding."""
    with freeze_rng_state():
        x = torch.zeros(4, 4).float().cuda()
        torch.cuda.manual_seed(2)
        self.assertEqual(torch.cuda.initial_seed(), 2)
        x.uniform_()
        a = torch.bernoulli(torch.full_like(x, 0.5))
        # re-seed and repeat the exact same sequence of draws
        torch.cuda.manual_seed(2)
        y = x.clone().uniform_()
        b = torch.bernoulli(torch.full_like(x, 0.5))
        self.assertEqual(x, y)
        self.assertEqual(a, b)
        self.assertEqual(torch.cuda.initial_seed(), 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_cat_autogpu(self):
    """torch.cat of tensors on a non-default GPU must place its output on
    that same GPU."""
    x = torch.randn(4, 4).cuda(1)
    y = torch.randn(4, 4).cuda(1)
    z = torch.cat([x, y], 0)
    self.assertEqual(z.get_device(), x.get_device())
@unittest.skipIf(torch.cuda.device_count() >= 10, "Loading a cuda:9 tensor")
def test_load_nonexistent_device(self):
    """torch.load must raise a clear RuntimeError when the serialized
    restore location names a CUDA device that does not exist."""
    # Setup: create a serialized file object with a 'cuda:9' restore location
    tensor = torch.randn(2, device='cuda')
    buf = io.BytesIO()
    torch.save(tensor, buf)
    # NB: this might not work in the future if serialization changes
    buf = io.BytesIO(buf.getvalue().replace(b'cuda:0', b'cuda:9'))

    msg = r'Attempting to deserialize object on CUDA device 9'
    with self.assertRaisesRegex(RuntimeError, msg):
        _ = torch.load(buf)
def test_specify_improper_device_name(self):
    """torch.load must reject a malformed device string ('cuda0' lacks the
    ':' separator)."""
    import os
    fname = "tempfile.pt"
    try:
        with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
            torch.save([torch.nn.Parameter(torch.randn(10, 10))], fname,
                       _use_new_zipfile_serialization=True)
            torch.load(fname, 'cuda0')
    finally:
        # always clean up the temp file, even on assertion failure
        if os.path.exists(fname):
            os.remove(fname)
def test_get_device_index(self):
    """_get_device_index must reject malformed device strings and non-CUDA
    devices even with optional=True."""
    from torch.cuda._utils import _get_device_index
    with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
        _get_device_index('cuda0', optional=True)

    with self.assertRaisesRegex(ValueError, "Expected a cuda device"):
        cpu_device = torch.device('cpu')
        _get_device_index(cpu_device, optional=True)
def test_serialization_array_with_empty(self):
    """Round-trip a list containing an empty CUDA tensor, preserving value,
    type, and device of each element."""
    x = [torch.randn(4, 4).cuda(), torch.cuda.FloatTensor()]
    with tempfile.NamedTemporaryFile() as f:
        torch.save(x, f)
        f.seek(0)
        x_copy = torch.load(f)
    for original, copy in zip(x, x_copy):
        self.assertEqual(copy, original)
        self.assertIs(type(copy), type(original))
        self.assertEqual(copy.get_device(), original.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap(self):
    """torch.load with a map_location *callable* can remap cuda:1 storages
    onto cuda:0 at deserialization time."""
    x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]

    def gpu_remap(storage, location):
        if location == 'cuda:1':
            return storage.cuda(0)
        # NOTE(review): implicitly returns None for other locations —
        # presumably torch.load then falls back to its default restore;
        # confirm against torch.load's map_location docs

    with tempfile.NamedTemporaryFile() as f:
        torch.save(x, f)
        f.seek(0)
        x_copy = torch.load(f, map_location=gpu_remap)

    for original, copy in zip(x, x_copy):
        self.assertEqual(copy, original)
        self.assertIs(type(copy), type(original))
        # after remapping, every element lives on device 0
        self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap_dict(self):
    """torch.load with a map_location *dict* ({'cuda:1': 'cuda:0'}) must
    remap storages exactly like the callable form."""
    x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
    with tempfile.NamedTemporaryFile() as f:
        torch.save(x, f)
        f.seek(0)
        x_copy = torch.load(f, map_location={'cuda:1': 'cuda:0'})
    for original, copy in zip(x, x_copy):
        self.assertEqual(copy, original)
        self.assertIs(type(copy), type(original))
        # after remapping, every element lives on device 0
        self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_storage_clone(self):
    """Cloning or dtype-converting a storage on a non-default GPU must keep
    the result on that same device."""
    x = torch.randn(4, 4, device='cuda:1').storage()
    y = x.clone()
    self.assertEqual(x.get_device(), y.get_device())
    # every dtype-conversion method must also preserve the device
    for t in ['byte', 'char', 'short', 'int', 'long', 'half', 'double']:
        self.assertEqual(getattr(x, t)().get_device(), x.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_cuda_set_device(self):
    """torch.cuda.set_device changes the device used by .cuda(), nested
    device() contexts override it, and exiting a context restores the
    device that was current when it was entered."""
    x = torch.randn(5, 5)
    with torch.cuda.device(1):
        self.assertEqual(x.cuda().get_device(), 1)
        # set_device inside the context overrides the context's device
        torch.cuda.set_device(0)
        self.assertEqual(x.cuda().get_device(), 0)
        with torch.cuda.device(1):
            self.assertEqual(x.cuda().get_device(), 1)
        self.assertEqual(x.cuda().get_device(), 0)
        torch.cuda.set_device(1)
    # leaving the outer context restores device 0
    self.assertEqual(x.cuda().get_device(), 0)
def test_cuda_synchronize(self):
    """torch.cuda.synchronize accepts every CUDA device spelling (default,
    string, index, torch.device) and rejects non-CUDA devices."""
    torch.cuda.synchronize()
    torch.cuda.synchronize('cuda')
    torch.cuda.synchronize('cuda:0')
    torch.cuda.synchronize(0)
    torch.cuda.synchronize(torch.device('cuda:0'))

    if TEST_MULTIGPU:
        torch.cuda.synchronize('cuda:1')
        torch.cuda.synchronize(1)
        torch.cuda.synchronize(torch.device('cuda:1'))

    # CPU devices must be rejected in both spellings
    with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
        torch.cuda.synchronize(torch.device("cpu"))

    with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
        torch.cuda.synchronize("cpu")
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_current_stream(self):
    """current_stream() reports the stream of the requested (or current)
    device, with or without an active device context."""
    d0 = torch.device('cuda:0')
    d1 = torch.device('cuda:1')

    s0 = torch.cuda.current_stream()
    s1 = torch.cuda.current_stream(device=1)
    s2 = torch.cuda.current_stream(device=0)

    self.assertEqual(d0, s0.device)
    self.assertEqual(d1, s1.device)
    self.assertEqual(d0, s2.device)
    # no device argument is equivalent to the current device (0 here)
    self.assertEqual(s0, s2)

    with torch.cuda.device(d1):
        s0 = torch.cuda.current_stream()
        s1 = torch.cuda.current_stream(1)
        s2 = torch.cuda.current_stream(d0)

    self.assertEqual(d1, s0.device)
    self.assertEqual(d1, s1.device)
    self.assertEqual(d0, s2.device)
    # inside device(1), the default and the explicit-1 streams coincide
    self.assertEqual(s0, s1)

    with self.assertRaisesRegex(ValueError,
                                "Expected a cuda device, but got: cpu"):
        torch.cuda.current_stream(torch.device('cpu'))
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_default_stream(self):
    """default_stream() is per-device and stable: the same stream is
    returned via the device context and via an explicit device argument,
    and it is the current stream when no other stream is active."""
    d0 = torch.device('cuda:0')
    d1 = torch.device('cuda:1')

    with torch.cuda.device(d0):
        s0 = torch.cuda.default_stream()

    with torch.cuda.device(d1):
        s1 = torch.cuda.default_stream()

    s2 = torch.cuda.default_stream(device=0)
    s3 = torch.cuda.default_stream(d1)

    self.assertEqual(d0, s0.device)
    self.assertEqual(d1, s1.device)
    self.assertEqual(d0, s2.device)
    self.assertEqual(d1, s3.device)
    # context form and explicit-argument form must agree per device
    self.assertEqual(s0, s2)
    self.assertEqual(s1, s3)

    with torch.cuda.device(d0):
        self.assertEqual(torch.cuda.current_stream(), s0)

    with torch.cuda.device(d1):
        self.assertEqual(torch.cuda.current_stream(), s1)

    with self.assertRaisesRegex(ValueError,
                                "Expected a cuda device, but got: cpu"):
        torch.cuda.default_stream(torch.device('cpu'))
@skipCUDANonDefaultStreamIf(True)
def test_streams(self):
    """Basic stream behavior: the default stream has cuda_stream handle 0,
    a user stream does not, torch.cuda.stream() switches the current
    stream, and synchronize() drains queued async work."""
    default_stream = torch.cuda.current_stream()
    user_stream = torch.cuda.Stream()
    self.assertEqual(torch.cuda.current_stream(), default_stream)
    self.assertNotEqual(default_stream, user_stream)
    # the default (legacy) stream is represented by the null handle
    self.assertEqual(default_stream.cuda_stream, 0)
    self.assertNotEqual(user_stream.cuda_stream, 0)
    with torch.cuda.stream(user_stream):
        self.assertEqual(torch.cuda.current_stream(), user_stream)
    # nothing was queued on user_stream, so it is idle
    self.assertTrue(user_stream.query())
    tensor1 = torch.ByteTensor(5).pin_memory()
    tensor2 = tensor1.cuda(non_blocking=True) + 1
    default_stream.synchronize()
    self.assertTrue(default_stream.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_device(self):
    """An event has no device until recorded; once recorded, it reports
    the device of the stream that recorded it."""
    d0 = torch.device('cuda:0')
    d1 = torch.device('cuda:1')
    e0 = torch.cuda.Event()

    self.assertEqual(None, e0.device)

    with torch.cuda.device(d0):
        s0 = torch.cuda.current_stream()
        s0.record_event(e0)

    with torch.cuda.device(d1):
        s1 = torch.cuda.Stream()
        e1 = s1.record_event()

    self.assertEqual(s0.device, torch.device('cuda:0'))
    self.assertEqual(e0.device, torch.device('cuda:0'))
    self.assertEqual(s1.device, torch.device('cuda:1'))
    self.assertEqual(e1.device, torch.device('cuda:1'))
def test_stream_event_repr(self):
    """repr() of streams and events should name their classes, both before
    and after the event has been recorded."""
    stream = torch.cuda.current_stream()
    self.assertIn("torch.cuda.Stream", repr(stream))
    event = torch.cuda.Event()
    self.assertIn("torch.cuda.Event", repr(event))
    stream.record_event(event)
    # recording the event must not break its repr
    self.assertIn("torch.cuda.Event", repr(event))
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_context(self):
    """torch.cuda.stream() contexts nest: each one switches both the
    current stream AND the current device, and each exit restores the
    previous pair exactly."""
    s0 = torch.cuda.current_stream()
    s1 = torch.cuda.Stream(device=1)
    s2 = torch.cuda.Stream(device=0)

    with torch.cuda.device(s1.device):
        prev_stream_on_cuda1 = torch.cuda.current_stream()

    self.assertEqual(torch.cuda.current_stream(), s0)
    self.assertEqual(0, torch.cuda.current_device())
    with torch.cuda.stream(s1):
        self.assertEqual(torch.cuda.current_stream(), s1)
        self.assertEqual(1, torch.cuda.current_device())
        with torch.cuda.stream(s2):
            self.assertEqual(torch.cuda.current_stream(), s2)
            self.assertEqual(0, torch.cuda.current_device())
            with torch.cuda.stream(s0):
                self.assertEqual(torch.cuda.current_stream(), s0)
                self.assertEqual(0, torch.cuda.current_device())
            # each exit restores the enclosing stream/device pair
            self.assertEqual(torch.cuda.current_stream(), s2)
            self.assertEqual(0, torch.cuda.current_device())
        self.assertEqual(torch.cuda.current_stream(), s1)
        self.assertEqual(1, torch.cuda.current_device())

    # cuda:1's current stream is untouched by the stream() contexts above
    with torch.cuda.device(s1.device):
        self.assertEqual(prev_stream_on_cuda1, torch.cuda.current_stream())

    self.assertEqual(torch.cuda.current_stream(), s0)
    self.assertEqual(0, torch.cuda.current_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu(self):
    """Streams carry their own device, and each device has its own notion
    of 'current stream'."""
    default_stream = torch.cuda.current_stream()
    self.assertEqual(default_stream.device, torch.device('cuda:0'))
    stream = torch.cuda.Stream(device=1)
    self.assertEqual(stream.device, torch.device('cuda:1'))
    with torch.cuda.device(1):
        self.assertEqual(
            torch.cuda.current_stream().device, torch.device('cuda:1'))
        # cuda:1's current stream is distinct from cuda:0's default stream
        self.assertNotEqual(torch.cuda.current_stream(), default_stream)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_query(self):
    """Stream.query() and Stream.synchronize() report/affect the stream
    they belong to, regardless of which device is current at call time."""
    d0 = torch.device('cuda:0')
    d1 = torch.device('cuda:1')
    torch.cuda.synchronize(d0)
    torch.cuda.synchronize(d1)

    with torch.cuda.device(d0):
        s0 = torch.cuda.current_stream()

    with torch.cuda.device(d1):
        s1 = torch.cuda.current_stream()
        # queue a long spin on cuda:1 only
        torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)

    self.assertTrue(s0.query())
    self.assertFalse(s1.query())

    with torch.cuda.device(d0):
        self.assertTrue(s0.query())
        self.assertFalse(s1.query())

    with torch.cuda.device(d1):
        self.assertTrue(s0.query())
        self.assertFalse(s1.query())

    # deliberately using a different device
    with torch.cuda.device(d0):
        s1.synchronize()

    self.assertTrue(s0.query())
    self.assertTrue(s1.query())

    with torch.cuda.device(d0):
        self.assertTrue(s0.query())
        self.assertTrue(s1.query())

    with torch.cuda.device(d1):
        self.assertTrue(s0.query())
        self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_eq(self):
    """Stream equality and hashing: streams are equal iff they wrap the
    same (device, cuda_stream) pair; equal streams hash equal."""
    d0 = torch.device('cuda:0')
    d1 = torch.device('cuda:1')

    with torch.cuda.device(d0):
        s0 = torch.cuda.current_stream()
        s1 = torch.cuda.current_stream()

    with torch.cuda.device(d1):
        s2 = torch.cuda.current_stream()
        s3 = torch.cuda.current_stream()

    self.assertTrue(s0 == s0)
    self.assertTrue(s0 == s1)
    self.assertTrue(s2 == s2)
    self.assertTrue(s2 == s3)
    # same underlying handle, but different devices => not equal
    self.assertFalse(s0 == s2)
    self.assertFalse(s1 == s3)

    self.assertEqual(s0.device, s1.device)
    self.assertEqual(s0.cuda_stream, s1.cuda_stream)
    self.assertEqual(s2.device, s3.device)
    self.assertEqual(s2.cuda_stream, s3.cuda_stream)
    self.assertNotEqual(s0.device, s3.device)

    # hash must be consistent with equality
    self.assertEqual(hash(s0), hash(s1))
    self.assertEqual(hash(s2), hash(s3))
    self.assertNotEqual(hash(s0), hash(s3))
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_streams_priority(self):
    """Streams created with an explicit priority must report that priority
    and the requested device."""
    low, high = torch.cuda.Stream.priority_range()
    s0 = torch.cuda.Stream(device=0, priority=low)

    self.assertEqual(low, s0.priority)
    self.assertEqual(torch.device('cuda:0'), s0.device)

    s1 = torch.cuda.Stream(device=1, priority=high)

    self.assertEqual(high, s1.priority)
    self.assertEqual(torch.device('cuda:1'), s1.device)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_tensor_device(self):
    """Legacy cuda tensor constructors: no device / device=None follow the
    current device; an explicit index always wins."""
    self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 0)
    self.assertEqual(torch.cuda.FloatTensor(1, device=1).get_device(), 1)
    with torch.cuda.device(1):
        self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 1)
        self.assertEqual(torch.cuda.FloatTensor(1, device=0).get_device(), 0)
        # device=None means "current device", i.e. 1 inside this context
        self.assertEqual(torch.cuda.FloatTensor(1, device=None).get_device(), 1)
def test_events(self):
    """Event semantics: an unrecorded event queries complete; one recorded
    after queued GPU work is incomplete until synchronized; elapsed_time
    between two recorded events is positive."""
    stream = torch.cuda.current_stream()
    event = torch.cuda.Event(enable_timing=True)
    # never recorded => reports complete
    self.assertTrue(event.query())
    start_event = torch.cuda.Event(enable_timing=True)
    stream.record_event(start_event)
    torch.cuda._sleep(int(50 * get_cycles_per_ms()))  # ~50 ms GPU spin
    stream.record_event(event)
    self.assertFalse(event.query())
    event.synchronize()
    self.assertTrue(event.query())
    self.assertGreater(start_event.elapsed_time(event), 0)
@staticmethod
def _stream_synchronize(self, spin_time_cycles):
    """Spin the GPU for `spin_time_cycles`, block via Stream.synchronize(),
    and return the measured GPU time in milliseconds.

    NOTE: deliberately a @staticmethod that takes `self` explicitly, so it
    can be passed around and invoked from a worker thread (see
    test_stream_event_nogil).
    """
    s = torch.cuda.current_stream()
    e_tik = torch.cuda.Event(enable_timing=True)
    e_tok = torch.cuda.Event(enable_timing=True)

    e_tik.record(s)
    torch.cuda._sleep(spin_time_cycles)
    e_tok.record(s)
    s.synchronize()

    self.assertTrue(s.query())

    # not necessary to check e_tik and e_tok, as elapsed_time would throw
    # exception if otherwise.
    return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_synchronize(self, spin_time_cycles):
    """Like _stream_synchronize, but block via Event.synchronize() on the
    trailing event instead of Stream.synchronize(); returns GPU ms.

    NOTE: @staticmethod with an explicit `self`, invoked from a worker
    thread in test_stream_event_nogil.
    """
    s = torch.cuda.current_stream()
    e_tik = torch.cuda.Event(enable_timing=True)
    e_tok = torch.cuda.Event(enable_timing=True)

    e_tik.record(s)
    torch.cuda._sleep(spin_time_cycles)
    s.record_event(e_tok)
    e_tok.synchronize()

    self.assertTrue(s.query())

    # not necessary to check e_tik and e_tok, as elapsed_time would throw
    # exception if otherwise.
    return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_wait(self, spin_time_cycles):
    """Like _stream_synchronize, but use Event.wait() to make a second
    stream (s1) wait on work queued on the current stream before its own
    spin; block via blocking-event synchronize. Returns GPU ms.

    NOTE: @staticmethod with an explicit `self`, invoked from a worker
    thread in test_stream_event_nogil.
    """
    s0 = torch.cuda.current_stream()
    s1 = torch.cuda.Stream()
    e_tik = torch.cuda.Event(blocking=True, enable_timing=True)
    e_tok = torch.cuda.Event(blocking=True, enable_timing=True)

    e_tik.record(s0)
    torch.cuda._sleep(spin_time_cycles - 10)
    # make s1 wait until s0's spin (up to e_sync) has finished
    e_sync = torch.cuda.Event(blocking=True)
    e_sync.record()
    e_sync.wait(s1)
    with torch.cuda.stream(s1):
        torch.cuda._sleep(10)
    s1.synchronize()
    e_tok.record()
    e_tok.synchronize()

    self.assertTrue(s0.query())
    self.assertTrue(s1.query())
    self.assertTrue(e_sync.query())

    # not necessary to check e_tik and e_tok, as elapsed_time would throw
    # exception if otherwise.
    return e_tik.elapsed_time(e_tok)
@staticmethod
def _test_stream_event_nogil(self, sync_func, p2c, c2p):
    """Worker-thread body: handshake with the parent over the two queues,
    then run sync_func on cuda:1 and report its measured time back."""
    with torch.cuda.device('cuda:1'):
        c2p.put(0)   # signal: worker is ready
        p2c.get()    # wait for the parent's go-ahead
        c2p.put(sync_func(self, TestCuda.FIFTY_MIL_CYCLES))
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_nogil(self):
    """The stream/event synchronization calls must release the GIL: a
    parent and a worker thread synchronize concurrently on different
    devices, and their combined time must exceed the wall-clock total."""
    for sync_func in [TestCuda._stream_synchronize,
                      TestCuda._event_synchronize,
                      TestCuda._event_wait]:
        p2c = queue.Queue()
        c2p = queue.Queue()
        e_tik = torch.cuda.Event(enable_timing=True)
        e_tok = torch.cuda.Event(enable_timing=True)

        t = threading.Thread(
            target=TestCuda._test_stream_event_nogil,
            args=(self, sync_func, p2c, c2p))
        t.daemon = True
        t.start()

        c2p.get()   # wait until the worker is ready
        with torch.cuda.device('cuda:0'):
            e_tik.record()
            p2c.put(0)   # release the worker
            parent_time = sync_func(self, TestCuda.FIFTY_MIL_CYCLES)
            child_time = c2p.get()
            e_tok.record()
            e_tok.synchronize()
            total_time = e_tik.elapsed_time(e_tok)

        # Without GIL, synchronizations in parent and child threads can
        # overlap. The total execution time should be a little bit longer
        # than spinning fifty million cycles and much shorter than twice of
        # that. However, testing absolute execution time is not reliable as
        # it may vary on different hardware in different environments.
        # Therefore, this test uses relative comparisons, checking if the
        # sum of parent and child threads execution time is greater than the
        # real execution time by least 40%.
        self.assertGreater(parent_time + child_time, total_time * 1.4)
# This test is flaky for ROCm, see issue #62602
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_wait(self):
    """Stream.wait_event makes a stream on another device wait for an event
    recorded behind queued work on the first device."""
    d0 = torch.device('cuda:0')
    d1 = torch.device('cuda:1')
    torch.cuda.synchronize(d0)
    torch.cuda.synchronize(d1)

    with torch.cuda.device(d0):
        s0 = torch.cuda.current_stream()
        # queue a long spin, then record e0 behind it
        torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
        e0 = torch.cuda.Event()
        s0.record_event(e0)

    with torch.cuda.device(d1):
        s1 = torch.cuda.current_stream()

    self.assertFalse(s0.query())
    self.assertTrue(s1.query())

    s1.wait_event(e0)
    s1.synchronize()

    # synchronizing s1 implies e0 (and hence s0's spin) has completed
    self.assertTrue(e0.query())
    self.assertTrue(s0.query())
    self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_multi_gpu_query(self):
    """Event.query()/synchronize() refer to the event's own device and
    work identically no matter which device is current at call time."""
    d0 = torch.device('cuda:0')
    d1 = torch.device('cuda:1')

    with torch.cuda.device(d0):
        s0 = torch.cuda.current_stream()
        e0 = s0.record_event()
        s0.synchronize()

    with torch.cuda.device(d1):
        s1 = torch.cuda.current_stream()
        # e1 sits behind a long spin on cuda:1, so it stays incomplete
        torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
        e1 = s1.record_event()

    self.assertTrue(e0.query())
    self.assertFalse(e1.query())

    with torch.cuda.device(d0):
        self.assertTrue(e0.query())
        self.assertFalse(e1.query())

    with torch.cuda.device(d1):
        self.assertTrue(e0.query())
        self.assertFalse(e1.query())

    # deliberately using a different device
    with torch.cuda.device(d0):
        e1.synchronize()

    self.assertTrue(e0.query())
    self.assertTrue(e1.query())

    with torch.cuda.device(d0):
        self.assertTrue(e0.query())
        self.assertTrue(e1.query())

    with torch.cuda.device(d1):
        self.assertTrue(e0.query())
        self.assertTrue(e1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipIfRocm
def test_events_multi_gpu_elapsed_time(self):
    """elapsed_time() must raise for events recorded on different devices,
    and work (from any current device) for same-device events."""
    d0 = torch.device('cuda:0')
    d1 = torch.device('cuda:1')

    with torch.cuda.device(d0):
        s0 = torch.cuda.current_stream()
        e0 = torch.cuda.Event(enable_timing=True)
        torch.cuda._sleep(10)
        s0.record_event(e0)

    with torch.cuda.device(d1):
        s1 = torch.cuda.current_stream()
        e1 = torch.cuda.Event(enable_timing=True)
        torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
        s1.record_event(e1)

    e0.synchronize()
    e1.synchronize()
    # cross-device pairs must be rejected regardless of the current device
    with torch.cuda.device(d0):
        with self.assertRaises(RuntimeError):
            self.assertGreater(e0.elapsed_time(e1), 0)

    with torch.cuda.device(d1):
        with self.assertRaises(RuntimeError):
            self.assertGreater(e0.elapsed_time(e1), 0)

    with torch.cuda.device(d0):
        s0 = torch.cuda.current_stream()
        e2 = torch.cuda.Event(enable_timing=True)
        torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
        s0.record_event(e2)
        s0.synchronize()

    self.assertGreater(e0.elapsed_time(e2), 0)

    # deliberately calling from a different device
    with torch.cuda.device(d1):
        self.assertGreater(e0.elapsed_time(e2), 0)
def test_record_stream(self):
    """record_stream() must delay re-use of a tensor's memory block until
    the work queued on the recorded stream has finished, and allow re-use
    afterwards.

    Fix: the first failure message read 'allocation re-used to soon';
    corrected to 'allocation re-used too soon' for consistency with
    test_caching_pinned_memory's wording.
    """
    cycles_per_ms = get_cycles_per_ms()

    t = torch.FloatTensor([1, 2, 3, 4]).pin_memory()
    result = torch.cuda.FloatTensor(t.size())
    stream = torch.cuda.Stream()
    ptr = [None]

    # Performs the CPU->GPU copy in a background stream
    def perform_copy():
        with torch.cuda.stream(stream):
            tmp = t.cuda(non_blocking=True)
            ptr[0] = tmp.data_ptr()
        torch.cuda.current_stream().wait_stream(stream)
        tmp.record_stream(torch.cuda.current_stream())
        torch.cuda._sleep(int(50 * cycles_per_ms))  # delay the copy
        result.copy_(tmp)

    perform_copy()
    with torch.cuda.stream(stream):
        tmp2 = torch.cuda.FloatTensor(t.size())
        tmp2.zero_()
        # tmp's block must not be handed out while the delayed copy runs
        self.assertNotEqual(tmp2.data_ptr(), ptr[0], msg='allocation re-used too soon')

    self.assertEqual(result.tolist(), [1, 2, 3, 4])

    # Check that the block will be re-used after the main stream finishes
    torch.cuda.current_stream().synchronize()
    with torch.cuda.stream(stream):
        tmp3 = torch.cuda.FloatTensor(t.size())
        self.assertEqual(tmp3.data_ptr(), ptr[0], msg='allocation not re-used')
def test_record_stream_on_shifted_view(self):
    """record_stream() called on a *view* with a non-zero storage offset
    must still protect the whole underlying block (issue #27366)."""
    # See issue #27366
    # This test detects unexpected block reallocation. For reliable test,
    # the stream to allocate tensors is isolated. The allocator will not
    # reuse free blocks which were allocated from another stream.
    stream_alloc = torch.cuda.Stream()
    with torch.cuda.stream(stream_alloc):
        base = torch.cuda.FloatTensor([10, 10])

    # Record another stream on a shifted view tensor.
    view = base[5:]
    assert view.storage_offset() > 0

    stream_record = torch.cuda.Stream()
    with torch.cuda.stream(stream_record):
        torch.cuda._sleep(int(50 * get_cycles_per_ms()))

    view.record_stream(stream_record)

    # Delete those tensors to make the block free soon.
    data_ptr = base.data_ptr()
    del base, view

    # A new tensor should not be allocated to the block above.
    stream_alloc.synchronize()

    with torch.cuda.stream(stream_alloc):
        try_realloc = torch.cuda.FloatTensor([10, 10])

    self.assertNotEqual(try_realloc.data_ptr(), data_ptr)
@contextlib.contextmanager
def _get_external_stream(self, device):
    """Context manager: create a raw CUDA stream on `device` through the
    cudart bindings, yield its integer handle, destroy it on exit."""
    cudart = torch.cuda.cudart()
    stream = ctypes.c_ulonglong(0)
    stream_p = ctypes.POINTER(ctypes.c_void_p)(stream)
    # pass the address of `stream` so cudaStreamCreate can write into it
    stream_p_int = ctypes.cast(stream_p, ctypes.c_void_p).value
    with device:
        try:
            out = cudart.cudaStreamCreate(stream_p_int)
            self.assertEqual(out, 0)  # 0 == cudaSuccess
            self.assertNotEqual(stream.value, 0)
            yield stream.value
        finally:
            # always destroy the raw stream, even if the body raised
            out = cudart.cudaStreamDestroy(stream.value)
            self.assertEqual(out, 0)
def test_external_streams(self):
    """torch.cuda.ExternalStream must wrap a raw cudaStream_t handle and
    report that handle and the right device."""
    device = torch.cuda.device(0)
    with self._get_external_stream(device) as stream_v:
        ext_stream = torch.cuda.ExternalStream(stream_v)
        self.assertEqual(stream_v, ext_stream.cuda_stream)
        self.assertEqual(ext_stream.device.index, device.idx)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_external_streams_multi_device(self):
    """ExternalStream with an explicit non-default device must keep both
    the raw handle and that device."""
    device = torch.cuda.device(1)
    with self._get_external_stream(device) as stream_v:
        ext_stream = torch.cuda.ExternalStream(
            stream_v, device=device)
        self.assertEqual(stream_v, ext_stream.cuda_stream)
        self.assertEqual(ext_stream.device.index, device.idx)
def test_noncontiguous_pinned_memory(self):
    """pin_memory() must preserve values for non-contiguous tensors
    (regression test for issue #3266)."""
    mat = torch.arange(0, 10).view((2, 5))
    # the transpose is non-contiguous; pinning must not scramble it
    self.assertEqual(mat.t(), mat.t().pin_memory())
def test_caching_pinned_memory(self):
    """The pinned-memory caching allocator must re-use a freed block, but
    not while an in-flight async copy still reads from it."""
    cycles_per_ms = get_cycles_per_ms()

    # check that allocations are re-used after deletion
    t = torch.FloatTensor([1]).pin_memory()
    ptr = t.data_ptr()
    del t
    t = torch.FloatTensor([1]).pin_memory()
    self.assertEqual(t.data_ptr(), ptr, msg='allocation not reused')

    # check that the allocation is not re-used if it's in-use by a copy
    gpu_tensor = torch.cuda.FloatTensor([0])
    torch.cuda._sleep(int(1000 * cycles_per_ms))  # delay the copy by 1s
    gpu_tensor.copy_(t, non_blocking=True)
    del t
    t = torch.FloatTensor([1]).pin_memory()
    self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
    # the delayed copy must still have read the original value
    self.assertEqual(list(gpu_tensor), [1])
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_caching_pinned_memory_multi_gpu(self):
    """Pinned-memory re-use guards must be recorded on the GPU that issued
    the pending copy, not just the default device."""
    # checks that the events preventing pinned memory from being re-used
    # too early are recorded on the correct GPU
    cycles_per_ms = get_cycles_per_ms()

    t = torch.FloatTensor([1]).pin_memory()
    ptr = t.data_ptr()
    gpu_tensor0 = torch.cuda.FloatTensor([0], device=0)
    gpu_tensor1 = torch.cuda.FloatTensor([0], device=1)

    with torch.cuda.device(1):
        torch.cuda._sleep(int(1000 * cycles_per_ms))  # delay the copy by 1s
        gpu_tensor1.copy_(t, non_blocking=True)

    del t
    t = torch.FloatTensor([2]).pin_memory()
    self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')

    with torch.cuda.device(0):
        gpu_tensor0.copy_(t, non_blocking=True)

    # both copies must observe the value their source held at issue time
    self.assertEqual(gpu_tensor1[0], 1)
    self.assertEqual(gpu_tensor0[0], 2)
def test_caching_allocator_record_stream_oom(self):
    """allocations delayed by a record_stream call should still be freed on
    an out-of-memory in cuda_malloc_retry. see issue #19219"""
    stream = torch.cuda.Stream()

    with torch.cuda.stream(stream):
        y = torch.zeros(40 * 1024 * 1024, device='cuda')

    # repeatedly allocate large blocks; without the fix the record_stream
    # deferrals would accumulate until allocation fails
    for _ in range(100):
        x = torch.empty(40 * 1024 * 1024, device='cuda')
        with torch.cuda.stream(stream):
            y += x
        # delays re-use of `x` until after all operations in `stream`
        x.record_stream(stream)
        del x

    # we've made a mess by allocating up to the device capacity. free any
    # cached blocks in case it affects future tests.
    torch.cuda.empty_cache()
# Tests for historic illegal memory access, see #17040.
def test_reduction_gpu_memory_accessing(self):
    """Regression test for #17040: this reduction used to trigger an
    illegal memory access; it only needs to run without crashing."""
    data = torch.ones(512, 8, dtype=torch.float32, device='cuda')
    torch.sum(data, 0)
def test_sum_fp16(self):
    """fp16 sums on CUDA: exact at the dtype limit, with fp32 accumulation,
    and matching CPU results on sparse random data."""
    x = torch.zeros(10, device='cuda', dtype=torch.float16)
    self.assertEqual(x.sum(), 0)

    # 65504 is the largest finite fp16 value, so this sum is representable
    x = torch.ones(65504, device='cuda', dtype=torch.float16)
    self.assertEqual(x.sum(), 65504)
    self.assertEqual(x.sum(dtype=torch.float32), 65504)

    # 65536 overflows fp16, but an fp32 accumulator handles it
    x = torch.ones(65536, device='cuda', dtype=torch.float16)
    self.assertEqual(x.sum(dtype=torch.float32), 65536)

    # sparse 0/1 data: GPU fp16 sum must match the CPU reference
    a = torch.zeros(1203611).bernoulli_(0.0005)
    x = a.to(device='cuda', dtype=torch.float16)
    self.assertEqual(x.sum().item(), a.sum().item())

    a = torch.zeros(100, 121, 80).bernoulli_(0.0005)
    x = a.to(device='cuda', dtype=torch.float16)
    self.assertEqual(x.sum((0, 2)).float().cpu(), a.sum((0, 2)))
def test_mean_fp16(self):
    """fp16 mean of a large all-ones tensor, with and without fp32 accumulation."""
    ones = torch.ones(65536, device='cuda', dtype=torch.float16)
    self.assertEqual(ones.mean(), 1)

    ones = torch.ones(65536, device='cuda', dtype=torch.float16)
    self.assertEqual(ones.mean(dtype=torch.float32), 1)
def test_prod_large(self):
    """Product reduction over a large tensor, for real and complex dtypes."""
    # tests global reduction (should_global_reduce = true) in case of non-zero identity element
    ones = torch.ones(240000, device='cuda', dtype=torch.float32)
    self.assertEqual(ones.prod(), 1)

    # test for complex types. Note 240k is divisible by 4
    for complex_dtype in (torch.cfloat, torch.cdouble):
        units = torch.ones(240000, device='cuda', dtype=complex_dtype) * (0 + 1j)
        self.assertEqual(units.prod(), 1)
def test_multinomial_ext(self):
    """Regression tests for torch.multinomial on CUDA: samples drawn with
    replacement must never land on zero-probability categories.

    Seeds are fixed so the historical failures reproduce deterministically.
    """
    # Test two corner cases from older PyTorch (Issue #4858)
    freqs = torch.cuda.FloatTensor([
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.03178183361887932, 0.027680952101945877, 0.033176131546497345,
        0.046052902936935425, 0.07742464542388916, 0.11543981730937958,
        0.14148041605949402, 0.15784293413162231, 0.13180233538150787,
        0.08271478116512299, 0.049702685326337814, 0.027557924389839172,
        0.018125897273421288, 0.011851548217236996, 0.010252203792333603,
        0.007422595750540495, 0.005372154992073774, 0.0045109698548913,
        0.0036087757907807827, 0.0035267581697553396, 0.0018864056328311563,
        0.0024605290964245796, 0.0022964938543736935, 0.0018453967059031129,
        0.0010662291897460818, 0.0009842115687206388, 0.00045109697384759784,
        0.0007791675161570311, 0.00020504408166743815, 0.00020504408166743815,
        0.00020504408166743815, 0.00012302644609007984, 0.0,
        0.00012302644609007984, 4.100881778867915e-05, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0])

    torch.cuda.manual_seed(11042)
    sample = torch.multinomial(freqs, 1000, True)
    # No sampled index may have zero probability.
    self.assertNotEqual(freqs[sample].min(), 0)

    p = torch.zeros(3421, 2, device="cuda", dtype=torch.float)
    p[:, 1] = 1
    torch.cuda.manual_seed(5214)
    r = torch.multinomial(p, 1)
    # Column 0 has probability 0, so every row must sample index 1.
    self.assertNotEqual(r.min().item(), 0)

    # test corner case from Issue #13867
    torch.cuda.manual_seed(33)
    probs = torch.randn(1000000, device='cuda').clamp(min=0) * 3e-5
    samples = probs.multinomial(1000000, replacement=True)
    self.assertGreater(probs[samples].min().item(), 0)
def _spawn_test_multinomial_invalid_probs_cuda(self, probs):
    """Run torch.multinomial with invalid `probs` in a fresh subprocess and
    assert that a device-side assert (or the ROCm equivalent) fires.

    A subprocess is required because a CUDA device-side assert poisons the
    CUDA context for the remainder of the process.
    """
    import subprocess
    try:
        p = subprocess.Popen([sys.executable, '-c', f"""\
import sys
import torch
from torch._six import inf, nan
try:
    with torch.random.fork_rng(devices=[0]):
        torch.multinomial(torch.tensor({probs}).to('cuda'), 2, replacement=True)
        torch.cuda.synchronize()
    sys.exit(-1)  # Should not be reached
except RuntimeError as e:
    sys.exit(-2)
"""], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        out, err = p.communicate(timeout=10)
        p.wait(timeout=10)
    except subprocess.TimeoutExpired as e:
        # Child hung (e.g. stuck in a device assert); reap it and still
        # inspect whatever output it produced.
        p.kill()
        out, err = p.communicate()
    expected_messages = [
        'device-side assert triggered',  # CUDA
        'Assertion',  # CUDA
        'HSA_STATUS_ERROR_EXCEPTION',  # ROCm
        'Device-side assertion'  # ROCm
    ]
    # Generator (not a list) lets any() short-circuit on the first match.
    self.assertTrue(any(msg in out or msg in err for msg in expected_messages))
@slowTest
@unittest.skipIf(TEST_WITH_ROCM, "ROCm doesn't support device side asserts")
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
def test_multinomial_invalid_probs_cuda(self):
    """Each invalid probability vector (negative, +/-inf, NaN) must make
    torch.multinomial trip a device-side assert in a spawned subprocess."""
    for invalid_probs in ([1., -1., 1.],
                          [1., inf, 1.],
                          [1., -inf, 1.],
                          [1., 1., nan]):
        self._spawn_test_multinomial_invalid_probs_cuda(invalid_probs)
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_huge_index(self):
    """Advanced indexing on a very large tensor (15M x 45 int64) must match
    the CPU reference result."""
    src = torch.empty(15000000, 45, device='cuda', dtype=torch.long).random_(0, 2**22)
    idx = torch.randperm(src.shape[0], device='cuda')
    res = src[idx]
    # Compute the same gather on CPU and compare.
    res_cpu = src.cpu()[idx.cpu()]
    self.assertEqual(res.cpu(), res_cpu)
def test_min_max_inits(self):
    # Testing if THC_reduceAll received the correct index initialization.
    # This affects the result of THC_reduceAll operations at extreme values
    lo = torch.cuda.ByteTensor([0])
    hi = torch.cuda.ByteTensor([255])
    expected_index = torch.cuda.LongTensor([0])[0]

    _, argmax = lo.max(dim=0)
    self.assertEqual(argmax, expected_index)

    _, argmin = hi.min(dim=0)
    self.assertEqual(argmin, expected_index)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_get_set_rng_state_all(self):
    """Saving and restoring the RNG state of all devices must make the next
    normal_() draws repeat bit-for-bit on every GPU."""
    states = torch.cuda.get_rng_state_all()
    before0 = torch.cuda.FloatTensor(100, device=0).normal_()
    before1 = torch.cuda.FloatTensor(100, device=1).normal_()
    # Rewind both devices' generators to the saved state.
    torch.cuda.set_rng_state_all(states)
    after0 = torch.cuda.FloatTensor(100, device=0).normal_()
    after1 = torch.cuda.FloatTensor(100, device=1).normal_()
    # atol=0, rtol=0: values must be exactly identical, not merely close.
    self.assertEqual(before0, after0, atol=0, rtol=0)
    self.assertEqual(before1, after1, atol=0, rtol=0)
def test_nvtx(self):
    """Smoke-test that the NVTX range/mark bindings exist and are callable."""
    torch.cuda.nvtx.range_push("foo")
    torch.cuda.nvtx.mark("bar")
    torch.cuda.nvtx.range_pop()

    handle = torch.cuda.nvtx.range_start("range_start")
    torch.cuda.nvtx.range_end(handle)
def test_bincount_ext(self):
    """Exercise all three CUDA bincount code paths (shared-memory,
    multi-block, global-memory) against the CPU reference, plus an
    int32 bin-offset overflow regression case."""
    # ensure CUDA code coverage
    input_size = (5000,)
    w = torch.randn(input_size, dtype=torch.double, device='cuda')
    w_cpu = w.cpu()
    # test shared memory impl
    t = torch.randint(50, input_size, dtype=torch.int8, device='cuda')
    self.assertEqual(t.cpu().bincount(), t.bincount())
    self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
    # test multi block memory impl
    # see `THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM` in SummaryOps.cu
    t = torch.randint(500, input_size, dtype=torch.int64, device='cuda')
    self.assertEqual(t.cpu().bincount(), t.bincount())
    self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
    # test global memory impl
    # see `THRESH_NUMBER_BINS_FOR_GLOBAL_MEM` in SummaryOps.cu
    t = torch.randint(2000, input_size, dtype=torch.int64, device='cuda')
    self.assertEqual(t.cpu().bincount(), t.bincount())
    self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))

    t = torch.zeros([10], dtype=torch.int32, device='cuda')
    # 35488 * 65536 as int32 would cause overflow to negative value
    # giving negative bin offset
    t[0] = 35488
    counted = t.bincount(minlength=65536)
    # All 10 elements must be counted exactly once despite the large bin index.
    self.assertEqual(torch.sum(counted), 10)
def test_tiny_half_norm_(self):
    """The norm of tiny (but non-zero) fp16 values must stay positive."""
    small = torch.arange(25).cuda().float()
    small /= 100000000
    tiny_half = small.half()
    self.assertGreater(tiny_half.norm().item(), 0)
def test_norm_type_conversion(self):
    """norm(p=0, dtype=float32) on a 65536-element fp16 tensor: the result
    must be accumulated in fp32 (65536 is not representable in fp16)."""
    halves = torch.ones(65536).cuda().half()
    self.assertEqual(halves.norm(p=0, dtype=torch.float32), 65536)
# Verifies that mem_get_info works, including when called for a different device
def test_mem_get_info(self):
    def _test(idx):
        # Free memory must drop after a fresh ~32MB allocation on device `idx`,
        # while the total device memory reported stays fixed.
        before_free_bytes, before_available_bytes = torch.cuda.mem_get_info(idx)
        # increasing to 8MB to force acquiring a new block and overcome blocksize differences across platforms
        t = torch.randn(1024 * 1024 * 8, device='cuda:' + str(idx))
        after_free_bytes, after_available_bytes = torch.cuda.mem_get_info(idx)

        self.assertTrue(after_free_bytes < before_free_bytes)
        self.assertEqual(before_available_bytes, after_available_bytes)

    _test(0)
    if TEST_MULTIGPU:
        _test(1)
# Test that wrap_with_cuda_memory_check successfully detects leak
# skip for ROCM. Look into #62533.
@skipIfRocm
def test_cuda_memory_leak_detection(self):
    # `l` keeps leaked tensors alive so the checker can observe the leak.
    l = []

    @self.wrap_with_cuda_memory_check
    def no_leak():
        pass

    @self.wrap_with_cuda_memory_check
    def leak_gpu0():
        # increasing to 8MB to force acquiring a new block and overcome blocksize differences across platforms
        l.append(torch.randn(1024 * 1024 * 8, device=torch.device("cuda:0")))

    # A function that allocates nothing must pass the check unmolested.
    no_leak()

    with self.assertRaisesRegex(RuntimeError, r"CUDA driver API confirmed .+ on device 0.+"):
        leak_gpu0()

    if TEST_MULTIGPU:
        @self.wrap_with_cuda_memory_check
        def leak_gpu1():
            # increasing to 8MB to force acquiring a new block and overcome blocksize differences across platforms
            l.append(torch.randn(1024 * 1024 * 8, device=torch.device("cuda:1")))

        # The error message must name the device the leak occurred on.
        with self.assertRaisesRegex(RuntimeError, r"CUDA driver API confirmed .+ on device 1.+"):
            leak_gpu1()
def test_cuda_memory_leak_detection_propagates_errors(self):
    """An exception raised inside assertLeaksNoCudaTensors must propagate
    out unchanged, not be masked by the leak checker."""
    with self.assertRaisesRegex(RuntimeError, r"The size of tensor a \(3\) must match"):
        with self.assertLeaksNoCudaTensors():
            x = torch.randn(3, 1, device='cuda')
            y = torch.randn(2, 1, device='cuda')
            # Shapes are deliberately incompatible; the add must raise.
            z = x + y
def test_trilu_indices(self):
    """tril_indices/triu_indices on CUDA must agree with the nonzero()
    coordinates of tril()/triu() applied to an all-ones matrix."""
    for test_args in tri_tests_args:
        _compare_trilu_indices(self, *test_args, device='cuda')

    # test default options
    ones_3x3 = torch.ones(
        3, 3, dtype=torch.long, device='cuda', layout=torch.strided)
    self.assertEqual(
        torch.tril_indices(3, 3, device='cuda'),
        ones_3x3.tril(0).nonzero().transpose(0, 1))
    self.assertEqual(
        torch.triu_indices(3, 3, device='cuda'),
        ones_3x3.triu(0).nonzero().transpose(0, 1))
def test_large_trilu_indices(self):
    """Same comparison as test_trilu_indices, over the large-shape cases."""
    for args in tri_large_tests_args:
        _compare_large_trilu_indices(self, *args, device='cuda')
@unittest.skipIf(not TEST_MEDIUM_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow(self):
    # Issue #24309: In extreme cases, the loop variable could overflow and continue
    # the kernel loop with a negative index, causing a RuntimeError (invalid write):
    x = torch.randn(1, 1, 1, 2**30 + 1, dtype=torch.float16, device="cuda")
    expected = x[0, 0, 0, 2**30]
    # kernel_size=1 avg_pool is identity; the last element must survive intact.
    y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
    # Synchronize so any asynchronous invalid-write error surfaces here.
    torch.cuda.synchronize()
    self.assertEqual(y[0, 0, 0, 2**30], expected)
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow_large(self):
    # Make sure input.numel() > INT_MAX is handled:
    x = torch.randn(1, 1, 1, 2**31, dtype=torch.float16, device="cuda")
    with self.assertRaisesRegex(RuntimeError, "integer out of range"):
        y = torch.nn.functional.avg_pool2d(x, kernel_size=1)

    # Issue #24309: In extreme cases, the loop variable could overflow and continue
    # the kernel loop with a negative index, causing a RuntimeError (invalid write):
    x = torch.randn(1, 1, 1, 2**31 - 1, dtype=torch.float16, device="cuda")
    expected = x[0, 0, 0, 2**31 - 2]
    # kernel_size=1 avg_pool is identity; the last element must survive intact.
    y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
    # Synchronize so any asynchronous invalid-write error surfaces here.
    torch.cuda.synchronize()
    self.assertEqual(y[0, 0, 0, 2**31 - 2], expected)
# this might create a reference cycle on self...
def _make_multiply_in_stream(self):
    """Return an autograd Function that multiplies by a constant and, in
    backward, asserts it runs on the same stream its forward ran on, then
    sleeps to make stream-synchronization races observable."""
    class MultiplyInStream(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, val):
            ctx.val = val
            # Remember the stream forward ran on so backward can verify it.
            ctx.stream = torch.cuda.current_stream()
            return x * val

        @staticmethod
        def backward(ctx, grad):
            self.assertEqual(torch.cuda.current_stream(), ctx.stream)
            # delays the operation in the background stream
            torch.cuda._sleep(1000 * 5000)
            return grad * ctx.val, None

    return MultiplyInStream
@skipCUDANonDefaultStreamIf(True)
def test_streaming_backwards_sync(self):
    """Stream semantics of backward(): grads consumed outside the backward
    stream context need an explicit sync; grads consumed in the same
    ambient stream context as backward() need none."""
    default_stream = torch.cuda.current_stream()
    stream = torch.cuda.Stream()

    MultiplyInStream = self._make_multiply_in_stream()

    # Tests using grads outside the backward() stream context
    # See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
    x = torch.randn(5, 5, device='cuda', requires_grad=True)
    with torch.cuda.stream(stream):
        stream.wait_stream(default_stream)
        output = MultiplyInStream.apply(x, 2)
        output.sum().backward()
    # sync needed
    default_stream.wait_stream(stream)
    self.assertEqual(x.grad, torch.ones_like(x) * 2)
    self.assertEqual(torch.cuda.current_stream(), default_stream)

    # Tests that using grads in the same stream context as backward()
    # is safe regardless what streams bwd ops ran on
    bwd_ambient_stream = torch.cuda.Stream()
    x = torch.randn(5, 5, device='cuda', requires_grad=True)
    with torch.cuda.stream(stream):
        stream.wait_stream(default_stream)
        output = MultiplyInStream.apply(x, 3)
    with torch.cuda.stream(bwd_ambient_stream):
        bwd_ambient_stream.wait_stream(stream)
        output.sum().backward()
        # x was first used on "stream" so its AccumulateGrad leaf should run on "stream".
        # The end of backward() should have synced "bwd_ambient_stream" with "stream"
        # so it should be safe to use x.grad here without any syncs.
        self.assertEqual(x.grad, torch.ones_like(x) * 3)
        self.assertEqual(torch.cuda.current_stream(), bwd_ambient_stream)
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
def test_streaming_backwards_multiple_streams(self):
    """Backward over a graph whose forward ran on multiple streams: grads
    must come out correct both out-of-place (autograd.grad) and in-place
    (.backward), including when grads accumulate over several iterations."""
    MultiplyInStream = self._make_multiply_in_stream()

    # Forward runs ops on two private streams; y0 + y1 runs on stream1
    # after waiting on stream0's event.
    class StreamModel(torch.nn.Module):
        def __init__(self):
            super(StreamModel, self).__init__()
            self.event = torch.cuda.Event()
            self.stream0 = torch.cuda.Stream()
            self.stream1 = torch.cuda.Stream()

        def forward(self, x, x_first_use_on_ambient):
            if x_first_use_on_ambient:
                x0 = x.clone()
            self.stream0.wait_stream(torch.cuda.current_stream())
            self.stream1.wait_stream(torch.cuda.current_stream())
            with torch.cuda.stream(self.stream0):
                if not x_first_use_on_ambient:
                    x0 = x.clone()
                y0 = MultiplyInStream.apply(x0, 2)
                self.event.record(stream=torch.cuda.current_stream())

            with torch.cuda.stream(self.stream1):
                y1 = MultiplyInStream.apply(x, 3)
                self.stream1.wait_event(self.event)
                return y0 + y1

    stream = torch.cuda.Stream()

    for x_first_use_on_ambient in (True, False):
        # the out_of_place=False, iters=1 case stresses if proper syncs are inserted
        # when grads are initially None and stolen by backward ops.
        for out_of_place, iters in ((True, 1),
                                    (False, 1),
                                    (False, 5)):
            with torch.cuda.stream(stream):
                x = torch.randn(5, 5, device='cuda', requires_grad=True)
                model = StreamModel().cuda()
                # x's grad must arrive on the stream of x's first use.
                x.register_hook(lambda grad: self.assertEqual(torch.cuda.current_stream(),
                                                              stream if x_first_use_on_ambient else model.stream0))
                for p in model.parameters():
                    self.assertTrue(p.grad is None)
                for i in range(iters):
                    loss = model(x, x_first_use_on_ambient).sum()
                    if out_of_place:
                        x_grad = torch.autograd.grad((loss,), (x,))[0]
                    else:
                        loss.backward()
            # See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
            torch.cuda.current_stream().wait_stream(stream)

            # d(loss)/dx = 2 + 3 = 5 per iteration.
            if out_of_place:
                self.assertEqual(x_grad, torch.ones_like(x) * 5 * iters)
            else:
                self.assertEqual(x.grad, torch.ones_like(x) * 5 * iters)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_streaming_backwards_device_transfer(self):
    # This function must run with non-default current streams on all devices, otherwise it's meaningless.
    # The intention is to test that to()'s backward (CopyBackward) interacts properly with the
    # synchronization logic in torch/csrc/autograd/input_buffer.cpp.
    dev0 = torch.device("cuda:0")
    dev1 = torch.device("cuda:1")

    # Unfortunately I need to make the tensors largeish.
    # Bigger tensors = longer D2D transfers = more likely to expose races.
    size = 2**26

    a = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
    b = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)

    # Here to_backward_recipient = a*b is used only once, so MulBackward's InputBuffer slot only expects 1 input.
    # This tests the situation where we don't call InputBuffer::accumulate for MulBackward's InputBuffer.
    to_backward_recipient = a * b
    s = to_backward_recipient.to(device="cuda:0").sum()
    torch.cuda.synchronize(device=dev0)
    torch.cuda.synchronize(device=dev1)
    s.backward()
    # d(s)/da = b = 1 elementwise, so the grad sums to `size` (same for b).
    self.assertTrue(a.grad.sum().item() == size)
    self.assertTrue(b.grad.sum().item() == size)

    # Here to_backward_recipient = a*b is used twice, so MulBackward's InputBuffer slot expects 2 inputs.
    # This tests the situation where we do call InputBuffer::accumulate for MulBackward's InputBuffer.
    a.grad = None
    b.grad = None
    to_backward_recipient = a * b
    # Multiply by 2 here so to's backward creates gradient values that are different from the case above,
    # to mitigate weirdness if the caching allocator happens to reuse memory regions that were populated
    # with 1s by the case above
    s0 = to_backward_recipient.to(device="cuda:0").sum() * 2.
    s1 = to_backward_recipient.to(device="cuda:0").sum() * 2.
    torch.cuda.synchronize(device=dev0)
    torch.cuda.synchronize(device=dev1)
    s0.backward(retain_graph=True)
    s1.backward()
    # Two backward passes, each contributing 2 per element: total 4 * size.
    self.assertTrue(a.grad.sum().item() == 4 * size)
    self.assertTrue(b.grad.sum().item() == 4 * size)
def test_streaming_backwards_sync_graph_root(self):
    # This function tests if bwd ops running on a side stream properly sync with the GraphRoot.
    # The potential bug it targets is a race condition. The test uses multiple trials and
    # torch.cuda._sleep such that if the race condition exists, the test will almost certainly fail,
    # but there's a chance it may spuriously pass. Passing does not guarantee the backend is bug-free,
    # but failure does guarantee there is a bug.
    fwd_bwd_op_stream = torch.cuda.Stream()
    bwd_ambient_stream = torch.cuda.Stream()
    # We need these streams to be different otherwise the test is meaningless.
    self.assertTrue(fwd_bwd_op_stream != bwd_ambient_stream)

    size = int(1e3)

    a = torch.full((size,), 2.0, device="cuda", requires_grad=True)
    b = torch.full((size,), 3.0, device="cuda", requires_grad=True)

    # I don't think we need any manual record_streams below.
    # a and b remain in scope for the entire test.
    # c and grad remain in scope for each iteration, and there's a full sync between iterations.
    for trial in range(5):
        torch.cuda.synchronize()
        a.grad = b.grad = None
        with torch.cuda.stream(fwd_bwd_op_stream):
            c = a * b

        with torch.cuda.stream(bwd_ambient_stream):
            torch.cuda.synchronize()
            # Long-running dummy kernel on bwd_ambient_stream delays filling of grad
            torch.cuda._sleep(int(50 * get_cycles_per_ms()))
            # Fills grad on bwd_ambient_stream
            grad = torch.full((size,), float(trial + 1), device="cuda")

            # Bwd ops still run on fwd_bwd_ops_stream, so the following will likely fail if
            # bwd ops don't sync with bwd_ambient_stream before consuming grad.
            torch.autograd.backward(tensors=c, grad_tensors=grad)

            # See https://github.com/pytorch/pytorch/issues/47028
            # assertEquals below run on bwd_ambient_stream, so this test may also fail
            # if backward() fails to sync with bwd_ambient_stream at the end.
            # Synchronizing here works around the issue until a proper fix can be made.
            torch.cuda.synchronize()
            with torch.no_grad():
                # d(c)/da = b and d(c)/db = a, scaled by the incoming grad.
                self.assertEqual(a.grad, grad * b)
                self.assertEqual(b.grad, grad * a)
def test_streaming_backwards_callback(self):
    # Tests if autograd callbacks sync properly with respect to leaf streams and
    # the user-facing stream surrounding backward(). If it fails, first suspect is
    # sync logic where "final_callbacks_" are called in torch/csrc/autograd/engine.cpp
    MultiplyInStream = self._make_multiply_in_stream()

    size = int(1e3)
    a = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)
    b = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)

    s0 = torch.cuda.Stream()
    s1 = torch.cuda.Stream()
    s2 = torch.cuda.Stream()

    stash = []

    # sets up a nontrivial structure of leaf streams
    s0.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s0):
        c = MultiplyInStream.apply(a, 2)

    s1.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s1):
        d = MultiplyInStream.apply(b, 3)
        s1.wait_stream(s0)
        e = c * d

        def clone_leaf_grads():
            stash.append(a.grad.clone())
            stash.append(b.grad.clone())

        # Use a hook on e to install the callback
        e.register_hook(lambda grad: torch.autograd.Variable._execution_engine.queue_callback(clone_leaf_grads))

    s2.wait_stream(s1)
    with torch.cuda.stream(s2):
        e.sum().backward()
        # The autograd engine should sync s2 with all leaf streams then run the callback clone_leaf_grads on s2.
        # If those things happened properly, checking the values of the cloned grads on s2 should be safe:
        # de/da = 2*d = 6 and de/db = 3*c = 6 elementwise (a = b = 1).
        self.assertEqual(stash[0], torch.full_like(a, 6))
        # Fixed: stash[1] holds b's grad, so compare against a tensor like b
        # (the original used full_like(a, ...), which only worked because a
        # and b happen to share shape/dtype/device).
        self.assertEqual(stash[1], torch.full_like(b, 6))
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
@unittest.skipIf(IS_SANDCASTLE or IS_REMOTE_GPU, "Does not work on Sandcastle")
def test_cuda_init_race(self):
    # See https://github.com/pytorch/pytorch/issues/16559
    # Two threads initialize CUDA on different devices concurrently in a
    # subprocess; check_call fails if the child crashes or deadlocks.
    import subprocess
    subprocess.check_call([sys.executable, '-c', """\
import torch
import threading

def worker(rank):
    torch.tensor([1.]).cuda(rank)

t1 = threading.Thread(target=worker, args=(0,))
t2 = threading.Thread(target=worker, args=(1,))
t1.start()
t2.start()
"""])
@unittest.skipIf(TEST_WITH_ROCM, "ROCm doesn't support device side asserts")
def test_fixed_cuda_assert_async(self):
    """torch._assert_async: ambiguous inputs raise eagerly, truthy scalars
    pass, and falsy scalars trip a device-side assert (checked in a
    subprocess, since the assert poisons the CUDA context)."""
    with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
        torch._assert_async(torch.tensor([], device="cuda"))
    with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with more than one value is ambiguous"):
        torch._assert_async(torch.tensor([0, 0], device="cuda"))

    # All of these are truthy single-element tensors: no error expected.
    torch._assert_async(torch.tensor(1, device="cuda"))
    torch._assert_async(torch.tensor(0.1, device="cuda"))
    torch._assert_async(torch.tensor(-0.1, device="cuda"))
    torch._assert_async(torch.tensor(True, device="cuda"))
    torch._assert_async(torch.tensor(0 + 0.1j, device="cuda"))

    # Falsy values of each dtype: each must make the child process exit nonzero.
    fail_stmts = [
        "torch._assert_async(torch.tensor(0, device='cuda'))",
        "torch._assert_async(torch.tensor(0.0, device='cuda'))",
        "torch._assert_async(torch.tensor(False, device='cuda'))",
        "torch._assert_async(torch.tensor(0 + 0j, device='cuda'))",
    ]

    import subprocess
    for stmt in fail_stmts:
        with self.subTest(stmt=stmt):
            r = subprocess.call([sys.executable, '-c', f"""\
import torch

{stmt}
torch.cuda.synchronize()
"""])
            self.assertTrue(r != 0)
def test_grad_scaling_unscale(self, dtype=torch.float):
    """Exercises _amp_foreach_non_finite_check_and_unscale_ directly
    (dense/strided/sliced grads, inf/nan detection, mixed dtypes and
    devices) and scaler._unscale_grads_'s per-device/dtype grouping."""
    inv_scale = torch.full((1,), 0.25, dtype=torch.float, device="cuda:0")
    found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")

    size = 10
    g = torch.full((size, size), 4.0, dtype=dtype, device="cuda:0")
    ginf = g.clone()
    ginf[2, 2] = float('inf')
    gnan = g.clone()
    gnan[2, 2] = float('nan')

    # Tries selected combinations of
    #  - contiguous grads
    #  - g.clone().t() which is not contiguous but still non overlapping and dense
    #  - variants of g.clone()[:, :5] which are not non overlapping and dense
    # Non overlapping and dense grads route into a multi tensor apply kernel,
    # others use a fallback per-tensor kernel, so we should try both.
    cases = (
        ([g.clone(), g.clone()], False),
        ([g.clone(), g.clone().t()], False),
        ([g.clone(), g.clone()[:, :5]], False),
        ([g.clone()[:, :5], g.clone()[:, :5]], False),
        ([g.clone(), ginf.clone()], True),
        ([g.clone(), gnan.clone()], True),
        ([g.clone(), ginf.clone()[:, :5]], True),
        ([g.clone(), gnan.clone()[:, :5]], True),
        ([ginf.clone(), g.clone()[:, :5]], True),
        ([ginf.clone()[:, :5], g.clone()[:, :5]], True),
    )

    for grads, has_inf in cases:
        found_inf.zero_()
        torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
        if has_inf:
            self.assertEqual(found_inf, 1.0)
        else:
            self.assertEqual(found_inf, 0.0)
            # 4.0 * inv_scale(0.25) == 1.0 everywhere after unscaling.
            for grad in grads:
                self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)

    # When passing lists with mismatched dtypes to a raw
    # _amp_foreach_non_finite_check_and_unscale_ call,
    # it's expected to fall back to single-tensor TensorIterator kernel.
    grads = [g.clone(), g.to(dtype=torch.float16)]
    torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
    for grad in grads:
        self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)

    # Passing lists with mismatched devices to a raw
    # _amp_foreach_non_finite_check_and_unscale_ call should raise errors.
    if TEST_MULTIGPU:
        with self.assertRaisesRegex(RuntimeError, r"Expected all tensors to be on the same device"):
            torch._amp_foreach_non_finite_check_and_unscale_([g.clone(), g.to(device="cuda:1")],
                                                             found_inf,
                                                             inv_scale)

    # Creates a list of grads with mismatched dtypes and devices, to ensure
    # scaler._unscale_grads_ organizes grads by dtype and device before calling
    # _amp_foreach_non_finite_check_and_unscale_ on each set.
    # If inject_inf >= 0, writes an inf into one grad for _unscale_grads_ to find.
    def perfect_storm_grads(inject_inf):
        grads = [g.clone(), g.clone()[:, :5], g.to(dtype=torch.float16), g.to(dtype=torch.float16)]
        if TEST_MULTIGPU:
            grads += [g.to(device="cuda:1"),
                      g.to(device="cuda:1")[:, :5],
                      g.to(device="cuda:1", dtype=torch.float16),
                      g.to(device="cuda:1", dtype=torch.float16)]
        if inject_inf >= 0:
            grads[inject_inf][2, 2] = float('inf')
        return grads

    scaler = torch.cuda.amp.GradScaler()
    dummy_params = [torch.empty_like(g) for g in perfect_storm_grads(-1)]
    dummy_opt = torch.optim.SGD(dummy_params, lr=1.)

    # Ensures the inf/nan checking can find an inf injected onto any grad in the perfect storm.
    for inject_inf in range(-1, len(dummy_params)):
        found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
        grads = perfect_storm_grads(inject_inf)
        for i, p in enumerate(dummy_params):
            p.grad = grads[i]
        found_inf_per_device = scaler._unscale_grads_(dummy_opt, inv_scale, found_inf, True)
        if inject_inf < 0:
            # No inf was injected, ensures unscaling worked normally.
            self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 0)
            for grad in grads:
                self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
        else:
            # inf was injected, ensures inf was found.
            self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 1)
def test_grad_scaling_update_scale(self, device="cuda", dtype=torch.float):
    """torch._amp_update_scale_ semantics: the scale grows by `growth`
    after `growth_interval` consecutive clean iterations and shrinks by
    `backoff` on any iteration where an inf/nan was found."""
    growth = 2.0
    backoff = 0.25
    growth_interval = 2
    scale = torch.full((1,), 4.0, dtype=dtype, device=device)
    growth_tracker = torch.full((1,), 0.0, dtype=torch.int32, device=device)
    # Fixed: place found_inf on the `device` argument like the other
    # tensors, instead of hard-coding "cuda:0" (identical behavior for the
    # default device="cuda", but now the parameter is honored).
    found_inf = torch.full((1,), 0.0, dtype=torch.float, device=device)

    # Simulates 2 consecutive unskipped iterations
    torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
    self.assertEqual(growth_tracker, 1)
    self.assertEqual(scale, 4.0)
    torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
    self.assertEqual(growth_tracker, 0)
    self.assertEqual(scale, 8.0)

    # Simulates a skipped iteration
    found_inf.fill_(1.0)
    torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
    self.assertEqual(growth_tracker, 0)
    self.assertEqual(scale, 2.0)
def test_grad_scaling_unscale_sparse(self, device="cuda", dtype=torch.float):
    """scaler._unscale_grads_ on sparse COO grads: unscaling, inf/nan
    detection, and overflow detection on an uncoalesced fp16 tensor whose
    coalesced sum would exceed the fp16 max."""
    scaler = torch.cuda.amp.GradScaler()

    inv_scale = torch.full((1,), 0.25, dtype=dtype, device=device)
    found_inf = torch.empty((1,), dtype=dtype, device=device)
    cur = found_inf.device

    # As of d0c925f (4/16/20), docs are unclear about best API for sparse cuda tensor construction.
    # https://pytorch.org/docs/master/tensors.html shows torch.sparse_coo_tensor(...), but it has no docstring.
    # The same page shows several tensors with layout=torch.sparse_coo, but no constructors using that layout.
    # Meanwhile, https://pytorch.org/docs/master/sparse.html shows torch.sparse.FloatTensor(...), which looks
    # legacy and does not accept a device="cuda" kwarg. Going with torch.sparse_coo_tensor.
    i = torch.tensor([[0, 1, 1],
                      [2, 0, 2]], device="cuda", dtype=torch.int64)
    v = torch.tensor([16., 32., 64.], device="cuda", dtype=torch.float)
    s = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)

    p = s.clone()
    assert p.is_sparse
    opt = torch.optim.SGD([p], lr=1.)

    p.grad = s.clone()
    found_inf.zero_()
    found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
    self.assertEqual(found_inf, 0.0)
    # All values divided by 4 (inv_scale = 0.25).
    self.assertEqual(p.grad.to_dense(), (s / 4).to_dense())

    v = torch.FloatTensor([16., 32., float('inf')])
    p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
    found_inf.zero_()
    found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
    self.assertEqual(found_inf, 1.0)

    v = torch.FloatTensor([16., 32., float('nan')])
    p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
    found_inf.zero_()
    found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
    self.assertEqual(found_inf, 1.0)

    p = s.clone().half()
    assert p.is_sparse
    opt = torch.optim.SGD([p], lr=1.)

    p.grad = s.clone().half()
    found_inf.zero_()
    found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
    self.assertEqual(found_inf, 0.0)
    self.assertEqual(p.grad.to_dense(), (s.half() / 4).to_dense())

    # Creates fp16 sparse tensor with duplicated indices (uncoalesced). The uncoalesced representation
    # does not overflow in fp16, but the coalesced representation would, because 64000 + 64000 > fp16 max.
    # _amp_non_finite_check_and_unscale_ should report an overflow here.
    i = torch.LongTensor([[0, 1, 0],
                          [2, 0, 2]])
    v = torch.FloatTensor([64000., 32., 64000.])
    p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=torch.float16)
    found_inf.zero_()
    found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
    self.assertEqual(found_inf, 1.0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_device_as_key(self):
    # Ensure that different instances of "device" objects that point to the same device
    # are treated as identical keys by dicts. GradScaler relies on this behavior, and may
    # error otherwise in a way that's difficult to detect (a silent performance hit).
    lookup = {}
    probe = torch.empty((1,), device="cuda:0")
    first0, second0 = torch.device("cuda:0"), torch.device("cuda:0")
    first1, second1 = torch.device("cuda:1"), torch.device("cuda:1")

    # Equal devices must hash equally...
    self.assertTrue(hash(first0) == hash(second0))
    self.assertTrue(hash(first1) == hash(second1))

    # ...and therefore collapse to a single dict entry per device.
    lookup[first0] = "0a"
    lookup[second0] = "0b"
    self.assertTrue(len(lookup) == 1)
    self.assertTrue(lookup[first0] == "0b")

    # A device taken from a live tensor keys the same slot.
    lookup[probe.device] = "t"
    self.assertTrue(len(lookup) == 1)
    self.assertTrue(lookup[first0] == "t")

    lookup[first1] = "1a"
    lookup[second1] = "1b"
    self.assertTrue(len(lookup) == 2)
    self.assertTrue(lookup[first1] == "1b")
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_scale(self):
    """scaler.scale() must recurse through nested iterables of tensors on
    multiple devices, multiplying every tensor by the scale."""
    scaler = torch.cuda.amp.GradScaler(init_scale=2.)
    t0 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0")
    t1 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:1")
    # Create some nested iterables of tensors on different devices.
    outputs = (t1.clone(), (t0.clone(), t1.clone()), [t0.clone(), (t1.clone(), t0.clone())])
    outputs = scaler.scale(outputs)
    # Flatten the nesting and check every leaf was scaled 4.0 -> 8.0.
    leaves = [outputs[0], outputs[1][0], outputs[1][1],
              outputs[2][0], outputs[2][1][0], outputs[2][1][1]]
    self.assertTrue(all(leaf == 8.0 for leaf in leaves))
    self.assertTrue(scaler._scale.device == t1.device)
def test_grad_scaling_state_dict(self):
    """load_state_dict must copy scale/growth/backoff/interval from the
    source scaler and reset the growth tracker, whether or not the target
    scaler's scale tensor was already lazily initialized."""
    for lazy_init_scale in True, False:
        s0 = torch.cuda.amp.GradScaler(init_scale=3., growth_factor=4., backoff_factor=.5, growth_interval=2)
        s1 = torch.cuda.amp.GradScaler(init_scale=6., growth_factor=7., backoff_factor=.8, growth_interval=1)

        # sets a random value for load_state_dict to overwrite
        s1._init_growth_tracker = 7

        if lazy_init_scale:
            # Dummy scale() call to ensure the scale tensor is lazily initialized.
            s1.scale(torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0"))
            self.assertTrue(isinstance(s1._scale, torch.cuda.FloatTensor))

        s1.load_state_dict(s0.state_dict())

        self.assertEqual(s1.get_scale(), 3.)
        self.assertEqual(s1.get_growth_factor(), 4.)
        self.assertEqual(s1.get_backoff_factor(), .5)
        self.assertEqual(s1.get_growth_interval(), 2)
        # The tracker is reset to 0, not copied (7 must be gone).
        self.assertEqual(s1._init_growth_tracker, 0)
def _create_scaling_models_optimizers(self, device="cuda"):
    # Create a module+optimizer that will use scaling, and a control module+optimizer
    # that will not use scaling, against which the scaling-enabled module+optimizer can be compared.
    mod_control = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
    mod_scaling = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)

    # Start both models from identical weights.
    for control_param, scaling_param in zip(mod_control.parameters(), mod_scaling.parameters()):
        scaling_param.data.copy_(control_param.data)

    opt_control = torch.optim.SGD(mod_control.parameters(), lr=1.0)
    opt_scaling = torch.optim.SGD(mod_scaling.parameters(), lr=1.0)

    return mod_control, mod_scaling, opt_control, opt_scaling
def _create_scaling_case(self, device="cuda", dtype=torch.float):
    """Build (models, optimizers, data, loss_fn, skip_iter) for scaling tests."""
    def _random_pair():
        # One (input, target) batch of shape (8, 8).
        return (torch.randn((8, 8), dtype=dtype, device=device),
                torch.randn((8, 8), dtype=dtype, device=device))

    data = [_random_pair() for _ in range(4)]

    loss_fn = torch.nn.MSELoss().cuda()
    skip_iter = 2

    return self._create_scaling_models_optimizers(device=device) + (data, loss_fn, skip_iter)
# _run_scaling_case generalizes some single-optimizer test logic to avoid too much copy-pasting below.
def _run_scaling_case(self, run, unskipped, skipped, atol=1e-7):
    """Run `run` once without and once with scaling and compare results.

    `unskipped`/`skipped` are the expected counts of iterations in which the
    scaler did / did not apply the optimizer step, used to predict the final
    scale value.
    """
    # Ensure scaling can be disabled without changing user control flow.
    for enabled in True, False:
        mod_control, mod_scaling, opt_control, opt_scaling, data, loss_fn, skip_iter = self._create_scaling_case()

        # For functionality, test with a modest initial scale, and an unrealistically-large growth factor
        # so any potential errors with the growth factor handling will be magnified.
        scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)

        _ = run(data, mod_control, opt_control, scaler, loss_fn, skip_iter, False)
        ret = run(data, mod_scaling, opt_scaling, scaler, loss_fn, skip_iter, True)

        # Allows run() to optionally return a different scaler instance.
        scaler = ret if ret else scaler

        # If scaling was enabled, the scale factor should have been multiplied by the growth factor
        # len(data) - skipped times and the backoff factor "skipped" times.
        if enabled:
            net_growth = scaler.get_growth_factor()**unskipped if unskipped > 0 else 1.0
            net_backoff = scaler.get_backoff_factor()**skipped if skipped > 0 else 1.0
            self.assertTrue(scaler.get_scale() == (128. * net_growth * net_backoff))
        else:
            self.assertTrue(scaler.get_scale() == 1.0)

        # Scaled and unscaled training must land on (nearly) the same weights.
        for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
            self.assertEqual(c, s, atol=atol, rtol=1e-05)
# Compares no scaling + no autocasting against scaling + autocasting.
def test_grad_scaling_autocast(self):
    """Scaled + autocasted training must match the plain fp32 control run."""
    # Toggled below so the second _run_scaling_case pass also exercises
    # pickling a live GradScaler mid-run.
    try_pickle = False
    def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
        for i, (input, target) in enumerate(data):
            optimizer.zero_grad()
            with torch.autocast('cuda', enabled=try_scaling_api):
                output = model(input)
                loss = loss_fn(output, target)
            if try_scaling_api:
                scaler.scale(loss).backward()
                # Inject an inf gradient once so the scaler must skip that step.
                if i == skip_iter and scaler.is_enabled():
                    model[1].weight.grad.data.fill_(float('inf'))
                scaler.step(optimizer)
                scaler.update()
                if try_pickle:
                    # Round-trip the scaler through pickle; state must survive.
                    # NOTE(review): relies on a module-level `pickle` import — confirm it exists at file top.
                    scaler = pickle.loads(pickle.dumps(scaler))
            else:
                loss.backward()
                if (not scaler.is_enabled()) or (i != skip_iter):
                    optimizer.step()
        return scaler
    # sets atol=1e-3 because we're comparing pure fp32 arithmetic vs a mixture of fp16 and fp32
    self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
    # this will be picked up by try_pickle within run():
    try_pickle = True
    self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
def test_grad_scaling_clipping(self):
    """Gradient clipping on still-scaled grads (scaled threshold) works with GradScaler."""
    def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
        max_norm = 0.2  # A reasonable value that actually has an effect, based on printouts of grads
        for i, (input, target) in enumerate(data):
            optimizer.zero_grad()
            output = model(input)
            loss = loss_fn(output, target)
            if try_scaling_api:
                scaler.scale(loss).backward()
                # Grads are still scaled here, so the clip threshold is scaled too.
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm * scaler.get_scale())
                if i == skip_iter and scaler.is_enabled():
                    # Force an inf so the scaler skips this step.
                    model[1].weight.grad.data.fill_(float('inf'))
                scaler.step(optimizer)
                scaler.update()
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
                if (not scaler.is_enabled()) or (i != skip_iter):
                    optimizer.step()
    self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-5)
def test_grad_scaling_clipping_separate_unscale(self):
    """Explicit unscale_ before clipping lets the unscaled threshold be used."""
    def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
        max_norm = 0.2  # A reasonable value that actually has an effect, based on printouts of grads
        for i, (input, target) in enumerate(data):
            optimizer.zero_grad()
            output = model(input)
            loss = loss_fn(output, target)
            if try_scaling_api:
                scaler.scale(loss).backward()
                if i == skip_iter and scaler.is_enabled():
                    model[1].weight.grad.data.fill_(float('inf'))
                # Unscale first, then clip with the true (unscaled) threshold.
                scaler.unscale_(optimizer)
                # error_if_nonfinite=False because the injected inf would otherwise raise.
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm, error_if_nonfinite=False)
                scaler.step(optimizer)
                scaler.update()
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
                if (not scaler.is_enabled()) or (i != skip_iter):
                    optimizer.step()
    self._run_scaling_case(run, unskipped=3, skipped=1)
@unittest.skipIf(IS_WINDOWS, 'FIXME: fix this test for Windows')
def test_grad_scaling_penalty(self):
    """Gradient-penalty (double-backward) training works under GradScaler."""
    def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
        for i, (input, target) in enumerate(data):
            optimizer.zero_grad()
            output = model(input)
            loss = loss_fn(output, target)
            if try_scaling_api:
                # Compute grads of the *scaled* loss, then manually unscale
                # them before forming the penalty term.
                grad_params = torch.autograd.grad(scaler.scale(loss),
                                                  model.parameters(), create_graph=True)
                inv_scale = 1. / scaler.get_scale()
                grad_params = [p * inv_scale for p in grad_params]
            else:
                grad_params = torch.autograd.grad(loss, model.parameters(), create_graph=True)
            # L2 norm of the gradients, added to the loss as a penalty.
            grad_norm = 0
            for grad in grad_params:
                grad_norm += grad.pow(2).sum()
            grad_norm = grad_norm.sqrt()
            loss = loss + grad_norm
            if try_scaling_api:
                scaler.scale(loss).backward()
                if i == skip_iter and scaler.is_enabled():
                    model[1].weight.grad.data.fill_(float('inf'))
                scaler.step(optimizer)
                scaler.update()
            else:
                loss.backward()
                if (not scaler.is_enabled()) or (i != skip_iter):
                    optimizer.step()
    self._run_scaling_case(run, unskipped=3, skipped=1)
def test_grad_scaling_accumulation(self):
    """Gradient accumulation across two iterations must work with GradScaler."""
    def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
        accum_steps = 2
        for step, (inp, tgt) in enumerate(data):
            # Average the loss over the accumulation window.
            loss = loss_fn(model(inp), tgt) / accum_steps
            if try_scaling_api:
                scaler.scale(loss).backward()
            else:
                loss.backward()
            # Only step and clear grads after a full accumulation window.
            if (step + 1) % accum_steps != 0:
                continue
            if try_scaling_api:
                scaler.step(optimizer)
                scaler.update()
            else:
                optimizer.step()
            optimizer.zero_grad()
    self._run_scaling_case(run, unskipped=2, skipped=0)
def test_grad_scaling_multiple(self):
    """Gradient scaling with 2 models and 2 optimizers that both receive gradients from 2 losses.

    Some of the logic here cannot reuse the generic helper functions created
    for the 1-optimizer cases.
    """
    for enabled in True, False:
        mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
            self._create_scaling_case()
        mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
            self._create_scaling_models_optimizers()
        scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
        def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
            for i, (input, target) in enumerate(data):
                optimizer0.zero_grad()
                optimizer1.zero_grad()
                output0 = model0(input)
                output1 = model1(input)
                # Both losses mix both models' outputs, so each optimizer
                # receives gradients from both losses.
                loss0 = loss_fn(0.3 * output0 + 0.7 * output1, target)
                loss1 = loss_fn(0.6 * output0 - 0.4 * output1, target)
                if try_scaling_api:
                    scaler.scale(loss0).backward(retain_graph=True)
                    scaler.scale(loss1).backward()
                    if i == skip_iter and scaler.is_enabled():
                        model1[1].weight.grad.data.fill_(float('inf'))
                    # As an additional stress test, separately unscale for one of the optimizers.
                    scaler.unscale_(optimizer0)
                    scaler.step(optimizer0)
                    scaler.step(optimizer1)
                    scaler.update()
                else:
                    loss0.backward(retain_graph=True)
                    loss1.backward()
                    optimizer0.step()
                    if (not scaler.is_enabled()) or (i != skip_iter):
                        optimizer1.step()
        run(mod_control0, mod_control1, opt_control0, opt_control1, False)
        run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
        # The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
        # Bug fix: the conditional must select the *expected value*.  The old
        # form `assertTrue(x == y if enabled else 1.0)` parsed as
        # `(x == y) if enabled else 1.0`, so the disabled branch asserted the
        # always-truthy constant 1.0 and never actually checked the scale.
        expected_scale = (128. * scaler.get_growth_factor()**3 *
                          scaler.get_backoff_factor()**1) if enabled else 1.0
        self.assertTrue(scaler.get_scale() == expected_scale)
        for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
                        chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
            self.assertEqual(c, s, rtol=1e-5, atol=1e-7)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_multigpu(self):
    """Same as test_grad_scaling_multiple, but runs some of the models on device 1.

    GradScaler should transparently handle losses and gradients on multiple
    devices.  This test could be combined with the test above, but it makes
    sense to treat multi-GPU operations separately.
    """
    dev0 = torch.device("cuda:0")
    dev1 = torch.device("cuda:1")
    for enabled in True, False:
        mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
            self._create_scaling_case()
        mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
            self._create_scaling_models_optimizers(device=dev1)
        scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
        def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
            for i, (input, target) in enumerate(data):
                optimizer0.zero_grad()
                optimizer1.zero_grad()
                output0 = model0(input)
                output1 = model1(input.to(dev1))
                # Cross-device losses: each loss mixes outputs from both devices.
                loss0 = loss_fn(0.3 * output0 + 0.7 * output1.to(dev0), target)
                loss1 = loss_fn(0.6 * output0.to(dev1) - 0.4 * output1, target.to(dev1))
                if try_scaling_api:
                    scaler.scale(loss0).backward(retain_graph=True)
                    scaler.scale(loss1).backward()
                    if i == skip_iter and scaler.is_enabled():
                        model1[1].weight.grad.data.fill_(float('inf'))
                    # As an additional stress test, separately unscale for one of the optimizers.
                    scaler.unscale_(optimizer0)
                    scaler.step(optimizer0)
                    scaler.step(optimizer1)
                    # Make sure the found_infs were collected properly across optimizers and devices.
                    if scaler.is_enabled():
                        self.assertTrue(len(scaler._found_inf_per_device(optimizer0)) == 1)
                        self.assertTrue(len(scaler._found_inf_per_device(optimizer1)) == 1)
                        self.assertTrue(scaler._found_inf_per_device(optimizer0)[dev0].item() == 0.)
                        self.assertTrue(scaler._found_inf_per_device(optimizer1)[dev1].item() ==
                                        float(i == skip_iter))
                    scaler.update()
                else:
                    loss0.backward(retain_graph=True)
                    loss1.backward()
                    optimizer0.step()
                    if (not scaler.is_enabled()) or (i != skip_iter):
                        optimizer1.step()
        run(mod_control0, mod_control1, opt_control0, opt_control1, False)
        run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
        # The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
        # Bug fix: parenthesize the expected value.  The old form applied
        # `if enabled else 1.0` to the whole comparison, so the disabled
        # branch asserted the always-truthy constant 1.0 and never checked.
        expected_scale = (128. * scaler.get_growth_factor()**3 *
                          scaler.get_backoff_factor()**1) if enabled else 1.0
        self.assertTrue(scaler.get_scale() == expected_scale)
        # Copy mod_control1 and mod_scaling1 back to device 0 for comparison
        mod_control1.to(dev0)
        mod_scaling1.to(dev0)
        for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
                        chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
            self.assertEqual(c, s, rtol=1e-5, atol=1e-7)
def test_cublas_multiple_threads_same_device(self):
    """Stress per-thread cublas handle management with concurrent gemms on one device."""
    # Note, these parameters should be very carefully tuned
    # Too small number makes it hard for the racing condition
    # to happen, while too large number sometimes cause hang
    size = 1024
    num_threads = 2
    trials = 3
    test_iters = 100
    weight = torch.ones((size, size), device='cuda')
    results = {}
    barrier = threading.Barrier(num_threads)
    def _worker(t):
        my_stream = torch.cuda.Stream()
        # Hard sync so we don't need to worry about creating and using tensors
        # across streams or the fact that default streams are thread-local.
        # Those issues are not the target of this test.
        torch.cuda.synchronize()
        # Line up threads to increase likelihood of race conditions.
        barrier.wait()
        with torch.cuda.stream(my_stream):
            for i in range(test_iters):
                # If all threads are sharing the same cublas handle,
                # the following sequence may occur:
                # thread 0 calls cublasSetStream()
                # thread 1 calls cublasSetStream()
                # thread 0 launches its raw gemm, which it thinks is in
                # its own stream, but is actually in thread 1's stream.
                # thread 0 enqueues its div_, which IS in its own stream,
                # but actually now races with its gemm.
                results[t] = torch.mm(results[t], weight)
                results[t].div_(float(size))
        torch.cuda.synchronize()
    for _ in range(trials):
        for t in range(num_threads):
            # mm with all-ones then div by size keeps every element at 1,
            # so any race-induced corruption shows up in the final sum.
            results[t] = torch.ones((size, size), device='cuda')
        threads = [threading.Thread(target=_worker,
                                    args=(t,)) for t in range(num_threads)]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        for t in range(num_threads):
            self.assertEqual(results[t].sum().item(), size * size)
# Test is flaky on Windows (https://github.com/pytorch/pytorch/issues/57401)
@unittest.skipIf(IS_WINDOWS, 'Test is flaky on Windows (see issue 57401)')
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
@skipIfRocm
def test_cudnn_multiple_threads_same_device(self):
    # This function is intended to test the lazy creation and reuse of per-thread
    # cudnn handles on each device in aten/src/ATen/cudnn/Handles.cpp.
    # Failure here likely indicates something wrong with that logic.
    weight = torch.ones((1, 1, 2, 2), device='cuda')
    results = {}
    num_threads = 2
    trials = 3
    test_iters = 1000
    barrier = threading.Barrier(num_threads)
    with torch.backends.cudnn.flags(enabled=True):
        def _worker(t):
            my_stream = torch.cuda.Stream()
            # Hard sync so we don't need to worry about creating and using tensors
            # across streams or the fact that default streams are thread-local.
            # Those issues are not the target of this test.
            torch.cuda.synchronize()
            # Line up threads to increase likelihood of race conditions.
            barrier.wait()
            with torch.cuda.stream(my_stream):
                for _ in range(test_iters):
                    # If all threads are sharing the same cudnn handle,
                    # the following sequence may occur:
                    # thread 0 calls setCuDNNStreamToCurrent()
                    # thread 1 calls setCuDNNStreamToCurrent()
                    # thread 0 launches its raw convolution, which it thinks is in
                    # its own stream, but is actually in thread 1's stream.
                    # thread 0 enqueues its div_, which IS in its own stream,
                    # but now races with its convolution.
                    results[t] = torch.nn.functional.conv2d(results[t], weight, padding=0)
                    results[t].div_(4.0)
            torch.cuda.synchronize()
        for _ in range(trials):
            for t in range(num_threads):
                results[t] = torch.ones((1, 1, 2048, 2048), device='cuda')
            threads = [threading.Thread(target=_worker,
                                        args=(t,)) for t in range(num_threads)]
            for thread in threads:
                thread.start()
            for thread in threads:
                thread.join()
            # Each (all-ones 2x2 conv)/4 pass keeps values at 1 but shrinks the
            # image by 1 per side, so after test_iters passes the sum is
            # (2048 - test_iters)**2.
            for t in range(num_threads):
                self.assertEqual(results[t].sum().item(),
                                 (2048 - test_iters) * (2048 - test_iters))
def test_cusparse_multiple_threads_same_device(self):
    """Stress per-thread cusparse handle management with concurrent sparse mms."""
    size = 1024
    num_threads = 2
    trials = 3
    test_iters = 500
    def ones_sparse(size):
        # Dense all-ones matrix expressed in COO sparse format.
        a = torch.arange(size, device='cuda')
        indices = torch.cartesian_prod(a, a).t()
        values = torch.ones(size * size, device='cuda')
        return torch.sparse_coo_tensor(indices, values)
    weight = ones_sparse(size)
    results = {}
    barrier = threading.Barrier(num_threads)
    def _worker(t):
        my_stream = torch.cuda.Stream()
        # Hard sync so we don't need to worry about creating and using tensors
        # across streams or the fact that default streams are thread-local.
        # Those issues are not the target of this test.
        torch.cuda.synchronize()
        # Line up threads to increase likelihood of race conditions.
        barrier.wait()
        with torch.cuda.stream(my_stream):
            for i in range(test_iters):
                # Analogous to the cublas test above: if all threads share one
                # backend handle, the following sequence may occur:
                # thread 0 sets the handle's stream
                # thread 1 sets the handle's stream
                # thread 0 launches its raw sparse mm, which it thinks is in
                # its own stream, but is actually in thread 1's stream.
                # thread 0 enqueues its div_, which IS in its own stream,
                # but actually now races with its sparse mm.
                results[t] = weight.mm(results[t])
                results[t].div_(float(size))
        torch.cuda.synchronize()
    for _ in range(trials):
        for t in range(num_threads):
            results[t] = torch.ones((size, size), device='cuda')
        threads = [threading.Thread(target=_worker,
                                    args=(t,)) for t in range(num_threads)]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        for t in range(num_threads):
            self.assertEqual(results[t].sum().item(), size * size)
def _run_autocast_outofplace(self, op, args, run_as_type, out_type=None, module=torch, add_kwargs=None):
    """Run ``op`` under autocast and verify both output dtype and numerics.

    Tries the ``module.op`` and ``Tensor.op`` variants (whichever exist),
    checks that each tensor output has dtype ``out_type`` (defaults to
    ``run_as_type``), verifies the two variants agree, and finally compares
    against a manual Python-side cast executed with autocast disabled, which
    should be bitwise identical to what autocast produced.
    """
    # helper to cast args
    def cast(val, to_type):
        if isinstance(val, torch.Tensor):
            # Only floating tensors are autocast-eligible; leave others alone.
            return val.to(to_type) if val.is_floating_point() else val
        elif isinstance(val, collections.abc.Iterable):
            return type(val)(cast(v, to_type) for v in val)
        else:
            return val
    if add_kwargs is None:
        add_kwargs = {}
    fast_dtype = torch.bfloat16 if run_as_type == torch.bfloat16 else torch.float16
    self.assertFalse(torch.is_autocast_enabled())
    with torch.autocast('cuda', dtype=fast_dtype):
        self.assertTrue(torch.is_autocast_enabled())
        out_type = out_type if out_type is not None else run_as_type
        output = output_method = None
        # Try module.* variant, if requested:
        if module is not None and hasattr(module, op):
            output = getattr(module, op)(*args, **add_kwargs)
            if isinstance(output, torch.Tensor):
                self.assertTrue(out_type == output.dtype,
                                "autocast for torch.{} produced {}, should produce {}"
                                .format(op, output.dtype, out_type))
        # Try Tensor.* variant:
        if hasattr(torch.Tensor, op):
            output_method = getattr(args[0], op)(*args[1:], **add_kwargs)
            if isinstance(output_method, torch.Tensor):
                self.assertTrue(out_type == output_method.dtype,
                                "autocast for torch.{} produced {}, should produce torch.{}"
                                .format(op, output_method.dtype, out_type))
        self.assertTrue((output is not None) or (output_method is not None),
                        "{} not found as an attribute on either Tensor or the requested module {}".format(
                            op, module))
        # Accounts for ops that return Tensors, iterables, and other non-Tensors.
        # For example, lstm_cell returns a tuple and equal returns bool.
        def compare(first, second):
            if isinstance(first, torch.Tensor):
                return torch.equal(first, second)
            elif isinstance(first, collections.abc.Iterable):
                return all(compare(f, s) for f, s in zip(first, second))
            else:
                return first == second
        # If both torch.* and Tensor.* variants were found, check outputs are identical
        if (output is not None) and (output_method is not None):
            self.assertTrue(type(output) == type(output_method))
            comparison = compare(output, output_method)
            self.assertTrue(comparison, "torch.{0} result did not match Tensor.{0} result".format(op))
        # Compare numerics to Python-side "autocasting" that (we expect) does the same thing
        # as the C++-side autocasting, and should be bitwise accurate.
        output_to_compare = output if output is not None else output_method
        with torch.autocast('cuda', enabled=False):
            self.assertFalse(torch.is_autocast_enabled())
            if module is not None and hasattr(module, op):
                control = getattr(module, op)(*cast(args, run_as_type), **add_kwargs)
            else:
                control = getattr(args[0].to(run_as_type), op)(*cast(args[1:], run_as_type), **add_kwargs)
            self.assertTrue(type(output_to_compare) == type(control))
            comparison = compare(output_to_compare, control)
            self.assertTrue(comparison, "torch.{} result did not match control".format(op))
        self.assertTrue(torch.is_autocast_enabled())
    self.assertFalse(torch.is_autocast_enabled())
def args_maybe_kwargs(self, op_with_args):
    """Split an ``(op, args[, kwargs])`` tuple, supplying ``{}`` when kwargs are absent."""
    op, args = op_with_args[0], op_with_args[1]
    kwargs = op_with_args[2] if len(op_with_args) > 2 else {}
    return op, args, kwargs
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp16(self):
    """Every torch.* op in the fp16 autocast list should run in float16."""
    with torch.backends.cudnn.flags(enabled=True, deterministic=True):
        for entry in self.autocast_lists.torch_fp16:
            op_name, op_args = entry[0], entry[1]
            # An optional third element flags the op as skipped (e.g. on ROCm).
            if len(entry) == 3 and entry[2]:
                continue
            self._run_autocast_outofplace(op_name, op_args, torch.float16)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_bf16(self):
    """Run the fp16 op list under bfloat16; some ops must raise instead."""
    with torch.backends.cudnn.flags(enabled=True, deterministic=True):
        for op_with_args in self.autocast_lists.torch_fp16:
            skip_test = False
            op, args = op_with_args[0], op_with_args[1]
            if len(op_with_args) == 3:
                skip_test = op_with_args[2]  # TEST_WITH_ROCM
            # cudnn ops only support bf16 via the V8 API on sm80+.
            should_error_from_cudnn = 'cudnn' in op and not\
                ('TORCH_CUDNN_V8_API_ENABLED' in os.environ and
                 int(os.environ['TORCH_CUDNN_V8_API_ENABLED']) and
                 torch.cuda.get_device_capability() >= (8, 0))
            # Fused/RNN-cell kernels have no bf16 implementation.
            should_error_from_not_implemented = should_error_from_cudnn or 'prelu' in op or 'thnn' in op \
                or 'fused' in op or 'gru' in op or op == '_thnn_fused_lstm_cell' or op == 'lstm_cell'
            if not skip_test:
                if should_error_from_not_implemented:
                    with self.assertRaises(RuntimeError, msg=str(op) + ' should not be supported for bfloat16!'):
                        self._run_autocast_outofplace(op, args, torch.bfloat16)
                else:
                    if torch.cuda.is_bf16_supported():
                        self._run_autocast_outofplace(op, args, torch.bfloat16)
                    else:
                        with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):
                            self._run_autocast_outofplace(op, args, torch.bfloat16)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp32(self):
    """torch.* ops in the fp32 autocast list must produce float32 outputs."""
    for entry in self.autocast_lists.torch_fp32:
        op_name, op_args, extra_kwargs = self.args_maybe_kwargs(entry)
        self._run_autocast_outofplace(op_name, op_args, torch.float32, add_kwargs=extra_kwargs)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_need_autocast_promote(self):
    """Ops autocast must explicitly promote should yield float32 results."""
    check = self._run_autocast_outofplace
    for op_name, op_args in self.autocast_lists.torch_need_autocast_promote:
        check(op_name, op_args, torch.float32)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_expect_builtin_promote(self):
    """Ops relying on builtin type promotion keep their promoted dtype under autocast."""
    for op_name, op_args, expected_dtype in self.autocast_lists.torch_expect_builtin_promote:
        self._run_autocast_outofplace(op_name, op_args, torch.float32, out_type=expected_dtype)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp16(self):
    """torch._C._nn ops in the fp16 list should produce float16 under autocast."""
    with torch.backends.cudnn.flags(enabled=True, deterministic=True):
        for op_name, op_args in self.autocast_lists.nn_fp16:
            self._run_autocast_outofplace(op_name, op_args, torch.float16, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_bf16(self):
    """nn fp16 list under bf16: runs normally where supported, else must raise."""
    bf16_supported = torch.cuda.is_bf16_supported()
    with torch.backends.cudnn.flags(enabled=True, deterministic=True):
        for op_name, op_args in self.autocast_lists.nn_fp16:
            if bf16_supported:
                self._run_autocast_outofplace(op_name, op_args, torch.bfloat16, module=torch._C._nn)
            else:
                with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):
                    self._run_autocast_outofplace(op_name, op_args, torch.bfloat16, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp32(self):
    """torch._C._nn ops in the fp32 list must produce float32 under autocast."""
    check = self._run_autocast_outofplace
    for op_name, op_args in self.autocast_lists.nn_fp32:
        check(op_name, op_args, torch.float32, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_linalg_fp16(self):
    """torch._C._linalg ops in the fp16 list should produce float16 under autocast."""
    with torch.backends.cudnn.flags(enabled=True, deterministic=True):
        for op_name, op_args in self.autocast_lists.linalg_fp16:
            self._run_autocast_outofplace(op_name, op_args, torch.float16, module=torch._C._linalg)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp16(self):
    """Tensor-method variants in the fp16 list should produce float16 (module=None)."""
    with torch.backends.cudnn.flags(enabled=True, deterministic=True):
        for op_name, op_args in self.autocast_lists.methods_fp16:
            self._run_autocast_outofplace(op_name, op_args, torch.float16, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp32(self):
    """Tensor-method variants in the fp32 list must produce float32 (module=None)."""
    check = self._run_autocast_outofplace
    for op_name, op_args in self.autocast_lists.methods_fp32:
        check(op_name, op_args, torch.float32, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_expect_builtin_promote(self):
    """Tensor methods relying on builtin promotion keep their promoted dtype."""
    for op_name, op_args, expected_dtype in self.autocast_lists.methods_expect_builtin_promote:
        self._run_autocast_outofplace(op_name, op_args, torch.float32, module=None, out_type=expected_dtype)
def test_autocast_banned(self):
    """Ops banned under autocast must raise RuntimeError inside an autocast region."""
    with torch.autocast('cuda'):
        for op_name, op_args, owning_module in self.autocast_lists.banned:
            banned_fn = getattr(owning_module, op_name)
            with self.assertRaises(RuntimeError):
                banned_fn(*op_args)
def test_autocast_ignored_types(self):
    """Dtypes outside autocast's scope (double, int32) must pass through unchanged."""
    with torch.autocast('cuda'):
        for ignore_type in (torch.double, torch.int32):
            a_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
            b_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
            c_16 = torch.ones((8, 8), dtype=torch.float16, device="cuda:0")
            # Tests if CastPolicy::fp16 ops ignore double and int
            # Currently, no ops belonging to this policy support integer inputs.
            if ignore_type is torch.double:
                # Mixing an ignored double with fp16 should fail (no implicit cast by autocast).
                with self.assertRaises(RuntimeError):
                    torch.mm(a_ignore, c_16)
                with torch.autocast('cuda', enabled=False):
                    type_no_autocast = torch.mm(a_ignore, b_ignore).dtype
                self.assertTrue(torch.mm(a_ignore, b_ignore).dtype is type_no_autocast)
            # Tests if CastPolicy::fp32 ops ignore double and int
            with torch.autocast('cuda', enabled=False):
                type_no_autocast = torch.pow(a_ignore, 2.0).dtype
            self.assertTrue(torch.pow(a_ignore, 2.0).dtype is type_no_autocast)
            # Tests if CastPolicy::fp32_set_opt_dtype ops ignore double and int
            with torch.autocast('cuda', enabled=False):
                type_no_autocast = torch.sum(a_ignore).dtype
            self.assertTrue(torch.sum(a_ignore).dtype is type_no_autocast)
            # Tests if CastPolicy::fp32_append_dtype ops ignore double and int
            # Currently, no ops belonging to this policy support integer inputs.
            if ignore_type is torch.double:
                with torch.autocast('cuda', enabled=False):
                    type_no_autocast = torch.norm(a_ignore).dtype
                self.assertTrue(torch.norm(a_ignore).dtype is type_no_autocast)
def test_autocast_custom_enabled(self):
    """custom_fwd/custom_bwd keep autocast enabled inside a custom autograd Function."""
    class MyMM(torch.autograd.Function):
        @staticmethod
        @torch.cuda.amp.custom_fwd
        def forward(ctx, a, b):
            # Inputs arrive uncast (still fp32) and autocast is active in forward.
            self.assertTrue(a.dtype is torch.float32)
            self.assertTrue(b.dtype is torch.float32)
            self.assertTrue(torch.is_autocast_enabled())
            ctx.save_for_backward(a, b)
            return a.mm(b)
        @staticmethod
        @torch.cuda.amp.custom_bwd
        def backward(ctx, grad):
            # custom_bwd restores the forward's autocast state for backward.
            self.assertTrue(torch.is_autocast_enabled())
            a, b = ctx.saved_tensors
            return grad.mm(b.t()), a.t().mm(grad)
    mymm = MyMM.apply
    x = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
    y = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
    with torch.cuda.amp.autocast():
        # mm autocasts to fp16, so the Function's output should be fp16.
        output = mymm(x, y)
        self.assertTrue(output.dtype is torch.float16)
        loss = output.sum()
    loss.backward()
def test_autocast_custom_cast_inputs(self):
    """custom_fwd(cast_inputs=...) casts Tensor args and disables autocast in forward."""
    class MyMM(torch.autograd.Function):
        @staticmethod
        @torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)
        def forward(ctx, a, container, expect_type):
            # cast_inputs should reach Tensors even inside nested containers.
            b = container[1][0]
            self.assertTrue(a.dtype is expect_type)
            self.assertTrue(b.dtype is expect_type)
            # cast_inputs also turns autocast off for the forward body.
            self.assertFalse(torch.is_autocast_enabled())
            ctx.save_for_backward(a, b)
            return a.mm(b)
        @staticmethod
        @torch.cuda.amp.custom_bwd
        def backward(ctx, grad):
            self.assertFalse(torch.is_autocast_enabled())
            a, b = ctx.saved_tensors
            return grad.mm(b.t()), None, None
    mymm = MyMM.apply
    x = torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
    # Puts one input tensor in a nested container. y's contained Tensor won't receive a gradient,
    # because torch.autograd.Function can't hand gradients back to non-Tensor forward arguments.
    # Sets requires_grad=False explicitly so we don't lie about expecting a gradient.
    y = (0, {0: torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=False)})
    with torch.autocast('cuda', ):
        output = mymm(x, y, torch.float32)
        self.assertTrue(output.dtype is torch.float32)
        loss = output.sum()
    loss.backward()
    # Tests if custom_fwd becomes a no-op when mymm runs outside an autocast-enabled region.
    output = mymm(x, y, torch.float16)
    self.assertTrue(output.dtype is torch.float16)
    loss = output.sum()
    loss.backward()
def test_autocast_cat_jit(self):
    """Regression test for gh-38958: cat/stack under autocast via the boxed JIT path."""
    class Model(torch.nn.Module):
        def forward(self):
            first = torch.randn(1)
            second = torch.randn(1)
            joined = torch.cat((first, second), 0)
            return torch.stack([joined, joined], 0)
    # The JIT here doesn't really matter, we just need to call
    # cat via the boxed API
    eager_model = Model()
    scripted_model = torch.jit.script(eager_model)
    with torch.autocast('cuda', enabled=True):
        eager_model()
        scripted_model()
# cudnn RNNs require special backend handling (weights are cast to FP16 and reflattened)
# so they get a dedicated test.
# Despite the large number of RNN cases it tries, the test takes < 15 seconds on a Titan V (similar to V100).
@skipIfRocm
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_rnn(self):
    """Autocasted cudnn RNN/GRU/LSTM must emit fp16 and bitwise-match a manual fp16 run."""
    with torch.backends.cudnn.flags(enabled=True, deterministic=True):
        # seq, batch, features, hidden size
        clses = ("RNN", "GRU", "LSTM")
        T, B, F, H = 3, 4, 5, 6
        dtypes = (torch.float16, torch.float32)
        input_layouts = ("seq_first", "batch_first", "packed")
        # Exhaustive sweep over layout/dtype/topology combinations.
        for (cls, num_layers, bias, input_layout, bidirectional, try_nonpreflattened_weights,
             input_dtype, hidden_dtype, weight_dtype) in \
                product(clses, (1, 2), (True, False), input_layouts, (True, False), (True, False),
                        dtypes, dtypes, dtypes):
            if input_layout == "seq_first":
                batch_first = False
                x = torch.randn((T, B, F), device="cuda", dtype=input_dtype)
            elif input_layout == "batch_first":
                batch_first = True
                x = torch.randn((B, T, F), device="cuda", dtype=input_dtype)
            elif input_layout == "packed":
                batch_first = False
                x = torch.nn.utils.rnn.pack_padded_sequence(torch.randn((T, B, F),
                                                                        device="cuda", dtype=input_dtype),
                                                            lengths=(3, 2, 1, 3),
                                                            enforce_sorted=False)
            rnn = getattr(torch.nn, cls)(F, H, num_layers=num_layers, bidirectional=bidirectional,
                                         bias=bias, batch_first=batch_first).cuda().to(dtype=weight_dtype)
            if try_nonpreflattened_weights:
                # Clone each param in place so the weights are no longer
                # cudnn-preflattened, exercising the reflatten path.
                for p in rnn.parameters():
                    with torch.no_grad():
                        p.set_(p.clone())
            h = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
                            device="cuda", dtype=hidden_dtype)
            if cls == "LSTM":
                c = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
                                device="cuda", dtype=hidden_dtype)
                h = (h, c)
            with torch.autocast('cuda', ):
                out, h_out = rnn(x, h)
            out = out.data if input_layout == "packed" else out
            self.assertEqual(out.dtype, torch.float16)
            # Autocast wrapper requires at::_cudnn_rnn is autograd-exposed. This check can't guarantee
            # at::_cudnn_rnn is autograd-exposed, but if it fires, it indicates some funny business has
            # occurred and we should double check that at::_cudnn_rnn remains autograd-exposed.
            self.assertEqual(out.grad_fn.name(), "CudnnRnnBackward0")
            out.sum().backward()
            grads = [p.grad.clone() for p in rnn.parameters()]
            rnn.zero_grad()
            # Control: cast everything to fp16 manually and rerun without autocast.
            if cls == "LSTM":
                out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), (h[0].half(), h[1].half()))
            else:
                out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), h.half())
            out_control = out_control.data if input_layout == "packed" else out_control
            out_control.sum().backward()
            grads_control = [p.grad.clone() for p in rnn.parameters()]
            # Compares with default tolerances, even for FP16 execution. Barring nondeterminism,
            # autocast and control results should be bitwise identical.
            self.assertEqual(out, out_control)
            if cls == "LSTM":
                self.assertTrue(h_out[0].dtype is torch.float16 and h_out[1].dtype is torch.float16)
                self.assertEqual(h_out[0], h_out_control[0])
                self.assertEqual(h_out[1], h_out_control[1])
            else:
                self.assertEqual(h_out.dtype, torch.float16)
                self.assertEqual(h_out, h_out_control)
            for grad, grad_control in zip(grads, grads_control):
                self.assertEqual(grad.half(), grad_control)
def test_autocast_cache_leak(self):
    # Reported at https://github.com/pytorch/pytorch/issues/48049
    # Test is used to check, if autocast recaches the same parameters
    # when executed in a `torch.no_grad()` block.
    linear = torch.nn.Linear(10, 10).to('cuda')
    data = torch.randn(1, 10, device='cuda')
    with torch.autocast('cuda', ):
        with torch.no_grad():
            out = linear(data)
            # Memory after the first cast should be reused by later iterations;
            # any growth means autocast is re-caching the casted weight.
            first_iter_mem = torch.cuda.memory_allocated()
            for _ in range(3):
                out = linear(data)
            self.assertTrue(first_iter_mem == torch.cuda.memory_allocated())
def test_autocast_checkpointing(self):
    """checkpoint_sequential under autocast keeps requires_grad and the fp16 dtype."""
    layers = [torch.nn.Linear(8, 8) for _ in range(3)]
    model = torch.nn.Sequential(*layers).cuda()
    inp = torch.rand((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
    with torch.autocast('cuda', ):
        out = checkpoint_sequential(model, 2, inp)
    self.assertTrue(out.requires_grad)
    self.assertTrue(out.dtype is torch.float16)
    out.sum().backward()
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_max_large_axis(self):
    """max() along an axis longer than 2**32 elements must locate the right index."""
    big = torch.zeros(2**32, device='cuda', dtype=torch.int8)
    big[-1] = 1
    value, index = big.max(0)
    self.assertEqual(value, 1)
    self.assertEqual(index, big.shape[0] - 1)
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_to_numpy(self):
    """Calling .numpy() on a CUDA tensor must raise TypeError (copy to CPU first)."""
    with self.assertRaises(TypeError):
        torch.empty(1, device="cuda").numpy()
    def test_graph_is_current_stream_capturing(self):
        # is_current_stream_capturing() must be False when no capture is underway...
        self.assertFalse(torch.cuda.is_current_stream_capturing())

        # Graph capture needs CUDA >= 11 and is not supported on ROCm, so the
        # capture half of this test is gated at runtime rather than by decorator.
        if (TEST_CUDA and (not TEST_WITH_ROCM) and int(torch.version.cuda.split(".")[0]) >= 11):
            s = torch.cuda.Stream()
            with torch.cuda.stream(s):
                g = torch.cuda.CUDAGraph()
                # ...still False: capture has not begun on this stream yet...
                self.assertFalse(torch.cuda.is_current_stream_capturing())
                g.capture_begin()
                # ...and True exactly between capture_begin() and capture_end().
                self.assertTrue(torch.cuda.is_current_stream_capturing())
                g.capture_end()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_capture_simple(self):
s = torch.cuda.Stream()
with torch.cuda.stream(s):
a = torch.full((1000,), 1, device="cuda")
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
g.capture_begin()
b = a
for _ in range(10):
b = b + 1
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
g.replay()
self.assertTrue(b.sum().item() == 11000.)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_capture_oom(self):
with self.assertRaisesRegex(RuntimeError, "out of memory"):
with torch.cuda.graph(torch.cuda.CUDAGraph()):
torch.zeros(2 ** 40, device="cuda")
    @unittest.skipIf((not TEST_CUDA) or
                     TEST_WITH_ROCM or
                     int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
    def test_graph_rng_functional(self):
        # Verifies that functional RNG ops (dropout, rrelu) captured inside a
        # CUDA graph advance the generator state on each replay() exactly as
        # the equivalent eager calls would, by interleaving replays with eager
        # invocations and comparing against a purely eager control sequence.
        ops_with_kwargs = ((torch.nn.functional.dropout, {"p": 0.1}),
                           (torch.nn.functional.rrelu, {"training": True}),)
        size = 10000

        def run(op, kwargs):
            a = torch.randn((size,), device="cuda", dtype=torch.float)

            # Control: six eager applications of the op from a fixed seed.
            torch.cuda.manual_seed(5)
            eager_out = a
            for _ in range(6):
                eager_out = op(eager_out, **kwargs)

            graph_in = a.clone()
            stream = torch.cuda.Stream()
            stream.wait_stream(torch.cuda.current_stream())
            with torch.cuda.stream(stream):
                # Re-seed so the captured ops start from the same RNG state
                # the control sequence started from.
                torch.cuda.manual_seed(5)

                g = torch.cuda.CUDAGraph()
                torch.cuda.empty_cache()
                g.capture_begin()
                graph_out = graph_in
                for _ in range(2):
                    graph_out = op(graph_out, **kwargs)
                g.capture_end()
            torch.cuda.current_stream().wait_stream(stream)

            # Runs a graphed->eager->graphed sequence of RNG ops.
            # replay() plays 2 invocations of the op, so the sequence has 6
            # invocations total, matching Control.
            # replay() reads from graph_in and writes to graph_out.
            g.replay()
            out = op(graph_out, **kwargs)
            out = op(out, **kwargs)
            # Feed the eager results back as the graph's static input.
            graph_in.copy_(out)
            g.replay()

            # If replay() updated RNG state correctly, graph_out
            # should now hold data equal to eager_out.
            try:
                self.assertEqual(eager_out, graph_out)
            except Exception as e:
                raise RuntimeError("Failed on ", op) from e

            # We hold references to all tensors used across streams up til this sync,
            # so no need to call record_stream on those tensors.
            torch.cuda.synchronize()

        for op, kwargs in ops_with_kwargs:
            run(op, kwargs)
    @unittest.skipIf((not TEST_CUDA) or
                     TEST_WITH_ROCM or
                     int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
    def test_graph_rng_distributions(self):
        # For a spread of RNG distribution ops (both torch.* functions and
        # in-place Tensor methods), checks that a captured pair of calls
        # produces garbage relative to eager controls before replay, and
        # matches the controls exactly after a replay from the same seed.
        size = 10000
        input = torch.rand((size,), device="cuda", dtype=torch.float)
        alloc = torch.empty((size,), device="cuda", dtype=torch.float)

        # Torch ops to test with sample args (tuple) and kwargs (dict)
        torch_with_args = (("bernoulli", (input.clone(),), {}),
                           # multinomial uses some uncapturable CUDA calls.
                           # TODO: reenable multinomial tests if/when the implementation is capturable.
                           # ("multinomial", (input.clone(), size, True), {}),
                           # ("multinomial", (input.clone(), size // 2, False), {}),
                           # TODO: reenable normal test, where std is a device
                           # tensor, when graph test failures are fixed
                           # ("normal", (input.clone() + 1, input.clone()), {}),
                           ("normal", (input.clone() + 1, 1.0), {}),
                           ("poisson", (input.clone(),), {}),
                           ("rand", (size,), {"device": "cuda", "dtype": torch.float}),
                           ("randint", (0, 3, (size,)), {"device": "cuda", "dtype": torch.float}),
                           ("randn", (size,), {"device": "cuda", "dtype": torch.float}),)

        # Tensor methods to test with sample args (tuple)
        tensor_with_args = (("bernoulli_", (input.clone(),)),
                            ("cauchy_", ()),
                            ("exponential_", ()),
                            ("geometric_", (0.3,)),
                            ("log_normal_", ()),
                            ("normal_", ()),
                            ("random_", ()),
                            ("uniform_", ()),)

        def run(module, op, args, kwargs):
            torch.cuda.manual_seed(5)

            # Each path runs a dummy op to increment the state a bit before creating controls.
            if (module == "torch"):
                dummy = getattr(torch, op)(*args, **kwargs)
                control1 = getattr(torch, op)(*args, **kwargs)
                control2 = getattr(torch, op)(*args, **kwargs)
            else:
                # Tensor-method path: clone a scratch buffer, then fill it
                # in place via the RNG method.
                dummy = alloc.clone()
                control1 = alloc.clone()
                control2 = alloc.clone()
                getattr(dummy, op)(*args)
                getattr(control1, op)(*args)
                getattr(control2, op)(*args)

            stream = torch.cuda.Stream()
            stream.wait_stream(torch.cuda.current_stream())
            with torch.cuda.stream(stream):
                # Re-seed so the capture starts from the same state the
                # controls started from.
                torch.cuda.manual_seed(5)

                g = torch.cuda.CUDAGraph()
                torch.cuda.empty_cache()
                if (module == "torch"):
                    g.capture_begin()
                    t1 = getattr(torch, op)(*args, **kwargs)
                    t2 = getattr(torch, op)(*args, **kwargs)
                    g.capture_end()
                else:
                    t1 = alloc.clone()
                    t2 = alloc.clone()
                    g.capture_begin()
                    getattr(t1, op)(*args)
                    getattr(t2, op)(*args)
                    g.capture_end()
            torch.cuda.current_stream().wait_stream(stream)

            # Capture only records the work; t1/t2 have not been (re)filled by
            # a replay yet, so they should NOT match the controls here.
            try:
                self.assertNotEqual(control1, t1)
                self.assertNotEqual(control2, t2)
            except Exception as e:
                raise RuntimeError("Failed on " + module + "." + op) from e

            # Runs a dummy op prelude, as for controls, to make sure replay()
            # picks up the dummy op's state increment.
            if module == "torch":
                dummy = getattr(torch, op)(*args, **kwargs)
            else:
                dummy = alloc.clone()
                getattr(dummy, op)(*args)

            # Runs RNG ops that fill t1 and t2.
            g.replay()

            try:
                self.assertEqual(control1, t1)
                self.assertEqual(control2, t2)
            except Exception as e:
                raise RuntimeError("Failed on " + module + "." + op) from e

            # We hold references to all tensors used across streams up til this sync,
            # so no need to call record_stream on those tensors.
            torch.cuda.synchronize()

        for op_with_args in torch_with_args:
            run("torch", *op_with_args)

        for meth_with_args in tensor_with_args:
            # Adds an empty dict for kwargs, which none of the Tensor methods use
            run("Tensor", *(meth_with_args + ({},)))
    @unittest.skipIf((not TEST_CUDA) or
                     TEST_WITH_ROCM or
                     int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
    def test_graph_two_successive(self):
        # Captures two graphs back to back, optionally sharing a memory pool,
        # checks numerics, and checks the reserved-memory saving from sharing.
        torch.cuda.empty_cache()

        size = 1000
        # Expected per-pool reservation saving; mirrors kSmallBuffer in
        # c10/cuda/CUDACachingAllocator.cpp -- TODO confirm it stays in sync.
        kSmallBuffer = 2097152

        def func_with_temps(t, val):
            # Two temporaries per call, to exercise allocation inside capture.
            x = t.clone() + val
            y = t.clone() + val
            return x + y

        s = torch.cuda.Stream()

        # "Don't share" runs FIRST and records `reserved_no_sharing`, the
        # baseline the two pool-sharing iterations are compared against below.
        for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
            g0 = torch.cuda.CUDAGraph()
            g1 = torch.cuda.CUDAGraph()

            a = torch.ones((size,), device="cuda")

            s.wait_stream(torch.cuda.current_stream())
            with torch.cuda.stream(s):
                g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
                g0.capture_begin(*g0_args)
                b = a.clone()
                for _ in range(5):
                    b = func_with_temps(b, 1)
                g0.capture_end()

                # In the "via pool()" mode g1 adopts g0's pool directly.
                g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
                g1.capture_begin(*g1_args)
                for _ in range(5):
                    b = func_with_temps(b, 1)
                g1.capture_end()
            torch.cuda.current_stream().wait_stream(s)

            # mixes unrelated eager ops with replays
            c = a.clone()
            for _ in range(2):
                c = func_with_temps(c, 3)
            g0.replay()
            for _ in range(2):
                c = func_with_temps(c, 3)
            g1.replay()
            for _ in range(2):
                c = func_with_temps(c, 3)

            self.assertEqual(b.sum().item(), size * 3070)
            self.assertEqual(c.sum().item(), size * 442)

            if share_mem != "Don't share":
                # Sharing a pool should save exactly one small buffer relative
                # to the baseline recorded in the "Don't share" iteration.
                self.assertEqual(reserved_no_sharing - torch.cuda.memory_stats()["reserved_bytes.all.current"],
                                 kSmallBuffer)
            else:
                reserved_no_sharing = torch.cuda.memory_stats()["reserved_bytes.all.current"]

            del a, b, c, g0, g1
            # Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
            torch.cuda.synchronize()
            torch.cuda.empty_cache()
    @unittest.skip("Temporarily disabled due to a graphs bug in libcuda.so, " +
                   "see https://github.com/pytorch/pytorch/pull/57556")
    @unittest.skipIf((not TEST_CUDA) or
                     TEST_WITH_ROCM or
                     int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
    def test_graph_concurrent_replay(self):
        # Checks that two graphs replayed concurrently corrupt each other's
        # results if (and only if) they share a memory pool.
        torch.cuda.empty_cache()

        size = 1000000  # largeish to help expose race conditions

        def func_with_temps(t, val):
            # Two temporaries per call, so replays contend for pool blocks.
            x = t.clone() + val
            y = t.clone() + val
            return x + y

        s = torch.cuda.Stream()

        for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
            g0 = torch.cuda.CUDAGraph()
            g1 = torch.cuda.CUDAGraph()

            s0 = torch.cuda.Stream()
            s1 = torch.cuda.Stream()

            a = torch.ones((size,), device="cuda")

            s.wait_stream(torch.cuda.current_stream())
            with torch.cuda.stream(s):
                g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
                g0.capture_begin(*g0_args)
                b = a.clone()
                for _ in range(5):
                    b = func_with_temps(b, 1)
                g0.capture_end()

                g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
                g1.capture_begin(*g1_args)
                c = a.clone()
                for _ in range(5):
                    c = func_with_temps(c, 2)
                g1.capture_end()

            # To reproduce data corruption, I need g0 and g1's kernels to run concurrently.
            # But replay() (especially cudaGraphLaunch) can incur significant CPU overhead.
            # The following pattern helps align device-side execution of g0 and g1's kernels.
            torch.cuda.synchronize()
            with torch.cuda.stream(s0):
                # The sleep gives the CPU time to enqueue both replays before
                # either starts executing on the device.
                torch.cuda._sleep(1000000)
                s1.wait_stream(s0)
                g0.replay()
            with torch.cuda.stream(s1):
                g1.replay()
            torch.cuda.current_stream().wait_stream(s0)
            torch.cuda.current_stream().wait_stream(s1)

            if share_mem != "Don't share":
                # Confirms concurrent replays using the same mempool corrupted each other.
                self.assertNotEqual(b.sum().item(), size * 94)
                self.assertNotEqual(c.sum().item(), size * 156)
            else:
                # Confirms concurrent replays using different mempools did not corrupt each other.
                self.assertEqual(b.sum().item(), size * 94)
                self.assertEqual(c.sum().item(), size * 156)

            del a, b, c, g0, g1
            # Tensors used across streams (a, b, c) were held until just now, so no need to call record_stream on them.
            torch.cuda.synchronize()
            torch.cuda.empty_cache()
    @unittest.skipIf((not TEST_CUDA) or
                     TEST_WITH_ROCM or
                     int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
    def test_graph_three_successive(self):
        # Captures three dependent graphs and checks that replaying them out of
        # capture order is only safe when they do NOT share a memory pool.
        torch.cuda.empty_cache()

        size = 1000

        s = torch.cuda.Stream()

        for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
            a = torch.ones((size,), device="cuda")

            g0 = torch.cuda.CUDAGraph()
            g1 = torch.cuda.CUDAGraph()
            g2 = torch.cuda.CUDAGraph()

            s.wait_stream(torch.cuda.current_stream())
            with torch.cuda.stream(s):
                g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
                g0.capture_begin(*g0_args)
                b = a.clone()
                c = b + 1
                d = b + 2
                g0.capture_end()

                args = (g0.pool(),) if share_mem == "via pool()" else g0_args

                g1.capture_begin(*args)
                e = c + 3
                # Dropping c frees its block; when pools are shared, g2's
                # capture may reuse it (that reuse is what's tested below).
                del c
                g1.capture_end()

                g2.capture_begin(*args)
                f = d + 4
                g2.capture_end()
            torch.cuda.current_stream().wait_stream(s)

            # Tests that replaying in capture order is valid
            g0.replay()
            g1.replay()
            g2.replay()

            self.assertEqual(e.sum().item(), size * 5)
            self.assertEqual(f.sum().item(), size * 7)

            # Tests that replaying as g0, g2, g1 is only valid if they don't share a pool
            g0.replay()
            g2.replay()
            g1.replay()

            # If share_mem is True, g2's capture should have reused c's memory for f. We replayed g2 then g1,
            # so we expect g1's captured "e = c + 3" mistakenly filled e with "f's vals + 3".
            self.assertEqual(e.sum().item(), size * (7 + 3) if share_mem != "Don't share" else size * 5)
            self.assertEqual(f.sum().item(), size * 7)

            del a, b, d, e, f, g0, g1, g2
            # Tensors used across streams (a, e, f) were held until just now, so no need to call record_stream on them.
            torch.cuda.synchronize()
            torch.cuda.empty_cache()
    @unittest.skipIf((not TEST_CUDA) or
                     TEST_WITH_ROCM or
                     int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
    def test_graph_memory_stats_and_use_result_after_destroy_graph(self):
        # Pins exact allocator-stat deltas caused by a graph's private pool,
        # and verifies a graph's output tensor survives graph destruction.
        # Constants mirror the caching allocator's sizing rules (see
        # c10/cuda/CUDACachingAllocator.cpp) -- TODO confirm they stay in sync.
        kSmallSize = 1048576
        kSmallBuffer = 2097152
        kLargeBuffer = 20971520
        kMinLargeAlloc = 10485760
        kRoundLarge = 2097152

        elem = 4  # bytes per float32 element

        # this was annoying to write but stresses the expectations pretty rigorously
        cases = ((512 // elem, 1, kSmallBuffer, kSmallBuffer, "small_pool"),
                 (kSmallSize // elem, 2, 2 * kSmallBuffer, kSmallBuffer, "small_pool"),
                 ((kSmallSize + 512) // elem, 1, kLargeBuffer, kLargeBuffer, "large_pool"),
                 ((kMinLargeAlloc - 512) // elem, 2, 2 * kLargeBuffer, kLargeBuffer, "large_pool"),
                 ((kMinLargeAlloc + 512) // elem, 3,
                  3 * (kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge)),
                  kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge),
                  "large_pool"),)

        stats_to_check = ("segment.",
                          "reserved_bytes.",
                          "active.",
                          "active_bytes.")

        gc.collect()
        torch.cuda.empty_cache()

        s = torch.cuda.Stream()

        for (numel,
             delta_cudaMallocs,
             delta_cudaMalloc_bytes,
             delta_cudaMalloc_bytes_post_del_g,
             pool_string) in cases:
            if pool_string == "small_pool":
                delta_active_blocks = 2  # one from "b" plus a sneaky one from CUDAGraph's one-element rng offset holder
                delta_active_bytes = numel * elem + 512  # + 512 for CUDAGraph's rng offset holder
            else:
                delta_active_blocks = 1  # We only check the large pool, which isn't affected by rng offset holder
                delta_active_bytes = numel * elem

            g = torch.cuda.CUDAGraph()
            s.wait_stream(torch.cuda.current_stream())
            with torch.cuda.stream(s):
                # Allocation stat estimates assume input is created on the same stream as capture_begin()
                # (in other words, the same stream silo as the rng offset holder, which is not allocated from the
                # capture's private pool).
                a = torch.ones((numel,), device="cuda")

                precapture_stats = torch.cuda.memory_stats()

                g.capture_begin()
                b = a.clone()
                for _ in range(5):
                    b = b.clone() + 1
                g.capture_end()
            torch.cuda.current_stream().wait_stream(s)

            gc.collect()

            postcapture_stats = torch.cuda.memory_stats()

            expecteds = (delta_cudaMallocs,
                         delta_cudaMalloc_bytes,
                         delta_active_blocks,
                         delta_active_bytes)
            # Double checks replay and stats before and after a call to empty_cache
            for i in range(2):
                for stat, expected in zip(stats_to_check, expecteds):
                    stat = stat + pool_string + ".current"
                    current = postcapture_stats[stat] - precapture_stats[stat]
                    self.assertEqual(current, expected, "Pre to post capture delta of " +
                                     stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))

                g.replay()
                self.assertEqual(b.sum().item(), 6 * numel)

                if i == 0:
                    # empty_cache must NOT change these deltas while g is alive;
                    # the second pass of the loop verifies that.
                    torch.cuda.empty_cache()

            del g
            gc.collect()
            torch.cuda.empty_cache()
            postdel_stats = torch.cuda.memory_stats()

            # Uses graph result b after graph has been deleted
            self.assertEqual(b.sum().item(), 6 * numel)

            # b should be the only live reference remaining from the graph's private pool
            expecteds = (1, delta_cudaMalloc_bytes_post_del_g, 1, numel * elem)
            for stat, expected in zip(stats_to_check, expecteds):
                stat = stat + pool_string + ".current"
                current = postdel_stats[stat] - precapture_stats[stat]
                self.assertEqual(current, expected, "Pre capture to post graph delete delta of " +
                                 stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))

            # del a, b before the next case is essential, otherwise overwriting a and b in the next case
            # can throw off its allocation/deallocation counts.
            del a, b
            # Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
            torch.cuda.synchronize()
            torch.cuda.empty_cache()
    @unittest.skipIf((not TEST_CUDA) or
                     TEST_WITH_ROCM or
                     int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
    def test_graph_record_stream(self):
        # Makes sure graph capture defers attempting to reclaim allocations used across streams. See
        # "Q. Why skip process_events if a capture might be underway?" in c10/cuda/CUDACachingAllocator.cpp
        torch.cuda.empty_cache()

        potential_problem = torch.zeros((3,), device="cuda")
        a = torch.zeros((3,), device="cuda")
        s0 = torch.cuda.Stream()
        s1 = torch.cuda.Stream()
        s2 = torch.cuda.Stream()
        g = torch.cuda.CUDAGraph()

        torch.cuda.synchronize()
        with torch.cuda.stream(s0):
            potential_problem.record_stream(s0)
            # Long device-side sleep keeps s0 busy so potential_problem's
            # end-of-life event is still pending when capture begins on s1.
            torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
            potential_problem.fill_(1.)
        del potential_problem

        with torch.cuda.stream(s1):
            g.capture_begin()
            # potential_problem's allocation should still be outstanding. if DeviceCachingAllocator::malloc
            # mistakenly calls process_events, it will trigger cudaEventQueries on potential_problem's end-of-life
            # event, which will cause the capture to error.
            b = a.clone()

            # Let's also see what happens if we record_stream on a tensor during capture.
            s2.wait_stream(s1)
            with torch.cuda.stream(s2):
                b.fill_(1.)
                b.record_stream(s2)  # dummy record_stream
                del b
            s1.wait_stream(s2)
            g.capture_end()
        torch.cuda.synchronize()

        # dummy allocation triggers process_events, Hopefully successfully processes b's end-of-life event.
        c = torch.zeros((3,), device="cuda")
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
# If this test is the first in the process to try cudnn rnns with dropout, it'll initialize
# DropoutState's long-lived internal buffer. Calling code perceives this (correct) behavior
# as a memory leak unless we skip the leak check.
@skipCUDAMemoryLeakCheckIf(True)
def test_graph_cudnn_dropout(self):
# Tests the interaction of cuda graph capture with DropoutState's syncs in ATen/native/cudnn/RNN.cpp.
# In particular, if user runs a sequence of captured and noncaptured cudnn rnns, DropoutState should
# avoid syncing noncapturing streams with captured events or vice versa.
torch.cuda.empty_cache()
model = torch.nn.LSTM(512, 512, 2, dropout=0.5).cuda()
x = torch.ones(100, 192, 512, device="cuda")
y = model(x)
g = torch.cuda.CUDAGraph()
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g.capture_begin()
y = model(x)
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
y = model(x)
    @unittest.skipIf((not TEST_CUDA) or
                     TEST_WITH_ROCM or
                     int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
    def test_graph_grad_scaling(self):
        # Verifies GradScaler's scale/growth-tracker bookkeeping stays correct
        # when the scaled forward+backward pass is replayed from a CUDA graph.
        torch.cuda.empty_cache()

        scaler = torch.cuda.amp.GradScaler(init_scale=4.)
        g = torch.cuda.CUDAGraph()
        s = torch.cuda.Stream()

        weight = torch.ones((100,), device="cuda", requires_grad=True)
        opt = torch.optim.SGD([weight], lr=0.1)
        static_input = torch.ones_like(weight)
        static_grad = torch.ones_like(weight)

        # warmup
        s = torch.cuda.Stream()
        s.wait_stream(torch.cuda.current_stream())
        with torch.cuda.stream(s):
            loss = (weight.half() * static_input).sum()
            scaler.scale(loss).backward()
        torch.cuda.current_stream().wait_stream(s)

        opt.zero_grad(set_to_none=True)

        # capture
        with torch.cuda.graph(g):
            loss = (weight.half() * static_input).sum()
            scaler.scale(loss).backward()

        # 20000 and 40000, once multiplied by the scale, overflow fp16
        # (max finite value 65504), producing inf grads that make the
        # scaler halve its scale and reset its growth tracker.
        input_vals = [5, 20000, 5, 40000]
        # If the scale gets updated properly, these are the scale, growth tracker,
        # and grad values we expect.
        expected_scales = [4, 2, 2, 1]
        expected_growth_trackers = [1, 0, 1, 0]
        expected_grad_vals = [5 * 4, float("inf"), 5 * 2, float("inf")]

        for data, scale, growth_tracker, grad_val in zip(input_vals,
                                                         expected_scales,
                                                         expected_growth_trackers,
                                                         expected_grad_vals):
            # Refill the graph's static input, then rerun the captured
            # forward+backward.
            static_input.fill_(data)
            g.replay()
            self.assertEqual(weight.grad, torch.full_like(weight.grad, grad_val))
            scaler.step(opt)
            scaler.update()
            # NOTE(review): pokes GradScaler's private attributes; brittle if
            # the scaler's internals ever change.
            self.assertEqual(scaler._scale, scale)
            self.assertEqual(scaler._growth_tracker, growth_tracker)
    @unittest.skipIf((not TEST_CUDA) or
                     TEST_WITH_ROCM or
                     int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
    @parametrize('with_amp,cache_enabled', [(False, False), (True, False), (True, True)],
                 name_fn=lambda x, y: '{}{}'.format({True: "with_amp", False: "without_amp"}[x],
                                                    {True: "_cache_enabled", False: "_cache_disabled"}[y] if x else ''))
    def test_graph_make_graphed_callables(self, with_amp, cache_enabled):
        # Trains a graphed model and an eager control model from identical
        # weights/RNG state and asserts their parameters stay bitwise equal.
        torch.manual_seed(5)
        torch.cuda.manual_seed(5)
        N, D_in, H, D_out = 640, 4096, 2048, 1024

        # Two structurally identical two-section models: [0] is graphed,
        # [1] is the eager control.
        models = []
        for _ in range(2):
            model_section1 = torch.nn.Sequential(torch.nn.Linear(D_in, H),
                                                 torch.nn.Dropout(p=0.1)).cuda()
            model_section2 = torch.nn.Sequential(torch.nn.Linear(H, D_out),
                                                 torch.nn.Dropout(p=0.2)).cuda()
            models.append(torch.nn.Sequential(model_section1, model_section2))

        model_graphed = models[0]
        model_control = models[1]

        model_graphed.load_state_dict(model_control.state_dict())

        opt_graphed = torch.optim.SGD(model_graphed.parameters(), lr=0.1)
        opt_control = torch.optim.SGD(model_control.parameters(), lr=0.1)

        # Sample inputs used only to trace/capture the callables.
        x = torch.randn(N, D_in, device='cuda')
        h = torch.randn(N, H, device='cuda', requires_grad=True)
        y_pred = torch.randn(N, D_out, device='cuda', requires_grad=True)
        y = torch.randn(N, D_out, device='cuda')

        loss_fn_control = torch.nn.functional.mse_loss
        relu_control = torch.nn.functional.relu

        # This is a good stress test. It graphs four callables: two Modules and two python functions.
        with torch.cuda.amp.autocast(with_amp, cache_enabled=cache_enabled):
            model_graphed[0], model_graphed[1], relu_graphed, loss_fn_graphed = \
                torch.cuda.make_graphed_callables((model_graphed[0], model_graphed[1], relu_control, loss_fn_control),
                                                  ((x,), (h,), (y_pred,), (y_pred, y)))

        real_inputs = [torch.rand_like(x) for _ in range(10)]
        real_targets = [torch.rand_like(y) for _ in range(10)]

        for m, opt, relu, loss_fn in zip((model_graphed, model_control),
                                         (opt_graphed, opt_control),
                                         (relu_graphed, relu_control),
                                         (loss_fn_graphed, loss_fn_control)):
            # Resets RNC states before iterations for graphed and ungraphed models,
            # so dropout math should be bitwise identical for both.
            torch.manual_seed(5)
            torch.cuda.manual_seed(5)
            for data, target in zip(real_inputs, real_targets):
                opt.zero_grad(set_to_none=True)
                with torch.cuda.amp.autocast(with_amp, cache_enabled=cache_enabled):
                    y_pred = m(data)
                    y_pred = relu(y_pred)
                    loss = loss_fn(y_pred, target)
                loss.backward()
                opt.step()

        for p, pc in zip(model_graphed.parameters(), model_control.parameters()):
            self.assertEqual(p, pc)

        # We graphed the models in training mode. Eval should still run ungraphed.
        model_graphed.eval()
        model_control.eval()
        self.assertEqual(model_graphed(real_inputs[0]), model_control(real_inputs[0]))
    @unittest.skipIf((not TEST_CUDA) or
                     TEST_WITH_ROCM or
                     int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
    def test_graph_adam_adamw(self):
        # Compares Adam/AdamW with capturable=True (optionally stepping via a
        # captured graph replay) against a capturable=False control running the
        # same gradient sequence; final parameters must match.
        OptClasses = (torch.optim.Adam, torch.optim.AdamW)
        cases = []
        # Needs generalization if we want to extend this test to non-Adam-like optimizers.
        for Class, foreach, amsgrad in product(OptClasses, (False, True), (False, True)):
            cases.append((Class, {"lr": 0.1, "betas": (0.8, 0.7), "foreach": foreach, "amsgrad": amsgrad}))

        steps_warmup = 3
        steps_train = 2

        for OptClass, kwargs in cases:
            for actually_do_graphs in (True, False):
                params = [torch.randn((i + 5, i + 5), device="cuda") for i in range(2)]
                params_control = [p.clone().requires_grad_() for p in params]
                params_graphed = [p.clone().requires_grad_() for p in params]

                # One pre-made grad per (step, param) so both runs see
                # identical gradients.
                grads = [[torch.randn_like(p) for p in params] for _ in range(steps_warmup + steps_train)]

                # Control (capturable=False)
                opt = OptClass(params_control, capturable=False, **kwargs)

                for i in range(steps_warmup + steps_train):
                    for j, p in enumerate(params_control):
                        p.grad = grads[i][j]
                    opt.step()

                # capturable=True
                opt = OptClass(params_graphed, capturable=True, **kwargs)

                for i in range(steps_warmup):
                    for j, p in enumerate(params_graphed):
                        p.grad = grads[i][j]
                    opt.step()

                if actually_do_graphs:
                    g = torch.cuda.CUDAGraph()
                    with torch.cuda.graph(g):
                        opt.step()

                for i in range(steps_train):
                    if actually_do_graphs:
                        # Replay path: grads must be copied INTO the existing
                        # grad tensors the capture recorded, not rebound.
                        for j, p in enumerate(params_graphed):
                            p.grad.copy_(grads[i + steps_warmup][j])
                        g.replay()
                    else:
                        # Passing capturable=True to the constructor and running without graphs should still be
                        # numerically correct, even if it's not ideal for performance.
                        for j, p in enumerate(params_graphed):
                            p.grad = grads[i + steps_warmup][j]
                        opt.step()

                for p_control, p_graphed in zip(params_control, params_graphed):
                    self.assertEqual(p_control, p_graphed)
def test_batch_norm_gather_stats(self):
input = torch.randn(1, 3, 3, 3, device='cuda')
mean, invstd = torch.batch_norm_gather_stats(
input, mean=torch.ones(2, 3, device='cuda'), invstd=torch.ones(2, 3, device='cuda'),
running_mean=None, running_var=None , momentum=.1, eps=1e-5, count=2
)
self.assertEqual(mean, torch.ones(3, device='cuda'))
self.assertEqual(invstd, torch.ones(3, device='cuda'))
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_cuda_device_memory_allocated(self):
from torch.cuda import memory_allocated
device_count = torch.cuda.device_count()
current_alloc = [memory_allocated(idx) for idx in range(device_count)]
x = torch.ones(10, device="cuda:0")
self.assertTrue(memory_allocated(0) > current_alloc[0])
self.assertTrue(all(memory_allocated(torch.cuda.device(idx)) == current_alloc[idx] for idx in range(1, device_count)))
def test_matmul_memory_use(self):
def get_max_used():
torch.cuda.synchronize()
val = torch.cuda.max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
return val
a = torch.rand(1, 32, 32, device="cuda")
b = torch.rand(24, 32, 1, device="cuda")
get_max_used()
torch.matmul(a, b)
matmul_mem = get_max_used()
a = a.expand(24, 32, 32)
torch.matmul(a, b)
matmul_expand_mem = get_max_used()
torch.bmm(a, b)
bmm_mem = get_max_used()
self.assertEqual(matmul_expand_mem, matmul_mem)
self.assertEqual(bmm_mem, matmul_mem)
    @unittest.skipIf(not TEST_WITH_ROCM, "ROCm-only test")
    def test_rocm_backward_pass_guard(self):
        # The test exercises a ROCm-specific feature.
        # torch._C._rocm_is_backward_pass() should be False inside a custom
        # Function's forward and True inside its backward.

        class MyFunction(torch.autograd.Function):
            @staticmethod
            def forward(ctx, tensor, constant):
                # NOTE: `self` here is the enclosing test method's `self`,
                # captured by closure — valid even inside a @staticmethod
                # because this class is defined inside the test method.
                self.assertFalse(torch._C._rocm_is_backward_pass())
                ctx.constant = constant
                return tensor * constant

            @staticmethod
            def backward(ctx, grad_output):
                self.assertTrue(torch._C._rocm_is_backward_pass())
                return grad_output * ctx.constant, None

        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.a = torch.nn.Parameter(torch.randn(()))

            def forward(self, x):
                return MyFunction.apply(x, self.a)

        model = MyModule()
        criterion = torch.nn.MSELoss(reduction='sum')
        optimizer = torch.optim.SGD(model.parameters(), lr=1e-6)

        # One full train step drives both the forward and backward assertions.
        x = torch.randn(5, 5)
        result = model(x)
        loss = criterion(result, x)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
@unittest.skipIf(TEST_WITH_ROCM, "ROCm doesn't support CUDA_VISIBLE_DEVICES")
@unittest.skipIf(TEST_MULTIGPU, "Testing on one GPU is sufficient")
def test_lazy_init(self):
""" Validate that no CUDA calls are made during `import torch` call"""
from subprocess import check_output
test_script = "import os; import torch;os.environ['CUDA_VISIBLE_DEVICES']='32';print(torch.cuda.device_count())"
rc = check_output([sys.executable, '-c', test_script]).decode("ascii").strip()
self.assertEqual(rc, "0")
class TestCudaComm(TestCase):
    def _test_broadcast(self, input):
        """Shared driver: broadcast `input` to devices 0 and 1 and check
        results, the out= variant, and the error messages for bad out= usage."""
        if not TEST_MULTIGPU:
            raise unittest.SkipTest("only one GPU detected")
        # test regular
        results = comm.broadcast(input, (0, 1))
        for i, t in enumerate(results):
            self.assertEqual(t.get_device(), i)
            self.assertEqual(t, input)
            if input.is_cuda and input.get_device() == i:  # test not copying on same device
                self.assertEqual(t.data_ptr(), input.data_ptr())
        # test out=
        for inplace in [True, False]:
            if inplace:
                outputs = [torch.empty_like(input, device=0), torch.empty_like(input, device=1)]
            else:
                # First output IS a copy of input on device 0, so broadcast
                # writes into one fresh and one pre-filled tensor.
                outputs = [input.cuda(0), torch.empty_like(input, device=1)]
            results = comm.broadcast(input, out=outputs)
            for r, o in zip(results, outputs):
                # out= must return the very same tensor objects it was given.
                self.assertIs(r, o)
            for i, t in enumerate(results):
                self.assertEqual(t.get_device(), i)
                self.assertEqual(t, input)
        # test error msg
        with self.assertRaisesRegex(RuntimeError, r"Exactly one of 'devices' and 'out'"):
            comm.broadcast(input, (0, 1), out=outputs)
        with self.assertRaisesRegex(RuntimeError,
                                    r"Expected all output tensors to be CUDA tensors, but output tensor at index 1"):
            comm.broadcast(input, out=[input.cuda(0), input.cpu()])
        with self.assertRaisesRegex(RuntimeError,
                                    r"Expected all output tensors to have same shape as the source .+ at index 1"):
            comm.broadcast(input, out=[input.cuda(0), input.cuda(1).unsqueeze(0)])
def test_broadcast_cpu(self):
self._test_broadcast(torch.randn(5, 5))
def test_broadcast_gpu(self):
self._test_broadcast(torch.randn(5, 5).cuda())
    def _test_broadcast_coalesced(self, tensors, buffer_size):
        """Shared driver: broadcast_coalesced must match per-tensor broadcast,
        return device-0 inputs as-is, and keep version counters functional."""
        # Reference: broadcast each tensor individually.
        b_tensors = [comm.broadcast(t, (0, 1)) for t in tensors]
        for (_, bt), t in zip(b_tensors, tensors):
            self.assertEqual(bt.get_device(), 1)
            self.assertEqual(bt, t)
            self.assertIsInstance(bt, type(t))

        bc_tensors = comm.broadcast_coalesced(tensors, (0, 1), buffer_size=buffer_size)
        bc_tensors_t = list(zip(*bc_tensors))
        self.assertEqual(b_tensors, bc_tensors_t)
        for (_, bt), (_, bct) in zip(b_tensors, bc_tensors_t):
            self.assertEqual(bt.get_device(), bct.get_device())
            self.assertIsInstance(bct, type(bt))

        # check that tensors on device[0] are returned as-is
        for out_tensors in (b_tensors, bc_tensors_t):
            for inp_t, (out_t, _) in zip(tensors, out_tensors):
                self.assertIs(inp_t, out_t)

        # check that the tensors not on device[0] have different version counters
        # NOTE [ Version Counter in comm.*_coalesced ]
        versions = [t._version for _, t in bc_tensors_t]
        for old_version, (_, t) in zip(versions, bc_tensors_t):
            self.assertEqual(t._version, old_version)
            # An in-place write must bump the counter by exactly one.
            t.zero_()
            self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
# Note: fails sometimes on the CI, passes on dual gfx906
def test_broadcast_coalesced(self):
numel = 5
num_bytes = numel * 8
tensors = [
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_dense_only(self):
numel = 5
num_bytes = numel * 8
tensors = [
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_empty_tensors(self):
tensors = [
torch.tensor([]).byte().cuda(),
torch.randn(5).cuda(),
torch.randn(5).double().cuda()
]
self._test_broadcast_coalesced(tensors, 256)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
x_cuda = x.cuda(0)
y_cuda = y.cuda(1)
result = comm.reduce_add((x_cuda, y_cuda))
self.assertEqual(result.get_device(), 0)
self.assertEqual(result.cpu(), x + y)
    def _test_reduce_add_coalesced(self, tensors, buffer_size):
        """Shared driver: reduce_add_coalesced over duplicated inputs (device 0
        and device 1 copies) must match per-tensor reduce_add, t * 2."""
        # Same values on both devices, so every reduced tensor equals t * 2.
        dup_tensors = [tensors, [t.cuda(1) for t in tensors]]

        # Reference: reduce each (device-0, device-1) pair individually.
        r_tensors = [comm.reduce_add(t) for t in zip(*dup_tensors)]
        for r, t in zip(r_tensors, tensors):
            self.assertEqualTypeString(r, t)
            self.assertEqual(r.coalesce() if r.is_sparse else r, t * 2)

        rc_tensors = comm.reduce_add_coalesced(dup_tensors, buffer_size=buffer_size)
        self.assertEqual(r_tensors, rc_tensors)
        for r, rc in zip(r_tensors, rc_tensors):
            self.assertEqualTypeString(rc, r)

        # Since we have both cuda:0 and cuda:1 inputs, the outputs must be new.
        # We can check that they have different version counters.
        # NOTE [ Version Counter in comm.*_coalesced ]
        versions = [t._version for t in rc_tensors]
        for old_version, t in zip(versions, rc_tensors):
            self.assertEqual(t._version, old_version)
            # An in-place write must bump the counter by exactly one.
            t.zero_()
            self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced(self):
    # Mixed batch: sparse tensors of several dtypes/sizes interleaved with
    # dense tensors, to exercise the type-bucketing in the coalesced path.
    numel = 5
    num_bytes = numel * 8
    tensors = [
        make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
        torch.randn(numel).long().cuda(),
        torch.randn(numel).cuda(),
        make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
        make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
        make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
        make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
        torch.randn(numel).long().cuda(),
        torch.randn(numel).long().cuda(),
        make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
        torch.randn(numel * 2).int().cuda(),  # int is 2x shorter
        torch.randn(numel).cuda(),
    ]
    self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced_dense_only(self):
    """Coalesced reduce-add over dense tensors only (no sparse inputs)."""
    count = 5
    unit_bytes = count * 8
    long_0 = torch.randn(count).long().cuda()
    float_0 = torch.randn(count).cuda()
    long_1 = torch.randn(count).long().cuda()
    long_2 = torch.randn(count).long().cuda()
    int_0 = torch.randn(count * 2).int().cuda()  # int is 2x shorter
    float_1 = torch.randn(count).cuda()
    batch = [long_0, float_0, long_1, long_2, int_0, float_1]
    self._test_reduce_add_coalesced(batch, unit_bytes * 5 // 2)
def _test_scatter(self, input, chunk_sizes=None, dim=0):
    """Scatter `input` across devices 0/1 along `dim` and validate results.

    Covers the regular path, the `out=` path (results must alias the given
    buffers), and the error messages for invalid argument combinations.
    """
    if not TEST_MULTIGPU:
        raise unittest.SkipTest("only one GPU detected")
    if chunk_sizes is None:
        # Default split: two equal halves along `dim`.
        ref_chunk_sizes = tuple(repeat(input.size(dim) // 2, 2))
    else:
        ref_chunk_sizes = chunk_sizes

    # test regular
    result = comm.scatter(input, (0, 1), chunk_sizes, dim)
    self.assertEqual(len(result), 2)
    chunk_start = 0
    for i, r in enumerate(result):
        chunk_end = chunk_start + ref_chunk_sizes[i]
        index = [slice(None, None) for _ in range(input.dim())]
        index[dim] = slice(chunk_start, chunk_end)
        self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
        chunk_start = chunk_end
        if r.device == input.device:
            self.assertEqual(r.data_ptr(), input.data_ptr())  # for target @ same device, a view should be returned

    # test out
    out = [torch.empty_like(t) for t in result]
    result = comm.scatter(input, dim=dim, out=out)
    self.assertEqual(len(result), 2)
    chunk_start = 0
    for i, r in enumerate(result):
        self.assertIs(r, out[i])
        chunk_end = chunk_start + ref_chunk_sizes[i]
        index = [slice(None, None) for _ in range(input.dim())]
        index[dim] = slice(chunk_start, chunk_end)
        self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
        chunk_start = chunk_end

    # test error msg
    if chunk_sizes is not None:
        with self.assertRaisesRegex(RuntimeError, r"Expected devices and chunk_sizes to be of same length"):
            comm.scatter(input, [0 for _ in range(len(chunk_sizes) + 1)], dim=dim, chunk_sizes=chunk_sizes)
    with self.assertRaisesRegex(RuntimeError, r"'devices' must not be specified"):
        comm.scatter(input, (0, 1), dim=dim, out=out)
    with self.assertRaisesRegex(RuntimeError, r"Expected at least one device to scatter to"):
        comm.scatter(input, (), dim=dim)
    with self.assertRaisesRegex(RuntimeError, r"Expected at least one output tensor to scatter to"):
        comm.scatter(input, dim=dim, out=[])
    with self.assertRaisesRegex(RuntimeError,
                                r"Expected all output tensors to be CUDA tensors, but output tensor at index 0"):
        comm.scatter(input, dim=dim, out=([out[0].cpu()] + out[1:]))
    with self.assertRaisesRegex(RuntimeError, r"Output tensor at index 0 has incorrect shape"):
        comm.scatter(input, dim=dim, out=([out[0].unsqueeze(0)] + out[1:]))
    with self.assertRaisesRegex(RuntimeError, r"Total size for output tensors along scatter dim \d+ does not match"):
        index = [slice(None, None) for _ in range(input.dim())]
        index[dim] = slice(1, None)
        comm.scatter(input, dim=dim, out=([out[0][tuple(index)]] + out[1:]))
def test_scatter_cpu(self):
    # Scatter a CPU tensor along dim 0.
    self._test_scatter(torch.randn(4, 4), dim=0)
def test_scatter_cpu_dim(self):
    # Scatter a CPU tensor along a non-zero dim.
    self._test_scatter(torch.randn(4, 4), dim=1)
def test_scatter_cpu_neg_dim(self):
    # Negative dims must be accepted (-2 == dim 0 for a 2-D input).
    self._test_scatter(torch.randn(4, 4), dim=-2)
def test_scatter_cpu_sizes(self):
    # Uneven explicit chunk sizes (2 + 4 == 6 rows).
    self._test_scatter(torch.randn(6, 4), chunk_sizes=(2, 4))
def test_scatter_gpu(self):
    # Scatter a tensor that already lives on a GPU, along dim 0.
    self._test_scatter(torch.randn(4, 4).cuda(), dim=0)
def test_scatter_gpu_dim(self):
    # GPU-resident input, non-zero dim.
    self._test_scatter(torch.randn(4, 4).cuda(), dim=1)
def test_scatter_gpu_neg_dim(self):
    # GPU-resident input, negative dim.
    self._test_scatter(torch.randn(4, 4).cuda(), dim=-2)
def test_scatter_gpu_sizes(self):
    # GPU-resident input with uneven explicit chunk sizes.
    self._test_scatter(torch.randn(6, 4).cuda(), chunk_sizes=(2, 4))
def _test_gather(self, dim):
    """Gather two tensors from cuda:0/cuda:1 along `dim` to various targets.

    Destinations cover the current device (None), an explicit CUDA device,
    CPU, and (if available) a third GPU; both the `out=` and `destination=`
    paths are exercised, followed by the error-message checks.
    """
    if not TEST_MULTIGPU:
        raise unittest.SkipTest("only one GPU detected")
    x = torch.randn(2, 5, device=0)
    y = torch.randn(2, 5, device=1)
    expected_size = list(x.size())
    expected_size[dim] += y.size(dim)
    expected_size = torch.Size(expected_size)

    destinations = [None, torch.device('cuda:0'), torch.device('cpu')]
    if torch.cuda.device_count() > 2:
        destinations.append(torch.device('cuda:2'))
    with torch.cuda.device(1):
        for destination in destinations:
            if destination is None:
                # destination=None gathers onto the current CUDA device.
                expected_device = torch.device('cuda', torch.cuda.current_device())
            else:
                expected_device = destination
            for use_out in [True, False]:
                if use_out:
                    out = torch.empty(expected_size, device=expected_device)
                    result = comm.gather((x, y), dim, out=out)
                    self.assertIs(out, result)
                else:
                    result = comm.gather((x, y), dim, destination=destination)
                self.assertEqual(result.device, expected_device)
                self.assertEqual(result.size(), expected_size)

                # The gathered tensor is x followed by y along `dim`.
                index = [slice(None, None), slice(None, None)]
                index[dim] = slice(0, x.size(dim))
                self.assertEqual(result[tuple(index)], x)
                index[dim] = slice(x.size(dim), x.size(dim) + y.size(dim))
                self.assertEqual(result[tuple(index)], y)

    # test error msg
    with self.assertRaisesRegex(RuntimeError, r"'destination' must not be specified"):
        comm.gather((x, y), dim, destination='cpu', out=torch.empty(expected_size, device='cpu'))
    with self.assertRaisesRegex(RuntimeError, r"Expected at least one tensor to gather from"):
        comm.gather(())
    with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to be CUDA tensors, "):
        comm.gather((x.cpu(), y))
    with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to have the same number of dimensions"):
        comm.gather((x, y.unsqueeze(0)))
    with self.assertRaisesRegex(RuntimeError, r"Input tensor at index 1 has invalid shape"):
        if dim in [0, -2]:
            comm.gather((x, y[:, 1:]), dim=dim)
        elif dim in [1, -1]:
            comm.gather((x, y[1:, :]), dim=dim)
def test_gather(self):
    # Gather along dim 0.
    self._test_gather(0)
def test_gather_dim(self):
    # Gather along dim 1.
    self._test_gather(1)
def test_gather_neg_dim(self):
    # Gather along a negative dim (-1 == dim 1 for 2-D inputs).
    self._test_gather(-1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_format_scatter_gather(self):
    # channels_last memory format must survive a scatter/gather round trip.
    nhwc = torch.randn((10, 3, 32, 32), device='cpu').contiguous(memory_format=torch.channels_last)
    results = torch.cuda.comm.scatter(nhwc, (0, 1), None, 0)
    for result in results:
        self.assertFalse(result.is_contiguous())
        self.assertTrue(result.is_contiguous(memory_format=torch.channels_last))

    gathered = torch.cuda.comm.gather(results)
    self.assertTrue(gathered.is_contiguous(memory_format=torch.channels_last))
def test_matmul_device_mismatch(self):
    """Mixing CPU and CUDA operands in matmul/addmm must raise."""
    host = torch.rand((10, 10))
    dev = host.cuda()

    mismatch_msg = "Expected all tensors to be on the same device"
    with self.assertRaisesRegex(RuntimeError, mismatch_msg):
        host @ dev
    with self.assertRaisesRegex(RuntimeError, mismatch_msg):
        dev @ host

    # addmm: every placement of CPU/CUDA across the three operands.
    for bias, lhs, rhs in product((host, dev), repeat=3):
        if bias.device == lhs.device == rhs.device:
            torch.addmm(bias, lhs, rhs)
        else:
            with self.assertRaisesRegex(RuntimeError, mismatch_msg):
                torch.addmm(bias, lhs, rhs)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_scatter_namedtuple(self):
    # tests ability to scatter namedtuples and retrieve a list where each
    # element is of the expected namedtuple type.
    fields = ("a", "b")
    TestNamedTupleInput_0 = collections.namedtuple("NamedTuple", fields)
    num_gpus = torch.cuda.device_count()
    a = torch.rand(num_gpus * 2, device=0)
    b = torch.rand(num_gpus * 2, device=0)
    # Expected per-GPU slices: 2 consecutive elements moved to each device.
    a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
    b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]

    inp = TestNamedTupleInput_0(a, b)
    target_gpus = [torch.device(i) for i in range(num_gpus)]
    scatter_out = scatter_gather.scatter(inp, target_gpus)

    for i, x in enumerate(scatter_out):
        self.assertTrue(isinstance(x, type(inp)))
        self.assertEqual(x._fields, fields)
        expected_a = a_tensors_for_gpu[i]
        expected_b = b_tensors_for_gpu[i]
        self.assertEqual(expected_a, x.a)
        self.assertEqual(expected_b, x.b)

    # Same check for a typing.NamedTuple subclass (vs collections.namedtuple).
    class TestNamedTupleInput_1(NamedTuple):
        a: torch.tensor
        b: torch.tensor

    a = torch.rand(num_gpus * 2, device=0)
    b = torch.rand(num_gpus * 2, device=0)
    a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
    b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
    inp = TestNamedTupleInput_1(a, b)

    scatter_out = scatter_gather.scatter(inp, target_gpus)
    for i, x in enumerate(scatter_out):
        self.assertTrue(isinstance(x, type(inp)))
        self.assertEqual(x._fields, fields)
        expected_a = a_tensors_for_gpu[i]
        expected_b = b_tensors_for_gpu[i]
        self.assertEqual(expected_a, x.a)
        self.assertEqual(expected_b, x.b)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_gather_namedtuple(self):
    # tests ability to gather a list of namedtuples and return a namedtuple where each
    # element is of the expected tensor type.
    fields = ['a', 'b']
    TestNamedTupleInput_0 = collections.namedtuple('NamedTuple', fields)

    num_gpus = torch.cuda.device_count()
    a = torch.rand(num_gpus * 2, device=0)
    b = torch.rand(num_gpus * 2, device=1)
    out1 = TestNamedTupleInput_0(a, b)

    a = torch.rand(num_gpus * 2, device=1)
    b = torch.rand(num_gpus * 2, device=0)
    out2 = TestNamedTupleInput_0(a, b)

    outputs = [out1, out2]

    out = scatter_gather.gather(outputs, 'cpu')  # test on CPU
    for i, x in enumerate(out):
        self.assertTrue(isinstance(x, type(out2[-1])))  # x must be a tensor
        cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
        self.assertTrue(torch.equal(x, cat))

    out = scatter_gather.gather(outputs, 0)  # test on GPU
    for i, x in enumerate(out):
        self.assertTrue(isinstance(x, type(out2[-1])))
        cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
        self.assertTrue(torch.equal(x, cat))

    # Same check for a typing.NamedTuple subclass.
    class TestNamedTupleInput_1(NamedTuple):
        a: torch.tensor
        b: torch.tensor

    a = torch.rand(num_gpus * 2, device=0)
    b = torch.rand(num_gpus * 2, device=1)
    out1 = TestNamedTupleInput_1(a, b)
    a = torch.rand(num_gpus * 2, device=1)
    b = torch.rand(num_gpus * 2, device=0)
    out2 = TestNamedTupleInput_1(a, b)
    outputs = [out1, out2]

    out = scatter_gather.gather(outputs, 0)  # test on GPU
    for i, x in enumerate(out):
        self.assertTrue(isinstance(x, type(out2[-1])))
        cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
        self.assertTrue(torch.equal(x, cat))

    out = scatter_gather.gather(outputs, 'cpu')  # test on CPU
    for i, x in enumerate(out):
        self.assertTrue(isinstance(x, type(out2[-1])))
        cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
        self.assertTrue(torch.equal(x, cat))
def test_memory_snapshot(self):
    """Exercise the private CUDA allocator history/snapshot APIs.

    Records allocation history, allocates a recognizable tensor, and checks
    that its allocation shows up in the snapshot with this file in the
    recorded stack frames. History recording is always switched off again
    in the `finally` block.
    """
    try:
        torch.cuda.memory.empty_cache()
        torch.cuda.memory._record_memory_history(True)
        x = torch.rand(311, 411, device='cuda')

        # create a bunch of tensors that all will tile into the
        # same segment to exercise the history merging code
        # 512B is the minimum block size,
        # so we allocate all the tensors to this size to make sure
        # they tile evenly
        tensors = [torch.rand(128, device='cuda') for _ in range(1000)]
        while tensors:
            del tensors[randint(0, len(tensors) - 1)]

        # exercise the history trimming code
        torch.rand(128 * 5, device='cuda')

        ss = torch.cuda.memory._snapshot()
        found_it = False
        for seg in ss:
            for b in seg['blocks']:
                if 'history' in b:
                    for h in b['history']:
                        # 311*411 float32 elements -> 4 bytes each.
                        if h['real_size'] == 311 * 411 * 4:
                            self.assertTrue('test_cuda' in h['frames'][0]['filename'])
                            found_it = True
        self.assertTrue(found_it)

        if not IS_WINDOWS:
            with tempfile.NamedTemporaryFile() as f:
                torch.cuda.memory._save_segment_usage(f.name)
                with open(f.name, 'r') as f2:
                    self.assertTrue('test_cuda.py' in f2.read())
    finally:
        torch.cuda.memory._record_memory_history(False)
def test_raises_oom(self):
    # A 1 TiB allocation is expected to exceed any device and raise
    # the dedicated OutOfMemoryError subclass (not a generic RuntimeError).
    with self.assertRaises(torch.cuda.OutOfMemoryError):
        torch.empty(1024 * 1024 * 1024 * 1024, device='cuda')
# Expand @parametrize-decorated tests into individual test methods.
instantiate_parametrized_tests(TestCuda)

if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_cuda.py |
# Owner(s): ["oncall: package/deploy"]
import textwrap
import types
from torch.utils._freeze import Freezer, PATH_MARKER
from torch.testing._internal.common_utils import run_tests, TestCase
class TestFreezer(TestCase):
    """Tests the freeze.py script"""

    def test_compile_string(self):
        # Every code object produced by Freezer.compile_string — including
        # code objects nested in co_consts — must carry PATH_MARKER as its
        # co_filename.
        freezer = Freezer(True)
        code_str = textwrap.dedent(
            """
            class MyCls:
                def __init__(self):
                    pass
            """
        )
        co = freezer.compile_string(code_str)
        num_co = 0

        def verify_filename(co: types.CodeType):
            # Recursively count code objects and check their filenames.
            nonlocal num_co
            if not isinstance(co, types.CodeType):
                return
            self.assertEqual(PATH_MARKER, co.co_filename)
            num_co += 1
            for nested_co in co.co_consts:
                verify_filename(nested_co)

        verify_filename(co)
        # there is at least one nested code object besides the top level one
        self.assertTrue(num_co >= 2)
# Standard PyTorch test entry point.
if __name__ == "__main__":
    run_tests()
| pytorch-master | test/test_deploy.py |
# -*- coding: utf-8 -*-
# Owner(s): ["module: scatter & gather ops"]
import random
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(parametrize, run_tests, TestCase,)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, dtypesIfCUDA,
toleranceOverride, tol,)
from torch.testing._internal.common_dtype import \
(get_all_dtypes,)
# Protects against includes accidentally setting the default dtype to
# something other than float32 as a side effect of importing.
assert torch.get_default_dtype() is torch.float32

# Note: test_scatter_gather_ops.py
# This test file tests scatter and gather operations,
# like torch.scatter and torch.gather.
class TestScatterGather(TestCase):
    # Fills an index tensor with valid indices
    def _fill_indices(self, idx, dim, dim_size, elems_per_row, m, n, o, unique_indices=True):
        # For every position orthogonal to `dim`, draw `elems_per_row` indices
        # in [0, dim_size); `unique_indices` draws without replacement.
        for i in range(1 if dim == 0 else m):
            for j in range(1 if dim == 1 else n):
                for k in range(1 if dim == 2 else o):
                    ii = [i, j, k]
                    ii[dim] = slice(0, idx.size(dim) + 1)
                    if unique_indices:
                        idx[tuple(ii)] = torch.randperm(dim_size)[0:elems_per_row]
                    else:
                        idx[tuple(ii)] = torch.randint(dim_size, (elems_per_row,))

    @dtypes(torch.float32, torch.complex64)
    def test_gather(self, device, dtype):
        # Compare torch.gather against a naive triple-loop reference.
        m, n, o = random.randint(10, 20), random.randint(10, 20), random.randint(10, 20)
        elems_per_row = random.randint(1, 10)
        dim = random.randrange(3)

        src = make_tensor((m, n, o), device=device, dtype=dtype)
        idx_size = [m, n, o]
        idx_size[dim] = elems_per_row
        idx = make_tensor(idx_size, device=device, dtype=torch.long)
        self._fill_indices(idx, dim, src.size(dim), elems_per_row, m, n, o)

        actual = torch.gather(src, dim, idx)
        expected = torch.zeros(idx_size, device=device, dtype=dtype)
        for i in range(idx_size[0]):
            for j in range(idx_size[1]):
                for k in range(idx_size[2]):
                    ii = [i, j, k]
                    ii[dim] = idx[i, j, k]
                    expected[i, j, k] = src[tuple(ii)]
        self.assertEqual(actual, expected, atol=0, rtol=0)

        # Guarded because torch.max isn't defined for complex types
        if not dtype.is_complex:
            # gather with max's indices must reproduce max's values.
            src = make_tensor((3, 4, 5), device=device, dtype=dtype)
            expected, idx = src.max(2, True)
            actual = torch.gather(src, 2, idx)
            self.assertEqual(actual, expected, atol=0, rtol=0)

    @dtypes(torch.bool)
    def test_gather_bool(self, device, dtype):
        # Hand-checked 2x2 boolean gather.
        src = torch.tensor(((False, True), (True, True)), device=device, dtype=dtype)
        idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long)
        actual = torch.gather(src, 1, idx)
        expected = torch.tensor(((False, False), (True, True)), device=device, dtype=dtype)
        self.assertEqual(actual, expected, atol=0, rtol=0)

    @parametrize("sparse_grad", [False, True])
    @dtypes(torch.float32, torch.float64)
    def test_gather_backward_with_empty_index_tensor(self, device, dtype, sparse_grad):
        # Backward through gather with a zero-element index must produce an
        # all-zero gradient (dense or sparse) rather than erroring.
        dim = -1
        input = torch.rand([10, 5], dtype=dtype, device=device, requires_grad=True)
        index = torch.randint(0, 2, [3, 0], dtype=torch.int64, device=device)
        res = torch.gather(input, dim, index, sparse_grad=sparse_grad)
        res.sum().backward()
        grad = input.grad.to_dense() if sparse_grad else input.grad
        expected_grad = torch.zeros_like(input, requires_grad=False)
        self.assertEqual(grad, expected_grad, atol=0, rtol=0)

    def _test_scatter_base(self, fn, *, device, dtype, is_scalar, reduction,
                           unique_indices=True, include_self=True):
        """Run `fn` (a scatter variant) and compare against a loop reference.

        `is_scalar` scatters a Python scalar instead of a tensor source;
        `reduction`/`include_self` mirror the scatter_reduce_ arguments.
        """
        m, n, o = random.randint(10, 20), random.randint(10, 20), random.randint(10, 20)
        elems_per_row = random.randint(1, 10)
        dim = random.randrange(3)

        idx_size = [m, n, o]
        idx_size[dim] = elems_per_row
        idx = torch.empty(tuple(idx_size), device=device, dtype=torch.long)
        self._fill_indices(idx, dim, ([m, n, o])[dim], elems_per_row, m, n, o, unique_indices)

        if is_scalar:
            src = random.random()
        else:
            # Source strictly larger than the index in every dim.
            src_size = [random.randint(1, 5) + s for s in idx_size]
            src = make_tensor(tuple(src_size), device=device, dtype=dtype)

        base = make_tensor((m, n, o), device=device, dtype=dtype)
        if reduction is not None:
            if fn is torch.Tensor.scatter_reduce_:
                actual = fn(base.clone(), dim, idx, src, reduce=reduction, include_self=include_self)
            else:
                actual = fn(base.clone(), dim, idx, src, reduce=reduction)
        else:
            actual = fn(base.clone(), dim, idx, src)

        expected = base.clone()
        # counts tracks how many values landed at each destination; seeding
        # with include_self counts the pre-existing value for "mean".
        counts = torch.zeros(base.shape, dtype=torch.long, device=device) + include_self
        for i in range(idx_size[0]):
            for j in range(idx_size[1]):
                for k in range(idx_size[2]):
                    ii = [i, j, k]
                    ii[dim] = idx[i, j, k]
                    if fn is torch.Tensor.scatter_add_:
                        expected[tuple(ii)] += src[i, j, k]
                    else:
                        # method may be 'scatter_', 'scatter', 'scatter_reduce'
                        # or 'scatter_reduce_', the former two might have a reduction argument
                        # while the latter two always do
                        value = src if is_scalar else src[i, j, k]
                        if ((not include_self) and counts[tuple(ii)] == 0):
                            expected[tuple(ii)] = value
                        else:
                            if reduction == "add" or reduction == "sum":
                                expected[tuple(ii)] += value
                            elif reduction == "multiply" or reduction == "prod":
                                expected[tuple(ii)] *= value
                            elif reduction == "amax":
                                expected[tuple(ii)] = max(expected[tuple(ii)], value)
                            elif reduction == "amin":
                                expected[tuple(ii)] = min(expected[tuple(ii)], value)
                            elif reduction == "mean":
                                expected[tuple(ii)] += value
                            else:
                                expected[tuple(ii)] = value
                        counts[tuple(ii)] += 1

        if (reduction == "mean"):
            counts.masked_fill_(counts == 0, 1)
            if (dtype.is_floating_point or dtype.is_complex):
                expected /= counts
            else:
                expected.div_(counts, rounding_mode="floor")

        self.assertEqual(actual, expected, atol=0, rtol=0)

        # Tests empty index
        dst = make_tensor((2, 2), device=device, dtype=dtype)
        idx = torch.tensor((), device=device, dtype=torch.long)
        src = make_tensor((2, 2), device=device, dtype=dtype)
        if reduction is not None:
            actual = fn(dst, 0, idx, src, reduce=reduction)
        else:
            actual = fn(dst, 0, idx, src)
        self.assertEqual(actual, dst, atol=0, rtol=0)

    @dtypes(torch.float16, torch.float32, torch.complex64)
    def test_scatter_(self, device, dtype):
        self._test_scatter_base(torch.Tensor.scatter_, device=device, dtype=dtype,
                                is_scalar=False, reduction=None)

    @dtypes(torch.float16, torch.float32, torch.complex64)
    def test_scatter__scalar(self, device, dtype):
        self._test_scatter_base(torch.Tensor.scatter_, device=device, dtype=dtype,
                                is_scalar=True, reduction=None)

    # FIXME: RuntimeError: "cuda_scatter_gather_base_kernel_reduce_multiply" not implemented for 'ComplexFloat'
    @toleranceOverride({torch.float16: tol(atol=1e-2, rtol=0)})
    @dtypesIfCUDA(torch.float16, torch.float32)
    @dtypes(torch.float16, torch.float32, torch.complex64)
    def test_scatter__reductions(self, device, dtype):
        for reduction in ("add", "multiply"):
            self._test_scatter_base(torch.Tensor.scatter_, device=device, dtype=dtype,
                                    is_scalar=False, reduction=reduction)
            self._test_scatter_base(torch.Tensor.scatter_, device=device, dtype=dtype,
                                    is_scalar=True, reduction=reduction)

    @dtypes(torch.float16, torch.float32, torch.complex64)
    def test_scatter_add_(self, device, dtype):
        self._test_scatter_base(torch.Tensor.scatter_add_, device=device, dtype=dtype,
                                is_scalar=False, reduction=None)

    @dtypes(torch.float32)
    def test_scatter_add_mult_index_base(self, device, dtype):
        # All indices collide at row/col 0: the accumulated value must equal
        # the number of scattered ones.
        m, n = 30, 40
        idx = torch.zeros(m, n, device=device, dtype=torch.long)
        src = torch.ones(m, n, device=device, dtype=dtype)
        res0 = torch.zeros(m, n, device=device, dtype=dtype).scatter_add_(0, idx, src)
        res1 = torch.zeros(m, n, device=device, dtype=dtype).scatter_add_(1, idx, src)

        self.assertEqual(res0[0, :], m * torch.ones(n, device=device, dtype=dtype), atol=0, rtol=0)
        self.assertEqual(res1[:, 0], n * torch.ones(m, device=device, dtype=dtype), atol=0, rtol=0)

    # FIXME: discrepancy between bool ReduceAdd on CUDA and CPU (a + b on CPU and buggy a && b on CUDA)
    @dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True, include_bool=False))
    def test_scatter_reduce_sum(self, device, dtype):
        for include_self in (True, False):
            self._test_scatter_base(torch.Tensor.scatter_reduce_, device=device, dtype=dtype,
                                    is_scalar=False, reduction='sum', unique_indices=False,
                                    include_self=include_self)

    @dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True))
    @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
    def test_scatter_reduce_prod(self, device, dtype):
        for include_self in (True, False):
            self._test_scatter_base(torch.Tensor.scatter_reduce_, device=device, dtype=dtype,
                                    is_scalar=False, reduction='prod', unique_indices=False,
                                    include_self=include_self)

    @dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True, include_bool=False))
    @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
    def test_scatter_reduce_mean(self, device, dtype):
        for include_self in (True, False):
            self._test_scatter_base(torch.Tensor.scatter_reduce_, device=device, dtype=dtype,
                                    is_scalar=False, reduction='mean', unique_indices=False,
                                    include_self=include_self)

    @dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False))
    @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
    def test_scatter_reduce_amax(self, device, dtype):
        for include_self in (True, False):
            self._test_scatter_base(torch.Tensor.scatter_reduce_, device=device, dtype=dtype,
                                    is_scalar=False, reduction='amax', unique_indices=False,
                                    include_self=include_self)
            # simple test for nan/inf propagation
            if (dtype.is_floating_point):
                input = torch.zeros(3, device=device, dtype=dtype)
                src = torch.tensor([1, float('nan'), -float('inf'), -float('inf'), 2, float('inf')], device=device, dtype=dtype)
                idx = torch.tensor([0, 0, 1, 1, 2, 2], device=device)
                input.scatter_reduce_(0, idx, src, 'amax', include_self=include_self)
                expected_result = torch.tensor([float('nan'), -float('inf'), float('inf')], device=device, dtype=dtype)
                if (include_self):
                    expected_result[1] = 0
                self.assertEqual(input, expected_result)

    @dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False))
    @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
    def test_scatter_reduce_amin(self, device, dtype):
        for include_self in (True, False):
            self._test_scatter_base(torch.Tensor.scatter_reduce_, device=device, dtype=dtype,
                                    is_scalar=False, reduction='amin', unique_indices=False,
                                    include_self=include_self)
            # simple test for nan/inf propagation
            if (dtype.is_floating_point):
                input = torch.zeros(3, device=device, dtype=dtype)
                src = torch.tensor([1, float('nan'), -2, -float('inf'), float('inf'), float('inf')], device=device, dtype=dtype)
                idx = torch.tensor([0, 0, 1, 1, 2, 2], device=device)
                input.scatter_reduce_(0, idx, src, 'amin', include_self=include_self)
                expected_result = torch.tensor([float('nan'), -float('inf'), float('inf')], device=device, dtype=dtype)
                if (include_self):
                    expected_result[2] = 0
                self.assertEqual(input, expected_result)
# Generic Device Test Framework instantiation, see
# https://github.com/pytorch/pytorch/wiki/Running-and-writing-tests
# for details.
instantiate_device_type_tests(TestScatterGather, globals())

if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_scatter_gather_ops.py |
#!/usr/bin/env python3
import argparse
import copy
from datetime import datetime
from distutils.util import strtobool
from distutils.version import LooseVersion
import functools
import os
import pathlib
import shutil
import signal
import subprocess
import sys
import tempfile
import json
from typing import Dict, Optional, List, cast, Any
import torch
from torch.utils import cpp_extension
from torch.testing._internal.common_utils import (
IS_CI,
FILE_SCHEMA,
TEST_WITH_ROCM,
shell,
set_cwd,
parser as common_parser,
)
import torch.distributed as dist
# Repository root (this file lives in <repo>/test/).
REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent

try:
    # using tools/ to optimize test run.
    sys.path.append(str(REPO_ROOT))
    from tools.stats.export_test_times import TEST_TIMES_FILE
    from tools.testing.test_selections import (
        get_reordered_tests,
        get_test_case_configs,
        calculate_shards,
    )

    HAVE_TEST_SELECTION_TOOLS = True
except ImportError:
    # tools/ is unavailable (e.g. installed package, not a checkout):
    # degrade gracefully and run without test-selection statistics.
    HAVE_TEST_SELECTION_TOOLS = False
    print(
        "Unable to import test_selections from tools/testing. Running without test selection stats..."
    )
def discover_tests(
        base_dir: Optional[pathlib.Path] = None,
        blocklisted_patterns: Optional[List[str]] = None,
        blocklisted_tests: Optional[List[str]] = None,
        extra_tests: Optional[List[str]] = None) -> List[str]:
    """Discover test_*.py modules under *base_dir* (default: this directory).

    Returns sorted module names (relative, no .py suffix) minus anything
    matching *blocklisted_patterns* (prefix match) or listed verbatim in
    *blocklisted_tests*, plus *extra_tests* appended as-is.
    """
    root = base_dir if base_dir is not None else pathlib.Path(__file__).resolve().parent

    def is_blocked(name: str) -> bool:
        # Blocked when it matches a prefix pattern or is listed verbatim.
        if blocklisted_patterns is not None and any(
                name.startswith(pattern) for pattern in blocklisted_patterns):
            return True
        return blocklisted_tests is not None and name in blocklisted_tests

    names = [str(path.relative_to(root))[:-3] for path in root.glob('**/test_*.py')]
    # Invert slashes on Windows
    if sys.platform == "win32":
        names = [name.replace('\\', '/') for name in names]
    names = [name for name in names if not is_blocked(name)]
    if extra_tests is not None:
        names += extra_tests
    return sorted(names)
# Master test list: everything under test/ named test_*.py, minus tests run
# by aggregate suites (fx, jit, ...) or known-unrunnable here, plus a few
# scripts that don't follow the test_*.py naming convention.
TESTS = discover_tests(
    blocklisted_patterns=[
        'ao',
        'bottleneck_test',
        'custom_backend',
        'custom_operator',
        'fx',  # executed by test_fx.py
        'jit',  # executed by test_jit.py
        'mobile',
        'onnx',
        'package',  # executed by test_package.py
        'quantization',  # executed by test_quantization.py
        'autograd',  # executed by test_autograd.py
    ],
    blocklisted_tests=[
        'test_bundled_images',
        'test_cpp_extensions_aot',
        'test_determination',
        'test_jit_fuser',
        'test_jit_simple',
        'test_jit_string',
        'test_kernel_launch_checks',
        'test_metal',
        'test_nnapi',
        'test_segment_reductions',
        'test_static_runtime',
        'test_throughput_benchmark',
        'test_typing',
        "distributed/bin/test_script",
        "distributed/elastic/multiprocessing/bin/test_script",
        "distributed/launcher/bin/test_script",
        "distributed/launcher/bin/test_script_init_method",
        "distributed/launcher/bin/test_script_is_torchelastic_launched",
        "distributed/launcher/bin/test_script_local_rank",
        "distributed/test_c10d_spawn",
        'distributions/test_transforms',
        'distributions/test_utils',
    ],
    extra_tests=[
        "test_cpp_extensions_aot_ninja",
        "test_cpp_extensions_aot_no_ninja",
        "distributed/elastic/timer/api_test",
        "distributed/elastic/timer/local_timer_example",
        "distributed/elastic/timer/local_timer_test",
        "distributed/elastic/events/lib_test",
        "distributed/elastic/metrics/api_test",
        "distributed/elastic/utils/logging_test",
        "distributed/elastic/utils/util_test",
        "distributed/elastic/utils/distributed_test",
        "distributed/elastic/multiprocessing/api_test",
        "test_deploy",
    ]
)
# All FullyShardedDataParallel tests, reused by several blocklists below.
FSDP_TEST = [test for test in TESTS if test.startswith("distributed/fsdp")]

# Tests need to be run with pytest.
USE_PYTEST_LIST = [
    "distributed/pipeline/sync/skip/test_api",
    "distributed/pipeline/sync/skip/test_gpipe",
    "distributed/pipeline/sync/skip/test_inspect_skip_layout",
    "distributed/pipeline/sync/skip/test_leak",
    "distributed/pipeline/sync/skip/test_portal",
    "distributed/pipeline/sync/skip/test_stash_pop",
    "distributed/pipeline/sync/skip/test_tracker",
    "distributed/pipeline/sync/skip/test_verify_skippables",
    "distributed/pipeline/sync/test_balance",
    "distributed/pipeline/sync/test_bugs",
    "distributed/pipeline/sync/test_checkpoint",
    "distributed/pipeline/sync/test_copy",
    "distributed/pipeline/sync/test_deferred_batch_norm",
    "distributed/pipeline/sync/test_dependency",
    "distributed/pipeline/sync/test_inplace",
    "distributed/pipeline/sync/test_microbatch",
    "distributed/pipeline/sync/test_phony",
    "distributed/pipeline/sync/test_pipe",
    "distributed/pipeline/sync/test_pipeline",
    "distributed/pipeline/sync/test_stream",
    "distributed/pipeline/sync/test_transparency",
    "distributed/pipeline/sync/test_worker",
    "distributions/test_constraints",
    "distributions/test_transforms",
    "distributions/test_utils",
    "test_typing",
    "distributed/elastic/events/lib_test",
    "distributed/elastic/agent/server/test/api_test",
    "test_deploy",
]
# Tests that cannot run on Windows.
WINDOWS_BLOCKLIST = [
    "distributed/nn/jit/test_instantiator",
    "distributed/rpc/test_faulty_agent",
    "distributed/rpc/test_tensorpipe_agent",
    "distributed/rpc/test_share_memory",
    "distributed/rpc/cuda/test_tensorpipe_agent",
    "distributed/pipeline/sync/skip/test_api",
    "distributed/pipeline/sync/skip/test_gpipe",
    "distributed/pipeline/sync/skip/test_inspect_skip_layout",
    "distributed/pipeline/sync/skip/test_leak",
    "distributed/pipeline/sync/skip/test_portal",
    "distributed/pipeline/sync/skip/test_stash_pop",
    "distributed/pipeline/sync/skip/test_tracker",
    "distributed/pipeline/sync/skip/test_verify_skippables",
    "distributed/pipeline/sync/test_balance",
    "distributed/pipeline/sync/test_bugs",
    "distributed/pipeline/sync/test_checkpoint",
    "distributed/pipeline/sync/test_copy",
    "distributed/pipeline/sync/test_deferred_batch_norm",
    "distributed/pipeline/sync/test_dependency",
    "distributed/pipeline/sync/test_inplace",
    "distributed/pipeline/sync/test_microbatch",
    "distributed/pipeline/sync/test_phony",
    "distributed/pipeline/sync/test_pipe",
    "distributed/pipeline/sync/test_pipeline",
    "distributed/pipeline/sync/test_stream",
    "distributed/pipeline/sync/test_transparency",
    "distributed/pipeline/sync/test_worker",
    "distributed/elastic/agent/server/test/api_test",
    "distributed/elastic/multiprocessing/api_test",
    # BUGFIX: the next two entries previously had no trailing comma, so
    # Python's implicit string-literal concatenation fused them with the
    # following line into one bogus entry and neither test was blocklisted.
    "distributed/_shard/checkpoint/test_checkpoint",
    "distributed/_shard/checkpoint/test_file_system_checkpoint",
    "distributed/_shard/sharding_spec/test_sharding_spec",
    "distributed/_shard/sharding_plan/test_sharding_plan",
    "distributed/_shard/sharded_tensor/test_megatron_prototype",
    "distributed/_shard/sharded_tensor/test_sharded_tensor",
    "distributed/_shard/sharded_tensor/test_sharded_tensor_reshard",
    "distributed/_shard/sharded_tensor/ops/test_chunk",
    "distributed/_shard/sharded_tensor/ops/test_elementwise_ops",
    "distributed/_shard/sharded_tensor/ops/test_embedding",
    "distributed/_shard/sharded_tensor/ops/test_embedding_bag",
    "distributed/_shard/sharded_tensor/ops/test_binary_cmp",
    "distributed/_shard/sharded_tensor/ops/test_init",
    "distributed/_shard/sharded_tensor/ops/test_linear",
    "distributed/_shard/sharded_tensor/ops/test_math_ops",
    "distributed/_shard/sharded_tensor/ops/test_matrix_ops",
    "distributed/_shard/sharded_tensor/ops/test_softmax",
    "distributed/_shard/sharded_optim/test_sharded_optim",
    "distributed/_shard/test_partial_tensor",
    "distributed/_shard/test_replicated_tensor",
] + FSDP_TEST
# Tests that cannot run on ROCm.
ROCM_BLOCKLIST = [
    "distributed/rpc/test_faulty_agent",
    "distributed/rpc/test_tensorpipe_agent",
    "distributed/rpc/test_share_memory",
    "distributed/rpc/cuda/test_tensorpipe_agent",
    # BUGFIX: the next two entries previously had no trailing comma, so
    # Python's implicit string-literal concatenation fused them with the
    # following line into one bogus entry and neither test was blocklisted.
    "distributed/_shard/checkpoint/test_checkpoint",
    "distributed/_shard/checkpoint/test_file_system_checkpoint",
    "distributed/_shard/sharding_spec/test_sharding_spec",
    "distributed/_shard/sharding_plan/test_sharding_plan",
    "distributed/_shard/sharded_tensor/test_megatron_prototype",
    "distributed/_shard/sharded_tensor/test_sharded_tensor",
    "distributed/_shard/sharded_tensor/test_sharded_tensor_reshard",
    "distributed/_shard/sharded_tensor/ops/test_chunk",
    "distributed/_shard/sharded_tensor/ops/test_elementwise_ops",
    "distributed/_shard/sharded_tensor/ops/test_embedding",
    "distributed/_shard/sharded_tensor/ops/test_embedding_bag",
    "distributed/_shard/sharded_tensor/ops/test_binary_cmp",
    "distributed/_shard/sharded_tensor/ops/test_init",
    "distributed/_shard/sharded_tensor/ops/test_linear",
    "distributed/_shard/sharded_tensor/ops/test_math_ops",
    "distributed/_shard/sharded_tensor/ops/test_matrix_ops",
    "distributed/_shard/sharded_tensor/ops/test_softmax",
    "distributed/_shard/sharded_optim/test_sharded_optim",
    "distributed/_shard/test_partial_tensor",
    "distributed/_shard/test_replicated_tensor",
    "test_determination",
    "test_jit_legacy",
    "test_openmp",
]
# Tests that must not be run with --run-parallel (see run_test below).
RUN_PARALLEL_BLOCKLIST = [
    "test_cpp_extensions_jit",
    "test_cpp_extensions_open_device_registration",
    "test_jit_disabled",
    "test_mobile_optimizer",
    "test_multiprocessing",
    "test_multiprocessing_spawn",
    "test_namedtuple_return_api",
    "test_overrides",
    "test_show_pickle",
    "test_tensorexpr",
    "test_cuda_primary_ctx",
    "test_cuda_trace",
] + FSDP_TEST
# A subset of our TEST list that validates PyTorch's ops, modules, and autograd function as expected
CORE_TEST_LIST = [
    "test_autograd",
    "test_modules",
    "test_nn",
    "test_ops",
    "test_ops_gradients",
    "test_ops_jit",
    "test_torch"
]

# if a test file takes longer than 5 min, we add it to TARGET_DET_LIST
SLOW_TEST_THRESHOLD = 300  # seconds
# Per-backend environment for distributed test runs; populated only for the
# backends this build actually supports.
DISTRIBUTED_TESTS_CONFIG = {}

if dist.is_available():
    DISTRIBUTED_TESTS_CONFIG["test"] = {"WORLD_SIZE": "1"}
    if not TEST_WITH_ROCM and dist.is_mpi_available():
        DISTRIBUTED_TESTS_CONFIG["mpi"] = {
            "WORLD_SIZE": "3",
            "TEST_REPORT_SOURCE_OVERRIDE": "dist-mpi",
        }
    if dist.is_nccl_available():
        DISTRIBUTED_TESTS_CONFIG["nccl"] = {
            "WORLD_SIZE": "2" if torch.cuda.device_count() == 2 else "3",
            "TEST_REPORT_SOURCE_OVERRIDE": "dist-nccl",
        }
    if dist.is_gloo_available():
        DISTRIBUTED_TESTS_CONFIG["gloo"] = {
            "WORLD_SIZE": "2" if torch.cuda.device_count() == 2 else "3",
            "TEST_REPORT_SOURCE_OVERRIDE": "dist-gloo",
        }

# https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python
SIGNALS_TO_NAMES_DICT = {
    getattr(signal, n): n for n in dir(signal) if n.startswith("SIG") and "_" not in n
}
# User-facing message shown when ninja is missing for C++ extension tests.
CPP_EXTENSIONS_ERROR = """
Ninja (https://ninja-build.org) is required for some of the C++ extensions
tests, but it could not be found. Install ninja with `pip install ninja`
or `conda install ninja`. Alternatively, disable said tests with
`run_test.py --exclude test_cpp_extensions_aot_ninja test_cpp_extensions_jit`.
"""

PYTORCH_COLLECT_COVERAGE = bool(os.environ.get("PYTORCH_COLLECT_COVERAGE"))

# Variants of test_jit run under different executors.
JIT_EXECUTOR_TESTS = [
    "test_jit_profiling",
    "test_jit_legacy",
    "test_jit_fuser_legacy",
]

DISTRIBUTED_TESTS = [test for test in TESTS if test.startswith("distributed")]
def discover_functorch_tests():
    """Locate the functorch test modules shipped next to this repository.

    Returns:
        list[str]: absolute paths of the discovered functorch test files.
    """
    repo_root = pathlib.Path(__file__).resolve().parent.parent
    functorch_dir = os.path.join(repo_root, 'functorch', 'test')
    discovered = discover_tests(pathlib.Path(functorch_dir))
    paths = [os.path.join(functorch_dir, name) for name in discovered]

    # Sanity check: functorch ships well more than a handful of test files.
    assert len(paths) >= 8
    return paths
# Resolved once at import time; requires the functorch sources to be present.
FUNCTORCH_TESTS = discover_functorch_tests()

# These suites exercise linear-algebra ops and are skipped on no-LAPACK builds.
TESTS_REQUIRING_LAPACK = [
    "distributions/test_constraints",
    "distributions/test_distributions",
]
def print_to_stderr(message):
    """Write *message* (plus a newline) to stderr, keeping stdout clean for reports."""
    sys.stderr.write(f"{message}\n")
def get_executable_command(options, allow_pytest, disable_coverage=False):
    """Build the argv prefix used to launch one test module.

    Args:
        options: parsed CLI namespace; ``coverage`` and ``pytest`` are read.
        allow_pytest (bool): whether pytest may drive this particular module.
        disable_coverage (bool): force a plain interpreter even when
            ``options.coverage`` is set.

    Returns:
        list[str]: e.g. ``[sys.executable, "-bb"]`` or a ``coverage run`` prefix,
        optionally extended with ``-m pytest``.
    """
    use_coverage = options.coverage and not disable_coverage
    if use_coverage:
        executable = ["coverage", "run", "--parallel-mode", "--source=torch"]
    else:
        # -bb: raise on bytes/str comparison mistakes.
        executable = [sys.executable, "-bb"]

    if options.pytest:
        if not allow_pytest:
            print_to_stderr(
                "Pytest cannot be used for this test. Falling back to unittest."
            )
        else:
            executable += ["-m", "pytest"]
            # xdoctest integration is intentionally left disabled for now.
    return executable
def run_test(
    test_module, test_directory, options, launcher_cmd=None, extra_unittest_args=None
):
    """Run one test module in a child process and return its exit code.

    Args:
        test_module (str): module name, e.g. ``"test_torch"`` (``.py`` is appended).
        test_directory (str): working directory for the child process.
        options: parsed command-line options (see ``parse_args``).
        launcher_cmd (list, optional): command prefix such as an ``mpiexec`` invocation.
        extra_unittest_args (list, optional): extra args for the child process;
            when present, pytest is not allowed for this run.

    Returns:
        int: child return code (negative N means killed by signal N).
    """
    unittest_args = options.additional_unittest_args.copy()
    if options.verbose:
        unittest_args.append(f'-{"v"*options.verbose}')  # in case of pytest
    if test_module in RUN_PARALLEL_BLOCKLIST:
        # Strip any --run-parallel flags for modules that must run serially.
        unittest_args = [
            arg for arg in unittest_args if not arg.startswith("--run-parallel")
        ]
    if extra_unittest_args:
        assert isinstance(extra_unittest_args, list)
        unittest_args.extend(extra_unittest_args)

    # If using pytest, replace -f with equivalent -x
    if options.pytest:
        unittest_args = [arg if arg != "-f" else "-x" for arg in unittest_args]
    elif IS_CI:
        # use the downloaded test cases configuration, not supported in pytest
        unittest_args.extend(["--import-slow-tests", "--import-disabled-tests"])

    # Extra arguments are not supported with pytest
    executable = get_executable_command(
        options, allow_pytest=not extra_unittest_args
    )

    # Can't call `python -m unittest test_*` here because it doesn't run code
    # in `if __name__ == '__main__': `. So call `python test_*.py` instead.
    argv = [test_module + ".py"] + unittest_args

    command = (launcher_cmd or []) + executable + argv
    print_to_stderr("Executing {} ... [{}]".format(command, datetime.now()))
    return shell(command, test_directory)
def test_cuda_primary_ctx(test_module, test_directory, options):
    """Custom handler: primary-context tests must each run in a fresh subprocess."""
    subprocess_args = ["--subprocess"]
    return run_test(
        test_module, test_directory, options, extra_unittest_args=subprocess_args
    )
# Handler that forwards to run_test() with --subprocess, for suites whose
# cases must each run in their own child process.
run_test_with_subprocess = functools.partial(run_test, extra_unittest_args=["--subprocess"])


def get_run_test_with_subprocess_fn():
    """Return a ``(test_module, test_directory, options)`` handler that runs the
    module with ``--subprocess``.

    The previous implementation wrapped ``run_test_with_subprocess`` in a
    redundant lambda; the partial already accepts exactly that call signature,
    so it can be returned directly.
    """
    return run_test_with_subprocess
def _test_cpp_extensions_aot(test_directory, options, use_ninja):
    """Build the ahead-of-time C++ extension modules, then run their test suite.

    The cpp_extensions package is built via ``setup.py install`` into a local
    ``install/`` tree which is put on PYTHONPATH while the matching
    ``test_cpp_extensions_aot_(ninja|no_ninja)`` module runs.

    Returns:
        int: the first non-zero build return code, or the test run's return code.
    """
    if use_ninja:
        try:
            cpp_extension.verify_ninja_availability()
        except RuntimeError:
            print(CPP_EXTENSIONS_ERROR)
            return 1

    # Wipe the build folder, if it exists already
    cpp_extensions_test_dir = os.path.join(test_directory, "cpp_extensions")
    cpp_extensions_test_build_dir = os.path.join(cpp_extensions_test_dir, "build")
    if os.path.exists(cpp_extensions_test_build_dir):
        shutil.rmtree(cpp_extensions_test_build_dir)

    # Build the test cpp extensions modules
    shell_env = os.environ.copy()
    shell_env["USE_NINJA"] = str(1 if use_ninja else 0)
    cmd = [sys.executable, "setup.py", "install", "--root", "./install"]
    return_code = shell(cmd, cwd=cpp_extensions_test_dir, env=shell_env)
    if return_code != 0:
        return return_code
    if sys.platform != "win32":
        # Also build the no-ABI-suffix variant (not supported on Windows).
        return_code = shell(
            cmd,
            cwd=os.path.join(cpp_extensions_test_dir, "no_python_abi_suffix_test"),
            env=shell_env,
        )
        if return_code != 0:
            return return_code

    # "install" the test modules and run tests
    python_path = os.environ.get("PYTHONPATH", "")
    from shutil import copyfile
    # Propagate the ninja choice to the child test process via the environment.
    os.environ['USE_NINJA'] = shell_env['USE_NINJA']
    test_module = "test_cpp_extensions_aot" + ("_ninja" if use_ninja else "_no_ninja")
    # Copy the shared test file under a flavor-specific module name so test
    # reports distinguish the ninja and no-ninja runs.
    copyfile(
        test_directory + "/test_cpp_extensions_aot.py",
        test_directory + "/" + test_module + ".py",
    )
    try:
        cpp_extensions = os.path.join(test_directory, "cpp_extensions")
        install_directory = ""
        # install directory is the one that is named site-packages
        for root, directories, _ in os.walk(os.path.join(cpp_extensions, "install")):
            for directory in directories:
                if "-packages" in directory:
                    install_directory = os.path.join(root, directory)

        assert install_directory, "install_directory must not be empty"
        os.environ["PYTHONPATH"] = os.pathsep.join([install_directory, python_path])
        return run_test(test_module, test_directory, options)
    finally:
        # Restore the environment and remove the temporary module copy.
        os.environ["PYTHONPATH"] = python_path
        if os.path.exists(test_directory + "/" + test_module + ".py"):
            os.remove(test_directory + "/" + test_module + ".py")
        os.environ.pop('USE_NINJA')
def test_cpp_extensions_aot_ninja(test_module, test_directory, options):
    """Custom handler: build and run the AOT cpp-extension tests with ninja enabled."""
    return _test_cpp_extensions_aot(test_directory, options, True)
def test_cpp_extensions_aot_no_ninja(test_module, test_directory, options):
    """Custom handler: build and run the AOT cpp-extension tests without ninja."""
    return _test_cpp_extensions_aot(test_directory, options, False)
def test_distributed(test_module, test_directory, options):
    """Custom handler: run *test_module* once per configured distributed backend
    and per init-method flavor (env:// and file://).

    Environment variables (TEMP_DIR, BACKEND, INIT_METHOD, plus the backend's
    entries from DISTRIBUTED_TESTS_CONFIG) are exported before each run and
    fully restored afterwards.

    Returns:
        int: 0 on success, otherwise the first failing run's return code.
    """
    # MPI tests are broken with Python-3.9
    mpi_available = subprocess.call(
        "command -v mpiexec", shell=True
    ) == 0 and sys.version_info < (3, 9)
    if options.verbose and not mpi_available:
        print_to_stderr("MPI not available -- MPI backend tests will be skipped")
    config = DISTRIBUTED_TESTS_CONFIG
    for backend, env_vars in config.items():
        # Only gloo is supported on Windows.
        if sys.platform == "win32" and backend != "gloo":
            continue
        if backend == "mpi" and not mpi_available:
            continue
        for with_init_file in {True, False}:
            if sys.platform == "win32" and not with_init_file:
                continue
            tmp_dir = tempfile.mkdtemp()
            if options.verbose:
                init_str = "with {} init_method"
                with_init = init_str.format("file" if with_init_file else "env")
                print_to_stderr(
                    "Running distributed tests for the {} backend {}".format(
                        backend, with_init
                    )
                )
            # Snapshot the environment so it can be restored after the run.
            old_environ = dict(os.environ)
            os.environ["TEMP_DIR"] = tmp_dir
            os.environ["BACKEND"] = backend
            os.environ["INIT_METHOD"] = "env://"
            os.environ.update(env_vars)
            if with_init_file:
                if test_module == "test_distributed_spawn":
                    init_method = f"{FILE_SCHEMA}{tmp_dir}/"
                else:
                    init_method = f"{FILE_SCHEMA}{tmp_dir}/shared_init_file"
                os.environ["INIT_METHOD"] = init_method
            try:
                os.mkdir(os.path.join(tmp_dir, "barrier"))
                os.mkdir(os.path.join(tmp_dir, "test_dir"))
                if backend == "mpi":
                    # test mpiexec for --noprefix option
                    with open(os.devnull, "w") as devnull:
                        # Probe whether this mpiexec accepts --allow-run-as-root
                        # (needed when CI runs as root).
                        allowrunasroot_opt = (
                            "--allow-run-as-root"
                            if subprocess.call(
                                'mpiexec --allow-run-as-root -n 1 bash -c ""',
                                shell=True,
                                stdout=devnull,
                                stderr=subprocess.STDOUT,
                            )
                            == 0
                            else ""
                        )
                        # Probe whether this mpiexec accepts --noprefix.
                        noprefix_opt = (
                            "--noprefix"
                            if subprocess.call(
                                f'mpiexec {allowrunasroot_opt} -n 1 --noprefix bash -c ""',
                                shell=True,
                                stdout=devnull,
                                stderr=subprocess.STDOUT,
                            )
                            == 0
                            else ""
                        )

                    mpiexec = ["mpiexec", "-n", "3", noprefix_opt, allowrunasroot_opt]

                    return_code = run_test(
                        test_module, test_directory, options, launcher_cmd=mpiexec
                    )
                else:
                    return_code = run_test(test_module, test_directory, options, extra_unittest_args=["--subprocess"])
                if return_code != 0:
                    return return_code
            finally:
                # Always clean the temp dir and restore the pre-run environment.
                shutil.rmtree(tmp_dir)
                os.environ.clear()
                os.environ.update(old_environ)
    return 0
# Maps test module name -> handler called instead of plain ``run_test``.
# Every handler shares the signature ``(test_module, test_directory, options) -> int``.
CUSTOM_HANDLERS = {
    "test_cuda_primary_ctx": test_cuda_primary_ctx,
    "test_cuda_trace": get_run_test_with_subprocess_fn(),
    "test_cpp_extensions_aot_no_ninja": test_cpp_extensions_aot_no_ninja,
    "test_cpp_extensions_aot_ninja": test_cpp_extensions_aot_ninja,
    "distributed/test_distributed_spawn": test_distributed,
    "distributed/algorithms/quantization/test_quantization": test_distributed,
    "distributed/test_c10d_nccl": get_run_test_with_subprocess_fn(),
    "distributed/test_c10d_gloo": get_run_test_with_subprocess_fn(),
    "distributed/test_c10d_common": get_run_test_with_subprocess_fn(),
    "distributed/test_c10d_spawn_gloo": get_run_test_with_subprocess_fn(),
    "distributed/test_c10d_spawn_nccl": get_run_test_with_subprocess_fn(),
    "distributed/test_store": get_run_test_with_subprocess_fn(),
    "distributed/test_pg_wrapper": get_run_test_with_subprocess_fn(),
    "distributed/rpc/test_faulty_agent": get_run_test_with_subprocess_fn(),
    "distributed/rpc/test_tensorpipe_agent": get_run_test_with_subprocess_fn(),
    "distributed/rpc/test_share_memory": get_run_test_with_subprocess_fn(),
    "distributed/rpc/cuda/test_tensorpipe_agent": get_run_test_with_subprocess_fn(),
}
def parse_test_module(test):
    """Strip a dotted selector such as ``torch.TestTorch.test_acos`` down to its
    module name (the text before the first ``.``)."""
    module_name, _, _ = test.partition(".")
    return module_name
class TestChoices(list):
    """List of test names whose membership check matches on module name.

    argparse validates ``value in choices``; overriding ``__contains__`` lets
    users pass ``module.TestClass.test_name`` selectors on the command line
    and still match the bare module entry in TESTS.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(args[0])

    def __contains__(self, item):
        return super().__contains__(parse_test_module(item))
def parse_args():
    """Define and parse run_test.py's command-line interface.

    Returns:
        argparse.Namespace: options consumed by ``get_selected_tests`` and the
        per-module handlers. ``additional_unittest_args`` collects everything
        after ``--`` and is forwarded to the child test process.
    """
    parser = argparse.ArgumentParser(
        description="Run the PyTorch unit test suite",
        epilog="where TESTS is any of: {}".format(", ".join(TESTS)),
        formatter_class=argparse.RawTextHelpFormatter,
        parents=[common_parser]
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        default=0,
        help="print verbose information and test-by-test results",
    )
    # Fix: the next three options previously listed the same option string
    # twice (e.g. `"--jit", "--jit"`); argparse needs each string only once.
    parser.add_argument("--jit", action="store_true", help="run all jit tests")
    parser.add_argument(
        "--distributed-tests",
        action="store_true",
        help="run all distributed tests",
    )
    parser.add_argument(
        "--functorch",
        action="store_true",
        help=(
            "If this flag is present, we will only run functorch tests. "
            "If this flag is not present, we will not run any functorch tests. "
            "This requires functorch to already be installed."
        )
    )
    parser.add_argument(
        "-core",
        "--core",
        action="store_true",
        help="Only run core tests, or tests that validate PyTorch's ops, modules,"
        "and autograd. They are defined by CORE_TEST_LIST."
    )
    parser.add_argument(
        "-pt",
        "--pytest",
        action="store_true",
        help="If true, use `pytest` to execute the tests. E.g., this runs "
        "TestTorch with pytest in verbose and coverage mode: "
        "python run_test.py -vci torch -pt",
    )
    parser.add_argument(
        "-c",
        "--coverage",
        action="store_true",
        help="enable coverage",
        default=PYTORCH_COLLECT_COVERAGE,
    )
    parser.add_argument(
        "-i",
        "--include",
        nargs="+",
        choices=TestChoices(TESTS),
        default=TESTS,
        metavar="TESTS",
        help="select a set of tests to include (defaults to ALL tests)."
        " tests must be a part of the TESTS list defined in run_test.py",
    )
    parser.add_argument(
        "-x",
        "--exclude",
        nargs="+",
        choices=TESTS,
        metavar="TESTS",
        default=[],
        help="select a set of tests to exclude",
    )
    parser.add_argument(
        "-f",
        "--first",
        choices=TESTS,
        metavar="TESTS",
        help="select the test to start from (excludes previous tests)",
    )
    parser.add_argument(
        "-l",
        "--last",
        choices=TESTS,
        metavar="TESTS",
        help="select the last test to run (excludes following tests)",
    )
    parser.add_argument(
        "--bring-to-front",
        nargs="+",
        choices=TestChoices(TESTS),
        default=[],
        metavar="TESTS",
        help="select a set of tests to run first. This can be used in situations"
        " where you want to run all tests, but care more about some set, "
        "e.g. after making a change to a specific component",
    )
    parser.add_argument(
        "--ignore-win-blocklist",
        action="store_true",
        help="always run blocklisted windows tests",
    )
    # NS: Disable target determination until it can be made more reliable
    # parser.add_argument(
    #     "--determine-from",
    #     help="File of affected source filenames to determine which tests to run.",
    # )
    parser.add_argument(
        "--continue-through-error",
        action="store_true",
        help="Runs the full test suite despite one of the tests failing",
        default=strtobool(os.environ.get("CONTINUE_THROUGH_ERROR", "False")),
    )
    parser.add_argument(
        "additional_unittest_args",
        nargs="*",
        help="additional arguments passed through to unittest, e.g., "
        "python run_test.py -i sparse -- TestSparse.test_factory_size_check",
    )
    parser.add_argument(
        "--shard",
        nargs=2,
        type=int,
        help="runs a shard of the tests (taking into account other selections), e.g., "
        "--shard 2 3 will break up the selected tests into 3 shards and run the tests "
        "in the 2nd shard (the first number should not exceed the second)",
    )
    parser.add_argument(
        "--exclude-jit-executor",
        action="store_true",
        help="exclude tests that are run for a specific jit config",
    )
    parser.add_argument(
        "--exclude-distributed-tests",
        action="store_true",
        help="exclude distributed tests",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Only list the test that will run.",
    )
    return parser.parse_args()
def find_test_index(test, selected_tests, find_last_index=False):
    """Locate *test* within *selected_tests* by prefix match.

    ``selected_tests`` may contain several consecutive entries belonging to the
    same module, e.g.::

        selected_tests = ['autograd', 'cuda', 'torch.TestTorch.test_acos',
                          'torch.TestTorch.test_tan', 'torch.TestTorch.test_add', 'utils']

    With ``test='torch'`` the first match is index 2 and the last is index 4.
    Used to slice the selection for ``options.first`` / ``options.last``.

    Args:
        test (str): name of the test to look up
        selected_tests (list): candidate test names
        find_last_index (bool, optional): return the last matching index
            instead of the first one

    Returns:
        Index of the first (or last) occurrence, or -1 when nothing matches.
    """
    match_idx = -1
    for position, candidate in enumerate(selected_tests):
        if candidate.startswith(test):
            match_idx = position
            if not find_last_index:
                break
    return match_idx
def exclude_tests(exclude_list, selected_tests, exclude_message=None):
    """Drop every entry of *selected_tests* that starts with a name in *exclude_list*.

    The list is mutated in place and also returned, so callers may use either
    form. When *exclude_message* is given, each exclusion is reported on stderr.
    """
    for pattern in exclude_list:
        # Iterate over a snapshot so removal from the live list is safe.
        for candidate in list(selected_tests):
            if candidate.startswith(pattern):
                if exclude_message is not None:
                    print_to_stderr("Excluding {} {}".format(candidate, exclude_message))
                selected_tests.remove(candidate)
    return selected_tests
def get_selected_tests(options):
    """Turn the parsed CLI options into the final ordered list of test modules.

    Applies, in order: inclusion filters (--jit/--distributed-tests/--core/
    --functorch), reordering (--bring-to-front/--first/--last), exclusions
    (--exclude and platform blocklists), sharding (--shard), and build-feature
    gates (distributed, LAPACK).

    Returns:
        list[str]: the test modules to run, in order.
    """
    selected_tests = options.include

    # filter if there's JIT only and distributed only test options
    if options.jit:
        selected_tests = list(
            filter(lambda test_name: "jit" in test_name, selected_tests)
        )

    if options.distributed_tests:
        selected_tests = list(
            filter(lambda test_name: test_name in DISTRIBUTED_TESTS, selected_tests)
        )

    # Filter to only run core tests when --core option is specified
    if options.core:
        selected_tests = list(
            filter(lambda test_name: test_name in CORE_TEST_LIST, selected_tests)
        )

    # --functorch replaces the selection entirely.
    if options.functorch:
        selected_tests = FUNCTORCH_TESTS

    # process reordering
    if options.bring_to_front:
        to_front = set(options.bring_to_front)
        selected_tests = options.bring_to_front + list(
            filter(lambda name: name not in to_front, selected_tests)
        )

    if options.first:
        first_index = find_test_index(options.first, selected_tests)
        selected_tests = selected_tests[first_index:]

    if options.last:
        last_index = find_test_index(options.last, selected_tests, find_last_index=True)
        selected_tests = selected_tests[: last_index + 1]

    # process exclusion
    if options.exclude_jit_executor:
        options.exclude.extend(JIT_EXECUTOR_TESTS)

    if options.exclude_distributed_tests:
        options.exclude.extend(DISTRIBUTED_TESTS)

    # these tests failing in CUDA 11.6 temporary disabling. issue https://github.com/pytorch/pytorch/issues/75375
    if torch.version.cuda is not None and LooseVersion(torch.version.cuda) >= "11.6":
        options.exclude.extend(["distributions/test_constraints"])

    selected_tests = exclude_tests(options.exclude, selected_tests)

    if sys.platform == "win32" and not options.ignore_win_blocklist:
        target_arch = os.environ.get("VSCMD_ARG_TGT_ARCH")
        if target_arch != "x64":
            # C++ extension builds are only supported on x64 Windows.
            WINDOWS_BLOCKLIST.append("cpp_extensions_aot_no_ninja")
            WINDOWS_BLOCKLIST.append("cpp_extensions_aot_ninja")
            WINDOWS_BLOCKLIST.append("cpp_extensions_jit")
            WINDOWS_BLOCKLIST.append("jit")
            WINDOWS_BLOCKLIST.append("jit_fuser")

        # This is exception that's caused by this issue https://github.com/pytorch/pytorch/issues/69460
        # This below code should be removed once this issue is solved
        if torch.version.cuda is not None and LooseVersion(torch.version.cuda) >= "11.5":
            WINDOWS_BLOCKLIST.append("test_cpp_extensions_aot")
            WINDOWS_BLOCKLIST.append("test_cpp_extensions_aot_ninja")
            WINDOWS_BLOCKLIST.append("test_cpp_extensions_aot_no_ninja")

        selected_tests = exclude_tests(WINDOWS_BLOCKLIST, selected_tests, "on Windows")

    elif TEST_WITH_ROCM:
        selected_tests = exclude_tests(ROCM_BLOCKLIST, selected_tests, "on ROCm")

    # sharding
    if options.shard:
        assert len(options.shard) == 2, "Unexpected shard format"
        assert min(options.shard) > 0, "Shards must be positive numbers"
        which_shard, num_shards = options.shard
        assert (
            which_shard <= num_shards
        ), "Selected shard must be less than or equal to total number of shards"
        assert num_shards <= len(
            selected_tests
        ), f"Number of shards must be less than {len(selected_tests)}"

        if num_shards == 1:
            return selected_tests

        # Download previous test times to make sharding decisions
        path = os.path.join(str(REPO_ROOT), TEST_TIMES_FILE)
        if os.path.exists(path):
            with open(path, "r") as f:
                test_file_times = cast(Dict[str, Any], json.load(f))
        else:
            test_file_times = {}
        test_config = os.environ.get("TEST_CONFIG")
        if test_config not in test_file_times:
            # No timing data: round-robin the tests across shards.
            print(
                "::warning:: Gathered no stats from artifacts. Proceeding with default sharding plan."
            )
            selected_tests = selected_tests[which_shard - 1 :: num_shards]
        else:
            # Timing data available: balance shards by measured duration.
            print("Found test time stats from artifacts")
            test_file_times_config = test_file_times[test_config]
            shards = calculate_shards(num_shards, selected_tests, test_file_times_config)
            _, tests_from_shard = shards[which_shard - 1]
            selected_tests = tests_from_shard

    # skip all distributed tests if distributed package is not available.
    if not dist.is_available():
        selected_tests = exclude_tests(DISTRIBUTED_TESTS, selected_tests,
                                       "PyTorch is built without distributed support.")

    # skip tests that require LAPACK when it's not available
    if not torch._C.has_lapack:
        selected_tests = exclude_tests(TESTS_REQUIRING_LAPACK, selected_tests,
                                       "PyTorch is built without LAPACK support.")

    return selected_tests
def run_test_module(test: str, test_directory: str, options) -> Optional[str]:
    """Execute one selected test module and describe its failure, if any.

    Dispatches to the module's entry in CUSTOM_HANDLERS when present,
    otherwise to plain ``run_test``.

    Returns:
        None on success, otherwise a human-readable failure message (including
        the killing signal's name when the child process died on a signal).
    """
    test_module = parse_test_module(test)

    # Printing the date here can help diagnose which tests are slow
    print_to_stderr("Running {} ... [{}]".format(test, datetime.now()))
    handler = CUSTOM_HANDLERS.get(test_module, run_test)
    return_code = handler(test_module, test_directory, options)
    # Guard against handlers returning True/False instead of an exit code.
    assert isinstance(return_code, int) and not isinstance(
        return_code, bool
    ), "Return code should be an integer"
    if return_code == 0:
        return None

    message = f"{test} failed!"
    if return_code < 0:
        # subprocess.Popen returns the child process' exit signal as
        # return code -N, where N is the signal number.
        signal_name = SIGNALS_TO_NAMES_DICT[-return_code]
        message += f" Received signal: {signal_name}"
    return message
def main():
    """Entry point: select tests from the CLI options and run them in order.

    Honors --dry-run (list only), --coverage (wraps runs in coverage and emits
    a report at the end), and --continue-through-error (collects failures
    instead of stopping at the first one, then exits 1 if any occurred).
    """
    options = parse_args()

    test_directory = str(REPO_ROOT / "test")
    selected_tests = get_selected_tests(options)

    if options.verbose:
        print_to_stderr("Selected tests:\n {}".format("\n ".join(selected_tests)))

    if options.dry_run:
        return

    if options.coverage and not PYTORCH_COLLECT_COVERAGE:
        shell(["coverage", "erase"])

    if IS_CI:
        selected_tests = get_reordered_tests(selected_tests)
        # downloading test cases configuration to local environment
        get_test_case_configs(dirpath=test_directory)

    has_failed = False
    failure_messages = []
    try:
        for test in selected_tests:
            # Each module gets its own copy so handlers can mutate options freely.
            options_clone = copy.deepcopy(options)
            if test in USE_PYTEST_LIST:
                options_clone.pytest = True
            err_message = run_test_module(test, test_directory, options_clone)
            if err_message is None:
                continue
            has_failed = True
            failure_messages.append(err_message)
            if not options_clone.continue_through_error:
                raise RuntimeError(err_message)
            print_to_stderr(err_message)
    finally:
        if options.coverage:
            from coverage import Coverage

            with set_cwd(test_directory):
                cov = Coverage()
                if PYTORCH_COLLECT_COVERAGE:
                    cov.load()
                cov.combine(strict=False)
                cov.save()
                if not PYTORCH_COLLECT_COVERAGE:
                    cov.html_report()

        # When failures were deferred via --continue-through-error, report
        # them all here and exit non-zero.
        if options.continue_through_error and has_failed:
            for err in failure_messages:
                print_to_stderr(err)
            sys.exit(1)
if __name__ == "__main__":
main()
| pytorch-master | test/run_test.py |
# Owner(s): ["module: cuda"]
import sys
import unittest
import unittest.mock
import torch
import torch.utils._cuda_trace as cuda_trace
from torch.testing._internal.common_utils import TestCase, run_tests
# NOTE: Each test needs to be run in a brand new process, to reset the registered hooks
# and make sure the CUDA streams are initialized for each test that uses them.
# We cannot import TEST_CUDA from torch.testing._internal.common_cuda here,
# because if we do that, the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed
# multiple times as well during the execution of this test suite, and it will
# cause CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()

if not TEST_CUDA:
    print("CUDA not available, skipping tests", file=sys.stderr)
    # Rebinding TestCase to `object` turns the class below into a plain object,
    # so no tests are collected on CUDA-less machines.
    TestCase = object  # noqa: F811
class TestCudaTrace(TestCase):
    """Verifies that each hook in torch.utils._cuda_trace fires with the
    expected arguments (raw event/stream handles and data pointers)."""

    def setUp(self):
        torch._C._activate_cuda_trace()
        # One fresh MagicMock per test; it is registered as the trace callback
        # and then asserted on.
        self.mock = unittest.mock.MagicMock()

    def test_event_creation_callback(self):
        cuda_trace.register_callback_for_cuda_event_creation(self.mock)

        event = torch.cuda.Event()
        event.record()
        # The callback receives the underlying CUDA event handle.
        self.mock.assert_called_once_with(event._as_parameter_.value)

    def test_event_deletion_callback(self):
        cuda_trace.register_callback_for_cuda_event_deletion(self.mock)

        event = torch.cuda.Event()
        event.record()
        # Capture the handle before deleting; the callback fires on deletion.
        event_id = event._as_parameter_.value
        del event
        self.mock.assert_called_once_with(event_id)

    def test_event_record_callback(self):
        cuda_trace.register_callback_for_cuda_event_record(self.mock)

        event = torch.cuda.Event()
        event.record()
        # Record callbacks receive (event handle, stream handle).
        self.mock.assert_called_once_with(
            event._as_parameter_.value, torch.cuda.default_stream().cuda_stream
        )

    def test_event_wait_callback(self):
        cuda_trace.register_callback_for_cuda_event_wait(self.mock)

        event = torch.cuda.Event()
        event.record()
        event.wait()
        self.mock.assert_called_once_with(
            event._as_parameter_.value, torch.cuda.default_stream().cuda_stream
        )

    def test_memory_allocation_callback(self):
        cuda_trace.register_callback_for_cuda_memory_allocation(self.mock)

        tensor = torch.empty(10, 4, device="cuda")
        self.mock.assert_called_once_with(tensor.data_ptr())

    def test_memory_deallocation_callback(self):
        cuda_trace.register_callback_for_cuda_memory_deallocation(self.mock)

        tensor = torch.empty(3, 8, device="cuda")
        data_ptr = tensor.data_ptr()
        del tensor
        self.mock.assert_called_once_with(data_ptr)

    def test_stream_creation_callback(self):
        cuda_trace.register_callback_for_cuda_stream_creation(self.mock)

        torch.cuda.Stream()
        self.mock.assert_called()

    def test_all_trace_callbacks_called(self):
        # Several callbacks can be registered on the same hook; all must fire.
        other = unittest.mock.MagicMock()
        cuda_trace.register_callback_for_cuda_memory_allocation(self.mock)
        cuda_trace.register_callback_for_cuda_memory_allocation(other)

        tensor = torch.empty(10, 4, device="cuda")
        self.mock.assert_called_once_with(tensor.data_ptr())
        other.assert_called_once_with(tensor.data_ptr())
if __name__ == "__main__":
run_tests()
| pytorch-master | test/test_cuda_trace.py |
# Owner(s): ["module: cpp-extensions"]
import os
import unittest
import torch.testing._internal.common_utils as common
from torch.testing._internal.common_utils import IS_WINDOWS
from torch.testing._internal.common_cuda import TEST_CUDA
import torch
import torch.backends.cudnn
import torch.utils.cpp_extension
try:
import pytest
HAS_PYTEST = True
except ImportError as e:
HAS_PYTEST = False
# TODO: Rewrite these tests so that they can be collected via pytest without
# using run_test.py
try:
if HAS_PYTEST:
cpp_extension = pytest.importorskip("torch_test_cpp_extension.cpp")
ort_extension = pytest.importorskip("torch_test_cpp_extension.ort")
rng_extension = pytest.importorskip("torch_test_cpp_extension.rng")
else:
import torch_test_cpp_extension.cpp as cpp_extension
import torch_test_cpp_extension.ort as ort_extension
import torch_test_cpp_extension.rng as rng_extension
except ImportError as e:
raise RuntimeError(
"test_cpp_extensions_aot.py cannot be invoked directly. Run "
"`python run_test.py -i test_cpp_extensions_aot_ninja` instead."
) from e
class TestCppExtensionAOT(common.TestCase):
    """Tests ahead-of-time cpp extensions

    NOTE: run_test.py's test_cpp_extensions_aot_ninja target
    also runs this test case, but with ninja enabled. If you are debugging
    a test failure here from the CI, check the logs for which target
    (test_cpp_extensions_aot_no_ninja vs test_cpp_extensions_aot_ninja)
    failed.
    """

    def test_extension_function(self):
        # sigmoid_add(x, y) is expected to compute sigmoid(x) + sigmoid(y).
        x = torch.randn(4, 4)
        y = torch.randn(4, 4)
        z = cpp_extension.sigmoid_add(x, y)
        self.assertEqual(z, x.sigmoid() + y.sigmoid())

    def test_extension_module(self):
        mm = cpp_extension.MatrixMultiplier(4, 8)
        weights = torch.rand(8, 4, dtype=torch.double)
        expected = mm.get().mm(weights)
        result = mm.forward(weights)
        self.assertEqual(expected, result)

    def test_backward(self):
        # The extension's forward must participate in autograd: gradients flow
        # to both the Python-side weights and the extension's internal tensor.
        mm = cpp_extension.MatrixMultiplier(4, 8)
        weights = torch.rand(8, 4, dtype=torch.double, requires_grad=True)
        result = mm.forward(weights)
        result.sum().backward()
        tensor = mm.get()

        expected_weights_grad = tensor.t().mm(torch.ones([4, 4], dtype=torch.double))
        self.assertEqual(weights.grad, expected_weights_grad)

        expected_tensor_grad = torch.ones([4, 4], dtype=torch.double).mm(weights.t())
        self.assertEqual(tensor.grad, expected_tensor_grad)

    @unittest.skipIf(not TEST_CUDA, "CUDA not found")
    def test_cuda_extension(self):
        import torch_test_cpp_extension.cuda as cuda_extension

        x = torch.zeros(100, device="cuda", dtype=torch.float32)
        y = torch.zeros(100, device="cuda", dtype=torch.float32)

        z = cuda_extension.sigmoid_add(x, y).cpu()

        # 2 * sigmoid(0) = 2 * 0.5 = 1
        self.assertEqual(z, torch.ones_like(z))

    @common.skipIfRocm
    @unittest.skipIf(common.IS_WINDOWS, "Windows not supported")
    @unittest.skipIf(not TEST_CUDA, "CUDA not found")
    def test_cublas_extension(self):
        from torch_test_cpp_extension import cublas_extension

        x = torch.zeros(100, device="cuda", dtype=torch.float32)
        z = cublas_extension.noop_cublas_function(x)
        self.assertEqual(z, x)

    @common.skipIfRocm
    @unittest.skipIf(common.IS_WINDOWS, "Windows not supported")
    @unittest.skipIf(not TEST_CUDA, "CUDA not found")
    def test_cusolver_extension(self):
        from torch_test_cpp_extension import cusolver_extension

        x = torch.zeros(100, device="cuda", dtype=torch.float32)
        z = cusolver_extension.noop_cusolver_function(x)
        self.assertEqual(z, x)

    @unittest.skipIf(IS_WINDOWS, "Not available on Windows")
    def test_no_python_abi_suffix_sets_the_correct_library_name(self):
        # For this test, run_test.py will call `python setup.py install` in the
        # cpp_extensions/no_python_abi_suffix_test folder, where the
        # `BuildExtension` class has a `no_python_abi_suffix` option set to
        # `True`. This *should* mean that on Python 3, the produced shared
        # library does not have an ABI suffix like
        # "cpython-37m-x86_64-linux-gnu" before the library suffix, e.g. "so".
        root = os.path.join("cpp_extensions", "no_python_abi_suffix_test", "build")
        matches = [f for _, _, fs in os.walk(root) for f in fs if f.endswith("so")]
        self.assertEqual(len(matches), 1, msg=str(matches))
        self.assertEqual(matches[0], "no_python_abi_suffix_test.so", msg=str(matches))

    def test_optional(self):
        # The extension exposes a function taking c10::optional<Tensor>.
        has_value = cpp_extension.function_taking_optional(torch.ones(5))
        self.assertTrue(has_value)
        has_value = cpp_extension.function_taking_optional(None)
        self.assertFalse(has_value)

    @common.skipIfRocm
    @unittest.skipIf(common.IS_WINDOWS, "Windows not supported")
    @unittest.skipIf(not TEST_CUDA, "CUDA not found")
    @unittest.skipIf(os.getenv('USE_NINJA', '0') == '0', "cuda extension with dlink requires ninja to build")
    def test_cuda_dlink_libs(self):
        from torch_test_cpp_extension import cuda_dlink

        a = torch.randn(8, dtype=torch.float, device='cuda')
        b = torch.randn(8, dtype=torch.float, device='cuda')
        ref = a + b
        test = cuda_dlink.add(a, b)
        self.assertEqual(test, ref)
class TestORTTensor(common.TestCase):
    """Tests dispatch to the out-of-tree 'ort' device registered by the
    extension; ort_extension.get_test_int() reports which override last ran."""

    def test_unregistered(self):
        a = torch.arange(0, 10, device='cpu')
        # arange has no ORT kernel registered, so dispatch must fail.
        with self.assertRaisesRegex(RuntimeError, "Could not run"):
            b = torch.arange(0, 10, device='ort')

    def test_zeros(self):
        a = torch.empty(5, 5, device='cpu')
        self.assertEqual(a.device, torch.device('cpu'))

        b = torch.empty(5, 5, device='ort')
        self.assertEqual(b.device, torch.device('ort', 0))
        self.assertEqual(ort_extension.get_test_int(), 0)
        self.assertEqual(torch.get_default_dtype(), b.dtype)

        c = torch.empty((5, 5), dtype=torch.int64, device='ort')
        self.assertEqual(ort_extension.get_test_int(), 0)
        self.assertEqual(torch.int64, c.dtype)

    def test_add(self):
        a = torch.empty(5, 5, device='ort', requires_grad=True)
        self.assertEqual(ort_extension.get_test_int(), 0)

        b = torch.empty(5, 5, device='ort')
        self.assertEqual(ort_extension.get_test_int(), 0)

        c = a + b
        # get_test_int() == 1 signals that the extension's add kernel ran.
        self.assertEqual(ort_extension.get_test_int(), 1)

    def test_conv_backend_override(self):
        # To simplify tests, we use 4d input here to avoid doing view4d( which
        # needs more overrides) in _convolution.
        input = torch.empty(2, 4, 10, 2, device='ort', requires_grad=True)
        weight = torch.empty(6, 4, 2, 2, device='ort', requires_grad=True)
        bias = torch.empty(6, device='ort')

        # Make sure forward is overriden
        out = torch.nn.functional.conv2d(input, weight, bias, 2, 0, 1, 1)
        self.assertEqual(ort_extension.get_test_int(), 2)
        self.assertEqual(out.shape[0], input.shape[0])
        self.assertEqual(out.shape[1], weight.shape[0])

        # Make sure backward is overriden
        # Double backward is dispatched to _convolution_double_backward.
        # It is not tested here as it involves more computation/overrides.
        grad = torch.autograd.grad(out, input, out, create_graph=True)
        self.assertEqual(ort_extension.get_test_int(), 3)
        self.assertEqual(grad[0].shape, input.shape)
class TestRNGExtension(common.TestCase):
    """Tests for the custom random-number-generator C++ extension."""

    def setUp(self):
        super().setUp()

    def test_rng(self):
        """random_() with the extension's generator must always yield 42, and
        generator instances must be reference-counted correctly."""
        all_42 = torch.full((10,), 42, dtype=torch.int64)

        # Default and stock CPU generators should (almost surely) not produce
        # ten 42s in a row.
        t = torch.empty(10, dtype=torch.int64).random_()
        self.assertNotEqual(t, all_42)

        gen = torch.Generator(device='cpu')
        t = torch.empty(10, dtype=torch.int64).random_(generator=gen)
        self.assertNotEqual(t, all_42)

        # One live instance while `gen`, an alias, and an identity-round-trip
        # all refer to the same underlying generator.
        self.assertEqual(rng_extension.getInstanceCount(), 0)
        gen = rng_extension.createTestCPUGenerator(42)
        self.assertEqual(rng_extension.getInstanceCount(), 1)
        alias = gen
        self.assertEqual(rng_extension.getInstanceCount(), 1)
        self.assertEqual(gen, alias)
        round_tripped = rng_extension.identity(alias)
        self.assertEqual(rng_extension.getInstanceCount(), 1)
        self.assertEqual(gen, round_tripped)

        # The custom generator always produces 42.
        t = torch.empty(10, dtype=torch.int64).random_(generator=gen)
        self.assertEqual(rng_extension.getInstanceCount(), 1)
        self.assertEqual(t, all_42)

        # The instance is destroyed only when the last reference goes away.
        del gen
        self.assertEqual(rng_extension.getInstanceCount(), 1)
        del alias
        self.assertEqual(rng_extension.getInstanceCount(), 1)
        del round_tripped
        self.assertEqual(rng_extension.getInstanceCount(), 0)
@unittest.skipIf(not TEST_CUDA, "CUDA not found")
class TestTorchLibrary(common.TestCase):
    """Checks a TORCH_LIBRARY-registered custom op from both eager mode and
    TorchScript."""

    def test_torch_library(self):
        # Importing the extension registers torch.ops.torch_library.logical_and.
        import torch_test_cpp_extension.torch_library  # noqa: F401

        def f(a: bool, b: bool):
            return torch.ops.torch_library.logical_and(a, b)

        self.assertTrue(f(True, True))
        self.assertFalse(f(True, False))
        self.assertFalse(f(False, True))
        self.assertFalse(f(False, False))
        s = torch.jit.script(f)
        self.assertTrue(s(True, True))
        self.assertFalse(s(True, False))
        self.assertFalse(s(False, True))
        self.assertFalse(s(False, False))
        # The custom op should appear by qualified name in the scripted graph.
        self.assertIn('torch_library::logical_and', str(s.graph))
if __name__ == "__main__":
common.run_tests()
| pytorch-master | test/test_cpp_extensions_aot.py |
# Owner(s): ["oncall: jit"]
from test_jit import JitTestCase
from torch.testing._internal.common_utils import run_tests
from typing import List, Tuple
class TestScript(JitTestCase):
    """TorchScript implementations of ``str`` builtins must match eager CPython."""

    def test_str_ops(self):
        # Each helper below is compiled with checkScript, which runs both the
        # eager and the scripted version and compares their results.
        def test_str_is(s: str) -> Tuple[bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool]:
            return s.isupper(), s.islower(), s.isdigit(), s.isspace(), \
                s.isalnum(), s.isalpha(), s.isdecimal(), s.isnumeric(), \
                s.isidentifier(), s.istitle(), s.isprintable()

        def test_str_to(s: str) -> Tuple[str, str, str, str, str]:
            return s.upper(), s.lower(), s.capitalize(), s.title(), s.swapcase()

        def test_str_strip(s: str) -> Tuple[str, str, str]:
            return (
                s.lstrip(),
                s.rstrip(),
                s.strip(),
            )

        def test_str_strip_char_set(s: str, char_set: str) -> Tuple[str, str, str]:
            return (
                s.lstrip(char_set),
                s.rstrip(char_set),
                s.strip(char_set),
            )

        # Inputs chosen to cover: empty string, whitespace-only, mixed
        # case/digit content, and a non-printable character ("\x0a").
        inputs = ["", "12a", "!B", "12", "a", "B", "aB", "$12", "B12", "AB ",
                  " \t", " \n", "\na", "abc", "123.3", "s a", "b12a ",
                  "more strings with spaces", "Titular Strings", "\x0acan'tprintthis",
                  "spaces at the end ", " begin"]

        def test_str_center(i: int, s: str) -> str:
            return s.center(i)

        def test_str_center_fc(i: int, s: str) -> str:
            return s.center(i, '*')

        def test_str_center_error(s: str) -> str:
            # A fill character longer than one char must raise.
            return s.center(10, '**')

        def test_ljust(s: str, i: int) -> str:
            return s.ljust(i)

        def test_ljust_fc(s: str, i: int, fc: str) -> str:
            return s.ljust(i, fc)

        def test_ljust_fc_err(s: str) -> str:
            # NOTE(review): defined but never invoked below - confirm intent.
            return s.ljust(10, '**')

        def test_rjust(s: str, i: int) -> str:
            return s.rjust(i)

        def test_rjust_fc(s: str, i: int, fc: str) -> str:
            return s.rjust(i, fc)

        def test_rjust_fc_err(s: str) -> str:
            # NOTE(review): defined but never invoked below - confirm intent.
            return s.rjust(10, '**')

        def test_zfill(s: str, i: int) -> str:
            return s.zfill(i)

        for input in inputs:
            self.checkScript(test_str_is, (input,))
            self.checkScript(test_str_to, (input,))
            self.checkScript(test_str_strip, (input,))
            for char_set in ["abc", "123", " ", "\t"]:
                self.checkScript(test_str_strip_char_set, (input, char_set))
            # Widths 0..6 exercise both no-op padding and real padding.
            for i in range(7):
                self.checkScript(test_str_center, (i, input,))
                self.checkScript(test_str_center_fc, (i, input,))
                self.checkScript(test_ljust, (input, i))
                self.checkScript(test_ljust_fc, (input, i, '*'))
                self.checkScript(test_rjust, (input, i))
                self.checkScript(test_rjust_fc, (input, i, '*'))
                self.checkScript(test_zfill, (input, i))

        with self.assertRaises(Exception):
            test_str_center_error("error")
            # NOTE(review): unreachable if the line above raises - confirm.
            test_ljust("error")

        def test_count() -> Tuple[int, int, int, int, int, int, int, int, int, int, int, int]:
            return (
                "hello".count("h"),
                "hello".count("h", 0, 1),
                "hello".count("h", -3),
                "hello".count("h", -10, 1),
                "hello".count("h", 0, -10),
                "hello".count("h", 0, 10),
                "hello".count("ell"),
                "hello".count("ell", 0, 1),
                "hello".count("ell", -3),
                "hello".count("ell", -10, 1),
                "hello".count("ell", 0, -10),
                "hello".count("ell", 0, 10)
            )
        self.checkScript(test_count, ())

        def test_endswith() -> Tuple[bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool]:
            return (
                "hello".endswith("lo"),
                "hello".endswith("lo", 0),
                "hello".endswith("lo", -2),
                "hello".endswith("lo", -8),
                "hello".endswith("lo", 0, -5),
                "hello".endswith("lo", -2, 3),
                "hello".endswith("lo", -8, 4),
                "hello".endswith("l"),
                "hello".endswith("l", 0),
                "hello".endswith("l", -2),
                "hello".endswith("l", -8),
                "hello".endswith("l", 0, -5),
                "hello".endswith("l", -2, 3),
                "hello".endswith("l", -8, 4)
            )
        self.checkScript(test_endswith, ())

        def test_startswith() -> Tuple[bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool]:
            return (
                "hello".startswith("lo"),
                "hello".startswith("lo", 0),
                "hello".startswith("lo", -2),
                "hello".startswith("lo", -8),
                "hello".startswith("lo", 0, -5),
                "hello".startswith("lo", -2, 3),
                "hello".startswith("lo", -8, 4),
                "hello".startswith("l"),
                "hello".startswith("l", 0),
                "hello".startswith("l", -2),
                "hello".startswith("l", -8),
                "hello".startswith("l", 0, -5),
                "hello".startswith("l", -2, 3),
                "hello".startswith("l", -8, 4)
            )
        self.checkScript(test_startswith, ())

        def test_expandtabs() -> Tuple[str, str, str, str, str, str]:
            return (
                'xyz\t82345\tabc'.expandtabs(),
                'xyz\t32345\tabc'.expandtabs(3),
                'xyz\t52345\tabc'.expandtabs(5),
                'xyz\t62345\tabc'.expandtabs(6),
                'xyz\t72345\tabc'.expandtabs(7),
                'xyz\t62345\tabc'.expandtabs(-5),
            )
        self.checkScript(test_expandtabs, ())

        def test_rfind() -> Tuple[int, int, int, int, int, int, int, int, int]:
            return (
                "hello123abc".rfind("llo"),
                "hello123abc".rfind("12"),
                "hello123abc".rfind("ab"),
                "hello123abc".rfind("ll", -1),
                "hello123abc".rfind("12", 4),
                "hello123abc".rfind("ab", -7),
                "hello123abc".rfind("ll", -1, 8),
                "hello123abc".rfind("12", 4, -4),
                "hello123abc".rfind("ab", -7, -20),
            )
        self.checkScript(test_rfind, ())

        def test_find() -> Tuple[int, int, int, int, int, int, int, int, int]:
            return (
                "hello123abc".find("llo"),
                "hello123abc".find("12"),
                "hello123abc".find("ab"),
                "hello123abc".find("ll", -1),
                "hello123abc".find("12", 4),
                "hello123abc".find("ab", -7),
                "hello123abc".find("ll", -1, 8),
                "hello123abc".find("12", 4, -4),
                "hello123abc".find("ab", -7, -20),
            )
        self.checkScript(test_find, ())

        def test_index() -> Tuple[int, int, int, int, int, int]:
            return (
                "hello123abc".index("llo"),
                "hello123abc".index("12"),
                "hello123abc".index("ab"),
                "hello123abc".index("12", 4),
                "hello123abc".index("ab", -7),
                "hello123abc".index("12", 4, -4),
            )
        self.checkScript(test_index, ())

        def test_rindex() -> Tuple[int, int, int, int, int, int]:
            return (
                "hello123abc".rindex("llo"),
                "hello123abc".rindex("12"),
                "hello123abc".rindex("ab"),
                "hello123abc".rindex("12", 4),
                "hello123abc".rindex("ab", -7),
                "hello123abc".rindex("12", 4, -4),
            )
        self.checkScript(test_rindex, ())

        def test_replace() -> Tuple[str, str, str, str, str, str, str]:
            return (
                "hello123abc".replace("llo", "sdf"),
                "ff".replace("f", "ff"),
                "abc123".replace("a", "testing"),
                "aaaaaa".replace("a", "testing", 3),
                "bbb".replace("a", "testing", 3),
                "ccc".replace("c", "ccc", 3),
                "cc".replace("c", "ccc", -3),
            )
        self.checkScript(test_replace, ())

        def test_partition() -> Tuple[Tuple[str, str, str], Tuple[str, str, str], Tuple[str, str, str],
                                      Tuple[str, str, str], Tuple[str, str, str], Tuple[str, str, str],
                                      Tuple[str, str, str]]:
            return (
                "hello123abc".partition("llo"),
                "ff".partition("f"),
                "abc123".partition("a"),
                "aaaaaa".partition("testing"),
                "bbb".partition("a"),
                "ccc".partition("ccc"),
                "cc".partition("ccc"),
            )
        self.checkScript(test_partition, ())

        def test_rpartition() -> Tuple[Tuple[str, str, str], Tuple[str, str, str], Tuple[str, str, str],
                                       Tuple[str, str, str], Tuple[str, str, str], Tuple[str, str, str],
                                       Tuple[str, str, str]]:
            return (
                "hello123abc".rpartition("llo"),
                "ff".rpartition("f"),
                "abc123".rpartition("a"),
                "aaaaaa".rpartition("testing"),
                "bbb".rpartition("a"),
                "ccc".rpartition("ccc"),
                "cc".rpartition("ccc"),
            )
        self.checkScript(test_rpartition, ())

        def test_split() -> Tuple[List[str], List[str], List[str], List[str], List[str],
                                  List[str], List[str], List[str], List[str], List[str], List[str]]:
            return (
                "a a a a a".split(),
                "a a a a a".split(),
                " a a\ta \v a \v\f\n a \t ".split(),
                " a a a a a ".split(" "),
                "a a a a a ".split(" ", 10),
                "a a a a a ".split(" ", -1),
                "a a a a a ".split(" ", 3),
                " a a a a a ".split("*"),
                " a*a a*a a".split("*"),
                " a*a a*a a ".split("*", -1),
                " a*a a*a a ".split("a*", 10),
            )
        self.checkScript(test_split, ())

        # test raising error for empty separator
        def test_split_empty_separator():
            s = "test"
            return s.split("")
        self.checkScriptRaisesRegex(test_split_empty_separator, (), Exception,
                                    "empty separator")

        def test_rsplit() -> Tuple[List[str], List[str], List[str], List[str], List[str],
                                   List[str], List[str], List[str], List[str]]:
            return (
                "a a a a a".rsplit(),
                " a a a a a ".rsplit(" "),
                "a a a a a ".rsplit(" ", 10),
                "a a a a a ".rsplit(" ", -1),
                "a a a a a ".rsplit(" ", 3),
                " a a a a a ".rsplit("*"),
                " a*a a*a a ".rsplit("*"),
                " a*a a*a a ".rsplit("*", -1),
                " a*a a*a a".rsplit("a*", 10),
            )
        self.checkScript(test_rsplit, ())

        def test_splitlines() -> Tuple[List[str], List[str], List[str], List[str],
                                       List[str], List[str]]:
            return (
                "hello\ntest".splitlines(),
                "hello\n\ntest\n".splitlines(),
                "hello\ntest\n\n".splitlines(),
                "hello\vtest".splitlines(),
                "hello\v\f\ntest".splitlines(),
                "hello\ftest".splitlines(),
            )
        self.checkScript(test_splitlines, ())

        def test_str_cmp(a: str, b: str) -> Tuple[bool, bool, bool, bool, bool, bool]:
            return a != b, a == b, a < b, a > b, a <= b, a >= b
        # Compare each adjacent pair from the shared inputs list.
        for i in range(len(inputs) - 1):
            self.checkScript(test_str_cmp, (inputs[i], inputs[i + 1]))

        def test_str_join():
            return (
                ",".join(["a"]),
                ",".join(["a", "b", "c"]),
                ",".join(["aa", "bb", "cc"]),
                ",".join(["a,a", "bb", "c,c"]),
                "**a**".join(["b", "c", "d", "e"]),
                "".join(["a", "b", "c"]),
            )
        self.checkScript(test_str_join, ())

        def test_bool_conversion(a: str):
            # Empty strings are falsy; scripted truthiness must agree.
            if a:
                return a
            else:
                return "default"
        self.checkScript(test_bool_conversion, ("nonempty",))
        self.checkScript(test_bool_conversion, ("",))

    def test_string_slice(self):
        # Slicing with explicit start/stop/step, including negative start
        # and steps that skip characters.
        def test_slice(a: str) -> Tuple[str, str, str, str, str]:
            return (
                a[0:1:2],
                a[0:6:1],
                a[4:1:2],
                a[0:3:2],
                a[-1:1:3],
            )
        self.checkScript(test_slice, ("hellotest",))
# Standard PyTorch test entry point.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_jit_string.py |
# Owner(s): ["oncall: mobile"]
import unittest
import torch
from torch.nn import functional as F
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import FileCheck
import io
@unittest.skipUnless(torch.is_vulkan_available(),
                     "Vulkan backend must be available for these tests.")
class TestVulkanRewritePass(TestCase):
    """Checks the JIT rewrite passes that replace aten convolutions with
    Vulkan prepacked ops (and optionally fold/fuse clamping ops)."""

    @staticmethod
    def validate_transformed_module(
            # To please flake
            self,
            pattern_count_map,
            data_shape,
            prepack_removal=False,
            fuse_clamping_ops=False):
        """Script ``self`` (a Module instance), apply the Vulkan rewrite
        passes, round-trip through save/load, and FileCheck the graph.

        pattern_count_map values: 0 -> pattern must appear, -1 -> pattern
        must NOT appear, n > 0 -> pattern appears exactly n times.
        """
        module_instance = self
        scripted_model = torch.jit.script(module_instance)
        scripted_model.eval()
        input_data = torch.normal(1, 20, size=data_shape)
        # Run forward once so the scripted module is exercised before the
        # rewrite passes mutate it.
        ref_result = scripted_model(input_data)
        torch._C._jit_pass_vulkan_insert_prepacked_ops(scripted_model._c)
        # Both fold and fuse passes require a frozen module.
        if fuse_clamping_ops or prepack_removal:
            scripted_model._c = torch._C._freeze_module(scripted_model._c)
        if fuse_clamping_ops:
            torch._C._jit_pass_vulkan_fuse_clamp_w_prepacked_conv(scripted_model._c)
        if prepack_removal:
            torch._C._jit_pass_vulkan_fold_prepacking_ops(scripted_model._c)
        # Round-trip through serialization so the checks run against what a
        # user would actually load.
        buffer = io.BytesIO()
        torch.jit.save(scripted_model, buffer)
        buffer.seek(0)
        deserialized_scripted_model = torch.jit.load(buffer)
        for pattern, v in pattern_count_map.items():
            if v == 0:
                FileCheck().check(pattern).run(deserialized_scripted_model.graph)
            elif v == -1:
                FileCheck().check_not(pattern).run(deserialized_scripted_model.graph)
            else:
                FileCheck().check_count(pattern, v, exactly=True).run(deserialized_scripted_model.graph)

    def test_conv(self):
        """conv2d alone and fused with relu/hardtanh is rewritten to
        vulkan_prepack ops; clamp ops are fused away when requested."""
        # Conv params
        batch_size = 2
        input_channels_per_group = 6
        height = 16
        width = 16
        output_channels_per_group = 6
        groups = 4
        kernel_h = kernel_w = 3
        stride_h = stride_w = 1
        pad_h = pad_w = 1
        dilation = 1
        input_channels = input_channels_per_group * groups
        output_channels = output_channels_per_group * groups
        strides = (stride_h, stride_w)
        paddings = (pad_h, pad_w)
        dilations = (dilation, dilation)
        conv_weight_shape = (output_channels, input_channels_per_group, kernel_h, kernel_w)
        # Fixed: was `(output_channels)`, a plain int (behavior-identical for
        # torch.rand, but misleading); make it an actual 1-tuple.
        conv_bias_shape = (output_channels,)

        class Conv2D(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
                self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
                self.strides = strides
                self.paddings = paddings
                self.dilations = dilations
                self.groups = groups

            def forward(self, x):
                return F.conv2d(x, self.weight, self.bias,
                                self.strides, self.paddings, self.dilations, self.groups)

        data_shape = (batch_size, input_channels, height, width)
        pattern_count_map = {"Tensor = aten::conv2d": -1,
                             "vulkan_prepack::conv2d_clamp_prepack": 1,
                             "vulkan_prepack::conv2d_clamp_run": 1}
        TestVulkanRewritePass.validate_transformed_module(Conv2D(), pattern_count_map, data_shape)

        class Conv2DRelu(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
                self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
                self.strides = strides
                self.paddings = paddings
                self.dilations = dilations
                self.groups = groups

            def forward(self, x):
                o = F.conv2d(x, self.weight, self.bias,
                             self.strides, self.paddings, self.dilations, self.groups)
                o = F.relu(o)
                return o

        data_shape = (batch_size, input_channels, height, width)
        pattern_count_map = {"Tensor = aten::conv2d": -1,
                             "vulkan_prepack::conv2d_clamp_prepack": 1,
                             "vulkan_prepack::conv2d_clamp_run": 1}
        TestVulkanRewritePass.validate_transformed_module(
            Conv2DRelu(), pattern_count_map, data_shape)

        # With prepack removal the prepack op is folded away but relu remains.
        pattern_count_map["aten::relu"] = 1
        pattern_count_map["vulkan_prepack::conv2d_clamp_prepack"] = -1
        TestVulkanRewritePass.validate_transformed_module(
            Conv2DRelu(),
            pattern_count_map,
            data_shape,
            prepack_removal=True)
        # With clamp fusion the relu disappears into the clamp-run op.
        pattern_count_map["aten::relu"] = -1
        TestVulkanRewritePass.validate_transformed_module(
            Conv2DRelu(),
            pattern_count_map,
            data_shape,
            prepack_removal=True,
            fuse_clamping_ops=True)

        class Conv2DHardtanh(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
                self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
                self.strides = strides
                self.paddings = paddings
                self.dilations = dilations
                self.groups = groups

            def forward(self, x):
                o = F.conv2d(x, self.weight, self.bias,
                             self.strides, self.paddings, self.dilations, self.groups)
                o = F.hardtanh(o)
                return o

        data_shape = (batch_size, input_channels, height, width)
        pattern_count_map = {"Tensor = aten::conv2d": -1,
                             "vulkan_prepack::conv2d_clamp_prepack": 1,
                             "vulkan_prepack::conv2d_clamp_run": 1}
        TestVulkanRewritePass.validate_transformed_module(Conv2DHardtanh(), pattern_count_map, data_shape)
        pattern_count_map["aten::hardtanh"] = 1
        pattern_count_map["vulkan_prepack::conv2d_clamp_prepack"] = -1
        TestVulkanRewritePass.validate_transformed_module(
            Conv2DHardtanh(),
            pattern_count_map,
            data_shape,
            prepack_removal=True)
        pattern_count_map["aten::hardtanh"] = -1
        # Bug fix: this case previously re-validated Conv2DRelu(), so the
        # hardtanh fuse-clamping path was never actually exercised.
        TestVulkanRewritePass.validate_transformed_module(
            Conv2DHardtanh(),
            pattern_count_map,
            data_shape,
            prepack_removal=True,
            fuse_clamping_ops=True)
# Standard PyTorch test entry point.
if __name__ == "__main__":
    run_tests()
| pytorch-master | test/test_vulkan.py |
# Owner(s): ["module: unknown"]
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch._C import parse_schema
class TestFunctionSchema(TestCase):
    """Round-trip, backward-compat (BC) and forward-compat (FC) checks for
    ``torch._C.FunctionSchema`` / ``parse_schema``."""

    def test_serialize_and_deserialize(self):
        # Every registered schema must survive str() -> parse_schema and
        # compare equal (and backward compatible) with the original.
        schemas = torch._C._jit_get_all_schemas()
        # so far we have around 1700 registered schemas
        self.assertGreater(len(schemas), 1000)
        for schema in schemas:
            parsed_schema = parse_schema(str(schema))
            self.assertEqual(parsed_schema, schema)
            self.assertTrue(parsed_schema.is_backward_compatible_with(schema))

    def test_out_schema(self):
        # Only a Tensor(a!) argument named in the out position is flagged is_out.
        schema_with_out = parse_schema('any.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)')
        self.assertTrue(schema_with_out.arguments[-1].is_out)
        schema_without_out = parse_schema('any.not_out(Tensor self, Tensor b) -> Tensor')
        self.assertFalse(schema_without_out.arguments[-1].is_out)

    def test_backward_compatible_structure(self):
        old_schema = parse_schema('any.over(Tensor self, *, Tensor b) -> Tensor')
        # BC: A new schema without changes.
        new_schema = parse_schema('any.over(Tensor self, *, Tensor b) -> Tensor')
        self.assertTrue(new_schema.is_backward_compatible_with(old_schema))
        # No-BC: A new schema with different name.
        new_schema = parse_schema('any_.over(Tensor self, *, Tensor b) -> Tensor')
        self.assertFalse(new_schema.is_backward_compatible_with(old_schema))
        # No-BC: A new schema with different overload name.
        new_schema = parse_schema('any.other(Tensor self, *, Tensor b) -> Tensor')
        self.assertFalse(new_schema.is_backward_compatible_with(old_schema))
        # No-BC: A new schema that adds vararg.
        new_schema = parse_schema('any.over(Tensor self, *, Tensor b, ...) -> Tensor')
        self.assertFalse(new_schema.is_backward_compatible_with(old_schema))
        # No-BC: A new schema with different number of outputs.
        new_schema = parse_schema('any.over(Tensor self, *, Tensor b) -> (Tensor, Tensor)')
        self.assertFalse(new_schema.is_backward_compatible_with(old_schema))

    def test_backward_compatible_outputs(self):
        old_schema = parse_schema('any.over(Tensor self, *, Tensor b) -> Tensor')
        # No-BC: A new schema with output becoming of optional type.
        new_schema = parse_schema('any.over(Tensor self, *, Tensor b) -> Tensor?')
        self.assertFalse(new_schema.is_backward_compatible_with(old_schema))
        # BC: (the opposite case) An schema where the output is not of optional type anymore.
        self.assertTrue(old_schema.is_backward_compatible_with(new_schema))
        # No-BC: A new schema with a different output type.
        new_schema = parse_schema('any.over(Tensor self, *, Tensor b) -> int')
        self.assertFalse(new_schema.is_backward_compatible_with(old_schema))
        # No-BC: A new schema with a different output type.
        new_schema = parse_schema('any.over(Tensor self, *, Tensor b) -> Tensor out')
        self.assertFalse(new_schema.is_backward_compatible_with(old_schema))

    def test_backward_compatible_arguments(self):
        old_schema = parse_schema('any(Tensor self, *, Tensor b, int c) -> Tensor')
        # No-BC: A new schema with less arguments.
        new_schema = parse_schema('any(Tensor self, *, Tensor b) -> Tensor')
        self.assertFalse(new_schema.is_backward_compatible_with(old_schema))
        # No-BC: A new schema with more arguments, appended, but no default value.
        new_schema = parse_schema('any(Tensor self, *, Tensor b, int c, int d) -> Tensor')
        self.assertFalse(new_schema.is_backward_compatible_with(old_schema))
        # BC: A new schema with more arguments, appended, that have a default value.
        new_schema = parse_schema('any(Tensor self, *, Tensor b, int c, int d=1) -> Tensor')
        self.assertTrue(new_schema.is_backward_compatible_with(old_schema))
        # No-BC: A new schema with more arguments, not-appended, that have a default value.
        new_schema = parse_schema('any(Tensor self, int d=1, *, Tensor b, int c) -> Tensor')
        self.assertFalse(new_schema.is_backward_compatible_with(old_schema))
        # BC: A new schema where old kwargs becomes positional.
        new_schema = parse_schema('any(Tensor self, Tensor b, *, int c) -> Tensor')
        self.assertTrue(new_schema.is_backward_compatible_with(old_schema))
        # BC: (the opposite case) A new schema where an old positional argument becomes kwarg.
        self.assertFalse(old_schema.is_backward_compatible_with(new_schema))
        # BC: A new schema where all old kwargs become positional.
        new_schema = parse_schema('any(Tensor self, Tensor b, int c) -> Tensor')
        self.assertTrue(new_schema.is_backward_compatible_with(old_schema))
        # BC: (the opposite case) A new schema where all old positional arguments become kwarg.
        self.assertFalse(old_schema.is_backward_compatible_with(new_schema))
        # No-BC: A new schema where old kwargs appear in different order.
        new_schema = parse_schema('any(Tensor self, *, int c, Tensor b) -> Tensor')
        self.assertFalse(new_schema.is_backward_compatible_with(old_schema))
        # BC: A new schema where argument becomes of type optional.
        new_schema = parse_schema('any(Tensor self, *, Tensor b, int? c) -> Tensor')
        self.assertTrue(new_schema.is_backward_compatible_with(old_schema))
        # BC: A new schema where argument gains a default value.
        new_schema = parse_schema('any(Tensor self, *, Tensor b, int c=1) -> Tensor')
        self.assertTrue(new_schema.is_backward_compatible_with(old_schema))
        # No-BC: A new schema where argument is "renamed".
        new_schema = parse_schema('any(Tensor self, *, Tensor b, int renamed) -> Tensor')
        self.assertFalse(new_schema.is_backward_compatible_with(old_schema))
        # No-BC: A new schema where argument type changes to an incompatible type.
        new_schema = parse_schema('any(Tensor self, *, Tensor b, int[] c) -> Tensor')
        self.assertFalse(new_schema.is_backward_compatible_with(old_schema))

    def test_backward_compatible_with_smart_serialization(self):
        # cases where out arg is provided
        old_schema = parse_schema('foo(Tensor self, *, int a, Tensor(a!) out) -> Tensor(a!)')
        new_schema_same_out = parse_schema('foo(Tensor self, *, int a, int b=1, Tensor(a!) out) -> Tensor(a!)')
        new_schema_wrong_default = parse_schema('foo(Tensor self, *, int b=1, int a, Tensor(a!) out) -> Tensor(a!)')
        new_schema_more_out = parse_schema('foo(Tensor self, *, int a, int b=1, Tensor(a!) out, Tensor(b!) b) -> Tensor(a!)')
        new_schema_wrong_pos = parse_schema('foo(Tensor self, *, int a, int b=1, Tensor(b!) b, Tensor(a!) out) -> Tensor(a!)')
        self.assertTrue(new_schema_same_out.is_backward_compatible_with(old_schema))
        self.assertTrue(new_schema_more_out.is_backward_compatible_with(old_schema))
        self.assertFalse(new_schema_wrong_default.is_backward_compatible_with(old_schema))
        self.assertFalse(new_schema_wrong_pos.is_backward_compatible_with(old_schema))
        # cases where out arg is not provided
        old_schema_without_arg = parse_schema('foo(Tensor self, int a, int b=1) -> int')
        new_schema_without_arg = parse_schema('foo(Tensor self, int a, int b=1, int c=2) -> int')
        new_schema_without_arg_multiple_default = parse_schema('foo(Tensor self, int a, int b=1, int c=2, int d=3) -> int')
        new_schema_without_arg_wrong_pos = parse_schema('foo(Tensor self, int a, int c=2, int b=1) -> int')
        self.assertTrue(new_schema_without_arg.is_backward_compatible_with(old_schema_without_arg))
        self.assertTrue(new_schema_without_arg_multiple_default.is_backward_compatible_with(old_schema_without_arg))
        self.assertFalse(new_schema_without_arg_wrong_pos.is_backward_compatible_with(old_schema_without_arg))

    def test_string_optional_parameter_default_value(self):
        # A quoted string default must survive a round trip through str().
        schema_a = parse_schema("example::op(str? order=\"NCHW\") -> (Tensor)")
        schema_b = parse_schema(str(schema_a))
        self.assertEqual(schema_a, schema_b)

    def test_forward_compatible_arguments_without_out(self):
        old_schema = parse_schema('any(Tensor self, int a, int b=1) -> Tensor')
        # deleting default arg is FC compatible
        new_schema = parse_schema('any(Tensor self, int a) -> Tensor')
        is_fc, _ = new_schema.check_forward_compatible_with(old_schema)
        self.assertTrue(is_fc)
        # adding default arg is FC compatible
        new_schema = parse_schema('any(Tensor self, int a, int b=1, int c=1) -> Tensor')
        is_fc, _ = new_schema.check_forward_compatible_with(old_schema)
        self.assertTrue(is_fc)
        # adding default arg with container type is NOT FC compatible
        new_schema = parse_schema('any(Tensor self, int a, int b=1, int[2] c=1) -> Tensor')
        is_fc, reason = new_schema.check_forward_compatible_with(old_schema)
        self.assertFalse(is_fc)
        self.assertEqual(reason, "Function schema is not forward compatible since the new argument"
                         " \'c\' of type int[] has a container type as its default value.")
        # updating the default value of a default arg is NOT FC compatible
        new_schema = parse_schema('any(Tensor self, int a, int b=4) -> Tensor')
        is_fc, reason = new_schema.check_forward_compatible_with(old_schema)
        self.assertFalse(is_fc)
        self.assertEqual(reason, "\'b\' is not forward compatible with the older version of the schema")
        # updating the arg name of a default arg is NOT FC compatible
        new_schema = parse_schema('any(Tensor self, int a, int c=1) -> Tensor')
        is_fc, reason = new_schema.check_forward_compatible_with(old_schema)
        self.assertFalse(is_fc)
        self.assertEqual(reason, "\'c\' is not forward compatible with the older version of the schema")
        # not adding default arg in the end is NOT FC compatible
        new_schema = parse_schema('any(Tensor self, int a, int c=1, int b=1) -> Tensor')
        is_fc, reason = new_schema.check_forward_compatible_with(old_schema)
        self.assertFalse(is_fc)
        self.assertEqual(reason, "\'c\' is not forward compatible with the older version of the schema")
        # making default arg into positional arg is NOT FC compatible
        new_schema = parse_schema('any(Tensor self, int a, int b) -> Tensor')
        is_fc, reason = new_schema.check_forward_compatible_with(old_schema)
        self.assertFalse(is_fc)
        self.assertEqual(reason, "\'b\' is not forward compatible with the older version of the schema")
        # making positional arg into default arg is NOT FC compatible
        new_schema = parse_schema('any(Tensor self, int a=1, int b=1) -> Tensor')
        is_fc, reason = new_schema.check_forward_compatible_with(old_schema)
        self.assertFalse(is_fc)
        self.assertEqual(reason, "\'a\' is not forward compatible with the older version of the schema")

    def test_forward_compatible_arguments_real_use_case(self):
        # this change introduced forward incompatibility in the past
        old_slice_schema = parse_schema('slice(Tensor(a) self, int dim=0, int start=0, int end=0, int step=1) -> Tensor(a)')
        new_slice_schema = parse_schema('slice(Tensor(a) self, int dim=0, int? start=None, int? end=None, int step=1) -> Tensor(a)')
        is_fc, reason = new_slice_schema.check_forward_compatible_with(old_slice_schema)
        self.assertFalse(is_fc)
        self.assertEqual(reason, "\'start\' is not forward compatible with the older version of the schema")

    def test_forward_compatible_arguments_with_out(self):
        old_schema = parse_schema('any(Tensor self, *, int a, int b=1, Tensor(a!) out) -> Tensor(a!)')
        new_schema = parse_schema('any(Tensor self, *, int a, Tensor(a!) out) -> Tensor(a!)')
        is_fc, _ = new_schema.check_forward_compatible_with(old_schema)
        self.assertTrue(is_fc)
        new_schema = parse_schema('any(Tensor self, *, int a, int b=1, int c=1, Tensor(a!) out) -> Tensor(a!)')
        is_fc, _ = new_schema.check_forward_compatible_with(old_schema)
        self.assertTrue(is_fc)
        # A differing number of out arguments is never FC.
        new_schema = parse_schema('any(Tensor self, *, int a, Tensor(d!) d, int b=1, Tensor(a!) out) -> Tensor(a!)')
        is_fc, reason = new_schema.check_forward_compatible_with(old_schema)
        self.assertFalse(is_fc)
        self.assertEqual(reason, "Function schema should have the same number of out arguments")

    def test_schema_error(self):
        # Varargs and defaulted arguments are mutually exclusive in a schema.
        with self.assertRaisesRegex(RuntimeError, r"schemas with vararg \(...\) can't have default value args"):
            schema = parse_schema("any.foo(int arg1, int arg2=0, ...)")
# Standard PyTorch test entry point.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_function_schema.py |
# -*- coding: utf-8 -*-
# Owner(s): ["oncall: jit"]
import unittest
import os
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing import FileCheck
from unittest import skipIf
from torch.testing._internal.common_utils import run_tests, IS_SANDCASTLE, ProfilingMode, GRAPH_EXECUTOR, \
enable_profiling_mode_for_profiling_tests, IS_WINDOWS, TemporaryDirectoryName, shell
from torch.testing._internal.jit_utils import JitTestCase, enable_cpu_fuser, _inline_everything, \
RUN_CUDA, RUN_CUDA_HALF, RUN_CUDA_MULTI_GPU, warmup_backward
from textwrap import dedent
from itertools import product, permutations
from torch.testing._internal.common_cuda import with_tf32_off
from test_jit import backward_graph, all_backward_graphs, get_lstm_inputs, get_milstm_inputs, \
LSTMCellC, LSTMCellF, LSTMCellS, MiLSTMCell
# When this suite runs under the profiling graph executor, enable profiling
# globally so fusion groups are formed the way these tests expect.
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
    torch._C._jit_set_profiling_executor(True)
    torch._C._jit_set_profiling_mode(True)
def strip_profiling_nodes(nodes):
    """Return ``nodes`` with profiling-executor bookkeeping nodes removed."""
    bookkeeping_kinds = ('prim::BailoutTemplate', 'prim::BailOut')
    return [node for node in nodes if node.kind() not in bookkeeping_kinds]
def warmup_forward(f, *args):
    """Call ``f(*args)`` twice so the profiling executor can record shapes;
    return the result of the last call."""
    results = None
    for _ in range(2):
        results = f(*args)
    return results
@skipIf(GRAPH_EXECUTOR == ProfilingMode.LEGACY, "skip due to SIGIOT failures, #67646")
class TestFuser(JitTestCase):
def assertAllFused(self, graph, except_for=()):
    """Assert that ``graph`` fused into exactly one ``prim::FusionGroup``,
    with no leftover compute nodes other than ``except_for`` kinds."""
    diff_nodes = [node for node in graph.nodes()
                  if node.kind() == 'prim::DifferentiableGraph']
    if diff_nodes:
        # With autodiff there must be a single differentiable subgraph;
        # inspect its interior instead of the outer graph.
        self.assertEqual(len(diff_nodes), 1)
        graph = diff_nodes[0].g('Subgraph')
    permitted = {'prim::Constant', 'prim::FusionGroup', 'prim::BailoutTemplate',
                 'prim::BailOut', 'prim::TupleConstruct'}
    permitted |= set(except_for)
    kinds = [node.kind() for node in graph.nodes()]
    self.assertTrue(all(kind in permitted for kind in kinds),
                    'got {}'.format(graph))
    self.assertTrue(kinds.count('prim::FusionGroup') == 1)
def _test_fused_abs(self, device='cpu'):
    """Check that ``abs(x) * 2`` compiles into one fused kernel on ``device``."""
    def fused_fn(x):
        return x.abs() * 2

    sample = torch.randn(5, device=device)
    compiled = self.checkScript(fused_fn, (sample,))
    self.assertAllFused(compiled.graph_for(sample))
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
@enable_cpu_fuser
def test_abs_cpu(self):
    # CPU variant of the fused-abs smoke test.
    self._test_fused_abs()
@unittest.skipIf(not IS_WINDOWS, "This is meant to be Windows-specific")
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
@enable_cpu_fuser
def test_abs_cpu_unicode_temp_dir(self):
    # Regression test: run test_abs_cpu in a subprocess whose TMP points at a
    # directory with non-ASCII characters, to make sure kernel compilation
    # does not choke on such paths on Windows.
    with TemporaryDirectoryName(suffix='中文') as dname:
        shell_env = os.environ.copy()
        shell_env['TMP'] = dname
        cmd = [sys.executable, os.path.basename(__file__), type(self).__name__ + '.test_abs_cpu']
        legacy_jit_flag = '--jit_executor=legacy'
        # Forward the legacy-executor flag to the subprocess when present.
        for v in sys.argv:
            if v == legacy_jit_flag:
                cmd.append(legacy_jit_flag)
        return_code = shell(cmd, cwd=os.path.dirname(__file__), env=shell_env)
        self.assertEqual(return_code, 0)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
def test_abs_cuda(self):
    # CUDA variant of the fused-abs smoke test.
    self._test_fused_abs(device="cuda")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
def test_zero_element_tensors(self):
    """Scripting and fusion must tolerate zero-element CUDA inputs."""
    def decode(sin_t, cos_t):
        theta = torch.atan2(sin_t.float(), cos_t.float())
        return theta

    empty_sin = torch.zeros(0, device="cuda")
    empty_cos = torch.zeros(0, device="cuda")
    ge = self.checkScript(decode, [empty_sin, empty_cos])
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_arg_configurations_smoke_cuda(self):
    # A smoke test to make sure we won't use the same kernel for contiguous
    # and non-contiguous arguments.
    # TODO: add optionally enabled debug counters to the fuser to verify
    # that we really can tell the difference between configurations
    def f(x, y):
        z1, z2 = (x + y).chunk(2, dim=1)
        return z1 * z2

    x = torch.randn(4, 4, dtype=torch.float, device='cuda')
    y = torch.randn(4, 4, dtype=torch.float, device='cuda')
    traced_f = torch.jit.trace(f, (x, y,))
    # Contiguous vs. transposed (non-contiguous) first argument must agree.
    self.assertEqual(traced_f(x.t().contiguous(), y), traced_f(x.t(), y))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_broadcast_cuda(self):
    """Broadcasting scale/shift vectors over a matrix must still fully fuse."""
    def scaleshift(x, scale, shift):
        return x * scale + shift

    args = [
        torch.randn(4, 4, dtype=torch.float, device='cuda'),
        torch.randn(4, dtype=torch.float, device='cuda'),
        torch.randn(4, dtype=torch.float, device='cuda'),
    ]
    traced = self.checkTrace(scaleshift, args)
    self.assertAllFused(traced.graph_for(*args))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "no bfloat support with profiling on")
def test_cuda_bfloat16(self):
    """bfloat16 elementwise add + relu must fuse on CUDA (legacy executor)."""
    def foo(x, y):
        return (x + y).relu()

    scripted = torch.jit.script(foo)
    a = torch.randn(65536).cuda().bfloat16()
    b = torch.randn_like(a)
    self.assertAllFused(scripted.graph_for(a, b))
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
@unittest.skipIf(not RUN_CUDA_HALF, "no half support")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "no half support with profiling on")
def test_cuda_half(self):
    # Compare half-precision fused kernels (forward and backward) against a
    # float eager reference, downcasting the reference to half for comparison.
    x = torch.randn(4, 4, dtype=torch.half, device='cuda')
    y = torch.randn(4, 4, dtype=torch.half, device='cuda')

    funcs = [
        self.fn_test_comparison_gt_lt,
        self.fn_test_relu,
        self.fn_test_exp
    ]

    # Note: Non fused inputs must be float to prevent loss of precision
    inputs = (x.float(), y.float())
    fusion_inputs = (x, y)
    for fn in funcs:
        local_inputs = [t.clone().requires_grad_() for t in inputs]
        local_fusion_inputs = [t.clone().requires_grad_() for t in fusion_inputs]

        # Verifies outputs
        fusion = torch.jit.trace(fn, local_fusion_inputs, check_trace=False)
        outputs = fn(*local_inputs)
        fusion_outputs = fusion(*local_fusion_inputs)
        outputs_half = [t.half() for t in outputs]
        self.assertEqual(outputs_half, fusion_outputs)

        # Verifies gradients
        for output, fusion_output in zip(outputs_half, fusion_outputs):
            grads = torch.autograd.grad(
                output.float().sum(), local_inputs, allow_unused=True, retain_graph=True)
            fusion_grads = torch.autograd.grad(
                fusion_output.sum(), local_fusion_inputs, allow_unused=True, retain_graph=True)
            grads_half = [t.half() for t in grads]
            self.assertEqual(grads_half, fusion_grads)
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_checks_cat_inputs(self):
    # We shouldn't treat cat nodes as broadcasting. All their inputs
    # need to be checked for having the same map size, before we can
    # run the kernel.
    def f(x, y):
        return torch.cat([x + 2 * x + x ** 2, y + 4 * y + y ** 3], dim=0)

    # NOTE: y is broadcastable to x, but output of f(x, y) should have
    # shape 3x4, and not 4x4.
    x = torch.randn(2, 4, dtype=torch.float, device='cuda')
    y = torch.randn(1, 4, dtype=torch.float, device='cuda')

    scripted = self.checkScript(f, (x, y))
    self.assertAllFused(scripted.graph_for(x, y))
@unittest.skipIf(not RUN_CUDA, "No CUDA")
def test_remainder_cuda(self):
def cuda_rem(x, y):
return 1 + torch.remainder(x, y) - 1
a = torch.rand([512], dtype=torch.float).cuda()
b = torch.rand([512], dtype=torch.float).cuda()
inputs = [a, b]
ge = self.checkScript(cuda_rem, inputs)
graph = ge.graph_for(*inputs)
self.assertAllFused(graph)
    @unittest.skipIf(not RUN_CUDA, "No CUDA")
    def test_chunk_cuda(self):
        """chunk() inside a fusible expression should become prim::ConstantChunk in the graph."""
        def fn(x):
            a, b, c = x.chunk(3, 1)
            return a * b + c
        inputs = [torch.randn(10, 6, dtype=torch.float, device='cuda')]
        ge = self.checkScript(fn, inputs)
        graph = ge.graph_for(*inputs)
        self.assertAllFused(graph)
        FileCheck().check("prim::ConstantChunk[chunks=3, dim=1]").run(str(graph))
    @staticmethod
    def _test_chunk_correctness(self, device='cpu'):
        """Shared driver: chunk along each dim over several layouts and check scripted results.

        NOTE(review): declared @staticmethod but takes `self` explicitly;
        callers pass the test instance by hand (see test_chunk_correctness*).
        """
        def chunk_4_0(x):
            x0, x1, x2, x3 = x.chunk(4, 0)
            return x0 + x1 + x2 + x3
        def chunk_4_1(x):
            x0, x1, x2, x3 = x.chunk(4, 1)
            return x0 + x1 + x2 + x3
        def chunk_4_last(x):
            x0, x1, x2, x3 = x.chunk(4, 2)
            return x0 + x1 + x2 + x3
        fns = [chunk_4_0, chunk_4_1, chunk_4_last]
        tensors = [
            # splitSize = 1
            torch.randn(4, 4, 4, dtype=torch.float, device=device),
            # contiguous case
            torch.randn(12, 8, 16, dtype=torch.float, device=device),
            # non-contiguous case
            torch.randn(12, 8, 16, dtype=torch.float, device=device).transpose(1, 2),
        ]
        for tensor in tensors:
            for fn in fns:
                self.checkScript(fn, [tensor])
    @unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
    @enable_cpu_fuser
    def test_chunk_correctness(self):
        """CPU variant of the shared chunk-correctness driver."""
        return self._test_chunk_correctness(self, 'cpu')
    @unittest.skipIf(not RUN_CUDA, "No CUDA")
    def test_chunk_correctness_cuda(self):
        """CUDA variant of the shared chunk-correctness driver."""
        return self._test_chunk_correctness(self, 'cuda')
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    def test_chunk_distributes_cuda(self):
        """chunk of a fusible add should be distributed into the fusion group as ConstantChunk."""
        def f(x, y):
            z1, z2 = (x + y).chunk(2, dim=1)
            return z1 * z2
        x = torch.randn(4, 4, dtype=torch.float, device='cuda')
        y = torch.randn(4, 4, dtype=torch.float, device='cuda')
        ge = self.checkTrace(f, (x, y))
        graph = ge.graph_for(x, y)
        FileCheck().check("broadcast_tensors").check('with prim::FusionGroup_') \
            .check_count('ConstantChunk', 2, exactly=True).run(str(graph))
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    def test_chunk_motion_deduplicates_inputs(self):
        """Moving chunk into a fusion group must not duplicate the group's inputs."""
        def func1(x):
            z = x * x
            z0, z1 = z.chunk(2)
            return z0 * z1
        def func2(x):
            z = x * x * x
            z0, z1 = z.chunk(2)
            return z0 * z1
        inputs = [
            torch.tensor([1.1, 1.2], device='cuda', dtype=torch.float),
        ]
        for func in [func1, func2]:
            module = self.checkScript(func, inputs)
            forward_graph = module.graph_for(*inputs)
            self.assertGraphContainsExactly(forward_graph, 'prim::FusionGroup', 1)
            fusion_group = list(forward_graph.nodes())[-1]
            # x feeds the group once, even though it is used multiple times inside
            self.assertEqual(len(list(fusion_group.inputs())), 1)
    @unittest.skipIf(not RUN_CUDA, "No CUDA")
    def test_chunk_multiple_cuda(self):
        """Multiple chunks over different dims in one expression should still fully fuse."""
        # The arguments are intentionally used out of order as a test to see
        # if the fusion compiler adds extra args in the correct order
        def fn(s, x, y, z):
            z1, z2 = z.chunk(2, 2)
            x1, x2, x3 = x.chunk(3, 1)
            y1, y2 = y.chunk(2, 0)
            return s + x1 + x2 + x3 + y1 + y2 + z1 + z2
        inputs = [
            torch.randn(5, 2, 3, dtype=torch.float, device='cuda'),
            torch.randn(5, 6, 3, dtype=torch.float, device='cuda'),
            torch.randn(10, 2, 3, dtype=torch.float, device='cuda'),
            torch.randn(5, 2, 6, dtype=torch.float, device='cuda'),
        ]
        ge = self.checkScript(fn, inputs)
        self.assertAllFused(ge.graph_for(*inputs))
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    def test_minmax(self):
        """torch.max/min should fuse, including when one operand contains NaN."""
        def tmax(a, b):
            return torch.max(2 * a, b)
        def tmin(a, b):
            return torch.min(2 * a, b)
        a = torch.randn(4, 4, dtype=torch.float, device="cuda")
        b = torch.randn(4, 4, dtype=torch.float, device="cuda")
        nan = torch.tensor(float('nan'), dtype=torch.float, device="cuda")
        for f, inputs in product(
                (tmax, tmin),
                ([a, b], [a, nan], [b, nan])):
            s = self.checkScript(f, inputs)
            self.assertAllFused(s.graph_for(*inputs))
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    def test_clamp(self):
        """clamp variants (both bounds, inf bound, single bound) fuse in forward and backward."""
        def func2(a, b):
            return torch.clamp(a + b, min=0, max=2)
        def funcInf(a, b):
            return torch.clamp(a + b, min=0, max=float('inf'))
        def funcOptMin(a, b):
            return torch.clamp(a + b, max=2)
        def funcOptMax(a, b):
            return torch.clamp(a + b, min=0)
        a = torch.randn(4, 4, dtype=torch.float, device='cuda', requires_grad=True)
        b = torch.randn(4, 4, dtype=torch.float, device='cuda')
        nan = torch.tensor(float('nan'), dtype=torch.float, device='cuda')
        funcs = (func2, funcInf, funcOptMin, funcOptMax)
        for f, inputs in product(funcs, [[a, b], [a, nan]]):
            # disable caching so each (f, inputs) pair compiles fresh
            f.__disable_jit_function_caching__ = True
            inp1, inp2 = inputs
            s = self.checkScript(f, (inp1, inp2), profiling=ProfilingMode.PROFILING)
            self.assertAllFused(s.graph_for(inp1, inp2), except_for={'aten::size', 'aten::_size_if_not_equal'})
            c = s(inp1, inp2)
            with enable_profiling_mode_for_profiling_tests():
                warmup_backward(c.sum())
            graph = backward_graph(s)
            self.assertAllFused(graph, except_for={'aten::Float', 'aten::_grad_sum_to_size'})
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "no half support with profiling on")
    def test_dropout(self):
        """dropout + relu should fuse in the backward graph (div/Constant excluded)."""
        def func(x):
            x = torch.nn.functional.dropout(x)
            return torch.nn.functional.relu(x)
        a = torch.randn(4, 4, dtype=torch.float, device='cuda', requires_grad=True)
        s = torch.jit.script(func)
        c = s(a)
        c = s(a)
        warmup_backward(c.sum())
        # skip_check to skip extra bailout nodes in between
        graph = backward_graph(s, skip_check=True)
        self.assertAllFused(graph, except_for={'aten::div', 'prim::Constant'})
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    def test_comparison_eq_ne(self):
        """Masked blends built from == and != comparisons should fully fuse."""
        def f(x, y):
            mask = (x == 0).type_as(x)
            z = x * mask + y
            mask = (x != 0).type_as(x)
            z = z * mask + y
            return z
        x = torch.randn(4, 4, dtype=torch.float, device='cuda')
        y = torch.randn(4, 4, dtype=torch.float, device='cuda')
        ge = self.checkTrace(f, (x, y))
        self.assertAllFused(ge.graph_for(x, y))
@staticmethod
def fn_test_comparison_gt_lt(x, y):
mask = (x > 0).type_as(x)
z = x * mask + y
mask = (x < 0).type_as(x)
z = z * mask + y
return z
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    def test_comparison_gt_lt_cuda(self):
        """The shared gt/lt masked-blend helper should fully fuse on CUDA."""
        x = torch.randn(4, 4, dtype=torch.float, device='cuda')
        y = torch.randn(4, 4, dtype=torch.float, device='cuda')
        ge = self.checkTrace(self.fn_test_comparison_gt_lt, (x, y))
        self.assertAllFused(ge.graph_for(x, y))
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    def test_comparison_ge_le_cuda(self):
        """>= / <= masked blends fuse; with grads enabled, size bookkeeping ops are tolerated."""
        def f(x, y):
            mask = (x >= 0).type_as(x)
            z = x * mask + y
            mask = (x <= 0).type_as(x)
            z = z * mask + y
            return z
        x = torch.randn(4, 4, dtype=torch.float, device='cuda')
        y = torch.randn(4, 4, dtype=torch.float, device='cuda')
        ge = self.checkTrace(f, (x, y))
        self.assertAllFused(ge.graph_for(x, y))
        x.requires_grad_(True)
        y.requires_grad_(True)
        self.assertAllFused(ge.graph_for(x, y), except_for=("aten::size", "prim::BroadcastSizes",
                                                            "aten::_size_if_not_equal"))
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    def test_addcmul_cuda(self):
        """addcmul over broadcasting operands should fully fuse."""
        t = torch.randn(1, 4, dtype=torch.float, device='cuda')
        t1 = torch.randn(4, 1, dtype=torch.float, device='cuda')
        t2 = torch.randn(1, 4, dtype=torch.float, device='cuda')
        def foo(t, t1, t2):
            # NOTE(review): t1 is never read (hence allow_unused=True below);
            # presumably intentional to exercise an unused traced input — confirm.
            return t.addcmul(t + 1, t2, value=0.1)
        ge = self.checkTrace(foo, (t, t1, t2), allow_unused=True)
        graph = ge.graph_for(t, t1, t2)
        self.assertAllFused(graph)
    # TODO: We leak CUDA memory here because the traced graph holds onto a
    # constant-ified tensor. Since the Python-global CompilationUnit is alive
    # until the end of the process, the memory is effectively leaked.
    # Removed `_cuda` suffix from this test which disables leak-checking.
    # If this is a real problem, we'll need to revisit Torchscript Function
    # lifetimes in Python.
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    def test_lerp(self):
        """torch.lerp fuses for both the scalar-weight and tensor-weight overloads."""
        start = torch.randn(4, 1, dtype=torch.float, device='cuda')
        end = torch.randn(1, 4, dtype=torch.float, device='cuda')
        weight = torch.tensor(0.5, dtype=torch.float, device='cuda')
        # scalar weight overload
        def foo_weight_scalar(start, end):
            return torch.lerp(start + 1, end, 0.5)
        # tensor weight overload
        def foo_weight_tensor(start, end):
            return torch.lerp(start + 1, end, weight)
        ge_weight_scalar = self.checkTrace(foo_weight_scalar, (start, end))
        graph = ge_weight_scalar.graph_for(start, end)
        self.assertAllFused(graph)
        ge_weight_tensor = self.checkTrace(foo_weight_tensor, (start, end))
        graph = ge_weight_tensor.graph_for(start, end)
        self.assertAllFused(graph)
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    def test_concat_cuda(self):
        """cat of two fusible expressions becomes a FusedConcat that ends the group."""
        hx = torch.randn(3, 20, dtype=torch.float, device='cuda')
        cx = torch.randn(3, 20, dtype=torch.float, device='cuda')
        def foo(hx, cx):
            return torch.cat((hx + cx, hx * cx))
        ge = self.checkTrace(foo, (hx, cx))
        graph = ge.graph_for(hx, cx)
        self.assertAllFused(graph)
        FileCheck().check("FusedConcat").check_next("return").run(str(graph))
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    def test_concat_invariant_cuda(self):
        """FusedConcat output may not feed back into the same FusionGroup."""
        # Invariant: the output of prim::FusedConcat may
        # not be an input to any node inside the FusionGroup.
        def fn(x, y, z):
            x1 = x + y
            y1 = x - y
            w = torch.cat([x1, y1])
            return w + z
        x = torch.randn(2, 2, dtype=torch.float, device='cuda')
        y = torch.randn(2, 2, dtype=torch.float, device='cuda')
        z = torch.randn(4, 2, dtype=torch.float, device='cuda')
        ge = self.checkTrace(fn, (x, y, z))
        graph = ge.graph_for(x, y, z)
        # the trailing `w + z` cannot join the group without breaking the invariant
        self.assertAllFused(graph, except_for={'aten::add'})
        FileCheck().check("FusedConcat").check_next("return").run(str(graph))
@staticmethod
def fn_test_exp(x, y):
return (x + .5 * y).exp()
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    def test_exp_cuda(self):
        """The shared exp helper should fully fuse on CUDA."""
        x = torch.randn(4, 4, dtype=torch.float, device='cuda')
        y = torch.randn(4, 4, dtype=torch.float, device='cuda')
        ge = self.checkTrace(self.fn_test_exp, (x, y))
        self.assertAllFused(ge.graph_for(x, y))
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "broken with profiling on")
    @torch._jit_internal._disable_emit_hooks_decorator
    @_inline_everything
    def test_fuse_decompose_normalization(self):
        """BatchNorm/LayerNorm should be decomposed into primitives, with the
        elementwise parts landing in a single FusionGroup; results must match
        an un-optimized run of the same weights."""
        class ResLike(torch.jit.ScriptModule):
            def __init__(self, norm_module):
                super(ResLike, self).__init__()
                self.nm = norm_module
            @torch.jit.script_method
            def forward(self, x, y):
                return y + torch.relu(self.nm(x))
        def test_norm_decompose(nm, in_opt_graph, not_in_opt_graph, in_fusegraph):
            model = ResLike(nm).cuda()
            model_noopt = ResLike(nm).cuda()
            model_noopt.load_state_dict(model.state_dict())
            x = torch.randn(2, 16, 8, 8, device='cuda')
            y = torch.randn(2, 16, 8, 8, device='cuda')
            # FIXME: We need differentiation for CNNs for this optimization to trigger
            with torch.no_grad():
                out = model(x, y)
                graph = model.graph_for(x, y)
                rep = str(graph)
                with torch.jit.optimized_execution(False):
                    out_noopt = model_noopt(x, y)
                    rep_noopt = str(model_noopt.graph_for(x, y))
                self.assertEqual(out, out_noopt, atol=3e-5)
            # Check that normalization op has really been decomposed
            for node_in_graph in in_opt_graph:
                self.assertIn(node_in_graph, rep)
            for node_not_in_graph in not_in_opt_graph:
                self.assertNotIn(node_not_in_graph, rep)
                self.assertIn(node_not_in_graph, rep_noopt)
            fusion_groups = [node for node in graph.nodes() if node.kind() == 'prim::FusionGroup']
            self.assertEqual(len(fusion_groups), 1)
            fused_graph = str(fusion_groups[0].g('Subgraph'))
            for node_in_fusegraph in in_fusegraph:
                self.assertIn(node_in_fusegraph, fused_graph)
        # test for batchnorm decompose
        bm = nn.BatchNorm2d(16)
        test_norm_decompose(bm, ['aten::batch_norm_update_stats'],
                            ['aten::batch_norm('], ['aten::sqrt'])
        # test for layernorm decompose
        lm = nn.LayerNorm(8)
        test_norm_decompose(lm, ['aten::batch_norm_stats'],
                            ['aten::layer_norm('], ['aten::sub', 'aten::mul', 'aten::add'])
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    def test_threshold(self):
        """torch.threshold followed by adds should fully fuse."""
        def f(x):
            return torch.threshold(x, 0, -10) + x + x + x
        x = torch.tensor([-1, -0.5, 0, 1, 2, 3], device='cuda')
        scripted = self.checkScript(f, (x,))
        self.assertAllFused(scripted.graph_for(x))
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    def test_scalar_arg_cuda(self):
        """A Python float scalar argument participates in fusion alongside tensors."""
        def fn_test_scalar_arg(x: torch.Tensor, p: float) -> torch.Tensor:
            return p * (x * x + x)
        x = torch.randn(4, 4, dtype=torch.float, device='cuda')
        p = 3
        scripted = self.checkScript(fn_test_scalar_arg, (x, p))
        self.assertAllFused(scripted.graph_for(x, p))
        x.requires_grad_(True)
        # use another function otherwise we will bailout
        # and won't be able to do fused checks
        def fn_test_scalar_arg_requires_grad(x: torch.Tensor, p: float) -> torch.Tensor:
            return p * (x * x + x)
        scripted = torch.jit.script(fn_test_scalar_arg_requires_grad)
        out = scripted(x, p)
        self.assertAllFused(scripted.graph_for(x, p), except_for=("aten::size", "prim::BroadcastSizes",
                                                                  "aten::_size_if_not_equal"))
    @unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
    @unittest.skip("deduplicating introduces aliasing in backward graph's outputs")
    @enable_cpu_fuser
    def test_fuser_deduplication(self):
        """Identical fusion-kernel outputs should be deduplicated into one output."""
        # See that fusion kernel outputs are deduplicated when removing _grad_sum_to_size in the fuser's compilation
        # see the discussion in PR #14957.
        def f(x, y):
            return torch.sigmoid(x + y)
        b = torch.randn(5, 5, requires_grad=True)
        a = torch.randn(5, 5, requires_grad=True)
        s = self.checkScript(f, (a, b))
        self.assertAllFused(s.graph_for(a, b), except_for={
            'aten::size', 'aten::_size_if_not_equal', 'prim::BroadcastSizes'})
        c = s(a, b)
        results = warmup_backward(c.sum(), [a, b])
        ga2, gb2 = results.pop()
        graph = backward_graph(s)
        self.assertAllFused(graph)
        # check that a, b share storage, i.e. were generated as a single output in the fuser
        self.assertEqual(ga2.data_ptr(), gb2.data_ptr())
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
@enable_cpu_fuser
@unittest.skip("temporarily disabled because fusion was restricted in fixing #22833")
def test_fuser_iou(self):
# This checks if most of Intersection over Union is fused.
# In particular, the backward contains many _grad_sum_to_size.
def iou(b1x1, b1y1, b1x2, b1y2, b2x1, b2y1, b2x2, b2y2):
ltx = torch.max(b1x1, b2x1) # [N,M]
lty = torch.max(b1y1, b2y1)
rbx = torch.min(b1x2, b2x2)
rby = torch.min(b1y2, b2y2)
w = (rbx - ltx).clamp(min=0, max=float('inf')) # [N,M]
h = (rby - lty).clamp(min=0, max=float('inf')) # [N,M]
inter = w * h # [N,M]
area1 = (b1x2 - b1x1) * (b1y2 - b1y2) # [N,1]
area2 = (b2x2 - b2x1) * (b2y2 - b2y2) # [1,M]
iou = inter / (area1 + area2 - inter)
return iou
box1 = torch.randn(5, 4, requires_grad=True)
box2 = torch.randn(5, 4, requires_grad=True)
# unsqueezing can currently not be fused
b1x1 = box1[:, 0].unsqueeze(1) # [N,1]
b1y1 = box1[:, 1].unsqueeze(1)
b1x2 = box1[:, 2].unsqueeze(1)
b1y2 = box1[:, 3].unsqueeze(1)
b2x1 = box2[:, 0].unsqueeze(0) # [1,N]
b2y1 = box2[:, 1].unsqueeze(0)
b2x2 = box2[:, 2].unsqueeze(0)
b2y2 = box2[:, 3].unsqueeze(0)
s = self.checkScript(iou, (b1x1, b1y1, b1x2, b1y2, b2x1, b2y1, b2x2, b2y2))
self.assertAllFused(s.graph_for(b1x1, b1y1, b1x2, b1y2, b2x1, b2y1, b2x2, b2y2),
except_for={'aten::size', 'prim::BroadcastSizes', 'aten::_size_if_not_equal'})
with enable_profiling_mode_for_profiling_tests(True):
c = s(b1x1, b1y1, b1x2, b1y2, b2x1, b2y1, b2x2, b2y2)
warmup_backward(c.sum(), [b1x1, b1y1, b1x2, b1y2, b2x1, b2y1, b2x2, b2y2])
graph = backward_graph(s)
self.assertAllFused(graph, except_for={'aten::size', 'prim::BroadcastSizes', 'aten::_size_if_not_equal'})
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    @unittest.skipIf(not RUN_CUDA_MULTI_GPU, "needs non-zero device")
    @enable_cpu_fuser
    def test_fusion_reuse_multi_gpu(self):
        """Running the same fused graph on CPU and on two GPUs must not crash
        (a distinct kernel should be compiled per device)."""
        def fn(x, y):
            return x * y * x * y
        inputs_cpu = [
            torch.randn(4, 4, dtype=torch.float),
            torch.randn(4, 4, dtype=torch.float),
        ]
        inputs_cuda0 = [x.cuda(0) for x in inputs_cpu]
        inputs_cuda1 = [y.cuda(1) for y in inputs_cpu]
        # Should not crash; these should compile different kernels.
        ge = self.checkScript(fn, inputs_cpu)
        self.assertAllFused(ge.graph_for(*inputs_cpu))
        ge(*inputs_cuda0)
        ge(*inputs_cuda1)
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    @unittest.skipIf(not RUN_CUDA_MULTI_GPU, "needs non-zero device")
    @enable_cpu_fuser
    def test_kernel_cache_multi_gpu(self):
        """Three identical fusion groups on different devices share one KernelSpec."""
        def not_fusible(x):
            return x
        def fn(x, y, z):
            x_out = x * x * x * x * x  # fusion: lambda x. x * x * x * x * x
            y_out = y * y * y * y * y
            z_out = z * z * z * z * z
            return not_fusible(x_out), not_fusible(y_out), not_fusible(z_out)
        inputs = [
            torch.randn(4, 4, dtype=torch.float),
            torch.randn(4, 4, dtype=torch.float, device='cuda:0'),
            torch.randn(4, 4, dtype=torch.float, device='cuda:1'),
        ]
        prev_cache_size = torch._C._jit_debug_fuser_num_cached_kernel_specs()
        # There are 3 FusionGroups. Because they have the same graph, they
        # should reuse the same KernelSpec in the KernelSpec cache.
        ge = self.checkScript(fn, inputs)
        self.assertGraphContainsExactly(
            ge.graph_for(*inputs), 'prim::FusionGroup', 3, True)
        new_cache_size = torch._C._jit_debug_fuser_num_cached_kernel_specs()
        # XXX: This assumes that the same kernel isn't already used by another test
        self.assertEqual(new_cache_size - prev_cache_size, 1)
    @unittest.skipIf(not RUN_CUDA_MULTI_GPU, "needs non-zero device")
    def test_nonzero_device_cuda(self):
        """Fusion must also work on a non-default CUDA device (cuda:1)."""
        device = 'cuda:' + str(1)
        x = torch.tensor([0.4], dtype=torch.float, device=device)
        y = torch.tensor([0.7], dtype=torch.float, device=device)
        def doit(x, y):
            return torch.sigmoid(torch.tanh(x * (x + y) + x))
        ge = self.checkTrace(doit, (x, y))
        self.assertAllFused(ge.graph_for(x, y))
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    def test_lstm_cuda(self):
        """A scripted LSTM cell should produce one FusionGroup and a fused backward."""
        inputs = get_lstm_inputs('cuda', training=True)
        module = self.checkScript(LSTMCellS, inputs)
        # NOTE(review): this early return disables every assertion below
        # (dead code) — presumably a temporary disable; confirm intent.
        return
        forward_graph = module.graph_for(*inputs)
        self.assertGraphContainsExactly(
            forward_graph, 'prim::FusionGroup', 1, consider_subgraphs=True)
        self.assertTrue(len(strip_profiling_nodes(forward_graph.nodes())) == 2)
        # Everything is differentiable but TupleConstruct return
        FileCheck().check("DifferentiableGraph").check_next("TupleConstruct") \
            .check_next("return").run(str(forward_graph))
        with enable_profiling_mode_for_profiling_tests(True):
            hy, cy = module(*inputs)
            warmup_backward((hy + cy).sum())
            backward = backward_graph(module)
        self.assertAllFused(backward, except_for=("aten::t", "aten::mm",
                                                  "aten::_grad_sum_to_size"))
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    # By default, on Ampere or later GPUs, LSTM computes float tensors at TF32 precision.
    # We want float tensors to be computed at full precision in order to use the default precision
    @with_tf32_off
    def test_lstm_concat_cuda(self):
        """Traced LSTM cell with concatenated outputs ends in a FusedConcat."""
        inputs = get_lstm_inputs('cuda')
        ge = self.checkTrace(LSTMCellC, inputs)
        graph = ge.graph_for(*inputs)
        FileCheck().check("FusedConcat").check_next("return").run(str(graph))
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    def test_lstm_gates_permutations_cuda(self):
        """Every addend ordering of the LSTM gate sum yields exactly one FusionGroup."""
        # lstm has gates = x.mm(w_ih.t()) + hx.mm(w_hh.t()) + b_ih + b_hh.
        # Test that any permutation of this will still result in one FusionGroup.
        choices = ['x.mm(w_ih.t())', 'hx.mm(w_hh.t())', 'b_ih', 'b_hh']
        template = dedent('''
        def cell(x, hx, cx, w_ih, w_hh, b_ih, b_hh):
            gates = {} + {} + {} + {}
            ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
            return ingate * forgetgate * cellgate * outgate
        ''')
        for permutation in permutations(choices, len(choices)):
            code = template.format(*permutation)
            scope = {}
            exec(code, globals(), scope)
            cu = torch.jit.CompilationUnit(code)
            inputs = get_lstm_inputs('cuda', training=False)
            # scripted result must match the eager (exec'd) reference
            self.assertEqual(cu.cell(*inputs), scope['cell'](*inputs))
            forward_graph = cu.cell.graph_for(*inputs)
            self.assertGraphContainsExactly(forward_graph, 'prim::FusionGroup', 1)
    # TODO: Fuser doesn't work at all when inputs require grad. Fix that
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    # By default, on Ampere or later GPUs, LSTM computes float tensors at TF32 precision.
    # We want float tensors to be computed at full precision in order to use the default precision
    @with_tf32_off
    def test_lstm_traced_cuda(self):
        """Traced LSTM cell: pointwise ops disappear into a single FusionGroup."""
        inputs = get_lstm_inputs('cuda')
        ge = self.checkTrace(LSTMCellF, inputs)
        graph = ge.graph_for(*inputs)
        # .check_not("aten::add") don't get pulled into FusionGroup because of BailOuts
        FileCheck().check_not("Chunk").check_not("aten::sigmoid") \
            .check_not("aten::tanh").check("FusionGroup").check_next("TupleConstruct") \
            .check_next("return").check_not("FusionGroup_2").run(str(graph))
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
@unittest.skip("Test is flaky, see https://github.com/pytorch/pytorch/issues/8746")
@enable_cpu_fuser
def test_lstm_traced_cpu(self):
inputs = get_lstm_inputs('cpu')
try:
ge = self.checkTrace(LSTMCellF, inputs)
graph = ge.graph_for(*inputs)
FileCheck.check("FusionGroup").run(str(graph))
except RuntimeError as e:
if 'Failed to compile' in e.args[0]:
warnings.warn('CPU fuser test has failed! This is not a hard failure, '
'because the kernels sometimes trigger bugs in compilers '
'(most notably GCC 7.2).')
raise unittest.SkipTest('Failed to compile') from e
else:
raise
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    def test_milstm_cuda(self):
        """Scripted MI-LSTM cell: one FusionGroup, and backward warmup runs."""
        inputs = get_milstm_inputs('cuda', training=True)
        module = self.checkScript(MiLSTMCell, inputs)
        forward_graph = module.graph_for(*inputs)
        self.assertGraphContainsExactly(
            forward_graph, 'prim::FusionGroup', 1, consider_subgraphs=True)
        FileCheck().check("DifferentiableGraph").check_next("TupleConstruct") \
            .check_next("return").check("FusionGroup").run(str(forward_graph))
        hy, cy = module(*inputs)
        warmup_backward((hy + cy).sum())
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    @unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.LEGACY, "borked on the legacy executor")
    def test_rand_cuda(self):
        """rand_like fuses and still produces fresh uniform [0, 1) values per call."""
        class M(torch.jit.ScriptModule):
            __constants__ = ['d']
            def __init__(self):
                super(M, self).__init__()
                self.d = torch.device('cuda')
            @torch.jit.script_method
            def create(self, x):
                return x * x + x + torch.rand_like(x)
        x = torch.zeros([3, 4, 5], dtype=torch.float, device='cuda')
        m = M()
        out1 = m.create(x)
        out2 = m.create(x)
        # two calls must not reuse the same random values
        self.assertNotEqual(out1, out2)
        self.assertTrue(torch.all(out1 >= 0))
        self.assertTrue(torch.all(out1 < 1))
        self.assertTrue(torch.all(out2 >= 0))
        self.assertTrue(torch.all(out2 < 1))
        self.assertAllFused(m.create.graph_for(x))
@staticmethod
def fn_test_relu(x, y):
return F.relu(x + .5 * y)
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    def test_relu_cuda(self):
        """The shared relu helper should fully fuse on CUDA."""
        x = torch.randn(4, 4, dtype=torch.float, device='cuda')
        y = torch.randn(4, 4, dtype=torch.float, device='cuda')
        ge = self.checkTrace(self.fn_test_relu, (x, y))
        self.assertAllFused(ge.graph_for(x, y))
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    def test_erf_cuda(self):
        """erf/erfc fuse, with and without grad (size bookkeeping excluded)."""
        def fn_test_erf(x):
            return F.relu(torch.erf(x) - torch.erfc(x))
        x = torch.randn(4, 4, dtype=torch.float, device='cuda')
        ge = self.checkTrace(fn_test_erf, (x,))
        self.assertAllFused(ge.graph_for(x))
        x.requires_grad_(True)
        ge = self.checkTrace(fn_test_erf, (x,))
        self.assertAllFused(ge.graph_for(x), except_for=("aten::size", "prim::BroadcastSizes",
                                                         "aten::_size_if_not_equal"))
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    @unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.LEGACY, "borked on the legacy executor")
    def test_rand_broadcast_cuda(self):
        """rand_like of a broadcast operand fuses and broadcasts the random values correctly."""
        def fn_test_rand(x, y):
            r = torch.rand_like(y)
            return r * x + x
        x = torch.randn(4, 4, dtype=torch.float, device='cuda')
        y = torch.randn(4, 4, dtype=torch.float, device='cuda')
        script_f = torch.jit.script(fn_test_rand)
        out = script_f(x, y)
        self.assertAllFused(script_f.graph_for(x, y))
        x.requires_grad_(True)
        out = script_f(x, y)
        self.assertAllFused(script_f.graph_for(x, y), except_for=("aten::size", "prim::BroadcastSizes",
                                                                  "aten::_size_if_not_equal"))
        # test that broadcasting random produces correct results
        x = torch.ones(4, 4, dtype=torch.float, device='cuda')
        y = torch.ones(4, dtype=torch.float, device='cuda')
        out = script_f(x, y)
        # the 1-D random row must be broadcast identically to every output row
        self.assertEqual(out[0], out[1])
    @unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
    @enable_cpu_fuser
    def test_scalar(self):
        """0-dim tensor operands should fuse on CPU."""
        def fn(x, y):
            return 2 * x + y
        x = torch.tensor(0.1, dtype=torch.float, device='cpu')
        y = torch.tensor(1, dtype=torch.float, device='cpu')
        ge = self.checkScript(fn, (x, y))
        self.assertAllFused(ge.graph_for(x, y))
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    def test_small_constant_cuda(self):
        """Very small float constants survive kernel codegen and still fuse."""
        def fn_test_small_constant(x, y):
            return (1e-8 * x + 5e-9 * y) * 1e8
        x = torch.randn(4, 4, dtype=torch.float, device='cuda')
        y = torch.randn(4, 4, dtype=torch.float, device='cuda')
        ge = self.checkTrace(fn_test_small_constant, (x, y))
        self.assertAllFused(ge.graph_for(x, y))
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    def test_tensor_scalar_ops_cuda(self):
        """Constant scalars fuse; scalars derived from tensor inputs do not (yet)."""
        def should_fuse(x):
            z = 3.
            y = x + z
            return x * y
        # XXX: right now we only support fusing scalars if
        # they're constant (#9940)
        def should_not_fuse(x, z):
            y = x + int(z)
            return x * y
        inputs = [torch.randn(2, 2, dtype=torch.float, device='cuda')]
        ge = self.checkScript(should_fuse, inputs)
        self.assertAllFused(ge.graph_for(*inputs))
        inputs = [
            torch.randn(2, 2, dtype=torch.float, device='cuda'),
            torch.tensor(3., dtype=torch.float, device='cuda'),
        ]
        ge = self.checkScript(should_not_fuse, inputs)
        self.assertGraphContainsExactly(
            ge.graph_for(*inputs), 'prim::FusionGroup', 0, consider_subgraphs=True)
    @unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
    @enable_cpu_fuser
    def test_where_and_typing(self):
        """torch.where plus a bool mask output fuses (tuple construction excluded)."""
        def f(x, y):
            mask = x > y
            res = torch.where(mask, x, y)
            return mask, res
        x = torch.randn(4, 4, dtype=torch.double)
        y = torch.randn(4, 4, dtype=torch.double)
        script_f = self.checkScript(f, (x, y))
        self.assertAllFused(script_f.graph_for(x, y), except_for={'prim::TupleConstruct'})
    @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "no half support with profiling on")
    def test_grad_sum_to_size_elimination(self):
        """_grad_sum_to_size appears in backward only when an input actually broadcast."""
        def my_broadcasted_cell(a, b, c):
            return (a + b) + c
        s1 = torch.randn(5, 1, requires_grad=True, device='cuda')
        s2 = torch.randn(5, 5, requires_grad=True, device='cuda')
        module = self.checkScript(my_broadcasted_cell, (s1, s1, s1), profiling=ProfilingMode.PROFILING)
        forward_graph = module.graph_for(s1, s1, s1)
        self.assertAllFused(forward_graph, except_for=("aten::size", "prim::BroadcastSizes",
                                                       "aten::_size_if_not_equal"))
        old_plans = set()
        for i in range(3):
            # if we have s2, then the s1 are _grad_sum_to_size'd
            args = s2 if i < 1 else s1, s2 if i < 2 else s1, s2
            args = [a.detach_().requires_grad_() for a in args]
            # recompile, so we don't trigger bailouts
            module = self.checkScript(my_broadcasted_cell, args, profiling=ProfilingMode.PROFILING)
            res = module(s2 if i < 1 else s1, s2 if i < 2 else s1, s2)
            warmup_backward(res.sum(), args)
            grads = torch.autograd.grad(res.sum(), args)
            for inp, gr in zip(args, grads):
                self.assertEqual(inp.shape, gr.shape)
            backward = None
            # this is a workaround for the backward graphs not being
            # in order for Python 2
            for g in all_backward_graphs(module):
                if str(g) not in old_plans:
                    assert backward is None
                    backward = g
                    old_plans.add(str(backward))
            num_grads = 1 if i > 0 else 0
            self.assertEqual(len([n for n in backward.nodes() if n.kind() == 'aten::_grad_sum_to_size']), num_grads)
# Standard PyTorch test entry point.
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_jit_fuser.py |
# Owner(s): ["module: primTorch"]
import torch
import os
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten
from torch._subclasses.meta_utils import MetaConverter
import torch.utils._python_dispatch
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
suppress_warnings,
TEST_WITH_ASAN,
run_tests,
skipIfSlowGradcheckEnv,
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
)
from torch.testing._internal.common_methods_invocations import op_db
from torchgen.utils import YamlLoader
from torchgen.model import OperatorName
import sys
import yaml
import atexit
import re
from collections import defaultdict
import unittest
import warnings
import weakref
# Short dtype aliases used throughout the tables in this file.
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
# Reverse mapping: dtype -> its short printable abbreviation.
dtype_abbrs = {
    torch.bfloat16: 'bf16',
    torch.float64: 'f64',
    torch.float32: 'f32',
    torch.float16: 'f16',
    torch.complex32: 'c32',
    torch.complex64: 'c64',
    torch.complex128: 'c128',
    torch.int8: 'i8',
    torch.int16: 'i16',
    torch.int32: 'i32',
    torch.int64: 'i64',
    torch.bool: 'b8',
    torch.uint8: 'u8',
}
@skipIfSlowGradcheckEnv
class TestMetaConverter(TestCase):
def assertSameVersionCounter(self, m1, m2):
# Cannot easily test m1 and m2 have same storage due to
# lack of Storage bindings. Use version counter.
vc = m1._version
self.assertEqual(m2._version, vc)
# Doing it this way ensures that we get VC bump even with leaves
with torch.no_grad():
m1._base.add_(3)
self.assertNotEqual(m1._version, vc)
self.assertEqual(m2._version, m1._version)
def test_view_of_non_leaf(self):
x = torch.randn(4, requires_grad=True)
y = x.neg()
z1 = y[:]
z2 = y[:]
to_meta = MetaConverter()
m1 = to_meta(z1)
m2 = to_meta(z2)
self.assertEqual(m1.shape, z1.shape)
self.assertTrue(m1._is_view())
self.assertFalse(m1._base.is_leaf)
self.assertSameVersionCounter(m1, m2)
def test_view_of_leaf(self):
x = torch.randn(4, requires_grad=True)
z1 = x[:]
z2 = x[:]
to_meta = MetaConverter()
m1 = to_meta(z1)
m2 = to_meta(z2)
self.assertEqual(m1.shape, z1.shape)
self.assertTrue(m1._is_view())
self.assertTrue(m1._base.is_leaf)
self.assertSameVersionCounter(m1, m2)
def test_leaf(self):
x = torch.randn(4, requires_grad=True)
to_meta = MetaConverter()
m = to_meta(x)
self.assertEqual(m.shape, x.shape)
self.assertTrue(m.is_leaf)
self.assertTrue(m.requires_grad)
def test_non_leaf(self):
x = torch.randn(4, requires_grad=True)
y = x.neg()
to_meta = MetaConverter()
m = to_meta(y)
self.assertEqual(m.shape, y.shape)
self.assertFalse(m.is_leaf)
self.assertTrue(m.requires_grad)
def test_requires_grad_false(self):
x = torch.randn(4, requires_grad=False)
to_meta = MetaConverter()
m = to_meta(x)
self.assertEqual(m.shape, x.shape)
self.assertFalse(m.requires_grad)
# NB: complex stuff is not actually exercised right now because
# we have a blanket exclusion for complex conversion
def test_view_as_real(self):
x = torch.randn(4, dtype=torch.complex64)
y = torch.view_as_real(x)
m = MetaConverter()(y)
self.assertEqual(m.shape, y.shape)
self.assertEqual(m.stride(), y.stride())
self.assertEqual(m.dtype, y.dtype)
def test_complex_noncontiguous_bug(self):
x = torch.randn((2, 2, 4, 9), dtype=torch.complex32)[:, 0, :, :]
m = MetaConverter()(x)
self.assertEqual(m.shape, x.shape)
self.assertEqual(m.stride(), x.stride())
self.assertEqual(m.dtype, x.dtype)
def test_view_as_complex(self):
x = torch.randn((4, 2), dtype=torch.float32)
y = torch.view_as_complex(x)
m = MetaConverter()(y)
self.assertEqual(m.shape, y.shape)
self.assertEqual(m.stride(), y.stride())
self.assertEqual(m.dtype, y.dtype)
def test_view_dtype(self):
x = torch.randn(4, dtype=torch.float32)
y = x.view(dtype=torch.int32)
m = MetaConverter()(y)
self.assertEqual(m.shape, y.shape)
self.assertEqual(m.stride(), y.stride())
self.assertEqual(m.dtype, y.dtype)
def test_imag(self):
x = torch.randn(4, dtype=torch.complex64)
y = x.imag
m = MetaConverter()(y)
self.assertEqual(m.shape, y.shape)
self.assertEqual(m.dtype, y.dtype)
self.assertEqual(m.stride(), y.stride())
self.assertEqual(m.storage_offset(), y.storage_offset())
    def test_weakref(self):
        # MetaConverter memoizes: converting the same tensor twice must
        # return the identical meta tensor, and memo entries must go away
        # once the source tensors die.
        x = torch.randn(4, 4, 4)
        m = MetaConverter()
        y = m(x)
        z = m(x)
        self.assertIs(y, z)
        self.assertEqual(len(m.tensor_memo), 1)
        self.assertEqual(len(m.storage_memo), 1)
        del x
        # tensor_memo entries expire as soon as the source tensor is
        # collected (NOTE(review): relies on prompt CPython refcounting);
        # storage entries are only reaped by an explicit check.
        self.assertEqual(len(m.tensor_memo), 0)
        m.check_for_expired_weak_storages()
        self.assertEqual(len(m.storage_memo), 0)
        # Same lifecycle with several tensors kept alive in a list.
        li = []
        for i in range(4):
            li.append(torch.rand([i]))
            m(li[-1])
        self.assertEqual(len(m.tensor_memo), 4)
        del li
        self.assertEqual(len(m.tensor_memo), 0)
        m.check_for_expired_weak_storages()
        self.assertEqual(len(m.storage_memo), 0)
def test_tensor_outlives_converter(self):
m = MetaConverter()
ref = weakref.ref(m)
x = torch.randn([4, 4])
y = m(x)
del m
self.assertIs(ref(), None)
def assert_ref_meta_equal(test_case, meta_rs, rs, msg_callable):
    """Check that the flattened meta outputs agree with the real outputs on
    all tensor metadata (dtype, shape, storage offset, requires_grad,
    conj/neg bits).  Raises RuntimeError, via msg_callable, on mismatch."""
    flat_meta, _ = tree_flatten(meta_rs)
    flat_real, _ = tree_flatten(rs)
    test_case.assertEqual(len(flat_meta), len(flat_real))
    for i, (meta_r, r) in enumerate(zip(flat_meta, flat_real)):
        def test_assert(cond, msg):
            if not cond:
                raise RuntimeError(f"output {i}: {msg_callable(msg)}")
        # non-tensor outputs carry no meta metadata to compare
        if not isinstance(r, torch.Tensor):
            continue
        test_assert(isinstance(meta_r, torch.Tensor), f"but real {i}th result is Tensor")
        test_assert(meta_r.dtype == r.dtype, f"but real dtype was {r.dtype}")
        test_assert(meta_r.shape == r.shape, f"but real shape was {r.shape}")
        # NOTE: stride checking is currently disabled
        # See https://github.com/pytorch/pytorch/issues/78050
        test_assert(
            meta_r.storage_offset() == r.storage_offset(),
            f"but real storage_offset was {r.storage_offset()}")
        test_assert(meta_r.requires_grad == r.requires_grad, f"but real requires_grad was {r.requires_grad}")
        test_assert(meta_r.is_conj() == r.is_conj(), f"but real is_conj was {r.is_conj()}")
        test_assert(meta_r.is_neg() == r.is_neg(), f"but real is_neg was {r.is_neg()}")
# This environment variable controls whether or not we print expected failure
# lists at the end of a test suite run. The intended usage looks like this:
#
# 1. Run `PYTORCH_COLLECT_EXPECT=1 python test/test_meta.py` on a CUDA build
# of PyTorch that has LAPACK/MAGMA installed. You can filter `-k test_meta`
# or `-k test_dispatch_meta` to only focus on one or another list
# 2. Given the printed skip/xfail list, add them to the corresponding lists;
# torch.* entries go in meta_function and aten.* entries go in meta_dispatch.
# If there are preexisting entries, you need to merge in the entries.
#
# This is somewhat manual but typically you shouldn't need to do this, unless
# you've made a major change (e.g., added a new dtype to PyTorch) and need to
# refresh the lists. If you want to do it from scratch, just clear out the
# preexisting lists before running.
#
# WARNING: Python dict literals will silently ignore duplicate keys
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
# Per-op outcome records accumulated by run_meta_crossref: op -> set of dtypes.
seen_succeeded = {}
seen_failed = {}
# op -> set of "Could not run ..." op names scraped from NotImplementedError
failed_reasons = defaultdict(set)
def print_seen():
    """Print the collected expected_failures/skips dict literals.

    Registered via atexit when PYTORCH_COLLECT_EXPECT=1; the output is meant
    to be pasted directly into the meta_function_*/meta_dispatch_* tables.
    """
    expected_failures = []
    skips = []
    def fmt_dtypes(dtypes):
        # render a dtype set as a stable (sorted) abbreviated set literal
        r = ', '.join(sorted(dtype_abbrs[d] for d in dtypes))
        return '{' + r + '}'
    for op, failed_dtypes in seen_failed.items():
        ops = resolve_name(op)
        succeeded_dtypes = seen_succeeded.get(op, set())
        # always fails -> expected failure; sometimes passes -> skip
        expected_failures_dtypes = failed_dtypes - succeeded_dtypes
        skips_dtypes = failed_dtypes & succeeded_dtypes
        reasons = ""
        if failed_reasons[op]:
            reasons = " # " + ", ".join(sorted(failed_reasons[op]))
        if expected_failures_dtypes:
            expected_failures.append(f" {ops}: {fmt_dtypes(expected_failures_dtypes)},{reasons}")
        if skips_dtypes:
            skips.append(f" {ops}: {fmt_dtypes(skips_dtypes)},")
    expected_failures.sort()
    skips.sort()
    nl = '\n'
    print(f"""\
expected_failures = {{
{nl.join(expected_failures)}
}}
skips = {{
{nl.join(skips)}
}}
""")
# Only register the dump when collecting, so normal runs are unaffected.
if COLLECT_EXPECT:
    atexit.register(print_seen)
# Success forces pass; failure forces fail; skip unconditionally skips testing
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
# Like repr(), but every tensor in the structure also shows its strides.
def verbose_print(e):
    class _Raw:
        # wrapper whose repr() is the raw pre-formatted string
        def __init__(self, s):
            self.s = s
        def __repr__(self):
            return self.s
    def _annotate(t):
        if not isinstance(t, torch.Tensor):
            return t
        return _Raw(f"{t} stride={t.stride()}")
    return repr(tree_map(_annotate, e))
def run_meta_crossref(
    test_case,
    test_expect,
    func,
    args,
    kwargs,
    *,
    dtype,
    device_type,
):
    """Run func for real, then re-run it on meta copies of its inputs and
    check the meta outputs agree with the real ones (assert_ref_meta_equal).

    test_expect (TestExpect) controls whether meta failure is required
    (XFAILURE), ignored (SKIP), or forbidden (SUCCESS).  Always returns the
    real result ``rs``.  Also records per-op outcomes into the module-level
    seen_succeeded/seen_failed/failed_reasons dicts for COLLECT_EXPECT runs.
    """
    to_meta = MetaConverter()
    do_meta = test_expect is not TestExpect.SKIP
    if do_meta:
        try:
            meta_args = tree_map(to_meta, args)
            meta_kwargs = tree_map(to_meta, kwargs)
        except Exception as e:
            raise RuntimeError(
                f"failed to convert args to meta; "
                f"originally (*{args}, **{kwargs})") from e
    rs = func(*args, **kwargs)
    # TODO: also handle cases where func raise an exception
    # For now, only attempt if we managed to convert all tensor types
    # (if any of them failed, we're in a mixed device situation and
    # this isn't well supported)
    if do_meta and to_meta.successful():
        # Special cases
        if func is torch.tensor_split:
            # Use original indices_or_sections, this argument is data dependent
            meta_args = (meta_args[0], args[1]) + meta_args[2:]
        elif func is torch.ops.aten.repeat_interleave.Tensor:
            if kwargs.get("output_size", None) is None:
                meta_args = args
        elif func is torch.ops.aten.index.Tensor:
            # Don't convert boolean tensors to meta as they will have nonzero
            # called on them
            indices = []
            for meta_index, real_index in zip(meta_args[1], args[1]):
                if meta_index is not None and meta_index.dtype in [torch.int8, torch.bool]:
                    indices.append(real_index)
                else:
                    indices.append(meta_index)
            meta_args = (meta_args[0], indices)
        # factory-style calls need their device kwarg redirected to meta
        if kwargs.get("device", None) is not None:
            meta_kwargs["device"] = "meta"
        try:
            # Suppress warnings, this doesn't matter for test_meta.py
            # but it does matter if you want to use this decorator
            # for cross-ref testing, as some tests may be looking at
            # errors
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                meta_rs = func(*meta_args, **meta_kwargs)
        except Exception as e:
            if test_expect is TestExpect.XFAILURE:
                return rs
            seen_failed.setdefault(func, set()).add(dtype)
            if isinstance(e, NotImplementedError):
                m = RE_NOT_IMPLEMENTED_MSG.search(e.args[0])
                if m:
                    failed_reasons[func].add(m.group(1))
            if COLLECT_EXPECT:
                return rs
            raise RuntimeError(f"""\
failed to run: {resolve_name(func)}(
*{verbose_print(meta_args)},
**{verbose_print(meta_kwargs)}
)""") from e
        else:
            try:
                delim = ',\n  '
                assert_ref_meta_equal(test_case, meta_rs, rs, lambda msg: f"""\
meta disagrees with real impl:
{resolve_name(func)}(
  {delim.join(map(verbose_print, meta_args))},
  {delim.join(k + ": " + verbose_print(v) for k, v in meta_kwargs.items())}
) = (
  {verbose_print(meta_rs)}
)
{msg}
""")
            except Exception:
                if test_expect is TestExpect.XFAILURE:
                    return rs
                seen_failed.setdefault(func, set()).add(dtype)
                if COLLECT_EXPECT:
                    return rs
                raise
            else:
                seen_succeeded.setdefault(func, set()).add(dtype)
    # an op marked XFAILURE must actually have failed (unless collecting)
    if test_expect is TestExpect.XFAILURE and not COLLECT_EXPECT:
        raise RuntimeError(f"unexpected success {resolve_name(func)}")
    return rs
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32},
torch.count_nonzero : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.linalg.householder_product : {f64, c64, c128, f32},
torch.linalg.solve_triangular : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.matrix_exp : {f64, c128, c64, bf16, f32},
torch.nn.functional.unfold : {f64, f16, c128, c64, bf16, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.ormqr : {f64, c64, c128, f32},
torch.repeat_interleave : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.take : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.bucketize : {f64, i32, i64, f16, u8, i16, bf16, i8, f32},
torch.frexp : {f64, f16, bf16, f32},
torch.functional.unique : {f64, i32, i64, u8, i16, bf16, b8, i8, f32},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, bf16, b8, i8, f32},
torch.histc : {f64, bf16, f32},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.kthvalue : {f64, i32, i64, u8, i16, bf16, i8, f32},
torch.logcumsumexp : {f64, bf16, f32},
torch.median : {f64, i32, i64, u8, i16, bf16, i8, f32},
torch.mode : {f64, i32, i64, f16, u8, i16, bf16, b8, i8, f32},
torch.multinomial : {f64, bf16, f32},
torch.mvlgamma : {f64, i32, i64, u8, i16, bf16, i8, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f64, bf16, f32},
torch.nn.functional.grid_sample : {f64, f32},
torch.nn.functional.max_pool3d : {f64, f32},
torch.nn.functional.max_pool3d_with_indices : {f64, f32},
torch.nn.functional.max_unpool1d : {f64, f32},
torch.nn.functional.max_unpool2d : {f64, f32},
torch.nn.functional.max_unpool3d : {f64, f32},
torch.nn.functional.multi_margin_loss : {f64, f32},
torch.nn.functional.multilabel_margin_loss : {f64, f32},
torch.nn.functional.one_hot : {i64},
torch.nn.functional.pdist : {f64, f32},
torch.nn.functional.rrelu : {f64, bf16, f32},
torch.polar : {f64, f32},
torch.segment_reduce : {f64, f16, bf16, f32},
torch.searchsorted : {f64, i32, i64, f16, u8, i16, bf16, i8, f32},
torch.symeig : {f64, f32, c128, c64},
torch.cholesky : {f64, f32, c128, c64},
torch.cholesky_inverse : {f64, f32, c128, c64},
torch.cholesky_solve : {f64, f32, c128, c64},
torch.eig : {f64, f32, c128, c64},
torch.linalg.eig : {f64, f32, c128, c64},
torch.linalg.eigvals : {f64, f32, c128, c64},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
"""
# This is some sample code for how we could dump these dicts into YAML
# file for easier reading/writing
import yaml
print(yaml.dump(
{resolve_name(k): [dtype_abbrs[d] for d in v]
for k, v in meta_function_expected_failures.items()}, default_flow_style=None))
import sys
sys.exit()
"""
# torch.*-level ops that sometimes pass and sometimes fail the meta
# cross-ref test for the listed dtypes; these are skipped outright.
# (Duplicate dtype entries in set literals are redundant and removed:
# c32 appeared twice for fft.ifft2, f16 twice for fft.ihfft2.)
meta_function_skips = {
    torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
    torch.Tensor.matmul : {f64, f32, c128, c64},
    torch.fft.fft2 : {i8, i64, u8, c128, b8, f64, i16, f32, i32, c64, c32, f16},
    torch.fft.fft : {i8, i64, u8, c128, b8, f64, i16, f32, i32, c64, c32, f16},
    torch.fft.fftn : {i8, i64, u8, c128, b8, f64, i16, f32, i32, c64, c32, f16},
    torch.fft.ifft2 : {i8, i64, u8, c128, b8, f64, i16, f32, i32, c64, c32, f16},
    torch.fft.ifft : {c128, c64, c32, f16},
    torch.fft.ifftn : {i8, i64, u8, c128, b8, f64, i16, f32, i32, c64, c32, f16},
    torch.fft.hfft: {f16},
    torch.fft.hfftn: {f16},
    torch.fft.hfft2: {f16},
    torch.fft.ihfft: {f16},
    torch.fft.ihfft2 : {i8, i64, u8, f64, b8, f32, i32, i16, f16, c32},
    torch.fft.ihfftn : {i8, i64, u8, f64, b8, f32, i32, i16, c32, f16},
    torch.fft.irfft2 : {f16},
    torch.fft.irfft : {f16},
    torch.fft.irfftn : {f16},
    torch.fft.rfft2 : {i8, i64, u8, f64, b8, f32, i32, i16, c32, f16},
    torch.fft.rfft : {i8, i64, u8, f64, b8, f32, i32, i16, c32, f16},
    torch.fft.rfftn : {i8, i64, u8, f64, b8, f32, i32, i16, c32, f16},
    torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
    torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
    torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
    torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
    torch.functional.stft : {c128, f32, c64, f64},
    torch.functional.tensordot : {bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
    torch.inner : {bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
    torch.linalg.lu_solve : {c128, c64},
    torch.linalg.matrix_norm : {c128, f32, c64, f64},
    torch.linalg.matrix_power : {c128, c64},
    torch.linalg.matrix_rank : {c128, c64},
    torch.linalg.svd : {c128, c64},
    torch.matmul : {bf16, c128, f64, f32, f16, c64},
    torch.nanquantile : {f64, f32},
    torch.nn.functional.batch_norm : {f64, f32},
    torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
    torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
    torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
    torch.svd : {c128, c64},
    torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
    torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
    torch.aminmax : {i8, i64, u8, f64, b8, f32, i32, i16},
    torch.cummax : {bf16, i8, i64, u8, f64, b8, f32, i32, i16},
    torch.cummin : {bf16, i8, i64, u8, f64, b8, f32, i32, i16},
    torch.diff : {b8},
    torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
    torch.functional.cdist : {f64, f32},
    torch.nanmean : {bf16, f64, f32, f16},
    torch.nn.functional.cross_entropy : {bf16, f64, f32},
    torch.nn.functional.interpolate : {bf16, f64, f32, u8},
    torch.nn.functional.nll_loss : {bf16, f64, f32},
    torch.linalg.pinv : {f64, f32},
    torch.linalg.cond : {c128, c64, f32, f64},
    torch.linalg.vander: {c128, c64, f32, f64, i16, i32, i64, i8, u8},
    torch.linalg.vecdot : {bf16, f64, f32, f16},
    torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
}
# Device-specific overrides for the function-level tables above.
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
}
meta_function_device_expected_failures['cuda'] = {
    torch.corrcoef: {bf16, f16},  # aten::_local_scalar_dense
    torch.cov: {f16},  # aten::_local_scalar_dense
    torch.functional.unique: {f16},  # aten::_unique2, aten::unique_dim
    torch.functional.unique_consecutive: {f16},  # aten::unique_consecutive
    torch.geqrf: {f32, f64},  # aten::geqrf
    torch.histc: {i16, i32, i64, i8},  # aten::histc, aten::histc.out
    torch.kthvalue: {f16},  # aten::kthvalue.values
    torch.linalg.householder_product: {f32, f64},  # aten::linalg_householder_product, aten::linalg_householder_product.out
    torch.linalg.solve_triangular: {f32, f64},  # aten::linalg_solve_triangular, aten::linalg_solve_triangular.out
    torch.logcumsumexp: {bf16, f16},  # aten::_logcumsumexp, aten::_logcumsumexp.out
    torch.matrix_exp: {f16},  # aten::linalg_matrix_exp
    torch.median: {f16},  # aten::median, aten::median.dim_values
    torch.multinomial: {f16},  # aten::multinomial, aten::multinomial.out
    torch.mvlgamma: {f16},  # aten::_local_scalar_dense, aten::mvlgamma.out
    torch.nn.functional.gaussian_nll_loss: {f16},  # aten::_local_scalar_dense
    torch.nn.functional.grid_sample: {f16},  # aten::grid_sampler_2d, aten::grid_sampler_3d
    torch.nn.functional.max_pool3d: {bf16, f16},  # aten::max_pool3d_with_indices
    torch.nn.functional.max_pool3d_with_indices: {bf16, f16},  # aten::max_pool3d_with_indices
    torch.nn.functional.max_unpool1d: {f16},  # aten::max_unpool2d
    torch.nn.functional.max_unpool2d: {f16},  # aten::max_unpool2d
    torch.nn.functional.max_unpool3d: {f16},  # aten::max_unpool3d
    torch.nn.functional.multi_margin_loss: {bf16, f16},  # aten::multi_margin_loss
    torch.nn.functional.multilabel_margin_loss: {bf16, f16},  # aten::multilabel_margin_loss_forward
    torch.nn.functional.rrelu: {f16},  # aten::rrelu_with_noise
    torch.ormqr: {f32, f64},  # aten::ormqr, aten::ormqr.out
}
meta_function_device_skips['cuda'] = {
    torch.cummax: {f16},
    torch.cummin: {f16},
    torch.functional.tensordot: {f16},
    torch.inner: {f16},
    torch.linalg.matrix_power: {f32, f64},
    torch.linalg.matrix_rank: {f32, f64},
    torch.linalg.svd: {f32, f64},
    torch.nn.functional.cross_entropy: {f16},
    torch.nn.functional.interpolate: {f16},
    torch.nn.functional.nll_loss: {f16},
    torch.svd: {f32, f64},
}
# This is a __torch_function__ mode that, when enabled, interposes every
# Torch API call and runs the operator as normal, and then reruns it
# with meta inputs, and then checks that everything about the output agrees.
# Most of the logic deals with faithfully replicating the original tensor
# as a meta tensor, which is nontrivial because there are a lot of subsystems
# that may potentially be exercised.
#
# That being said, this class is a little overkill for what it is doing in
# this test file (since I could have just inlined __torch_function__ on the
# OpInfo call, and OpInfos generally have very regular inputs), but it will be
# useful for more comprehensive testing e.g., as seen in
# https://github.com/pytorch/pytorch/pull/75994 The big benefit is it is
# A LOT more efficient than torch dispatch mode (at the cost of less coverage)
class MetaCrossRefFunctionMode(torch.overrides.TorchFunctionMode):
    """TorchFunctionMode that cross-checks every torch.* call against meta.

    Each intercepted call is forwarded to run_meta_crossref with the
    expectation (skip / xfail / success) looked up from the module-level
    meta_function_* tables for this func, dtype and device type.
    """
    test_case: TestCase
    device_type: str
    dtype: torch.dtype
    def __init__(self, test_case, *, device, dtype):
        self.test_case = test_case
        self.device_type = torch.device(device).type
        self.dtype = dtype
    def __torch_function__(self, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}
        # Tracing / script methods can't be re-run on meta; run them plainly.
        if torch.jit.is_tracing() or isinstance(func, torch.ScriptMethod):
            return func(*args, **kwargs)
        # Skips take precedence over expected failures.
        if self.dtype in meta_function_skips.get(func, set()):
            test_expect = TestExpect.SKIP
        elif self.dtype in meta_function_device_skips[self.device_type].get(func, set()):
            test_expect = TestExpect.SKIP
        elif self.dtype in meta_function_expected_failures.get(func, set()):
            test_expect = TestExpect.XFAILURE
        elif self.dtype in meta_function_device_expected_failures[self.device_type].get(func, set()):
            test_expect = TestExpect.XFAILURE
        else:
            test_expect = TestExpect.SUCCESS
        return run_meta_crossref(
            self.test_case, test_expect, func, args,
            kwargs, dtype=self.dtype, device_type=self.device_type
        )
# shorthand for the aten op namespace used throughout the dispatch tables
aten = torch.ops.aten
# these always fail
meta_dispatch_expected_failures = {
    aten.allclose.default: {f16, bf16, f32, f64, c64, c128},  # NotImplementedError: 'aten::_local_scalar_dense'
    aten._fft_c2c.out : {f16, c64, i8, f64, c128, i32, i64, f32, c32, b8, i16, u8},
    aten._fft_r2c.out : {f16, i8, f64, i32, i64, f32, b8, i16, u8},
    aten.cholesky.default : {c64, c128, f64, f32},
    aten.cholesky.out : {c64, c128, f64, f32},
    aten.cholesky_inverse.default : {c64, c128, f64, f32},
    aten.cholesky_inverse.out : {c64, c128, f64, f32},
    aten.cholesky_solve.default : {c64, c128, f64, f32},
    aten.cholesky_solve.out : {c64, c128, f64, f32},
    aten.count_nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
    aten.count_nonzero.dim_IntList : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
    aten.eig.default : {c64, c128, f64, f32},
    aten.geqrf.default : {c64, c128, f64, f32},
    aten.im2col.default : {c64, bf16, f32, f16, f64, c128},
    aten.linalg_eig.default : {c64, c128, f64, f32},
    aten.linalg_householder_product.default : {c64, c128, f64, f32},
    aten.linalg_householder_product.out : {c64, c128, f64, f32},
    aten.linalg_lstsq.default : {c64, c128, f64, f32},
    aten.linalg_matrix_exp.default : {c64, bf16, f32, f64, c128},
    aten.linalg_solve_triangular.default : {c64, c128, f64, f32},
    aten.linalg_solve_triangular.out : {c64, c128, f64, f32},
    aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
    aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
    aten.native_group_norm.default : {bf16},
    aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
    aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
    aten.ormqr.default : {c64, c128, f64, f32},
    aten.ormqr.out : {c64, c128, f64, f32},
    aten.polar.out : {f32, f64},
    aten.symeig.default : {c64, c128, f64, f32},
    aten.take.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
    aten.take.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
    aten.tensordot.out : {c64, i8, f64, c128, i64, bf16, f32, i32, i16, u8},
    aten.to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
    aten.to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
    aten._ctc_loss.default : {f32, f64},
    aten._histogramdd_bin_edges.default : {f32, f64},
    aten._histogramdd_from_bin_cts.default : {f32, f64},
    aten._histogramdd_from_bin_tensors.default : {f32, f64},
    aten._local_scalar_dense.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
    aten._pdist_forward.default : {f32, f64},
    aten._unique2.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
    aten.bincount.default : {i64, i8, i32, i16, u8},
    aten.bucketize.Tensor : {f16, i8, f64, i64, bf16, f32, i32, i16, u8},
    aten.bucketize.Tensor_out : {f16, i8, f64, i64, bf16, f32, i32, i16, u8},
    aten.col2im.default : {c64, f32, f64, c128},
    aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
    aten.frexp.Tensor : {bf16, f32, f16, f64},
    aten.grid_sampler_2d.default : {f32, f64},
    aten.grid_sampler_3d.default : {f32, f64},
    aten.histc.default : {bf16, f32, f64},
    aten.histc.out : {bf16, f32, f64},
    aten.histogram.bin_ct : {f32, f64},
    aten.histogram.bins_tensor : {f32, f64},
    aten.kthvalue.default : {i8, f64, i64, bf16, f32, i32, i16, u8},
    aten.log_sigmoid_forward.output : {bf16, f32, f64},
    aten.logcumsumexp.default : {bf16, f32, f64},
    aten.logcumsumexp.out : {bf16, f32, f64},
    aten.max_pool3d_with_indices.default : {f32, f64},
    aten.max_unpool2d.default : {f32, f64},
    aten.max_unpool3d.default : {f32, f64},
    aten.median.default : {i8, f64, i64, bf16, f32, i32, i16, u8},
    aten.median.dim : {i8, f64, i64, bf16, f32, i32, i16, u8},
    aten.mode.default : {f16, i8, f64, i64, bf16, f32, i32, b8, i16, u8},
    aten.multi_margin_loss.default : {f32, f64},
    aten.multilabel_margin_loss_forward.default : {f32, f64},
    aten.multinomial.default : {bf16, f32, f64},
    aten.multinomial.out : {bf16, f32, f64},
    aten.mvlgamma.default : {i8, f64, i64, bf16, f32, i32, i16, u8},
    aten.mvlgamma.out : {i8, f64, i64, bf16, f32, i32, i16, u8},
    aten.nll_loss2d_forward.default : {bf16, f32, f64},
    aten.polar.default : {f32, f64},
    aten.rrelu_with_noise.default : {bf16, f32, f64},
    aten.searchsorted.Tensor : {f16, i8, f64, i64, bf16, f32, i32, i16, u8},
    aten.searchsorted.Tensor_out : {f16, i8, f64, i64, bf16, f32, i32, i16, u8},
    aten.segment_reduce.default : {bf16, f32, f16, f64},
    aten.unique_consecutive.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
    aten.unique_dim.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
    aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
# these sometimes pass and sometimes fail
meta_dispatch_skips = {
    aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},  # at::nonzero doesn't have a Meta function
    aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
    aten.aminmax.default: {i64, u8, b8, f32, i8, f64, i16, i32},
    aten.cummax.default: {i64, bf16, u8, b8, f32, i8, f64, i16, i32},
    aten.cummin.default: {i64, bf16, u8, b8, f32, i8, f64, i16, i32},
    aten.linalg_lu_solve.default: {c32, c64, c128},
    aten.linalg_lu_solve.out: {c32, c64, c128},
    aten.linalg_pinv.atol_rtol_tensor: {f32, f64},
    aten.linalg_pinv.atol_rtol_tensor_out: {f32, f64},
    aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
    aten.empty.SymInt: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
}
# Device-specific overrides for the dispatch-level tables above.
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cuda'] = {
    aten._unique2.default: {f16},  # aten::_unique2
    aten._use_cudnn_ctc_loss.default: {f32, f64},  # aten::_use_cudnn_ctc_loss
    aten.cudnn_grid_sampler.default: {f16, f32, f64},  # aten::cudnn_grid_sampler
    aten.geqrf.default: {f32, f64},  # aten::geqrf
    aten.grid_sampler_2d.default: {f16},  # aten::grid_sampler_2d
    aten.grid_sampler_3d.default: {f16},  # aten::grid_sampler_3d
    aten.histc.default: {i16, i32, i64, i8},  # aten::histc
    aten.histc.out: {i16, i32, i64, i8},  # aten::histc.out
    aten.kthvalue.default: {f16},  # aten::kthvalue.values
    aten.linalg_eigvalsh.out: {f32, f64},  # aten::linalg_eigvalsh.out
    aten.linalg_householder_product.default: {f32, f64},  # aten::linalg_householder_product
    aten.linalg_householder_product.out: {f32, f64},  # aten::linalg_householder_product.out
    aten.linalg_matrix_exp.default: {f16},  # aten::linalg_matrix_exp
    aten.linalg_solve_triangular.default: {f32, f64},  # aten::linalg_solve_triangular
    aten.linalg_solve_triangular.out: {f32, f64},  # aten::linalg_solve_triangular.out
    aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
    aten.log_sigmoid_forward.output: {f16},  # aten::log_sigmoid_forward.output
    aten.logcumsumexp.default: {bf16, f16},  # aten::_logcumsumexp
    aten.logcumsumexp.out: {bf16, f16},  # aten::_logcumsumexp.out
    aten.max_pool3d_with_indices.default: {bf16, f16},  # aten::max_pool3d_with_indices
    aten.max_unpool2d.default: {f16},  # aten::max_unpool2d
    aten.max_unpool3d.default: {f16},  # aten::max_unpool3d
    aten.median.default: {f16},  # aten::median
    aten.median.dim: {f16},  # aten::median.dim_values
    aten.multi_margin_loss.default: {bf16, f16},  # aten::multi_margin_loss
    aten.multilabel_margin_loss_forward.default: {bf16, f16},  # aten::multilabel_margin_loss_forward
    aten.multinomial.default: {f16},  # aten::multinomial
    aten.multinomial.out: {f16},  # aten::multinomial.out
    aten.mvlgamma.default: {f16},  # aten::_local_scalar_dense
    aten.mvlgamma.out: {f16},  # aten::mvlgamma.out
    aten.native_group_norm.default: {bf16, f16},
    aten.nll_loss2d_forward.default: {f16},  # aten::nll_loss2d_forward
    aten.ormqr.default: {f32, f64},  # aten::ormqr
    aten.ormqr.out: {f32, f64},  # aten::ormqr.out
    aten.rrelu_with_noise.default: {f16},  # aten::rrelu_with_noise
    aten.tensordot.out: {f16},  # aten::tensordot.out
    aten.unique_consecutive.default: {f16},  # aten::unique_consecutive
    aten.unique_dim.default: {f16},  # aten::unique_dim
    aten.upsample_nearest3d.vec: {f16},  # aten::upsample_nearest3d.vec
}
# CUDA-only dispatch-level skips.  NB: the duplicated aten.softmax.int entry
# that used to be here was removed — Python dict literals silently ignore
# duplicate keys (see the WARNING above), so it was dead text.
meta_dispatch_device_skips['cuda'] = {
    aten._conj.default: {c32, f16},  # file issue
    aten._linalg_svd.default: {c64, c128},  # aten::linalg_eigvalsh.out
    aten.cudnn_batch_norm.default: {f32, f64},
    aten.log_softmax.int : {c32, c64},
    aten.softmax.int : {c32, c64},
    aten.cummax.default: {f16},
    aten.cummin.default: {f16},
    # ROCm stuff; technically this should be expected failure but it's
    # not worth it; these should get unified anyway
    aten.miopen_batch_norm.default: {f32},
}
class MetaCrossRefDispatchMode(torch.utils._python_dispatch.TorchDispatchMode):
    """TorchDispatchMode analogue of MetaCrossRefFunctionMode: cross-checks
    every dispatched aten op against its meta implementation, using the
    meta_dispatch_* expectation tables."""
    test_case: TestCase
    device: torch.device
    dtype: torch.dtype
    def __init__(self, test_case, *, device, dtype):
        self.test_case = test_case
        # save TLS
        self.precision = test_case.precision
        self.rel_tol = test_case.rel_tol
        self.device_type = torch.device(device).type
        self.dtype = dtype
    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}
        # restore the tolerances captured in __init__ (NOTE(review):
        # presumably dispatch can observe different TLS — confirm)
        self.test_case.precision = self.precision
        self.test_case.rel_tol = self.rel_tol
        # Skips take precedence over expected failures.
        if self.dtype in meta_dispatch_skips.get(func, set()):
            test_expect = TestExpect.SKIP
        elif self.dtype in meta_dispatch_device_skips[self.device_type].get(func, set()):
            test_expect = TestExpect.SKIP
        elif self.dtype in meta_dispatch_expected_failures.get(func, set()):
            test_expect = TestExpect.XFAILURE
        elif self.dtype in meta_dispatch_device_expected_failures[self.device_type].get(func, set()):
            test_expect = TestExpect.XFAILURE
        else:
            test_expect = TestExpect.SUCCESS
        return run_meta_crossref(
            self.test_case,
            test_expect,
            func,
            args,
            kwargs,
            dtype=self.dtype,
            device_type=self.device_type,
        )
# NB: we're running these tests only on CUDA because there are some
# inconsistencies between CUDA and CPU, and running on CUDA makes it easier
# to ignore the CPU case when inconsistencies arise. Ideally we deal
# with the inconsistencies but this takes time.
@skipIfSlowGradcheckEnv
class TestMeta(TestCase):
    @unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
    @onlyCUDA
    @skipIfCrossRef
    @suppress_warnings
    @ops(op_db)
    def test_meta(self, device, dtype, op):
        # run the OpInfo sample inputs, cross-referencing them with the
        # meta implementation and check the results are the same. All
        # the heavy lifting happens in MetaCrossRefFunctionMode
        func = op.get_op()
        samples = op.sample_inputs(device, dtype, requires_grad=False)
        for sample_input in samples:
            args = [sample_input.input] + list(sample_input.args)
            kwargs = sample_input.kwargs
            with MetaCrossRefFunctionMode(self, dtype=dtype, device=device):
                expected = func(*args, **kwargs)
                # also exercise the out= variant when the op supports it
                if isinstance(expected, torch.Tensor) and op.supports_out:
                    func(*args, **kwargs, out=expected)
    @unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
    @onlyCUDA
    @skipIfCrossRef
    @suppress_warnings
    @ops(op_db)
    def test_dispatch_meta(self, device, dtype, op):
        # same as test_meta, but cross-referencing at the aten dispatch
        # level via MetaCrossRefDispatchMode
        func = op.get_op()
        samples = op.sample_inputs(device, dtype, requires_grad=False)
        for sample_input in samples:
            args = [sample_input.input] + list(sample_input.args)
            kwargs = sample_input.kwargs
            with MetaCrossRefDispatchMode.push(self, dtype=dtype, device=device):
                expected = func(*args, **kwargs)
                if isinstance(expected, torch.Tensor) and op.supports_out:
                    func(*args, **kwargs, out=expected)
    def test_empty_quantized(self):
        # quantized dtypes are representable on the meta device too
        r = torch.empty(2 ** 52, device='meta', dtype=torch.qint8)
        self.assertEqual(r.device.type, 'meta')
    def test_map_location_deserialize(self):
        # torch.load(..., map_location='meta') produces a meta tensor with
        # matching metadata and no real storage
        import io
        t = torch.rand(10)
        b = io.BytesIO()
        torch.save(t, b)
        b.seek(0)
        r = torch.load(b, map_location=torch.device("meta"))
        self.assertEqual(r.device.type, 'meta')
        self.assertEqual(r.shape, t.shape)
        self.assertEqual(r.dtype, t.dtype)
        self.assertEqual(r.storage().data_ptr(), 0)
instantiate_device_type_tests(TestMeta, globals())
def print_op_str_if_not_supported(op_str):
    """Print an aten overload if the meta tests skip or expect it to fail."""
    parsed = OperatorName.parse(op_str)
    packet = getattr(torch.ops.aten, str(parsed.name))
    overload = getattr(packet, parsed.overload_name or "default")
    skipped = (overload in meta_dispatch_skips
               or overload in meta_dispatch_device_skips['cuda'])
    if skipped:
        print(f"{overload} # SKIP")
    failing = (overload in meta_dispatch_expected_failures
               or overload in meta_dispatch_device_expected_failures['cuda'])
    if failing:
        print(overload)
if __name__ == "__main__":
    # PYTORCH_COMPARE_XLA=<yaml>: read an XLA codegen yaml and report which
    # of its ops the meta tests skip or expect to fail, then exit.
    COMPARE_XLA = os.getenv('PYTORCH_COMPARE_XLA', None)
    if COMPARE_XLA is not None:
        with open(COMPARE_XLA, "r") as f:
            d = yaml.load(f, Loader=YamlLoader)
            ops = d.get("full_codegen", []) + d.get("supported", []) + d.get("autograd", [])
            for op_str in ops:
                print_op_str_if_not_supported(op_str)
            sys.exit(0)
    # PYTORCH_COMPARE_TEXT=<file>: same report, one op string per line.
    COMPARE_TEXT = os.getenv('PYTORCH_COMPARE_TEXT', None)
    if COMPARE_TEXT is not None:
        with open(COMPARE_TEXT, "r") as f:
            for op_str in f:
                print_op_str_if_not_supported(op_str.strip())
            sys.exit(0)
    run_tests()
| pytorch-master | test/test_meta.py |
# Owner(s): ["module: primTorch"]
from functools import partial
from itertools import product
from warnings import catch_warnings
import unittest
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import parametrize, run_tests, TestCase, TEST_SCIPY
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCUDA,
skipCUDAIfRocm,
dtypes,
)
from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input
import torch._prims as prims
from torch._prims.executor import make_traced
import torch._refs as refs
if TEST_SCIPY:
import scipy.special
class TestPrims(TestCase):
    @onlyCUDA
    @skipCUDAIfRocm
    @dtypes(torch.float32)
    def test_broadcast_in_dim(self, device, dtype):
        """prims.broadcast_in_dim covers same-shape, new-outer-dim, expand,
        and unsqueeze cases under both the aten and nvfuser executors."""
        # nvfuser is not currently capable of realizing a broadcasted tensor
        # when the broadcast is the only operation. Another op is needed.
        def _wrapper(a, b, broadcast_dimensions):
            a_bc = prims.broadcast_in_dim(a, b.shape, broadcast_dimensions)
            return prims.add(a_bc, b)
        traced = make_traced(_wrapper)
        make_arg = partial(make_tensor, device=device, dtype=dtype)
        for executor in ('aten', 'strictly_nvfuser'):
            fn = partial(traced, executor=executor)
            # Same shape
            shape = (5, 5)
            a = make_arg(shape)
            # b is all zeros so adding it leaves the broadcast result intact
            b = make_arg(shape, low=0.0, high=0.0)
            result = fn(a, b, (0, 1))
            self.assertEqual(result.shape, a.shape)
            self.assertTrue(result.is_contiguous)
            self.assertEqual(a, result)
            # Error input: reordering dims
            with self.assertRaises(Exception):
                result = fn(a, b, (1, 0))
            # Adding outermost dimensions
            a = make_arg((5, 5))
            b = make_arg((3, 3, 5, 5), low=0.0, high=0.0)
            result = fn(a, b, (2, 3))
            self.assertEqual(result.shape, b.shape)
            self.assertEqual(a.broadcast_to(b.shape), result)
            # Expands
            a = make_arg((1, 5, 1))
            b = make_arg((3, 5, 7), low=0.0, high=0.0)
            result = fn(a, b, (0, 1, 2))
            self.assertEqual(result.shape, b.shape)
            self.assertEqual(a.expand_as(result), result)
            # Unsqueezes
            a = make_arg((1, 2, 3))
            b = make_arg((1, 2, 1, 3), low=0.0, high=0.0)
            result = fn(a, b, (0, 1, 3))
            self.assertEqual(result.shape, b.shape)
            self.assertEqual(a.unsqueeze(2), result)
            # FIXME: This test exposes an issue in nvfuser
            # Adds outermost, expands, and unsqueezes
            """
            a = make_arg((1, 2, 3))
            b = make_arg((4, 1, 7, 2, 3, 3), low=0.0, high=0.0)
            result = fn(a, b, (1, 3, 4))
            self.assertEqual(result.shape, b.shape)
            a.unsqueeze_(3)
            a.unsqueeze_(1)
            a.unsqueeze_(0)
            self.assertEqual(a.expand_as(result), result)
            """
@onlyCUDA
@skipCUDAIfRocm
@dtypes(torch.float32)
def test_broadcast_in_dim_sum(self, device, dtype):
def _wrapper(a):
a_sum = prims.sum(a, [0, 1])
a_bc = prims.broadcast_in_dim(a_sum, [], [])
return a_bc
traced = make_traced(_wrapper)
make_arg = partial(make_tensor, device=device, dtype=dtype)
for executor in ('aten', 'strictly_nvfuser'):
fn = partial(traced, executor=executor)
shape = (5, 5)
a = make_arg(shape)
result = fn(a)
self.assertEqual(result.shape, ())
self.assertTrue(result.is_contiguous)
self.assertEqual(_wrapper(a), result)
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
@dtypes(torch.float64, torch.long)
def test_cbrt_prim(self, device, dtype):
make_arg = partial(make_tensor, device=device, dtype=dtype)
batches = [(), (1,), (2,), (0, 1), (1, 1), (2, 2)]
shapes = [(), (0,), (1,), (5,)]
try:
# Sets the default dtype to NumPy's default dtype of double
cur_default = torch.get_default_dtype()
torch.set_default_dtype(torch.double)
# Tested here, as this OP is not currently exposed or tested in ATen
for b, s in product(batches, shapes):
x = make_arg(b + s)
y = prims.cbrt(x)
x_np = x.cpu().numpy()
y_np = scipy.special.cbrt(x_np)
self.assertEqual(y, y_np, exact_device=False)
finally:
torch.set_default_dtype(cur_default)
@onlyCUDA
@skipCUDAIfRocm
def test_nvfuser_impl_is_used(self, device):
# This test is to ensure that when the nvfuser implementation exists it is used
# Assuming one-to-one mapping between prims and nvfuser implementations
# This test is not intended to test the correctness of the nvfuser implementation
from torch._C._nvfuser import FusionDefinition as fd
prim_nvfuser_ops = set(torch._prims.__all__).intersection(dir(fd.ops))
ops_without_nvfuser_impl = {
name
for name in prim_nvfuser_ops
if getattr(torch.ops.nvprims, name, None) is None
}
assert (
len(ops_without_nvfuser_impl) == 0
), (f"The following prims do not have 'impl_nvfuser' defined: {ops_without_nvfuser_impl} ",
"while there exists nvfuser implementations for them.")
@onlyCUDA
@skipCUDAIfRocm
def test_nvfuser_executor_cached_noncontiguous(self, device):
# This test is to ensure that nvfuser computes correct results for noncontiguous tensors
from torch.fx.experimental.proxy_tensor import make_fx
from torch._prims.context import TorchRefsMode
from torch._prims.executor import execute
a = torch.randn(3, 3, device=device)
def func(a):
return torch.sigmoid(a)
with TorchRefsMode():
gm = make_fx(func)(a)
# First run to create the cache
execute(gm, a, executor="nvfuser")
# a.mT is noncontiguous, but it shouldn't affect correctness
expected = execute(gm, a.mT, executor="aten")
actual = execute(gm, a.mT, executor="nvfuser")
self.assertEqual(expected, actual)
def test_nvfuser_capability_context(self, device):
# This test is to ensure that the torch calls are replaced with refs
# based on the nvfuser+prims capability
from torch.fx.experimental.proxy_tensor import make_fx
from torch._prims.context import TorchRefsNvfuserCapabilityMode
# It's assumed that digamma is not supported by nvfuser
# If it's ever supported, this test will need to be updated
self.assertTrue(getattr(torch.ops.nvprims, "digamma", None) is None)
a = torch.randn(3, 3, device=device)
def func(a):
return torch.digamma(a)
with TorchRefsNvfuserCapabilityMode():
gm = make_fx(func)(a)
# Check that the torch.digamma is not replaced with torch.ops.prims.digamma
call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes))
includes_aten_digamma = any(
torch.ops.aten.digamma.default == node.target
for node in call_function_nodes
)
includes_prims_digamma = any(
torch.ops.prims.digamma.default == node.target
for node in call_function_nodes
)
self.assertTrue(includes_aten_digamma)
self.assertFalse(includes_prims_digamma)
# Check mixed case, sigmoid is replaced with refs, but digamma is not
def func(a):
return torch.sigmoid(torch.digamma(a))
with TorchRefsNvfuserCapabilityMode():
gm = make_fx(func)(a)
call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes))
includes_aten_sigmoid = any(
torch.ops.aten.sigmoid.default == node.target
for node in call_function_nodes
)
includes_prims_digamma = any(
torch.ops.prims.digamma.default == node.target
for node in call_function_nodes
)
includes_nvprims_exp = any(
torch.ops.nvprims.exp.default == node.target
for node in call_function_nodes
)
self.assertFalse(includes_aten_sigmoid)
self.assertFalse(includes_prims_digamma)
self.assertTrue(includes_nvprims_exp)
@onlyCUDA
@skipCUDAIfRocm
def test_nvfuser_executor_partitioned(self, device):
# This test is to ensure that nvfuser partitioned executor works correctly
# It's assumed that digamma is not supported by nvfuser
# If it's ever supported, this test will need to be updated
self.assertTrue(getattr(torch.ops.nvprims, "digamma", None) is None)
from torch.fx.experimental.proxy_tensor import make_fx
from torch._prims.context import TorchRefsMode
from torch._prims.executor import execute
a = torch.randn(3, 4, device=device)
b = torch.rand(3, 1, device=device)
c = torch.rand(3, 4, device=device)
def func(a, b, c):
aa = torch.digamma(a) # not supported by nvfuser
d = torch.add(b, c)
dd = torch.sqrt(d)
return torch.mul(aa, dd.digamma())
with TorchRefsMode():
gm = make_fx(func)(a, b, c)
expected = execute(gm, a, b, c, executor="aten")
actual = execute(gm, a, b, c, executor="nvfuser")
self.assertEqual(expected, actual)
@onlyCUDA
@skipCUDAIfRocm
def test_nvfuser_executor_partitioned_no_partitions_error(self, device):
# This test is to ensure that nvfuser partitioned executor works correctly
# It's assumed that digamma is not supported by nvfuser
# If it's ever supported, this test will need to be updated
self.assertTrue(getattr(torch.ops.nvprims, "digamma", None) is None)
from torch.fx.experimental.proxy_tensor import make_fx
from torch._prims.context import TorchRefsMode
from torch._prims.executor import execute
a = torch.randn(3, 4, device=device)
def func(a):
return torch.digamma(a) # not supported by nvfuser
with TorchRefsMode():
gm = make_fx(func)(a)
with catch_warnings(record=True) as w:
# Trigger warning
execute(gm, a, executor="nvfuser")
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("is not supported by nvFuser" in str(w[-1].message))
def test_nvprims(self, device):
# This test is to ensure that nvfuser specific prims are exposed
# and can be traced with make_fx
from torch.fx.experimental.proxy_tensor import make_fx
def func(a):
return torch.ops.nvprims.add(a, a)
a = torch.randn(3, 4, device=device)
gm = make_fx(func)(a)
for node in gm.graph.nodes:
if node.op == "call_function":
self.assertTrue(node.name == "add_default")
self.assertTrue(node.target == torch.ops.nvprims.add.default)
self.assertFalse(node.target == torch.ops.prims.add.default)
self.assertFalse(node.target == torch.ops.aten.add.default)
@onlyCUDA
@skipCUDAIfRocm
@dtypes(torch.float32)
@parametrize("correction", [0, 1])
def test_var(self, device, dtype, correction):
def _wrapper(a):
return prims.var(a, [0, 1], correction=correction)
traced = make_traced(_wrapper)
make_arg = partial(make_tensor, device=device, dtype=dtype)
for executor in ('aten', 'strictly_nvfuser'):
fn = partial(traced, executor=executor)
shape = (5, 5)
a = make_arg(shape)
result = fn(a)
self.assertEqual(result.shape, ())
self.assertTrue(result.is_contiguous)
self.assertEqual(_wrapper(a), result)
@onlyCUDA
@skipCUDAIfRocm
@dtypes(torch.float32)
def test_pytree_input_output(self, device, dtype):
@make_traced
def fn(a, b_dict):
b = b_dict["b"]
d = {}
d["c"] = torch.add(a, b)
return (d, torch.add(a, d["c"]))
make_arg = partial(make_tensor, device=device, dtype=dtype)
a = make_arg((5, 5))
b = make_arg((1, 5))
b_dict = {"b": b}
result_aten = fn(a, b_dict, executor="aten")
result_nvfuser = fn(a, b_dict, executor="strictly_nvfuser")
self.assertEqual(result_aten, result_nvfuser)
@dtypes(torch.float32)
def test_memory_format_strides(self, device, dtype):
shapes = (
(),
(0,),
(1,),
(5),
(1, 0),
(1, 1),
(3, 7),
(3, 0, 2),
(1, 1, 2),
(4, 1, 1),
(7, 8, 9),
)
channels_last_shapes = (
(0, 0, 0, 0),
(1, 0, 3, 0),
(0, 2, 3, 5),
(2, 2, 2, 0),
(5, 4, 3, 2),
(8, 8, 7, 2),
(9, 1, 3, 1),
(4, 5, 8, 7)
)
channels_last_3d_shapes = (
(0, 8, 7, 9, 2),
(5, 0, 7, 9, 2),
(5, 0, 7, 9, 0),
(5, 8, 7, 9, 2),
(5, 1, 7, 9, 2),
(5, 1, 7, 9, 1),
)
pairs = (
(shapes, torch.contiguous_format),
(channels_last_shapes, torch.contiguous_format),
(channels_last_3d_shapes, torch.contiguous_format),
(channels_last_shapes, torch.channels_last),
(channels_last_3d_shapes, torch.channels_last_3d),
)
for shapes, memory_format in pairs:
for shape in shapes:
# tests empty
expected = torch.empty(shape, device=device, dtype=dtype, memory_format=memory_format)
actual = refs.empty(shape, device=device, dtype=dtype, memory_format=memory_format)
self.assertEqual(expected.stride(), actual.stride())
# tests clone
a = torch.testing.make_tensor(shape, device=device, dtype=dtype)
expected = torch.clone(a, memory_format=memory_format)
actual = torch.clone(a, memory_format=memory_format)
self.assertEqual(expected.stride(), actual.stride())
# tests contiguous
a = torch.testing.make_tensor(shape, device=device, dtype=dtype, noncontiguous=True)
expected = a.contiguous(memory_format=memory_format)
actual = refs.contiguous(a, memory_format=memory_format)
self.assertEqual(expected.stride(), actual.stride())
@dtypes(torch.float32)
def test_reshape_view_method(self, device, dtype):
make_arg = partial(make_tensor, device=device, dtype=dtype)
a = make_arg((5, 5))
new_shape = 1, 5, 1, 5
result_eager = a.reshape(*new_shape)
result_refs = refs.reshape(a, *new_shape)
self.assertEqual(result_eager, result_refs)
result_eager = a.view(*new_shape)
result_refs = refs.view(a, *new_shape)
self.assertEqual(result_eager, result_refs)
class TestPrimsBasic(TestCase):
    """Device-independent sanity checks for the torch.ops.prims namespace."""

    def test_torch_ops(self):
        # prims exposed via torch.ops should agree with the eager op...
        r = make_tensor((2,), device='cpu', dtype=torch.float)
        self.assertEqual(torch.ops.prims.sin(r), torch.sin(r))

        # ...and dispatch through __torch_dispatch__ as the expected overload.
        r = LoggingTensor(r)
        with capture_logs() as logs:
            log_input("input", r)
            prims.sin(r)
        self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1 = torch._ops.prims.sin.default($0)""")

    def test_mul_complex(self):
        # A python complex scalar operand must be accepted.
        prims.mul(torch.randn(2), 1 + 1j)
# Generate per-device variants of TestPrims (e.g. TestPrimsCPU, TestPrimsCUDA).
instantiate_device_type_tests(TestPrims, globals())
class TestRefs(TestCase):
    """Tests for reference implementations in torch._refs."""

    @dtypes(torch.float32)
    def test_constant_pad_nd_memory_format(self, device, dtype):
        # Memory format must be preserved whenever it is unambiguous.
        unambiguous_cases = (
            (torch.channels_last, 4),
            (torch.contiguous_format, 4),
            (torch.channels_last_3d, 5),
            (torch.contiguous_format, 5),
        )
        for mf, ndim in unambiguous_cases:
            t = torch.zeros([2] * ndim).to(memory_format=mf)
            padded = refs.constant_pad_nd(t, pad=[1] * (2 * ndim))
            self.assertTrue(padded.is_contiguous(memory_format=mf))

        # Ambiguous case 1: the input satisfies both is_channels_last_ and
        # is_contiguous_; the output should come out channels_last.
        t = torch.empty_strided((2, 1, 2, 2), stride=(4, 1, 2, 1))
        self.assertTrue(t.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(t.is_contiguous())
        actual = refs.constant_pad_nd(t, pad=[1] * 8)
        expect = torch.constant_pad_nd(t, pad=[1] * 8)
        self.assertEqual(actual.stride(), expect.stride())
        self.assertTrue(actual.is_contiguous(memory_format=torch.channels_last))

        # Ambiguous case 2: is_channels_last_contiguous_ without
        # is_channels_last_; the output should come out contiguous.
        t = torch.empty_strided((2, 1, 2, 2), stride=(4, 4, 2, 1))
        self.assertTrue(t.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(t.is_contiguous())
        actual = refs.constant_pad_nd(t, pad=[1] * 8)
        expect = torch.constant_pad_nd(t, pad=[1] * 8)
        self.assertEqual(actual.stride(), expect.stride())
        self.assertTrue(actual.is_contiguous())
# Generate per-device variants of TestRefs (e.g. TestRefsCPU, TestRefsCUDA).
instantiate_device_type_tests(TestRefs, globals())


if __name__ == "__main__":
    run_tests()
| pytorch-master | test/test_prims.py |
# Owner(s): ["module: functionalization"]
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.fx.passes.reinplace import reinplace
from torch.fx.experimental.proxy_tensor import make_fx
try:
    # functionalize() lives in functorch, which is an optional dependency for
    # this file; when it is missing, tests that need it silently no-op.
    from functorch.experimental import functionalize
    HAS_FUNCTIONALIZATION = True
except Exception:
    # No `as e`: the exception object was never used.
    HAS_FUNCTIONALIZATION = False
class TestReinplacePass(TestCase):
    """Tests for torch.fx.passes.reinplace: rewriting out-of-place ops into
    their in-place variants when no alias of the operand is used later."""

    def test_reinplace_basic(self):
        # Basic test: the out-of-place add() call should be converted
        # into add_()
        def f(x):
            a = x.clone()
            b = a.add(1)
            return b

        inp = torch.ones(2)
        rf = reinplace(make_fx(f)(inp), inp)
        expected_out = f(inp)
        actual_out = rf(inp)
        self.assertEqual(actual_out, expected_out)
        self.assertExpectedInline(rf.code, """\
def forward(self, x_1):
    clone_default = torch.ops.aten.clone.default(x_1); x_1 = None
    add_tensor = torch.ops.aten.add_.Tensor(clone_default, 1)
    return clone_default
""")

    def test_reinplace_with_view(self):
        def f(x):
            a = x.clone()
            a_view = a.view(-1)
            # We shouldn't re-inplace the first add(), because an alias of a is re-used later in the program
            b = a.add(1)
            # Second add() is fine to re-inplace
            c = a_view.add(1)
            return c

        inp = torch.ones(2)
        rf = reinplace(make_fx(f)(inp), inp)
        expected_out = f(inp)
        actual_out = rf(inp)
        self.assertEqual(actual_out, expected_out)
        self.assertExpectedInline(rf.code, """\
def forward(self, x_1):
    clone_default = torch.ops.aten.clone.default(x_1); x_1 = None
    view_default = torch.ops.aten.view.default(clone_default, [-1])
    add_tensor = torch.ops.aten.add.Tensor(clone_default, 1); clone_default = None
    add_tensor_1 = torch.ops.aten.add_.Tensor(view_default, 1)
    return view_default
""")

    # This test won't actually run in CI, because it requires functionalize() from functorch.
    # I'm planning on testing more comprehensively with torchbench models,
    # but we can make this testing better once functorch moves into pytorch/pytorch.
    def test_reinplace_scatter_op(self):
        def f(a_):
            # for now, don't test mutations to inputs
            a = a_.clone()
            e = a.view(-1)
            b = a.view(-1)
            c = b[0]
            d = c.view(-1)
            d.add_(1)
            return a + e

        if not HAS_FUNCTIONALIZATION:
            return
        inp = torch.ones(4)
        rf = reinplace(make_fx(functionalize(f))(inp), inp)
        expected_out = f(inp)
        actual_out = rf(inp)
        self.assertEqual(actual_out, expected_out)
        # NOTE: one slight pessimization here is the fact that
        # there are a bunch of redundant views in the graph.
        # Technically, half of these views are duplicates that we could de-dup.
        # This shouldn't really hurt performance though, since creating an extra view
        # is effectively just moving some metadata around (and allocating a new TensorImpl).
        # We can/should update the pass in the future to clean this up.
        self.assertExpectedInline(rf.code, """\
def forward(self, a__1):
    clone_default = torch.ops.aten.clone.default(a__1); a__1 = None
    view_default = torch.ops.aten.view.default(clone_default, [-1])
    view_default_1 = torch.ops.aten.view.default(clone_default, [-1])
    select_int = torch.ops.aten.select.int(view_default_1, 0, 0); view_default_1 = None
    view_default_2 = torch.ops.aten.view.default(select_int, [-1]); select_int = None
    add_tensor = torch.ops.aten.add_.Tensor(view_default_2, 1)
    view_default_3 = torch.ops.aten.view.default(clone_default, [-1]); clone_default = None
    select_int_1 = torch.ops.aten.select.int(view_default_3, 0, 0)
    view_default_4 = torch.ops.aten.view.default(view_default_2, []); view_default_2 = None
    view_default_5 = torch.ops.aten.view.default(view_default_3, [4]); view_default_3 = None
    view_default_6 = torch.ops.aten.view.default(view_default_5, [-1])
    add_tensor_1 = torch.ops.aten.add_.Tensor(view_default_5, view_default_6); view_default_6 = None
    return view_default_5
""")

    def test_reinplace_scatter_twice(self):
        def f(a_):
            # for now, don't test mutations to inputs
            a = a_.clone()
            b = a[:, 1]
            c = b[1]
            c.add_(1)
            return a

        if not HAS_FUNCTIONALIZATION:
            return

        inp = torch.ones(4, 4)
        rf = reinplace(make_fx(functionalize(f))(inp), inp)
        expected_out = f(inp)
        actual_out = rf(inp)
        self.assertEqual(actual_out, expected_out)
        self.assertExpectedInline(rf.code, """\
def forward(self, a__1):
    clone_default = torch.ops.aten.clone.default(a__1); a__1 = None
    slice_tensor = torch.ops.aten.slice.Tensor(clone_default, 0, 0, 9223372036854775807)
    select_int = torch.ops.aten.select.int(slice_tensor, 1, 1); slice_tensor = None
    select_int_1 = torch.ops.aten.select.int(select_int, 0, 1); select_int = None
    add_tensor = torch.ops.aten.add_.Tensor(select_int_1, 1); select_int_1 = None
    slice_tensor_1 = torch.ops.aten.slice.Tensor(clone_default, 0, 0, 9223372036854775807)
    select_int_2 = torch.ops.aten.select.int(slice_tensor_1, 1, 1); slice_tensor_1 = None
    return clone_default
""")

    def test_reinplace_scatter_twice_with_different_view_op_valid(self):
        def f(a_):
            a = a_.clone()
            b = a[:, 1]
            c = b[1]
            c_updated = c.add(1)
            good_mirror_of_b = a.as_strided((4,), (4,), 1)
            # good_mirror_of_b points to the same region of memory as b.
            # and this scatter op below tries to scatter c_updated into the same region
            # that c currently takes up.
            # reinplacing logic checks this by confirming that:
            #     c_updated
            #     good_mirror_of_b.select(0, 1)
            # have the same size/stride/storage_offset.
            b_updated = torch.select_scatter(good_mirror_of_b, c_updated, 0, 1)
            return b_updated

        inp = torch.ones(4, 4)
        rf = reinplace(make_fx(f)(inp), inp)
        expected_out = f(inp)
        actual_out = rf(inp)
        self.assertEqual(actual_out, expected_out)
        self.assertExpectedInline(rf.code, """\
def forward(self, a__1):
    clone_default = torch.ops.aten.clone.default(a__1); a__1 = None
    slice_tensor = torch.ops.aten.slice.Tensor(clone_default, 0, 0, 9223372036854775807)
    select_int = torch.ops.aten.select.int(slice_tensor, 1, 1); slice_tensor = None
    select_int_1 = torch.ops.aten.select.int(select_int, 0, 1); select_int = None
    add_tensor = torch.ops.aten.add_.Tensor(select_int_1, 1); select_int_1 = None
    as_strided_default = torch.ops.aten.as_strided.default(clone_default, [4], [4], 1); clone_default = None
    return as_strided_default
""")

    # Test example where we have a scatter op, where the base tensor
    # has the same size/stride/storage offset (even though it is a different view),
    # making it valid to re-inplace
    def test_reinplace_scatter_twice_with_different_view_op_invalid(self):
        def f(a_):
            a = a_.clone()
            b = a[:, 1]
            c = b[1]
            c_updated = c.add(1)
            good_mirror_of_b = a.as_strided((4,), (4,), 1)
            # The first arg to select_scatter is an equivalent view to b.
            # However, the select_scatter call below tries to put c_updated
            # into a different slice of "b" than what "c" currently occupies.
            #
            b_updated = torch.select_scatter(good_mirror_of_b, c_updated, 0, 0)
            return b_updated

        inp = torch.ones(4, 4)
        rf = reinplace(make_fx(f)(inp), inp)
        expected_out = f(inp)
        actual_out = rf(inp)
        self.assertEqual(actual_out, expected_out)
        self.assertExpectedInline(rf.code, """\
def forward(self, a__1):
    clone_default = torch.ops.aten.clone.default(a__1); a__1 = None
    slice_tensor = torch.ops.aten.slice.Tensor(clone_default, 0, 0, 9223372036854775807)
    select_int = torch.ops.aten.select.int(slice_tensor, 1, 1); slice_tensor = None
    select_int_1 = torch.ops.aten.select.int(select_int, 0, 1); select_int = None
    add_tensor = torch.ops.aten.add.Tensor(select_int_1, 1); select_int_1 = None
    as_strided_default = torch.ops.aten.as_strided.default(clone_default, [4], [4], 1); clone_default = None
    select_scatter_default = torch.ops.aten.select_scatter.default(as_strided_default, add_tensor, 0, 0); as_strided_default = add_tensor = None
    return select_scatter_default
""")  # noqa: B950

    def test_reinplace_scatter_twice_with_different_view_op_invalid2(self):
        def f(a_):
            a = a_.clone()
            b = a[:, 1]
            c = b[1]
            c_updated = c.add(1)
            bad_mirror_of_b = a.as_strided((4,), (4,), 0)
            # The first arg to select_scatter points to a different than c's base.
            # This makes it invalid to re-inplace.
            b_updated = torch.select_scatter(bad_mirror_of_b, c_updated, 0, 1)
            return b_updated

        inp = torch.ones(4, 4)
        rf = reinplace(make_fx(f)(inp), inp)
        expected_out = f(inp)
        actual_out = rf(inp)
        # self.assertEqual(actual_out, expected_out)
        self.assertExpectedInline(rf.code, """\
def forward(self, a__1):
    clone_default = torch.ops.aten.clone.default(a__1); a__1 = None
    slice_tensor = torch.ops.aten.slice.Tensor(clone_default, 0, 0, 9223372036854775807)
    select_int = torch.ops.aten.select.int(slice_tensor, 1, 1); slice_tensor = None
    select_int_1 = torch.ops.aten.select.int(select_int, 0, 1); select_int = None
    add_tensor = torch.ops.aten.add.Tensor(select_int_1, 1); select_int_1 = None
    as_strided_default = torch.ops.aten.as_strided.default(clone_default, [4], [4], 0); clone_default = None
    select_scatter_default = torch.ops.aten.select_scatter.default(as_strided_default, add_tensor, 0, 1); as_strided_default = add_tensor = None
    return select_scatter_default
""")  # noqa: B950
if __name__ == '__main__':
    run_tests()
| pytorch-master | test/test_fx_reinplace_pass.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.