# Tests for mmcv fileio (FileClient, handlers), model-zoo utilities and
# checkpoint loading. (Dataset-export table residue removed.)
class TestFileClient():
    """Unit tests for ``FileClient`` and its registered storage backends.

    ``setup_class`` prepares paths to shared fixtures (a color image and a
    text file) under ``tests/data``; the backend tests below read them via
    both ``Path`` and ``str`` arguments.
    """

    @classmethod
    def setup_class(cls):
        # Shared fixtures used by all backend tests.
        cls.test_data_dir = (Path(__file__).parent / 'data')
        cls.img_path = (cls.test_data_dir / 'color.jpg')
        # (height, width, channels) of color.jpg
        cls.img_shape = (300, 400, 3)
        cls.text_path = (cls.test_data_dir / 'filelist.txt')

    def test_error(self):
        """An unregistered backend name raises ``ValueError``."""
        with pytest.raises(ValueError):
            FileClient('hadoop')

    def test_disk_backend(self):
        """Exercise every public method of the hard-disk backend."""
        disk_backend = FileClient('disk')
        assert (disk_backend.name == 'HardDiskBackend')
        assert disk_backend.allow_symlink
        # `get` accepts both Path and str and returns the raw bytes.
        img_bytes = disk_backend.get(self.img_path)
        img = mmcv.imfrombytes(img_bytes)
        assert (self.img_path.open('rb').read() == img_bytes)
        assert (img.shape == self.img_shape)
        img_bytes = disk_backend.get(str(self.img_path))
        img = mmcv.imfrombytes(img_bytes)
        assert (self.img_path.open('rb').read() == img_bytes)
        assert (img.shape == self.img_shape)
        # `get_text` accepts both Path and str.
        value_buf = disk_backend.get_text(self.text_path)
        assert (self.text_path.open('r').read() == value_buf)
        value_buf = disk_backend.get_text(str(self.text_path))
        assert (self.text_path.open('r').read() == value_buf)
        with tempfile.TemporaryDirectory() as tmp_dir:
            # `put` writes bytes, creating missing parent directories.
            filepath1 = (Path(tmp_dir) / 'test.jpg')
            disk_backend.put(b'disk', filepath1)
            assert (filepath1.open('rb').read() == b'disk')
            _filepath1 = ((Path(tmp_dir) / 'not_existed_dir1') / 'test.jpg')
            disk_backend.put(b'disk', _filepath1)
            assert (_filepath1.open('rb').read() == b'disk')
            # `put_text` writes str, creating missing parent directories.
            filepath2 = (Path(tmp_dir) / 'test.txt')
            disk_backend.put_text('disk', filepath2)
            assert (filepath2.open('r').read() == 'disk')
            _filepath2 = ((Path(tmp_dir) / 'not_existed_dir2') / 'test.txt')
            disk_backend.put_text('disk', _filepath2)
            assert (_filepath2.open('r').read() == 'disk')
            # `isfile` / `remove` / `exists`.
            assert disk_backend.isfile(filepath2)
            assert (not disk_backend.isfile((Path(tmp_dir) / 'not/existed/path')))
            disk_backend.remove(filepath2)
            assert (not disk_backend.exists(filepath2))
            # For the disk backend `get_local_path` yields the path itself
            # and the file survives after the context exits.
            with disk_backend.get_local_path(filepath1) as path:
                assert (str(filepath1) == path)
            assert osp.isfile(filepath1)
        # `join_path` behaves like `osp.join`.
        disk_dir = '/path/of/your/directory'
        assert (disk_backend.join_path(disk_dir, 'file') == osp.join(disk_dir, 'file'))
        assert (disk_backend.join_path(disk_dir, 'dir', 'file') == osp.join(disk_dir, 'dir', 'file'))
        # `list_dir_or_file` with every combination of flags.
        with build_temporary_directory() as tmp_dir:
            assert (set(disk_backend.list_dir_or_file(tmp_dir)) == set(['dir1', 'dir2', 'text1.txt', 'text2.txt']))
            assert (set(disk_backend.list_dir_or_file(tmp_dir, recursive=True)) == set(['dir1', osp.join('dir1', 'text3.txt'), 'dir2', osp.join('dir2', 'dir3'), osp.join('dir2', 'dir3', 'text4.txt'), osp.join('dir2', 'img.jpg'), 'text1.txt', 'text2.txt']))
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_file=False)) == set(['dir1', 'dir2']))
            # `suffix` only applies to files, so it conflicts with list_dir.
            with pytest.raises(TypeError, match='`suffix` should be None when `list_dir` is True'):
                disk_backend.client.list_dir_or_file(tmp_dir, list_file=False, suffix='.txt')
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_file=False, recursive=True)) == set(['dir1', 'dir2', osp.join('dir2', 'dir3')]))
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_dir=False)) == set(['text1.txt', 'text2.txt']))
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_dir=False, recursive=True)) == set([osp.join('dir1', 'text3.txt'), osp.join('dir2', 'dir3', 'text4.txt'), osp.join('dir2', 'img.jpg'), 'text1.txt', 'text2.txt']))
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix='.txt')) == set(['text1.txt', 'text2.txt']))
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix=('.txt', '.jpg'))) == set(['text1.txt', 'text2.txt']))
            # `suffix` must be a str or a tuple of str, not a list.
            with pytest.raises(TypeError, match='`suffix` must be a string or tuple of strings'):
                disk_backend.client.list_dir_or_file(tmp_dir, list_dir=False, suffix=['.txt', '.jpg'])
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix='.txt', recursive=True)) == set([osp.join('dir1', 'text3.txt'), osp.join('dir2', 'dir3', 'text4.txt'), 'text1.txt', 'text2.txt']))
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix=('.txt', '.jpg'), recursive=True)) == set([osp.join('dir1', 'text3.txt'), osp.join('dir2', 'dir3', 'text4.txt'), osp.join('dir2', 'img.jpg'), 'text1.txt', 'text2.txt']))

    @patch('ceph.S3Client', MockS3Client)
    def test_ceph_backend(self):
        """Ceph backend: byte reads work, text reads are unsupported."""
        ceph_backend = FileClient('ceph')
        assert (not ceph_backend.allow_symlink)
        # `get_text` raises for both Path and str inputs.
        with pytest.raises(NotImplementedError):
            ceph_backend.get_text(self.text_path)
        with pytest.raises(NotImplementedError):
            ceph_backend.get_text(str(self.text_path))
        img_bytes = ceph_backend.get(self.img_path)
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)
        img_bytes = ceph_backend.get(str(self.img_path))
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)
        # `path_mapping` must be a dict (or None).
        with pytest.raises(AssertionError):
            FileClient('ceph', path_mapping=1)
        # With path_mapping, the local prefix is rewritten to the s3 path
        # before the underlying `Get` is invoked.
        ceph_path = 's3://user/data'
        ceph_backend = FileClient('ceph', path_mapping={str(self.test_data_dir): ceph_path})
        ceph_backend.client._client.Get = MagicMock(return_value=ceph_backend.client._client.Get(self.img_path))
        img_bytes = ceph_backend.get(self.img_path)
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)
        ceph_backend.client._client.Get.assert_called_with(str(self.img_path).replace(str(self.test_data_dir), ceph_path))

    @patch('petrel_client.client.Client', MockPetrelClient)
    @pytest.mark.parametrize('backend,prefix', [('petrel', None), (None, 's3')])
    def test_petrel_backend(self, backend, prefix):
        """Petrel backend, selected by name or inferred from the 's3' prefix."""
        petrel_backend = FileClient(backend=backend, prefix=prefix)
        assert (not petrel_backend.allow_symlink)
        img_bytes = petrel_backend.get(self.img_path)
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)
        img_bytes = petrel_backend.get(str(self.img_path))
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)
        # `path_mapping` must be a dict (or None).
        with pytest.raises(AssertionError):
            FileClient('petrel', path_mapping=1)
        # `_map_path` rewrites a mapped local prefix to the petrel path.
        petrel_dir = 's3://user/data'
        petrel_backend = FileClient('petrel', path_mapping={str(self.test_data_dir): petrel_dir})
        assert (petrel_backend.client._map_path(str(self.img_path)) == str(self.img_path).replace(str(self.test_data_dir), petrel_dir))
        # `_format_path` normalizes windows-style separators.
        petrel_path = f'{petrel_dir}/test.jpg'
        petrel_backend = FileClient('petrel')
        assert (petrel_backend.client._format_path('s3://user\\data\\test.jpg') == petrel_path)
        # `get` / `get_text` delegate to the client's `Get`.
        with patch.object(petrel_backend.client._client, 'Get', return_value=b'petrel') as mock_get:
            assert (petrel_backend.get(petrel_path) == b'petrel')
            mock_get.assert_called_once_with(petrel_path)
        with patch.object(petrel_backend.client._client, 'Get', return_value=b'petrel') as mock_get:
            assert (petrel_backend.get_text(petrel_path) == 'petrel')
            mock_get.assert_called_once_with(petrel_path)
        # `put` / `put_text` delegate to the client's `put` (text is encoded).
        with patch.object(petrel_backend.client._client, 'put') as mock_put:
            petrel_backend.put(b'petrel', petrel_path)
            mock_put.assert_called_once_with(petrel_path, b'petrel')
        with patch.object(petrel_backend.client._client, 'put') as mock_put:
            petrel_backend.put_text('petrel', petrel_path)
            mock_put.assert_called_once_with(petrel_path, b'petrel')
        # `remove` requires the client to expose `delete`.
        assert has_method(petrel_backend.client._client, 'delete')
        with delete_and_reset_method(petrel_backend.client._client, 'delete'):
            assert (not has_method(petrel_backend.client._client, 'delete'))
            with pytest.raises(NotImplementedError):
                petrel_backend.remove(petrel_path)
        with patch.object(petrel_backend.client._client, 'delete') as mock_delete:
            petrel_backend.remove(petrel_path)
            mock_delete.assert_called_once_with(petrel_path)
        # `exists` requires both `contains` and `isdir`.
        assert has_method(petrel_backend.client._client, 'contains')
        assert has_method(petrel_backend.client._client, 'isdir')
        with delete_and_reset_method(petrel_backend.client._client, 'contains'), delete_and_reset_method(petrel_backend.client._client, 'isdir'):
            assert (not has_method(petrel_backend.client._client, 'contains'))
            assert (not has_method(petrel_backend.client._client, 'isdir'))
            with pytest.raises(NotImplementedError):
                petrel_backend.exists(petrel_path)
        with patch.object(petrel_backend.client._client, 'contains', return_value=True) as mock_contains:
            assert petrel_backend.exists(petrel_path)
            mock_contains.assert_called_once_with(petrel_path)
        # `isdir` requires the client's `isdir`.
        assert has_method(petrel_backend.client._client, 'isdir')
        with delete_and_reset_method(petrel_backend.client._client, 'isdir'):
            assert (not has_method(petrel_backend.client._client, 'isdir'))
            with pytest.raises(NotImplementedError):
                petrel_backend.isdir(petrel_path)
        with patch.object(petrel_backend.client._client, 'isdir', return_value=True) as mock_isdir:
            assert petrel_backend.isdir(petrel_dir)
            mock_isdir.assert_called_once_with(petrel_dir)
        # `isfile` requires the client's `contains`.
        assert has_method(petrel_backend.client._client, 'contains')
        with delete_and_reset_method(petrel_backend.client._client, 'contains'):
            assert (not has_method(petrel_backend.client._client, 'contains'))
            with pytest.raises(NotImplementedError):
                petrel_backend.isfile(petrel_path)
        with patch.object(petrel_backend.client._client, 'contains', return_value=True) as mock_contains:
            assert petrel_backend.isfile(petrel_path)
            mock_contains.assert_called_once_with(petrel_path)
        # `join_path` uses '/' and avoids doubling separators.
        assert (petrel_backend.join_path(petrel_dir, 'file') == f'{petrel_dir}/file')
        assert (petrel_backend.join_path(f'{petrel_dir}/', 'file') == f'{petrel_dir}/file')
        assert (petrel_backend.join_path(petrel_dir, 'dir', 'file') == f'{petrel_dir}/dir/file')
        # `get_local_path` downloads to a temp file that is removed on exit.
        with patch.object(petrel_backend.client._client, 'Get', return_value=b'petrel') as mock_get, patch.object(petrel_backend.client._client, 'contains', return_value=True) as mock_contains:
            with petrel_backend.get_local_path(petrel_path) as path:
                assert (Path(path).open('rb').read() == b'petrel')
            assert (not osp.isfile(path))
            mock_get.assert_called_once_with(petrel_path)
            mock_contains.assert_called_once_with(petrel_path)
        # `list_dir_or_file` requires the client's `list`.
        assert has_method(petrel_backend.client._client, 'list')
        with delete_and_reset_method(petrel_backend.client._client, 'list'):
            assert (not has_method(petrel_backend.client._client, 'list'))
            with pytest.raises(NotImplementedError):
                list(petrel_backend.list_dir_or_file(petrel_dir))
        # Listing uses '/'-joined paths regardless of the local os separator.
        with build_temporary_directory() as tmp_dir:
            assert (set(petrel_backend.list_dir_or_file(tmp_dir)) == set(['dir1', 'dir2', 'text1.txt', 'text2.txt']))
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, recursive=True)) == set(['dir1', '/'.join(('dir1', 'text3.txt')), 'dir2', '/'.join(('dir2', 'dir3')), '/'.join(('dir2', 'dir3', 'text4.txt')), '/'.join(('dir2', 'img.jpg')), 'text1.txt', 'text2.txt']))
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_file=False)) == set(['dir1', 'dir2']))
            with pytest.raises(TypeError, match='`list_dir` should be False when `suffix` is not None'):
                petrel_backend.client.list_dir_or_file(tmp_dir, list_file=False, suffix='.txt')
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_file=False, recursive=True)) == set(['dir1', 'dir2', '/'.join(('dir2', 'dir3'))]))
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_dir=False)) == set(['text1.txt', 'text2.txt']))
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_dir=False, recursive=True)) == set(['/'.join(('dir1', 'text3.txt')), '/'.join(('dir2', 'dir3', 'text4.txt')), '/'.join(('dir2', 'img.jpg')), 'text1.txt', 'text2.txt']))
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix='.txt')) == set(['text1.txt', 'text2.txt']))
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix=('.txt', '.jpg'))) == set(['text1.txt', 'text2.txt']))
            with pytest.raises(TypeError, match='`suffix` must be a string or tuple of strings'):
                petrel_backend.client.list_dir_or_file(tmp_dir, list_dir=False, suffix=['.txt', '.jpg'])
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix='.txt', recursive=True)) == set(['/'.join(('dir1', 'text3.txt')), '/'.join(('dir2', 'dir3', 'text4.txt')), 'text1.txt', 'text2.txt']))
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix=('.txt', '.jpg'), recursive=True)) == set(['/'.join(('dir1', 'text3.txt')), '/'.join(('dir2', 'dir3', 'text4.txt')), '/'.join(('dir2', 'img.jpg')), 'text1.txt', 'text2.txt']))

    @patch('mc.MemcachedClient.GetInstance', MockMemcachedClient)
    @patch('mc.pyvector', MagicMock)
    @patch('mc.ConvertBuffer', (lambda x: x.content))
    def test_memcached_backend(self):
        """Memcached backend: byte reads work, text reads are unsupported."""
        mc_cfg = dict(server_list_cfg='', client_cfg='', sys_path=None)
        mc_backend = FileClient('memcached', **mc_cfg)
        assert (not mc_backend.allow_symlink)
        with pytest.raises(NotImplementedError):
            mc_backend.get_text(self.text_path)
        with pytest.raises(NotImplementedError):
            mc_backend.get_text(str(self.text_path))
        img_bytes = mc_backend.get(self.img_path)
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)
        img_bytes = mc_backend.get(str(self.img_path))
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)

    def test_lmdb_backend(self):
        """LMDB backend: keys map to stored image bytes; text is unsupported."""
        lmdb_path = (self.test_data_dir / 'demo.lmdb')
        # `db_path` accepts a Path ...
        lmdb_backend = FileClient('lmdb', db_path=lmdb_path)
        assert (not lmdb_backend.allow_symlink)
        with pytest.raises(NotImplementedError):
            lmdb_backend.get_text(self.text_path)
        img_bytes = lmdb_backend.get('baboon')
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == (120, 125, 3))
        # ... and a str.
        lmdb_backend = FileClient('lmdb', db_path=str(lmdb_path))
        with pytest.raises(NotImplementedError):
            lmdb_backend.get_text(str(self.text_path))
        img_bytes = lmdb_backend.get('baboon')
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == (120, 125, 3))

    @pytest.mark.parametrize('backend,prefix', [('http', None), (None, 'http')])
    def test_http_backend(self, backend, prefix):
        """HTTP backend, selected by name or inferred from the 'http' prefix.

        NOTE(review): fetches real URLs from raw.githubusercontent.com, so
        this test needs network access.
        """
        http_backend = FileClient(backend=backend, prefix=prefix)
        img_url = 'https://raw.githubusercontent.com/open-mmlab/mmcv/master/tests/data/color.jpg'
        text_url = 'https://raw.githubusercontent.com/open-mmlab/mmcv/master/tests/data/filelist.txt'
        assert (not http_backend.allow_symlink)
        # Local filesystem paths are not valid URLs for this backend.
        with pytest.raises(Exception):
            http_backend.get(self.img_path)
        with pytest.raises(Exception):
            http_backend.get(str(self.img_path))
        with pytest.raises(Exception):
            http_backend.get_text(self.text_path)
        with pytest.raises(Exception):
            http_backend.get_text(str(self.text_path))
        img_bytes = http_backend.get(img_url)
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)
        value_buf = http_backend.get_text(text_url)
        assert (self.text_path.open('r').read() == value_buf)
        # `get_local_path` downloads to a temp file removed on exit.
        with http_backend.get_local_path(img_url) as path:
            assert (mmcv.imread(path).shape == self.img_shape)
        assert (not osp.isfile(path))

    def test_new_magic_method(self):
        """Instance caching: same backend reuses the instance until it is
        re-registered with ``force=True``."""

        class DummyBackend1(BaseStorageBackend):

            def get(self, filepath):
                return filepath

            def get_text(self, filepath, encoding='utf-8'):
                return filepath

        FileClient.register_backend('dummy_backend', DummyBackend1)
        client1 = FileClient(backend='dummy_backend')
        client2 = FileClient(backend='dummy_backend')
        # Building the same registered backend twice returns the same object.
        assert (client1 is client2)

        class DummyBackend2(BaseStorageBackend):

            def get(self, filepath):
                pass

            def get_text(self, filepath):
                pass

        FileClient.register_backend('dummy_backend', DummyBackend2, force=True)
        client3 = FileClient(backend='dummy_backend')
        client4 = FileClient(backend='dummy_backend')
        # After a forced re-registration, instances are no longer shared.
        assert (client3 is not client4)

    def test_parse_uri_prefix(self):
        """Prefix parsing: None for local paths, the scheme for URIs."""
        # None and non-str/non-Path inputs are rejected.
        with pytest.raises(AssertionError):
            FileClient.parse_uri_prefix(None)
        with pytest.raises(AssertionError):
            FileClient.parse_uri_prefix([])
        assert (FileClient.parse_uri_prefix(self.img_path) is None)
        assert (FileClient.parse_uri_prefix(str(self.img_path)) is None)
        img_url = 'https://raw.githubusercontent.com/open-mmlab/mmcv/master/tests/data/color.jpg'
        assert (FileClient.parse_uri_prefix(img_url) == 'https')
        img_url = 's3://your_bucket/img.png'
        assert (FileClient.parse_uri_prefix(img_url) == 's3')
        # A leading cluster name before the scheme is skipped.
        img_url = 'clusterName:s3://your_bucket/img.png'
        assert (FileClient.parse_uri_prefix(img_url) == 's3')

    def test_infer_client(self):
        """The backend can be inferred from explicit args or from the uri."""
        file_client_args = {'backend': 'disk'}
        client = FileClient.infer_client(file_client_args)
        assert (client.name == 'HardDiskBackend')
        client = FileClient.infer_client(uri=self.img_path)
        assert (client.name == 'HardDiskBackend')
        file_client_args = {'backend': 'petrel'}
        client = FileClient.infer_client(file_client_args)
        assert (client.name == 'PetrelBackend')
        uri = 's3://user_data'
        client = FileClient.infer_client(uri=uri)
        assert (client.name == 'PetrelBackend')

    def test_register_backend(self):
        """Registration rules: name/type validation, ``force`` overwriting,
        and prefix-based registration."""
        # The registered name must be a str.
        with pytest.raises(TypeError):

            class TestClass1():
                pass

            FileClient.register_backend(1, TestClass1)
        # The backend must be a class, not an instance/value.
        with pytest.raises(TypeError):
            FileClient.register_backend('int', 0)
        # The backend class must subclass BaseStorageBackend.
        with pytest.raises(TypeError):

            class TestClass1():
                pass

            FileClient.register_backend('TestClass1', TestClass1)

        class ExampleBackend(BaseStorageBackend):

            def get(self, filepath):
                return filepath

            def get_text(self, filepath, encoding='utf-8'):
                return filepath

        FileClient.register_backend('example', ExampleBackend)
        example_backend = FileClient('example')
        assert (example_backend.get(self.img_path) == self.img_path)
        assert (example_backend.get_text(self.text_path) == self.text_path)
        assert ('example' in FileClient._backends)

        class Example2Backend(BaseStorageBackend):

            def get(self, filepath):
                return b'bytes2'

            def get_text(self, filepath, encoding='utf-8'):
                return 'text2'

        # Re-registering an existing name without force raises KeyError.
        with pytest.raises(KeyError):
            FileClient.register_backend('example', Example2Backend)
        # force=True replaces the previous backend.
        FileClient.register_backend('example', Example2Backend, force=True)
        example_backend = FileClient('example')
        assert (example_backend.get(self.img_path) == b'bytes2')
        assert (example_backend.get_text(self.text_path) == 'text2')

        # `register_backend` also works as a class decorator.
        @FileClient.register_backend(name='example3')
        class Example3Backend(BaseStorageBackend):

            def get(self, filepath):
                return b'bytes3'

            def get_text(self, filepath, encoding='utf-8'):
                return 'text3'

        example_backend = FileClient('example3')
        assert (example_backend.get(self.img_path) == b'bytes3')
        assert (example_backend.get_text(self.text_path) == 'text3')
        assert ('example3' in FileClient._backends)
        # Decorator form also refuses duplicates without force.
        with pytest.raises(KeyError):

            @FileClient.register_backend(name='example3')
            class Example4Backend(BaseStorageBackend):

                def get(self, filepath):
                    return b'bytes4'

                def get_text(self, filepath, encoding='utf-8'):
                    return 'text4'

        @FileClient.register_backend(name='example3', force=True)
        class Example5Backend(BaseStorageBackend):

            def get(self, filepath):
                return b'bytes5'

            def get_text(self, filepath, encoding='utf-8'):
                return 'text5'

        example_backend = FileClient('example3')
        assert (example_backend.get(self.img_path) == b'bytes5')
        assert (example_backend.get_text(self.text_path) == 'text5')

        class Example6Backend(BaseStorageBackend):

            def get(self, filepath):
                return b'bytes6'

            def get_text(self, filepath, encoding='utf-8'):
                return 'text6'

        # A backend can also be looked up via a registered prefix.
        FileClient.register_backend('example4', Example6Backend, force=True, prefixes='example4_prefix')
        example_backend = FileClient('example4')
        assert (example_backend.get(self.img_path) == b'bytes6')
        assert (example_backend.get_text(self.text_path) == 'text6')
        example_backend = FileClient(prefix='example4_prefix')
        assert (example_backend.get(self.img_path) == b'bytes6')
        assert (example_backend.get_text(self.text_path) == 'text6')
        example_backend = FileClient('example4', prefix='example4_prefix')
        assert (example_backend.get(self.img_path) == b'bytes6')
        assert (example_backend.get_text(self.text_path) == 'text6')

        class Example7Backend(BaseStorageBackend):

            def get(self, filepath):
                return b'bytes7'

            def get_text(self, filepath, encoding='utf-8'):
                return 'text7'

        # Multiple prefixes may map to the same backend.
        FileClient.register_backend('example5', Example7Backend, force=True, prefixes=['example5_prefix1', 'example5_prefix2'])
        example_backend = FileClient('example5')
        assert (example_backend.get(self.img_path) == b'bytes7')
        assert (example_backend.get_text(self.text_path) == 'text7')
        example_backend = FileClient(prefix='example5_prefix1')
        assert (example_backend.get(self.img_path) == b'bytes7')
        assert (example_backend.get_text(self.text_path) == 'text7')
        example_backend = FileClient(prefix='example5_prefix2')
        assert (example_backend.get(self.img_path) == b'bytes7')
        assert (example_backend.get_text(self.text_path) == 'text7')

        class Example8Backend(BaseStorageBackend):

            def get(self, filepath):
                return b'bytes8'

            def get_text(self, filepath, encoding='utf-8'):
                return 'text8'

        FileClient.register_backend('example6', Example8Backend, force=True, prefixes='example6_prefix')
        example_backend = FileClient('example6')
        assert (example_backend.get(self.img_path) == b'bytes8')
        assert (example_backend.get_text(self.text_path) == 'text8')
        # The explicit backend name wins over a conflicting prefix.
        example_backend = FileClient('example6', prefix='example4_prefix')
        assert (example_backend.get(self.img_path) == b'bytes8')
        assert (example_backend.get_text(self.text_path) == 'text8')
|
def _test_handler(file_format, test_obj, str_checker, mode='r+'):
    """Round-trip ``test_obj`` through ``mmcv.dump``/``mmcv.load``.

    Args:
        file_format (str): handler name passed to dump/load ('json', ...).
        test_obj: object to serialize; must compare equal after a round trip.
        str_checker (callable): receives the dumped string and asserts on it.
        mode (str): file mode; a 'b' in it selects the binary ``put`` path.
    """
    # 1. dump to a string and let the caller validate its content
    dump_str = mmcv.dump(test_obj, file_format=file_format)
    str_checker(dump_str)
    # 2. dump to a real file path, load back, compare
    tmp_filename = osp.join(tempfile.gettempdir(), 'mmcv_test_dump')
    mmcv.dump(test_obj, tmp_filename, file_format=file_format)
    assert osp.isfile(tmp_filename)
    load_obj = mmcv.load(tmp_filename, file_format=file_format)
    assert (load_obj == test_obj)
    os.remove(tmp_filename)
    # 3. dump to an s3:// path: the PetrelBackend write method is mocked,
    # binary formats go through `put`, text formats through `put_text`
    method = ('put' if ('b' in mode) else 'put_text')
    with patch.object(PetrelBackend, method, return_value=None) as mock_method:
        filename = 's3://path/of/your/file'
        mmcv.dump(test_obj, filename, file_format=file_format)
        mock_method.assert_called()
    # 4. dump to / load from an open file object
    with tempfile.NamedTemporaryFile(mode, delete=False) as f:
        tmp_filename = f.name
        mmcv.dump(test_obj, f, file_format=file_format)
    assert osp.isfile(tmp_filename)
    with open(tmp_filename, mode) as f:
        load_obj = mmcv.load(f, file_format=file_format)
    assert (load_obj == test_obj)
    os.remove(tmp_filename)
    # 5. file format inferred automatically from the filename suffix
    tmp_filename = osp.join(tempfile.gettempdir(), ('mmcv_test_dump.' + file_format))
    mmcv.dump(test_obj, tmp_filename)
    assert osp.isfile(tmp_filename)
    load_obj = mmcv.load(tmp_filename)
    assert (load_obj == test_obj)
    os.remove(tmp_filename)
|
def test_json():
    """Round-trip the shared test object through the JSON handler."""

    def check_json_dump(dump_str):
        # dict key order is not guaranteed, so accept either serialization
        assert dump_str in ('[{"a": "abc", "b": 1}, 2, "c"]', '[{"b": 1, "a": "abc"}, 2, "c"]')

    _test_handler('json', obj_for_test, check_json_dump)
|
def test_yaml():
    """Round-trip the shared test object through the YAML handler."""

    def check_yaml_dump(dump_str):
        # accept flow/block style and either dict key order
        accepted = ('- {a: abc, b: 1}\n- 2\n- c\n', '- {b: 1, a: abc}\n- 2\n- c\n', '- a: abc\n b: 1\n- 2\n- c\n', '- b: 1\n a: abc\n- 2\n- c\n')
        assert dump_str in accepted

    _test_handler('yaml', obj_for_test, check_yaml_dump)
|
def test_pickle():
    """Round-trip the shared test object through the pickle handler."""
    import pickle

    def check_pickle_dump(dump_str):
        # the dumped bytes must unpickle back to the original object
        assert pickle.loads(dump_str) == obj_for_test

    # binary mode so the handler uses byte-oriented I/O
    _test_handler('pickle', obj_for_test, check_pickle_dump, mode='rb+')
|
def test_exception():
    """``mmcv.dump`` needs a format: none given raises ValueError, an
    unrecognized suffix raises TypeError."""
    obj = [{'a': 'abc', 'b': 1}, 2, 'c']
    with pytest.raises(ValueError):
        mmcv.dump(obj)
    with pytest.raises(TypeError):
        mmcv.dump(obj, 'tmp.txt')
|
def test_register_handler():
    """Custom handlers registered via ``mmcv.register_handler`` are used for
    their file suffixes by both ``mmcv.load`` and ``mmcv.dump``."""

    # register one handler for a single suffix ...
    @mmcv.register_handler('txt')
    class TxtHandler1(mmcv.BaseFileHandler):

        def load_from_fileobj(self, file):
            return file.read()

        def dump_to_fileobj(self, obj, file):
            file.write(str(obj))

        def dump_to_str(self, obj, **kwargs):
            return str(obj)

    # ... and another for a list of suffixes; this one prepends a newline
    # when dumping, which is asserted below.
    @mmcv.register_handler(['txt1', 'txt2'])
    class TxtHandler2(mmcv.BaseFileHandler):

        def load_from_fileobj(self, file):
            return file.read()

        def dump_to_fileobj(self, obj, file):
            file.write('\n')
            file.write(str(obj))

        def dump_to_str(self, obj, **kwargs):
            return str(obj)

    # .txt files are loaded through TxtHandler1
    content = mmcv.load(osp.join(osp.dirname(__file__), 'data/filelist.txt'))
    assert (content == '1.jpg\n2.jpg\n3.jpg\n4.jpg\n5.jpg')
    # .txt2 files are dumped through TxtHandler2 (leading newline added)
    tmp_filename = osp.join(tempfile.gettempdir(), 'mmcv_test.txt2')
    mmcv.dump(content, tmp_filename)
    with open(tmp_filename, 'r') as f:
        written = f.read()
    os.remove(tmp_filename)
    assert (written == ('\n' + content))
|
def test_list_from_file():
    """``list_from_file``: local reads with prefix/offset/max_num, plus
    mocked http and petrel backends."""
    filename = osp.join(osp.dirname(__file__), 'data/filelist.txt')
    filelist = mmcv.list_from_file(filename)
    assert (filelist == ['1.jpg', '2.jpg', '3.jpg', '4.jpg', '5.jpg'])
    # `prefix` is prepended to every returned entry
    filelist = mmcv.list_from_file(filename, prefix='a/')
    assert (filelist == ['a/1.jpg', 'a/2.jpg', 'a/3.jpg', 'a/4.jpg', 'a/5.jpg'])
    # `offset` skips that many leading lines
    filelist = mmcv.list_from_file(filename, offset=2)
    assert (filelist == ['3.jpg', '4.jpg', '5.jpg'])
    # `max_num` caps the number of returned lines
    filelist = mmcv.list_from_file(filename, max_num=2)
    assert (filelist == ['1.jpg', '2.jpg'])
    # offset + max_num combine; fewer than max_num may remain
    filelist = mmcv.list_from_file(filename, offset=3, max_num=3)
    assert (filelist == ['4.jpg', '5.jpg'])
    # http backend: chosen explicitly, by prefix arg, or inferred from the uri
    with patch.object(HTTPBackend, 'get_text', return_value='1.jpg\n2.jpg\n3.jpg'):
        filename = 'http://path/of/your/file'
        filelist = mmcv.list_from_file(filename, file_client_args={'backend': 'http'})
        assert (filelist == ['1.jpg', '2.jpg', '3.jpg'])
        filelist = mmcv.list_from_file(filename, file_client_args={'prefix': 'http'})
        assert (filelist == ['1.jpg', '2.jpg', '3.jpg'])
        filelist = mmcv.list_from_file(filename)
        assert (filelist == ['1.jpg', '2.jpg', '3.jpg'])
    # petrel backend: same three selection mechanisms for s3:// uris
    with patch.object(PetrelBackend, 'get_text', return_value='1.jpg\n2.jpg\n3.jpg'):
        filename = 's3://path/of/your/file'
        filelist = mmcv.list_from_file(filename, file_client_args={'backend': 'petrel'})
        assert (filelist == ['1.jpg', '2.jpg', '3.jpg'])
        filelist = mmcv.list_from_file(filename, file_client_args={'prefix': 's3'})
        assert (filelist == ['1.jpg', '2.jpg', '3.jpg'])
        filelist = mmcv.list_from_file(filename)
        assert (filelist == ['1.jpg', '2.jpg', '3.jpg'])
|
def test_dict_from_file():
    """``dict_from_file``: first token is the key, the rest the value(s);
    also exercised with mocked http and petrel backends."""
    filename = osp.join(osp.dirname(__file__), 'data/mapping.txt')
    mapping = mmcv.dict_from_file(filename)
    # multiple value tokens become a list ('dog cow' -> ['dog', 'cow'])
    assert (mapping == {'1': 'cat', '2': ['dog', 'cow'], '3': 'panda'})
    # `key_type` converts the key column
    mapping = mmcv.dict_from_file(filename, key_type=int)
    assert (mapping == {1: 'cat', 2: ['dog', 'cow'], 3: 'panda'})
    # http backend: chosen explicitly, by prefix arg, or inferred from the uri
    with patch.object(HTTPBackend, 'get_text', return_value='1 cat\n2 dog cow\n3 panda'):
        filename = 'http://path/of/your/file'
        mapping = mmcv.dict_from_file(filename, file_client_args={'backend': 'http'})
        assert (mapping == {'1': 'cat', '2': ['dog', 'cow'], '3': 'panda'})
        mapping = mmcv.dict_from_file(filename, file_client_args={'prefix': 'http'})
        assert (mapping == {'1': 'cat', '2': ['dog', 'cow'], '3': 'panda'})
        mapping = mmcv.dict_from_file(filename)
        assert (mapping == {'1': 'cat', '2': ['dog', 'cow'], '3': 'panda'})
    # petrel backend: same three selection mechanisms for s3:// uris
    with patch.object(PetrelBackend, 'get_text', return_value='1 cat\n2 dog cow\n3 panda'):
        filename = 's3://path/of/your/file'
        mapping = mmcv.dict_from_file(filename, file_client_args={'backend': 'petrel'})
        assert (mapping == {'1': 'cat', '2': ['dog', 'cow'], '3': 'panda'})
        mapping = mmcv.dict_from_file(filename, file_client_args={'prefix': 's3'})
        assert (mapping == {'1': 'cat', '2': ['dog', 'cow'], '3': 'panda'})
        mapping = mmcv.dict_from_file(filename)
        assert (mapping == {'1': 'cat', '2': ['dog', 'cow'], '3': 'panda'})
|
@pytest.mark.skipif((torch is None), reason='requires torch library')
def test_tensor2imgs():
    """``mmcv.tensor2imgs``: input validation plus BGR/RGB conversion for
    3-channel and passthrough for 1-channel tensors."""
    # input must be a torch tensor, not an ndarray
    with pytest.raises(AssertionError):
        tensor = np.random.rand(2, 3, 3)
        mmcv.tensor2imgs(tensor)
    # input must be 4-dimensional (N, C, H, W)
    with pytest.raises(AssertionError):
        tensor = torch.randn(2, 3, 3)
        mmcv.tensor2imgs(tensor)
    # the channel dimension must be 1 or 3
    with pytest.raises(AssertionError):
        tensor = torch.randn(2, 4, 3, 3)
        mmcv.tensor2imgs(tensor)
    # `mean` length must match the channel count; the first call raises,
    # so the 1-channel/3-mean call never executes (kept inside the block)
    with pytest.raises(AssertionError):
        tensor = torch.randn(2, 3, 5, 5)
        mmcv.tensor2imgs(tensor, mean=(1,))
        tensor = torch.randn(2, 1, 5, 5)
        mmcv.tensor2imgs(tensor, mean=(0, 0, 0))
    # `std` length must match the channel count (same structure as above)
    with pytest.raises(AssertionError):
        tensor = torch.randn(2, 3, 5, 5)
        mmcv.tensor2imgs(tensor, std=(1,))
        tensor = torch.randn(2, 1, 5, 5)
        mmcv.tensor2imgs(tensor, std=(1, 1, 1))
    # to_rgb=True is invalid for single-channel input
    with pytest.raises(AssertionError):
        tensor = torch.randn(2, 1, 5, 5)
        mmcv.tensor2imgs(tensor, mean=(0,), std=(1,), to_rgb=True)
    # 3-channel with to_rgb=True: output equals the channel-flipped input
    tensor = torch.randn(2, 3, 5, 5)
    gts = [t.cpu().numpy().transpose(1, 2, 0).astype(np.uint8) for t in tensor.flip(1)]
    outputs = mmcv.tensor2imgs(tensor, to_rgb=True)
    for (gt, output) in zip(gts, outputs):
        assert_array_equal(gt, output)
    # 3-channel with to_rgb=False: channels are kept as-is
    tensor = torch.randn(2, 3, 5, 5)
    gts = [t.cpu().numpy().transpose(1, 2, 0).astype(np.uint8) for t in tensor]
    outputs = mmcv.tensor2imgs(tensor, to_rgb=False)
    for (gt, output) in zip(gts, outputs):
        assert_array_equal(gt, output)
    # 1-channel: the channel axis is squeezed away
    tensor = torch.randn(2, 1, 5, 5)
    gts = [t.squeeze(0).cpu().numpy().astype(np.uint8) for t in tensor]
    outputs = mmcv.tensor2imgs(tensor, to_rgb=False)
    for (gt, output) in zip(gts, outputs):
        assert_array_equal(gt, output)
|
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')])
def test_set_mmcv_home():
    """Setting the MMCV_HOME env var overrides the resolved mmcv home."""
    os.environ.pop(ENV_MMCV_HOME, None)
    home_dir = osp.join(osp.dirname(__file__), 'data/model_zoo/mmcv_home/')
    os.environ[ENV_MMCV_HOME] = home_dir
    assert (_get_mmcv_home() == home_dir)
|
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')])
def test_default_mmcv_home():
    """With no env overrides, mmcv home falls back to the default cache dir
    and the bundled open_mmlab.json supplies the model urls."""
    for env_var in (ENV_MMCV_HOME, ENV_XDG_CACHE_HOME):
        os.environ.pop(env_var, None)
    expected_home = os.path.expanduser(os.path.join(DEFAULT_CACHE_DIR, 'mmcv'))
    assert (_get_mmcv_home() == expected_home)
    model_urls = get_external_models()
    default_urls = mmcv.load(osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json'))
    assert (model_urls == default_urls)
|
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')])
def test_get_external_models():
    """A json under MMCV_HOME extends/overrides the external model urls."""
    os.environ.pop(ENV_MMCV_HOME, None)
    os.environ[ENV_MMCV_HOME] = osp.join(osp.dirname(__file__), 'data/model_zoo/mmcv_home/')
    expected = {
        'train': 'https://localhost/train.pth',
        'test': 'test.pth',
        'val': 'val.pth',
        'train_empty': 'train.pth',
    }
    assert (get_external_models() == expected)
|
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')])
def test_get_deprecated_models():
    """Deprecated model aliases resolve to their replacement names."""
    os.environ.pop(ENV_MMCV_HOME, None)
    os.environ[ENV_MMCV_HOME] = osp.join(osp.dirname(__file__), 'data/model_zoo/mmcv_home/')
    expected = {'train_old': 'train', 'test_old': 'test'}
    assert (get_deprecated_model_names() == expected)
|
def load_from_http(url, map_location=None):
    """Stub loader: tag the url instead of downloading, so tests can
    observe which URL the checkpoint machinery resolved.

    ``map_location`` is accepted for signature compatibility and ignored.
    """
    return f'url:{url}'
|
def load_url(url, map_location=None, model_dir=None):
    # Stub for the torch-hub style `load_url`: ignores `map_location` and
    # `model_dir` and delegates to the `load_from_http` stub above.
    return load_from_http(url)
|
def load(filepath, map_location=None):
    """Stub for ``torch.load``: tag the local filepath instead of reading
    it, so tests can observe which file would have been loaded.

    ``map_location`` is accepted for signature compatibility and ignored.
    """
    tagged = 'local:' + filepath
    return tagged
|
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')])
@patch('mmcv.runner.checkpoint.load_from_http', load_from_http)
@patch('mmcv.runner.checkpoint.load_url', load_url)
@patch('torch.load', load)
def test_load_external_url():
    """``_load_checkpoint`` with the loaders stubbed out: each prefix scheme
    must resolve to the expected url ('url:...') or local path ('local:...')."""
    # modelzoo:// and torchvision:// map to the torchvision urls, which
    # changed hashes at torch 1.9
    url = _load_checkpoint('modelzoo://resnet50')
    if (TORCH_VERSION < '1.9.0'):
        assert (url == 'url:https://download.pytorch.org/models/resnet50-19c8e357.pth')
    else:
        assert (url == 'url:https://download.pytorch.org/models/resnet50-0676ba61.pth')
    url = _load_checkpoint('torchvision://resnet50')
    if (TORCH_VERSION < '1.9.0'):
        assert (url == 'url:https://download.pytorch.org/models/resnet50-19c8e357.pth')
    else:
        assert (url == 'url:https://download.pytorch.org/models/resnet50-0676ba61.pth')
    # open-mmlab:// with the default mmcv home (env overrides cleared)
    os.environ.pop(ENV_MMCV_HOME, None)
    os.environ.pop(ENV_XDG_CACHE_HOME, None)
    url = _load_checkpoint('open-mmlab://train')
    assert (url == 'url:https://localhost/train.pth')
    # deprecated aliases emit a warning but still resolve
    os.environ.pop(ENV_MMCV_HOME, None)
    os.environ.pop(ENV_XDG_CACHE_HOME, None)
    with pytest.warns(Warning, match='open-mmlab://train_old is deprecated in favor of open-mmlab://train'):
        url = _load_checkpoint('open-mmlab://train_old')
        assert (url == 'url:https://localhost/train.pth')
    os.environ.pop(ENV_MMCV_HOME, None)
    os.environ.pop(ENV_XDG_CACHE_HOME, None)
    with pytest.warns(Warning, match='openmmlab://train_old is deprecated in favor of openmmlab://train'):
        url = _load_checkpoint('openmmlab://train_old')
        assert (url == 'url:https://localhost/train.pth')
    # open-mmlab:// with MMCV_HOME pointing at the test model zoo
    os.environ.pop(ENV_MMCV_HOME, None)
    mmcv_home = osp.join(osp.dirname(__file__), 'data/model_zoo/mmcv_home')
    os.environ[ENV_MMCV_HOME] = mmcv_home
    url = _load_checkpoint('open-mmlab://train')
    assert (url == 'url:https://localhost/train.pth')
    # a registered name whose file does not exist locally
    with pytest.raises(FileNotFoundError, match='train.pth can not be found.'):
        _load_checkpoint('open-mmlab://train_empty')
    # names resolving to files inside mmcv home load locally
    url = _load_checkpoint('open-mmlab://test')
    assert (url == f"local:{osp.join(_get_mmcv_home(), 'test.pth')}")
    url = _load_checkpoint('open-mmlab://val')
    assert (url == f"local:{osp.join(_get_mmcv_home(), 'val.pth')}")
    # plain http urls and plain local paths
    url = _load_checkpoint('http://localhost/train.pth')
    assert (url == 'url:http://localhost/train.pth')
    with pytest.raises(FileNotFoundError, match='train.pth can not be found.'):
        _load_checkpoint('train.pth')
    url = _load_checkpoint(osp.join(_get_mmcv_home(), 'test.pth'))
    assert (url == f"local:{osp.join(_get_mmcv_home(), 'test.pth')}")
|
@pytest.mark.parametrize('device', ['cpu', pytest.param('cuda', marks=pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support'))])
def test_active_rotated_filter(device):
    """Forward/backward of ``active_rotated_filter`` against precomputed
    reference arrays (``np_feature``/``np_indices``/``expected_*`` are
    module-level fixtures defined elsewhere in this file)."""
    feature = torch.tensor(np_feature, dtype=torch.float, device=device, requires_grad=True)
    indices = torch.tensor(np_indices, dtype=torch.int, device=device)
    output = active_rotated_filter(feature, indices)
    # backprop a ones gradient so feature.grad can be compared
    output.backward(torch.ones_like(output))
    assert np.allclose(output.data.cpu().numpy(), expected_output, atol=0.001)
    assert np.allclose(feature.grad.data.cpu().numpy(), expected_grad, atol=0.001)
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_paconv_assign_scores():
    """Forward/backward regression test for ``assign_score_withk`` (PAConv).

    All tensors below are fixed fixtures; the expected output and the
    expected gradients of scores/points/centers were recorded from a
    reference run, so any numeric change in the op breaks the checks.
    """
    scores = torch.tensor([[[[0.06947571, 0.6065746], [0.28462553, 0.8378516], [0.7595994, 0.97220325], [0.519155, 0.766185]], [[0.15348864, 0.6051019], [0.21510637, 0.31916398], [0.00236845, 0.5842595], [0.6783676, 0.5216348]]], [[[0.23089725, 0.5568468], [0.7405102, 0.06438422], [0.6887394, 0.22089851], [0.0502342, 0.79228795]], [[0.44883424, 0.15427643], [0.13817799, 0.34856772], [0.7989621, 0.33788306], [0.15699774, 0.7693662]]]]).float().cuda()
    scores.requires_grad_()
    points = torch.tensor([[[[0.06001121, 0.92963666, 0.5753327, 0.7251477], [0.53563064, 0.23129565, 0.92366195, 0.44261628]], [[0.5770022, 0.56625944, 0.23560429, 0.11178821], [0.7735967, 0.95678777, 0.25468266, 0.02895975]], [[0.0589869, 0.09017515, 0.5977862, 0.02797985], [0.603862, 0.35991007, 0.85761684, 0.3096559]], [[0.22359002, 0.13983732, 0.5544243, 0.68863827], [0.85646236, 0.75651926, 0.8638947, 0.83600986]], [[0.45424145, 0.27458847, 0.6456112, 0.47162914], [0.15773582, 0.47645122, 0.79964715, 0.3323908]], [[0.8351399, 0.84696376, 0.9431732, 0.29418713], [0.77168906, 0.6996871, 0.19354361, 0.03392768]], [[0.30976456, 0.7074133, 0.581795, 0.976677], [0.69656056, 0.07199162, 0.4708506, 0.29117996]], [[0.5829035, 0.30201727, 0.76556486, 0.0935446], [0.88030535, 0.16129416, 0.9242525, 0.49545723]]], [[[0.50899494, 0.06482804, 0.44939405, 0.37704808], [0.47028124, 0.11969638, 0.62823206, 0.28560323]], [[0.40690207, 0.689753, 0.51636654, 0.23040164], [0.06935787, 0.00488842, 0.22462702, 0.09182382]], [[0.26611632, 0.00184339, 0.7730655, 0.5228131], [0.87776035, 0.77895886, 0.2787183, 0.16620636]], [[0.502574, 0.04039001, 0.5368497, 0.98379374], [0.40973026, 0.3238272, 0.9733018, 0.13988364]], [[0.04586202, 0.20983845, 0.20662665, 0.22270602], [0.60387236, 0.5155574, 0.51237285, 0.6528438]], [[0.45735973, 0.86821306, 0.61054605, 0.8370336], [0.45193362, 0.3734138, 0.7825672, 0.5699416]], [[0.44591594, 0.12447512, 0.09282011, 0.7055254], [0.25223452, 0.46696228, 0.7051136, 0.892151]], [[0.49615085, 0.47321403, 0.93138885, 0.7652197], [0.38766378, 0.30332977, 0.23131835, 0.02863514]]]]).float().cuda()
    points.requires_grad_()
    centers = torch.tensor([[[[0.83878064, 0.96658987, 0.8033424, 0.9598312], [0.45035273, 0.8768925, 0.977736, 0.54547966]], [[0.01041394, 0.597893, 0.36212963, 0.4410367], [0.94879234, 0.8372817, 0.21237361, 0.67945415]], [[0.5096087, 0.26401454, 0.60034937, 0.5417416], [0.87591463, 0.546456, 0.4096033, 0.16373193]], [[0.79547447, 0.1482386, 0.12840575, 0.45384115], [0.5640288, 0.944541, 0.5745328, 0.73229736]], [[0.93011934, 0.7406011, 0.62621707, 0.8677915], [0.91563636, 0.3595413, 0.6678378, 0.6085383]], [[0.22431666, 0.65617776, 0.7483924, 0.6263364], [0.30968404, 0.78204364, 0.14899081, 0.09628749]], [[0.73675203, 0.72104895, 0.4648038, 0.6101647], [0.7817645, 0.16572917, 0.3311919, 0.43407398]], [[0.8193154, 0.09559608, 0.05978829, 0.90262103], [0.4256065, 0.8165596, 0.8206446, 0.6604721]]], [[[0.7159653, 0.18600845, 0.21433902, 0.3159626], [0.3921569, 0.33221376, 0.5061177, 0.7961841]], [[0.95338356, 0.04785997, 0.67185795, 0.6538394], [0.4729132, 0.33404195, 0.17750603, 0.8445621]], [[0.6755793, 0.16193843, 0.75943846, 0.92123103], [0.2781859, 0.03114432, 0.710638, 0.52729136]], [[0.8376105, 0.10858494, 0.13208169, 0.365772], [0.5930795, 0.27390373, 0.14036089, 0.170403]], [[0.3479789, 0.89855295, 0.04844379, 0.9871029], [0.29781651, 0.0244137, 0.9179047, 0.8081611]], [[0.12460887, 0.44991326, 0.19382608, 0.35037738], [0.2773472, 0.4362057, 0.36757517, 0.5993509]], [[0.29630446, 0.90046406, 0.5417113, 0.13510644], [0.09623539, 0.04226565, 0.32001644, 0.44358212]], [[0.5274848, 0.82096446, 0.9415489, 0.7123748], [0.7537517, 0.8086482, 0.85345286, 0.7472754]]]]).float().cuda()
    centers.requires_grad_()
    # Neighbor indices into points/centers; presumably (B, npoint, K) — TODO confirm.
    knn_idx = torch.tensor([[[6, 7, 4, 6], [2, 4, 2, 4]], [[7, 1, 3, 2], [6, 0, 2, 6]]]).long().cuda()
    aggregate = 'sum'
    expected_output = torch.tensor([[[[(- 0.08134781), 0.03877336, (- 0.8212776), (- 0.2869547)], [(- 0.23378491), (- 0.24112664), (- 0.1600166), (- 0.4121864)]], [[(- 0.05780616), (- 0.12298299), (- 0.0370461), (- 0.07889931)], [(- 0.13956165), (- 0.02006848), (- 0.10940295), (- 0.0293439)]], [[0.09284145, 0.58250105, 0.5927749, 0.16774094], [0.27070042, 0.13422406, 0.2617501, 0.23416464]], [[(- 0.06121218), (- 0.09561322), (- 0.20408826), 0.08079343], [0.00944228, 0.03874819, 0.08404065, 0.04041629]]], [[[(- 0.2110898), (- 0.13335688), (- 0.09315082), 0.08512095], [0.09121774, 0.15976946, 0.23994486, 0.14350912]], [[(- 0.36167958), (- 0.14891288), (- 0.64470863), (- 0.0646704)], [(- 0.28276974), (- 0.08847666), (- 0.46904767), 0.20491874]], [[(- 0.34877953), (- 0.35533834), (- 0.25225785), (- 0.4638189)], [(- 0.1420663), 0.09467781, 0.17088932, 0.22580585]], [[(- 0.3879708), (- 0.3991068), 0.05276498, (- 0.46989647)], [0.32522714, (- 0.02163534), 0.21604237, 0.4346682]]]]).float()
    # Forward pass must reproduce the recorded output.
    output = assign_score_withk(scores, points, centers, knn_idx, aggregate)
    assert torch.allclose(output.detach().cpu(), expected_output, atol=1e-06)
    # Sum-reduce so each output element contributes gradient 1.
    loss = output.sum()
    loss.backward()
    expected_scores_grad = torch.tensor([[[[0.04288036, (- 0.18217683)], [(- 0.78873926), 0.7485497], [(- 0.6866992), 0.05346543], [0.04288036, (- 0.18217683)]], [[(- 1.1407862), 0.13533896], [(- 0.06964391), (- 0.22948086)], [(- 1.1407862), 0.13533896], [(- 0.06964391), (- 0.22948086)]]], [[[(- 0.3363995), (- 2.212181)], [(- 1.1589496), (- 2.7724311)], [(- 0.9387654), (- 1.3163853)], [(- 1.4385346), (- 1.0614843)]], [[(- 0.5048497), 1.4143617], [(- 0.47332114), 0.6017133], [(- 0.30974793), 1.1995442], [(- 0.5048497), 1.4143617]]]]).float()
    expected_points_grad = torch.tensor([[[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.15585709, 0.15585709, 0.15585709, 0.15585709], [1.1893613, 1.1893613, 1.1893613, 1.1893613]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[1.6530733, 1.6530733, 1.6530733, 1.6530733], [1.8130021, 1.8130021, 1.8130021, 1.8130021]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.58863074, 0.58863074, 0.58863074, 0.58863074], [1.3727596, 1.3727596, 1.3727596, 1.3727596]], [[0.28462553, 0.28462553, 0.28462553, 0.28462553], [0.8378516, 0.8378516, 0.8378516, 0.8378516]]], [[[0.13817799, 0.13817799, 0.13817799, 0.13817799], [0.34856772, 0.34856772, 0.34856772, 0.34856772]], [[0.7405102, 0.7405102, 0.7405102, 0.7405102], [0.06438422, 0.06438422, 0.06438422, 0.06438422]], [[0.8491963, 0.8491963, 0.8491963, 0.8491963], [1.1301711, 1.1301711, 1.1301711, 1.1301711]], [[0.6887394, 0.6887394, 0.6887394, 0.6887394], [0.22089851, 0.22089851, 0.22089851, 0.22089851]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.605832, 0.605832, 0.605832, 0.605832], [0.92364264, 0.92364264, 0.92364264, 0.92364264]], [[0.23089725, 0.23089725, 0.23089725, 0.23089725], [0.5568468, 0.5568468, 0.5568468, 0.5568468]]]]).float()
    expected_centers_grad = torch.tensor([[[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[(- 1.0493311), (- 1.0493311), (- 1.0493311), (- 1.0493311)], [(- 2.0301602), (- 2.0301602), (- 2.0301602), (- 2.0301602)]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[(- 1.6328557), (- 1.6328557), (- 1.6328557), (- 1.6328557)], [(- 3.1828144), (- 3.1828144), (- 3.1828144), (- 3.1828144)]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]], [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[(- 1.5429721), (- 1.5429721), (- 1.5429721), (- 1.5429721)], [(- 1.6100934), (- 1.6100934), (- 1.6100934), (- 1.6100934)]], [[(- 1.7103812), (- 1.7103812), (- 1.7103812), (- 1.7103812)], [(- 1.6344175), (- 1.6344175), (- 1.6344175), (- 1.6344175)]]]]).float()
    # Gradients of all three differentiable inputs must match the references.
    assert torch.allclose(scores.grad.detach().cpu(), expected_scores_grad, atol=1e-06)
    assert torch.allclose(points.grad.detach().cpu(), expected_points_grad, atol=1e-06)
    assert torch.allclose(centers.grad.detach().cpu(), expected_centers_grad, atol=1e-06)
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_ball_query():
    """Check ``ball_query`` neighbor indices for two radius ranges.

    NOTE(review): argument order appears to be
    (min_radius, max_radius, sample_num, xyz, center_xyz) — confirm against
    the mmcv.ops.ball_query signature.
    """
    new_xyz = torch.tensor([[[(- 0.074), 1.3147, (- 1.3625)], [(- 2.2769), 2.7817, (- 0.2334)], [(- 0.4003), 2.4666, (- 0.5116)], [(- 0.074), 1.3147, (- 1.3625)], [(- 0.074), 1.3147, (- 1.3625)]], [[(- 2.0289), 2.4952, (- 0.1708)], [(- 2.0668), 6.0278, (- 0.4875)], [0.4066, 1.4211, (- 0.2947)], [(- 2.0289), 2.4952, (- 0.1708)], [(- 2.0289), 2.4952, (- 0.1708)]]]).cuda()
    xyz = torch.tensor([[[(- 0.074), 1.3147, (- 1.3625)], [0.5555, 1.0399, (- 1.3634)], [(- 0.4003), 2.4666, (- 0.5116)], [(- 0.5251), 2.4379, (- 0.8466)], [(- 0.9691), 1.1418, (- 1.3733)], [(- 0.2232), 0.9561, (- 1.3626)], [(- 2.2769), 2.7817, (- 0.2334)], [(- 0.2822), 1.3192, (- 1.3645)], [0.1533, 1.5024, (- 1.0432)], [0.4917, 1.1529, (- 1.3496)]], [[(- 2.0289), 2.4952, (- 0.1708)], [(- 0.7188), 0.9956, (- 0.5096)], [(- 2.0668), 6.0278, (- 0.4875)], [(- 1.9304), 3.3092, 0.661], [0.0949, 1.4332, 0.314], [(- 1.2879), 2.0008, (- 0.7791)], [(- 0.7252), 0.9611, (- 0.6371)], [0.4066, 1.4211, (- 0.2947)], [0.322, 1.4447, 0.3548], [(- 0.9744), 2.3856, (- 1.2)]]]).cuda()
    # Radius range (0, 0.2]: only the center point itself falls inside, so
    # the single found index is repeated to fill all 5 sample slots.
    idx = ball_query(0, 0.2, 5, xyz, new_xyz)
    expected_idx = torch.tensor([[[0, 0, 0, 0, 0], [6, 6, 6, 6, 6], [2, 2, 2, 2, 2], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]], [[0, 0, 0, 0, 0], [2, 2, 2, 2, 2], [7, 7, 7, 7, 7], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]).cuda()
    assert torch.all((idx == expected_idx))
    # Radius range (0.2, 0.4]: some centers now pick up extra neighbors.
    idx = ball_query(0.2, 0.4, 5, xyz, new_xyz)
    expected_idx = torch.tensor([[[0, 5, 7, 0, 0], [6, 6, 6, 6, 6], [2, 3, 2, 2, 2], [0, 5, 7, 0, 0], [0, 5, 7, 0, 0]], [[0, 0, 0, 0, 0], [2, 2, 2, 2, 2], [7, 7, 7, 7, 7], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]).cuda()
    assert torch.all((idx == expected_idx))
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
class TestBBox(object):
    """Tests for ``mmcv.ops.bbox_overlaps`` on CUDA tensors."""

    def _test_bbox_overlaps(self, dtype=torch.float):
        """Exercise pairwise and aligned IoU against hand-computed values.

        Args:
            dtype: tensor dtype to test (float or half).
        """
        from mmcv.ops import bbox_overlaps
        # Pairwise mode: result is an (N, M) IoU matrix.
        b1 = torch.tensor([[1.0, 1.0, 3.0, 4.0], [2.0, 2.0, 3.0, 4.0], [7.0, 7.0, 8.0, 8.0]]).cuda().type(dtype)
        b2 = torch.tensor([[0.0, 2.0, 2.0, 5.0], [2.0, 1.0, 3.0, 3.0]]).cuda().type(dtype)
        should_output = np.array([[0.33333334, 0.5], [0.2, 0.5], [0.0, 0.0]])
        out = bbox_overlaps(b1, b2, offset=1)
        assert np.allclose(out.cpu().numpy(), should_output, 0.01)
        # Aligned mode: boxes matched row-by-row, result is a vector.
        b1 = torch.tensor([[1.0, 1.0, 3.0, 4.0], [2.0, 2.0, 3.0, 4.0]]).cuda().type(dtype)
        b2 = torch.tensor([[0.0, 2.0, 2.0, 5.0], [2.0, 1.0, 3.0, 3.0]]).cuda().type(dtype)
        should_output = np.array([0.33333334, 0.5])
        out = bbox_overlaps(b1, b2, aligned=True, offset=1)
        assert np.allclose(out.cpu().numpy(), should_output, 0.01)
        # One box against several (the original had this assignment duplicated
        # on two consecutive lines; the dead duplicate was removed).
        b1 = torch.tensor([[0.0, 0.0, 3.0, 3.0]]).cuda().type(dtype)
        b2 = torch.tensor([[4.0, 0.0, 5.0, 3.0], [3.0, 0.0, 4.0, 3.0], [2.0, 0.0, 3.0, 3.0], [1.0, 0.0, 2.0, 3.0]]).cuda().type(dtype)
        should_output = np.array([0, 0.2, 0.5, 0.5])
        out = bbox_overlaps(b1, b2, offset=1)
        assert np.allclose(out.cpu().numpy(), should_output, 0.01)

    def test_bbox_overlaps_float(self):
        self._test_bbox_overlaps(torch.float)

    def test_bbox_overlaps_half(self):
        self._test_bbox_overlaps(torch.half)
|
class TestBilinearGridSample(object):
    """Checks that the pure-PyTorch ``bilinear_grid_sample`` matches
    ``F.grid_sample`` on random inputs."""

    def _test_bilinear_grid_sample(self, dtype=torch.float, align_corners=False, multiplier=1, precision=0.001):
        """Compare bilinear_grid_sample with F.grid_sample on random data.

        Args:
            dtype: input tensor dtype.
            align_corners: forwarded to both implementations.
            multiplier: scale applied to the sampling grid; values beyond 1
                (or negative) push samples out of bounds to exercise the
                border/padding path.
            precision: np.allclose tolerance.
        """
        from mmcv.ops.point_sample import bilinear_grid_sample
        input = torch.rand(1, 1, 20, 20, dtype=dtype)
        # Identity affine transform; affine_grid turns it into sample coords.
        grid = torch.Tensor([[[1, 0, 0], [0, 1, 0]]])
        grid = F.affine_grid(grid, (1, 1, 15, 15), align_corners=align_corners).type_as(input)
        grid *= multiplier
        out = bilinear_grid_sample(input, grid, align_corners=align_corners)
        ref_out = F.grid_sample(input, grid, align_corners=align_corners)
        assert np.allclose(out.data.detach().cpu().numpy(), ref_out.data.detach().cpu().numpy(), precision)

    def test_bilinear_grid_sample(self):
        # Cover both dtypes, both align_corners settings, and grids scaled
        # out of range in both directions. (A duplicate
        # (torch.float, False) invocation was removed.)
        self._test_bilinear_grid_sample(torch.double, False)
        self._test_bilinear_grid_sample(torch.double, True)
        self._test_bilinear_grid_sample(torch.float, False)
        self._test_bilinear_grid_sample(torch.float, True)
        self._test_bilinear_grid_sample(torch.float, True, 5)
        self._test_bilinear_grid_sample(torch.float, False, 10)
        self._test_bilinear_grid_sample(torch.float, True, (- 6))
        self._test_bilinear_grid_sample(torch.float, False, (- 10))
        self._test_bilinear_grid_sample(torch.double, True, 5)
        self._test_bilinear_grid_sample(torch.double, False, 10)
        self._test_bilinear_grid_sample(torch.double, True, (- 6))
        self._test_bilinear_grid_sample(torch.double, False, (- 10))
|
def _test_border_align_allclose(device, dtype, pool_size):
    """Run border_align via both the functional and the module interface and
    compare outputs and input gradients with the precomputed references."""
    if ((device == 'cuda') and (not torch.cuda.is_available())):
        pytest.skip('test requires GPU')
    try:
        from mmcv.ops import BorderAlign, border_align
    except ModuleNotFoundError:
        pytest.skip('BorderAlign op is not successfully compiled')
    # Reference output/gradient arrays are keyed by pool size.
    expected_out = np.array(output_dict[pool_size])
    expected_in_grad = np.array(input_grad_dict[pool_size])
    feat = torch.tensor(np.array(input_arr), dtype=dtype, device=device, requires_grad=True)
    rois = torch.tensor(np.array(boxes_arr), dtype=dtype, device=device)
    # Functional interface, run on a deep copy so its gradient does not mix
    # with the module-interface pass below.
    feat_copy = copy.deepcopy(feat)
    result = border_align(feat_copy, rois, pool_size)
    result.backward(torch.ones_like(result))
    assert np.allclose(result.data.type(dtype).cpu().numpy(), expected_out, atol=1e-05)
    assert np.allclose(feat_copy.grad.data.type(dtype).cpu().numpy(), expected_in_grad, atol=1e-05)
    # Module interface must produce identical results.
    layer = BorderAlign(pool_size)
    result = layer(feat, rois)
    result.backward(torch.ones_like(result))
    assert np.allclose(result.data.type(dtype).cpu().numpy(), expected_out, atol=1e-05)
    assert np.allclose(feat.grad.data.type(dtype).cpu().numpy(), expected_in_grad, atol=1e-05)
|
@pytest.mark.parametrize('device', ['cuda'])
@pytest.mark.parametrize('dtype', [torch.float, torch.half, torch.double])
@pytest.mark.parametrize('pool_size', [1, 2])
def test_border_align(device, dtype, pool_size):
    # Thin parametrized entry point; all checking happens in the helper above.
    _test_border_align_allclose(device, dtype, pool_size)
|
class TestBoxIoURotated(object):
    """Tests for ``mmcv.ops.box_iou_rotated`` (IoU and IoF modes, CPU/CUDA).

    Every test checks four call patterns: pairwise, aligned, and the same
    two with negated angles plus ``clockwise=False`` — negating the angle
    while flipping the rotation convention describes the same boxes, so the
    results must be unchanged. The four original methods shared one
    duplicated body; it is factored into ``_run_case``.
    """

    @staticmethod
    def _make_boxes(device):
        """Return fresh (boxes1, boxes2) float32 tensors in
        (cx, cy, w, h, angle) format on the given device.

        Fresh tensors per call matter: the tests mutate the angle column in
        place, and ``torch.from_numpy`` shares memory with its source.
        """
        np_boxes1 = np.asarray([[1.0, 1.0, 3.0, 4.0, 0.5], [2.0, 2.0, 3.0, 4.0, 0.6], [7.0, 7.0, 8.0, 8.0, 0.4]], dtype=np.float32)
        np_boxes2 = np.asarray([[0.0, 2.0, 2.0, 5.0, 0.3], [2.0, 1.0, 3.0, 3.0, 0.5], [5.0, 5.0, 6.0, 7.0, 0.4]], dtype=np.float32)
        boxes1 = torch.from_numpy(np_boxes1)
        boxes2 = torch.from_numpy(np_boxes2)
        if device == 'cuda':
            boxes1 = boxes1.cuda()
            boxes2 = boxes2.cuda()
        return boxes1, boxes2

    def _run_case(self, device, mode, np_expect_ious, np_expect_ious_aligned):
        """Run the four standard call patterns and check against references."""
        from mmcv.ops import box_iou_rotated
        boxes1, boxes2 = self._make_boxes(device)
        ious = box_iou_rotated(boxes1, boxes2, mode=mode)
        assert np.allclose(ious.cpu().numpy(), np_expect_ious, atol=0.0001)
        ious = box_iou_rotated(boxes1, boxes2, mode=mode, aligned=True)
        assert np.allclose(ious.cpu().numpy(), np_expect_ious_aligned, atol=0.0001)
        # Negated angles + clockwise=False must yield identical results.
        boxes1[..., -1] *= -1
        boxes2[..., -1] *= -1
        ious = box_iou_rotated(boxes1, boxes2, mode=mode, clockwise=False)
        assert np.allclose(ious.cpu().numpy(), np_expect_ious, atol=0.0001)
        ious = box_iou_rotated(boxes1, boxes2, mode=mode, aligned=True, clockwise=False)
        assert np.allclose(ious.cpu().numpy(), np_expect_ious_aligned, atol=0.0001)

    def test_box_iou_rotated_cpu(self):
        np_expect_ious = np.asarray([[0.3708, 0.4351, 0.0], [0.1104, 0.4487, 0.0424], [0.0, 0.0, 0.3622]], dtype=np.float32)
        np_expect_ious_aligned = np.asarray([0.3708, 0.4487, 0.3622], dtype=np.float32)
        self._run_case('cpu', 'iou', np_expect_ious, np_expect_ious_aligned)

    @pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
    def test_box_iou_rotated_cuda(self):
        np_expect_ious = np.asarray([[0.3708, 0.4351, 0.0], [0.1104, 0.4487, 0.0424], [0.0, 0.0, 0.3622]], dtype=np.float32)
        np_expect_ious_aligned = np.asarray([0.3708, 0.4487, 0.3622], dtype=np.float32)
        self._run_case('cuda', 'iou', np_expect_ious, np_expect_ious_aligned)

    def test_box_iou_rotated_iof_cpu(self):
        np_expect_ious = np.asarray([[0.4959, 0.5306, 0.0], [0.1823, 0.542, 0.1832], [0.0, 0.0, 0.4404]], dtype=np.float32)
        np_expect_ious_aligned = np.asarray([0.4959, 0.542, 0.4404], dtype=np.float32)
        self._run_case('cpu', 'iof', np_expect_ious, np_expect_ious_aligned)

    @pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
    def test_box_iou_rotated_iof_cuda(self):
        np_expect_ious = np.asarray([[0.4959, 0.5306, 0.0], [0.1823, 0.542, 0.1832], [0.0, 0.0, 0.4404]], dtype=np.float32)
        np_expect_ious_aligned = np.asarray([0.4959, 0.542, 0.4404], dtype=np.float32)
        self._run_case('cuda', 'iof', np_expect_ious, np_expect_ious_aligned)
|
class TestCarafe(object):
    """Numerical gradient checks for the CARAFE upsampling ops (CUDA only)."""

    def test_carafe_naive_gradcheck(self):
        if (not torch.cuda.is_available()):
            return
        from mmcv.ops import CARAFENaive
        # Double precision is required for gradcheck to converge.
        features = torch.randn(2, 64, 3, 3, requires_grad=True, device='cuda').double()
        masks = torch.randn(2, 100, 6, 6, requires_grad=True, device='cuda').sigmoid().double()
        gradcheck(CARAFENaive(5, 4, 2), (features, masks), atol=0.0001, eps=0.0001)

    def test_carafe_gradcheck(self):
        if (not torch.cuda.is_available()):
            return
        from mmcv.ops import CARAFE
        features = torch.randn(2, 64, 3, 3, requires_grad=True, device='cuda').double()
        masks = torch.randn(2, 100, 6, 6, requires_grad=True, device='cuda').sigmoid().double()
        gradcheck(CARAFE(5, 4, 2), (features, masks), atol=0.0001, eps=0.0001)
|
class Loss(nn.Module):
    """Toy criterion returning the mean of (input - target)."""

    def __init__(self):
        super().__init__()

    def forward(self, input, target):
        # Flatten both tensors so only the element counts need to agree.
        flat_input = input.view(-1)
        flat_target = target.view(-1)
        return (flat_input - flat_target).mean()
|
class TestCrissCrossAttention(object):
    """Regression test: CrissCrossAttention must reproduce a recorded run."""

    def test_cc_attention(self):
        device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
        from mmcv.ops import CrissCrossAttention
        loss_func = Loss()
        # Load the recorded input / expected-output pair from disk.
        raw_in = np.fromfile('tests/data/for_ccattention/ccattention_input.bin', dtype=np.float32)
        raw_ref = np.fromfile('tests/data/for_ccattention/ccattention_output.bin', dtype=np.float32)
        in_tensor = torch.FloatTensor(raw_in.reshape((1, 32, 45, 45)))
        expected = torch.FloatTensor(raw_ref.reshape((1, 32, 45, 45)))
        label = torch.ones((1, 32, 45, 45))
        # requires_grad is set before the device move, matching the recorded
        # reference run's autograd graph shape.
        in_tensor.requires_grad = True
        shape = in_tensor.shape
        cca = CrissCrossAttention(shape[1])
        cca.to(device)
        in_tensor = in_tensor.to(device)
        label = label.to(device)
        cca.train()
        test_output = cca(in_tensor)
        test_loss = loss_func(test_output, label)
        # Backward only verifies that gradients can be computed at all.
        test_loss.backward()
        test_output = test_output.detach().cpu().numpy()
        assert np.allclose(test_output, expected.numpy())
        assert (test_output.shape == shape)
|
def test_contour_expand():
    """``contour_expand`` must grow the two seed kernel labels to cover the
    full text mask, and must accept both numpy arrays and torch tensors."""
    from mmcv.ops import contour_expand
    # Seed labels: two small kernels (labels 1 and 2) inside a larger mask.
    np_internal_kernel_label = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 2, 0], [0, 0, 1, 1, 0, 0, 0, 0, 2, 0], [0, 0, 1, 1, 0, 0, 0, 0, 2, 0], [0, 0, 1, 1, 0, 0, 0, 0, 2, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]).astype(np.int32)
    # Full text region that the labels should expand into.
    np_kernel_mask1 = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]).astype(np.uint8)
    np_kernel_mask2 = (np_internal_kernel_label > 0).astype(np.uint8)
    np_kernel_mask = np.stack([np_kernel_mask1, np_kernel_mask2])
    min_area = 1
    kernel_region_num = 3
    result = contour_expand(np_kernel_mask, np_internal_kernel_label, min_area, kernel_region_num)
    # Each mask pixel ends up with the label of its nearest seed kernel.
    gt = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 2, 2, 2, 0], [0, 0, 1, 1, 1, 1, 2, 2, 2, 0], [0, 0, 1, 1, 1, 1, 2, 2, 2, 0], [0, 0, 1, 1, 1, 1, 2, 2, 2, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
    assert np.allclose(result, gt)
    # Same call with torch tensors must give the same answer.
    np_kernel_mask_t = torch.from_numpy(np_kernel_mask)
    np_internal_kernel_label_t = torch.from_numpy(np_internal_kernel_label)
    result = contour_expand(np_kernel_mask_t, np_internal_kernel_label_t, min_area, kernel_region_num)
    assert np.allclose(result, gt)
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_convex_iou():
    """convex_iou must reproduce the precomputed IoU matrix."""
    pts = torch.from_numpy(np_pointsets).float().cuda()
    polys = torch.from_numpy(np_polygons).float().cuda()
    expected = torch.from_numpy(np_expected_iou).float().cuda()
    assert torch.allclose(convex_iou(pts, polys), expected, atol=0.001)
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_convex_giou():
    """convex_giou must reproduce both the precomputed GIoU and its gradient."""
    pts = torch.from_numpy(np_pointsets).float().cuda()
    polys = torch.from_numpy(np_polygons).float().cuda()
    giou, grad = convex_giou(pts, polys)
    assert torch.allclose(giou, torch.from_numpy(np_expected_giou).float().cuda(), atol=0.001)
    assert torch.allclose(grad, torch.from_numpy(np_expected_grad).float().cuda(), atol=0.001)
|
def test_corner_pool_device_and_dtypes_cpu():
    """CornerPool on CPU: each mode must produce the hand-computed cumulative
    max along its direction, and preserve the input tensor type.

    CommandLine:
        xdoctest -m tests/test_corner_pool.py test_corner_pool_device_and_dtypes_cpu
    """
    # An unknown pooling mode must be rejected at construction time.
    with pytest.raises(AssertionError):
        pool = CornerPool('corner')
    lr_tensor = torch.tensor([[[[0, 0, 0, 0, 0], [2, 1, 3, 0, 2], [5, 4, 1, 1, 6], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]])
    tb_tensor = torch.tensor([[[[0, 3, 1, 0, 0], [0, 1, 1, 0, 0], [0, 3, 4, 0, 0], [0, 2, 2, 0, 0], [0, 0, 2, 0, 0]]]])
    cases = [
        ('left', lr_tensor, torch.tensor([[[[0, 0, 0, 0, 0], [3, 3, 3, 2, 2], [6, 6, 6, 6, 6], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]])),
        ('right', lr_tensor, torch.tensor([[[[0, 0, 0, 0, 0], [2, 2, 3, 3, 3], [5, 5, 5, 5, 6], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]])),
        ('top', tb_tensor, torch.tensor([[[[0, 3, 4, 0, 0], [0, 3, 4, 0, 0], [0, 3, 4, 0, 0], [0, 2, 2, 0, 0], [0, 0, 2, 0, 0]]]])),
        ('bottom', tb_tensor, torch.tensor([[[[0, 3, 1, 0, 0], [0, 3, 1, 0, 0], [0, 3, 4, 0, 0], [0, 3, 4, 0, 0], [0, 3, 4, 0, 0]]]])),
    ]
    for mode, src, answer in cases:
        pooled = CornerPool(mode)(src)
        # Output keeps the input's type and matches the expected pooling.
        assert (pooled.type() == src.type())
        assert torch.equal(pooled, answer)
|
def assert_equal_tensor(tensor_a, tensor_b):
    """Raise AssertionError unless the two tensors are element-wise equal."""
    assert (tensor_a == tensor_b).all()
|
class TestCorrelation():
    """Checks the Correlation op forward/backward on CUDA for several dtypes."""

    def _test_correlation(self, dtype=torch.float):
        corr = Correlation(max_displacement=0)
        feat1 = torch.tensor(_input1, dtype=dtype).cuda()
        feat2 = torch.tensor(_input2, dtype=dtype).cuda()
        feat1.requires_grad = True
        feat2.requires_grad = True
        result = corr(feat1, feat2)
        result.backward(torch.ones_like(result))
        assert_equal_tensor(result, torch.tensor(_gt_out, dtype=dtype).cuda())
        # With max_displacement=0 the gradient of each input is the other input.
        assert_equal_tensor(feat1.grad.detach(), feat2)
        assert_equal_tensor(feat2.grad.detach(), feat1)

    @pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
    def test_correlation(self):
        for dt in (torch.float, torch.double, torch.half):
            self._test_correlation(dt)
|
class TestDeformconv(object):
    """Tests DeformConv2dPack outputs and gradients against precomputed
    single-sample references repeated over a batch."""

    def _test_deformconv(self, dtype=torch.float, threshold=0.001, device='cuda', batch_size=10, im2col_step=2):
        """Run DeformConv2dPack with fixed weights and compare results.

        Args:
            dtype: tensor/model dtype.
            threshold: np.allclose tolerance.
            device: 'cpu' or 'cuda' (skips when CUDA is unavailable).
            batch_size: number of repeated samples; the op asserts the
                effective batch is divisible by im2col_step.
            im2col_step: im2col chunk size forwarded to DeformConv2dPack.
        """
        if ((not torch.cuda.is_available()) and (device == 'cuda')):
            pytest.skip('test requires GPU')
        from mmcv.ops import DeformConv2dPack
        c_in = 1
        c_out = 1
        # BUG FIX: a stray `batch_size = 10` here used to clobber the
        # parameter, so the batch_size=1 case below was never exercised.
        repeated_input = np.repeat(input, batch_size, axis=0)
        repeated_gt_out = np.repeat(gt_out, batch_size, axis=0)
        repeated_gt_x_grad = np.repeat(gt_x_grad, batch_size, axis=0)
        x = torch.tensor(repeated_input, device=device, dtype=dtype)
        x.requires_grad = True
        model = DeformConv2dPack(in_channels=c_in, out_channels=c_out, kernel_size=2, stride=1, padding=0, im2col_step=im2col_step)
        # Pin offset and deform weights so the output is deterministic.
        model.conv_offset.weight.data = torch.nn.Parameter(torch.Tensor(offset_weight).reshape(8, 1, 2, 2))
        model.conv_offset.bias.data = torch.nn.Parameter(torch.Tensor(offset_bias).reshape(8))
        model.weight.data = torch.nn.Parameter(torch.Tensor(deform_weight).reshape(1, 1, 2, 2))
        if (device == 'cuda'):
            model.cuda()
        model.type(dtype)
        out = model(x)
        out.backward(torch.ones_like(out))
        assert np.allclose(out.data.detach().cpu().numpy(), repeated_gt_out, threshold)
        assert np.allclose(x.grad.detach().cpu().numpy(), repeated_gt_x_grad, threshold)
        # Model gradients accumulate over the batch, so divide by batch_size
        # before comparing with the single-sample references.
        assert np.allclose((model.conv_offset.weight.grad.detach().cpu().numpy() / batch_size), gt_offset_weight_grad, threshold)
        assert np.allclose((model.conv_offset.bias.grad.detach().cpu().numpy() / batch_size), gt_offset_bias_grad, threshold)
        assert np.allclose((model.weight.grad.detach().cpu().numpy() / batch_size), gt_deform_weight_grad, threshold)
        from mmcv.ops import DeformConv2d
        # DeformConv2d has no bias and restricts channel/group combinations.
        model = DeformConv2d(1, 1, 2, stride=1, padding=0)
        assert (not hasattr(model, 'bias'))
        with pytest.raises(AssertionError):
            model = DeformConv2d(1, 1, 2, stride=1, padding=0, bias=True)
        with pytest.raises(AssertionError):
            model = DeformConv2d(3, 2, 3, groups=2)
        with pytest.raises(AssertionError):
            model = DeformConv2d(3, 4, 3, groups=3)

    def _test_amp_deformconv(self, input_dtype, threshold=0.001, batch_size=10, im2col_step=2):
        """Test deform_conv under torch.cuda.amp (available since torch 1.6).

        The input may be torch.float or torch.half; with amp the model dtype
        is NOT set manually, unlike in ``_test_deformconv``.

        Args:
            input_dtype: torch.float or torch.half.
            threshold: np.allclose tolerance.
            batch_size: number of repeated samples.
            im2col_step: im2col chunk size forwarded to DeformConv2dPack.
        """
        if (not torch.cuda.is_available()):
            return
        from mmcv.ops import DeformConv2dPack
        c_in = 1
        c_out = 1
        repeated_input = np.repeat(input, batch_size, axis=0)
        repeated_gt_out = np.repeat(gt_out, batch_size, axis=0)
        repeated_gt_x_grad = np.repeat(gt_x_grad, batch_size, axis=0)
        x = torch.Tensor(repeated_input).cuda().type(input_dtype)
        x.requires_grad = True
        model = DeformConv2dPack(in_channels=c_in, out_channels=c_out, kernel_size=2, stride=1, padding=0, im2col_step=im2col_step)
        model.conv_offset.weight.data = torch.nn.Parameter(torch.Tensor(offset_weight).reshape(8, 1, 2, 2))
        model.conv_offset.bias.data = torch.nn.Parameter(torch.Tensor(offset_bias).reshape(8))
        model.weight.data = torch.nn.Parameter(torch.Tensor(deform_weight).reshape(1, 1, 2, 2))
        model.cuda()
        out = model(x)
        out.backward(torch.ones_like(out))
        assert np.allclose(out.data.detach().cpu().numpy(), repeated_gt_out, threshold)
        assert np.allclose(x.grad.detach().cpu().numpy(), repeated_gt_x_grad, threshold)
        assert np.allclose((model.conv_offset.weight.grad.detach().cpu().numpy() / batch_size), gt_offset_weight_grad, threshold)
        assert np.allclose((model.conv_offset.bias.grad.detach().cpu().numpy() / batch_size), gt_offset_bias_grad, threshold)
        assert np.allclose((model.weight.grad.detach().cpu().numpy() / batch_size), gt_deform_weight_grad, threshold)
        from mmcv.ops import DeformConv2d
        model = DeformConv2d(1, 1, 2, stride=1, padding=0)
        assert (not hasattr(model, 'bias'))
        with pytest.raises(AssertionError):
            model = DeformConv2d(1, 1, 2, stride=1, padding=0, bias=True)
        with pytest.raises(AssertionError):
            model = DeformConv2d(3, 2, 3, groups=2)
        with pytest.raises(AssertionError):
            model = DeformConv2d(3, 4, 3, groups=3)

    def test_deformconv(self):
        self._test_deformconv(torch.double, device='cpu')
        self._test_deformconv(torch.float, device='cpu', threshold=0.1)
        self._test_deformconv(torch.double)
        self._test_deformconv(torch.float)
        self._test_deformconv(torch.half, threshold=0.1)
        # im2col_step is clamped to the batch, so batch 1 is legal here.
        self._test_deformconv(torch.float, batch_size=1, im2col_step=2)
        with pytest.raises(AssertionError, match='batch size must be divisible by im2col_step'):
            self._test_deformconv(torch.float, batch_size=10, im2col_step=3)
        # amp exists only on torch >= 1.6.0 (and not on parrots).
        if ((TORCH_VERSION != 'parrots') and (digit_version(TORCH_VERSION) >= digit_version('1.6.0'))):
            with autocast(enabled=True):
                self._test_amp_deformconv(torch.float, 0.1)
                self._test_amp_deformconv(torch.half, 0.1)
|
class TestDeformRoIPool(object):
    """Gradient checks for (Modulated)DeformRoIPoolPack (CUDA only).

    The two tests shared an identical loop differing only in the pool class;
    it is factored into ``_gradcheck_pool``.
    """

    def _gradcheck_pool(self, pool_cls):
        """Run gradcheck for the given deform RoI pool class on all cases."""
        pool_h = 2
        pool_w = 2
        spatial_scale = 1.0
        sampling_ratio = 2
        for case in inputs:
            np_input = np.array(case[0])
            np_rois = np.array(case[1])
            x = torch.tensor(np_input, device='cuda', dtype=torch.float, requires_grad=True)
            rois = torch.tensor(np_rois, device='cuda', dtype=torch.float)
            output_c = x.size(1)
            droipool = pool_cls((pool_h, pool_w), output_c, spatial_scale=spatial_scale, sampling_ratio=sampling_ratio).cuda()
            if _USING_PARROTS:
                # parrots' gradcheck signature differs from torch's.
                gradcheck(droipool, (x, rois), no_grads=[rois])
            else:
                gradcheck(droipool, (x, rois), eps=0.01, atol=0.01)

    def test_deform_roi_pool_gradcheck(self):
        if (not torch.cuda.is_available()):
            return
        from mmcv.ops import DeformRoIPoolPack
        self._gradcheck_pool(DeformRoIPoolPack)

    def test_modulated_deform_roi_pool_gradcheck(self):
        if (not torch.cuda.is_available()):
            return
        from mmcv.ops import ModulatedDeformRoIPoolPack
        self._gradcheck_pool(ModulatedDeformRoIPoolPack)
|
class Testfocalloss(object):
    """CUDA value and gradient tests for sigmoid/softmax focal loss.

    The original `_test_*` methods duplicated the forward/backward loop and
    the gradcheck loop; they are factored into two private helpers.
    """

    def _check_loss(self, loss_func, expected_outputs, dtype):
        """Forward/backward ``loss_func`` on every fixture case and compare
        the loss value and input gradient with ``expected_outputs``."""
        alpha = 0.25
        gamma = 2.0
        for case, output in zip(inputs, expected_outputs):
            np_x = np.array(case[0])
            np_y = np.array(case[1])
            np_x_grad = np.array(output[1])
            x = torch.from_numpy(np_x).cuda().type(dtype)
            x.requires_grad_()
            y = torch.from_numpy(np_y).cuda().long()
            loss = loss_func(x, y, gamma, alpha, None, 'mean')
            loss.backward()
            assert np.allclose(loss.data.cpu().numpy(), output[0], 0.01)
            assert np.allclose(x.grad.data.cpu(), np_x_grad, 0.01)

    def _check_grad(self, loss_cls, dtype):
        """Numerical gradcheck of a focal-loss module on every input case."""
        alpha = 0.25
        gamma = 2.0
        for case in inputs:
            np_x = np.array(case[0])
            np_y = np.array(case[1])
            x = torch.from_numpy(np_x).cuda().type(dtype)
            x.requires_grad_()
            y = torch.from_numpy(np_y).cuda().long()
            floss = loss_cls(gamma, alpha)
            if _USING_PARROTS:
                # gradcheck is skipped under parrots, as in the original
                pass
            else:
                gradcheck(floss, (x, y), eps=0.01, atol=0.01)

    def _test_softmax(self, dtype=torch.float):
        if not torch.cuda.is_available():
            return
        from mmcv.ops import softmax_focal_loss
        self._check_loss(softmax_focal_loss, softmax_outputs, dtype)

    def _test_sigmoid(self, dtype=torch.float):
        if not torch.cuda.is_available():
            return
        from mmcv.ops import sigmoid_focal_loss
        self._check_loss(sigmoid_focal_loss, sigmoid_outputs, dtype)

    def _test_grad_softmax(self, dtype=torch.float):
        if not torch.cuda.is_available():
            return
        from mmcv.ops import SoftmaxFocalLoss
        self._check_grad(SoftmaxFocalLoss, dtype)

    def _test_grad_sigmoid(self, dtype=torch.float):
        if not torch.cuda.is_available():
            return
        from mmcv.ops import SigmoidFocalLoss
        self._check_grad(SigmoidFocalLoss, dtype)

    def test_softmax_float(self):
        self._test_softmax(dtype=torch.float)

    def test_softmax_half(self):
        self._test_softmax(dtype=torch.half)

    def test_sigmoid_float(self):
        self._test_sigmoid(dtype=torch.float)

    def test_sigmoid_half(self):
        self._test_sigmoid(dtype=torch.half)

    def test_grad_softmax_float(self):
        self._test_grad_softmax(dtype=torch.float)

    def test_grad_sigmoid_float(self):
        self._test_grad_sigmoid(dtype=torch.float)
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_fps():
    """furthest_point_sample should pick the known farthest-point indices."""
    points = torch.tensor([[[(- 0.2748), 1.002, (- 1.1674)], [0.1015, 1.3952, (- 1.2681)], [(- 0.807), 2.4137, (- 0.5845)], [(- 1.0001), 2.1982, (- 0.5859)], [0.3841, 1.8983, (- 0.7431)]], [[(- 1.0696), 3.0758, (- 0.1899)], [(- 0.2559), 3.5521, (- 0.1402)], [0.8164, 4.0081, (- 0.1839)], [(- 1.1), 3.0213, (- 0.8205)], [(- 0.0518), 3.7251, (- 0.395)]]]).cuda()
    expected_idx = torch.tensor([[0, 2, 4], [0, 2, 1]]).cuda()
    sampled_idx = furthest_point_sample(points, 3)
    assert torch.all(sampled_idx == expected_idx)
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_fps_with_dist():
    """furthest_point_sample_with_dist should match plain FPS on a
    precomputed squared-distance matrix and reproduce saved fixtures."""
    xyz = torch.tensor([[[(- 0.2748), 1.002, (- 1.1674)], [0.1015, 1.3952, (- 1.2681)], [(- 0.807), 2.4137, (- 0.5845)], [(- 1.0001), 2.1982, (- 0.5859)], [0.3841, 1.8983, (- 0.7431)]], [[(- 1.0696), 3.0758, (- 0.1899)], [(- 0.2559), 3.5521, (- 0.1402)], [0.8164, 4.0081, (- 0.1839)], [(- 1.1), 3.0213, (- 0.8205)], [(- 0.0518), 3.7251, (- 0.395)]]]).cuda()
    expected_idx = torch.tensor([[0, 2, 4], [0, 2, 1]]).cuda()
    # pairwise squared distances, shape (B, N, N)
    xyz_square_dist = ((xyz.unsqueeze(dim=1) - xyz.unsqueeze(dim=2)) ** 2).sum((- 1))
    idx = furthest_point_sample_with_dist(xyz_square_dist, 3)
    assert torch.all(idx == expected_idx)
    # Fixture-based check on a larger saved distance matrix.  The redundant
    # function-local `import numpy as np` was removed: `np` is already
    # available at module level (it is used without a local import elsewhere).
    fps_idx = np.load('tests/data/for_3d_ops/fps_idx.npy')
    features_for_fps_distance = np.load('tests/data/for_3d_ops/features_for_fps_distance.npy')
    expected_idx = torch.from_numpy(fps_idx).cuda()
    features_for_fps_distance = torch.from_numpy(features_for_fps_distance).cuda()
    idx = furthest_point_sample_with_dist(features_for_fps_distance, 16)
    assert torch.all(idx == expected_idx)
|
class TestFusedBiasLeakyReLU(object):
    """First- and second-order gradient checks for the fused
    bias + LeakyReLU CUDA op."""

    @classmethod
    def setup_class(cls):
        # Skip fixture creation on CPU-only machines; the tests themselves
        # are also guarded by pytest.mark.skipif.
        if (not torch.cuda.is_available()):
            return
        # NOTE(review): calling .cuda() AFTER requires_grad=True yields
        # non-leaf CUDA tensors (grad accumulates on the CPU originals).
        # gradcheck still perturbs them, but confirm this is intended.
        cls.input_tensor = torch.randn((2, 2, 2, 2), requires_grad=True).cuda()
        cls.bias = torch.zeros(2, requires_grad=True).cuda()

    @pytest.mark.skipif((not torch.cuda.is_available()), reason='requires cuda')
    def test_gradient(self):
        # First-order numerical gradient check.
        from mmcv.ops import FusedBiasLeakyReLU
        if _USING_PARROTS:
            # parrots gradcheck uses delta/pt_atol instead of eps/atol
            gradcheck(FusedBiasLeakyReLU(2).cuda(), self.input_tensor, delta=0.0001, pt_atol=0.001)
        else:
            gradcheck(FusedBiasLeakyReLU(2).cuda(), self.input_tensor, eps=0.0001, atol=0.001)

    @pytest.mark.skipif(((not torch.cuda.is_available()) or _USING_PARROTS), reason='requires cuda')
    def test_gradgradient(self):
        # Second-order (grad-of-grad) check; not available under parrots.
        from mmcv.ops import FusedBiasLeakyReLU
        gradgradcheck(FusedBiasLeakyReLU(2).cuda(), self.input_tensor, eps=0.0001, atol=0.001)
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_gather_points():
    """gather_points should select feature columns by index, per batch."""
    features = torch.tensor([[[(- 1.6095), (- 0.1029), (- 0.8876), (- 1.2447), (- 2.4031), 0.3708, (- 1.1586), (- 1.4967), (- 0.48), 0.2252], [1.9138, 3.4979, 1.6854, 1.5631, 3.6776, 3.1154, 2.1705, 2.5221, 2.0411, 3.1446], [(- 1.4173), 0.3073, (- 1.4339), (- 1.434), (- 1.277), (- 0.2867), (- 1.4162), (- 1.4044), (- 1.4245), (- 1.4074)]], [[0.216, 0.0842, 0.3661, (- 0.2749), (- 0.4909), (- 0.6066), (- 0.8773), (- 0.0745), (- 0.9496), 0.1434], [1.3644, 1.8087, 1.6855, 1.9563, 1.2746, 1.9662, 0.9566, 1.8778, 1.1437, 1.3639], [(- 0.7172), 0.1692, 0.2241, 0.0721, (- 0.754), 0.0462, (- 0.6227), 0.3223, (- 0.6944), (- 0.5294)]]]).cuda()
    idx = torch.tensor([[0, 1, 4, 0, 0, 0], [0, 5, 6, 0, 0, 0]]).int().cuda()
    expected_output = torch.tensor([[[(- 1.6095), (- 0.1029), (- 2.4031), (- 1.6095), (- 1.6095), (- 1.6095)], [1.9138, 3.4979, 3.6776, 1.9138, 1.9138, 1.9138], [(- 1.4173), 0.3073, (- 1.277), (- 1.4173), (- 1.4173), (- 1.4173)]], [[0.216, (- 0.6066), (- 0.8773), 0.216, 0.216, 0.216], [1.3644, 1.9662, 0.9566, 1.3644, 1.3644, 1.3644], [(- 0.7172), 0.0462, (- 0.6227), (- 0.7172), (- 0.7172), (- 0.7172)]]]).cuda()
    gathered = gather_points(features, idx)
    assert torch.allclose(gathered, expected_output)
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_grouping_points():
    """grouping_operation should gather feature groups by index, per batch."""
    idx = torch.tensor([[[0, 0, 0], [3, 3, 3], [8, 8, 8], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [6, 6, 6], [9, 9, 9], [0, 0, 0], [0, 0, 0], [0, 0, 0]]]).int().cuda()
    # (renamed from the original's misspelled local `festures`)
    features = torch.tensor([[[0.5798, (- 0.7981), (- 0.928), (- 1.3311), 1.3687, 0.9277, (- 0.4164), (- 1.8274), 0.9268, 0.8414], [5.4247, 1.5113, 2.3944, 1.474, 5.03, 5.103, 1.936, 2.1939, 2.1581, 3.4666], [(- 1.6266), (- 1.0281), (- 1.0393), (- 1.6931), (- 1.3982), (- 0.5732), (- 1.083), (- 1.7561), (- 1.6786), (- 1.6967)]], [[(- 0.038), (- 0.188), (- 1.5724), 0.6905, (- 0.319), 0.7798, (- 0.3693), (- 0.9457), (- 0.2942), (- 1.8527)], [1.1773, 1.5009, 2.6399, 5.9242, 1.0962, 2.7346, 6.0865, 1.5555, 4.3303, 2.8229], [(- 0.6646), (- 0.687), (- 0.1125), (- 0.2224), (- 0.3445), (- 1.4049), 0.499, (- 0.7037), (- 0.9924), 0.0386]]]).cuda()
    grouped = grouping_operation(features, idx)
    expected_output = torch.tensor([[[[0.5798, 0.5798, 0.5798], [(- 1.3311), (- 1.3311), (- 1.3311)], [0.9268, 0.9268, 0.9268], [0.5798, 0.5798, 0.5798], [0.5798, 0.5798, 0.5798], [0.5798, 0.5798, 0.5798]], [[5.4247, 5.4247, 5.4247], [1.474, 1.474, 1.474], [2.1581, 2.1581, 2.1581], [5.4247, 5.4247, 5.4247], [5.4247, 5.4247, 5.4247], [5.4247, 5.4247, 5.4247]], [[(- 1.6266), (- 1.6266), (- 1.6266)], [(- 1.6931), (- 1.6931), (- 1.6931)], [(- 1.6786), (- 1.6786), (- 1.6786)], [(- 1.6266), (- 1.6266), (- 1.6266)], [(- 1.6266), (- 1.6266), (- 1.6266)], [(- 1.6266), (- 1.6266), (- 1.6266)]]], [[[(- 0.038), (- 0.038), (- 0.038)], [(- 0.3693), (- 0.3693), (- 0.3693)], [(- 1.8527), (- 1.8527), (- 1.8527)], [(- 0.038), (- 0.038), (- 0.038)], [(- 0.038), (- 0.038), (- 0.038)], [(- 0.038), (- 0.038), (- 0.038)]], [[1.1773, 1.1773, 1.1773], [6.0865, 6.0865, 6.0865], [2.8229, 2.8229, 2.8229], [1.1773, 1.1773, 1.1773], [1.1773, 1.1773, 1.1773], [1.1773, 1.1773, 1.1773]], [[(- 0.6646), (- 0.6646), (- 0.6646)], [0.499, 0.499, 0.499], [0.0386, 0.0386, 0.0386], [(- 0.6646), (- 0.6646), (- 0.6646)], [(- 0.6646), (- 0.6646), (- 0.6646)], [(- 0.6646), (- 0.6646), (- 0.6646)]]]]).cuda()
    assert torch.allclose(grouped, expected_output)
|
class TestInfo(object):

    def test_info(self):
        """Compiler and compiling-CUDA version strings should be retrievable."""
        if not torch.cuda.is_available():
            return
        from mmcv.ops import get_compiler_version, get_compiling_cuda_version
        compiler_version = get_compiler_version()
        cuda_version = get_compiling_cuda_version()
        assert compiler_version is not None
        assert cuda_version is not None
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_boxes_iou_bev():
    """boxes_iou_bev should reproduce the hand-computed IoU matrix."""
    np_boxes1 = np.asarray([[1.0, 1.0, 3.0, 4.0, 0.5], [2.0, 2.0, 3.0, 4.0, 0.6], [7.0, 7.0, 8.0, 8.0, 0.4]], dtype=np.float32)
    np_boxes2 = np.asarray([[0.0, 2.0, 2.0, 5.0, 0.3], [2.0, 1.0, 3.0, 3.0, 0.5], [5.0, 5.0, 6.0, 7.0, 0.4]], dtype=np.float32)
    np_expect_ious = np.asarray([[0.2621, 0.2948, 0.0], [0.0549, 0.1587, 0.0], [0.0, 0.0, 0.0]], dtype=np.float32)
    ious = boxes_iou_bev(
        torch.from_numpy(np_boxes1).cuda(), torch.from_numpy(np_boxes2).cuda())
    assert np.allclose(ious.cpu().numpy(), np_expect_ious, atol=0.0001)
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_nms_bev():
    """nms_bev should keep the expected box indices at IoU threshold 0.3."""
    np_boxes = np.array([[6.0, 3.0, 8.0, 7.0, 2.0], [3.0, 6.0, 9.0, 11.0, 1.0], [3.0, 7.0, 10.0, 12.0, 1.0], [1.0, 4.0, 13.0, 7.0, 3.0]], dtype=np.float32)
    np_scores = np.array([0.6, 0.9, 0.7, 0.2], dtype=np.float32)
    np_inds = np.array([1, 0, 3])
    kept = nms_bev(
        torch.from_numpy(np_boxes).cuda(),
        torch.from_numpy(np_scores).cuda(),
        thresh=0.3)
    assert np.allclose(kept.cpu().numpy(), np_inds)
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_nms_normal_bev():
    """nms_normal_bev should keep the expected boxes at IoU threshold 0.3."""
    np_boxes = np.array([[6.0, 3.0, 8.0, 7.0, 2.0], [3.0, 6.0, 9.0, 11.0, 1.0], [3.0, 7.0, 10.0, 12.0, 1.0], [1.0, 4.0, 13.0, 7.0, 3.0]], dtype=np.float32)
    np_scores = np.array([0.6, 0.9, 0.7, 0.2], dtype=np.float32)
    np_inds = np.array([1, 0, 3])
    kept = nms_normal_bev(
        torch.from_numpy(np_boxes).cuda(),
        torch.from_numpy(np_scores).cuda(),
        thresh=0.3)
    assert np.allclose(kept.cpu().numpy(), np_inds)
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_knn():
    """knn must agree with a brute-force k-nearest-neighbour search.

    The brute-force expected-index computation appeared twice in the
    original; it is factored into a nested helper.
    """

    def _expected_idx(query, source, k):
        # Brute force: pairwise squared distances (B, n_query, n_source),
        # then the k smallest per query, returned as (B, k, n_query) to
        # match the op's layout.
        q = query.unsqueeze(2).repeat(1, 1, source.shape[1], 1)
        s = source.unsqueeze(1).repeat(1, query.shape[1], 1, 1)
        dist = ((q - s) * (q - s)).sum((- 1))
        return dist.topk(k=k, dim=2, largest=False)[1].transpose(2, 1)

    new_xyz = torch.tensor([[[(- 0.074), 1.3147, (- 1.3625)], [(- 2.2769), 2.7817, (- 0.2334)], [(- 0.4003), 2.4666, (- 0.5116)], [(- 0.074), 1.3147, (- 1.3625)], [(- 0.074), 1.3147, (- 1.3625)]], [[(- 2.0289), 2.4952, (- 0.1708)], [(- 2.0668), 6.0278, (- 0.4875)], [0.4066, 1.4211, (- 0.2947)], [(- 2.0289), 2.4952, (- 0.1708)], [(- 2.0289), 2.4952, (- 0.1708)]]]).cuda()
    xyz = torch.tensor([[[(- 0.074), 1.3147, (- 1.3625)], [0.5555, 1.0399, (- 1.3634)], [(- 0.4003), 2.4666, (- 0.5116)], [(- 0.5251), 2.4379, (- 0.8466)], [(- 0.9691), 1.1418, (- 1.3733)], [(- 0.2232), 0.9561, (- 1.3626)], [(- 2.2769), 2.7817, (- 0.2334)], [(- 0.2822), 1.3192, (- 1.3645)], [0.1533, 1.5024, (- 1.0432)], [0.4917, 1.1529, (- 1.3496)]], [[(- 2.0289), 2.4952, (- 0.1708)], [(- 0.7188), 0.9956, (- 0.5096)], [(- 2.0668), 6.0278, (- 0.4875)], [(- 1.9304), 3.3092, 0.661], [0.0949, 1.4332, 0.314], [(- 1.2879), 2.0008, (- 0.7791)], [(- 0.7252), 0.9611, (- 0.6371)], [0.4066, 1.4211, (- 0.2947)], [0.322, 1.4447, 0.3548], [(- 0.9744), 2.3856, (- 1.2)]]]).cuda()
    expected_idx = _expected_idx(new_xyz, xyz, 5)
    idx = knn(5, xyz, new_xyz)
    assert torch.all(idx == expected_idx)
    # channel-last (transposed) input layout must give the same result
    idx = knn(5, xyz.transpose(1, 2).contiguous(), new_xyz.transpose(1, 2).contiguous(), True)
    assert torch.all(idx == expected_idx)
    # query == source case
    idx = knn(5, xyz, xyz)
    assert torch.all(idx == _expected_idx(xyz, xyz, 5))
|
class TestMaskedConv2d(object):

    def test_masked_conv2d(self):
        """MaskedConv2d should run a CUDA forward pass and yield an output."""
        if not torch.cuda.is_available():
            return
        from mmcv.ops import MaskedConv2d
        feat = torch.randn(1, 3, 16, 16, requires_grad=True, device='cuda')
        mask = torch.randn(1, 16, 16, requires_grad=True, device='cuda')
        masked_conv = MaskedConv2d(3, 3, 3).cuda()
        out = masked_conv(feat, mask)
        assert out is not None
|
def test_sum_cell():
    """SumCell should resize its output to the requested out_size; without
    one it uses the first input's spatial size."""
    inputs_x = torch.randn([2, 256, 32, 32])
    inputs_y = torch.randn([2, 256, 16, 16])
    sum_cell = SumCell(256, 256)
    # explicit out_size: first x's shape, then y's shape
    for reference in (inputs_x, inputs_y):
        merged = sum_cell(inputs_x, inputs_y, out_size=reference.shape[-2:])
        assert merged.size() == reference.size()
    # default out_size falls back to the first input's spatial size
    assert sum_cell(inputs_x, inputs_y).size() == inputs_x.size()
|
def test_concat_cell():
    """ConcatCell should resize its output to the requested out_size; without
    one it uses the first input's spatial size."""
    inputs_x = torch.randn([2, 256, 32, 32])
    inputs_y = torch.randn([2, 256, 16, 16])
    concat_cell = ConcatCell(256, 256)
    # explicit out_size: first x's shape, then y's shape
    for reference in (inputs_x, inputs_y):
        merged = concat_cell(inputs_x, inputs_y, out_size=reference.shape[-2:])
        assert merged.size() == reference.size()
    # default out_size falls back to the first input's spatial size
    assert concat_cell(inputs_x, inputs_y).size() == inputs_x.size()
|
def test_global_pool_cell():
    """GlobalPoolingCell should preserve spatial size with and without an
    output convolution."""
    inputs_x = torch.randn([2, 256, 32, 32])
    inputs_y = torch.randn([2, 256, 32, 32])
    # no output conv
    gp_cell = GlobalPoolingCell(with_out_conv=False)
    out = gp_cell(inputs_x, inputs_y, out_size=inputs_x.shape[-2:])
    assert out.size() == inputs_x.size()
    # with output conv (in_channels=256, out_channels=256)
    gp_cell = GlobalPoolingCell(256, 256)
    out = gp_cell(inputs_x, inputs_y, out_size=inputs_x.shape[-2:])
    assert out.size() == inputs_x.size()
|
def test_resize_methods():
    """BaseMergeCell._resize should match F.interpolate when not shrinking
    and max-pooling when downsampling."""
    feats = torch.randn([2, 256, 128, 128])
    # identity / upsampling paths must equal F.interpolate
    for mode in ['nearest', 'bilinear']:
        cell = BaseMergeCell(upsample_mode=mode)
        for size in [(128, 128), (256, 256)]:
            resized = cell._resize(feats, size)
            reference = F.interpolate(feats, size=size, mode=mode)
            assert resized.equal(reference)
    # downsampling path uses max pooling with a matching kernel/stride
    target = (64, 64)
    cell = BaseMergeCell()
    resized = cell._resize(feats, target)
    kernel = feats.shape[-1] // target[-1]
    pooled = F.max_pool2d(feats, kernel_size=kernel, stride=kernel)
    assert (resized == pooled).all()
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_min_area_polygons():
    """min_area_polygons should match the precomputed polygon fixtures."""
    pointsets = torch.from_numpy(np_pointsets).cuda().float()
    polygons = min_area_polygons(pointsets)
    assert np.allclose(polygons.cpu().numpy(), expected_polygons, atol=0.0001)
|
class TestMdconv(object):
    """Forward/backward fixture checks for ModulatedDeformConv2dPack.

    The five fixture assertions were duplicated in `_test_mdconv` and
    `_test_amp_mdconv`; they are factored into `_assert_grads`.
    """

    def _assert_grads(self, dcn, input, output):
        """Compare the forward output and all gradients with the saved
        fixtures (output_t, input_grad, dcn_w_grad, dcn_offset_[wb]_grad)."""
        assert numpy.allclose(output.cpu().detach().numpy(), output_t, 0.01)
        assert numpy.allclose(input.grad.cpu().detach().numpy(), input_grad, 0.01)
        assert numpy.allclose(dcn.weight.grad.cpu().detach().numpy(), dcn_w_grad, 0.01)
        assert numpy.allclose(dcn.conv_offset.weight.grad.cpu().detach().numpy(), dcn_offset_w_grad, 0.01)
        assert numpy.allclose(dcn.conv_offset.bias.grad.cpu().detach().numpy(), dcn_offset_b_grad, 0.01)

    def _test_mdconv(self, dtype=torch.float, device='cuda'):
        """Run forward/backward of the op at ``dtype`` on ``device``."""
        if not torch.cuda.is_available() and device == 'cuda':
            pytest.skip('test requires GPU')
        from mmcv.ops import ModulatedDeformConv2dPack
        input = torch.tensor(input_t, dtype=dtype, device=device)
        input.requires_grad = True
        dcn = ModulatedDeformConv2dPack(
            1, 1, kernel_size=(2, 2), stride=1, padding=1, deform_groups=1,
            bias=False)
        if device == 'cuda':
            dcn.cuda()
        dcn.weight.data.fill_(1.0)
        dcn.type(dtype)
        output = dcn(input)
        output.sum().backward()
        self._assert_grads(dcn, input, output)

    def _test_amp_mdconv(self, input_dtype=torch.float):
        """The function to test amp released on pytorch 1.6.0.

        The type of input data might be torch.float or torch.half,
        so we should test mdconv in both cases. With amp, the data
        type of model will NOT be set manually.

        Args:
            input_dtype: torch.float or torch.half.
        """
        if not torch.cuda.is_available():
            return
        from mmcv.ops import ModulatedDeformConv2dPack
        input = torch.tensor(input_t).cuda().type(input_dtype)
        input.requires_grad = True
        dcn = ModulatedDeformConv2dPack(
            1, 1, kernel_size=(2, 2), stride=1, padding=1, deform_groups=1,
            bias=False).cuda()
        dcn.weight.data.fill_(1.0)
        output = dcn(input)
        output.sum().backward()
        self._assert_grads(dcn, input, output)

    def test_mdconv(self):
        # CPU first, then CUDA, across supported dtypes.
        self._test_mdconv(torch.double, device='cpu')
        self._test_mdconv(torch.float, device='cpu')
        self._test_mdconv(torch.double)
        self._test_mdconv(torch.float)
        self._test_mdconv(torch.half)
        # AMP autocast only on torch >= 1.6 (not parrots).
        if (TORCH_VERSION != 'parrots'
                and digit_version(TORCH_VERSION) >= digit_version('1.6.0')):
            with autocast(enabled=True):
                self._test_amp_mdconv(torch.float)
                self._test_amp_mdconv(torch.half)
|
@pytest.mark.parametrize('device_type', ['cpu', pytest.param('cuda:0', marks=pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support'))])
def test_multiscale_deformable_attention(device_type):
    """MultiScaleDeformableAttention should reject a bad head count and run
    a forward pass on the requested device."""
    # 256 is not divisible by 7, so construction must fail
    with pytest.raises(ValueError):
        MultiScaleDeformableAttention(embed_dims=256, num_heads=7)
    device = torch.device(device_type)
    attn = MultiScaleDeformableAttention(embed_dims=3, num_levels=2, num_heads=3)
    attn.init_weights()
    num_query, bs, embed_dims = 5, 1, 3
    query = torch.rand(num_query, bs, embed_dims).to(device)
    key = torch.rand(num_query, bs, embed_dims).to(device)
    spatial_shapes = torch.Tensor([[2, 2], [1, 1]]).long().to(device)
    level_start_index = torch.Tensor([0, 4]).long().to(device)
    reference_points = torch.rand(bs, num_query, 2, 2).to(device)
    attn.to(device)
    attn(query, key, key, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index)
|
def test_forward_multi_scale_deformable_attn_pytorch():
    """Smoke-test the pure-PyTorch MSDA reference implementation.

    The original test called the function without asserting anything; we now
    at least check the output shape (batch, num_queries, num_heads * dims).
    """
    (N, M, D) = (1, 2, 2)
    (Lq, L, P) = (2, 2, 2)
    shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long)
    S = sum((H * W).item() for H, W in shapes)
    torch.manual_seed(3)
    value = torch.rand(N, S, M, D) * 0.01
    sampling_locations = torch.rand(N, Lq, M, L, P, 2)
    attention_weights = torch.rand(N, Lq, M, L, P) + 1e-05
    attention_weights /= attention_weights.sum((- 1), keepdim=True).sum((- 2), keepdim=True)
    output = multi_scale_deformable_attn_pytorch(
        value.double(), shapes, sampling_locations.double(),
        attention_weights.double()).detach()
    # the reference implementation flattens heads into the last dim
    assert output.shape == (N, Lq, M * D)
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_forward_equal_with_pytorch_double():
    """The CUDA MSDA kernel must match the PyTorch reference in float64."""
    N, M, D = 1, 2, 2
    Lq, L, P = 2, 2, 2
    shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long).cuda()
    level_start_index = torch.cat((shapes.new_zeros((1,)), shapes.prod(1).cumsum(0)[:-1]))
    S = sum((H * W).item() for H, W in shapes)
    torch.manual_seed(3)
    value = torch.rand(N, S, M, D).cuda() * 0.01
    sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
    attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-05
    attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True)
    im2col_step = 2
    output_pytorch = multi_scale_deformable_attn_pytorch(value.double(), shapes, sampling_locations.double(), attention_weights.double()).detach().cpu()
    output_cuda = MultiScaleDeformableAttnFunction.apply(value.double(), shapes, level_start_index, sampling_locations.double(), attention_weights.double(), im2col_step).detach().cpu()
    assert torch.allclose(output_cuda, output_pytorch)
    # double precision should agree essentially to machine epsilon
    abs_err = (output_cuda - output_pytorch).abs()
    assert abs_err.max() < 1e-18
    assert (abs_err / output_pytorch.abs()).max() < 1e-15
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_forward_equal_with_pytorch_float():
    """The CUDA MSDA kernel must match the PyTorch reference in float32."""
    N, M, D = 1, 2, 2
    Lq, L, P = 2, 2, 2
    shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long).cuda()
    level_start_index = torch.cat((shapes.new_zeros((1,)), shapes.prod(1).cumsum(0)[:-1]))
    S = sum((H * W).item() for H, W in shapes)
    torch.manual_seed(3)
    value = torch.rand(N, S, M, D).cuda() * 0.01
    sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
    attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-05
    attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True)
    im2col_step = 2
    output_pytorch = multi_scale_deformable_attn_pytorch(value, shapes, sampling_locations, attention_weights).detach().cpu()
    output_cuda = MultiScaleDeformableAttnFunction.apply(value, shapes, level_start_index, sampling_locations, attention_weights, im2col_step).detach().cpu()
    assert torch.allclose(output_cuda, output_pytorch, rtol=0.01, atol=0.001)
    # float32 tolerances are looser than the double-precision test
    abs_err = (output_cuda - output_pytorch).abs()
    assert abs_err.max() < 1e-09
    assert (abs_err / output_pytorch.abs()).max() < 1e-06
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
@pytest.mark.parametrize('channels', [4, 30, 32, 64, 71, 1025])
def test_gradient_numerical(channels, grad_value=True, grad_sampling_loc=True, grad_attn_weight=True):
    """Numerical gradcheck of the CUDA MSDA op over several channel counts."""
    N, M, _ = 1, 2, 2
    Lq, L, P = 2, 2, 2
    shapes = torch.as_tensor([(3, 2), (2, 1)], dtype=torch.long).cuda()
    level_start_index = torch.cat((shapes.new_zeros((1,)), shapes.prod(1).cumsum(0)[:-1]))
    S = sum((H * W).item() for H, W in shapes)
    value = torch.rand(N, S, M, channels).cuda() * 0.01
    sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
    attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-05
    attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True)
    im2col_step = 2
    func = MultiScaleDeformableAttnFunction.apply
    value.requires_grad = grad_value
    sampling_locations.requires_grad = grad_sampling_loc
    attention_weights.requires_grad = grad_attn_weight
    args = (value.double(), shapes, level_start_index,
            sampling_locations.double(), attention_weights.double(),
            im2col_step)
    if _USING_PARROTS:
        # parrots needs the integer inputs excluded explicitly
        assert gradcheck(func, args, no_grads=[shapes, level_start_index])
    else:
        assert gradcheck(func, args)
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_points_in_polygons():
    """points_in_polygons should flag each (point, polygon) containment."""
    np_points = np.array([[300.0, 300.0], [400.0, 400.0], [100.0, 100], [300, 250], [100, 0]])
    np_polygons = np.array([[200.0, 200.0, 400.0, 400.0, 500.0, 200.0, 400.0, 100.0], [400.0, 400.0, 500.0, 500.0, 600.0, 300.0, 500.0, 200.0], [300.0, 300.0, 600.0, 700.0, 700.0, 700.0, 700.0, 100.0]])
    np_expected = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
    points = torch.from_numpy(np_points).cuda().float()
    polygons = torch.from_numpy(np_polygons).cuda().float()
    expected_output = torch.from_numpy(np_expected).cuda().float()
    assert torch.allclose(points_in_polygons(points, polygons), expected_output, 0.001)
|
class Loss(nn.Module):
    """Dummy loss used by the PSAMask tests: mean of (input - target)."""

    def __init__(self):
        super().__init__()

    def forward(self, input, target):
        # Flatten each operand first (matching numel is all that is
        # required), then average the elementwise difference.
        flat_input = input.view((- 1))
        flat_target = target.view((- 1))
        return (flat_input - flat_target).mean()
|
class TestPSAMask(object):
    """CPU/CUDA consistency tests for the PSAMask op.

    The collect/distribute tests only differed in the mask type string and
    the reference-output file, so the shared logic lives in a helper.
    """

    def _check_psa_mask(self, psa_type, output_file):
        """Run PSAMask of ``psa_type`` on CPU then CUDA and compare both
        results with the reference stored in ``output_file``."""
        from mmcv.ops import PSAMask
        test_loss = Loss()
        input = np.fromfile('tests/data/for_psa_mask/psa_input.bin', dtype=np.float32)
        expected = np.fromfile(output_file, dtype=np.float32)
        input = input.reshape((4, 16, 8, 8))
        expected = expected.reshape((4, 64, 8, 8))
        label = torch.ones((4, 64, 8, 8))
        input = torch.FloatTensor(input)
        input.requires_grad = True
        psamask = PSAMask(psa_type, (4, 4))
        # CPU forward/backward
        test_output = psamask(input)
        loss = test_loss(test_output, label)
        loss.backward()
        test_output = test_output.detach().numpy()
        assert np.allclose(test_output, expected)
        assert test_output.shape == expected.shape
        # CUDA forward/backward must match the same reference
        psamask.cuda()
        input = input.cuda()
        label = label.cuda()
        test_output = psamask(input)
        loss = test_loss(test_output, label)
        loss.backward()
        test_output = test_output.detach().cpu().numpy()
        assert np.allclose(test_output, expected)
        assert test_output.shape == expected.shape

    def test_psa_mask_collect(self):
        if not torch.cuda.is_available():
            return
        self._check_psa_mask(
            'collect', 'tests/data/for_psa_mask/psa_output_collect.bin')

    def test_psa_mask_distribute(self):
        if not torch.cuda.is_available():
            return
        self._check_psa_mask(
            'distribute', 'tests/data/for_psa_mask/psa_output_distribute.bin')
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_roialign_rotated_gradcheck():
    """Numerical gradient check for RiRoIAlignRotated on the fixtures."""
    feat = torch.tensor(np_feature, dtype=torch.float, device='cuda', requires_grad=True)
    rois = torch.tensor(np_rois, dtype=torch.float, device='cuda')
    pool = RiRoIAlignRotated((pool_h, pool_w), spatial_scale, num_samples, num_orientations, clockwise)
    gradcheck(pool, (feat, rois), eps=0.001, atol=0.001)
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_roialign_rotated_allclose():
    """RiRoIAlignRotated forward/backward should match the saved fixtures."""
    feat = torch.tensor(np_feature, dtype=torch.float, device='cuda', requires_grad=True)
    rois = torch.tensor(np_rois, dtype=torch.float, device='cuda')
    pool = RiRoIAlignRotated((pool_h, pool_w), spatial_scale, num_samples, num_orientations, clockwise)
    result = pool(feat, rois)
    result.backward(torch.ones_like(result))
    assert np.allclose(result.data.type(torch.float).cpu().numpy(), expect_output, atol=0.001)
    assert np.allclose(feat.grad.data.type(torch.float).cpu().numpy(), expect_grad, atol=0.001)
|
def _test_roialign_gradcheck(device, dtype):
    """Gradcheck RoIAlign on every fixture case for device/dtype."""
    if device == 'cuda' and not torch.cuda.is_available():
        pytest.skip('test requires GPU')
    try:
        from mmcv.ops import RoIAlign
    except ModuleNotFoundError:
        pytest.skip('RoIAlign op is not successfully compiled')
    if dtype is torch.half:
        pytest.skip('grad check does not support fp16')
    for case in inputs:
        feat = torch.tensor(
            np.array(case[0]), dtype=dtype, device=device, requires_grad=True)
        rois = torch.tensor(np.array(case[1]), dtype=dtype, device=device)
        froipool = RoIAlign((pool_h, pool_w), spatial_scale, sampling_ratio)
        if torch.__version__ == 'parrots':
            # parrots gradcheck uses delta/pt_atol instead of eps/atol
            gradcheck(froipool, (feat, rois), no_grads=[rois], delta=1e-05, pt_atol=1e-05)
        else:
            gradcheck(froipool, (feat, rois), eps=1e-05, atol=1e-05)
|
def _test_roialign_allclose(device, dtype):
    """Compare roi_align forward values and input grads with the fixtures."""
    if device == 'cuda' and not torch.cuda.is_available():
        pytest.skip('test requires GPU')
    try:
        from mmcv.ops import roi_align
    except ModuleNotFoundError:
        pytest.skip('test requires compilation')
    pool_h, pool_w = 2, 2
    spatial_scale = 1.0
    sampling_ratio = 2
    for case, expected in zip(inputs, outputs):
        np_input = np.array(case[0])
        np_rois = np.array(case[1])
        np_output = np.array(expected[0])
        np_grad = np.array(expected[1])
        feat = torch.tensor(np_input, dtype=dtype, device=device, requires_grad=True)
        rois = torch.tensor(np_rois, dtype=dtype, device=device)
        result = roi_align(feat, rois, (pool_h, pool_w), spatial_scale, sampling_ratio, 'avg', True)
        result.backward(torch.ones_like(result))
        assert np.allclose(result.data.type(torch.float).cpu().numpy(), np_output, atol=0.001)
        assert np.allclose(feat.grad.data.type(torch.float).cpu().numpy(), np_grad, atol=0.001)
|
@pytest.mark.parametrize('device', ['cuda', 'cpu'])
@pytest.mark.parametrize('dtype', [torch.float, torch.double, torch.half])
def test_roialign(device, dtype):
    """Gradcheck (double only) plus the value check for every config."""
    if dtype is torch.double:
        _test_roialign_gradcheck(device=device, dtype=dtype)
    _test_roialign_allclose(device=device, dtype=dtype)
|
def _test_roialign_rotated_gradcheck(device, dtype):
    """Gradcheck RoIAlignRotated on every fixture case for device/dtype."""
    if device == 'cuda' and not torch.cuda.is_available():
        pytest.skip('unittest does not support GPU yet.')
    try:
        from mmcv.ops import RoIAlignRotated
    except ModuleNotFoundError:
        pytest.skip('RoIAlignRotated op is not successfully compiled')
    if dtype is torch.half:
        pytest.skip('grad check does not support fp16')
    for case in inputs:
        feat = torch.tensor(
            np.array(case[0]), dtype=dtype, device=device, requires_grad=True)
        rois = torch.tensor(np.array(case[1]), dtype=dtype, device=device)
        froipool = RoIAlignRotated((pool_h, pool_w), spatial_scale, sampling_ratio)
        if torch.__version__ == 'parrots':
            # parrots gradcheck uses delta/pt_atol instead of eps/atol
            gradcheck(froipool, (feat, rois), no_grads=[rois], delta=1e-05, pt_atol=1e-05)
        else:
            gradcheck(froipool, (feat, rois), eps=1e-05, atol=1e-05)
|
def _test_roialign_rotated_allclose(device, dtype):
    """Check roi_align_rotated values/grads, plus old-vs-new module parity."""
    if device == 'cuda' and not torch.cuda.is_available():
        pytest.skip('unittest does not support GPU yet.')
    try:
        from mmcv.ops import RoIAlignRotated, roi_align_rotated
    except ModuleNotFoundError:
        pytest.skip('test requires compilation')
    pool_h, pool_w = 2, 2
    spatial_scale = 1.0
    sampling_ratio = 2
    for case, expected in zip(inputs, outputs):
        np_input = np.array(case[0])
        np_rois = np.array(case[1])
        np_output = np.array(expected[0])
        np_grad = np.array(expected[1])
        x = torch.tensor(np_input, dtype=dtype, device=device, requires_grad=True)
        rois = torch.tensor(np_rois, dtype=dtype, device=device)
        result = roi_align_rotated(x, rois, (pool_h, pool_w), spatial_scale, sampling_ratio, True)
        result.backward(torch.ones_like(result))
        assert np.allclose(result.data.type(torch.float).cpu().numpy(), np_output, atol=0.001)
        assert np.allclose(x.grad.data.type(torch.float).cpu().numpy(), np_grad, atol=0.001)
    # The deprecated (out_size/sample_num) and current (output_size/
    # sampling_ratio) constructor keywords must yield the same result.
    # This reuses x/rois from the last loop iteration, as before.
    roi_align_rotated_module_deprecated = RoIAlignRotated(
        out_size=(pool_h, pool_w), spatial_scale=spatial_scale, sample_num=sampling_ratio)
    output_1 = roi_align_rotated_module_deprecated(x, rois)
    roi_align_rotated_module_new = RoIAlignRotated(
        output_size=(pool_h, pool_w), spatial_scale=spatial_scale, sampling_ratio=sampling_ratio)
    output_2 = roi_align_rotated_module_new(x, rois)
    assert np.allclose(
        output_1.data.type(torch.float).cpu().numpy(),
        output_2.data.type(torch.float).cpu().numpy())
|
@pytest.mark.parametrize('device', ['cuda', 'cpu'])
@pytest.mark.parametrize('dtype', [torch.float, torch.double, torch.half])
def test_roialign_rotated(device, dtype):
    """Run RoIAlignRotated value checks; gradcheck only in double precision.

    Numerical gradcheck is unreliable below float64, so it is restricted to
    ``torch.double`` while the allclose check runs for every dtype.
    """
    run_gradcheck = dtype is torch.double
    if run_gradcheck:
        _test_roialign_rotated_gradcheck(device=device, dtype=dtype)
    _test_roialign_rotated_allclose(device=device, dtype=dtype)
|
class TestRoiPool(object):
    """Forward/backward tests for ``mmcv.ops.RoIPool`` (CUDA only).

    Every method returns early when CUDA is unavailable.  Reference data
    comes from the module-level ``inputs``/``outputs`` tables.
    """

    def test_roipool_gradcheck(self):
        """Numerical gradient check of the RoIPool module."""
        if not torch.cuda.is_available():
            return
        from mmcv.ops import RoIPool
        pool_h, pool_w = 2, 2
        spatial_scale = 1.0
        for case in inputs:
            feat = torch.tensor(
                np.array(case[0]), device='cuda', requires_grad=True)
            boxes = torch.tensor(np.array(case[1]), device='cuda')
            pool = RoIPool((pool_h, pool_w), spatial_scale)
            # parrots' gradcheck has a different signature; skipped there.
            if not _USING_PARROTS:
                gradcheck(pool, (feat, boxes), eps=0.01, atol=0.01)

    def _test_roipool_allclose(self, dtype=torch.float):
        """Compare roi_pool outputs and input gradients with references."""
        if not torch.cuda.is_available():
            return
        from mmcv.ops import roi_pool
        pool_h, pool_w = 2, 2
        spatial_scale = 1.0
        for case, expected in zip(inputs, outputs):
            feat = torch.tensor(
                np.array(case[0]), dtype=dtype, device='cuda',
                requires_grad=True)
            boxes = torch.tensor(np.array(case[1]), dtype=dtype, device='cuda')
            result = roi_pool(feat, boxes, (pool_h, pool_w), spatial_scale)
            result.backward(torch.ones_like(result))
            assert np.allclose(
                result.data.cpu().numpy(), np.array(expected[0]), 0.001)
            assert np.allclose(
                feat.grad.data.cpu().numpy(), np.array(expected[1]), 0.001)

    def test_roipool_allclose(self):
        """Run the value check for every supported dtype."""
        for dtype in (torch.double, torch.float, torch.half):
            self._test_roipool_allclose(dtype)
|
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_RoIAwarePool3d():
    """Pool point features into RoI voxel grids in both 'max' and 'avg' modes."""
    pool_max = RoIAwarePool3d(out_size=4, max_pts_per_voxel=128, mode='max')
    pool_avg = RoIAwarePool3d(out_size=4, max_pts_per_voxel=128, mode='avg')
    # 7 values per roi — presumably (x, y, z, dx, dy, dz, yaw); confirm with
    # the RoIAwarePool3d documentation.
    rois = torch.tensor(
        [[1.0, 2.0, 3.0, 5.0, 4.0, 6.0, -0.3 - np.pi / 2],
         [-10.0, 23.0, 16.0, 20.0, 10.0, 20.0, -0.5 - np.pi / 2]],
        dtype=torch.float32).cuda()
    pts = torch.tensor(
        [[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
         [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
         [4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20],
         [-16, -18, 9], [-21.3, -52, -5], [0, 0, 0], [6, 7, 8], [-2, -3, -4]],
        dtype=torch.float32).cuda()
    pts_feature = pts.clone()
    pooled_max = pool_max(rois=rois, pts=pts, pts_feature=pts_feature)
    assert pooled_max.shape == torch.Size([2, 4, 4, 4, 3])
    assert torch.allclose(
        pooled_max.sum(), torch.tensor(51.1).cuda(), 0.001)
    pooled_avg = pool_avg(rois=rois, pts=pts, pts_feature=pts_feature)
    assert pooled_avg.shape == torch.Size([2, 4, 4, 4, 3])
    assert torch.allclose(
        pooled_avg.sum(), torch.tensor(49.75).cuda(), 0.001)
|
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_points_in_boxes_part():
    """points_in_boxes_part gives, per point, one enclosing box index or -1."""
    boxes = torch.tensor(
        [[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3]],
         [[-10.0, 23.0, 16.0, 10, 20, 20, 0.5]]],
        dtype=torch.float32).cuda()  # one box per batch sample
    pts = torch.tensor(
        [[[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
          [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
          [4.7, 3.5, -12.2]],
         [[3.8, 7.6, -2], [-10.6, -12.9, -20], [-16, -18, 9],
          [-21.3, -52, -5], [0, 0, 0], [6, 7, 8], [-2, -3, -4], [6, 4, 9]]],
        dtype=torch.float32).cuda()
    point_indices = points_in_boxes_part(points=pts, boxes=boxes)
    expected_point_indices = torch.tensor(
        [[0, 0, 0, 0, 0, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1, -1]],
        dtype=torch.int32).cuda()
    assert point_indices.shape == torch.Size([2, 8])
    assert (point_indices == expected_point_indices).all()
    # A rotated box: membership must respect the yaw angle.
    boxes = torch.tensor(
        [[[0.0, 0.0, 0.0, 1.0, 20.0, 1.0, 0.523598]]],
        dtype=torch.float32).cuda()
    pts = torch.tensor(
        [[[4, 6.928, 0], [6.928, 4, 0], [4, -6.928, 0], [6.928, -4, 0],
          [-4, 6.928, 0], [-6.928, 4, 0], [-4, -6.928, 0], [-6.928, -4, 0]]],
        dtype=torch.float32).cuda()
    point_indices = points_in_boxes_part(points=pts, boxes=boxes)
    expected_point_indices = torch.tensor(
        [[-1, -1, 0, -1, 0, -1, -1, -1]], dtype=torch.int32).cuda()
    assert (point_indices == expected_point_indices).all()
|
def test_points_in_boxes_cpu():
    """points_in_boxes_cpu returns a per-point/per-box 0/1 membership mask."""
    boxes = torch.tensor(
        [[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3],
          [-10.0, 23.0, 16.0, 10, 20, 20, 0.5]]],
        dtype=torch.float32)
    pts = torch.tensor(
        [[[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
          [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
          [4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20],
          [-16, -18, 9], [-21.3, -52, -5], [0, 0, 0], [6, 7, 8],
          [-2, -3, -4]]],
        dtype=torch.float32)
    point_indices = points_in_boxes_cpu(points=pts, boxes=boxes)
    expected_point_indices = torch.tensor(
        [[[1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [0, 1], [0, 0], [0, 0],
          [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]],
        dtype=torch.int32)
    assert point_indices.shape == torch.Size([1, 15, 2])
    assert (point_indices == expected_point_indices).all()
    # A rotated box: membership must respect the yaw angle.
    boxes = torch.tensor(
        [[[0.0, 0.0, 0.0, 1.0, 20.0, 1.0, 0.523598]]], dtype=torch.float32)
    pts = torch.tensor(
        [[[4, 6.928, 0], [6.928, 4, 0], [4, -6.928, 0], [6.928, -4, 0],
          [-4, 6.928, 0], [-6.928, 4, 0], [-4, -6.928, 0], [-6.928, -4, 0]]],
        dtype=torch.float32)
    point_indices = points_in_boxes_cpu(points=pts, boxes=boxes)
    expected_point_indices = torch.tensor(
        [[[0], [0], [1], [0], [1], [0], [0], [0]]], dtype=torch.int32)
    assert (point_indices == expected_point_indices).all()
|
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_points_in_boxes_all():
    """points_in_boxes_all returns a per-point/per-box 0/1 membership mask."""
    boxes = torch.tensor(
        [[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3],
          [-10.0, 23.0, 16.0, 10, 20, 20, 0.5]]],
        dtype=torch.float32).cuda()
    pts = torch.tensor(
        [[[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
          [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
          [4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20],
          [-16, -18, 9], [-21.3, -52, -5], [0, 0, 0], [6, 7, 8],
          [-2, -3, -4]]],
        dtype=torch.float32).cuda()
    point_indices = points_in_boxes_all(points=pts, boxes=boxes)
    expected_point_indices = torch.tensor(
        [[[1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [0, 1], [0, 0], [0, 0],
          [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]],
        dtype=torch.int32).cuda()
    assert point_indices.shape == torch.Size([1, 15, 2])
    assert (point_indices == expected_point_indices).all()
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_gather_points():
    """Check RoIPointPool3d forward values on a small hand-made point cloud.

    NOTE(review): despite its name, this test exercises ``RoIPointPool3d``,
    not ``gather_points`` — presumably a copy-paste slip when the test was
    added.  Renaming would change the pytest-collected test id, so the name
    is left untouched here; confirm and rename in a dedicated change.
    """
    # (1, 15, 3) point cloud; features are the coordinates themselves.
    feats = torch.tensor([[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6], [0.8, 1.2, 3.9], [(- 9.2), 21.0, 18.2], [3.8, 7.9, 6.3], [4.7, 3.5, (- 12.2)], [3.8, 7.6, (- 2)], [(- 10.6), (- 12.9), (- 20)], [(- 16), (- 18), 9], [(- 21.3), (- 52), (- 5)], [0, 0, 0], [6, 7, 8], [(- 2), (- 3), (- 4)]], dtype=torch.float32).unsqueeze(0).cuda()
    points = feats.clone()
    # Two 7-value rois — presumably (x, y, z, dx, dy, dz, yaw); TODO confirm.
    rois = torch.tensor([[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3], [(- 10.0), 23.0, 16.0, 10, 20, 20, 0.5]]], dtype=torch.float32).cuda()
    roipoint_pool3d = RoIPointPool3d(num_sampled_points=4)
    (roi_feat, empty_flag) = roipoint_pool3d(feats, points, rois)
    # Each roi yields 4 sampled points with (xyz, feature) concatenated -> 6.
    expected_roi_feat = torch.tensor([[[[1, 2, 3.3, 1, 2, 3.3], [1.2, 2.5, 3, 1.2, 2.5, 3], [0.8, 2.1, 3.5, 0.8, 2.1, 3.5], [1.6, 2.6, 3.6, 1.6, 2.6, 3.6]], [[(- 9.2), 21, 18.2, (- 9.2), 21, 18.2], [(- 9.2), 21, 18.2, (- 9.2), 21, 18.2], [(- 9.2), 21, 18.2, (- 9.2), 21, 18.2], [(- 9.2), 21, 18.2, (- 9.2), 21, 18.2]]]]).cuda()
    expected_empty_flag = torch.tensor([[0, 0]]).int().cuda()
    assert torch.allclose(roi_feat, expected_roi_feat)
    assert torch.allclose(empty_flag, expected_empty_flag)
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_rotated_feature_align():
    """Check rotated_feature_align forward values and input gradients.

    ``feature`` is a (2, 3, 4, 4) map and ``bbox`` a (2, 4, 4, 5) tensor of
    per-location rotated boxes — presumably (cx, cy, w, h, angle); TODO
    confirm against the rotated_feature_align documentation.  Expected
    outputs/gradients are precomputed golden values.
    """
    feature = torch.tensor([[[[1.2924, (- 0.2172), (- 0.5222), 0.1172], [0.9144, 1.2248, 1.3115, (- 0.969)], [(- 0.8949), (- 1.1797), (- 0.9093), (- 0.3961)], [(- 0.4586), 0.5062, (- 0.7947), (- 0.7397)]], [[(- 1.0943), (- 0.7495), 1.3461, (- 1.1652)], [0.2034, 0.6763, (- 1.2357), 0.5231], [(- 1.0062), 1.2592, 1.4225, (- 0.3951)], [(- 0.1242), (- 1.624), 0.1932, 2.7181]], [[(- 1.6271), (- 1.0276), 0.0578, (- 0.2997)], [(- 0.9684), (- 1.6946), (- 1.3188), (- 1.1938)], [(- 1.6744), (- 0.8917), (- 0.6556), 1.0073], [(- 0.1205), 0.3671, (- 0.3731), (- 0.5347)]]], [[[0.7035, 0.2089, (- 0.1774), 3.467], [(- 0.8505), (- 0.9278), 1.4714, 0.1644], [0.0898, 0.3531, (- 0.4007), 0.1927], [1.2569, (- 0.2636), (- 0.5223), 0.0616]], [[0.176, (- 0.7639), (- 0.46), (- 1.326)], [(- 0.9921), (- 0.297), (- 0.8955), 1.0508], [1.3515, (- 0.1641), 1.9679, 1.1986], [(- 0.3616), 0.6287, 0.4933, 0.336]], [[(- 0.586), 0.2124, (- 0.87), 2.42], [(- 0.0551), (- 1.5103), (- 1.6779), 0.8399], [0.8431, 1.2414, (- 1.1243), (- 0.3887)], [(- 2.1254), 0.6047, (- 0.3515), 0.7254]]]], device='cuda', requires_grad=True)
    bbox = torch.tensor([[[[13.08, 12.688, 11.214, 93.944, (- 0.91905)], [38.104, 10.134, 146.59, 90.306, (- 0.98211)], [(- 53.213), 49.508, 51.513, 32.055, (- 0.31954)], [26.974, 25.248, 54.495, 3.1083, (- 0.62127)]], [[(- 15.604), (- 51.908), 239.98, 15.008, (- 1.2546)], [31.354, (- 7.3635), 67.879, 35.081, (- 0.33851)], [(- 5.3292), 9.1946, 12.834, 10.485, (- 1.3039)], [(- 23.925), 36.623, 39.875, 72.009, (- 0.65934)]], [[72.114, (- 23.781), 29.106, 84.501, (- 1.134)], [26.258, (- 7.7034), 176.29, 106.15, (- 1.2156)], [38.057, 46.016, 12.965, 6.9384, (- 1.0855)], [24.428, (- 16.189), 205.72, 31.622, (- 0.15719)]], [[3.8226, 29.608, 14.457, 68.179, (- 0.91997)], [25.003, (- 42.49), 96.007, 49.086, (- 1.4786)], [85.983, 54.98, 78.08, 100.03, (- 1.0926)], [9.9065, 41.457, 5.9799, 17.973, (- 0.56313)]]], [[[(- 18.244), 4.6309, 53.01, 24.31, (- 0.70345)], [19.419, 36.704, 52.39, 54.133, (- 0.3773)], [56.387, 23.752, 9.0441, 17.792, (- 1.5583)], [36.303, 16.396, 20.283, 19.148, (- 0.83419)]], [[32.169, 30.521, 26.283, 196.8, (- 0.30454)], [25.788, (- 32.189), 88.882, 102.07, (- 1.5328)], [8.4676, (- 16.668), 24.657, 112.75, (- 0.40388)], [(- 10.799), 6.0422, 9.5807, 33.677, (- 0.35438)]], [[69.363, 10.85, 25.968, 22.311, (- 0.16408)], [2.814, 4.6843, 3.1289, 21.48, (- 0.67583)], [26.661, 45.29, 6.1679, 30.005, (- 0.89806)], [5.0871, 13.234, 92.087, 49.622, (- 0.2802)]], [[(- 12.643), 25.176, 50.488, 54.246, (- 0.4484)], [(- 34.521), 0.98435, 52.413, 9.7996, (- 0.84218)], [49.829, (- 10.808), 29.848, 73.579, (- 0.62672)], [80.446, 28.064, 45.273, 53.809, (- 1.2359)]]]], device='cuda', requires_grad=True)
    # Golden forward values for spatial_scale=1/8, points=1.
    expected_output = torch.tensor([[[[1.1095, (- 0.2172), (- 0.5222), (- 0.6225)], [0.9144, 0.7662, 1.0487, (- 0.969)], [(- 0.8949), (- 1.6384), (- 0.9093), (- 0.3961)], [(- 0.8604), 0.5062, (- 0.7947), (- 0.7397)]], [[(- 0.3961), (- 0.7495), 1.3461, 1.5528], [0.2034, 0.5522, (- 1.6722), 0.5231], [(- 1.0062), 1.135, 1.4225, (- 0.3951)], [(- 0.4826), (- 1.624), 0.1932, 2.7181]], [[(- 2.6436), (- 1.0276), 0.0578, (- 0.8344)], [(- 0.9684), (- 1.8151), (- 2.1843), (- 1.1938)], [(- 1.6744), (- 1.0121), (- 0.6556), 1.0073], [(- 0.8474), 0.3671, (- 0.3731), (- 0.5347)]]], [[[0.7035, 0.2089, (- 0.1774), 3.467], [(- 0.8505), (- 0.9278), 1.4714, 0.1644], [0.0898, 0.3064, (- 0.4007), 0.5849], [1.2569, (- 0.2636), (- 0.5223), 0.0616]], [[0.176, (- 0.7639), (- 0.46), (- 1.326)], [(- 0.9921), (- 0.297), (- 0.8955), 1.0508], [1.3515, (- 0.6125), 1.9679, 0.555], [(- 0.3616), 0.6287, 0.4933, 0.336]], [[(- 0.586), 0.2124, (- 0.87), 2.42], [(- 0.0551), (- 1.5103), (- 1.6779), 0.8399], [0.8431, 0.8455, (- 1.1243), (- 1.5994)], [(- 2.1254), 0.6047, (- 0.3515), 0.7254]]]]).cuda()
    # Golden gradients w.r.t. ``feature`` for an all-ones upstream gradient.
    expected_grad = torch.tensor([[[[1.0, 1.8507, 1.1493, 1.5222], [1.0, 1.1511, 1.2139, 1.4778], [1.0, 1.2629, 1.3721, 1.0], [3.0, 1.0, 1.0, 2.0]], [[1.0, 1.8507, 1.1493, 1.5222], [1.0, 1.1511, 1.2139, 1.4778], [1.0, 1.2629, 1.3721, 1.0], [3.0, 1.0, 1.0, 2.0]], [[1.0, 1.8507, 1.1493, 1.5222], [1.0, 1.1511, 1.2139, 1.4778], [1.0, 1.2629, 1.3721, 1.0], [3.0, 1.0, 1.0, 2.0]]], [[[1.2687, 1.5055, 1.2382, 1.0], [1.1458, 1.4258, 1.416, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], [[1.2687, 1.5055, 1.2382, 1.0], [1.1458, 1.4258, 1.416, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], [[1.2687, 1.5055, 1.2382, 1.0], [1.1458, 1.4258, 1.416, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]]]).cuda()
    output = rotated_feature_align(feature, bbox, spatial_scale=(1 / 8), points=1)
    output.backward(torch.ones_like(output))
    assert torch.allclose(output, expected_output, 0.01)
    assert torch.allclose(feature.grad, expected_grad, 0.01)
|
def test_sacconv():
    """SAConv2d must produce the same output shape as an equivalent nn.Conv2d.

    Covers the plain, dilated, deformable (GPU when available) and grouped
    configurations.
    """
    # plain 3x3 convolution
    x = torch.rand(1, 3, 256, 256)
    saconv = SAConv2d(3, 5, kernel_size=3, padding=1)
    sac_out = saconv(x)
    ref = nn.Conv2d(3, 5, kernel_size=3, padding=1)
    assert sac_out.shape == ref(x).shape
    # dilated convolution
    dilated_saconv = SAConv2d(3, 5, kernel_size=3, padding=2, dilation=2)
    dilated_out = dilated_saconv(x)
    ref = nn.Conv2d(3, 5, kernel_size=3, padding=2, dilation=2)
    assert dilated_out.shape == ref(x).shape
    # deformable convolution (runs on GPU when available)
    deform_saconv = SAConv2d(3, 5, kernel_size=3, padding=1, use_deform=True)
    if torch.cuda.is_available():
        x = torch.rand(1, 3, 256, 256).cuda()
        deform_saconv = SAConv2d(
            3, 5, kernel_size=3, padding=1, use_deform=True).cuda()
        deform_out = deform_saconv(x).cuda()
        ref = nn.Conv2d(3, 5, kernel_size=3, padding=1).cuda()
        assert deform_out.shape == ref(x).shape
    else:
        deform_out = deform_saconv(x)
        ref = nn.Conv2d(3, 5, kernel_size=3, padding=1)
        assert deform_out.shape == ref(x).shape
    # grouped convolution
    x = torch.rand(1, 4, 256, 256)
    group_saconv = SAConv2d(4, 4, kernel_size=3, padding=1, groups=2)
    group_out = group_saconv(x)
    ref = nn.Conv2d(4, 4, kernel_size=3, padding=1, groups=2)
    assert group_out.shape == ref(x).shape
|
def make_sparse_convmodule(in_channels, out_channels, kernel_size, indice_key,
                           stride=1, padding=0, conv_type='SubMConv3d',
                           norm_cfg=None, order=('conv', 'norm', 'act')):
    """Make sparse convolution module.

    Args:
        in_channels (int): the number of input channels
        out_channels (int): the number of out channels
        kernel_size (int|tuple(int)): kernel size of convolution
        indice_key (str): the indice key used for sparse tensor
        stride (int|tuple(int)): the stride of convolution
        padding (int or list[int]): the padding number of input
        conv_type (str): sparse conv type in spconv
        norm_cfg (dict[str]): config of normalization layer
        order (tuple[str]): The order of conv/norm/activation layers. It is a
            sequence of "conv", "norm" and "act". Common examples are
            ("conv", "norm", "act") and ("act", "conv", "norm").

    Returns:
        spconv.SparseSequential: sparse convolution module.
    """
    assert isinstance(order, tuple) and len(order) <= 3
    # ``order`` may only contain (a subset of) 'conv', 'norm' and 'act':
    # the union with the full set must leave the full set unchanged.
    assert set(order) | {'conv', 'norm', 'act'} == {'conv', 'norm', 'act'}
    conv_cfg = dict(type=conv_type, indice_key=indice_key)
    inverse_conv_types = (
        'SparseInverseConv3d', 'SparseInverseConv2d', 'SparseInverseConv1d')
    modules = []
    for layer_name in order:
        if layer_name == 'conv':
            if conv_type in inverse_conv_types:
                # Inverse sparse convs take no stride/padding arguments.
                modules.append(
                    build_conv_layer(
                        conv_cfg, in_channels, out_channels, kernel_size,
                        bias=False))
            else:
                modules.append(
                    build_conv_layer(
                        conv_cfg, in_channels, out_channels, kernel_size,
                        stride=stride, padding=padding, bias=False))
        elif layer_name == 'norm':
            modules.append(build_norm_layer(norm_cfg, out_channels)[1])
        elif layer_name == 'act':
            modules.append(nn.ReLU(inplace=True))
    return SparseSequential(*modules)
|
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_make_sparse_convmodule():
    """make_sparse_convmodule must honour the layer order and conv type."""
    voxel_features = torch.tensor(
        [[6.56126, 0.9648336, -1.7339306, 0.315],
         [6.8162713, -2.480431, -1.3616394, 0.36],
         [11.643568, -4.744306, -1.3580885, 0.16],
         [23.482342, 6.5036807, 0.5806964, 0.35]],
        dtype=torch.float32, device='cuda')
    coordinates = torch.tensor(
        [[0, 12, 819, 131], [0, 16, 750, 136], [1, 16, 705, 232],
         [1, 35, 930, 469]],
        dtype=torch.int32, device='cuda')
    input_sp_tensor = SparseConvTensor(voxel_features, coordinates,
                                       [41, 1600, 1408], 2)
    # default order: conv -> norm -> act
    sparse_block0 = make_sparse_convmodule(
        4, 16, 3, 'test0',
        stride=1,
        padding=0,
        conv_type='SubMConv3d',
        norm_cfg=dict(type='BN1d', eps=0.001, momentum=0.01),
        order=('conv', 'norm', 'act')).cuda()
    assert isinstance(sparse_block0[0], SubMConv3d)
    assert sparse_block0[0].in_channels == 4
    assert sparse_block0[0].out_channels == 16
    assert isinstance(sparse_block0[1], torch.nn.BatchNorm1d)
    assert sparse_block0[1].eps == 0.001
    assert sparse_block0[1].momentum == 0.01
    assert isinstance(sparse_block0[2], torch.nn.ReLU)
    out_features = sparse_block0(input_sp_tensor)
    assert out_features.features.shape == torch.Size([4, 16])
    # reordered layers with an inverse sparse conv
    sparse_block1 = make_sparse_convmodule(
        4, 16, 3, 'test1',
        stride=1,
        padding=0,
        conv_type='SparseInverseConv3d',
        norm_cfg=dict(type='BN1d', eps=0.001, momentum=0.01),
        order=('norm', 'act', 'conv')).cuda()
    assert isinstance(sparse_block1[0], torch.nn.BatchNorm1d)
    assert isinstance(sparse_block1[1], torch.nn.ReLU)
    assert isinstance(sparse_block1[2], SparseInverseConv3d)
|
class TestSyncBN(object):
    """Tests for ``mmcv.ops.SyncBatchNorm``.

    These tests only run inside a 4-process slurm job (one GPU per process);
    outside such an environment every test method returns early after
    printing a hint.  ``SyncBatchNorm`` over a process group of ``size``
    ranks is compared against a plain ``nn.BatchNorm2d`` that sees the
    concatenated per-rank batches.

    The former ``_test_syncbn_train`` / ``_test_syncbn_empty_train`` bodies
    were near-identical; they now share ``_run_syncbn_train``.
    """

    def dist_init(self):
        """Initialize the NCCL process group from slurm environment variables."""
        rank = int(os.environ['SLURM_PROCID'])
        world_size = int(os.environ['SLURM_NTASKS'])
        local_rank = int(os.environ['SLURM_LOCALID'])
        node_list = str(os.environ['SLURM_NODELIST'])
        node_parts = re.findall('[0-9]+', node_list)
        # Rebuild the master IP from the numbers embedded in the node name
        # (node_parts[0] is a cluster id, parts 1-4 are the address octets).
        os.environ['MASTER_ADDR'] = (f'{node_parts[1]}.{node_parts[2]}' +
                                     f'.{node_parts[3]}.{node_parts[4]}')
        os.environ['MASTER_PORT'] = '12341'
        os.environ['WORLD_SIZE'] = str(world_size)
        os.environ['RANK'] = str(rank)
        dist.init_process_group('nccl')
        torch.cuda.set_device(local_rank)

    def _get_group(self, rank, size):
        """Return the process group of ``size`` ranks containing ``rank``.

        ``dist.new_group`` is a collective call, so every rank must create
        every group before picking the one it belongs to.
        """
        if size == 1:
            groups = [dist.new_group([i]) for i in range(4)]
            return groups[rank]
        if size == 2:
            groups = [dist.new_group([0, 1]), dist.new_group([2, 3])]
            return groups[rank // 2]
        return dist.group.WORLD  # size == 4

    def _run_syncbn_train(self, size=1, half=False, empty=False):
        """Shared body for the SyncBatchNorm training checks.

        Args:
            size (int): number of ranks per SyncBN process group (1, 2 or 4).
            half (bool): run in fp16 instead of fp32.
            empty (bool): use a zero-sized batch and ``stats_mode='N'``; also
                checks that an invalid ``stats_mode`` is rejected.
        """
        if ('SLURM_NTASKS' not in os.environ) or (
                int(os.environ['SLURM_NTASKS']) != 4):
            print('must run with slurm has 4 processes!\n'
                  'srun -p test --gres=gpu:4 -n4')
            return
        else:
            print('Running syncbn test')
        from mmcv.ops import SyncBatchNorm
        assert size in (1, 2, 4)
        if not dist.is_initialized():
            self.dist_init()
        rank = dist.get_rank()
        torch.manual_seed(9)
        torch.cuda.manual_seed(9)
        batch = 0 if empty else 16
        self.x = torch.rand(batch, 3, 2, 3).cuda()
        self.y_bp = torch.rand(batch, 3, 2, 3).cuda()
        if half:
            self.x = self.x.half()
            self.y_bp = self.y_bp.half()
        # Make every rank see identical data.
        dist.broadcast(self.x, src=0)
        dist.broadcast(self.y_bp, src=0)
        torch.cuda.synchronize()
        group = self._get_group(rank, size)
        if empty:
            # stats_mode='N' keeps statistics well-defined for empty inputs.
            syncbn = SyncBatchNorm(3, group=group, stats_mode='N').cuda()
        else:
            syncbn = SyncBatchNorm(3, group=group).cuda()
        syncbn.weight.data[0] = 0.2
        syncbn.weight.data[1] = 0.5
        syncbn.weight.data[2] = 0.7
        syncbn.train()
        # Reference: an ordinary BatchNorm2d fed the whole group batch.
        bn = nn.BatchNorm2d(3).cuda()
        bn.weight.data[0] = 0.2
        bn.weight.data[1] = 0.5
        bn.weight.data[2] = 0.7
        bn.train()
        sx = self.x[rank * 4:rank * 4 + 4]
        sx.requires_grad_()
        sy = syncbn(sx)
        sy.backward(self.y_bp[rank * 4:rank * 4 + 4])
        smean = syncbn.running_mean
        svar = syncbn.running_var
        sx_grad = sx.grad
        sw_grad = syncbn.weight.grad
        sb_grad = syncbn.bias.grad
        # Slice of the global batch that this rank's group covers.
        if size == 1:
            x = self.x[rank * 4:rank * 4 + 4]
            y_bp = self.y_bp[rank * 4:rank * 4 + 4]
        elif size == 2:
            x = self.x[rank // 2 * 8:rank // 2 * 8 + 8]
            y_bp = self.y_bp[rank // 2 * 8:rank // 2 * 8 + 8]
        elif size == 4:
            x = self.x
            y_bp = self.y_bp
        x.requires_grad_()
        y = bn(x)
        y.backward(y_bp)
        if size == 2:
            y = y[rank % 2 * 4:rank % 2 * 4 + 4]
        elif size == 4:
            y = y[rank * 4:rank * 4 + 4]
        mean = bn.running_mean
        var = bn.running_var
        if size == 1:
            x_grad = x.grad
            w_grad = bn.weight.grad
            b_grad = bn.bias.grad
        elif size == 2:
            x_grad = x.grad[rank % 2 * 4:rank % 2 * 4 + 4]
            # The reference bn accumulated gradients over ``size`` rank
            # batches, so scale down to the per-rank contribution.
            w_grad = bn.weight.grad / 2
            b_grad = bn.bias.grad / 2
        elif size == 4:
            x_grad = x.grad[rank * 4:rank * 4 + 4]
            w_grad = bn.weight.grad / 4
            b_grad = bn.bias.grad / 4
        assert np.allclose(mean.data.cpu().numpy(),
                           smean.data.cpu().numpy(), 0.001)
        assert np.allclose(var.data.cpu().numpy(),
                           svar.data.cpu().numpy(), 0.001)
        assert np.allclose(y.data.cpu().numpy(),
                           sy.data.cpu().numpy(), 0.001)
        assert np.allclose(w_grad.data.cpu().numpy(),
                           sw_grad.data.cpu().numpy(), 0.001)
        assert np.allclose(b_grad.data.cpu().numpy(),
                           sb_grad.data.cpu().numpy(), 0.001)
        assert np.allclose(x_grad.data.cpu().numpy(),
                           sx_grad.data.cpu().numpy(), 0.01)
        if empty:
            # An unknown stats_mode must be rejected at construction time.
            with pytest.raises(AssertionError):
                SyncBatchNorm(3, group=group, stats_mode='X')

    def _test_syncbn_train(self, size=1, half=False):
        self._run_syncbn_train(size=size, half=half, empty=False)

    def _test_syncbn_empty_train(self, size=1, half=False):
        self._run_syncbn_train(size=size, half=half, empty=True)

    def test_syncbn_1(self):
        self._test_syncbn_train(size=1)

    def test_syncbn_2(self):
        self._test_syncbn_train(size=2)

    def test_syncbn_4(self):
        self._test_syncbn_train(size=4)

    def test_syncbn_1_half(self):
        self._test_syncbn_train(size=1, half=True)

    def test_syncbn_2_half(self):
        self._test_syncbn_train(size=2, half=True)

    def test_syncbn_4_half(self):
        self._test_syncbn_train(size=4, half=True)

    def test_syncbn_empty_1(self):
        self._test_syncbn_empty_train(size=1)

    def test_syncbn_empty_2(self):
        self._test_syncbn_empty_train(size=2)

    def test_syncbn_empty_4(self):
        self._test_syncbn_empty_train(size=4)

    def test_syncbn_empty_1_half(self):
        self._test_syncbn_empty_train(size=1, half=True)

    def test_syncbn_empty_2_half(self):
        self._test_syncbn_empty_train(size=2, half=True)

    def test_syncbn_empty_4_half(self):
        self._test_syncbn_empty_train(size=4, half=True)
|
def remove_tmp_file(func):
    """Decorator that supplies a unique temporary ONNX path to ``func``.

    The path is passed as the ``onnx_file`` keyword argument and the file is
    deleted once ``func`` returns or raises.  A unique tempfile (rather than
    the previous hard-coded ``tmp.onnx`` in the working directory) keeps
    parallel test runs from clobbering each other's exports and avoids
    polluting the CWD.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        fd, onnx_file = tempfile.mkstemp(suffix='.onnx')
        # Only the path is needed; the exporter reopens the file itself.
        os.close(fd)
        kwargs['onnx_file'] = onnx_file
        try:
            return func(*args, **kwargs)
        finally:
            if os.path.exists(onnx_file):
                os.remove(onnx_file)
    return wrapper
|
@remove_tmp_file
def export_nms_module_to_onnx(module, onnx_file):
    """Export an ``nn.Module`` class wrapping NMS to ONNX and load it back.

    ``onnx_file`` is injected by the ``remove_tmp_file`` decorator, which
    also deletes the file afterwards.
    """
    model = module()
    model.eval()
    dummy_input = (torch.rand([100, 4], dtype=torch.float32),
                   torch.rand([100], dtype=torch.float32))
    torch.onnx.export(
        model,
        dummy_input,
        onnx_file,
        opset_version=11,
        input_names=['boxes', 'scores'],
        output_names=['output'])
    return onnx.load(onnx_file)
|
def test_can_handle_nms_with_constant_maxnum():
    """NMS exported with an explicit max_num keeps 5 ONNX node attributes."""
    class ModuleNMS(torch.nn.Module):
        def forward(self, boxes, scores):
            return nms(boxes, scores, iou_threshold=0.4, max_num=10)

    exported = export_nms_module_to_onnx(ModuleNMS)
    graph = preprocess_onnx(exported).graph
    for node in graph.node:
        if 'NonMaxSuppression' in node.name:
            assert len(node.attribute) == 5, 'The NMS must have 5 attributes.'
|
def test_can_handle_nms_with_undefined_maxnum():
    """Without max_num, preprocessing must still set a positive box limit."""
    class ModuleNMS(torch.nn.Module):
        def forward(self, boxes, scores):
            return nms(boxes, scores, iou_threshold=0.4)

    exported = export_nms_module_to_onnx(ModuleNMS)
    graph = preprocess_onnx(exported).graph
    for node in graph.node:
        if 'NonMaxSuppression' in node.name:
            assert len(node.attribute) == 5, 'The NMS must have 5 attributes.'
            # attribute[2] holds max_output_boxes_per_class
            assert node.attribute[2].i > 0, \
                'The max_output_boxes_per_class is not defined correctly.'
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_three_interpolate():
    """Check three_interpolate forward values against a precomputed golden.

    ``features`` is (B=2, C=5, M=6); ``idx``/``weight`` give, per target
    point, three source indices and their interpolation weights.
    """
    features = torch.tensor([[[2.435, 4.7516, 4.4995, 2.435, 2.435, 2.435], [3.1236, 2.6278, 3.0447, 3.1236, 3.1236, 3.1236], [2.6732, 2.8677, 2.6436, 2.6732, 2.6732, 2.6732], [0.0124, 7.015, 7.0199, 0.0124, 0.0124, 0.0124], [0.3207, 0.0, 0.3411, 0.3207, 0.3207, 0.3207]], [[0.0, 0.9544, 2.4532, 0.0, 0.0, 0.0], [0.5346, 1.9176, 1.4715, 0.5346, 0.5346, 0.5346], [0.0, 0.2744, 2.0842, 0.0, 0.0, 0.0], [0.3414, 1.5063, 1.6209, 0.3414, 0.3414, 0.3414], [0.5814, 0.0103, 0.0, 0.5814, 0.5814, 0.5814]]]).cuda()
    idx = torch.tensor([[[0, 1, 2], [2, 3, 4], [2, 3, 4], [0, 1, 2], [0, 1, 2], [0, 1, 3]], [[0, 2, 3], [1, 3, 4], [2, 1, 4], [0, 2, 4], [0, 2, 4], [0, 1, 2]]]).int().cuda()
    weight = torch.tensor([[[0.33333, 0.33333, 0.33333], [1.0, 5.8155e-08, 2.2373e-08], [1.0, 1.7737e-08, 1.7356e-08], [0.33333, 0.33333, 0.33333], [0.33333, 0.33333, 0.33333], [0.33333, 0.33333, 0.33333]], [[0.33333, 0.33333, 0.33333], [1.0, 1.3651e-08, 7.7312e-09], [1.0, 1.7148e-08, 1.407e-08], [0.33333, 0.33333, 0.33333], [0.33333, 0.33333, 0.33333], [0.33333, 0.33333, 0.33333]]]).cuda()
    output = three_interpolate(features, idx, weight)
    # Golden weighted combinations of the gathered feature columns.
    expected_output = torch.tensor([[[3.8953, 4.4995, 4.4995, 3.8953, 3.8953, 3.2072], [2.932, 3.0447, 3.0447, 2.932, 2.932, 2.9583], [2.7281, 2.6436, 2.6436, 2.7281, 2.7281, 2.738], [4.6824, 7.0199, 7.0199, 4.6824, 4.6824, 2.3466], [0.2206, 0.3411, 0.3411, 0.2206, 0.2206, 0.2138]], [[0.81773, 0.9544, 2.4532, 0.81773, 0.81773, 1.1359], [0.84689, 1.9176, 1.4715, 0.84689, 0.84689, 1.3079], [0.69473, 0.2744, 2.0842, 0.69473, 0.69473, 0.78619], [0.76789, 1.5063, 1.6209, 0.76789, 0.76789, 1.1562], [0.3876, 0.0103, 8.3569e-09, 0.3876, 0.3876, 0.19723]]]).cuda()
    assert torch.allclose(output, expected_output, 0.0001)
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_three_nn():
    """Check three_nn distances and indices against precomputed goldens.

    For every point in ``unknown`` the op must return its three nearest
    neighbours among ``known`` (distances and source indices).
    """
    known = torch.tensor([[[(- 1.8373), 3.5605, (- 0.7867)], [0.7615, 2.942, 0.2314], [(- 0.6503), 3.6637, (- 1.0622)], [(- 1.8373), 3.5605, (- 0.7867)], [(- 1.8373), 3.5605, (- 0.7867)]], [[(- 1.3399), 1.9991, (- 0.3698)], [(- 0.0799), 0.9698, (- 0.8457)], [0.0858, 2.4721, (- 0.1928)], [(- 1.3399), 1.9991, (- 0.3698)], [(- 1.3399), 1.9991, (- 0.3698)]]]).cuda()
    unknown = torch.tensor([[[(- 1.8373), 3.5605, (- 0.7867)], [0.7615, 2.942, 0.2314], [(- 0.6503), 3.6637, (- 1.0622)], [(- 1.5237), 2.3976, (- 0.8097)], [(- 0.0722), 3.4017, (- 0.288)], [0.5198, 3.0661, (- 0.4605)], [(- 2.0185), 3.5019, (- 0.3236)], [0.5098, 3.102, 0.5799], [(- 1.6137), 3.8443, (- 0.5269)], [0.7341, 2.9626, (- 0.3189)]], [[(- 1.3399), 1.9991, (- 0.3698)], [(- 0.0799), 0.9698, (- 0.8457)], [0.0858, 2.4721, (- 0.1928)], [(- 0.9022), 1.656, (- 1.309)], [0.1156, 1.6901, (- 0.4366)], [(- 0.6477), 2.3576, (- 0.1563)], [(- 0.8482), 1.1466, (- 1.2704)], [(- 0.8753), 2.0845, (- 0.346)], [(- 0.5621), 1.4233, (- 1.2858)], [(- 0.5883), 1.3114, (- 1.2899)]]]).cuda()
    (dist, idx) = three_nn(unknown, known)
    expected_dist = torch.tensor([[[0.0, 0.0, 0.0], [0.0, 2.0463, 2.8588], [0.0, 1.2229, 1.2229], [1.2047, 1.2047, 1.2047], [1.0011, 1.0845, 1.8411], [0.7433, 1.4451, 2.4304], [0.5007, 0.5007, 0.5007], [0.4587, 2.0875, 2.7544], [0.445, 0.445, 0.445], [0.5514, 1.7206, 2.6811]], [[0.0, 0.0, 0.0], [0.0, 1.6464, 1.6952], [0.0, 1.5125, 1.5125], [1.0915, 1.0915, 1.0915], [0.8197, 0.8511, 1.4894], [0.7433, 0.8082, 0.8082], [0.8955, 1.334, 1.334], [0.473, 0.473, 0.473], [0.7949, 1.3325, 1.3325], [0.7566, 1.3727, 1.3727]]]).cuda()
    expected_idx = torch.tensor([[[0, 3, 4], [1, 2, 0], [2, 0, 3], [0, 3, 4], [2, 1, 0], [1, 2, 0], [0, 3, 4], [1, 2, 0], [0, 3, 4], [1, 2, 0]], [[0, 3, 4], [1, 2, 0], [2, 0, 3], [0, 3, 4], [2, 1, 0], [2, 0, 3], [1, 0, 3], [0, 3, 4], [1, 0, 3], [1, 0, 3]]]).cuda()
    assert torch.allclose(dist, expected_dist, 0.0001)
    assert torch.all((idx == expected_idx))
|
def _test_tinshift_gradcheck(dtype):
    """Numerical gradient check for ``tin_shift``.

    Uses the module-level ``inputs``/``shifts`` tables; fp16 is skipped
    because gradcheck's CPU helpers do not support Half.
    """
    try:
        from mmcv.ops import tin_shift
    except ModuleNotFoundError:
        pytest.skip('TINShift op is not successfully compiled')
    if dtype == torch.half:
        pytest.skip('"add_cpu/sub_cpu" not implemented for Half')
    for shift in shifts:
        feature = torch.tensor(
            np.array(inputs), dtype=dtype, device='cuda', requires_grad=True)
        shift_tensor = torch.tensor(np.array(shift), device='cuda').int()
        if torch.__version__ == 'parrots':
            gradcheck(tin_shift, (feature, shift_tensor))
        else:
            gradcheck(tin_shift, (feature, shift_tensor), atol=1, rtol=0.1)
|
def _test_tinshift_allclose(dtype):
    """Compare tin_shift forward values and gradients with references.

    Reference data comes from the module-level ``inputs``/``outputs``/
    ``grads`` tables, one entry per shift configuration.
    """
    try:
        from mmcv.ops import tin_shift
    except ModuleNotFoundError:
        pytest.skip('TINShift op is not successfully compiled')
    for shift, expected, expected_grad in zip(shifts, outputs, grads):
        ref_out = np.array(expected)
        ref_grad = np.array(expected_grad)
        feature = torch.tensor(
            np.array(inputs), dtype=dtype, device='cuda', requires_grad=True)
        shift_tensor = torch.tensor(np.array(shift), device='cuda').int()
        result = tin_shift(feature, shift_tensor)
        result.backward(torch.ones_like(result))
        assert np.allclose(
            result.data.type(torch.float).cpu().numpy(), ref_out, 0.001)
        assert np.allclose(
            feature.grad.data.type(torch.float).cpu().numpy(), ref_grad,
            0.001)
|
def _test_tinshift_assert(dtype):
    """tin_shift must reject shift tensors whose shape mismatches the input."""
    try:
        from mmcv.ops import tin_shift
    except ModuleNotFoundError:
        pytest.skip('TINShift op is not successfully compiled')
    feature_list = [torch.rand(2, 3, 4, 2), torch.rand(2, 3, 4, 2)]
    shift_list = [torch.rand(2, 3), torch.rand(2, 5)]
    for feature, shift in zip(feature_list, shift_list):
        # Both shift shapes are incompatible with a (2, 3, 4, 2) input.
        with pytest.raises(ValueError):
            tin_shift(feature.cuda(), shift.cuda())
|
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
@pytest.mark.parametrize('dtype', [torch.float, torch.double, torch.half])
def test_tinshift(dtype):
    """Run value, gradient and input-validation checks for tin_shift."""
    _test_tinshift_allclose(dtype=dtype)
    _test_tinshift_gradcheck(dtype=dtype)
    _test_tinshift_assert(dtype=dtype)
|
def mock(*args, **kwargs):
    """No-op stand-in used to patch away distributed-init internals."""
    return None
|
@patch('torch.distributed._broadcast_coalesced', mock)
@patch('torch.distributed.broadcast', mock)
@patch('torch.nn.parallel.DistributedDataParallel._ddp_init_helper', mock)
def test_is_module_wrapper():
    """is_module_wrapper should recognize (MM)DataParallel/(MM)DDP wrappers
    and any class registered in MODULE_WRAPPERS, but not a plain nn.Module."""

    class Model(nn.Module):

        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(2, 2, 1)

        def forward(self, x):
            return self.conv(x)

    # Newer torch versions verify the model across ranks during DDP init;
    # patch those private hooks out so no real process group is required.
    if hasattr(torch.distributed, '_verify_model_across_ranks'):
        torch.distributed._verify_model_across_ranks = mock
    if hasattr(torch.distributed, '_verify_params_across_processes'):
        torch.distributed._verify_params_across_processes = mock

    model = Model()
    assert not is_module_wrapper(model)

    dp = DataParallel(model)
    assert is_module_wrapper(dp)

    mmdp = MMDataParallel(model)
    assert is_module_wrapper(mmdp)

    ddp = DistributedDataParallel(model, process_group=MagicMock())
    assert is_module_wrapper(ddp)

    mmddp = MMDistributedDataParallel(model, process_group=MagicMock())
    assert is_module_wrapper(mmddp)

    deprecated_mmddp = DeprecatedMMDDP(model)
    assert is_module_wrapper(deprecated_mmddp)

    # A custom class registered in MODULE_WRAPPERS also counts as a wrapper.
    @MODULE_WRAPPERS.register_module()
    class ModuleWrapper:

        def __init__(self, module):
            self.module = module

        def forward(self, *args, **kwargs):
            return self.module(*args, **kwargs)

    module_wrapper = ModuleWrapper(model)  # fixed typo: was `module_wraper`
    assert is_module_wrapper(module_wrapper)
|
def test_get_input_device():
    """get_input_device returns -1 for CPU tensors (or lists of them), the
    CUDA device index for GPU tensors, and raises for unsupported inputs.

    Locals renamed from ``input`` to avoid shadowing the builtin.
    """
    cpu_tensor = torch.zeros([1, 3, 3, 3])
    assert get_input_device(cpu_tensor) == -1
    cpu_tensors = [torch.zeros([1, 3, 3, 3]), torch.zeros([1, 4, 4, 4])]
    assert get_input_device(cpu_tensors) == -1
    if torch.cuda.is_available():
        cuda_tensor = torch.zeros([1, 3, 3, 3]).cuda()
        assert get_input_device(cuda_tensor) == 0
        cuda_tensors = [
            torch.zeros([1, 3, 3, 3]).cuda(),
            torch.zeros([1, 4, 4, 4]).cuda()
        ]
        assert get_input_device(cuda_tensors) == 0
    # non-tensor input is rejected
    with pytest.raises(Exception):
        get_input_device(5)
|
def test_scatter():
    """scatter should return CPU tensors unchanged for device -1 and move
    tensors (or lists of tensors) to GPU 0 when requested.

    Locals renamed from ``input`` to avoid shadowing the builtin.
    """
    cpu_tensor = torch.zeros([1, 3, 3, 3])
    result = scatter(input=cpu_tensor, devices=[-1])
    assert torch.allclose(cpu_tensor, result)

    cpu_tensors = [torch.zeros([1, 3, 3, 3]), torch.zeros([1, 4, 4, 4])]
    results = scatter(input=cpu_tensors, devices=[-1])
    for src, dst in zip(cpu_tensors, results):
        assert torch.allclose(src, dst)

    if torch.cuda.is_available():
        gpu_src = torch.zeros([1, 3, 3, 3])
        gpu_result = scatter(input=gpu_src, devices=[0])
        assert torch.allclose(gpu_src.cuda(), gpu_result)

        gpu_srcs = [torch.zeros([1, 3, 3, 3]), torch.zeros([1, 4, 4, 4])]
        gpu_results = scatter(input=gpu_srcs, devices=[0])
        for src, dst in zip(gpu_srcs, gpu_results):
            assert torch.allclose(src.cuda(), dst)

    # non-tensor input is rejected
    with pytest.raises(Exception):
        scatter(5, [-1])
|
def test_Scatter():
    """Scatter.forward wraps scattered results in a tuple, on CPU and GPU."""
    # single CPU tensor, target device -1 (stay on CPU)
    devices = [-1]
    tensor = torch.zeros([1, 3, 3, 3])
    scattered = Scatter.forward(devices, tensor)
    assert isinstance(scattered, tuple)
    assert torch.allclose(tensor, scattered[0])

    # list of CPU tensors
    devices = [-1]
    tensors = [torch.zeros([1, 3, 3, 3]), torch.zeros([1, 4, 4, 4])]
    scattered = Scatter.forward(devices, tensors)
    assert isinstance(scattered, tuple)
    for src, dst in zip(tensors, scattered):
        assert torch.allclose(src, dst)

    if torch.cuda.is_available():
        # single tensor moved to GPU 0
        devices = [0]
        tensor = torch.zeros([1, 3, 3, 3])
        scattered = Scatter.forward(devices, tensor)
        assert isinstance(scattered, tuple)
        assert torch.allclose(tensor.cuda(), scattered[0])

        # list of tensors moved to GPU 0
        devices = [0]
        tensors = [torch.zeros([1, 3, 3, 3]), torch.zeros([1, 4, 4, 4])]
        scattered = Scatter.forward(devices, tensors)
        assert isinstance(scattered, tuple)
        for src, dst in zip(tensors, scattered):
            # NOTE(review): indexing `dst[0]` mirrors the original test; it
            # differs from the CPU branch above — confirm intended.
            assert torch.allclose(src.cuda(), dst[0])
|
@COMPONENTS.register_module()
class FooConv1d(BaseModule):
    """Dummy registered component wrapping a single ``nn.Conv1d(4, 1, 4)``."""

    def __init__(self, init_cfg=None):
        super().__init__(init_cfg)
        # attribute name kept stable — it determines state_dict keys
        self.conv1d = nn.Conv1d(4, 1, 4)

    def forward(self, x):
        out = self.conv1d(x)
        return out
|
@COMPONENTS.register_module()
class FooConv2d(BaseModule):
    """Dummy registered component wrapping a single ``nn.Conv2d(3, 1, 3)``."""

    def __init__(self, init_cfg=None):
        super().__init__(init_cfg)
        # attribute name kept stable — it determines state_dict keys
        self.conv2d = nn.Conv2d(3, 1, 3)

    def forward(self, x):
        out = self.conv2d(x)
        return out
|
@COMPONENTS.register_module()
class FooLinear(BaseModule):
    """Dummy registered component wrapping a single ``nn.Linear(3, 4)``."""

    def __init__(self, init_cfg=None):
        super().__init__(init_cfg)
        # attribute name kept stable — it determines state_dict keys
        self.linear = nn.Linear(3, 4)

    def forward(self, x):
        out = self.linear(x)
        return out
|
# NOTE(review): the lines below are scrape residue (a dataset-viewer page
# footer), not Python; commented out so the file remains importable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.