| | |
| | |
| | |
| | |
| |
|
| | """Launch Isaac Sim Simulator first.""" |
| |
|
| | from isaaclab.app import AppLauncher |
| |
|
| | |
| | simulation_app = AppLauncher(headless=True).app |
| |
|
| | """Rest everything follows.""" |
| |
|
| | import pytest |
| | import torch |
| |
|
| | try: |
| | from isaacsim.core.prims import XFormPrim as _IsaacSimXformPrimView |
| | except (ModuleNotFoundError, ImportError): |
| | _IsaacSimXformPrimView = None |
| |
|
| | import isaaclab.sim as sim_utils |
| | from isaaclab.sim.views import XformPrimView as XformPrimView |
| | from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR |
| |
|
| |
|
@pytest.fixture(autouse=True)
def test_setup_teardown():
    """Create a blank new stage for each test.

    Yields control to the test and clears the stage afterwards. The teardown is wrapped
    in ``try``/``finally`` so the stage is also cleared when the fixture generator is
    closed early (e.g. on interruption), not only on a normal post-test resume.
    """
    # Fresh stage per test so prims created by a previous test cannot leak in.
    sim_utils.create_new_stage()
    sim_utils.update_stage()
    try:
        yield
    finally:
        # Always clean up the stage, even if the generator is closed prematurely.
        sim_utils.clear_stage()
| |
|
| |
|
| | """ |
| | Helper functions. |
| | """ |
| |
|
| |
|
| | def _prepare_indices(index_type, target_indices, num_prims, device): |
| | """Helper function to prepare indices based on type.""" |
| | if index_type == "list": |
| | return target_indices, target_indices |
| | elif index_type == "torch_tensor": |
| | return torch.tensor(target_indices, dtype=torch.int64, device=device), target_indices |
| | elif index_type == "slice_none": |
| | return slice(None), list(range(num_prims)) |
| | else: |
| | raise ValueError(f"Unknown index type: {index_type}") |
| |
|
| |
|
| | """ |
| | Tests - Initialization. |
| | """ |
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_xform_prim_view_initialization_single_prim(device):
    """Test XformPrimView initialization with a single prim."""
    # Skip CUDA runs on machines without a GPU.
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    # Spawn a single Xform prim on the current stage.
    sim_utils.create_prim(
        "/World/Object", "Xform", translation=(1.0, 2.0, 3.0), stage=sim_utils.get_current_stage()
    )

    # Wrap it in a view and check the bookkeeping attributes.
    view = XformPrimView("/World/Object", device=device)
    assert view.count == 1
    assert len(view.prims) == 1
    assert view.device == device
    assert view.prim_paths == ["/World/Object"]
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_xform_prim_view_initialization_multiple_prims(device):
    """Test XformPrimView initialization with multiple prims using pattern matching."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    # Create one object per environment; the paths double as the expected view ordering.
    total = 10
    stage = sim_utils.get_current_stage()
    expected_paths = [f"/World/Env_{idx}/Object" for idx in range(total)]
    for idx, path in enumerate(expected_paths):
        sim_utils.create_prim(path, "Xform", translation=(idx * 2.0, 0.0, 1.0), stage=stage)

    # A regex pattern should match all of them.
    view = XformPrimView("/World/Env_.*/Object", device=device)

    assert view.count == total
    assert len(view.prims) == total
    assert view.device == device
    assert view.prim_paths == expected_paths
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_xform_prim_view_initialization_multiple_prims_order(device):
    """Test XformPrimView initialization with multiple prims using pattern matching with multiple objects per prim.

    This test validates that XformPrimView respects USD stage traversal order, which is based on
    creation order (depth-first search), NOT alphabetical/lexical sorting. This is an important
    edge case that ensures deterministic prim ordering that matches USD's internal representation.

    The test creates prims in a deliberately non-alphabetical order (1, 0, A, a, 2) and verifies
    that they are retrieved in creation order, not sorted order (0, 1, 2, A, a).
    """
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    num_prims = 10
    stage = sim_utils.get_current_stage()

    # First creation pass: per environment, create children named 1, 0, A — deliberately
    # not alphabetical — so each Env_i's child list reflects creation order.
    for i in range(num_prims):
        sim_utils.create_prim(f"/World/Env_{i}/Object_1", "Xform", translation=(i * 2.0, -2.0, 1.0), stage=stage)
        sim_utils.create_prim(f"/World/Env_{i}/Object_0", "Xform", translation=(i * 2.0, 2.0, 1.0), stage=stage)
        sim_utils.create_prim(f"/World/Env_{i}/Object_A", "Xform", translation=(i * 2.0, 0.0, -1.0), stage=stage)

    # Second creation pass: append two more children (a, 2) to every environment AFTER the
    # first pass completed, so they come last in each Env_i's child list.
    for i in range(num_prims):
        sim_utils.create_prim(f"/World/Env_{i}/Object_a", "Xform", translation=(i * 2.0, 2.0, -1.0), stage=stage)
        sim_utils.create_prim(f"/World/Env_{i}/Object_2", "Xform", translation=(i * 2.0, 2.0, 1.0), stage=stage)

    view = XformPrimView("/World/Env_.*/Object_.*", device=device)

    # Expected order: depth-first traversal visits each Env_i in turn, and within an
    # environment the children in creation order: 1, 0, A (first pass) then a, 2 (second).
    expected_prim_paths_ordering = []
    for i in range(num_prims):
        expected_prim_paths_ordering.append(f"/World/Env_{i}/Object_1")
        expected_prim_paths_ordering.append(f"/World/Env_{i}/Object_0")
        expected_prim_paths_ordering.append(f"/World/Env_{i}/Object_A")
        expected_prim_paths_ordering.append(f"/World/Env_{i}/Object_a")
        expected_prim_paths_ordering.append(f"/World/Env_{i}/Object_2")

    assert view.count == num_prims * 5
    assert view.device == device
    assert len(view.prims) == num_prims * 5
    assert view.prim_paths == expected_prim_paths_ordering

    # Sanity check: the creation order above genuinely differs from lexical sorting, so the
    # assertion above could not have passed by accident.
    alphabetical_order = []
    for i in range(num_prims):
        alphabetical_order.append(f"/World/Env_{i}/Object_0")
        alphabetical_order.append(f"/World/Env_{i}/Object_1")
        alphabetical_order.append(f"/World/Env_{i}/Object_2")
        alphabetical_order.append(f"/World/Env_{i}/Object_A")
        alphabetical_order.append(f"/World/Env_{i}/Object_a")

    assert view.prim_paths != alphabetical_order, (
        "Prim paths should follow creation order, not alphabetical order. "
        "This test validates that USD stage traversal respects creation order."
    )
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_xform_prim_view_initialization_invalid_prim(device):
    """Test XformPrimView initialization fails for non-xformable prims."""
    # Skip CUDA runs on machines without a GPU.
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    stage = sim_utils.get_current_stage()

    # Define a prim directly on the stage, bypassing `sim_utils.create_prim`.
    # NOTE(review): the type name used here is "Xform", which *is* a xformable USD schema
    # type — presumably the view rejects it for another reason (e.g. missing xform ops on a
    # bare DefinePrim), or the intent was a non-xformable type such as "Scope". Verify
    # against XformPrimView's validation logic.
    stage.DefinePrim("/World/InvalidPrim", "Xform")

    # Constructing a view over it should fail with a descriptive error.
    with pytest.raises(ValueError, match="not a xformable prim"):
        XformPrimView("/World/InvalidPrim", device=device)
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_xform_prim_view_initialization_empty_pattern(device):
    """Test XformPrimView initialization with pattern that matches no prims."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    # Start from an empty stage; the pattern below matches nothing on it.
    sim_utils.create_new_stage()
    empty_view = XformPrimView("/World/NonExistent_.*", device=device)

    # An unmatched pattern should yield an empty view rather than raise.
    assert empty_view.count == 0
    assert len(empty_view.prims) == 0
| |
|
| |
|
| | """ |
| | Tests - Getters. |
| | """ |
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_get_world_poses(device):
    """Test getting world poses from XformPrimView."""
    # Consistency fix: every other test in this module uses `device == "cuda"`; this one
    # used `device.startswith("cuda")`. Behavior is identical for the parametrized values.
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    stage = sim_utils.get_current_stage()

    # Known world-frame poses (positions + wxyz quaternions) for three prims.
    expected_positions = [(1.0, 2.0, 3.0), (4.0, 5.0, 6.0), (7.0, 8.0, 9.0)]
    expected_orientations = [(1.0, 0.0, 0.0, 0.0), (0.7071068, 0.0, 0.0, 0.7071068), (0.7071068, 0.7071068, 0.0, 0.0)]
    for i, (pos, quat) in enumerate(zip(expected_positions, expected_orientations)):
        sim_utils.create_prim(f"/World/Object_{i}", "Xform", translation=pos, orientation=quat, stage=stage)

    view = XformPrimView("/World/Object_.*", device=device)
    positions, orientations = view.get_world_poses()

    # Batched shapes: (N, 3) positions and (N, 4) quaternions.
    assert positions.shape == (3, 3)
    assert orientations.shape == (3, 4)

    expected_positions_tensor = torch.tensor(expected_positions, dtype=torch.float32, device=device)
    expected_orientations_tensor = torch.tensor(expected_orientations, dtype=torch.float32, device=device)
    torch.testing.assert_close(positions, expected_positions_tensor, atol=1e-5, rtol=0)

    # q and -q encode the same rotation, so accept either sign.
    try:
        torch.testing.assert_close(orientations, expected_orientations_tensor, atol=1e-5, rtol=0)
    except AssertionError:
        torch.testing.assert_close(orientations, -expected_orientations_tensor, atol=1e-5, rtol=0)
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_get_local_poses(device):
    """Test getting local poses from XformPrimView."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    stage = sim_utils.get_current_stage()

    # Parent offset ensures local poses differ from world poses.
    sim_utils.create_prim("/World/Parent", "Xform", translation=(10.0, 0.0, 0.0), stage=stage)

    expected_local_positions = [(1.0, 0.0, 0.0), (0.0, 2.0, 0.0), (0.0, 0.0, 3.0)]
    expected_local_orientations = [
        (1.0, 0.0, 0.0, 0.0),
        (0.7071068, 0.0, 0.0, 0.7071068),
        (0.7071068, 0.7071068, 0.0, 0.0),
    ]
    for idx, (trans, quat) in enumerate(zip(expected_local_positions, expected_local_orientations)):
        sim_utils.create_prim(f"/World/Parent/Child_{idx}", "Xform", translation=trans, orientation=quat, stage=stage)

    view = XformPrimView("/World/Parent/Child_.*", device=device)
    translations, orientations = view.get_local_poses()

    assert translations.shape == (3, 3)
    assert orientations.shape == (3, 4)

    ref_translations = torch.tensor(expected_local_positions, dtype=torch.float32, device=device)
    ref_orientations = torch.tensor(expected_local_orientations, dtype=torch.float32, device=device)
    torch.testing.assert_close(translations, ref_translations, atol=1e-5, rtol=0)
    # Quaternions are sign-ambiguous: q and -q represent the same rotation.
    try:
        torch.testing.assert_close(orientations, ref_orientations, atol=1e-5, rtol=0)
    except AssertionError:
        torch.testing.assert_close(orientations, -ref_orientations, atol=1e-5, rtol=0)
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_get_scales(device):
    """Test getting scales from XformPrimView."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    stage = sim_utils.get_current_stage()

    # Three prims with distinct (including non-uniform) scales.
    expected_scales = [(1.0, 1.0, 1.0), (2.0, 2.0, 2.0), (1.0, 2.0, 3.0)]
    for idx, prim_scale in enumerate(expected_scales):
        sim_utils.create_prim(f"/World/Object_{idx}", "Xform", scale=prim_scale, stage=stage)

    view = XformPrimView("/World/Object_.*", device=device)
    scales = view.get_scales()

    assert scales.shape == (3, 3)
    torch.testing.assert_close(
        scales, torch.tensor(expected_scales, dtype=torch.float32, device=device), atol=1e-5, rtol=0
    )
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_get_visibility(device):
    """Test getting visibility when all prims are visible."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    stage = sim_utils.get_current_stage()

    total = 5
    for idx in range(total):
        sim_utils.create_prim(f"/World/Object_{idx}", "Xform", translation=(float(idx), 0.0, 0.0), stage=stage)

    view = XformPrimView("/World/Object_.*", device=device)
    visibility = view.get_visibility()

    # Newly created prims are expected to be visible by default.
    assert visibility.shape == (total,)
    assert visibility.dtype == torch.bool
    assert torch.all(visibility), "All prims should be visible by default"
| |
|
| |
|
| | """ |
| | Tests - Setters. |
| | """ |
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_set_world_poses(device):
    """Test setting world poses in XformPrimView."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    stage = sim_utils.get_current_stage()

    # Five prims, all starting at the origin.
    total = 5
    for idx in range(total):
        sim_utils.create_prim(f"/World/Object_{idx}", "Xform", translation=(0.0, 0.0, 0.0), stage=stage)

    view = XformPrimView("/World/Object_.*", device=device)

    # Target world-frame poses (positions + wxyz quaternions).
    target_positions = torch.tensor(
        [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0], [10.0, 11.0, 12.0], [13.0, 14.0, 15.0]], device=device
    )
    target_orientations = torch.tensor(
        [
            [1.0, 0.0, 0.0, 0.0],
            [0.7071068, 0.0, 0.0, 0.7071068],
            [0.7071068, 0.7071068, 0.0, 0.0],
            [0.9238795, 0.3826834, 0.0, 0.0],
            [0.7071068, 0.0, 0.7071068, 0.0],
        ],
        device=device,
    )
    view.set_world_poses(target_positions, target_orientations)

    # Reading back should reproduce the targets (quaternions up to sign).
    result_positions, result_orientations = view.get_world_poses()
    torch.testing.assert_close(result_positions, target_positions, atol=1e-5, rtol=0)
    try:
        torch.testing.assert_close(result_orientations, target_orientations, atol=1e-5, rtol=0)
    except AssertionError:
        torch.testing.assert_close(result_orientations, -target_orientations, atol=1e-5, rtol=0)
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_set_world_poses_only_positions(device):
    """Test setting only positions, leaving orientations unchanged."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    stage = sim_utils.get_current_stage()

    # All prims start with the same non-identity orientation so changes are detectable.
    initial_quat = (0.7071068, 0.0, 0.0, 0.7071068)
    for idx in range(3):
        sim_utils.create_prim(
            f"/World/Object_{idx}", "Xform", translation=(0.0, 0.0, 0.0), orientation=initial_quat, stage=stage
        )

    view = XformPrimView("/World/Object_.*", device=device)
    _, orientations_before = view.get_world_poses()

    # Update positions only; orientations argument is explicitly None.
    target_positions = torch.tensor([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 3.0]], device=device)
    view.set_world_poses(positions=target_positions, orientations=None)

    positions_after, orientations_after = view.get_world_poses()
    torch.testing.assert_close(positions_after, target_positions, atol=1e-5, rtol=0)
    # Orientations must be untouched (up to quaternion sign).
    try:
        torch.testing.assert_close(orientations_after, orientations_before, atol=1e-5, rtol=0)
    except AssertionError:
        torch.testing.assert_close(orientations_after, -orientations_before, atol=1e-5, rtol=0)
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_set_world_poses_only_orientations(device):
    """Test setting only orientations, leaving positions unchanged."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    stage = sim_utils.get_current_stage()

    # Prims spread along x so accidental position changes would be detected.
    for idx in range(3):
        sim_utils.create_prim(f"/World/Object_{idx}", "Xform", translation=(float(idx), 0.0, 0.0), stage=stage)

    view = XformPrimView("/World/Object_.*", device=device)
    positions_before, _ = view.get_world_poses()

    # Update orientations only; positions argument is explicitly None.
    target_orientations = torch.tensor(
        [[0.7071068, 0.0, 0.0, 0.7071068], [0.7071068, 0.7071068, 0.0, 0.0], [0.9238795, 0.3826834, 0.0, 0.0]],
        device=device,
    )
    view.set_world_poses(positions=None, orientations=target_orientations)

    positions_after, orientations_after = view.get_world_poses()
    # Positions must be untouched.
    torch.testing.assert_close(positions_after, positions_before, atol=1e-5, rtol=0)
    # Orientations must match the targets (up to quaternion sign).
    try:
        torch.testing.assert_close(orientations_after, target_orientations, atol=1e-5, rtol=0)
    except AssertionError:
        torch.testing.assert_close(orientations_after, -target_orientations, atol=1e-5, rtol=0)
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_set_world_poses_with_hierarchy(device):
    """Test setting world poses correctly handles parent transformations."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    stage = sim_utils.get_current_stage()

    # Each child sits under a parent with a non-trivial transform, so setting a world pose
    # requires compensating for the parent's translation and rotation.
    for idx in range(3):
        sim_utils.create_prim(
            f"/World/Parent_{idx}",
            "Xform",
            translation=(idx * 10.0, 0.0, 0.0),
            orientation=(0.7071068, 0.0, 0.0, 0.7071068),
            stage=stage,
        )
        sim_utils.create_prim(f"/World/Parent_{idx}/Child", "Xform", translation=(0.0, 0.0, 0.0), stage=stage)

    view = XformPrimView("/World/Parent_.*/Child", device=device)

    goal_positions = torch.tensor([[5.0, 5.0, 0.0], [15.0, 5.0, 0.0], [25.0, 5.0, 0.0]], device=device)
    goal_orientations = torch.tensor(
        [[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]], device=device
    )
    view.set_world_poses(goal_positions, goal_orientations)

    # Read back in world frame; looser tolerance because of the extra transform math.
    result_positions, result_orientations = view.get_world_poses()
    torch.testing.assert_close(result_positions, goal_positions, atol=1e-4, rtol=0)
    try:
        torch.testing.assert_close(result_orientations, goal_orientations, atol=1e-4, rtol=0)
    except AssertionError:
        torch.testing.assert_close(result_orientations, -goal_orientations, atol=1e-4, rtol=0)
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_set_local_poses(device):
    """Test setting local poses in XformPrimView."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    stage = sim_utils.get_current_stage()

    # Parent with an offset so the local and world frames differ.
    sim_utils.create_prim("/World/Parent", "Xform", translation=(5.0, 5.0, 5.0), stage=stage)
    total = 4
    for idx in range(total):
        sim_utils.create_prim(f"/World/Parent/Child_{idx}", "Xform", translation=(0.0, 0.0, 0.0), stage=stage)

    view = XformPrimView("/World/Parent/Child_.*", device=device)

    target_translations = torch.tensor(
        [[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 3.0], [4.0, 4.0, 4.0]], device=device
    )
    target_orientations = torch.tensor(
        [
            [1.0, 0.0, 0.0, 0.0],
            [0.7071068, 0.0, 0.0, 0.7071068],
            [0.7071068, 0.7071068, 0.0, 0.0],
            [0.9238795, 0.3826834, 0.0, 0.0],
        ],
        device=device,
    )
    view.set_local_poses(target_translations, target_orientations)

    result_translations, result_orientations = view.get_local_poses()
    torch.testing.assert_close(result_translations, target_translations, atol=1e-5, rtol=0)
    # Quaternion sign ambiguity: q and -q are the same rotation.
    try:
        torch.testing.assert_close(result_orientations, target_orientations, atol=1e-5, rtol=0)
    except AssertionError:
        torch.testing.assert_close(result_orientations, -target_orientations, atol=1e-5, rtol=0)
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_set_local_poses_only_translations(device):
    """Test setting only local translations."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    stage = sim_utils.get_current_stage()

    # Parent at origin; children share a non-identity local orientation so changes show up.
    sim_utils.create_prim("/World/Parent", "Xform", translation=(0.0, 0.0, 0.0), stage=stage)
    initial_quat = (0.7071068, 0.0, 0.0, 0.7071068)
    for idx in range(3):
        sim_utils.create_prim(
            f"/World/Parent/Child_{idx}", "Xform", translation=(0.0, 0.0, 0.0), orientation=initial_quat, stage=stage
        )

    view = XformPrimView("/World/Parent/Child_.*", device=device)
    _, orientations_before = view.get_local_poses()

    # Update translations only; orientations argument is explicitly None.
    target_translations = torch.tensor([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 3.0]], device=device)
    view.set_local_poses(translations=target_translations, orientations=None)

    translations_after, orientations_after = view.get_local_poses()
    torch.testing.assert_close(translations_after, target_translations, atol=1e-5, rtol=0)
    # Orientations must be untouched (up to quaternion sign).
    try:
        torch.testing.assert_close(orientations_after, orientations_before, atol=1e-5, rtol=0)
    except AssertionError:
        torch.testing.assert_close(orientations_after, -orientations_before, atol=1e-5, rtol=0)
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_set_scales(device):
    """Test setting scales in XformPrimView."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    stage = sim_utils.get_current_stage()

    # Five prims starting at unit scale.
    total = 5
    for idx in range(total):
        sim_utils.create_prim(f"/World/Object_{idx}", "Xform", scale=(1.0, 1.0, 1.0), stage=stage)

    view = XformPrimView("/World/Object_.*", device=device)

    # Mix of uniform and non-uniform target scales.
    target_scales = torch.tensor(
        [[2.0, 2.0, 2.0], [1.0, 2.0, 3.0], [0.5, 0.5, 0.5], [3.0, 1.0, 2.0], [1.5, 1.5, 1.5]], device=device
    )
    view.set_scales(target_scales)

    # Read back and compare.
    torch.testing.assert_close(view.get_scales(), target_scales, atol=1e-5, rtol=0)
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_set_visibility(device):
    """Test toggling visibility multiple times."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    stage = sim_utils.get_current_stage()

    total = 3
    for idx in range(total):
        sim_utils.create_prim(f"/World/Object_{idx}", "Xform", stage=stage)

    view = XformPrimView("/World/Object_.*", device=device)

    # Default state: everything visible.
    assert torch.all(view.get_visibility()), "All should be visible initially"

    # Hide all prims.
    view.set_visibility(torch.zeros(total, dtype=torch.bool, device=device))
    assert not torch.any(view.get_visibility()), "All should be invisible"

    # Show all prims again.
    view.set_visibility(torch.ones(total, dtype=torch.bool, device=device))
    assert torch.all(view.get_visibility()), "All should be visible again"

    # Hide a single prim via the indices argument.
    view.set_visibility(torch.tensor([False], dtype=torch.bool, device=device), indices=[1])
    flags = view.get_visibility()
    assert flags[0] and not flags[1] and flags[2], "Only middle prim should be invisible"
| |
|
| |
|
| | """ |
| | Tests - Index Handling. |
| | """ |
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
@pytest.mark.parametrize("index_type", ["list", "torch_tensor", "slice_none"])
@pytest.mark.parametrize("method", ["world_poses", "local_poses", "scales", "visibility"])
def test_index_types_get_methods(device, index_type, method):
    """Test that getter methods work with different index types."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    stage = sim_utils.get_current_stage()

    # Scene setup depends on the getter under test.
    num_prims = 10
    if method == "local_poses":
        # Local poses need a parent prim so local != world.
        sim_utils.create_prim("/World/Parent", "Xform", translation=(10.0, 0.0, 0.0), stage=stage)
        for i in range(num_prims):
            sim_utils.create_prim(
                f"/World/Parent/Child_{i}", "Xform", translation=(float(i), float(i) * 0.5, 0.0), stage=stage
            )
        view = XformPrimView("/World/Parent/Child_.*", device=device)
    elif method == "scales":
        # Distinct per-prim scales so subset comparisons are meaningful.
        for i in range(num_prims):
            scale = (1.0 + i * 0.5, 1.0 + i * 0.3, 1.0 + i * 0.2)
            sim_utils.create_prim(f"/World/Object_{i}", "Xform", scale=scale, stage=stage)
        view = XformPrimView("/World/Object_.*", device=device)
    else:
        # world_poses / visibility: distinct translations are enough.
        for i in range(num_prims):
            sim_utils.create_prim(f"/World/Object_{i}", "Xform", translation=(float(i), 0.0, 0.0), stage=stage)
        view = XformPrimView("/World/Object_.*", device=device)

    # Reference data: fetch everything with the default (all-prims) indices.
    if method == "world_poses":
        all_data1, all_data2 = view.get_world_poses()
    elif method == "local_poses":
        all_data1, all_data2 = view.get_local_poses()
    elif method == "scales":
        all_data1 = view.get_scales()
        all_data2 = None
    else:
        all_data1 = view.get_visibility()
        all_data2 = None

    # Convert the target indices into the parametrized index representation.
    target_indices_base = [2, 5, 7]
    indices, target_indices = _prepare_indices(index_type, target_indices_base, num_prims, device)

    # Fetch the subset using the index representation under test.
    if method == "world_poses":
        subset_data1, subset_data2 = view.get_world_poses(indices=indices)
    elif method == "local_poses":
        subset_data1, subset_data2 = view.get_local_poses(indices=indices)
    elif method == "scales":
        subset_data1 = view.get_scales(indices=indices)
        subset_data2 = None
    else:
        subset_data1 = view.get_visibility(indices=indices)
        subset_data2 = None

    # Shape checks: visibility is a boolean vector; the rest are (N, 3) plus optional (N, 4).
    expected_count = len(target_indices)
    if method == "visibility":
        assert subset_data1.shape == (expected_count,)
    else:
        assert subset_data1.shape == (expected_count, 3)
        if subset_data2 is not None:
            assert subset_data2.shape == (expected_count, 4)

    # The subset must equal the corresponding rows of the full query.
    target_indices_tensor = torch.tensor(target_indices, dtype=torch.int64, device=device)
    torch.testing.assert_close(subset_data1, all_data1[target_indices_tensor], atol=1e-5, rtol=0)
    if subset_data2 is not None and all_data2 is not None:
        torch.testing.assert_close(subset_data2, all_data2[target_indices_tensor], atol=1e-5, rtol=0)
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
@pytest.mark.parametrize("index_type", ["list", "torch_tensor", "slice_none"])
@pytest.mark.parametrize("method", ["world_poses", "local_poses", "scales", "visibility"])
def test_index_types_set_methods(device, index_type, method):
    """Test that setter methods work with different index types."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    stage = sim_utils.get_current_stage()

    # Scene setup: local poses need a parent prim so local != world.
    num_prims = 10
    if method == "local_poses":
        sim_utils.create_prim("/World/Parent", "Xform", translation=(5.0, 5.0, 0.0), stage=stage)
        for i in range(num_prims):
            sim_utils.create_prim(f"/World/Parent/Child_{i}", "Xform", translation=(float(i), 0.0, 0.0), stage=stage)
        view = XformPrimView("/World/Parent/Child_.*", device=device)
    else:
        for i in range(num_prims):
            sim_utils.create_prim(f"/World/Object_{i}", "Xform", translation=(0.0, 0.0, 0.0), stage=stage)
        view = XformPrimView("/World/Object_.*", device=device)

    # Snapshot the initial state so untouched prims can be verified later.
    if method == "world_poses":
        initial_data1, initial_data2 = view.get_world_poses()
    elif method == "local_poses":
        initial_data1, initial_data2 = view.get_local_poses()
    elif method == "scales":
        initial_data1 = view.get_scales()
        initial_data2 = None
    else:
        initial_data1 = view.get_visibility()
        initial_data2 = None

    # Convert the target indices into the parametrized index representation.
    target_indices_base = [2, 5, 7]
    indices, target_indices = _prepare_indices(index_type, target_indices_base, num_prims, device)

    # New values sized to the subset being written.
    num_to_set = len(target_indices)
    if method in ["world_poses", "local_poses"]:
        new_data1 = torch.randn(num_to_set, 3, device=device) * 10.0
        new_data2 = torch.tensor([[1.0, 0.0, 0.0, 0.0]] * num_to_set, dtype=torch.float32, device=device)
    elif method == "scales":
        new_data1 = torch.rand(num_to_set, 3, device=device) * 2.0 + 0.5
        new_data2 = None
    else:
        # Visibility: flip the selected prims to hidden.
        new_data1 = torch.zeros(num_to_set, dtype=torch.bool, device=device)
        new_data2 = None

    # Apply the setter under test with the given index representation.
    if method == "world_poses":
        view.set_world_poses(positions=new_data1, orientations=new_data2, indices=indices)
    elif method == "local_poses":
        view.set_local_poses(translations=new_data1, orientations=new_data2, indices=indices)
    elif method == "scales":
        view.set_scales(scales=new_data1, indices=indices)
    else:
        view.set_visibility(visibility=new_data1, indices=indices)

    # Read the full state back.
    if method == "world_poses":
        updated_data1, updated_data2 = view.get_world_poses()
    elif method == "local_poses":
        updated_data1, updated_data2 = view.get_local_poses()
    elif method == "scales":
        updated_data1 = view.get_scales()
        updated_data2 = None
    else:
        updated_data1 = view.get_visibility()
        updated_data2 = None

    # Targeted prims must carry the new values (quaternions compared up to sign).
    for i, target_idx in enumerate(target_indices):
        torch.testing.assert_close(updated_data1[target_idx], new_data1[i], atol=1e-5, rtol=0)
        if new_data2 is not None and updated_data2 is not None:
            try:
                torch.testing.assert_close(updated_data2[target_idx], new_data2[i], atol=1e-5, rtol=0)
            except AssertionError:
                # Quaternion sign flip is acceptable.
                torch.testing.assert_close(updated_data2[target_idx], -new_data2[i], atol=1e-5, rtol=0)

    # With explicit indices, every other prim must be unchanged (slice(None) targets all,
    # so there is nothing left to check in that case).
    if index_type != "slice_none":
        for i in range(num_prims):
            if i not in target_indices:
                torch.testing.assert_close(updated_data1[i], initial_data1[i], atol=1e-5, rtol=0)
                if initial_data2 is not None and updated_data2 is not None:
                    try:
                        torch.testing.assert_close(updated_data2[i], initial_data2[i], atol=1e-5, rtol=0)
                    except AssertionError:
                        # Quaternion sign flip is acceptable.
                        torch.testing.assert_close(updated_data2[i], -initial_data2[i], atol=1e-5, rtol=0)
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_indices_single_element(device):
    """Test with a single index."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    stage = sim_utils.get_current_stage()

    for idx in range(5):
        sim_utils.create_prim(f"/World/Object_{idx}", "Xform", translation=(float(idx), 0.0, 0.0), stage=stage)

    view = XformPrimView("/World/Object_.*", device=device)

    # A one-element index list still yields batched (1, ...) tensors.
    single_index = [3]
    positions, orientations = view.get_world_poses(indices=single_index)
    assert positions.shape == (1, 3)
    assert orientations.shape == (1, 4)

    # Setting through the same single index should round-trip.
    target_position = torch.tensor([[100.0, 200.0, 300.0]], device=device)
    view.set_world_poses(positions=target_position, indices=single_index)

    readback_positions, _ = view.get_world_poses(indices=single_index)
    torch.testing.assert_close(readback_positions, target_position, atol=1e-5, rtol=0)
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_indices_out_of_order(device):
    """Test with indices provided in non-sequential order."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    stage = sim_utils.get_current_stage()

    total = 10
    for idx in range(total):
        sim_utils.create_prim(f"/World/Object_{idx}", "Xform", translation=(0.0, 0.0, 0.0), stage=stage)

    view = XformPrimView("/World/Object_.*", device=device)

    # Write x = prim-index through a scrambled index list: row i of the data must land on
    # prim shuffled_indices[i], not on prim i.
    shuffled_indices = [7, 2, 9, 0, 5]
    scrambled_positions = torch.tensor(
        [[7.0, 0.0, 0.0], [2.0, 0.0, 0.0], [9.0, 0.0, 0.0], [0.0, 0.0, 0.0], [5.0, 0.0, 0.0]], device=device
    )
    view.set_world_poses(positions=scrambled_positions, indices=shuffled_indices)

    all_positions, _ = view.get_world_poses()

    # Untouched prims keep x = 0; touched prims got x equal to their own index.
    expected_x_values = [0.0, 0.0, 2.0, 0.0, 0.0, 5.0, 0.0, 7.0, 0.0, 9.0]
    for prim_idx, expected_x in enumerate(expected_x_values):
        assert abs(all_positions[prim_idx, 0].item() - expected_x) < 1e-5
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_indices_with_only_positions_or_orientations(device):
    """Test indices work correctly when setting only positions or only orientations."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    def _assert_quat_close(actual, expected):
        """Assert per-row quaternion equality up to the q/-q sign ambiguity."""
        # The quaternion sign may flip independently per prim, so compare
        # row-by-row instead of the whole tensor against a single global sign.
        for row in range(actual.shape[0]):
            try:
                torch.testing.assert_close(actual[row], expected[row], atol=1e-5, rtol=0)
            except AssertionError:
                torch.testing.assert_close(actual[row], -expected[row], atol=1e-5, rtol=0)

    stage = sim_utils.get_current_stage()

    # Create prims at identity pose.
    num_prims = 5
    for i in range(num_prims):
        sim_utils.create_prim(
            f"/World/Object_{i}", "Xform", translation=(0.0, 0.0, 0.0), orientation=(1.0, 0.0, 0.0, 0.0), stage=stage
        )

    view = XformPrimView("/World/Object_.*", device=device)

    initial_positions, initial_orientations = view.get_world_poses()

    # Set only positions on a subset of prims.
    indices = [1, 3]
    new_positions = torch.tensor([[10.0, 0.0, 0.0], [30.0, 0.0, 0.0]], device=device)
    view.set_world_poses(positions=new_positions, orientations=None, indices=indices)

    updated_positions, updated_orientations = view.get_world_poses()

    # Indexed prims moved; an untouched prim kept its position.
    torch.testing.assert_close(updated_positions[1], new_positions[0], atol=1e-5, rtol=0)
    torch.testing.assert_close(updated_positions[3], new_positions[1], atol=1e-5, rtol=0)
    torch.testing.assert_close(updated_positions[0], initial_positions[0], atol=1e-5, rtol=0)

    # Orientations must be unchanged (up to per-prim quaternion sign).
    _assert_quat_close(updated_orientations, initial_orientations)

    # Set only orientations on a different subset.
    indices2 = [0, 4]
    new_orientations = torch.tensor([[0.7071068, 0.0, 0.0, 0.7071068], [0.7071068, 0.7071068, 0.0, 0.0]], device=device)
    view.set_world_poses(positions=None, orientations=new_orientations, indices=indices2)

    final_positions, final_orientations = view.get_world_poses()

    # Positions must be unchanged by the orientation-only write.
    torch.testing.assert_close(final_positions, updated_positions, atol=1e-5, rtol=0)

    # Indexed prims carry the new orientations, checked per prim so that an
    # independent sign flip on one row cannot fail the other.
    _assert_quat_close(final_orientations[indices2], new_orientations)
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_index_type_none_equivalent_to_all(device):
    """Test that indices=None is equivalent to getting/setting all prims."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    stage = sim_utils.get_current_stage()

    # Spawn a row of prims spread along x.
    prim_count = 6
    for idx in range(prim_count):
        sim_utils.create_prim(f"/World/Object_{idx}", "Xform", translation=(float(idx), 0.0, 0.0), stage=stage)

    view = XformPrimView("/World/Object_.*", device=device)

    # Read poses three equivalent ways: explicit None, default arg, full slice.
    pos_none, quat_none = view.get_world_poses(indices=None)
    pos_default, quat_default = view.get_world_poses()
    pos_slice, quat_slice = view.get_world_poses(indices=slice(None))

    # All three reads must agree exactly.
    for other_pos, other_quat in ((pos_default, quat_default), (pos_slice, quat_slice)):
        torch.testing.assert_close(pos_none, other_pos, atol=1e-10, rtol=0)
        torch.testing.assert_close(quat_none, other_quat, atol=1e-10, rtol=0)

    # Prepare identical target poses for both write paths.
    new_positions = torch.randn(prim_count, 3, device=device) * 10.0
    new_orientations = torch.tensor([[1.0, 0.0, 0.0, 0.0]] * prim_count, dtype=torch.float32, device=device)

    # Write with indices=None and capture the resulting state.
    view.set_world_poses(positions=new_positions, orientations=new_orientations, indices=None)
    pos_after_none, quat_after_none = view.get_world_poses()

    # Perturb positions so the next write has real work to do.
    view.set_world_poses(positions=torch.zeros(prim_count, 3, device=device), indices=None)

    # Write the same targets with indices=slice(None).
    view.set_world_poses(positions=new_positions, orientations=new_orientations, indices=slice(None))
    pos_after_slice, quat_after_slice = view.get_world_poses()

    # Both write paths must land on identical poses.
    torch.testing.assert_close(pos_after_none, pos_after_slice, atol=1e-5, rtol=0)
    torch.testing.assert_close(quat_after_none, quat_after_slice, atol=1e-5, rtol=0)
| |
|
| |
|
| | """ |
| | Tests - Integration. |
| | """ |
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_with_franka_robots(device):
    """Test XformPrimView with real Franka robot USD assets."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    stage = sim_utils.get_current_stage()

    # Reference the Franka USD asset on the Nucleus server.
    franka_usd_path = f"{ISAAC_NUCLEUS_DIR}/Robots/FrankaRobotics/FrankaPanda/franka.usd"

    # Spawn two robots at the stage origin.
    for robot_name in ("Franka_1", "Franka_2"):
        sim_utils.create_prim(f"/World/{robot_name}", "Xform", usd_path=franka_usd_path, stage=stage)

    # The view regex should pick up both robots.
    frankas_view = XformPrimView("/World/Franka_.*", device=device)
    assert frankas_view.count == 2

    initial_positions, initial_orientations = frankas_view.get_world_poses()

    # Both robots start at the origin...
    expected_initial_positions = torch.zeros(2, 3, device=device)
    torch.testing.assert_close(initial_positions, expected_initial_positions, atol=1e-5, rtol=0)

    # ...with identity orientation (accepting the quaternion q/-q sign ambiguity).
    expected_initial_orientations = torch.tensor([[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]], device=device)
    try:
        torch.testing.assert_close(initial_orientations, expected_initial_orientations, atol=1e-5, rtol=0)
    except AssertionError:
        torch.testing.assert_close(initial_orientations, -expected_initial_orientations, atol=1e-5, rtol=0)

    # Move the robots apart and rotate them in opposite directions about z.
    new_positions = torch.tensor([[10.0, 10.0, 0.0], [-40.0, -40.0, 0.0]], device=device)
    new_orientations = torch.tensor(
        [[0.7071068, 0.0, 0.0, 0.7071068], [0.7071068, 0.0, 0.0, -0.7071068]], device=device
    )
    frankas_view.set_world_poses(positions=new_positions, orientations=new_orientations)

    # Read back and verify the writes landed (orientation up to quaternion sign).
    retrieved_positions, retrieved_orientations = frankas_view.get_world_poses()
    torch.testing.assert_close(retrieved_positions, new_positions, atol=1e-5, rtol=0)
    try:
        torch.testing.assert_close(retrieved_orientations, new_orientations, atol=1e-5, rtol=0)
    except AssertionError:
        torch.testing.assert_close(retrieved_orientations, -new_orientations, atol=1e-5, rtol=0)
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_with_nested_targets(device):
    """Test with nested frame/target structure similar to Isaac Sim tests."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    stage = sim_utils.get_current_stage()

    # Build three frames, each carrying one child target.
    for idx in (1, 2, 3):
        sim_utils.create_prim(f"/World/Frame_{idx}", "Xform", stage=stage)
        sim_utils.create_prim(f"/World/Frame_{idx}/Target", "Xform", stage=stage)

    # One view per hierarchy level.
    frames_view = XformPrimView("/World/Frame_.*", device=device)
    targets_view = XformPrimView("/World/Frame_.*/Target", device=device)
    assert frames_view.count == 3
    assert targets_view.count == 3

    # Offset the frames in world space via their local poses.
    frame_translations = torch.tensor([[0.0, 0.0, 0.0], [0.0, 10.0, 5.0], [0.0, 3.0, 5.0]], device=device)
    frames_view.set_local_poses(translations=frame_translations)

    # Offset each target relative to its parent frame.
    target_translations = torch.tensor([[0.0, 20.0, 10.0], [0.0, 30.0, 20.0], [0.0, 50.0, 10.0]], device=device)
    targets_view.set_local_poses(translations=target_translations)

    # The world pose of each target must be its frame offset plus its own offset
    # (frames and targets carry no rotation here, so plain addition applies).
    world_positions, _ = targets_view.get_world_poses()
    expected_positions = frame_translations + target_translations
    torch.testing.assert_close(world_positions, expected_positions, atol=1e-5, rtol=0)
| |
|
| |
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_visibility_with_hierarchy(device):
    """Test visibility with parent-child hierarchy and inheritance."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA not available")

    stage = sim_utils.get_current_stage()

    # Build one parent xform holding four child xforms.
    sim_utils.create_prim("/World/Parent", "Xform", stage=stage)
    num_children = 4
    for child_idx in range(num_children):
        sim_utils.create_prim(f"/World/Parent/Child_{child_idx}", "Xform", stage=stage)

    # One view per hierarchy level.
    parent_view = XformPrimView("/World/Parent", device=device)
    children_view = XformPrimView("/World/Parent/Child_.*", device=device)

    # Everything starts out visible.
    assert parent_view.get_visibility()[0], "Parent should be visible initially"
    assert torch.all(children_view.get_visibility()), "All children should be visible initially"

    # Toggle individual children and read the exact pattern back.
    new_visibility = torch.tensor([True, False, True, False], dtype=torch.bool, device=device)
    children_view.set_visibility(new_visibility)
    torch.testing.assert_close(children_view.get_visibility(), new_visibility)

    # Restore every child to visible.
    children_view.set_visibility(torch.ones(num_children, dtype=torch.bool, device=device))
    assert torch.all(children_view.get_visibility()), "All children should be visible again"

    # Hide the parent: visibility propagates down, so children report invisible too.
    parent_view.set_visibility(torch.tensor([False], dtype=torch.bool, device=device))
    assert not parent_view.get_visibility()[0], "Parent should be invisible"
    assert not torch.any(
        children_view.get_visibility()
    ), "All children should be invisible when parent is invisible"

    # Show the parent again: the children become visible with it.
    parent_view.set_visibility(torch.tensor([True], dtype=torch.bool, device=device))
    assert parent_view.get_visibility()[0], "Parent should be visible again"
    assert torch.all(
        children_view.get_visibility()
    ), "All children should be visible again when parent is visible"
| |
|
| |
|
| | """ |
| | Tests - Comparison with Isaac Sim Implementation. |
| | """ |
| |
|
| |
|
def test_compare_get_world_poses_with_isaacsim():
    """Compare get_world_poses with Isaac Sim's implementation."""
    # Skip before doing any stage work: without Isaac Sim there is nothing to compare.
    if _IsaacSimXformPrimView is None:
        pytest.skip("Isaac Sim is not available")

    def _as_tensor(value):
        """Return ``value`` as a float32 torch tensor (Isaac Sim may return non-tensor arrays)."""
        return value if isinstance(value, torch.Tensor) else torch.tensor(value, dtype=torch.float32)

    def _assert_quat_close(actual, expected, atol):
        """Assert per-row quaternion equality up to the q/-q sign ambiguity."""
        # The sign may flip independently per prim, so do not compare the whole tensor at once.
        for i in range(actual.shape[0]):
            try:
                torch.testing.assert_close(actual[i], expected[i], atol=atol, rtol=0)
            except AssertionError:
                torch.testing.assert_close(actual[i], -expected[i], atol=atol, rtol=0)

    stage = sim_utils.get_current_stage()

    # Create prims with varied translations and a repeating cycle of three rotations.
    num_prims = 10
    for i in range(num_prims):
        pos = (i * 2.0, i * 0.5, i * 1.5)
        if i % 3 == 0:
            quat = (1.0, 0.0, 0.0, 0.0)
        elif i % 3 == 1:
            quat = (0.7071068, 0.0, 0.0, 0.7071068)
        else:
            quat = (0.7071068, 0.7071068, 0.0, 0.0)
        sim_utils.create_prim(f"/World/Env_{i}/Object", "Xform", translation=pos, orientation=quat, stage=stage)

    pattern = "/World/Env_.*/Object"

    # Build both views over the same prims.
    isaaclab_view = XformPrimView(pattern, device="cpu")
    isaacsim_view = _IsaacSimXformPrimView(pattern, reset_xform_properties=False)

    # Query world poses from both implementations.
    isaaclab_pos, isaaclab_quat = isaaclab_view.get_world_poses()
    isaacsim_pos, isaacsim_quat = isaacsim_view.get_world_poses()
    isaacsim_pos = _as_tensor(isaacsim_pos)
    isaacsim_quat = _as_tensor(isaacsim_quat)

    # Positions must match exactly; orientations up to per-prim quaternion sign.
    torch.testing.assert_close(isaaclab_pos, isaacsim_pos, atol=1e-5, rtol=0)
    _assert_quat_close(isaaclab_quat, isaacsim_quat, atol=1e-5)
| |
|
| |
|
def test_compare_set_world_poses_with_isaacsim():
    """Compare set_world_poses with Isaac Sim's implementation."""
    # Skip before doing any stage work: without Isaac Sim there is nothing to compare.
    if _IsaacSimXformPrimView is None:
        pytest.skip("Isaac Sim is not available")

    def _as_tensor(value):
        """Return ``value`` as a float32 torch tensor (Isaac Sim may return non-tensor arrays)."""
        return value if isinstance(value, torch.Tensor) else torch.tensor(value, dtype=torch.float32)

    def _assert_quat_close(actual, expected, atol):
        """Assert per-row quaternion equality up to the q/-q sign ambiguity."""
        # The sign may flip independently per prim, so do not compare the whole tensor at once.
        for i in range(actual.shape[0]):
            try:
                torch.testing.assert_close(actual[i], expected[i], atol=atol, rtol=0)
            except AssertionError:
                torch.testing.assert_close(actual[i], -expected[i], atol=atol, rtol=0)

    stage = sim_utils.get_current_stage()

    # Create prims at the origin.
    num_prims = 8
    for i in range(num_prims):
        sim_utils.create_prim(f"/World/Env_{i}/Object", "Xform", translation=(0.0, 0.0, 0.0), stage=stage)

    pattern = "/World/Env_.*/Object"

    # Build both views over the same prims.
    isaaclab_view = XformPrimView(pattern, device="cpu")
    isaacsim_view = _IsaacSimXformPrimView(pattern, reset_xform_properties=False)

    # Write the same random poses through both implementations.
    new_positions = torch.randn(num_prims, 3) * 10.0
    new_orientations = torch.tensor([[1.0, 0.0, 0.0, 0.0]] * num_prims, dtype=torch.float32)
    isaaclab_view.set_world_poses(new_positions.clone(), new_orientations.clone())
    isaacsim_view.set_world_poses(new_positions.clone(), new_orientations.clone())

    # Read back from both and compare.
    isaaclab_pos, isaaclab_quat = isaaclab_view.get_world_poses()
    isaacsim_pos, isaacsim_quat = isaacsim_view.get_world_poses()
    isaacsim_pos = _as_tensor(isaacsim_pos)
    isaacsim_quat = _as_tensor(isaacsim_quat)

    # Positions must match; orientations up to per-prim quaternion sign.
    torch.testing.assert_close(isaaclab_pos, isaacsim_pos, atol=1e-4, rtol=0)
    _assert_quat_close(isaaclab_quat, isaacsim_quat, atol=1e-4)
| |
|
| |
|
def test_compare_get_local_poses_with_isaacsim():
    """Compare get_local_poses with Isaac Sim's implementation."""
    # Skip before doing any stage work: without Isaac Sim there is nothing to compare.
    if _IsaacSimXformPrimView is None:
        pytest.skip("Isaac Sim is not available")

    def _as_tensor(value):
        """Return ``value`` as a float32 torch tensor (Isaac Sim may return non-tensor arrays)."""
        return value if isinstance(value, torch.Tensor) else torch.tensor(value, dtype=torch.float32)

    def _assert_quat_close(actual, expected, atol):
        """Assert per-row quaternion equality up to the q/-q sign ambiguity."""
        # The sign may flip independently per prim, so do not compare the whole tensor at once.
        for i in range(actual.shape[0]):
            try:
                torch.testing.assert_close(actual[i], expected[i], atol=atol, rtol=0)
            except AssertionError:
                torch.testing.assert_close(actual[i], -expected[i], atol=atol, rtol=0)

    stage = sim_utils.get_current_stage()

    # Create parent frames with distinct world offsets, each holding a child
    # whose local pose differs per index.
    num_prims = 5
    for i in range(num_prims):
        sim_utils.create_prim(f"/World/Env_{i}", "Xform", translation=(i * 5.0, 0.0, 0.0), stage=stage)
        local_pos = (1.0, float(i), 0.0)
        local_quat = (1.0, 0.0, 0.0, 0.0) if i % 2 == 0 else (0.7071068, 0.0, 0.0, 0.7071068)
        sim_utils.create_prim(
            f"/World/Env_{i}/Object", "Xform", translation=local_pos, orientation=local_quat, stage=stage
        )

    pattern = "/World/Env_.*/Object"

    # Build both views over the same prims.
    isaaclab_view = XformPrimView(pattern, device="cpu")
    isaacsim_view = _IsaacSimXformPrimView(pattern, reset_xform_properties=False)

    # Query local poses from both implementations.
    isaaclab_trans, isaaclab_quat = isaaclab_view.get_local_poses()
    isaacsim_trans, isaacsim_quat = isaacsim_view.get_local_poses()
    isaacsim_trans = _as_tensor(isaacsim_trans)
    isaacsim_quat = _as_tensor(isaacsim_quat)

    # Translations must match exactly; orientations up to per-prim quaternion sign.
    torch.testing.assert_close(isaaclab_trans, isaacsim_trans, atol=1e-5, rtol=0)
    _assert_quat_close(isaaclab_quat, isaacsim_quat, atol=1e-5)
| |
|
| |
|
def test_compare_set_local_poses_with_isaacsim():
    """Compare set_local_poses with Isaac Sim's implementation."""
    # Skip before doing any stage work: without Isaac Sim there is nothing to compare.
    if _IsaacSimXformPrimView is None:
        pytest.skip("Isaac Sim is not available")

    def _as_tensor(value):
        """Return ``value`` as a float32 torch tensor (Isaac Sim may return non-tensor arrays)."""
        return value if isinstance(value, torch.Tensor) else torch.tensor(value, dtype=torch.float32)

    def _assert_quat_close(actual, expected, atol):
        """Assert per-row quaternion equality up to the q/-q sign ambiguity."""
        # The sign may flip independently per prim, so do not compare the whole tensor at once.
        for i in range(actual.shape[0]):
            try:
                torch.testing.assert_close(actual[i], expected[i], atol=atol, rtol=0)
            except AssertionError:
                torch.testing.assert_close(actual[i], -expected[i], atol=atol, rtol=0)

    stage = sim_utils.get_current_stage()

    # Create parent frames with distinct world offsets, each holding a child at the origin.
    num_prims = 6
    for i in range(num_prims):
        sim_utils.create_prim(f"/World/Env_{i}", "Xform", translation=(i * 3.0, 0.0, 0.0), stage=stage)
        sim_utils.create_prim(f"/World/Env_{i}/Object", "Xform", translation=(0.0, 0.0, 0.0), stage=stage)

    pattern = "/World/Env_.*/Object"

    # Build both views over the same prims.
    isaaclab_view = XformPrimView(pattern, device="cpu")
    isaacsim_view = _IsaacSimXformPrimView(pattern, reset_xform_properties=False)

    # Write the same random local poses through both implementations.
    new_translations = torch.randn(num_prims, 3) * 5.0
    new_orientations = torch.tensor(
        [[1.0, 0.0, 0.0, 0.0], [0.7071068, 0.0, 0.0, 0.7071068]] * (num_prims // 2), dtype=torch.float32
    )
    isaaclab_view.set_local_poses(new_translations.clone(), new_orientations.clone())
    isaacsim_view.set_local_poses(new_translations.clone(), new_orientations.clone())

    # Read back from both and compare.
    isaaclab_trans, isaaclab_quat = isaaclab_view.get_local_poses()
    isaacsim_trans, isaacsim_quat = isaacsim_view.get_local_poses()
    isaacsim_trans = _as_tensor(isaacsim_trans)
    isaacsim_quat = _as_tensor(isaacsim_quat)

    # Translations must match; orientations up to per-prim quaternion sign.
    torch.testing.assert_close(isaaclab_trans, isaacsim_trans, atol=1e-4, rtol=0)
    _assert_quat_close(isaaclab_quat, isaacsim_quat, atol=1e-4)
| |
|