# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# ignore private usage of variables warning
# pyright: reportPrivateUsage=none
"""Launch Isaac Sim Simulator first."""
from isaaclab.app import AppLauncher
# launch omniverse app
simulation_app = AppLauncher(headless=True, enable_cameras=True).app
"""Rest everything follows."""
import copy
import os
import random
import numpy as np
import pytest
import scipy.spatial.transform as tf
import torch
import omni.replicator.core as rep
from isaacsim.core.prims import SingleGeometryPrim, SingleRigidPrim
from pxr import Gf, Usd, UsdGeom
import isaaclab.sim as sim_utils
from isaaclab.sensors.camera import Camera, CameraCfg
from isaaclab.utils import convert_dict_to_backend
from isaaclab.utils.math import convert_quat
from isaaclab.utils.timer import Timer
# sample camera poses
POSITION = (2.5, 2.5, 2.5)
QUAT_ROS = (-0.17591989, 0.33985114, 0.82047325, -0.42470819)
QUAT_OPENGL = (0.33985113, 0.17591988, 0.42470818, 0.82047324)
QUAT_WORLD = (-0.3647052, -0.27984815, -0.1159169, 0.88047623)
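# All three quaternions above encode the same physical camera orientation, expressed
# in the (w, x, y, z) layout of the respective convention. As an illustrative sanity
# check (not part of the tests), each should be unit-norm:
#   for q in (QUAT_ROS, QUAT_OPENGL, QUAT_WORLD):
#       assert abs(sum(c * c for c in q) - 1.0) < 1e-6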
# NOTE: setup and teardown are standalone functions so that tests can also call them directly
# resolutions
HEIGHT = 240
WIDTH = 320
def setup() -> tuple[sim_utils.SimulationContext, CameraCfg, float]:
camera_cfg = CameraCfg(
height=HEIGHT,
width=WIDTH,
prim_path="/World/Camera",
update_period=0,
data_types=["distance_to_image_plane"],
spawn=sim_utils.PinholeCameraCfg(
focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5)
),
)
# Create a new stage
sim_utils.create_new_stage()
# Simulation time-step
dt = 0.01
# Load kit helper
sim_cfg = sim_utils.SimulationCfg(dt=dt)
sim = sim_utils.SimulationContext(sim_cfg)
# populate scene
_populate_scene()
# load stage
sim_utils.update_stage()
return sim, camera_cfg, dt
def teardown(sim: sim_utils.SimulationContext):
# Cleanup
# close all the opened viewport from before.
rep.vp_manager.destroy_hydra_textures("Replicator")
# stop simulation
    # note: cannot use sim.stop() since it performs one extra render step after stopping
sim._timeline.stop()
# clear the stage
sim.clear_all_callbacks()
sim.clear_instance()
@pytest.fixture
def setup_sim_camera():
"""Create a simulation context."""
sim, camera_cfg, dt = setup()
yield sim, camera_cfg, dt
teardown(sim)
def test_camera_init(setup_sim_camera):
"""Test camera initialization."""
# Create camera configuration
sim, camera_cfg, dt = setup_sim_camera
# Create camera
camera = Camera(camera_cfg)
# Check simulation parameter is set correctly
assert sim.has_rtx_sensors()
# Play sim
sim.reset()
# Check if camera is initialized
assert camera.is_initialized
# Check if camera prim is set correctly and that it is a camera prim
assert camera._sensor_prims[0].GetPath().pathString == camera_cfg.prim_path
assert isinstance(camera._sensor_prims[0], UsdGeom.Camera)
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
sim.step()
# Check buffers that exist and have correct shapes
assert camera.data.pos_w.shape == (1, 3)
assert camera.data.quat_w_ros.shape == (1, 4)
assert camera.data.quat_w_world.shape == (1, 4)
assert camera.data.quat_w_opengl.shape == (1, 4)
assert camera.data.intrinsic_matrices.shape == (1, 3, 3)
assert camera.data.image_shape == (camera_cfg.height, camera_cfg.width)
assert camera.data.info == [{camera_cfg.data_types[0]: None}]
# Simulate physics
for _ in range(10):
# perform rendering
sim.step()
# update camera
camera.update(sim.cfg.dt)
# check image data
for im_data in camera.data.output.values():
assert im_data.shape == (1, camera_cfg.height, camera_cfg.width, 1)
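# For reference (illustrative only, not executed by the tests): a single camera's
# depth buffer can be pulled to the CPU for inspection with something like
#   depth = camera.data.output["distance_to_image_plane"][0].cpu().numpy()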
def test_camera_init_offset(setup_sim_camera):
"""Test camera initialization with offset using different conventions."""
sim, camera_cfg, dt = setup_sim_camera
# define the same offset in all conventions
# -- ROS convention
cam_cfg_offset_ros = copy.deepcopy(camera_cfg)
cam_cfg_offset_ros.update_latest_camera_pose = True
cam_cfg_offset_ros.offset = CameraCfg.OffsetCfg(
pos=POSITION,
rot=QUAT_ROS,
convention="ros",
)
cam_cfg_offset_ros.prim_path = "/World/CameraOffsetRos"
camera_ros = Camera(cam_cfg_offset_ros)
# -- OpenGL convention
cam_cfg_offset_opengl = copy.deepcopy(camera_cfg)
cam_cfg_offset_opengl.update_latest_camera_pose = True
cam_cfg_offset_opengl.offset = CameraCfg.OffsetCfg(
pos=POSITION,
rot=QUAT_OPENGL,
convention="opengl",
)
cam_cfg_offset_opengl.prim_path = "/World/CameraOffsetOpengl"
camera_opengl = Camera(cam_cfg_offset_opengl)
# -- World convention
cam_cfg_offset_world = copy.deepcopy(camera_cfg)
cam_cfg_offset_world.update_latest_camera_pose = True
cam_cfg_offset_world.offset = CameraCfg.OffsetCfg(
pos=POSITION,
rot=QUAT_WORLD,
convention="world",
)
cam_cfg_offset_world.prim_path = "/World/CameraOffsetWorld"
camera_world = Camera(cam_cfg_offset_world)
# play sim
sim.reset()
# retrieve camera pose using USD API
prim_tf_ros = camera_ros._sensor_prims[0].ComputeLocalToWorldTransform(Usd.TimeCode.Default())
prim_tf_opengl = camera_opengl._sensor_prims[0].ComputeLocalToWorldTransform(Usd.TimeCode.Default())
prim_tf_world = camera_world._sensor_prims[0].ComputeLocalToWorldTransform(Usd.TimeCode.Default())
# convert them from column-major to row-major
prim_tf_ros = np.transpose(prim_tf_ros)
prim_tf_opengl = np.transpose(prim_tf_opengl)
prim_tf_world = np.transpose(prim_tf_world)
# check that all transforms are set correctly
np.testing.assert_allclose(prim_tf_ros[0:3, 3], cam_cfg_offset_ros.offset.pos)
np.testing.assert_allclose(prim_tf_opengl[0:3, 3], cam_cfg_offset_opengl.offset.pos)
np.testing.assert_allclose(prim_tf_world[0:3, 3], cam_cfg_offset_world.offset.pos)
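    # note: USD stores camera orientations in its own (OpenGL-style) convention, so the
    # prim transforms of all three cameras should match the same OpenGL quaternion.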
np.testing.assert_allclose(
convert_quat(tf.Rotation.from_matrix(prim_tf_ros[:3, :3]).as_quat(), "wxyz"),
cam_cfg_offset_opengl.offset.rot,
rtol=1e-5,
)
np.testing.assert_allclose(
convert_quat(tf.Rotation.from_matrix(prim_tf_opengl[:3, :3]).as_quat(), "wxyz"),
cam_cfg_offset_opengl.offset.rot,
rtol=1e-5,
)
np.testing.assert_allclose(
convert_quat(tf.Rotation.from_matrix(prim_tf_world[:3, :3]).as_quat(), "wxyz"),
cam_cfg_offset_opengl.offset.rot,
rtol=1e-5,
)
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
sim.step()
# check if transform correctly set in output
np.testing.assert_allclose(camera_ros.data.pos_w[0].cpu().numpy(), cam_cfg_offset_ros.offset.pos, rtol=1e-5)
np.testing.assert_allclose(camera_ros.data.quat_w_ros[0].cpu().numpy(), QUAT_ROS, rtol=1e-5)
np.testing.assert_allclose(camera_ros.data.quat_w_opengl[0].cpu().numpy(), QUAT_OPENGL, rtol=1e-5)
np.testing.assert_allclose(camera_ros.data.quat_w_world[0].cpu().numpy(), QUAT_WORLD, rtol=1e-5)
def test_multi_camera_init(setup_sim_camera):
"""Test multi-camera initialization."""
sim, camera_cfg, dt = setup_sim_camera
# create two cameras with different prim paths
# -- camera 1
cam_cfg_1 = copy.deepcopy(camera_cfg)
cam_cfg_1.prim_path = "/World/Camera_1"
cam_1 = Camera(cam_cfg_1)
# -- camera 2
cam_cfg_2 = copy.deepcopy(camera_cfg)
cam_cfg_2.prim_path = "/World/Camera_2"
cam_2 = Camera(cam_cfg_2)
# play sim
sim.reset()
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
sim.step()
# Simulate physics
for _ in range(10):
# perform rendering
sim.step()
# update camera
cam_1.update(dt)
cam_2.update(dt)
# check image data
for cam in [cam_1, cam_2]:
for im_data in cam.data.output.values():
assert im_data.shape == (1, camera_cfg.height, camera_cfg.width, 1)
def test_multi_camera_with_different_resolution(setup_sim_camera):
"""Test multi-camera initialization with cameras having different image resolutions."""
sim, camera_cfg, dt = setup_sim_camera
# create two cameras with different prim paths
# -- camera 1
cam_cfg_1 = copy.deepcopy(camera_cfg)
cam_cfg_1.prim_path = "/World/Camera_1"
cam_1 = Camera(cam_cfg_1)
# -- camera 2
cam_cfg_2 = copy.deepcopy(camera_cfg)
cam_cfg_2.prim_path = "/World/Camera_2"
    # use a different resolution than camera 1 so the test actually exercises mixed sizes
    cam_cfg_2.height = 480
    cam_cfg_2.width = 640
cam_2 = Camera(cam_cfg_2)
# play sim
sim.reset()
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
sim.step()
# perform rendering
sim.step()
# update camera
cam_1.update(dt)
cam_2.update(dt)
# check image sizes
assert cam_1.data.output["distance_to_image_plane"].shape == (1, camera_cfg.height, camera_cfg.width, 1)
assert cam_2.data.output["distance_to_image_plane"].shape == (1, cam_cfg_2.height, cam_cfg_2.width, 1)
def test_camera_init_intrinsic_matrix(setup_sim_camera):
"""Test camera initialization from intrinsic matrix."""
sim, camera_cfg, dt = setup_sim_camera
# get the first camera
camera_1 = Camera(cfg=camera_cfg)
# get intrinsic matrix
sim.reset()
intrinsic_matrix = camera_1.data.intrinsic_matrices[0].cpu().flatten().tolist()
teardown(sim)
# reinit the first camera
sim, camera_cfg, dt = setup()
camera_1 = Camera(cfg=camera_cfg)
# initialize from intrinsic matrix
intrinsic_camera_cfg = CameraCfg(
height=HEIGHT,
width=WIDTH,
prim_path="/World/Camera_2",
update_period=0,
data_types=["distance_to_image_plane"],
spawn=sim_utils.PinholeCameraCfg.from_intrinsic_matrix(
intrinsic_matrix=intrinsic_matrix,
width=WIDTH,
height=HEIGHT,
focal_length=24.0,
focus_distance=400.0,
clipping_range=(0.1, 1.0e5),
),
)
camera_2 = Camera(cfg=intrinsic_camera_cfg)
# play sim
sim.reset()
# update cameras
camera_1.update(dt)
camera_2.update(dt)
# check image data
torch.testing.assert_close(
camera_1.data.output["distance_to_image_plane"],
camera_2.data.output["distance_to_image_plane"],
rtol=5e-3,
atol=1e-4,
)
# check that both intrinsic matrices are the same
torch.testing.assert_close(
camera_1.data.intrinsic_matrices[0],
camera_2.data.intrinsic_matrices[0],
rtol=5e-3,
atol=1e-4,
)
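# Illustrative helper (a sketch, not used by the tests above): for a pinhole camera in
# Isaac Sim, the f_x entry of the intrinsic matrix follows from the spawn parameters as
# below, assuming square pixels.
def _expected_focal_px(width: int, focal_length: float, horizontal_aperture: float) -> float:
    """Return the expected f_x entry (in pixels) of the intrinsic matrix."""
    return width * focal_length / horizontal_aperture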
def test_camera_set_world_poses(setup_sim_camera):
"""Test camera function to set specific world pose."""
sim, camera_cfg, dt = setup_sim_camera
# enable update latest camera pose
camera_cfg.update_latest_camera_pose = True
# init camera
camera = Camera(camera_cfg)
# play sim
sim.reset()
# convert to torch tensors
position = torch.tensor([POSITION], dtype=torch.float32, device=camera.device)
orientation = torch.tensor([QUAT_WORLD], dtype=torch.float32, device=camera.device)
# set new pose
camera.set_world_poses(position.clone(), orientation.clone(), convention="world")
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
sim.step()
# check if transform correctly set in output
torch.testing.assert_close(camera.data.pos_w, position)
torch.testing.assert_close(camera.data.quat_w_world, orientation)
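# note: set_world_poses() also accepts "ros" and "opengl" for its convention argument;
# "world" is used above to match the QUAT_WORLD constant.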
def test_camera_set_world_poses_from_view(setup_sim_camera):
"""Test camera function to set specific world pose from view."""
sim, camera_cfg, dt = setup_sim_camera
# enable update latest camera pose
camera_cfg.update_latest_camera_pose = True
# init camera
camera = Camera(camera_cfg)
# play sim
sim.reset()
# convert to torch tensors
eyes = torch.tensor([POSITION], dtype=torch.float32, device=camera.device)
targets = torch.tensor([[0.0, 0.0, 0.0]], dtype=torch.float32, device=camera.device)
quat_ros_gt = torch.tensor([QUAT_ROS], dtype=torch.float32, device=camera.device)
# set new pose
camera.set_world_poses_from_view(eyes.clone(), targets.clone())
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
sim.step()
# check if transform correctly set in output
torch.testing.assert_close(camera.data.pos_w, eyes)
torch.testing.assert_close(camera.data.quat_w_ros, quat_ros_gt)
def test_intrinsic_matrix(setup_sim_camera):
"""Checks that the camera's set and retrieve methods work for intrinsic matrix."""
sim, camera_cfg, dt = setup_sim_camera
# enable update latest camera pose
camera_cfg.update_latest_camera_pose = True
# init camera
camera = Camera(camera_cfg)
# play sim
sim.reset()
# Desired properties (obtained from realsense camera at 320x240 resolution)
rs_intrinsic_matrix = [229.8, 0.0, 160.0, 0.0, 229.8, 120.0, 0.0, 0.0, 1.0]
rs_intrinsic_matrix = torch.tensor(rs_intrinsic_matrix, device=camera.device).reshape(3, 3).unsqueeze(0)
# Set matrix into simulator
camera.set_intrinsic_matrices(rs_intrinsic_matrix.clone())
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
sim.step()
# Simulate physics
for _ in range(10):
# perform rendering
sim.step()
# update camera
camera.update(dt)
# Check that matrix is correct
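    # note: only f_x, f_y, c_x and c_y are compared; a pinhole camera prim has no skew
    # term, so the remaining entries are fixed by construction.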
torch.testing.assert_close(rs_intrinsic_matrix[0, 0, 0], camera.data.intrinsic_matrices[0, 0, 0])
torch.testing.assert_close(rs_intrinsic_matrix[0, 1, 1], camera.data.intrinsic_matrices[0, 1, 1])
torch.testing.assert_close(rs_intrinsic_matrix[0, 0, 2], camera.data.intrinsic_matrices[0, 0, 2])
torch.testing.assert_close(rs_intrinsic_matrix[0, 1, 2], camera.data.intrinsic_matrices[0, 1, 2])
def test_depth_clipping(setup_sim_camera):
"""Test depth clipping.
.. note::
This test is the same for all camera models to enforce the same clipping behavior.
"""
# get camera cfgs
sim, _, dt = setup_sim_camera
camera_cfg_zero = CameraCfg(
prim_path="/World/CameraZero",
offset=CameraCfg.OffsetCfg(pos=(2.5, 2.5, 6.0), rot=(-0.125, 0.362, 0.873, -0.302), convention="ros"),
        spawn=sim_utils.PinholeCameraCfg.from_intrinsic_matrix(
focal_length=38.0,
intrinsic_matrix=[380.08, 0.0, 467.79, 0.0, 380.08, 262.05, 0.0, 0.0, 1.0],
height=540,
width=960,
clipping_range=(0.1, 10),
),
height=540,
width=960,
data_types=["distance_to_image_plane", "distance_to_camera"],
depth_clipping_behavior="zero",
)
camera_zero = Camera(camera_cfg_zero)
camera_cfg_none = copy.deepcopy(camera_cfg_zero)
camera_cfg_none.prim_path = "/World/CameraNone"
camera_cfg_none.depth_clipping_behavior = "none"
camera_none = Camera(camera_cfg_none)
camera_cfg_max = copy.deepcopy(camera_cfg_zero)
camera_cfg_max.prim_path = "/World/CameraMax"
camera_cfg_max.depth_clipping_behavior = "max"
camera_max = Camera(camera_cfg_max)
# Play sim
sim.reset()
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
sim.step()
camera_zero.update(dt)
camera_none.update(dt)
camera_max.update(dt)
# none clipping should contain inf values
assert torch.isinf(camera_none.data.output["distance_to_camera"]).any()
assert torch.isinf(camera_none.data.output["distance_to_image_plane"]).any()
assert (
camera_none.data.output["distance_to_camera"][~torch.isinf(camera_none.data.output["distance_to_camera"])].min()
>= camera_cfg_zero.spawn.clipping_range[0]
)
assert (
camera_none.data.output["distance_to_camera"][~torch.isinf(camera_none.data.output["distance_to_camera"])].max()
<= camera_cfg_zero.spawn.clipping_range[1]
)
assert (
camera_none.data.output["distance_to_image_plane"][
~torch.isinf(camera_none.data.output["distance_to_image_plane"])
].min()
>= camera_cfg_zero.spawn.clipping_range[0]
)
    assert (
        camera_none.data.output["distance_to_image_plane"][
            ~torch.isinf(camera_none.data.output["distance_to_image_plane"])
        ].max()
        <= camera_cfg_zero.spawn.clipping_range[1]
    )
# zero clipping should result in zero values
assert torch.all(
camera_zero.data.output["distance_to_camera"][torch.isinf(camera_none.data.output["distance_to_camera"])] == 0.0
)
assert torch.all(
camera_zero.data.output["distance_to_image_plane"][
torch.isinf(camera_none.data.output["distance_to_image_plane"])
]
== 0.0
)
assert (
camera_zero.data.output["distance_to_camera"][camera_zero.data.output["distance_to_camera"] != 0.0].min()
>= camera_cfg_zero.spawn.clipping_range[0]
)
assert camera_zero.data.output["distance_to_camera"].max() <= camera_cfg_zero.spawn.clipping_range[1]
assert (
camera_zero.data.output["distance_to_image_plane"][
camera_zero.data.output["distance_to_image_plane"] != 0.0
].min()
>= camera_cfg_zero.spawn.clipping_range[0]
)
assert camera_zero.data.output["distance_to_image_plane"].max() <= camera_cfg_zero.spawn.clipping_range[1]
# max clipping should result in max values
assert torch.all(
camera_max.data.output["distance_to_camera"][torch.isinf(camera_none.data.output["distance_to_camera"])]
== camera_cfg_zero.spawn.clipping_range[1]
)
assert torch.all(
camera_max.data.output["distance_to_image_plane"][
torch.isinf(camera_none.data.output["distance_to_image_plane"])
]
== camera_cfg_zero.spawn.clipping_range[1]
)
assert camera_max.data.output["distance_to_camera"].min() >= camera_cfg_zero.spawn.clipping_range[0]
assert camera_max.data.output["distance_to_camera"].max() <= camera_cfg_zero.spawn.clipping_range[1]
assert camera_max.data.output["distance_to_image_plane"].min() >= camera_cfg_zero.spawn.clipping_range[0]
assert camera_max.data.output["distance_to_image_plane"].max() <= camera_cfg_zero.spawn.clipping_range[1]
def test_camera_resolution_all_colorize(setup_sim_camera):
"""Test camera resolution is correctly set for all types with colorization enabled."""
# Add all types
sim, camera_cfg, dt = setup_sim_camera
camera_cfg.data_types = [
"rgb",
"rgba",
"depth",
"distance_to_camera",
"distance_to_image_plane",
"normals",
"motion_vectors",
"semantic_segmentation",
"instance_segmentation_fast",
"instance_id_segmentation_fast",
]
camera_cfg.colorize_instance_id_segmentation = True
camera_cfg.colorize_instance_segmentation = True
camera_cfg.colorize_semantic_segmentation = True
# Create camera
camera = Camera(camera_cfg)
# Play sim
sim.reset()
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
sim.step()
camera.update(dt)
# expected sizes
hw_1c_shape = (1, camera_cfg.height, camera_cfg.width, 1)
hw_2c_shape = (1, camera_cfg.height, camera_cfg.width, 2)
hw_3c_shape = (1, camera_cfg.height, camera_cfg.width, 3)
hw_4c_shape = (1, camera_cfg.height, camera_cfg.width, 4)
# access image data and compare shapes
output = camera.data.output
assert output["rgb"].shape == hw_3c_shape
assert output["rgba"].shape == hw_4c_shape
assert output["depth"].shape == hw_1c_shape
assert output["distance_to_camera"].shape == hw_1c_shape
assert output["distance_to_image_plane"].shape == hw_1c_shape
assert output["normals"].shape == hw_3c_shape
assert output["motion_vectors"].shape == hw_2c_shape
assert output["semantic_segmentation"].shape == hw_4c_shape
assert output["instance_segmentation_fast"].shape == hw_4c_shape
assert output["instance_id_segmentation_fast"].shape == hw_4c_shape
# access image data and compare dtype
output = camera.data.output
assert output["rgb"].dtype == torch.uint8
assert output["rgba"].dtype == torch.uint8
assert output["depth"].dtype == torch.float
assert output["distance_to_camera"].dtype == torch.float
assert output["distance_to_image_plane"].dtype == torch.float
assert output["normals"].dtype == torch.float
assert output["motion_vectors"].dtype == torch.float
assert output["semantic_segmentation"].dtype == torch.uint8
assert output["instance_segmentation_fast"].dtype == torch.uint8
assert output["instance_id_segmentation_fast"].dtype == torch.uint8
def test_camera_resolution_no_colorize(setup_sim_camera):
"""Test camera resolution is correctly set for all types with no colorization enabled."""
# Add all types
sim, camera_cfg, dt = setup_sim_camera
camera_cfg.data_types = [
"rgb",
"rgba",
"depth",
"distance_to_camera",
"distance_to_image_plane",
"normals",
"motion_vectors",
"semantic_segmentation",
"instance_segmentation_fast",
"instance_id_segmentation_fast",
]
camera_cfg.colorize_instance_id_segmentation = False
camera_cfg.colorize_instance_segmentation = False
camera_cfg.colorize_semantic_segmentation = False
# Create camera
camera = Camera(camera_cfg)
# Play sim
sim.reset()
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(12):
sim.step()
camera.update(dt)
# expected sizes
hw_1c_shape = (1, camera_cfg.height, camera_cfg.width, 1)
hw_2c_shape = (1, camera_cfg.height, camera_cfg.width, 2)
hw_3c_shape = (1, camera_cfg.height, camera_cfg.width, 3)
hw_4c_shape = (1, camera_cfg.height, camera_cfg.width, 4)
# access image data and compare shapes
output = camera.data.output
assert output["rgb"].shape == hw_3c_shape
assert output["rgba"].shape == hw_4c_shape
assert output["depth"].shape == hw_1c_shape
assert output["distance_to_camera"].shape == hw_1c_shape
assert output["distance_to_image_plane"].shape == hw_1c_shape
assert output["normals"].shape == hw_3c_shape
assert output["motion_vectors"].shape == hw_2c_shape
assert output["semantic_segmentation"].shape == hw_1c_shape
assert output["instance_segmentation_fast"].shape == hw_1c_shape
assert output["instance_id_segmentation_fast"].shape == hw_1c_shape
# access image data and compare dtype
output = camera.data.output
assert output["rgb"].dtype == torch.uint8
assert output["rgba"].dtype == torch.uint8
assert output["depth"].dtype == torch.float
assert output["distance_to_camera"].dtype == torch.float
assert output["distance_to_image_plane"].dtype == torch.float
assert output["normals"].dtype == torch.float
assert output["motion_vectors"].dtype == torch.float
assert output["semantic_segmentation"].dtype == torch.int32
assert output["instance_segmentation_fast"].dtype == torch.int32
assert output["instance_id_segmentation_fast"].dtype == torch.int32
def test_camera_large_resolution_all_colorize(setup_sim_camera):
"""Test camera resolution is correctly set for all types with colorization enabled."""
# Add all types
sim, camera_cfg, dt = setup_sim_camera
camera_cfg.data_types = [
"rgb",
"rgba",
"depth",
"distance_to_camera",
"distance_to_image_plane",
"normals",
"motion_vectors",
"semantic_segmentation",
"instance_segmentation_fast",
"instance_id_segmentation_fast",
]
camera_cfg.colorize_instance_id_segmentation = True
camera_cfg.colorize_instance_segmentation = True
camera_cfg.colorize_semantic_segmentation = True
camera_cfg.width = 512
camera_cfg.height = 512
# Create camera
camera = Camera(camera_cfg)
# Play sim
sim.reset()
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
sim.step()
camera.update(dt)
# expected sizes
hw_1c_shape = (1, camera_cfg.height, camera_cfg.width, 1)
hw_2c_shape = (1, camera_cfg.height, camera_cfg.width, 2)
hw_3c_shape = (1, camera_cfg.height, camera_cfg.width, 3)
hw_4c_shape = (1, camera_cfg.height, camera_cfg.width, 4)
# access image data and compare shapes
output = camera.data.output
assert output["rgb"].shape == hw_3c_shape
assert output["rgba"].shape == hw_4c_shape
assert output["depth"].shape == hw_1c_shape
assert output["distance_to_camera"].shape == hw_1c_shape
assert output["distance_to_image_plane"].shape == hw_1c_shape
assert output["normals"].shape == hw_3c_shape
assert output["motion_vectors"].shape == hw_2c_shape
assert output["semantic_segmentation"].shape == hw_4c_shape
assert output["instance_segmentation_fast"].shape == hw_4c_shape
assert output["instance_id_segmentation_fast"].shape == hw_4c_shape
# access image data and compare dtype
output = camera.data.output
assert output["rgb"].dtype == torch.uint8
assert output["rgba"].dtype == torch.uint8
assert output["depth"].dtype == torch.float
assert output["distance_to_camera"].dtype == torch.float
assert output["distance_to_image_plane"].dtype == torch.float
assert output["normals"].dtype == torch.float
assert output["motion_vectors"].dtype == torch.float
assert output["semantic_segmentation"].dtype == torch.uint8
assert output["instance_segmentation_fast"].dtype == torch.uint8
assert output["instance_id_segmentation_fast"].dtype == torch.uint8
def test_camera_resolution_rgb_only(setup_sim_camera):
"""Test camera resolution is correctly set for RGB only."""
    # use only the rgb data type
sim, camera_cfg, dt = setup_sim_camera
camera_cfg.data_types = ["rgb"]
# Create camera
camera = Camera(camera_cfg)
# Play sim
sim.reset()
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
sim.step()
camera.update(dt)
# expected sizes
hw_3c_shape = (1, camera_cfg.height, camera_cfg.width, 3)
# access image data and compare shapes
output = camera.data.output
assert output["rgb"].shape == hw_3c_shape
# access image data and compare dtype
assert output["rgb"].dtype == torch.uint8
def test_camera_resolution_rgba_only(setup_sim_camera):
"""Test camera resolution is correctly set for RGBA only."""
    # use only the rgba data type
sim, camera_cfg, dt = setup_sim_camera
camera_cfg.data_types = ["rgba"]
# Create camera
camera = Camera(camera_cfg)
# Play sim
sim.reset()
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
sim.step()
camera.update(dt)
# expected sizes
hw_4c_shape = (1, camera_cfg.height, camera_cfg.width, 4)
# access image data and compare shapes
output = camera.data.output
assert output["rgba"].shape == hw_4c_shape
# access image data and compare dtype
assert output["rgba"].dtype == torch.uint8
def test_camera_resolution_depth_only(setup_sim_camera):
"""Test camera resolution is correctly set for depth only."""
    # use only the depth data type
sim, camera_cfg, dt = setup_sim_camera
camera_cfg.data_types = ["depth"]
# Create camera
camera = Camera(camera_cfg)
# Play sim
sim.reset()
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
sim.step()
camera.update(dt)
# expected sizes
hw_1c_shape = (1, camera_cfg.height, camera_cfg.width, 1)
# access image data and compare shapes
output = camera.data.output
assert output["depth"].shape == hw_1c_shape
# access image data and compare dtype
assert output["depth"].dtype == torch.float
def test_throughput(setup_sim_camera):
"""Checks that the single camera gets created properly with a rig."""
# Create directory temp dir to dump the results
file_dir = os.path.dirname(os.path.realpath(__file__))
temp_dir = os.path.join(file_dir, "output", "camera", "throughput")
os.makedirs(temp_dir, exist_ok=True)
# Create replicator writer
rep_writer = rep.BasicWriter(output_dir=temp_dir, frame_padding=3)
# create camera
sim, camera_cfg, dt = setup_sim_camera
camera_cfg.height = 480
camera_cfg.width = 640
camera = Camera(camera_cfg)
# Play simulator
sim.reset()
# Set camera pose
eyes = torch.tensor([[2.5, 2.5, 2.5]], dtype=torch.float32, device=camera.device)
targets = torch.tensor([[0.0, 0.0, 0.0]], dtype=torch.float32, device=camera.device)
camera.set_world_poses_from_view(eyes, targets)
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
sim.step()
# Simulate physics
for _ in range(5):
# perform rendering
sim.step()
# update camera
with Timer(f"Time taken for updating camera with shape {camera.image_shape}"):
camera.update(dt)
# Save images
with Timer(f"Time taken for writing data with shape {camera.image_shape} "):
# Pack data back into replicator format to save them using its writer
rep_output = {"annotators": {}}
camera_data = convert_dict_to_backend({k: v[0] for k, v in camera.data.output.items()}, backend="numpy")
for key, data, info in zip(camera_data.keys(), camera_data.values(), camera.data.info[0].values()):
if info is not None:
rep_output["annotators"][key] = {"render_product": {"data": data, **info}}
else:
rep_output["annotators"][key] = {"render_product": {"data": data}}
# Save images
rep_output["trigger_outputs"] = {"on_time": camera.frame[0]}
rep_writer.write(rep_output)
print("----------------------------------------")
# Check image data
for im_data in camera.data.output.values():
assert im_data.shape == (1, camera_cfg.height, camera_cfg.width, 1)
def test_sensor_print(setup_sim_camera):
"""Test sensor print is working correctly."""
# Create sensor
sim, camera_cfg, dt = setup_sim_camera
sensor = Camera(cfg=camera_cfg)
# Play sim
sim.reset()
# print info
print(sensor)
def _populate_scene():
"""Add prims to the scene."""
# Ground-plane
cfg = sim_utils.GroundPlaneCfg()
cfg.func("/World/defaultGroundPlane", cfg)
# Lights
cfg = sim_utils.SphereLightCfg()
cfg.func("/World/Light/GreySphere", cfg, translation=(4.5, 3.5, 10.0))
cfg.func("/World/Light/WhiteSphere", cfg, translation=(-4.5, 3.5, 10.0))
# Random objects
    random.seed(0)
    np.random.seed(0)  # the positions below are sampled with np.random, so seed it as well
for i in range(10):
# sample random position
position = np.random.rand(3) - np.asarray([0.05, 0.05, -1.0])
position *= np.asarray([1.5, 1.5, 0.5])
# create prim
prim_type = random.choice(["Cube", "Sphere", "Cylinder"])
prim = sim_utils.create_prim(
f"/World/Objects/Obj_{i:02d}",
prim_type,
translation=position,
scale=(0.25, 0.25, 0.25),
semantic_label=prim_type,
)
# cast to geom prim
geom_prim = getattr(UsdGeom, prim_type)(prim)
# set random color
color = Gf.Vec3f(random.random(), random.random(), random.random())
geom_prim.CreateDisplayColorAttr()
geom_prim.GetDisplayColorAttr().Set([color])
# add rigid properties
SingleGeometryPrim(f"/World/Objects/Obj_{i:02d}", collision=True)
SingleRigidPrim(f"/World/Objects/Obj_{i:02d}", mass=5.0)
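if __name__ == "__main__":
    # convenience entry point (a sketch): normally this file is collected and run by pytest
    pytest.main([__file__, "-v"])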