File size: 3,847 Bytes
406662d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Launch Isaac Sim Simulator first."""

from isaaclab.app import AppLauncher

# Launch omniverse app
app_launcher = AppLauncher(headless=True, enable_cameras=True)
simulation_app = app_launcher.app

import os
import subprocess
import sys
import time

import env_benchmark_test_utils as utils
import gymnasium as gym
import pytest

import carb

from isaaclab_rl.utils.pretrained_checkpoint import WORKFLOW_EXPERIMENT_NAME_VARIABLE, WORKFLOW_TRAINER


def setup_environment():
    """Collect the Isaac task specs to benchmark and apply global sim settings.

    Returns:
        A list of gymnasium task specs whose id contains ``"Isaac"`` and does not
        end with ``"Play-v0"``, sorted alphabetically by task id so the pytest
        parametrization order is deterministic.
    """
    # Acquire all Isaac environment names, skipping the "-Play" demo variants.
    registered_task_specs = [
        task_spec
        for task_spec in gym.registry.values()
        if "Isaac" in task_spec.id and not task_spec.id.endswith("Play-v0")
    ]

    # Sort environments by name for a stable, reproducible test ordering.
    registered_task_specs.sort(key=lambda spec: spec.id)

    # This flag is necessary to prevent a bug where the simulation gets stuck randomly
    # when running the test on many environments.
    carb_settings_iface = carb.settings.get_settings()
    carb_settings_iface.set_bool("/physics/cooking/ujitsoCollisionCooking", False)

    return registered_task_specs


def train_job(workflow, task, env_config, num_gpus):
    """Run one training job as a subprocess and return its wall-clock duration.

    Args:
        workflow: Name of the RL workflow; keys into ``WORKFLOW_TRAINER`` and
            ``WORKFLOW_EXPERIMENT_NAME_VARIABLE``.
        task: Gymnasium task id to train.
        env_config: Per-environment config dict; ``max_iterations`` is honored if set.
        num_gpus: Number of GPUs; values > 1 enable distributed training.

    Returns:
        Elapsed time in seconds spent inside the training subprocess. Note the
        subprocess exit code is not checked here; success is evaluated later
        from the training logs.
    """
    cmd = [
        sys.executable,
        WORKFLOW_TRAINER[workflow],
        "--task",
        task,
        "--enable_cameras",
        "--headless",
    ]

    # Add max iterations if specified
    max_iterations = env_config.get("max_iterations")
    if max_iterations is not None:
        cmd.extend(["--max_iterations", str(max_iterations)])

    if num_gpus > 1:
        # Fix: the launcher flag is --nproc_per_node (was misspelled
        # --nnprod_per_node, which the trainer would not recognize).
        cmd.append(f"--nproc_per_node={num_gpus}")
        cmd.append("--distributed")

    # Add experiment name variable so logs land under a per-task experiment dir.
    cmd.append(f"{WORKFLOW_EXPERIMENT_NAME_VARIABLE[workflow]}={task}")

    print("Running : " + " ".join(cmd))

    start_time = time.time()
    subprocess.run(cmd)
    duration = time.time() - start_time

    return duration


@pytest.mark.parametrize("task_spec", setup_environment())
def test_train_environments(workflow, task_spec, config_path, mode, num_gpus, kpi_store):
    """Train environments provided in the config file, save KPIs, and evaluate against thresholds"""
    # Guard: the task must expose a config entry point for the requested workflow.
    if workflow + "_cfg_entry_point" not in task_spec.kwargs:
        pytest.skip(f"Workflow {workflow} not supported for task {task_spec.id}")

    task = task_spec.id

    # Resolve the config path relative to this file unless it is already absolute.
    if not config_path.startswith("/"):
        full_config_path = os.path.join(os.path.dirname(__file__), config_path)
    else:
        full_config_path = config_path

    # Look up the per-task config; skip the test when none is defined for this mode.
    env_config = utils.get_env_config(utils.get_env_configs(full_config_path), mode, workflow, task)
    if env_config is None:
        pytest.skip(f"No config found for task {task} in {mode} mode")

    job_name = f"{workflow}:{task}"
    print(f">>> Training: {job_name}")

    # Launch the training subprocess and time it.
    duration = train_job(workflow, task, env_config, num_gpus)

    print(f">>> Evaluating trained: {job_name}")
    # Check if training logs were output and all thresholds passed
    kpi_payload = utils.evaluate_job(workflow, task, env_config, duration)

    success_flag = kpi_payload["success"]
    print(f">>> Trained {job_name} success flag: {success_flag}.")
    print("-" * 80)

    # Record the KPI payload for the session-level report.
    kpi_store[job_name] = kpi_payload

    # Fail the test (after storing KPIs) when thresholds were not met.
    if not success_flag:
        pytest.fail(f"Job {job_name} failed to meet success criteria")