File size: 15,223 Bytes
406662d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""A runner script for all the tests within source directory.

.. code-block:: bash

    ./isaaclab.sh -p tools/run_all_tests.py

    # for dry run
    ./isaaclab.sh -p tools/run_all_tests.py --discover_only

    # for quiet run
    ./isaaclab.sh -p tools/run_all_tests.py --quiet

    # for increasing timeout (default is 600 seconds)
    ./isaaclab.sh -p tools/run_all_tests.py --timeout 1000

"""

import argparse
import logging
import os
import re
import subprocess
import sys
import time
from datetime import datetime
from pathlib import Path

from prettytable import PrettyTable

# Local imports
from test_settings import DEFAULT_TIMEOUT, ISAACLAB_PATH, PER_TEST_TIMEOUTS, TESTS_TO_SKIP


def parse_args() -> argparse.Namespace:
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(description="Run all tests under current directory.")
    # add arguments
    parser.add_argument(
        "--skip_tests",
        default="",
        help="Space separated list of tests to skip in addition to those in tests_to_skip.py.",
        type=str,
        nargs="*",
    )

    # configure default test directory (source directory)
    default_test_dir = os.path.join(ISAACLAB_PATH, "source")

    parser.add_argument(
        "--test_dir", type=str, default=default_test_dir, help="Path to the directory containing the tests."
    )

    # configure default logging path based on time stamp
    log_file_name = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + ".log"
    default_log_path = os.path.join(ISAACLAB_PATH, "logs", "test_results", log_file_name)

    parser.add_argument(
        "--log_path", type=str, default=default_log_path, help="Path to the log file to store the results in."
    )
    parser.add_argument("--discover_only", action="store_true", help="Only discover and print tests, don't run them.")
    parser.add_argument("--quiet", action="store_true", help="Don't print to console, only log to file.")
    parser.add_argument("--timeout", type=int, default=DEFAULT_TIMEOUT, help="Timeout for each test in seconds.")
    parser.add_argument("--extension", type=str, default=None, help="Run tests only for the given extension.")
    # parse arguments
    args = parser.parse_args()
    return args


def test_all(
    test_dir: str,
    tests_to_skip: list[str],
    log_path: str,
    timeout: float = DEFAULT_TIMEOUT,
    per_test_timeouts: dict[str, float] = {},
    discover_only: bool = False,
    quiet: bool = False,
    extension: str | None = None,
) -> bool:
    """Run all tests under the given directory.

    Args:
        test_dir: Path to the directory containing the tests.
        tests_to_skip: List of tests to skip.
        log_path: Path to the log file to store the results in.
        timeout: Timeout for each test in seconds. Defaults to DEFAULT_TIMEOUT.
        per_test_timeouts: A dictionary of tests and their timeouts in seconds. Any tests not listed here will use the
            timeout specified by `timeout`. Defaults to an empty dictionary.
        discover_only: If True, only discover and print the tests without running them. Defaults to False.
        quiet: If False, print the output of the tests to the terminal console (in addition to the log file).
            Defaults to False.
        extension: Run tests only for the given extension. Defaults to None, which means all extensions'
            tests will be run.
    Returns:
        True if all un-skipped tests pass or `discover_only` is True. Otherwise, False.

    Raises:
        ValueError: If any test to skip is not found under the given `test_dir`.

    """
    # Create the log directory if it doesn't exist
    os.makedirs(os.path.dirname(log_path), exist_ok=True)

    # Add file handler to log to file
    logging_handlers = [logging.FileHandler(log_path)]
    # We also want to print to console
    if not quiet:
        logging_handlers.append(logging.StreamHandler())
    # Set up logger
    logging.basicConfig(level=logging.INFO, format="%(message)s", handlers=logging_handlers)

    all_test_paths, test_paths, skipped_test_paths, test_timeouts = extract_tests_and_timeouts(
        test_dir, extension, tests_to_skip, timeout, per_test_timeouts
    )

    # Print tests to be run
    logging.info("\n" + "=" * 60 + "\n")
    logging.info(f"The following {len(all_test_paths)} tests were found:")
    for i, test_path in enumerate(all_test_paths):
        logging.info(f"{i + 1:02d}: {test_path}, timeout: {test_timeouts[test_path]}")
    logging.info("\n" + "=" * 60 + "\n")

    logging.info(f"The following {len(skipped_test_paths)} tests are marked to be skipped:")
    for i, test_path in enumerate(skipped_test_paths):
        logging.info(f"{i + 1:02d}: {test_path}")
    logging.info("\n" + "=" * 60 + "\n")

    # Exit if only discovering tests
    if discover_only:
        return True

    results = {}

    # Run each script and store results
    for test_path in test_paths:
        results[test_path] = {}
        before = time.time()
        logging.info("\n" + "-" * 60 + "\n")
        logging.info(f"[INFO] Running '{test_path}'\n")
        try:
            completed_process = subprocess.run(
                [sys.executable, test_path], check=True, capture_output=True, timeout=test_timeouts[test_path]
            )
        except subprocess.TimeoutExpired as e:
            logging.error(f"Timeout occurred: {e}")
            result = "TIMEDOUT"
            stdout = e.stdout
            stderr = e.stderr
        except subprocess.CalledProcessError as e:
            # When check=True is passed to subprocess.run() above, CalledProcessError is raised if the process returns a
            # non-zero exit code. The caveat is returncode is not correctly updated in this case, so we simply
            # catch the exception and set this test as FAILED
            result = "FAILED"
            stdout = e.stdout
            stderr = e.stderr
        except Exception as e:
            logging.error(f"Unexpected exception {e}. Please report this issue on the repository.")
            result = "FAILED"
            stdout = None
            stderr = None
        else:
            result = "COMPLETED"
            stdout = completed_process.stdout
            stderr = completed_process.stderr

        after = time.time()
        time_elapsed = after - before

        # Decode stdout and stderr
        stdout = stdout.decode("utf-8") if stdout is not None else ""
        stderr = stderr.decode("utf-8") if stderr is not None else ""

        if result == "COMPLETED":
            # Check for success message in the output
            success_pattern = r"Ran \d+ tests? in [\d.]+s\s+OK"
            if re.search(success_pattern, stdout) or re.search(success_pattern, stderr):
                result = "PASSED"
            else:
                result = "FAILED"

        # Write to log file
        logging.info(stdout)
        logging.info(stderr)
        logging.info(f"[INFO] Time elapsed: {time_elapsed:.2f} s")
        logging.info(f"[INFO] Result '{test_path}': {result}")
        # Collect results
        results[test_path]["time_elapsed"] = time_elapsed
        results[test_path]["result"] = result

    # Calculate the number and percentage of passing tests
    num_tests = len(all_test_paths)
    num_passing = len([test_path for test_path in test_paths if results[test_path]["result"] == "PASSED"])
    num_failing = len([test_path for test_path in test_paths if results[test_path]["result"] == "FAILED"])
    num_timing_out = len([test_path for test_path in test_paths if results[test_path]["result"] == "TIMEDOUT"])
    num_skipped = len(skipped_test_paths)

    if num_tests == 0:
        passing_percentage = 100
    else:
        passing_percentage = (num_passing + num_skipped) / num_tests * 100

    # Print summaries of test results
    summary_str = "\n\n"
    summary_str += "===================\n"
    summary_str += "Test Result Summary\n"
    summary_str += "===================\n"

    summary_str += f"Total: {num_tests}\n"
    summary_str += f"Passing: {num_passing}\n"
    summary_str += f"Failing: {num_failing}\n"
    summary_str += f"Skipped: {num_skipped}\n"
    summary_str += f"Timing Out: {num_timing_out}\n"

    summary_str += f"Passing Percentage: {passing_percentage:.2f}%\n"

    # Print time elapsed in hours, minutes, seconds
    total_time = sum([results[test_path]["time_elapsed"] for test_path in test_paths])

    summary_str += f"Total Time Elapsed: {total_time // 3600}h"
    summary_str += f"{total_time // 60 % 60}m"
    summary_str += f"{total_time % 60:.2f}s"

    summary_str += "\n\n=======================\n"
    summary_str += "Per Test Result Summary\n"
    summary_str += "=======================\n"

    # Construct table of results per test
    per_test_result_table = PrettyTable(field_names=["Test Path", "Result", "Time (s)"])
    per_test_result_table.align["Test Path"] = "l"
    per_test_result_table.align["Time (s)"] = "r"
    for test_path in test_paths:
        per_test_result_table.add_row(
            [test_path, results[test_path]["result"], f"{results[test_path]['time_elapsed']:0.2f}"]
        )

    for test_path in skipped_test_paths:
        per_test_result_table.add_row([test_path, "SKIPPED", "N/A"])

    summary_str += per_test_result_table.get_string()

    # Print summary to console and log file
    logging.info(summary_str)

    # Only count failing and timing out tests towards failure
    return num_failing + num_timing_out == 0


def extract_tests_and_timeouts(
    test_dir: str,
    extension: str | None = None,
    tests_to_skip: list[str] = [],
    timeout: float = DEFAULT_TIMEOUT,
    per_test_timeouts: dict[str, float] = {},
) -> tuple[list[str], list[str], list[str], dict[str, float]]:
    """Extract all tests under the given directory or extension and their respective timeouts.

    Args:
        test_dir: Path to the directory containing the tests.
        extension: Run tests only for the given extension. Defaults to None, which means all extensions'
            tests will be run.
        tests_to_skip: List of tests to skip.
        timeout: Timeout for each test in seconds. Defaults to DEFAULT_TIMEOUT.
        per_test_timeouts: A dictionary of tests and their timeouts in seconds. Any tests not listed here will use the
            timeout specified by `timeout`. Defaults to an empty dictionary.

    Returns:
        A tuple containing the paths of all tests, tests to run, tests to skip, and their respective timeouts.

    Raises:
        ValueError: If any test to skip is not found under the given `test_dir`.
    """

    # Discover all tests under current directory
    all_test_paths = [str(path) for path in Path(test_dir).resolve().rglob("*test_*.py")]
    skipped_test_paths = []
    test_paths = []
    # Check that all tests to skip are actually in the tests
    for test_to_skip in tests_to_skip:
        for test_path in all_test_paths:
            if test_to_skip in test_path:
                break
        else:
            raise ValueError(f"Test to skip '{test_to_skip}' not found in tests.")

    # Filter tests by extension
    if extension is not None:
        all_tests_in_selected_extension = []

        for test_path in all_test_paths:
            # Extract extension name from test path
            extension_name = test_path[test_path.find("extensions") :].split("/")[1]

            # Skip tests that are not in the selected extension
            if extension_name != extension:
                continue

            all_tests_in_selected_extension.append(test_path)

        all_test_paths = all_tests_in_selected_extension

    # Remove tests to skip from the list of tests to run
    if len(tests_to_skip) != 0:
        for test_path in all_test_paths:
            if any([test_to_skip in test_path for test_to_skip in tests_to_skip]):
                skipped_test_paths.append(test_path)
            else:
                test_paths.append(test_path)
    else:
        test_paths = all_test_paths

    # Sort test paths so they're always in the same order
    all_test_paths.sort()
    test_paths.sort()
    skipped_test_paths.sort()

    # Initialize all tests to have the same timeout
    test_timeouts = {test_path: timeout for test_path in all_test_paths}

    # Overwrite timeouts for specific tests
    for test_path_with_timeout, test_timeout in per_test_timeouts.items():
        for test_path in all_test_paths:
            if test_path_with_timeout in test_path:
                test_timeouts[test_path] = test_timeout

    return all_test_paths, test_paths, skipped_test_paths, test_timeouts


def warm_start_app():
    """Warm start the app to compile shaders before running the tests."""

    print("[INFO] Warm starting the simulation app before running tests.")
    before = time.time()
    # headless experience
    warm_start_output = subprocess.run(
        [
            sys.executable,
            "-c",
            "from isaaclab.app import AppLauncher; app_launcher = AppLauncher(headless=True); app_launcher.app.close()",
        ],
        capture_output=True,
    )
    if len(warm_start_output.stderr) > 0:
        if "omni::fabric::IStageReaderWriter" not in str(warm_start_output.stderr) and "scaling_governor" not in str(
            warm_start_output.stderr
        ):
            logging.error(f"Error warm starting the app: {str(warm_start_output.stderr)}")
            exit(1)

    # headless experience with rendering
    warm_start_rendering_output = subprocess.run(
        [
            sys.executable,
            "-c",
            (
                "from isaaclab.app import AppLauncher; app_launcher = AppLauncher(headless=True,"
                " enable_cameras=True); app_launcher.app.close()"
            ),
        ],
        capture_output=True,
    )
    if len(warm_start_rendering_output.stderr) > 0:
        if "omni::fabric::IStageReaderWriter" not in str(
            warm_start_rendering_output.stderr
        ) and "scaling_governor" not in str(warm_start_output.stderr):
            logging.error(f"Error warm starting the app with rendering: {str(warm_start_rendering_output.stderr)}")
            exit(1)

    after = time.time()
    time_elapsed = after - before
    print(f"[INFO] Warm start completed successfully in {time_elapsed:.2f} s")


if __name__ == "__main__":
    # parse command line arguments
    args = parse_args()

    # warm start the app
    warm_start_app()

    # add tests to skip to the list of tests to skip
    tests_to_skip = TESTS_TO_SKIP
    tests_to_skip += args.skip_tests

    # run all tests
    test_success = test_all(
        test_dir=args.test_dir,
        tests_to_skip=tests_to_skip,
        log_path=args.log_path,
        timeout=args.timeout,
        per_test_timeouts=PER_TEST_TIMEOUTS,
        discover_only=args.discover_only,
        quiet=args.quiet,
        extension=args.extension,
    )
    # update exit status based on all tests passing or not
    if not test_success:
        exit(1)