def str_to_bool(value) -> int:
    """
    Convert a string truth value to `1` (true) or `0` (false).

    True values are `y`, `yes`, `t`, `true`, `on`, and `1`; false values are
    `n`, `no`, `f`, `false`, `off`, and `0`. Any other input raises
    `ValueError`.
    """
    # Single dict lookup instead of two membership tests.
    _TRUTH_TABLE = {
        "y": 1, "yes": 1, "t": 1, "true": 1, "on": 1, "1": 1,
        "n": 0, "no": 0, "f": 0, "false": 0, "off": 0, "0": 0,
    }
    normalized = value.lower()
    try:
        return _TRUTH_TABLE[normalized]
    except KeyError:
        raise ValueError(f"invalid truth value {normalized}")
def get_int_from_env(env_keys, default):
    """
    Return the first non-negative integer env value found in `env_keys`, or `default`.

    Fix: the previous docstring said "first positive env value", but the code
    accepts any value `>= 0` — a variable explicitly set to `0` is returned.

    Args:
        env_keys: Iterable of environment variable names, checked in order.
        default: Value returned when no listed variable holds a non-negative integer.

    Raises:
        ValueError: if a listed variable is set to a non-integer string
            (the `int(...)` conversion fails).
    """
    for env_key in env_keys:
        # Unset variables read as -1, which fails the >= 0 test below.
        value = int(os.environ.get(env_key, -1))
        if value >= 0:
            return value
    return default
def parse_flag_from_env(key, default=False):
    """Return the truthy value for env var `key` if set, else `default`."""
    # str(default) yields "True"/"False", which str_to_bool understands.
    raw_value = os.environ.get(key, str(default))
    return str_to_bool(raw_value) == 1
def parse_choice_from_env(key, default="no"):
    """Return the raw string value of env var `key`, or `default` when unset."""
    return os.environ.get(key, str(default))
def are_libraries_initialized(*library_names: str) -> list:
    """
    Return the subset of `library_names` already imported in this process.

    Fix: the annotation previously claimed `Dict[str, bool]` and the docstring
    promised `key:bool` pairs, but the implementation has always returned a
    list of the names present in `sys.modules`. The annotation and docstring
    are corrected to match the actual (and caller-observed) behavior.
    """
    # Membership on sys.modules directly is an O(1) dict lookup; .keys() is redundant.
    return [lib_name for lib_name in library_names if lib_name in sys.modules]
def get_gpu_info():
    """
    Get GPU count and names using `nvidia-smi` instead of torch, so CUDA is
    never initialized. Largely based on the `gputil` library.

    Returns:
        tuple: `(gpu_names, gpu_count)` where `gpu_names` is a list of device
        name strings and `gpu_count` is the number of lines `nvidia-smi`
        reported.

    Raises:
        subprocess.CalledProcessError: if `nvidia-smi` exits non-zero.
        FileNotFoundError: if the executable cannot be launched.
    """
    if platform.system() == "Windows":
        # nvidia-smi may not be on PATH on Windows; fall back to the default
        # driver install location on the system drive.
        command = spawn.find_executable("nvidia-smi")
        if command is None:
            command = "%s\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe" % os.environ["systemdrive"]
    else:
        command = "nvidia-smi"

    output = subprocess.check_output(
        [command, "--query-gpu=count,name", "--format=csv,noheader"], universal_newlines=True
    )
    output = output.strip()
    # Bug fix: `universal_newlines=True` normalizes all line endings to "\n",
    # so splitting on os.linesep ("\r\n" on Windows) failed to split the
    # output into lines on the very platform the branch above targets.
    gpus = output.split("\n")

    gpu_count = len(gpus)
    # Each CSV line is "<count>, <name>"; keep only the name field.
    gpu_names = [gpu.split(",")[1].strip() for gpu in gpus]
    return gpu_names, gpu_count
def check_cuda_p2p_ib_support():
    """
    Check whether the devices in use are known to have P2P and IB
    communication issues — namely consumer GPU hardware after the 3090
    (RTX 3090 and the RTX 40 series).

    Notably uses `nvidia-smi` instead of torch so CUDA is not initialized.
    Returns `True` on any failure to query the GPUs (best-effort check).
    """
    problematic_markers = ("RTX 3090", "RTX 40")
    try:
        device_names, device_count = get_gpu_info()
    except Exception:
        # nvidia-smi missing or unparsable output: assume support.
        return True
    if device_count > 1:
        for name in device_names:
            if any(marker in name for marker in problematic_markers):
                return False
    return True
def check_fp8_capability():
    """
    Check if the current GPUs available support FP8.

    Notably must initialize `torch.cuda` to perform the check.
    NOTE(review): this queries only the default CUDA device's capability,
    not every visible device — confirm whether multi-GPU coverage is needed.
    """
    major, minor = torch.cuda.get_device_capability()
    # FP8 requires compute capability 8.9 or newer.
    return (major, minor) >= (8, 9)
| |
|