| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| import inspect |
| import os |
| import unittest |
|
|
| import torch |
|
|
| import accelerate |
| from accelerate import Accelerator |
| from accelerate.test_utils import execute_subprocess_async, require_multi_gpu |
| from accelerate.utils import get_launch_prefix, patch_environment |
|
|
|
|
class MultiGPUTester(unittest.TestCase):
    """Integration tests that launch accelerate's multi-GPU test scripts in subprocesses."""

    def setUp(self):
        # Locate the helper scripts shipped inside accelerate.test_utils so the
        # tests work regardless of where the package is installed.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )

    @require_multi_gpu
    def test_multi_gpu(self):
        """Run the generic distributed test script across all available GPUs."""
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = get_launch_prefix() + [self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        """Re-launch this very file under the distributed launcher; its
        ``__main__`` block exercises ``Accelerator.pad_across_processes``."""
        cmd = get_launch_prefix() + [inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        """
        This TestCase checks the behaviour that occurs during distributed training or evaluation,
        when the batch size does not evenly divide the dataset size.
        """
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        # CUDA_VISIBLE_DEVICES restricts the subprocess to 2 GPUs, so launch
        # exactly 2 worker processes. Using torch.cuda.device_count() here
        # would over-subscribe (and fail) on machines with more than 2 GPUs.
        cmd = get_launch_prefix() + ["--nproc_per_node=2", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
|
|
|
|
if __name__ == "__main__":
    # Worker entry point: executed via the distributed launcher by
    # MultiGPUTester.test_pad_across_processes. Each rank builds a tensor whose
    # first dimension depends on its rank, then verifies that
    # Accelerator.pad_across_processes equalizes shapes across ranks.
    acc = Accelerator()
    state = acc.state
    rows = state.process_index + 2
    tensor = torch.randint(0, 10, (rows, 10)).to(acc.device)

    problems = []
    expected_rows = state.num_processes + 1

    # Default behaviour: zero rows are appended at the end.
    padded_end = acc.pad_across_processes(tensor)
    if padded_end.shape[0] != expected_rows:
        problems.append(f"Found shape {padded_end.shape} but should have {state.num_processes + 1} at dim 0.")
    if not torch.equal(padded_end[:rows], tensor):
        problems.append("Tensors have different values.")
    if not torch.all(padded_end[rows:] == 0):
        problems.append("Padding was not done with the right value (0).")

    # pad_first=True: zero rows are prepended instead.
    padded_front = acc.pad_across_processes(tensor, pad_first=True)
    if padded_front.shape[0] != expected_rows:
        problems.append(f"Found shape {padded_front.shape} but should have {state.num_processes + 1} at dim 0.")
    offset = state.num_processes - state.process_index - 1
    if not torch.equal(padded_front[offset:], tensor):
        problems.append("Tensors have different values.")
    if not torch.all(padded_front[:offset] == 0):
        problems.append("Padding was not done with the right value (0).")

    # "".join keeps the raised message identical to concatenating the pieces.
    if problems:
        raise ValueError("".join(problems))
|
|