diff --git a/.gitattributes b/.gitattributes index f50ba686ad3738ed5494cf873b60aa0eed1be8ed..52a9b02565a9461d093f14ec2bb688c4e56bb5d0 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1534,3 +1534,4 @@ janus/lib/libgcc_s.so.1 filter=lfs diff=lfs merge=lfs -text infer_4_37_2/lib/python3.10/site-packages/decord/libdecord.so filter=lfs diff=lfs merge=lfs -text infer_4_47_1/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text infer_4_47_1/lib/python3.10/site-packages/msgpack/_cmsgpack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +infer_4_47_1/lib/python3.10/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=lfs -text diff --git a/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__init__.py b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c5067f71c8faf166bc78e88f9b62e8627dda7c7 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/checkpointing.py b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/checkpointing.py new file mode 100644 index 0000000000000000000000000000000000000000..02e0b197e927ac31f098755cc12e4a06a391fcd6 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/checkpointing.py @@ -0,0 +1,1162 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Use to partition the activations stored for backward propagation +Therefore reduces the memory consumption +Also implements CPU checkpointing and contiguous memory checkpointing +Reduces memory consumption and memory fragmentation + +Code for rng checkpointing taken from NVIDIA Megatron-LM mpu/random.py +b886b7bb972afe72bac0f5de4f42a4a7bae8ebef +""" + +# Parts of the code here are adapted from PyTorch +# repo: https://github.com/pytorch/pytorch +import copy +import torch +import contextlib +from deepspeed import comm as dist +import weakref + +import mmap +from torch import _C + +from deepspeed.runtime.config import DeepSpeedConfig +from deepspeed.utils import logger +from deepspeed.runtime.utils import copy_to_device, move_to_device, see_memory_usage, bwc_tensor_model_parallel_rank +from deepspeed.utils.timer import SynchronizedWallClockTimer as Timers, FORWARD_GLOBAL_TIMER +from deepspeed.accelerator import get_accelerator + +# DeepSpeed Checkpointing Enabled or Disabled +deepspeed_checkpointing_enabled = False + +# MP parameters +mpu = None +mp_rank = None +mp_size = None +mp_group = None + +# Model Parameters +num_layers = None + +# Checkpointing buffers +contiguous_data_buffers = [] +data_offsets = [] + +contiguous_size_buffers = [] +size_offsets = [] + +timers = None + +# optimization flags +PARTITION_ACTIVATIONS = False +CPU_CHECKPOINT = False +CONTIGUOUS_CHECKPOINTING = False +SYNCHRONIZE = False +PROFILE_TIME = False + +# Default name for the model parallel rng tracker. 
_MODEL_PARALLEL_RNG_TRACKER_NAME = 'model-parallel-rng'
transport_stream = None
cuda_device = None


def detach_variable(inputs, device=None):
    """Return a tuple of detached copies of ``inputs``.

    Tensor entries are (optionally) moved to ``device``, detached from the
    autograd graph, and have their original ``requires_grad`` flag restored so
    the recompute pass can differentiate through them. Non-tensor entries are
    passed through unchanged.

    Raises:
        RuntimeError: if ``inputs`` is not a tuple.
    """
    if not isinstance(inputs, tuple):
        raise RuntimeError("Only tuple of tensors is supported. Got Unsupported input type: ", type(inputs).__name__)

    detached = []
    for entry in inputs:
        if not isinstance(entry, torch.Tensor):
            detached.append(entry)
            continue

        needs_grad = entry.requires_grad
        moved = entry.to(device=device) if device is not None else entry
        moved = moved.detach()
        # Re-arm gradient tracking on the detached copy.
        moved.requires_grad = needs_grad
        detached.append(moved)
    return tuple(detached)


def _set_cuda_rng_state(new_state, device=-1):
    """Sets the random number generator state of the current GPU.

    Arguments:
        new_state (torch.ByteTensor): The desired state
    This function is adapted from PyTorch repo (torch.cuda.set_rng_state) #ignore-cuda
    with a single change: the input state is not cloned. Cloning caused
    major performance issues for +4 GPU cases.
    """
    if hasattr(_C, '_cuda_setRNGState') and callable(_C._cuda_setRNGState):
        # Older PyTorch exposes the raw setter on torch._C.
        def state_setter():
            with get_accelerator().device(device):
                _C._cuda_setRNGState(new_state)
    else:
        # Newer PyTorch: normalize `device` to a torch.device, then write the
        # state into that index's default generator.
        if device == -1:
            device = torch.device(get_accelerator().device_name())
        elif isinstance(device, str):
            device = torch.device(device)
        elif isinstance(device, int):
            device = torch.device(get_accelerator().device_name(), device)

        def state_setter():
            idx = device.index
            if idx is None:
                idx = get_accelerator().current_device()
            get_accelerator().default_generator(idx).set_state(new_state)

    # Defer until device RNG is initialized, mirroring torch.cuda.set_rng_state.
    get_accelerator().lazy_call(state_setter)


class CudaRNGStatesTracker:
    """Tracker for the cuda RNG states.

    ``add(name, seed)`` snapshots a fresh RNG state seeded with ``seed`` under
    ``name``. ``fork(name)`` temporarily installs that state so operations in
    the context draw from the named stream, then restores the caller's state
    on exit.
    """

    def __init__(self):
        # Map from a string name to the cuda rng state.
        self.states_ = {}
        # Seeds are recorded only to guard against accidental reuse.
        self.seeds_ = set()

    def reset(self):
        """Drop every tracked state and recorded seed."""
        self.states_ = {}
        self.seeds_ = set()

    def get_states(self):
        """Return a shallow copy of the name -> rng-state mapping, so callers
        hold direct pointers to the states but not to our dictionary."""
        return copy.copy(self.states_)

    def set_states(self, states):
        """Install ``states`` wholesale; no compatibility checks are made."""
        self.states_ = states

    def add(self, name, seed):
        """Seed the device RNG, record the resulting state under ``name``."""
        if seed in self.seeds_:
            raise Exception('seed {} already exists'.format(seed))
        self.seeds_.add(seed)
        if name in self.states_:
            raise Exception('cuda rng state {} already exists'.format(name))
        # Seed, capture the state, then restore the RNG exactly as it was.
        orig_rng_state = get_accelerator().get_rng_state()
        get_accelerator().manual_seed(seed)
        self.states_[name] = get_accelerator().get_rng_state()
        _set_cuda_rng_state(orig_rng_state)

    @contextlib.contextmanager
    def fork(self, name=_MODEL_PARALLEL_RNG_TRACKER_NAME):
        """Run the body under the named rng state, restoring the caller's
        state afterwards."""
        if name not in self.states_:
            raise Exception('cuda rng state {} is not added'.format(name))
        orig_cuda_rng_state = get_accelerator().get_rng_state()
        _set_cuda_rng_state(self.states_[name])
        try:
            yield
        finally:
            # Persist draws made inside the context for the next fork.
            self.states_[name] = get_accelerator().get_rng_state()
            _set_cuda_rng_state(orig_cuda_rng_state)


# RNG tracker object.
_CUDA_RNG_STATE_TRACKER = CudaRNGStatesTracker()


def get_cuda_rng_tracker():
    """Get cuda rng tracker."""
    return _CUDA_RNG_STATE_TRACKER


def model_parallel_cuda_manual_seed(seed):
    """Initialize model parallel cuda seed.

    This function should be called after the model parallel is
    initialized. Also, no get_accelerator().manual_seed should be called
    after this function. Basically, this is replacement for that
    function.
    Two set of RNG states are tracked:
    default state: This is for data parallelism and is the same among a
                   set of model parallel GPUs but different across
                   different model parallel groups. This is used for
                   example for dropout in the non-model-parallel regions.
    model-parallel state: This state is different among a set of model
                   parallel GPUs, but the same across data parallel
                   groups. This is used for example for dropout in
                   model parallel regions.
    """
    global mpu

    tp_rank = bwc_tensor_model_parallel_rank(mpu)

    # 2718 is just for fun and any POSITIVE value will work.
    model_parallel_seed = seed + 2718 + tp_rank
    # Data parallel gets the original seed.
    data_parallel_seed = seed

    if dist.get_rank() == 0:
        logger.info(
            '> initializing model parallel cuda seeds on global rank {}, '
            'model parallel rank {}, and data parallel rank {} with '
            'model parallel seed: {} and data parallel seed: {}'.format(dist.get_rank(), tp_rank,
                                                                        mpu.get_data_parallel_rank(),
                                                                        model_parallel_seed, data_parallel_seed), )
    _CUDA_RNG_STATE_TRACKER.reset()
    # Set the default (data-parallel) state, then track the model-parallel one.
    get_accelerator().manual_seed(data_parallel_seed)
    _CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME, model_parallel_seed)
def model_parallel_reconfigure_tp_seed(seed):
    """Re-seed the model-parallel RNG stream (e.g. after a TP reconfiguration).

    Uses the same ``seed + 2718 + tp_rank`` recipe as
    ``model_parallel_cuda_manual_seed`` and installs the new seed inside the
    tracked model-parallel rng fork.
    """
    global mpu
    tp_rank = bwc_tensor_model_parallel_rank(mpu)
    model_parallel_seed = seed + 2718 + tp_rank
    with _CUDA_RNG_STATE_TRACKER.fork():
        get_accelerator().manual_seed(model_parallel_seed)


def get_partition_start(item):
    """Return the flat-index offset of this rank's partition of ``item``.

    Fixed to use integer floor division: the previous ``size / mp_size``
    float division loses precision once ``numel`` exceeds 2**53 and rounds
    inconsistently with ``get_partition_size`` for non-divisible sizes.
    Results are identical whenever ``numel`` is divisible by ``mp_size``
    (which ``get_partition_size`` asserts).
    """
    global mp_rank, mp_size, mp_group
    size = item.numel()
    partition_size = size // mp_size
    return partition_size * mp_rank


def get_partition_size(item):
    """Return the number of elements of ``item`` owned by each rank.

    ``item.numel()`` must be divisible by the model-parallel world size.
    Uses exact integer arithmetic (see ``get_partition_start``).
    """
    global mp_rank, mp_size, mp_group
    size = item.numel()
    assert size % mp_size == 0, "Doesn't handle if partition activation if item is not divisible by mp size"
    return size // mp_size


def gather_partitioned_activations(tensors, device=None):
    """Reassemble activations partitioned across the tensor-parallel group.

    ``tensors`` alternates (partition, original-size) pairs as produced by
    ``get_partitioned_activations_for_backward``. Each checkpointed partition
    is all-gathered over ``mp_group`` and viewed back to its original shape;
    non-checkpointed entries pass through unchanged. When ``device`` is given,
    gathered tensors are created/moved there.
    """
    global mp_rank, mp_size, mp_group
    assert len(tensors) % 2 == 0, f'Expected even count of tensors, instead got {len(tensors)}'
    inputs = []
    num_args = len(tensors) // 2
    for i in range(num_args):

        item = tensors[2 * i]
        size = tensors[2 * i + 1]

        if not is_activation_to_checkpoint(item):
            inputs.append(item)
            continue

        # don't need to do all_gather if model parallel is not enabled
        if mp_group is None or mp_size == 1:
            item = item.view(list(size.numpy()))
            if device is not None:
                item = item.to(device)
            inputs.append(item)
            continue

        partition_size = item.numel()
        tensor_size = partition_size * mp_size
        if device is not None:
            flat_tensor = torch.zeros([tensor_size], dtype=item.dtype, device=device)
        else:
            flat_tensor = torch.zeros([tensor_size], dtype=item.dtype, device=item.device)
        # Copy the local shard into its slot, then gather the rest in place.
        part = flat_tensor.narrow(0, partition_size * mp_rank, partition_size)
        part.copy_(item)
        dist.all_gather_into_tensor(flat_tensor, part, group=mp_group)
        input_tensor = flat_tensor.view(list(size.numpy()))
        item.data = input_tensor.data

        inputs.append(item)

    return tuple(inputs)
def extract_tensors(all_objects):
    """
    Separate objects in list/tuple into tensors and non-tensors and create a mapping to enable re-aggregation.
    The order of tensors and non-tensors is preserved in their respective output groups.

    Parameters:
        all_objects (list/tuple): Objects containing tensors and non-tensors to be split.

    Returns:
        tuple: Containing tensors, non-tensors, and bools of whether each position in original list/tuple was a tensor.

    """
    tensor_objects = []
    non_tensor_objects = []
    tensor_flags = []

    # Single pass: route each object to its group while recording its kind.
    for obj in all_objects:
        flag = torch.is_tensor(obj)
        tensor_flags.append(flag)
        (tensor_objects if flag else non_tensor_objects).append(obj)

    # Mirror the container kind of the input: tuple in -> tuples out.
    if type(all_objects) is tuple:
        return tuple(tensor_objects), tuple(non_tensor_objects), tuple(tensor_flags)
    return tensor_objects, non_tensor_objects, tensor_flags
def merge_tensors(tensor_objects, non_tensor_objects, tensor_flags):
    """
    Merge two lists (or tuples) of tensors and non-tensors using a mapping of positions in merged list (or tuple).

    Parameters:
        tensor_objects (list/tuple): Tensors to merge.
        non_tensor_objects (list/tuple): Non-tensors to merge.
        tensor_flags (list/tuple): Indicates whether each position in output is a tensor.

    Returns:
        tuple: Merge of tensors and non-tensors
    """
    merged_objects = []
    tensor_idx = 0
    non_tensor_idx = 0

    real_tensor_flags = None

    # remove the flags that are assigned to the size of the flattened tensors:
    # under activation partitioning the saved args alternate (arg, size), and
    # the entry following a kept True flag is that tensor's size, so it is
    # skipped. NOTE(review): the `None` size placeholder following a
    # non-tensor arg (flag False) is NOT skipped by this logic — verify
    # against get_partitioned_activations_for_backward.
    if PARTITION_ACTIVATIONS:
        real_tensor_flags = []
        previous_flag = False
        for flag in tensor_flags:
            if previous_flag:
                previous_flag = False
                continue
            previous_flag = flag
            real_tensor_flags.append(flag)
    else:
        real_tensor_flags = tensor_flags

    # Interleave the two streams back into their original positions.
    for is_tensor in real_tensor_flags:
        if is_tensor:
            merged_objects.append(tensor_objects[tensor_idx])
            tensor_idx += 1
        else:
            merged_objects.append(non_tensor_objects[non_tensor_idx])
            non_tensor_idx += 1

    return tuple(merged_objects)


def is_activation_to_checkpoint(item):
    """
    Is an activation to be checkpointed

    Only floating-point tensors with at least ``mp_size`` elements are
    treated as checkpointable activations (smaller tensors cannot be
    partitioned across the tensor-parallel group).
    """
    global mp_size
    return torch.is_tensor(item) and item.is_floating_point() and item.numel() >= mp_size


def partition_activations(args, cpu_checkpoint, contiguous_checkpoint):
    """Slice each checkpointable activation in ``args`` down to this rank's
    partition, optionally copying it into a pre-allocated contiguous buffer
    (one slot per layer) and/or onto the CPU. Non-checkpointable entries are
    passed through unchanged. Returns the list of stored partitions."""
    global contiguous_data_buffers, data_offsets

    inputs = []
    num_non_fp_tensors = 0

    for arg_index, item in enumerate(args):
        if not is_activation_to_checkpoint(item):
            inputs.append(item)
            num_non_fp_tensors += 1
            continue

        # `i` indexes only the checkpointable tensors, matching the layout of
        # contiguous_data_buffers / data_offsets.
        i = arg_index - num_non_fp_tensors
        partition_size = get_partition_size(item)
        partition = item.detach().contiguous().view(-1).narrow(0, get_partition_start(item), partition_size).clone()

        buffer_device = torch.device('cpu') if cpu_checkpoint else partition.device

        if contiguous_checkpoint:
            # Lazily create one buffer slot per layer for this argument index.
            if i >= len(contiguous_data_buffers):
                tensor_list = [
                    torch.tensor(()).new_empty([partition_size], dtype=partition.dtype, device=buffer_device)
                    for _ in range(num_layers)
                ]
                contiguous_data_buffers.append(tensor_list)
                data_offsets.append(0)
            elif contiguous_data_buffers[i] is None:
                tensor_list = [
                    torch.tensor(()).new_empty([partition_size], dtype=partition.dtype, device=buffer_device)
                    for _ in range(num_layers)
                ]
                contiguous_data_buffers[i] = tensor_list
                data_offsets[i] = 0

            # Because the 'new_empty' returns uninitialized pages,
            # the pages need to be populated during the cudaMemcpy time
            # which increases the data copy time. To avoid this, we
            # pre-populate these pages by simply writing 0 ahead of
            # the actual cudaMemcpy operation time. Due to the
            # previously launched GPU kernels, there is a small
            # window of time here for CPUs to populate pages asynchronously.
            contiguous_data_buffers[i][data_offsets[i]].data[range(
                0, contiguous_data_buffers[i][data_offsets[i]].data.shape[0],
                int(mmap.PAGESIZE / contiguous_data_buffers[i][data_offsets[i]].data.element_size()))] = 0

            contiguous_partition = contiguous_data_buffers[i][data_offsets[i]].data.copy_(partition.data)
            data_offsets[i] = data_offsets[i] + 1
            inputs.append(contiguous_partition)
        else:
            # NOTE(review): this branch consults the module-level
            # CPU_CHECKPOINT flag rather than the `cpu_checkpoint` parameter
            # used above for buffer_device — likely intended to be the same
            # value; confirm callers always pass CPU_CHECKPOINT here.
            partition = partition.cpu() if CPU_CHECKPOINT else partition
            inputs.append(partition)

    return inputs
def get_partitioned_activations_for_backward(args, inputs, contiguous_checkpoint):
    """Build the (arg, size) pair list that is stashed for backward when
    activation partitioning is enabled.

    For every checkpointable ``arg`` the full tensor's storage is released
    (``arg.data`` is replaced by an empty tensor) and the partitioned copy is
    kept on ``arg.saved_data``; its original shape is appended as a size
    tensor so backward can reassemble it. Non-checkpointable entries are
    appended with a size tensor (if they are tensors) or ``None``.
    """
    global contiguous_size_buffers, size_offsets

    new_args = []
    num_non_fp_tensors = 0

    for arg_index, (arg, inp) in enumerate(zip(args, inputs)):
        size = torch.tensor(arg.size()) if torch.is_tensor(arg) else None
        if not is_activation_to_checkpoint(arg):
            new_args.append(arg)
            new_args.append(size)
            num_non_fp_tensors += 1
            continue

        # Drop the full activation's storage; keep only the partition. The
        # original data is restored from `saved_data` at backward time.
        arg.data = torch.empty([], device=arg.device).data
        arg.saved_data = inp.data

        new_args.append(arg)
        # `i` indexes only the checkpointable tensors, matching
        # contiguous_size_buffers / size_offsets.
        i = arg_index - num_non_fp_tensors

        if contiguous_checkpoint:
            numel = size.numel()
            # Lazily create a contiguous buffer holding `num_layers` size
            # records for this argument index.
            if i >= len(contiguous_size_buffers):
                tmp = torch.tensor(())
                contiguous_size_buffers.append(
                    tmp.new_empty([numel * num_layers], dtype=size.dtype, device=size.device))
                size_offsets.append(0)
            elif contiguous_size_buffers[i] is None:
                tmp = torch.tensor(())
                contiguous_size_buffers[i] = tmp.new_empty([numel * num_layers], dtype=size.dtype, device=size.device)
                size_offsets[i] = 0

            contiguous_size = contiguous_size_buffers[i].narrow(0, size_offsets[i], numel).data.copy_(size.data)
            contiguous_size = contiguous_size.view_as(size)
            size_offsets[i] = size_offsets[i] + numel
            new_args.append(contiguous_size)
        else:
            new_args.append(size)

    return new_args
def get_cpu_activations_for_backward(args, inputs):
    """Stash CPU copies for backward when CPU checkpointing is enabled.

    For each checkpointable ``arg``, releases the device storage
    (``arg.data`` becomes an empty tensor) and keeps the CPU copy from
    ``inputs`` on ``arg.saved_data``; other entries pass through unchanged.
    """
    new_args = []
    for i, (arg, inp) in enumerate(zip(args, inputs)):
        if not is_activation_to_checkpoint(arg):
            new_args.append(arg)
            continue

        arg.data = torch.empty([], device=arg.device).data
        arg.saved_data = inp.data
        new_args.append(arg)

    return new_args


class CheckpointFunction(torch.autograd.Function):
    """This function is adapted from torch.utils.checkpoint with
    two main changes:
    1) torch.cuda.set_rng_state is replaced with `_set_cuda_rng_state` #ignore-cuda
    2) the states in the model parallel tracker are also properly
    tracked/set/reset.
    3) Performance activation partitioning, contiguous memory optimization
    4) CPU Checkpointing
    5) Profile forward and backward functions
    """

    @staticmethod
    def forward(ctx, run_function, all_outputs, *args):
        """Run ``run_function(*args)`` under ``no_grad`` and stash (possibly
        partitioned / CPU-offloaded) activations plus RNG states on ``ctx``
        for recomputation in :meth:`backward`. Tensor outputs are also
        appended to the caller-supplied ``all_outputs`` list."""
        global mpu, timers, SYNCHRONIZE, PROFILE_TIME

        def save_args_for_backward(*all_args):
            # Split into tensors / non-tensors so only tensors are stashed.
            tensor_args, non_tensor_args, tensor_flags = extract_tensors(all_objects=all_args)
            ctx.deepspeed_saved_tensors = tensor_args
            ctx.non_tensor_args = non_tensor_args
            ctx.tensor_flags = tensor_flags

        if SYNCHRONIZE:
            get_accelerator().synchronize()

        if timers is None and PROFILE_TIME:
            timers = Timers()

        if PROFILE_TIME:
            timers(FORWARD_GLOBAL_TIMER).start()

        ctx.run_function = run_function
        global num_layers
        global mp_rank, mp_size, mp_group
        global contiguous_data_buffers, contiguous_size_buffers
        global data_offsets, size_offsets
        # Resolve model-parallel topology lazily on the first forward call.
        if mp_rank is None:
            if mpu is not None:
                if hasattr(mpu, 'get_tensor_model_parallel_rank'):
                    mp_rank = mpu.get_tensor_model_parallel_rank()
                    mp_size = mpu.get_tensor_model_parallel_world_size()
                    mp_group = mpu.get_tensor_model_parallel_group()
                else:
                    mp_rank = mpu.get_model_parallel_rank()
                    mp_size = mpu.get_model_parallel_world_size()
                    mp_group = mpu.get_model_parallel_group()
            else:
                mp_rank = 0
                mp_size = 1
                mp_group = None

        global cuda_device, transport_stream, PARTITION_ACTIVATIONS, buffer_0, buffer_1, buffer_0_offset, buffer_1_offset

        # One-time setup: log the active checkpointing configuration and
        # create the transport stream.
        if cuda_device is None:
            see_memory_usage("First Forward Beginning", force=False)
            if dist.get_rank() == 0:
                logger.info(f"Activation Checkpointing Information")
                logger.info(f"----Partition Activations {PARTITION_ACTIVATIONS}, CPU CHECKPOINTING {CPU_CHECKPOINT}")
                logger.info(
                    f"----contiguous Memory Checkpointing {CONTIGUOUS_CHECKPOINTING} with {num_layers} total layers")
                logger.info(f"----Synchronization {SYNCHRONIZE}")
                logger.info(f"----Profiling time in checkpointing {PROFILE_TIME}")

            cuda_device = get_accelerator().current_device_name()
            transport_stream = get_accelerator().Stream(device=cuda_device)

        if PARTITION_ACTIVATIONS:
            inputs = partition_activations(args, CPU_CHECKPOINT, CONTIGUOUS_CHECKPOINTING)
        elif CPU_CHECKPOINT:
            inputs = copy_to_device(args, device=torch.device('cpu'), criterion_func=is_activation_to_checkpoint)

        # just in case something funky is happening such as reuse of inputs
        inputs_cuda = copy_to_device(args, device=cuda_device, criterion_func=is_activation_to_checkpoint)

        # Copy the rng states so backward can replay the forward exactly.
        ctx.fwd_cpu_rng_state = torch.get_rng_state()
        ctx.fwd_cuda_rng_state = get_accelerator().get_rng_state()
        ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()

        see_memory_usage("Before running forward on the layer", force=False)
        # ctx.save_for_backward(*args)
        with torch.no_grad():
            outputs = run_function(*inputs_cuda)

        see_memory_usage("After running forward on the layer", force=False)
        del inputs_cuda

        if PARTITION_ACTIVATIONS:
            new_args = get_partitioned_activations_for_backward(args, inputs, CONTIGUOUS_CHECKPOINTING)
            assert len(new_args) % 2 == 0, f'save_for_backward called with odd number of args, {len(new_args)}'
            save_args_for_backward(*new_args)
        elif CPU_CHECKPOINT:
            new_args = get_cpu_activations_for_backward(args, inputs)
            save_args_for_backward(*new_args)
        else:
            save_args_for_backward(*args)

        if PROFILE_TIME:
            timers(FORWARD_GLOBAL_TIMER).stop()
            timers.log([FORWARD_GLOBAL_TIMER])
        if SYNCHRONIZE:
            get_accelerator().synchronize()

        # Tensors returned from forward() may not be differentiable.
        if torch.is_tensor(outputs):
            non_grad_outputs = [outputs] if not outputs.is_floating_point() else []
        else:
            non_grad_outputs = [o for o in outputs if torch.is_tensor(o) and not o.is_floating_point()]
        ctx.mark_non_differentiable(*non_grad_outputs)

        if torch.is_tensor(outputs):
            all_outputs += [outputs]
            return outputs
        else:
            all_outputs += outputs
            outputs, _, _ = extract_tensors(all_objects=outputs)
            return tuple(outputs)

    @staticmethod
    def backward(ctx, *grads):
        """Recompute the forward pass under the stashed RNG states, then run
        autograd over the recomputed graph and return the input gradients
        (``None`` for the ctx/run_function/all_outputs slots and any
        non-tensor inputs)."""
        global timers
        see_memory_usage("In backward", force=False)
        # removing pointers to the contiguous buffer memory
        # so that they can be garbage collected once the checkpoints
        # have been used
        if SYNCHRONIZE:
            get_accelerator().synchronize()
        if PROFILE_TIME:
            timers('backward').start()

        if CONTIGUOUS_CHECKPOINTING:
            global data_offsets, size_offsets
            global contiguous_data_buffers, contiguous_size_buffers

            # NOTE(review): rebinding the loop variable is a no-op; the
            # buffers are actually released by the reassignments below.
            for buffers in contiguous_data_buffers:
                buffers = []

            # frees up all the pointers to the checkpoints except for the ones
            # stored by save for backward
            contiguous_data_buffers = []
            contiguous_size_buffers = []
            data_offsets = []
            size_offsets = []

        see_memory_usage("In backward checkpointing code", force=False)
        # NOTE(review): relies on the private torch.autograd._is_checkpoint_valid API.
        if not torch.autograd._is_checkpoint_valid():
            raise RuntimeError("Checkpointing is not compatible with .grad(), "
                               "please use .backward() if possible")

        global cuda_device, transport_stream, PARTITION_ACTIVATIONS

        # Rebuild deepspeed_saved_tensors: restore the real activation data
        # that forward() moved onto `saved_data`.
        for t in ctx.deepspeed_saved_tensors:
            if t is not None and hasattr(t, 'saved_data') and t.saved_data is not None:
                t.data = t.saved_data.to(t.device)
                t.saved_data = None

        if PARTITION_ACTIVATIONS:
            # with get_accelerator().stream(transport_stream):
            inputs = gather_partitioned_activations(ctx.deepspeed_saved_tensors,
                                                    device=cuda_device if CPU_CHECKPOINT else None)
            detached_inputs = detach_variable(inputs)
        elif CPU_CHECKPOINT:
            inputs = move_to_device(ctx.deepspeed_saved_tensors, cuda_device, is_activation_to_checkpoint)
            detached_inputs = detach_variable(inputs)
        else:
            inputs = ctx.deepspeed_saved_tensors
            detached_inputs = detach_variable(inputs)

        # Add non tensor input args
        detached_inputs = merge_tensors(tensor_objects=detached_inputs,
                                        non_tensor_objects=ctx.non_tensor_args,
                                        tensor_flags=ctx.tensor_flags)

        # Store the current states.
        bwd_cpu_rng_state = torch.get_rng_state()
        bwd_cuda_rng_state = get_accelerator().get_rng_state()
        bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()

        # Set the states to what it used to be before the forward pass.
        torch.set_rng_state(ctx.fwd_cpu_rng_state)
        _set_cuda_rng_state(ctx.fwd_cuda_rng_state)
        get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker)

        # if PARTITION_ACTIVATIONS:
        #     current_stream=get_accelerator().current_stream()
        #     current_stream.wait_stream(transport_stream)

        see_memory_usage("In backward checkpointing code before forward", force=False)

        # Recompute the forward pass with grad enabled this time.
        with torch.enable_grad():
            outputs = ctx.run_function(*detached_inputs)

        see_memory_usage("In backward checkpointing code after forward", force=False)
        # Set the states back to what it was at the start of this function.
        torch.set_rng_state(bwd_cpu_rng_state)
        _set_cuda_rng_state(bwd_cuda_rng_state)
        get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker)

        if isinstance(outputs, torch.Tensor):
            outputs = (outputs, )

        # Filter out non tensor outputs
        outputs, _, _ = extract_tensors(all_objects=outputs)

        # Construct arguments to autograd.backward().
        # This is usually just outputs and grads, but forward() can return tensors that
        # are not differentiable.
        output_tensors = []
        grad_tensors = []
        for out, grad in zip(outputs, grads):
            if out.requires_grad:
                output_tensors.append(out)
                grad_tensors.append(grad)

        see_memory_usage("In backward checkpointing code before backward", force=False)

        torch.autograd.backward(output_tensors, grad_tensors)

        # Force clear our stashed tensors to prevent a memory leak in certain scenarios
        ctx.deepspeed_saved_tensors = None
        ctx.non_tensor_args = None
        ctx.tensor_flags = None

        see_memory_usage("After backward checkpointing code after backward", force=False)

        if PROFILE_TIME:
            timers('backward').stop()
            timers.log(['backward'])
        if SYNCHRONIZE:
            get_accelerator().synchronize()
        # One gradient slot per forward() argument: ctx gets none, and the
        # first two Nones cover run_function and all_outputs.
        ret_list = [None, None]  # first None for ctx
        for inp in detached_inputs:
            if torch.is_tensor(inp):
                ret_list.append(inp.grad)
            else:
                ret_list.append(None)

        return tuple(ret_list)
def non_reentrant_checkpoint(function, *args):
    """This function is a union of `torch.utils.checkpoint._checkpoint_without_reentrant` and `CheckpointFunction` in this module

    This function aims to solve the back propagation error raised when all inputs require no grad.
    * has already been implemented in pytorch for a while, the solution is stable at most times except for jit module mode.
    * can help to solve the issue which is hacked by `deepspeed.runtime.pipe.module.PipelineModule._is_checkpointable`

    Main modifications compared to the implementation of torch:
    1. adapt to the signature of `checkpoint` function in this module
    2. solve the non-determinism by random state management consistent with deepspeed `CheckpointFunction`
    3. when there is partition or cpu checkpointing, gather them in the unpack_hook during back propagation
    4. make all after-backward blocks into the hook which will be executed after all leaf nodes' backward execution.
    5. above 4. is inspired by `torch.autograd.graph.register_multi_grad_hook`, which is only implemented after 2.0.0
    """
    global mpu, timers, SYNCHRONIZE, PROFILE_TIME

    # Closed-over stash replacing ctx.* of the reentrant CheckpointFunction.
    deepspeed_saved_tensors = None
    non_tensor_args = None
    tensor_flags = None

    def save_args_for_backward(*all_args):
        """keep this function to reduce the modification from original implementation"""
        nonlocal deepspeed_saved_tensors, non_tensor_args, tensor_flags
        tensor_args, non_tensor_args, tensor_flags = extract_tensors(all_objects=all_args)
        deepspeed_saved_tensors = tensor_args
        non_tensor_args = non_tensor_args
        tensor_flags = tensor_flags

    if SYNCHRONIZE:
        get_accelerator().synchronize()

    if timers is None and PROFILE_TIME:
        timers = Timers()

    if PROFILE_TIME:
        timers(FORWARD_GLOBAL_TIMER).start()

    global num_layers
    global mp_rank, mp_size, mp_group
    global contiguous_data_buffers, contiguous_size_buffers
    global data_offsets, size_offsets
    # Resolve model-parallel topology lazily on the first call.
    if mp_rank is None:
        if mpu is not None:
            if hasattr(mpu, 'get_tensor_model_parallel_rank'):
                mp_rank = mpu.get_tensor_model_parallel_rank()
                mp_size = mpu.get_tensor_model_parallel_world_size()
                mp_group = mpu.get_tensor_model_parallel_group()
            else:
                mp_rank = mpu.get_model_parallel_rank()
                mp_size = mpu.get_model_parallel_world_size()
                mp_group = mpu.get_model_parallel_group()
        else:
            mp_rank = 0
            mp_size = 1
            mp_group = None

    global cuda_device, transport_stream, PARTITION_ACTIVATIONS, buffer_0, buffer_1, buffer_0_offset, buffer_1_offset

    # One-time setup: log configuration and create the transport stream.
    if cuda_device is None:
        see_memory_usage("First Forward Beginning", force=False)
        if dist.get_rank() == 0:
            logger.info(f"Activation Checkpointing Information")
            logger.info(f"----Partition Activations {PARTITION_ACTIVATIONS}, CPU CHECKPOINTING {CPU_CHECKPOINT}")
            logger.info(
                f"----contiguous Memory Checkpointing {CONTIGUOUS_CHECKPOINTING} with {num_layers} total layers")
            logger.info(f"----Synchronization {SYNCHRONIZE}")
            logger.info(f"----Profiling time in checkpointing {PROFILE_TIME}")

        cuda_device = get_accelerator().current_device_name()
        transport_stream = get_accelerator().Stream(device=cuda_device)

    if PARTITION_ACTIVATIONS:
        inputs = partition_activations(args, CPU_CHECKPOINT, CONTIGUOUS_CHECKPOINTING)
    elif CPU_CHECKPOINT:
        inputs = copy_to_device(args, device=torch.device('cpu'), criterion_func=is_activation_to_checkpoint)

    # just in case something funky is happening such as reuse of inputs
    inputs_cuda = copy_to_device(args, device=cuda_device, criterion_func=is_activation_to_checkpoint)

    # Copy the rng states so the recompute in the unpack hook replays forward exactly.
    fwd_cpu_rng_state = torch.get_rng_state()
    fwd_cuda_rng_state = get_accelerator().get_rng_state()
    fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()

    if PARTITION_ACTIVATIONS:
        new_args = get_partitioned_activations_for_backward(args, inputs, CONTIGUOUS_CHECKPOINTING)
        assert len(new_args) % 2 == 0, f'save_for_backward called with odd number of args, {len(new_args)}'
        save_args_for_backward(*new_args)
    elif CPU_CHECKPOINT:
        new_args = get_cpu_activations_for_backward(args, inputs)
        save_args_for_backward(*new_args)
    else:
        save_args_for_backward(*args)

    class Holder():
        """the place holder object used as activations to save memory"""
        pass

    # weakref seems utilized to discover the tensor deletion before a whole
    # forward backward pair loop finished
    storage: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary()
    weak_holder_list = []
    leaf_tensors = []
    backward_visited_leaf_nodes = 0

    def checkpoint_pack(tensor_from_forward):
        """used to record the activation order in the `weak_holder_list`

        the activation order in holder list is consistent between the first forward and recomputing forward.
        * the jit compiled forward will break the order consistency *
        """
        res = Holder()
        weak_holder_list.append(weakref.ref(res))

        # if this is a leaf tensor, save it for backward progression trace
        # leaf tensor used to be input or parameters, which is not activations and
        # has no memory overhead
        if tensor_from_forward.requires_grad and tensor_from_forward.is_leaf:
            leaf_tensors.append(tensor_from_forward)
        return res

    def checkpoint_unpack(holder_from_backward):
        """retrieve the activations from recompute"""
        nonlocal deepspeed_saved_tensors, non_tensor_args, tensor_flags

        # if this is the first step of backward propagation, recompute the graph and save
        # all the activations with the same order as `checkpoint_pack` does
        if len(storage) == 0:
            unpack_counter = 0

            def replay_pack(tensor_from_replay):
                """save recompute activations"""
                nonlocal unpack_counter
                unpack_counter += 1

                # Holder already garbage-collected: the activation is no
                # longer needed, skip storing it.
                if weak_holder_list[unpack_counter - 1]() is None:
                    return

                detached_activations = tensor_from_replay.detach()
                storage[weak_holder_list[unpack_counter - 1]()] = detached_activations

                return

            def replay_unpack(none_value):
                """the recompute graph must never be backpropagated through"""
                raise RuntimeError("You are calling backwards on a tensor that is never exposed.")

            global timers
            see_memory_usage("In backward", force=False)
            # removing pointers to the contiguous buffer memory
            # so that they can be garbage collected once the checkpoints
            # have been used
            if SYNCHRONIZE:
                get_accelerator().synchronize()
            if PROFILE_TIME:
                timers('backward').start()

            if CONTIGUOUS_CHECKPOINTING:
                global data_offsets, size_offsets
                global contiguous_data_buffers, contiguous_size_buffers

                # NOTE(review): rebinding the loop variable is a no-op; the
                # buffers are actually released by the reassignments below.
                for buffers in contiguous_data_buffers:
                    buffers = []

                # frees up all the pointers to the checkpoints except for the ones
                # stored by save for backward
                contiguous_data_buffers = []
                contiguous_size_buffers = []
                data_offsets = []
                size_offsets = []

            see_memory_usage("In backward checkpointing code", force=False)
            # NOTE(review): relies on the private torch.autograd._is_checkpoint_valid API.
            if not torch.autograd._is_checkpoint_valid():
                raise RuntimeError("Checkpointing is not compatible with .grad(), "
                                   "please use .backward() if possible")

            global cuda_device, transport_stream, PARTITION_ACTIVATIONS

            # gather inputs which is partitioned or checkpointed before first forward
            if PARTITION_ACTIVATIONS:
                # with get_accelerator().stream(transport_stream):
                inputs = gather_partitioned_activations(deepspeed_saved_tensors,
                                                        device=cuda_device if CPU_CHECKPOINT else None)
                detached_inputs = detach_variable(inputs)
            elif CPU_CHECKPOINT:
                inputs = move_to_device(deepspeed_saved_tensors, cuda_device, is_activation_to_checkpoint)
                detached_inputs = detach_variable(inputs)
            else:
                inputs = deepspeed_saved_tensors
                detached_inputs = detach_variable(inputs)

            # Add non tensor input args
            detached_inputs = merge_tensors(tensor_objects=detached_inputs,
                                            non_tensor_objects=non_tensor_args,
                                            tensor_flags=tensor_flags)

            # Store the current states.
            bwd_cpu_rng_state = torch.get_rng_state()
            bwd_cuda_rng_state = get_accelerator().get_rng_state()
            bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()

            # Set the states to what it used to be before the forward pass.
            torch.set_rng_state(fwd_cpu_rng_state)
            _set_cuda_rng_state(fwd_cuda_rng_state)
            get_cuda_rng_tracker().set_states(fwd_cuda_rng_state_tracker)

            see_memory_usage("In backward checkpointing code before forward", force=False)
            # Replay the forward; replay_pack captures each activation into
            # `storage`, keyed by the holder recorded in the first forward.
            with torch.enable_grad(), torch.autograd.graph.saved_tensors_hooks(replay_pack, replay_unpack):
                _unused = function(*detached_inputs)

            see_memory_usage("In backward checkpointing code after forward", force=False)
            # Set the states back to what it was at the start of this function.
            torch.set_rng_state(bwd_cpu_rng_state)
            _set_cuda_rng_state(bwd_cuda_rng_state)
            get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker)

            # Drop the stashed inputs now that recompute has happened.
            deepspeed_saved_tensors = None
            non_tensor_args = None
            tensor_flags = None

        if holder_from_backward not in storage:
            raise RuntimeError("Attempt to retrieve a tensor saved by autograd multiple times without checkpoint"
                               " recomputation being triggered in between, this is not currently supported.")

        return storage[holder_from_backward]

    def after_backward_hook(_nonuse_grads):
        """the hook registered to all leaf tensors"""
        nonlocal leaf_tensors, backward_visited_leaf_nodes
        backward_visited_leaf_nodes += 1

        # Only the last leaf to finish runs the after-backward block, so it
        # executes once per full backward pass.
        if backward_visited_leaf_nodes == len(leaf_tensors):
            see_memory_usage("After backward checkpointing code after backward", force=False)

            if PROFILE_TIME:
                timers('backward').stop()
                timers.log(['backward'])
            if SYNCHRONIZE:
                get_accelerator().synchronize()

    with torch.autograd.graph.saved_tensors_hooks(checkpoint_pack, checkpoint_unpack):
        outputs = function(*inputs_cuda)
        for leaf_tensor in leaf_tensors:
            leaf_tensor.register_hook(after_backward_hook)

    see_memory_usage("After running forward on the layer", force=False)

    if PROFILE_TIME:
        timers(FORWARD_GLOBAL_TIMER).stop()
        timers.log([FORWARD_GLOBAL_TIMER])
    if SYNCHRONIZE:
        get_accelerator().synchronize()

    all_outputs = []
    if torch.is_tensor(outputs):
        all_outputs += [outputs]
    else:
        all_outputs += outputs

    if len(all_outputs) == 1:
        return all_outputs[0]
    else:
        return tuple(all_outputs)
""" + + all_outputs = [] + CheckpointFunction.apply(function, all_outputs, *args) + if len(all_outputs) == 1: + return all_outputs[0] + else: + return tuple(all_outputs) + + +def partition_activations_in_checkpoint(partition_activation): + global PARTITION_ACTIVATIONS + PARTITION_ACTIVATIONS = partition_activation + if dist.get_rank() == 0: + logger.info(f"**************Partition Activations {PARTITION_ACTIVATIONS}************") + + +def set_num_layers(nlayers): + global num_layers + num_layers = nlayers + + +def reset(): + """Resets memory buffers related to contiguous memory optimizations. + Should be called during eval when multiple forward propagations are + computed without any backward propagation that usually clears these + buffers. + Arguments: + None + + Return: + None + """ + if CONTIGUOUS_CHECKPOINTING: + global data_offsets, size_offsets + global contiguous_data_buffers, contiguous_size_buffers + + for buffers in contiguous_data_buffers: + buffers = [] + + # frees up all the pointers to the checkpoints except for the ones + # stored by save for backward + contiguous_data_buffers = [] + contiguous_size_buffers = [] + data_offsets = [] + size_offsets = [] + + +def _configure_using_config_file(config, mpu=None): + global num_layers, PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \ + CPU_CHECKPOINT, SYNCHRONIZE, PROFILE_TIME + + config = DeepSpeedConfig(config, mpu=mpu).activation_checkpointing_config + if dist.get_rank() == 0: + logger.info(config.repr()) + PARTITION_ACTIVATIONS = config.partition_activations + CONTIGUOUS_CHECKPOINTING = config.contiguous_memory_optimization + num_layers = config.number_checkpoints + CPU_CHECKPOINT = config.cpu_checkpointing + SYNCHRONIZE = config.synchronize_checkpoint_boundary + PROFILE_TIME = config.profile + + +def _configure_defaults(): + + global mpu, num_layers, deepspeed_checkpointing_enabled + + global PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \ + CPU_CHECKPOINT, SYNCHRONIZE, PROFILE_TIME + + 
PARTITION_ACTIVATIONS = False + CONTIGUOUS_CHECKPOINTING = False + num_layers = False + CPU_CHECKPOINT = False + SYNCHRONIZE = False + PROFILE_TIME = False + deepspeed_checkpointing_enabled = True + + +def configure( + mpu_, + deepspeed_config=None, + partition_activations=None, + contiguous_checkpointing=None, + num_checkpoints=None, + checkpoint_in_cpu=None, + synchronize=None, + profile=None, +): + """Configure DeepSpeed Activation Checkpointing. + + Arguments: + mpu_: Optional: An object that implements the following methods + get_model_parallel_rank/group/world_size, and get_data_parallel_rank/group/world_size + + deepspeed_config: Optional: DeepSpeed Config json file when provided will be used to + configure DeepSpeed Activation Checkpointing + + partition_activations: Optional: Partitions activation checkpoint across model parallel + GPUs when enabled. By default False. Will overwrite deepspeed_config if provided + + contiguous_checkpointing: Optional: Copies activation checkpoints to a contiguous memory + buffer. Works only with homogeneous checkpoints when partition_activations is enabled. + Must provide num_checkpoints. By default False. Will overwrite deepspeed_config if + provided + + num_checkpoints: Optional: Number of activation checkpoints stored during the forward + propagation of the model. Used to calculate the buffer size for contiguous_checkpointing + Will overwrite deepspeed_config if provided + + checkpoint_in_cpu: Optional: Moves the activation checkpoint to CPU. Only works with + partition_activation. Default is false. Will overwrite deepspeed_config if provided + + synchronize: Optional: Performs get_accelerator().synchronize() at the beginning and end of + each call to deepspeed.checkpointing.checkpoint for both forward and backward pass. + By default false. Will overwrite deepspeed_config if provided + + profile: Optional: Logs the forward and backward time for each + deepspeed.checkpointing.checkpoint invocation. 
Will overwrite deepspeed_config + if provided + + Returns: + None + """ + global mpu, num_layers, deepspeed_checkpointing_enabled + + global PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \ + CPU_CHECKPOINT, SYNCHRONIZE, PROFILE_TIME + + _configure_defaults() + + if mpu_ is not None: + mpu = mpu_ + + if deepspeed_config is not None: + _configure_using_config_file(deepspeed_config, mpu=mpu) + + if partition_activations is not None: + PARTITION_ACTIVATIONS = partition_activations + + if contiguous_checkpointing is not None: + CONTIGUOUS_CHECKPOINTING = contiguous_checkpointing + + if num_checkpoints is not None: + num_layers = num_checkpoints + + if checkpoint_in_cpu is not None: + CPU_CHECKPOINT = checkpoint_in_cpu + + if synchronize is not None: + SYNCHRONIZE = synchronize + + if profile is not None: + PROFILE_TIME = profile + + if CONTIGUOUS_CHECKPOINTING: + assert PARTITION_ACTIVATIONS, "Contiguous Checkpointing is only available with partitioned activations. Set partitioned activations to true in deepspeed config" + if CONTIGUOUS_CHECKPOINTING: + assert num_layers is not None, "Must specify the number of layers with contiguous memory checkpointing" + + +def is_configured(): + """True if deepspeed activation checkpointing has been configured + by calling deepspeed.checkpointing.configure, else returns false + + Arguments: + None + + Return: + True of configured, else False + """ + return deepspeed_checkpointing_enabled diff --git a/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/config.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ed4c37b37a0235821b4ffdc9abd66f276f97938 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/config.cpython-310.pyc differ diff --git 
a/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/constants.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56a33a36c5555383d292f7ff456f9b1f7e173ac4 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/constants.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__init__.py b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c5067f71c8faf166bc78e88f9b62e8627dda7c7 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/data_analyzer.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/data_analyzer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67f2cd33ee31eee73dd068f625d039b5229d2fa9 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/data_analyzer.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/data_sampler.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/data_sampler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a66e70746da4615f1383952b494b913d16eb1fa Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/data_sampler.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/indexed_dataset.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/indexed_dataset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0d6a3b1649b099ac9b7642f5ea4950377650edb Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/indexed_dataset.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/utils.cpython-310.pyc 
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import os
from collections import defaultdict
import csv
import time
from multiprocessing import Process, Manager
import numpy as np
import torch
from torch.utils.data import BatchSampler, SequentialSampler, DataLoader, Subset

from deepspeed.utils import logger
from .indexed_dataset import MMapIndexedDataset
from .utils import split_dataset, split_index, create_mmap_dataset_builder, close_mmap_dataset_builder, find_fit_int_dtype


class DataAnalyzer(object):
    """Map/reduce-style analyzer of per-sample dataset metrics.

    The *map* phase (:meth:`run_map`) walks this worker's shard of ``dataset``
    across one or more threads (processes) and writes per-sample metric values
    into mmap-backed index files under ``save_path``. The *reduce* phase
    (:meth:`run_reduce`) merges the per-worker/per-thread partial results into
    global sample-to-metric and metric-to-sample index files.
    """

    def __init__(self,
                 dataset,
                 num_workers=1,
                 worker_id=0,
                 num_threads=1,
                 num_threads_reduce=1,
                 specific_threads=None,
                 batch_size=1,
                 metric_names=None,
                 metric_functions=None,
                 metric_types=None,
                 metric_dtypes=None,
                 save_path="./",
                 collate_fn=None,
                 custom_map_init=None,
                 custom_map_update=None,
                 custom_map_finalize=None,
                 custom_reduce=None):
        super().__init__()
        self.dataset = dataset
        self.num_workers = num_workers
        self.worker_id = worker_id
        self.num_threads = num_threads
        self.num_threads_reduce = num_threads_reduce
        # Mutable-default fix: the original signature used `=[]` defaults,
        # which share one list object across every DataAnalyzer instance.
        self.specific_threads = [] if specific_threads is None else specific_threads
        self.batch_size = batch_size
        self.metric_names = [] if metric_names is None else metric_names
        self.metric_functions = [] if metric_functions is None else metric_functions
        self.metric_types = [] if metric_types is None else metric_types
        self.metric_dtypes = [] if metric_dtypes is None else metric_dtypes
        self.save_path = save_path
        self.collate_fn = collate_fn
        self.custom_map_init = custom_map_init
        self.custom_map_update = custom_map_update
        self.custom_map_finalize = custom_map_finalize
        self.custom_reduce = custom_reduce

    def init_metric_results(self, thread_id, metric_names, metric_types, metric_dtypes, save_path, worker_id):
        """Create per-thread output builders/state, one entry per metric."""
        metric_results = []
        for m_idx in range(len(metric_names)):
            metric_name, metric_type, metric_dtype = metric_names[m_idx], \
                metric_types[m_idx], metric_dtypes[m_idx]
            assert metric_dtype not in [
                np.float64, np.double
            ], "Currently floating point metric values are not supported. Please change your metric into integer values (and potentially multiply a larger coefficient to keep the precision)."
            metric_save_path = f"{save_path}/{metric_name}/worker{worker_id}_thread{thread_id}/"
            os.makedirs(metric_save_path, exist_ok=True)
            if metric_type == 'single_value_per_sample':
                sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric"
                sample_to_metric_builder = create_mmap_dataset_builder(sample_to_metric_fname, metric_dtype)
                metric_to_sample_fname = f"{metric_save_path}/{metric_name}_metric_to_sample"
                # NOTE(review): shells out to delete stale partial CSVs from a
                # previous run; paths are derived from caller-supplied
                # metric_name/save_path, so keep those trusted.
                os.system(f"rm -rf {metric_to_sample_fname}*")
                metric_to_sample_dict = defaultdict(list)
                metric_results.append({
                    "sample_to_metric_fname": sample_to_metric_fname,
                    "sample_to_metric_builder": sample_to_metric_builder,
                    "metric_to_sample_fname": metric_to_sample_fname,
                    "metric_to_sample_dict": metric_to_sample_dict
                })
            elif metric_type == 'accumulate_value_over_samples':
                metric_value = None
                metric_value_fname = f"{metric_save_path}/{metric_name}_metric_value"
                metric_results.append({"metric_value": metric_value, "metric_value_fname": metric_value_fname})
        return metric_results

    def update_metric_results(self, data, metric_types, metric_functions, metric_results):
        """Apply each metric function to one batch and record the values.

        For 'single_value_per_sample' metrics the per-sample value is appended
        to the sample_to_metric builder and the inverse mapping is buffered,
        flushing to CSV once a value has accumulated >100 sample ids to bound
        memory. 'accumulate_value_over_samples' metrics are summed in place.
        """
        for m_idx in range(len(metric_types)):
            metric_type, metric_function, metric_result = metric_types[m_idx], \
                metric_functions[m_idx], metric_results[m_idx]
            if metric_type == 'single_value_per_sample':
                metric_values = metric_function(data)
                for row in range(metric_values.size()[0]):
                    metric_result["sample_to_metric_builder"].add_item(metric_values[row].reshape(-1))
                    metric_result["metric_to_sample_dict"][metric_values[row].item()].append(
                        data['index'][row][0].item())
                for m_value in metric_result["metric_to_sample_dict"]:
                    if len(metric_result["metric_to_sample_dict"][m_value]) > 100:
                        metric_fname = metric_result["metric_to_sample_fname"]
                        with open(f"{metric_fname}_{m_value}.csv", 'a') as f:
                            writer = csv.writer(f)
                            writer.writerows([metric_result["metric_to_sample_dict"][m_value]])
                        metric_result["metric_to_sample_dict"][m_value] = []
            elif metric_type == 'accumulate_value_over_samples':
                metric_values = metric_function(data)
                if metric_result["metric_value"] is None:
                    metric_result["metric_value"] = metric_values
                else:
                    metric_result["metric_value"].add_(metric_values)

    def finalize_metric_results(self, metric_types, metric_dtypes, metric_results):
        """Flush remaining buffered values and close all mmap builders."""
        for m_idx in range(len(metric_types)):
            metric_type, metric_dtype, metric_result = metric_types[m_idx], \
                metric_dtypes[m_idx], metric_results[m_idx]
            if metric_type == 'single_value_per_sample':
                metric_fname = metric_result["sample_to_metric_fname"]
                close_mmap_dataset_builder(metric_result["sample_to_metric_builder"], metric_fname)
                for m_value in metric_result["metric_to_sample_dict"]:
                    if len(metric_result["metric_to_sample_dict"][m_value]) > 0:
                        metric_fname = metric_result["metric_to_sample_fname"]
                        with open(f"{metric_fname}_{m_value}.csv", 'a') as f:
                            writer = csv.writer(f)
                            writer.writerows([metric_result["metric_to_sample_dict"][m_value]])
                        metric_result["metric_to_sample_dict"][m_value] = []
            elif metric_type == 'accumulate_value_over_samples':
                if metric_result["metric_value"] is not None:
                    metric_value_builder = create_mmap_dataset_builder(metric_result["metric_value_fname"],
                                                                       metric_dtype)
                    metric_value_builder.add_item(metric_result["metric_value"].reshape(-1))
                    close_mmap_dataset_builder(metric_value_builder, metric_result["metric_value_fname"])

    def run_map_helper(self, thread_id):
        """Run the map phase over the dataset slice assigned to one thread."""
        start_idx, end_idx = self.thread_splits[thread_id][0], \
            self.thread_splits[thread_id][1]
        logger.info(f"worker {self.worker_id} thread {thread_id}: start working " \
            f"on data subset {start_idx} to {end_idx}")
        thread_dataset = Subset(self.dataset, list(range(start_idx, end_idx)))
        sampler = BatchSampler(SequentialSampler(thread_dataset), batch_size=self.batch_size, drop_last=False)
        if self.collate_fn is None:
            iterator = iter(DataLoader(thread_dataset, batch_sampler=sampler, num_workers=0, pin_memory=False))
        else:
            iterator = iter(
                DataLoader(thread_dataset,
                           batch_sampler=sampler,
                           num_workers=0,
                           collate_fn=self.collate_fn,
                           pin_memory=False))
        if self.custom_map_init is None:
            metric_results = self.init_metric_results(thread_id, self.metric_names, self.metric_types,
                                                      self.metric_dtypes, self.save_path, self.worker_id)
        else:
            metric_results = self.custom_map_init(thread_id, self.metric_names, self.metric_types, self.metric_dtypes,
                                                  self.save_path, self.worker_id)
        total_sample = len(thread_dataset)
        processed_sample = 0
        start = time.time()
        while True:
            try:
                data = next(iterator)
                if self.custom_map_update is None:
                    self.update_metric_results(data, self.metric_types, self.metric_functions, metric_results)
                else:
                    self.custom_map_update(data, self.metric_types, self.metric_functions, metric_results)
                processed_sample += self.batch_size
                duration = (time.time() - start) / 3600.0
                remain_duration = duration * total_sample / processed_sample - duration
                logger.info(
                    f"worker {self.worker_id} thread {thread_id}: {processed_sample} " \
                    f"out of {total_sample} processed in {duration:.2f} hr, " \
                    f"estimated to finish in {remain_duration:.2f} hr")
            except StopIteration:
                logger.info(f"worker {self.worker_id} thread {thread_id}: reach end of file")
                break
        if self.custom_map_finalize is None:
            self.finalize_metric_results(self.metric_types, self.metric_dtypes, metric_results)
        else:
            self.custom_map_finalize(self.metric_types, self.metric_dtypes, metric_results)
        logger.info(f"worker {self.worker_id} thread {thread_id}: finished")

    def run_map(self):
        """Split the dataset for this worker and run the map phase on each thread."""
        self.worker_splits, self.thread_splits = split_dataset(self.dataset, self.num_workers, self.worker_id,
                                                               self.num_threads)
        if len(self.specific_threads) > 0:
            threads_to_run = self.specific_threads
        else:
            threads_to_run = list(range(self.num_threads))
        if self.num_threads > 1:
            # Index-by-thread-id bug fix: the original appended processes
            # sequentially but indexed the list with the thread id
            # (`p[thread]`), which raises IndexError for any non-contiguous
            # `specific_threads` selection such as [2, 3].
            procs = []
            for thread in threads_to_run:
                proc = Process(target=self.run_map_helper, args=(thread, ))
                proc.start()
                procs.append(proc)
            for proc in procs:
                proc.join()
        else:
            assert self.num_threads == 1
            self.run_map_helper(0)

    def get_metric_value_percentiles(self, metric_name, num_sample_per_value, total_num_samples):
        """Log the 5th, 10th, ... percentile metric values for inspection."""
        logger.info(f"Checking the value percentiles of metric {metric_name}...")
        processed_samples = 0
        current_percentile = 5
        for key in sorted(num_sample_per_value.keys()):
            processed_samples += num_sample_per_value[key]
            # NOTE(review): advances one bucket per key even if a single value
            # spans several percentiles; logging only, so left as-is.
            if processed_samples >= total_num_samples * current_percentile / 100.0:
                logger.info(f"Metric {metric_name} {current_percentile}th percentile: {key}")
                current_percentile += 5

    def merge_gather_map_stats(self, num_workers, num_threads, num_threads_reduce, t_idx_reduce, metric_save_path,
                               metric_name, return_dict):
        """Reduce-side worker: collect unique values and counts from map outputs."""
        results = []
        for w_idx in range(num_workers):
            for t_idx in range(num_threads):
                # Round-robin assignment of (worker, thread) pairs to reduce threads.
                if (w_idx * num_threads + t_idx) % num_threads_reduce == t_idx_reduce:
                    w_metric_save_path = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/"
                    w_sample_to_metric_fname = f"{w_metric_save_path}/{metric_name}_sample_to_metric"
                    w_sample_to_metric = MMapIndexedDataset(w_sample_to_metric_fname, skip_warmup=True)
                    unique_v = list(np.unique(w_sample_to_metric))
                    sample_to_metric_count = len(w_sample_to_metric)
                    logger.info(f"Finished gathering map stats from worker {w_idx} thread {t_idx}.")
                    results.append([unique_v, sample_to_metric_count])
        return_dict[t_idx_reduce] = results

    def merge_sample_to_metric(self, t_idx_reduce, metric_save_path, metric_name, metric_value_dtype,
                               map_worker_thread):
        """Reduce-side worker: concatenate per-thread sample_to_metric files."""
        sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric_thread{t_idx_reduce}"
        sample_to_metric_builder = create_mmap_dataset_builder(sample_to_metric_fname, metric_value_dtype)
        for w_t in map_worker_thread:
            w_metric_save_path = f"{metric_save_path}/worker{w_t[0]}_thread{w_t[1]}/"
            w_sample_to_metric_fname = f"{w_metric_save_path}/{metric_name}_sample_to_metric"
            w_data = MMapIndexedDataset(w_sample_to_metric_fname, skip_warmup=True)
            for row in range(len(w_data)):
                sample_to_metric_builder.add_item(torch.tensor(w_data[row].astype(np.int64), dtype=torch.long))
            logger.info(f"Finished merge_sample_to_metric from worker {w_t[0]} thread {w_t[1]}.")
        close_mmap_dataset_builder(sample_to_metric_builder, sample_to_metric_fname)

    def merge_metric_to_sample(self, t_idx_reduce, metric_save_path, metric_name, sample_idx_dtype, metric_value_dtype,
                               unique_metric_values, num_workers, num_threads):
        """Reduce-side worker: build metric-value -> sample-ids index shards."""
        index_to_sample_fname = f"{metric_save_path}/{metric_name}_index_to_sample_thread{t_idx_reduce}"
        index_to_sample_builder = create_mmap_dataset_builder(index_to_sample_fname, sample_idx_dtype)
        index_to_metric_fname = f"{metric_save_path}/{metric_name}_index_to_metric_thread{t_idx_reduce}"
        index_to_metric_builder = create_mmap_dataset_builder(index_to_metric_fname, metric_value_dtype)
        for unique_v in unique_metric_values:
            samples = []
            for w_idx in range(num_workers):
                for t_idx in range(num_threads):
                    w_metric_save_path = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/"
                    w_metric_to_sample_fname = f"{w_metric_save_path}/{metric_name}_metric_to_sample_{unique_v}.csv"
                    if os.path.isfile(w_metric_to_sample_fname):
                        with open(w_metric_to_sample_fname, 'r') as f:
                            datareader = csv.reader(f)
                            for row in datareader:
                                samples += [int(x) for x in row]
            index_to_sample_builder.add_item(torch.tensor(samples, dtype=torch.long))
            index_to_metric_builder.add_item(torch.tensor([unique_v], dtype=torch.long))
            logger.info(f"Finished reducing metric {metric_name} value {unique_v}.")
        close_mmap_dataset_builder(index_to_sample_builder, index_to_sample_fname)
        close_mmap_dataset_builder(index_to_metric_builder, index_to_metric_fname)

    def merge_map_results(self, dataset, metric_names, metric_types, save_path, num_workers, num_threads,
                          num_threads_reduce):
        """Default reduce phase: merge all per-worker/per-thread map outputs.

        Produces, per 'single_value_per_sample' metric: a merged
        sample_to_metric index, sorted index_to_metric / index_to_sample
        indexes, and a percentile-merged index_to_sample file. Per
        'accumulate_value_over_samples' metric: the element-wise sum of all
        partial metric_value tensors.
        """
        total_num_samples = len(dataset)
        sample_idx_dtype = find_fit_int_dtype(0, total_num_samples - 1)
        logger.info(
            f"Total number of data samples: {total_num_samples}. Will use {sample_idx_dtype} to store the sample indexes."
        )
        for m_idx in range(len(metric_names)):
            metric_name, metric_type = metric_names[m_idx], metric_types[m_idx]
            if metric_type == 'single_value_per_sample':
                metric_save_path = f"{save_path}/{metric_name}/"
                sample_to_metric_count = 0
                unique_metric_values = set([])
                # Stage 1: gather unique values + counts in parallel.
                manager = Manager()
                return_dict = manager.dict()
                p = []
                for t_idx_reduce in range(num_threads_reduce):
                    p.append(
                        Process(target=self.merge_gather_map_stats,
                                args=(
                                    num_workers,
                                    num_threads,
                                    num_threads_reduce,
                                    t_idx_reduce,
                                    metric_save_path,
                                    metric_name,
                                    return_dict,
                                )))
                    p[t_idx_reduce].start()
                for t_idx_reduce in range(num_threads_reduce):
                    p[t_idx_reduce].join()
                for t_idx_reduce in range(num_threads_reduce):
                    results = return_dict[t_idx_reduce]
                    for res in results:
                        unique_metric_values = unique_metric_values.union(set(res[0]))
                        sample_to_metric_count += res[1]
                value_max = max(unique_metric_values)
                value_min = min(unique_metric_values)
                assert sample_to_metric_count == total_num_samples, "The number of samples in map result files are not correct. It's possible that some map worker didn't finish successfully."
                metric_value_dtype = find_fit_int_dtype(value_min, value_max)
                logger.info(
                    f"Metric {metric_name} has values between {value_min} and {value_max}. Will use {metric_value_dtype} to store the metric values."
                )

                # Stage 2: merge sample_to_metric shards in parallel, then
                # concatenate the per-reduce-thread chunks in order.
                map_worker_thread = []
                for w_idx in range(num_workers):
                    for t_idx in range(num_threads):
                        map_worker_thread.append([w_idx, t_idx])
                thread_splits = split_index(0, len(map_worker_thread), num_threads_reduce)
                p = []
                for t_idx_reduce in range(num_threads_reduce):
                    start_idx, end_idx = thread_splits[t_idx_reduce][0], thread_splits[t_idx_reduce][1]
                    p.append(
                        Process(target=self.merge_sample_to_metric,
                                args=(
                                    t_idx_reduce,
                                    metric_save_path,
                                    metric_name,
                                    metric_value_dtype,
                                    map_worker_thread[start_idx:end_idx],
                                )))
                    p[t_idx_reduce].start()
                for t_idx_reduce in range(num_threads_reduce):
                    p[t_idx_reduce].join()

                sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric"
                sample_to_metric_builder = create_mmap_dataset_builder(sample_to_metric_fname, metric_value_dtype)
                for t_idx_reduce in range(num_threads_reduce):
                    chunk_fname = f"{metric_save_path}/{metric_name}_sample_to_metric_thread{t_idx_reduce}"
                    logger.info(f"Merging file {chunk_fname}")
                    sample_to_metric_builder.merge_file_(chunk_fname)
                close_mmap_dataset_builder(sample_to_metric_builder, sample_to_metric_fname)
                sample_to_metric = MMapIndexedDataset(sample_to_metric_fname, skip_warmup=True)
                assert len(sample_to_metric) == total_num_samples

                # Stage 3: build metric_to_sample indexes over the sorted
                # unique values, split across reduce threads.
                unique_metric_values = list(sorted(unique_metric_values))
                thread_splits = split_index(0, len(unique_metric_values), num_threads_reduce)
                p = []
                for t_idx_reduce in range(num_threads_reduce):
                    start_idx, end_idx = thread_splits[t_idx_reduce][0], thread_splits[t_idx_reduce][1]
                    p.append(
                        Process(target=self.merge_metric_to_sample,
                                args=(
                                    t_idx_reduce,
                                    metric_save_path,
                                    metric_name,
                                    sample_idx_dtype,
                                    metric_value_dtype,
                                    unique_metric_values[start_idx:end_idx],
                                    num_workers,
                                    num_threads,
                                )))
                    p[t_idx_reduce].start()
                for t_idx_reduce in range(num_threads_reduce):
                    p[t_idx_reduce].join()
                index_to_sample_fname = f"{metric_save_path}/{metric_name}_index_to_sample"
                index_to_sample_builder = create_mmap_dataset_builder(index_to_sample_fname, sample_idx_dtype)
                index_to_metric_fname = f"{metric_save_path}/{metric_name}_index_to_metric"
                index_to_metric_builder = create_mmap_dataset_builder(index_to_metric_fname, metric_value_dtype)
                for t_idx_reduce in range(num_threads_reduce):
                    chunk_is_fname = f"{metric_save_path}/{metric_name}_index_to_sample_thread{t_idx_reduce}"
                    logger.info(f"Merging file {chunk_is_fname}")
                    index_to_sample_builder.merge_file_(chunk_is_fname)
                    chunk_im_fname = f"{metric_save_path}/{metric_name}_index_to_metric_thread{t_idx_reduce}"
                    logger.info(f"Merging file {chunk_im_fname}")
                    index_to_metric_builder.merge_file_(chunk_im_fname)
                close_mmap_dataset_builder(index_to_sample_builder, index_to_sample_fname)
                close_mmap_dataset_builder(index_to_metric_builder, index_to_metric_fname)
                # Stage 4: percentile-merge index_to_sample into ~100 buckets
                # so curriculum sampling can read coarse-grained chunks.
                num_sample_per_value = {}
                index_to_sample = MMapIndexedDataset(index_to_sample_fname, skip_warmup=True)
                index_to_metric = MMapIndexedDataset(index_to_metric_fname, skip_warmup=True)
                index_to_sample_merged_fname = f"{metric_save_path}/{metric_name}_index_to_sample_percentile_merged"
                index_to_sample_merged_builder = create_mmap_dataset_builder(index_to_sample_merged_fname,
                                                                             sample_idx_dtype)
                for v_idx in range(len(index_to_sample)):
                    if v_idx > 0:
                        assert index_to_metric[v_idx] > index_to_metric[v_idx - 1]
                    num_sample_per_value[index_to_metric[v_idx][0]] = len(index_to_sample[v_idx])
                assert sum(num_sample_per_value.values()) == total_num_samples
                merge_step = max(1, len(index_to_sample) // 100)
                for v_idx in range(0, len(index_to_sample), merge_step):
                    merged_samples = np.copy(
                        np.concatenate(index_to_sample[v_idx:min(len(index_to_sample), (v_idx + merge_step))],
                                       axis=None))
                    index_to_sample_merged_builder.add_item(
                        torch.tensor(merged_samples.astype(np.int64), dtype=torch.long))
                    logger.info(f"Finished merging index_to_sample {v_idx} to {v_idx+merge_step}.")
                close_mmap_dataset_builder(index_to_sample_merged_builder, index_to_sample_merged_fname)
                self.get_metric_value_percentiles(metric_name, num_sample_per_value, total_num_samples)
            elif metric_type == 'accumulate_value_over_samples':
                metric_save_path = f"{save_path}/{metric_name}/"
                metric_value = None
                for w_idx in range(num_workers):
                    for t_idx in range(num_threads):
                        w_metric_save_path = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/"
                        w_metric_value_fname = f"{w_metric_save_path}/{metric_name}_metric_value"
                        w_metric_value = MMapIndexedDataset(w_metric_value_fname, skip_warmup=True)
                        if metric_value is None:
                            metric_value = np.copy(w_metric_value[0])
                        else:
                            metric_value += np.copy(w_metric_value[0])
                value_max = int(max(metric_value))
                value_min = int(min(metric_value))
                metric_value_dtype = find_fit_int_dtype(value_min, value_max)
                metric_value_fname = f"{metric_save_path}/{metric_name}_metric_value"
                metric_value_builder = create_mmap_dataset_builder(metric_value_fname, metric_value_dtype)
                metric_value_builder.add_item(torch.tensor(metric_value.astype(np.int64), dtype=torch.long))
                close_mmap_dataset_builder(metric_value_builder, metric_value_fname)

    def run_reduce(self):
        """Run the reduce phase, delegating to a custom reducer if provided."""
        if self.custom_reduce is None:
            self.merge_map_results(self.dataset, self.metric_names, self.metric_types, self.save_path,
                                   self.num_workers, self.num_threads, self.num_threads_reduce)
        else:
            self.custom_reduce(self.dataset, self.metric_names, self.metric_types, self.save_path, self.num_workers,
                               self.num_threads, self.num_threads_reduce)
b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/data_sampler.py @@ -0,0 +1,338 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +coding=utf-8 + Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +Part of this code was adopted from https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/data/data_samplers.py +""" + +import torch +import os +import numpy as np + +import deepspeed.comm as dist +from deepspeed.utils import logger +from deepspeed.accelerator import get_accelerator +from ..constants import * +from ..curriculum_scheduler import CurriculumScheduler +from .indexed_dataset import MMapIndexedDataset +from .utils import create_mmap_dataset_builder, close_mmap_dataset_builder, find_fit_int_dtype + + +class DeepSpeedDataSampler(object): + + def __init__(self, + data_efficiency_config, + one_epoch_total_samples, + micro_batch_size, + data_parallel_rank, + data_parallel_size, + data_parallel_group, + gradient_accumulation_steps, + global_rank, + drop_last=True): + # Keep a copy of input params for later use. 
+ self.data_efficiency_config = data_efficiency_config + self.one_epoch_total_samples = one_epoch_total_samples + self.index_dtype = find_fit_int_dtype(0, one_epoch_total_samples) + self.total_samples = one_epoch_total_samples * self.data_efficiency_config[DATA_SAMPLING][ + DATA_SAMPLING_NUM_EPOCHS] + self.micro_batch_size = micro_batch_size + self.data_parallel_rank = data_parallel_rank + self.data_parallel_group = data_parallel_group + self.micro_batch_times_data_parallel_size = \ + self.micro_batch_size * data_parallel_size + self.gradient_accumulation_steps = gradient_accumulation_steps + self.global_batch_size = self.micro_batch_times_data_parallel_size * \ + self.gradient_accumulation_steps + self.global_rank = global_rank + self.drop_last = drop_last + self.np_rng = np.random.default_rng(self.data_efficiency_config[DATA_EFFICIENCY_SEED]) + self.state = {} + self.batch = [] + self.consumed_samples = 0 + if self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_ENABLED]: + self.curriculum_step = 0 + self.current_difficulties = {} + self.data_cluster_paths = [] + self.data_cluster_current_position = [] + self.curriculum_schedulers = {} + self.curriculum_index_to_sample = {} + self.curriculum_index_to_metric = {} + self.difficulty_type = {} + self.clustering_type = {} + self.data_1epoch_size = None + if self.global_rank == 0: + self.data_clusters = [] + self.data_cluster_sizes = [] + cluster_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][ + CURRICULUM_LEARNING_CLUSTER_PATH] + if not os.path.exists(cluster_path): + os.makedirs(cluster_path) + for metric in self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS]: + self.curriculum_schedulers[metric] = CurriculumScheduler( + data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS][metric]) + self.difficulty_type[metric] = data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][ + 
CURRICULUM_LEARNING_METRICS][metric][CURRICULUM_LEARNING_DIFFICULTY_TYPE] + self.clustering_type[metric] = data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][ + CURRICULUM_LEARNING_METRICS][metric][CURRICULUM_LEARNING_CLUSTERING_TYPE] + if self.global_rank == 0: + if self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER: + self.curriculum_index_to_sample[metric] = MMapIndexedDataset( + data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS] + [metric][CURRICULUM_LEARNING_SAMPLE_PATH], + skip_warmup=True) + if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED: + self.curriculum_index_to_metric[metric] = MMapIndexedDataset( + data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS] + [metric][CURRICULUM_LEARNING_METRIC_PATH], + skip_warmup=True) + + # Sanity checks. + assert self.total_samples > 0, \ + 'no sample to consume: {}'.format(self.total_samples) + assert self.micro_batch_size > 0 + assert data_parallel_size > 0 + assert self.data_parallel_rank < data_parallel_size, \ + 'data_parallel_rank should be smaller than data size: {}, ' \ + '{}'.format(self.data_parallel_rank, data_parallel_size) + + def __len__(self): + return self.total_samples + + def set_custom_curriculum_learning_schedule(self, schedule_func_dict): + for metric in self.curriculum_schedulers: + if metric in schedule_func_dict: + self.curriculum_schedulers[metric].set_custom_get_difficulty(schedule_func_dict[metric]) + + def get_start_end_idx(self): + start_idx = self.data_parallel_rank * self.micro_batch_size + end_idx = start_idx + self.micro_batch_size + return start_idx, end_idx + + def get_sample_based_on_metric_value(self, metric, value_start, value_end): + new_samples = None + for row in range(len(self.curriculum_index_to_sample[metric])): + if self.curriculum_index_to_metric[metric][row] <= value_end and self.curriculum_index_to_metric[metric][ + row] > value_start: + row_samples = 
np.copy(self.curriculum_index_to_sample[metric][row]) + new_samples = row_samples if new_samples is None else np.concatenate( + (new_samples, row_samples), axis=None) + return new_samples + + def get_sample_based_on_metric_percentile(self, metric, percentile_start, percentile_end): + new_samples = None + if self.data_1epoch_size is None: + self.data_1epoch_size = sum(len(x) for x in self.curriculum_index_to_sample[metric]) + max_percentile = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS][ + metric][CURRICULUM_LEARNING_MAX_DIFFICULTY] + sample_per_percentile = self.data_1epoch_size // max_percentile + start_count = sample_per_percentile * percentile_start + end_count = sample_per_percentile * percentile_end + if percentile_end == max_percentile: + end_count = self.data_1epoch_size + current_count = 0 + for row in range(len(self.curriculum_index_to_sample[metric])): + row_size = len(self.curriculum_index_to_sample[metric][row]) + if current_count + row_size > start_count: + row_start = max(0, start_count - current_count) + if current_count + row_size <= end_count: + row_end = row_size + else: + row_end = end_count - current_count + row_samples = np.copy(self.curriculum_index_to_sample[metric][row][row_start:row_end]) + new_samples = row_samples if new_samples is None else np.concatenate( + (new_samples, row_samples), axis=None) + current_count += row_size + if current_count >= end_count: + break + return new_samples + + def get_new_cluster(self, previous_difficulties): + cluster_fname = CURRICULUM_LEARNING_CLUSTER_PREFIX + for metric in self.curriculum_schedulers: + cluster_fname = f"{cluster_fname}_{metric}{self.current_difficulties[metric]}" + cluster_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][ + CURRICULUM_LEARNING_CLUSTER_PATH] + cluster_path = f"{cluster_path}/{cluster_fname}" + if self.global_rank == 0: + new_cluster = None + need_clustering = 0 + for metric in self.clustering_type: + if 
self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER: + need_clustering += 1 + if need_clustering > 1: + for metric in self.curriculum_schedulers: + if self.clustering_type[metric] == CURRICULUM_LEARNING_SINGLE_CLUSTER: + metric_cluster = np.arange(start=0, + stop=self.one_epoch_total_samples, + step=1, + dtype=self.index_dtype) + else: + if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED: + metric_cluster = self.get_sample_based_on_metric_value(metric, float('-inf'), + self.current_difficulties[metric]) + elif self.difficulty_type[metric] == CURRICULUM_LEARNING_PERCENTILE_BASED: + metric_cluster = self.get_sample_based_on_metric_percentile( + metric, 0, self.current_difficulties[metric]) + new_cluster = metric_cluster if new_cluster is None else \ + np.intersect1d(new_cluster, metric_cluster, assume_unique=True) + for cluster in self.data_clusters: + new_cluster = np.setdiff1d(new_cluster, cluster[0], assume_unique=True) + else: + if len(self.data_clusters) == 0: + new_cluster = np.arange(start=0, stop=self.one_epoch_total_samples, step=1, dtype=self.index_dtype) + for metric in self.curriculum_schedulers: + if self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER: + if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED: + new_cluster = self.get_sample_based_on_metric_value(metric, previous_difficulties[metric], + self.current_difficulties[metric]) + elif self.difficulty_type[metric] == CURRICULUM_LEARNING_PERCENTILE_BASED: + new_cluster = self.get_sample_based_on_metric_percentile( + metric, previous_difficulties[metric], self.current_difficulties[metric]) + if new_cluster is not None and len(new_cluster) > 0: + logger.info( + f"new data cluster (previous_difficulties {previous_difficulties}, current_difficulties {self.current_difficulties}) with size {len(new_cluster)} generated." 
+ ) + self.np_rng.shuffle(new_cluster) + cluster_builder = create_mmap_dataset_builder(cluster_path, self.index_dtype) + cluster_builder.add_item_numpy(new_cluster) + close_mmap_dataset_builder(cluster_builder, cluster_path) + self.data_clusters.append(MMapIndexedDataset(cluster_path, skip_warmup=True)) + self.data_cluster_sizes.append(len(self.data_clusters[-1][0])) + else: + logger.info( + f"new data cluster (previous_difficulties {previous_difficulties}, current_difficulties {self.current_difficulties}) has no matched data thus skipped." + ) + dist.barrier(group=self.data_parallel_group) + if os.path.isfile(f"{cluster_path}.bin"): + self.data_cluster_paths.append(cluster_fname) + self.data_cluster_current_position.append(0) + + def sample_from_clusters(self): + num_clusters = len(self.data_clusters) + weight_sum = sum(self.data_cluster_sizes) + weights = [x / weight_sum for x in self.data_cluster_sizes] + samples = self.np_rng.choice(num_clusters, self.global_batch_size, replace=True, p=weights) + samples = np.bincount(samples, minlength=num_clusters) + return samples + + def reshuffle_clusters(self, cidx): + cluster_fname = self.data_cluster_paths[cidx] + cluster_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][ + CURRICULUM_LEARNING_CLUSTER_PATH] + cluster_path = f"{cluster_path}/{cluster_fname}" + cluster = np.copy(self.data_clusters[cidx][0]) + self.np_rng.shuffle(cluster) + cluster_builder = create_mmap_dataset_builder(cluster_path, self.index_dtype) + cluster_builder.add_item_numpy(cluster) + close_mmap_dataset_builder(cluster_builder, cluster_path) + self.data_clusters[cidx] = MMapIndexedDataset(cluster_path, skip_warmup=True) + + def get_sample_from_cluster(self, cidx, num_samples): + start_idx = self.data_cluster_current_position[cidx] + samples = list(np.copy(self.data_clusters[cidx][0][start_idx:(start_idx + num_samples)])) + self.data_cluster_current_position[cidx] += num_samples + if len(samples) < num_samples: + 
num_samples_remained = num_samples - len(samples) + logger.info(f"reshuffling cluster {cidx}.") + self.reshuffle_clusters(cidx) + samples += list(np.copy(self.data_clusters[cidx][0][:num_samples_remained])) + self.data_cluster_current_position[cidx] = num_samples_remained + return samples + + def get_next_global_batch(self): + if self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_ENABLED]: + self.curriculum_step += 1 + new_cluster = False + previous_difficulties = {} + for metric in self.curriculum_schedulers: + next_difficulty = self.curriculum_schedulers[metric].update_difficulty(self.curriculum_step) + if metric not in self.current_difficulties or \ + next_difficulty != self.current_difficulties[metric]: + new_cluster = True + if metric in self.current_difficulties: + previous_difficulties[metric] = self.current_difficulties[metric] + else: + if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED: + previous_difficulties[metric] = float('-inf') + elif self.difficulty_type[metric] == CURRICULUM_LEARNING_PERCENTILE_BASED: + previous_difficulties[metric] = 0 + self.current_difficulties[metric] = next_difficulty + if new_cluster: + self.get_new_cluster(previous_difficulties) + if self.global_rank == 0: + samples_per_cluster = self.sample_from_clusters() + batch = [] + for cidx in range(len(samples_per_cluster)): + batch += self.get_sample_from_cluster(cidx, samples_per_cluster[cidx]) + self.np_rng.shuffle(batch) + batch = torch.tensor(batch, device=get_accelerator().current_device_name(), dtype=torch.long).view(-1) + else: + batch = torch.empty(self.global_batch_size, + device=get_accelerator().current_device_name(), + dtype=torch.long) + dist.broadcast(batch, 0, group=self.data_parallel_group) + self.batch = batch.tolist() + + def __iter__(self): + while self.consumed_samples <= self.total_samples: + if len(self.batch) == 0: + self.get_next_global_batch() + current_batch = 
self.batch[:self.micro_batch_times_data_parallel_size] + self.batch = self.batch[self.micro_batch_times_data_parallel_size:] + if len(current_batch) == self.micro_batch_times_data_parallel_size or \ + (len(current_batch) > 0 and not self.drop_last): + start_idx, end_idx = self.get_start_end_idx() + yield current_batch[start_idx:end_idx] + self.consumed_samples += len(current_batch) + current_batch = [] + + def state_dict(self): + return { + CURRICULUM_LEARNING_BATCH: self.batch, + CURRICULUM_LEARNING_CONSUMED_SAMPLES: self.consumed_samples, + CURRICULUM_LEARNING_STEP: self.curriculum_step, + CURRICULUM_LEARNING_CURRENT_DIFFICULTIES: self.current_difficulties, + CURRICULUM_LEARNING_DATA_CLUSTER_PATHS: self.data_cluster_paths, + CURRICULUM_LEARNING_DATA_CLUSTER_CURRENT_POSITION: self.data_cluster_current_position, + CURRICULUM_LEARNING_NP_RNG_STATE: np.random.get_state() + } + + def load_state_dict(self, state_dict): + self.batch = state_dict[CURRICULUM_LEARNING_BATCH] + self.consumed_samples = state_dict[CURRICULUM_LEARNING_CONSUMED_SAMPLES] + self.curriculum_step = state_dict[CURRICULUM_LEARNING_STEP] + self.current_difficulties = state_dict[CURRICULUM_LEARNING_CURRENT_DIFFICULTIES] + self.data_cluster_paths = state_dict[CURRICULUM_LEARNING_DATA_CLUSTER_PATHS] + self.data_cluster_current_position = state_dict[CURRICULUM_LEARNING_DATA_CLUSTER_CURRENT_POSITION] + np.random.set_state(state_dict[CURRICULUM_LEARNING_NP_RNG_STATE]) + cluster_root_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][ + CURRICULUM_LEARNING_CLUSTER_PATH] + # Backward compatibility: previously data_cluster_paths were stored as + # absolute paths. Now we changed it to just the file name so that even + # if user moved the cluster files, the checkpoint loading still works + # as long as user set the correct new CURRICULUM_LEARNING_CLUSTER_PATH + # in deepspeed json config. 
+ for idx in range(len(self.data_cluster_paths)): + if '/' in self.data_cluster_paths[idx]: + self.data_cluster_paths[idx] = self.data_cluster_paths[idx].split('/')[-1] + if self.global_rank == 0: + for cluster_fname in self.data_cluster_paths: + cluster_path = f"{cluster_root_path}/{cluster_fname}" + self.data_clusters.append(MMapIndexedDataset(cluster_path, skip_warmup=True)) + self.data_cluster_sizes.append(len(self.data_clusters[-1][0])) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/__init__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08363624f96e596fbc53879b571ef6b3172c38fb Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/fused_optimizer.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/fused_optimizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12f87f479f7091b96d8e6dfbf4604151608dacd0 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/fused_optimizer.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/loss_scaler.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/loss_scaler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e112d1b3f12cf33deef86aad32f92bb155f3ad17 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/loss_scaler.cpython-310.pyc differ diff --git 
a/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/unfused_optimizer.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/unfused_optimizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff921ac7a40ce4d9a208304cdef55365caa08924 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/unfused_optimizer.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/__init__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae02914ea48392ac54928b0f69a74db694d80593 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/adam.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..578fa7e2bd63a8e0f0673f12b8e9f36836897128 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/adam.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/zoadam.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/zoadam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4ffaba77df8a31780c61d4152662bc09577d662 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/zoadam.cpython-310.pyc differ diff --git 
a/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/adam.py b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/adam.py new file mode 100644 index 0000000000000000000000000000000000000000..ae3e5f573850d690c5f9f961eba300f30dcdf282 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/adam.py @@ -0,0 +1,306 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import types +import torch +import numpy as np +from deepspeed.accelerator import get_accelerator +from deepspeed.runtime.utils import required_torch_version +from deepspeed import comm as dist + + +class OnebitAdam(torch.optim.Optimizer): + """Implements the 1-bit Adam algorithm. Currently GPU-only. + For usage example please see https://www.deepspeed.ai/tutorials/onebit-adam/ + For technical details please read https://arxiv.org/abs/2102.02888 + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups. + lr (float, optional): learning rate. (default: 1e-3) + freeze_step (int, optional): Number of steps for warmup (uncompressed) + stage before we start using compressed communication. (default 100000) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square. (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability. (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) NOT SUPPORTED in 1-bit Adam! 
+ eps_inside_sqrt (boolean, optional): in the 'update parameters' step, + adds eps to the bias-corrected second moment estimate before + evaluating square root instead of adding it to the square root of + second moment estimate as in the original paper. (default: False) + cuda_aware (boolean, required): Set True if the underlying MPI implementation + supports CUDA-Aware communication. (default: False) + comm_backend_name (string, optional): Set to 'mpi' if needed. (default: 'nccl') + .. _Adam\\: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + """ + + def __init__(self, + params, + deepspeed=None, + lr=1e-3, + freeze_step=100000, + bias_correction=True, + betas=(0.9, 0.999), + eps=1e-8, + eps_inside_sqrt=False, + weight_decay=0., + max_grad_norm=0., + amsgrad=False, + cuda_aware=False, + comm_backend_name='nccl'): + + if amsgrad: + raise RuntimeError('1-bit Adam does not support the AMSGrad variant.') + + defaults = dict(lr=lr, + bias_correction=bias_correction, + betas=betas, + eps=eps, + weight_decay=weight_decay, + max_grad_norm=max_grad_norm) + + super(OnebitAdam, self).__init__(params, defaults) + self.eps_mode = 0 if eps_inside_sqrt else 1 + self.comm_time = 0.0 + self.step_time = 0.0 + self.ave_step = 1 + self.bk_time = 0.0 + + self.deepspeed = deepspeed + self.adam_freeze_key = False + self.initialize = False + self.freeze_step = freeze_step + self.cuda_aware = cuda_aware + self.using_pipeline = False + + self.comm_backend_name = comm_backend_name + + assert dist.is_initialized(), "Please initialize the torch distributed backend." + # Empty initializer. Set handle based on the comm backend as follows. + self.comm_backend_handle = None + if self.comm_backend_name == 'nccl': + assert ( + required_torch_version(min_version=1.8) + ), "Please use torch 1.8 or greater to enable NCCL backend in 1-bit Adam. 
Alternatively, please specify 'mpi' as the 'comm_backend_name' in config file to proceed with the MPI backend" + from deepspeed.runtime.comm.nccl import NcclBackend + self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce') + self.comm_backend_handle = NcclBackend(self.deepspeed.mpu) + elif self.comm_backend_name == 'mpi': + from deepspeed.runtime.comm.mpi import MpiBackend + self.comm_backend_handle = MpiBackend(cuda_aware) + elif self.comm_backend_name == 'hccl': + from deepspeed.runtime.comm.hccl import HcclBackend + self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce') + self.comm_backend_handle = HcclBackend(self.deepspeed.mpu) + self.size = self.comm_backend_handle.size + + self.divider = int(self.size * 8 / np.gcd(self.size, 8)) + + def step(self, closure=None, grads=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + grads (list of tensors, optional): weight gradient to use for the + optimizer update. If gradients have type torch.half, parameters + are expected to be in type torch.float. (default: None) + output params (list of tensors, optional): A reduced precision copy + of the updated weights written out in addition to the regular + updated weights. Have to be of same type as gradients. (default: None) + scale (float, optional): factor to divide gradient tensor values + by before applying to weights. 
(default: 1) + """ + loss = None + if closure is not None: + loss = closure() + + gather_time = 0 + allgather_time = 0 + all_time = 0 + + if self.adam_freeze_key is False: + v_diff_buffer = 0.0 + + if grads is None: + grads_group = [None] * len(self.param_groups) + # backward compatibility + # assuming a list/generator of parameter means single group + elif isinstance(grads, types.GeneratorType): + grads_group = [grads] + elif type(grads[0]) != list: + grads_group = [grads] + else: + grads_group = grads + + for group, grads_this_group in zip(self.param_groups, grads_group): + if grads_this_group is None: + grads_this_group = [None] * len(group['params']) + + bias_correction = 1 if group['bias_correction'] else 0 + + for p, grad in zip(group['params'], grads_this_group): + if p.grad is None and grad is None: + continue + if grad is None: + grad = p.grad.data + if grad.is_sparse: + raise RuntimeError('1-bit Adam does not support sparse gradients') + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p.data) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p.data) + + if not self.initialize or (self.adam_freeze_key and 'worker_error' not in state.keys()): + state['tensor_size'] = torch.numel(p.data) + state['corrected_tensor_size'] = state['tensor_size'] + + if state['tensor_size'] % (self.size * self.divider) != 0: + state['corrected_tensor_size'] += ((self.size * self.divider) - (state['tensor_size'] % + (self.size * self.divider))) + state['server_chunk_size'] = state['corrected_tensor_size'] // self.size + get_accelerator().empty_cache() + state['worker_error'] = torch.zeros(state['corrected_tensor_size'], device=p.device) + state['server_error'] = torch.zeros(state['server_chunk_size'], device=p.device) + get_accelerator().empty_cache() + self.adam_freeze_key = True + if not 
self.initialize and dist.get_rank() == 0: + print("Cupy Buffers Initialized Successfully.") + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + + state['step'] += 1 + + if self.adam_freeze_key is False: + exp_avg.mul_(beta1).add_(1 - beta1, grad) + exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) + grad = None + if self.initialize: + update = exp_avg / (exp_avg_sq.sqrt() + group['eps']) + + else: + if 'non_freeze' in group.keys() and group['non_freeze'] is True: + dist.all_reduce(grad) + grad.mul_(1 / dist.get_world_size()) + exp_avg.mul_(beta1).add_(1 - beta1, grad) + exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) + grad = None + else: + if self.initialize is True: + exp_avg.mul_(beta1).add_(1 - beta1, grad) + grad = None + + if self.size > 1: + exp_avg.set_( + self.comm_backend_handle.compressed_allreduce(exp_avg, state['worker_error'], + state['server_error'], + self.deepspeed.local_rank)) + # Because 1-bit compression cannot represent exact zero, it is required to + # provide a momentum mask for those params that have constant exact zeros in their + # momentums, otherwise the compression error would keep accumulating. + # For example, for BERT pre-training seq 128, bert.embeddings.position_embeddings.weight + # always have exact zeros in its momentum for row 129 to 512, because it only + # learns up to seq length 128 while the model supports up to 512 seq length. + # (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.) 
+ if 'exp_avg_mask' in group: + if exp_avg.device != group['exp_avg_mask'].device: + group['exp_avg_mask'] = group['exp_avg_mask'].to(device=exp_avg.device) + exp_avg.mul_(group['exp_avg_mask']) + + if self.initialize: + update = exp_avg / (exp_avg_sq.sqrt() + group['eps']) + + if self.initialize: + if group['weight_decay'] > 0.0: + update += group['weight_decay'] * p.data + with torch.no_grad(): + p.add_(-group['lr'] * update) + + if not self.initialize: + print('Pop out errors', flush=True) + state.pop('worker_error') + state.pop('server_error') + + if not self.initialize: + self.adam_freeze_key = False + self.initialize = True + print(f"Finished the initialization step at rank {dist.get_rank()}") + return loss + + if self.adam_freeze_key is False: + if state['step'] >= self.freeze_step: + print('OnebitAdam - starting compressed communication') + self.adam_freeze_key = True + if self.using_pipeline: + self.deepspeed.pipeline_enable_backward_allreduce = False + else: + self.deepspeed.enable_backward_allreduce = False + + return loss + + def load_state_dict(self, state_dict): + """ + Overrides load_state_dict() to add special handling when loading checkpoints + """ + # Because at different stage exp_avg_mask may change (e.g., + # BERT pre-training seqlen 128 and 512 ), we don't use the exp_avg_mask + # in checkpoints but always use the one user provided in training script. + # (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.) 
+ # Thus here we keep the exp_avg_mask unchanged when loading checkpoint + for i, group in enumerate(self.param_groups): + if 'exp_avg_mask' in group: + state_dict['param_groups'][i]['exp_avg_mask'] = group['exp_avg_mask'] + elif 'exp_avg_mask' not in group and 'exp_avg_mask' in state_dict['param_groups'][i]: + state_dict['param_groups'][i].pop('exp_avg_mask') + super().load_state_dict(state_dict) + if self.state[self.param_groups[0]['params'][0]]['step'] < self.freeze_step: + if dist.get_rank() == 0: + print("Checkpoint loaded and OnebitAdam warmup stage starts/continues.") + if self.adam_freeze_key is True: + self.adam_freeze_key = False + if self.using_pipeline: + self.deepspeed.pipeline_enable_backward_allreduce = True + else: + self.deepspeed.enable_backward_allreduce = True + else: + if dist.get_rank() == 0: + print("Checkpoint loaded and OnebitAdam compression stage starts/continues.") + if self.adam_freeze_key is False: + self.adam_freeze_key = True + if self.using_pipeline: + self.deepspeed.pipeline_enable_backward_allreduce = False + else: + self.deepspeed.enable_backward_allreduce = False + # We reset the compression errors when loading checkpoints for 3 reasons: + # 1) The worker and server error at each GPU are distinct, so in current implementation + # only rank 0's errors are saved in the checkpoint. Thus we have to reset the errors. + # If we want to save them correctly we need O(num_gpu*model_size) memory in order to + # gather all the error, which is a very large memory requirement. It's possible to save + # them in a distributed way, but it will make the checkpoint saving/loading much more complicated. + # 2) Even if we are able to save the compression errors correctly, you need to have the + # exact same number of GPUs in order to load them correctly. + # 3) We verified on BERT pre-training that occasionally resetting the compression error + # at checkpoint loading does not affect the convergence. 
+ # However, please avoid frequent checkpoint loading which could break the error + # compensation mechanism thus affect the convergence. + for group in self.param_groups: + for p in group['params']: + if 'worker_error' in self.state[p]: + self.state[p].pop('worker_error') + if 'server_error' in self.state[p]: + self.state[p].pop('server_error') diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/bin/protoc b/infer_4_47_1/lib/python3.10/site-packages/torch/bin/protoc new file mode 100644 index 0000000000000000000000000000000000000000..f23bc1bcd86573d07a8fbaa6de1c47d2aac93d83 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/bin/protoc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3390873b2da56c1397adec3728f1588c51e182f15b123d3b4d4f248d31c1f4da +size 5330888 diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/check_kernel_launches.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/check_kernel_launches.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49a1b84b8099e1054da3642f9e457a95b8d2a5a1 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/check_kernel_launches.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dtype.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dtype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1355fe90ba7b30a9b3618ea6c5ff841886df18ef Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dtype.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_modules.cpython-310.pyc 
b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_modules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0446146a46000b8a34194cbedcf00fe3b9efb62 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_modules.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/custom_op_db.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/custom_op_db.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26acd8e8ca0a6b70de5c0fdb9e5b337b2e407b33 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/custom_op_db.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/custom_tensor.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/custom_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9dfbbc07a4b454689c334f7dad2cb9ee8c6208fd Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/custom_tensor.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/quantization_torch_package_models.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/quantization_torch_package_models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10c2189e5d8d0311774ad3201d4a60d6346a2ed7 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/quantization_torch_package_models.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/data/__init__.py 
b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1e3572cfc4c6a0ddc3d8fa2e1b056415204acdfa --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/data/__init__.py @@ -0,0 +1 @@ +# mypy: ignore-errors diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/__init__.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73dd8f87cbd98e01ad8dda4897451cb927d40f29 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network1.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network1.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd1110903457e8e29937bbdce44c9f70b2f045e6 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network1.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network2.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce737e4c4de7e03cca4490fa208da88a5d186565 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network2.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/__init__.cpython-310.pyc 
b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5eb5660b76e5b4a4148cdd2a1afebee228e6ba80 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/checkpoint_utils.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/checkpoint_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9f96967729225c353708dd0b73dbbfa1f2cb000 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/checkpoint_utils.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/common_state_dict.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/common_state_dict.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..874162bcf4fb9e107b14b5309a38ce675865574b Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/common_state_dict.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/ddp_under_dist_autograd_test.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/ddp_under_dist_autograd_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0aa79794dc8e46cd76c676a28132c015af226205 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/ddp_under_dist_autograd_test.cpython-310.pyc 
differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_utils.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22be69daa78cd1ac17be4d9d83d72bddd9f55828 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_utils.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/fake_pg.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/fake_pg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..783c10e7f1aeb3f7099a9b53c51d6dd7e60d8b78 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/fake_pg.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/multi_threaded_pg.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/multi_threaded_pg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a9a7578e7150b0a43514d30bf68db700c8bc90d Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/multi_threaded_pg.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/rpc_utils.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/rpc_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00f9519481e27b7f932478992f9b3be5c60562ff Binary files /dev/null and 
b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/rpc_utils.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/__init__.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f095085b05f189144f3a86ed6be42aada9f94527 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/test_common.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cedbb122377dceb5be3b03cde97a48e4330ab091 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/test_common.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_st_common.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_st_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1610cc0a17145fb0e0ff77e04a1e5c6dd6400eca Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_st_common.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__init__.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__init__.py new 
file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/__init__.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..401a68e00792d9ec7bac4759dcdb81e0858c85e3 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/common_dtensor.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/common_dtensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be6c6c7a95b47cca0bc0548a9621fd04163925c2 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/common_dtensor.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/common_dtensor.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/common_dtensor.py new file mode 100644 index 0000000000000000000000000000000000000000..8514be7979190ced204c2286ec7fac454a63d78e --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/common_dtensor.py @@ -0,0 +1,548 @@ +# mypy: allow-untyped-defs + +# Copyright (c) Meta Platforms, Inc. 
and affiliates + +import itertools +import sys +from dataclasses import dataclass +from functools import wraps +from typing import Any, Callable, cast, Dict, Iterator, List, Sequence, Tuple, TypeVar + +import torch +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F + +from torch.distributed._tensor import DeviceMesh, distribute_tensor, Replicate, Shard +from torch.distributed._tensor.placement_types import Placement +from torch.distributed.tensor.parallel import ( + ColwiseParallel, + parallelize_module, + PrepareModuleInput, + RowwiseParallel, + SequenceParallel, +) +from torch.testing._internal.common_distributed import ( + MultiProcessTestCase, + MultiThreadedTestCase, + skip_if_lt_x_gpu, + run_subtests, + TEST_SKIPS, +) + +from torch.utils._pytree import tree_flatten, tree_unflatten, TreeSpec + +DEVICE_TYPE = ( + "cuda" if torch.cuda.is_available() and torch.cuda.device_count() > 1 else "cpu" +) + +NUM_DEVICES = 4 + +# We use this as a proxy for "multiple GPUs exist" +if torch.cuda.is_available() and torch.cuda.device_count() > 1: + # when we actually have multiple GPUs, relax the requirement to smaller counts. 
+ NUM_DEVICES = min(NUM_DEVICES, torch.cuda.device_count()) + +T = TypeVar("T") + + +# simple RMSNorm layer for testing +class RMSNormPython(torch.nn.Module): + def __init__(self, dim: int, eps: float = 1e-6): + super().__init__() + self.eps = eps + self.weight = torch.nn.Parameter(torch.ones(dim)) + + def _norm(self, x): + return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) + + def forward(self, x): + output = self._norm(x) + return output * self.weight + + +class MLPModule(nn.Module): + def __init__(self, device, bias: bool = True): + super().__init__() + torch.manual_seed(5) + self.net1 = nn.Linear(10, 16, bias=bias, device=device) + self.relu = nn.ReLU() + self.net2 = nn.Linear(16, 10, bias=bias, device=device) + + def forward(self, x): + return self.net2(self.relu(self.net1(x))) + + def reset_parameters(self): + self.net1.reset_parameters() + self.net2.reset_parameters() + + +class MLPStacked(nn.Module): + def __init__(self, device, n_layers: int = 2): + super().__init__() + self.layers = nn.ModuleList([MLPModule(device) for i in range(n_layers)]) + + def forward(self, x): + for layer in self.layers: + x = layer(x) + return x + + +@dataclass +class ModelArgs: + n_layers: int = 2 + vocab_size: int = 8 + max_seq_len: int = 16 + dim: int = 16 + n_heads: int = 4 + dropout_p: float = 0.1 + use_attn_mask: bool = True + weight_tying: bool = True + checkpoint_activations: bool = False + + +class Attention(nn.Module): + def __init__(self, args: ModelArgs): + super().__init__() + assert args.dim % args.n_heads == 0 + self.head_dim = args.dim // args.n_heads + self.n_heads = args.n_heads + self.dropout_p = args.dropout_p + self.resid_dropout = nn.Dropout(args.dropout_p) + self.use_attn_mask = args.use_attn_mask + + self.wq = nn.Linear(args.dim, args.dim, bias=False) + self.wk = nn.Linear(args.dim, args.dim, bias=False) + self.wv = nn.Linear(args.dim, args.dim, bias=False) + self.wo = nn.Linear(args.dim, args.dim, bias=False) + + def forward(self, x): + 
bsz, seq_len, _ = x.size() + queries, keys, values = self.wq(x), self.wk(x), self.wv(x) + queries = queries.view(bsz, seq_len, self.n_heads, self.head_dim) + keys = keys.view(bsz, seq_len, self.n_heads, self.head_dim) + values = values.view(bsz, seq_len, self.n_heads, self.head_dim) + + queries = queries.transpose(1, 2) # (bsz, n_heads, seq_len, head_dim) + keys = keys.transpose(1, 2) # (bsz, n_heads, seq_len, head_dim) + values = values.transpose(1, 2) # (bsz, n_heads, seq_len, head_dim) + + output = F.scaled_dot_product_attention( + queries, + keys, + values, + None, + self.dropout_p if self.training else 0, + self.use_attn_mask, + ) + output = output.transpose(1, 2).contiguous().view(bsz, seq_len, -1) + return self.resid_dropout(self.wo(output)) + + +class FeedForward(nn.Module): + def __init__(self, dim, hidden_dim, dropout_p): + super().__init__() + self.w1 = nn.Linear(dim, hidden_dim) + self.gelu = nn.GELU() + self.w2 = nn.Linear(hidden_dim, dim) + self.resid_dropout = nn.Dropout(dropout_p) + + def forward(self, x): + return self.resid_dropout(self.w2(self.gelu(self.w1(x)))) + + +class TransformerBlock(nn.Module): + def __init__(self, args: ModelArgs): + super().__init__() + self.attention_norm = nn.LayerNorm(args.dim) + self.attention = Attention(args) + self.ffn_norm = nn.LayerNorm(args.dim) + self.feed_forward = FeedForward( + args.dim, hidden_dim=4 * args.dim, dropout_p=args.dropout_p + ) + + def forward(self, x): + h = x + self.attention(self.attention_norm(x)) + out = h + self.feed_forward(self.ffn_norm(h)) + return out + + +# A toy transformer model, partly inspired by the nanoGPT model: +# https://github.com/karpathy/nanoGPT. 
+class Transformer(nn.Module): + def __init__(self, args: ModelArgs): + super().__init__() + assert args.vocab_size is not None + assert args.max_seq_len is not None + self.model_args = args + self.max_seq_len = args.max_seq_len + self.tok_embeddings = nn.Embedding(args.vocab_size, args.dim) + self.pos_embeddings = nn.Embedding(args.max_seq_len, args.dim) + self.dropout = nn.Dropout(args.dropout_p) + self.layers = nn.ModuleList() + for _ in range(args.n_layers): + self.layers.append(TransformerBlock(args)) + self.norm = nn.LayerNorm(args.dim) + self.output = nn.Linear(args.dim, args.vocab_size, bias=False) + if args.weight_tying: + self.output.weight = self.tok_embeddings.weight + self.checkpoint_activations = args.checkpoint_activations + + def forward(self, tokens): + _bsz, seq_len = tokens.size() + assert seq_len <= self.max_seq_len + h = self.tok_embeddings(tokens) + pos = torch.arange(0, seq_len, device=tokens.device) + p = self.pos_embeddings(pos) # positional embeddings of shape (seq_len, dim) + h = h + p + h = self.dropout(h) + for layer in self.layers: + if self.checkpoint_activations: + h = torch.utils.checkpoint.checkpoint(layer, h, use_reentrant=False) + else: + h = layer(h) + h = self.norm(h) + output = self.output(h).float() + return output + + @staticmethod + def parallelize( + module: "Transformer", device_mesh: DeviceMesh, use_seq_parallel: bool, local_output_for_attn: bool = False + ) -> nn.Module: + assert isinstance(module, Transformer), f"Requires Transformer but got {module}" + # Parallelize the root submodules. 
+ if use_seq_parallel: + root_plan = { + "tok_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Shard(1)), + "pos_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Shard(0)), + "norm": SequenceParallel(), + } + else: + root_plan = { + "tok_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Replicate()), + "pos_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Replicate()), + } + + module_tp = parallelize_module(module, device_mesh, root_plan) + # Parallelize the attention and feed forward submodules. + for layer in module_tp.layers: + layer_parallelize_plan = {} + if use_seq_parallel: + layer_parallelize_plan["attention"] = PrepareModuleInput( + input_layouts=Shard(1), + desired_input_layouts=Replicate(), + ) + # shard the RMSNorms + layer_parallelize_plan["attention_norm"] = SequenceParallel() + layer_parallelize_plan["ffn_norm"] = SequenceParallel() + layer_parallelize_plan["attention.wq"] = ColwiseParallel(use_local_output=local_output_for_attn) + layer_parallelize_plan["attention.wk"] = ColwiseParallel(use_local_output=local_output_for_attn) + layer_parallelize_plan["attention.wv"] = ColwiseParallel(use_local_output=local_output_for_attn) + layer_parallelize_plan["attention.wo"] = ( + RowwiseParallel(output_layouts=Shard(1)) + if use_seq_parallel + else RowwiseParallel() + ) + + layer_parallelize_plan["feed_forward.w1"] = ( + ColwiseParallel(input_layouts=Shard(1)) + if use_seq_parallel + else ColwiseParallel() + ) + layer_parallelize_plan["feed_forward.w2"] = ( + RowwiseParallel(output_layouts=Shard(1)) + if use_seq_parallel + else RowwiseParallel() + ) + + parallelize_module(layer, device_mesh, layer_parallelize_plan) + + # Parallelize the output submodule. If weight tying is enabled, we need to + # make sure output.weight is sharded consistently as tok_embeddings.weight, + # at the cost of the all_reduce operation using RowwiseParallel. 
+ output_parallelize_plan = ( + ColwiseParallel( + input_layouts=Shard(1), + output_layouts=Replicate(), + ) + if use_seq_parallel + else ColwiseParallel(output_layouts=Replicate()) + ) + parallelize_module(module_tp.output, device_mesh, output_parallelize_plan) + + if local_output_for_attn: + for layer in module_tp.layers: + layer.attention.n_heads = module_tp.model_args.n_heads // device_mesh.size() + + # Manually set output.weight so that parameters and gradients are shared. + if module_tp.model_args.weight_tying: + module_tp.output.weight = module_tp.tok_embeddings.weight + + return module_tp + + +def skip_unless_torch_gpu(method: T) -> T: + """ + Test decorator which skips the test unless there's a GPU available to torch. + + >>> # xdoctest: +SKIP + >>> @skip_unless_torch_gpu + >>> def test_some_method(self) -> None: + >>> ... + """ + # The builtin @skip_if_no_gpu relies on os.environ['WORLD_SIZE'] being set. + return cast(T, skip_if_lt_x_gpu(NUM_DEVICES)(method)) + + +class DTensorTestBase(MultiProcessTestCase): + @property + def world_size(self) -> int: + return NUM_DEVICES + + @property + def backend(self) -> str: + backend = "nccl" if self.device_type == "cuda" else "gloo" + return backend + + def build_device_mesh(self) -> DeviceMesh: + return DeviceMesh(self.device_type, list(range(self.world_size))) + + def init_pg(self) -> None: + if "nccl" in self.backend and torch.cuda.device_count() < self.world_size: + sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code) + + if self.backend not in ["nccl", "gloo", "mpi", "cpu:gloo,cuda:nccl"]: + raise RuntimeError(f"Backend {self.backend} not supported!") + + dist.init_process_group( + backend=self.backend, + world_size=self.world_size, + rank=self.rank, # pyre-ignore[16] + init_method=f"file://{self.file_name}", # pyre-ignore[16] + ) + + # set device for nccl pg for collectives + if "nccl" in self.backend: + torch.cuda.set_device(self.rank) + + def destroy_pg(self) -> None: + # Wait for all ranks to 
reach here before starting shutdown. + # FIXME dist.barrier deadlocks with multiple threads and NCCL: https://github.com/pytorch/pytorch/issues/95895 + # dist.all_reduce(torch.zeros((1,), device="cuda" if torch.cuda.is_available() else "cpu")) + # FIXME can't use the above all_reduce as it causes hangs on bionic and focal. It hangs: + # test_dtensor.py -- DTensorMeshTest.test_dtensor_device_mesh_device_conversion + dist.barrier() + dist.destroy_process_group() + + def setUp(self) -> None: + super().setUp() + self._spawn_processes() + + # pyre-ignore[2]: + def _test_op(self, mesh: DeviceMesh, op_call, *args, **kwargs) -> None: + out = op_call(*args, **kwargs) + dtc = DTensorConverter(mesh, args, kwargs) + for d_args, d_kwargs in dtc: + # pyre can't find assertTrue anymore? + self.assertEqual(dtc.successful(), True) + d_out = op_call(*d_args, **d_kwargs) + self.assertEqual(d_out.full_tensor(), out) + + def run_subtests(self, *args, **kwargs): + return run_subtests(self, *args, **kwargs) + + +TestFunc = Callable[[object], object] + + +# wrapper to initialize comms (processgroup) +def with_comms(func: TestFunc) -> TestFunc: + assert func is not None + + @wraps(func) # pyre-ignore[6] + def wrapper( + self, *args: Tuple[object], **kwargs: Dict[str, Any] # type: ignore[misc] + ) -> None: + # if enough GPU we can use GPU, otherwise we fallback to CPU + if not torch.cuda.is_available() or torch.cuda.device_count() < self.world_size: + self.device_type = "cpu" + else: + self.device_type = DEVICE_TYPE + + self.init_pg() + + try: + func(self, *args, **kwargs) # type: ignore[misc] + except Exception as e: + dist.destroy_process_group() + raise e + + self.destroy_pg() + + return wrapper + + +class DTensorOpTestBase(MultiThreadedTestCase): + @property + def world_size(self) -> int: + return NUM_DEVICES + + @property + def device_type(self) -> str: + return DEVICE_TYPE + + def build_device_mesh(self): + return DeviceMesh(self.device_type, list(range(self.world_size))) + + def 
setUp(self) -> None: + super().setUp() + self._spawn_threads() + + +# This is a class for converting args/kwargs of an op into distributed args/kwargs +class DTensorConverter: + def __init__( + self, + mesh: DeviceMesh, + args: Tuple[object, ...], + kwargs: Dict[str, object], + ) -> None: + self.hit = 0 + self.miss = 0 + self.mesh = mesh + self.args = args + self.kwargs = kwargs + flatten_args, flatten_args_spec = tree_flatten(args) + flatten_kwargs, flatten_kwargs_spec = tree_flatten(kwargs) + + self.flatten_args: List[object] = flatten_args + self.flatten_args_spec: TreeSpec = flatten_args_spec + self.flatten_kwargs: List[object] = flatten_kwargs + self.flatten_kwargs_spec: TreeSpec = flatten_kwargs_spec + + choices_for_args = [] + for arg in self.flatten_args: + if isinstance(arg, torch.Tensor): + choices_for_args.append(self.gen_sharding_choices_for_arg(arg)) + + for arg in self.flatten_kwargs: + if isinstance(arg, torch.Tensor): + choices_for_args.append(self.gen_sharding_choices_for_arg(arg)) + + self.sharding_combs: Iterator[Sequence[Placement]] = iter( + itertools.product(*choices_for_args) + ) + + def successful(self) -> bool: + return self.hit > 0 and self.miss == 0 + + def is_supported_tensor(self, t: torch.Tensor) -> bool: + # TODO: dist tensor need to support quantized and sparse + # tensors, quantized tensor might be relatively easy, but + # sparse tensor have special layouts that we need to possibly + # deal with, until we are clear about them, we don't officially + # support them. 
+ return not any( + [ + t.is_sparse_csr, + t.is_sparse, + t.is_mkldnn, + t.is_quantized, + t.is_nested, + torch._is_functional_tensor(t), + t.is_neg(), + t.is_conj(), + t.device.type in ("lazy", "meta"), + # We need a way to test if a tensor is batched but there + # is no official APi to do it + # torch._C._is_batched(t), + ] + ) + + def gen_sharding_choices_for_arg(self, arg: torch.Tensor) -> Sequence[Placement]: + mesh_size = self.mesh.size() + sharding_choices: List[Placement] = [Replicate()] + # c10d collective does not support bool tensor + # for bool tensor we treat it as replicated + if arg.dtype != torch.bool: + # only generating choices with: replicate, or sharding + # evenly on a dimension that could be sharded + sharding_choices = sharding_choices + [ + Shard(i) + for i, s in enumerate(arg.shape) + if s > 1 and s % mesh_size == 0 + ] + # TODO: add multi mesh choices + # all_choices = itertools.product( + # *(self.mesh.ndim * [sharding_choices]) + # ) + return sharding_choices + + def __iter__(self) -> "DTensorConverter": + return self + + def __next__(self) -> Tuple[Tuple[object, ...], Dict[str, object]]: + try: + next_sharding_choices = next(self.sharding_combs) + idx = 0 + + new_args: List[object] = [] + for arg in self.flatten_args: + if isinstance(arg, torch.Tensor): + new_args.append( + self.to_dist_tensor( + arg, self.mesh, [next_sharding_choices[idx]] + ) + ) + idx += 1 + else: + new_args.append(arg) + + new_kwargs: List[object] = [] + for arg in self.flatten_kwargs: + if isinstance(arg, torch.Tensor): + new_kwargs.append( + self.to_dist_tensor( + arg, self.mesh, [next_sharding_choices[idx]] + ) + ) + idx += 1 + else: + new_kwargs.append(arg) + + return ( + tree_unflatten(new_args, self.flatten_args_spec), + tree_unflatten(new_kwargs, self.flatten_kwargs_spec), + ) + except StopIteration as e: + raise StopIteration from e + + def to_dist_tensor( + self, t: torch.Tensor, mesh: DeviceMesh, placements: List[Placement] + ) -> torch.Tensor: + if 
type(t) is torch.Tensor or type(t) is nn.Parameter: + if self.is_supported_tensor(t): + self.hit += 1 + if t.ndim == 0: + # scalar tensor by default will be replicated + r = distribute_tensor(t, mesh, [Replicate()] * mesh.ndim) + else: + # distribute non-scalar tensors + r = distribute_tensor(t, mesh, placements) + if type(t) is nn.Parameter: + r = nn.Parameter( # type: ignore[assignment] + r, requires_grad=r.requires_grad + ) + return r + else: + self.miss += 1 + return t + elif torch.overrides.is_tensor_like(t): + # Blindly converting tensor subclasses to dist tensor can cause + # unpredictable problems, we explicitly disable this conversion + # for now (i.e. we don't support DTensor holding tensor subclass + # until there's a strong reason later). + self.miss += 1 + return t + else: + raise RuntimeError(f"Trying to convert to DTensor, but got {type(t)}") diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/common_state_dict.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/common_state_dict.py new file mode 100644 index 0000000000000000000000000000000000000000..c2ab9af9f197e86bd182ee6ad1b6d02970e8f9a0 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/common_state_dict.py @@ -0,0 +1,122 @@ +# mypy: allow-untyped-defs + +# Owner(s): ["oncall: distributed"] + +import copy +from itertools import chain +from typing import Any, Dict + +import torch +import torch.nn as nn +from torch.distributed._sharded_tensor import ShardedTensor +from torch.distributed._state_dict_utils import _gather_state_dict +from torch.distributed._tensor import DTensor +from torch.distributed.checkpoint.state_dict import ( + _PG, + _STATE, + set_state_dict, + StateDictOptions, +) + + +class VerifyStateDictMixin: + def _compare_tensor(self, orig_tensor, dist_tensor, offload_to_cpu=False): + if isinstance(dist_tensor, (DTensor, ShardedTensor)): + dist_tensor = 
_gather_state_dict({"mykey": dist_tensor}).pop("mykey") + + if offload_to_cpu: + orig_tensor = orig_tensor.cpu() + dist_tensor = dist_tensor.cpu() + self.assertTrue(isinstance(dist_tensor, torch.Tensor)) + self.assertTrue(torch.allclose(orig_tensor, dist_tensor)) + + def _verify_msd( + self, + msd: Dict[str, Any], + dist_msd: Dict[str, Any], + options: StateDictOptions = StateDictOptions(), + offload_to_cpu=False, + ) -> None: + if not options.ignore_frozen_params: + self.assertEqual(len(msd), len(dist_msd)) + for fqn, param in msd.items(): + dist_param = dist_msd.get(fqn, None) + if not options.ignore_frozen_params: + self.assertIsNotNone(dist_param, f"{fqn=}") + try: + self._compare_tensor(param, dist_param, offload_to_cpu) + except AssertionError as e: + raise AssertionError( + f"{fqn} has mismatched value {param} {dist_param}" + ) from e + elif dist_param is None: + self.assertFalse(param.requires_grad, f"{fqn=}") + + def _verify_osd( + self, + model: nn.Module, + optim: torch.optim.Optimizer, + osd: Dict[str, Any], + dist_osd: Dict[str, Any], + ) -> None: + params = list(chain.from_iterable(g["params"] for g in optim.param_groups)) + param_pid_mapping = dict(zip(params, range(len(params)))) + fqn_pid_mapping = {} + for fqn, param in model.named_parameters(): + pid = param_pid_mapping[param] + fqn_pid_mapping[fqn] = pid + fqn_pid_mapping[pid] = fqn + # Check optimizer_state_dict state + + self.assertEqual(len(osd[_STATE]), len(dist_osd[_STATE])) + for pid, states in osd[_STATE].items(): + fqn = fqn_pid_mapping[pid] + dist_states = dist_osd[_STATE].get(fqn, None) + self.assertIsNotNone(dist_states, fqn) + self.assertEqual(len(states), len(dist_states)) + for key, state in states.items(): + dist_state = states.get(key, None) + self.assertIsNotNone(dist_state) + self._compare_tensor(state, dist_state) + + # Check optimizer_state_dict param_group + old_dist_osd_pg = dist_osd[_PG] + if len(osd[_PG]) != len(dist_osd[_PG]): + self.assertTrue(len(dist_osd[_PG]) > 
len(osd[_PG])) + new_pg = copy.deepcopy(dist_osd[_PG][0]) + new_pg["params"] = [] + for dist_group in dist_osd[_PG]: + new_pg["params"].extend(dist_group["params"]) + dist_osd[_PG] = [new_pg] + + self.assertEqual(len(osd[_PG]), len(dist_osd[_PG])) + for group, dist_group in zip(osd[_PG], dist_osd[_PG]): + self.assertEqual(len(group), len(dist_group)) + for key, value in group.items(): + # Below doesn't work because param_groups can have None + # values. + # dist_value = dist_group.get(key, None) + # self.assertIsNotNone(dist_value, (dist_group, group)) + dist_value = dist_group[key] + if key == "params": + fqns = [fqn_pid_mapping[pid] for pid in value] + self.assertEqual(sorted(fqns), sorted(dist_value)) + else: + self.assertEqual(value, dist_value) + dist_osd[_PG] = old_dist_osd_pg + + def _verify_osd_by_load( + self, + model: nn.Module, + optim: torch.optim.Optimizer, + new_optim: torch.optim.Optimizer, + dist_osd: Dict[str, Any], + ) -> None: + new_dist_osd = _gather_state_dict(dist_osd) + set_state_dict( + model, + optimizers=new_optim, + model_state_dict={}, + optim_state_dict=new_dist_osd, + ) + self.assertEqual(optim.state_dict(), new_optim.state_dict()) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__pycache__/__init__.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4efc97f1dcb786143c5cbd3bb0967db7b7567b95 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/remote_module_test.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/remote_module_test.cpython-310.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..19803f4500e56b38db0324b3ac40b339decae0d1 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/remote_module_test.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/remote_module_test.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/remote_module_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ee3a374c745df8175bf515626896a48c1b2b78d4 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/remote_module_test.py @@ -0,0 +1,734 @@ +# mypy: allow-untyped-defs + +import enum +from typing import Tuple + +import torch +import torch.distributed.rpc as rpc +import torch.testing._internal.dist_utils as dist_utils +from torch import Tensor, nn +from torch._jit_internal import Future +from torch.distributed.nn import RemoteModule +from torch.distributed.nn.api.remote_module import _REMOTE_MODULE_PICKLED_ATTRIBUTES +from torch.distributed.nn.api.remote_module import _RemoteModule +from torch.testing._internal.common_distributed import skip_if_lt_x_gpu +from torch.testing._internal.common_utils import TemporaryFileName +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) + + +_PARAM_VAL = torch.nn.Parameter(torch.ones(1)) + + +# RPC handler for querying the device on the destination worker. +def remote_device(module_rref): + for param in module_rref.local_value().parameters(): + return param.device + + +# RPC handler for querying __dict__ on the destination worker. +def remote_module_attributes(remote_module): + return remote_module.__dict__ + + +# RPC handler for running forward on the destination worker. 
+def remote_forward(remote_module, args): + return remote_module.forward(*args) + +# RPC handler for running forward_async on the destination worker. +def remote_forward_async(remote_module, args): + # Since future cannot be pickled and sent over the RPC layer, + # have to wait and behave just like ``forward_sync``. + return remote_module.forward_async(*args).wait() + +# RPC handler for getting training mode on the destination worker. +def get_remote_training_arg(module_rref): + return module_rref.local_value().training + +class ModuleCreationMode(enum.Enum): + MODULE_CTOR_WITH_INTERFACE = "module_ctor_with_interface" + MODULE_CTOR = "module_ctor" + + +@torch.jit.interface +class MyModuleInterface: + def forward( + self, tensor: Tensor, number: int, word: str = "default" + ) -> Tuple[str, int, Tensor]: + # pyre-ignore[7]: Pyre and torch.jit.interface don't mix well + pass + + +@torch.jit.interface +class RemoteMyModuleInterface: + def forward( + self, tensor: Tensor, number: int, word: str = "default" + ) -> Tuple[str, int, Tensor]: + # pyre-ignore[7]: Pyre and torch.jit.interface don't mix well + pass + + def forward_async( + self, tensor: Tensor, number: int, word: str = "default" + ) -> Future[Tuple[str, int, Tensor]]: + pass + + +class MyModule(nn.Module): + def __init__(self, first_arg, first_kwarg=-1): + super().__init__() + self.param1 = _PARAM_VAL + + def forward( + self, tensor: Tensor, number: int, word: str = "default" + ) -> Tuple[str, int, Tensor]: + return word, number, tensor + + +class BadModule: + def __init__(self, first_arg, first_kwarg=-1): + pass + + +def create_scripted_module(first_arg, first_kwarg=-1): + module = MyModule(first_arg, first_kwarg=first_kwarg) + scripted_module = torch.jit.script(module) + return scripted_module + + +# Common utils for both CPU and CUDA test suites +class CommonRemoteModuleTest(RpcAgentTestFixture): + @property + def world_size(self): # Override setting in RpcAgentTestFixture + return 2 + + @staticmethod + def 
_create_remote_module_iter(remote_device, modes=None): + if modes is None: + modes = ModuleCreationMode.__members__.values() + + args = (1,) + kwargs = dict(first_kwarg=2) + + if ModuleCreationMode.MODULE_CTOR in modes: + remote_module = RemoteModule(remote_device, MyModule, args, kwargs) + yield remote_module + + if ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE in modes: + remote_module = _RemoteModule( + remote_device, + create_scripted_module, + args, + kwargs, + _module_interface_cls=MyModuleInterface, + ) + scripted_remote_module = torch.jit.script(remote_module) + yield scripted_remote_module + + +class RemoteModuleTest(CommonRemoteModuleTest): + @dist_utils.dist_init + def test_bad_module(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + remote_device = f"{dst_worker_name}/cpu" + args = (1,) + kwargs = dict(first_kwarg=2) + + with self.assertRaisesRegex( + ValueError, + r"Expect `module_cls\(\*args, \*\*kwargs\)` returns an instance of ,", + ): + RemoteModule(remote_device, BadModule, args, kwargs).forward() + + with self.assertRaisesRegex( + ValueError, + r"Expect `module_cls\(\*args, \*\*kwargs\)` returns an instance of ,", + ): + RemoteModule(remote_device, BadModule, args, kwargs).forward() + + + @dist_utils.dist_init + def test_forward_async(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + args = (torch.ones(1), 2, "3") + for remote_module in self._create_remote_module_iter(dst_worker_name): + ret_fut = remote_module.forward_async(*args) + ret = ret_fut.wait() + self.assertEqual(ret, tuple(reversed(args))) + + @dist_utils.dist_init + def test_forward_async_script(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + scripted_remote_module = next( + self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE] + ) 
+ ) + + @torch.jit.script + def run_forward_async(scripted_remote_module: RemoteMyModuleInterface): + ret_fut = scripted_remote_module.forward_async(torch.ones(1), 2, "3") + ret = ret_fut.wait() + return ret + + ret = run_forward_async(scripted_remote_module) + + self.assertEqual(ret, ("3", 2, torch.ones(1))) + + @dist_utils.dist_init + def test_forward_sync(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + args = (torch.ones(1), 2, "3") + for remote_module in self._create_remote_module_iter(dst_worker_name): + ret = remote_module.forward(*args) + self.assertEqual(ret, tuple(reversed(args))) + + @dist_utils.dist_init + def test_forward_sync_script(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + scripted_remote_module = next( + self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE] + ) + ) + + @torch.jit.script + def run_forward(scripted_remote_module: MyModuleInterface): + ret = scripted_remote_module.forward(torch.ones(1), 2, "3") + return ret + + ret = run_forward(scripted_remote_module) + + self.assertEqual(ret, ("3", 2, torch.ones(1))) + + @dist_utils.dist_init + def test_forward_with_kwargs(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + args = (torch.ones(1), 2) + kwargs = dict(word="3") + # Only test Python nn.Module, because script module methods don't support taking kwargs. 
+ for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + ret_fut = remote_module.forward_async(*args, **kwargs) + ret = ret_fut.wait() + self.assertEqual(ret, tuple(reversed(args + ("3",)))) + + ret = remote_module.forward(*args, **kwargs) + self.assertEqual(ret, tuple(reversed(args + ("3",)))) + + @dist_utils.dist_init + def test_remote_parameters(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + # Only test Python nn.Module, because script module methods don't support ``remote_parameters``. + for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + param_rrefs = remote_module.remote_parameters() + self.assertEqual(len(param_rrefs), 1) + self.assertTrue(torch.equal(param_rrefs[0].to_here(), _PARAM_VAL)) + + @dist_utils.dist_init + def test_get_module_rref(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + # Only test Python nn.Module, because script module methods don't support ``get_module_rref``. 
+ for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + rref = remote_module.get_module_rref() + self.assertEqual(rref, remote_module.module_rref) + for param in rref.to_here().parameters(): + self.assertTrue(torch.equal(param, _PARAM_VAL)) + + @dist_utils.dist_init + def test_train_eval(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + remote_module.train() + ret1 = rpc.rpc_sync(dst_worker_name, get_remote_training_arg, args=(remote_module.get_module_rref(),)) + self.assertEqual(ret1, True) + + remote_module.eval() + ret2 = rpc.rpc_sync(dst_worker_name, get_remote_training_arg, args=(remote_module.get_module_rref(),)) + self.assertEqual(ret2, False) + + @dist_utils.dist_init + def test_unsupported_methods(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + with self.assertRaisesRegex( + ValueError, r"Method ``register_buffer`` not supported for RemoteModule" + ): + remote_module.register_buffer("buffer", torch.ones(5)) + with self.assertRaisesRegex( + ValueError, + r"Method ``register_parameter`` not supported for RemoteModule", + ): + remote_module.register_parameter( + "param", torch.nn.Parameter(torch.ones(1)) + ) + with self.assertRaisesRegex( + ValueError, r"Method ``add_module`` not supported for RemoteModule" + ): + remote_module.add_module("empty", None) + + with self.assertRaisesRegex( + ValueError, r"Method ``apply`` not supported for RemoteModule" + ): + fn = torch.rand((3, 3), requires_grad=False) + remote_module.apply(fn) + + with self.assertRaisesRegex( + ValueError, r"Method ``cuda`` not supported for RemoteModule" + 
): + remote_module.cuda() + with self.assertRaisesRegex( + ValueError, r"Method ``cpu`` not supported for RemoteModule" + ): + remote_module.cpu() + with self.assertRaisesRegex( + ValueError, r"Method ``type`` not supported for RemoteModule" + ): + remote_module.type(torch.FloatTensor) + with self.assertRaisesRegex( + ValueError, r"Method ``float`` not supported for RemoteModule" + ): + remote_module.float() + with self.assertRaisesRegex( + ValueError, r"Method ``double`` not supported for RemoteModule" + ): + remote_module.double() + with self.assertRaisesRegex( + ValueError, r"Method ``bfloat16`` not supported for RemoteModule" + ): + remote_module.bfloat16() + with self.assertRaisesRegex( + ValueError, r"Method ``to`` not supported for RemoteModule" + ): + remote_module.to("cpu", dtype=torch.int32) + + def hook(module, grad_input, grad_output): + pass + + with self.assertRaisesRegex( + ValueError, + r"Method ``register_backward_hook`` not supported for RemoteModule", + ): + remote_module.register_backward_hook(hook) + with self.assertRaisesRegex( + ValueError, + r"Method ``register_forward_pre_hook`` not supported for RemoteModule", + ): + remote_module.register_forward_pre_hook(hook) + with self.assertRaisesRegex( + ValueError, + r"Method ``register_forward_hook`` not supported for RemoteModule", + ): + remote_module.register_forward_hook(hook) + + with self.assertRaisesRegex( + ValueError, r"Method ``state_dict`` not supported for RemoteModule" + ): + remote_module.state_dict() + with self.assertRaisesRegex( + ValueError, r"Method ``load_state_dict`` not supported for RemoteModule" + ): + remote_module.load_state_dict({}) + + with self.assertRaisesRegex( + ValueError, + r"Method ``parameters`` not supported for RemoteModule. 
Please use ``remote_parameters`` instead.", + ): + remote_module.parameters() + with self.assertRaisesRegex( + ValueError, + r"Method ``named_parameters`` not supported for RemoteModule", + ): + remote_module.named_parameters() + with self.assertRaisesRegex( + ValueError, r"Method ``buffers`` not supported for RemoteModule" + ): + remote_module.buffers() + with self.assertRaisesRegex( + ValueError, r"Method ``named_buffers`` not supported for RemoteModule" + ): + remote_module.named_buffers() + with self.assertRaisesRegex( + ValueError, r"Method ``children`` not supported for RemoteModule" + ): + remote_module.children() + with self.assertRaisesRegex( + ValueError, r"Method ``named_children`` not supported for RemoteModule" + ): + remote_module.named_children() + with self.assertRaisesRegex( + ValueError, r"Method ``modules`` not supported for RemoteModule" + ): + remote_module.modules() + with self.assertRaisesRegex( + ValueError, r"Method ``named_modules`` not supported for RemoteModule" + ): + remote_module.named_modules() + + with self.assertRaisesRegex( + ValueError, r"Method ``requires_grad_`` not supported for RemoteModule" + ): + remote_module.requires_grad_() + with self.assertRaisesRegex( + ValueError, r"Method ``zero_grad`` not supported for RemoteModule" + ): + remote_module.zero_grad() + with self.assertRaisesRegex( + ValueError, r"Method ``share_memory`` not supported for RemoteModule" + ): + remote_module.share_memory() + with self.assertRaisesRegex( + ValueError, r"Method ``extra_repr`` not supported for RemoteModule" + ): + remote_module.extra_repr() + + @dist_utils.dist_init + def test_send_remote_module_with_a_new_attribute_not_pickled_over_the_wire(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + # If a new attribute is added to this RemoteModule after the initialization, + # and it will be sent over the wire by RPC, + # this new field will not be pickled, because it's not 
specified in _REMOTE_MODULE_PICKLED_ATTRIBUTES. + # Note that adding a new attribute out of constructor should rarely happen. + # If a new attribute is added to RemoteModule constructor, + # there is a sanity check to enforce developers to add this attribute to either + # _REMOTE_MODULE_PICKLED_ATTRIBUTES or _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING. + for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + new_attr_name = "new_attr" + setattr(remote_module, new_attr_name, 1) + + attrs = rpc.rpc_sync( + dst_worker_name, remote_module_attributes, (remote_module,) + ) + self.assertNotIn(new_attr_name, attrs) + + @dist_utils.dist_init + def test_remote_module_py_pickle_not_supported(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + with TemporaryFileName() as fname: + with self.assertRaisesRegex( + RuntimeError, + "Cannot pickle RemoteModule in python pickler. 
RemoteModule can only be pickled when using RPC", + ): + torch.save(remote_module, fname) + + @dist_utils.dist_init + def test_remote_module_py_pickle_not_supported_script(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE] + ): + with TemporaryFileName() as fname: + with self.assertRaisesRegex(torch.jit.Error, "can only be pickled when using RPC"): + torch.save(remote_module, fname) + + +class ThreeWorkersRemoteModuleTest(CommonRemoteModuleTest): + @property + def world_size(self): # Override setting in CommonRemoteModuleTest + return 3 + + @dist_utils.dist_init + def test_send_remote_module_over_the_wire(self): + if self.rank != 0: + return + dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size) + + # Unpickled attributes include both the inherent attributes of RemoteModule + # (not inherited from the superclass) and two installed methods. + expected_unpickled_attrs = list(_REMOTE_MODULE_PICKLED_ATTRIBUTES) + expected_unpickled_attrs.append("forward_async") + expected_unpickled_attrs.append("forward") + + # Create a remote module on worker1 and then pass it to worker2 over the RPC layer. + for remote_module in self._create_remote_module_iter( + dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + # Test querying some simple attributes from worker2. 
+ attrs = rpc.rpc_sync( + dst_worker2_name, remote_module_attributes, (remote_module,) + ) + self.assertListEqual(list(attrs.keys()), expected_unpickled_attrs) + self.assertEqual(attrs["on"], "worker1") + self.assertEqual(attrs["device"], "cpu") + self.assertFalse(attrs["is_device_map_set"]) + self.assertFalse(attrs["is_scriptable"]) + + # Test the installed methods on worker1's can be initiated by worker2 over RPC layer. + # NOTE: In practice a remote module should be directly stored on the worker that runs ``forward``` or ``forward_async``, + # not have another worker to initiate forward over the RPC layer. + args = (torch.ones(1), 2, "3") + ret1 = rpc.rpc_sync(dst_worker2_name, remote_forward, (remote_module, args)) + self.assertEqual(ret1, tuple(reversed(args))) + ret2 = rpc.rpc_sync( + dst_worker2_name, remote_forward_async, (remote_module, args) + ) + self.assertEqual(ret2, tuple(reversed(args))) + + @dist_utils.dist_init + def test_send_remote_module_over_the_wire_script_not_supported(self): + if self.rank != 0: + return + dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size) + + # Unpickled attributes include both the inherent attributes of RemoteModule + # (not inherited from the superclass) and two installed methods. + expected_unpickled_attrs = list(_REMOTE_MODULE_PICKLED_ATTRIBUTES) + expected_unpickled_attrs.append("forward_async") + expected_unpickled_attrs.append("forward") + + with self.assertRaisesRegex( + RuntimeError, "Passing a script RemoteModule over RPC is not supported." + ): + # Create a remote module on worker1 and then pass it to worker2 over the RPC layer. + for remote_module in self._create_remote_module_iter( + dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE] + ): + # Test querying some simple attributes from worker2. 
+ attrs = rpc.rpc_sync( + dst_worker2_name, remote_module_attributes, (remote_module,) + ) + + @dist_utils.dist_init + def test_create_remote_module_from_module_rref(self): + if self.rank != 0: + return + dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size) + + # Create a remote module on worker1 and then pass its `module_rref` to worker2 over the RPC layer. + for remote_module in self._create_remote_module_iter( + dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + remote_module2 = rpc.rpc_sync( + dst_worker2_name, + RemoteModule.init_from_module_rref, + (dst_worker2_name, remote_module.get_module_rref()), + ) + + args = (torch.ones(1), 2, "3") + ret1 = rpc.rpc_sync( + dst_worker1_name, remote_forward, (remote_module, args) + ) + ret2 = rpc.rpc_sync( + dst_worker2_name, remote_forward, (remote_module2, args) + ) + self.assertEqual(ret2, ret2) + + +class CudaRemoteModuleTest(CommonRemoteModuleTest): + @skip_if_lt_x_gpu(1) + @dist_utils.dist_init + def test_valid_device(self): + if self.rank != 0: + return + dst_rank = (self.rank + 1) % self.world_size + dst_worker_name = dist_utils.worker_name(dst_rank) + + for remote_module in self._create_remote_module_iter( + f"{dst_worker_name}/cuda:0", modes=[ModuleCreationMode.MODULE_CTOR] + ): + device = rpc.rpc_sync( + dst_worker_name, remote_device, (remote_module.module_rref,) + ) + self.assertEqual(device.type, "cuda") + self.assertEqual(device.index, 0) + + # Test rank works as well. 
+ for remote_module in self._create_remote_module_iter( + f"rank:{dst_rank}/cuda:0", modes=[ModuleCreationMode.MODULE_CTOR] + ): + device = rpc.rpc_sync( + dst_worker_name, remote_device, (remote_module.module_rref,) + ) + self.assertEqual(device.type, "cuda") + self.assertEqual(device.index, 0) + + @skip_if_lt_x_gpu(1) + @dist_utils.dist_init + def test_invalid_devices(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + with self.assertRaisesRegex( + RuntimeError, + r"Expected one of .+ device type at start of device string", + ): + [ + m.forward() + for m in self._create_remote_module_iter( + f"{dst_worker_name}/foo", + modes=[ModuleCreationMode.MODULE_CTOR], + ) + ] + + with self.assertRaisesRegex( + RuntimeError, r"CUDA error: invalid device ordinal" + ): + [ + m.forward() + for m in self._create_remote_module_iter( + f"{dst_worker_name}/cuda:100", + modes=[ModuleCreationMode.MODULE_CTOR], + ) + ] + + with self.assertRaisesRegex(RuntimeError, r"Invalid device string: 'cpu2'"): + [ + m.forward() + for m in self._create_remote_module_iter( + f"{dst_worker_name}/cpu2", + modes=[ModuleCreationMode.MODULE_CTOR], + ) + ] + + with self.assertRaisesRegex(RuntimeError, r"Device string must not be empty"): + [ + m.forward() + for m in self._create_remote_module_iter( + f"{dst_worker_name}/", + modes=[ModuleCreationMode.MODULE_CTOR], + ) + ] + + with self.assertRaisesRegex( + ValueError, + r"Could not parse remote_device: worker1/cuda:0/cuda:1. The valid format is '/'", + ): + [ + m.forward() + for m in self._create_remote_module_iter( + f"{dst_worker_name}/cuda:0/cuda:1", + modes=[ModuleCreationMode.MODULE_CTOR], + ) + ] + + with self.assertRaisesRegex( + ValueError, + r"Could not parse remote_device: /. 
The valid format is '/'", + ): + [ + m.forward() + for m in self._create_remote_module_iter( + "/", + modes=[ModuleCreationMode.MODULE_CTOR], + ) + ] + + with self.assertRaisesRegex( + ValueError, + r"Could not parse remote_device: /cuda:0. The valid format is '/'", + ): + [ + m.forward() + for m in self._create_remote_module_iter( + "/cuda:0", + modes=[ModuleCreationMode.MODULE_CTOR], + ) + ] + + @skip_if_lt_x_gpu(1) + @dist_utils.dist_init + def test_input_moved_to_cuda_device(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + # These two CPU tensors (in args and kwargs) should be implicitly moved to an appropriate cuda device. + t1 = torch.ones(1) + args = (t1, 2) + t2 = t1 * 2 + kwargs = dict(word=t2) + + # Only test Python nn.Module, because script module methods don't support taking kwargs. + for remote_module in self._create_remote_module_iter( + f"{dst_worker_name}/cuda:0", modes=[ModuleCreationMode.MODULE_CTOR] + ): + ret_fut = remote_module.forward_async(*args, **kwargs) + ret = ret_fut.wait() + self.assertEqual(ret, tuple(reversed(args + (t2,)))) + # TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0". + self.assertEqual(ret[0].device.type, "cpu") + self.assertEqual(ret[2].device.type, "cpu") + + ret = remote_module.forward(*args, **kwargs) + self.assertEqual(ret, tuple(reversed(args + (t2,)))) + # TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0". 
+ self.assertEqual(ret[0].device.type, "cpu") + self.assertEqual(ret[2].device.type, "cpu") + + @skip_if_lt_x_gpu(1) + @dist_utils.dist_init + def test_input_moved_to_cuda_device_script(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + scripted_remote_module = next( + self._create_remote_module_iter( + f"{dst_worker_name}/cuda:0", + modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE], + ) + ) + + @torch.jit.script + def run_forward(scripted_remote_module: MyModuleInterface): + ret = scripted_remote_module.forward(torch.ones(1), 2, "3") + return ret + + ret = run_forward(scripted_remote_module) + + self.assertEqual(ret, ("3", 2, torch.ones(1))) + # TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0". + self.assertEqual(ret[2].device.type, "cpu") diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__init__.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_autograd_test.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_autograd_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aad2009af01c0fb428a7d3d4e7939c5eb7e1b99b Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_autograd_test.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_optimizer_test.cpython-310.pyc 
b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_optimizer_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8a06f6c6cdf21c25bf31f1e0145aabfd81fdb35 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_optimizer_test.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/faulty_agent_rpc_test.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/faulty_agent_rpc_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f7d986bfe7e565120f857be6305321920362313 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/faulty_agent_rpc_test.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_autograd_test.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_autograd_test.py new file mode 100644 index 0000000000000000000000000000000000000000..0a6b9a843b6294f316ebd91180c8ecb9ddf69524 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_autograd_test.py @@ -0,0 +1,2783 @@ +# mypy: allow-untyped-defs + +import sys +import threading +import time +from enum import Enum +import random +import torch +import torch.nn as nn +from datetime import timedelta +import torch.distributed as dist +import torch.distributed.autograd as dist_autograd +import torch.distributed.rpc as rpc +import torch.testing._internal.dist_utils +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.distributed.rpc import RRef +from torch.testing._internal.common_utils import IS_MACOS, 
skip_but_pass_in_sandcastle_if +from torch.testing._internal.dist_utils import ( + dist_init, + initialize_pg, + wait_until_node_failure, + worker_name, +) +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) +from torch.testing._internal.common_distributed import skip_if_lt_x_gpu + + +# Right now we test up to 3-layer nested rpc calls. +# rpc_done[1] and ctx_ids[1] represent rpc is done in prev rank, and context id +# sent from prev rank respectively. +# rpc_done[2] and ctx_ids[2] represents for prev of prev rank. +# rpc_done[3] and ctx_ids[3] represents for prev of prev of prev rank. +# rpc_done[0] and ctx_ids[0] represents for current rank, but mostly not used. +rpc_done = [False, False, False, False] +ctx_ids = [-1, -1, -1, -1] + +known_context_ids = set() + +requires_grad_tensor = torch.ones(3, 3, requires_grad=True) + +# Send rpc done info and context_id to +# dst_rank = (self.rank + rank_distance) % self.world_size +# we don't need a lock here since the GIL is held while executing remote +# python UDFs, so access is serialized across several workers. +def _set_rpc_done(ctx_id, rank_distance): + global rpc_done + global ctx_ids + global known_context_ids + rpc_done[rank_distance] = True + ctx_ids[rank_distance] = ctx_id + known_context_ids.add(ctx_id) + + +def _check_rpc_done(rank_distance): + while not rpc_done[rank_distance]: + time.sleep(0.1) + + +def _torch_ones(sizes, requires_grad=False): + return torch.ones(sizes, requires_grad=requires_grad) + +# This method must be called on the rref owner, and verifies that the grad of +# rref tensor equals to the given grad. 
def _compare_owner_value(context_id, rref, grad):
    """Return True iff the gradient recorded for ``rref``'s local value in the
    given dist autograd context equals ``grad``.

    Sparse gradients are densified before comparison; a sparse/dense mismatch
    between the recorded grad and ``grad`` trips an assert.
    """
    grads = dist_autograd.get_gradients(context_id)
    x = grads[rref.local_value()]
    if x.is_sparse:
        assert grad.is_sparse
        x = x.to_dense()
        grad = grad.to_dense()
    else:
        assert not grad.is_sparse
    return torch.equal(x, grad)


def create_tensor():
    # Fresh 3x3 ones tensor with requires_grad, used as an RPC-created ref.
    return torch.ones((3, 3), requires_grad=True)


def build_sparse_tensor(coalesce=False, requires_grad=True, dtype=torch.float32):
    """Return a fixed small 3x3 sparse COO tensor (optionally coalesced)."""
    i = [[0, 1, 1], [2, 0, 2]]
    v = [3.2, 4.1, 5.3]
    tensor = torch.sparse_coo_tensor(
        i, v, (3, 3), requires_grad=requires_grad, dtype=dtype
    )
    if coalesce:
        tensor = tensor.coalesce()
    return tensor


@torch.jit.script
def create_torchscript_tensor() -> torch.Tensor:
    return torch.ones((3, 3)).requires_grad_()


def my_py_add(t1, t2):
    # Plain-Python wrapper around torch.add, used as an RPC target.
    return torch.add(t1, t2)


def my_scalar_add(a, b):
    return a + b


def my_rref_add(rref_t1, t2):
    # Must run on the RRef owner: resolves the RRef locally and adds t2.
    ret = torch.add(rref_t1.local_value(), t2)
    return ret


@torch.jit.script
def my_script_add(t1, t2):
    return torch.add(t1, t2)


@torch.jit.script
def my_script_ref_add(ref_t1: RRef[torch.Tensor], t2: torch.Tensor) -> torch.Tensor:
    t1 = ref_t1.to_here()
    return torch.add(t1, t2)


def my_nested_rref_add(dst, rref_t1, t2):
    # Forward the add to ``dst`` so the RRef gets resolved on its owner.
    return rpc.rpc_sync(dst, my_rref_add, args=(rref_t1, t2))


def ret_requires_grad():
    # Returns a module-level tensor (defined elsewhere in this file) that
    # requires grad; used to test grads that exist only on the return value.
    return requires_grad_tensor


def my_py_nested_call(t1, t2, dst, world_size, hops):
    """Chain ``hops`` nested rpc_sync calls around the worker ring before
    finally executing my_py_add on the last worker."""
    next_dst = (dst + 1) % world_size
    if hops > 0:
        return rpc.rpc_sync(
            worker_name(next_dst),
            my_py_nested_call,
            args=(t1, t2, next_dst, world_size, hops - 1),
        )
    else:
        return rpc.rpc_sync(worker_name(next_dst), my_py_add, args=(t1, t2))


# after dist autograd context is cleaned up, it should be cleaned up on other
# nodes. This helper allows timeout_seconds for those RPCs to be completed, and
# ensures that all the contexts have been cleaned up in that timeframe.
def _all_contexts_cleaned_up(timeout_seconds=10):
    # NOTE(review): busy-polls (no sleep) until every id in the module-level
    # ``known_context_ids`` set fails retrieval or the timeout elapses.
    global known_context_ids
    start = time.time()
    context_id_to_raised = set()
    while (
        time.time() - start < timeout_seconds
        and context_id_to_raised != known_context_ids
    ):
        for context_id in known_context_ids:
            try:
                dist_autograd._retrieve_context(context_id)
            except RuntimeError:
                context_id_to_raised.add(context_id)
    # all contexts have been cleaned up if trying to retrieve any context resulted in a RuntimeError.
    success = context_id_to_raised == known_context_ids
    return success


# This function creates a dist autograd context, runs rpc_sync on the given ps,
# and then blocks until the ps has verified the grads are correctly accumulated.
def _run_trainer(rref_t1, t2, ps, rank_diff, sparse):
    with dist_autograd.context() as context_id:
        ret = rpc.rpc_sync(ps, my_rref_add, args=(rref_t1, t2))
        if sparse:
            loss = torch.sparse.sum(ret)
        else:
            loss = ret.sum()
        dist_autograd.backward(context_id, [loss])
        # prevent deleting dist autograd context
        rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff))
        rpc.rpc_sync(ps, _check_rpc_done, args=(0,))


# This function is the same as _run_trainer, except rpc calls torchscript
# function "my_script_ref_add" instead of python function "my_rref_add"
def _run_trainer_torchscript(rref_t1, t2, ps, rank_diff, sparse):
    with dist_autograd.context() as context_id:
        ret = rpc.rpc_sync(ps, my_script_ref_add, args=(rref_t1, t2))
        if sparse:
            loss = torch.sparse.sum(ret)
        else:
            loss = ret.sum()
        dist_autograd.backward(context_id, [loss])
        # prevent deleting dist autograd context
        rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff))
        rpc.rpc_sync(ps, _check_rpc_done, args=(0,))


class SimulateBackwardError(Function):
    """Identity autograd Function whose backward raises while
    ``_simulate_error`` is True; used to test error propagation through the
    distributed backward pass."""

    _simulate_error = True

    @staticmethod
    def forward(ctx, input):
        return input

    @staticmethod
    @once_differentiable
    def backward(ctx, input):
        if SimulateBackwardError._simulate_error:
            raise Exception("Simulate error on backward pass")  # noqa: TRY002
        else:
            return input


class ExecMode(Enum):
    LOCAL = 1  # Run the operation locally.
    RPC_SYNC = 2  # Run the operation using rpc_sync
    REMOTE = 3  # Run the operation using remote.
    RPC_ASYNC = 4  # Run the operation using rpc_async


# Common utils for both CPU and CUDA test suites
class CommonDistAutogradTest(RpcAgentTestFixture):
    def _exec_func_with_dst(self, dst, exec_mode, method, *args):
        """Dispatch ``method(*args)`` either locally or over RPC to ``dst``,
        according to ``exec_mode``."""
        if ExecMode.LOCAL == exec_mode:
            # A single list argument is unpacked so LOCAL matches the RPC
            # calling convention used by some callers.
            if len(args) == 1 and isinstance(args[0], list):
                return method(*args[0])
            return method(*args)
        elif ExecMode.RPC_SYNC == exec_mode:
            return rpc.rpc_sync(worker_name(dst), method, args=(args))
        elif ExecMode.REMOTE == exec_mode:
            return rpc.remote(worker_name(dst), method, args=(args)).to_here()
        elif ExecMode.RPC_ASYNC == exec_mode:
            fut = rpc.rpc_async(worker_name(dst), method, args=(args))
            return fut.wait()
        else:
            raise ValueError(f"Unrecognized ExecMode {exec_mode}")

    def _exec_func(self, exec_mode, method, *args):
        # Same as _exec_func_with_dst but round-robins over peer ranks.
        return self._exec_func_with_dst(
            self._next_rank(), exec_mode, method, *args
        )

    def _next_rank(self):
        """Return the next destination rank, cycling over all ranks except
        our own (state kept in ``self.dst_rank``)."""
        if hasattr(self, "dst_rank"):
            self.dst_rank = (self.dst_rank + 1) % self.world_size
            if self.dst_rank == self.rank:
                # Skip ourselves and advance once more.
                return self._next_rank()
        else:
            self.dst_rank = (self.rank + 1) % self.world_size
        return self.dst_rank

    def _check_rpc_done(self, rank_distance):
        # Delegates to the module-level helper of the same name.
        _check_rpc_done(rank_distance)

    def _verify_backwards(self, exec_mode, tensors, context_id, local_grads, *args):
        """LOCAL mode: run the regular autograd engine and return the grads.
        Other modes: run dist autograd and compare against ``local_grads``
        (returns None in that case)."""
        if exec_mode == ExecMode.LOCAL:
            torch.autograd.backward(tensors)
            return [arg.grad for arg in args]
        else:
            self._verify_backwards_remote(tensors, context_id, local_grads, *args)

    def _verify_backwards_remote(self, tensors, context_id, local_grads, *args):
        dist_autograd.backward(context_id, tensors)

        # Verify grads were accumulated appropriately.
        grads = dist_autograd.get_gradients(context_id)
        nargs = len(args)
        ngrads = 0
        for i in range(0, nargs):
            if local_grads[i] is not None:
                self.assertIn(args[i], grads)
                self.assertEqual(local_grads[i], grads[args[i]])
                ngrads += 1
            else:
                self.assertNotIn(args[i], grads)

        # No extra gradients beyond the expected ones.
        self.assertEqual(ngrads, len(grads))

    def _test_graph(self, fn, exec_mode, sparse):
        """Run ``fn`` on the next rank via rpc_sync/remote and verify the
        send/recv autograd functions recorded in both the local context and
        the previous rank's context."""
        dst_rank = (self.rank + 1) % self.world_size

        initialize_pg(self.file_init_method, self.rank, self.world_size)

        with dist_autograd.context() as context_id:
            if sparse:
                t1 = build_sparse_tensor()
                t2 = build_sparse_tensor()
            else:
                t1 = torch.ones(3, 3, requires_grad=True)
                t2 = torch.zeros(3, 3, requires_grad=True)
            if ExecMode.RPC_SYNC == exec_mode:
                ret = rpc.rpc_sync(worker_name(dst_rank), fn, args=(t1, t2))
            elif ExecMode.REMOTE == exec_mode:
                ret = rpc.remote(
                    worker_name(dst_rank), fn, args=(t1, t2)
                ).to_here()
            else:
                raise ValueError(f"Unrecognized ExecMode {exec_mode}")

            rpc.rpc_sync(
                worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
            )

            # Verify graph for current context id.
            ctx = dist_autograd._current_context()
            self.assertEqual(context_id, ctx._context_id())
            send_functions = ctx._send_functions()
            self.assertEqual(1, len(send_functions))
            recv_functions = ctx._recv_functions()
            self.assertEqual(1, len(recv_functions))
            self._verify_graph_for_first_rpc_call(
                next(iter(send_functions.values())),
                next(iter(recv_functions.values())),
                t1,
                t2,
                ret,
            )

            # Wait for the prev rank to be done with rpc.
            self._check_rpc_done(1)
            # Verify graph for previous context id.
            ctx = dist_autograd._retrieve_context(ctx_ids[1])
            send_functions = ctx._send_functions()
            self.assertEqual(1, len(send_functions))
            self._verify_graph_for_rpc_call_exec(next(iter(send_functions.values())))
            # this barrier is needed so one worker does not clean up their
            # autograd context before another worker tries to access it.
            dist.barrier()

        # autograd context should be cleaned up by now.
        with self.assertRaises(RuntimeError):
            ctx = dist_autograd._retrieve_context(context_id)

        # No autograd context available.
        with self.assertRaises(RuntimeError):
            ctx = dist_autograd._current_context()

    # 3-layer nested calls
    def _test_graph_for_py_nested_call(self, exec_mode, sparse):
        """Kick off a 3-hop nested RPC chain and verify the four autograd
        graphs it creates (one per hop plus the final torch.add executor)."""
        dst_rank = (self.rank + 1) % self.world_size

        initialize_pg(self.file_init_method, self.rank, self.world_size)

        with dist_autograd.context() as context_id:
            if sparse:
                t1 = build_sparse_tensor(requires_grad=True)
                t2 = build_sparse_tensor(requires_grad=True)
            else:
                t1 = torch.ones(3, 3, requires_grad=True)
                t2 = torch.zeros(3, 3, requires_grad=True)
            # NOTE(review): nest_dst_rank is computed but never used below.
            nest_dst_rank = (dst_rank + 1) % self.world_size
            if ExecMode.RPC_SYNC == exec_mode:
                ret = rpc.rpc_sync(
                    worker_name(dst_rank),
                    my_py_nested_call,
                    args=(t1, t2, dst_rank, self.world_size, 1),
                )
            elif ExecMode.REMOTE == exec_mode:
                ret = rpc.remote(
                    worker_name(dst_rank),
                    my_py_nested_call,
                    args=(t1, t2, dst_rank, self.world_size, 1),
                ).to_here()
            else:
                raise ValueError(f"Unrecognized ExecMode {exec_mode}")

            # Barrier to ensure all RPCs are done.
            dist.barrier()

            for rd in [1, 2, 3]:
                rpc.rpc_sync(
                    worker_name((self.rank + rd) % self.world_size),
                    _set_rpc_done,
                    args=(context_id, rd),
                )

            # Barrier to ensure all set_rpc_done have completed.
            dist.barrier()

            # For self.rank, it has 4 graphs to verify
            # One is for current context id when this rank send first rpc call.
            # Second one is for prev context id when this rank make 1st nested
            # call.
            # Third one is for prev prev context id when this rank make
            # 2nd nested call.
            # Last one is for prev prev prev context id when this rank
            # execute the torch.add() operator.

            # Verify first graph for current context id.
            ctx = dist_autograd._current_context()
            self.assertEqual(context_id, ctx._context_id())
            send_functions = ctx._send_functions()
            self.assertEqual(1, len(send_functions))
            recv_functions = ctx._recv_functions()
            self.assertEqual(1, len(recv_functions))
            self._verify_graph_for_first_rpc_call(
                next(iter(send_functions.values())),
                next(iter(recv_functions.values())),
                t1,
                t2,
                ret,
            )

            # Verify second graph for 1st nested call.
            ctx = dist_autograd._retrieve_context(ctx_ids[1])
            self._verify_graph_for_nested_rpc_call(ctx)

            # Verify third graph for 2nd nested call.
            ctx = dist_autograd._retrieve_context(ctx_ids[2])
            self._verify_graph_for_nested_rpc_call(ctx)

            # verify last graph for rpc call execution.
            ctx = dist_autograd._retrieve_context(ctx_ids[3])
            send_functions = ctx._send_functions()
            self.assertEqual(1, len(send_functions))
            self._verify_graph_for_rpc_call_exec(next(iter(send_functions.values())))
            # this barrier is needed so one worker does not clean up their
            # autograd context before another worker tries to access it.
            dist.barrier()

    # Rank0->Rank1->Rank0
    def _test_graph_for_py_nested_call_itself(self, exec_mode, sparse):
        """Nested call that loops back to this rank: verify the two send/recv
        pairs recorded in the current context and the nested-call context."""
        dst_rank = (self.rank + 1) % self.world_size

        initialize_pg(self.file_init_method, self.rank, self.world_size)

        with dist_autograd.context() as context_id:
            if sparse:
                t1 = build_sparse_tensor(requires_grad=True)
                t2 = build_sparse_tensor(requires_grad=True)
            else:
                t1 = torch.ones(3, 3, requires_grad=True)
                t2 = torch.zeros(3, 3, requires_grad=True)
            if ExecMode.RPC_SYNC == exec_mode:
                ret = rpc.rpc_sync(
                    worker_name(dst_rank),
                    my_py_nested_call,
                    args=(
                        t1,
                        t2,
                        # Route the nested hop back to this rank.
                        (self.rank - 1 + self.world_size) % self.world_size,
                        self.world_size,
                        0,
                    ),
                )
            elif ExecMode.REMOTE == exec_mode:
                ret = rpc.remote(
                    worker_name(dst_rank),
                    my_py_nested_call,
                    args=(
                        t1,
                        t2,
                        (self.rank - 1 + self.world_size) % self.world_size,
                        self.world_size,
                        0,
                    ),
                ).to_here()
            else:
                raise ValueError(f"Unrecognized ExecMode {exec_mode}")

            rpc.rpc_sync(
                worker_name((self.rank + 1) % self.world_size),
                _set_rpc_done,
                args=(context_id, 1),
            )

            # For self.rank, it has 2 graphs to verify.
            # One is for current context id when this rank send first rpc
            # call and execute the torch.add() operator.
            # Another one is for prev context id when this rank make
            # nested call.
            ctx = dist_autograd._current_context()
            self.assertEqual(context_id, ctx._context_id())
            send_functions = ctx._send_functions()
            self.assertEqual(2, len(send_functions))
            recv_functions = ctx._recv_functions()
            self.assertEqual(2, len(recv_functions))
            self._verify_graph_for_first_rpc_call(
                next(iter(send_functions.values())),
                list(recv_functions.values())[1],
                t1,
                t2,
                ret,
            )
            self._verify_graph_for_rpc_call_exec(list(send_functions.values())[1])

            # Verify two pairs of send and recv functions for nested
            # call
            self._check_rpc_done(1)
            ctx = dist_autograd._retrieve_context(ctx_ids[1])
            self._verify_graph_for_nested_rpc_call(ctx)
            # this barrier is needed so one worker does not clean up their
            # autograd context before another worker tries to access it.
            dist.barrier()

    def _test_no_graph_with_tensors_not_require_grad(self, exec_mode, sparse):
        """When no communicated tensor requires grad, no send/recv functions
        should be recorded in the autograd context."""
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        dst_rank = (self.rank + 1) % self.world_size
        with dist_autograd.context() as context_id:
            if sparse:
                t1 = build_sparse_tensor(requires_grad=False)
                t2 = build_sparse_tensor(requires_grad=False)
            else:
                t1 = torch.ones(3, 3, requires_grad=False)
                t2 = torch.zeros(3, 3, requires_grad=False)
            if ExecMode.RPC_SYNC == exec_mode:
                ret = rpc.rpc_sync(
                    worker_name(dst_rank), torch.add, args=(t1, t2)
                )
            elif ExecMode.REMOTE == exec_mode:
                ret = rpc.remote(
                    worker_name(dst_rank), torch.add, args=(t1, t2)
                ).to_here()
            else:
                raise ValueError(f"Unrecognized ExecMode {exec_mode}")

            rpc.rpc_sync(
                worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
            )

            ctx = dist_autograd._current_context()
            send_functions = ctx._send_functions()
            self.assertEqual(len(send_functions), 0)
            recv_functions = ctx._recv_functions()
            self.assertEqual(len(recv_functions), 0)

            # Wait for the prev rank to be done with rpc.
            self._check_rpc_done(1)
            # NB: RRef.to_here() always passes the autograd context to
            # the callee, as the caller does not know whether the return
            # value would contain a requires_grad tensor or not.
            #
            # rpc/remote with udf (_set_rpc_done here) also always passes the
            # autograd context to the callee due to the same reason.
            self.assertNotEqual(-1, dist_autograd._retrieve_context(ctx_ids[1]))
            dist.barrier()

    def _test_rpc_complex_args(self, exec_mode, sparse):
        """Stack a mix of requires_grad / no-grad tensors over RPC and verify
        only the requires_grad ones are attached to the autograd graph."""
        with dist_autograd.context() as context_id:
            num_tensors = 10
            tensors = []
            for i in range(num_tensors):
                # Even-indexed tensors require grad, odd-indexed do not.
                if sparse:
                    tensor = build_sparse_tensor(requires_grad=(i % 2 == 0))
                else:
                    tensor = torch.ones(3, 3, requires_grad=(i % 2 == 0))
                tensors.append(tensor)
            dst_rank = self._next_rank()
            if ExecMode.RPC_SYNC == exec_mode:
                ret = rpc.rpc_sync(
                    worker_name(dst_rank), torch.stack, args=(tensors,)
                )
            elif ExecMode.REMOTE == exec_mode:
                ret = rpc.remote(
                    worker_name(dst_rank), torch.stack, args=(tensors,)
                ).to_here()
            else:
                raise ValueError(f"Unrecognized ExecMode {exec_mode}")

            self.assertEqual(torch.stack(tensors), ret)

            # Verify appropriate tensors have been attached the autograd graph.
            next_funcs = next(iter(dist_autograd._current_context()._send_functions().values())).next_functions
            # NOTE(review): ``idx`` is assigned but never used below.
            idx = 0
            for i in range(len(next_funcs)):
                self.assertEqual(
                    "torch::autograd::AccumulateGrad", next_funcs[i][0].name()
                )
                self.assertEqual(tensors[i], next_funcs[i][0].variable)

            # Verify that the worker id has been recorded in the context
            ctx = dist_autograd._current_context()
            worker_ids = ctx._known_worker_ids()
            self.assertEqual(len(worker_ids), 1)
            self.assertEqual(worker_ids, {dst_rank})

    def context_cleanup_test_helper(self, rpc_args, func, nested=False):
        """Run ``func`` over RPC on peer ranks, exit the context, and check
        the autograd contexts are cleaned up on every involved node."""
        initialize_pg(self.file_init_method, self.rank, self.world_size)

        # test that in dist autograd, in the case that tensors communicated over RPC do
        # NOT require grad, we still cleanup the dist autograd contexts created
        # on other nodes. This is because the autograd context is still
        # communicated over RPC even if tensor arguments do not require grad, as
        # it is possible that the response could.
        if nested:
            dst_rank = (self.rank + 1) % self.world_size
            nested_dst_rank = (dst_rank + 1) % self.world_size
            dst_ranks = {dst_rank}
        else:
            dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}

        with dist_autograd.context() as context_id:
            for dst_rank in dst_ranks:
                rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args)
                rpc.rpc_sync(
                    worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
                )
                if nested:
                    rpc.rpc_sync(
                        worker_name(nested_dst_rank),
                        _set_rpc_done,
                        args=(context_id, 2),
                    )
        # the thread's context id should be cleaned up
        with self.assertRaises(RuntimeError):
            dist_autograd._retrieve_context(context_id)
        # Ensure all peers have finished mutating the
        # `known_context_ids` set.
        dist.barrier()
        # check that all contexts have been cleaned up.
        success = _all_contexts_cleaned_up()
        self.assertTrue(success)

    def _backward_no_grad_on_tensor(self, t1, t2, sparse):
        """dist_autograd.backward must not populate ``.grad`` on leaf tensors
        and must not disturb grads produced by the local engine."""
        with dist_autograd.context() as context_id:
            loss = rpc.rpc_sync(
                worker_name(self._next_rank()),
                torch.add,
                args=(t1, t2))
            if sparse:
                loss = torch.sparse.sum(loss)
            else:
                loss = loss.sum()
            dist_autograd.backward(context_id, [loss], retain_graph=True)
            self.assertIsNone(t1.grad)
            self.assertIsNone(t2.grad)

            # Now populate .grad with local autograd engine and
            # verify dist autograd doesn't mess with it.
            loss_local = torch.add(t1, t2)
            if sparse:
                loss_local = torch.sparse.sum(loss_local)
            else:
                loss_local = loss_local.sum()
            loss_local.backward()
            self.assertIsNotNone(t1.grad)
            self.assertIsNotNone(t2.grad)

            t1_grad_before = t1.grad
            t2_grad_before = t2.grad
            dist_autograd.backward(context_id, [loss])
            self.assertEqual(t1_grad_before, t1.grad)
            self.assertEqual(t2_grad_before, t2.grad)

    # The current rank first creates a tensor on the rref_owner, and then passes
    # the rref with another tensor to the callee to run either my_rref_add or
    # my_nested_rref_add, depending on whether the callee is the rref owner.
    # The grad of tensor lives on the current rank, and the grad of the rref
    # tensor lives on the rref owner.
    def _backward_rref(self, callee, rref_owner, t1, t2, local_grads, sparse):
        # NOTE(review): ``local_grads`` is accepted but not used here; grads
        # are compared against locally computed t1.grad / t2.grad instead.
        local_ret = torch.add(t1, t2)
        if sparse:
            local_ret = torch.sparse.sum(local_ret)
        else:
            local_ret = local_ret.sum()
        local_ret.backward()
        with dist_autograd.context() as context_id:
            if sparse:
                rref_t1 = rpc.remote(
                    rref_owner, build_sparse_tensor, args=(False, True,)
                )
            else:
                rref_t1 = rpc.remote(
                    rref_owner, _torch_ones, args=((3, 3),), kwargs={"requires_grad": True}
                )
            if callee == rref_owner:
                rref = rpc.remote(callee, my_rref_add, args=(rref_t1, t2))
            else:
                # Callee is not the owner: it must forward the RRef to the
                # owner via my_nested_rref_add.
                rref = rpc.remote(
                    callee, my_nested_rref_add, args=(rref_owner, rref_t1, t2)
                )
            ret = rref.to_here()
            if sparse:
                ret = torch.sparse.sum(ret)
            else:
                ret = ret.sum()
            dist_autograd.backward(context_id, [ret])

            # verify grads on caller
            grads = dist_autograd.get_gradients(context_id)
            self.assertIn(t2, grads)
            self.assertEqual(grads[t2], t2.grad)

            # verify grads on rref owner
            self.assertTrue(
                rpc.rpc_sync(
                    rref_owner,
                    _compare_owner_value,
                    args=(context_id, rref_t1, t1.grad),
                )
            )

    # In this test, every rank will serve as a parameter server (ps) and a
    # driver, and then kicks off trainers on the other three ranks. So, we have:
    # ps = rank0 with trainers = rank1/2/3
    # ps = rank1 with trainers = rank2/3/0
    # ps = rank2 with trainers = rank3/0/1
    # ps = rank3 with trainers = rank0/1/2
    #
    # These four test ps-trainer groups run on completely separate autograd
    # graphs, but they share the same set of underlying RpcAgents.
    def _test_trainer_ps(self, create_ref_fn, trainer_fn, sparse):
        if sparse:
            t1 = build_sparse_tensor(requires_grad=True)
            t2 = build_sparse_tensor(requires_grad=True)
        else:
            t1 = torch.ones((3, 3), requires_grad=True)
            t2 = torch.zeros((3, 3), requires_grad=True)

        local_ret = torch.add(t1, t2)
        if sparse:
            torch.sparse.sum(local_ret).backward()
        else:
            local_ret.sum().backward()

        # create rref on self
        rref_t1 = rpc.remote(
            worker_name(self.rank),
            create_ref_fn,
            args=())

        # kick off forward and backward pass on three other workers (trainers)
        rank_diffs = [1, 2, 3]
        futures = []
        for rank_diff in rank_diffs:
            futures.append(
                rpc.rpc_async(
                    worker_name((self.rank + rank_diff) % self.world_size),
                    trainer_fn,
                    args=(rref_t1, t2, worker_name(self.rank), rank_diff, sparse),
                )
            )

        # check if the trainers have done with their backward pass
        for rank_diff in rank_diffs:
            self._check_rpc_done(rank_diff)

        # trainers are done and holding the context for verification
        # NOTE(review): accumulate_grad_func is dead — assigned, never used.
        accumulate_grad_func = None
        for rank_diff in rank_diffs:
            # make sure grads are accumulated for the same tensors and values
            # are all correct
            ctx_id = ctx_ids[rank_diff]
            grads = dist_autograd.get_gradients(ctx_id)
            local_t1 = rref_t1.to_here()
            self.assertIn(local_t1, grads)
            self.assertEqual(grads[local_t1], t1.grad)

        # unblock trainers
        _set_rpc_done(None, 0)

        # wait until all trainers are done
        torch.futures.wait_all(futures)

    def _backward_multiple_round_trips(self, t1, t2, t3, t4, t5, local_grads, sparse):
        for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
            with dist_autograd.context() as context_id:
                # Multiple RPCs between different nodes.
                val = self._exec_func(exec_mode, torch.add, t1, t2)
                val = self._exec_func(exec_mode, torch.mul, t3, val)
                s1 = self._exec_func(exec_mode, torch.stack, (t4, val))
                s2 = self._exec_func(exec_mode, torch.stack, (t5, val))
                if sparse:
                    # bmm/matmul are not supported for sparse; use mul twice.
                    val = self._exec_func(exec_mode, torch.mul, s1, s2)
                    val = self._exec_func(exec_mode, torch.mul, val, val)
                    loss = torch.sparse.sum(val)
                else:
                    val = self._exec_func(exec_mode, torch.bmm, s1, s2)
                    val = self._exec_func(exec_mode, torch.matmul, val, val)
                    loss = val.sum()

                ret = self._verify_backwards(
                    exec_mode, [loss], context_id, local_grads, t1, t2, t3, t4, t5
                )
                # Keep the LOCAL-mode grads as the reference for later modes.
                local_grads = ret if ret else local_grads

    def _backward_different_dtypes(self, t1, t2, sparse):
        local_grads = None
        for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
            with dist_autograd.context() as context_id:
                loss = self._exec_func(exec_mode, torch.add, t1, t2)
                if sparse:
                    loss = torch.sparse.sum(loss)
                else:
                    loss = loss.sum()
                local_grads = self._verify_backwards(
                    exec_mode, [loss], context_id, local_grads, t1, t2
                )

    # Run the same code locally and with dist autograd and verify gradients
    # are same.
    def _backward_simple_python_udf(self, t1, t2, sparse):
        local_grads = None
        for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
            with dist_autograd.context() as context_id:
                ret = self._exec_func(exec_mode, my_py_add, t1, t2)
                if sparse:
                    loss = torch.sparse.sum(ret)
                else:
                    loss = ret.sum()
                local_grads = self._verify_backwards(
                    exec_mode, [loss], context_id, local_grads, t1, t2
                )

    # Run the same code locally and with dist autograd and verify gradients
    # are same.
    def _backward_simple_script_call(self, t1, t2, sparse):
        local_grads = None
        for exec_mode in [
            ExecMode.LOCAL,
            ExecMode.RPC_SYNC,
            ExecMode.RPC_ASYNC,
            ExecMode.REMOTE,
        ]:
            with dist_autograd.context() as context_id:
                forward_ret = self._exec_func(exec_mode, my_script_add, t1, t2)
                if sparse:
                    loss = torch.sparse.sum(forward_ret)
                else:
                    loss = forward_ret.sum()
                ret = self._verify_backwards(
                    exec_mode, [loss], context_id, local_grads, t1, t2
                )
                local_grads = ret if ret else local_grads

    def _nested_backward_accumulate_grads(self, t1, t2, sparse):
        """Run backward twice over the same graph (retain_graph on the first
        pass) through a nested RPC to check gradient accumulation."""
        with dist_autograd.context() as context_id:
            ret = rpc.rpc_sync(
                worker_name(self._next_rank()),
                DistAutogradTest._test_nested_backward_accumulate_grads,
                args=(t1, t2, self._next_rank()),
            )
            if sparse:
                loss = torch.sparse.sum(ret)
            else:
                loss = ret.sum()
            # Run backward twice.
            dist_autograd.backward(context_id, [loss], retain_graph=True)
            dist_autograd.backward(context_id, [loss])

    def _backwards_nested_python_udf(self, t1, t2, sparse):
        # First compute the reference grads locally with the regular engine.
        t3 = t1 * t2
        t4 = t1 + t2
        res = t3 + t4
        loss = t1 * t2 * t3 * t4 * res
        if sparse:
            loss = torch.sparse.sum(loss)
        else:
            loss = loss.sum()
        torch.autograd.backward([loss])

        # Now run distributed autograd.
        with dist_autograd.context() as context_id:
            loss = rpc.rpc_sync(
                worker_name(self._next_rank()),
                DistAutogradTest._nested_python_udf,
                args=(t1, t2, self._next_rank()),
            )
            if sparse:
                loss = torch.sparse.sum(loss)
            else:
                loss = loss.sum()
            dist_autograd.backward(context_id, [loss])
            grads = dist_autograd.get_gradients(context_id)
            # Distributed grads must match the local reference grads.
            self.assertEqual(t1.grad, grads[t1])
            self.assertEqual(t2.grad, grads[t2])

    def _mixed_requires_grad(self, t1, t2, sparse):
        """t1 requires grad, t2 does not: only t1 should get a gradient."""
        for exec_mode in [ExecMode.RPC_SYNC, ExecMode.REMOTE]:
            with dist_autograd.context() as context_id:
                # NOTE(review): "operaton" spelling matches the staticmethod
                # defined elsewhere in this file.
                ret = self._exec_func(
                    exec_mode, DistAutogradTest._mixed_requires_grad_operaton, t1, t2
                )
                self.assertEqual(t1 * t2, ret)
                if sparse:
                    loss = torch.sparse.sum(ret)
                else:
                    loss = ret.sum()
                dist_autograd.backward(context_id, [loss])
                self.assertTrue(t1.requires_grad)
                self.assertFalse(t2.requires_grad)
                grads = dist_autograd.get_gradients(context_id)
                self.assertIn(t1, grads)
                self.assertNotIn(t2, grads)
                # d(t1 * t2)/d(t1) == t2.
                self.assertEqual(t2, grads[t1])

    def _multiple_backward(self, t1, t2, sparse):
        with dist_autograd.context() as context_id:
            loss = rpc.rpc_sync(
                worker_name(self._next_rank()),
                torch.add,
                args=(t1, t2))
            if sparse:
                loss = torch.sparse.sum(loss)
            else:
                loss = loss.sum()
            # Run backward in a loop multiple times.
            for i in range(1000):
                dist_autograd.backward(context_id, [loss], retain_graph=True)

    # For current context, this rank sends t1 and t2 tensors to dst_rank,
    # then get t3 = torch.add(t1, t2) result tensor.
    # For the current context in this rank, it expects graph like this:
    #  send function:
    #              rpcSendBackward
    #                  /          \
    #  t1.AccumulateGrad         t2.AccumulateGrad
    #
    #  recv function:
    #
    #            |
    #          t3.rpcRecvBackward
    #
    def _verify_graph_for_first_rpc_call(
        self, send_function, recv_function, t1, t2, ret
    ):
        # Retrieve the next functions in the graph.
        next_funcs = send_function.next_functions
        self.assertEqual(2, len(next_funcs))

        # We should now hit t1 and t2 in the autograd graph.
        self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[0][0].name())
        self.assertEqual(t1, next_funcs[0][0].variable)
        self.assertEqual(0, next_funcs[0][1])
        self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[1][0].name())
        self.assertEqual(t2, next_funcs[1][0].variable)
        self.assertEqual(0, next_funcs[1][1])

        # Test recv functions.
        self.assertEqual(ret.grad_fn, recv_function)

    # Run the same code locally and with dist autograd and verify gradients
    # are same.
    def _backward_simple(self, dst, t1, t2, local_grads, sparse):
        for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
            with dist_autograd.context() as context_id:
                ret = self._exec_func_with_dst(
                    dst, exec_mode, torch.add, t1, t2
                )
                if sparse:
                    loss = torch.sparse.sum(ret)
                else:
                    loss = ret.sum()
                ret = self._verify_backwards(
                    exec_mode, [loss], context_id, local_grads, t1, t2
                )
                local_grads = ret if ret else local_grads

    # For a context passed from previous nested chain calls, this rank
    # receives two tensors t1 and t2, executes torch.add(t1, t2) and sends
    # result tensor t3 back.
    # For this context in this rank, it expects graph like this:
    #  send and recv functions:
    #       rpcSendBackward
    #           |
    #      t3.AddBackward0
    #          /             \
    # t1.recvRpcBackward    t2.recvRpcBackward
    def _verify_graph_for_rpc_call_exec(self, send_function):
        # Verify next function is AddBackward0
        next_funcs = send_function.next_functions
        self.assertEqual(1, len(next_funcs))
        add_backward_fn = next_funcs[0][0]
        self.assertEqual("AddBackward0", add_backward_fn.name())

        # Verify the next two functions are the same recv backward function.
        next_funcs = add_backward_fn.next_functions
        self.assertEqual(2, len(next_funcs))
        self.assertEqual(
            "torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
        )
        self.assertEqual(
            "torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name()
        )
        self.assertEqual(next_funcs[0][0], next_funcs[1][0])

    # For a context passed from previous nested chain calls, this rank
    # receives two tensors t1 and t2, forwards t1 and t2 tensors using
    # nested rpc call to next dst. In return route, receive result tensor t3
    # from next dst and forwarding t3 back to previous calls.
    # For this context in this rank, it expects graph like this:
    #  send and recv functions for receiving and forwarding t1 and t2:
    #       rpcSendBackward
    #          /             \
    # t1.recvRpcBackward    t2.recvRpcBackward
    #  send and recv functions for receiving and forwarding t3:
    #       rpcSendBackward
    #           |
    #      t3.recvRpcBackward
    def _verify_graph_for_nested_rpc_call(self, ctx):
        send_functions = ctx._send_functions()
        self.assertEqual(2, len(send_functions))

        # For send function when making nest rpc call,
        # next functions of the send function are two recv functions
        # for received two tensors from previous call
        next_funcs = next(iter(send_functions.values())).next_functions
        self.assertEqual(2, len(next_funcs))
        self.assertEqual(
            "torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
        )
        self.assertEqual(
            "torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name()
        )
        self.assertEqual(next_funcs[0][0], next_funcs[1][0])

        # For send function when returning response to previous call
        # next function of the send function is the recv function
        # for received tensor result returned from nested call
        next_funcs = list(send_functions.values())[1].next_functions
        self.assertEqual(1, len(next_funcs))
        self.assertEqual(
            "torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
        )


class 
TensorPipeAgentDistAutogradTest(CommonDistAutogradTest):
    """Sparse-tensor variants of the dist autograd tests."""

    # Sparse tests only work with TensorPipeAgent.
    @dist_init
    def test_graph_for_builtin_call_sparse(self):
        self._test_graph(torch.add, ExecMode.RPC_SYNC, True)

    @dist_init
    def test_graph_for_python_call_sparse(self):
        self._test_graph(my_py_add, ExecMode.RPC_SYNC, True)

    @dist_init
    def test_graph_for_builtin_remote_call_sparse(self):
        self._test_graph(torch.add, ExecMode.REMOTE, True)

    @dist_init
    def test_graph_for_python_remote_call_sparse(self):
        self._test_graph(my_py_add, ExecMode.REMOTE, True)

    @dist_init
    def test_graph_for_py_nested_call_sparse(self):
        self._test_graph_for_py_nested_call(ExecMode.RPC_SYNC, True)

    @dist_init
    def test_graph_for_py_nested_remote_call_sparse(self):
        self._test_graph_for_py_nested_call(ExecMode.REMOTE, True)

    @dist_init
    def test_graph_for_py_nested_call_itself_sparse(self):
        self._test_graph_for_py_nested_call_itself(ExecMode.RPC_SYNC, True)

    @dist_init
    def test_graph_for_py_nested_remote_call_itself_sparse(self):
        self._test_graph_for_py_nested_call_itself(ExecMode.REMOTE, True)

    @dist_init
    def test_no_graph_with_tensors_not_require_grad_sparse(self):
        self._test_no_graph_with_tensors_not_require_grad(ExecMode.RPC_SYNC, True)

    @dist_init
    def test_no_graph_with_tensors_not_require_grad_remote_sparse(self):
        self._test_no_graph_with_tensors_not_require_grad(ExecMode.REMOTE, True)

    @dist_init
    def test_rpc_complex_args_sparse(self):
        self._test_rpc_complex_args(ExecMode.RPC_SYNC, True)

    @dist_init
    def test_remote_complex_args_sparse(self):
        self._test_rpc_complex_args(ExecMode.REMOTE, True)

    @dist_init
    def test_context_cleanup_tensor_with_grad_sparse(self):
        t1 = build_sparse_tensor(requires_grad=True)
        t2 = build_sparse_tensor(requires_grad=True)
        self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)

    @dist_init
    def test_context_cleanup_tensor_no_grad_sparse(self):
        t1 = build_sparse_tensor(requires_grad=False)
        # The same tensor is deliberately passed twice.
        self.context_cleanup_test_helper(rpc_args=(t1, t1), func=torch.add)

    @dist_init
    def test_context_cleanup_nested_rpc_sparse(self):
        t1 = build_sparse_tensor(requires_grad=True)
        t2 = build_sparse_tensor(requires_grad=True)
        dst_rank = (self.rank + 1) % self.world_size
        args = (t1, t2, dst_rank, self.world_size, 0)
        self.context_cleanup_test_helper(
            rpc_args=args, func=my_py_nested_call, nested=True
        )

    @dist_init
    def test_backward_no_grad_on_tensor_sparse(self):
        self._backward_no_grad_on_tensor(
            build_sparse_tensor(requires_grad=True),
            build_sparse_tensor(requires_grad=True),
            True
        )

    @dist_init
    def test_backward_simple_sparse(self):
        self._backward_simple(
            self._next_rank(),
            build_sparse_tensor(requires_grad=True),
            build_sparse_tensor(requires_grad=True),
            None,
            True
        )

    @dist_init
    def test_backward_simple_self_sparse(self):
        self._backward_simple(
            self.rank,
            build_sparse_tensor(requires_grad=True),
            build_sparse_tensor(requires_grad=True),
            None,
            True
        )

    @dist_init
    def test_backward_rref_multi_sparse(self):
        # All non-zero ranks target worker0 as both callee and rref owner.
        if self.rank > 0:
            callee = "worker0"
            rref_owner = callee
            self._backward_rref(
                callee,
                rref_owner,
                build_sparse_tensor(requires_grad=True),
                build_sparse_tensor(requires_grad=True),
                None,
                True
            )

    @dist_init
    def test_backward_rref_sparse(self):
        callee = worker_name(self._next_rank())
        rref_owner = callee
        self._backward_rref(
            callee,
            rref_owner,
            build_sparse_tensor(requires_grad=True),
            build_sparse_tensor(requires_grad=True),
            None,
            True
        )

    @dist_init
    def test_backward_rref_nested_sparse(self):
        # Callee and rref owner are different workers to force forwarding.
        callee = worker_name((self.rank + 1) % self.world_size)
        rref_owner = worker_name((self.rank + 2) % self.world_size)
        self._backward_rref(
            callee,
            rref_owner,
            build_sparse_tensor(requires_grad=True),
            build_sparse_tensor(requires_grad=True),
            None,
            True
        )

    @dist_init
    def test_trainer_ps_sparse(self):
        self._test_trainer_ps(
            build_sparse_tensor,
            _run_trainer,
            True
        )

    @dist_init
    def test_backward_multiple_round_trips_sparse(self):
        self._backward_multiple_round_trips(
            build_sparse_tensor(requires_grad=True),
            build_sparse_tensor(requires_grad=False),
            build_sparse_tensor(requires_grad=True),
            build_sparse_tensor(requires_grad=False),
            build_sparse_tensor(requires_grad=True),
            None,
            True
        )

    @dist_init
    def test_backward_different_dtypes_sparse(self):
        self._backward_different_dtypes(
            build_sparse_tensor(requires_grad=True, dtype=torch.float32),
            build_sparse_tensor(requires_grad=True, dtype=torch.float64),
            True
        )

    @dist_init
    def test_backward_simple_python_udf_sparse(self):
        self._backward_simple_python_udf(
            build_sparse_tensor(requires_grad=True),
            build_sparse_tensor(requires_grad=True),
            True
        )

    @dist_init
    def test_backward_simple_script_call_sparse(self):
        self._backward_simple_script_call(
            build_sparse_tensor(requires_grad=True),
            build_sparse_tensor(requires_grad=True),
            True
        )

    @dist_init
    def test_nested_backward_accumulate_grads_sparse(self):
        self._nested_backward_accumulate_grads(
            build_sparse_tensor(requires_grad=True),
            build_sparse_tensor(requires_grad=True),
            True
        )

    @dist_init
    def test_backwards_nested_python_udf_sparse(self):
        # Run equivalent of _nested_python_udf locally.
        self._backwards_nested_python_udf(
            build_sparse_tensor(requires_grad=True),
            build_sparse_tensor(requires_grad=True),
            True
        )

    @dist_init
    def test_mixed_requires_grad_sparse(self):
        self._mixed_requires_grad(
            build_sparse_tensor(requires_grad=True),
            build_sparse_tensor(requires_grad=False),
            True
        )

    @dist_init
    def test_multiple_backward_sparse(self):
        self._multiple_backward(
            build_sparse_tensor(requires_grad=True),
            build_sparse_tensor(requires_grad=True),
            True
        )

    @dist_init
    def test_embedding_bag_with_no_grad_tensors(self):
        """Sparse-gradient EmbeddingBag: local vs remote backward (run twice
        to exercise sparse grad accumulation) must agree."""
        dst = self._next_rank()
        remote_embedding = rpc.remote(
            worker_name(dst),
            torch.nn.EmbeddingBag,
            args=(16, 16),
            kwargs={"mode": "sum", "sparse": True},
        )
        local_embedding = torch.nn.EmbeddingBag(16, 16, mode="sum", sparse=True)

        input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
        # requires_grad = True to record send/recv functions
        per_sample_weights = torch.rand((8), requires_grad=True)
        offsets = torch.LongTensor([0, 4])

        local_res = local_embedding(input, offsets, per_sample_weights)

        # Run backward twice.
        torch.autograd.backward([local_res.sum()], retain_graph=True)
        torch.autograd.backward([local_res.sum()])
        local_grad = local_embedding.weight.grad

        with dist_autograd.context() as context_id:
            res = rpc.rpc_sync(
                worker_name(dst),
                DistAutogradTest._call_remote_embedding,
                args=(remote_embedding, input, offsets, per_sample_weights),
            )

            # Run backward twice to test accumulation of sparse gradients.
            dist_autograd.backward(context_id, [res.sum()], retain_graph=True)
            dist_autograd.backward(context_id, [res.sum()])

            remote_grad = rpc.rpc_sync(
                worker_name(dst),
                DistAutogradTest._get_grad,
                args=(remote_embedding, context_id),
            )

            self.assertEqual(local_grad, remote_grad)


class DistAutogradTest(CommonDistAutogradTest):
    @dist_init
    def test_autograd_context(self):
        # Verify max possible id.
+ max_auto_increment = 281474976710655 + self.assertEqual( + max_auto_increment + (self.worker_id << 48), dist_autograd._get_max_id() + ) + + context_ids = [] + for i in range(200): + with dist_autograd.context() as context_id: + self.assertEqual( + context_id, + dist_autograd._retrieve_context(context_id)._context_id(), + ) + # First 16 bits should be worker_id. + self.assertEqual(self.worker_id, context_id >> 48) + context_ids.append(context_id) + + for context_id in context_ids: + with self.assertRaisesRegex( + RuntimeError, + f"Could not find autograd context with id: {context_id}", + ): + dist_autograd._retrieve_context(context_id) + + @dist_init + def test_nested_context(self): + with dist_autograd.context() as context_id: + # Nested contexts not supported. + with self.assertRaisesRegex( + RuntimeError, "Already have an autograd context id for this thread" + ): + with dist_autograd.context() as context_id: + pass + + @dist_init + def test_graph_for_builtin_call(self): + self._test_graph(torch.add, ExecMode.RPC_SYNC, False) + + @dist_init + def test_graph_for_python_call(self): + self._test_graph(my_py_add, ExecMode.RPC_SYNC, False) + + @dist_init + def test_graph_for_builtin_remote_call(self): + self._test_graph(torch.add, ExecMode.REMOTE, False) + + @dist_init + def test_graph_for_python_remote_call(self): + self._test_graph(my_py_add, ExecMode.REMOTE, False) + + @dist_init + def test_graph_for_py_nested_call(self): + self._test_graph_for_py_nested_call(ExecMode.RPC_SYNC, False) + + @dist_init + def test_graph_for_py_nested_remote_call(self): + self._test_graph_for_py_nested_call(ExecMode.REMOTE, False) + + @dist_init + def test_graph_for_py_nested_call_itself(self): + self._test_graph_for_py_nested_call_itself(ExecMode.RPC_SYNC, False) + + @dist_init + def test_graph_for_py_nested_remote_call_itself(self): + self._test_graph_for_py_nested_call_itself(ExecMode.REMOTE, False) + + @dist_init + def test_no_graph_with_tensors_not_require_grad(self): + 
self._test_no_graph_with_tensors_not_require_grad(ExecMode.RPC_SYNC, False) + + @dist_init + def test_no_graph_with_tensors_not_require_grad_remote(self): + self._test_no_graph_with_tensors_not_require_grad(ExecMode.REMOTE, False) + + def _test_grad_only_on_return_value(self, exec_mode): + initialize_pg(self.file_init_method, self.rank, self.world_size) + dst_rank = (self.rank + 1) % self.world_size + with dist_autograd.context() as context_id: + if ExecMode.RPC_SYNC == exec_mode: + ret = rpc.rpc_sync(worker_name(dst_rank), ret_requires_grad) + elif ExecMode.REMOTE == exec_mode: + ret = rpc.remote( + worker_name(dst_rank), ret_requires_grad + ).to_here() + else: + raise ValueError(f"Unrecognized ExecMode {exec_mode}") + + dist_autograd.backward(context_id, [ret.sum()]) + + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + + # Wait for the prev rank to be done with rpc. + self._check_rpc_done(1) + grads = dist_autograd.get_gradients(ctx_ids[1]) + self.assertEqual(1, len(grads)) + self.assertIn(requires_grad_tensor, grads) + self.assertEqual(torch.ones_like(ret), grads[requires_grad_tensor]) + # due to the above get_gradients call, ensure that dist autograd + # contexts aren't cleaned up until all workers exit context managers + dist.barrier() + + @dist_init + def test_grad_only_on_return_value(self): + self._test_grad_only_on_return_value(ExecMode.RPC_SYNC) + + @dist_init + def test_grad_only_on_return_value_remote(self): + self._test_grad_only_on_return_value(ExecMode.REMOTE) + + @dist_init + def test_rpc_complex_args(self): + self._test_rpc_complex_args(ExecMode.RPC_SYNC, False) + + @dist_init + def test_remote_complex_args(self): + self._test_rpc_complex_args(ExecMode.REMOTE, False) + + @dist_init + def test_context_cleanup_tensor_with_grad(self): + t1 = torch.ones(3, 3, requires_grad=True) + t2 = torch.zeros(3, 3, requires_grad=True) + self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add) + + @dist_init + def 
test_context_cleanup_tensor_no_grad(self): + t1 = torch.ones(3, 3, requires_grad=False) + self.context_cleanup_test_helper(rpc_args=(t1, t1), func=torch.add) + + @dist_init + def test_context_cleanup_no_tensors(self): + self.context_cleanup_test_helper(rpc_args=(1, 1), func=my_scalar_add) + + @dist_init + def test_context_cleanup_nested_rpc(self): + t1 = torch.ones(3, 3, requires_grad=True) + t2 = torch.zeros(3, 3, requires_grad=True) + dst_rank = (self.rank + 1) % self.world_size + args = (t1, t2, dst_rank, self.world_size, 0) + self.context_cleanup_test_helper( + rpc_args=args, func=my_py_nested_call, nested=True + ) + + @dist_init + def test_worker_ids_recorded(self): + dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank} + with dist_autograd.context() as context_id: + # if no tensors require grad, we should still record worker_ids, as + # the autograd context ID is still passed to other workers. + t1 = torch.ones(3, 3, requires_grad=False) + t2 = torch.zeros(3, 3, requires_grad=False) + for dst_rank in dst_ranks: + rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2)) + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + # all worker_ids in dst_ranks should be recorded. + ctx = dist_autograd._current_context() + worker_ids = ctx._known_worker_ids() + self.assertEqual(worker_ids, dst_ranks) + + # worker_ids should be recorded when tensors do require grad + t1.requires_grad = True + t2.requires_grad = True + for dst_rank in dst_ranks: + ret = rpc.rpc_sync( + worker_name(dst_rank), torch.add, args=(t1, t2) + ) + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + # all worker_ids in dst_ranks should be recorded. 
+ worker_ids = ctx._known_worker_ids() + self.assertEqual(worker_ids, dst_ranks) + + @dist_init + def test_dist_autograd_profiling(self): + with dist_autograd.context() as context_id: + t1 = torch.rand(3, 3, requires_grad=True) + t2 = torch.rand(3, 3, requires_grad=True) + loss = rpc.rpc_sync(worker_name(self._next_rank()), torch.add, args=(t1, t2)).sum() + with torch.autograd.profiler.profile() as p: + dist_autograd.backward(context_id, [loss]) + + function_events = p.function_events + + def get_event(partial_key): + return next(event for event in function_events if partial_key in event.name) + + send_event = get_event("SendRpcBackward") + recv_event = get_event("RecvRpcBackward") + backward_event = get_event("torch::distributed::autograd::backward") + # There should be at least 1 send and recv_events each, corresponding to send/recv functions executed. + self.assertEqual(send_event.count, 1) + self.assertEqual(recv_event.count, 1) + # The CPU total for backward event should be great than send and recv, since + # applying those functions in the backwards pass is a subset of the entire backward pass. + self.assertGreater(backward_event.cpu_time_total, send_event.cpu_time_total) + self.assertGreater(backward_event.cpu_time_total, recv_event.cpu_time_total) + + @dist_init + def test_error_in_context(self): + with dist_autograd.context() as context_id: + t1 = torch.rand(3, 3, requires_grad=True) + t2 = torch.rand(6, 6, requires_grad=True) + + with self.assertRaises(RuntimeError): + # This should throw an error since matrix sizes don't match. 
+ rpc.rpc_sync( + worker_name(self._next_rank()), torch.matmul, args=(t1, t2) + ) + + @dist_init + def test_backward_no_grad_on_tensor(self): + self._backward_no_grad_on_tensor( + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3), requires_grad=True), + False + ) + + @dist_init + def test_backward_simple(self): + self._backward_simple( + self._next_rank(), + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3), requires_grad=True), + None, + False + ) + + @dist_init + def test_backward_simple_self(self): + self._backward_simple( + self.rank, + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3), requires_grad=True), + None, + False + ) + + @dist_init + def test_backward_rref(self): + callee = worker_name(self._next_rank()) + rref_owner = callee + self._backward_rref( + callee, + rref_owner, + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3), requires_grad=True), + None, + False + ) + + @dist_init + def test_backward_rref_multi(self): + if self.rank > 0: + callee = "worker0" + rref_owner = callee + self._backward_rref( + callee, + rref_owner, + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3), requires_grad=True), + None, + False + ) + + @dist_init + def test_backward_rref_nested(self): + callee = worker_name((self.rank + 1) % self.world_size) + rref_owner = worker_name((self.rank + 2) % self.world_size) + self._backward_rref( + callee, + rref_owner, + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3), requires_grad=True), + None, + False + ) + + @dist_init + def test_trainer_ps(self): + self._test_trainer_ps( + create_tensor, + _run_trainer, + False + ) + + @dist_init + def test_trainer_ps_torchscript_functions(self): + # TODO, need more investigation + # there is rref leak when shutting down, suspect it is because + # ref as arg is passed to pybind boundary, and the ref is not garbage + # collected by python when calling shutdown() + import torch.distributed.rpc.api as api + api._ignore_rref_leak = True + + 
self._test_trainer_ps(create_torchscript_tensor, _run_trainer_torchscript, False) + + @dist_init + def test_backward_multiple_round_trips(self): + self._backward_multiple_round_trips( + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3)), + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3)), + torch.rand((3, 3), requires_grad=True), + None, + False + ) + + @dist_init + def test_backward_different_tensor_dims(self): + local_grads = None + t1 = torch.rand((4, 6), requires_grad=True) + t2 = torch.rand((6, 5)) + t3 = torch.rand((5, 7), requires_grad=True) + t4 = torch.rand((7, 9)) + + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + val = self._exec_func(exec_mode, torch.matmul, t1, t2) + val = self._exec_func(exec_mode, torch.linalg.multi_dot, (val, t3, t4)) + loss = val.sum() + + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2, t2, t3, t4 + ) + local_grads = ret if ret else local_grads + + @dist_init + def test_backward_unused_tensors(self): + local_grads = None + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + t3 = torch.rand((3, 3), requires_grad=True) + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + s = self._exec_func(exec_mode, torch.stack, (t1, t2, t3)) + val = self._exec_func( + exec_mode, + torch.matmul, + torch.narrow(s, 0, 0, 1), + torch.narrow(s, 0, 2, 1), + ) + + loss = val.sum() + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2, t3 + ) + local_grads = ret if ret else local_grads + + @dist_init + def test_backward_multiple_output_tensors(self): + local_grads = None + t = torch.rand((10, 2), requires_grad=True) + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + tensor_list = self._exec_func(exec_mode, torch.split, t, 
2) + t1 = tensor_list[0] + t2 = tensor_list[2] + t3 = tensor_list[4] + + val = self._exec_func(exec_mode, torch.linalg.multi_dot, (t1, t2, t3)) + + loss = val.sum() + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t + ) + local_grads = ret if ret else local_grads + + def _run_test_backward_unused_send_function_in_thread(self): + with dist_autograd.context() as context_id: + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + + # We don't use the result of an RPC function, as a result the + # backward pass would hang in the "FAST" mode. + res = rpc.rpc_sync( + worker_name(self._next_rank()), torch.add, args=(t1, t2) + ) + + val = torch.mul(t1, t2) + + # Run backward, this would hang forever. + dist_autograd.backward(context_id, [val.sum()]) + + @dist_init + def test_backward_unused_send_function(self): + # Run the test in a thread which would never finish. + t = threading.Thread( + target=self._run_test_backward_unused_send_function_in_thread + ) + t.daemon = True + t.start() + t.join(10) # Wait for 10s. + + # Verify thread is still alive (indicating backward hasn't completed yet). + self.assertTrue(t.is_alive()) + + @dist_init + def test_backward_autograd_engine_error(self): + with dist_autograd.context() as context_id: + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + # Perform some ops before error simulation. + tmp = (t1 + t2) * (t1 + t2) + t3 = SimulateBackwardError.apply(tmp) + + # Run multiple round trips across different nodes and verify the + # original node receives an error thrown on a node deep in the chain. 
+ val = rpc.rpc_sync( + worker_name(self._next_rank()), torch.add, args=(t2, t3) + ) + val = rpc.rpc_sync( + worker_name(self._next_rank()), torch.mul, args=(val, t2) + ) + val = rpc.rpc_sync( + worker_name(self._next_rank()), torch.matmul, args=(val, t2) + ) + val = rpc.rpc_sync( + worker_name(self._next_rank()), torch.div, args=(val, t2) + ) + + with self.assertRaisesRegex( + RuntimeError, "Error on Node [0-9]+: Simulate error on backward pass" + ): + # Run backwards, and validate we receive an error. + dist_autograd.backward(context_id, [val.sum()]) + + @dist_init(clean_shutdown=False) + @skip_but_pass_in_sandcastle_if( + IS_MACOS, + "Test is flaky on MacOS since libuv error handling is not as robust as TCP", + ) + def test_backward_node_failure(self): + rpc._set_rpc_timeout(5) # 5 seconds + initialize_pg(self.file_init_method, self.rank, self.world_size) + + with dist_autograd.context() as context_id: + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + res = rpc.rpc_sync( + worker_name(self._next_rank()), torch.add, args=(t1, t2) + ) + + # Wait for all RPCs to be done. + dist.barrier() + + # Kill all odd rank nodes. + if self.rank % 2 == 0: + shutdown_error_regex = self.get_shutdown_error_regex() + # Wait for all other nodes to die. + for rank in range(self.world_size): + if rank % 2 != 0: + wait_until_node_failure(rank, shutdown_error_regex) + + # Shutdown sequence is not very well defined and as a result + # we might see any error given by get_shutdown_error_regex() + with self.assertRaisesRegex(RuntimeError, shutdown_error_regex): + # Run backwards, and validate we receive an error since all + # other nodes are dead. + dist_autograd.backward(context_id, [res.sum()]) + else: + # Exit all other nodes. 
+ pass + + @dist_init + def test_backward_without_context(self): + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + + context_id = 100 # dummy context_id + with self.assertRaisesRegex( + RuntimeError, + f"Could not find autograd context with id: {context_id}", + ): + res = rpc.rpc_sync( + worker_name(self._next_rank()), torch.add, args=(t1, t2) + ) + dist_autograd.backward(context_id, [res.sum()]) + + @dist_init + def test_backward_without_rpc(self): + dst_rank = self.rank + with dist_autograd.context() as context_id: + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + t3 = torch.add(t1, t2) + + dist_autograd.backward(context_id, [t3.sum()]) + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(2, len(grads)) + self.assertIn(t1, grads) + self.assertIn(t2, grads) + self.assertEqual(torch.ones(3, 3), grads[t1]) + self.assertEqual(torch.ones(3, 3), grads[t2]) + + @dist_init + def test_backward_invalid_args(self): + with dist_autograd.context() as context_id: + + with self.assertRaisesRegex(TypeError, "incompatible function arguments"): + dist_autograd.backward(context_id, None) + + with self.assertRaisesRegex(TypeError, "incompatible function arguments"): + dist_autograd.backward(None, None) + + with self.assertRaisesRegex( + RuntimeError, "No tensors provided for gradient computation" + ): + dist_autograd.backward(context_id, []) + + with self.assertRaisesRegex(RuntimeError, "requires_grad not set on"): + t = torch.rand(3, 3) + dist_autograd.backward(context_id, [t]) + + with self.assertRaisesRegex( + RuntimeError, "is not a scalar, all roots need to be scalar" + ): + t = torch.rand(3, 3, requires_grad=True) + dist_autograd.backward(context_id, [t]) + + with self.assertRaisesRegex( + RuntimeError, "does not have a valid gradient function" + ): + t = torch.rand(1, requires_grad=True) + dist_autograd.backward(context_id, [t]) + + @dist_init + def 
test_backward_multiple_roots(self): + local_grads = None + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]: + with dist_autograd.context() as context_id: + r1 = self._exec_func(exec_mode, torch.add, t1, t2).sum() + r2 = self._exec_func(exec_mode, torch.mul, t1, t2).sum() + r3 = self._exec_func(exec_mode, torch.cos, t1).sum() + r4 = self._exec_func(exec_mode, torch.div, t1, t2).sum() + + local_grads = self._verify_backwards( + exec_mode, [r1, r2, r3, r4], context_id, local_grads, t1, t2 + ) + + @dist_init + def test_backward_different_dtypes(self): + self._backward_different_dtypes( + torch.rand((3, 3), requires_grad=True, dtype=torch.float32), + torch.rand((3, 3), requires_grad=True, dtype=torch.float64), + False + ) + + @dist_init + def test_backward_simple_python_udf(self): + self._backward_simple_python_udf( + torch.rand(3, 3, requires_grad=True), + torch.rand(3, 3, requires_grad=True), + False + ) + + @dist_init + def test_backward_simple_script_call(self): + self._backward_simple_script_call( + torch.rand(3, 3, requires_grad=True), + torch.rand(3, 3, requires_grad=True), + False + ) + + @staticmethod + def _complex_python_udf(t1, t2): + t3 = torch.nn.functional.linear(t1, t2) + t4 = torch.nn.functional.linear(t2, t3) + t5 = torch.nn.functional.linear(t3, t4) + return torch.linalg.multi_dot([t1, t2, t3, t4, t5]) + + @dist_init + def test_backward_complex_python_udf(self): + # Run the same code locally and with dist autograd and verify gradients + # are same. 
+ local_grads = None + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + ret = self._exec_func( + exec_mode, DistAutogradTest._complex_python_udf, t1, t2 + ) + loss = ret.sum() + local_grads = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2 + ) + + @staticmethod + def _python_udf_with_backward_error(t1, t2): + t3 = t1 + t2 + t4 = SimulateBackwardError.apply(t3) + return torch.linalg.multi_dot([t1, t2, t3, t4]) + + @staticmethod + def _nested_rpc_call_backward_error(t1, t2, dst): + t1 = t1 * t2 + t2 = t1 + t2 + res = rpc.rpc_sync( + worker_name(dst), + DistAutogradTest._python_udf_with_backward_error, + args=(t1, t2), + ) + return torch.linalg.multi_dot([t1, t2, res]) + + @dist_init + def test_backward_python_udf_error(self): + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + with dist_autograd.context() as context_id: + loss = rpc.rpc_sync( + worker_name(self._next_rank()), + DistAutogradTest._nested_rpc_call_backward_error, + args=(t1, t2, self._next_rank()), + ) + with self.assertRaisesRegex( + RuntimeError, "Simulate error on backward pass" + ): + dist_autograd.backward(context_id, [loss.sum()]) + + _backward_done = False + + @dist_init(clean_shutdown=False) + @skip_but_pass_in_sandcastle_if( + IS_MACOS, + "Test is flaky on MacOS since libuv error handling is not as robust as TCP", + ) + def test_backward_node_failure_python_udf(self): + # Set a short timeout to quickly time out failed RPCs. 
+ rpc._set_rpc_timeout(5) # 5 seconds + initialize_pg(self.file_init_method, self.rank, self.world_size) + + with dist_autograd.context() as context_id: + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + + dst = self._next_rank() + res = rpc.rpc_sync( + worker_name(dst), + my_py_nested_call, + args=(t1, t2, dst, self.world_size, 1), + ) + + dist.barrier() + + # Kill rank 2 (last hop of nested rpc) and verify rank 0 receives an error. + if self.rank == 2: + return + + store = dist.distributed_c10d._get_default_store() + if self.rank == 0: + # Wait for rank 2 to die. + shutdown_error_regex = self.get_shutdown_error_regex() + wait_until_node_failure(2, shutdown_error_regex) + # Shutdown sequence is not very well defined and as a result + # we might see any error given by get_shutdown_error_regex(). + with self.assertRaisesRegex(RuntimeError, shutdown_error_regex): + # Run backwards, and validate we receive an error since rank 2 is dead. + dist_autograd.backward(context_id, [res.sum()]) + + # Mark rank 0 is done in the store, since the RPC framework on + # some nodes might be broken at this point. + store.set('test_backward_node_failure_python_udf_rank0_done', "True") + else: + # Wait for backward to finish on rank 0. + store.wait(['test_backward_node_failure_python_udf_rank0_done'], timedelta(seconds=10)) + + @staticmethod + def _nested_python_udf(t1, t2, dst): + t3 = t1 * t2 + t4 = t1 + t2 + res = rpc.rpc_sync(worker_name(dst), my_py_add, args=(t3, t4)) + return t1 * t2 * t3 * t4 * res + + @dist_init + def test_backwards_nested_python_udf(self): + # Run equivalent of _nested_python_udf locally. 
+ self._backwards_nested_python_udf( + torch.rand(3, 3, requires_grad=True), + torch.rand(3, 3, requires_grad=True), + False + ) + + _test_clean_context_backward_context_id = None + + class MyBackwardFunc(Function): + @staticmethod + def forward(ctx, input): + return input + + @staticmethod + @once_differentiable + def backward(ctx, input): + assert DistAutogradTest._test_clean_context_backward_context_id is not None + + # Release the context to simulate error (use barrier before releasing + # context to ensure all nodes execute the backward function). + dist.barrier() + dist_autograd._release_context( + DistAutogradTest._test_clean_context_backward_context_id + ) + + # Verify all contexts are cleaned up. + assert _all_contexts_cleaned_up() + + return input + + @dist_init + def test_clean_context_during_backward(self): + """ + This test simulates the situation where the 'backward' call might throw + an exception locally which would lead to the autograd context being + cleaned up if we're using the context manager. As a result, the autograd + context might be cleaned up while some threads are still using the + autograd context. + + It is fine for the 'backward' call to throw an exception in this test, + but the process should not crash. + """ + initialize_pg(self.file_init_method, self.rank, self.world_size) + + context = dist_autograd._new_context() + context_id = context._context_id() + DistAutogradTest._test_clean_context_backward_context_id = context_id + + # Send the context id to all nodes. + for i in range(0, self.world_size): + if i != self.rank: + rank_distance = (i - self.rank + self.world_size) % self.world_size + rpc.rpc_sync( + worker_name(i), + _set_rpc_done, + args=(context_id, rank_distance), + ) + + dist.barrier() + + # Verify all context ids have been received. 
+ self.assertEqual(self.world_size - 1, len(known_context_ids)) + + t1 = torch.rand((3, 3), requires_grad=True) + for i in range(0, 100): + dst = self._next_rank() + t1 = rpc.rpc_sync(worker_name(dst), torch.add, args=(t1, t1)) + + # Call MyBackwardFunc as the first op of the backward pass to + # ensure we release the context early in the backward pass. + t1 = DistAutogradTest.MyBackwardFunc.apply(t1) + self.assertEqual(100, len(context._send_functions())) + + context_id = 100 # dummy context_id + with self.assertRaisesRegex( + RuntimeError, + f"Could not find autograd context with id: {context_id}", + ): + dist_autograd.backward(context_id, [t1.sum()]) + + # HACK: Killing workers since otherwise the autograd engine gets stuck on + # other nodes. The proper fix would be addressing: + # https://github.com/pytorch/pytorch/issues/27643, which would inform + # other nodes about the failure. + # The autograd engine gets stuck on other nodes since they're waiting to + # receive gradients from the node that received an error (and as a + # result it didn't execute the rest of the graph). 
+ dist.barrier() + rpc.shutdown(graceful=False) + sys.exit(0) + + @classmethod + def _call_remote_embedding(cls, embedding_rref, input, offsets, per_sample_weights): + embedding = embedding_rref.local_value() + return embedding(input, offsets, per_sample_weights) + + @classmethod + def _get_grad(cls, embedding_rref, context_id): + embedding = embedding_rref.local_value() + grad_map = dist_autograd.get_gradients(context_id) + return grad_map[embedding.weight] + + @classmethod + def _mixed_requires_grad_operaton(cls, t1, t2): + if t2.requires_grad: + return t1 - t2 + else: + return t1 * t2 + + @dist_init + def test_mixed_requires_grad(self): + self._mixed_requires_grad( + torch.rand(3, 3, requires_grad=True), + torch.rand(3, 3, requires_grad=False), + False + ) + + class TestDebugInfoFunc(Function): + @staticmethod + def forward(ctx, input): + return input + + @staticmethod + @once_differentiable + def backward(ctx, input): + debug_info = dist_autograd._get_debug_info() + assert debug_info is not None + backward_passes = int(debug_info["num_current_backward_passes"]) + + # Hard to validate exact numbers because of the distributed nature. + # We can't use a barrier() here since that would block the single + # CPU thread available for autograd and can cause deadlocks. + assert backward_passes >= 1 and backward_passes <= 4 + return input + + @dist_init + def test_debug_info(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + with dist_autograd.context() as context_id: + i = 0 + res = {} + res[i] = t1 + for rank in range(self.world_size): + if rank != self.rank: + res[i + 1] = rpc.rpc_sync( + worker_name(rank), torch.add, args=(res[i], t2) + ) + i += 1 + + # Call custom function in middle of backward pass to ensure all + # nodes are still waiting on a backward(). 
+ res[i + 1] = DistAutogradTest.TestDebugInfoFunc.apply(res[i]) + i += 1 + + for rank in range(self.world_size): + if rank != self.rank: + res[i + 1] = rpc.rpc_sync( + worker_name(rank), torch.add, args=(res[i], t2) + ) + i += 1 + + dist_autograd.backward(context_id, [res[i].sum()]) + + debug_info = dist_autograd._get_debug_info() + num_autograd_context = int(debug_info["num_autograd_contexts"]) + # Need atleast one context and not more than 4. + self.assertTrue(num_autograd_context >= 1 and num_autograd_context <= 4) + + for rd in range(self.world_size - 1): + rpc.rpc_sync( + worker_name((self.rank + rd + 1) % self.world_size), + _set_rpc_done, + args=(context_id, rd + 1), + ) + + dist.barrier() + + # Validate information + debug_info = dist_autograd._get_debug_info() + assert debug_info is not None + self.assertEqual(0, int(debug_info["num_current_backward_passes"])) + # only have `num_current_backward_passes` and `num_autograd contexts` + self.assertTrue(len(debug_info) == 2) + + self.assertTrue(_all_contexts_cleaned_up()) + + # All contexts should be cleaned up. + debug_info = dist_autograd._get_debug_info() + self.assertEqual(0, int(debug_info["num_autograd_contexts"])) + + @staticmethod + def _workload_thread(): + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + with dist_autograd.context() as context_id: + t3 = rpc.rpc_sync("worker0", torch.add, args=(t1, t2)) + t4 = rpc.rpc_sync("worker0", torch.mul, args=(t2, t3)) + t5 = rpc.rpc_sync("worker0", torch.matmul, args=(t3, t4)) + t6 = rpc.rpc_sync("worker0", torch.add, args=(t4, t5)) + + dist_autograd.backward(context_id, [t6.sum()]) + + @dist_init + def test_async_dist_autograd(self): + """ + This test ensures async processing for distributed autograd works + appropriately. This is achieved by spawning multiple threads and + hammering a single node with a lot of backward() calls. 
+ """ + + initialize_pg(self.file_init_method, self.rank, self.world_size) + if self.rank != 0: + # All other ranks schedule work on rank 0. + threads = [] + for i in range(20): + t = threading.Thread(target=DistAutogradTest._workload_thread) + t.start() + threads.append(t) + + for thread in threads: + thread.join() + + dist.barrier() + + @dist_init + def test_backward_accumulate_grads(self): + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + with dist_autograd.context() as context_id: + t3 = torch.matmul(t1, t2) + # Run backward twice. + torch.autograd.backward([t3.sum()], retain_graph=True) + torch.autograd.backward([t3.sum()]) + + t3 = rpc.rpc_sync( + worker_name(self._next_rank()), torch.matmul, args=(t1, t2) + ) + # Run backward twice. + dist_autograd.backward(context_id, [t3.sum()], retain_graph=True) + dist_autograd.backward(context_id, [t3.sum()]) + + # Verify the gradients are same for local and remote execution. + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(2, len(grads)) + self.assertIn(t1, grads) + self.assertIn(t2, grads) + self.assertEqual(t1.grad, grads[t1]) + self.assertEqual(t2.grad, grads[t2]) + + @staticmethod + def _test_nested_backward_accumulate_grads(t1, t2, dst_rank): + return rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2)) + + @dist_init + def test_nested_backward_accumulate_grads(self): + self._nested_backward_accumulate_grads( + torch.rand(3, 3, requires_grad=True), + torch.rand(3, 3, requires_grad=True), + False + ) + + @dist_init + def test_multiple_backward(self): + self._multiple_backward( + torch.rand(3, 3, requires_grad=True), + torch.rand(3, 3, requires_grad=True), + False + ) + + @dist_init(clean_shutdown=False) + def test_multiple_backward_with_errors(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + with dist_autograd.context() as 
context_id: + loss = rpc.rpc_sync( + f'worker{self._next_rank()}', + DistAutogradTest._python_udf_with_backward_error, + args=(t1, t2)).sum() + + try: + # Run backward in a loop multiple times. + for i in range(100): + if i < 50: + with self.assertRaisesRegex(RuntimeError, "Simulate error on backward pass"): + dist_autograd.backward(context_id, [loss], retain_graph=True) + elif i > 50: + # Recovered from error. + dist_autograd.backward(context_id, [loss], retain_graph=True) + else: + dist.barrier() + SimulateBackwardError._simulate_error = False + dist.barrier() + finally: + # Sync before resetting flag. + dist.barrier() + + # Reset the flag. + SimulateBackwardError._simulate_error = True + + @dist_init + def test_backward_verify_hooks(self): + t1 = torch.ones((3, 3), requires_grad=True) + # Double the gradient. + t1.register_hook(lambda grad: grad * 2) + t2 = torch.ones((3, 3), requires_grad=True) + local_grads = None + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + ret = self._exec_func(exec_mode, torch.matmul, t1, t2) + loss = ret.sum() + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2 + ) + local_grads = ret if ret else local_grads + + @dist_init + def test_no_grad_copy(self): + ''' + Similar to test in test_autograd.py. 
+ ''' + # create autograd function that saves grad pointer as class static + class MyFunc(Function): + static_grad_ptr = None + + @staticmethod + def forward(ctx, inp1, inp2): + return inp1 + inp2 + + @staticmethod + def backward(ctx, grad): + MyFunc.static_grad_ptr = grad.data_ptr() + return grad, grad + + class MyFuncSingleGrad(Function): + static_grad_ptr = None + + @staticmethod + def forward(ctx, inp): + return inp + + @staticmethod + def backward(ctx, grad): + MyFuncSingleGrad.static_grad_ptr = grad.data_ptr() + return grad + + class NonContGradFunc(Function): + @staticmethod + def forward(ctx, inp1): + ctx.size = inp1.size() + return torch.tensor([1.]) + + @staticmethod + def backward(ctx, grad): + return torch.ones(1).expand(ctx.size) + + a = torch.randn(5, 6, requires_grad=True) + b = torch.randn(5, 6, requires_grad=True) + # non-contiguous grad should be copied + with dist_autograd.context() as context_id: + dist_autograd.backward(context_id, [NonContGradFunc.apply(MyFunc.apply(a, b))]) + grads = dist_autograd.get_gradients(context_id) + self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr) + self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr) + + # test case that should trigger no copy for a + with dist_autograd.context() as context_id: + dist_autograd.backward(context_id, [MyFuncSingleGrad.apply(a)[1][0]]) + grads = dist_autograd.get_gradients(context_id) + p_g = MyFuncSingleGrad.static_grad_ptr + p_a = grads[a].data_ptr() + # Verify there was no clone. + self.assertTrue(p_a == p_g) + + # Test case that should trigger copy for both of a,b. This is + # different in the distributed autograd case since we hold + # a reference to all grads in a vector until all accumulation is done. 
+ with dist_autograd.context() as context_id: + dist_autograd.backward(context_id, [MyFunc.apply(a, b)[1][0]]) + grads = dist_autograd.get_gradients(context_id) + p_g = MyFunc.static_grad_ptr + p_a = grads[a].data_ptr() + p_b = grads[b].data_ptr() + # check a,b uses different grad buffer + self.assertFalse(p_a == p_b) + # both should be copied. + self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr) + self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr) + + @dist_init + def test_no_grad_copy_sparse(self): + # create autograd function that saves grad pointer as class static + class MyFunc(Function): + static_grad_ptr = None + + @staticmethod + def forward(ctx, inp): + return inp + + @staticmethod + def backward(ctx, grad): + MyFunc.static_grad_ptr = grad._values().data_ptr() + return grad + + class NonContGradFunc(Function): + static_grad_ptr = None + + @staticmethod + def forward(ctx, inp1, inp2): + return inp1 + inp2 + + @staticmethod + def backward(ctx, grad): + # Create a sparse tensor with non-contiguous indices and values + # and return as grad. + v = torch.rand(1, 3) + i = torch.ones(1, 1, dtype=torch.long) + nv = v.expand(8, 3) + ni = i.expand(1, 8) + ngrad = torch.sparse_coo_tensor(ni, nv, (10, 3), dtype=torch.float32) + NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr() + return ngrad, ngrad + + a = torch.randn(10, 3, requires_grad=True) + b = torch.randn(10, 3, requires_grad=True) + input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9]) + offsets = torch.tensor([0, 4]) + import torch.nn.functional as F + + # test case that should trigger no copy for a. 
+ with dist_autograd.context() as context_id: + emb_matrix = MyFunc.apply(a) + loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum() + dist_autograd.backward(context_id, [loss], retain_graph=True) + grads = dist_autograd.get_gradients(context_id) + p_g = MyFunc.static_grad_ptr + p_a = grads[a]._values().data_ptr() + # check a uses the same buffer + self.assertTrue(p_a == p_g) + + # Run backwards multiple times. + for i in range(10): + dist_autograd.backward(context_id, [loss], retain_graph=True) + + # non-contiguous indices and value, we should trigger a copy. + with dist_autograd.context() as context_id: + emb_matrix = NonContGradFunc.apply(a, b) + loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum() + dist_autograd.backward(context_id, [loss], retain_graph=True) + grads = dist_autograd.get_gradients(context_id) + p_g = NonContGradFunc.static_grad_ptr + p_a = grads[a]._values().data_ptr() + p_b = grads[b]._values().data_ptr() + # check a,b uses different grad buffer + self.assertFalse(p_a == p_b) + # Verify we cloned both grads. + self.assertFalse(p_a == p_g) + self.assertFalse(p_b == p_g) + + # Run backwards multiple times to verify accumulation. + for i in range(10): + dist_autograd.backward(context_id, [loss], retain_graph=True) + + @dist_init + def test_grad_copy_sparse_indices_extra_ref(self): + # create autograd function that saves grad pointer as class static + class MyFunc(Function): + static_grad_ptr = None + static_grad_indices_ref = None + static_grad_values_ref = None + + @staticmethod + def forward(ctx, inp): + return inp + + @staticmethod + def backward(ctx, grad): + MyFunc.static_grad_ptr = grad._values().data_ptr() + # indices() and values() return views, so holding onto + # references of them would not increment refcount of indices + # and values inside the sparse tensor. 
+ MyFunc.static_grad_indices_ref = grad._indices() + MyFunc.static_grad_values_ref = grad._values() + return grad + + a = torch.randn(10, 3, requires_grad=True) + input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9]) + offsets = torch.tensor([0, 4]) + import torch.nn.functional as F + + with dist_autograd.context() as context_id: + emb_matrix = MyFunc.apply(a) + loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum() + dist_autograd.backward(context_id, [loss], retain_graph=True) + grads = dist_autograd.get_gradients(context_id) + p_g = MyFunc.static_grad_ptr + p_a = grads[a]._values().data_ptr() + self.assertIsNotNone(MyFunc.static_grad_indices_ref) + self.assertIsNotNone(MyFunc.static_grad_values_ref) + # grad would be stolen, since static_grad_indices_ref and + # static_grad_values_ref are holding onto views and don't bump the + # refcount. + self.assertTrue(p_g == p_a) + + @dist_init + def test_post_hooks(self): + self.hook_called_times = 0 + + def post_hook_add_one(output_grads, input_grads): + self.hook_called_times += 1 + return output_grads + + def post_hook_add_two(output_grads, input_grads): + self.hook_called_times += 2 + return output_grads + + t = torch.rand(10, 10, requires_grad=True) + a = t + t + + # Register post hooks + accumulate_grad_0 = a.grad_fn.next_functions[0][0] + accumulate_grad_0.register_hook(post_hook_add_one) + accumulate_grad_0.register_hook(post_hook_add_two) + + accumulate_grad_1 = a.grad_fn.next_functions[1][0] + accumulate_grad_1.register_hook(post_hook_add_two) + + with dist_autograd.context() as context_id: + loss = a.sum() + dist_autograd.backward(context_id, [loss]) + self.assertEqual(5, self.hook_called_times) + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(1, len(grads)) + self.assertTrue(t in grads) + + @staticmethod + def _slow_add(t1, t2): + time.sleep(1) + t3 = t1 + t2 + t3.requires_grad = True + return t3 + + @dist_init + def test_thread_local_context_id(self): + t1 = torch.rand((3, 3)) + t2 
= torch.rand((3, 3)) + + t3 = t1 + t2 + t3.requires_grad = True + t3.sum().backward() + + dst = worker_name((self.rank + 1) % self.world_size) + rref = rpc.remote(dst, DistAutogradTest._slow_add, args=(t1, t2)) + + with dist_autograd.context() as context_id: + loss = rref.to_here().sum() + # due to slow add, the continuation of this backward pass will be + # invoked by the previous rpc.remote thread which does not have a + # valid context_id. So, this can test whether we propagate + # thread_local states properly when jumping across threads on the + # server side. + dist_autograd.backward(context_id, [loss]) + self.assertTrue( + rpc.rpc_sync( + dst, + _compare_owner_value, + args=(context_id, rref, t3.grad) + ) + ) + + +class CudaDistAutogradTest(CommonDistAutogradTest): + @skip_if_lt_x_gpu(1) + @dist_init + def test_gpu_simple(self): + t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0") + t2 = torch.rand(3, 3, requires_grad=True, device="cuda:0") + (t1 + t2).sum().backward() + with dist_autograd.context() as context_id: + t3 = t1 + t2 + dist_autograd.backward(context_id, [t3.sum()]) + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(2, len(grads)) + self.assertEqual(t1.grad, grads[t1]) + self.assertEqual(t2.grad, grads[t2]) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_gpu_to_cpu_continuation(self): + t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0") + t2 = torch.rand(3, 3, requires_grad=True) + # Run a few iterations. + for i in range(3): + t1.grad = None + t2.grad = None + # Root is CPU + local_grads = None + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]: + with dist_autograd.context() as context_id: + t3 = self._exec_func(exec_mode, torch.add, t2, t2) + t4 = t3.cuda(0) + t1 + t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2) + t6 = t5.cuda(0) + t4 + t7 = self._exec_func(exec_mode, torch.add, t6.cpu(), t5) + # Autograd graph consists of CPU -> GPU -> CPU execution. 
+ ret = self._verify_backwards( + exec_mode, [t7.sum()], context_id, local_grads, t1, t2 + ) + local_grads = ret if ret else local_grads + + @skip_if_lt_x_gpu(1) + @dist_init + def test_gpu_to_cpu_continuation_gpu_root(self): + t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0") + t2 = torch.rand(3, 3, requires_grad=True) + # Run a few iterations. + for i in range(3): + t1.grad = None + t2.grad = None + # Root is CPU + local_grads = None + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]: + with dist_autograd.context() as context_id: + t3 = self._exec_func(exec_mode, torch.add, t2, t2) + t4 = t3.cuda(0) + t1 + t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2) + t6 = t5.cuda(0) + t4 + # Autograd graph consists of CPU -> GPU -> CPU execution. + ret = self._verify_backwards( + exec_mode, [t6.sum()], context_id, local_grads, t1, t2 + ) + local_grads = ret if ret else local_grads + + +class FaultyAgentDistAutogradTest(RpcAgentTestFixture): + # Reusing a simplified helper function from DistAutogradTest to ensure + # autograd context is successfully cleaned up even when RPCs are failing. + def context_cleanup_test_helper(self, rpc_args, func): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + # test that in dist autograd, in the case that tensors communicated over RPC do + # NOT require grad, we still cleanup the dist autograd contexts created + # on other nodes. This is because the autograd context is still + # communicated over RPC even if tensor arguments do not require grad, as + # it is possible that the response could. 
class WrapperModule(nn.Module):
    """Thin wrapper that moves ``model`` to ``device`` and exposes both the
    forward pass and the dist_autograd gradients of its parameters."""

    def __init__(self, model, device):
        super().__init__()
        self.model = model.to(device)

    def forward(self, *args):
        # Pure delegation to the wrapped model.
        return self.model(*args)

    def gradients(self, ctx_id):
        """Return this model's gradients from dist_autograd context
        ``ctx_id``, ordered like ``model.parameters()``."""
        all_grads = dist_autograd.get_gradients(ctx_id)
        return [all_grads[param] for param in self.model.parameters()]
+ options.set_device_map(dst, {self.rank: (self.rank + 1) % self.world_size}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + t1 = torch.rand(10, device=self.rank, requires_grad=True) + t2 = torch.rand(10, device=self.rank, requires_grad=True) + with dist_autograd.context() as context_id: + res = rpc.rpc_sync(dst, torch.add, args=(t1, t2)) + dist_autograd.backward(context_id, [res.sum()]) + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(torch.ones(10), grads[t1]) + self.assertEqual(torch.ones(10), grads[t2]) + self.assertEqual(t1.device, grads[t1].device) + self.assertEqual(t2.device, grads[t2].device) + + rpc.shutdown() + + class MyRemoteCompute(torch.nn.Module): + def forward(self, input): + input = input * 2.0 + return input + + class MyLocalCompute(torch.nn.Module): + def __init__(self, next_stage): + super().__init__() + self.next_stage = next_stage + + def forward(self, input): + return self.next_stage.rpc_sync().forward(input) + + @skip_if_lt_x_gpu(4) + def test_dist_autograd_sync_streams(self): + + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + + # The reverse of this device mapping should be used for the backward pass. 
+ options.set_device_map(dst, {self.rank: (self.rank + 1) % self.world_size}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + remote_compute = rpc.remote(dst, TensorPipeCudaDistAutogradTest.MyRemoteCompute) + local_compute = TensorPipeCudaDistAutogradTest.MyLocalCompute(remote_compute) + for _ in range(10): + input = torch.rand([1000, 10000], device=self.rank, requires_grad=True) + # Run local autograd + result = input * 2.0 + r = random.random() + loss = result.sum() * r + loss.backward() + + # Run distributed autograd + with dist_autograd.context() as context_id: + result = local_compute(input) + loss = result.sum() * r + dist_autograd.backward(context_id, [loss]) + + # Compare grads. + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(input.grad, grads[input]) + + rpc.shutdown() + + @skip_if_lt_x_gpu(4) + def test_gradients_synchronizations(self): + options = self.rpc_backend_options + for peer_rank in range(self.world_size): + options.set_device_map(worker_name(peer_rank), {self.rank: peer_rank}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + if self.rank == 0: + # this is master + layers = [nn.Linear(2000, 2000) for _ in range(self.world_size - 1)] + local_layers = [l.to(0) for l in layers] + remote_layers = [] + for rank in range(1, self.world_size): + remote_layers.append(rpc.remote( + worker_name(rank), + WrapperModule, + args=(layers[rank - 1], rank) + )) + + x = torch.randn(5000, 2000).to(0) + # local iteration + local_model = nn.Sequential(*local_layers) + local_model(x).sum().backward() + + # remote iteration + with dist_autograd.context() as context_id: + for remote_layer in remote_layers: + x = remote_layer.rpc_sync().forward(x) + + dist_autograd.backward(context_id, [x.sum()]) + + futs = [] + for 
remote_layer in remote_layers: + futs.append(remote_layer.rpc_async().gradients(context_id)) + + for i in range(len(futs)): + local_gradients = [p.grad for p in local_layers[i].parameters()] + for g1, g2 in zip(futs[i].wait(), local_gradients): + self.assertEqual(g1, g2) + + rpc.shutdown() diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_optimizer_test.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_optimizer_test.py new file mode 100644 index 0000000000000000000000000000000000000000..310dc740db6802dff140499aa68aa9dd18978891 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_optimizer_test.py @@ -0,0 +1,281 @@ +# mypy: allow-untyped-defs + + +import threading + +import torch +import torch.distributed.autograd as dist_autograd +import torch.distributed.rpc as rpc +from torch import optim +from torch.distributed.optim import DistributedOptimizer +from torch.testing._internal.dist_utils import dist_init +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) + + +class MyModule: + lock = threading.Lock() + + def __init__(self, requires_grad=True): + # cannot directly use torch.manual_seed(0) as all threads share the same + # default generator. The race from multiple RPC threads could mess up + # the draw order from the default RNG instance, leading to + # non-deterministic behavior. Hence, create a dedicated RNG here. 
class MyModule:
    """Tiny deterministic module used by the distributed optimizer tests."""

    lock = threading.Lock()

    def __init__(self, requires_grad=True):
        # All RPC threads share the default RNG, so torch.manual_seed(0)
        # alone would be racy: concurrent draws could change the order and
        # make the weights non-deterministic. A dedicated generator keeps
        # every instance's weight identical.
        generator = torch.Generator()
        generator.manual_seed(0)
        self.w = torch.rand((3, 3), requires_grad=requires_grad, generator=generator)

    def forward(self, t1):
        # Plain matrix multiply against the fixed weight.
        return torch.mm(self.w, t1)

    def get_w(self):
        return self.w
+ """ + return rpc.rpc_async( + obj_rref.owner(), + _call_method, + args=[method, obj_rref] + list(args), + kwargs=kwargs, + ) + + +class DistOptimizerTest(RpcAgentTestFixture): + @dist_init() + def test_dist_optim_exception(self): + # distributed version + owner1 = "worker%d" % ((self.rank + 1) % self.world_size) + owner2 = "worker%d" % ((self.rank + 2) % self.world_size) + + remote_module1 = rpc.remote(owner1, MyModule) + remote_module2 = rpc.remote(owner2, MyModule) + remote_param1 = remote_method(MyModule.get_w, remote_module1) + remote_param2 = remote_method(MyModule.get_w, remote_module2) + + dist_optim = DistributedOptimizer( + FailingOptimizer, [remote_param1, remote_param2] + ) + + with dist_autograd.context() as context_id: + g_cpu = torch.Generator() + g_cpu.manual_seed(0) + t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + output1 = rpc_async_method(MyModule.forward, remote_module1, t2) + output2 = rpc_async_method(MyModule.forward, remote_module2, output1.wait()) + loss = torch.add(output2.wait(), t1).sum() + + dist_autograd.backward(context_id, [loss]) + with self.assertRaisesRegex(Exception, "Error running optimizer"): + dist_optim.step(context_id) + + @dist_init() + def test_dist_optim_exception_on_constructor(self): + # distributed version + owner1 = "worker%d" % ((self.rank + 1) % self.world_size) + owner2 = "worker%d" % ((self.rank + 2) % self.world_size) + + remote_module1 = rpc.remote(owner1, MyModule) + remote_module2 = rpc.remote(owner2, MyModule) + remote_param1 = remote_method(MyModule.get_w, remote_module1) + remote_param2 = remote_method(MyModule.get_w, remote_module2) + + with self.assertRaisesRegex(Exception, "Error creating optimizer."): + dist_optim = DistributedOptimizer( + OptimizerFailingOnConstructor, [remote_param1, remote_param2] + ) + + def _test_dist_optim_base(self, optim_cls, *args, **kwargs): + # local version + module1 = MyModule() + module2 = 
MyModule() + params = [module1.get_w(), module2.get_w()] + local_optim = optim_cls(params, *args, **kwargs) + + old_w1 = module1.w.clone().detach() + old_w2 = module2.w.clone().detach() + + g_cpu = torch.Generator() + g_cpu.manual_seed(0) + t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + output1 = module1.forward(t2) + output2 = module2.forward(output1) + loss = torch.add(output2, t1).sum() + + loss.backward() + local_optim.step() + + # distributed version + owner1 = "worker%d" % ((self.rank + 1) % self.world_size) + owner2 = "worker%d" % ((self.rank + 2) % self.world_size) + + remote_module1 = rpc.remote(owner1, MyModule) + remote_module2 = rpc.remote(owner2, MyModule) + remote_param1 = remote_method(MyModule.get_w, remote_module1) + remote_param2 = remote_method(MyModule.get_w, remote_module2) + + old_w1_remote = remote_param1.to_here() + + # sanity check: local and remote initial weights should match + self.assertEqual(old_w1, remote_param1.to_here()) + self.assertEqual(old_w2, remote_param2.to_here()) + + dist_optim = DistributedOptimizer( + optim_cls, [remote_param1, remote_param2], *args, **kwargs + ) + + with dist_autograd.context() as context_id: + g_cpu.manual_seed(0) + t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + output1 = rpc_async_method(MyModule.forward, remote_module1, t2) + output2 = rpc_async_method(MyModule.forward, remote_module2, output1.wait()) + loss = torch.add(output2.wait(), t1) + + dist_autograd.backward(context_id, [loss.sum()]) + dist_optim.step(context_id) + + new_w1 = rpc_async_method(MyModule.get_w, remote_module1).wait() + new_w2 = rpc_async_method(MyModule.get_w, remote_module2).wait() + + # ensure optimizer changed weights + self.assertNotEqual(old_w1, new_w1) + self.assertNotEqual(old_w2, new_w2) + # ensure local equals remote + self.assertEqual(new_w1, module1.get_w()) + 
self.assertEqual(new_w2, module2.get_w()) + + @dist_init() + def test_dist_optim(self): + self._test_dist_optim_base(optim.Adagrad, lr=0.05) + self._test_dist_optim_base(optim.Adam, lr=1e-2, amsgrad=True) + self._test_dist_optim_base(optim.AdamW, lr=0.05, amsgrad=True) + self._test_dist_optim_base(optim.SGD, lr=0.05) + self._test_dist_optim_base(optim.SGD, lr=1e-3, momentum=1, weight_decay=1, nesterov=True) + self._test_dist_optim_base(optim.Adadelta, rho=0.95) + self._test_dist_optim_base(optim.RMSprop, lr=0.05) + self._test_dist_optim_base(optim.Adamax, lr=0.05) + self._test_dist_optim_base(optim.Rprop, lr=0.05) + + def _test_dist_optim_none_grads(self, optim_cls, *args, **kwargs): + # local version + module1 = MyModule() + module2 = MyModule(requires_grad=False) + params = [module1.get_w(), module2.get_w()] + local_optim = optim_cls(params, *args, **kwargs) + + old_w1 = module1.w.clone().detach() + old_w2 = module2.w.clone().detach() + + g_cpu = torch.Generator() + g_cpu.manual_seed(0) + t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + output1 = module1.forward(t2) + output2 = module2.forward(output1) + loss = torch.add(output2, t1).sum() + + loss.backward() + local_optim.step() + + # distributed version + owner1 = "worker%d" % ((self.rank + 1) % self.world_size) + owner2 = "worker%d" % ((self.rank + 2) % self.world_size) + + remote_module1 = rpc.remote(owner1, MyModule) + remote_module2 = rpc.remote(owner2, MyModule, args=(False,)) + remote_param1 = remote_module1.remote().get_w() + remote_param2 = remote_module2.remote().get_w() + + # sanity check: local and remote initial weights should match + self.assertEqual(old_w1, remote_param1.to_here()) + self.assertEqual(old_w2, remote_param2.to_here()) + + dist_optim = DistributedOptimizer( + optim_cls, [remote_param1, remote_param2], *args, **kwargs + ) + + with dist_autograd.context() as context_id: + g_cpu.manual_seed(0) + t1 = 
torch.rand((3, 3), requires_grad=True, generator=g_cpu) + t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + output1 = remote_module1.rpc_async().forward(t2) + output2 = remote_module2.rpc_async().forward(output1.wait()) + loss = torch.add(output2.wait(), t1) + + dist_autograd.backward(context_id, [loss.sum()]) + dist_optim.step(context_id) + + new_w1 = remote_module1.rpc_async().get_w().wait() + new_w2 = remote_module2.rpc_async().get_w().wait() + + # ensure optimizer changed weights for w1 + self.assertNotEqual(old_w1, new_w1) + + # ensure optimizer not changed weights for w2 + self.assertEqual(old_w2, new_w2) + # ensure local equals remote + self.assertEqual(new_w1, module1.get_w()) + self.assertEqual(new_w2, module2.get_w()) + + @dist_init() + def test_dist_optim_none_grads(self): + self._test_dist_optim_none_grads(optim.SGD, lr=0.05) + self._test_dist_optim_none_grads(optim.RMSprop, lr=0.05) + self._test_dist_optim_none_grads(optim.Rprop, lr=0.05) + self._test_dist_optim_none_grads(optim.Adadelta, rho=0.95) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__init__.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/parameter_server_test.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/parameter_server_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..198dadc2e254d4419ad2652b14f57bbb6770ba1b Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/parameter_server_test.cpython-310.pyc differ diff --git 
def timed_log(text):
    """Print ``text`` prefixed with the current wall-clock time (HH:MM:SS)."""
    stamp = datetime.now().strftime("%H:%M:%S")
    print(f"{stamp} {text}")
class Trainer:
    """Worker that trains against the batch-updating parameter server."""

    def __init__(self, ps_rref):
        self.ps_rref = ps_rref
        self.loss_fn = nn.L1Loss()

    def get_next_batch(self):
        # Synthetic data: random inputs, all-zero targets.
        for _ in range(num_batches):
            inputs = torch.randn(batch_size, in_features)
            labels = torch.zeros(batch_size, out_features)
            yield inputs, labels

    def train(self):
        """Run one pass over the synthetic batches, pushing grads to the PS
        and pulling back the freshly updated model after each batch."""
        name = rpc.get_worker_info().name
        model = self.ps_rref.rpc_sync().get_model()
        for inputs, labels in self.get_next_batch():
            timed_log(f"{name} processing one batch")
            self.loss_fn(model(inputs), labels).backward()
            timed_log(f"{name} reporting grads")
            model = rpc.rpc_sync(
                self.ps_rref.owner(),
                BatchUpdateParameterServer.update_and_fetch_model,
                args=(self.ps_rref, [p.grad for p in model.cpu().parameters()]),
            )
            timed_log(f"{name} got updated model")
Trainer(ps_rref) + trainer.train() + + +def run_ps(trainers): + timed_log("Start training") + start = perf_counter() + ps_rref = rpc.RRef(BatchUpdateParameterServer(len(trainers))) + futs = [] + for trainer in trainers: + futs.append( + rpc.rpc_async(trainer, run_trainer, args=(ps_rref,)) + ) + + torch.futures.wait_all(futs) + stop = perf_counter() + timed_log("Finish training") + timed_log(f"Time spent training: {stop-start}s") + +class ParameterServerTest(RpcAgentTestFixture): + + @dist_init(setup_rpc=False) + def test_batch_updating_parameter_server(self): + + if self.rank != 0: + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + else: + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + run_ps([f"{worker_name(r)}" for r in range(1, self.world_size)]) + + rpc.shutdown() diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/reinforcement_learning_rpc_test.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/reinforcement_learning_rpc_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5d7e7b1244bcea221056a47186c8d4708a111cb2 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/reinforcement_learning_rpc_test.py @@ -0,0 +1,261 @@ +# mypy: allow-untyped-defs + +# If you need to modify this file to make this test pass, please also apply same edits accordingly to +# https://github.com/pytorch/examples/blob/master/distributed/rpc/rl/main.py +# and https://pytorch.org/tutorials/intermediate/rpc_tutorial.html + +import numpy as np +from itertools import count + +import torch +import torch.distributed.rpc as rpc +import torch.nn as nn +import 
class Policy(nn.Module):
    r"""
    Borrowing the ``Policy`` class from the Reinforcement Learning example.
    Copying the code to make these two examples independent.
    See https://github.com/pytorch/examples/tree/master/reinforcement_learning
    """

    def __init__(self) -> None:
        super().__init__()
        # 4-dim observation -> 128 hidden -> 2 discrete actions.
        self.affine1 = nn.Linear(4, 128)
        self.dropout = nn.Dropout(p=0.6)
        self.affine2 = nn.Linear(128, 2)

        # Populated by the agent during an episode.
        self.saved_log_probs = []
        self.rewards = []

    def forward(self, x):
        hidden = F.relu(self.dropout(self.affine1(x)))
        action_scores = self.affine2(hidden)
        # Probability distribution over the two actions.
        return F.softmax(action_scores, dim=1)
+ """ + def __init__(self, state_dim=4, num_iters=10, reward_threshold=475.0): + self.state_dim = state_dim + self.num_iters = num_iters + self.iter = 0 + self.reward_threshold = reward_threshold + + def seed(self, manual_seed): + torch.manual_seed(manual_seed) + + def reset(self): + self.iter = 0 + return torch.randn(self.state_dim) + + def step(self, action): + self.iter += 1 + state = torch.randn(self.state_dim) + reward = torch.rand(1).item() * self.reward_threshold + done = self.iter >= self.num_iters + info = {} + return state, reward, done, info + + +class Observer: + r""" + An observer has exclusive access to its own environment. Each observer + captures the state from its environment, and send the state to the agent to + select an action. Then, the observer applies the action to its environment + and reports the reward to the agent. + """ + def __init__(self) -> None: + self.id = rpc.get_worker_info().id + self.env = DummyEnv() + self.env.seed(SEED) + + def run_episode(self, agent_rref, n_steps): + r""" + Run one episode of n_steps. + Arguments: + agent_rref (RRef): an RRef referencing the agent object. 
+ n_steps (int): number of steps in this episode + """ + state, ep_reward = self.env.reset(), 0 + for step in range(n_steps): + # send the state to the agent to get an action + action = _remote_method(Agent.select_action, agent_rref, self.id, state) + + # apply the action to the environment, and get the reward + state, reward, done, _ = self.env.step(action) + + # report the reward to the agent for training purpose + _remote_method(Agent.report_reward, agent_rref, self.id, reward) + + if done: + break + + +class Agent: + def __init__(self, world_size): + self.ob_rrefs = [] + self.agent_rref = RRef(self) + self.rewards = {} + self.saved_log_probs = {} + self.policy = Policy() + self.optimizer = optim.Adam(self.policy.parameters(), lr=1e-2) + self.eps = np.finfo(np.float32).eps.item() + self.running_reward = 0 + self.reward_threshold = DummyEnv().reward_threshold + for ob_rank in range(1, world_size): + ob_info = rpc.get_worker_info(worker_name(ob_rank)) + self.ob_rrefs.append(remote(ob_info, Observer)) + self.rewards[ob_info.id] = [] + self.saved_log_probs[ob_info.id] = [] + + def select_action(self, ob_id, state): + r""" + This function is mostly borrowed from the Reinforcement Learning example. + See https://github.com/pytorch/examples/tree/master/reinforcement_learning + The main difference is that instead of keeping all probs in one list, + the agent keeps probs in a dictionary, one key per observer. + + NB: no need to enforce thread-safety here as GIL will serialize + executions. + """ + probs = self.policy(state.unsqueeze(0)) + m = Categorical(probs) + action = m.sample() + self.saved_log_probs[ob_id].append(m.log_prob(action)) + return action.item() + + def report_reward(self, ob_id, reward): + r""" + Observers call this function to report rewards. + """ + self.rewards[ob_id].append(reward) + + def run_episode(self, n_steps=0): + r""" + Run one episode. The agent will tell each observer to run n_steps. 
+ """ + futs = [] + for ob_rref in self.ob_rrefs: + # make async RPC to kick off an episode on all observers + futs.append( + rpc_async( + ob_rref.owner(), + _call_method, + args=(Observer.run_episode, ob_rref, self.agent_rref, n_steps) + ) + ) + + # wait until all observers have finished this episode + for fut in futs: + fut.wait() + + def finish_episode(self): + r""" + This function is mostly borrowed from the Reinforcement Learning example. + See https://github.com/pytorch/examples/tree/master/reinforcement_learning + The main difference is that it joins all probs and rewards from + different observers into one list, and uses the minimum observer rewards + as the reward of the current episode. + """ + + # joins probs and rewards from different observers into lists + R, probs, rewards = 0, [], [] + for ob_id in self.rewards: + probs.extend(self.saved_log_probs[ob_id]) + rewards.extend(self.rewards[ob_id]) + + # use the minimum observer reward to calculate the running reward + min_reward = min(sum(self.rewards[ob_id]) for ob_id in self.rewards) + self.running_reward = 0.05 * min_reward + (1 - 0.05) * self.running_reward + + # clear saved probs and rewards + for ob_id in self.rewards: + self.rewards[ob_id] = [] + self.saved_log_probs[ob_id] = [] + + policy_loss, returns = [], [] + for r in rewards[::-1]: + R = r + GAMMA * R + returns.insert(0, R) + returns = torch.tensor(returns) + returns = (returns - returns.mean()) / (returns.std() + self.eps) + for log_prob, R in zip(probs, returns): + policy_loss.append(-log_prob * R) + self.optimizer.zero_grad() + policy_loss = torch.cat(policy_loss).sum() + policy_loss.backward() + self.optimizer.step() + return min_reward + + +def run_agent(agent, n_steps): + for i_episode in count(1): + agent.run_episode(n_steps=n_steps) + last_reward = agent.finish_episode() + + if agent.running_reward > agent.reward_threshold: + print(f"Solved! 
Running reward is now {agent.running_reward}!") + break + + +class ReinforcementLearningRpcTest(RpcAgentTestFixture): + @dist_init(setup_rpc=False) + def test_rl_rpc(self): + if self.rank == 0: + # Rank 0 is the agent. + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + agent = Agent(self.world_size) + run_agent(agent, n_steps=int(TOTAL_EPISODE_STEP / (self.world_size - 1))) + + # Ensure training was run. We don't really care about whether the task was learned, + # since the purpose of the test is to check the API calls. + self.assertGreater(agent.running_reward, 0.0) + else: + # Other ranks are observers that passively wait for instructions from the agent. + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + rpc.shutdown() diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_agent_rpc_test.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_agent_rpc_test.py new file mode 100644 index 0000000000000000000000000000000000000000..132e30e5b5cfb41ab9c0a2a0f0f6f960d648c0f9 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_agent_rpc_test.py @@ -0,0 +1,326 @@ +# mypy: allow-untyped-defs + +import torch +import time +import torch.distributed.rpc as rpc +from torch.distributed.rpc.api import _delete_all_user_and_unforked_owner_rrefs +from torch.testing._internal.dist_utils import ( + dist_init, + wait_until_pending_futures_and_users_flushed, + wait_until_owners_and_forks_on_rank, + worker_name, +) +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) + +def my_sleep_func(seconds=1): + time.sleep(seconds) + return 
torch.mul(torch.tensor(1), torch.tensor(1)) + +@torch.jit.script +def my_script_func(tensor): + return torch.add(tensor, tensor) + +def add_rref_to_value(rref, value): + return rref.to_here() + value + +class FaultyAgentRpcTest(RpcAgentTestFixture): + + # no faulty_messages defined so this fails all retryable messages - see + # faulty_rpc_agent_test_fixture.py for the list of retryable messages. + @dist_init(messages_to_delay={}) + def test_check_failed_messages(self): + if self.rank == 0: + dst_worker_b = worker_name((self.rank + 1) % self.world_size) + dst_worker_c = worker_name((self.rank + 2) % self.world_size) + + # Worker0 sends RPC to Worker1 and creates an RRef there + rref = rpc.remote(dst_worker_b, torch.add, args=(torch.ones(2, 2), torch.ones(2, 2))) + # Worker0 sends an RPC to Worker2 with the RRef as an arg + rpc.remote(dst_worker_c, add_rref_to_value, args=(rref, torch.ones(2, 2))) + # check if the output is as expected + self.assertEqual(rref.to_here(), torch.add(torch.ones(2, 2), torch.ones(2, 2))) + # explicitly delete all User RRefs + _delete_all_user_and_unforked_owner_rrefs() + + @dist_init + def test_verify_backend_options(self): + self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE) + self.assertEqual(self.rpc_backend_options.num_worker_threads, 8) + self.assertEqual(self.rpc_backend_options.num_fail_sends, 3) + self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4) + self.assertEqual(len(self.rpc_backend_options.messages_to_delay), 2) + self.assertEqual(self.rpc_backend_options.rpc_timeout, rpc.constants.DEFAULT_RPC_TIMEOUT_SEC) + + @dist_init(faulty_messages=["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"]) + def test_custom_faulty_messages(self): + self.assertEqual( + {"RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"}, + set(self.rpc_backend_options.messages_to_fail), + ) + + @dist_init(faulty_messages=[]) + def test_no_faulty_messages(self): + self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 
0) + + @dist_init(messages_to_delay={"SCRIPT_CALL": 1.5}) + def test_custom_messages_to_delay(self): + self.assertEqual(self.rpc_backend_options.messages_to_delay, {"SCRIPT_CALL": 1.5}) + + def _test_remote_message_dropped_pickle(self, dst=None): + if self.rank != 0: + return + dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + # Since we fail python_remote_call messages synchronously, the future + # corresponding to this remote call will be marked with an error when + # this function returns. + rref = rpc.remote(dst_worker, my_sleep_func, args=(1,)) + # Call to ensure pending callbacks are run. + wait_until_pending_futures_and_users_flushed() + # Attempt to fork the RRef should raise an error indicating the rpc.remote timeout. + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rref._serialize() + # Test that using RRef as arg over RPC (which forks) results in the same + # error + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 1)) + + @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"]) + def test_remote_message_dropped_pickle(self): + self._test_remote_message_dropped_pickle() + + @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"]) + def test_remote_message_dropped_pickle_to_self(self): + self._test_remote_message_dropped_pickle(self.rank) + + + def _test_remote_message_dropped_timeout(self, func, args, dst=None): + if self.rank != 0: + return + + # test the case where rpc.remote() message creation is completely dropped. + dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + # Since we fail python_remote_call messages synchronously, the future + # corresponding to this remote call will be marked with an error when + # this function returns. + rref = rpc.remote(dst_worker, func, args=args) + # Call to ensure pending callbacks are run. 
+ wait_until_pending_futures_and_users_flushed() + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rref.to_here() + # Note: during shutdown, logs will indicate "Could not find OwnerRRef..." + # on the owning nodes, this is expected because the OwnerRRef was never + # successfully created. Therefore, delAllUsers will work as expected. + + @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"]) + def test_builtin_remote_message_dropped_timeout(self): + func = torch.add + args = (torch.tensor(1), torch.tensor(1)) + self._test_remote_message_dropped_timeout(func, args) + + @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"]) + def test_builtin_remote_message_dropped_timeout_to_self(self): + func = torch.add + args = (torch.tensor(1), torch.tensor(1)) + self._test_remote_message_dropped_timeout(func, args, dst=0) + + @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"]) + def test_udf_remote_message_dropped_timeout(self): + func = my_sleep_func + args = (2,) + self._test_remote_message_dropped_timeout(func, args) + + @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"]) + def test_udf_remote_message_dropped_timeout_to_self(self): + func = my_sleep_func + args = (2,) + self._test_remote_message_dropped_timeout(func, args, dst=0) + + def _test_remote_message_delay_timeout(self, func, args, dst=None): + if self.rank != 0: + return + # Test the case where remote message is eventually processed on the owner, + # but the future on the creator times out before the response comes back. + dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + # 10 ms timeout + rref = rpc.remote(dst_worker, func, args=args, timeout=0.001) + # Future corresponding to the remote creation should time out. + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + rref._get_future().wait() + + # Call to ensure pending callbacks are run. 
+ wait_until_pending_futures_and_users_flushed() + # to_here() should now pick up that rpc.remote() creation has failed. + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rref.to_here() + + # Test the case where rpc.remote() times out, but to_here() has already + # started blocking before. + # NOTE: we only test this when not sending to self, as to_here() calls + # calls localValue(), which does not send an RPC and thus does not have + # a timeout. This can be supported by allowing future.wait() to + # take in an optional timeout (https://github.com/pytorch/pytorch/issues/39280) + if dst_rank != self.rank: + slow_rref = rpc.remote(dst_worker, func, args=args, timeout=2) + + with self.assertRaisesRegex(RuntimeError, expected_error): + # to_here() should raise timeout error, since it does not know about the + # status of rpc.remote(). + slow_rref.to_here(0.001) + # Note: If we proceed with shutdown, UserRRef will send out a RRefUserDelete + # but this can be a noop since it may not exist on the owner yet. Later, + # the owner can process the RRef creation and wait for the delete message, + # thus leading to a timeout. + # Therefore, we wait until we get notification that pending owners have + # been confirmed before sending out RRefUserDeletes. 
+ if dst_rank != self.rank: + wait_until_owners_and_forks_on_rank(2, 2, rank=dst_rank) + + @dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2}) + def test_udf_remote_message_delay_timeout(self): + func = my_sleep_func + args = (2,) + self._test_remote_message_delay_timeout(func, args) + + @dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2}) + def test_udf_remote_message_delay_timeout_to_self(self): + func = my_sleep_func + args = (1,) + self._test_remote_message_delay_timeout(func, args, dst=0) + + @dist_init( + faulty_messages=[], + messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1}, + ) + def test_remote_message_builtin_delay_timeout(self): + func = torch.add + args = (torch.tensor(1), torch.tensor(1)) + self._test_remote_message_delay_timeout(func, args) + + @dist_init( + faulty_messages=[], + messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1}, + ) + def test_remote_message_builtin_delay_timeout_to_self(self): + func = torch.add + args = (torch.tensor(1), torch.tensor(1)) + self._test_remote_message_delay_timeout(func, args, dst=0) + + @dist_init( + faulty_messages=[], + messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1}, + ) + def test_remote_message_script_delay_timeout(self): + func = my_script_func + args = (torch.tensor(1),) + self._test_remote_message_delay_timeout(func, args) + + @dist_init( + faulty_messages=[], + messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1}, + ) + def test_remote_message_script_delay_timeout_to_self(self): + func = my_script_func + args = (torch.tensor(1),) + self._test_remote_message_delay_timeout(func, args, dst=0) + + @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1}) + def test_rref_to_here_timeout(self): + if self.rank != 0: + return + + dst_rank = (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + rref = rpc.remote( + dst_worker, torch.add, 
args=(torch.tensor(1), torch.tensor(1)) + ) + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + rref.to_here(0.01) + + rref.to_here() + + @dist_init(faulty_messages=[]) + def test_rpc_builtin_timeout(self): + next_rank = (self.rank + 1) % self.world_size + dst_worker = worker_name(next_rank) + expected_error = self.get_timeout_error_regex() + # PYTHON_CALL message types which correspond to Python UDF over RPC + # by default get a delay (see faulty_rpc_agent_test_fixture) + with self.assertRaisesRegex(RuntimeError, expected_error): + rpc.rpc_sync( + dst_worker, + torch.add, + args=(torch.tensor(1), torch.tensor(1)), + timeout=1, + ) + + fut = rpc.rpc_async( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=1 + ) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # Ensure that the currently set default timeout is large enough such + # that RPCs with delays still complete. + fut = rpc.rpc_async( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)) + ) + fut.wait() + + # Ensure timeout if we set a new default and don't override + rpc._set_rpc_timeout(0.001) + fut = rpc.rpc_async( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)) + ) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # Ensure run to completion if we specify timeout of 0 + fut = rpc.rpc_async( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=0 + ) + fut.wait() + # Reset for clean shutdown + rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC) + + @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5}) + def test_rpc_script_timeout(self): + next_rank = (self.rank + 1) % self.world_size + dst_worker = worker_name(next_rank) + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + rpc.rpc_sync(dst_worker, my_script_func, 
args=(torch.tensor(1),), timeout=1) + + fut = rpc.rpc_async(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # Ensure that the currently set default timeout is large enough such + # that RPCs with delays still complete. + fut = rpc.rpc_async( + dst_worker, my_script_func, args=(torch.tensor(1),) + ) + fut.wait() + + # Ensure timeout if we set a new default and don't override + rpc._set_rpc_timeout(0.001) + fut = rpc.rpc_async( + dst_worker, my_script_func, args=(torch.tensor(1),) + ) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # Ensure run to completion if we specify timeout of 0 + rpc._set_rpc_timeout(0.001) + fut = rpc.rpc_async( + dst_worker, my_script_func, args=(torch.tensor(1),), timeout=0 + ) + fut.wait() + # Reset for clean shutdown + rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_rpc_agent_test_fixture.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_rpc_agent_test_fixture.py new file mode 100644 index 0000000000000000000000000000000000000000..ca584c1dc95aa0cd8f4d21ac8f0a31d214974106 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_rpc_agent_test_fixture.py @@ -0,0 +1,62 @@ +# mypy: allow-untyped-defs + +import torch.distributed.rpc as rpc +import torch.distributed.rpc._testing # noqa: F401 +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) + +# The following message types are currently retried in the RREF protocol and +# distributed autograd. Thus only these messages should be tested with the +# Faulty RPC Agent. 
+retryable_message_types = ["RREF_FORK_REQUEST", + "RREF_CHILD_ACCEPT", + "RREF_USER_DELETE", + "CLEANUP_AUTOGRAD_CONTEXT_REQ"] + +# The following messages incur the corresponding delay in seconds while being +# processed in FaultyTensorPipeAgent's enqueueSend() function. +default_messages_to_delay = { + "PYTHON_CALL": 1.5, # Python UDF + "SCRIPT_CALL": 1.5, # Script/Builtin +} + +class FaultyRpcAgentTestFixture(RpcAgentTestFixture): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.messages_to_fail = retryable_message_types + self.messages_to_delay = default_messages_to_delay + + @property + def rpc_backend(self): + return rpc.backend_registry.BackendType[ + "FAULTY_TENSORPIPE" + ] + + @property + def rpc_backend_options(self): + return rpc.backend_registry.construct_rpc_backend_options( + self.rpc_backend, + init_method=self.init_method, + num_worker_threads=8, + num_fail_sends=3, + messages_to_fail=self.messages_to_fail, + messages_to_delay=self.messages_to_delay, + ) + + def setup_fault_injection(self, faulty_messages, messages_to_delay): + if faulty_messages is not None: + self.messages_to_fail = faulty_messages + if messages_to_delay is not None: + self.messages_to_delay = messages_to_delay + + def get_shutdown_error_regex(self): + error_regexes = [ + "Exception in thread pool task", + "Connection reset by peer", + "Connection closed by peer" + ] + return "|".join([f"({error_str})" for error_str in error_regexes]) + + def get_timeout_error_regex(self): + return "RPC ran for more than" diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__init__.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/__init__.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90565ccfe27fe0f72bbed5f2c8c29ddf1c320e94 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/rpc_test.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/rpc_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1160d984bf80126833abba3da7500eff282d4849 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/rpc_test.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/rpc_test_faulty.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/rpc_test_faulty.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed83d2e3988dfe03d9b25ced3d1e21f90dde3567 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/rpc_test_faulty.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/rpc_test.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/rpc_test.py new file mode 100644 index 0000000000000000000000000000000000000000..2f83eb3311c650323083441b63fc7ec753a102fb --- /dev/null +++ 
b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/rpc_test.py @@ -0,0 +1,1385 @@ +# mypy: allow-untyped-defs + +import time +import io +from typing import Dict, List, Tuple, Any + +import torch +import torch.distributed as dist +import torch.distributed.rpc as rpc +from torch import Tensor +from torch.autograd.profiler import record_function +from torch.distributed.rpc import RRef +from torch.distributed.rpc.internal import RPCExecMode, _build_rpc_profiling_key +from torch.futures import Future +from torch.testing._internal.common_utils import TemporaryFileName +from torch.testing._internal.dist_utils import ( + dist_init, + get_function_event, + initialize_pg, + worker_name, +) +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) + +from torch.autograd.profiler_legacy import profile as _profile + +def rref_isinstance(rref, cls_to_check): + return isinstance(rref.local_value(), cls_to_check) + +def sleep(t): + time.sleep(t) + + +def rpc_return_rref(dst): + return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)) + + +@torch.jit.script +def rref_local_value(rref: RRef[Tensor]) -> Tensor: + return rref.local_value() + + +@torch.jit.script +def list_create() -> List[int]: + global_list = [1, 2, 3] + return global_list + + +@torch.jit.script +def rref_list_mutate(rref: RRef[List[int]]) -> None: + rref.local_value().append(4) + rref.to_here().append(5) + rref.to_here(5.0).append(6) + + +def return_value(value: int) -> int: + return value + + +class RRefAPITest: + @dist_init + def test_rref_is_owner(self): + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + rref_var = rpc_return_rref(dst_worker_name) + + @torch.jit.script + def rref_tensor_is_owner(rref_var: RRef[Tensor]) -> bool: + return rref_var.is_owner() + + res = rref_tensor_is_owner(rref_var) + self.assertEqual(res, False) + + @dist_init + def test_rref_local_value(self): + if self.rank != 0: + return + + 
dst_worker_name = worker_name((self.rank + 1) % self.world_size) + rref = rpc_return_rref(dst_worker_name) + + with self.assertRaisesRegex( + RuntimeError, r"Can't call RRef.local_value\(\) on a non-owner RRef" + ): + rref_local_value(rref) + + ret = ret = rpc.rpc_sync(dst_worker_name, rref_local_value, (rref,)) + self.assertEqual(ret, torch.add(torch.ones(2, 2), 1)) + + @dist_init + def test_local_rref_local_value(self): + if self.rank != 0: + return + + dst_worker_name = worker_name(self.rank) + rref = rpc.remote(dst_worker_name, return_value, (5,), {}) + + ret = rref_local_value(rref) + self.assertEqual(ret, 5) + + def _create_rref(self): + owner_rank = (self.rank + 2) % self.world_size + return rpc.remote( + worker_name(owner_rank), torch.add, args=(torch.zeros(2, 2), 1) + ) + + @dist_init + def test_user_rrefs_confirmed(self): + dst_rank = (self.rank + 1) % self.world_size + rref = self._create_rref() + ret = rpc.rpc_sync( + worker_name(dst_rank), script_check_rref_confirmed, args=(rref,) + ) + self.assertEqual(ret, True) + + @dist_init + def test_user_rrefs_confirmed_remote(self): + dst_rank = (self.rank + 1) % self.world_size + rref = self._create_rref() + ret_rref = rpc.remote( + worker_name(dst_rank), script_check_rref_confirmed, args=(rref,) + ) + self.assertEqual(ret_rref.to_here(), True) + + @dist_init + def test_rref_list_mutate(self): + dst = worker_name((self.rank + 1) % self.world_size) + list_rref = rpc.remote(dst, list_create) + + rpc.rpc_sync(dst, rref_list_mutate, args=(list_rref,)) + self.assertEqual(list_rref.to_here(), [1, 2, 3, 4, 5, 6]) + + +@torch.jit.script +def no_arg(): + return 0 + + +@torch.jit.script +def one_arg(value): + return value + 1 + +@torch.jit.script +def script_add_ones(x): + return torch.add(x, torch.ones(1)) + +@torch.jit.script +def script_add_ones_with_record_function(x, block: str): + with record_function(block): + return torch.add(x, torch.ones(1)) + + +@torch.jit.script +def 
record_function_on_caller_rpc_async(dst_worker_name: str, block: str) -> Tensor: + t: Tensor = torch.ones(1) + with record_function(block) as rf: + fut1 = rpc.rpc_async(dst_worker_name, script_add_ones, (t, )) + # Extra operator call to avoid de-duplication of the next async call + # see https://github.com/pytorch/pytorch/pull/62710#discussion_r694680279 + zero = torch.zeros_like(t) + fut2 = rpc.rpc_async(dst_worker_name, script_add_ones, (t, )) + res = fut1.wait() + fut2.wait() + zero + return res + + + +@torch.jit.script +def script_fork_wait_udf(tensor): + fut = torch.jit._fork(script_add_ones, tensor) + x = torch.jit._wait(fut) + return x + + +@torch.jit.script +def rref_to_here(rref_var: RRef[Tensor]) -> Tensor: + return rref_var.to_here() + + +@torch.jit.script +def return_rref(rref_var: RRef[Tensor]) -> RRef[Tensor]: + return rref_var + + +@torch.jit.script +def script_raise_func(value): + if value.numel() == 2: + raise ValueError("Expected error") + return value + 1 + + +@torch.jit.script +def script_fork_wait_throw(invalue): + fut = torch.jit._fork(script_raise_func, invalue) + value = torch.jit._wait(fut) + return value + + +@torch.jit.script +def call_rpc_with_profiling(record: torch.classes.profiler._RecordFunction, dst_worker_name: str) -> Tensor: + # Call rpc_async from within ScriptFunction and ensure that we can attach + # profiling callbacks. Note that handle here is a Tensor representation of + # RecordFunction. 
+ fut = rpc.rpc_async(dst_worker_name, one_arg, (torch.tensor(1),)) + torch.ops.profiler._call_end_callbacks_on_jit_fut(record, fut) + ret = fut.wait() + return ret + +@torch.jit.script +def call_rpc_torchscript_with_record_function(dst_worker_name: str, block: str) -> Tensor: + fut = rpc.rpc_async(dst_worker_name, script_add_ones_with_record_function, (torch.tensor(1), block)) + return fut.wait() + + +@torch.jit.script +def call_fork_with_profiling(record: torch.classes.profiler._RecordFunction) -> Tensor: + # Call fork from within ScriptFunction and ensure that we can attach profiling + # callbacks to the resulting future. Note that handle here is a Tensor + # representation of RecordFunction. + fut = torch.jit._fork(one_arg, torch.tensor(1)) + torch.ops.profiler._call_end_callbacks_on_jit_fut(record, fut) + ret = fut.wait() + return ret + + +class MyScriptModuleWithRRefs(torch.jit.ScriptModule): + def __init__(self, dst_worker): + super().__init__() + self.rrefs = [] + for _ in range(4): + self.rrefs.append(rpc_return_rref(dst_worker)) + + @torch.jit.script_method + def forward(self) -> Tensor: + res_tensor = torch.ones(2, 2) + for rref in self.rrefs: + res_tensor += rref.to_here() + + return res_tensor + + +@torch.jit.ignore +def rref_python_annotation(rref_var: RRef[Tensor]) -> RRef[Tensor]: + return rref_var + + +@torch.jit.script +def rref_script_annotation(rref_var: RRef[Tensor]) -> Tensor: + return rref_python_annotation(rref_var).to_here() + + +class RRefTypingTest: + @dist_init + def test_rref_as_arg_and_return(self): + n = self.rank + 1 + dst_rank = n % self.world_size + local_ret = one_arg(torch.ones(2, 2)) + + # create rref on current rank + rref = rpc.remote(worker_name(self.rank), one_arg, args=(torch.ones(2, 2),)) + + # pass rref to another user in rpc call + ret = rpc.rpc_sync(worker_name(dst_rank), rref_to_here, args=(rref,)) + self.assertEqual(ret, local_ret) + + # return rref in rpc call + rref1 = rpc.rpc_sync(worker_name(dst_rank), 
return_rref, args=(rref,)) + self.assertEqual(rref1.to_here(), local_ret) + + # pass rref to another user in remote call + rref2 = rpc.remote(worker_name(dst_rank), rref_to_here, args=(rref,)) + self.assertEqual(rref2.to_here(), local_ret) + + # return rref in remote call + rref3 = rpc.remote(worker_name(dst_rank), return_rref, args=(rref,)) + self.assertEqual(rref3.to_here().to_here(), local_ret) + + @dist_init + def test_my_script_module_with_rrefs(self): + n = self.rank + 1 + dst_rank = n % self.world_size + + module_with_rrefs = MyScriptModuleWithRRefs(worker_name(dst_rank)) + res = module_with_rrefs() + self.assertEqual(res, torch.ones(2, 2) * 9) + + @dist_init + def test_rref_python_annotation(self): + n = self.rank + 1 + dst_rank = n % self.world_size + rref_var = rpc_return_rref(worker_name(dst_rank)) + + res = rref_script_annotation(rref_var) + self.assertEqual(res, torch.ones(2, 2) + 1) + + +class FutureTypingTest: + @dist_init + def test_future_passed_between_python_and_jit(self): + dst_rank = (self.rank + 1) % self.world_size + inputs = (torch.tensor([1, 1]), torch.tensor([2, 2])) + ret_fut = rpc.rpc_async(worker_name(dst_rank), two_args_two_kwargs, args=inputs) + expected_res = torch.tensor([10, 10]) + + @torch.jit.script + def future_wait_in_script(fut: Future[Tensor]) -> Tensor: + return fut.wait() + + self.assertEqual(future_wait_in_script(ret_fut), expected_res) + + @torch.jit.script + def future_return_to_python( + dst_rank: int, inputs: Tuple[Tensor, Tensor] + ) -> Future[Tensor]: + return rpc.rpc_async( + f"worker{dst_rank}", two_args_two_kwargs, inputs + ) + + fut_res = future_return_to_python(dst_rank, inputs) + self.assertEqual(fut_res.wait(), expected_res) + + @dist_init + def test_future_python_annotation(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + input_0 = torch.ones(2, 2) + input_1 = 1 + expected_res = torch.add(input_0, input_1) + + @torch.jit.ignore + def 
python_return_future() -> Future[Tensor]: + fut = rpc.rpc_async(dst_worker_name, torch.add, (input_0, input_1), {}) + return fut + + @torch.jit.script + def script_use_future() -> Tensor: + fut = python_return_future() + return fut.wait() + + res = script_use_future() + self.assertEqual(res, expected_res) + + +@torch.jit.script +class MyScriptClass: + def __init__(self, a: int): + self.a = a + + def get_value(self) -> int: + return self.a + + +@torch.jit.interface +class MyModuleInterface(torch.nn.Module): + def forward(self) -> Tensor: + # pyre-ignore[7]: Pyre and torch.jit.interface don't mix well + pass + + +class MyScriptModule(torch.jit.ScriptModule): + def __init__(self, rank): + super().__init__() + self.a = torch.ones(rank) + + @torch.jit.script_method + def forward(self) -> Tensor: + return self.a + + @torch.jit.script_method + def custom_func(self) -> Tensor: + return self.a + + +def owner_create_rref_my_script_class(a): + return rpc.RRef(MyScriptClass(a)) + + +def owner_create_rref_my_script_module(a): + return rpc.RRef(MyScriptModule(a), type_hint=MyModuleInterface) + + +@torch.jit.script +def script_rref_get_value_my_script_class(rref: RRef[MyScriptClass]) -> int: + return rref.to_here().get_value() + + +@torch.jit.script +def script_rref_run_forward_my_script_module(rref: RRef[MyModuleInterface]) -> Tensor: + return rref.to_here().forward() + + +class LocalRRefTest: + @dist_init + def test_create_local_script_class_rref_in_py(self): + if self.rank != 0: + return + + # Create a local RRef. + rref_script_class = rpc.RRef(MyScriptClass(self.rank)) + ret = rref_script_class.to_here().get_value() + self.assertEqual(ret, self.rank) + + @dist_init + def test_create_local_script_module_rref_in_py(self): + if self.rank != 0: + return + + # Create a local RRef. 
+ rref_script_module = rpc.RRef(MyScriptModule(self.rank), MyModuleInterface) + ret = rref_script_module.to_here().forward() + self.assertEqual(ret, torch.ones(self.rank)) + + # Create a local RRef without type hint. + with self.assertRaisesRegex( + RuntimeError, + ( + "The RRef being created contains a ScriptModule, " + "must provide its ModuleInterface type hint." + ), + ): + rref_script_module = rpc.RRef(MyScriptModule(self.rank)) + + @dist_init + def test_return_local_script_class_rref_in_py_and_use_in_script(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + # Create a local RRef remotely in Python. + rref = rpc.rpc_sync( + dst_worker_name, owner_create_rref_my_script_class, args=(self.rank,) + ) + + def use_rref_on_owner(rref: RRef[MyScriptClass]) -> int: + args = (rref,) + kwargs: Dict[str, Any] = {} + fut = rpc.rpc_async( + rref.owner(), script_rref_get_value_my_script_class, args, kwargs + ) + ret = fut.wait() + return ret + + # Use RRef in local Python RPC and remote Script run. + ret = use_rref_on_owner(rref) + self.assertEqual(ret, self.rank) + + # Use RRef in local Script RPC and remote Script run. + use_rref_on_owner_script = torch.jit.script(use_rref_on_owner) + ret = use_rref_on_owner_script(rref) + self.assertEqual(ret, self.rank) + + @dist_init + def test_return_local_script_module_rref_in_py_and_use_in_script(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + # Create a local RRef remotely in Python. + rref = rpc.rpc_sync( + dst_worker_name, owner_create_rref_my_script_module, args=(self.rank,) + ) + + def use_rref_on_owner(rref: RRef[MyModuleInterface]) -> Tensor: + args = (rref,) + kwargs: Dict[str, Any] = {} + fut = rpc.rpc_async( + rref.owner_name(), + script_rref_run_forward_my_script_module, + args, + kwargs, + ) + ret = fut.wait() + return ret + + # Use RRef in local Python RPC and remote Script run. 
+ ret = use_rref_on_owner(rref) + self.assertEqual(ret, torch.ones(self.rank)) + + # Use RRef in local Script RPC and remote Script run. + use_rref_on_owner_script = torch.jit.script(use_rref_on_owner) + ret = use_rref_on_owner_script(rref) + self.assertEqual(ret, torch.ones(self.rank)) + + +def python_function(): + return 0 + + +@torch.jit.script +def two_args_two_kwargs( + first_arg, + second_arg, + first_kwarg=torch.tensor([3, 3]), + second_kwarg=torch.tensor([4, 4]), +): + return first_arg + second_arg + first_kwarg + second_kwarg + + +@torch.jit.script +def assorted_types_args_kwargs( + tensor_arg: Tensor, # noqa: E999 + str_arg: str, + int_arg: int, + tensor_kwarg: Tensor = torch.tensor([2, 2]), + str_kwarg: str = "str_kwarg", + int_kwarg: int = 2, +): + return tensor_arg + tensor_kwarg, str_arg + str_kwarg, int_arg + int_kwarg + + +@torch.jit.script +def raise_script(): + raise RuntimeError("Expected error") + + +@torch.jit.script +def script_rpc_async_call( + dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor] +): + fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs) + ret = fut.wait() + return ret + +@torch.jit.script +def script_rpc_sync_call( + dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor] +): + res = rpc.rpc_sync(dst_worker_name, two_args_two_kwargs, args, kwargs) + return res + +@torch.jit.script +def script_rpc_remote_call( + dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor] +): + rref_res = rpc.remote(dst_worker_name, two_args_two_kwargs, args, kwargs) + return rref_res.to_here() + +class JitRpcOpTest: + # Call functions remotely from Script. 
+ @dist_init + def test_all_kwargs_are_populated_by_defaults(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + args = (torch.tensor([1, 1]), torch.tensor([2, 2])) + kwargs = {} + + for script_op in [script_rpc_async_call, script_rpc_sync_call, script_rpc_remote_call]: + ret = script_op( + dst_worker_name, args, kwargs + ) + self.assertEqual(ret, torch.tensor([10, 10])) + + @dist_init + def test_some_kwargs_are_populated_by_defaults(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + args = (torch.tensor([1, 1]), torch.tensor([2, 2])) + kwargs = {"first_kwarg": torch.tensor([2, 2])} + + for script_op in [script_rpc_async_call, script_rpc_sync_call, script_rpc_remote_call]: + ret = script_op( + dst_worker_name, args, kwargs + ) + self.assertEqual(ret, torch.tensor([9, 9])) + + @dist_init + def test_no_kwargs_are_populated_by_defaults(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + args = (torch.tensor([1, 1]), torch.tensor([2, 2])) + kwargs = { + "first_kwarg": torch.tensor([2, 2]), + "second_kwarg": torch.tensor([3, 3]), + } + for script_op in [script_rpc_async_call, script_rpc_sync_call, script_rpc_remote_call]: + ret = script_op( + dst_worker_name, args, kwargs + ) + self.assertEqual(ret, torch.tensor([8, 8])) + + @dist_init + def test_args_and_kwargs_contain_different_types(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + @torch.jit.script + def script_rpc_async_call_with_assorted_types( + dst_worker_name: str, + ): + args = (torch.tensor([1, 1]), "str_arg", 1) + # Must annotate the value type as `Any`, because JIT type inference + # does not support multiple types when defining a Dict. + # The error JIT gives is, + # "Dict values must contain only a single type, " + # "expected: Tensor but found str instead." 
+ kwargs: Dict[str, Any] = { + "tensor_kwarg": torch.tensor([3, 3]), + "str_kwarg": "_str_kwarg", + "int_kwarg": 3, + } + fut = rpc.rpc_async( + dst_worker_name, assorted_types_args_kwargs, args, kwargs + ) + ret = fut.wait() + return ret + + ret = script_rpc_async_call_with_assorted_types( + dst_worker_name + ) + self.assertEqual(ret, (torch.tensor([4, 4]), "str_arg_str_kwarg", 4)) + + @dist_init + def test_kwargs_not_passed(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + @torch.jit.script + def script_rpc_async_call_without_kwargs_passed( + dst_worker_name: str, + ): + args = () + fut = rpc.rpc_async(dst_worker_name, no_arg, args) + ret = fut.wait() + return ret + + ret = script_rpc_async_call_without_kwargs_passed( + dst_worker_name + ) + self.assertEqual(ret, 0) + + @dist_init + def test_args_kwargs_are_neither_passed(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + @torch.jit.script + def script_rpc_async_call_without_args_kwargs_passed( + dst_worker_name: str, + ): + fut = rpc.rpc_async(dst_worker_name, no_arg) + ret = fut.wait() + return ret + + ret = script_rpc_async_call_without_args_kwargs_passed( + dst_worker_name + ) + self.assertEqual(ret, 0) + + @dist_init + def test_less_than_needed_args_are_specified(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + # Notice, args matching happens during scripting. 
+ with self.assertRaisesRegex(RuntimeError, "Argument second_arg not provided"): + + @torch.jit.script + def script_rpc_async_call_with_less_args( + dst_worker_name: str, # noqa: E999 + ): + args = (torch.tensor([1, 1]),) + kwargs = {} + fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs) + ret = fut.wait() + return ret + + @dist_init + def test_more_than_needed_args_are_specified(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + # Notice, args matching happens during scripting. + with self.assertRaisesRegex( + RuntimeError, + "Expected at most 4 arguments but found 5 positional arguments", + ): + + @torch.jit.script + def script_rpc_async_call_with_more_args( + dst_worker_name: str, + ): + args = ( + torch.tensor([1, 1]), + torch.tensor([2, 2]), + torch.tensor([3, 3]), + torch.tensor([4, 4]), + torch.tensor([5, 5]), + ) + kwargs = {} + fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs) + ret = fut.wait() + return ret + + @dist_init + def test_unexepected_kwarg_is_specified(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + # Notice, kwargs matching happens during execution. 
+ @torch.jit.script + def script_rpc_async_call_with_unexpected_kwarg( + dst_worker_name: str, # noqa: E999 + ): + args = (torch.tensor([1, 1]), torch.tensor([2, 2])) + kwargs = {"third_kwarg": torch.tensor([1, 1])} + fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs) + ret = fut.wait() + return ret + + with self.assertRaisesRegex( + RuntimeError, "Unknown keyword argument 'third_kwarg'" + ): + ret = script_rpc_async_call_with_unexpected_kwarg( + dst_worker_name + ) + self.assertEqual(ret, 0) + + @dist_init + def test_call_python_function_remotely_from_script_not_supported(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + @torch.jit.script + def rpc_async_call_remote_py_function_in_torchscript(dst_worker_name: str): + args = () + kwargs = {} + fut = rpc.rpc_async(dst_worker_name, python_function, args, kwargs) + ret = fut.wait() + return ret + + with self.assertRaisesRegex( + RuntimeError, "attempted to get undefined function" + ): + ret = rpc_async_call_remote_py_function_in_torchscript(dst_worker_name) + self.assertEqual(ret, 0) + + @dist_init + def test_call_script_function_that_raises_remotely_from_script(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + # Notice, TorchScript always translates(emits) Python `raise` statement, + # as the exception message string, "Exception", + # no matter what exception type and exception message are in the statement, + @torch.jit.script + def rpc_async_call_remote_raising_torchscript_in_torchscript( + dst_worker_name: str, + ): + args = () + kwargs = {} + fut = rpc.rpc_async(dst_worker_name, raise_script, args, kwargs) + ret = fut.wait() + return ret + + with self.assertRaisesRegex(RuntimeError, "Expected error"): + ret = rpc_async_call_remote_raising_torchscript_in_torchscript( + dst_worker_name + ) + self.assertEqual(ret, 0) + + @dist_init + def 
test_call_script_function_that_not_exists_remotely_from_script(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + @torch.jit.script + def nonexisting_script(): + return 0 + + @torch.jit.script + def rpc_async_call_remote_nonexisting_torchscript_in_torchscript( + dst_worker_name: str, + ): + args = () + kwargs = {} + fut = rpc.rpc_async(dst_worker_name, nonexisting_script, args, kwargs) + ret = fut.wait() + return ret + + with self.assertRaisesRegex( + RuntimeError, "attempted to get undefined function nonexisting_script" + ): + ret = rpc_async_call_remote_nonexisting_torchscript_in_torchscript( + dst_worker_name + ) + self.assertEqual(ret, 0) + + +@torch.jit.ignore +def my_script_module_init(rank: int) -> MyModuleInterface: + return MyScriptModule(rank) + + +@torch.jit.script +def construct_my_script_module(rank: int) -> MyModuleInterface: + return my_script_module_init(rank) + + +@torch.jit.script +def run_ref_script_module( + ref_script_module: RRef[MyModuleInterface], t: Tensor +) -> Tensor: + module = ref_script_module.to_here() + return module.forward() + t + + +@torch.jit.script +def script_check_rref_confirmed(rref: RRef[Tensor]) -> bool: + return rref.confirmed_by_owner() + + +@torch.jit.script +def save_rref(rref_var: RRef[Tensor], fname: str) -> None: + torch.save(rref_var, fname) + + +@torch.jit.script +def script_add(x: Tensor, y: Tensor) -> Tensor: + return x + y + + +@rpc.functions.async_execution +@torch.jit.script +def async_add(to: str, x: Tensor, y: Tensor) -> Future[Tensor]: + return rpc.rpc_async(to, script_add, (x, y)) + + +@rpc.functions.async_execution +@torch.jit.script +def async_wrong_type() -> Tensor: + return torch.zeros(2) + + +def load_script_module_with_pickled_rref(pickled_script_module): + f = io.BytesIO(pickled_script_module) + m = torch.jit.load(f) + return m() + + +class JitRpcTest( + RRefAPITest, + RRefTypingTest, + LocalRRefTest, + JitRpcOpTest, + FutureTypingTest, + 
RpcAgentTestFixture, +): + @dist_init + def test_torchscript_function(self): + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + local_ret = one_arg(torch.ones(2, 2)) + ret = rpc.rpc_sync(dst_worker_name, one_arg, args=(torch.ones(2, 2),)) + self.assertEqual(ret, local_ret) + rref = rpc.remote(dst_worker_name, one_arg, args=(torch.ones(2, 2),)) + self.assertEqual(rref.to_here(), local_ret) + # create rref to itself + local_rref = rpc.remote( + worker_name(self.rank), one_arg, args=(torch.ones(2, 2),) + ) + self.assertEqual(local_rref.to_here(), local_ret) + + @dist_init + def test_torchscript_function_exception(self): + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + with self.assertRaisesRegex(RuntimeError, r"one_arg\(\) expected at most"): + ret = rpc.rpc_sync(dst_worker_name, one_arg, args=(10, 20)) + + with self.assertRaisesRegex(RuntimeError, r"one_arg\(\) expected at most"): + rref = rpc.remote(dst_worker_name, one_arg, args=(10, 20)) + + @dist_init + def test_torchscript_functions_not_supported(self): + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + my_local_script_module = MyScriptModule(self.rank) + + # It is not thread safe to instantiate MyScriptModule in multiple threads, + # wait for local MyScriptModule instantiation to finish, + # otherwise it could instantiate MyScriptModule in parallel with + # server thread in the below + initialize_pg(self.file_init_method, self.rank, self.world_size) + dist.barrier() + + # rpc_sync still accepts script class and run it in + # the same code path as python call. + ret = rpc.rpc_sync(dst_worker_name, MyScriptClass, args=(self.rank,)) + + # rpc_sync does not accept script module method. + # Python 3.5 and Python 3.6 throw different error message, the only + # common word can be greped is "pickle". 
+ with self.assertRaisesRegex(TypeError, "pickle"): + ret = rpc.rpc_async( + dst_worker_name, my_local_script_module.forward, args=() + ) + + @dist_init + def test_remote_script_module(self): + # TODO, need more investigation + # there is rref leak when shutting down, suspect it is because + # ref as arg is passed to pybind boundary, and the ref is not garbage + # collected by python when calling shutdown() + import torch.distributed.rpc.api as api + + api._ignore_rref_leak = True + + local_ret = torch.ones(self.rank) + torch.ones(self.rank) + + n = self.rank + 1 + dst_rank = n % self.world_size + remote_ref = rpc.remote( + worker_name(dst_rank), construct_my_script_module, args=(self.rank,) + ) + + # pass rref arg to owner + ret = rpc.rpc_sync( + worker_name(dst_rank), + run_ref_script_module, + args=(remote_ref, torch.ones(self.rank)), + ) + self.assertEqual(ret, local_ret) + + # pass rref arg to self/user + with self.assertRaisesRegex( + RuntimeError, + "is an RRef to a ScriptModule. It can't be sent through RPC from owner,", + ): + ret = rpc.rpc_sync( + worker_name(self.rank), + run_ref_script_module, + args=(remote_ref, torch.ones(self.rank)), + ) + + @dist_init + def test_create_script_module_on_remote(self): + dst_name = worker_name((self.rank + 1) % self.world_size) + # Construct on remote end with rpc_sync + created_script_module = rpc.rpc_sync( + dst_name, MyScriptModule, args=(self.rank,) + ) + # Forward should output a ones tensor of self.rank. + self.assertTrue(isinstance(created_script_module, torch.jit.ScriptModule)) + rank_ones_tensor = created_script_module() + self.assertEqual(torch.ones(self.rank), rank_ones_tensor) + + # Construct ScriptModule with rpc.remote. + remote_script_module = rpc.remote(dst_name, MyScriptModule, args=(self.rank,)) + # Verify it is an instance of ScriptModule on remote end. 
+ remote_end_is_script = rpc.rpc_sync( + remote_script_module.owner(), + rref_isinstance, + args=(remote_script_module, torch.jit.ScriptModule), + ) + self.assertTrue(remote_end_is_script) + # Run forward pass remotely. + remote_forward_output = remote_script_module.rpc_sync().forward() + self.assertEqual(remote_forward_output, torch.ones(self.rank)) + # Run function defined on ScriptModule remotely. + remote_func_output = remote_script_module.rpc_sync().custom_func() + self.assertEqual(remote_func_output, torch.ones(self.rank)) + # Ensure we can transfer ScriptModule RRef to this rank and run + # forward pass. + local_script_module = remote_script_module.to_here() + self.assertTrue(isinstance(local_script_module, torch.jit.ScriptModule)) + rank_ones_tensor = local_script_module() + self.assertEqual(rank_ones_tensor, torch.ones(self.rank)) + local_script_func_output = local_script_module.custom_func() + self.assertEqual(local_script_func_output, torch.ones(self.rank)) + + @dist_init + def test_load_script_module_with_pickled_rref(self): + dst_name = worker_name((self.rank + 1) % self.world_size) + m1 = MyScriptModuleWithRRefs(dst_name) + m2 = MyScriptModuleWithRRefs(dst_name) + + f = io.BytesIO() + + rpc._enable_jit_rref_pickle() + torch.jit.save(m1, f) + rpc._disable_jit_rref_pickle() + + out1 = rpc.rpc_sync( + dst_name, + load_script_module_with_pickled_rref, + args=(f.getvalue(),) + ) + out2 = m2() + self.assertEqual(out1, out2) + + @dist_init + def test_rref_jit_pickle_not_supported(self): + n = self.rank + 1 + dst_rank = n % self.world_size + rref_var = rpc_return_rref(worker_name(dst_rank)) + with TemporaryFileName() as fname: + with self.assertRaisesRegex( + RuntimeError, "RRef jit pickling is only allowed inside RPC calls" + ): + save_rref(rref_var, fname) + + @dist_init + def test_remote_script_throw(self): + rref = rpc.remote( + worker_name((self.rank + 1) % self.world_size), + script_raise_func, + args=(torch.ones(2),), + ) + with 
self.assertRaisesRegex(Exception, ".*Expected error.*"): + rref.to_here() + + @dist_init + def test_remote_script_udf(self): + rref = rpc.remote( + worker_name((self.rank + 1) % self.world_size), + script_fork_wait_udf, + args=(torch.ones(2),), + ) + self.assertEqual(rref.to_here(), torch.ones(2) * 2) + + @dist_init + def test_async_script_udf(self): + future = rpc.rpc_async( + worker_name((self.rank + 1) % self.world_size), + script_fork_wait_udf, + args=(torch.ones(2),), + ) + self.assertEqual(future.wait(), torch.ones(2) * 2) + + @dist_init + def test_callback_simple(self): + def callback(fut): + return fut.wait() + 1 + + future = rpc.rpc_async( + worker_name((self.rank + 1) % self.world_size), + script_fork_wait_udf, + args=(torch.ones(2),), + ).then(callback) + self.assertEqual(future.wait(), torch.ones(2) * 2 + 1) + + @dist_init + def test_callback_chain(self): + n = self.rank + 1 + dst = worker_name(n % self.world_size) + + def callback(fut): + return fut.wait() + 1 + + fut = rpc.rpc_async( + worker_name(n % self.world_size), one_arg, args=(torch.ones(n, n),) + ) + + num_cbs = 20 + for _ in range(num_cbs): + fut = fut.then(callback) + + self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs) + + @dist_init + def test_add_done_callback(self): + callback_called = None + + def callback(fut): + nonlocal callback_called + callback_called = fut.wait() * 2 + + future = rpc.rpc_async( + worker_name((self.rank + 1) % self.world_size), + script_fork_wait_udf, + args=(torch.ones(2),), + ) + + future.add_done_callback(callback) + future_then = future.then(lambda _: True) + + self.assertEqual(future.wait(), torch.ones(2) * 2) + + # We have no guarantee that the add_done_callback fn will execute before the test finishes. 
+ # Adding a 'then' callback that runs afterwards to guarantee we wait for the first callback + future_then.wait() + self.assertEqual(callback_called, torch.ones(2) * 4) + + @dist_init + def test_async_script_throw(self): + future = rpc.rpc_async( + worker_name((self.rank + 1) % self.world_size), + script_fork_wait_throw, + args=(torch.ones(2),), + ) + with self.assertRaisesRegex(Exception, ".*Expected error.*"): + future.wait() + + @dist_init + def test_callback_with_exception(self): + def callback(fut): + with self.assertRaisesRegex(Exception, ".*Expected error.*"): + fut.wait() + raise RuntimeError("Another expected error") + + future = rpc.rpc_async( + worker_name((self.rank + 1) % self.world_size), + script_fork_wait_throw, + args=(torch.ones(2),), + ).then(callback) + + with self.assertRaisesRegex(RuntimeError, "Another expected error"): + future.wait() + + @dist_init + def test_call_rpc_with_profiling(self): + # Ensures that we can call torch.ops.profiler._call_end_callbacks_on_jit_fut on a jit + # future from within a script function that calls rpc_async + if self.rank == 0: + with _profile() as prof: + prof_key = _build_rpc_profiling_key( + RPCExecMode.ASYNC, + torch._jit_internal._qualified_name(one_arg), + "worker0", + "worker1", + ) + with torch.autograd.profiler.record_function(prof_key) as rf: + ret = call_rpc_with_profiling(rf.record, "worker1") + # TODO: Can't get a reliable time for this profiling event since + # it's hard to estimate the execution time on the remote end for non-UDFs. + # This can be resolved by https://github.com/pytorch/pytorch/issues/36272. + # After that, this test should be modified to validate the function time. + events = prof.function_events + function_event = get_function_event(events, prof_key) + self.assertTrue(torch._jit_internal._qualified_name(one_arg) in function_event.name) + + @dist_init + def test_rpc_async_jit_profiled(self): + # Tests that rpc_async calls made from within a TorchScript function are + # profiled. 
+ if self.rank == 0: + dst_rank = (self.rank + 1) % self.world_size + dst_worker_name = worker_name(dst_rank) + args = (torch.tensor([1, 1]), torch.tensor([2, 2])) + kwargs = {} + with _profile() as prof: + script_rpc_async_call( + dst_worker_name, args, kwargs + ) + + # Ensure rpc_async call is profiled + function_events = prof.function_events + qual_name = torch._jit_internal._qualified_name(two_args_two_kwargs) + rpc_async_jit_event = [ + event + for event in function_events + if qual_name in event.name and event.node_id == self.rank + ] + self.assertEqual(len(rpc_async_jit_event), 1) + rpc_async_jit_event = rpc_async_jit_event[0] + profiled_name = _build_rpc_profiling_key( + RPCExecMode.ASYNC_JIT, + qual_name, + worker_name(self.rank), + dst_worker_name, + ) + self.assertEqual(profiled_name, rpc_async_jit_event.name) + remote_events = [event for event in function_events if event.is_remote] + # All remote events should have taken place on dst_rank + remote_event_node_ids = { + remote_event.node_id for remote_event in remote_events + } + self.assertEqual(remote_event_node_ids, {dst_rank}) + # script_rpc_async_call invokes add operator + # so we should see this as a remote event. + remote_add = next( + remote_event + for remote_event in remote_events + if "aten::add" in remote_event.name + ) + remote_add_profiled_name = f"{profiled_name}#remote_op: aten::add" + self.assertEqual(remote_add.name, remote_add_profiled_name) + + @dist_init + def test_record_function_on_caller_rpc_async(self): + if self.rank == 0: + dst_rank = (self.rank + 1) % self.world_size + dst_worker_name = worker_name(dst_rank) + block_scope = "foo" + with _profile() as prof: + # Runs 2 rpc_async calls within JIT under record_function. + record_function_on_caller_rpc_async(dst_worker_name, block_scope) + + # Ensure record_function event is profiled. 
+ function_events = prof.function_events + record_function_scope_event = [ + event for event in function_events if event.name == block_scope + ] + self.assertEqual(1, len(record_function_scope_event)) + record_function_scope_event = record_function_scope_event[0] + # Ensure RPC future is profiled. + expected_key = _build_rpc_profiling_key( + RPCExecMode.ASYNC_JIT, + torch._jit_internal._qualified_name(script_add_ones), + worker_name(self.rank), + dst_worker_name, + ) + jit_rpc_events = [ + event for event in function_events if event.name == expected_key + ] + self.assertEqual(2, len(jit_rpc_events)) + # Validate that the record_function scope time is greater than both + # of the individual RPC async call times. The reason it is not necessarily + # greater than the sum is because the two can execute in parallel. + for jit_rpc_event in jit_rpc_events: + self.assertTrue( + record_function_scope_event.cpu_time_total + > jit_rpc_event.cpu_time_total + ) + + @dist_init + def test_rpc_torchscript_record_function(self): + # tests that torchscript functions can be profiled using with + # record_function(...) over RPC. + REMOTE_OP_STR = "#remote_op: " + if self.rank == 0: + dst_rank = (self.rank + 1) % self.world_size + dst_worker_name = worker_name(dst_rank) + block_scope = "foo" + with _profile() as prof: + call_rpc_torchscript_with_record_function(dst_worker_name, block_scope) + + # Need to call below to populate CPU children. 
+ prof.key_averages() + function_events = prof.function_events + expected_key = ( + _build_rpc_profiling_key( + RPCExecMode.ASYNC_JIT, + torch._jit_internal._qualified_name( + script_add_ones_with_record_function + ), + worker_name(self.rank), + dst_worker_name, + ) + + REMOTE_OP_STR + + block_scope + ) + remote_record_function_event = next( + evt for evt in function_events if evt.name == expected_key + ) + self.assertTrue(block_scope in remote_record_function_event.name) + remote_children = remote_record_function_event.cpu_children + self.assertTrue("aten::add" in child.name for child in remote_children) + + def test_record_function_jit_end_callbacks_with_fork(self): + # Ensures that we can call rf._call_end_callbacks_on_future on a jit + # future in python eager mode with torch.jit.fork + sleep_interval = 1 + with _profile() as prof: + with torch.autograd.profiler.record_function("foo") as rf: + fut = torch.jit._fork(sleep, sleep_interval) + rf._call_end_callbacks_on_future(fut) + fut.wait() + + function_events = prof.function_events + sleep_event = get_function_event(function_events, "foo") + self.assertEqual(sleep_event.name, "foo") + # Validate that callbacks were fired at the right time by checking the + # profiling event cpu time + self.assertGreaterAlmostEqual(sleep_event.cpu_time * 1e-6, sleep_interval) + + def test_call_fork_in_jit_with_profiling(self): + # Ensures that we can call torch.ops.profiler._call_end_callbacks_on_jit_fut on a jit + # future from within a script function with torch.jit.fork + with _profile() as prof: + with torch.autograd.profiler.record_function("foo") as rf: + ret = call_fork_with_profiling(rf.record) + + events = prof.function_events + function_event = get_function_event(events, "foo") + self.assertEqual(function_event.name, "foo") + + @dist_init + def test_async_function_simple(self): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + ret = rpc.rpc_sync( + dst1, 
async_add, args=(dst2, torch.ones(2, 2), torch.ones(2, 2)) + ) + self.assertEqual(ret, torch.ones(2, 2) + 1) + + @dist_init + def test_async_function_wrong_return_type(self): + with self.assertRaisesRegex( + RuntimeError, + "Async functions must return an IValue of Future type, but got Tensor", + ): + rpc.rpc_sync( + worker_name((self.rank + 1) % self.world_size), async_wrong_type + ) + + @dist_init + def test_async_function_wrong_decorator_order(self): + # @torch.jit.script complains about undefined value rpc. Error is shown + # below. The reason for not checking error string is to avoid making + # JIT error handling code depend on RPC tests, as we don't have any + # restrictions on the error message here. + # + # RuntimeError: + # undefined value rpc: + # def async_wrong_decorator_order(to, x, y): + # # type: (str, Tensor, Tensor) -> Future[Tensor] + # return rpc.rpc_async(to, script_add, (x, y)) + # ~~~ <--- HERE + with self.assertRaises(RuntimeError): + + @torch.jit.script + @rpc.functions.async_execution + def async_wrong_decorator_order( + to: str, x: Tensor, y: Tensor + ) -> Future[Tensor]: + return rpc.rpc_async(to, script_add, (x, y)) + + @dist_init + def test_async_function_remote(self): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + rref = rpc.remote( + dst1, async_add, args=(dst2, torch.ones(2, 2), torch.ones(2, 2)) + ) + self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1) + + @dist_init + def test_async_function_remote_multi(self): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + num = 20 + rrefs = [] + for i in range(num): + rrefs.append( + rpc.remote( + dst1, async_add, args=(dst2, torch.ones(2, 2), torch.ones(2, 2) * i) + ) + ) + + for i in range(num): + self.assertEqual(rrefs[i].to_here(), torch.ones(2, 2) + i) + + @dist_init + def test_async_function_wrong_return_type_remote(self): + rref = rpc.remote( + 
worker_name((self.rank + 1) % self.world_size), async_wrong_type + ) + + with self.assertRaisesRegex( + RuntimeError, + "Async functions must return an IValue of Future type, but got Tensor", + ): + rref.to_here() diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_agent_test_fixture.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_agent_test_fixture.py new file mode 100644 index 0000000000000000000000000000000000000000..3a684b73d2f315a00465371fad3050a795251ddb --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_agent_test_fixture.py @@ -0,0 +1,63 @@ +# mypy: allow-untyped-defs + +import os +from abc import ABC, abstractmethod + +import torch.testing._internal.dist_utils + + +class RpcAgentTestFixture(ABC): + @property + def world_size(self) -> int: + return 4 + + @property + def init_method(self): + use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None) + if use_tcp_init == "1": + master_addr = os.environ["MASTER_ADDR"] + master_port = os.environ["MASTER_PORT"] + return f"tcp://{master_addr}:{master_port}" + else: + return self.file_init_method + + @property + def file_init_method(self): + return torch.testing._internal.dist_utils.INIT_METHOD_TEMPLATE.format( + file_name=self.file_name + ) + + @property + @abstractmethod + def rpc_backend(self): + pass + + @property + @abstractmethod + def rpc_backend_options(self): + pass + + def setup_fault_injection(self, faulty_messages, messages_to_delay): # noqa: B027 + """Method used by dist_init to prepare the faulty agent. + + Does nothing for other agents. + """ + + # Shutdown sequence is not well defined, so we may see any of the following + # errors when running tests that simulate errors via a shutdown on the + # remote end. 
+ @abstractmethod + def get_shutdown_error_regex(self): + """ + Return various error message we may see from RPC agents while running + tests that check for failures. This function is used to match against + possible errors to ensure failures were raised properly. + """ + + @abstractmethod + def get_timeout_error_regex(self): + """ + Returns a partial string indicating the error we should receive when an + RPC has timed out. Useful for use with assertRaisesRegex() to ensure we + have the right errors during timeout. + """ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_test.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_test.py new file mode 100644 index 0000000000000000000000000000000000000000..413f97d94eb28115b22e4572b5b04a98f1d40a68 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_test.py @@ -0,0 +1,6495 @@ +# mypy: allow-untyped-defs + +import concurrent.futures +import contextlib +import json +import os +import sys +import threading +import time + +from collections import namedtuple +from functools import partial +from threading import Event +from threading import Lock +from unittest import mock + +import torch +import torch.nn as nn +import torch.distributed as dist +import torch.distributed.rpc as rpc +import torch.distributed.autograd as dist_autograd +from torch.distributed.rpc import RRef, _get_debug_info, _rref_context_get_debug_info, WorkerInfo +from torch.distributed.rpc.api import _use_rpc_pickler, _thread_local_var, _wait_all +from torch.distributed.rpc.internal import ( + PythonUDF, + RPCExecMode, + _internal_rpc_pickler, + _build_rpc_profiling_key, +) +from torch.futures import Future +from torch.testing._internal.common_distributed import ( + skip_if_lt_x_gpu, + captured_output, + tp_transports, +) +from torch.testing._internal.common_utils import ( + IS_MACOS, + load_tests, + 
skip_but_pass_in_sandcastle_if, + get_cycles_per_ms, +) + +from torch.testing._internal.dist_utils import ( + dist_init, + get_function_event, + initialize_pg, + wait_until_node_failure, + wait_until_pending_futures_and_users_flushed, + wait_until_owners_and_forks_on_rank, + worker_name, +) +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) +from torch.testing._internal.common_utils import TemporaryFileName + +from torch.autograd.profiler_legacy import profile as _profile +import operator + + +def foo_add(): + return torch.add(torch.ones(1), torch.ones(1)) + +def udf_with_torch_ops(device=-1, use_record_function=False): + device_ctx = contextlib.nullcontext() if device == -1 else torch.cuda.device(device) + record_function_ctx = ( + torch.autograd.profiler.record_function("##forward##") + if use_record_function + else contextlib.nullcontext() + ) + with device_ctx, record_function_ctx: + t1, t2 = torch.ones(1), torch.ones(1) + t = torch.add(t1, t2) + t = torch.mul(t, t) + t = t.relu() + t = t.sigmoid() + +# Events (operator invocations) that are expected to be ran as part of the above +# function. +EXPECTED_REMOTE_EVENTS = [ + "aten::ones", + "aten::ones", + "aten::add", + "aten::mul", + "aten::relu", + "aten::clamp_min", + "aten::sigmoid", +] + +# Remote operations are prefixed with the following string for RPC profiling. 
+REMOTE_OP_STR = "#remote_op: " + + +VALUE_FUTURE = concurrent.futures.Future() +DONE_FUTURE = concurrent.futures.Future() + +FIFTY_MIL_CYCLES = 50000000 + +_rpc_barrier_count = 0 + +def _increment_count(): + global _rpc_barrier_count + _rpc_barrier_count += 1 + +def _reset_count(): + global _rpc_barrier_count + _rpc_barrier_count = 0 + +class StubRpcAgent: + def __init__(self, world_size): + self.world_size = world_size + + def get_worker_infos(self): + return { + WorkerInfo(name=worker_name(rank), id=rank) + for rank in range(self.world_size) + } + + +def _stub_construct_rpc_backend_options_handler(**kwargs): + return mock.Mock() # RpcBackendOptions. + + +def _stub_init_rpc_backend_handler(store, name, rank, world_size, rpc_backend_options): + return StubRpcAgent(world_size=world_size) + + +def set_value(value): + VALUE_FUTURE.set_result(value) + + +def wait_for_value_future(): + return VALUE_FUTURE.result() + + +def set_and_check_done(value): + VALUE_FUTURE.set_result(value) + return DONE_FUTURE.result() + + +# it is used to test python user defined function over rpc +# classes and functions are used to test python user defined class and +# methods over rpc +TensorClass = namedtuple("TensorClass", ["tensors"]) + +class MyPickleClass: + def __init__(self) -> None: + self.t = None + + def __getstate__(self): + (pickled_python_udf, tensors) = _internal_rpc_pickler.serialize( + PythonUDF(my_tensor_function, (torch.ones(2, 2), torch.ones(2, 2)), None) + ) + return (pickled_python_udf, tensors) + + def __setstate__(self, obj): + python_udf = _internal_rpc_pickler.deserialize(obj[0], obj[1]) + result = python_udf.func(python_udf.args[0], python_udf.args[1]) + self.t = result + + def set(self, val): + self.t = val + + +class SlowPickleClass: + def __init__(self, t): + self.t = t + + def __getstate__(self): + time.sleep(self.t) + return (self.t, ) + + def __setstate__(self, obj): + self.t = obj[0] + time.sleep(self.t) + + +class MyClass: + def __init__(self, a, 
delay=False): + self.a = a + # delay initialization to simulate errors if specified + if delay: + time.sleep(2) + + def my_instance_method(self, b): + return self.a + b + + @classmethod + def my_class_method(cls, d, e): + return d + e + + @staticmethod + def my_static_method(f): + return f > 10 + + def increment_value(self, increment): + self.a += increment + + def get_value(self): + return self.a + + def my_slow_method(self, my_tensor_arg): + time.sleep(5) + return torch.add(self.a, my_tensor_arg) + + +def _call_method_on_rref(method, rref, *args, **kwargs): + return method(rref.local_value(), *args, **kwargs) + + +def get_rref_list(values): + return [RRef(MyClass(a)) for a in values] + + +def add_rref_to_value(rref, value): + return rref.to_here() + value + + +def run_nested_pickle(pickle_cls_instance, tensor): + return pickle_cls_instance.t + tensor + +def build_sparse_tensor(coalesce=False): + i = [[0, 1, 1], [2, 0, 2]] + v = [3, 4, 5] + tensor = torch.sparse_coo_tensor(i, v, (2, 3)) + if coalesce: + tensor = tensor.coalesce() + return tensor + +def build_complex_tensors(): + a = torch.ones(3, 3) + b = [a, a] + c = [b, b] + d = [a, b] + e = {a: d} + return [a, b, c, d, e] + +def non_cont_test(t_view, t_cont): + if t_view.is_contiguous(): + raise Exception('t_view is contiguous!') # noqa: TRY002 + if not t_cont.is_contiguous(): + raise Exception('t_cont is not contiguous!') # noqa: TRY002 + if not torch.equal(t_view, t_cont): + raise Exception('t_view is not equal to t_cont!') # noqa: TRY002 + return t_view + +def my_function(a, b, c): + return a + b + c + + +def my_tensor_function(a, b): + return a + b + +def my_container_sum(a): + result = a[0] + for tensor in a[1:]: + result += tensor + return result + + +def my_sleep_func(seconds=1): + time.sleep(seconds) + return torch.mul(torch.tensor(1), torch.tensor(1)) + + +def my_complex_tensor_function(list_input, tensor_class_input, dict_input): + res = list_input[0] + for t in list_input: + res += t + for v in 
dict_input.values(): + res += v + complex_tensors = tensor_class_input.tensors + return (res, complex_tensors[0], complex_tensors[1], complex_tensors[2]) + + +def my_rref_function(rref_a, rref_b): + return rref_a.to_here() + rref_b.to_here() + + +def delayed_add(a, b, seconds=0.05): + time.sleep(seconds) + return a + b + + +def identity(a): + return a + +def no_result(): + print("do nothing") + +def raise_or_inc(value): + if value.numel() == 2: + raise ValueError("Expected error") + return value + 1 + +def nested_rpc(dst): + return rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1)) + + +def nested_rpc_sparse(dst): + return rpc.rpc_sync( + dst, + torch.add, + args=(build_sparse_tensor(), build_sparse_tensor()) + ) + + +def multi_layer_nested_async_rpc(dst, world_size, ttl): + # this method returns immediately without blocking the callee, but will + # generate additional requests. + if ttl > 0: + current_dst = worker_name(dst) + next_dst = (dst + 1) % world_size + rpc.rpc_async( + current_dst, + multi_layer_nested_async_rpc, + args=(next_dst, world_size, ttl - 1), + ) + return 0 + + +def nested_rref(dst): + return ( + rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)), + rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 2)), + ) + + +def nested_rref_sparse(dst): + return ( + rpc.remote( + dst, + torch.add, + args=(build_sparse_tensor(), build_sparse_tensor()) + ), + rpc.remote( + dst, + torch.add, + args=(build_sparse_tensor(), build_sparse_tensor()) + ), + ) + + +def nested_remote(dst): + rref = rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 3)) + return rref.to_here() + +def nested_remote_sparse(dst): + rref = rpc.remote(dst, torch.add, args=(build_sparse_tensor(), build_sparse_tensor())) + return rref.to_here() + + +def rref_forward_chain(dst, world_size, rref, ttl): + if ttl > 0: + current_dst = worker_name(dst) + next_dst = (dst + 1) % world_size + ret_rref = rpc.remote( + current_dst, rref_forward_chain, args=(next_dst, world_size, rref, ttl - 1) 
+ ) + return [ret_rref] + else: + return rref.to_here() + + +def rpc_return_rref(dst): + return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)) + + +def light_rpc(): + return 0 + + +def heavy_rpc(tensor): + for i in range(1, 100): + tensor *= i + tensor /= i + 1 + return 0 + + +def heavy_rpc_sparse(tensor): + for i in range(1, 100): + tensor *= i + tensor = tensor / (i + 1) + return 0 + +@torch.jit.script +def heavy_rpc_torchscript(tensor): + for i in range(1, 100): + tensor *= i + tensor /= i + 1 + return 0 + + +@torch.jit.script +def my_script_func(tensor): + return torch.add(tensor, tensor) + + +expected_err = "Expected error" + +# Note that it needs to inherit from Exception, not BaseException. See comment +# in rpc/internal.py +class CustomException(Exception): + def __init__(self, bool, msg): + self.bool = bool + super().__init__(msg) + +def raise_func(): + raise ValueError(expected_err) + +def custom_raise_func(): + raise CustomException(True, "foo") + +@torch.jit.script +def raise_func_script(expected_err: str) -> torch.Tensor: + raise ValueError(expected_err) + +expected_err_escape = "\nFirst line of error \n next line of error \n last line of error" +def raise_func_escape(): + raise ValueError(expected_err_escape) + + +global_rref = None + + +def set_global_rref(rref): + global global_rref + global_rref = rref + + +def clear_global_rref(): + global global_rref + global_rref = None + + +def check_rref_confirmed(rref): + return rref.confirmed_by_owner() + + +def get_rref_debug_info(): + return _rref_context_get_debug_info() + + +def add_use_future_cb(to, x, y, z): + out = concurrent.futures.Future() + + def callback(fut): + out.set_result(fut.wait() + z) + + fut = rpc.rpc_async(to, torch.add, args=(x, y)) + fut.then(callback) + return out.result() + + +def get_events_from_profile(profile_rref): + return profile_rref.local_value().process_global_function_events + + +def add_use_future_set_result(to, x, y, z): + out = torch.futures.Future() + fut = 
rpc.rpc_async(to, torch.add, args=(x, y)) + fut.then(lambda fut : out.set_result(fut.wait() + z)) + return out.wait() + + +def add_use_future_nested_cb(to, x, y, z): + out = torch.futures.Future() + + def callback(fut1): + fut2 = rpc.rpc_async(to, torch.add, args=(fut1.wait(), z)) + fut2.then(lambda fut2 : out.set_result(fut2.wait())) + + fut1 = rpc.rpc_async(to, torch.add, args=(x, y)) + fut1.then(callback) + return out.wait() + + +def fail_on_fut(fut): + pass + + +@rpc.functions.async_execution +def async_raise_func(): + raise RuntimeError("Expected error") + + +@rpc.functions.async_execution +def async_wrong_type(): + return torch.zeros(2, 2) + + +@rpc.functions.async_execution +def async_add(to, x, y): + return rpc.rpc_async(to, torch.add, args=(x, y)) + + +def slow_add(x, y, device="cpu"): + time.sleep(1) + x = x.to(device) + y = y.to(device) + return torch.add(x, y).cpu() + + +@rpc.functions.async_execution +def slow_async_add(to, x, y, device="cpu"): + return rpc.rpc_async(to, slow_add, args=(x, y, device)) + + +@rpc.functions.async_execution +def async_add_with_future_ctor(to, x, y, z): + fut = torch.futures.Future() + rpc.rpc_async(to, torch.add, args=(x, y)).then( + lambda fut1: fut.set_result(fut1.wait() + z) + ) + return fut + + +@rpc.functions.async_execution +def async_add_chained(to, x, y, z): + return rpc.rpc_async(to, torch.add, args=(x, y)).then( + lambda fut: fut.wait() + z + ) + + +@rpc.functions.async_execution +def async_add_chained_multi(to, x, num, step): + fut = rpc.rpc_async(to, torch.add, args=(x, 0)) + for _ in range(num): + fut = fut.then(lambda fut: fut.wait() + step) + return fut + + +@rpc.functions.async_execution +def async_add_nested(to, x, y, z): + return rpc.rpc_async(to, async_add, args=(to, x, y)).then( + lambda fut: fut.wait() + z + ) + + +@rpc.functions.async_execution +def async_add_multi_fanout(to, x, num, step): + futs = [] + for i in range(num): + if i == 0: + futs.append(rpc.rpc_async(to, torch.add, args=(x, step))) + 
else: + futs.append(rpc.rpc_async(to, torch.add, args=(0, step))) + + # TODO: use torch.futures.collect_all + lock = Lock() + state = {"cnt": 0, "ret": torch.zeros_like(x)} + ret_future = torch.futures.Future() + + def inc_and_set(fut): + with lock: + state["cnt"] += 1 + state["ret"] += fut.wait() + if state["cnt"] >= len(futs): + ret_future.set_result(state["ret"]) + + for fut in futs: + fut.then(inc_and_set) + + return ret_future + + +@rpc.functions.async_execution +def async_cuda_sleep_and_set_to_one(t): + device = t.device + original_stream = torch.cuda.current_stream(device) + new_stream = torch.cuda.Stream(device) + new_stream.wait_stream(original_stream) + with torch.cuda.stream(new_stream): + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + t.fill_(1) + fut = Future(devices=[device]) + fut.set_result(t) + return fut + + +@rpc.functions.async_execution +def async_cuda_nested_add(to, x, y, z): + def cb(fut): + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + return fut.value() + z + + return rpc.rpc_async(to, torch.add, args=(x, y)).then(cb) + + +# A custom Python class that contains a tensor, needed to see if we correctly +# use the Python pickler to extract tensors from non-IValue-convertible types. +class TensorWrapper: + __slots__ = ("tensor", "lock", "event", "thread") + + def __init__(self, t): + self.tensor = t + # Add one non-picklable field, to ensure it's ignored/skipped. 
+ self.lock = Lock() + self.event = torch.cuda.Event(enable_timing=True) + self.thread = threading.Thread() + self.thread.start() + + def increase(self, v): + with self.lock: + self.tensor += v + + def sum(self): + with self.lock: + self.event.record() + return self.tensor.sum() + + +class AsyncExecutionClass: + + @staticmethod + @rpc.functions.async_execution + def static_async_add(to, x, y, z): + return rpc.rpc_async(to, torch.add, args=(x, y)).then( + lambda fut: fut.wait() + z + ) + + @classmethod + @rpc.functions.async_execution + def class_async_add(cls, to, x, y, z): + ret_fut = torch.futures.Future() + rpc.rpc_async(to, torch.add, args=(x, y)).then( + lambda fut: ret_fut.set_result(fut.wait() + z) + ) + return ret_fut + + @rpc.functions.async_execution + def bound_async_add(self, to, x, y, z): + return rpc.rpc_async(to, torch.add, args=(x, y)).then( + lambda fut: fut.wait() + z + ) + + +def return_future(): + return torch.futures.Future() + + +class FooBackendOptions(rpc.RpcBackendOptions): + def __init__(self, init_method): + # Must call the __init__ of the superclass (and do so directly, + # without using super()) because... pybind. + rpc.RpcBackendOptions.__init__(self) + self.init_method = init_method + + +# load_tests from common_utils is used to automatically filter tests for +# sharding on sandcastle. 
This line silences flake warnings +load_tests = load_tests + + +class MyEmbeddingBagModel(torch.nn.Module): + def __init__(self, sparse): + super().__init__() + self.eb = torch.nn.EmbeddingBag( + 10, + 10, + sparse=sparse + ) + + def forward(self, x): + return self.eb(x) + + +class MyParameterServer: + def __init__(self, trainers): + self.lock = Lock() + self.trainers = trainers + self.iteration = 0 + self.updates = 0 + self.futures = [] + self.total = None + self.gradient = None + + @staticmethod + def get_gradient(rref): + return rref.local_value().gradient + + @staticmethod + @rpc.functions.async_execution + def average(rref, riteration, tensor): + self = rref.local_value() + fut = torch.futures.Future() + with self.lock: + if riteration > self.iteration: + self.iteration = riteration + self.updates = 0 + self.futures.clear() + self.futures.append(fut) + if self.total is None: + self.total = tensor + else: + self.total += tensor + self.updates += 1 + if self.trainers == self.updates: + self.gradient = self.total / float(self.trainers) + for fut in self.futures: + result = self.total / float(self.trainers) + fut.set_result(result) + return fut + + +class MyConvNetForMNIST(nn.Module): + def __init__(self, device): + super().__init__() + self.net = nn.Sequential( + nn.Conv2d(1, 16, 3, 1), + nn.ReLU(), + nn.Conv2d(16, 32, 3, 1), + nn.ReLU(), + nn.MaxPool2d(2), + nn.Flatten(1), + nn.Linear(4608, 128), + nn.ReLU(), + nn.Linear(128, 10), + ).to(device) + self.device = device + + def forward(self, x, is_rref=False): + x = x.to_here() if is_rref else x + with torch.cuda.stream(torch.cuda.current_stream(self.device)): + # intentionally adding delay to current CUDA stream + torch.cuda._sleep(10 * FIFTY_MIL_CYCLES) + return self.net(x) + + def __getstate__(self): + # return an empty dict to avoid inspecting the model contents on the + # owner + return {} + + +class RpcTestCommon: + def _run_func_in_mode(self, to, fn, mode, args=None, kwargs=None): + if mode == 
RPCExecMode.SYNC: + return rpc.rpc_sync(to, fn, args=args, kwargs=kwargs) + elif mode == RPCExecMode.ASYNC: + return rpc.rpc_async(to, fn, args=args, kwargs=kwargs).wait() + elif mode == RPCExecMode.REMOTE: + return rpc.remote(to, fn, args=args, kwargs=kwargs).to_here() + + def _self_py_udf_remote(self, worker_info, x, y, z): + rref = rpc.remote(worker_info, my_function, args=(x, y, z)) + self.assertEqual(rref.to_here(), x + y + z) + + def _self_remote_rref_as_rpc_arg(self, dst, x, y, z): + self_worker_info = rpc.get_worker_info() + rref = rpc.remote(self_worker_info, my_function, args=(x, y, z)) + fut = rpc.rpc_async(dst, add_rref_to_value, args=(rref, x)) + ret = rpc.rpc_sync(dst, add_rref_to_value, args=(rref, x + y)) + self.assertEqual(ret, x + y + z + x + y) + self.assertEqual(fut.wait(), x + y + z + x) + + def _self_remote_rref_as_remote_arg(self, dst, x, y, z): + self_worker_info = rpc.get_worker_info() + rref = rpc.remote(self_worker_info, my_function, args=(x, y, z)) + ret_rref = rpc.remote(dst, add_rref_to_value, args=(rref, x)) + self.assertEqual( + ret_rref.to_here(), x + y + z + x + ) + + def _world_size_one(self, a, b): + if self.rank == 0: + rpc.init_rpc( + name="me", + backend=self.rpc_backend, + rank=0, + world_size=1, + rpc_backend_options=self.rpc_backend_options, + ) + + def _rpc_sync(x, y): + expect = x * 2 + result = rpc.rpc_sync( + "me", + my_tensor_function, + args=(x, y) + ) + self.assertEqual(expect, result) + + def _rpc_async(x, y): + expect = x * 2 + result = rpc.rpc_async( + "me", + my_tensor_function, + args=(x, y) + ).wait() + self.assertEqual(expect, result) + + def _remote(x, y): + expect = x * 2 + result = rpc.remote( + "me", + my_tensor_function, + args=(x, y) + ).to_here() + self.assertEqual(expect, result) + + _rpc_sync(a, b) + _rpc_async(a, b) + _remote(a, b) + + rpc.shutdown() + + def _multi_rpc(self, sparse): + dst_rank = (self.rank + 1) % self.world_size + for i in range(20): + n = i + self.rank + 1 + if sparse: + x = 
build_sparse_tensor() * n + y = build_sparse_tensor() * n + else: + x = torch.ones(2, 2) + y = torch.ones(2, 2) + ret = rpc.rpc_sync( + worker_name(dst_rank), + torch.add, + args=(x, y), + ) + self.assertEqual(ret, x * 2) + + def _run_uneven_workload(self, f, x, num_repeat=30): + # worker0 drives and waits for worker1 and worker2 + # throughout the test. + if self.rank == 0: + self.assertTrue(self.world_size >= 3) + + # Phase 1: Only worker1 has workload. + dst = "worker1" + futs = [] + for _ in range(num_repeat): + fut = rpc.rpc_async(dst, f, args=(x,)) + futs.append(fut) + + for fut in torch.futures.collect_all(futs).wait(): + self.assertEqual(fut.wait(), 0) + + # Phase 2: Only worker2 has workload. + # If join is not correctly implemented, + # worker2 should be closed by now. + dst = "worker2" + futs = [] + for _ in range(num_repeat): + fut = rpc.rpc_async(dst, f, args=(x,)) + futs.append(fut) + + for val in torch.futures.wait_all(futs): + self.assertEqual(val, 0) + + def _wait_all_workers(self, f, x): + initialize_pg(self.file_init_method, self.rank, self.world_size) + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + self._run_uneven_workload(f, x) + + # worker0 calls this at the end after waiting for RPC responses. + # worker1/2 calls this immediately and has some works after it. + # worker3 calls this immediately and has no more work. + rpc.api._wait_all_workers() + + # Wait before proceeding to shutdown to ensure worker0 RPCs make + # it through to other workers. 
+ dist.barrier() + rpc.shutdown(graceful=False) + + def _wait_all_workers_twice(self, f, x): + initialize_pg(self.file_init_method, self.rank, self.world_size) + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + self._run_uneven_workload(f, x) + + # worker0 calls this at the end after waiting for RPC responses. + # worker1/2 calls this immediately and has some works after it. + # worker3 calls this immediately and has no more work. + rpc.api._wait_all_workers() + rpc.api._wait_all_workers() + + # Wait before proceeding to shutdown to ensure worker0 RPCs make + # it through to other workers. + dist.barrier() + rpc.shutdown(graceful=False) + + def _nested_rpc(self, f, expected): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), + f, + args=(worker_name(self.rank),), + ) + self.assertEqual(ret, expected) + + def _stress_test_rpc(self, f, repeat=1000, args=()): + n = self.rank + 1 + dst_rank = n % self.world_size + futs = [] + tik = time.time() + for _ in range(repeat): + fut = rpc.rpc_async(worker_name(dst_rank), f, args=args) + futs.append(fut) + + for val in torch.futures.wait_all(futs): + self.assertEqual(val, 0) + tok = time.time() + print( + f"Rank {self.rank} finished testing {repeat} times in {tok - tik} seconds." 
+ ) + + def _builtin_remote_ret(self, x, y, expected): + n = self.rank + 1 + dst_rank = n % self.world_size + rref = rpc.remote( + worker_name(dst_rank), + torch.add, + args=(x, y), + ) + self.assertEqual(rref.to_here(), expected) + + def _builtin_remote_self(self, x, y, expected): + rref = rpc.remote( + worker_name(self.rank), + torch.add, + args=(x, y), + ) + self.assertEqual(rref.local_value(), expected) + + def _test_multi_remote_call(self, fn, sparse, args_fn=lambda x, y: (), kwargs_fn=lambda x, y: {}): + m = 10 + n = self.rank + 1 + dst_rank = n % self.world_size + rrefs = [] + expected = [] + for i in range(m): + n = n + i + rrefs.append( + rpc.remote( + worker_name(dst_rank), + fn, + args=args_fn(n, sparse), + kwargs=kwargs_fn(n, sparse), + ) + ) + expected.append(fn(*args_fn(n, sparse), **kwargs_fn(n, sparse))) + + for i in range(m): + self.assertEqual(rrefs[i].to_here(), expected[i]) + + def _py_rref_args(self, a, b, x, y, expected): + n = self.rank + 1 + dst_rank = n % self.world_size + rref_a = rpc.remote( + worker_name(dst_rank), torch.add, args=(a, b) + ) + rref_b = rpc.remote( + worker_name(dst_rank), torch.add, args=(x, y) + ) + rref_c = rpc.remote( + worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b) + ) + self.assertEqual(rref_c.to_here(), expected) + + def _py_rref_args_user_share(self, a, b, c, x, y, z, expected): + n = self.rank + 1 + owner_rank = n % self.world_size + user_rank = (n + 1) % self.world_size + rref_a = rpc.remote( + worker_name(owner_rank), my_function, args=(a, b, c) + ) + rref_b = rpc.remote( + worker_name(owner_rank), my_function, args=(x, y, z) + ) + rref_c = rpc.remote( + worker_name(user_rank), my_rref_function, args=(rref_a, rref_b) + ) + self.assertEqual(rref_c.to_here(), expected) + + def _py_rpc_rref_args(self, a, b, c, x, y, z, expected): + n = self.rank + 1 + dst_rank = n % self.world_size + rref_a = rpc.remote( + worker_name(dst_rank), my_function, args=(a, b, c) + ) + rref_b = rpc.remote( + 
worker_name(dst_rank), my_function, args=(x, y, z) + ) + + c = rpc.rpc_sync( + worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b) + ) + self.assertEqual(c, expected) + + def _nested_remote(self, f, expected): + n = self.rank + 1 + dst_rank1 = n % self.world_size + dst_rank2 = (n + 1) % self.world_size + + rref = rpc.remote( + worker_name(dst_rank1), + f, + args=(worker_name(dst_rank2),), + ) + self.assertEqual(rref.to_here(), expected) + + def _nested_rref(self, f, expected1, expected2): + n = self.rank + 1 + dst_rank1 = n % self.world_size + dst_rank2 = (n + 1) % self.world_size + rref_of_rrefs = rpc.remote( + worker_name(dst_rank1), + f, + args=(worker_name(dst_rank2),), + ) + + # Say C has 2 OwnerRRefs. + # B has 2 UserRRefs to those 2 OwnerRRefs, respectively. + # This call is effectively A asking B to share its 2 UserRRefs. + rrefs = rref_of_rrefs.to_here() + + self.assertEqual(len(rrefs), 2) + self.assertEqual(rrefs[0].to_here(), expected1) + self.assertEqual(rrefs[1].to_here(), expected2) + + def _nested_rref_stress(self, f, expected1, expected2): + n = self.rank + 1 + dst_rank1 = n % self.world_size + dst_rank2 = (n + 1) % self.world_size + all_rrefs = [] + for _ in range(20): + all_rrefs.append( + rpc.remote( + worker_name(dst_rank1), + f, + args=(worker_name(dst_rank2),), + ) + ) + + for i in range(20): + rref_of_rrefs = all_rrefs[i] + rrefs = rref_of_rrefs.to_here() + self.assertEqual(len(rrefs), 2) + self.assertEqual(rrefs[0].to_here(), expected1) + self.assertEqual(rrefs[1].to_here(), expected2) + + def _trainer_func(self, rref, sparse): + m = MyEmbeddingBagModel(sparse=sparse) + loss_fn = nn.MSELoss() + for i in range(10): + outputs = m(torch.rand(10, 10).long()) + loss_fn(outputs, torch.rand(10, 10)).backward() + gradient = next(iter(m.parameters())).grad + fut = rref.rpc_async().average(rref, i, gradient) + gradient = fut.wait() + if gradient.is_sparse: + gradient = gradient.to_dense().double() + ps_gradient = 
rref.rpc_sync().get_gradient(rref) + if ps_gradient.is_sparse: + ps_gradient = ps_gradient.to_dense().double() + self.assertTrue(torch.equal(gradient, ps_gradient)) + + def _my_parameter_server(self, sparse): + ps_rref = RRef(MyParameterServer(self.world_size - 1)) + futures = [] + for index in range(1, self.world_size): + futures.append( + rpc.rpc_async( + worker_name((self.rank + index) % self.world_size), + self._trainer_func, + args=( + ps_rref, + sparse + ), + ) + ) + torch.futures.wait_all(futures) + + def _test_cuda_future_extraction(self, wrapper, unwrapper, sparse_tensor): + # We check proper CUDA stream synchronization by adding to the tensor + # in one stream to get the expected value, and reading it from another stream. + future = Future(devices=["cuda:0"]) + with torch.cuda.device("cuda:0"): + stream = torch.cuda.Stream() + another_stream = torch.cuda.Stream() + with torch.cuda.stream(stream): + if sparse_tensor: + tensor = build_sparse_tensor().to("cuda:0") + add_tensor = build_sparse_tensor().to("cuda:0") + expected_tensor = (tensor + add_tensor).coalesce() + else: + tensor = torch.zeros((100,), device="cuda:0") + add_tensor = torch.ones((100,), device="cuda:0") + expected_tensor = tensor + add_tensor + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + tensor += add_tensor + if sparse_tensor: + tensor = tensor.coalesce() + future.set_result(wrapper(tensor)) + with torch.cuda.stream(another_stream): + tensor = unwrapper(future.wait()) + if sparse_tensor: + self.assertTrue(torch.eq(tensor.indices(), expected_tensor.indices()).all().item()) + self.assertTrue(torch.eq(tensor.values(), expected_tensor.values()).all().item()) + self.assertEqual(tensor.size(), expected_tensor.size()) + else: + self.assertTrue(torch.eq(tensor, expected_tensor).all().item()) + + +class RpcTest(RpcAgentTestFixture, RpcTestCommon): + @dist_init + def test_worker_id(self): + n = self.rank + 1 + peer_rank = n % self.world_size + self_worker_info = rpc.get_worker_info() + 
peer_worker_info = rpc.get_worker_info(worker_name(peer_rank)) + + self.assertEqual(self_worker_info.name, worker_name(self.rank)) + self.assertEqual(peer_worker_info.name, worker_name(peer_rank)) + + with self.assertRaisesRegex(RuntimeError, "could not find destination"): + unknown_worker_id = rpc.get_worker_info("WorkerUnknown") + + @dist_init + def test_get_worker_infos(self): + worker_infos = rpc.api._get_current_rpc_agent().get_worker_infos() + + worker_names = {worker_info.name for worker_info in worker_infos} + expected_worker_names = { + worker_name(rank) for rank in range(self.world_size) + } + self.assertEqual(worker_names, expected_worker_names) + + worker_ids = {worker_info.id for worker_info in worker_infos} + expected_worker_ids = set(range(self.world_size)) + self.assertEqual(worker_ids, expected_worker_ids) + + @dist_init + def test_self_add(self): + self_worker_info = rpc.get_worker_info() + self_worker_name = worker_name(self.rank) + fut = rpc.rpc_async(self_worker_info, torch.add, args=(torch.ones(2, 2), 1)) + ret = rpc.rpc_sync(self_worker_info, torch.add, args=(torch.ones(2, 2), 1)) + self.assertEqual(fut.wait(), torch.ones(2, 2) + 1) + self.assertEqual(ret, torch.ones(2, 2) + 1) + + @dist_init + def test_send_to_rank(self): + dst_rank = (self.rank + 1) % self.world_size + + # Test dense tensor + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(torch.ones(2, 2), 1)) + self.assertEqual(ret, torch.ones(2, 2) + 1) + + # Test invalid ranks + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + with self.assertRaises(RuntimeError): + self._run_func_in_mode(self.world_size + 1, torch.add, exec_mode, args=(torch.ones(2, 2), 1)) + + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + with self.assertRaises(RuntimeError): + self._run_func_in_mode(-1, torch.add, exec_mode, args=(torch.ones(2, 2), 1)) + + for 
exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + with self.assertRaises(ValueError): + self._run_func_in_mode(dst_rank + 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1)) + + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + with self.assertRaises(ValueError): + self._run_func_in_mode(dst_rank - 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1)) + + @dist_init + def test_self_py_udf_remote(self): + self._self_py_udf_remote( + rpc.get_worker_info(), + torch.ones(2, 2), + 1, + 3 + ) + + @dist_init + def test_self_remote_rref_as_rpc_arg(self): + dst = worker_name((self.rank + 1) % self.world_size) + self._self_remote_rref_as_rpc_arg( + dst, + torch.ones(2, 2), + 1, + 3 + ) + + @dist_init + def test_self_remote_rref_as_self_rpc_arg(self): + self._self_remote_rref_as_rpc_arg( + rpc.get_worker_info(), + torch.ones(2, 2), + 1, + 3 + ) + + @dist_init + def test_self_remote_rref_as_remote_arg(self): + dst = worker_name((self.rank + 1) % self.world_size) + self._self_remote_rref_as_remote_arg( + dst, + torch.ones(2, 2), + 1, + 3 + ) + + @dist_init + def test_self_remote_rref_as_self_remote_arg(self): + self._self_remote_rref_as_remote_arg( + rpc.get_worker_info(), + torch.ones(2, 2), + 1, + 3 + ) + + @dist_init + def test_rref_proxy_non_exist(self): + dst = worker_name((self.rank + 1) % self.world_size) + rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3)) + msg = "has no attribute 'non_exist'" + with self.assertRaisesRegex(AttributeError, msg): + rref.rpc_sync().non_exist() + + with self.assertRaisesRegex(AttributeError, msg): + rref.rpc_async().non_exist().wait() + + with self.assertRaisesRegex(AttributeError, msg): + rref.remote().non_exist() + + def _test_rref_proxy_tensor(self, dst): + rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3)) + + expected = torch.ones(2, 2) + 1 + 3 + self.assertEqual(expected.size(), rref.rpc_sync().size()) + self.assertEqual(expected + 1, 
rref.rpc_async().add(1).wait()) + self.assertEqual(expected.view(1, 4), rref.remote().view(1, 4).to_here()) + + @dist_init + def test_rref_proxy_tensor(self): + self._test_rref_proxy_tensor(worker_name((self.rank + 1) % self.world_size)) + + @dist_init + def test_rref_proxy_tensor_self(self): + self._test_rref_proxy_tensor(rpc.get_worker_info()) + + @dist_init + def test_rref_proxy_reuse(self): + rref = rpc.remote( + worker_name((self.rank + 1) % self.world_size), + my_function, + args=(torch.ones(2, 2), 1, 3) + ) + expected = torch.ones(2, 2) + 1 + 3 + + proxy_rpc_sync = rref.rpc_sync() + proxy_rpc_async = rref.rpc_async() + proxy_remote = rref.remote() + + self.assertEqual(expected.size(), proxy_rpc_sync.size()) + self.assertEqual(expected + 1, proxy_rpc_sync.add(1)) + self.assertEqual(expected.view(1, 4), proxy_rpc_sync.view(1, 4)) + + self.assertEqual(expected.size(), proxy_rpc_async.size().wait()) + self.assertEqual(expected + 3, proxy_rpc_async.add(3).wait()) + self.assertEqual(expected.view(4, 1), proxy_rpc_async.view(4, 1).wait()) + + self.assertEqual(expected.size(), proxy_remote.size().to_here()) + self.assertEqual(expected + 5, proxy_remote.add(5).to_here()) + self.assertEqual(expected.view(-1), proxy_remote.view(-1).to_here()) + + def _test_rref_proxy_class(self, dst): + rref = rpc.remote(dst, MyClass, args=(7,)) + expected = MyClass(7) + self.assertEqual(expected.get_value(), rref.rpc_sync().get_value()) + self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait()) + self.assertEqual(expected.get_value(), rref.remote().get_value().to_here()) + + expected.increment_value(3) + self.assertEqual(None, rref.rpc_sync().increment_value(1)) + self.assertEqual(None, rref.rpc_async().increment_value(1).wait()) + self.assertEqual(None, rref.remote().increment_value(1).to_here()) + + self.assertEqual(expected.get_value(), rref.rpc_sync().get_value()) + self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait()) + 
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here()) + + self.assertEqual( + expected.my_instance_method(2), + rref.rpc_sync().my_instance_method(2) + ) + self.assertEqual( + expected.my_instance_method(3), + rref.rpc_async().my_instance_method(3).wait() + ) + self.assertEqual( + expected.my_instance_method(4), + rref.remote().my_instance_method(4).to_here() + ) + + self.assertEqual( + expected.my_static_method(9), + rref.rpc_sync().my_static_method(9) + ) + self.assertEqual( + expected.my_static_method(10), + rref.rpc_async().my_static_method(10).wait() + ) + self.assertEqual( + expected.my_static_method(11), + rref.remote().my_static_method(11).to_here() + ) + + self.assertEqual( + expected.my_class_method(2, torch.zeros(2, 2)), + rref.rpc_sync().my_class_method(2, torch.zeros(2, 2)) + ) + self.assertEqual( + expected.my_class_method(2, torch.ones(3, 3)), + rref.rpc_async().my_class_method(2, torch.ones(3, 3)).wait() + ) + self.assertEqual( + expected.my_class_method(2, torch.ones(4, 4)), + rref.remote().my_class_method(2, torch.ones(4, 4)).to_here() + ) + + @dist_init + def test_rref_proxy_class(self): + self._test_rref_proxy_class(worker_name((self.rank + 1) % self.world_size)) + + @dist_init + def test_rref_proxy_class_self(self): + self._test_rref_proxy_class(rpc.get_worker_info()) + + @mock.patch.object(torch.distributed.autograd, "_init") + @mock.patch.object(torch.distributed.rpc.api, "_set_and_start_rpc_agent") + @dist_init(setup_rpc=False) + def test_register_rpc_backend_and_set_and_start_rpc_backend( + self, mock_rpc_agent, mock_dist_autograd_init + ): + backend_name = "stub_backend" + + backend = rpc.backend_registry.register_backend( + backend_name, + _stub_construct_rpc_backend_options_handler, + _stub_init_rpc_backend_handler, + ) + + with self.assertRaisesRegex( + RuntimeError, "^RPC backend .+: already registered$" + ): + backend = rpc.backend_registry.register_backend( + backend_name, + 
_stub_construct_rpc_backend_options_handler, + _stub_init_rpc_backend_handler, + ) + + rpc.init_rpc( + name="worker1", + backend=backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + @dist_init(setup_rpc=False) + def test_duplicate_name(self): + with self.assertRaisesRegex(RuntimeError, "is not unique"): + store, _, _ = next( + torch.distributed.rendezvous( + self.init_method, rank=self.rank, world_size=self.world_size + ) + ) + rpc._init_rpc_backend( + backend=self.rpc_backend, + store=store, + name="duplicate_name", + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + @dist_init(setup_rpc=False) + def test_duplicate_name_2(self): + with self.assertRaisesRegex(RuntimeError, "is not unique"): + rpc.init_rpc( + name=worker_name(self.rank % (self.world_size - 1)), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + @dist_init(setup_rpc=False) + def test_reinit(self): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + initialize_pg(self.file_init_method, self.rank, self.world_size) + # Wait for all init to complete. + dist.barrier() + + # TODO: with TCP init, rank 0 raises Address already in use because + # rank 0 is the start daemon and the store is created before checking if + # RPC is already initialized in init_rpc. 
+ if os.environ.get("RPC_INIT_WITH_TCP", None) == "1" and self.rank == 0: + expected_reinit_err = "Address already in use" + else: + expected_reinit_err = "is already initialized" + + with self.assertRaisesRegex(RuntimeError, expected_reinit_err): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + rpc.shutdown() + + @dist_init(setup_rpc=False) + def test_pg_init_no_rpc_init(self): + dist.init_process_group( + backend='gloo', + init_method=self.file_init_method, + rank=self.rank, + world_size=self.world_size) + + class MyModel(torch.nn.Module): + def __init__(self) -> None: + super().__init__() + self.lin = torch.nn.Linear(3, 4) + + def forward(self, x): + return self.lin(x) + + model = MyModel() + model.train() + model = torch.nn.parallel.DistributedDataParallel(model) + + with self.assertRaisesRegex(RuntimeError, 'Current RPC agent is not set! Did you initialize the RPC framework'): + params = [] + for param in model.parameters(): + params.append(RRef(param)) + + def test_world_size_one(self): + self._world_size_one( + torch.ones(2, 2), + torch.ones(2, 2) + ) + + @dist_init(setup_rpc=False) + def test_invalid_names(self): + + worker_id = 0 + with self.assertRaisesRegex(RuntimeError, "Worker name must match"): + info = WorkerInfo("abc*", worker_id) + + with self.assertRaisesRegex(RuntimeError, "Worker name must match"): + info = WorkerInfo(" ", worker_id) + + with self.assertRaisesRegex(RuntimeError, "must be non-empty"): + info = WorkerInfo("", worker_id) + + # If the number in the message does not match, it is likely that the + # value of MAX_NAME_LEN in RPC WorkerInfo has changed. 
+ with self.assertRaisesRegex(RuntimeError, "shorter than 128"): + info = WorkerInfo("".join(["a" for i in range(500)]), worker_id) + + # Test that WorkerInfo can be pickled and sent in RPC call + @dist_init + def test_worker_info_pickle(self): + dst_rank = (self.rank + 1) % self.world_size + worker_info = rpc.api.get_worker_info() + ret = rpc.rpc_sync(worker_name(dst_rank), identity, args=(worker_info,)) + self.assertEqual(ret, worker_info) + + @dist_init + def test_add(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + self.assertEqual(ret, torch.ones(n, n) * 2) + + @staticmethod + def return_callee_id(): + return rpc.get_worker_info().id + + @dist_init + def test_int_callee(self): + dst_rank = (self.rank + 1) % self.world_size + ret = rpc.rpc_sync(dst_rank, RpcTest.return_callee_id) + self.assertEqual(ret, dst_rank) + + @dist_init + def test_add_with_id(self): + n = self.rank + 1 + dst_rank = n % self.world_size + workder_info = rpc.get_worker_info(worker_name(dst_rank)) + + ret = rpc.rpc_sync( + workder_info, torch.add, args=(torch.ones(n, n), torch.ones(n, n)) + ) + self.assertEqual(ret, torch.ones(n, n) * 2) + + @dist_init + def test_scalar_add(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), torch.add, args=(torch.ones(n, n), n) + ) + self.assertEqual(ret, (torch.ones(n, n) + n)) + + @dist_init + def test_async_add(self): + n = self.rank + 1 + dst_rank = n % self.world_size + fut = rpc.rpc_async( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + @dist_init + def test_nonzero(self): + n = self.rank + 1 + dst_rank = n % self.world_size + x = torch.ones(self.world_size, self.world_size) + x[self.rank][self.rank] = 0 + ret = rpc.rpc_sync(worker_name(dst_rank), torch.nonzero, args=(x,)) + 
self.assertEqual(ret, x.nonzero()) + + @dist_init + def test_multi_rpc(self): + self._multi_rpc(False) + + @dist_init + def test_future_wait_twice(self): + dst = worker_name((self.rank + 1) % self.world_size) + futs = [] + for i in range(20): + futs.append(rpc.rpc_async(dst, raise_func)) + + with self.assertRaisesRegex(ValueError, "Expected error"): + torch.futures.wait_all(futs) + + for fut in futs: + with self.assertRaisesRegex(ValueError, "Expected error"): + fut.wait() + + @dist_init(setup_rpc=False) + def test_wait_all_workers_timeout(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + og_func = rpc.api._wait_all_workers + + def wait_all_workers_sleep(timeout): + rpc.api._all_gather(SlowPickleClass(0.5), timeout=timeout) + + rpc.api._wait_all_workers = wait_all_workers_sleep + + try: + with self.assertRaisesRegex(RuntimeError, ''): + rpc.shutdown(graceful=True, timeout=0.01) + finally: + rpc.api._wait_all_workers = og_func + dist.barrier() + + def test_wait_all_workers_dense(self): + self._wait_all_workers(heavy_rpc, torch.ones(100, 100)) + + def test_wait_all_workers_twice_dense(self): + self._wait_all_workers_twice(heavy_rpc, torch.ones(100, 100)) + + @dist_init + def test_all_gather(self): + info = rpc.get_worker_info() + results = rpc.api._all_gather(info.id) + expected = {} + for info in rpc._get_current_rpc_agent().get_worker_infos(): + expected[info.name] = info.id + + self.assertEqual(expected, results) + + @dist_init + def test_all_gather_timeout(self): + rpc._set_rpc_timeout(0.1) + + if self.rank == 0: + with self.assertRaisesRegex( + RuntimeError, + "timed out in _all_gather after 0\\.10 seconds" + ): + rpc.api._all_gather(SlowPickleClass(0.5)) + else: + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, 
expected_error): + rpc.api._all_gather(SlowPickleClass(0.5)) + + def _test_barrier_helper(self, info, names, multi_threaded=False): + names = sorted(names) + leader = names[0] + rpc.rpc_sync(leader, _reset_count) + if not multi_threaded and info.name == leader: + self.assertEqual(_rpc_barrier_count, 0) + rpc.api._barrier(names) + rpc.rpc_sync(leader, _increment_count) + rpc.api._barrier(names) + if not multi_threaded and info.name == leader: + self.assertEqual(_rpc_barrier_count, len(names)) + + @dist_init + def test_rpc_barrier_all(self): + # Test rpc barrier when called with full list of workers + info = rpc.get_worker_info() + all_worker_info = rpc._get_current_rpc_agent().get_worker_infos() + names = [worker.name for worker in all_worker_info] + self._test_barrier_helper(info, names) + + @dist_init + def test_rpc_barrier_subset(self): + # Test rpc barrier when processes are called with different subsets of the full list + info = rpc.get_worker_info() + all_worker_info = rpc._get_current_rpc_agent().get_worker_infos() + if info.id % 2: + names = [worker.name for worker in all_worker_info if worker.id % 2] + else: + names = [worker.name for worker in all_worker_info if not worker.id % 2] + self._test_barrier_helper(info, names) + + @dist_init + def test_rpc_barrier_partial_subset(self): + # Test rpc barrier when some processes are not involved in the barrier + info = rpc.get_worker_info() + all_worker_info = rpc._get_current_rpc_agent().get_worker_infos() + if info.id % 2: + names = [worker.name for worker in all_worker_info if worker.id % 2] + else: + names = [f"worker{info.id}"] + self._test_barrier_helper(info, names) + + @dist_init + def test_rpc_barrier_multithreaded(self): + # This tests validates the implementation of barrier when multiple threads call into it + # We only need to check that it does not hang in this case + info = rpc.get_worker_info() + all_worker_info = rpc._get_current_rpc_agent().get_worker_infos() + names = [worker.name for worker in 
all_worker_info] + threads = [] + for _ in range(3): + th = threading.Thread(target=self._test_barrier_helper, args=(info, names, True)) + threads.append(th) + th.start() + for th in threads: + th.join() + + @dist_init + def test_graceful_shutdown_with_uneven_workload(self): + """Test graceful termination.""" + self._run_uneven_workload(heavy_rpc, torch.ones(100, 100)) + + @dist_init(setup_rpc=False) + def test_shutdown_followed_by_rpc(self): + # Initialize RPC. + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + self.assertEqual(ret, torch.ones(n, n) * 2) + rpc.shutdown() + + with self.assertRaisesRegex(RuntimeError, "^RPC has not been initialized"): + rpc.rpc_sync( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + + @dist_init + def test_expected_src(self): + dst_rank = (self.rank + 1) % self.world_size + expected_src_rank = (self.rank - 1) % self.world_size + ret = rpc.rpc_sync(worker_name(dst_rank), set_value, args=(self.rank,)) + value = VALUE_FUTURE.result() + self.assertEqual(value, expected_src_rank) + + @dist_init + def test_py_built_in(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync(worker_name(dst_rank), min, args=(n, n + 1, n + 2)) + self.assertEqual(ret, min(n, n + 1, n + 2)) + + @dist_init + def test_py_user_defined(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), + my_function, + kwargs={"a": n, "b": n + 1, "c": n + 2}, + ) + self.assertEqual(ret, my_function(n, n + 1, n + 2)) + + def test_build_rpc_profiling_key(self): + # Tests that the name that shows up as an Event in profiling RPCs has all + # the necessary information. 
+ for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + rpc_profiling_key = _build_rpc_profiling_key( + exec_mode, "foo", "worker0", "worker1" + ) + self.assertIn(exec_mode.value, rpc_profiling_key) + self.assertIn("foo", rpc_profiling_key) + self.assertIn("worker0", rpc_profiling_key) + self.assertIn("worker1", rpc_profiling_key) + + def check_profiling_info(self, self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode): + self.assertTrue(self_worker_name in rpc_event.name) + self.assertTrue(dst_worker_name in rpc_event.name) + if isinstance(func, torch.jit.ScriptFunction): + self.assertTrue(torch._jit_internal._qualified_name(func) in rpc_event.name) + else: + self.assertTrue(func.__name__ in rpc_event.name) + self.assertTrue(rpc_exec_mode.value in rpc_event.name) + self.assertEqual(rpc_event.count, 1) + + @dist_init + def test_profiler_rpc_record_shapes(self): + if self.rank != 1: + return + dst = (self.rank + 1) % self.world_size + dst_worker = worker_name(dst) + t1, t2 = torch.ones(100), torch.ones(100) + with _profile(record_shapes=True) as prof: + rpc.rpc_sync(dst_worker, torch.add, args=(t1, t2)) + + function_events = prof.function_events + remote_events = [event for event in function_events if event.is_remote] + remote_add_event = next( + event for event in remote_events if "aten::add" in event.name + ) + remote_add_input_shapes = remote_add_event.input_shapes + # Run profiler on equivalent local op and validate shapes are the same. 
+ with _profile(record_shapes=True) as prof: + torch.add(t1, t2) + + local_function_events = prof.function_events + local_add_event = next( + event for event in local_function_events if "aten::add" in event.name + ) + local_add_input_shapes = local_add_event.input_shapes + self.assertEqual(remote_add_input_shapes, local_add_input_shapes) + + @dist_init + def test_profiler_rpc_memory(self): + if self.rank != 1: + return + dst = (self.rank + 1) % self.world_size + dst_worker = worker_name(dst) + with _profile(profile_memory=True) as p: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=()) + res = fut.wait() + + function_events = p.function_events + event_cpu_mem_usages = {event.cpu_memory_usage for event in function_events} + # if cpu_memory_usage was not propagated over the wire, this set would + # only contain 0 (indicates no memory being profiled) + self.assertNotEqual({0}, event_cpu_mem_usages) + # No memory profiled if profile_memory=False + with _profile(profile_memory=False) as p: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=()) + res = fut.wait() + + function_events = p.function_events + event_cpu_mem_usages = {event.cpu_memory_usage for event in function_events} + self.assertEqual({0}, event_cpu_mem_usages) + + @dist_init + def test_profiler_export_trace(self): + if self.rank != 1: + return + dst = (self.rank + 1) % self.world_size + dst_worker = worker_name(dst) + with _profile() as p: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=()) + res = fut.wait() + + events = p.function_events + with TemporaryFileName() as fname: + path = fname + p.export_chrome_trace(path) + with open(path) as f: + trace = json.load(f) + event_names = [event['name'] for event in trace] + for expected_event_name in EXPECTED_REMOTE_EVENTS + [RPCExecMode.ASYNC.value]: + event_exists = any(expected_event_name in event_name for event_name in event_names) + self.assertTrue(event_exists) + + @dist_init + def test_profiler_rpc_key_names(self): + # tests 
that remote events are properly prefixed with the RPC profiling key. + if self.rank != 1: + return + + # Spawn multiple threads that send RPCs to ensure keys are correctly + # prefixed when there are multiple RPCs being created/in flight at the + # same time. + dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank] + + def rpc_with_profiling(dst_worker): + with _profile() as prof: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=()) + fut.wait() + + events = prof.function_events + remote_event_names = { + event.name: event for event in events if event.is_remote + } + rpc_profiling_key = _build_rpc_profiling_key( + RPCExecMode.ASYNC, + udf_with_torch_ops.__qualname__, + worker_name(self.rank), + dst_worker, + ) + + remote_event_name_set = set(EXPECTED_REMOTE_EVENTS) + for name, event in remote_event_names.items(): + # Ensure that we have the expected key as part of the remote + # event. + self.assertTrue(name.startswith(rpc_profiling_key)) + self.assertTrue(event.is_remote) + self.assertTrue(event.node_id == rpc.get_worker_info(dst_worker).id) + # Ensure that the remote event name also contains the operator. + operator_name_substr = name[len(rpc_profiling_key) :] + # Note: we don't assert that every remote event needs to be + # in the above set, the set is just a representative set of + # what we expect to see. The profiler can change and add more + # events, but we should always expect to see this representative + # set. + matching_event = { + remote_event_name + for remote_event_name in remote_event_name_set + if remote_event_name in operator_name_substr + } + remote_event_name_set -= matching_event + + # The set should be empty, otherwise its contained elements did + # not show up in the remote profiler output. 
+ self.assertTrue( + remote_event_name_set == set(), + f"Expected {remote_event_name_set} to be included in remote profiler output.", + ) + + for dst in dst_ranks: + dst_worker = worker_name(dst) + num_parallel_rpcs = 2 + with concurrent.futures.ThreadPoolExecutor( + max_workers=num_parallel_rpcs + ) as executor: + futs = [ + executor.submit(rpc_with_profiling, dst_worker) + for _ in range(num_parallel_rpcs) + ] + # Wait for workers to finish test + for fut in futs: + fut.result() + + def _run_test_profiler_remote_events_profiled(self): + # Tests that we can successfully invoke the profiler on a remote node, + # and collect the remote events back in the local profiler. + if self.rank != 1: + return + + dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank] + for dst in dst_ranks: + dst_worker = worker_name(dst) + with _profile() as prof: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=()) + ret = fut.wait() + + events = prof.function_events + + rpc_event = get_function_event(events, RPCExecMode.ASYNC.value) + self.check_profiling_info( + worker_name(self.rank), + dst_worker, + udf_with_torch_ops, + rpc_event, + RPCExecMode.ASYNC, + ) + + remote_events = {event.name: event for event in events if event.is_remote} + rpc_profiling_key = _build_rpc_profiling_key( + RPCExecMode.ASYNC, + udf_with_torch_ops.__qualname__, + worker_name(self.rank), + worker_name(dst), + ) + + for expected_remote_event_name in EXPECTED_REMOTE_EVENTS: + expected_key = rpc_profiling_key + REMOTE_OP_STR + expected_remote_event_name + self.assertTrue(expected_key in remote_events) + remote_event = remote_events[expected_key] + # Remote event should have a node ID corresponding to the worker + # it ran on. + self.assertEqual(remote_event.node_id, dst) + + # Validate order remote events show up in profiling output. 
+ def convert_remote_to_local(event_name): + remote_op_key = rpc_profiling_key + REMOTE_OP_STR + return event_name[ + event_name.find(remote_op_key) + + len(remote_op_key) : + ] + + remote_events_list = [ + convert_remote_to_local(event.name) + for event in events + if convert_remote_to_local(event.name) in EXPECTED_REMOTE_EVENTS + ] + self.assertEqual( + set(remote_events_list), + set(EXPECTED_REMOTE_EVENTS), + f"Mismatch between profiled events: {set(remote_events_list)} and expected events: {set(EXPECTED_REMOTE_EVENTS)}", + ) + + @dist_init + def test_profiler_remote_events_profiled(self): + self._run_test_profiler_remote_events_profiled() + + @dist_init + def test_profiler_remote_events_profiled_single_threaded(self): + self._run_test_profiler_remote_events_profiled() + + def run_profiling_workload(self, dst): + fut = rpc.rpc_async( + worker_name(dst), + torch.mul, + args=( + torch.tensor(1.0, requires_grad=True), + torch.tensor(1.0, requires_grad=True), + ), + ) + fut.wait() + + def _run_rpc_profiling_async_function(self, device="cpu"): + if self.rank != 1: + return + + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + x = torch.ones(2) + y = torch.ones(2) + with _profile() as prof: + ret = rpc.rpc_async( + dst1, slow_async_add, args=(dst2, x, y, device), timeout=20 + ) + out = ret.wait() + + function_events = prof.function_events + # slow_async_add resulted in an RPC from dst1 -> dst2, so this should be + # recorded. 
+ key_prefix = _build_rpc_profiling_key( + RPCExecMode.ASYNC, slow_async_add.__qualname__, worker_name(self.rank), dst1 + ) + + nested_rpc_key_prefix = _build_rpc_profiling_key( + RPCExecMode.ASYNC, slow_add.__qualname__, dst1, dst2 + ) + expected_key = key_prefix + REMOTE_OP_STR + nested_rpc_key_prefix + remote_events = [event for event in function_events if event.is_remote] + rpc_remote_event = [ + event for event in remote_events if event.name == expected_key + ] + self.assertEqual(1, len(rpc_remote_event)) + rpc_remote_event = rpc_remote_event[0] + self.assertEqual(rpc_remote_event.node_id, (self.rank + 1) % self.world_size) + # slow_async_add's RPC does an add on dst2, which should be reflected as well. + remote_add_key = ( + expected_key + REMOTE_OP_STR + torch.jit._builtins._find_builtin(torch.add) + ) + remote_add_event = [ + event for event in remote_events if event.name == remote_add_key + ] + self.assertEqual(1, len(remote_add_event)) + remote_add_event = remote_add_event[0] + # Validate that node_id is dst2. + self.assertEqual(remote_add_event.node_id, (self.rank + 2) % self.world_size) + + @dist_init + def test_rpc_profiling_async_function(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + self._run_rpc_profiling_async_function() + if torch.cuda.is_available(): + dist.barrier() + self._run_rpc_profiling_async_function(device="cuda:0") + + @dist_init + def test_rpc_profiling_async_function_single_threaded(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + self._run_rpc_profiling_async_function() + if torch.cuda.is_available(): + dist.barrier() + self._run_rpc_profiling_async_function(device="cuda:0") + + @dist_init + def test_rpc_profiling_remote_record_function(self): + # test that functions run over RPC with record_function show the expected + # profiled block. 
+ if self.rank != 1: + return + dst_ranks = [i for i in range(self.world_size) if i != self.rank] + for dst_rank in dst_ranks: + dst_worker = worker_name(dst_rank) + with _profile() as prof: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=(-1, True)) + fut.wait() + + function_events = prof.function_events + record_function_remote_event = [ + evt for evt in function_events if "##forward##" in evt.name + ] + self.assertEqual(1, len(record_function_remote_event)) + record_function_remote_event = record_function_remote_event[0] + self.assertEqual(record_function_remote_event.node_id, dst_rank) + # cpu_children only returns direct children, so here we get all + # children recursively. + + def get_cpu_children(event): + if not event.cpu_children: + return [] + cpu_children = event.cpu_children + for e in event.cpu_children: + cpu_children.extend(get_cpu_children(e)) + return cpu_children + + remote_children = get_cpu_children(record_function_remote_event) + # Get local children and verify parity. 
+ with _profile() as prof: + udf_with_torch_ops(-1, True) + + local_function_events = prof.function_events + local_record_function_event = next( + evt for evt in local_function_events if "##forward##" in evt.name + ) + local_children = get_cpu_children(local_record_function_event) + local_children_names = [ + evt.name for evt in local_children + ] + + REMOTE_OP_STR = "#remote_op: " + + def convert_remote_to_local(event_name): + remote_op_key = REMOTE_OP_STR + return event_name[ + event_name.find(remote_op_key) + len(remote_op_key) : + ] + + for evt in remote_children: + local_name = convert_remote_to_local(evt.name) + self.assertTrue(local_name in local_children_names) + + def validate_profiling_workload(self, dst, prof): + + def convert_remote_to_local(event_name): + return event_name[event_name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR) :] + + events = prof.function_events + remote_events = { + convert_remote_to_local(event.name): event + for event in events + if event.is_remote + } + self.assertTrue("aten::mul" in remote_events) + remote_mul_event = remote_events["aten::mul"] + self.assertEqual(remote_mul_event.node_id, dst) + self.check_profiling_info( + worker_name(self.rank), + worker_name(dst), + torch.mul, + remote_mul_event, + RPCExecMode.ASYNC, + ) + + def _run_test_profiler_with_autograd_context(self): + dst = (self.rank + 1) % self.world_size + if self.rank == 1: + # Cases where we can double wrap messages with profiling information and autograd info. + with dist_autograd.context() as context_id: + with _profile() as prof: + self.run_profiling_workload(dst) + + self.validate_profiling_workload(dst, prof) + + # Ensure that flipped order of ctx managers results in events being + # recorded as expected. 
+ with _profile() as prof: + with dist_autograd.context() as context_id: + self.run_profiling_workload(dst) + + self.validate_profiling_workload(dst, prof) + + @dist_init + def test_profiler_with_autograd_context_single_threaded(self): + self._run_test_profiler_with_autograd_context() + + @dist_init + def test_profiler_with_autograd_context(self): + self._run_test_profiler_with_autograd_context() + + def _profiler_test_with_rpc( + self, rpc_exec_mode, func, args, use_record_function=False, dst=None, kineto_profile=False + ): + dst = dst if dst is not None else (self.rank + 1) % self.world_size + + # only run profiler on rank 1. + p = _profile if not kineto_profile else torch.profiler.profile # kineto + if self.rank == 1: + with p() as prof: + record_function_ctx_mgr = ( + contextlib.nullcontext() + if not use_record_function + else torch.autograd.profiler.record_function( + "foo" + ) + ) + with record_function_ctx_mgr as rf: + if rpc_exec_mode == RPCExecMode.SYNC: + rpc.rpc_sync(worker_name(dst), func, args=args) + elif rpc_exec_mode == RPCExecMode.ASYNC: + fut = rpc.rpc_async(worker_name(dst), func, args=args) + if kineto_profile: + # Ensure multiple async RPCs don't cause issues. + # Would have raised + # "RuntimeError: Cannot call + # RemoteProfilerManager::setCurrentKey when current + # key is already set." error if RPC profiling was + # not disabled properly for kineto. + fut2 = rpc.rpc_async(worker_name(dst), func, args=args) + fut2.wait() + fut.wait() + else: + self.assertTrue(rpc_exec_mode == RPCExecMode.REMOTE) + rref = rpc.remote(worker_name(dst), func, args=args) + rref.to_here() + # To avoid flakiness, wait for the RRef to be profiled. This + # means that we received the acknowledgement of successful + # creation on the owner and ran the callbacks responsible + # for recording the profiling event. 
+ rref._get_profiling_future().wait() + + events = prof.function_events if not kineto_profile else prof.events() + if kineto_profile: + # RPC profiling is disabled so there should be no rpc related + # events. + with self.assertRaises(IndexError): + get_function_event(events, rpc_exec_mode.value) + + return + + rpc_event = get_function_event(events, rpc_exec_mode.value) + # verify Node ID for this rpc event. + self.assertEqual(rpc_event.node_id, self.rank) + # Ensure recording of remote events. + remote_events = {event for event in events if event.node_id == dst} - {rpc_event} + self.assertGreaterEqual(len(remote_events), 1) + for remote_event in remote_events: + self.assertEqual(remote_event.node_id, dst) + + if use_record_function: + scope_event = get_function_event(events, "foo") + # Since RPC call is within the scope, its CPU interval should be + # contained within foo's interval. + self.assertLessEqual(scope_event.time_range.start, rpc_event.time_range.start) + self.assertGreaterEqual(scope_event.time_range.end, rpc_event.time_range.end) + # the sender, dest worker, function run, and type of RPC should all + # be recorded. + self_worker_name = worker_name(self.rank) + dst_worker_name = worker_name(dst) + self.check_profiling_info(self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode) + if use_record_function: + # verify order by ensuring that the outer context comes + # before the rpc event. 
+ foo_event_ix = next(i for i, event in enumerate(events) if "foo" in event.name) + rpc_event_idx = next(i for i, event in enumerate(events) if rpc_exec_mode.value in event.name) + self.assertLess(foo_event_ix, rpc_event_idx) + + def _run_test_profiler_with_sync_rpc_udf(self): + self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,)) + self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,), + use_record_function=True) + + @dist_init + def test_profiler_with_sync_rpc_udf(self): + self._run_test_profiler_with_sync_rpc_udf() + + @dist_init + def test_profiler_with_sync_rpc_udf_single_threaded(self): + self._run_test_profiler_with_sync_rpc_udf() + + def _run_test_profiler_with_sync_rpc_builtin(self): + self._profiler_test_with_rpc( + RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1)) + ) + self._profiler_test_with_rpc( + RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1)), + use_record_function=True + ) + + @dist_init + def test_profiler_with_sync_rpc_builtin(self): + self._run_test_profiler_with_sync_rpc_builtin() + + @dist_init + def test_profiler_with_sync_rpc_builtin_single_threaded(self): + self._run_test_profiler_with_sync_rpc_builtin() + + def _run_test_profiler_with_async_rpc_udf(self): + self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,)) + self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,), + use_record_function=True) + # Test to ensure that kineto profiler enabled in RPC does not enable + # RPC profiling (it is unsupported) and does not result in issues. 
+ self._profiler_test_with_rpc( + RPCExecMode.ASYNC, my_sleep_func, args=(1,), kineto_profile=True + ) + + @dist_init + def test_profiler_with_async_rpc_udf(self): + self._run_test_profiler_with_async_rpc_udf() + + @dist_init + def test_profiler_with_async_rpc_udf_single_threaded(self): + self._run_test_profiler_with_async_rpc_udf() + + def _run_test_profiler_with_async_rpc_builtin(self): + self._profiler_test_with_rpc( + RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1)) + ) + self._profiler_test_with_rpc( + RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1)), + use_record_function=True + ) + + @dist_init + def test_profiler_with_async_rpc_builtin(self): + self._run_test_profiler_with_async_rpc_builtin() + + @dist_init + def test_profiler_with_async_rpc_builtin_single_threaded(self): + self._run_test_profiler_with_async_rpc_builtin() + + def _run_test_profiler_with_remote_udf(self): + self._profiler_test_with_rpc(RPCExecMode.REMOTE, my_sleep_func, args=(1,)) + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, my_sleep_func, args=(1,), use_record_function=True + ) + # test remote to self + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, my_sleep_func, args=(1,), dst=self.rank + ) + + @dist_init + def test_profiler_with_remote_udf(self): + self._run_test_profiler_with_remote_udf() + + @dist_init + def test_profiler_with_remote_udf_single_threaded(self): + self._run_test_profiler_with_remote_udf() + + def _run_test_profiler_with_remote_builtin(self): + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1)) + ) + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1)), + use_record_function=True + ) + # test remote to self + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, + torch.mul, + args=(torch.ones(1), torch.ones(1)), + dst=self.rank, + ) + + @dist_init + def test_profiler_with_remote_builtin(self): + 
self._run_test_profiler_with_remote_builtin() + + @dist_init + def test_profiler_with_remote_builtin_single_threaded(self): + self._run_test_profiler_with_remote_builtin() + + def _run_test_profiler_with_script_async_rpc(self): + self._profiler_test_with_rpc( + RPCExecMode.ASYNC, my_script_func, args=(torch.tensor(1),) + ) + self._profiler_test_with_rpc( + RPCExecMode.ASYNC, + my_script_func, + args=(torch.tensor(1),), + use_record_function=True, + ) + + @dist_init + def test_profiler_with_script_async_rpc(self): + self._run_test_profiler_with_script_async_rpc() + + @dist_init + def test_profiler_with_script_async_rpc_single_threaded(self): + self._run_test_profiler_with_script_async_rpc() + + def _run_test_profiler_with_script_sync_rpc(self): + self._profiler_test_with_rpc( + RPCExecMode.SYNC, my_script_func, args=(torch.tensor(1),) + ) + self._profiler_test_with_rpc( + RPCExecMode.SYNC, + my_script_func, + args=(torch.tensor(1),), + use_record_function=True, + ) + + @dist_init + def test_profiler_with_script_sync_rpc(self): + self._run_test_profiler_with_script_sync_rpc() + + @dist_init + def test_profiler_with_script_sync_rpc_single_threaded(self): + self._run_test_profiler_with_script_sync_rpc() + + def _run_test_profiler_with_script_remote_rpc(self): + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),) + ) + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, + my_script_func, + args=(torch.tensor(1),), + use_record_function=True, + ) + # test remote to self + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),), dst=self.rank + ) + + @dist_init + def test_profiler_with_script_remote_rpc(self): + self._run_test_profiler_with_script_remote_rpc() + + @dist_init + def test_profiler_with_script_remote_rpc_single_threaded(self): + self._run_test_profiler_with_script_remote_rpc() + + def _assert_top_level_events(self, process_global_events, expected_top_level_event_names): + 
top_level_event_names = [] + for thread_local_events in process_global_events: + # Get top-level events from all events happened on a thread. + last_end_time = 0 + for event in thread_local_events: + event_name = event.name + time_range = event.time_range + if time_range.start > last_end_time: + top_level_event_names.append(event_name) + last_end_time = time_range.end + top_level_event_names = sorted(top_level_event_names) + expected_top_level_event_names = sorted(expected_top_level_event_names) + self.assertEqual( + top_level_event_names, + expected_top_level_event_names, + f"Expected events {expected_top_level_event_names}, but got {top_level_event_names}", + ) + + @dist_init + def test_server_process_global_profiler(self): + if self.rank != 0: + return + + dst_rank = (self.rank + 1) % self.world_size + dst_worker_name = worker_name(dst_rank) + + x = torch.tensor(1) + y = torch.tensor(2) + + outer_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile) + outer_profile_rref.rpc_sync().__enter__() + rpc.rpc_sync(dst_worker_name, torch.add, (x, y)) + inner_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile) + inner_profile_rref.rpc_sync().__enter__() + rpc.rpc_sync(dst_worker_name, torch.sub, (x, y)) + inner_profile_rref.rpc_sync().__exit__(None, None, None) + outer_profile_rref.rpc_sync().__exit__(None, None, None) + + inner_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (inner_profile_rref,)) + expected_inner_events = ['aten::sub'] + expected_outer_events = expected_inner_events + ['aten::add'] + + self._assert_top_level_events(inner_events, expected_inner_events) + outer_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (outer_profile_rref,)) + self._assert_top_level_events(outer_events, expected_outer_events) + + inner_profile_rref.rpc_sync().key_averages() + outer_profile_rref.rpc_sync().key_averages() + + @dist_init + def test_async_record_function_double_end_callbacks(self): + 
num_sleep_seconds = 1 + if self.rank == 1: + # Validate that calling the function twice results in an error. + with _profile() as pf: + with torch.autograd.profiler.record_function("foo") as rf: + fut = rpc.rpc_async( + worker_name(0), my_sleep_func, args=(num_sleep_seconds,) + ) + rf._call_end_callbacks_on_future(fut) + with self.assertRaisesRegex( + RuntimeError, "can only be called once." + ): + rf._call_end_callbacks_on_future(fut) + fut.wait() + + @dist_init + def test_async_record_function_legacy(self): + # Test the legacy _record_function ops work + # Note: These exist for backward compatibility with TorchScript + num_sleep_seconds = 1 + if self.rank == 1: + with _profile() as pf: + try: + handle = torch.ops.profiler._record_function_enter("foo", None) + fut = rpc.rpc_async( + worker_name(0), my_sleep_func, args=(num_sleep_seconds,) + ) + torch.ops.profiler._call_end_callbacks_on_jit_fut(handle, fut) + finally: + torch.ops.profiler._record_function_exit(handle) + + fut.wait() + + @dist_init + def test_async_record_function_cbs_jit_call(self): + if self.rank == 1: + with _profile() as pf: + key = _build_rpc_profiling_key( + RPCExecMode.ASYNC, + torch._jit_internal._qualified_name(my_script_func), + "worker1", + "worker0", + ) + with torch.autograd.profiler.record_function(key) as rf: + fut = rpc.rpc_async( + worker_name(0), my_script_func, args=(torch.tensor(1),) + ) + # Intentionally calling record_function internals + fut = torch.ops.profiler._call_end_callbacks_on_jit_fut(rf.record, fut) + result = fut.wait() + # Validate that the profiling future returns the same value as the RPC + # future. 
+ expected = torch.add(torch.tensor(1), torch.tensor(1)) + self.assertEqual(result, expected) + events = pf.function_events + rpc_event = get_function_event( + events, torch._jit_internal._qualified_name(my_script_func) + ) + self.assertTrue(torch._jit_internal._qualified_name(my_script_func) in rpc_event.name) + + @dist_init + def test_py_class_constructor(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync(worker_name(dst_rank), MyClass, args=(n,)) + self.assertEqual(ret.a, n) + + @dist_init + def test_py_class_instance_method(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), MyClass(2).my_instance_method, args=(n,) + ) + self.assertEqual(ret, MyClass(2).my_instance_method(n)) + + @dist_init + def test_py_class_method(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), MyClass.my_class_method, args=(n, n + 1) + ) + self.assertEqual(ret, MyClass.my_class_method(n, n + 1)) + + @dist_init + def test_py_class_static_method(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), MyClass.my_static_method, args=(n + 10,) + ) + self.assertEqual(ret, MyClass.my_static_method(n + 10)) + + @dist_init + def test_py_multi_async_call(self): + n = self.rank + 1 + dst_rank = n % self.world_size + dst_worker_info = rpc.get_worker_info(worker_name(dst_rank)) + fut1 = rpc.rpc_async(dst_worker_info, MyClass.my_static_method, args=(n + 10,)) + fut2 = rpc.rpc_async(dst_worker_info, min, args=(n, n + 1, n + 2)) + self.assertEqual(fut1.wait(), MyClass.my_static_method(n + 10)) + self.assertEqual(fut2.wait(), min(n, n + 1, n + 2)) + + @dist_init + def test_py_no_return_result(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync(worker_name(dst_rank), no_result) + self.assertEqual(ret, no_result()) + + @dist_init + def test_py_tensors(self): + n = self.rank + 1 + dst_rank 
= n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), + my_tensor_function, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + self.assertEqual(ret, my_tensor_function(torch.ones(n, n), torch.ones(n, n))) + + @dist_init + def test_py_tensors_multi_async_call(self): + futs = [] + n = self.rank + 1 + dst_rank = n % self.world_size + for i in range(100): + fut = rpc.rpc_async( + worker_name(dst_rank), + my_tensor_function, + args=(torch.ones(i, i), torch.ones(i, i)), + ) + futs.append(fut) + + j = 0 + for val in torch.futures.wait_all(futs): + self.assertEqual( + val, my_tensor_function(torch.ones(j, j), torch.ones(j, j)) + ) + j += 1 + + @dist_init + def test_py_tensors_in_container(self): + n = self.rank + 1 + dst_rank = n % self.world_size + a = [torch.ones(n, n), torch.ones(n, n)] + b = TensorClass(build_complex_tensors()) + c = {"foo": torch.ones(n, n), "bar": torch.ones(n, n)} + ret = rpc.rpc_sync( + worker_name(dst_rank), my_complex_tensor_function, args=(a, b, c) + ) + self.assertEqual(ret, my_complex_tensor_function(a, b, c)) + + @dist_init + def test_py_nested_pickle(self): + n = self.rank + 1 + dst_rank = n % self.world_size + + ret = rpc.rpc_sync( + worker_name(dst_rank), + run_nested_pickle, + args=(MyPickleClass(), torch.ones(2, 2)), + ) + + m = MyPickleClass() + m.set(my_tensor_function(torch.ones(2, 2), torch.ones(2, 2))) + self.assertEqual(ret, run_nested_pickle(m, torch.ones(2, 2))) + + @dist_init + def test_py_function_exception(self): + n = self.rank + 1 + dst_rank = n % self.world_size + with self.assertRaises(TypeError): + ret = rpc.rpc_sync(worker_name(dst_rank), no_result, args=(10,)) + + @dist_init + def test_py_raise_in_user_func(self): + with captured_output() as (_, err): + # This barrier prevents a race condition where the main thread has + # not entered the context manager when the remote function runs. 
+ initialize_pg(self.file_init_method, self.rank, self.world_size) + dist.barrier() + n = self.rank + 1 + dst_rank = n % self.world_size + fut = rpc.rpc_async(worker_name(dst_rank), raise_func) + with self.assertRaisesRegex(ValueError, expected_err): + fut.wait() + # This barrier prevents a race condition where the main thread exits + # context manager before the remote function has ran. + dist.barrier() + + # Validate that trainers log errors when running functions. + stderr_lines = err.getvalue() + self.assertTrue(expected_err in stderr_lines) + + @dist_init + def test_py_raise_in_user_func_escaped_str(self): + n = self.rank + 1 + dst_rank = n % self.world_size + fut = rpc.rpc_async(worker_name(dst_rank), raise_func_escape) + try: + fut.wait() + except ValueError as e: + msg = str(e) + # Ensure newlines are unescaped to provide a better repr of error. + self.assertEqual(msg, msg.encode("utf-8").decode("unicode_escape")) + else: + self.assertTrue(False, "expected raise_func_escape to raise ValueError.") + + @dist_init + def test_nested_rpc(self): + self._nested_rpc(nested_rpc, torch.ones(2, 2) + 1) + + @dist_init + def test_stress_light_rpc(self): + self._stress_test_rpc(light_rpc) + + @dist_init + def test_stress_heavy_rpc(self): + self._stress_test_rpc(heavy_rpc, repeat=20, args=(torch.ones(100, 100),)) + + @dist_init + def test_stress_heavy_rpc_torchscript(self): + self._stress_test_rpc(heavy_rpc_torchscript, repeat=20, args=(torch.ones(100, 100),)) + + @dist_init + def test_builtin_remote_ret(self): + self._builtin_remote_ret( + torch.ones(2, 2), + torch.ones(2, 2), + torch.ones(2, 2) * 2 + ) + + @dist_init + def test_builtin_remote_self(self): + self._builtin_remote_self( + torch.ones(2, 2), + torch.ones(2, 2), + torch.ones(2, 2) * 2 + ) + + @staticmethod + def _multi_args_fn(n, sparse=False): + if sparse: + return (build_sparse_tensor(), build_sparse_tensor()) + else: + return (torch.ones(n, n), torch.ones(n, n)) + + @dist_init + def 
test_multi_builtin_remote_ret(self): + self._test_multi_remote_call( + torch.add, False, + args_fn=RpcTest._multi_args_fn + ) + + @dist_init + def test_py_udf_remote(self): + n = self.rank + 1 + dst_rank = n % self.world_size + rref = rpc.remote( + worker_name(dst_rank), + my_function, + kwargs={"a": n, "b": n + 1, "c": n + 2}, + ) + self.assertEqual(rref.to_here(), my_function(n, n + 1, n + 2)) + + @staticmethod + def _multi_kwargs_fn(n, sparse=False): + if sparse: + return { + "a": build_sparse_tensor(), + "b": build_sparse_tensor(), + "c": build_sparse_tensor() + } + else: + return {"a": torch.ones(n, n), "b": torch.ones(n, n), "c": torch.ones(n, n)} + + @dist_init + def test_multi_py_udf_remote(self): + self._test_multi_remote_call( + my_function, + False, + kwargs_fn=RpcTest._multi_kwargs_fn + ) + + @dist_init + def test_py_rref_args(self): + self._py_rref_args( + torch.ones(2, 2), + 1, + torch.ones(2, 2), + 2, + torch.ones(2, 2) * 2 + 3) + + @dist_init + def test_py_rref_args_user_share(self): + self._py_rref_args_user_share( + torch.ones(2, 2), + 1, + 2, + torch.ones(2, 2), + 3, + 4, + torch.ones(2, 2) * 2 + 10 + ) + + @dist_init + def test_py_rpc_rref_args(self): + self._py_rpc_rref_args( + torch.ones(2, 2), + 1, + 2, + torch.ones(2, 2), + 3, + 4, + torch.ones(2, 2) * 2 + 10 + ) + + @dist_init + def test_nested_remote(self): + self._nested_remote( + nested_remote, + torch.ones(2, 2) + 3 + ) + + @dist_init + def test_nested_rref(self): + self._nested_rref( + nested_rref, + torch.ones(2, 2) + 1, + torch.ones(2, 2) + 2 + ) + + @dist_init + def test_nested_rref_stress(self): + self._nested_rref_stress( + nested_rref, + torch.ones(2, 2) + 1, + torch.ones(2, 2) + 2 + ) + + @dist_init + def test_multi_layer_nested_async_rpc(self): + # This test will exit right away, but there will be a chain of async + # RPCs. The termination algorithm should detect those messages properly. 
+ # Otherwise, some peer could exit early, leaving others to timeout + # errors or connection closed errors. + ttl = 20 + n = self.rank + 1 + dst_rank = n % self.world_size + + multi_layer_nested_async_rpc(dst_rank, self.world_size, ttl) + + @dist_init + def test_remote_with_exception(self): + n = self.rank + 1 + dst_rank = n % self.world_size + # check ref to other workers + rref = rpc.remote(worker_name(dst_rank), raise_func) + with self.assertRaises(ValueError): + rref.to_here() + # check ref to itself + rref = rpc.remote(worker_name(self.rank), no_result, args=(10,)) + with self.assertRaises(TypeError): + rref.to_here() + + @dist_init + def test_rpc_return_rref(self): + n = self.rank + 1 + dst_rank1 = n % self.world_size + dst_rank2 = (n + 1) % self.world_size + rref = rpc.rpc_sync( + worker_name(dst_rank1), + rpc_return_rref, + args=(worker_name(dst_rank2),), + ) + self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1) + + @dist_init + def test_rref_forward_chain(self): + ttl = 8 + n = self.rank + 1 + dst_rank = n % self.world_size + + rref = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1) + ) + + ret_rref = rref_forward_chain(dst_rank, self.world_size, rref, ttl) + + for i in range(ttl): + self.assertEqual(len(ret_rref), 1) + ret_rref = ret_rref[0].to_here() + + ret = ret_rref + self.assertEqual(ret, torch.add(torch.ones(n, n), 1)) + + @dist_init + def test_local_rref_no_fork(self): + local_rref = RRef(35) + self.assertEqual(local_rref.local_value(), 35) + + @dist_init + def test_local_value_not_on_owner(self): + # ensure that an error message is thrown if a user tries to call + # local_value() on a non-owning node. 
+ next_rank = (self.rank + 1) % self.world_size + rref = rpc.remote( + worker_name(next_rank), torch.add, args=(torch.ones(1), torch.ones(1)) + ) + with self.assertRaisesRegex( + RuntimeError, ( + fr"For UserRRef\(rref_id=GloballyUniqueId\(created_on={self.rank}, local_id=0\), " + fr"fork_id=GloballyUniqueId\(created_on={self.rank}, local_id=1\)\), " + r"can't call localValue\(\) on user " + fr"WorkerInfo\(id={self.rank}, name={worker_name(self.rank)}\). " + fr"Call it on owner WorkerInfo\(id={next_rank}, name={worker_name(next_rank)}\)" + ) + ): + rref.local_value() + + @dist_init + def test_return_local_rrefs(self): + n = self.rank + 1 + dst_rank = n % self.world_size + + rref_list = rpc.rpc_sync( + worker_name(dst_rank), get_rref_list, args=([1, 2, 3],) + ) + + for rref in rref_list: + rpc.rpc_sync( + rref.owner(), + _call_method_on_rref, + args=(MyClass.increment_value, rref, 10), + ) + + rets = [ + rpc.rpc_sync( + rref.owner(), _call_method_on_rref, args=(MyClass.get_value, rref) + ) + for rref in rref_list + ] + + self.assertEqual(rets, [11, 12, 13]) + + @dist_init + def _test_rref_type(self, blocking): + + def launched_rpc(events): + expected_name = f"rpc_{RPCExecMode.ASYNC.value}#_rref_typeof_on_owner" + return any(e.name.startswith(expected_name) for e in events) + + dst = worker_name((self.rank + 1) % self.world_size) + rref = rpc.remote(dst, torch.add, args=(torch.ones(2), 1)) + + with _profile() as p: + t = rref._get_type(blocking=blocking) + if not blocking: + t = t.wait() + + self.assertTrue(launched_rpc(p.function_events)) + expected_type = type(torch.ones(2)) + self.assertEqual(t, expected_type) + + futs = [] + + def verify(fut): + self.assertEqual(fut.value(), expected_type) + + with _profile() as p: + for _ in range(10): + t = rref._get_type(blocking=blocking) + if not blocking: + futs.append(t) + t.add_done_callback(verify) + t = t.wait() + self.assertEqual(t, expected_type) + + if not blocking: + # Note that cached calls with blocking=False all 
return the same + # cached original future. + first_fut = futs[0] + for f in futs[1:]: + self.assertTrue(f is first_fut) + # Ensure we never launch another RPC, other than for the very + # first call. + self.assertFalse(launched_rpc(p.function_events)) + self.assertEqual(t, type(torch.ones(2))) + + rref = rpc.remote(dst, MyClass, args=(0,)) + rref_type = rref._get_type(blocking=blocking) + if not blocking: + rref_type = rref_type.wait() + self.assertEqual(rref_type, MyClass) + + def test_rref_type_blocking(self): + self._test_rref_type(blocking=True) + + def test_rref_type_non_blocking(self): + self._test_rref_type(blocking=False) + + @dist_init + def _test_rref_type_with_error(self, blocking): + dst = worker_name((self.rank + 1) % self.world_size) + # 10 ms timeout + rref = rpc.remote(dst, raise_func) + # Blocking: error raised inline + if blocking: + with self.assertRaisesRegex(ValueError, "Expected error"): + rref._get_type(blocking=blocking) + else: + # Non-blocking: Immediately return future, block on wait + fut = rref._get_type(blocking=blocking) + with self.assertRaisesRegex(ValueError, "Expected error"): + fut.wait() + + + def test_rref_type_with_error_blocking(self): + self._test_rref_type_with_error(blocking=True) + + def test_rref_type_with_error_non_blocking(self): + self._test_rref_type_with_error(blocking=False) + + @dist_init + def _test_rref_type_owner(self, blocking): + rref = RRef(torch.ones(2) + 1) + rref_type = rref._get_type(blocking=blocking) + if not blocking: + rref_type = rref_type.wait() + self.assertEqual(rref_type, type(torch.ones(2))) + + rref = RRef(MyClass(0)) + rref_type = rref._get_type(blocking=blocking) + if not blocking: + rref_type = rref_type.wait() + self.assertEqual(rref_type, MyClass) + + def test_rref_type_owner_blocking(self): + self._test_rref_type_owner(blocking=True) + + def test_rref_type_owner_non_blocking(self): + self._test_rref_type_owner(blocking=False) + + @staticmethod + def _slow_add(x, y): + time.sleep(1) + 
return x + y + + @dist_init + def test_rref_type_slow_init(self): + dst = worker_name((self.rank + 1) % self.world_size) + rref = rpc.remote(dst, RpcTest._slow_add, args=(torch.ones(2), 1)) + self.assertEqual(rref._get_type(), type(torch.ones(2))) + + @dist_init + def test_owner_equality(self): + a = RRef(40) + b = RRef(50) + + other_rank = (self.rank + 1) % self.world_size + other_a = rpc.remote( + worker_name(other_rank), torch.add, args=(torch.ones(1), 1) + ) + other_b = rpc.remote( + worker_name(other_rank), torch.add, args=(torch.ones(1), 1) + ) + other_a.to_here() # to ensure clean termination + other_b.to_here() + + self.assertNotEqual(a.owner(), 23) + self.assertEqual(other_a.owner(), other_b.owner()) + self.assertNotEqual(a.owner(), other_a.owner()) + self.assertEqual(other_a.owner(), other_a.owner()) + self.assertEqual(other_a.owner(), other_b.owner()) + self.assertEqual(a.owner(), a.owner()) + self.assertEqual(a.owner(), b.owner()) + self.assertEqual(a.owner(), rpc.get_worker_info()) + x = {} + x[a.owner()] = a + x[other_a.owner()] = other_a + self.assertEqual(x[a.owner()], a) + self.assertEqual(x[b.owner()], a) + self.assertEqual(x[other_a.owner()], other_a) + self.assertEqual(x[other_b.owner()], other_a) + self.assertEqual(len(x), 2) + + @dist_init + def test_pass_local_rrefs(self): + n = self.rank + 1 + dst_rank = n % self.world_size + dst_worker = worker_name(dst_rank) + + rref = RRef(40) + self.assertEqual( + rpc.rpc_sync(dst_worker, add_rref_to_value, args=(rref, 50)), 90 + ) + self.assertEqual( + rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 50)).wait(), 90 + ) + self.assertEqual( + rpc.remote(dst_worker, add_rref_to_value, args=(rref, 50)).to_here(), 90 + ) + + @dist_init + def test_remote_same_worker(self): + n = self.rank + 1 + dst_rank = n % self.world_size + rref_a = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2) + ) + rref_b = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1) + 
) + rref_c = rpc.remote( + worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b) + ) + self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4) + + @dist_init(setup_rpc=True) + def test_call_method_on_rref(self): + """ + Tests that it is possible to call an instance method on a remote object + by using rref.owner() as destination of the call. + """ + vals = [10, 2, 5, 7] + dst_rank = (self.rank + 1) % self.world_size + dst_worker = worker_name(dst_rank) + + # creates a remote object + rref = rpc.remote(dst_worker, MyClass, args=(vals[0],)) + + # modifies state of the remote object + rpc.rpc_sync( + rref.owner(), + _call_method_on_rref, + args=(MyClass.increment_value, rref, vals[1]), + ) + rpc.rpc_async( + rref.owner(), + _call_method_on_rref, + args=(MyClass.increment_value, rref, vals[2]), + ).wait() + rpc.remote( + rref.owner(), + _call_method_on_rref, + args=(MyClass.increment_value, rref, vals[3]), + ).to_here() + + # queries state of the remote object + result = rpc.rpc_sync( + dst_worker, _call_method_on_rref, args=(MyClass.get_value, rref) + ) + + self.assertEqual(result, sum(vals)) + + # Notice `rpc.api.shutdown()` accesses + # `_delete_all_user_and_unforked_owner_rrefs` through + # `torch.distributed.rpc.api`, so patching + # `torch.distributed.rpc._delete_all_user_and_unforked_owner_rrefs` will + # not help. + @mock.patch.object(torch.distributed.rpc.api, "_delete_all_user_and_unforked_owner_rrefs") + def _test_rref_leak(self, _mock_delete_all_user_and_unforked_owner_rrefs, ignore_leak): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + initialize_pg(self.file_init_method, self.rank, self.world_size) + # Wait for all init to complete. 
+ dist.barrier() + + rref = rpc.remote( + worker_name((self.rank + 1) % self.world_size), + torch.add, + args=(torch.ones(2, 2), 1), + ) + + import torch.distributed.rpc.api as api + + if ignore_leak: + api._ignore_rref_leak = True + rpc.shutdown(graceful=True) + else: + api._ignore_rref_leak = False + with self.assertRaisesRegex(RuntimeError, "Leaking RRef"): + rpc.shutdown(graceful=True) + + @dist_init(setup_rpc=False) + def test_rref_leak(self): + self._test_rref_leak(ignore_leak=False) + + @dist_init(setup_rpc=False) + def test_ignore_rref_leak(self): + self._test_rref_leak(ignore_leak=True) + + @dist_init + def test_rref_str(self): + rref1 = RRef(self.rank) + id_class = "GloballyUniqueId" + self.assertEqual( + f"OwnerRRef({id_class}(created_on={self.rank}, local_id=0))", rref1.__str__() + ) + + dst_rank = (self.rank + 1) % self.world_size + rref2 = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1) + ) + self.assertEqual( + rref2.__str__(), + f"UserRRef(RRefId = {id_class}(created_on={self.rank}, local_id=1), " + f"ForkId = {id_class}(created_on={self.rank}, local_id=2))", + ) + + @dist_init + def test_rref_get_future(self): + # Tests that we can obtain the future corresponding to the creation of + # the RRef on remote end + if self.rank == 0: + # Builtin + rref = rpc.remote(worker_name(1), torch.add, args=(1, 1)) + rref.to_here() + fut = rref._get_future() + self.assertIsInstance(fut, torch._C.Future) + + # UDF + rref = rpc.remote(worker_name(1), foo_add, args=()) + rref.to_here() + fut = rref._get_future() + self.assertIsInstance(fut, torch._C.Future) + + # Script + rref = rpc.remote(worker_name(1), my_script_func, args=(torch.tensor(1), )) + rref.to_here() + fut = rref._get_future() + self.assertIsInstance(fut, torch._C.Future) + + + @dist_init + def test_rref_context_debug_info(self): + # This test checks local states that are modified by remote workers. + # This means that we would need barrier before and after every check. 
+ # The barrier before the check makes sure that all previous states are + # cleared globally, the barrier after ensures that no following states + # change gets into the current check. + initialize_pg(self.file_init_method, self.rank, self.world_size) + + # Check 1: local RRef does not update owners_ map or add a pending user. + ################################################# + + rref1 = RRef(self.rank) + + # don't need a barrier here as local RRef is handled by this thread + info = _rref_context_get_debug_info() + self.assertIn("num_owner_rrefs", info) + self.assertIn("num_pending_users", info) + # RRef on local value is not added to context until shared across RPC + self.assertEqual(0, int(info["num_owner_rrefs"])) + self.assertEqual(0, int(info["num_pending_users"])) + # barrier after the check 1 + dist.barrier() + + # Check 2: Sharing RRef as an arg should update owners_ map + ########################################################### + + dst_rank = (self.rank + 1) % self.world_size + rpc.rpc_sync(worker_name(dst_rank), set_global_rref, args=(rref1,)) + + # barrier before check 2 + wait_until_pending_futures_and_users_flushed() + dist.barrier() + + info = _rref_context_get_debug_info() + self.assertIn("num_owner_rrefs", info) + self.assertEqual(1, int(info["num_owner_rrefs"])) + # no pending users since the fork is finished + self.assertEqual(0, int(info["num_pending_users"])) + # barrier after check 2 + dist.barrier() + + # clear states for check 2 + rpc.rpc_sync(worker_name(dst_rank), clear_global_rref) + + # Wait for owner rref to be cleared. 
+ while int(info["num_owner_rrefs"]) != 0: + info = _rref_context_get_debug_info() + time.sleep(0.1) + dist.barrier() + + # Check 3: rpc.remote call should update owners_ map + #################################################### + rref2 = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1) + ) + rref3 = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1) + ) + rref2.to_here() + rref3.to_here() + + # barrier before check 3 + wait_until_pending_futures_and_users_flushed() + dist.barrier() + + info = _rref_context_get_debug_info() + self.assertIn("num_owner_rrefs", info) + self.assertEqual(2, int(info["num_owner_rrefs"])) + # no pending users since the fork is finished + self.assertEqual(0, int(info["num_pending_users"])) + + # barrier after check 3 + dist.barrier() + + @dist_init + def test_disable_gil_profiling(self): + # test that rpc.enable_gil_profiling(false) will result in + # GIL wait time not being recorded. + + # GIL profiling should be disabled by default. + dst_rank = (self.rank + 1) % self.world_size + rpc.rpc_sync( + worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1)) + ) + info = rpc.api._get_current_rpc_agent().get_debug_info() + self.assertRaises(KeyError, lambda: info["agent.gil_average_wait_time_us"]) + rpc.enable_gil_profiling(True) + rpc.rpc_sync( + worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1)) + ) + info = rpc.api._get_current_rpc_agent().get_debug_info() + self.assertIn("agent.gil_average_wait_time_us", info) + + @dist_init(setup_rpc=False) + def test_local_shutdown(self): + # test that we can start RPC and then immediately locally shutdown + # without sending any messages. + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + # pass in graceful=False to ensure that we don't wait for other workers. 
+ rpc.shutdown(graceful=False) + + @dist_init + def test_debug_info(self): + # only test keys in this test case. Values should be covered by + # individual module debug info tests + import torch.distributed.autograd as dist_autograd + + info = _get_debug_info() + rref_info = _rref_context_get_debug_info() + agent_info = rpc.api._get_current_rpc_agent().get_debug_info() + autograd_info = dist_autograd._get_debug_info() + common_keys = rref_info.keys() & agent_info.keys() & autograd_info.keys() + self.assertEqual(0, len(common_keys)) + expected = {} + expected.update(rref_info) + expected.update(agent_info) + expected.update(autograd_info) + # NB: Key ordering is only preserved in python 3.6+. So here, we + # manually check keys are equal. + for key in expected.keys(): + self.assertIn(key, info.keys()) + + for key in info.keys(): + self.assertIn(key, expected.keys()) + + @dist_init(setup_rpc=False) + @skip_but_pass_in_sandcastle_if( + IS_MACOS, + "Test is flaky on MacOS since libuv error handling is not as robust as TCP", + ) + def test_handle_send_exceptions(self): + # test that if a callee node has gone down, we raise an appropriate + # exception instead of just crashing. + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + rpc._set_rpc_timeout(10) + # This barrier is needed to ensure that some workers do not exit before + # others have been brought up. 
+ initialize_pg(self.file_init_method, self.rank, self.world_size) + dist.barrier() + if self.rank == 1: + dst_rank = (self.rank + 1) % self.world_size + dst_worker = worker_name(dst_rank) + # allow destination worker to exit without joining + error_str = self.get_shutdown_error_regex() + wait_until_node_failure(dst_rank, error_str) + fut = rpc.rpc_async(dst_worker, torch.add, args=(torch.ones(1), 3)) + # Shutdown sequence is not very well defined and as a result + # we can see any of the error messages defined in get_shutdown_error_regex. + with self.assertRaisesRegex(RuntimeError, error_str): + fut.wait() + # exit all workers non-gracefully. + rpc.shutdown(graceful=False) + + @dist_init + def test_deadlock(self): + # this test is copied from https://github.com/pytorch/pytorch/issues/45089 + if self.rank == 1: + dst1 = worker_name((self.rank + 1) % self.world_size) + x = torch.ones(2) + y = torch.ones(2) + rpc.rpc_async(dst1, RpcTest._slow_add, args=(x, y), timeout=15).wait() + + dist_initialized = dist.is_initialized() + if not dist_initialized: + dist.init_process_group( + backend="gloo", + init_method=self.file_init_method, + rank=self.rank, + world_size=self.world_size, + ) + + @dist_init(setup_rpc=False) + def test_local_shutdown_with_rpc(self): + # test that we can start RPC, send RPCs, and then run local shutdown. + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + n = self.rank + 1 + dst_rank = n % self.world_size + rpc.rpc_sync( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + # A barrier is needed to ensure that all RPCs are processed. + # Otherwise, some RPCs can timeout since the receiving end + # has terminated. + initialize_pg(self.file_init_method, self.rank, self.world_size) + dist.barrier() + # pass in graceful=False to ensure that we don't wait for other workers. 
        rpc.shutdown(graceful=False)

    @dist_init(setup_rpc=False)
    def test_set_and_get_default_rpc_timeout(self):
        """Verify the timeout configured via RpcBackendOptions is the one
        reported by rpc.get_rpc_timeout() after init."""
        timeout = 0.5

        # A new `RpcBackendOptions` is constructed
        # when accessing `self.rpc_backend_options`.
        rpc_backend_options = self.rpc_backend_options
        rpc_backend_options.rpc_timeout = timeout

        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=rpc_backend_options,
        )
        set_timeout = rpc.get_rpc_timeout()
        self.assertEqual(timeout, set_timeout)
        rpc.shutdown()

    @dist_init
    def test_default_timeout_used(self):
        """
        Tests that if no timeout is passed into rpc_async and rpc_sync, then the
        default timeout is used.
        """
        dst_rank = (self.rank + 1) % self.world_size
        rpc._set_rpc_timeout(0.001)  # 1 ms
        # futures should time out and be marked with an exception indicating it as such.
        futs = [
            rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=())
            for _ in range(10)
        ]
        expected_error = self.get_timeout_error_regex()
        for fut in futs:
            with self.assertRaisesRegex(RuntimeError, expected_error):
                fut.wait()

        # ensure that if a new timeout is set old futures don't time out but new ones do.
        rpc._set_rpc_timeout(200)  # 200 seconds
        # create a longstanding RPC.
        fut1 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
        # now, set a short timeout.
        rpc._set_rpc_timeout(0.001)
        # fut2 should time out, fut1 should not.
        fut2 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
        with self.assertRaisesRegex(RuntimeError, expected_error):
            fut2.wait()
        fut1.wait()

        # Zero timeout means infinity, so future should run to completion.
        rpc._set_rpc_timeout(0)
        rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=()).wait()

        # reset to default timeout so shutdown messages can process cleanly.
        rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)

    @dist_init
    def test_rpc_timeouts(self):
        """Per-call timeouts override the agent default for both rpc_async and
        rpc_sync; timeout=0 disables the timeout entirely."""
        # TODO: enable timeouts for rpc.remote/RRef (https://github.com/pytorch/pytorch/issues/33803)
        dst_rank = (self.rank + 1) % self.world_size
        dst_worker = worker_name(dst_rank)
        timeout = 0.1  # 100 ms
        expected_error = self.get_timeout_error_regex()
        # Test async UDF
        fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
        with self.assertRaisesRegex(RuntimeError, expected_error):
            fut.wait()

        # Ensure run to completion if there is no timeout and we use the default
        # RPC timeout.
        rpc.rpc_async(dst_worker, my_sleep_func, args=(1,)).wait()

        # Test sync UDF
        with self.assertRaisesRegex(RuntimeError, expected_error):
            rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=timeout)

        # Ensure run to completion if there is no timeout and we use the default
        # RPC timeout.
        rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))

        # If we set a default timeout for RPCs, it should be respected, though
        # still overridden if we pass in a different timeout to the APIs.
        rpc._set_rpc_timeout(0.001)
        fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,))
        with self.assertRaisesRegex(RuntimeError, expected_error):
            fut.wait()
        with self.assertRaisesRegex(RuntimeError, expected_error):
            rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))

        # The RPCs should run to completion since we override the timeout.
        rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=5).wait()
        rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=5)
        # Passing in a zero timeout should ensure that the RPC won't time out.
        rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=0).wait()
        rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=0)
        # Reset for clean shutdown
        rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)

    def test_dist_init_decorator(self):
        """The @dist_init decorator must forward the wrapped function's return
        value in both its parameterized and bare forms."""
        @dist_init(setup_rpc=False)
        def test_func(self):
            return "expected result"

        self.assertEqual(test_func(self), "expected result")

        # Intentional redefinition: exercise the bare-decorator form too.
        @dist_init
        def test_func(self):
            return "expected result"

        self.assertEqual(test_func(self), "expected result")

    def test_use_rpc_pickler(self):
        """_use_rpc_pickler swaps the default pickler inside the context and
        restores _internal_rpc_pickler on exit."""
        class TestPickler:
            pass

        test_pickler = TestPickler()
        with _use_rpc_pickler(test_pickler):
            self.assertTrue(torch.distributed.rpc.api._default_pickler is test_pickler)
        self.assertTrue(
            torch.distributed.rpc.api._default_pickler is _internal_rpc_pickler
        )

    @dist_init
    def test_wait_all(self):
        """_wait_all tracks outstanding futures in a thread-local list and
        waits for them when the context exits."""
        with _wait_all():
            self.assertTrue(_thread_local_var.future_list == [])
            dst = worker_name((self.rank + 1) % self.world_size)
            fut = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
            self.assertTrue(len(_thread_local_var.future_list) == 1)
            self.assertTrue(isinstance(_thread_local_var.future_list[0], torch._C.Future))
        # On context exit the future has been waited on and the tracking
        # attribute has been removed.
        self.assertTrue(fut.done())
        self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
        self.assertFalse(hasattr(_thread_local_var, "future_list"))

    @dist_init
    def test_wait_all_multiple_call(self):
        """Each rpc_async inside _wait_all is tracked; rpc_sync calls are not."""
        with _wait_all():
            self.assertTrue(_thread_local_var.future_list == [])
            dst = worker_name((self.rank + 1) % self.world_size)
            for i in range(20):
                fut = rpc.rpc_async(dst, torch.add, (torch.ones(i, i), 1))
                res = rpc.rpc_sync(dst, torch.add, (torch.ones(i, i), 1))
                self.assertEqual(res, torch.ones(i, i) + 1)
                self.assertEqual(fut.wait(), torch.ones(i, i) + 1)
            self.assertTrue(len(_thread_local_var.future_list) == 20)
        self.assertFalse(hasattr(_thread_local_var, "future_list"))

    @dist_init
    def test_wait_all_timeout(self):
        """A timed-out future inside _wait_all surfaces on context exit and the
        tracking attribute is still cleaned up."""
        expected_error = self.get_timeout_error_regex()
        with self.assertRaisesRegex(RuntimeError, expected_error):
            with _wait_all():
                self.assertTrue(_thread_local_var.future_list == [])
                dst = worker_name((self.rank + 1) % self.world_size)
                timeout = 0.1  # 100 ms
                # Not waited on explicitly; _wait_all waits on it at exit.
                fut = rpc.rpc_async(dst, my_sleep_func, args=(1,), timeout=timeout)
        self.assertFalse(hasattr(_thread_local_var, "future_list"))

    @dist_init
    def test_wait_all_raise_in_user_func(self):
        """A remote user-function exception inside _wait_all propagates on exit."""
        with self.assertRaises(ValueError):
            with _wait_all():
                self.assertTrue(_thread_local_var.future_list == [])
                dst = worker_name((self.rank + 1) % self.world_size)
                fut = rpc.rpc_async(dst, raise_func)
        self.assertFalse(hasattr(_thread_local_var, "future_list"))

    @dist_init
    def test_wait_all_raise_in_body(self):
        """An exception raised directly in the _wait_all body still cleans up."""
        with self.assertRaises(ValueError):
            with _wait_all():
                raise_func()
        self.assertFalse(hasattr(_thread_local_var, "future_list"))

    @dist_init
    def test_custom_exception_throw_during_reconstruction(self):
        """
        Test that we still throw info about the remote side exception even when
        we cannot recreate it on client side.
        """
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        if self.rank != 0:
            exc_caught = False
            dst = worker_name(0)
            try:
                rpc.rpc_sync(dst, custom_raise_func, args=())
            except RuntimeError as e:
                exc_caught = True
                msg = str(e)
                print(f"Got msg {msg}")
                self.assertTrue("Original exception on remote side was" in msg)
                self.assertTrue("CustomException" in msg)
            except BaseException as e:
                raise RuntimeError(
                    f"Failure - expected RuntimeError, got {e}"
                ) from e
            finally:
                self.assertTrue(exc_caught)

        dist.barrier()

    # Class-level event shared with the static RPC target below; re-created
    # per test process before use.
    timed_out_rpc_event = None

    @staticmethod
    def timed_out_rpc():
        # Blocks the RPC server thread until the event is set by the test.
        RpcTest.timed_out_rpc_event.wait()

    @dist_init
    def test_wait_all_exit_early_python(self):
        """wait_all must surface an error from one future without waiting for
        the still-blocked ones."""
        # Initialize the event in the subprocess.
        RpcTest.timed_out_rpc_event = Event()

        # Wait for all processes to initialize event.
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        dist.barrier()

        dst = worker_name((self.rank + 1) % self.world_size)
        fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)  # blocks on the event
        fut2 = rpc.rpc_async(dst, raise_func)
        fut3 = rpc.rpc_async(dst, raise_func)

        # We should receive the error from fut2
        with self.assertRaisesRegex(ValueError, expected_err):
            torch.futures.wait_all([fut1, fut2, fut3])

        # Unblock RPC thread for fut1
        RpcTest.timed_out_rpc_event.set()

    @dist_init
    def test_wait_all_exit_early_builtin(self):
        """Same early-exit behavior with a builtin op error (shape mismatch)."""
        # Initialize the event in the subprocess.
        RpcTest.timed_out_rpc_event = Event()

        # Wait for all processes to initialize event.
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        dist.barrier()

        dst = worker_name((self.rank + 1) % self.world_size)
        fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)  # blocks on the event
        # Mismatched sizes (10 vs 5) make torch.add raise remotely.
        fut2 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
        fut3 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))

        # We should receive the error from fut2
        with self.assertRaisesRegex(RuntimeError, "size of tensor"):
            torch.futures.wait_all([fut1, fut2, fut3])

        # Unblock RPC thread for fut1
        RpcTest.timed_out_rpc_event.set()

    @dist_init
    def test_wait_all_exit_early_script_function(self):
        """Same early-exit behavior with a TorchScript function error."""
        # Initialize the event in the subprocess.
        RpcTest.timed_out_rpc_event = Event()

        # Wait for all processes to initialize event.
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        dist.barrier()

        dst = worker_name((self.rank + 1) % self.world_size)
        fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)  # blocks on the event
        fut2 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
        fut3 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))

        # We should receive the error from fut2
        with self.assertRaisesRegex(RuntimeError, expected_err):
            torch.futures.wait_all([fut1, fut2, fut3])

        # Unblock RPC thread for fut1
        RpcTest.timed_out_rpc_event.set()

    @dist_init
    def test_function_not_on_callee(self):
        # test that if a function does not exist on a callee, we don't crash,
        # instead we get an AttributeError indicating that the func does not exist.
        this_module = sys.modules[__name__]
        caller_worker = "worker0"
        callee_worker = "worker1"

        if self.rank == 1:
            # Use delattr to remove the binding of a func on this nodes
            delattr(this_module, "foo_add")
            # notify remote end that we have removed it.
            rpc.rpc_sync(caller_worker, set_value, args=(self.rank,))

        if self.rank == 0:
            # func exists on caller, but not callee.
            # wait for remote end to remove the binding of foo_add func.
            wait_for_value_future()
            # Ensure that we have the attribute on this module. Otherwise, the test could fail due to a caller-side pickling error.
            self.assertTrue(hasattr(this_module, "foo_add"))
            with self.assertRaisesRegex(
                RuntimeError, "RPC pickler does not serialize"
            ):
                rpc.rpc_sync(callee_worker, foo_add, args=())

    @dist_init
    def test_non_garbage_collected_user_rref_due_to_local_circular_dependency(self):
        """A UserRRef held inside a local reference cycle must not break
        graceful shutdown (the decorator performs the shutdown)."""
        dst_worker_name = worker_name((self.rank + 1) % self.world_size)

        a = MyClass(1)
        b = MyClass(2)

        # This is to make Python not garbage collect a and b.
+ a.other = b + b.other = a + + n = self.rank + a.rref = rpc.remote( + dst_worker_name, + torch.add, + args=(torch.ones(n, n), 2) + ) + + @dist_init(setup_rpc=False) + def test_use_rref_after_shutdown(self): + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + n = self.rank + 1 + dst_rank = n % self.world_size + rref = rpc.remote( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + # pass in graceful=True to ensure that local UserRRefs are deleted. + rpc.shutdown(graceful=True) + + with self.assertRaisesRegex( + RuntimeError, "Cannot call to_here\\(\\) on it after deletion." + ): + rref.to_here() + + with self.assertRaisesRegex( + RuntimeError, "Cannot call fork an UserRRef after deletion." + ): + import torch.distributed.rpc.internal as internal + internal.serialize(rref) + + @staticmethod + def _return_gpu_tensor(): + return torch.rand(3, 3).cuda(0) + + @staticmethod + def _return_gpu_tensor_list(): + return [torch.rand(3, 3).cuda(0), torch.rand(3, 3).cuda(1)] + + @staticmethod + def _gpu_tensor_list_arg(tensor_list): + return torch.rand(3, 3) + + def _create_rref(self): + owner_rank = (self.rank + 2) % self.world_size + return rpc.remote( + worker_name(owner_rank), + torch.add, + args=(torch.zeros(2, 2), 1) + ) + + @dist_init + def test_user_rrefs_confirmed(self): + dst_rank = (self.rank + 1) % self.world_size + rref = self._create_rref() + ret = rpc.rpc_sync( + worker_name(dst_rank), + check_rref_confirmed, + args=(rref,) + ) + self.assertEqual(ret, True) + + @dist_init + def test_user_rrefs_confirmed_remote(self): + dst_rank = (self.rank + 1) % self.world_size + rref = self._create_rref() + ret_rref = rpc.remote( + worker_name(dst_rank), + check_rref_confirmed, + args=(rref,) + ) + self.assertEqual(ret_rref.to_here(), True) + + @dist_init + def test_rref_py_pickle_not_supported(self): + local_rref = 
RRef(35) + with TemporaryFileName() as fname: + with self.assertRaisesRegex(RuntimeError, "Can not pickle rref in python pickler"): + torch.save(local_rref, fname) + + @dist_init + def test_remote_throw(self): + rref = rpc.remote(worker_name((self.rank + 1) % self.world_size), + raise_or_inc, + args=(torch.ones(2),)) + with self.assertRaisesRegex(Exception, ".*Expected error.*"): + rref.to_here() + + @dist_init + def test_non_cont_tensors(self): + if self.rank == 0: + # Create a non-contiguous tensor. + t = torch.rand(5, 5) + t_view = t.narrow(1, 2, 2) + self.assertFalse(t_view.is_contiguous()) + t_cont = t_view.contiguous() + self.assertTrue(t_cont.is_contiguous()) + self.assertEqual(t_view, t_cont) + + # Send non-cont tensor over RPC. + next_rank = (self.rank + 1) % self.world_size + t_ret = rpc.rpc_sync(worker_name(next_rank), non_cont_test, args=(t_view, t_cont)) + + # Verify the returned tensor. + self.assertEqual(t_view, t_ret) + self.assertFalse(t_ret.is_contiguous()) + + @dist_init + def test_callback_simple(self): + set_by_cb = concurrent.futures.Future() + n = self.rank + 1 + + def callback(fut): + ret = fut.wait() + self.assertEqual(ret, torch.ones(n, n) * 2) + set_by_cb.set_result(ret.clone() + 1) + + fut = rpc.rpc_async( + worker_name(n % self.world_size), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)) + ) + + fut.then(callback) + + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + self.assertEqual(set_by_cb.result(), torch.ones(n, n) * 2 + 1) + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + @dist_init + def test_callback_wrong_arg_num(self): + set_by_cb = concurrent.futures.Future() + n = self.rank + 1 + + fut = rpc.rpc_async( + worker_name(n % self.world_size), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)) + ) + + cb_fut = fut.then(my_function) + + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + with self.assertRaisesRegex( + RuntimeError, + "my\\_function\\(\\) missing 2 required positional arguments" + ): + 
cb_fut.wait() + + @dist_init + def test_callback_wrong_arg_type(self): + dst = worker_name((self.rank + 1) % self.world_size) + + fut0 = rpc.rpc_async(dst, torch.add, args=(torch.ones(2, 2), 1)) + fut1 = fut0.then(lambda x: x + 1) + + with self.assertRaisesRegex( + RuntimeError, + "unsupported operand type\\(s\\) for \\+" + ): + fut1.wait() + + @dist_init + def test_callback_multi(self): + num_cbs = 10 + n = self.rank + 1 + + def callback(idx, fut): + ret = fut.wait() + self.assertEqual(ret, torch.ones(n, n) * 2) + return ret + idx + + fut = rpc.rpc_async( + worker_name(n % self.world_size), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)) + ) + + cb_futs = [] + for idx in range(num_cbs): + cb_futs.append(fut.then(partial(callback, idx))) + + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + for idx in range(num_cbs): + self.assertEqual( + cb_futs[idx].wait(), + torch.ones(n, n) * 2 + idx + ) + + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + @dist_init + def test_callback_chain(self): + n = self.rank + 1 + dst = worker_name(n % self.world_size) + + def callback(fut): + return fut.wait() + 1 + + fut = rpc.rpc_async( + worker_name(n % self.world_size), + torch.add, + args=(torch.ones(n, n), 1) + ) + + num_cbs = 20 + for _ in range(num_cbs): + fut = fut.then(callback) + + self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs) + + @dist_init + def test_callback_in_rpc(self): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + ret = rpc.rpc_sync( + dst1, + add_use_future_cb, + args=(dst2, torch.ones(2, 2), 1, 2) + ) + self.assertEqual(ret, torch.ones(2, 2) + 1 + 2) + + @dist_init + def test_callback_with_ret(self): + dst = worker_name((self.rank + 1) % self.world_size) + + def callback(fut0): + fut2 = rpc.rpc_async( + dst, + torch.add, + args=(fut0.wait(), 1) + ).then(lambda fut1: fut1.wait() + 1) + + return fut2.wait() + + fut3 = rpc.rpc_async( + dst, + torch.add, + 
            args=(torch.ones(2, 2), 1)
        ).then(callback)

        self.assertEqual(fut3.wait(), torch.ones(2, 2) + 3)

    @dist_init
    def test_callback_with_error(self):
        """An exception raised inside a then-callback replaces the original
        future error in the chained future."""
        dst = worker_name((self.rank + 1) % self.world_size)

        def callback(fut0):
            with self.assertRaisesRegex(ValueError, "Expected error"):
                fut0.wait()
            raise RuntimeError("Another expected error")

        fut1 = rpc.rpc_async(dst, raise_func).then(callback)
        with self.assertRaisesRegex(RuntimeError, "Another expected error"):
            fut1.wait()

    @dist_init
    def test_callback_none(self):
        """Passing None to Future.then is rejected with a TypeError."""
        dst = worker_name((self.rank + 1) % self.world_size)
        with self.assertRaisesRegex(
            TypeError,
            "incompatible function arguments."
        ):
            rpc.rpc_async(dst, raise_func).then(None)

    @dist_init
    def test_add_done_callback(self):
        """add_done_callback runs after completion; unlike then() it produces
        no chained future, so a separate then() is used to synchronize."""
        set_by_cb = False
        n = self.rank + 1

        def callback(fut):
            nonlocal set_by_cb
            fut.wait()
            set_by_cb = True

        fut = rpc.rpc_async(
            worker_name(n % self.world_size),
            torch.add,
            args=(torch.ones(n, n), torch.ones(n, n))
        )

        fut.add_done_callback(callback)
        fut_then = fut.then(lambda _: True)

        self.assertEqual(fut.wait(), torch.ones(n, n) * 2)

        # We have no guarantee that the add_done_callback fn will execute before the test finishes.
+ # Adding a 'then' callback that runs afterwards to guarantee we wait for the first callback + fut_then.wait() + self.assertTrue(set_by_cb) + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + @dist_init + def test_mark_future_twice(self): + fut = rpc.rpc_async( + worker_name((self.rank + 1) % self.world_size), + torch.add, + args=(torch.zeros(2, 2), 1) + ) + self.assertEqual(fut.wait(), torch.zeros(2, 2) + 1) + with self.assertRaisesRegex( + RuntimeError, + "Future can only be marked completed once" + ): + fut.set_result(1) + + @dist_init + def test_pickle_future(self): + fut = torch.futures.Future() + errMsg = "Can not pickle torch.futures.Future" + + dst = worker_name((self.rank + 1) % self.world_size) + with TemporaryFileName() as fname: + with self.assertRaisesRegex(RuntimeError, errMsg): + rpc.rpc_sync(dst, fail_on_fut, args=(fut,)) + + with TemporaryFileName() as fname: + with self.assertRaisesRegex(RuntimeError, errMsg): + rpc.rpc_async(dst, fail_on_fut, args=(fut,)) + + with TemporaryFileName() as fname: + with self.assertRaisesRegex(RuntimeError, errMsg): + rpc.remote(dst, fail_on_fut, args=(fut,)) + + @dist_init + def test_future_done(self): + dst = worker_name((self.rank + 1) % self.world_size) + fut = rpc.rpc_async(dst, torch.add, args=(torch.zeros(2), 1)) + fut.wait() + self.assertTrue(fut.done()) + + @dist_init + def test_future_done_exception(self): + dst = worker_name((self.rank + 1) % self.world_size) + fut = rpc.rpc_async(dst, raise_func) + with self.assertRaisesRegex(ValueError, "Expected error"): + fut.wait() + self.assertTrue(fut.done()) + + def _test_future_cb(self, func): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + ret = rpc.rpc_sync( + dst1, + func, + args=(dst2, torch.ones(2, 2), 1, 2) + ) + self.assertEqual(ret, torch.ones(2, 2) + 1 + 2) + + @dist_init + def test_future_in_rpc(self): + self._test_future_cb(add_use_future_set_result) + + @dist_init + def 
test_future_nested_callback(self): + self._test_future_cb(add_use_future_nested_cb) + + def _test_async_function_raise(self, mode): + with self.assertRaisesRegex(RuntimeError, "Expected error"): + self._run_func_in_mode( + worker_name((self.rank + 1) % self.world_size), + async_raise_func, + mode + ) + + @dist_init + def test_async_function_raise(self): + self._test_async_function_raise(RPCExecMode.SYNC) + + @dist_init + def test_async_function_raise_async(self): + self._test_async_function_raise(RPCExecMode.ASYNC) + + @dist_init + def test_async_function_raise_remote(self): + self._test_async_function_raise(RPCExecMode.REMOTE) + + def _test_async_function_wrong_return_type(self, mode): + errMsg = ( + "Functions decorated with @rpc\\.async_function must return a " + "torch\\.futures\\.Future object," + ) + with self.assertRaisesRegex(RuntimeError, errMsg): + self._run_func_in_mode( + worker_name((self.rank + 1) % self.world_size), + async_wrong_type, + mode + ) + + @dist_init + def test_async_function_wrong_return_type(self): + self._test_async_function_wrong_return_type(RPCExecMode.SYNC) + + @dist_init + def test_async_function_wrong_return_type_async(self): + self._test_async_function_wrong_return_type(RPCExecMode.ASYNC) + + @dist_init + def test_async_function_wrong_return_type_remote(self): + self._test_async_function_wrong_return_type(RPCExecMode.REMOTE) + + @dist_init + def test_async_function_simple(self): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + ret = rpc.rpc_sync(dst1, async_add, args=(dst2, torch.ones(2, 2), 1)) + self.assertEqual(ret, torch.ones(2, 2) + 1) + + def _test_async_function(self, fn, mode=RPCExecMode.SYNC): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + args = (dst2, torch.ones(2, 2), 1, 2) + ret = self._run_func_in_mode(dst1, fn, mode, args=args) + self.assertEqual(ret, torch.ones(2, 2) + 3) + + 
    @dist_init
    def test_async_function_with_future_ctor(self):
        self._test_async_function(async_add_with_future_ctor)

    @dist_init
    def test_async_function_with_future_ctor_remote(self):
        self._test_async_function(
            async_add_with_future_ctor,
            RPCExecMode.REMOTE
        )

    @dist_init
    def test_async_function_chained(self):
        self._test_async_function(async_add_chained)

    @dist_init
    def test_async_function_chained_remote(self):
        self._test_async_function(async_add_chained, RPCExecMode.REMOTE)

    @dist_init
    def test_async_function_nested(self):
        self._test_async_function(async_add_nested)

    @dist_init
    def test_async_function_nested_remote(self):
        self._test_async_function(async_add_nested, RPCExecMode.REMOTE)

    @dist_init
    def test_async_static_method(self):
        self._test_async_function(AsyncExecutionClass.static_async_add)

    @dist_init
    def test_async_static_method_remote(self):
        self._test_async_function(
            AsyncExecutionClass.static_async_add,
            RPCExecMode.REMOTE
        )

    @dist_init
    def test_async_class_method(self):
        self._test_async_function(AsyncExecutionClass.class_async_add)

    @dist_init
    def test_async_class_method_remote(self):
        self._test_async_function(
            AsyncExecutionClass.class_async_add,
            RPCExecMode.REMOTE
        )

    def _test_test_async_class_rref_proxy(self, mode=RPCExecMode.SYNC):
        """Exercise static, class, and bound async methods through an RRef
        proxy (rpc_sync / rpc_async / remote), summing the three results."""
        dst1 = worker_name((self.rank + 1) % self.world_size)
        dst2 = worker_name((self.rank + 2) % self.world_size)
        rref = rpc.remote(dst1, AsyncExecutionClass)

        x = torch.ones(2, 2)
        y = torch.ones(2, 2) + 1
        if mode == RPCExecMode.SYNC:
            ret = rref.rpc_sync().static_async_add(dst2, x, x, y)
            ret += rref.rpc_sync().class_async_add(dst2, x, x, y)
            ret += rref.rpc_sync().bound_async_add(dst2, x, x, y)
        elif mode == RPCExecMode.ASYNC:
            ret = rref.rpc_async().static_async_add(dst2, x, x, y).wait()
            ret += rref.rpc_async().class_async_add(dst2, x, x, y).wait()
            ret += rref.rpc_async().bound_async_add(dst2, x, x, y).wait()
        elif mode == RPCExecMode.REMOTE:
            ret = rref.remote().static_async_add(dst2, x, x, y).to_here()
            ret += rref.remote().class_async_add(dst2, x, x, y).to_here()
            ret += rref.remote().bound_async_add(dst2, x, x, y).to_here()

        # Each call yields x + x + y == 4 * x; three calls sum to 12 * x.
        self.assertEqual(ret, 3 * 4 * x)

    @dist_init
    def test_async_class_rref_proxy(self):
        self._test_test_async_class_rref_proxy()

    @dist_init
    def test_async_class_rref_proxy_async(self):
        self._test_test_async_class_rref_proxy(mode=RPCExecMode.ASYNC)

    @dist_init
    def test_async_class_rref_proxy_remote(self):
        self._test_test_async_class_rref_proxy(mode=RPCExecMode.REMOTE)

    def _test_async_function_multi(self, fn, mode=RPCExecMode.SYNC):
        # Shared driver for multi-step async functions: fn applies `step`
        # additions `num` times.
        dst1 = worker_name((self.rank + 1) % self.world_size)
        dst2 = worker_name((self.rank + 2) % self.world_size)

        num = 20
        step = 3
        args = (dst2, torch.ones(2, 2), num, step)
        ret = self._run_func_in_mode(dst1, fn, mode, args=args)
        self.assertEqual(ret, torch.ones(2, 2) + num * step)

    @dist_init
    def test_async_function_multi_chained(self):
        self._test_async_function_multi(async_add_chained_multi)

    @dist_init
    def test_async_function_multi_chained_async(self):
        self._test_async_function_multi(
            async_add_chained_multi,
            RPCExecMode.ASYNC
        )

    @dist_init
    def test_async_function_multi_chained_remote(self):
        self._test_async_function_multi(
            async_add_chained_multi,
            RPCExecMode.REMOTE
        )

    @dist_init
    def test_async_function_multi_fanout(self):
        self._test_async_function_multi(async_add_multi_fanout)

    @dist_init
    def test_async_function_multi_fanout_async(self):
        self._test_async_function_multi(
            async_add_multi_fanout,
            RPCExecMode.ASYNC
        )

    @dist_init
    def test_async_function_multi_fanout_remote(self):
        self._test_async_function_multi(
            async_add_multi_fanout,
            RPCExecMode.REMOTE
        )

    def _test_return_future(self, mode):
        # Returning a torch.futures.Future from a plain (non-async) RPC target
        # must fail at pickling time.
        with self.assertRaisesRegex(
            RuntimeError,
            "Can not pickle torch.futures.Future"
        ):
            self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size), + return_future, + mode + ) + + @dist_init + def test_return_future(self): + self._test_return_future(RPCExecMode.SYNC) + + @dist_init + def test_return_future_async(self): + self._test_return_future(RPCExecMode.ASYNC) + + @dist_init + def test_return_future_remote(self): + self._test_return_future(RPCExecMode.REMOTE) + + @dist_init + def test_rref_timeout(self): + # This test is similar to ones in FaultyProcessGroupTest, but is meant to be + # run with other backends besides ProcessGroup. + if self.rank != 0: + return + + dst_rank = (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + # 10 ms timeout + rref = rpc.remote(dst_worker, my_sleep_func, args=(2, ), timeout=0.01) + # Future corresponding to the remote creation should time out. + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + rref._get_future().wait() + # Call to ensure pending callbacks are run. + wait_until_pending_futures_and_users_flushed() + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rref.to_here() + + wait_until_owners_and_forks_on_rank(1, 1, rank=1) + + @dist_init(setup_rpc=False) + @skip_but_pass_in_sandcastle_if( + os.environ.get("RPC_INIT_WITH_TCP", None) == "1", + "init_pg_then_rpc does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614." + ) + def test_init_pg_then_rpc(self): + dist.init_process_group( + backend="gloo", + init_method=self.init_method, + rank=self.rank, + world_size=self.world_size, + ) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + # Test RPC. 
+ next_rank = (self.rank + 1) % self.world_size + ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1)) + self.assertEqual(ret, torch.ones(2, 2) + 1) + + # Test PG + dist.barrier() + + rpc.shutdown() + + @dist_init(setup_rpc=False) + @skip_but_pass_in_sandcastle_if( + os.environ.get("RPC_INIT_WITH_TCP", None) == "1", + "init_rpc_then_pg does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614." + ) + def test_init_rpc_then_pg(self): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + dist.init_process_group( + backend="gloo", + init_method=self.init_method, + rank=self.rank, + world_size=self.world_size, + ) + + # Test RPC. + next_rank = (self.rank + 1) % self.world_size + ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1)) + self.assertEqual(ret, torch.ones(2, 2) + 1) + + # Test PG + dist.barrier() + + rpc.shutdown() + + @dist_init + def test_wait_all_with_exception(self): + futs = [] + dst = worker_name((self.rank + 1) % self.world_size) + for _ in range(10): + futs.append(rpc.rpc_async(dst, raise_func)) + + with self.assertRaisesRegex(ValueError, "Expected error"): + ret = torch.futures.wait_all(futs) + + @dist_init + def test_wait_all_with_partial_exception(self): + futs = [] + dst = worker_name((self.rank + 1) % self.world_size) + for _ in range(10): + futs.append(rpc.rpc_async(dst, torch.add, args=(torch.ones(2), 1))) + + futs.append(rpc.rpc_async(dst, raise_func)) + + with self.assertRaisesRegex(ValueError, "Expected error"): + ret = torch.futures.wait_all(futs) + + @dist_init(setup_rpc=False) + @skip_but_pass_in_sandcastle_if( + os.environ.get("RPC_INIT_WITH_TCP", None) == "1", + "Test does not work with TCP init, see https://github.com/pytorch/pytorch/issues/46491", + ) + def test_init_rpc_twice(self): + initialize_pg(self.file_init_method, self.rank, 
self.world_size) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + rpc.shutdown() + + # Wait for all init to complete. + dist.barrier() + + # Use a different file name for the next initialization + new_backend_options = self.rpc_backend_options + new_backend_options.init_method += "init_2" + + # Ensure rpc initialization works again. + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=new_backend_options, + ) + + # Verify RPCs work after re-init. + dst = worker_name((self.rank + 1) % self.world_size) + rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1)) + rpc.rpc_sync(dst, foo_add, args=()) + + rpc.shutdown() + + def test_wrong_types(self): + with self.assertRaisesRegex( + TypeError, + "Argument backend must be a member of BackendType", + ): + rpc.init_rpc( + name=worker_name(self.rank), + rank=self.rank, + world_size=self.world_size, + backend="TENSORPIPE", + ) + + with self.assertRaisesRegex( + TypeError, + "Argument rpc_backend_options must be an instance of RpcBackendOptions", + ): + rpc.init_rpc( + name=worker_name(self.rank), + rank=self.rank, + world_size=self.world_size, + backend=self.rpc_backend, + rpc_backend_options={"init_method": self.init_method} + ) + + def test_cannot_infer_backend_from_options(self): + # An exception should be raised if the backend isn't specified but + # options are given which are not an instance of any of the known + # agents' option classes. + rpc_backend_options = FooBackendOptions(self.init_method) + + with self.assertRaisesRegex(TypeError, "Could not infer backend for options"): + rpc.init_rpc( + name=worker_name(self.rank), + rank=self.rank, + world_size=self.world_size, + # Do _not_ pass backend. 
                rpc_backend_options=rpc_backend_options,
            )

    @dist_init
    def test_owner_rref_backward(self):
        """OwnerRRef.backward works with local autograd, with dist_autograd
        contexts (including double backward), and raises clear errors for
        invalid inputs."""
        dst = worker_name((self.rank + 1) % self.world_size)
        t1 = torch.rand(10, 10, requires_grad=True)
        rref = rpc.RRef(t1.sum() + t1.sum())
        rref.backward()
        expected_grad = torch.ones_like(t1) * 2
        self.assertEqual(expected_grad, t1.grad)

        with dist_autograd.context() as context_id:
            t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
            rref = rpc.RRef(t2.sum())
            rref.backward(context_id)
            self.assertEqual(expected_grad, dist_autograd.get_gradients(context_id)[t1])

        # Double backward.
        with dist_autograd.context() as context_id:
            t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
            rref = rpc.RRef(t2.sum())
            rref.backward(context_id, retain_graph=True)
            rref.backward(context_id)
            self.assertEqual(expected_grad * 2, dist_autograd.get_gradients(context_id)[t1])

        # Test errors.
        with self.assertRaisesRegex(RuntimeError, "tensors does not require grad and does not have a grad_fn"):
            rpc.RRef(torch.rand(10)).backward()

        with self.assertRaisesRegex(RuntimeError, "grad can be implicitly created only for scalar outputs"):
            rpc.RRef(torch.rand(10, requires_grad=True)).backward()

        with self.assertRaisesRegex(RuntimeError, "Could not find autograd context with id: 100"):
            rpc.RRef(torch.rand(10, requires_grad=True).sum()).backward(100)

        with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
            rpc.RRef("foo").backward()

    @staticmethod
    def _sum(x):
        # RPC target: reduce to a scalar so backward() can run implicitly.
        return x.sum()

    @staticmethod
    def _identity(x):
        # RPC target: pass-through, used to create a non-tensor RRef.
        return x

    @dist_init
    def test_user_rref_backward(self):
        """UserRRef.backward accumulates gradients through a dist_autograd
        context, including a retain_graph double backward."""
        dst = worker_name((self.rank + 1) % self.world_size)
        t = torch.rand(10, requires_grad=True)
        with dist_autograd.context() as context_id:
            rref = rpc.remote(dst, RpcTest._sum, args=(t,))
            rref.backward(context_id, retain_graph=True)
            rref.backward(context_id)
            self.assertEqual(torch.ones_like(t) * 2, dist_autograd.get_gradients(context_id)[t])
+ + with dist_autograd.context() as context_id: + rref = rpc.remote(dst, RpcTest._identity, args=("foo",)) + with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"): + rref.backward(context_id) + + with self.assertRaisesRegex(RuntimeError, "User RRefs require 'dist_autograd_ctx_id' to be specified"): + rref.backward() + + @dist_init(setup_rpc=False) + def test_shutdown_errors(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + if self.rank != 0: + og_func = rpc.api._broadcast_to_followers + og_rref_func = rpc.api._delete_all_user_and_unforked_owner_rrefs + + # Monkey-patch _broadcast_to_followers to fail, which would ensure + # _all_gather on leader raises an exception. + def raise_error(sequence_id, objects_map): + og_func(sequence_id, objects_map) + raise RuntimeError('simulation') + + # Monkey-patch _delete_all_user_and_unforked_owner_rrefs to fail, + # which would ensure barrier is not called on followers. 
+ def rref_error(): + raise RuntimeError('simulation rref') + + try: + rpc.api._broadcast_to_followers = raise_error + rpc.api._delete_all_user_and_unforked_owner_rrefs = rref_error + with self.assertRaisesRegex(RuntimeError, 'simulation rref'): + rpc.shutdown() + finally: + rpc.api._broadcast_to_followers = og_func + rpc.api._delete_all_user_and_unforked_owner_rrefs = og_rref_func + else: + with self.assertRaisesRegex(RuntimeError, 'timed out in _all_gather'): + rpc.shutdown() + + dist.barrier() + + @dist_init + def test_my_parameter_server(self): + self._my_parameter_server(False) + + +class CudaRpcTest(RpcAgentTestFixture): + + @skip_if_lt_x_gpu(2) + @dist_init + def test_profiler_remote_cuda(self): + if self.rank != 1: + return + + dst_cuda_0 = (self.rank + 1) % self.world_size + dst_cuda_1 = (self.rank + 2) % self.world_size + dst_worker_cuda_0 = worker_name(dst_cuda_0) + dst_worker_cuda_1 = worker_name(dst_cuda_1) + + with _profile(use_cuda=True) as p: + fut1 = rpc.rpc_async(dst_worker_cuda_0, udf_with_torch_ops, args=(0, )) + fut2 = rpc.rpc_async(dst_worker_cuda_1, udf_with_torch_ops, args=(1, )) + fut1.wait() + fut2.wait() + + def get_name(event): + return event.name[event.name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR):] + + function_events = p.function_events + for event in function_events: + if event.is_async: + self.assertEqual(0, event.device_time_total) + self.assertEqual([], event.kernels) + self.assertEqual(0, event.device_time) + else: + if event.node_id == 1: + continue + self.assertTrue(event.node_id in [dst_cuda_0, dst_cuda_1]) + if get_name(event) in EXPECTED_REMOTE_EVENTS: + self.assertGreater(event.device_time_total, 0) + self.assertEqual(1, len(event.kernels)) + kernel = event.kernels[0] + if event.node_id == dst_cuda_0: + self.assertEqual(kernel.device, 0) + if event.node_id == dst_cuda_1: + self.assertEqual(kernel.device, 1) + self.assertGreater(event.device_time, 0) + + # Validate that EXPECTED_REMOTE_EVENTS is a subset of remotely profiled 
+ # events. + remote_events = [event for event in function_events if event.is_remote] + remote_event_names = [get_name(event) for event in remote_events if get_name(event) in EXPECTED_REMOTE_EVENTS] + self.assertEqual(set(remote_event_names), set(EXPECTED_REMOTE_EVENTS)) + + +class TensorPipeAgentRpcTest(RpcAgentTestFixture, RpcTestCommon): + + def test_mismatched_type_for_options(self): + # An exception should be raised if the options are not an instance of + # TensorPipeRpcBackendOptions. + rpc_backend_options = FooBackendOptions(self.init_method) + + with self.assertRaisesRegex( + TypeError, "`rpc_backend_options` must be a `TensorPipeRpcBackendOptions`" + ): + rpc.init_rpc( + name=worker_name(self.rank), + rank=self.rank, + world_size=self.world_size, + backend=rpc.BackendType.TENSORPIPE, + rpc_backend_options=rpc_backend_options, + ) + + def test_infer_backend_from_options(self): + rpc_backend_options = rpc.TensorPipeRpcBackendOptions( + init_method=self.init_method, + _transports=tp_transports() + ) + + rpc.init_rpc( + name=worker_name(self.rank), + rank=self.rank, + world_size=self.world_size, + # Do _not_ pass backend. + rpc_backend_options=rpc_backend_options, + ) + + self.assertIsInstance(rpc.api._get_current_rpc_agent(), rpc.TensorPipeAgent) + + # FIXME Merge this test with the corresponding one in RpcTest. + @dist_init(setup_rpc=False) + def test_set_and_get_num_worker_threads(self): + NUM_THREADS = 27 + rpc_backend_options = rpc.TensorPipeRpcBackendOptions( + init_method=self.rpc_backend_options.init_method, + num_worker_threads=NUM_THREADS, + _transports=tp_transports(), + ) + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=rpc_backend_options, + ) + + info = rpc.api._get_current_rpc_agent().get_debug_info() + self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREADS) + rpc.shutdown() + + # FIXME Merge this test with the corresponding one in RpcTest. 
+ @dist_init(setup_rpc=False) + def test_tensorpipe_set_default_timeout(self): + # Set a high timeout since it doesn't affect test runtime and ensures + # the test doesn't erroneously timeout due to slow machines. + timeout = 100 + rpc_backend_options = rpc.TensorPipeRpcBackendOptions( + init_method=self.rpc_backend_options.init_method, + num_worker_threads=self.rpc_backend_options.num_worker_threads, + rpc_timeout=timeout, + _transports=tp_transports(), + ) + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=rpc_backend_options, + ) + + default_timeout = rpc.get_rpc_timeout() + self.assertEqual(default_timeout, timeout) + rpc.shutdown() + + # FIXME Merge this test with the corresponding one in RpcTest. + @dist_init(setup_rpc=False) + def test_tensorpipe_options_throw_on_timedelta_timeout(self): + from datetime import timedelta + + timeout = timedelta() + # Ensure that constructing TensorPipeRpcBackendOptions with timedelta fails + with self.assertRaisesRegex(TypeError, "incompatible constructor arguments"): + rpc_backend_options = rpc.TensorPipeRpcBackendOptions( + init_method=self.rpc_backend_options.init_method, + num_worker_threads=self.rpc_backend_options.num_worker_threads, + rpc_timeout=timeout, + ) + + @dist_init + def _test_rref_get_type_timeout(self, blocking): + # Test where we try to get the type of a RRef from an owner, but RRef + # creation is slower than timeout passed into _get_type. 
+ dst_rank = (self.rank + 1) % self.world_size + dst = worker_name(dst_rank) + slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True)) + timeout = 0.5 + expected_err = self.get_timeout_error_regex() + # Blocking: blocks on inline call + if blocking: + with self.assertRaisesRegex(RuntimeError, expected_err): + slow_rref._get_type(timeout=timeout, blocking=blocking) + # Non-blocking: blocks on wait + else: + fut = slow_rref._get_type(timeout=timeout, blocking=blocking) + with self.assertRaisesRegex(RuntimeError, expected_err): + fut.wait() + + # FIXME We wait until the remote completed creating the OwnerRRef + # because there's currently a race if we shut down RPC before that. + slow_rref.to_here() + + def test_rref_get_type_timeout_blocking(self): + self._test_rref_get_type_timeout(blocking=True) + + def test_rref_get_type_timeout_non_blocking(self): + self._test_rref_get_type_timeout(blocking=False) + + @dist_init + def test_op_with_invalid_args(self): + dst = worker_name((self.rank + 1) % self.world_size) + with self.assertRaisesRegex( + RuntimeError, "Overloaded torch operator invoked from Python failed to match any schema" + ): + rpc.rpc_sync(dst, torch.add, args=()) + + def _test_rref_proxy_timeout(self, rref_proxy_api): + dst_rank = (self.rank + 1) % self.world_size + dst = worker_name(dst_rank) + rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), )) + # Ensure RRef is created on remote node. 
+ rref.to_here() + rref_api = getattr(rref, rref_proxy_api) + self.assertTrue(rref_api is not None, f"Failed to get RRef proxy api: {rref_proxy_api}") + expected_error = self.get_timeout_error_regex() + timeout = 2 + with self.assertRaisesRegex(RuntimeError, expected_error): + result = rref_api(timeout=timeout).my_slow_method(torch.ones(2, 2)) + if rref_api == rref.rpc_async: + result.wait() + elif rref_api == rref.remote: + result._get_future().wait() + + # Case where rpc.remote() is stuck and exceeds timeout + slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True)) + timeout = 0.01 + rref_api = getattr(slow_rref, rref_proxy_api) + # Note that even when we call rref.rpc_async() in this case, we + # time out in future creation, not waiting for future. This is because + # rref proxy function calls rref._get_type before returning future, + # which blocks on the RRef being created on owner node, until the + # specified timeout. + with self.assertRaisesRegex(RuntimeError, expected_error): + result = rref_api(timeout=timeout).my_instance_method(torch.ones(2, 2)) + # rpc_async returns immediately and surface a timeout through wait() + if rref_api == slow_rref.rpc_async: + result.wait() + + # FIXME We wait until the remote completed creating the OwnerRRef + # because there's currently a race if we shut down RPC before that. 
+ slow_rref.to_here() + + @dist_init + def test_rref_proxy_timeout(self): + for rpc_api in ["rpc_sync", "rpc_async", "remote"]: + self._test_rref_proxy_timeout(rpc_api) + + @dist_init + def test_send_to_rank_sparse(self): + dst_rank = (self.rank + 1) % self.world_size + + # Test sparse tensor + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + x = build_sparse_tensor() + y = build_sparse_tensor() + expected_tensor = (x + y) + ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(x, y)) + self.assertEqual(expected_tensor, ret) + + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + x = build_sparse_tensor(coalesce=True) + y = build_sparse_tensor(coalesce=True) + expected_tensor = (x + y) + ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(x, y)) + self.assertEqual(expected_tensor, ret) + + @dist_init + def test_self_py_udf_remote_sparse(self): + self._self_py_udf_remote( + rpc.get_worker_info(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() + ) + + @dist_init + def test_self_remote_rref_as_rpc_arg_sparse(self): + dst = worker_name((self.rank + 1) % self.world_size) + self._self_remote_rref_as_rpc_arg( + dst, + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() + ) + + @dist_init + def test_self_remote_rref_as_self_rpc_arg_sparse(self): + self._self_remote_rref_as_rpc_arg( + rpc.get_worker_info(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() + ) + + @dist_init + def test_self_remote_rref_as_remote_arg_sparse(self): + dst = worker_name((self.rank + 1) % self.world_size) + self._self_remote_rref_as_remote_arg( + dst, + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() + ) + + @dist_init + def test_self_remote_rref_as_self_remote_arg_sparse(self): + self._self_remote_rref_as_remote_arg( + rpc.get_worker_info(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() + ) + 
+ def test_world_size_one_sparse(self): + self._world_size_one( + build_sparse_tensor(), + build_sparse_tensor() + ) + + @dist_init + def test_multi_rpc_sparse(self): + self._multi_rpc(True) + + def test_wait_all_workers_sparse(self): + self._wait_all_workers(heavy_rpc_sparse, build_sparse_tensor()) + + def test_wait_all_workers_twice_sparse(self): + self._wait_all_workers_twice(heavy_rpc_sparse, build_sparse_tensor()) + + @dist_init + def test_py_sparse_tensors_in_container(self): + n = self.rank + 1 + dst_rank = n % self.world_size + a = [build_sparse_tensor(), build_sparse_tensor()] + ret = rpc.rpc_sync( + worker_name(dst_rank), my_container_sum, args=(a,) + ) + self.assertEqual(ret, my_container_sum(a)) + + @dist_init + def test_nested_rpc_sparse(self): + self._nested_rpc(nested_rpc_sparse, build_sparse_tensor() * 2) + + @dist_init + def test_stress_heavy_rpc_sparse(self): + self._stress_test_rpc(heavy_rpc_sparse, repeat=20, args=(build_sparse_tensor(),)) + + @dist_init + def test_builtin_remote_ret_sparse(self): + self._builtin_remote_ret( + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() * 2 + ) + + @dist_init + def test_builtin_remote_self_sparse(self): + self._builtin_remote_self( + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() * 2 + ) + + @dist_init + def test_multi_builtin_remote_ret_sparse(self): + self._test_multi_remote_call( + torch.add, True, + args_fn=RpcTest._multi_args_fn + ) + + @dist_init + def test_multi_py_udf_remote_sparse(self): + self._test_multi_remote_call( + my_function, + True, + kwargs_fn=RpcTest._multi_kwargs_fn + ) + + @dist_init + def test_py_rref_args_sparse(self): + self._py_rref_args( + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() * 4 + ) + + @dist_init + def test_py_rref_args_user_share_sparse(self): + self._py_rref_args_user_share( + build_sparse_tensor(), + build_sparse_tensor(), + 
build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() * 6 + ) + + @dist_init + def test_py_rpc_rref_args_sparse(self): + self._py_rpc_rref_args( + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() * 6 + ) + + @dist_init + def test_nested_remote_sparse(self): + self._nested_remote( + nested_remote_sparse, + build_sparse_tensor() + build_sparse_tensor() + ) + + @dist_init + def test_nested_rref_sparse(self): + self._nested_rref( + nested_rref_sparse, + build_sparse_tensor() * 2, + build_sparse_tensor() * 2 + ) + + @dist_init + def test_nested_rref_stress_sparse(self): + self._nested_rref_stress( + nested_rref_sparse, + build_sparse_tensor() * 2, + build_sparse_tensor() * 2 + ) + + @dist_init + def test_my_parameter_server_sparse(self): + self._my_parameter_server(True) + + # Test init_rpc without world_size argument + @dist_init(setup_rpc=False) + def test_dynamic_rpc_init_rpc(self): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + rpc.shutdown() + + # Dynamic RPC new ranks communicate with existing ranks + @dist_init(setup_rpc=False) + def test_dynamic_rpc_new_rank_can_communicated_with_existing_rank(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + if self.rank == 0: + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + + # Rank 0 will be initialized with RPC after this barrier + dist.barrier() + + if self.rank != 0: + # Newly joined ranks will be able to communicate with rank 0, since that was created first + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + result = 
rpc.rpc_sync(worker_name(0), torch.add, args=(torch.tensor(1), torch.tensor(1))) + self.assertEqual(torch.add(torch.tensor(1), torch.tensor(1)), result) + + # Barrier to ensure that all rpc_sync calls are finished + dist.barrier() + rpc.shutdown() + + # Dynamic RPC existing ranks can communicate with new ranks + @dist_init(setup_rpc=False) + def test_dynamic_rpc_existing_rank_can_communicate_with_new_rank(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + if self.rank == 0: + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + + # Rank 0 will be initialized with RPC after this barrier + dist.barrier() + + # Rest of ranks join after barrier + if self.rank != 0: + # Newly joined ranks will be able to communicate with rank 0, since that was created first + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + + dist.barrier() + if self.rank == 0: + for i in range(1, self.world_size): + result = rpc.rpc_sync(worker_name(i), torch.add, args=(torch.tensor(1), torch.tensor(1))) + self.assertEqual(torch.add(torch.tensor(1), torch.tensor(1)), result) + + # Barrier to ensure that all rpc_sync calls are finished + dist.barrier() + rpc.shutdown() + + # Dynamic RPC existing ranks can communicate with new ranks using CUDA rpc + @skip_if_lt_x_gpu(2) + @dist_init(setup_rpc=False) + def test_dynamic_rpc_existing_rank_can_communicate_with_new_rank_cuda(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + if self.rank == 0: + options = self.rpc_backend_options + for i in range(1, self.world_size): + dst = worker_name(i) + options.set_device_map(dst, {1: 0}) + options.set_device_map(dst, {0: 1}) + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=options, + ) + + # Rank 0 will be initialized 
with RPC after this barrier + dist.barrier() + + # Rest of ranks join after barrier + if self.rank != 0: + # Newly joined ranks will be able to communicate with rank 0, since that was created first + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + + # TODO: Cuda RPC is failing due to: + # terminate called after throwing an instance of 'c10::Error' + # what(): 0 <= device && static_cast(device) < device_allocator.size() + # INTERNAL ASSERT FAILED at "../c10/cuda/CUDACachingAllocator.cpp":1937, + # please report a bug to PyTorch. Allocator not initialized for device 1: did you call init? + # dist.barrier() + # if self.rank == 0: + # for i in range(1, self.world_size): + # x = torch.ones(2) + # result_on_device_0 = rpc.rpc_sync(worker_name(i), torch.add, args=(x.to(0), 1)) + # result_on_device_1 = rpc.rpc_sync(worker_name(i), torch.add, args=(x.to(1), 1)) + # self.assertEqual(torch.add(torch.ones(2), 1), result_on_device_0) + # self.assertEqual(torch.device('cuda:0'), result_on_device_0.device) + # self.assertEqual(torch.add(torch.ones(2), 1), result_on_device_1) + # self.assertEqual(torch.device('cuda:1'), result_on_device_1.device) + + # Barrier to ensure that all rpc_sync calls are finished + dist.barrier() + rpc.shutdown() + + @dist_init(setup_rpc=False) + def test_dynamic_rpc_init_rpc_without_rank(self): + # default initialization uses file init + with self.assertRaisesRegex(ValueError, "rank parameter missing"): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rpc_backend_options=self.rpc_backend_options, + ) + + # env init + with self.assertRaisesRegex(ValueError, "environment variable RANK expected"): + rpc_backend_options = rpc.TensorPipeRpcBackendOptions(init_method="env://") + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rpc_backend_options=rpc_backend_options, + ) + + # tcp init + with 
self.assertRaisesRegex(ValueError, "rank parameter missing"): + rpc_backend_options = rpc.TensorPipeRpcBackendOptions(init_method="tcp://127.0.0.1:23456") + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rpc_backend_options=rpc_backend_options, + ) + + @dist_init(setup_rpc=False) + def test_dynamic_and_static_init_rpc_together(self): + # Initialize a static rpc group with size = self.world_size - 1 + dist.init_process_group( + backend='gloo', + init_method=self.file_init_method, + rank=self.rank, + world_size=self.world_size) + + world_size_minus_one = self.world_size - 1 + if self.rank < world_size_minus_one: + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=world_size_minus_one, + rpc_backend_options=self.rpc_backend_options, + ) + + dist.barrier() + + # Attempt to add an additional dynamic group member + if self.rank == world_size_minus_one: + # Expect error message to be thrown + with self.assertRaisesRegex(RuntimeError, "RPC group mixes statically and dynamically\ + initialized members which is not supported."): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + +class TensorPipeAgentCudaRpcTest(RpcAgentTestFixture, RpcTestCommon): + + def _test_device_maps(self, options, errMsg): + with self.assertRaisesRegex(ValueError, errMsg): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + self.assertFalse(rpc.api._is_current_rpc_agent_set()) + + @skip_if_lt_x_gpu(2) + def test_device_maps_wrong_worker_name(self): + options = self.rpc_backend_options + options.set_device_map("none_exist", {0: 1}) + + self._test_device_maps( + options, + errMsg="Node worker0 has invalid target node names in its device maps" + ) + + @skip_if_lt_x_gpu(1) + def 
test_device_maps_invalid_max_local_device(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {torch.cuda.device_count(): 0}) + + self._test_device_maps( + options, + errMsg="Node worker0 has source devices with invalid indices in its device map for worker1" + ) + + @skip_if_lt_x_gpu(1) + def test_device_maps_invalid_max_remote_device(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {0: torch.cuda.device_count()}) + + self._test_device_maps( + options, + errMsg="Node worker0 has target devices with invalid indices in its device map for worker1" + ) + + @skip_if_lt_x_gpu(2) + def test_device_maps_many_to_one(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {1: 0}) + options.set_device_map(dst, {0: 0}) + + self._test_device_maps( + options, + errMsg="Node worker0 has duplicated target devices in its device map for worker1" + ) + + @skip_if_lt_x_gpu(2) + def test_device_maps_one_to_many(self): + if self.rank == 0: + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {0: 1}) + with self.assertRaisesRegex( + ValueError, "`set_device_map` only supports 1-to-1 mapping" + ): + options.set_device_map(dst, {0: 0}) + + @skip_if_lt_x_gpu(1) + def test_device_maps_invalid_min_device(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + with self.assertRaisesRegex( + RuntimeError, "Device index must not be negative" + ): + options.set_device_map(dst, {-1: 0}) + + with self.assertRaisesRegex( + RuntimeError, "Device index must not be negative" + ): + options.set_device_map(dst, {0: -1}) + + @staticmethod + def _gpu_add(x, y): + if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 1]): + return (x + y).to(0) + else: + 
raise ValueError("Wrong device affinity") + + @skip_if_lt_x_gpu(2) + def test_device_maps_gpu(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {0: 1, 1: 0}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + ret = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._gpu_add, + args=(torch.zeros(2).to(0), torch.ones(2).to(0)) + ) + self.assertEqual(ret.device, torch.device(1)) + self.assertEqual(ret, (torch.zeros(2) + torch.ones(2)).to(1)) + rpc.shutdown() + + @staticmethod + def _gpu_add_given_devices(x, y, x_to, y_to, z_to): + x_device = "cpu" if x.device.type == "cpu" else x.device.index + y_device = "cpu" if y.device.type == "cpu" else y.device.index + if x_device == x_to and y_device == y_to: + return x.to(z_to) + y.to(z_to) + else: + raise ValueError("Wrong device affinity") + + def _test_device_maps_gpu(self, x_from, y_from, z_to, device_map, dst=None, fn=None): + fn = TensorPipeAgentCudaRpcTest._gpu_add_given_devices if fn is None else fn + x_to = device_map[x_from] + y_to = device_map[y_from] + + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) if dst is None else dst + options.set_device_map(dst, device_map) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + x = torch.zeros(2).to(x_from) + y = torch.ones(2).to(y_from) + + ret = rpc.rpc_sync(dst, fn, args=(x, y, x_to, y_to, z_to)) + + reverse_device_map = {device_map[k] : k for k in device_map} + z_from = reverse_device_map[z_to] + + ret_device = "cpu" if ret.device.type == "cpu" else ret.device.index + self.assertEqual(ret_device, z_from) + self.assertEqual(ret, torch.ones(2).to(z_from)) + + rpc.shutdown() + + def test_device_map_cpu(self): + 
self._test_device_maps_gpu( + x_from="cpu", + y_from="cpu", + z_to="cpu", + device_map={"cpu" : "cpu"}, + fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices, + ) + + @skip_if_lt_x_gpu(1) + def test_device_map_cpu_to_gpu_default(self): + self._test_device_maps_gpu( + x_from="cpu", + y_from="cpu", + z_to=0, + device_map={"cpu" : 0}, + fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices, + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_cpu_to_gpu_non_default(self): + self._test_device_maps_gpu( + x_from="cpu", + y_from="cpu", + z_to=1, + device_map={"cpu" : 1}, + fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices, + ) + + @skip_if_lt_x_gpu(1) + def test_device_map_gpu_to_cpu_default(self): + self._test_device_maps_gpu( + x_from=0, + y_from=0, + z_to="cpu", + device_map={0 : "cpu"}, + fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices, + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_to_cpu_non_default(self): + self._test_device_maps_gpu( + x_from=1, + y_from=1, + z_to="cpu", + device_map={1 : "cpu"}, + fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices, + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_default(self): + self._test_device_maps_gpu( + x_from=0, + y_from=0, + z_to=0, + device_map={0 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_non_default(self): + self._test_device_maps_gpu( + x_from=1, + y_from=1, + z_to=1, + device_map={1 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_default_to_non_default(self): + self._test_device_maps_gpu( + x_from=0, + y_from=0, + z_to=1, + device_map={0 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_non_default_to_default(self): + self._test_device_maps_gpu( + x_from=1, + y_from=1, + z_to=0, + device_map={1 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_1(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=0, + device_map={0 : 0, 1 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_2(self): + self._test_device_maps_gpu( 
+ x_from=0, + y_from=1, + z_to=1, + device_map={0 : 0, 1 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_3(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=0, + device_map={0 : 0, 1 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_4(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=1, + device_map={0 : 0, 1 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_5(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=0, + device_map={0 : 1, 1 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_6(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=1, + device_map={0 : 1, 1 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_7(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=0, + device_map={0 : 1, 1 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_8(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=1, + device_map={0 : 1, 1 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_1(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=0, + device_map={0 : 0, 1 : 1}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_2(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=1, + device_map={0 : 0, 1 : 1}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_3(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=0, + device_map={0 : 0, 1 : 1}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_4(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=1, + device_map={0 : 0, 1 : 1}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_5(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=0, + device_map={0 : 1, 1 : 0}, + 
dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_6(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=1, + device_map={0 : 1, 1 : 0}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_7(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=0, + device_map={0 : 1, 1 : 0}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_8(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=1, + device_map={0 : 1, 1 : 0}, + dst=worker_name(self.rank) + ) + + @staticmethod + def _gpu_add_multi_gpu(x, y): + if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 0]): + return x.to(0) + y, x - y.to(1) + else: + raise ValueError("Wrong device affinity") + + def _test_device_maps_multi_gpu(self, dst): + options = self.rpc_backend_options + options.set_device_map(dst, {0: 1}) + options.set_device_map(dst, {1: 0}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + x = torch.zeros(2).to(0) + y = torch.ones(2).to(1) + rets = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu, + args=(x, y) + ) + + self.assertEqual(rets[0].device, torch.device(1)) + self.assertEqual(rets[1].device, torch.device(0)) + self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1)) + self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0)) + rpc.shutdown() + + @skip_if_lt_x_gpu(2) + def test_device_maps_multi_gpu(self): + dst = worker_name((self.rank + 1) % self.world_size) + self._test_device_maps_multi_gpu(dst) + + @skip_if_lt_x_gpu(2) + def test_device_maps_multi_gpu_self(self): + dst = worker_name(self.rank) + self._test_device_maps_multi_gpu(dst) + + @staticmethod + def _gpu_add_return_to_gpu(x, y): + if x.device.type == 'cpu' and y.device.type == 'cpu': + return (x + 
y).to(0), (x - y).to(1), (x * y).to(2), (x / y).to(3) + else: + raise ValueError("Wrong device affinity") + + @skip_if_lt_x_gpu(2) + def test_device_maps_in_options(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=rpc.TensorPipeRpcBackendOptions( + init_method=options.init_method, + num_worker_threads=options.num_worker_threads, + device_maps={dst: {0: 1, 1: 0}}, + _transports=tp_transports() + ) + ) + + rets = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu, + args=(torch.zeros(2).to(0), torch.ones(2).to(1)) + ) + self.assertEqual(rets[0].device, torch.device(1)) + self.assertEqual(rets[1].device, torch.device(0)) + self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1)) + self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0)) + rpc.shutdown() + + def _test_device_maps_return_to_gpu(self, dst): + options = self.rpc_backend_options + + options.set_device_map(dst, {0: 1}) + options.set_device_map(dst, {1: 2}) + options.set_device_map(dst, {2: 3}) + options.set_device_map(dst, {3: 0}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + rets = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._gpu_add_return_to_gpu, + args=(torch.zeros(2), torch.ones(2)) + ) + for i in range(len(rets)): + self.assertEqual(rets[i].device, torch.device((3 + i) % 4)) + self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(3)) + self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0)) + self.assertEqual(rets[2], (torch.zeros(2) * torch.ones(2)).to(1)) + self.assertEqual(rets[3], (torch.zeros(2) / torch.ones(2)).to(2)) + rpc.shutdown() + + @skip_if_lt_x_gpu(4) + def test_device_maps_return_to_gpu(self): + dst = 
worker_name((self.rank + 1) % self.world_size) + self._test_device_maps_return_to_gpu(dst) + + @skip_if_lt_x_gpu(4) + def test_device_maps_return_to_gpu_self(self): + dst = worker_name(self.rank) + self._test_device_maps_return_to_gpu(dst) + + @staticmethod + def _add_to_gpu(x, y): + return (x + y).to(0) + + def _test_device_maps_missing_config(self, mode): + dst = worker_name((self.rank + 1) % self.world_size) + errMsg = ( + "TensorPipe RPC backend only supports CPU tensors by default.*" + "`set_device_map` on `TensorPipeRpcBackendOptions`" + ) + + with self.assertRaisesRegex(RuntimeError, errMsg): + if mode == RPCExecMode.SYNC: + rpc.rpc_sync(dst, torch.add, args=(torch.zeros(2).to(0), 1)) + elif mode == RPCExecMode.REMOTE: + rpc.remote(dst, torch.add, args=(torch.zeros(2).to(0), 1)).to_here() + else: + raise ValueError(f"unexpected mode {mode}") + + # make sure RPC is still functioning + ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1)) + self.assertEqual(ret, torch.ones(2) + 1) + + def _test_device_maps_missing_config_response(self, mode): + dst = worker_name((self.rank + 1) % self.world_size) + errMsg = "Response device mapping is not available" + + with self.assertRaisesRegex(RuntimeError, errMsg): + if mode == RPCExecMode.SYNC: + rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._add_to_gpu, + args=(torch.zeros(2), 1) + ) + elif mode == RPCExecMode.REMOTE: + rpc.remote( + dst, + TensorPipeAgentCudaRpcTest._add_to_gpu, + args=(torch.zeros(2), 1) + ).to_here() + else: + raise ValueError(f"unexpected mode {mode}") + + # make sure RPC is still functioning + ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1)) + self.assertEqual(ret, torch.ones(2) + 1) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config(self): + self._test_device_maps_missing_config(RPCExecMode.SYNC) + + @skip_if_lt_x_gpu(1) + def test_device_maps_missing_config_not_timeout(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = 
self.rpc_backend_options + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options + ) + + timeout = rpc.get_rpc_timeout() + + tik = time.time() + self._test_device_maps_missing_config(RPCExecMode.SYNC) + rpc.shutdown() + tok = time.time() + + self.assertTrue(tok - tik < timeout) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config_loop(self): + for _ in range(self.rpc_backend_options.num_worker_threads + 5): + self._test_device_maps_missing_config(RPCExecMode.SYNC) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config_response(self): + self._test_device_maps_missing_config_response(RPCExecMode.SYNC) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config_response_loop(self): + for _ in range(self.rpc_backend_options.num_worker_threads + 5): + self._test_device_maps_missing_config_response(RPCExecMode.SYNC) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config_remote(self): + self._test_device_maps_missing_config(RPCExecMode.REMOTE) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config_remote_response(self): + self._test_device_maps_missing_config_response(RPCExecMode.REMOTE) + + @skip_if_lt_x_gpu(2) + def test_device_maps_remote(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {1: 0}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + rref = rpc.remote( + dst, + TensorPipeAgentCudaRpcTest._add_to_gpu, + args=(torch.zeros(2), 1) + ) + + self.assertEqual(rref.to_here().device.index, 1) + self.assertEqual(rref.to_here(), torch.ones(2).to(1)) + + rpc.shutdown() + + @staticmethod + def _slow_add_on_user_stream(x, y): + s0 = torch.cuda.current_stream(x.device) + 
s1 = torch.cuda.Stream(device=x.device) + s1.wait_stream(s0) + x.record_stream(s1) + y.record_stream(s1) + with torch.cuda.stream(s1): + torch.cuda._sleep(10 * FIFTY_MIL_CYCLES) + z = x + y + s0.wait_stream(s1) + z.record_stream(s0) + return z + + def _test_custom_stream(self, fn, device_map): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, device_map) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + fn(dst) + + rpc.shutdown() + + def _test_stream_sync(self, dst): + x = torch.ones(2, 2).to(0) + ret = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._slow_add_on_user_stream, + args=(x, x) + ) + self.assertEqual(ret, 2 * x) + + @skip_if_lt_x_gpu(2) + def test_custom_stream(self): + self._test_custom_stream(self._test_stream_sync, {"cuda:0": "cuda:1"}) + + def _test_stream_multi_async(self, dst): + futs = [] + for i in range(20): + x = torch.ones(2, 2).to(0) * i + futs.append( + rpc.rpc_async( + dst, + TensorPipeAgentCudaRpcTest._slow_add_on_user_stream, + args=(x, x) + ) + ) + + for i in range(20): + self.assertEqual(futs[i].wait(), 2 * torch.ones(2, 2).to(0) * i) + + @skip_if_lt_x_gpu(2) + def test_custom_stream_multi(self): + self._test_custom_stream( + self._test_stream_multi_async, + {"cuda:0": "cuda:1"} + ) + + @staticmethod + def _nested_slow_add_on_user_stream(dst, x, y, z): + ret = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._slow_add_on_user_stream, + args=(x, y) + ) + + return TensorPipeAgentCudaRpcTest._slow_add_on_user_stream(ret, z) + + def _test_stream_nested_sync(self, dst): + x = torch.ones(2, 2).to(0) + y = torch.ones(2, 2).to(0) * 2 + z = torch.ones(2, 2).to(0) * 3 + nested_dst = worker_name((self.rank + 2) % self.world_size) + ret = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream, + args=(nested_dst, x, y, z) + ) + 
self.assertEqual(ret, 6 * x) + + @skip_if_lt_x_gpu(2) + def test_custom_stream_nested(self): + self._test_custom_stream( + self._test_stream_nested_sync, + {"cuda:0": "cuda:1", "cuda:1": "cuda:0"} + ) + + def _test_stream_nested_multi_async(self, dst): + if self.rank == 0: + futs = [] + n = 5 + xs, ys, zs = [], [], [] + for i in range(n): + x = torch.ones(2, 2).to(0) * (i - 1) + y = torch.ones(2, 2).to(0) * i + z = torch.ones(2, 2).to(0) * (i + 1) + xs.append(x) + ys.append(y) + zs.append(z) + nested_dst = worker_name((self.rank + 2) % self.world_size) + futs.append( + rpc.rpc_async( + dst, + TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream, + args=(nested_dst, x, y, z) + ) + ) + + for i in range(n): + self.assertEqual(futs[i].wait(), xs[i] + ys[i] + zs[i]) + + @skip_if_lt_x_gpu(2) + def test_custom_stream_nested_multi(self): + self._test_custom_stream( + self._test_stream_nested_multi_async, + {"cuda:0": "cuda:1", "cuda:1": "cuda:0"} + ) + + @staticmethod + def _gpu_add_wrong_gpus(x, y): + if x.is_cuda and y.is_cuda: + return x.cpu() + y.cuda() + else: + raise ValueError("Wrong device affinity") + + @skip_if_lt_x_gpu(1) + def test_device_mismatch(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {0: 0}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + x = torch.zeros(2).to(0) + y = torch.ones(2).to(0) + + with self.assertRaisesRegex( + RuntimeError, + "Expected all tensors to be on the same device, but found at least two devices" + ): + rets = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._gpu_add_wrong_gpus, + args=(x, y) + ) + + rpc.shutdown() + + def _test_rref_synchronization(self, local_device, remote_device): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {local_device : 
remote_device}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + if self.rank == 1: + # This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here() + # If to_here() is properly synchronized with forward(x) the results must be identical + # This test needs multiple iterations and significant batch size to simulate real + # training of a CNN of MNIST-like data. + # see https://github.com/pytorch/pytorch/issues/54771 + rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,)) + for _ in range(10): + x = torch.randn(200, 1, 28, 28).to(local_device) + actual = rref.remote().forward(x).to_here() + expected = rref.rpc_sync().forward(x) + self.assertEqual(actual, expected) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_rref_to_here_synchronization1(self): + self._test_rref_synchronization("cuda:0", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_to_here_synchronization2(self): + self._test_rref_synchronization("cuda:1", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_to_here_synchronization3(self): + self._test_rref_synchronization("cuda:1", "cuda:1") + + @skip_if_lt_x_gpu(2) + def test_rref_to_here_synchronization4(self): + self._test_rref_synchronization("cuda:0", "cuda:1") + + def _test_rref_as_arg_synchronization( + self, + local_device, + remote_device, + devicesOptions=None + ): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {local_device: remote_device}) + + input_src = worker_name((self.rank - 1 + self.world_size) % self.world_size) + options.set_device_map(input_src, {remote_device: local_device}) + + if devicesOptions is not None: + options.set_devices(devicesOptions[self.rank]) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, 
+ ) + + if self.rank == 1: + # This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here() + # If to_here() is properly synchronized with forward(x) the results must be identical + # This test needs multiple iterations and significant batch size to simulate real + # training of a CNN of MNIST-like data. + # see https://github.com/pytorch/pytorch/issues/54771 + rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,)) + for _ in range(10): + rref_x = RRef(torch.randn(200, 1, 28, 28).to(local_device)) + actual = rref.remote().forward(rref_x, True).to_here() + expected = rref.rpc_sync().forward(rref_x, True) + self.assertEqual(actual, expected) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_rref_as_arg_synchronization1(self): + self._test_rref_as_arg_synchronization("cuda:0", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_as_arg_synchronization2(self): + self._test_rref_as_arg_synchronization("cuda:1", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_as_arg_synchronization3(self): + self._test_rref_as_arg_synchronization("cuda:1", "cuda:1") + + @skip_if_lt_x_gpu(2) + def test_rref_as_arg_synchronization4(self): + self._test_rref_as_arg_synchronization("cuda:0", "cuda:1") + + @skip_if_lt_x_gpu(1) + def test_rref_as_arg_synchronization5(self): + self._test_rref_as_arg_synchronization( + "cuda:0", + "cuda:0", + [["cuda:0"] for _ in range(4)], # devicesOptions + ) + + @staticmethod + def _rref_relay(rref): + return rref.to_here() + + def _test_rref_forward_synchronization(self, local_device, remote_device): + options = self.rpc_backend_options + + input_src = worker_name(0) + model_dst = worker_name(1) + out_relay = worker_name(2) + + if self.rank == 0: + # for 1) model construction 2) forward execution + options.set_device_map(model_dst, {local_device: remote_device}) + + # Forward output will be first copied to the relay node before + # returning to the worker. 
This is intentional, to test RRef + # forward CUDA stream synchronizations. + options.set_device_map(out_relay, {local_device: local_device}) + elif self.rank == 1: + # worker1 hosts the model and runs forward. The forward functions + # calls RRef.to_here(), hence needs to configure the device map + options.set_device_map(input_src, {remote_device: local_device}) + elif self.rank == 2: + # worker2 will get the out RRef and call to_here() and hence, needs + # to configure device map. + options.set_device_map(model_dst, {local_device: remote_device}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + if self.rank == 0: + # This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here() + # If to_here() is properly synchronized with forward(x) the results must be identical + # This test needs multiple iterations and significant batch size to simulate real + # training of a CNN of MNIST-like data. 
+ # see https://github.com/pytorch/pytorch/issues/54771 + rref = rpc.remote(model_dst, MyConvNetForMNIST, args=(remote_device,)) + for _ in range(10): + rref_input = RRef(torch.randn(200, 1, 28, 28).to(local_device)) + rref_out = rref.remote().forward(rref_input, True) + out = rpc.remote( + out_relay, + TensorPipeAgentCudaRpcTest._rref_relay, + args=(rref_out,) + ).to_here() + expected = rref.rpc_sync().forward(rref_input, True) + self.assertEqual(out, expected) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_rref_forward_synchronization1(self): + self._test_rref_forward_synchronization("cuda:0", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_forward_synchronization2(self): + self._test_rref_forward_synchronization("cuda:0", "cuda:1") + + @skip_if_lt_x_gpu(2) + def test_rref_forward_synchronization3(self): + self._test_rref_forward_synchronization("cuda:1", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_forward_synchronization4(self): + self._test_rref_forward_synchronization("cuda:1", "cuda:1") + + def _test_owner_rref_forward_synchronization(self, local_device, remote_device): + if self.rank == 0: + options = self.rpc_backend_options + options.set_device_map("w0", {local_device: remote_device}) + rpc.init_rpc( + "w0", + rank=0, + world_size=1, + rpc_backend_options=options + ) + + model = rpc.remote( + "w0", torch.nn.Linear, (2048, 20000) + ).remote().to(remote_device) + for _ in range(30): + data = torch.rand(2048, 2048).to(local_device) + output = model.rpc_sync().forward(data) + # to_here() internally calls localValue as the caller is + # the owner of the RRef. 
+ v0 = rpc.RRef(output).remote().sum().to_here().item() + v1 = output.sum().item() + self.assertEqual(v0, v1) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_owner_rref_forward_synchronization1(self): + self._test_owner_rref_forward_synchronization("cuda:0", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_owner_rref_forward_synchronization2(self): + self._test_owner_rref_forward_synchronization("cuda:0", "cuda:1") + + @skip_if_lt_x_gpu(2) + def test_owner_rref_forward_synchronization3(self): + self._test_owner_rref_forward_synchronization("cuda:1", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_owner_rref_forward_synchronization4(self): + self._test_owner_rref_forward_synchronization("cuda:1", "cuda:1") + + @staticmethod + def _return_tensor_view(i): + x = torch.ones(1000, 200).cuda(0) * i + torch.cuda._sleep(10 * FIFTY_MIL_CYCLES) + # serialization of the return value will create a new tensor from the + # view, which is done outside of the user function. + return x.split(100)[0] + + @skip_if_lt_x_gpu(1) + def test_tensor_view_as_return_value(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {0 : 0}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + futs = [] + for i in range(5): + futs.append(rpc.rpc_async( + dst, + TensorPipeAgentCudaRpcTest._return_tensor_view, + args=(i,) + )) + + for i in range(5): + self.assertEqual(torch.ones(100, 200) * i, futs[i].wait()) + + rpc.shutdown() + + @skip_if_lt_x_gpu(2) + def test_devices_option_mismatch(self): + with self.assertRaisesRegex( + ValueError, + "Node worker0 has unexpected source devices in its device map for worker1" + ): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {0 : 0}) + options.set_devices([1]) + + rpc.init_rpc( + 
name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + rpc.shutdown() + + @skip_if_lt_x_gpu(2) + def test_devices_option_mismatch_reverse(self): + with self.assertRaisesRegex( + ValueError, + "Node worker0 has unexpected target devices in its device map for worker1" + ): + dst = worker_name((self.rank + 1) % self.world_size) + + options = rpc.TensorPipeRpcBackendOptions( + init_method=self.rpc_backend_options.init_method, + num_worker_threads=self.rpc_backend_options.num_worker_threads, + device_maps={dst: {0 : 1}}, + devices=[0] + ) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_cuda_future_device_as_int(self): + fut = Future(devices=[0]) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_device_as_str(self): + fut = Future(devices=["cuda:0"]) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_device_as_device(self): + fut = Future(devices=[torch.device("cuda", 0)]) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_device_not_cuda(self): + with self.assertRaisesRegex( + ValueError, "Expected devices to have indices, got cpu" + ): + fut = Future(devices=["cpu"]) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_cuda_tensor(self): + self._test_cuda_future_extraction( + wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=False + ) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_list_with_cuda_tensor(self): + self._test_cuda_future_extraction( + wrapper=lambda t: [t], unwrapper=operator.itemgetter(0), sparse_tensor=False + ) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_custom_class_with_cuda_tensor(self): + self._test_cuda_future_extraction( + wrapper=TensorWrapper, unwrapper=lambda v: v.tensor, sparse_tensor=False + ) + + @skip_if_lt_x_gpu(2) + def 
test_cuda_future_callback_changes_devices(self): + # We check proper CUDA stream synchronization by filling the tensor with + # the expected value in one stream, and reading it from another stream. + tensor0 = torch.zeros((100,), device="cuda:0") + tensor1 = torch.zeros((100,), device="cuda:1") + parent_future = Future(devices=["cuda:0", "cuda:1"]) + + def cb(fut): + t0 = fut.value() + tensor1.copy_(t0, non_blocking=True) + return tensor1 + + child_future = parent_future.then(cb) + with torch.cuda.device("cuda:0"): + stream = torch.cuda.Stream() + with torch.cuda.stream(stream): + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + tensor0.fill_(1) + parent_future.set_result(tensor0) + with torch.cuda.device("cuda:1"): + another_stream = torch.cuda.Stream() + with torch.cuda.stream(another_stream): + self.assertTrue(torch.eq(child_future.wait(), 1).all().item()) + + @skip_if_lt_x_gpu(2) + def test_cuda_future_value_on_bad_device(self): + tensor0 = torch.zeros((100,), device="cuda:0") + tensor1 = torch.zeros((100,), device="cuda:1") + parent_future = Future(devices=["cuda:1"]) + + # As a plus, we test that futures still invoke callbacks even in case of + # error, and that the child futures are successful if those callbacks + # don't access the parent future. 
+ def cb(fut): + with torch.cuda.device("cuda:1"): + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + tensor1.fill_(1) + return tensor1 + + child_future = parent_future.then(cb) + with torch.cuda.device("cuda:0"): + stream = torch.cuda.Stream() + with torch.cuda.stream(stream): + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + tensor0.fill_(1) + parent_future.set_result(tensor0) + with self.assertRaisesRegex( + ValueError, + r"The result contained tensors residing on device\(s\) cuda:0 " + r"which are not among the expected device\(s\) cuda:1", + ): + parent_future.wait() + with torch.cuda.device("cuda:1"): + another_stream = torch.cuda.Stream() + with torch.cuda.stream(another_stream): + self.assertTrue(torch.eq(child_future.wait(), 1).all().item()) + + @skip_if_lt_x_gpu(1) + def test_async_execution_with_cuda_future(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {"cuda:0": "cuda:0"}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + t = torch.zeros((100,), device="cuda:0") + fut = rpc.rpc_async(dst, async_cuda_sleep_and_set_to_one, args=(t,)) + another_stream = torch.cuda.Stream("cuda:0") + with torch.cuda.stream(another_stream): + self.assertTrue(torch.eq(fut.wait(), 1).all().item()) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_async_execution_nested_with_cuda_future(self): + dst = worker_name((self.rank + 1) % self.world_size) + nested_dst = worker_name((self.rank + 2) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {"cuda:0": "cuda:0"}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + a = torch.ones((100,), device="cuda:0") + b = torch.ones((100,), device="cuda:0") + c = torch.ones((100,), 
device="cuda:0") + fut = rpc.rpc_async(dst, async_cuda_nested_add, args=(nested_dst, a, b, c)) + another_stream = torch.cuda.Stream("cuda:0") + with torch.cuda.stream(another_stream): + self.assertTrue(torch.eq(fut.wait(), 3).all().item()) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_cuda_future_modify_tensor_inplace(self): + tensor = torch.zeros((100,), device="cuda:0") + future = Future(devices=["cuda:0"]) + future.set_result(tensor) + # It's weird to modify the value of a future once it's complete, but + # technically possible. Currently this is considered undefined behavior + # (in practice the future will ignore the modification and still + # synchronize with the original value). We could one day add logic to + # detect and warn or throw in such cases, but for now we just check that + # this doesn't crash. + tensor.fill_(1) + future.wait() + + @skip_if_lt_x_gpu(1) + def test_cuda_future_replace_tensor(self): + tensor_list = [torch.zeros((100,), device="cuda:0")] + future = Future(devices=["cuda:0"]) + future.set_result(tensor_list) + # It's weird to modify the value of a future once it's complete, but + # technically possible. Currently this is considered undefined behavior + # (in practice the future will ignore the modification and still + # synchronize with the original value). We could one day add logic to + # detect and warn or throw in such cases, but for now we just check that + # this doesn't crash. + # We set things up so that the original tensor contained in the list + # gets deleted once we replace it with the other one. This will + # invalidate any cached information held by the future. 
+ tensor_list[0] = torch.ones((100,), device="cuda:0") + future.wait() + + @skip_if_lt_x_gpu(1) + def test_rref_with_unpickleable_attributes(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {"cuda:0": "cuda:0"}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + rref = rpc.remote(dst, TensorWrapper, args=(torch.zeros(42, device="cuda:0"),)) + rref.rpc_sync().increase(1) + ret = rref.rpc_sync().sum() + self.assertEqual(ret, 42) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_cuda_sparse_tensor(self): + self._test_cuda_future_extraction( + wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=True + ) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_list_with_cuda_sparse_tensor(self): + self._test_cuda_future_extraction( + wrapper=lambda t: [t], unwrapper=operator.itemgetter(0), sparse_tensor=True + ) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_custom_class_with_cuda_sparse_tensor(self): + self._test_cuda_future_extraction( + wrapper=TensorWrapper, unwrapper=lambda v: v.tensor, sparse_tensor=True + ) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/tensorpipe_rpc_agent_test_fixture.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/tensorpipe_rpc_agent_test_fixture.py new file mode 100644 index 0000000000000000000000000000000000000000..629b0328f1aed6d14d71589faee40a9b702a1b22 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/tensorpipe_rpc_agent_test_fixture.py @@ -0,0 +1,34 @@ +# mypy: allow-untyped-defs + +import torch.distributed.rpc as rpc +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) +from 
torch.testing._internal.common_distributed import ( + tp_transports, +) + + +class TensorPipeRpcAgentTestFixture(RpcAgentTestFixture): + @property + def rpc_backend(self): + return rpc.backend_registry.BackendType[ + "TENSORPIPE" + ] + + @property + def rpc_backend_options(self): + return rpc.backend_registry.construct_rpc_backend_options( + self.rpc_backend, + init_method=self.init_method, + _transports=tp_transports() + ) + + def get_shutdown_error_regex(self): + # FIXME Once we consolidate the error messages returned by the + # TensorPipe agent put some more specific regex here. + error_regexes = [".*"] + return "|".join([f"({error_str})" for error_str in error_regexes]) + + def get_timeout_error_regex(self): + return "RPC ran for more than" diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/optests/__init__.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/optests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e9125ba0ebe7e0623a12ad1a1cd7eeb7d2749a3a --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/optests/__init__.py @@ -0,0 +1,7 @@ +# mypy: ignore-errors + +from .make_fx import make_fx_check +from .aot_autograd import aot_autograd_check, _test_aot_autograd_forwards_backwards_helper +from .fake_tensor import fake_check +from .autograd_registration import autograd_registration_check +from .generate_tests import generate_opcheck_tests, opcheck, OpCheckError, dontGenerateOpCheckTests, is_inside_opcheck_mode diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/optests/generate_tests.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/optests/generate_tests.py new file mode 100644 index 0000000000000000000000000000000000000000..7fac1e57c6ac8e13c0a0f7c3f53592d91ce28bbf --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/optests/generate_tests.py @@ -0,0 +1,825 @@ +# 
mypy: ignore-errors + +import datetime +import difflib +import functools +import inspect +import json +import os +import re +import tempfile +import threading +import unittest +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union + +import torch +import torch._dynamo +import torch.utils._pytree as pytree +from torch._dynamo.utils import clone_input +from torch._library.custom_ops import CustomOpDef +from torch._subclasses.schema_check_mode import SchemaCheckMode +from torch._utils_internal import get_file_path_2 +from torch.overrides import TorchFunctionMode +from torch.testing._internal.optests import ( + aot_autograd_check, + autograd_registration_check, + fake_check, +) + + +def dontGenerateOpCheckTests(reason: str): + def inner(fun): + fun._torch_dont_generate_opcheck_tests = True + return fun + + return inner + + +def is_abstract(tensor: torch.Tensor) -> bool: + if tensor.is_meta: + return True + if torch._subclasses.fake_tensor.is_fake(tensor): + return True + return False + + +def safe_schema_check( + op: torch._ops.OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + *, + copy_inputs: bool = True, +) -> Any: + if copy_inputs: + args, kwargs = deepcopy_tensors((args, kwargs)) + if pytree.tree_any_only(torch.Tensor, is_abstract, (args, kwargs)): + return None + with SchemaCheckMode(): + result = op(*args, **kwargs) + return result + + +def safe_autograd_registration_check( + op: torch._ops.OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + *, + copy_inputs: bool = True, +) -> None: + if pytree.tree_any_only(torch.Tensor, is_abstract, (args, kwargs)): + return + if copy_inputs: + args, kwargs = deepcopy_tensors((args, kwargs)) + # Don't perform autograd_registration_check if none of the inputs require grad. 
+ if not pytree.tree_any_only( + torch.Tensor, lambda x: x.requires_grad, (args, kwargs) + ): + return + return autograd_registration_check(op, args, kwargs) + + +def safe_fake_check( + op: torch._ops.OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + *, + copy_inputs: bool = True, +) -> None: + if pytree.tree_any_only(torch.Tensor, is_abstract, (args, kwargs)): + return None + if copy_inputs: + args, kwargs = deepcopy_tensors((args, kwargs)) + return fake_check(op, args, kwargs) + + +def safe_aot_autograd_check( + op: torch._ops.OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + dynamic: bool, + *, + copy_inputs: bool = True, +) -> Any: + # NB: copy_inputs does nothing for aot_autograd_check: it always needs to copy + # inputs. + if pytree.tree_any_only(torch.Tensor, is_abstract, (args, kwargs)): + return None + + def func(*args, **kwargs): + args, kwargs = pytree.tree_map_only(torch.Tensor, torch.clone, (args, kwargs)) + return op(*args, **kwargs) + + # aot_autograd_check runs func(*args, **kwargs) multiple times + # and assumes `func` does not modify its inputs. + return aot_autograd_check(func, args, kwargs, dynamic, check_gradients="auto") + + +def deepcopy_tensors(inputs: Any) -> Any: + return pytree.tree_map_only(torch.Tensor, clone_input, inputs) + + +# Test util requirements +# - The test util must have signature (op: OpOverload, args, kwargs) +# - The test util must NOT mutate args, kwargs. +# - The test utils in this list must not be prefixes of each other. For example, +# having both "test_schema" and "test_schema_is_functional" is NOT OK. +# - The order of items in this dict matters (for opcheck), we'll run them +# in order. 
# Registry of all test utilities, keyed by the generated-test prefix.
# Order matters: opcheck runs them in this order.
ALL_TEST_UTILS = {
    "test_schema": safe_schema_check,
    "test_autograd_registration": safe_autograd_registration_check,
    "test_faketensor": safe_fake_check,
    "test_aot_dispatch_static": functools.partial(
        safe_aot_autograd_check,
        dynamic=False,
    ),
    "test_aot_dispatch_dynamic": functools.partial(
        safe_aot_autograd_check,
        dynamic=True,
    ),
}

GDOC = "https://docs.google.com/document/d/1Pj5HRZvdOq3xpFpbEjUZp2hBovhy7Wnxw14m6lF2154/edit"

# Default subset of test utils used when callers don't specify one.
DEFAULT_TEST_UTILS = [
    "test_schema",
    "test_autograd_registration",
    "test_faketensor",
    "test_aot_dispatch_dynamic",
]

DEPRECATED_DEFAULT_TEST_UTILS = DEFAULT_TEST_UTILS + [
    "test_aot_dispatch_static",
]


def generate_opcheck_tests(
    testcase: Any,
    namespaces: List[str],
    failures_dict_path: Optional[str] = None,
    additional_decorators: Dict[str, Callable] = None,
    test_utils: List[str] = DEFAULT_TEST_UTILS,
) -> None:
    """Given an existing TestCase, use the existing tests to generate
    additional validation tests for custom operators.

    For {all existing tests in the TestCase} x {all test utils},
    we will generate one new test. The new test runs a TorchFunctionMode
    that intercepts ``op(*args, **kwargs)`` calls and invokes
    ``test_util(op, *args, **kwargs)``, where ``op`` is an operator.

    The test_util that we support are in ALL_TEST_UTILS. They are:
    - test_schema: This runs SchemaCheckMode.
    - test_autograd_registration: This runs autograd_registration_check.
    - test_faketensor: This runs CrossRefFakeMode.
    - test_aot_dispatch_static: This runs aot_autograd_check, which:
      checks that the outputs (and gradients, if they are computable)
      are the same under eager-mode PyTorch and using AOTAutograd.
    - test_aot_dispatch_dynamic: Same as aot_dispatch_static, but
      runs AOTAutograd using dynamic shapes instead of static shapes.

    The generated test will have name ``{test_util}__{original_name}``.
    For example, if there is a method named ``test_cumsum``, then
    we will generate a ``test_schema__test_cumsum``,
    ``test_faketensor__test_cumsum``, etc.

    For more details, see https://docs.google.com/document/d/1Pj5HRZvdOq3xpFpbEjUZp2hBovhy7Wnxw14m6lF2154/edit

    Args:
        testcase: The testcase we will modify and generate additional tests for.
        namespaces: We will only intercept calls to custom operators with these
            namespaces.
        failures_dict_path: See ``validate_failures_dict_structure`` for more details
        test_utils: a list of test_utils to generate. Example: ["test_schema", "test_faketensor"]
    """
    if additional_decorators is None:
        additional_decorators = {}
    # All callable test_* attributes on the TestCase are candidates.
    test_methods = [
        m
        for m in dir(testcase)
        if m.startswith("test_") and callable(getattr(testcase, m))
    ]
    if failures_dict_path is None:
        # The default failures_dict_path is failures_dict.json in
        # the same directory as the test file.
        prev_frame = inspect.currentframe().f_back
        filename = inspect.getframeinfo(prev_frame)[0]
        failures_dict_path = get_file_path_2(
            os.path.dirname(filename), "failures_dict.json"
        )
    failures_dict = FailuresDict.load(
        failures_dict_path, create_file=should_update_failures_dict()
    )
    validate_failures_dict_structure(failures_dict, test_utils, testcase)
    validate_failures_dict_formatting(failures_dict_path)

    def construct_method(attr, prefix, tester):
        # Build and attach one generated test: `prefix`__`attr`, which runs
        # the original test under an OpCheckMode driving `tester`.
        method = getattr(testcase, attr)
        if getattr(method, "_torch_dont_generate_opcheck_tests", False):
            return
        new_method_name = prefix + "__" + attr

        @functools.wraps(method)
        def new_method(*args, **kwargs):
            with OpCheckMode(
                namespaces,
                prefix,
                tester,
                failures_dict,
                f"{testcase.__name__}.{new_method_name}",
                failures_dict_path,
            ):
                result = method(*args, **kwargs)
            return result

        if pytestmark := new_method.__dict__.get("pytestmark"):
            import pytest

            # check if we need to simplify the parametrize marks
            # NB: you need to add this mark to your pytest.ini
            # (continues construct_method) honor the opcheck_only_one mark:
            # collapse each non-device parametrize mark to its first value so
            # the generated opcheck test runs only once per parameter set.
            opcheck_only_one = False
            for mark in pytestmark:
                if isinstance(mark, pytest.Mark) and mark.name == "opcheck_only_one":
                    opcheck_only_one = True

            if opcheck_only_one:
                new_pytestmark = []
                for mark in pytestmark:
                    if isinstance(mark, pytest.Mark) and mark.name == "parametrize":
                        argnames, argvalues = mark.args
                        assert not mark.kwargs, "NYI"
                        # Special case for device, we want to run on all
                        # devices
                        if argnames != "device":
                            new_pytestmark.append(
                                pytest.mark.parametrize(
                                    argnames, (next(iter(argvalues)),)
                                )
                            )
                            continue
                    new_pytestmark.append(mark)
                new_method.__dict__["pytestmark"] = new_pytestmark

        if new_method_name in additional_decorators:
            for dec in additional_decorators[new_method_name]:
                new_method = dec(new_method)

        # Refuse to clobber an existing attribute on the TestCase.
        if hasattr(testcase, new_method_name):
            raise RuntimeError(
                f"Tried to autogenerate {new_method_name} but {testcase} already "
                f"has method named {new_method_name}. Please rename the original "
                f"method on the TestCase."
            )
        setattr(testcase, new_method_name, new_method)

    # Cross product: every original test x every requested test util.
    test_utils = {name: ALL_TEST_UTILS[name] for name in test_utils}
    for attr in test_methods:
        for prefix, tester in test_utils.items():
            construct_method(attr, prefix, tester)

    generate_tag_tests(testcase, failures_dict, additional_decorators)


def generate_tag_tests(testcase, failures_dict, additional_decorators):
    """For each op in the failures dict, generate a test asserting that an op
    tagged pt2_compliant_tag does not have xfailed opcheck tests."""

    def generate_test(qualname, definitely_not_pt2_compliant, xfailed_tests):
        def inner(self):
            try:
                op = torch._library.utils.lookup_op(qualname)
            except AttributeError as e:
                # Operator not importable in this test file
                raise unittest.SkipTest(f"Can't import operator {qualname}") from e
            op_marked_as_compliant = torch.Tag.pt2_compliant_tag in op.tags
            if not op_marked_as_compliant:
                return
            if not definitely_not_pt2_compliant:
                return
            raise AssertionError(
                f"op '{qualname}' was tagged with torch.Tag.pt2_compliant_tag "
                f"but it failed some of the generated opcheck tests "
                f"({xfailed_tests}). This may lead to silent correctness issues, "
                f"please fix this."
            )

        return inner

    for qualname, test_dict in failures_dict.data.items():
        xfailed_tests = [
            test
            for test, status_dict in test_dict.items()
            # We're about to delete the following test after Ed's PR
            # to specialize on C++ .size() calls
            if "test_aot_dispatch_static" not in test
            and status_dict["status"] == "xfail"
        ]
        definitely_not_pt2_compliant = len(xfailed_tests) > 0
        generated = generate_test(qualname, definitely_not_pt2_compliant, xfailed_tests)

        # Could result in collisions, but unlikely. We'll raise if we see one below.
        mangled_qualname = qualname.replace("::", "_").replace(".", "_")
        test_name = "test_pt2_compliant_tag_" + mangled_qualname

        # You can skip this test via the additional_decorators argument
        # in generate_opcheck_tests
        if test_name in additional_decorators:
            for decorator in additional_decorators[test_name]:
                generated = decorator(generated)

        if hasattr(testcase, test_name):
            raise RuntimeError(
                f"Tried to generate a test named {test_name}, but it exists "
                f"already. This could be because of a name collision (where "
                f"we generated two tests with the same name), or where we "
                f"generated a test with the same name as an existing test."
+ ) + setattr(testcase, test_name, generated) + + +TEST_OPTIONS = ("xfail", "skip", "xsuccess") + + +def validate_failures_dict_formatting(failures_dict_path: str) -> None: + with open(failures_dict_path) as fp: + actual = fp.read() + failures_dict = FailuresDict.load(failures_dict_path) + expected = failures_dict._save(to_str=True) + if actual == expected: + return + if should_update_failures_dict(): + failures_dict = FailuresDict.load(failures_dict_path) + failures_dict.save() + return + expected = expected.splitlines(1) + actual = actual.splitlines(1) + diff = difflib.unified_diff(actual, expected) + diff = "".join(diff) + raise RuntimeError( + f"\n{diff}\n\nExpected the failures dict to be formatted " + f"a certain way. Please see the above diff; you can correct " + f"this either manually or by re-running the test with " + f"PYTORCH_OPCHECK_ACCEPT=1" + ) + + +def validate_failures_dict_structure( + failure_dict: "FailuresDict", test_utils: List[str], testcase: Any +) -> None: + """Validates the failures dict. + + The failure dict looks something like the following. + It maps operator name (qualname) to a list of autogenerated tests. + Each autogenerated test may have a check for the operator (if the operator is + called by the test); the dictionary specifies if we should skip the check, + or if we expect some check to fail. 
+ + { + "fbgemm::split_lengths": { + "test_schema__test_split_lengths": { + "comment": "you can put whatever you want into the comment section", + "status": "xfail", + } + "test_schema__test_split_lengths_empty": { + "comment": "", + "status": "skip", + }, + }, + "fbgemm::gather_lengths": { + "test_schema__test_gather_lengths": { + "comment": "", + "status": "skip", + }, + }, + } + + """ + failure_dict = failure_dict.data + qualnames = list(failure_dict.keys()) + for test_to_option in failure_dict.values(): + test_names = list(test_to_option.keys()) + for test_name, test_dict in test_to_option.items(): + if set(test_dict.keys()) != set({"comment", "status"}): + raise RuntimeError( + "in failures_dict, expected sub-dict to have keys 'comment' and 'status'" + ) + test_option = test_dict["status"] + if test_option not in TEST_OPTIONS: + raise RuntimeError( + f"In failures_dict, got status={test_option} but it needs to be in {TEST_OPTIONS}" + ) + test_class, actual_test_name = test_name.split(".") + if not any(actual_test_name.startswith(test) for test in test_utils): + raise RuntimeError( + f"In failures_dict, test name '{test_name}' should begin with one of {test_utils}" + ) + for test in test_utils: + if not actual_test_name.startswith(test): + continue + base_test_name = actual_test_name[len(test) + 2 :] + # remove potential pytest parametrization suffix + base_test_name = re.sub(r"\[.*\]", "", base_test_name) + if testcase.__name__ != test_class: + continue + if hasattr(testcase, base_test_name): + continue + raise RuntimeError( + f"In failures dict, got test name '{test_name}'. We parsed this as " + f"running test '{test}' on '{base_test_name}', but " + f"{base_test_name} does not exist on the TestCase '{testcase.__name__}]. " + f"Maybe you need to change the test name?" 
            )


def should_update_failures_dict() -> bool:
    # Opt-in via the PYTORCH_OPCHECK_ACCEPT=1 environment variable.
    key = "PYTORCH_OPCHECK_ACCEPT"
    return key in os.environ and os.environ[key] == "1"


# Thread-local flag tracking whether an OpCheckMode is currently active.
_is_inside_opcheck_mode = threading.local()
_is_inside_opcheck_mode.value = False


def is_inside_opcheck_mode():
    """Return True while an OpCheckMode context is active on this thread."""
    return _is_inside_opcheck_mode.value


class OpCheckMode(TorchFunctionMode):
    """
    For a given test, OpCheckMode intercepts calls to operators and runs
    test_util(op, args, kwargs) for each intercepted (op, args, kwargs).
    """

    def __init__(
        self,
        namespaces: List[str],
        test_util_name: str,
        test_util: Callable,
        failures_dict: "FailuresDict",
        test_name: str,
        failures_dict_path: str,
    ):
        # We will intercept calls to ops with these namespaces
        self.namespaces = namespaces
        # The test utility function. Its signature should be (op, args, kwargs) -> None.
        # Examples of test utilities are: schema_check, make_fx_check
        self.test_util = test_util
        self.test_util_name = test_util_name
        # The name of the test that is running this OpCheckMode.
        self.test_name = test_name
        # Maps qualname -> test_name -> skip/xfail
        # Tells us if we should skip a test or assert that there is a failure.
        self.failures_dict = failures_dict
        # Location of the failures dict. Makes it so that the error message is better.
        self.failures_dict_path = failures_dict_path

        # OpCheckMode suppresses errors, collects them here, and then raises them on exit.
        # Maps qualname -> List[(Exception, func, maybe args, maybe kwargs)]
        self.seen_ops_to_errors = {}

    def maybe_raise_errors_on_exit(self) -> None:
        """Raise collected errors (or unexpected successes) when the mode exits.

        With PYTORCH_OPCHECK_ACCEPT=1 the failures dict is updated in place
        instead of raising.
        """
        # Check expected failures first
        for qualname in self.seen_ops_to_errors.keys():
            option = self.failures_dict.get_status(qualname, self.test_name)
            if len(self.seen_ops_to_errors[qualname]) == 0:
                # No errors seen for this op: an "xfail" entry is now stale.
                if should_update_failures_dict():
                    self.failures_dict.set_status(
                        qualname, self.test_name, "xsuccess", comment=""
                    )
                else:
                    if option == "xfail":
                        raise OpCheckError(
                            f"generate_opcheck_tests: Unexpected success for operator "
                            f"{qualname} on test {self.test_name}. This may mean that "
                            f"you have fixed this test failure. Please rerun the test with "
                            f"PYTORCH_OPCHECK_ACCEPT=1 to automatically update the test runner "
                            f"or manually remove the "
                            f"expected failure in the failure dict at "
                            f"{self.failures_dict_path}"
                            f"For more details, see "
                            f"{GDOC}"
                        )
                continue
        # Collect ops that actually failed and are not marked xfail/skip.
        failed_ops = []
        for qualname in self.seen_ops_to_errors.keys():
            option = self.failures_dict.get_status(qualname, self.test_name)
            if option != "xsuccess":
                continue
            if len(self.seen_ops_to_errors[qualname]) == 0:
                continue
            failed_ops.append(qualname)
        if not failed_ops:
            return

        if should_update_failures_dict():
            for op in failed_ops:
                self.failures_dict.set_status(op, self.test_name, "xfail")
            return

        # Raise from the first error but also report about all of them to make
        # recording xfails easier.
        ex, op, args, kwargs = self.seen_ops_to_errors[failed_ops[0]][0]
        repro_command = generate_repro(
            self.test_util_name, op, args, kwargs, save_data=should_print_better_repro()
        )
        raise OpCheckError(
            f"Test generated by `generate_opcheck_tests`, {self.test_name}, "
            f"failed on operators {failed_ops}. This usually means that the "
            f"operators are not implemented correctly and may lead to silently "
            f"incorrect behavior. Set PYTORCH_OPCHECK_PRINT_BETTER_REPRO=1 for a standalone repro, "
            f"or please see "
            f"{GDOC} "
            f"for more recommendations. "
            f"To reproduce this problem locally, try to run the following:\n{repro_command}"
        ) from ex

    def __enter__(self, *args, **kwargs):
        # Save and override the thread-local flag plus TORCHDYNAMO_DISABLE,
        # restored in __exit__.
        self.prev_is_opcheck_mode = _is_inside_opcheck_mode.value
        self.prev_dynamo_disable = os.environ.get("TORCHDYNAMO_DISABLE", "")
        _is_inside_opcheck_mode.value = True
        os.environ["TORCHDYNAMO_DISABLE"] = "1"
        return super().__enter__(*args, **kwargs)

    def __exit__(self, *args, **kwargs):
        _is_inside_opcheck_mode.value = self.prev_is_opcheck_mode
        os.environ["TORCHDYNAMO_DISABLE"] = self.prev_dynamo_disable
        try:
            self.maybe_raise_errors_on_exit()
            if should_update_failures_dict():
                self.failures_dict.save()
        finally:
            # Always tear down the TorchFunctionMode, even if we raised above.
            result = super().__exit__(*args, **kwargs)
        return result

    def run_test_util(self, op, args, kwargs):
        try:
            self.test_util(op, args, kwargs, copy_inputs=False)
        except torch._subclasses.fake_tensor.UnsupportedFakeTensorException:
            # We might get here if the input is already a FakeTensor
            # or if we're in a torch.compile block. Just ignore these
            # since we can't handle them and reporting them as failures
            # is too noisy.
            pass

    def __torch_function__(self, func, types, args=(), kwargs=None):
        kwargs = kwargs if kwargs else {}

        # Only intercept calls to operators
        if not isinstance(func, (torch._ops.OpOverloadPacket, torch._ops.OpOverload)):
            return func(*args, **kwargs)
        if (
            torch.jit.is_tracing()
            or torch.jit.is_scripting()
            or torch._dynamo.is_compiling()
        ):
            return func(*args, **kwargs)
        # Pre-existing code may not use the .default overload. If we see an
        # OpOverloadPacket and we cannot resolve the overload, then we just throw
        # and ask the user to clarify. Otherwise, we attempt to resolve the overload.
+ if isinstance(func, torch._ops.OpOverloadPacket): + func = resolve_unique_overload_or_throw(func) + qualname = func.name() + ns = qualname.split("::")[0] + if ns not in self.namespaces: + return func(*args, **kwargs) + + args_c, kwargs_c = deepcopy_tensors((args, kwargs)) + result = func(*args, **kwargs) + + option = self.failures_dict.get_status(qualname, self.test_name) + if option == "xsuccess" or option == "xfail": + # Surpress all errors during execution. Raise them during __exit__. + try: + if qualname not in self.seen_ops_to_errors: + self.seen_ops_to_errors[qualname] = [] + self.run_test_util(func, args_c, kwargs_c) + except Exception as ex: + if should_print_better_repro(): + self.seen_ops_to_errors[qualname].append((ex, func, args, kwargs)) + else: + self.seen_ops_to_errors[qualname].append((ex, func, None, None)) + elif option == "skip": + pass + return result + + +def should_print_better_repro() -> None: + """If set, the tests generated by `generate_opcheck_tests` will print a + repro command on failure. + + In order to print the repro command, we need to save some tensors to disk. + These will be saved under the following directory: + {tempfile.gettempdir()}/pytorch_opcheck_safe_to_delete/. + + Although this is a temp folder, it will usually not automatically get cleaned + up, so you'll need to manually delete it. 
+ """ + key = "PYTORCH_OPCHECK_PRINT_BETTER_REPRO" + if key not in os.environ: + return False + value = os.environ[key] + return value == "1" or value == 1 + + +def opcheck( + op: Union[torch._ops.OpOverload, torch._ops.OpOverloadPacket, CustomOpDef], + args: Tuple[Any, ...], + kwargs: Optional[Dict[str, Any]] = None, + *, + test_utils: Union[str, Sequence[str]] = DEFAULT_TEST_UTILS, + raise_exception: bool = True, +) -> Dict[str, str]: + """See torch.library.opcheck for docstring""" + + if kwargs is None: + kwargs = {} + if isinstance(op, CustomOpDef): + op = op._opoverload + if isinstance(op, torch._ops.OpOverloadPacket): + op = resolve_unique_overload_or_throw(op) + if not isinstance(op, torch._ops.OpOverload): + raise ValueError( + f"opcheck(op, ...): op must be instance of torch._ops.OpOverload, " + f"e.g. torch.ops.aten.sin.default, got {type(op)}" + ) + if test_utils == "ALL": + test_utils = tuple(ALL_TEST_UTILS.keys()) + if isinstance(test_utils, str): + test_utils = (test_utils,) + if not isinstance(test_utils, (tuple, list)) or not set(test_utils).issubset( + ALL_TEST_UTILS.keys() + ): + raise ValueError( + f"opcheck(op, ..., test_utils={test_utils}), expected test_utils " + f"to be subset of {tuple(ALL_TEST_UTILS.keys())} but it was not" + ) + + results_dict = {} + for test_util in test_utils: + tester = ALL_TEST_UTILS[test_util] + try: + tester(op, args, kwargs) + results_dict[test_util] = "SUCCESS" + except Exception as ex: + if raise_exception: + raise OpCheckError( + f"opcheck(op, ...): {test_util} failed with {ex} " + f"(scroll up for stack trace)" + ) from ex + results_dict[test_util] = ex + return results_dict + + +class OpCheckError(Exception): + pass + + +def generate_repro( + test: str, + op: torch._ops.OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + *, + save_data: bool, + dry_run: bool = False, +) -> str: + if save_data: + now = datetime.datetime.now() + path = os.path.join(tempfile.gettempdir(), 
"pytorch_opcheck_safe_to_delete") + unix_timestamp = datetime.datetime.timestamp(now) * 100000 + filepath = os.path.join(path, f"repro_{unix_timestamp}.pt") + if not dry_run: + os.makedirs(path, exist_ok=True) + torch.save((args, kwargs), filepath) + args_kwargs = f'args, kwargs = torch.load("{filepath}")' + else: + args_kwargs = ( + "# If you rerun your test with PYTORCH_OPCHECK_PRINT_BETTER_REPRO=1\n" + "# we will fill them in same (args, kwargs) as in your test\n" + "args = () # args to the operator\n" + "kwargs = {} # kwargs to the operator" + ) + + ns, name = op._schema.name.split("::") + overload = op._overloadname + + repro_command = ( + f"# =========================================================\n" + f"# BEGIN REPRO SCRIPT\n" + f"# =========================================================\n" + f"import torch\n" + f"from torch.testing._internal.optests import opcheck\n" + f"\n" + f"# Make sure you have loaded the library that contains the op\n" + f"# via an import or torch.ops.load_library(...)\n" + f"op = torch.ops.{ns}.{name}.{overload}\n" + f"\n" + f"{args_kwargs}\n" + f'opcheck(op, args, kwargs, test_utils="{test}")\n' + f"# =========================================================\n" + f"# END REPRO SCRIPT\n" + f"# =========================================================\n" + ) + return repro_command + + +def resolve_unique_overload_or_throw( + op: torch._ops.OpOverloadPacket, +) -> torch._ops.OpOverload: + all_schemas = torch._C._jit_get_schemas_for_operator(op._qualified_op_name) + if len(all_schemas) != 1: + raise RuntimeError( + f"opcheck can only test operators without overloads. 
" + f"Got the following overloads for {op._qualified_op_name}: " + f"{[schema.overload_name for schema in all_schemas]}" + ) + + overload_name = all_schemas[0].overload_name + if overload_name == "": + return op.default + return getattr(op, overload_name) + + +DUMP_OPTIONS = {"indent": 2, "sort_keys": True} + + +FailuresDictData = Dict[str, Dict[str, Dict[str, str]]] + + +VERSION = 1 +DESCRIPTION = ( + f"This is a dict containing failures for tests autogenerated by " + f"generate_opcheck_tests. " + f"For more details, please see {GDOC}" +) + + +class FailuresDict: + def __init__(self, path: str, data: FailuresDictData): + self.path = path + self.data = data + + @staticmethod + def load(path, *, create_file=False) -> "FailuresDict": + if create_file and not os.path.exists(path): + result = FailuresDict(path, {}) + FailuresDict.save() + return result + with open(path) as fp: + contents = fp.read() + if contents.strip() == "": + dct = { + "_description": DESCRIPTION, + "data": {}, + "_version": VERSION, + } + else: + dct = json.loads(contents) + assert "data" in dct + assert "_version" in dct and dct["_version"] == VERSION + return FailuresDict(path, dct["data"]) + + def _save(self, to_str=False) -> Optional[str]: + to_dump = { + "_description": DESCRIPTION, + "data": self.data, + "_version": VERSION, + } + # json.dumps doesn't end with a newline. Let's add one because files + # should end in newlines. 
        serialized = json.dumps(to_dump, **DUMP_OPTIONS) + "\n"
        if to_str:
            return serialized
        with open(self.path, "w") as fp:
            fp.write(serialized)
        return None

    def save(self) -> None:
        # Public wrapper: always writes to disk (never returns a string).
        return self._save()

    def get_status(self, qualname: str, test_name: str) -> str:
        """Return the status for (qualname, test_name); "xsuccess" if absent."""
        if qualname not in self.data:
            return "xsuccess"
        dct = self.data[qualname]
        if test_name not in dct:
            return "xsuccess"
        return dct[test_name]["status"]

    def set_status(
        self,
        qualname: str,
        test_name: str,
        status: str,
        *,
        comment: Optional[str] = None,
    ):
        """Set (or clear, for "xsuccess") the status of (qualname, test_name)."""
        if qualname not in self.data:
            self.data[qualname] = {}
        dct = self.data[qualname]
        if test_name not in dct:
            dct[test_name] = {"status": None, "comment": ""}

        if status == "xsuccess":
            # The default status is "xsuccess".
            del dct[test_name]
        else:
            dct[test_name]["status"] = status
            if comment is not None:
                dct[test_name]["comment"] = comment
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/__init__.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..605adb5edab5d82e8738e4cc756c6dbcc4b7bae6
Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/__init__.cpython-310.pyc differ
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/no_future_div.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/no_future_div.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..959c06e24f83e31b637d69c3bc82290cd3d01638
Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/no_future_div.cpython-310.pyc differ
diff --git
a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/test_module/future_div.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/test_module/future_div.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a3494f945fad36d84cb8056dcf722d6911f0af2
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/test_module/future_div.py
@@ -0,0 +1,10 @@
+# mypy: ignore-errors
+
+
+def div_int_future():
+    # Division fixture: int / int under Python 3 semantics.
+    return 1 / 2
+
+
+def div_float_future():
+    # Division fixture: float / float.
+    return 3.14 / 0.125
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/test_module/no_future_div.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/test_module/no_future_div.py
new file mode 100644
index 0000000000000000000000000000000000000000..164e6d168414a11039f3b63885760ad08b81ae99
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/test_module/no_future_div.py
@@ -0,0 +1,11 @@
+# mypy: ignore-errors
+
+import torch  # noqa: F401
+
+
+def div_int_nofuture():
+    # Division fixture: same computation as future_div, with torch imported.
+    return 1 / 2
+
+
+def div_float_nofuture():
+    return 3.14 / 0.125