language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | wandb__wandb | wandb/sdk/internal/file_stream.py | {
"start": 3276,
"end": 4034
} | class ____:
r"""Stream state that tracks carriage returns.
There are two streams: stdout and stderr. We create two instances for each stream.
An instance holds state about:
found_cr: if a carriage return has been found in this stream.
cr: most recent offset (line number) where we found \r.
We update this offset with every progress bar update.
last_normal: most recent offset without a \r in this stream.
i.e. the most recent "normal" line.
"""
found_cr: bool
cr: Optional[int]
last_normal: Optional[int]
def __init__(self) -> None:
self.found_cr = False
self.cr = None
self.last_normal = None
| StreamCRState |
python | Netflix__metaflow | metaflow/plugins/cards/card_modules/test_cards.py | {
"start": 1349,
"end": 1649
} | class ____(MetaflowCard):
type = "test_non_editable_card"
separator = "$&#!!@*"
def __init__(self, components=[], **kwargs):
self._components = components
def render(self, task):
return self.separator.join([str(comp) for comp in self._components])
| TestNonEditableCard |
python | tensorflow__tensorflow | tensorflow/python/saved_model/function_deserialization.py | {
"start": 9019,
"end": 29647
} | class ____(def_function.Function):
"""Wrapper class for a function that has been restored from saved state.
See `def_function.Function`.
"""
def __init__(self, python_function, name, function_spec, concrete_functions):
# TODO(b/205016819): We may enable autograph once exceptions are supported.
super(RestoredFunction, self).__init__(
python_function,
name,
autograph=False,
jit_compile=function_spec.jit_compile)
self.concrete_functions = concrete_functions
self._function_type = function_spec.function_type
self._default_values = function_spec.default_values
# Prevent RestoredFunction from spamming users with frequent tracing
# warnings.
self._omit_frequent_tracing_warning = True
@property
def _run_functions_eagerly(self):
# We do not have access to the original python function, and thus, we
# cannot meaningfully do anything but call our concrete function graphs
# under the hood.
#
# Attempting to call our bespoke python function (i.e.
# `restored_function_body`) will work so long as the user passes in all
# required and optional arguments. If an optional argument is missing,
# however, the call will break. For this reason, we instead skip the
# eager call path altogether if a user has enabled eager function execution
# via `tf.config.run_functions_eagerly`.
return False
def _list_all_concrete_functions(self):
return self.concrete_functions
def _list_all_concrete_functions_for_serialization(self):
return self.concrete_functions
def recreate_function(saved_function, concrete_functions):
"""Creates a `Function` from a `SavedFunction`.
Args:
saved_function: `SavedFunction` proto.
concrete_functions: map from function name to `ConcreteFunction`. As a side
effect of this function, the `FunctionSpec` from `saved_function` is added
to each `ConcreteFunction` in this map.
Returns:
A `Function`.
"""
# TODO(b/205017389): Construct a `Function` with the cache populated
# instead of creating a new `Function` backed by a Python layer to
# glue things together. Current approach is nesting functions deeper for each
# serialization cycle.
# Note: handling method functions is tricky since make_decorator does not
# allows control of "ismethod". Additionally since restored functions do
# not behave as methods i.e. they always use the same captured tensors
# independent of the object they are bound to, there is little value on
# propagating that correctly.
#
# Ideally this conversion should happen at serialization time. But since
# there are SavedModels which have "ismethod" populated and have an extra
# argument that they expect to be ignored, we do it at deserialization.
function_spec = _deserialize_function_spec_as_nonmethod(
saved_function.function_spec)
def restored_function_body(*args, **kwargs):
"""Calls a restored function or raises an error if no matching function."""
if not saved_function.concrete_functions:
raise ValueError("Found zero restored functions for caller function.")
# This is the format of function.graph.structured_input_signature. At this
# point, the args and kwargs have already been canonicalized.
inputs = (args, kwargs)
# First try to find a concrete function that can be called without input
# conversions. This allows one to pick a more specific trace in case there
# was also a more expensive one that supported tensors.
for allow_conversion in [False, True]:
for function_name in saved_function.concrete_functions:
function = concrete_functions[function_name]
if any([inp is None for inp in function.captured_inputs]):
raise ValueError("Looks like you are trying to run a loaded "
"non-Keras model that was trained using "
"tf.distribute.experimental.ParameterServerStrategy "
"with variable partitioning, which is not currently "
"supported. Try using Keras to define your model "
"if possible.")
if _concrete_function_callable_with(function, inputs, allow_conversion):
return _call_concrete_function(function, inputs)
signature_descriptions = []
def _pretty_format_positional(positional):
return "Positional arguments ({} total):\n * {}".format(
len(positional),
"\n * ".join(pprint.pformat(a) for a in positional))
for index, function_name in enumerate(saved_function.concrete_functions):
concrete_function = concrete_functions[function_name]
positional, keyword = concrete_function.structured_input_signature
signature_descriptions.append(
"Option {}:\n {}\n Keyword arguments: {}".format(
index + 1, _pretty_format_positional(positional), keyword))
raise ValueError(
"Could not find matching concrete function to call loaded from the "
f"SavedModel. Got:\n {_pretty_format_positional(args)}\n Keyword "
f"arguments: {kwargs}\n\n Expected these arguments to match one of the "
f"following {len(saved_function.concrete_functions)} option(s):\n\n"
f"{(chr(10)+chr(10)).join(signature_descriptions)}")
concrete_function_objects = []
for concrete_function_name in saved_function.concrete_functions:
concrete_function_objects.append(concrete_functions[concrete_function_name])
for cf in concrete_function_objects:
set_preinitialized_function_spec(cf, function_spec)
restored_function = RestoredFunction(restored_function_body,
restored_function_body.__name__,
function_spec, concrete_function_objects)
return tf_decorator.make_decorator(
restored_function_body,
restored_function,
decorator_argspec=function_spec.fullargspec)
def load_function_def_library(library,
saved_object_graph=None,
load_shared_name_suffix=None,
wrapper_function=None):
"""Load a set of functions as concrete functions without captured inputs.
Functions names are manipulated during load such that they do not overlap
with previously created ones.
Gradients are re-registered under new names. Ops that reference the gradients
are updated to reflect the new registered names.
Args:
library: FunctionDefLibrary proto message.
saved_object_graph: SavedObjectGraph proto message. If not passed in,
concrete function structured signatures and outputs will not be set.
load_shared_name_suffix: If specified, used to uniquify shared names.
Otherwise, a unique name is generated.
wrapper_function: An object that will be wrapped on newly created functions.
Returns:
Map of original function names in the library to instances of
`ConcreteFunction` without captured inputs.
Raises:
ValueError: if functions dependencies have a cycle.
"""
library_function_names = set(fdef.signature.name for fdef in library.function)
functions = {}
renamed_functions = {}
# Our graph building code currently requires functions to be registered with
# some tf.Graph in order to import functions using the
# op-name-is-function-name calling convention. To avoid leaking memory into
# the global default graph when executing eagerly, we create a temporary
# Graph.
#
# TODO(b/205023033): Make this Graph creation unnecessary when executing
# eagerly by fixing function_def_to_graph_def.
if ops.executing_eagerly_outside_functions():
graph = ops.Graph()
else:
graph = ops.get_default_graph()
if load_shared_name_suffix is None:
load_shared_name_suffix = "_load_{}".format(ops.uid())
# Custom gradient functions must be re-registered under new UIDs.
library_gradient_names = {} # Maps old op type to old function name
new_gradient_op_types = {} # Maps old gradient op type to new op type.
gradients_to_register = {} # Maps old function name to new op type
for gdef in library.registered_gradients:
if gdef.registered_op_type:
new_op_type = custom_gradient.generate_name()
old_op_type = compat.as_bytes(gdef.registered_op_type)
library_gradient_names[old_op_type] = gdef.gradient_func
new_gradient_op_types[old_op_type] = new_op_type
gradients_to_register[gdef.gradient_func] = new_op_type
function_deps = {}
for fdef in library.function:
function_deps[fdef.signature.name] = _list_function_deps(
fdef, library_function_names, library_gradient_names)
loaded_gradients = {}
for fdef in _sort_function_defs(library, function_deps):
orig_name = _fix_fdef_in_place(fdef, functions, load_shared_name_suffix,
new_gradient_op_types)
# Setup function signatures and outputs
#
# When concrete functions are created normally (i.e. when they're originally
# created and not loaded via saved model), the inputs and outputs are
# calculated based on the values passed in by the user and returned from the
# original function, respectively. We don't have access to those anymore at
# restore time, so we must instead pass them to the FuncGraph explicitly.
structured_input_signature = None
structured_outputs = None
if (saved_object_graph is not None and
orig_name in saved_object_graph.concrete_functions):
# TODO(b/204324043): Offload the deserialization of the protos to the
# first class objects by passing the actual protos. This is blocked on
# importing `nested_structure_coder` in function.py causing a circular
# dependency.
proto = saved_object_graph.concrete_functions[orig_name]
structured_input_signature = nested_structure_coder.decode_proto(
proto.canonicalized_input_signature)
structured_outputs = nested_structure_coder.decode_proto(
proto.output_signature)
# There is no need to copy all functions into the function def graph. It
# leads to a O(n^2) increase of memory when importing functions and the
# extra function definitions are a no-op since they already imported as a
# function before and passed in explicitly (due to the topologic sort
# import).
with graph.as_default():
func_graph = function_def_lib.function_def_to_graph(
fdef,
structured_input_signature=structured_input_signature,
structured_outputs=structured_outputs)
# Restores gradients for function-call ops (not the same as ops that use
# custom gradients)
_restore_gradient_functions(func_graph, renamed_functions, loaded_gradients)
for dep in function_deps[orig_name]:
functions[dep].add_to_graph(func_graph)
# We do not initialize the new ConcreteFunction's function_spec and/or
# arg_keywords here (which are used to parse the structured and flat
# signatures, respectively). ConcreteFunction that are part of a saved
# function is set up later by recreate_function(); and bare ConcreteFunction
# is set up by by setup_bare_concrete_function().
# However, we copy the FunctionDef attributes to the new ConcreteFunction,
# excluding the "_input_shapes", which may cause an error during input shape
# initialization at a later stage.
if "_input_shapes" in fdef.attr:
del fdef.attr["_input_shapes"]
function_type = function_type_lib.from_structured_signature(
func_graph.structured_input_signature,
func_graph.structured_outputs,
func_graph.function_captures.capture_types,
)
func = function_lib.ConcreteFunction.from_func_graph(
func_graph, function_type, attrs=fdef.attr)
if wrapper_function:
func = wrapper_function(func)
func.add_to_graph(graph)
functions[orig_name] = func
renamed_functions[func.name] = func
if any(op.type == "TRTEngineOp" for op in func_graph.get_operations()):
# TODO(b/150708051): Remove this hack once TensorRT SavedModel integration
# is fixed. Currently it's leaking memory to maintain bug compatibility
# with previous behavior.
func.add_to_graph(ops.get_default_graph())
if orig_name in gradients_to_register:
gradient_op_type = gradients_to_register[orig_name]
loaded_gradients[compat.as_bytes(gradient_op_type)] = func
ops.RegisterGradient(gradient_op_type)(_gen_gradient_func(func))
return functions
def _gen_gradient_func(func):
"""Wraps a deserialized function."""
def gradient_func(unused_op, *result_grads):
# Replace all `None` arguments, because the traced custom gradient function
# expects tensors. Replacing with zeros is correct since the `None` values
# occur when the gradient is unconnected, and thus the gradient is
# "statically proven to be zero." See `tf.UnconnectedGradients` for details.
def none_to_zero(x, t):
if x is not None:
return x
shape, dtype = default_gradient.shape_and_dtype(t)
if shape.is_fully_defined():
return default_gradient.zeros_like(t)
dims = []
if shape.rank is not None:
dims = [1 if d is None else d for d in shape.as_list()]
return array_ops.zeros(dims, dtype)
result_grads = [
none_to_zero(x, t) for (x, t) in zip(result_grads, func.graph.inputs)
]
return func(*result_grads)
return gradient_func
def _restore_gradient_functions(func_graph, renamed_functions,
loaded_gradients):
"""Populate function op's _gradient_function with default gradient."""
for op in func_graph.get_operations():
# TODO(b/205024208): This code assumes that the gradient registered for this
# function call is the default gradient for the function and not a custom
# one.
if op.type in ["StatefulPartitionedCall", "PartitionedCall"]:
function = renamed_functions[compat.as_bytes(
op.node_def.attr["f"].func.name)]
op._gradient_function = function._get_gradient_function() # pylint: disable=protected-access
try:
gradient_op_type = op.get_attr("_gradient_op_type")
except ValueError:
pass
else:
if gradient_op_type in loaded_gradients:
grad_fn = loaded_gradients[gradient_op_type]
grad_fn._num_positional_args = len(op.inputs) # pylint: disable=protected-access
grad_fn._arg_keywords = [inp.name for inp in op.inputs] # pylint: disable=protected-access
def _sort_function_defs(library, function_deps):
"""Return a topologic sort of FunctionDefs in a library."""
edges = collections.defaultdict(list)
in_count = collections.defaultdict(lambda: 0)
for fname, deps in function_deps.items():
for dep in deps:
edges[dep].append(fname)
in_count[fname] += 1
ready = [
fdef.signature.name
for fdef in library.function
if in_count[fdef.signature.name] == 0
]
output = []
while ready:
node = ready.pop()
output.append(node)
for dest in edges[node]:
in_count[dest] -= 1
if not in_count[dest]:
ready.append(dest)
if len(output) != len(library.function):
failed_to_resolve = sorted(set(in_count.keys()) - set(output))
raise ValueError("There is a cyclic dependency between functions. ",
f"Could not resolve {failed_to_resolve}.")
reverse = {fdef.signature.name: fdef for fdef in library.function}
return [reverse[x] for x in output]
def _get_gradient_op_type(node_def):
"""Returns the custom gradient op type."""
if ("_gradient_op_type" in node_def.attr and
node_def.op not in ["StatefulPartitionedCall", "PartitionedCall"]):
return node_def.attr["_gradient_op_type"].s
return None
def fix_node_def(node_def, functions, shared_name_suffix):
"""Replace functions calls and shared names in `node_def`."""
if node_def.op in functions:
node_def.op = functions[node_def.op].name
for _, attr_value in node_def.attr.items():
if attr_value.WhichOneof("value") == "func":
attr_value.func.name = functions[attr_value.func.name].name
elif attr_value.WhichOneof("value") == "list":
for fn in attr_value.list.func:
fn.name = functions[fn.name].name
# Fix old table creation bug.
if node_def.op == "HashTableV2":
if ("use_node_name_sharing" not in node_def.attr or
not node_def.attr["use_node_name_sharing"].b):
node_def.attr["use_node_name_sharing"].b = True
# We are turning on node mame sharing, so have to make sure we don't
# accidentally share a table resource.
shared_name_suffix += "_{}".format(ops.uid())
# TODO(b/124205571): Avoid accidental sharing and destruction of restored
# resources. For now uniquify "shared_name" when loading functions to avoid
# sharing.
# TODO: Add regression test for b/150826922.
op_def = op_def_registry.get(node_def.op)
if op_def:
attr = next((a for a in op_def.attr if a.name == "shared_name"), None)
if attr:
shared_name = None
if "shared_name" in node_def.attr and node_def.attr["shared_name"].s:
shared_name = node_def.attr["shared_name"].s
elif attr.default_value.s:
shared_name = compat.as_bytes(attr.default_value.s)
if not shared_name:
shared_name = compat.as_bytes(node_def.name)
node_def.attr["shared_name"].s = (
shared_name + compat.as_bytes(shared_name_suffix))
def _fix_fdef_in_place(fdef, functions, shared_name_suffix,
new_gradient_op_types):
"""Fixes a FunctionDef proto to be loaded in current context.
In particular, when loading a function library into an eager context, one
must rename the functions to avoid conflicts with existent functions.
Args:
fdef: FunctionDef proto to fix. It is mutated in-place.
functions: map from function name to a ConcreteFunction instance.
shared_name_suffix: A unique string for this load which helps to avoid
`shared_name` collisions across loads. Two functions from the same load
using the same `shared_name` still need to share, but functions from
different loads with the same `shared_name` should not.
new_gradient_op_types: map from old gradient op type to newly generated op
type.
Returns:
orig_name: original value of fdef.signature.name
"""
orig_name = fdef.signature.name
contains_unsaved_custom_gradients = False
for node_def in fdef.node_def:
fix_node_def(node_def, functions, shared_name_suffix)
op_type = _get_gradient_op_type(node_def)
if op_type is not None:
if op_type in new_gradient_op_types:
node_def.attr["_gradient_op_type"].s = compat.as_bytes(
new_gradient_op_types[op_type])
else:
contains_unsaved_custom_gradients = True
if contains_unsaved_custom_gradients:
logging.warning(
"Importing a function (%s) with ops with unsaved custom gradients. Will"
" likely fail if a gradient is requested.", fdef.signature.name)
fdef.signature.name = _clean_function_name(fdef.signature.name)
return orig_name
def _list_function_deps(fdef, library_function_names, library_gradient_names):
"""Find functions referenced in `fdef`."""
# TODO(b/205023953): Recurse into list attributes and into NameAttrList attrs
# both when listing deps and when fixing them. `function_def_to_graph` also
# requires fixes.
deps = set()
for node_def in fdef.node_def:
grad_op_type = _get_gradient_op_type(node_def)
if node_def.op in library_function_names:
deps.add(node_def.op)
elif grad_op_type and grad_op_type in library_gradient_names:
deps.add(library_gradient_names[grad_op_type])
else:
for _, attr_value in node_def.attr.items():
if attr_value.WhichOneof("value") == "func":
deps.add(attr_value.func.name)
elif attr_value.WhichOneof("value") == "list":
for fn in attr_value.list.func:
deps.add(fn.name)
return deps
_FUNCTION_WRAPPER_NAME_REGEX = r"^%s(.*)_\d+$" % (function_lib._INFERENCE_PREFIX
) # pylint:disable=protected-access
def _clean_function_name(name):
"""Vanity function to keep the function names comprehensible."""
# Note: each time a function is wrapped into `function_lib.ConcreteFunction`
# its name becomes "__inference_<orig>_xyz".
match = re.search(_FUNCTION_WRAPPER_NAME_REGEX, name)
if match:
return match.group(1)
else:
return name
| RestoredFunction |
python | doocs__leetcode | solution/2200-2299/2284.Sender With Largest Word Count/Solution.py | {
"start": 0,
"end": 386
} | class ____:
def largestWordCount(self, messages: List[str], senders: List[str]) -> str:
cnt = Counter()
for message, sender in zip(messages, senders):
cnt[sender] += message.count(" ") + 1
ans = senders[0]
for k, v in cnt.items():
if cnt[ans] < v or (cnt[ans] == v and ans < k):
ans = k
return ans
| Solution |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 7109,
"end": 7793
} | class ____(BaseModel):
"""
Connection serializer for responses.
"""
connection_id: Annotated[str, Field(title="Connection Id")]
conn_type: Annotated[str, Field(title="Conn Type")]
description: Annotated[str | None, Field(title="Description")] = None
host: Annotated[str | None, Field(title="Host")] = None
login: Annotated[str | None, Field(title="Login")] = None
schema_: Annotated[str | None, Field(alias="schema", title="Schema")] = None
port: Annotated[int | None, Field(title="Port")] = None
password: Annotated[str | None, Field(title="Password")] = None
extra: Annotated[str | None, Field(title="Extra")] = None
| ConnectionResponse |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_hybrid_shard.py | {
"start": 2592,
"end": 17097
} | class ____(FSDPTest):
@property
def world_size(self):
return max(torch.accelerator.device_count(), 2)
@property
def process_group(self):
return dist.distributed_c10d._get_default_group()
@skip_if_lt_x_gpu(2)
def test_raises_manual_wrap_hybrid_shard_when_none_policy(self):
model = MyModel().to(device_type)
err_ctx = self.assertRaisesRegex(
ValueError,
"requires explicit specification of process group or device_mesh.",
)
with err_ctx:
model = FSDP(model, sharding_strategy=ShardingStrategy.HYBRID_SHARD)
with err_ctx:
model = FSDP(model, sharding_strategy=ShardingStrategy._HYBRID_SHARD_ZERO2)
@skip_if_lt_x_gpu(4)
def test_hsdp_save_load_state_dict(self):
model = MyModel().to(device_type)
num_node_devices = torch.accelerator.device_count()
shard_rank_lists = (
list(range(num_node_devices // 2)),
list(range(num_node_devices // 2, num_node_devices)),
)
shard_groups = (
dist.new_group(shard_rank_lists[0]),
dist.new_group(shard_rank_lists[1]),
)
my_shard_group = (
shard_groups[0] if self.rank in shard_rank_lists[0] else shard_groups[1]
)
my_replicate_group = None
my_rank = self.rank
# Create groups like (0, 4), (1, 5), (2, 6) etc and assign appropriately
shard_factor = len(shard_rank_lists[0])
for i in range(num_node_devices // 2):
replicate_group_ranks = list(range(i, num_node_devices, shard_factor))
replicate_group = dist.new_group(replicate_group_ranks)
if my_rank in replicate_group_ranks:
my_replicate_group = replicate_group
fsdp_ctor = partial(
FSDP,
sharding_strategy=ShardingStrategy.HYBRID_SHARD,
use_orig_params=True,
process_group=(my_shard_group, my_replicate_group),
)
model = fsdp_ctor(model)
optim = torch.optim.AdamW(model.parameters())
# Initialize optimizer states
model(torch.randn(2, 10)).sum().backward()
optim.step()
shard_g = model.process_group
replicate_g = model._inter_node_pg
assert shard_g == my_shard_group
assert replicate_g == my_replicate_group
with FSDP.state_dict_type(model, StateDictType.SHARDED_STATE_DICT):
msd = model.state_dict()
osd = FSDP.optim_state_dict(model, optim)
load_model = fsdp_ctor(MyModel().to(device_type))
load_optim = torch.optim.AdamW(load_model.parameters())
with FSDP.state_dict_type(load_model, StateDictType.SHARDED_STATE_DICT):
load_model.load_state_dict(msd)
FSDP.optim_state_dict_to_load(load_model, load_optim, osd)
load_optim.load_state_dict(osd)
@skip_if_lt_x_gpu(4)
def test_hsdp_sync_module_state(self):
model = MyModel().to(device_type)
num_node_devices = torch.accelerator.device_count()
shard_rank_lists = (
list(range(num_node_devices // 2)),
list(range(num_node_devices // 2, num_node_devices)),
)
shard_groups = (
dist.new_group(shard_rank_lists[0]),
dist.new_group(shard_rank_lists[1]),
)
my_shard_group = (
shard_groups[0] if self.rank in shard_rank_lists[0] else shard_groups[1]
)
my_replicate_group = None
my_rank = self.rank
# Create groups like (0, 4), (1, 5), (2, 6) etc and assign appropriately
shard_factor = len(shard_rank_lists[0])
for i in range(num_node_devices // 2):
replicate_group_ranks = list(range(i, num_node_devices, shard_factor))
replicate_group = dist.new_group(replicate_group_ranks)
if my_rank in replicate_group_ranks:
my_replicate_group = replicate_group
nn.init.constant_(model.lin1.weight, self.rank)
nn.init.constant_(model.lin2.weight, self.rank)
nn.init.constant_(model.lin3.weight, self.rank)
fsdp_ctor = partial(
FSDP,
sharding_strategy=ShardingStrategy.HYBRID_SHARD,
use_orig_params=True,
sync_module_states=True,
process_group=(my_shard_group, my_replicate_group),
)
model = fsdp_ctor(model)
with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT):
self.assertTrue((model.lin1.weight == 0).all())
self.assertTrue((model.lin2.weight == 0).all())
self.assertTrue((model.lin3.weight == 0).all())
@skip_if_lt_x_gpu(2)
def test_invalid_pg_specification_raises(self):
pol = ModuleWrapPolicy({nn.Linear})
model = MyModel().to(device_type)
with self.assertRaisesRegex(
ValueError, "Expected process_group to be passed in"
):
model = FSDP(
model,
auto_wrap_policy=pol,
process_group=self.process_group,
sharding_strategy=ShardingStrategy.HYBRID_SHARD,
)
# TODO - add test for ZeRO-2 style sharding ensure params are not
# resharded after forward.
@skip_if_lt_x_gpu(2)
def test_fsdp_hybrid_shard_basic_setup(self):
"""
Tests basic functionality of HYBRID_SHARD and _HYBRID_SHARD_ZERO2:
1. Inter and intra-node process groups are correctly setup
2. Process groups are the same across FSDP wrapped instances
3. reduce_scatter and allreduce called the expected no. of times
"""
self.run_subtests(
{
"hsdp_sharding_strategy": [
ShardingStrategy.HYBRID_SHARD,
ShardingStrategy._HYBRID_SHARD_ZERO2,
],
"sharding_strategy_mode": [
ShardingStrategyMode.ALL_HYBRID_SHARD,
ShardingStrategyMode.MIXED_HYBRID_FULL_SHARD,
],
"use_orig_params": [False, True],
"use_device_mesh": [False, True],
},
self._test_fsdp_hybrid_shard_basic_setup,
)
def _test_fsdp_hybrid_shard_basic_setup(
self,
hsdp_sharding_strategy: ShardingStrategy,
sharding_strategy_mode: ShardingStrategyMode,
use_orig_params: bool,
use_device_mesh: bool,
):
if use_device_mesh:
device_mesh = init_device_mesh(device_type, (1, self.world_size))
else:
device_mesh = None
hsdp_model = self._init_hsdp_model(
hsdp_sharding_strategy,
sharding_strategy_mode,
use_orig_params,
hsdp_device_mesh=device_mesh,
)
# All FSDP modules should have state.process_group as the process group over which to
# shard (default process group), and state._inter_node_pg (process group containing only
# this rank)
intra_node_pgs = set()
inter_node_pgs = set()
for fsdp_module in hsdp_model.fsdp_modules(hsdp_model):
# TODO: This needs to be replaced if we deprecate
# `FSDP.sharding_strategy` to only use the handle one.
# https://github.com/pytorch/pytorch/issues/90857
if fsdp_module.sharding_strategy not in HYBRID_SHARDING_STRATEGIES:
self.assertEqual(
sharding_strategy_mode, ShardingStrategyMode.MIXED_HYBRID_FULL_SHARD
)
self.assertEqual(
fsdp_module.sharding_strategy, ShardingStrategy.FULL_SHARD
)
continue
# process_group should be across the node, which is just the
# whole world here.
self.assertEqual(
dist.get_world_size(fsdp_module.process_group),
dist.get_world_size(self.process_group),
)
intra_node_pgs.add(fsdp_module.process_group)
inter_node_pg = fsdp_module._inter_node_pg
inter_node_pgs.add(inter_node_pg)
self.assertEqual(1, dist.get_world_size(inter_node_pg))
self.assertFalse(_rank_not_in_group(inter_node_pg))
self.assertEqual(hsdp_sharding_strategy, fsdp_module.sharding_strategy)
# All fsdp modules should share the same process groups
self.assertEqual(1, len(intra_node_pgs))
self.assertEqual(1, len(inter_node_pgs))
orig_ar = dist.all_reduce
orig_rs = dist.reduce_scatter_tensor
def patched_collective(orig_collective, counter, *args, **kwargs):
counter[orig_collective] += 1
return orig_collective(*args, **kwargs)
cntr = Counter()
patched_allreduce = partial(patched_collective, orig_ar, cntr)
patched_reduce_scatter = partial(patched_collective, orig_rs, cntr)
with (
patch_allreduce(patched_allreduce),
patch_reduce_scatter(patched_reduce_scatter),
):
inp = hsdp_model.get_input(device=torch.accelerator.current_device_index())
out = hsdp_model(inp[0], inp[1])
loss = hsdp_model.get_loss(inp, out)
loss.backward()
if sharding_strategy_mode == ShardingStrategyMode.ALL_HYBRID_SHARD:
num_flat_params = len(list(traversal_utils._get_fsdp_handles(hsdp_model)))
self.assertEqual(num_flat_params, cntr[orig_ar])
self.assertEqual(num_flat_params, cntr[orig_rs])
elif sharding_strategy_mode == ShardingStrategyMode.MIXED_HYBRID_FULL_SHARD:
num_hsdp_flat_params = len(
list(traversal_utils._get_fsdp_handles(hsdp_model.transformer))
)
num_flat_params = len(list(traversal_utils._get_fsdp_handles(hsdp_model)))
self.assertEqual(num_hsdp_flat_params, cntr[orig_ar])
self.assertEqual(num_flat_params, cntr[orig_rs])
@skip_if_lt_x_gpu(4)
def test_fsdp_hybrid_shard_parity(self):
self.run_subtests(
{
"hsdp_sharding_strategy": [
ShardingStrategy.HYBRID_SHARD,
ShardingStrategy._HYBRID_SHARD_ZERO2,
],
"use_orig_params": [False, True],
},
self._test_fsdp_hybrid_shard_parity,
)
def _test_fsdp_hybrid_shard_parity(
self, hsdp_sharding_strategy: ShardingStrategy, use_orig_params: bool
):
fsdp_model = self._init_fsdp_model(use_orig_params)
global_pg = dist.distributed_c10d._get_default_group()
hsdp_pgs = _init_intra_and_inter_node_groups(global_pg, 2)
hsdp_model = self._init_hsdp_model(
hsdp_sharding_strategy,
ShardingStrategyMode.ALL_HYBRID_SHARD,
use_orig_params,
hsdp_process_groups=hsdp_pgs,
)
assert hsdp_model._inter_node_pg.size() > 1, (
"HSDP model initialized without replication"
)
fsdp_optim = torch.optim.Adam(fsdp_model.parameters(), lr=1e-2)
hsdp_optim = torch.optim.Adam(hsdp_model.parameters(), lr=1e-2)
torch.manual_seed(global_pg.rank() + 1)
for _ in range(5):
inp = fsdp_model.module.get_input(torch.device(device_type))
losses: list[torch.Tensor] = []
for model, optim in ((fsdp_model, fsdp_optim), (hsdp_model, hsdp_optim)):
optim.zero_grad()
loss = model(*inp).sum()
losses.append(loss)
loss.backward()
optim.step()
self.assertEqual(losses[0], losses[1])
def _init_fsdp_model(self, use_orig_params: bool) -> nn.Module:
auto_wrap_policy = ModuleWrapPolicy(
{TransformerEncoderLayer, TransformerDecoderLayer},
)
hsdp_kwargs = {
"auto_wrap_policy": auto_wrap_policy,
"device_id": torch.accelerator.current_device_index(),
"use_orig_params": use_orig_params,
}
fsdp_model = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.RECURSIVE,
DEVICEInitMode.DEVICE_BEFORE,
hsdp_kwargs,
deterministic=True,
)
return fsdp_model
def _init_hsdp_model(
self,
hsdp_sharding_strategy: ShardingStrategy,
sharding_strategy_mode: str,
use_orig_params: bool,
hsdp_process_groups: Optional[
tuple[dist.ProcessGroup, dist.ProcessGroup]
] = None,
hsdp_device_mesh: Optional = None,
):
assert hsdp_process_groups is None or hsdp_device_mesh is None
auto_wrap_policy = ModuleWrapPolicy(
{TransformerEncoderLayer, TransformerDecoderLayer},
)
hsdp_kwargs = {
"device_id": torch.accelerator.current_device_index(),
"auto_wrap_policy": auto_wrap_policy,
"sharding_strategy": hsdp_sharding_strategy,
"use_orig_params": use_orig_params,
"device_mesh": hsdp_device_mesh,
}
if sharding_strategy_mode == ShardingStrategyMode.ALL_HYBRID_SHARD:
hsdp_model = TransformerWithSharedParams.init(
hsdp_process_groups or self.process_group,
FSDPInitMode.RECURSIVE,
DEVICEInitMode.DEVICE_BEFORE,
hsdp_kwargs,
deterministic=True,
)
elif sharding_strategy_mode == ShardingStrategyMode.MIXED_HYBRID_FULL_SHARD:
model = TransformerWithSharedParams.init(
hsdp_process_groups or self.process_group,
FSDPInitMode.NO_FSDP,
DEVICEInitMode.DEVICE_BEFORE,
{},
deterministic=True,
)
# Use the HSDP strategy for the transformer module
model.transformer = FSDP(model.transformer, **hsdp_kwargs)
# Use `FULL_SHARD` for the embedding and output projection
hsdp_model = FSDP(
model,
device_id=torch.accelerator.current_device_index(),
sharding_strategy=ShardingStrategy.FULL_SHARD,
use_orig_params=use_orig_params,
)
return hsdp_model
instantiate_parametrized_tests(TestFSDPHybridShard)
if __name__ == "__main__":
run_tests()
| TestFSDPHybridShard |
python | pytorch__pytorch | torch/_inductor/subgraph_lowering.py | {
"start": 4802,
"end": 7312
} | class ____(WrapperHandler):
def __init__(self, tracer: torch.fx.Tracer, num_inputs: int) -> None:
parent = tracer.create_proxy("placeholder", "ops", (), {})
super().__init__(parent)
self.tracer = tracer
self.placeholders = [
self.tracer.create_proxy("placeholder", f"input{i}", (), {})
for i in range(num_inputs)
]
def placeholder(self, idx: int) -> torch.fx.Proxy:
return self.placeholders[idx]
def output(self, *args: tuple[object]) -> None:
self.tracer.create_node(
"output", "output", (tuple(self.tracer.create_arg(a) for a in args),), {}
)
def lower_pointwise_subgraph(
subgraph: ir.Subgraph, inputs: list[InputDescriptor]
) -> Callable[_P, Any]:
# Lower subgraph to ir.Pointwise nodes
def fake_inner_fn(
loop_idx: int, input_idx: int
) -> Union[ir.Expr, ir.TensorBox, None]:
return ops.placeholder(input_idx)
graph_inputs = [
ir.Pointwise.create(
device=desc.device,
dtype=desc.dtype,
inner_fn=functools.partial(fake_inner_fn, input_idx=i),
ranges=[],
)
for i, desc in enumerate(inputs)
]
gm = subgraph.graph_module
pw_subgraph = PointwiseSubgraphLowering(gm, root_graph_lowering=V.graph)
with V.set_graph_handler(pw_subgraph): # type: ignore[arg-type]
pw_subgraph.run(*graph_inputs)
# Combine multiple pointwise computations into a single graph module
# Do this by tracing through each individually and doing CSE
tracer = torch.fx.Tracer()
tracer.graph = torch.fx.Graph(tracer_cls=tracer.__class__)
trace_ops = SimpleCSEHandler(TracingOpsHandler(tracer, len(inputs)))
assert pw_subgraph.graph_outputs is not None
with V.set_ops_handler(trace_ops):
output_irs = []
for out_var in pw_subgraph.graph_outputs:
assert isinstance(out_var, ir.TensorBox), type(out_var)
assert out_var.get_size() == []
assert isinstance(out_var.data, ir.StorageBox)
assert isinstance(out_var.data.data, ir.Pointwise)
idx = ()
ir_out = out_var.data.data.inner_fn(idx)
output_irs.append(ir_out)
ops.output(*output_irs)
lowered_gm = torch.fx.GraphModule({}, tracer.graph)
def inner_fn(*args: _P.args, **kwargs: _P.kwargs) -> Any:
return lowered_gm(V.get_ops_handler(), *args, **kwargs)
return inner_fn
| TracingOpsHandler |
python | keras-team__keras | keras/src/backend/common/global_state_test.py | {
"start": 137,
"end": 499
} | class ____(test_case.TestCase):
def test_clear_session(self):
name0 = auto_name("somename")
self.assertEqual(name0, "somename")
name1 = auto_name("somename")
self.assertEqual(name1, "somename_1")
global_state.clear_session()
name0 = auto_name("somename")
self.assertEqual(name0, "somename")
| GlobalStateTest |
python | readthedocs__readthedocs.org | readthedocs/builds/forms.py | {
"start": 4885,
"end": 7786
} | class ____(forms.ModelForm):
project = forms.CharField(widget=forms.HiddenInput(), required=False)
match_arg = forms.CharField(
label="Custom match",
help_text=_(
textwrap.dedent(
"""
A regular expression to match the version.
<a href="https://docs.readthedocs.io/page/automation-rules.html#user-defined-matches">
Check the documentation for valid patterns.
</a>
"""
)
),
required=False,
)
class Meta:
model = RegexAutomationRule
fields = [
"project",
"description",
"predefined_match_arg",
"match_arg",
"version_type",
"action",
]
# Don't pollute the UI with help texts
help_texts = {
"version_type": "",
"action": "",
}
labels = {
"predefined_match_arg": "Match",
}
def __init__(self, *args, **kwargs):
self.project = kwargs.pop("project", None)
super().__init__(*args, **kwargs)
# Only list supported types
self.fields["version_type"].choices = [
(None, "-" * 9),
(BRANCH, BRANCH_TEXT),
(TAG, TAG_TEXT),
]
# Remove privacy actions not available in community
if not settings.ALLOW_PRIVATE_REPOS:
invalid_actions = {
VersionAutomationRule.MAKE_VERSION_PUBLIC_ACTION,
VersionAutomationRule.MAKE_VERSION_PRIVATE_ACTION,
}
action_choices = self.fields["action"].choices
self.fields["action"].choices = [
action for action in action_choices if action[0] not in invalid_actions
]
if not self.instance.pk:
self.initial["predefined_match_arg"] = ALL_VERSIONS
# Allow users to start from the pattern of the predefined match
# if they want to use a custom one.
if self.instance.pk and self.instance.predefined_match_arg:
self.initial["match_arg"] = self.instance.get_match_arg()
def clean_match_arg(self):
"""Check that a custom match was given if a predefined match wasn't used."""
match_arg = self.cleaned_data["match_arg"]
predefined_match = self.cleaned_data["predefined_match_arg"]
if predefined_match:
match_arg = ""
if not predefined_match and not match_arg:
raise forms.ValidationError(
_("Custom match should not be empty."),
)
try:
re.compile(match_arg)
except Exception:
raise forms.ValidationError(
_("Invalid Python regular expression."),
)
return match_arg
def clean_project(self):
return self.project
| RegexAutomationRuleForm |
python | FactoryBoy__factory_boy | tests/utils.py | {
"start": 349,
"end": 996
} | class ____:
"""An abstract context processor for patching multiple modules."""
def __init__(self, *target_modules, **kwargs):
super().__init__(**kwargs)
self.patchers = [self._build_patcher(mod) for mod in target_modules]
def _build_patcher(self, target_module): # pragma: no cover
"""Build a mock patcher for the target module."""
raise NotImplementedError()
def __enter__(self):
for patcher in self.patchers:
patcher.start()
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
for patcher in self.patchers:
patcher.stop()
| MultiModulePatcher |
python | great-expectations__great_expectations | great_expectations/render/components.py | {
"start": 35110,
"end": 36182
} | class ____(Schema):
name = fields.String(required=False, allow_none=True)
value = fields.Nested(RenderedAtomicValueSchema(), required=True, allow_none=False)
value_type = fields.String(required=True, allow_none=False)
exception = fields.String(required=False, allow_none=True)
REMOVE_KEYS_IF_NONE: Final[tuple[str, ...]] = ("exception",)
@post_load
def make_rendered_atomic_content(self, data, **kwargs):
return RenderedAtomicContent(**data)
@post_dump
def clean_null_attrs(self, data: dict, **kwargs: dict) -> dict:
"""Removes the attributes in RenderedAtomicContentSchema.REMOVE_KEYS_IF_NONE during serialization if
their values are None.""" # noqa: E501 # FIXME CoP
data = deepcopy(data)
for key in RenderedAtomicContentSchema.REMOVE_KEYS_IF_NONE:
if key in data and data[key] is None:
data.pop(key)
return data
renderedAtomicContentSchema = RenderedAtomicContentSchema()
renderedAtomicValueSchema = RenderedAtomicValueSchema()
| RenderedAtomicContentSchema |
python | davidhalter__jedi | test/completion/lambdas.py | {
"start": 836,
"end": 1408
} | class ____():
def __init__(self, foo=1.0):
self.a = lambda: 1
self.foo = foo
def ret(self):
return lambda: self.foo
def with_param(self):
return lambda x: x + self.a()
lambd = lambda self: self.foo
#? int()
C().a()
#? str()
C('foo').ret()()
index = C().with_param()(1)
#? float()
['', 1, 1.0][index]
#? float()
C().lambd()
#? int()
C(1).lambd()
def xy(param):
def ret(a, b):
return a + b
return lambda b: ret(param, b)
#? int()
xy(1)(2)
# -----------------
# lambda param (#379)
# -----------------
| C |
python | sphinx-doc__sphinx | sphinx/util/logging.py | {
"start": 11532,
"end": 12608
} | class ____(logging.Filter):
"""Raise exception if a warning is emitted."""
def filter(self, record: logging.LogRecord) -> NoReturn:
try:
message = record.msg % record.args
except (TypeError, ValueError):
message = record.msg # use record.msg itself
if location := getattr(record, 'location', ''):
message = f'{location}:{message}'
if record.exc_info is not None:
raise SphinxWarning(message) from record.exc_info[1]
raise SphinxWarning(message)
def is_suppressed_warning(
warning_type: str, sub_type: str, suppress_warnings: Set[str] | Sequence[str]
) -> bool:
"""Check whether the warning is suppressed or not."""
if warning_type is None or len(suppress_warnings) == 0:
return False
suppressed_warnings = frozenset(suppress_warnings)
if warning_type in suppressed_warnings:
return True
if f'{warning_type}.*' in suppressed_warnings:
return True
return f'{warning_type}.{sub_type}' in suppressed_warnings
| _RaiseOnWarningFilter |
python | sphinx-doc__sphinx | sphinx/util/cfamily.py | {
"start": 6047,
"end": 6624
} | class ____(ASTAttribute):
"""For simple attributes defined by the user."""
def __init__(self, id: str) -> None:
self.id = id
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTIdAttribute):
return NotImplemented
return self.id == other.id
def __hash__(self) -> int:
return hash(self.id)
def _stringify(self, transform: StringifyTransform) -> str:
return self.id
def describe_signature(self, signode: TextElement) -> None:
signode.append(nodes.Text(self.id))
| ASTIdAttribute |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/width_100.py | {
"start": 119,
"end": 735
} | class ____(App[None]):
CSS = """
Vertical {
border: solid red;
width: auto;
Label {
border: solid green;
}
#first {
width: 100%;
}
#second {
width: auto;
}
}
"""
def compose(self) -> ComposeResult:
with Vertical():
yield Label("I want to be 100% of my parent", id="first")
yield Label(
"I want my parent to be wide enough to wrap me and no more", id="second"
)
if __name__ == "__main__":
Width100PCentApp().run()
| Width100PCentApp |
python | numba__numba | numba/tests/test_types.py | {
"start": 27816,
"end": 31176
} | class ____(TestCase):
def test_omitted_type(self):
# issue https://github.com/numba/numba/issues/5471
def inner(a):
pass
@overload(inner)
def inner_overload(a):
if not isinstance(a, types.Literal):
return
return lambda a: a
@njit
def my_func(a='a'):
return inner(a)
@njit
def f():
return my_func()
@njit
def g():
return my_func('b')
self.assertEqual(f(), 'a')
self.assertEqual(g(), 'b')
def test_type_of_literal(self):
# type(val) where val is a literal should not give a literal type.
def inner(a):
pass
@overload(inner)
def inner_overload(a):
if not isinstance(a, types.Literal):
return
self.assertIsInstance(a, types.Literal)
# NOTE: using 1.23 to ensure that the result is indeed an int.
return lambda a: type(a)(a + 1.23)
@njit
def my_func(a=1):
return inner(a)
@njit
def f():
return my_func()
@njit
def g():
return my_func(100)
self.assertEqual(f(), 2)
self.assertEqual(g(), 101)
def test_issue_typeref_key(self):
# issue https://github.com/numba/numba/issues/6336
class NoUniqueNameType(types.Dummy):
def __init__(self, param):
super(NoUniqueNameType, self).__init__('NoUniqueNameType')
self.param = param
@property
def key(self):
return self.param
no_unique_name_type_1 = NoUniqueNameType(1)
no_unique_name_type_2 = NoUniqueNameType(2)
for ty1 in (no_unique_name_type_1, no_unique_name_type_2):
for ty2 in (no_unique_name_type_1, no_unique_name_type_2):
self.assertIs(
types.TypeRef(ty1) == types.TypeRef(ty2), ty1 == ty2)
def test_issue_list_type_key(self):
# https://github.com/numba/numba/issues/6397
class NoUniqueNameType(types.Dummy):
def __init__(self, param):
super(NoUniqueNameType, self).__init__('NoUniqueNameType')
self.param = param
@property
def key(self):
return self.param
no_unique_name_type_1 = NoUniqueNameType(1)
no_unique_name_type_2 = NoUniqueNameType(2)
for ty1 in (no_unique_name_type_1, no_unique_name_type_2):
for ty2 in (no_unique_name_type_1, no_unique_name_type_2):
self.assertIs(
types.ListType(ty1) == types.ListType(ty2), # noqa: E721
ty1 == ty2
)
def test_int_enum_no_conversion(self):
# Ensures that IntEnumMember.can_convert_to() handles the case when the
# typing context's can_convert() method returns None to signal no
# possible conversion
ctx = typing.Context()
int_enum_type = types.IntEnumMember(Shape, types.int64)
# Conversion of an int enum member to a 1D array would be invalid
invalid_toty = types.int64[::1]
self.assertIsNone(ctx.can_convert(int_enum_type, invalid_toty))
if __name__ == '__main__':
unittest.main()
| TestIssues |
python | gevent__gevent | src/gevent/tests/test__threading_2.py | {
"start": 23219,
"end": 23401
} | class ____(lock_tests.RLockTests):
# An Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
@skipDueToHang
| ConditionAsRLockTests |
python | kamyu104__LeetCode-Solutions | Python/largest-color-value-in-a-directed-graph.py | {
"start": 37,
"end": 1114
} | class ____(object):
def largestPathValue(self, colors, edges):
"""
:type colors: str
:type edges: List[List[int]]
:rtype: int
"""
adj = [[] for _ in xrange(len(colors))]
in_degree = [0]*len(colors)
for u, v in edges:
adj[u].append(v)
in_degree[v] += 1
q = []
for u in xrange(len(colors)):
if not in_degree[u]:
q.append(u)
dp = [[0]*26 for _ in xrange(len(colors))]
result, cnt = -1, 0
while q:
new_q = []
for u in q:
cnt += 1
dp[u][ord(colors[u])-ord('a')] += 1
result = max(result, dp[u][ord(colors[u])-ord('a')])
for v in adj[u]:
for c in xrange(26):
dp[v][c] = max(dp[v][c], dp[u][c])
in_degree[v] -= 1
if not in_degree[v]:
new_q.append(v)
q = new_q
return result if cnt == len(colors) else -1
| Solution |
python | pypa__hatch | tests/backend/builders/test_config.py | {
"start": 4412,
"end": 6211
} | class ____:
def test_default(self, isolation):
builder = MockBuilder(str(isolation))
assert builder.config.ignore_vcs is builder.config.ignore_vcs is False
def test_target(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"foo": {"ignore-vcs": True}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
assert builder.config.ignore_vcs is True
def test_target_not_boolean(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"foo": {"ignore-vcs": 9000}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with pytest.raises(TypeError, match="Field `tool.hatch.build.targets.foo.ignore-vcs` must be a boolean"):
_ = builder.config.ignore_vcs
def test_global(self, isolation):
config = {"tool": {"hatch": {"build": {"ignore-vcs": True}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
assert builder.config.ignore_vcs is True
def test_global_not_boolean(self, isolation):
config = {"tool": {"hatch": {"build": {"ignore-vcs": 9000}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with pytest.raises(TypeError, match="Field `tool.hatch.build.ignore-vcs` must be a boolean"):
_ = builder.config.ignore_vcs
def test_target_overrides_global(self, isolation):
config = {"tool": {"hatch": {"build": {"ignore-vcs": True, "targets": {"foo": {"ignore-vcs": False}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
assert builder.config.ignore_vcs is False
| TestIgnoreVCS |
python | tiangolo__fastapi | tests/test_include_router_defaults_overrides.py | {
"start": 334,
"end": 413
} | class ____(JSONResponse):
media_type = "application/x-level-2"
| ResponseLevel2 |
python | joke2k__faker | tests/providers/test_color.py | {
"start": 17839,
"end": 18169
} | class ____:
"""Test sk_SK color provider methods"""
def test_safe_color_name(self, faker, num_samples):
for _ in range(num_samples):
safe_color_name = faker.safe_color_name()
assert isinstance(safe_color_name, str)
assert safe_color_name in SkSkColorProvider.safe_colors
| TestSkSk |
python | pytorch__pytorch | torch/_export/serde/schema.py | {
"start": 1358,
"end": 1730
} | class ____(_Union):
as_int: Annotated[int, 10]
as_bool: Annotated[bool, 20]
as_float: Annotated[float, 30]
# This is for storing the symbolic expressions behind symints/symfloats/symbools
# For example, we can get something like
# SymExpr(expr_str="s0 + s1", hint=SymExprHint(as_int=4)
# if we also have the hint that s0 and s1 are both 2.
@dataclass
| SymExprHint |
python | python__mypy | mypyc/analysis/ircheck.py | {
"start": 1267,
"end": 2601
} | class ____:
def __init__(self, source: Op | BasicBlock, desc: str) -> None:
self.source = source
self.desc = desc
def __eq__(self, other: object) -> bool:
return (
isinstance(other, FnError) and self.source == other.source and self.desc == other.desc
)
def __repr__(self) -> str:
return f"FnError(source={self.source}, desc={self.desc})"
def check_func_ir(fn: FuncIR) -> list[FnError]:
"""Applies validations to a given function ir and returns a list of errors found."""
errors = []
op_set = set()
for block in fn.blocks:
if not block.terminated:
errors.append(
FnError(source=block.ops[-1] if block.ops else block, desc="Block not terminated")
)
for op in block.ops[:-1]:
if isinstance(op, ControlOp):
errors.append(FnError(source=op, desc="Block has operations after control op"))
if op in op_set:
errors.append(FnError(source=op, desc="Func has a duplicate op"))
op_set.add(op)
errors.extend(check_op_sources_valid(fn))
if errors:
return errors
op_checker = OpChecker(fn)
for block in fn.blocks:
for op in block.ops:
op.accept(op_checker)
return op_checker.errors
| FnError |
python | Netflix__metaflow | metaflow/plugins/argo/argo_workflows_cli.py | {
"start": 2019,
"end": 52990
} | class ____(MetaflowException):
headline = "Unsupported version of Python"
@click.group()
def cli():
pass
@cli.group(help="Commands related to Argo Workflows.")
@click.option(
"--name",
default=None,
type=str,
help="Argo Workflow name. The flow name is used instead if "
"this option is not specified.",
)
@click.pass_obj
def argo_workflows(obj, name=None):
check_python_version(obj)
obj.check(obj.graph, obj.flow, obj.environment, pylint=obj.pylint)
(
obj.workflow_name,
obj.token_prefix,
obj.is_project,
obj._is_workflow_name_modified,
obj._exception_on_create, # exception_on_create is used to prevent deploying new flows with too long names via --name
) = resolve_workflow_name_v2(obj, name)
# Backward compatibility for Metaflow versions <=2.16 because of
# change in name length restrictions in Argo Workflows from 253 to 52
# characters.
(
obj._v1_workflow_name,
obj._v1_is_workflow_name_modified,
) = resolve_workflow_name_v1(obj, name)
@argo_workflows.command(help="Deploy a new version of this workflow to Argo Workflows.")
@click.option(
"--authorize",
default=None,
help="Authorize using this production token. You need this "
"when you are re-deploying an existing flow for the first "
"time. The token is cached in METAFLOW_HOME, so you only "
"need to specify this once.",
)
@click.option(
"--generate-new-token",
is_flag=True,
help="Generate a new production token for this flow. "
"This will move the production flow to a new namespace.",
)
@click.option(
"--new-token",
"given_token",
default=None,
help="Use the given production token for this flow. "
"This will move the production flow to the given namespace.",
)
@click.option(
"--tag",
"tags",
multiple=True,
default=None,
help="Annotate all objects produced by Argo Workflows runs "
"with the given tag. You can specify this option multiple "
"times to attach multiple tags.",
)
@click.option(
"--namespace",
"user_namespace",
default=None,
help="Change the namespace from the default (production token) "
"to the given tag. See run --help for more information.",
)
@click.option(
"--only-json",
is_flag=True,
default=False,
help="Only print out JSON sent to Argo Workflows. Do not deploy anything.",
hidden=True,
)
@click.option(
"--max-workers",
default=100,
show_default=True,
help="Maximum number of parallel processes.",
)
@click.option(
"--workflow-timeout", default=None, type=int, help="Workflow timeout in seconds."
)
@click.option(
"--workflow-priority",
default=None,
type=int,
help="Workflow priority as an integer. Workflows with higher priority "
"are processed first if Argo Workflows controller is configured to process "
"limited number of workflows in parallel",
)
@click.option(
"--auto-emit-argo-events/--no-auto-emit-argo-events",
default=True, # TODO: Default to a value from config
show_default=True,
help="Auto emits Argo Events when the run completes successfully.",
)
@click.option(
"--notify-on-error/--no-notify-on-error",
default=False,
show_default=True,
help="Notify if the workflow fails.",
)
@click.option(
"--notify-on-success/--no-notify-on-success",
default=False,
show_default=True,
help="Notify if the workflow succeeds.",
)
@click.option(
"--notify-slack-webhook-url",
default=None,
help="Slack incoming webhook url for workflow success/failure notifications.",
)
@click.option(
"--notify-pager-duty-integration-key",
default=None,
help="PagerDuty Events API V2 Integration key for workflow success/failure notifications.",
)
@click.option(
"--notify-incident-io-api-key",
default=None,
help="Incident.io API V2 key for workflow success/failure notifications.",
)
@click.option(
"--incident-io-alert-source-config-id",
default=None,
help="Incident.io Alert source config ID. Example '01GW2G3V0S59R238FAHPDS1R66'",
)
@click.option(
"--incident-io-metadata",
default=None,
type=str,
multiple=True,
help="Incident.io Alert Custom Metadata field in the form of Key=Value",
)
@click.option(
"--enable-heartbeat-daemon/--no-enable-heartbeat-daemon",
default=False,
show_default=True,
help="Use a daemon container to broadcast heartbeats.",
)
@click.option(
"--deployer-attribute-file",
default=None,
show_default=True,
type=str,
help="Write the workflow name to the file specified. Used internally for Metaflow's Deployer API.",
hidden=True,
)
@click.option(
"--enable-error-msg-capture/--no-enable-error-msg-capture",
default=True,
show_default=True,
help="Capture stack trace of first failed task in exit hook.",
)
@click.option(
"--workflow-title",
default=None,
type=str,
help="Custom title for the workflow displayed in Argo Workflows UI. Defaults to `project_flow_name`. Supports markdown formatting.",
)
@click.option(
"--workflow-description",
default=None,
type=str,
help="Custom description for the workflow displayed in Argo Workflows UI. Defaults to the flow's docstring if available. Supports markdown formatting and multi-line text.",
)
@click.pass_obj
def create(
obj,
tags=None,
user_namespace=None,
only_json=False,
authorize=None,
generate_new_token=False,
given_token=None,
max_workers=None,
workflow_timeout=None,
workflow_priority=None,
auto_emit_argo_events=False,
notify_on_error=False,
notify_on_success=False,
notify_slack_webhook_url=None,
notify_pager_duty_integration_key=None,
notify_incident_io_api_key=None,
incident_io_alert_source_config_id=None,
incident_io_metadata=None,
enable_heartbeat_daemon=True,
workflow_title=None,
workflow_description=None,
deployer_attribute_file=None,
enable_error_msg_capture=False,
):
# check if we are supposed to block deploying the flow due to name length constraints.
if obj._exception_on_create is not None:
raise obj._exception_on_create
# TODO: Remove this once we have a proper validator system in place
for node in obj.graph:
for decorator, error_message in unsupported_decorators.items():
if any([d.name == decorator for d in node.decorators]):
raise MetaflowException(error_message % node.name)
validate_tags(tags)
if deployer_attribute_file:
with open(deployer_attribute_file, "w", encoding="utf-8") as f:
json.dump(
{
"name": obj.workflow_name,
"flow_name": obj.flow.name,
"metadata": obj.metadata.metadata_str(),
},
f,
)
obj.echo("Deploying *%s* to Argo Workflows..." % obj.flow.name, bold=True)
if SERVICE_VERSION_CHECK:
# TODO: Consider dispelling with this check since it's been 2 years since the
# needed metadata service changes have been available in open-source. It's
# likely that Metaflow users may not have access to metadata service from
# within their workstations.
check_metadata_service_version(obj)
token = resolve_token(
obj.workflow_name,
obj.token_prefix,
obj,
authorize,
given_token,
generate_new_token,
obj.is_project,
)
flow = make_flow(
obj,
token,
obj.workflow_name,
tags,
user_namespace,
max_workers,
workflow_timeout,
workflow_priority,
auto_emit_argo_events,
notify_on_error,
notify_on_success,
notify_slack_webhook_url,
notify_pager_duty_integration_key,
notify_incident_io_api_key,
incident_io_alert_source_config_id,
incident_io_metadata,
enable_heartbeat_daemon,
enable_error_msg_capture,
workflow_title,
workflow_description,
)
if only_json:
obj.echo_always(str(flow), err=False, no_bold=True)
# TODO: Support echo-ing Argo Events Sensor template
else:
flow.deploy()
obj.echo(
"Workflow *{workflow_name}* "
"for flow *{name}* deployed to "
"Argo Workflows successfully.\n".format(
workflow_name=obj.workflow_name, name=current.flow_name
),
bold=True,
)
if obj._is_workflow_name_modified:
obj.echo(
"Note that the flow was deployed with a modified name "
"due to Kubernetes naming conventions on Argo Workflows. The "
"original flow name is stored in the workflow annotations.\n",
wrap=True,
)
if obj.workflow_name != obj._v1_workflow_name:
# Delete the old workflow if it exists
try:
ArgoWorkflows.delete(obj._v1_workflow_name)
obj.echo("Important!", bold=True, nl=False)
obj.echo(
" To comply with new naming restrictions on Argo "
"Workflows, this deployment replaced the previously "
"deployed workflow {v1_workflow_name}.\n".format(
v1_workflow_name=obj._v1_workflow_name
),
wrap=True,
)
except ArgoWorkflowsException as e:
# TODO: Catch a more specific exception
pass
obj.echo("Warning! ", bold=True, nl=False)
obj.echo(
"Due to new naming restrictions on Argo Workflows, "
"re-deploying this flow with older versions of Metaflow (<{version}) "
"will result in the flow being deployed with a different name -\n"
"*{v1_workflow_name}* without replacing the version you just deployed. "
"This may result in duplicate executions of this flow. To avoid this issue, "
"always deploy this flow using Metaflow ≥{version} or specify the flow name with --name.".format(
v1_workflow_name=obj._v1_workflow_name,
version=NEW_ARGO_NAMELENGTH_METAFLOW_VERSION,
),
wrap=True,
)
if ARGO_WORKFLOWS_UI_URL:
obj.echo("See the deployed workflow here:", bold=True)
argo_workflowtemplate_link = "%s/workflow-templates/%s" % (
ARGO_WORKFLOWS_UI_URL.rstrip("/"),
KUBERNETES_NAMESPACE,
)
obj.echo(
"%s/%s\n\n" % (argo_workflowtemplate_link, obj.workflow_name),
indent=True,
)
flow.schedule()
obj.echo("What will trigger execution of the workflow:", bold=True)
obj.echo(flow.trigger_explanation(), indent=True)
# TODO: Print events emitted by execution of this flow
# response = ArgoWorkflows.trigger(obj.workflow_name)
# run_id = "argo-" + response["metadata"]["name"]
# obj.echo(
# "Workflow *{name}* triggered on Argo Workflows "
# "(run-id *{run_id}*).".format(name=obj.workflow_name, run_id=run_id),
# bold=True,
# )
def check_python_version(obj):
# argo-workflows integration for Metaflow isn't supported for Py versions below 3.6.
# This constraint can very well be lifted if desired.
if sys.version_info < (3, 6):
obj.echo("")
obj.echo(
"Metaflow doesn't support Argo Workflows for Python %s right now."
% platform.python_version()
)
obj.echo(
"Please upgrade your Python interpreter to version 3.6 (or higher) or "
"reach out to us at slack.outerbounds.co for more help."
)
raise UnsupportedPythonVersion(
"Try again with a more recent version of Python (>=3.6)."
)
def check_metadata_service_version(obj):
metadata = obj.metadata
version = metadata.version()
if version == "local":
return
elif version is not None and version_parse(version) >= version_parse("2.0.2"):
# Metaflow metadata service needs to be at least at version 2.0.2
# since prior versions did not support strings as object ids.
return
else:
obj.echo("")
obj.echo(
"You are running a version of the metaflow service that currently doesn't "
"support Argo Workflows. "
)
obj.echo(
"For more information on how to upgrade your service to a compatible "
"version (>= 2.0.2), visit:"
)
obj.echo(
" https://admin-docs.metaflow.org/metaflow-on-aws/operation"
"s-guide/metaflow-service-migration-guide",
fg="green",
)
obj.echo(
"Once you have upgraded your metadata service, please re-execute your "
"command."
)
raise IncorrectMetadataServiceVersion(
"Try again with a more recent version of metaflow service (>=2.0.2)."
)
# Argo Workflows has a few restrictions on workflow names:
# - Argo Workflow Template names can't be longer than 253 characters since
# they follow DNS Subdomain name restrictions.
# - Argo Workflows stores workflow template names as a label in the workflow
# template metadata - workflows.argoproj.io/workflow-template, which follows
# RFC 1123, which is a strict subset of DNS Subdomain names and allows for
# 63 characters.
# - Argo Workflows appends a unix timestamp to the workflow name when the workflow
# is created (-1243856725) from a workflow template deployed as a cron workflow template
# reducing the number of characters available to 52.
# - TODO: Check naming restrictions for Argo Events.
# In summary -
# - We truncate the workflow name to 45 characters to leave enough room for future
# enhancements to the Argo Workflows integration.
# - We remove any underscores since Argo Workflows doesn't allow them.
# - We convert the name to lower case.
# - We remove + and @ as not allowed characters, which can be part of the
# project branch due to using email addresses as user names.
# - We append a hash of the workflow name to the end to make it unique.
# A complication here is that in previous versions of Metaflow (=<2.16), the limit was a
# rather lax 253 characters - so we have two issues to contend with:
# 1. Replacing any equivalent flows deployed using previous versions of Metaflow which
# adds a bit of complexity to the business logic.
# 2. Breaking Metaflow users who have multiple versions of Metaflow floating in their
# organization. Imagine a scenario, where metaflow-v1 (253 chars) deploys the same
# flow which was previously deployed using the new metaflow-v2 (45 chars) - the user
# will end up with two workflows templates instead of one since metaflow-v1 has no
# awareness of the new name truncation logic introduced by metaflow-v2. Unfortunately,
# there is no way to avoid this scenario - so we will do our best to message to the
# user to not use an older version of Metaflow to redeploy affected flows.
# ------------------------------------------------------------------------------------------
# | metaflow-v1 (253 chars) | metaflow-v2 (45 chars) | Result |
# ------------------------------------------------------------------------------------------
# | workflow_name_modified = True | workflow_name_modified = False | Not possible |
# ------------------------------------------------------------------------------------------
# | workflow_name_modified = False | workflow_name_modified = True | Messaging needed |
# ------------------------------------------------------------------------------------------
# | workflow_name_modified = False | workflow_name_modified = False | No message needed |
# ------------------------------------------------------------------------------------------
# | workflow_name_modified = True | workflow_name_modified = True | Messaging needed |
# ------------------------------------------------------------------------------------------
def resolve_workflow_name_v1(obj, name):
# models the workflow_name calculation logic in Metaflow versions =<2.16
# important!! - should stay static including any future bugs
project = current.get("project_name")
is_workflow_name_modified = False
if project:
if name:
return None, False # not possible in versions =<2.16
workflow_name = current.project_flow_name
if len(workflow_name) > 253:
name_hash = to_unicode(
base64.b32encode(sha1(to_bytes(workflow_name)).digest())
)[:8].lower()
workflow_name = "%s-%s" % (workflow_name[:242], name_hash)
is_workflow_name_modified = True
if not VALID_NAME.search(workflow_name):
workflow_name = (
re.compile(r"^[^A-Za-z0-9]+")
.sub("", workflow_name)
.replace("_", "")
.replace("@", "")
.replace("+", "")
.lower()
)
is_workflow_name_modified = True
else:
if name and not VALID_NAME.search(name):
return None, False # not possible in versions =<2.16
workflow_name = name if name else current.flow_name
if len(workflow_name) > 253:
return None, False # not possible in versions =<2.16
if not VALID_NAME.search(workflow_name):
# Note - since the original name sanitization was a surjective
# mapping, using it here is a bug, but we leave this in
# place since the usage of v1_workflow_name is to generate
# historical workflow names, so we need to replicate all
# the bugs too :'(
workflow_name = (
re.compile(r"^[^A-Za-z0-9]+")
.sub("", workflow_name)
.replace("_", "")
.replace("@", "")
.replace("+", "")
.lower()
)
is_workflow_name_modified = True
return workflow_name, is_workflow_name_modified
def resolve_workflow_name_v2(obj, name):
    """Compute the Argo Workflows template name for this flow (current logic).

    Returns ``(workflow_name, token_prefix, is_project,
    is_workflow_name_modified, exception_on_create)``. *exception_on_create*
    is a deferred exception that only ``create`` raises, so that read-only
    commands can still resolve names that would be rejected for deployment.
    """
    # current logic for imputing workflow_name
    # 45-character limit leaves room for suffixes Argo appends to resources.
    limit = 45
    project = current.get("project_name")
    is_workflow_name_modified = False
    exception_on_create = None
    if project:
        if name:
            raise MetaflowException(
                "--name is not supported for @projects. Use --branch instead."
            )
        workflow_name = current.project_flow_name
        project_branch = to_bytes(".".join((project, current.branch_name)))
        token_prefix = (
            "mfprj-%s"
            % to_unicode(base64.b32encode(sha1(project_branch).digest()))[:16]
        )
        is_project = True
        if len(workflow_name) > limit:
            name_hash = to_unicode(
                base64.b32encode(sha1(to_bytes(workflow_name)).digest())
            )[:5].lower()
            # Generate a meaningful short name
            project_name = project
            branch_name = current.branch_name
            flow_name = current.flow_name
            parts = [project_name, branch_name, flow_name]
            # Reserve 6 characters for the "-<hash>" suffix.
            max_name_len = limit - 6
            # Guarantee each of the three components at least this budget.
            min_each = 7
            total_len = sum(len(p) for p in parts)
            # Distribute the leftover budget proportionally to component length;
            # the while loop hands out rounding remainders one at a time.
            remaining = max_name_len - 3 * min_each
            extras = [int(remaining * len(p) / total_len) for p in parts]
            while sum(extras) < remaining:
                extras[extras.index(min(extras))] += 1
            budgets = [min_each + e for e in extras]
            proj_budget = budgets[0]
            if len(project_name) <= proj_budget:
                proj_str = project_name
            else:
                # Over-long components keep their head and tail halves.
                h = proj_budget // 2
                t = proj_budget - h
                proj_str = project_name[:h] + project_name[-t:]
            branch_budget = budgets[1]
            branch_str = branch_name[:branch_budget]
            flow_budget = budgets[2]
            if len(flow_name) <= flow_budget:
                flow_str = flow_name
            else:
                h = flow_budget // 2
                t = flow_budget - h
                flow_str = flow_name[:h] + flow_name[-t:]
            descriptive_name = sanitize_for_argo(
                "%s.%s.%s" % (proj_str, branch_str, flow_str)
            )
            workflow_name = "%s-%s" % (descriptive_name, name_hash)
            is_workflow_name_modified = True
    else:
        if name and not VALID_NAME.search(name):
            raise MetaflowException(
                "Name '%s' contains invalid characters. The "
                "name must consist of lower case alphanumeric characters, '-' or '.'"
                ", and must start with an alphabetic character, "
                "and end with an alphanumeric character." % name
            )
        workflow_name = name if name else current.flow_name
        token_prefix = workflow_name
        is_project = False
        if len(workflow_name) > limit:
            # NOTE: We could have opted for truncating names specified by --name and flow_name
            # as well, but chose to error instead due to the expectation that users would
            # be intentionally explicit in their naming, and truncating these would lose
            # information they intended to encode in the deployment.
            # Use workflow_name in the message: `name` is None when the flow
            # name itself (not a user-supplied --name) is over the limit.
            exception_on_create = ArgoWorkflowsNameTooLong(
                "The full name of the workflow:\n*%s*\nis longer than %s "
                "characters.\n\n"
                "To deploy this workflow to Argo Workflows, please "
                "assign a shorter name\nusing the option\n"
                "*argo-workflows --name <name> create*." % (workflow_name, limit)
            )
        if not VALID_NAME.search(workflow_name):
            # NOTE: Even though sanitize_for_argo is surjective which can result in collisions,
            # we still use it here since production tokens guard against name collisions
            # and if we made it injective, metaflow 2.17 will result in every deployed
            # flow's name changing, significantly increasing the blast radius of the change.
            workflow_name = sanitize_for_argo(workflow_name)
            is_workflow_name_modified = True
    return (
        workflow_name,
        token_prefix.lower(),
        is_project,
        is_workflow_name_modified,
        exception_on_create,
    )
def make_flow(
    obj,
    token,
    name,
    tags,
    namespace,
    max_workers,
    workflow_timeout,
    workflow_priority,
    auto_emit_argo_events,
    notify_on_error,
    notify_on_success,
    notify_slack_webhook_url,
    notify_pager_duty_integration_key,
    notify_incident_io_api_key,
    incident_io_alert_source_config_id,
    incident_io_metadata,
    enable_heartbeat_daemon,
    enable_error_msg_capture,
    workflow_title,
    workflow_description,
):
    """Build a fully-configured ``ArgoWorkflows`` deployment object.

    Validates the datastore and notification configuration, attaches the
    @kubernetes and @environment decorators so their hooks run, packages the
    user code into the flow datastore, and returns the ``ArgoWorkflows``
    instance ready for deployment. Raises ``MetaflowException`` on invalid
    configuration.
    """
    # TODO: Make this check less specific to Amazon S3 as we introduce
    #       support for more cloud object stores.
    if obj.flow_datastore.TYPE not in ("azure", "gs", "s3"):
        raise MetaflowException(
            "Argo Workflows requires --datastore=s3 or --datastore=azure or --datastore=gs"
        )
    # Notifications need at least one delivery channel configured.
    if (notify_on_error or notify_on_success) and not (
        notify_slack_webhook_url
        or notify_pager_duty_integration_key
        or notify_incident_io_api_key
    ):
        raise MetaflowException(
            "Notifications require specifying an incoming Slack webhook url via --notify-slack-webhook-url, PagerDuty events v2 integration key via --notify-pager-duty-integration-key or\n"
            "Incident.io integration API key via --notify-incident-io-api-key.\n"
            "    If you would like to set up notifications for your Slack workspace, follow the instructions at "
            "https://api.slack.com/messaging/webhooks to generate a webhook url.\n"
            "    For notifications through PagerDuty, generate an integration key by following the instructions at "
            "https://support.pagerduty.com/docs/services-and-integrations#create-a-generic-events-api-integration\n"
            "    For notifications through Incident.io, generate an alert source config."
        )
    # Incident.io additionally requires an alert source config id.
    if (
        (notify_on_error or notify_on_success)
        and notify_incident_io_api_key
        and incident_io_alert_source_config_id is None
    ):
        raise MetaflowException(
            "Incident.io alerts require an alert source configuration ID. Please set one with --incident-io-alert-source-config-id"
        )
    # Attach @kubernetes and @environment decorator to the flow to
    # ensure that the related decorator hooks are invoked.
    decorators._attach_decorators(
        obj.flow, [KubernetesDecorator.name, EnvironmentDecorator.name]
    )
    decorators._init(obj.flow)
    decorators._init_step_decorators(
        obj.flow, obj.graph, obj.environment, obj.flow_datastore, obj.logger
    )
    # Re-read the graph: decorator initialization may have mutated it.
    obj.graph = obj.flow._graph
    # Save the code package in the flow datastore so that both user code and
    # metaflow package can be retrieved during workflow execution.
    obj.package = MetaflowPackage(
        obj.flow,
        obj.environment,
        obj.echo,
        suffixes=obj.package_suffixes,
        flow_datastore=obj.flow_datastore if FEAT_ALWAYS_UPLOAD_CODE_PACKAGE else None,
    )
    # This blocks until the package is created
    if FEAT_ALWAYS_UPLOAD_CODE_PACKAGE:
        package_url = obj.package.package_url()
        package_sha = obj.package.package_sha()
    else:
        package_url, package_sha = obj.flow_datastore.save_data(
            [obj.package.blob], len_hint=1
        )[0]
    return ArgoWorkflows(
        name,
        obj.graph,
        obj.flow,
        obj.package.package_metadata,
        package_sha,
        package_url,
        token,
        obj.metadata,
        obj.flow_datastore,
        obj.environment,
        obj.event_logger,
        obj.monitor,
        tags=tags,
        namespace=namespace,
        max_workers=max_workers,
        username=get_username(),
        workflow_timeout=workflow_timeout,
        workflow_priority=workflow_priority,
        auto_emit_argo_events=auto_emit_argo_events,
        notify_on_error=notify_on_error,
        notify_on_success=notify_on_success,
        notify_slack_webhook_url=notify_slack_webhook_url,
        notify_pager_duty_integration_key=notify_pager_duty_integration_key,
        notify_incident_io_api_key=notify_incident_io_api_key,
        incident_io_alert_source_config_id=incident_io_alert_source_config_id,
        incident_io_metadata=incident_io_metadata,
        enable_heartbeat_daemon=enable_heartbeat_daemon,
        enable_error_msg_capture=enable_error_msg_capture,
        workflow_title=workflow_title,
        workflow_description=workflow_description,
    )
# TODO: Unify this method with the one in step_functions_cli.py
def resolve_token(
    name, token_prefix, obj, authorize, given_token, generate_new_token, is_project
):
    """Resolve the production token to use for deploying *name*.

    Authorizes against any prior deployment (matching deploying user or
    token), honors an explicit --new-token / --generate-new-token request,
    prints token-usage instructions, and persists the chosen token locally
    before returning it. Raises ``IncorrectProductionToken`` when the caller
    cannot prove ownership of an existing deployment.
    """
    # 1) retrieve the previous deployment, if one exists
    workflow = ArgoWorkflows.get_existing_deployment(name)
    if workflow is None:
        obj.echo(
            "It seems this is the first time you are deploying *%s* to "
            "Argo Workflows." % name
        )
        prev_token = None
    else:
        prev_user, prev_token = workflow
    # 2) authorize this deployment
    if prev_token is not None:
        if authorize is None:
            authorize = load_token(token_prefix)
        elif authorize.startswith("production:"):
            # Tokens may be supplied with their display prefix; strip it.
            authorize = authorize[11:]
        # we allow the user who deployed the previous version to re-deploy,
        # even if they don't have the token
        if prev_user != get_username() and authorize != prev_token:
            obj.echo(
                "There is an existing version of *%s* on Argo Workflows which was "
                "deployed by the user *%s*." % (name, prev_user)
            )
            obj.echo(
                "To deploy a new version of this flow, you need to use the same "
                "production token that they used. "
            )
            obj.echo(
                "Please reach out to them to get the token. Once you have it, call "
                "this command:"
            )
            obj.echo("    argo-workflows create --authorize MY_TOKEN", fg="green")
            obj.echo(
                'See "Organizing Results" at docs.metaflow.org for more information '
                "about production tokens."
            )
            raise IncorrectProductionToken(
                "Try again with the correct production token."
            )
    # 3) do we need a new token or should we use the existing token?
    if given_token:
        if is_project:
            # we rely on a known prefix for @project tokens, so we can't
            # allow the user to specify a custom token with an arbitrary prefix
            raise MetaflowException(
                "--new-token is not supported for @projects. Use --generate-new-token "
                "to create a new token."
            )
        if given_token.startswith("production:"):
            given_token = given_token[11:]
        token = given_token
        obj.echo("")
        obj.echo("Using the given token, *%s*." % token)
    elif prev_token is None or generate_new_token:
        token = new_token(token_prefix, prev_token)
        if token is None:
            if prev_token is None:
                raise MetaflowInternalError(
                    "We could not generate a new token. This is unexpected. "
                )
            else:
                raise MetaflowException(
                    "--generate-new-token option is not supported after using "
                    "--new-token. Use --new-token to make a new namespace."
                )
        obj.echo("")
        obj.echo("A new production token generated.")
    else:
        token = prev_token
    # Print guidance on how to use / share the resolved token.
    obj.echo("")
    obj.echo("The namespace of this production flow is")
    obj.echo("    production:%s" % token, fg="green")
    obj.echo(
        "To analyze results of this production flow add this line in your notebooks:"
    )
    obj.echo('    namespace("production:%s")' % token, fg="green")
    obj.echo(
        "If you want to authorize other people to deploy new versions of this flow to "
        "Argo Workflows, they need to call"
    )
    obj.echo("    argo-workflows create --authorize %s" % token, fg="green")
    obj.echo("when deploying this flow to Argo Workflows for the first time.")
    obj.echo(
        'See "Organizing Results" at https://docs.metaflow.org/ for more '
        "information about production tokens."
    )
    obj.echo("")
    # Persist the token so subsequent commands pick it up automatically.
    store_token(token_prefix, token)
    return token
@parameters.add_custom_parameters(deploy_mode=False)
@argo_workflows.command(help="Trigger the workflow on Argo Workflows.")
@click.option(
    "--run-id-file",
    default=None,
    show_default=True,
    type=str,
    help="Write the ID of this run to the file specified.",
)
@click.option(
    "--deployer-attribute-file",
    default=None,
    show_default=True,
    type=str,
    help="Write the metadata and pathspec of this run to the file specified.\nUsed internally for Metaflow's Deployer API.",
    hidden=True,
)
@click.pass_obj
def trigger(obj, run_id_file=None, deployer_attribute_file=None, **kwargs):
    """Trigger an execution of the deployed workflow template.

    Falls back to the legacy (pre-truncation) workflow name when the flow
    was deployed by an older Metaflow client and has not been redeployed.
    """

    def _convert_value(param):
        # Swap `-` with `_` in parameter name to match click's behavior
        val = kwargs.get(param.name.replace("-", "_").lower())
        if param.kwargs.get("type") == JSONType:
            val = json.dumps(val)
        elif isinstance(val, parameters.DelayedEvaluationParameter):
            # Resolve delayed parameters to their string form at trigger time.
            val = val(return_str=True)
        return val

    # Only forward parameters the user actually supplied on the command line.
    params = {
        param.name: _convert_value(param)
        for _, param in obj.flow._get_parameters()
        if kwargs.get(param.name.replace("-", "_").lower()) is not None
    }
    workflow_name_to_deploy = obj.workflow_name
    # For users that upgraded the client but did not redeploy their flow,
    # we fallback to old workflow names in case of a conflict.
    if obj.workflow_name != obj._v1_workflow_name:
        # use the old name only if there exists a deployment.
        if ArgoWorkflows.get_existing_deployment(obj._v1_workflow_name):
            obj.echo("Warning! ", bold=True, nl=False)
            obj.echo(
                "Found a deployment of this flow with an old style name, defaulted to triggering *%s*."
                % obj._v1_workflow_name,
                wrap=True,
            )
            obj.echo(
                "Due to new naming restrictions on Argo Workflows, "
                "this flow will have a shorter name with newer versions of Metaflow (>=%s) "
                "which will allow it to be triggered through Argo UI as well. "
                % NEW_ARGO_NAMELENGTH_METAFLOW_VERSION,
                wrap=True,
            )
            obj.echo("re-deploy your flow in order to get rid of this message.")
            workflow_name_to_deploy = obj._v1_workflow_name
    response = ArgoWorkflows.trigger(workflow_name_to_deploy, params)
    # Metaflow run ids for Argo executions are the Argo object name prefixed
    # with "argo-".
    run_id = "argo-" + response["metadata"]["name"]
    if run_id_file:
        with open(run_id_file, "w") as f:
            f.write(str(run_id))
    if deployer_attribute_file:
        # Consumed by the Deployer API; keep keys stable.
        with open(deployer_attribute_file, "w") as f:
            json.dump(
                {
                    "name": workflow_name_to_deploy,
                    "metadata": obj.metadata.metadata_str(),
                    "pathspec": "/".join((obj.flow.name, run_id)),
                },
                f,
            )
    obj.echo(
        "Workflow *{name}* triggered on Argo Workflows "
        "(run-id *{run_id}*).".format(name=workflow_name_to_deploy, run_id=run_id),
        bold=True,
    )
    run_url = (
        "%s/%s/%s" % (UI_URL.rstrip("/"), obj.flow.name, run_id) if UI_URL else None
    )
    if run_url:
        obj.echo(
            "See the run in the UI at %s" % run_url,
            bold=True,
        )
@argo_workflows.command(help="Delete the flow on Argo Workflows.")
@click.option(
    "--authorize",
    default=None,
    type=str,
    help="Authorize the deletion with a production token",
)
@click.pass_obj
def delete(obj, authorize=None):
    """Delete the deployed workflow template, schedule and sensor.

    Also deletes any deployment under the legacy (pre-truncation) workflow
    name when one still exists.
    """

    def _token_instructions(flow_name, prev_user):
        # Printed when token validation fails, to guide the user.
        obj.echo(
            "There is an existing version of *%s* on Argo Workflows which was "
            "deployed by the user *%s*." % (flow_name, prev_user)
        )
        obj.echo(
            "To delete this flow, you need to use the same production token that they used."
        )
        obj.echo(
            "Please reach out to them to get the token. Once you have it, call "
            "this command:"
        )
        obj.echo("    argo-workflows delete --authorize MY_TOKEN", fg="green")
        obj.echo(
            'See "Organizing Results" at docs.metaflow.org for more information '
            "about production tokens."
        )

    # Cases and expected behaviours:
    # old name exists, new name does not exist -> delete old and do not fail on missing new
    # old name exists, new name exists -> delete both
    # old name does not exist, new name exists -> only try to delete new
    # old name does not exist, new name does not exist -> keep previous behaviour where missing deployment raises error for the new name.
    def _delete(workflow_name):
        # Validates ownership, then removes template plus any cron/sensor.
        validate_token(workflow_name, obj.token_prefix, authorize, _token_instructions)
        obj.echo("Deleting workflow *{name}*...".format(name=workflow_name), bold=True)
        schedule_deleted, sensor_deleted, workflow_deleted = ArgoWorkflows.delete(
            workflow_name
        )
        if schedule_deleted:
            obj.echo(
                "Deleting cronworkflow *{name}*...".format(name=workflow_name),
                bold=True,
            )
        if sensor_deleted:
            obj.echo(
                "Deleting sensor *{name}*...".format(name=workflow_name),
                bold=True,
            )
        return workflow_deleted

    workflows_deleted = False
    cleanup_old_name = False
    if obj.workflow_name != obj._v1_workflow_name:
        # Only add the old name if there exists a deployment with such name.
        # This is due to the way validate_token is tied to an existing deployment.
        if ArgoWorkflows.get_existing_deployment(obj._v1_workflow_name) is not None:
            cleanup_old_name = True
            obj.echo(
                "This flow has been deployed with another name in the past due to a limitation with Argo Workflows. "
                "Will also delete the older deployment.",
                wrap=True,
            )
            _delete(obj._v1_workflow_name)
            workflows_deleted = True
    # Always try to delete the current name.
    # Do not raise exception if we deleted old name before this.
    try:
        _delete(obj.workflow_name)
        workflows_deleted = True
    except ArgoWorkflowsException:
        if not cleanup_old_name:
            raise
    if workflows_deleted:
        obj.echo(
            "Deleting Kubernetes resources may take a while. "
            "Deploying the flow again to Argo Workflows while the delete is in-flight will fail."
        )
        obj.echo(
            "In-flight executions will not be affected. "
            "If necessary, terminate them manually."
        )
@argo_workflows.command(help="Suspend flow execution on Argo Workflows.")
@click.option(
    "--authorize",
    default=None,
    type=str,
    help="Authorize the suspension with a production token",
)
@click.argument("run-id", required=True, type=str)
@click.pass_obj
def suspend(obj, run_id, authorize=None):
    """Suspend a running Argo Workflows execution of this flow.

    Tries both the current and the legacy workflow name until the run is
    found and suspended.
    """

    def _token_instructions(flow_name, prev_user):
        # Printed when token validation fails, to guide the user.
        obj.echo(
            "There is an existing version of *%s* on Argo Workflows which was "
            "deployed by the user *%s*." % (flow_name, prev_user)
        )
        obj.echo(
            "To suspend this flow, you need to use the same production token that they used."
        )
        obj.echo(
            "Please reach out to them to get the token. Once you have it, call "
            "this command:"
        )
        obj.echo("    argo-workflows suspend RUN_ID --authorize MY_TOKEN", fg="green")
        obj.echo(
            'See "Organizing Results" at docs.metaflow.org for more information '
            "about production tokens."
        )

    workflows = _get_existing_workflow_names(obj)
    for workflow_name in workflows:
        validate_run_id(
            workflow_name, obj.token_prefix, authorize, run_id, _token_instructions
        )
        # Trim prefix from run_id
        name = run_id[5:]
        workflow_suspended = ArgoWorkflows.suspend(name)
        if workflow_suspended:
            obj.echo("Suspended execution of *%s*" % run_id)
        break  # no need to try out all workflow_names if we found the running one.
@argo_workflows.command(help="Unsuspend flow execution on Argo Workflows.")
@click.option(
    "--authorize",
    default=None,
    type=str,
    help="Authorize the unsuspend with a production token",
)
@click.argument("run-id", required=True, type=str)
@click.pass_obj
def unsuspend(obj, run_id, authorize=None):
    """Resume a previously suspended Argo Workflows execution of this flow.

    Tries both the current and the legacy workflow name until the run is
    found and unsuspended.
    """

    def _token_instructions(flow_name, prev_user):
        # Printed when token validation fails, to guide the user.
        obj.echo(
            "There is an existing version of *%s* on Argo Workflows which was "
            "deployed by the user *%s*." % (flow_name, prev_user)
        )
        obj.echo(
            "To unsuspend this flow, you need to use the same production token that they used."
        )
        obj.echo(
            "Please reach out to them to get the token. Once you have it, call "
            "this command:"
        )
        obj.echo(
            "    argo-workflows unsuspend RUN_ID --authorize MY_TOKEN",
            fg="green",
        )
        obj.echo(
            'See "Organizing Results" at docs.metaflow.org for more information '
            "about production tokens."
        )

    workflows = _get_existing_workflow_names(obj)
    for workflow_name in workflows:
        validate_run_id(
            workflow_name, obj.token_prefix, authorize, run_id, _token_instructions
        )
        # Trim prefix from run_id
        name = run_id[5:]
        workflow_suspended = ArgoWorkflows.unsuspend(name)
        if workflow_suspended:
            obj.echo("Unsuspended execution of *%s*" % run_id)
        break  # no need to try all workflow_names if we found one.
def validate_token(name, token_prefix, authorize, instructions_fn=None):
    """
    Validate that the production token matches that of the deployed flow.

    In case both the user and token do not match, raises
    ``IncorrectProductionToken``. Optionally outputs instructions on token
    usage via the provided ``instructions_fn(flow_name, prev_user)``. On
    success the previous token is stored locally for future use and True is
    returned.
    """
    # TODO: Unify this with the existing resolve_token implementation.
    # 1) retrieve the previous deployment, if one exists
    workflow = ArgoWorkflows.get_existing_deployment(name)
    if workflow is None:
        prev_token = None
    else:
        prev_user, prev_token = workflow
    # 2) authorize this deployment
    if prev_token is not None:
        if authorize is None:
            authorize = load_token(token_prefix)
        elif authorize.startswith("production:"):
            # Tokens may be supplied with their display prefix; strip it.
            authorize = authorize[11:]
        # we allow the user who deployed the previous version to re-deploy,
        # even if they don't have the token
        # NOTE: The username is visible in multiple sources, and can be set by the user.
        # Should we consider being stricter here?
        if prev_user != get_username() and authorize != prev_token:
            if instructions_fn:
                instructions_fn(flow_name=name, prev_user=prev_user)
            raise IncorrectProductionToken(
                "Try again with the correct production token."
            )
    # 3) all validations passed, store the previous token for future use
    token = prev_token
    store_token(token_prefix, token)
    return True
def get_run_object(pathspec: str):
    """Return the client ``Run`` for *pathspec*, or None if it is not (yet) registered."""
    try:
        return Run(pathspec, _namespace_check=False)
    except MetaflowNotFound:
        return None
def get_status_considering_run_object(status, run_obj):
    """Refine an Argo-reported status using metadata-service knowledge.

    A run that Argo reports as "Running" but that has no registered run
    object has not actually started executing yet, so it is reported as
    "Pending" instead. All other statuses pass through ``remap_status``.
    """
    normalized = remap_status(status)
    if run_obj is None and normalized == "Running":
        return "Pending"
    return normalized
@argo_workflows.command(help="Fetch flow execution status on Argo Workflows.")
@click.argument("run-id", required=True, type=str)
@click.pass_obj
def status(obj, run_id):
    """Echo the (remapped) status of the given Argo Workflows run."""
    if not run_id.startswith("argo-"):
        raise RunIdMismatch(
            "Run IDs for flows executed through Argo Workflows begin with 'argo-'"
        )
    obj.echo(
        "Fetching status for run *{run_id}* for {flow_name} ...".format(
            run_id=run_id, flow_name=obj.flow.name
        ),
        bold=True,
    )
    # Trim prefix from run_id
    name = run_id[5:]
    status = ArgoWorkflows.get_workflow_status(obj.flow.name, name)
    # Cross-check with the metadata service so "Running" without a registered
    # run is reported as "Pending".
    run_obj = get_run_object("/".join((obj.flow.name, run_id)))
    if status is not None:
        status = get_status_considering_run_object(status, run_obj)
        obj.echo_always(status)
@argo_workflows.command(help="Terminate flow execution on Argo Workflows.")
@click.option(
    "--authorize",
    default=None,
    type=str,
    help="Authorize the termination with a production token",
)
@click.argument("run-id", required=True, type=str)
@click.pass_obj
def terminate(obj, run_id, authorize=None):
    """Terminate a running Argo Workflows execution of this flow.

    Tries both the current and the legacy workflow name until the run is
    found and terminated.
    """

    def _token_instructions(flow_name, prev_user):
        # Printed when token validation fails, to guide the user.
        obj.echo(
            "There is an existing version of *%s* on Argo Workflows which was "
            "deployed by the user *%s*." % (flow_name, prev_user)
        )
        obj.echo(
            "To terminate this flow, you need to use the same production token that they used."
        )
        obj.echo(
            "Please reach out to them to get the token. Once you have it, call "
            "this command:"
        )
        obj.echo("    argo-workflows terminate --authorize MY_TOKEN RUN_ID", fg="green")
        obj.echo(
            'See "Organizing Results" at docs.metaflow.org for more information '
            "about production tokens."
        )

    workflows = _get_existing_workflow_names(obj)
    for workflow_name in workflows:
        validate_run_id(
            workflow_name, obj.token_prefix, authorize, run_id, _token_instructions
        )
        # Trim prefix from run_id
        name = run_id[5:]
        obj.echo(
            "Terminating run *{run_id}* for {flow_name} ...".format(
                run_id=run_id, flow_name=obj.flow.name
            ),
            bold=True,
        )
        terminated = ArgoWorkflows.terminate(obj.flow.name, name)
        if terminated:
            obj.echo("\nRun terminated.")
        break  # no need to try all workflow_names if we found the running one.
@argo_workflows.command(help="List Argo Workflow templates for the flow.")
@click.option(
    "--all",
    default=False,
    is_flag=True,
    type=bool,
    help="list all Argo Workflow Templates (not just limited to this flow)",
)
@click.pass_obj
def list_workflow_templates(obj, all=None):
    """Print deployed workflow template names, one per line."""
    for template_name in ArgoWorkflows.list_templates(obj.flow.name, all):
        obj.echo_always(template_name)
# Internal CLI command to run a heartbeat daemon in an Argo Workflows Daemon container.
@argo_workflows.command(hidden=True, help="start heartbeat process for a run")
@click.option("--run_id", required=True)
@click.option(
    "--tag",
    "tags",
    multiple=True,
    default=None,
    help="Annotate all objects produced by Argo Workflows runs "
    "with the given tag. You can specify this option multiple "
    "times to attach multiple tags.",
)
@click.pass_obj
def heartbeat(obj, run_id, tags=None):
    """Run a never-ending heartbeat loop for *run_id*.

    Executed inside an Argo Workflows daemon container; keeps the run
    registered as alive with the metadata service for its whole duration.
    """
    # Try to register a run in case the start task has not taken care of it yet.
    obj.metadata.register_run_id(run_id, tags)
    # Start run heartbeat
    obj.metadata.start_run_heartbeat(obj.flow.name, run_id)
    # Keepalive loop
    while True:
        # Do not pollute daemon logs with anything unnecessary,
        # as they might be extremely long running.
        sleep(10)
def validate_run_id(
    workflow_name, token_prefix, authorize, run_id, instructions_fn=None
):
    """
    Validates that a run_id adheres to the Argo Workflows naming rules, and
    that it belongs to the current flow (accounting for project branch as well).

    Raises ``RunIdMismatch`` / ``MetaflowException`` /
    ``IncorrectProductionToken`` on failure; returns True on success. Unlike
    ``validate_token``, the supplied token is deliberately not cached locally.
    """
    # Verify that user is trying to change an Argo workflow
    if not run_id.startswith("argo-"):
        raise RunIdMismatch(
            "Run IDs for flows executed through Argo Workflows begin with 'argo-'"
        )
    # Verify that run_id belongs to the Flow, and that branches match
    name = run_id[5:]
    workflow = ArgoWorkflows.get_execution(name)
    if workflow is None:
        raise MetaflowException("Could not find workflow *%s* on Argo Workflows" % name)
    owner, token, flow_name, branch_name, project_name = workflow
    # Verify we are operating on the correct Flow file compared to the running one.
    # Without this check, using --name could be used to run commands for arbitrary run_id's, disregarding the Flow in the file.
    if current.flow_name != flow_name:
        raise RunIdMismatch(
            "The workflow with the run_id *%s* belongs to the flow *%s*, not for the flow *%s*."
            % (run_id, flow_name, current.flow_name)
        )
    if project_name is not None:
        # Verify we are operating on the correct project.
        if current.get("project_name") != project_name:
            raise RunIdMismatch(
                "The workflow belongs to the project *%s*. "
                "Please use the project decorator or --name to target the correct project"
                % project_name
            )
        # Verify we are operating on the correct branch.
        if current.get("branch_name") != branch_name:
            raise RunIdMismatch(
                "The workflow belongs to the branch *%s*. "
                "Please use --branch, --production or --name to target the correct branch"
                % branch_name
            )
    # Verify that the production tokens match. We do not want to cache the token that was used though,
    # as the operations that require run_id validation can target runs not authored from the local environment
    if authorize is None:
        authorize = load_token(token_prefix)
    elif authorize.startswith("production:"):
        # Tokens may be supplied with their display prefix; strip it.
        authorize = authorize[11:]
    if owner != get_username() and authorize != token:
        if instructions_fn:
            instructions_fn(flow_name=name, prev_user=owner)
        raise IncorrectProductionToken("Try again with the correct production token.")
    return True
def _get_existing_workflow_names(obj):
    """Return candidate workflow names for this flow on Argo Workflows.

    Always contains the current workflow name; additionally contains the
    legacy (pre-truncation) name when a deployment under that name still
    exists. The existence check matters because token validation is tied
    to an existing deployment.
    """
    candidates = [obj.workflow_name]
    legacy_name = obj._v1_workflow_name
    if legacy_name != obj.workflow_name:
        if ArgoWorkflows.get_existing_deployment(legacy_name) is not None:
            candidates.append(legacy_name)
    return candidates
def sanitize_for_argo(text):
    """
    Sanitizes a string so it does not contain characters that are not permitted in
    Argo Workflow resource names.
    """
    # Drop any leading non-alphanumeric run, remove characters that are
    # common in Metaflow branch names but illegal in resource names, and
    # lowercase the remainder.
    cleaned = re.sub(r"^[^A-Za-z0-9]+", "", text)
    for forbidden in ("_", "@", "+"):
        cleaned = cleaned.replace(forbidden, "")
    cleaned = cleaned.lower()
    # This is added in order to get sanitized and truncated project branch names to adhere to RFC 1123 subdomain requirements
    # f.ex. after truncation a project flow name might be project.branch-cut-short-.flowname
    # sanitize around the . separators by removing any non-alphanumeric characters
    return re.sub(r"[^a-z0-9]*\.[^a-z0-9]*", ".", cleaned)
def remap_status(status):
    """
    Group similar Argo Workflow statuses together in order to have similar output to step functions statuses.
    """
    # Argo reports "Error" for infrastructure failures; surface it as the
    # same "Failed" status that user-code failures produce.
    if status == "Error":
        return "Failed"
    return status
| UnsupportedPythonVersion |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclassSlots1.py | {
"start": 867,
"end": 983
} | class ____:
a: int
# This should generate an error.
F.__slots__
# This should generate an error.
F(1).__slots__
| F |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/asyncpg.py | {
"start": 10996,
"end": 11128
} | class ____(sqltypes.JSON.JSONStrIndexType):
__visit_name__ = "json_str_index"
render_bind_cast = True
| AsyncpgJSONStrIndexType |
python | sqlalchemy__sqlalchemy | test/orm/test_joins.py | {
"start": 7289,
"end": 8181
} | class ____(_fixtures.FixtureTest, AssertsCompiledSQL):
__dialect__ = "default"
@classmethod
def setup_mappers(cls):
User = cls.classes.User
Address = cls.classes.Address
users, addresses = (cls.tables.users, cls.tables.addresses)
cls.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(Address),
"ad_syn": synonym("addresses"),
},
)
cls.mapper_registry.map_imperatively(Address, addresses)
def test_join_on_synonym(self):
User = self.classes.User
self.assert_compile(
fixture_session().query(User).join(User.ad_syn),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN addresses ON users.id = addresses.user_id",
)
| JoinOnSynonymTest |
python | coleifer__peewee | tests/shortcuts.py | {
"start": 29929,
"end": 30055
} | class ____(TestModel):
key = CharField()
value = IntegerField(default=0)
extra = IntegerField(default=lambda: 1)
| TIW |
python | pydata__xarray | xarray/coding/cftime_offsets.py | {
"start": 20525,
"end": 20718
} | class ____(Tick):
_freq = "h"
def as_timedelta(self) -> timedelta:
return timedelta(hours=self.n)
def __apply__(self, other):
return other + self.as_timedelta()
| Hour |
python | joke2k__faker | tests/providers/test_address.py | {
"start": 32740,
"end": 34444
} | class ____:
"""Test es_ES address provider methods"""
def test_state_name(self, faker, num_samples):
for _ in range(num_samples):
state_name = faker.state_name()
assert isinstance(state_name, str)
assert state_name in EsEsAddressProvider.states
def test_street_prefix(self, faker, num_samples):
for _ in range(num_samples):
street_prefix = faker.street_prefix()
assert isinstance(street_prefix, str)
assert street_prefix in EsEsAddressProvider.street_prefixes
def test_secondary_address(self, faker, num_samples):
for _ in range(num_samples):
secondary_address = faker.secondary_address()
assert isinstance(secondary_address, str)
assert re.fullmatch(r"Apt\. \d{2}|Piso \d|Puerta \d", secondary_address)
def test_regions(self, faker, num_samples):
for _ in range(num_samples):
region = faker.region()
assert isinstance(region, str)
assert region in EsEsAddressProvider.regions
def test_autonomous_community(self, faker, num_samples):
for _ in range(num_samples):
# Spanish regions, also known as "autonomous communities"
autonomous_community = faker.autonomous_community()
assert isinstance(autonomous_community, str)
assert autonomous_community in EsEsAddressProvider.regions
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert len(postcode) == 5
assert 1000 <= int(postcode) <= 52100
| TestEsEs |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 102502,
"end": 103810
} | class ____(Operation):
def __init__(self, axis, *, name=None):
super().__init__(name=name)
if not isinstance(axis, (int, tuple, list)):
raise ValueError(
"The `axis` argument to `expand_dims` should be an integer, "
f"tuple or list. Received axis={axis}"
)
self.axis = axis
def call(self, x):
return backend.numpy.expand_dims(x, self.axis)
def compute_output_spec(self, x):
output_shape = operation_utils.compute_expand_dims_output_shape(
x.shape, self.axis
)
sparse = getattr(x, "sparse", False)
return KerasTensor(output_shape, dtype=x.dtype, sparse=sparse)
@keras_export(
[
"keras.ops.expand_dims",
"keras.ops.numpy.expand_dims",
]
)
def expand_dims(x, axis):
"""Expand the shape of a tensor.
Insert a new axis at the `axis` position in the expanded tensor shape.
Args:
x: Input tensor.
axis: Position in the expanded axes where the new axis
(or axes) is placed.
Returns:
Output tensor with the number of dimensions increased.
"""
if any_symbolic_tensors((x,)):
return ExpandDims(axis=axis).symbolic_call(x)
return backend.numpy.expand_dims(x, axis)
| ExpandDims |
python | PrefectHQ__prefect | src/prefect/server/events/filters.py | {
"start": 9035,
"end": 9583
} | class ____:
values: list[str]
positive: LabelSet = field(default_factory=LabelSet)
negative: LabelSet = field(default_factory=LabelSet)
def __post_init__(self) -> None:
for value in self.values:
label_set = self.positive
if value.startswith("!"):
label_set = self.negative
value = value[1:]
if value.endswith("*"):
label_set.prefixes.append(value.rstrip("*"))
else:
label_set.simple.append(value)
| LabelOperations |
python | kamyu104__LeetCode-Solutions | Python/path-existence-queries-in-a-graph-ii.py | {
"start": 83,
"end": 1654
class ____(object):
    # Nodes are connected iff, walking positions in value-sorted order, every
    # adjacent value gap between them is <= maxDiff.  `prefix` counts gaps
    # > maxDiff, so equal prefix values <=> same component.  P is a binary
    # lifting table: P[k][j] = farthest sorted index reachable in 2**k hops.
    def pathExistenceQueries(self, n, nums, maxDiff, queries):
        """
        :type n: int
        :type nums: List[int]
        :type maxDiff: int
        :type queries: List[List[int]]
        :rtype: List[int]
        """
        def ceil_log2_x(x):
            # ceil(log2(x)); -1 for x <= 0.
            return (x-1).bit_length() if x-1 >= 0 else -1
        # Positions sorted by value, and the inverse mapping position->rank.
        sorted_i = sorted((i for i in xrange(n)), key=lambda i : nums[i])
        i_to_idx = [0]*n
        for idx, i in enumerate(sorted_i):
            i_to_idx[i] = idx
        # prefix[r] = number of "too large" gaps before rank r.
        prefix = [0]*n
        for i in xrange(n-1):
            prefix[i+1] = prefix[i]+int(nums[sorted_i[i+1]]-nums[sorted_i[i]] > maxDiff)
        # P[0][j]: farthest rank reachable from rank j in a single hop
        # (two-pointer sweep; unreached entries default to n-1).
        P = [[n-1]*n for _ in xrange(ceil_log2_x(n-1)+1)]
        left = 0
        for right in xrange(n):
            while nums[sorted_i[right]]-nums[sorted_i[left]] > maxDiff:
                P[0][left] = right-1
                left += 1
        for i in xrange(len(P)-1):
            for j in xrange(n):
                P[i+1][j] = P[i][P[i][j]]
        result = [-1]*len(queries)
        for idx, (i, j) in enumerate(queries):
            if i == j:
                result[idx] = 0
                continue
            # Different components: unreachable, leave -1.
            if prefix[i_to_idx[i]] != prefix[i_to_idx[j]]:
                continue
            if i_to_idx[i] > i_to_idx[j]:
                i, j = j, i
            # Greedy jumps in decreasing powers of two, then one final hop.
            curr, l = i_to_idx[i], 0
            for k in reversed(xrange(len(P))):
                if P[k][curr] < i_to_idx[j]:
                    curr = P[k][curr]
                    l += 1<<k
            result[idx] = l+1
        return result
python | django__django | django/contrib/admin/widgets.py | {
"start": 8440,
"end": 12787
class ____(forms.Widget):
    """
    This class is a wrapper to a given widget to add the add icon for the
    admin interface.
    """
    template_name = "admin/widgets/related_widget_wrapper.html"
    def __init__(
        self,
        widget,
        rel,
        admin_site,
        can_add_related=None,
        can_change_related=False,
        can_delete_related=False,
        can_view_related=False,
    ):
        # Mirror the wrapped widget's multipart needs and attrs so the form
        # machinery treats the wrapper like the widget itself.
        self.needs_multipart_form = widget.needs_multipart_form
        self.attrs = widget.attrs
        self.widget = widget
        self.rel = rel
        # Backwards compatible check for whether a user can add related
        # objects.
        if can_add_related is None:
            can_add_related = admin_site.is_registered(rel.model)
        self.can_add_related = can_add_related
        if not isinstance(widget, AutocompleteMixin):
            self.attrs["data-context"] = "available-source"
        # Only single-select Select widgets are supported.
        supported = not getattr(
            widget, "allow_multiple_selected", False
        ) and isinstance(widget, Select)
        self.can_change_related = supported and can_change_related
        # XXX: The deletion UX can be confusing when dealing with cascading
        # deletion.
        cascade = getattr(rel, "on_delete", None) is CASCADE
        self.can_delete_related = supported and not cascade and can_delete_related
        self.can_view_related = supported and can_view_related
        # To check if the related object is registered with this AdminSite.
        self.admin_site = admin_site
        self.use_fieldset = True
    def __deepcopy__(self, memo):
        # Deep-copy only the wrapped widget; keep the wrapper itself shallow.
        obj = copy.copy(self)
        obj.widget = copy.deepcopy(self.widget, memo)
        obj.attrs = self.widget.attrs
        memo[id(self)] = obj
        return obj
    @property
    def is_hidden(self):
        return self.widget.is_hidden
    @property
    def media(self):
        return self.widget.media
    @property
    def choices(self):
        return self.widget.choices
    @choices.setter
    def choices(self, value):
        self.widget.choices = value
    def get_related_url(self, info, action, *args):
        # Reverse an admin view ("add"/"change"/"delete") for the related
        # model within this wrapper's admin site.
        return reverse(
            "admin:%s_%s_%s" % (*info, action),
            current_app=self.admin_site.name,
            args=args,
        )
    def get_context(self, name, value, attrs):
        from django.contrib.admin.views.main import IS_POPUP_VAR, TO_FIELD_VAR
        rel_opts = self.rel.model._meta
        info = (rel_opts.app_label, rel_opts.model_name)
        related_field_name = self.rel.get_related_field().name
        url_params = "&".join(
            "%s=%s" % param
            for param in [
                (TO_FIELD_VAR, related_field_name),
                (IS_POPUP_VAR, 1),
            ]
        )
        context = {
            "rendered_widget": self.widget.render(name, value, attrs),
            "is_hidden": self.is_hidden,
            "name": name,
            "url_params": url_params,
            "model": rel_opts.verbose_name,
            "model_name": rel_opts.model_name,
            "can_add_related": self.can_add_related,
            "can_change_related": self.can_change_related,
            "can_delete_related": self.can_delete_related,
            "can_view_related": self.can_view_related,
            "model_has_limit_choices_to": self.rel.limit_choices_to,
        }
        # Action URLs are only added for the actions the user may perform;
        # "__fk__" is a placeholder substituted client-side.
        if self.can_add_related:
            context["add_related_url"] = self.get_related_url(info, "add")
        if self.can_delete_related:
            context["delete_related_template_url"] = self.get_related_url(
                info, "delete", "__fk__"
            )
        if self.can_view_related or self.can_change_related:
            context["view_related_url_params"] = f"{TO_FIELD_VAR}={related_field_name}"
            context["change_related_template_url"] = self.get_related_url(
                info, "change", "__fk__"
            )
        return context
    # Form-data handling is delegated entirely to the wrapped widget.
    def value_from_datadict(self, data, files, name):
        return self.widget.value_from_datadict(data, files, name)
    def value_omitted_from_data(self, data, files, name):
        return self.widget.value_omitted_from_data(data, files, name)
    def id_for_label(self, id_):
        return self.widget.id_for_label(id_)
python | numba__numba | numba/core/datamodel/models.py | {
"start": 37964,
"end": 38305
class ____(StructModel):
    """Struct model for an array's ctypes view: the raw data pointer plus
    the meminfo pointer that keeps the underlying allocation alive."""

    def __init__(self, dmm, fe_type):
        # ndim = fe_type.ndim
        members = [('data', types.CPointer(fe_type.dtype)),
                   ('meminfo', types.MemInfoPointer(fe_type.dtype))]
        # Zero-argument super() (PEP 3135): avoids hard-coding the class
        # name, which the Python-2-style call did.
        super().__init__(dmm, fe_type, members)
@register_default(types.RangeType)
| ArrayCTypesModel |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-oxylabs/llama_index/readers/oxylabs/google_base.py | {
"start": 387,
"end": 522
class ____:
    # One node in a parsed response schema tree; `parent` links toward the
    # root (None at the top).  Field semantics inferred from names — confirm
    # against the reader implementation that builds these.
    tag: str
    display_tag: str
    path_: str
    python_type: str
    parent: Optional["ResponseElement"]
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/asyncpg.py | {
"start": 13004,
"end": 13068
class ____(sqltypes.CHAR):
    # NOTE(review): render an explicit cast on bound parameters —
    # presumably so asyncpg's prepared statements see the intended
    # CHAR type; confirm against the dialect's bind handling.
    render_bind_cast = True
| AsyncpgCHAR |
python | simplejson__simplejson | simplejson/raw_json.py | {
"start": 34,
"end": 217
class ____(object):
    """Wrap an encoded JSON document for direct embedding in the output
    """
    def __init__(self, encoded_json):
        # Pre-encoded JSON text, stored verbatim; the encoder emits it
        # without re-serializing.
        self.encoded_json = encoded_json
python | plotly__plotly.py | plotly/graph_objs/streamtube/_lighting.py | {
"start": 233,
"end": 7773
class ____(_BaseTraceHierarchyType):
    """Lighting properties for `streamtube` traces.

    Auto-generated accessor class: each property delegates to dict-style
    item access on the underlying plotly object, with validation handled
    by the base hierarchy type.
    """
    _parent_path_str = "streamtube"
    _path_str = "streamtube.lighting"
    # Property names accepted by this node; anything else is invalid.
    _valid_props = {
        "ambient",
        "diffuse",
        "facenormalsepsilon",
        "fresnel",
        "roughness",
        "specular",
        "vertexnormalsepsilon",
    }
    @property
    def ambient(self):
        """
        Ambient light increases overall color visibility but can wash
        out the image.
        The 'ambient' property is a number and may be specified as:
          - An int or float in the interval [0, 1]
        Returns
        -------
        int|float
        """
        return self["ambient"]
    @ambient.setter
    def ambient(self, val):
        self["ambient"] = val
    @property
    def diffuse(self):
        """
        Represents the extent that incident rays are reflected in a
        range of angles.
        The 'diffuse' property is a number and may be specified as:
          - An int or float in the interval [0, 1]
        Returns
        -------
        int|float
        """
        return self["diffuse"]
    @diffuse.setter
    def diffuse(self, val):
        self["diffuse"] = val
    @property
    def facenormalsepsilon(self):
        """
        Epsilon for face normals calculation avoids math issues arising
        from degenerate geometry.
        The 'facenormalsepsilon' property is a number and may be specified as:
          - An int or float in the interval [0, 1]
        Returns
        -------
        int|float
        """
        return self["facenormalsepsilon"]
    @facenormalsepsilon.setter
    def facenormalsepsilon(self, val):
        self["facenormalsepsilon"] = val
    @property
    def fresnel(self):
        """
        Represents the reflectance as a dependency of the viewing
        angle; e.g. paper is reflective when viewing it from the edge
        of the paper (almost 90 degrees), causing shine.
        The 'fresnel' property is a number and may be specified as:
          - An int or float in the interval [0, 5]
        Returns
        -------
        int|float
        """
        return self["fresnel"]
    @fresnel.setter
    def fresnel(self, val):
        self["fresnel"] = val
    @property
    def roughness(self):
        """
        Alters specular reflection; the rougher the surface, the wider
        and less contrasty the shine.
        The 'roughness' property is a number and may be specified as:
          - An int or float in the interval [0, 1]
        Returns
        -------
        int|float
        """
        return self["roughness"]
    @roughness.setter
    def roughness(self, val):
        self["roughness"] = val
    @property
    def specular(self):
        """
        Represents the level that incident rays are reflected in a
        single direction, causing shine.
        The 'specular' property is a number and may be specified as:
          - An int or float in the interval [0, 2]
        Returns
        -------
        int|float
        """
        return self["specular"]
    @specular.setter
    def specular(self, val):
        self["specular"] = val
    @property
    def vertexnormalsepsilon(self):
        """
        Epsilon for vertex normals calculation avoids math issues
        arising from degenerate geometry.
        The 'vertexnormalsepsilon' property is a number and may be specified as:
          - An int or float in the interval [0, 1]
        Returns
        -------
        int|float
        """
        return self["vertexnormalsepsilon"]
    @vertexnormalsepsilon.setter
    def vertexnormalsepsilon(self, val):
        self["vertexnormalsepsilon"] = val
    @property
    def _prop_descriptions(self):
        return """\
        ambient
            Ambient light increases overall color visibility but
            can wash out the image.
        diffuse
            Represents the extent that incident rays are reflected
            in a range of angles.
        facenormalsepsilon
            Epsilon for face normals calculation avoids math issues
            arising from degenerate geometry.
        fresnel
            Represents the reflectance as a dependency of the
            viewing angle; e.g. paper is reflective when viewing it
            from the edge of the paper (almost 90 degrees), causing
            shine.
        roughness
            Alters specular reflection; the rougher the surface,
            the wider and less contrasty the shine.
        specular
            Represents the level that incident rays are reflected
            in a single direction, causing shine.
        vertexnormalsepsilon
            Epsilon for vertex normals calculation avoids math
            issues arising from degenerate geometry.
        """
    def __init__(
        self,
        arg=None,
        ambient=None,
        diffuse=None,
        facenormalsepsilon=None,
        fresnel=None,
        roughness=None,
        specular=None,
        vertexnormalsepsilon=None,
        **kwargs,
    ):
        """
        Construct a new Lighting object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.streamtube.Lighting`
        ambient
            Ambient light increases overall color visibility but
            can wash out the image.
        diffuse
            Represents the extent that incident rays are reflected
            in a range of angles.
        facenormalsepsilon
            Epsilon for face normals calculation avoids math issues
            arising from degenerate geometry.
        fresnel
            Represents the reflectance as a dependency of the
            viewing angle; e.g. paper is reflective when viewing it
            from the edge of the paper (almost 90 degrees), causing
            shine.
        roughness
            Alters specular reflection; the rougher the surface,
            the wider and less contrasty the shine.
        specular
            Represents the level that incident rays are reflected
            in a single direction, causing shine.
        vertexnormalsepsilon
            Epsilon for vertex normals calculation avoids math
            issues arising from degenerate geometry.
        Returns
        -------
        Lighting
        """
        super().__init__("lighting")
        # Internal fast path: adopt the given parent without re-validating.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError("""\
The first argument to the plotly.graph_objs.streamtube.Lighting
constructor must be a dict or
an instance of :class:`plotly.graph_objs.streamtube.Lighting`""")
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Explicit keyword arguments take precedence over values in `arg`.
        self._set_property("ambient", arg, ambient)
        self._set_property("diffuse", arg, diffuse)
        self._set_property("facenormalsepsilon", arg, facenormalsepsilon)
        self._set_property("fresnel", arg, fresnel)
        self._set_property("roughness", arg, roughness)
        self._set_property("specular", arg, specular)
        self._set_property("vertexnormalsepsilon", arg, vertexnormalsepsilon)
        self._process_kwargs(**dict(arg, **kwargs))
        self._skip_invalid = False
python | ansible__ansible | lib/ansible/plugins/connection/__init__.py | {
"start": 14168,
"end": 19456
class ____(ConnectionBase):
    """
    A base class for network-style connections.
    """
    force_persistence = True
    # Do not use _remote_is_local in other connections
    _remote_is_local = True
    def __init__(
        self,
        play_context: PlayContext,
        *args: t.Any,
        **kwargs: t.Any,
    ) -> None:
        super(NetworkConnectionBase, self).__init__(play_context, *args, **kwargs)
        # Messages queued for the controller process; see queue_message().
        self._messages: list[tuple[str, str]] = []
        self._conn_closed = False
        self._network_os = self._play_context.network_os
        # Local connection used for exec/file ops on the controller side.
        self._local = connection_loader.get('local', play_context, '/dev/null')
        self._local.set_options()
        self._sub_plugin: dict[str, t.Any] = {}
        self._cached_variables = (None, None, None)
        # reconstruct the socket_path and set instance values accordingly
        self._ansible_playbook_pid = kwargs.get('ansible_playbook_pid')
        self._update_connection_state()
    def __getattr__(self, name):
        # Fall through to the active sub-plugin for unknown public
        # attributes, so plugin methods look like connection methods.
        try:
            return self.__dict__[name]
        except KeyError:
            if not name.startswith('_'):
                plugin = self._sub_plugin.get('obj')
                if plugin:
                    method = getattr(plugin, name, None)
                    if method is not None:
                        return method
            raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
    def exec_command(self, cmd: str, in_data: bytes | None = None, sudoable: bool = True) -> tuple[int, bytes, bytes]:
        # Commands run locally on the controller, not on the network device.
        return self._local.exec_command(cmd, in_data, sudoable)
    def queue_message(self, level: str, message: str) -> None:
        """
        Adds a message to the queue of messages waiting to be pushed back to the controller process.
        :arg level: A string which can either be the name of a method in display, or 'log'. When
            the messages are returned to task_executor, a value of log will correspond to
            ``display.display(message, log_only=True)``, while another value will call ``display.[level](message)``
        """
        self._messages.append((level, message))
    def pop_messages(self) -> list[tuple[str, str]]:
        # Atomically drain and return the queued messages.
        messages, self._messages = self._messages, []
        return messages
    def put_file(self, in_path: str, out_path: str) -> None:
        """Transfer a file from local to remote"""
        return self._local.put_file(in_path, out_path)
    def fetch_file(self, in_path: str, out_path: str) -> None:
        """Fetch a file from remote to local"""
        return self._local.fetch_file(in_path, out_path)
    def reset(self) -> None:
        """
        Reset the connection
        """
        if self._socket_path:
            self.queue_message('vvvv', 'resetting persistent connection for socket_path %s' % self._socket_path)
            self.close()
        self.queue_message('vvvv', 'reset call on connection instance')
    def close(self) -> None:
        self._conn_closed = True
        if self._connected:
            self._connected = False
    def set_options(
        self,
        task_keys: dict[str, t.Any] | None = None,
        var_options: dict[str, t.Any] | None = None,
        direct: dict[str, t.Any] | None = None,
    ) -> None:
        super(NetworkConnectionBase, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
        if self.get_option('persistent_log_messages'):
            warning = "Persistent connection logging is enabled for %s. This will log ALL interactions" % self._play_context.remote_addr
            logpath = getattr(C, 'DEFAULT_LOG_PATH')
            if logpath is not None:
                warning += " to %s" % logpath
            self.queue_message('warning', "%s and WILL NOT redact sensitive configuration like passwords. USE WITH CAUTION!" % warning)
        # Propagate options to the sub-plugin; external plugins manage their
        # own options and plugins without set_options are tolerated.
        if self._sub_plugin.get('obj') and self._sub_plugin.get('type') != 'external':
            try:
                self._sub_plugin['obj'].set_options(task_keys=task_keys, var_options=var_options, direct=direct)
            except AttributeError:
                pass
    def _update_connection_state(self) -> None:
        """
        Reconstruct the connection socket_path and check if it exists
        If the socket path exists then the connection is active and set
        both the _socket_path value to the path and the _connected value
        to True. If the socket path doesn't exist, leave the socket path
        value to None and the _connected value to False
        """
        ssh = connection_loader.get('ssh', class_only=True)
        control_path = ssh._create_control_path(
            self._play_context.remote_addr, self._play_context.port,
            self._play_context.remote_user, self._play_context.connection,
            self._ansible_playbook_pid
        )
        tmp_path = unfrackpath(C.PERSISTENT_CONTROL_PATH_DIR)
        socket_path = unfrackpath(control_path % dict(directory=tmp_path))
        if os.path.exists(socket_path):
            self._connected = True
            self._socket_path = socket_path
    def _log_messages(self, message: str) -> None:
        if self.get_option('persistent_log_messages'):
            self.queue_message('log', message)
python | scipy__scipy | scipy/linalg/tests/test_fblas.py | {
"start": 9024,
"end": 14525
class ____:
    ''' Mixin class for gemv tests '''
    def get_data(self, x_stride=1, y_stride=1):
        # Build alpha, beta, a 3x3 matrix and vectors sized for the given
        # strides; `mult` makes the data complex for complex dtypes.
        rng = np.random.default_rng(1234)
        mult = array(1, dtype=self.dtype)
        if self.dtype in [complex64, complex128]:
            mult = array(1+1j, dtype=self.dtype)
        alpha = array(1., dtype=self.dtype) * mult
        beta = array(1., dtype=self.dtype) * mult
        a = rng.normal(0., 1., (3, 3)).astype(self.dtype) * mult
        x = arange(shape(a)[0]*x_stride, dtype=self.dtype) * mult
        y = arange(shape(a)[1]*y_stride, dtype=self.dtype) * mult
        return alpha, beta, a, x, y
    def test_simple(self):
        # y := alpha*A@x + beta*y
        alpha, beta, a, x, y = self.get_data()
        desired_y = alpha*matrixmultiply(a, x)+beta*y
        y = self.blas_func(alpha, a, x, beta, y)
        assert_array_almost_equal(desired_y, y)
    def test_default_beta_y(self):
        # With beta/y omitted, gemv returns plain A@x.
        alpha, beta, a, x, y = self.get_data()
        desired_y = matrixmultiply(a, x)
        y = self.blas_func(1, a, x)
        assert_array_almost_equal(desired_y, y)
    def test_simple_transpose(self):
        alpha, beta, a, x, y = self.get_data()
        desired_y = alpha*matrixmultiply(transpose(a), x)+beta*y
        y = self.blas_func(alpha, a, x, beta, y, trans=1)
        assert_array_almost_equal(desired_y, y)
    def test_simple_transpose_conj(self):
        # trans=2 selects the conjugate transpose.
        alpha, beta, a, x, y = self.get_data()
        desired_y = alpha*matrixmultiply(transpose(conjugate(a)), x)+beta*y
        y = self.blas_func(alpha, a, x, beta, y, trans=2)
        assert_array_almost_equal(desired_y, y)
    def test_x_stride(self):
        alpha, beta, a, x, y = self.get_data(x_stride=2)
        desired_y = alpha*matrixmultiply(a, x[::2])+beta*y
        y = self.blas_func(alpha, a, x, beta, y, incx=2)
        assert_array_almost_equal(desired_y, y)
    def test_x_stride_transpose(self):
        alpha, beta, a, x, y = self.get_data(x_stride=2)
        desired_y = alpha*matrixmultiply(transpose(a), x[::2])+beta*y
        y = self.blas_func(alpha, a, x, beta, y, trans=1, incx=2)
        assert_array_almost_equal(desired_y, y)
    def test_x_stride_assert(self):
        # What is the use of this test?
        alpha, beta, a, x, y = self.get_data(x_stride=2)
        with pytest.raises(Exception, match='failed for 3rd argument'):
            y = self.blas_func(1, a, x, 1, y, trans=0, incx=3)
        with pytest.raises(Exception, match='failed for 3rd argument'):
            y = self.blas_func(1, a, x, 1, y, trans=1, incx=3)
    def test_y_stride(self):
        alpha, beta, a, x, y = self.get_data(y_stride=2)
        desired_y = y.copy()
        desired_y[::2] = alpha*matrixmultiply(a, x)+beta*y[::2]
        y = self.blas_func(alpha, a, x, beta, y, incy=2)
        assert_array_almost_equal(desired_y, y)
    def test_y_stride_transpose(self):
        alpha, beta, a, x, y = self.get_data(y_stride=2)
        desired_y = y.copy()
        desired_y[::2] = alpha*matrixmultiply(transpose(a), x)+beta*y[::2]
        y = self.blas_func(alpha, a, x, beta, y, trans=1, incy=2)
        assert_array_almost_equal(desired_y, y)
    def test_y_stride_assert(self):
        # What is the use of this test?
        alpha, beta, a, x, y = self.get_data(y_stride=2)
        with pytest.raises(Exception, match='failed for 2nd keyword'):
            y = self.blas_func(1, a, x, 1, y, trans=0, incy=3)
        with pytest.raises(Exception, match='failed for 2nd keyword'):
            y = self.blas_func(1, a, x, 1, y, trans=1, incy=3)
# Single-precision gemv may be missing from some BLAS builds; if fblas has
# no sgemv the class body raises AttributeError and we fall back to a stub.
try:
    class TestSgemv(BaseGemv):
        blas_func = fblas.sgemv
        dtype = float32
        @pytest.mark.skipif(sys.platform != 'darwin', reason="MacOS specific test")
        def test_sgemv_on_osx(self):
            def aligned_array(shape, align, dtype, order='C'):
                # Make array shape `shape` with aligned at `align` bytes
                d = dtype()
                # Make array of correct size with `align` extra bytes
                N = np.prod(shape)
                tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
                address = tmp.__array_interface__["data"][0]
                # Find offset into array giving desired alignment
                for offset in range(align):
                    if (address + offset) % align == 0:
                        break
                tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
                return tmp.reshape(shape, order=order)
            def as_aligned(arr, align, dtype, order='C'):
                # Copy `arr` into an aligned array with same shape
                aligned = aligned_array(arr.shape, align, dtype, order)
                aligned[:] = arr[:]
                return aligned
            def assert_dot_close(A, X, desired):
                assert_allclose(self.blas_func(1.0, A, X), desired,
                                rtol=1e-5, atol=1e-7)
            testdata = product((15, 32), (10000,), (200, 89), ('C', 'F'))
            rng = np.random.default_rng(1234)
            for align, m, n, a_order in testdata:
                A_d = rng.random((m, n))
                X_d = rng.random(n)
                desired = np.dot(A_d, X_d)
                # Calculation with aligned single precision
                A_f = as_aligned(A_d, align, np.float32, order=a_order)
                X_f = as_aligned(X_d, align, np.float32, order=a_order)
                assert_dot_close(A_f, X_f, desired)
except AttributeError:
    class TestSgemv:
        pass
| BaseGemv |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 13532,
"end": 13759
class ____(models.Model):
    # Model whose history tracking is attached via register() below, with
    # the history model placed in an explicitly named external app.
    name = models.CharField(max_length=100)
register(
    ExternalModelSpecifiedWithAppParam,
    app="simple_history.tests.external",
    manager_name="histories",
)
| ExternalModelSpecifiedWithAppParam |
python | numba__numba | numba/tests/test_extending.py | {
"start": 12540,
"end": 14164
class ____(TestCase):
    """
    Test the low-level two-tier extension API.
    """
    # Check with `@jit` from within the test process and also in a new test
    # process so as to check the registration mechanism.
    def test_func1(self):
        pyfunc = call_func1_nullary
        cfunc = jit(nopython=True)(pyfunc)
        self.assertPreciseEqual(cfunc(), 42)
        pyfunc = call_func1_unary
        cfunc = jit(nopython=True)(pyfunc)
        self.assertPreciseEqual(cfunc(None), 42)
        self.assertPreciseEqual(cfunc(18.0), 6.0)
    @TestCase.run_test_in_subprocess
    def test_func1_isolated(self):
        # Same checks, but in a fresh process so typing/lowering
        # registrations must be re-established from scratch.
        self.test_func1()
    def test_type_callable_keeps_function(self):
        # type_callable must return the decorated function unchanged.
        self.assertIs(type_func1, type_func1_)
        self.assertIsNotNone(type_func1)
    @TestCase.run_test_in_subprocess
    def test_cast_mydummy(self):
        pyfunc = get_dummy
        cfunc = njit(types.float64(),)(pyfunc)
        self.assertPreciseEqual(cfunc(), 42.0)
    def test_mk_func_literal(self):
        """make sure make_function is passed to typer class as a literal
        """
        test_ir = compiler.run_frontend(mk_func_test_impl)
        typingctx = cpu_target.typing_context
        targetctx = cpu_target.target_context
        typingctx.refresh()
        targetctx.refresh()
        typing_res = type_inference_stage(typingctx, targetctx, test_ir, (),
                                          None)
        self.assertTrue(
            any(
                isinstance(a, types.MakeFunctionLiteral)
                for a in typing_res.typemap.values()
            )
        )
python | ray-project__ray | python/ray/tune/search/sample.py | {
"start": 2294,
"end": 4294
class ____:
    """Base class to specify a type and valid range to sample parameters from.

    Concrete parameter spaces (float ranges, integer ranges, categorical
    variables) subclass this. A domain describes the set of valid values
    and exposes methods to attach a specific sampler (e.g. ``uniform()``
    or ``loguniform()``).
    """

    sampler = None
    default_sampler_cls = None

    def cast(self, value):
        """Cast value to domain type"""
        return value

    def set_sampler(self, sampler, allow_override=False):
        # A domain may carry at most one sampler unless override is allowed.
        if self.sampler and not allow_override:
            raise ValueError(
                "You can only choose one sampler for parameter "
                "domains. Existing sampler for parameter {}: "
                "{}. Tried to add {}".format(
                    self.__class__.__name__, self.sampler, sampler
                )
            )
        self.sampler = sampler

    def get_sampler(self):
        # Fall back to the class default when no sampler was set.
        return self.sampler or self.default_sampler_cls()

    def sample(
        self,
        config: Optional[Union[List[Dict], Dict]] = None,
        size: int = 1,
        random_state: "RandomState" = None,
    ):
        # Normalize legacy RNG inputs before delegating to the sampler.
        if not isinstance(random_state, _BackwardsCompatibleNumpyRng):
            random_state = _BackwardsCompatibleNumpyRng(random_state)
        return self.get_sampler().sample(
            self, config=config, size=size, random_state=random_state
        )

    def is_grid(self):
        return isinstance(self.sampler, Grid)

    def is_function(self):
        return False

    def is_valid(self, value: Any):
        """Returns True if `value` is a valid value in this domain."""
        raise NotImplementedError

    @property
    def domain_str(self):
        return "(unknown)"
@DeveloperAPI
| Domain |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/sensor_dry_run.py | {
"start": 739,
"end": 1404
class ____(graphene.Mutation):
    """Enable a sensor to launch runs for a job based on external state change."""
    Output = graphene.NonNull(GrapheneSensorDryRunResult)
    class Arguments:
        # GraphQL arguments: which sensor to evaluate, plus an optional
        # cursor for the evaluation.
        selector_data = graphene.NonNull(GrapheneSensorSelector)
        cursor = graphene.String()
    class Meta:
        name = "SensorDryRunMutation"
    @capture_error
    def mutate(
        self, graphene_info: "ResolveInfo", selector_data: Mapping[str, Any], cursor: Optional[str]
    ):
        # Evaluation itself is delegated to GrapheneDryRunInstigationTick;
        # no timestamp is recorded here (dry run, no persisted tick).
        return GrapheneDryRunInstigationTick(
            SensorSelector.from_graphql_input(selector_data), timestamp=None, cursor=cursor
        )
| GrapheneSensorDryRunMutation |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_assets.py | {
"start": 27916,
"end": 39639
class ____(TestAssets):
    # End-to-end tests for GET /assets/events: happy path, auth, filtering,
    # timestamp ranges, ordering, pagination, and extra-field redaction.
    def test_should_respond_200(self, test_client, session):
        asset1, asset2 = self.create_assets(session)
        self.create_assets_events(session)
        self.create_dag_run(session)
        self.create_asset_dag_run(session)
        assets = session.query(AssetEvent).all()
        session.commit()
        assert len(assets) == 2
        # Guard against query-count regressions on the list endpoint.
        with assert_queries_count(3):
            response = test_client.get("/assets/events")
        assert response.status_code == 200
        response_data = response.json()
        assert response_data == {
            "asset_events": [
                {
                    "id": 1,
                    "asset_id": 1,
                    "uri": "s3://bucket/key/1",
                    "extra": {"foo": "bar"},
                    "group": "asset",
                    "name": "simple1",
                    "source_task_id": "source_task_id",
                    "source_dag_id": "source_dag_id",
                    "source_run_id": "source_run_id_1",
                    "source_map_index": -1,
                    "created_dagruns": [
                        {
                            "run_id": "source_run_id_1",
                            "dag_id": "source_dag_id",
                            "logical_date": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                            "start_date": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                            "end_date": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                            "state": "success",
                            "data_interval_start": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                            "data_interval_end": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                        }
                    ],
                    "timestamp": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                    "partition_key": None,
                },
                {
                    "id": 2,
                    "asset_id": 2,
                    "uri": "s3://bucket/key/2",
                    "group": "asset",
                    "name": "simple2",
                    "extra": {"foo": "bar"},
                    "source_task_id": "source_task_id",
                    "source_dag_id": "source_dag_id",
                    "source_run_id": "source_run_id_2",
                    "source_map_index": -1,
                    "created_dagruns": [
                        {
                            "run_id": "source_run_id_2",
                            "dag_id": "source_dag_id",
                            "logical_date": from_datetime_to_zulu_without_ms(
                                DEFAULT_DATE + timedelta(days=1),
                            ),
                            "start_date": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                            "end_date": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                            "state": "success",
                            "data_interval_start": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                            "data_interval_end": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                        }
                    ],
                    "timestamp": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                    "partition_key": None,
                },
            ],
            "total_entries": 2,
        }
    def test_should_respond_401(self, unauthenticated_test_client):
        response = unauthenticated_test_client.get("/assets/events")
        assert response.status_code == 401
    def test_should_respond_403(self, unauthorized_test_client):
        response = unauthorized_test_client.get("/assets/events")
        assert response.status_code == 403
    @pytest.mark.parametrize(
        ("params", "total_entries"),
        [
            ({"asset_id": "2"}, 1),
            ({"source_dag_id": "source_dag_id"}, 2),
            ({"source_task_id": "source_task_id"}, 2),
            ({"source_run_id": "source_run_id_1"}, 1),
            ({"source_map_index": "-1"}, 2),
            ({"name_pattern": "simple1"}, 1),
            ({"name_pattern": "simple%"}, 2),
            ({"name_pattern": "nonexistent"}, 0),
        ],
    )
    @provide_session
    def test_filtering(self, test_client, params, total_entries, session):
        # Each query-parameter filter should narrow the result count.
        self.create_assets()
        self.create_assets_events()
        self.create_dag_run()
        self.create_asset_dag_run()
        response = test_client.get("/assets/events", params=params)
        assert response.status_code == 200
        assert response.json()["total_entries"] == total_entries
    @pytest.mark.parametrize(
        ("params", "expected_ids"),
        [
            # Test Case 1: Filtering with both timestamp_gte and timestamp_lte set to the same date
            (
                {
                    "timestamp_gte": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                    "timestamp_lte": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                },
                [1],  # expected_ids for events exactly on DEFAULT_DATE
            ),
            # Test Case 2: Filtering events greater than or equal to a certain timestamp and less than or equal to another
            (
                {
                    "timestamp_gte": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                    "timestamp_lte": from_datetime_to_zulu_without_ms(DEFAULT_DATE + timedelta(days=1)),
                },
                [1, 2],  # expected_ids for events within the date range
            ),
            # Test Case 3: timestamp_gte later than timestamp_lte with no events in range
            (
                {
                    "timestamp_gte": from_datetime_to_zulu_without_ms(DEFAULT_DATE + timedelta(days=1)),
                    "timestamp_lte": from_datetime_to_zulu_without_ms(DEFAULT_DATE - timedelta(days=1)),
                },
                [],  # expected_ids for events outside the range
            ),
            # Test Case 4: timestamp_gte earlier than timestamp_lte, allowing events within the range
            (
                {
                    "timestamp_gte": from_datetime_to_zulu_without_ms(DEFAULT_DATE + timedelta(days=1)),
                    "timestamp_lte": from_datetime_to_zulu_without_ms(DEFAULT_DATE + timedelta(days=2)),
                },
                [2, 3],  # expected_ids for events within the date range
            ),
        ],
    )
    def test_filter_by_timestamp_gte_and_lte(self, test_client, params, expected_ids, session):
        # Create sample assets and asset events with specified timestamps
        self.create_assets()
        self.create_assets_events(num=3, varying_timestamps=True)
        self.create_dag_run()
        self.create_asset_dag_run()
        # Test with both timestamp_gte and timestamp_lte filters
        response = test_client.get("/assets/events", params=params)
        assert response.status_code == 200
        asset_event_ids = [asset_event["id"] for asset_event in response.json()["asset_events"]]
        assert asset_event_ids == expected_ids
    def test_order_by_raises_400_for_invalid_attr(self, test_client, session):
        response = test_client.get("/assets/events?order_by=fake")
        assert response.status_code == 400
        msg = "Ordering with 'fake' is disallowed or the attribute does not exist on the model"
        assert response.json()["detail"] == msg
    @pytest.mark.parametrize(
        ("params", "expected_asset_ids"),
        [
            # Limit test data
            ({"limit": "1"}, [1]),
            ({"limit": "100"}, list(range(1, 101))),
            # Offset test data
            ({"offset": "1"}, list(range(2, 52))),
            ({"offset": "3"}, list(range(4, 54))),
        ],
    )
    def test_limit_and_offset(self, test_client, params, expected_asset_ids):
        self.create_assets(num=110)
        self.create_assets_events(num=110)
        self.create_dag_run(num=110)
        self.create_asset_dag_run(num=110)
        response = test_client.get("/assets/events", params=params)
        assert response.status_code == 200
        asset_ids = [asset["id"] for asset in response.json()["asset_events"]]
        assert asset_ids == expected_asset_ids
    @pytest.mark.usefixtures("time_freezer")
    @pytest.mark.enable_redact
    def test_should_mask_sensitive_extra(self, test_client, session):
        # Sensitive keys in `extra` must come back redacted as "***".
        self.create_assets_with_sensitive_extra()
        self.create_assets_events_with_sensitive_extra()
        self.create_dag_run()
        self.create_asset_dag_run()
        response = test_client.get("/assets/events")
        assert response.status_code == 200
        response_data = response.json()
        assert response_data == {
            "asset_events": [
                {
                    "id": 1,
                    "asset_id": 1,
                    "uri": "s3://bucket/key/1",
                    "group": "asset",
                    "name": "sensitive1",
                    "extra": {"password": "***"},
                    "source_task_id": "source_task_id",
                    "source_dag_id": "source_dag_id",
                    "source_run_id": "source_run_id_1",
                    "source_map_index": -1,
                    "created_dagruns": [
                        {
                            "run_id": "source_run_id_1",
                            "dag_id": "source_dag_id",
                            "logical_date": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                            "start_date": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                            "end_date": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                            "state": "success",
                            "data_interval_start": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                            "data_interval_end": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                        }
                    ],
                    "timestamp": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                    "partition_key": None,
                },
                {
                    "id": 2,
                    "asset_id": 2,
                    "uri": "s3://bucket/key/2",
                    "group": "asset",
                    "name": "sensitive2",
                    "extra": {"password": "***"},
                    "source_task_id": "source_task_id",
                    "source_dag_id": "source_dag_id",
                    "source_run_id": "source_run_id_2",
                    "source_map_index": -1,
                    "created_dagruns": [
                        {
                            "run_id": "source_run_id_2",
                            "dag_id": "source_dag_id",
                            "logical_date": from_datetime_to_zulu_without_ms(
                                DEFAULT_DATE + timedelta(days=1),
                            ),
                            "start_date": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                            "end_date": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                            "state": "success",
                            "data_interval_start": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                            "data_interval_end": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                        }
                    ],
                    "timestamp": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
                    "partition_key": None,
                },
            ],
            "total_entries": 2,
        }
| TestGetAssetEvents |
python | huggingface__transformers | src/transformers/models/bitnet/modeling_bitnet.py | {
"start": 10277,
"end": 12049
class ____(GradientCheckpointingLayer):
    """One BitNet transformer decoder layer: pre-norm self-attention and
    pre-norm MLP, each wrapped in a residual connection."""
    def __init__(self, config: BitNetConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = BitNetAttention(config=config, layer_idx=layer_idx)
        self.mlp = BitNetMLP(config)
        # RMSNorm applied before each sub-block (pre-norm architecture).
        self.input_layernorm = BitNetRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = BitNetRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        # hidden_states presumably (batch, seq_len, hidden_size) — confirm
        # against BitNetModel's embedding output.
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        # Self Attention
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states
        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states
| BitNetDecoderLayer |
python | getsentry__sentry | src/sentry/integrations/data_forwarding/segment/forwarder.py | {
"start": 357,
"end": 3624
} | class ____(BaseDataForwarder):
provider = DataForwarderProviderSlug.SEGMENT
rate_limit = (200, 1)
endpoint: ClassVar[str] = "https://api.segment.io/v1/track"
def get_event_payload(
self, event: Event | GroupEvent, config: dict[str, Any]
) -> dict[str, Any]:
context = {"library": {"name": "sentry", "version": VERSION}}
props = {
"eventId": event.event_id,
"transaction": event.get_tag("transaction") or "",
"release": event.get_tag("sentry:release") or "",
"level": event.get_tag("level") or "",
"environment": event.get_tag("environment") or "",
}
if "user" in event.interfaces:
user = event.interfaces["user"]
if user.ip_address:
context["ip"] = user.ip_address
user_id = user.id
else:
user_id = None
if "request" in event.interfaces:
request = event.interfaces["request"]
headers = request.headers
if not isinstance(headers, dict):
headers = dict(headers or ())
context.update(
{
"userAgent": headers.get("User-Agent", ""),
"page": {
"url": request.url,
"method": request.method,
"search": request.query_string or "",
"referrer": headers.get("Referer", ""),
},
}
)
if "exception" in event.interfaces:
exc = event.interfaces["exception"].values[0]
props.update({"exceptionType": exc.type})
return {
"context": context,
"userId": user_id,
"event": "Error Captured",
"properties": props,
"integration": {"name": "sentry", "version": VERSION},
"timestamp": event.datetime.isoformat() + "Z",
}
def forward_event(
self,
event: Event | GroupEvent,
payload: dict[str, Any],
config: dict[str, Any],
) -> bool:
# we currently only support errors
if event.get_event_type() != "error":
return False
# we avoid instantiating interfaces here as they're only going to be
# used if there's a User present
user_interface = event.interfaces.get("user")
if not user_interface:
return False
# if the user id is not present, we can't forward the event
if not user_interface.id:
return False
write_key = config["write_key"]
if not write_key:
return False
try:
with http.build_session() as session:
response = session.post(
self.endpoint,
json=payload,
auth=(write_key, ""),
timeout=10,
)
response.raise_for_status()
except Exception:
logger.exception(
"segment.send_payload.error",
extra={"event_id": event.event_id, "project_id": event.project_id},
)
return False
return True
| SegmentForwarder |
python | scrapy__scrapy | tests/test_middleware.py | {
"start": 607,
"end": 674
} | class ____:
def process(self, response, request):
pass
| M3 |
python | spack__spack | lib/spack/spack/operating_systems/linux_distro.py | {
"start": 599,
"end": 2270
} | class ____(OperatingSystem):
"""This class will represent the autodetected operating system
for a Linux System. Since there are many different flavors of
Linux, this class will attempt to encompass them all through
autodetection using the python module platform and the method
platform.dist()
"""
def __init__(self):
try:
# This will throw an error if imported on a non-Linux platform.
from spack.vendor import distro
distname, version = distro.id(), distro.version()
except ImportError:
distname, version = "unknown", ""
# Grabs major version from tuple on redhat; on other platforms
# grab the first legal identifier in the version field. On
# debian you get things like 'wheezy/sid'; sid means unstable.
# We just record 'wheezy' and don't get quite so detailed.
version = re.split(r"[^\w-]", version)
if "ubuntu" in distname:
version = ".".join(version[0:2])
# openSUSE Tumbleweed is a rolling release which can change
# more than once in a week, so set version to tumbleweed$GLIBVERS
elif "opensuse-tumbleweed" in distname or "opensusetumbleweed" in distname:
distname = "opensuse"
output = check_output(["ldd", "--version"]).decode()
libcvers = re.findall(r"ldd \(GNU libc\) (.*)", output)
if len(libcvers) == 1:
version = "tumbleweed" + libcvers[0]
else:
version = "tumbleweed" + version[0]
else:
version = version[0]
super().__init__(distname, version)
| LinuxDistro |
python | pyqtgraph__pyqtgraph | pyqtgraph/GraphicsScene/GraphicsScene.py | {
"start": 374,
"end": 25268
} | class ____(QtWidgets.QGraphicsScene):
"""
Extension of QGraphicsScene that implements a complete, parallel mouse event system.
(It would have been preferred to just alter the way QGraphicsScene creates and delivers
events, but this turned out to be impossible because the constructor for QGraphicsMouseEvent
is private)
* Generates MouseClicked events in addition to the usual press/move/release events.
(This works around a problem where it is impossible to have one item respond to a
drag if another is watching for a click.)
* Adjustable radius around click that will catch objects so you don't have to click *exactly* over small/thin objects
* Global context menu--if an item implements a context menu, then its parent(s) may also add items to the menu.
* Allows items to decide _before_ a mouse click which item will be the recipient of mouse events.
This lets us indicate unambiguously to the user which item they are about to click/drag on
* Eats mouseMove events that occur too soon after a mouse press.
* Reimplements items() and itemAt() to circumvent PyQt bug
====================== ====================================================================
**Signals**
sigMouseClicked(event) Emitted when the mouse is clicked over the scene. Use ev.pos() to
get the click position relative to the item that was clicked on,
or ev.scenePos() to get the click position in scene coordinates.
See :class:`pyqtgraph.GraphicsScene.mouseEvents.MouseClickEvent`.
sigMouseMoved(pos) Emitted when the mouse cursor moves over the scene. The position
is given in scene coordinates.
sigMouseHover(items) Emitted when the mouse is moved over the scene. Items is a list
of items under the cursor.
sigItemAdded(item) Emitted when an item is added via addItem(). The item is given.
sigItemRemoved(item) Emitted when an item is removed via removeItem(). The item is given.
====================== ====================================================================
Mouse interaction is as follows:
1) Every time the mouse moves, the scene delivers both the standard hoverEnter/Move/LeaveEvents
as well as custom HoverEvents.
2) Items are sent HoverEvents in Z-order and each item may optionally call event.acceptClicks(button),
acceptDrags(button) or both. If this method call returns True, this informs the item that _if_
the user clicks/drags the specified mouse button, the item is guaranteed to be the
recipient of click/drag events (the item may wish to change its appearance to indicate this).
If the call to acceptClicks/Drags returns False, then the item is guaranteed to *not* receive
the requested event (because another item has already accepted it).
3) If the mouse is clicked, a mousePressEvent is generated as usual. If any items accept this press event, then
No click/drag events will be generated and mouse interaction proceeds as defined by Qt. This allows
items to function properly if they are expecting the usual press/move/release sequence of events.
(It is recommended that items do NOT accept press events, and instead use click/drag events)
Note: The default implementation of QGraphicsItem.mousePressEvent will *accept* the event if the
item is has its Selectable or Movable flags enabled. You may need to override this behavior.
4) If no item accepts the mousePressEvent, then the scene will begin delivering mouseDrag and/or mouseClick events.
If the mouse is moved a sufficient distance (or moved slowly enough) before the button is released,
then a mouseDragEvent is generated.
If no drag events are generated before the button is released, then a mouseClickEvent is generated.
5) Click/drag events are delivered to the item that called acceptClicks/acceptDrags on the HoverEvent
in step 1. If no such items exist, then the scene attempts to deliver the events to items near the event.
ClickEvents may be delivered in this way even if no
item originally claimed it could accept the click. DragEvents may only be delivered this way if it is the initial
move in a drag.
"""
sigMouseHover = QtCore.Signal(object) ## emits a list of objects hovered over
sigMouseMoved = QtCore.Signal(object) ## emits position of mouse on every move
sigMouseClicked = QtCore.Signal(object) ## emitted when mouse is clicked. Check for event.isAccepted() to see whether the event has already been acted on.
sigPrepareForPaint = QtCore.Signal() ## emitted immediately before the scene is about to be rendered
sigItemAdded = QtCore.Signal(object) ## emits the item object just added
sigItemRemoved = QtCore.Signal(object) ## emits the item object just removed
_addressCache = weakref.WeakValueDictionary()
ExportDirectory = None
def __init__(self, clickRadius: int = 2, moveDistance=5, parent=None):
QtWidgets.QGraphicsScene.__init__(self, parent)
self.setClickRadius(clickRadius)
self.setMoveDistance(moveDistance)
self.exportDirectory = None
self.clickEvents = []
self.dragButtons = []
self.mouseGrabber = None
self.dragItem = None
self.lastDrag = None
self.hoverItems = weakref.WeakKeyDictionary()
self.lastHoverEvent = None
self.minDragTime = 0.5 # drags shorter than 0.5 sec are interpreted as clicks
self.contextMenu = [QtGui.QAction(QtCore.QCoreApplication.translate("GraphicsScene", "Export..."), self)]
self.contextMenu[0].triggered.connect(self.showExportDialog)
self.exportDialog = None
self._lastMoveEventTime = 0
def render(self, *args):
self.prepareForPaint()
return QtWidgets.QGraphicsScene.render(self, *args)
@QtCore.Slot()
def prepareForPaint(self):
"""Called before every render. This method will inform items that the scene is about to
be rendered by emitting sigPrepareForPaint.
This allows items to delay expensive processing until they know a paint will be required."""
self.sigPrepareForPaint.emit()
def setClickRadius(self, r: int):
"""
Set the distance away from mouse clicks to search for interacting items.
When clicking, the scene searches first for items that directly intersect the click position
followed by any other items that are within a rectangle that extends r pixels away from the
click position.
"""
self._clickRadius = int(r)
def setMoveDistance(self, d):
"""
Set the distance the mouse must move after a press before mouseMoveEvents will be delivered.
This ensures that clicks with a small amount of movement are recognized as clicks instead of
drags.
"""
self._moveDistance = d
def mousePressEvent(self, ev):
super().mousePressEvent(ev)
if self.mouseGrabberItem() is None: ## nobody claimed press; we are free to generate drag/click events
if self.lastHoverEvent is not None:
# If the mouse has moved since the last hover event, send a new one.
# This can happen if a context menu is open while the mouse is moving.
if ev.scenePos() != self.lastHoverEvent.scenePos():
self.sendHoverEvents(ev)
self.clickEvents.append(MouseClickEvent(ev))
## set focus on the topmost focusable item under this click
items = self.items(ev.scenePos())
for i in items:
if i.isEnabled() and i.isVisible() and (i.flags() & i.GraphicsItemFlag.ItemIsFocusable):
i.setFocus(QtCore.Qt.FocusReason.MouseFocusReason)
break
def _moveEventIsAllowed(self):
# For ignoring events that are too close together
# Max number of events per second
rateLimit = getConfigOption('mouseRateLimit')
if rateLimit <= 0:
return True
# Delay between events (in milliseconds)
delay = 1000.0 / rateLimit
if getMillis() - self._lastMoveEventTime >= delay:
return True
return False
def mouseMoveEvent(self, ev):
# ignore high frequency events
if self._moveEventIsAllowed():
self._lastMoveEventTime = getMillis()
self.sigMouseMoved.emit(ev.scenePos())
# First allow QGraphicsScene to eliver hoverEvent/Move/Exit Events
super().mouseMoveEvent(ev)
# Next Deliver our own Hover Events
self.sendHoverEvents(ev)
if ev.buttons():
# button is pressed' send mouseDragEvents
if self.mouseGrabberItem() is None:
now = perf_counter()
init = False
## keep track of which buttons are involved in dragging
for btn in [QtCore.Qt.MouseButton.LeftButton, QtCore.Qt.MouseButton.MiddleButton, QtCore.Qt.MouseButton.RightButton]:
if not (ev.buttons() & btn):
continue
if btn not in self.dragButtons: ## see if we've dragged far enough yet
cev = [e for e in self.clickEvents if e.button() == btn]
if cev:
cev = cev[0]
dist = Point(ev.scenePos() - cev.scenePos()).length()
if dist == 0 or (dist < self._moveDistance and now - cev.time() < self.minDragTime):
continue
init = init or (len(self.dragButtons) == 0) ## If this is the first button to be dragged, then init=True
self.dragButtons.append(btn)
## if we have dragged buttons, deliver a drag event
if len(self.dragButtons) > 0:
if self.sendDragEvent(ev, init=init):
ev.accept()
else:
super().mouseMoveEvent(ev)
# if you do not accept event (which is ignored) then cursor will disappear
ev.accept()
def leaveEvent(self, ev): ## inform items that mouse is gone
if len(self.dragButtons) == 0:
self.sendHoverEvents(ev, exitOnly=True)
def mouseReleaseEvent(self, ev):
if self.mouseGrabberItem() is None:
if ev.button() in self.dragButtons:
if self.sendDragEvent(ev, final=True):
#print "sent drag event"
ev.accept()
self.dragButtons.remove(ev.button())
else:
cev = [e for e in self.clickEvents if e.button() == ev.button()]
if cev:
if self.sendClickEvent(cev[0]):
ev.accept()
try:
self.clickEvents.remove(cev[0])
except ValueError:
warnings.warn(
("A ValueError can occur here with errant "
"QApplication.processEvent() calls, see "
"https://github.com/pyqtgraph/pyqtgraph/pull/2580 "
"for more information."),
RuntimeWarning,
stacklevel=2
)
if not ev.buttons():
self.dragItem = None
self.dragButtons = []
self.clickEvents = []
self.lastDrag = None
super().mouseReleaseEvent(ev)
self.sendHoverEvents(ev) ## let items prepare for next click/drag
def mouseDoubleClickEvent(self, ev):
super().mouseDoubleClickEvent(ev)
if self.mouseGrabberItem() is None: ## nobody claimed press; we are free to generate drag/click events
self.clickEvents.append(MouseClickEvent(ev, double=True))
def sendHoverEvents(self, ev, exitOnly=False):
## if exitOnly, then just inform all previously hovered items that the mouse has left.
if exitOnly:
acceptable=False
items = []
event = HoverEvent(None, acceptable)
else:
acceptable = not ev.buttons() ## if we are in mid-drag, do not allow items to accept the hover event.
event = HoverEvent(ev, acceptable)
items = self.itemsNearEvent(event, hoverable=True)
self.sigMouseHover.emit(items)
prevItems = list(self.hoverItems.keys())
for item in items:
if hasattr(item, 'hoverEvent'):
event.currentItem = item
if item not in self.hoverItems:
self.hoverItems[item] = None
event.enter = True
else:
prevItems.remove(item)
event.enter = False
try:
item.hoverEvent(event)
except:
debug.printExc("Error sending hover event:")
event.enter = False
event.exit = True
#print "hover exit items:", prevItems
for item in prevItems:
event.currentItem = item
try:
# NOTE: isQObjectAlive(item) was added for PySide6 where
# verlet_chain_demo.py triggers a RuntimeError.
if isQObjectAlive(item) and item.scene() is self:
item.hoverEvent(event)
except:
debug.printExc("Error sending hover exit event:")
finally:
del self.hoverItems[item]
# Update last hover event unless:
# - mouse is dragging (move+buttons); in this case we want the dragged
# item to continue receiving events until the drag is over
# - event is not a mouse event (QEvent.Type.Leave sometimes appears here)
if (ev.type() == ev.Type.GraphicsSceneMousePress or
(ev.type() == ev.Type.GraphicsSceneMouseMove and not ev.buttons())):
self.lastHoverEvent = event ## save this so we can ask about accepted events later.
def sendDragEvent(self, ev, init=False, final=False):
## Send a MouseDragEvent to the current dragItem or to
## items near the beginning of the drag
event = MouseDragEvent(ev, self.clickEvents[0], self.lastDrag, start=init, finish=final)
#print "dragEvent: init=", init, 'final=', final, 'self.dragItem=', self.dragItem
if init and self.dragItem is None:
if self.lastHoverEvent is not None:
acceptedItem = self.lastHoverEvent.dragItems().get(event.button(), None)
else:
acceptedItem = None
if acceptedItem is not None and acceptedItem.scene() is self:
#print "Drag -> pre-selected item:", acceptedItem
self.dragItem = acceptedItem
event.currentItem = self.dragItem
try:
self.dragItem.mouseDragEvent(event)
except:
debug.printExc("Error sending drag event:")
else:
#print "drag -> new item"
for item in self.itemsNearEvent(event):
#print "check item:", item
if not item.isVisible() or not item.isEnabled():
continue
if hasattr(item, 'mouseDragEvent'):
event.currentItem = item
try:
item.mouseDragEvent(event)
except:
debug.printExc("Error sending drag event:")
if event.isAccepted():
#print " --> accepted"
self.dragItem = item
if item.flags() & item.GraphicsItemFlag.ItemIsFocusable:
item.setFocus(QtCore.Qt.FocusReason.MouseFocusReason)
break
elif self.dragItem is not None:
event.currentItem = self.dragItem
try:
self.dragItem.mouseDragEvent(event)
except:
debug.printExc("Error sending hover exit event:")
self.lastDrag = event
return event.isAccepted()
def sendClickEvent(self, ev):
## if we are in mid-drag, click events may only go to the dragged item.
if self.dragItem is not None and hasattr(self.dragItem, 'mouseClickEvent'):
ev.currentItem = self.dragItem
self.dragItem.mouseClickEvent(ev)
## otherwise, search near the cursor
else:
if self.lastHoverEvent is not None:
acceptedItem = self.lastHoverEvent.clickItems().get(ev.button(), None)
else:
acceptedItem = None
if acceptedItem is not None:
ev.currentItem = acceptedItem
try:
acceptedItem.mouseClickEvent(ev)
except:
debug.printExc("Error sending click event:")
else:
for item in self.itemsNearEvent(ev):
if not item.isVisible() or not item.isEnabled():
continue
if hasattr(item, 'mouseClickEvent'):
ev.currentItem = item
try:
item.mouseClickEvent(ev)
except:
debug.printExc("Error sending click event:")
if ev.isAccepted():
if item.flags() & item.GraphicsItemFlag.ItemIsFocusable:
item.setFocus(QtCore.Qt.FocusReason.MouseFocusReason)
break
self.sigMouseClicked.emit(ev)
return ev.isAccepted()
def addItem(self, item):
# extend QGraphicsScene.addItem to emit a sigItemAdded signal
ret = QtWidgets.QGraphicsScene.addItem(self, item)
self.sigItemAdded.emit(item)
return ret
def removeItem(self, item):
# extend QGraphicsScene.removeItem to emit a sigItemRemoved signal
ret = QtWidgets.QGraphicsScene.removeItem(self, item)
self.sigItemRemoved.emit(item)
return ret
def itemsNearEvent(
self,
event,
selMode=QtCore.Qt.ItemSelectionMode.IntersectsItemShape,
sortOrder=QtCore.Qt.SortOrder.DescendingOrder,
hoverable=False,
):
"""
Return an iterator that iterates first through the items that directly intersect point (in Z order)
followed by any other items that are within the scene's click radius.
"""
view = self.views()[0]
tr = view.viewportTransform()
if hasattr(event, "buttonDownScenePos"):
point = event.buttonDownScenePos()
else:
point = event.scenePos()
## Sort by descending Z-order (don't trust scene.itms() to do this either)
## use 'absolute' z value, which is the sum of all item/parent ZValues
def absZValue(item):
if item is None:
return 0
return item.zValue() + absZValue(item.parentItem())
## Get items, which directly are at the given point (sorted by z-value)
items_at_point = self.items(point, selMode, sortOrder, tr)
items_at_point.sort(key=absZValue, reverse=True)
## Get items, which are within the click radius around the given point (sorted by z-value)
r = self._clickRadius
items_within_radius = []
rgn = None
if r > 0:
rect = view.mapToScene(QtCore.QRect(0, 0, 2 * r, 2 * r)).boundingRect()
w = rect.width()
h = rect.height()
rgn = QtCore.QRectF(point.x() - w / 2, point.y() - h / 2, w, h)
items_within_radius = self.items(rgn, selMode, sortOrder, tr)
items_within_radius.sort(key=absZValue, reverse=True)
# Remove items, which are already in the other list
for item in items_at_point:
if item in items_within_radius:
items_within_radius.remove(item)
## Put both groups of items together, but in the correct order
## The items directly at the given point shall have higher priority
all_items = items_at_point + items_within_radius
## Remove items, which we don't want, due to several reasons
selected_items = []
for item in all_items:
if hoverable and not hasattr(item, "hoverEvent"):
continue
if item.scene() is not self:
continue
shape = item.shape() # Note: default shape() returns boundingRect()
if shape is None:
continue
# Remove items whose shape does not contain point or region
# (scene.items() apparently sucks at this)
if (
rgn is not None
and shape.intersects(item.mapFromScene(rgn).boundingRect())
) or shape.contains(item.mapFromScene(point)):
selected_items.append(item)
return selected_items
def getViewWidget(self):
return self.views()[0]
def addParentContextMenus(self, item, menu, event):
"""
Can be called by any item in the scene to expand its context menu to include parent context menus.
Parents may implement getContextMenus to add new menus / actions to the existing menu.
getContextMenus must accept 1 argument (the event that generated the original menu) and
return a single QMenu or a list of QMenus.
The final menu will look like:
| Original Item 1
| Original Item 2
| ...
| Original Item N
| ------------------
| Parent Item 1
| Parent Item 2
| ...
| Grandparent Item 1
| ...
============== ==================================================
**Arguments:**
item The item that initially created the context menu
(This is probably the item making the call to this function)
menu The context menu being shown by the item
event The original event that triggered the menu to appear.
============== ==================================================
"""
menusToAdd = []
while item is not self:
item = item.parentItem()
if item is None:
item = self
if not hasattr(item, "getContextMenus"):
continue
subMenus = item.getContextMenus(event) or []
if isinstance(subMenus, list): ## so that some items (like FlowchartViewBox) can return multiple menus
menusToAdd.extend(subMenus)
else:
menusToAdd.append(subMenus)
# Filter out options that were previously added
existingActions = menu.actions()
actsToAdd = []
for menuOrAct in menusToAdd:
if isinstance(menuOrAct, QtWidgets.QMenu):
menuOrAct = menuOrAct.menuAction()
elif not isinstance(menuOrAct, QtGui.QAction):
raise Exception(
f"Cannot add object {menuOrAct} (type={type(menuOrAct)}) to QMenu."
)
if menuOrAct not in existingActions:
actsToAdd.append(menuOrAct)
if actsToAdd:
menu.addSeparator()
menu.addActions(actsToAdd)
return menu
def getContextMenus(self, event):
self.contextMenuItem = event.acceptedItem
return self.contextMenu
@QtCore.Slot()
def showExportDialog(self):
if self.exportDialog is None:
from . import exportDialog
self.exportDialog = exportDialog.ExportDialog(self)
self.exportDialog.show(self.contextMenuItem)
| GraphicsScene |
python | huggingface__transformers | tests/models/pegasus/test_modeling_pegasus.py | {
"start": 23998,
"end": 25209
} | class ____(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (PegasusDecoder, PegasusForCausalLM) if is_torch_available() else ()
test_resize_position_embeddings = True
is_encoder_decoder = False
def setUp(
self,
):
self.model_tester = PegasusStandaloneDecoderModelTester(self, is_training=False)
self.config_tester = ConfigTester(self, config_class=PegasusConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
def test_decoder_model_attn_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)
@unittest.skip(reason="Decoder cannot keep gradients")
def test_retain_grad_hidden_states_attentions(self):
return
@unittest.skip(reason="Decoder cannot keep gradients")
def test_flex_attention_with_grads():
return
| PegasusStandaloneDecoderModelTest |
python | kamyu104__LeetCode-Solutions | Python/number-of-operations-to-make-network-connected.py | {
"start": 981,
"end": 1644
} | class ____(object):
def makeConnected(self, n, connections):
"""
:type n: int
:type connections: List[List[int]]
:rtype: int
"""
def dfs(i, lookup):
if i in lookup:
return 0
lookup.add(i)
if i in G:
for j in G[i]:
dfs(j, lookup)
return 1
if len(connections) < n-1:
return -1
G = collections.defaultdict(list)
for i, j in connections:
G[i].append(j)
G[j].append(i)
lookup = set()
return sum(dfs(i, lookup) for i in xrange(n)) - 1
| Solution2 |
python | kamyu104__LeetCode-Solutions | Python/defanging-an-ip-address.py | {
"start": 29,
"end": 351
} | class ____(object):
def defangIPaddr(self, address):
"""
:type address: str
:rtype: str
"""
result = []
for c in address:
if c == '.':
result.append("[.]")
else:
result.append(c)
return "".join(result)
| Solution |
python | pypa__pip | tests/unit/test_index.py | {
"start": 3577,
"end": 11436
} | class ____:
@pytest.mark.parametrize(
"py_version_info, ignore_requires_python, expected",
[
pytest.param(
(3, 6, 5),
False,
(LinkType.candidate, "1.12"),
id="compatible",
),
pytest.param(
(3, 6, 4),
False,
(
LinkType.requires_python_mismatch,
"1.12 Requires-Python ==3.6.5,!=3.13.3",
),
id="requires-python-mismatch",
),
pytest.param(
(3, 6, 4),
True,
(LinkType.candidate, "1.12"),
id="requires-python-mismatch-ignored",
),
],
)
def test_evaluate_link(
self,
py_version_info: tuple[int, int, int],
ignore_requires_python: bool,
expected: tuple[LinkType, str],
) -> None:
target_python = TargetPython(py_version_info=py_version_info)
evaluator = LinkEvaluator(
project_name="twine",
canonical_name=canonicalize_name("twine"),
formats=frozenset(["source"]),
target_python=target_python,
allow_yanked=True,
ignore_requires_python=ignore_requires_python,
)
link = Link(
"https://example.com/#egg=twine-1.12",
requires_python="!= 3.13.3, == 3.6.5",
)
actual = evaluator.evaluate_link(link)
assert actual == expected
@pytest.mark.parametrize(
"yanked_reason, allow_yanked, expected",
[
(None, True, (LinkType.candidate, "1.12")),
(None, False, (LinkType.candidate, "1.12")),
("", True, (LinkType.candidate, "1.12")),
(
"",
False,
(LinkType.yanked, "yanked for reason: <none given>"),
),
("bad metadata", True, (LinkType.candidate, "1.12")),
(
"bad metadata",
False,
(LinkType.yanked, "yanked for reason: bad metadata"),
),
# Test a unicode string with a non-ascii character.
("curly quote: \u2018", True, (LinkType.candidate, "1.12")),
(
"curly quote: \u2018",
False,
(
LinkType.yanked,
"yanked for reason: curly quote: \u2018",
),
),
],
)
def test_evaluate_link__allow_yanked(
self,
yanked_reason: str,
allow_yanked: bool,
expected: tuple[LinkType, str],
) -> None:
target_python = TargetPython(py_version_info=(3, 6, 4))
evaluator = LinkEvaluator(
project_name="twine",
canonical_name=canonicalize_name("twine"),
formats=frozenset(["source"]),
target_python=target_python,
allow_yanked=allow_yanked,
)
link = Link(
"https://example.com/#egg=twine-1.12",
yanked_reason=yanked_reason,
)
actual = evaluator.evaluate_link(link)
assert actual == expected
def test_evaluate_link__incompatible_wheel(self) -> None:
"""
Test an incompatible wheel.
"""
target_python = TargetPython(py_version_info=(3, 6, 4))
# Set the valid tags to an empty list to make sure nothing matches.
target_python._valid_tags = []
evaluator = LinkEvaluator(
project_name="sample",
canonical_name=canonicalize_name("sample"),
formats=frozenset(["binary"]),
target_python=target_python,
allow_yanked=True,
)
link = Link("https://example.com/sample-1.0-py2.py3-none-any.whl")
actual = evaluator.evaluate_link(link)
expected = (
LinkType.platform_mismatch,
"none of the wheel's tags (py2-none-any, py3-none-any) are compatible "
"(run pip debug --verbose to show compatible tags)",
)
assert actual == expected
@pytest.mark.parametrize(
"hex_digest, expected_versions",
[
(64 * "a", ["1.0", "1.1"]),
(64 * "b", ["1.0", "1.2"]),
(64 * "c", ["1.0", "1.1", "1.2"]),
],
)
def test_filter_unallowed_hashes(hex_digest: str, expected_versions: list[str]) -> None:
candidates = [
make_mock_candidate("1.0"),
make_mock_candidate("1.1", hex_digest=(64 * "a")),
make_mock_candidate("1.2", hex_digest=(64 * "b")),
]
hashes_data = {
"sha256": [hex_digest],
}
hashes = Hashes(hashes_data)
actual = filter_unallowed_hashes(
candidates,
hashes=hashes,
project_name="my-project",
)
actual_versions = [str(candidate.version) for candidate in actual]
assert actual_versions == expected_versions
# Check that the return value is always different from the given value.
assert actual is not candidates
def test_filter_unallowed_hashes__no_hashes(caplog: pytest.LogCaptureFixture) -> None:
caplog.set_level(logging.DEBUG)
candidates = [
make_mock_candidate("1.0"),
make_mock_candidate("1.1"),
]
actual = filter_unallowed_hashes(
candidates,
hashes=Hashes(),
project_name="my-project",
)
# Check that the return value is a copy.
assert actual == candidates
assert actual is not candidates
expected_message = (
"Given no hashes to check 2 links for project 'my-project': "
"discarding no candidates"
)
check_caplog(caplog, "DEBUG", expected_message)
def test_filter_unallowed_hashes__log_message_with_match(
caplog: pytest.LogCaptureFixture,
) -> None:
caplog.set_level(logging.DEBUG)
# Test 1 match, 2 non-matches, 3 no hashes so all 3 values will be
# different.
candidates = [
make_mock_candidate("1.0"),
make_mock_candidate(
"1.1",
),
make_mock_candidate(
"1.2",
),
make_mock_candidate("1.3", hex_digest=(64 * "a")),
make_mock_candidate("1.4", hex_digest=(64 * "b")),
make_mock_candidate("1.5", hex_digest=(64 * "c")),
]
hashes_data = {
"sha256": [64 * "a", 64 * "d"],
}
hashes = Hashes(hashes_data)
actual = filter_unallowed_hashes(
candidates,
hashes=hashes,
project_name="my-project",
)
assert len(actual) == 4
expected_message = (
"Checked 6 links for project 'my-project' against 2 hashes "
"(1 matches, 3 no digest): discarding 2 non-matches:\n"
" https://example.com/pkg-1.4.tar.gz#sha256="
"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n"
" https://example.com/pkg-1.5.tar.gz#sha256="
"cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"
)
check_caplog(caplog, "DEBUG", expected_message)
def test_filter_unallowed_hashes__log_message_with_no_match(
caplog: pytest.LogCaptureFixture,
) -> None:
caplog.set_level(logging.DEBUG)
candidates = [
make_mock_candidate("1.0"),
make_mock_candidate("1.1", hex_digest=(64 * "b")),
make_mock_candidate("1.2", hex_digest=(64 * "c")),
]
hashes_data = {
"sha256": [64 * "a", 64 * "d"],
}
hashes = Hashes(hashes_data)
actual = filter_unallowed_hashes(
candidates,
hashes=hashes,
project_name="my-project",
)
assert len(actual) == 3
expected_message = (
"Checked 3 links for project 'my-project' against 2 hashes "
"(0 matches, 1 no digest): discarding no candidates"
)
check_caplog(caplog, "DEBUG", expected_message)
| TestLinkEvaluator |
python | pyca__cryptography | src/cryptography/hazmat/primitives/_serialization.py | {
"start": 438,
"end": 615
} | class ____(utils.Enum):
PBESv1SHA1And3KeyTripleDESCBC = "PBESv1 using SHA1 and 3-Key TripleDES"
PBESv2SHA256AndAES256CBC = "PBESv2 using SHA256 PBKDF2 and AES256 CBC"
| PBES |
python | google__jax | jax/experimental/jax2tf/tests/shape_poly_test.py | {
"start": 1565,
"end": 9757
} | class ____(Harness):
"""Tests a function with shape polymorphism.
Converts `fun` with shape polymorphism, creates a `tf.ConcreteFunction`
given `input_signature` and checks the inferred output shapes to match
`expected_output_shapes`, then checks that the JAX and the TF functions
produce the same results.
"""
def __init__(self,
group_name: str, name: str,
fun: Callable,
*,
arg_descriptors: Sequence[test_harnesses.ArgDescriptor] = (),
polymorphic_shapes: Sequence[str | None] = (),
polymorphic_constraints: Sequence[str] = (),
input_signature: Sequence[tf.TensorSpec] | None = None,
expected_output_signature: tf.TensorSpec | None = None,
expect_error: tuple[Any | None, str | None] = (None, None),
skip_jax_run: bool = False,
check_result: bool = True,
tol: float | None = None,
limitations: Sequence[Jax2TfLimitation] = (),
override_jax_config_flags: dict[str, Any] = {}):
"""Args:
group_name, name: The name for the harness. See `Harness.__init__`.
fun: the function to be converted, possibly after partial application to
static arguments from `arg_descriptors`. See `Harness.__init__`.
arg_descriptors: The argument descriptors. See `Harness.__init__`. May
be missing, in which case `skip_jax_run` should be `True` and
`input_signature` must be present.
polymorphic_shapes: For `jax2tf.convert`.
polymorphic_constraints: For `jax2tf.convert`.
input_signature: For `tf.function.get_concrete_function`. If missing,
generated from `polymorphic_shapes`.
expected_output_signature: the expected inferred output shape.
expect_error: a pair of an Exception type and a regular expression to
match the expected exception string.
skip_jax_run: If True, then neither the JAX nor the TF functions are
executed.
check_result: specifies if we want to check that the result of the shape
polymorphic conversion produces the same result and the JAX function.
tol: the tolerance to use for checking results.
limitations: if given, then apply the custom_assert and tolerance from the
Jax2TfLimitations.
override_jax_config_flags: jax.config flags to override for the duration
of the test.
"""
super().__init__(group_name, name, fun, arg_descriptors,
dtype=np.float32)
self.polymorphic_shapes = polymorphic_shapes
self.polymorphic_constraints = polymorphic_constraints
self.input_signature = input_signature
self.expected_output_signature = expected_output_signature
self.skip_jax_run = skip_jax_run
self.expect_error = expect_error
self.tol = tol
self.check_result = check_result
self.limitations = limitations
self.override_jax_config_flags = override_jax_config_flags
def run_test(self, tst: tf_test_util.JaxToTfTestCase) -> jax.Array | None:
def log_message(extra: str):
return f"[{tst._testMethodName}]: {extra}"
# Check that we have overridden the jax.config flags
for fname, fvalue in self.override_jax_config_flags.items():
tst.assertEqual(getattr(jax.config, fname), fvalue, (
f"Flag {fname} current value {getattr(jax.config, fname)} != {fvalue}"))
tst.assertIsNotNone(self.polymorphic_shapes)
polymorphic_shapes = self.polymorphic_shapes
if not self.skip_jax_run:
args = self.dyn_args_maker(tst.rng())
else:
tst.assertIsNotNone(self.input_signature)
if self.input_signature is None:
tst.assertEqual(
len(polymorphic_shapes), len(args),
f"polymorphic_shapes {polymorphic_shapes} of length "
f"{len(polymorphic_shapes)} must match number of arguments {len(args)}")
args_specs = export.symbolic_args_specs(args, polymorphic_shapes)
input_signature = [
tf.TensorSpec(
[d if isinstance(d, int) else None for d in a.shape],
dtype=a.dtype) for a in args_specs]
else:
input_signature = self.input_signature # type: ignore
expect_error_type, expect_error_regex = self.expect_error
if self.skip_jax_run and not self.arg_descriptors:
f_jax = self.fun
else:
f_jax = self.dyn_fun
with contextlib.ExitStack() as stack:
if expect_error_type is not None:
stack.enter_context(tst.assertRaisesRegex(expect_error_type, expect_error_regex))
f_tf = jax2tf.convert(f_jax, polymorphic_shapes=polymorphic_shapes,
polymorphic_constraints=self.polymorphic_constraints)
# Run in tf.Eager mode first, because it is friendlier to debuggers
res_tf = f_tf(*args) if not self.skip_jax_run else None
f_tf_func = tf.function(
f_tf, autograph=False, input_signature=input_signature)
# Create tf.ConcreteFunction and check inferred output signature
concrete_f_tf = f_tf_func.get_concrete_function(*input_signature)
if expect_error_type is not None:
return None
if self.expected_output_signature:
# Strangely, output_shapes can be a single shape for a function with a
# single result, or a list/tuple of shapes.
expected_output_signature = self.expected_output_signature
concrete_output_tf_shape = concrete_f_tf.output_shapes
if not isinstance(concrete_output_tf_shape, (tuple, list)): # Single result
assert not isinstance(self.expected_output_signature, (tuple, list))
expected_output_signature = [self.expected_output_signature]
concrete_output_tf_shape = [concrete_output_tf_shape]
for expected, found in util.safe_zip(expected_output_signature,
concrete_output_tf_shape):
tst.assertEqual(tuple(expected.shape), tuple(found))
# Run the JAX and the TF functions and compare the results
if not self.skip_jax_run:
res_jax = f_jax(*args)
if self.check_result:
res_tf = tf.nest.map_structure(lambda t: t.numpy(), res_tf)
custom_assert_lims = [
l for l in self.limitations if l.custom_assert is not None]
assert len(custom_assert_lims) <= 1, custom_assert_lims
tol = None
if self.tol is not None:
tol = self.tol
elif self.limitations:
max_lim = self.limitations[0].get_max_tolerance_limitation(
self.limitations)
if max_lim is not None:
tol = max_lim.tol
if not custom_assert_lims:
tst.assertAllClose(res_jax, res_tf, atol=tol, rtol=tol)
else:
logging.info(log_message(
f"Running custom_assert with tol={tol} due "
f"to {custom_assert_lims[0]}"))
custom_assert_lims[0].custom_assert(tst, res_jax, res_tf, args=args, # type: ignore
tol=tol, err_msg=None)
return res_tf
else:
return None
else:
return None
def check_shape_poly(tst, f_jax: Callable, *,
arg_descriptors: Sequence[test_harnesses.ArgDescriptor] = (),
skip_jax_run: bool = False,
polymorphic_shapes: Sequence[str | None] = (),
polymorphic_constraints: Sequence[str] = (),
input_signature: Sequence[tf.TensorSpec] | None = None,
expected_output_signature: tf.TensorSpec | None = None,
expect_error=(None, None)) -> jax.Array | None:
# Makes and tests a harness. See PolyHarness documentation.
h = PolyHarness("", "", f_jax,
arg_descriptors=arg_descriptors,
skip_jax_run=skip_jax_run,
polymorphic_shapes=polymorphic_shapes,
polymorphic_constraints=polymorphic_constraints,
input_signature=input_signature,
expected_output_signature=expected_output_signature,
expect_error=expect_error)
return h.run_test(tst)
@jtu.thread_unsafe_test_class()
| PolyHarness |
python | numba__numba | numba/tests/test_linalg.py | {
"start": 81597,
"end": 86295
} | class ____(TestLinalgSystems):
"""
Tests for np.linalg.matrix_rank.
"""
@needs_lapack
def test_linalg_matrix_rank(self):
"""
Test np.linalg.matrix_rank
"""
cfunc = jit(nopython=True)(matrix_rank_matrix)
def check(a, **kwargs):
expected = matrix_rank_matrix(a, **kwargs)
got = cfunc(a, **kwargs)
# Ranks are integral so comparison should be trivial.
# check the rank is the same
np.testing.assert_allclose(got, expected)
# Ensure proper resource management
with self.assertNoNRTLeak():
cfunc(a, **kwargs)
sizes = [(7, 1), (11, 5), (5, 11), (3, 3), (1, 7)]
for size, dtype, order in \
product(sizes, self.dtypes, 'FC'):
# check full rank system
a = self.specific_sample_matrix(size, dtype, order)
check(a)
# If the system is a matrix, check rank deficiency is reported
# correctly. Check all ranks from 0 to (full rank - 1).
tol = 1e-13
# first check 1 to (full rank - 1)
for k in range(1, min(size) - 1):
# check rank k
a = self.specific_sample_matrix(size, dtype, order, rank=k)
self.assertEqual(cfunc(a), k)
check(a)
# check provision of a tolerance works as expected
# create a (m x n) diagonal matrix with a singular value
# guaranteed below the tolerance 1e-13
m, n = a.shape
a[:, :] = 0. # reuse `a`'s memory
idx = np.nonzero(np.eye(m, n))
if np.iscomplexobj(a):
b = 1. + np.random.rand(k) + 1.j +\
1.j * np.random.rand(k)
# min singular value is sqrt(2)*1e-14
b[0] = 1e-14 + 1e-14j
else:
b = 1. + np.random.rand(k)
b[0] = 1e-14 # min singular value is 1e-14
a[idx[0][:k], idx[1][:k]] = b.astype(dtype)
# rank should be k-1 (as tol is present)
self.assertEqual(cfunc(a, tol), k - 1)
check(a, tol=tol)
# then check zero rank
a[:, :] = 0.
self.assertEqual(cfunc(a), 0)
check(a)
# add in a singular value that is small
if np.iscomplexobj(a):
a[-1, -1] = 1e-14 + 1e-14j
else:
a[-1, -1] = 1e-14
# check the system has zero rank to a given tolerance
self.assertEqual(cfunc(a, tol), 0)
check(a, tol=tol)
# check the zero vector returns rank 0 and a nonzero vector
# returns rank 1.
for dt in self.dtypes:
a = np.zeros((5), dtype=dt)
self.assertEqual(cfunc(a), 0)
check(a)
# make it a nonzero vector
a[0] = 1.
self.assertEqual(cfunc(a), 1)
check(a)
# empty
for sz in [(0, 1), (1, 0), (0, 0)]:
for tol in [None, 1e-13]:
self.assert_raise_on_empty(cfunc, (np.empty(sz), tol))
rn = "matrix_rank"
# Wrong dtype
self.assert_wrong_dtype(rn, cfunc,
(np.ones((2, 2), dtype=np.int32),))
# Dimension issue
self.assert_wrong_dimensions_1D(
rn, cfunc, (np.ones(
12, dtype=np.float64).reshape(
2, 2, 3),))
# no nans or infs for 2D case
self.assert_no_nan_or_inf(cfunc,
(np.array([[1., 2., ], [np.inf, np.nan]],
dtype=np.float64),))
@needs_lapack
def test_no_input_mutation(self):
# this is here to test no input mutation by
# numba.np.linalg._compute_singular_values
# which is the workhorse for norm with 2d input, rank and cond.
X = np.array([[1., 3, 2, 7,],
[-5, 4, 2, 3,],
[9, -3, 1, 1,],
[2, -2, 2, 8,]], order='F')
X_orig = np.copy(X)
@jit(nopython=True)
def func(X, test):
if test:
# not executed, but necessary to trigger A ordering in X
X = X[1:2, :]
return np.linalg.matrix_rank(X)
expected = func.py_func(X, False)
np.testing.assert_allclose(X, X_orig)
got = func(X, False)
np.testing.assert_allclose(X, X_orig)
np.testing.assert_allclose(expected, got)
| TestLinalgMatrixRank |
python | google__jax | jax/_src/numpy/setops.py | {
"start": 36551,
"end": 47942
} | class ____(NamedTuple):
"""Struct returned by :func:`jax.numpy.unique_inverse`."""
values: Array
inverse_indices: Array
@export
def unique_all(x: ArrayLike, /, *, size: int | None = None,
fill_value: ArrayLike | None = None) -> _UniqueAllResult:
"""Return unique values from x, along with indices, inverse indices, and counts.
JAX implementation of :func:`numpy.unique_all`; this is equivalent to calling
:func:`jax.numpy.unique` with `return_index`, `return_inverse`, `return_counts`,
and `equal_nan` set to True.
Because the size of the output of ``unique_all`` is data-dependent, the function
is not typically compatible with :func:`~jax.jit` and other JAX transformations.
The JAX version adds the optional ``size`` argument which must be specified
statically for ``jnp.unique`` to be used in such contexts.
Args:
x: N-dimensional array from which unique values will be extracted.
size: if specified, return only the first ``size`` sorted unique elements. If there are fewer
unique elements than ``size`` indicates, the return value will be padded with ``fill_value``.
fill_value: when ``size`` is specified and there are fewer than the indicated number of
elements, fill the remaining entries ``fill_value``. Defaults to the minimum unique value.
Returns:
A tuple ``(values, indices, inverse_indices, counts)``, with the following properties:
- ``values``:
an array of shape ``(n_unique,)`` containing the unique values from ``x``.
- ``indices``:
An array of shape ``(n_unique,)``. Contains the indices of the first occurrence of
each unique value in ``x``. For 1D inputs, ``x[indices]`` is equivalent to ``values``.
- ``inverse_indices``:
An array of shape ``x.shape``. Contains the indices within ``values`` of each value
in ``x``. For 1D inputs, ``values[inverse_indices]`` is equivalent to ``x``.
- ``counts``:
An array of shape ``(n_unique,)``. Contains the number of occurrences of each unique
value in ``x``.
See also:
- :func:`jax.numpy.unique`: general function for computing unique values.
- :func:`jax.numpy.unique_values`: compute only ``values``.
- :func:`jax.numpy.unique_counts`: compute only ``values`` and ``counts``.
- :func:`jax.numpy.unique_inverse`: compute only ``values`` and ``inverse``.
Examples:
Here we compute the unique values in a 1D array:
>>> x = jnp.array([3, 4, 1, 3, 1])
>>> result = jnp.unique_all(x)
The result is a :class:`~typing.NamedTuple` with four named attributes.
The ``values`` attribute contains the unique values from the array:
>>> result.values
Array([1, 3, 4], dtype=int32)
The ``indices`` attribute contains the indices of the unique ``values`` within
the input array:
>>> result.indices
Array([2, 0, 1], dtype=int32)
>>> jnp.all(result.values == x[result.indices])
Array(True, dtype=bool)
The ``inverse_indices`` attribute contains the indices of the input within ``values``:
>>> result.inverse_indices
Array([1, 2, 0, 1, 0], dtype=int32)
>>> jnp.all(x == result.values[result.inverse_indices])
Array(True, dtype=bool)
The ``counts`` attribute contains the counts of each unique value in the input:
>>> result.counts
Array([2, 2, 1], dtype=int32)
For examples of the ``size`` and ``fill_value`` arguments, see :func:`jax.numpy.unique`.
"""
x = ensure_arraylike("unique_all", x)
values, indices, inverse_indices, counts = unique(
x, return_index=True, return_inverse=True, return_counts=True, equal_nan=False,
size=size, fill_value=fill_value)
return _UniqueAllResult(values=values, indices=indices, inverse_indices=inverse_indices, counts=counts)
@export
def unique_counts(x: ArrayLike, /, *, size: int | None = None,
fill_value: ArrayLike | None = None) -> _UniqueCountsResult:
"""Return unique values from x, along with counts.
JAX implementation of :func:`numpy.unique_counts`; this is equivalent to calling
:func:`jax.numpy.unique` with `return_counts` and `equal_nan` set to True.
Because the size of the output of ``unique_counts`` is data-dependent, the function
is not typically compatible with :func:`~jax.jit` and other JAX transformations.
The JAX version adds the optional ``size`` argument which must be specified
statically for ``jnp.unique`` to be used in such contexts.
Args:
x: N-dimensional array from which unique values will be extracted.
size: if specified, return only the first ``size`` sorted unique elements. If there are fewer
unique elements than ``size`` indicates, the return value will be padded with ``fill_value``.
fill_value: when ``size`` is specified and there are fewer than the indicated number of
elements, fill the remaining entries ``fill_value``. Defaults to the minimum unique value.
Returns:
A tuple ``(values, counts)``, with the following properties:
- ``values``:
an array of shape ``(n_unique,)`` containing the unique values from ``x``.
- ``counts``:
An array of shape ``(n_unique,)``. Contains the number of occurrences of each unique
value in ``x``.
See also:
- :func:`jax.numpy.unique`: general function for computing unique values.
- :func:`jax.numpy.unique_values`: compute only ``values``.
- :func:`jax.numpy.unique_inverse`: compute only ``values`` and ``inverse``.
- :func:`jax.numpy.unique_all`: compute ``values``, ``indices``, ``inverse_indices``,
and ``counts``.
Examples:
Here we compute the unique values in a 1D array:
>>> x = jnp.array([3, 4, 1, 3, 1])
>>> result = jnp.unique_counts(x)
The result is a :class:`~typing.NamedTuple` with two named attributes.
The ``values`` attribute contains the unique values from the array:
>>> result.values
Array([1, 3, 4], dtype=int32)
The ``counts`` attribute contains the counts of each unique value in the input:
>>> result.counts
Array([2, 2, 1], dtype=int32)
For examples of the ``size`` and ``fill_value`` arguments, see :func:`jax.numpy.unique`.
"""
x = ensure_arraylike("unique_counts", x)
values, counts = unique(x, return_counts=True, equal_nan=False,
size=size, fill_value=fill_value)
return _UniqueCountsResult(values=values, counts=counts)
@export
def unique_inverse(x: ArrayLike, /, *, size: int | None = None,
fill_value: ArrayLike | None = None) -> _UniqueInverseResult:
"""Return unique values from x, along with indices, inverse indices, and counts.
JAX implementation of :func:`numpy.unique_inverse`; this is equivalent to calling
:func:`jax.numpy.unique` with `return_inverse` and `equal_nan` set to True.
Because the size of the output of ``unique_inverse`` is data-dependent, the function
is not typically compatible with :func:`~jax.jit` and other JAX transformations.
The JAX version adds the optional ``size`` argument which must be specified
statically for ``jnp.unique`` to be used in such contexts.
Args:
x: N-dimensional array from which unique values will be extracted.
size: if specified, return only the first ``size`` sorted unique elements. If there are fewer
unique elements than ``size`` indicates, the return value will be padded with ``fill_value``.
fill_value: when ``size`` is specified and there are fewer than the indicated number of
elements, fill the remaining entries ``fill_value``. Defaults to the minimum unique value.
Returns:
A tuple ``(values, indices, inverse_indices, counts)``, with the following properties:
- ``values``:
an array of shape ``(n_unique,)`` containing the unique values from ``x``.
- ``inverse_indices``:
An array of shape ``x.shape``. Contains the indices within ``values`` of each value
in ``x``. For 1D inputs, ``values[inverse_indices]`` is equivalent to ``x``.
See also:
- :func:`jax.numpy.unique`: general function for computing unique values.
- :func:`jax.numpy.unique_values`: compute only ``values``.
- :func:`jax.numpy.unique_counts`: compute only ``values`` and ``counts``.
- :func:`jax.numpy.unique_all`: compute ``values``, ``indices``, ``inverse_indices``,
and ``counts``.
Examples:
Here we compute the unique values in a 1D array:
>>> x = jnp.array([3, 4, 1, 3, 1])
>>> result = jnp.unique_inverse(x)
The result is a :class:`~typing.NamedTuple` with two named attributes.
The ``values`` attribute contains the unique values from the array:
>>> result.values
Array([1, 3, 4], dtype=int32)
The ``indices`` attribute contains the indices of the unique ``values`` within
the input array:
The ``inverse_indices`` attribute contains the indices of the input within ``values``:
>>> result.inverse_indices
Array([1, 2, 0, 1, 0], dtype=int32)
>>> jnp.all(x == result.values[result.inverse_indices])
Array(True, dtype=bool)
For examples of the ``size`` and ``fill_value`` arguments, see :func:`jax.numpy.unique`.
"""
x = ensure_arraylike("unique_inverse", x)
values, inverse_indices = unique(x, return_inverse=True, equal_nan=False,
size=size, fill_value=fill_value)
return _UniqueInverseResult(values=values, inverse_indices=inverse_indices)
@export
def unique_values(x: ArrayLike, /, *, size: int | None = None,
fill_value: ArrayLike | None = None) -> Array:
"""Return unique values from x, along with indices, inverse indices, and counts.
JAX implementation of :func:`numpy.unique_values`; this is equivalent to calling
:func:`jax.numpy.unique` with `equal_nan` set to True.
Because the size of the output of ``unique_values`` is data-dependent, the function
is not typically compatible with :func:`~jax.jit` and other JAX transformations.
The JAX version adds the optional ``size`` argument which must be specified statically
for ``jnp.unique`` to be used in such contexts.
Args:
x: N-dimensional array from which unique values will be extracted.
size: if specified, return only the first ``size`` sorted unique elements. If there are fewer
unique elements than ``size`` indicates, the return value will be padded with ``fill_value``.
fill_value: when ``size`` is specified and there are fewer than the indicated number of
elements, fill the remaining entries ``fill_value``. Defaults to the minimum unique value.
Returns:
An array ``values`` of shape ``(n_unique,)`` containing the unique values from ``x``.
See also:
- :func:`jax.numpy.unique`: general function for computing unique values.
- :func:`jax.numpy.unique_values`: compute only ``values``.
- :func:`jax.numpy.unique_counts`: compute only ``values`` and ``counts``.
- :func:`jax.numpy.unique_inverse`: compute only ``values`` and ``inverse``.
Examples:
Here we compute the unique values in a 1D array:
>>> x = jnp.array([3, 4, 1, 3, 1])
>>> jnp.unique_values(x)
Array([1, 3, 4], dtype=int32)
For examples of the ``size`` and ``fill_value`` arguments, see :func:`jax.numpy.unique`.
"""
x = ensure_arraylike("unique_values", x)
return cast(Array, unique(x, equal_nan=False, size=size, fill_value=fill_value))
| _UniqueInverseResult |
python | streamlit__streamlit | lib/tests/streamlit/elements/lib/index_test.py | {
"start": 791,
"end": 2354
} | class ____(unittest.TestCase):
def test_index_list(self):
assert index_([1, 2, 3, 4], 1) == 0
assert index_([1, 2, 3, 4], 4) == 3
def test_index_list_success(self):
assert index_([1, 2, 3, 4], 2) == 1
def test_index_list_fails(self):
with pytest.raises(ValueError, match="5 is not in iterable"):
index_([1, 2, 3, 4], 5)
def test_index_tuple(self):
assert index_((1, 2, 3, 4), 1) == 0
assert index_((1, 2, 3, 4), 4) == 3
def test_index_tuple_success(self):
assert index_((1, 2, 3, 4), 2) == 1
def test_index_tuple_fails(self):
with pytest.raises(ValueError, match="5 is not in iterable"):
index_((1, 2, 3, 4), 5)
def test_index_numpy_array(self):
assert index_(np.array([1, 2, 3, 4]), 1) == 0
assert index_(np.array([1, 2, 3, 4]), 4) == 3
def test_index_numpy_array_success(self):
assert index_(np.array([1, 2, 3, 4]), 2) == 1
def test_index_numpy_array_fails(self):
with pytest.raises(ValueError, match="5 is not in iterable"):
index_(np.array([1, 2, 3, 4]), 5)
def test_index_pandas_series(self):
assert index_(pd.Series([1, 2, 3, 4]), 1) == 0
assert index_(pd.Series([1, 2, 3, 4]), 4) == 3
def test_index_pandas_series_success(self):
assert index_(pd.Series([1, 2, 3, 4]), 2) == 1
def test_index_pandas_series_fails(self):
with pytest.raises(ValueError, match="5 is not in iterable"):
index_(pd.Series([1, 2, 3, 4]), 5)
| Index_Test |
python | tensorflow__tensorflow | tensorflow/python/data/benchmarks/meta_benchmark.py | {
"start": 1093,
"end": 5624
} | class ____(test.Benchmark):
"""Benchmark that compares various ways of running tf.data benchmarks."""
# Note that each of these benchmarks is a separate method so that we can
# run them independently and collect a performance profile.
def setup_fast_dataset(self):
self.num_reps = 15
self.iters = 100000
options = options_lib.Options()
options.experimental_optimization.apply_default_optimizations = False
return dataset_ops.Dataset.range(10000**2).with_options(options)
def benchmark_fast_dataset_with_only_cpp_iterations(self):
dataset = self.setup_fast_dataset()
self.run_benchmark_with_only_cpp_iterations(dataset)
def benchmark_fast_dataset_with_session_run(self):
dataset = self.setup_fast_dataset()
self.run_benchmark_with_session_run(dataset)
def benchmark_fast_dataset_with_session_callable(self):
dataset = self.setup_fast_dataset()
self.run_benchmark_with_session_run(dataset, make_callable=True)
def benchmark_fast_dataset_in_eager(self):
with context.eager_mode():
dataset = self.setup_fast_dataset()
self.run_benchmark_in_eager(dataset)
def setup_slow_dataset(self):
dataset = self.setup_fast_dataset()
self.iters = 1000
# sleep for 1e-3s per iteration
return dataset.apply(testing.sleep(1000))
def benchmark_slow_dataset_with_only_cpp_iterations(self):
dataset = self.setup_slow_dataset()
self.run_benchmark_with_only_cpp_iterations(dataset)
def benchmark_slow_dataset_with_session_run(self):
dataset = self.setup_slow_dataset()
self.run_benchmark_with_session_run(dataset)
def benchmark_slow_dataset_with_session_callable(self):
dataset = self.setup_slow_dataset()
self.run_benchmark_with_session_run(dataset, make_callable=True)
def benchmark_slow_dataset_in_eager(self):
with context.eager_mode():
dataset = self.setup_slow_dataset()
self.run_benchmark_in_eager(dataset)
def report(self, deltas):
# Each `delta` is the time taken for `self.iters` iterations. Divide by the
# number of iterations here to get per-element iteration time.
deltas = np.array(deltas) / self.iters
# Discard the first 5 results from "warming up" the session.
deltas = deltas[5:]
median = np.median(deltas)
mean = np.mean(deltas)
min_val = np.min(deltas)
max_val = np.max(deltas)
extras = {
"iters_per_second": 1 / median,
"median": median,
"mean": mean,
"min": min_val,
"max": max_val,
"num_reps": self.num_reps - 5,
}
self.report_benchmark(wall_time=median, iters=self.iters, extras=extras)
def run_benchmark_in_eager(self, dataset):
deltas = []
for _ in range(self.num_reps):
iterator = iter(dataset)
deltas.append(timeit.timeit(lambda: next(iterator), number=self.iters)) # pylint: disable=cell-var-from-loop
self.report(deltas)
def run_benchmark_with_session_run(self, dataset, make_callable=False):
iterator = dataset_ops.make_initializable_iterator(dataset)
next_element = iterator.get_next()
with session.Session() as sess:
deltas = []
for _ in range(self.num_reps):
if make_callable:
get_next_element = sess.make_callable(next_element)
else:
# Note: session.run(next_element.op) is more performant than
# session.run(next_element) because we avoid the cost of copying the
# tensor from C++ to python.
get_next_element = lambda: sess.run(next_element.op)
sess.run(iterator.initializer)
deltas.append(timeit.timeit(get_next_element, number=self.iters))
self.report(deltas)
def run_benchmark_with_only_cpp_iterations(self, dataset):
"""Benchmarks the dataset with the iterations performed in C++."""
# NOTE: We use `dataset.skip()` to perform the iterations in C++, avoiding
# the overhead of multiple `session.run()` calls. Note that this relies on
# the underlying implementation of `skip`: if it is optimized in the future,
# we will have to change this code.
dataset = dataset.skip(self.iters - 1)
iterator = dataset_ops.make_initializable_iterator(dataset)
next_element = iterator.get_next()
with session.Session() as sess:
deltas = []
for _ in range(self.num_reps):
sess.run(iterator.initializer)
deltas.append(
timeit.timeit(lambda: sess.run(next_element.op), number=1))
self.report(deltas)
if __name__ == "__main__":
test.main()
| MetaBenchmark |
python | huggingface__transformers | src/transformers/models/ernie/modeling_ernie.py | {
"start": 32585,
"end": 36944
} | class ____(ErniePreTrainedModel):
_tied_weights_keys = {
"cls.predictions.decoder.bias": "cls.predictions.bias",
"cls.predictions.decoder.weight": "ernie.embeddings.word_embeddings.weight",
}
def __init__(self, config):
super().__init__(config)
self.ernie = ErnieModel(config)
self.cls = ErniePreTrainingHeads(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
self.cls.predictions.bias = new_embeddings.bias
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
task_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
next_sentence_label: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], ErnieForPreTrainingOutput]:
r"""
task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Task type embedding is a special embedding to represent the characteristic of different tasks, such as
word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
config.task_type_vocab_size-1]
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence
pair (see `input_ids` docstring) Indices should be in `[0, 1]`:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Example:
```python
>>> from transformers import AutoTokenizer, ErnieForPreTraining
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
>>> model = ErnieForPreTraining.from_pretrained("nghuyong/ernie-1.0-base-zh")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
```
"""
outputs = self.ernie(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
task_type_ids=task_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
return_dict=True,
**kwargs,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
total_loss = None
if labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
return ErnieForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
seq_relationship_logits=seq_relationship_score,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| ErnieForPreTraining |
python | psf__black | src/black/trans.py | {
"start": 38426,
"end": 50984
} | class ____(StringTransformer):
"""
Abstract class for StringTransformers which transform a Line's strings by splitting
them or placing them on their own lines where necessary to avoid going over
the configured line length.
Requirements:
* The target string value is responsible for the line going over the
line length limit. It follows that after all of black's other line
split methods have been exhausted, this line (or one of the resulting
lines after all line splits are performed) would still be over the
line_length limit unless we split this string.
AND
* The target string is NOT a "pointless" string (i.e. a string that has
no parent or siblings).
AND
* The target string is not followed by an inline comment that appears
to be a pragma.
AND
* The target string is not a multiline (i.e. triple-quote) string.
"""
STRING_OPERATORS: Final = [
token.EQEQUAL,
token.GREATER,
token.GREATEREQUAL,
token.LESS,
token.LESSEQUAL,
token.NOTEQUAL,
token.PERCENT,
token.PLUS,
token.STAR,
]
@abstractmethod
def do_splitter_match(self, line: Line) -> TMatchResult:
"""
BaseStringSplitter asks its clients to override this method instead of
`StringTransformer.do_match(...)`.
Follows the same protocol as `StringTransformer.do_match(...)`.
Refer to `help(StringTransformer.do_match)` for more information.
"""
def do_match(self, line: Line) -> TMatchResult:
match_result = self.do_splitter_match(line)
if isinstance(match_result, Err):
return match_result
string_indices = match_result.ok()
assert len(string_indices) == 1, (
f"{self.__class__.__name__} should only find one match at a time, found"
f" {len(string_indices)}"
)
string_idx = string_indices[0]
vresult = self._validate(line, string_idx)
if isinstance(vresult, Err):
return vresult
return match_result
def _validate(self, line: Line, string_idx: int) -> TResult[None]:
"""
Checks that @line meets all of the requirements listed in this classes'
docstring. Refer to `help(BaseStringSplitter)` for a detailed
description of those requirements.
Returns:
* Ok(None), if ALL of the requirements are met.
OR
* Err(CannotTransform), if ANY of the requirements are NOT met.
"""
LL = line.leaves
string_leaf = LL[string_idx]
max_string_length = self._get_max_string_length(line, string_idx)
if len(string_leaf.value) <= max_string_length:
return TErr(
"The string itself is not what is causing this line to be too long."
)
if not string_leaf.parent or [L.type for L in string_leaf.parent.children] == [
token.STRING,
token.NEWLINE,
]:
return TErr(
f"This string ({string_leaf.value}) appears to be pointless (i.e. has"
" no parent)."
)
if id(line.leaves[string_idx]) in line.comments and contains_pragma_comment(
line.comments[id(line.leaves[string_idx])]
):
return TErr(
"Line appears to end with an inline pragma comment. Splitting the line"
" could modify the pragma's behavior."
)
if has_triple_quotes(string_leaf.value):
return TErr("We cannot split multiline strings.")
return Ok(None)
def _get_max_string_length(self, line: Line, string_idx: int) -> int:
"""
Calculates the max string length used when attempting to determine
whether or not the target string is responsible for causing the line to
go over the line length limit.
WARNING: This method is tightly coupled to both StringSplitter and
(especially) StringParenWrapper. There is probably a better way to
accomplish what is being done here.
Returns:
max_string_length: such that `line.leaves[string_idx].value >
max_string_length` implies that the target string IS responsible
for causing this line to exceed the line length limit.
"""
LL = line.leaves
is_valid_index = is_valid_index_factory(LL)
# We use the shorthand "WMA4" in comments to abbreviate "We must
# account for". When giving examples, we use STRING to mean some/any
# valid string.
#
# Finally, we use the following convenience variables:
#
# P: The leaf that is before the target string leaf.
# N: The leaf that is after the target string leaf.
# NN: The leaf that is after N.
# WMA4 the whitespace at the beginning of the line.
offset = line.depth * 4
if is_valid_index(string_idx - 1):
p_idx = string_idx - 1
if (
LL[string_idx - 1].type == token.LPAR
and LL[string_idx - 1].value == ""
and string_idx >= 2
):
# If the previous leaf is an empty LPAR placeholder, we should skip it.
p_idx -= 1
P = LL[p_idx]
if P.type in self.STRING_OPERATORS:
# WMA4 a space and a string operator (e.g. `+ STRING` or `== STRING`).
offset += len(str(P)) + 1
if P.type == token.COMMA:
# WMA4 a space, a comma, and a closing bracket [e.g. `), STRING`].
offset += 3
if P.type in [token.COLON, token.EQUAL, token.PLUSEQUAL, token.NAME]:
# This conditional branch is meant to handle dictionary keys,
# variable assignments, 'return STRING' statement lines, and
# 'else STRING' ternary expression lines.
# WMA4 a single space.
offset += 1
# WMA4 the lengths of any leaves that came before that space,
# but after any closing bracket before that space.
for leaf in reversed(LL[: p_idx + 1]):
offset += len(str(leaf))
if leaf.type in CLOSING_BRACKETS:
break
if is_valid_index(string_idx + 1):
N = LL[string_idx + 1]
if N.type == token.RPAR and N.value == "" and len(LL) > string_idx + 2:
# If the next leaf is an empty RPAR placeholder, we should skip it.
N = LL[string_idx + 2]
if N.type == token.COMMA:
# WMA4 a single comma at the end of the string (e.g `STRING,`).
offset += 1
if is_valid_index(string_idx + 2):
NN = LL[string_idx + 2]
if N.type == token.DOT and NN.type == token.NAME:
# This conditional branch is meant to handle method calls invoked
# off of a string literal up to and including the LPAR character.
# WMA4 the '.' character.
offset += 1
if (
is_valid_index(string_idx + 3)
and LL[string_idx + 3].type == token.LPAR
):
# WMA4 the left parenthesis character.
offset += 1
# WMA4 the length of the method's name.
offset += len(NN.value)
has_comments = False
for comment_leaf in line.comments_after(LL[string_idx]):
if not has_comments:
has_comments = True
# WMA4 two spaces before the '#' character.
offset += 2
# WMA4 the length of the inline comment.
offset += len(comment_leaf.value)
max_string_length = count_chars_in_width(str(line), self.line_length - offset)
return max_string_length
@staticmethod
def _prefer_paren_wrap_match(LL: list[Leaf]) -> int | None:
"""
Returns:
string_idx such that @LL[string_idx] is equal to our target (i.e.
matched) string, if this line matches the "prefer paren wrap" statement
requirements listed in the 'Requirements' section of the StringParenWrapper
class's docstring.
OR
None, otherwise.
"""
# The line must start with a string.
if LL[0].type != token.STRING:
return None
matching_nodes = [
syms.listmaker,
syms.dictsetmaker,
syms.testlist_gexp,
]
# If the string is an immediate child of a list/set/tuple literal...
if (
parent_type(LL[0]) in matching_nodes
or parent_type(LL[0].parent) in matching_nodes
):
# And the string is surrounded by commas (or is the first/last child)...
prev_sibling = LL[0].prev_sibling
next_sibling = LL[0].next_sibling
if (
not prev_sibling
and not next_sibling
and parent_type(LL[0]) == syms.atom
):
# If it's an atom string, we need to check the parent atom's siblings.
parent = LL[0].parent
assert parent is not None # For type checkers.
prev_sibling = parent.prev_sibling
next_sibling = parent.next_sibling
if (not prev_sibling or prev_sibling.type == token.COMMA) and (
not next_sibling or next_sibling.type == token.COMMA
):
return 0
return None
def iter_fexpr_spans(s: str) -> Iterator[tuple[int, int]]:
"""
Yields spans corresponding to expressions in a given f-string.
Spans are half-open ranges (left inclusive, right exclusive).
Assumes the input string is a valid f-string, but will not crash if the input
string is invalid.
"""
stack: list[int] = [] # our curly paren stack
i = 0
while i < len(s):
if s[i] == "{":
# if we're in a string part of the f-string, ignore escaped curly braces
if not stack and i + 1 < len(s) and s[i + 1] == "{":
i += 2
continue
stack.append(i)
i += 1
continue
if s[i] == "}":
if not stack:
i += 1
continue
j = stack.pop()
# we've made it back out of the expression! yield the span
if not stack:
yield (j, i + 1)
i += 1
continue
# if we're in an expression part of the f-string, fast-forward through strings
# note that backslashes are not legal in the expression portion of f-strings
if stack:
delim = None
if s[i : i + 3] in ("'''", '"""'):
delim = s[i : i + 3]
elif s[i] in ("'", '"'):
delim = s[i]
if delim:
i += len(delim)
while i < len(s) and s[i : i + len(delim)] != delim:
i += 1
i += len(delim)
continue
i += 1
def fstring_contains_expr(s: str) -> bool:
return any(iter_fexpr_spans(s))
def _toggle_fexpr_quotes(fstring: str, old_quote: str) -> str:
"""
Toggles quotes used in f-string expressions that are `old_quote`.
f-string expressions can't contain backslashes, so we need to toggle the
quotes if the f-string itself will end up using the same quote. We can
simply toggle without escaping because, quotes can't be reused in f-string
expressions. They will fail to parse.
NOTE: If PEP 701 is accepted, above statement will no longer be true.
Though if quotes can be reused, we can simply reuse them without updates or
escaping, once Black figures out how to parse the new grammar.
"""
new_quote = "'" if old_quote == '"' else '"'
parts = []
previous_index = 0
for start, end in iter_fexpr_spans(fstring):
parts.append(fstring[previous_index:start])
parts.append(fstring[start:end].replace(old_quote, new_quote))
previous_index = end
parts.append(fstring[previous_index:])
return "".join(parts)
| BaseStringSplitter |
python | tensorflow__tensorflow | tensorflow/python/keras/engine/training_v1.py | {
"start": 125150,
"end": 126990
} | class ____(Model):
"""Model that is used for callbacks with tf.distribute.Strategy."""
def __init__(self, model):
super(DistributedCallbackModel, self).__init__()
self.optimizer = model.optimizer
def set_original_model(self, orig_model):
self._original_model = orig_model
def save_weights(self, filepath, overwrite=True, save_format=None):
self._replicated_model.save_weights(filepath, overwrite=overwrite,
save_format=save_format)
def save(self, filepath, overwrite=True, include_optimizer=True):
# save weights from the distributed model to the original model
distributed_model_weights = self.get_weights()
self._original_model.set_weights(distributed_model_weights)
# TODO(anjalisridhar): Do we need to save the original model here?
# Saving the first replicated model works as well.
self._original_model.save(filepath, overwrite=True, include_optimizer=False)
def load_weights(self, filepath, by_name=False):
self._original_model.load_weights(filepath, by_name=False)
# Copy the weights from the original model to each of the replicated models.
orig_model_weights = self._original_model.get_weights()
distributed_training_utils_v1.set_weights(
self._original_model._distribution_strategy, self, # pylint: disable=protected-access
orig_model_weights)
def __getattr__(self, item):
# Allowed attributes of the model that can be accessed by the user
# during a callback.
if item not in ('_setattr_tracking', '_layers'):
logging.warning('You are accessing attribute ' + item + ' of the '
'DistributedCallbackModel that may not have been set '
'correctly.')
return super(DistributedCallbackModel, self).__getattr__(item)
| DistributedCallbackModel |
python | getsentry__sentry | src/sentry/workflow_engine/endpoints/serializers/detector_workflow_serializer.py | {
"start": 188,
"end": 313
} | class ____(TypedDict):
id: str
detectorId: str
workflowId: str
@register(DetectorWorkflow)
| DetectorWorkflowResponse |
python | scipy__scipy | scipy/sparse/_csc.py | {
"start": 343,
"end": 5322
} | class ____(_cs_matrix):
_format = 'csc'
def transpose(self, axes=None, copy=False):
if axes is not None and axes != (1, 0):
raise ValueError("Sparse arrays/matrices do not support "
"an 'axes' parameter because swapping "
"dimensions is the only logical permutation.")
M, N = self.shape
return self._csr_container((self.data, self.indices,
self.indptr), (N, M), copy=copy)
transpose.__doc__ = _spbase.transpose.__doc__
def __iter__(self):
yield from self.tocsr()
def tocsc(self, copy=False):
if copy:
return self.copy()
else:
return self
tocsc.__doc__ = _spbase.tocsc.__doc__
def tocsr(self, copy=False):
M,N = self.shape
idx_dtype = self._get_index_dtype((self.indptr, self.indices),
maxval=max(self.nnz, N))
indptr = np.empty(M + 1, dtype=idx_dtype)
indices = np.empty(self.nnz, dtype=idx_dtype)
data = np.empty(self.nnz, dtype=upcast(self.dtype))
csr_tocsc(N, M,
self.indptr.astype(idx_dtype, copy=False),
self.indices.astype(idx_dtype, copy=False),
self.data,
indptr,
indices,
data)
A = self._csr_container(
(data, indices, indptr),
shape=self.shape, copy=False
)
A.has_sorted_indices = True
return A
tocsr.__doc__ = _spbase.tocsr.__doc__
def nonzero(self):
# CSC can't use _cs_matrix's .nonzero method because it
# returns the indices sorted for self transposed.
# Get row and col indices, from _cs_matrix.tocoo
major_dim, minor_dim = self._swap(self.shape)
minor_indices = self.indices
major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)
expandptr(major_dim, self.indptr, major_indices)
row, col = self._swap((major_indices, minor_indices))
# Remove explicit zeros
nz_mask = self.data != 0
row = row[nz_mask]
col = col[nz_mask]
# Sort them to be in C-style order
ind = np.argsort(row, kind='mergesort')
row = row[ind]
col = col[ind]
return row, col
nonzero.__doc__ = _cs_matrix.nonzero.__doc__
def _getrow(self, i):
"""Returns a copy of row i of the matrix, as a (1 x n)
CSR matrix (row vector).
"""
M, N = self.shape
i = int(i)
if i < 0:
i += M
if i < 0 or i >= M:
raise IndexError(f'index ({i}) out of range')
return self._get_submatrix(minor=i).tocsr()
def _getcol(self, i):
"""Returns a copy of column i of the matrix, as a (m x 1)
CSC matrix (column vector).
"""
M, N = self.shape
i = int(i)
if i < 0:
i += N
if i < 0 or i >= N:
raise IndexError(f'index ({i}) out of range')
return self._get_submatrix(major=i, copy=True)
def _get_intXarray(self, row, col):
return self._major_index_fancy(col)._get_submatrix(minor=row)
def _get_intXslice(self, row, col):
if col.step in (1, None):
return self._get_submatrix(major=col, minor=row, copy=True)
return self._major_slice(col)._get_submatrix(minor=row)
def _get_sliceXint(self, row, col):
if row.step in (1, None):
return self._get_submatrix(major=col, minor=row, copy=True)
return self._get_submatrix(major=col)._minor_slice(row)
def _get_sliceXarray(self, row, col):
return self._major_index_fancy(col)._minor_slice(row)
def _get_arrayXint(self, row, col):
res = self._get_submatrix(major=col)._minor_index_fancy(row)
if row.ndim > 1:
return res.reshape(row.shape)
return res
def _get_arrayXslice(self, row, col):
return self._major_slice(col)._minor_index_fancy(row)
# these functions are used by the parent class (_cs_matrix)
# to remove redundancy between csc_array and csr_matrix
@staticmethod
def _swap(x):
"""swap the members of x if this is a column-oriented matrix
"""
return x[1], x[0]
def isspmatrix_csc(x):
"""Is `x` of csc_matrix type?
Parameters
----------
x
object to check for being a csc matrix
Returns
-------
bool
True if `x` is a csc matrix, False otherwise
Examples
--------
>>> from scipy.sparse import csc_array, csc_matrix, coo_matrix, isspmatrix_csc
>>> isspmatrix_csc(csc_matrix([[5]]))
True
>>> isspmatrix_csc(csc_array([[5]]))
False
>>> isspmatrix_csc(coo_matrix([[5]]))
False
"""
return isinstance(x, csc_matrix)
# This namespace class separates array from matrix with isinstance
| _csc_base |
python | fastapi__sqlmodel | docs_src/tutorial/connect/delete/tutorial001_py310.py | {
"start": 214,
"end": 2273
} | class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
team_id: int | None = Field(default=None, foreign_key="team.id")
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar")
session.add(team_preventers)
session.add(team_z_force)
session.commit()
hero_deadpond = Hero(
name="Deadpond", secret_name="Dive Wilson", team_id=team_z_force.id
)
hero_rusty_man = Hero(
name="Rusty-Man",
secret_name="Tommy Sharp",
age=48,
team_id=team_preventers.id,
)
hero_spider_boy = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Created hero:", hero_deadpond)
print("Created hero:", hero_rusty_man)
print("Created hero:", hero_spider_boy)
hero_spider_boy.team_id = team_preventers.id
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_spider_boy)
print("Updated hero:", hero_spider_boy)
hero_spider_boy.team_id = None
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_spider_boy)
print("No longer Preventer:", hero_spider_boy)
def main():
create_db_and_tables()
create_heroes()
if __name__ == "__main__":
main()
| Hero |
python | wandb__wandb | wandb/sdk/data_types/_dtypes.py | {
"start": 11332,
"end": 11429
} | class ____(Type):
name = "none"
types: t.ClassVar[t.List[type]] = [None.__class__]
| NoneType |
python | tensorflow__tensorflow | tensorflow/core/function/trace_type/serialization_test.py | {
"start": 3424,
"end": 7809
} | class ____(test.TestCase):
def testCustomClassSerialization(self):
my_custom = MyCustomClass(1234, "my_name")
serialized = serialization.serialize(my_custom)
self.assertTrue(
serialized.representation.Is(
serialization_test_pb2.MyCustomRepresentation.DESCRIPTOR))
proto = serialization_test_pb2.MyCustomRepresentation()
serialized.representation.Unpack(proto)
self.assertEqual(proto.index, my_custom.index)
self.assertEqual(proto.name, my_custom.name)
def testCustomClassDeserialization(self):
original = MyCustomClass(1234, "my_name")
serialized = serialization.serialize(original)
deserialized = serialization.deserialize(serialized)
self.assertIsInstance(deserialized, MyCustomClass)
self.assertEqual(deserialized.index, original.index)
self.assertEqual(deserialized.name, original.name)
def testCompositeClassSerialization(self):
my_composite = MyCompositeClass(
MyCustomClass(1, "name_1"), MyCustomClass(2, "name_2"),
MyCustomClass(3, "name_3"))
serialized = serialization.serialize(my_composite)
self.assertTrue(
serialized.representation.Is(
serialization_test_pb2.MyCompositeRepresentation.DESCRIPTOR))
proto = serialization_test_pb2.MyCompositeRepresentation()
serialized.representation.Unpack(proto)
self.assertEqual(proto.elements[0],
serialization.serialize(MyCustomClass(1, "name_1")))
self.assertEqual(proto.elements[1],
serialization.serialize(MyCustomClass(2, "name_2")))
self.assertEqual(proto.elements[2],
serialization.serialize(MyCustomClass(3, "name_3")))
def testCompositeClassDeserialization(self):
original = MyCompositeClass(
MyCustomClass(1, "name_1"), MyCustomClass(2, "name_2"),
MyCustomClass(3, "name_3"))
serialized = serialization.serialize(original)
deserialized = serialization.deserialize(serialized)
self.assertIsInstance(deserialized, MyCompositeClass)
self.assertEqual(deserialized.elements[0].index, 1)
self.assertEqual(deserialized.elements[1].index, 2)
self.assertEqual(deserialized.elements[2].index, 3)
self.assertEqual(deserialized.elements[0].name, "name_1")
self.assertEqual(deserialized.elements[1].name, "name_2")
self.assertEqual(deserialized.elements[2].name, "name_3")
def testNonUniqueProto(self):
class ClassThatReusesProto(serialization.Serializable):
@classmethod
def experimental_type_proto(cls):
return serialization_test_pb2.MyCustomRepresentation
@classmethod
def experimental_from_proto(cls, proto):
raise NotImplementedError
def experimental_as_proto(self):
raise NotImplementedError
with self.assertRaisesRegex(
ValueError,
("Existing Python class MyCustomClass already has "
"MyCustomRepresentation as its associated proto representation. "
"Please ensure ClassThatReusesProto has a unique proto representation."
)):
serialization.register_serializable(ClassThatReusesProto)
def testWrongProto(self):
class ClassReturningWrongProto(serialization.Serializable):
@classmethod
def experimental_type_proto(cls):
return serialization.SerializedTraceType
@classmethod
def experimental_from_proto(cls, proto):
raise NotImplementedError
def experimental_as_proto(self):
return serialization_test_pb2.MyCustomRepresentation()
with self.assertRaisesRegex(
ValueError,
("ClassReturningWrongProto returned different type of proto than "
"specified by experimental_type_proto()")):
serialization.serialize(ClassReturningWrongProto())
def testSerializableSuperClass(self):
self.assertEqual(
serialization.deserialize(
serialization.serialize(SerializableFromSuperClassOne())),
SerializableFromSuperClassOne())
self.assertEqual(
serialization.deserialize(
serialization.serialize(SerializableFromSuperClassTwo())),
SerializableFromSuperClassTwo())
self.assertEqual(
serialization.deserialize(
serialization.serialize(SerializableFromSuperClassThree())),
SerializableFromSuperClassThree())
if __name__ == "__main__":
test.main()
| SerializeTest |
python | walkccc__LeetCode | solutions/1963. Minimum Number of Swaps to Make the String Balanced/1963.py | {
"start": 0,
"end": 374
} | class ____:
def minSwaps(self, s: str) -> int:
# Cancel out all the matched pairs, then we'll be left with ']]]..[[['.
# The answer is ceil(# of unmatched pairs // 2).
unmatched = 0
for c in s:
if c == '[':
unmatched += 1
elif unmatched > 0: # c == ']' and there's a match.
unmatched -= 1
return (unmatched + 1) // 2
| Solution |
python | getsentry__sentry | src/sentry/core/endpoints/organization_member_invite/utils.py | {
"start": 306,
"end": 1133
} | class ____(OrganizationPermission):
scope_map = {
"GET": ["member:read", "member:write", "member:admin"],
"PUT": ["member:write", "member:admin"],
# DELETE checks for role comparison as you can either remove a member invite request
# you added, or any member invite / invite request if you have the required scopes
"DELETE": ["member:read", "member:write", "member:admin"],
}
def has_object_permission(self, request, view, organization):
"""
Prevents superuser read from deleting an invite or invite request.
"""
has_perms = super().has_object_permission(request, view, organization)
if is_active_superuser(request) and not superuser_has_permission(request):
return False
return has_perms
| MemberInviteDetailsPermission |
python | FactoryBoy__factory_boy | tests/test_using.py | {
"start": 61149,
"end": 63944
} | class ____(unittest.TestCase):
def test_iterator(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = factory.Iterator(range(10, 30))
objs = TestObjectFactory.build_batch(20)
for i, obj in enumerate(objs):
self.assertEqual(i + 10, obj.one)
@utils.disable_warnings
def test_iterator_list_comprehension_protected(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = factory.Iterator([_j * 3 for _j in range(5)])
# Scope bleeding : _j will end up in TestObjectFactory's scope.
# But factory_boy ignores it, as a protected variable.
objs = TestObjectFactory.build_batch(20)
for i, obj in enumerate(objs):
self.assertEqual(3 * (i % 5), obj.one)
def test_iterator_decorator(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
@factory.iterator
def one():
yield from range(10, 50)
objs = TestObjectFactory.build_batch(20)
for i, obj in enumerate(objs):
self.assertEqual(i + 10, obj.one)
def test_iterator_late_loading(self):
"""Ensure that Iterator doesn't unroll on class creation.
This allows, for Django objects, to call:
foo = factory.Iterator(models.MyThingy.objects.all())
"""
class DBRequest:
def __init__(self):
self.ready = False
def __iter__(self):
if not self.ready:
raise ValueError("Not ready!!")
return iter([1, 2, 3])
# calling __iter__() should crash
req1 = DBRequest()
with self.assertRaises(ValueError):
iter(req1)
req2 = DBRequest()
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = factory.Iterator(req2)
req2.ready = True
obj = TestObjectFactory()
self.assertEqual(1, obj.one)
def test_iterator_time_manipulation(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
@factory.iterator
def one():
now = datetime.datetime.now()
yield now + datetime.timedelta(hours=1)
yield now + datetime.timedelta(hours=2)
obj1, obj2, obj3 = TestObjectFactory.create_batch(3)
# Timers should be t+1H, t+2H, t+1H, t+2H, etc.
self.assertEqual(datetime.timedelta(hours=1), obj2.one - obj1.one)
self.assertEqual(obj1.one, obj3.one)
| IteratorTestCase |
python | etianen__django-reversion | tests/test_app/tests/test_admin.py | {
"start": 9320,
"end": 10461
} | class ____(LoginMixin, AdminMixin, TestBase):
def setUp(self):
super().setUp()
admin.site.register(TestModelEscapePK, VersionAdmin)
self.reloadUrls()
def tearDown(self):
super().tearDown()
admin.site.unregister(TestModelEscapePK)
self.reloadUrls()
def testHistoryWithQuotedPrimaryKey(self):
pk = 'ABC_123'
quoted_pk = admin.utils.quote(pk)
# test is invalid if quoting does not change anything
assert quoted_pk != pk
with reversion.create_revision():
obj = TestModelEscapePK.objects.create(name=pk)
revision_url = resolve_url(
"admin:test_app_testmodelescapepk_revision",
quoted_pk,
Version.objects.get_for_object(obj).get().pk,
)
history_url = resolve_url(
"admin:test_app_testmodelescapepk_history",
quoted_pk
)
response = self.client.get(history_url)
self.assertContains(response, revision_url)
response = self.client.get(revision_url)
self.assertContains(response, f'value="{pk}"')
| AdminQuotingTest |
python | getsentry__sentry | src/sentry/notifications/services/service.py | {
"start": 754,
"end": 3421
} | class ____(RpcService):
key = "notifications"
local_mode = SiloMode.CONTROL
@classmethod
def get_local_implementation(cls) -> RpcService:
from sentry.notifications.services.impl import DatabaseBackedNotificationsService
return DatabaseBackedNotificationsService()
@rpc_method
@abstractmethod
def enable_all_settings_for_provider(
self,
*,
external_provider: ExternalProviderEnum,
user_id: int | None = None,
team_id: int | None = None,
types: list[NotificationSettingEnum] | None = None,
) -> None:
pass
@rpc_method
@abstractmethod
def update_notification_options(
self,
*,
actor: Actor,
type: NotificationSettingEnum,
scope_type: NotificationScopeEnum,
scope_identifier: int,
value: NotificationSettingsOptionEnum,
) -> None:
pass
@rpc_method
@abstractmethod
def remove_notification_settings_for_provider_team(
self, *, team_id: int, provider: ExternalProviders
) -> None:
pass
@rpc_method
@abstractmethod
def remove_notification_settings_for_organization(self, *, organization_id: int) -> None:
pass
@rpc_method
@abstractmethod
def remove_notification_settings_for_project(self, *, project_id: int) -> None:
pass
@rpc_method
@abstractmethod
def subscriptions_for_projects(
self,
*,
user_id: int,
project_ids: list[int],
type: NotificationSettingEnum,
) -> Mapping[int, RpcSubscriptionStatus]:
"""
Returns a mapping of project_id to the subscription status for the provided user_id
"""
pass
@rpc_method
@abstractmethod
def get_participants(
self,
*,
recipients: list[Actor],
type: NotificationSettingEnum,
project_ids: list[int] | None = None,
organization_id: int | None = None,
) -> MutableMapping[int, MutableMapping[int, str]]:
pass
@rpc_method
@abstractmethod
def get_users_for_weekly_reports(
self, *, organization_id: int, user_ids: list[int]
) -> list[int]:
pass
@rpc_method
@abstractmethod
def get_notification_recipients(
self,
*,
recipients: list[Actor],
type: NotificationSettingEnum,
organization_id: int | None = None,
project_ids: list[int] | None = None,
actor_type: ActorType | None = None,
) -> Mapping[str, set[Actor]]:
pass
notifications_service = NotificationsService.create_delegation()
| NotificationsService |
python | TheAlgorithms__Python | conversions/prefix_conversions_string.py | {
"start": 686,
"end": 3181
} | class ____(Enum):
yotta = 24
zetta = 21
exa = 18
peta = 15
tera = 12
giga = 9
mega = 6
kilo = 3
hecto = 2
deca = 1
deci = -1
centi = -2
milli = -3
micro = -6
nano = -9
pico = -12
femto = -15
atto = -18
zepto = -21
yocto = -24
@classmethod
def get_positive(cls) -> dict:
"""
Returns a dictionary with only the elements of this enum
that has a positive value
>>> from itertools import islice
>>> positive = SIUnit.get_positive()
>>> inc = iter(positive.items())
>>> dict(islice(inc, len(positive) // 2))
{'yotta': 24, 'zetta': 21, 'exa': 18, 'peta': 15, 'tera': 12}
>>> dict(inc)
{'giga': 9, 'mega': 6, 'kilo': 3, 'hecto': 2, 'deca': 1}
"""
return {unit.name: unit.value for unit in cls if unit.value > 0}
@classmethod
def get_negative(cls) -> dict:
"""
Returns a dictionary with only the elements of this enum
that has a negative value
@example
>>> from itertools import islice
>>> negative = SIUnit.get_negative()
>>> inc = iter(negative.items())
>>> dict(islice(inc, len(negative) // 2))
{'deci': -1, 'centi': -2, 'milli': -3, 'micro': -6, 'nano': -9}
>>> dict(inc)
{'pico': -12, 'femto': -15, 'atto': -18, 'zepto': -21, 'yocto': -24}
"""
return {unit.name: unit.value for unit in cls if unit.value < 0}
def add_si_prefix(value: float) -> str:
"""
Function that converts a number to his version with SI prefix
@input value (an integer)
@example:
>>> add_si_prefix(10000)
'10.0 kilo'
"""
prefixes = SIUnit.get_positive() if value > 0 else SIUnit.get_negative()
for name_prefix, value_prefix in prefixes.items():
numerical_part = value / (10**value_prefix)
if numerical_part > 1:
return f"{numerical_part!s} {name_prefix}"
return str(value)
def add_binary_prefix(value: float) -> str:
"""
Function that converts a number to his version with Binary prefix
@input value (an integer)
@example:
>>> add_binary_prefix(65536)
'64.0 kilo'
"""
for prefix in BinaryUnit:
numerical_part = value / (2**prefix.value)
if numerical_part > 1:
return f"{numerical_part!s} {prefix.name}"
return str(value)
if __name__ == "__main__":
import doctest
doctest.testmod()
| SIUnit |
python | sympy__sympy | sympy/assumptions/predicates/sets.py | {
"start": 8588,
"end": 9325
} | class ____(Predicate):
r"""
Algebraic number predicate.
Explanation
===========
``Q.algebraic(x)`` is true iff ``x`` belongs to the set of
algebraic numbers. ``x`` is algebraic if there is some polynomial
in ``p(x)\in \mathbb\{Q\}[x]`` such that ``p(x) = 0``.
Examples
========
>>> from sympy import ask, Q, sqrt, I, pi
>>> ask(Q.algebraic(sqrt(2)))
True
>>> ask(Q.algebraic(I))
True
>>> ask(Q.algebraic(pi))
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Algebraic_number
"""
name = 'algebraic'
AlgebraicHandler = Dispatcher(
"AlgebraicHandler",
doc="""Handler for Q.algebraic key."""
)
| AlgebraicPredicate |
python | facebook__pyre-check | client/configuration/search_path.py | {
"start": 1591,
"end": 2052
} | class ____(Element):
site_root: str
package_name: str
is_toplevel_module: bool = False
def package_path(self) -> str:
module_suffix = ".py" if self.is_toplevel_module else ""
return self.package_name + module_suffix
def path(self) -> str:
return os.path.join(self.site_root, self.package_path())
def command_line_argument(self) -> str:
return self.site_root + "$" + self.package_path()
| SitePackageElement |
python | cython__cython | Cython/Plex/Actions.py | {
"start": 655,
"end": 993
} | class ____(Action):
"""
Internal Plex action which causes a function to be called.
"""
def __init__(self, function):
self.function = function
def perform(self, token_stream, text):
return self.function(token_stream, text)
def __repr__(self):
return "Call(%s)" % self.function.__name__
| Call |
python | giampaolo__psutil | tests/test_misc.py | {
"start": 1101,
"end": 6127
} | class ____(PsutilTestCase):
def test_check_pid_range(self):
with pytest.raises(OverflowError):
psutil._psplatform.cext.check_pid_range(2**128)
with pytest.raises(psutil.NoSuchProcess):
psutil.Process(2**128)
def test_process__repr__(self, func=repr):
p = psutil.Process(self.spawn_subproc().pid)
r = func(p)
assert "psutil.Process" in r
assert f"pid={p.pid}" in r
assert f"name='{p.name()}'" in r.replace("name=u'", "name='")
assert "status=" in r
assert "exitcode=" not in r
p.terminate()
p.wait()
r = func(p)
assert "status='terminated'" in r
assert "exitcode=" in r
with mock.patch.object(
psutil.Process,
"name",
side_effect=psutil.ZombieProcess(os.getpid()),
):
p = psutil.Process()
r = func(p)
assert f"pid={p.pid}" in r
assert "status='zombie'" in r
assert "name=" not in r
with mock.patch.object(
psutil.Process,
"name",
side_effect=psutil.NoSuchProcess(os.getpid()),
):
p = psutil.Process()
r = func(p)
assert f"pid={p.pid}" in r
assert "terminated" in r
assert "name=" not in r
with mock.patch.object(
psutil.Process,
"name",
side_effect=psutil.AccessDenied(os.getpid()),
):
p = psutil.Process()
r = func(p)
assert f"pid={p.pid}" in r
assert "name=" not in r
def test_process__str__(self):
self.test_process__repr__(func=str)
def test_error__repr__(self):
assert repr(psutil.Error()) == "psutil.Error()"
def test_error__str__(self):
assert str(psutil.Error()) == ""
def test_no_such_process__repr__(self):
assert (
repr(psutil.NoSuchProcess(321))
== "psutil.NoSuchProcess(pid=321, msg='process no longer exists')"
)
assert (
repr(psutil.NoSuchProcess(321, name="name", msg="msg"))
== "psutil.NoSuchProcess(pid=321, name='name', msg='msg')"
)
def test_no_such_process__str__(self):
assert (
str(psutil.NoSuchProcess(321))
== "process no longer exists (pid=321)"
)
assert (
str(psutil.NoSuchProcess(321, name="name", msg="msg"))
== "msg (pid=321, name='name')"
)
def test_zombie_process__repr__(self):
assert (
repr(psutil.ZombieProcess(321))
== 'psutil.ZombieProcess(pid=321, msg="PID still '
'exists but it\'s a zombie")'
)
assert (
repr(psutil.ZombieProcess(321, name="name", ppid=320, msg="foo"))
== "psutil.ZombieProcess(pid=321, ppid=320, name='name',"
" msg='foo')"
)
def test_zombie_process__str__(self):
assert (
str(psutil.ZombieProcess(321))
== "PID still exists but it's a zombie (pid=321)"
)
assert (
str(psutil.ZombieProcess(321, name="name", ppid=320, msg="foo"))
== "foo (pid=321, ppid=320, name='name')"
)
def test_access_denied__repr__(self):
assert repr(psutil.AccessDenied(321)) == "psutil.AccessDenied(pid=321)"
assert (
repr(psutil.AccessDenied(321, name="name", msg="msg"))
== "psutil.AccessDenied(pid=321, name='name', msg='msg')"
)
def test_access_denied__str__(self):
assert str(psutil.AccessDenied(321)) == "(pid=321)"
assert (
str(psutil.AccessDenied(321, name="name", msg="msg"))
== "msg (pid=321, name='name')"
)
def test_timeout_expired__repr__(self):
assert (
repr(psutil.TimeoutExpired(5))
== "psutil.TimeoutExpired(seconds=5, msg='timeout after 5"
" seconds')"
)
assert (
repr(psutil.TimeoutExpired(5, pid=321, name="name"))
== "psutil.TimeoutExpired(pid=321, name='name', seconds=5, "
"msg='timeout after 5 seconds')"
)
def test_timeout_expired__str__(self):
assert str(psutil.TimeoutExpired(5)) == "timeout after 5 seconds"
assert (
str(psutil.TimeoutExpired(5, pid=321, name="name"))
== "timeout after 5 seconds (pid=321, name='name')"
)
def test_process__eq__(self):
p1 = psutil.Process()
p2 = psutil.Process()
assert p1 == p2
p2._ident = (0, 0)
assert p1 != p2
assert p1 != 'foo'
def test_process__hash__(self):
s = {psutil.Process(), psutil.Process()}
assert len(s) == 1
# ===================================================================
# --- Misc, generic, corner cases
# ===================================================================
| TestSpecialMethods |
python | mlflow__mlflow | tests/helper_functions.py | {
"start": 9580,
"end": 19268
} | class ____:
def __init__(self, proc, port, activity_polling_timeout_seconds=60 * 8, validate_version=True):
self._proc = proc
self._port = port
self._activity_polling_timeout_seconds = activity_polling_timeout_seconds
self._validate_version = validate_version
def __enter__(self):
ping_status = None
for i in range(self._activity_polling_timeout_seconds):
assert self._proc.poll() is None, "scoring process died"
time.sleep(1)
# noinspection PyBroadException
try:
ping_status = requests.get(url=f"http://localhost:{self._port}/ping")
_logger.info(f"connection attempt {i} server is up! ping status {ping_status}")
if ping_status.status_code == 200:
break
except Exception:
_logger.info(f"connection attempt {i} failed, server is not up yet")
if ping_status is None or ping_status.status_code != 200:
raise Exception("ping failed, server is not happy")
_logger.info(f"server up, ping status {ping_status}")
if self._validate_version:
resp_status = requests.get(url=f"http://localhost:{self._port}/version")
version = resp_status.text
_logger.info(f"mlflow server version {version}")
if version != mlflow.__version__:
raise Exception("version path is not returning correct mlflow version")
return self
def __exit__(self, tp, val, traceback):
if self._proc.poll() is None:
# Terminate the process group containing the scoring process.
# This will terminate all child processes of the scoring process
if not is_windows():
pgrp = os.getpgid(self._proc.pid)
os.killpg(pgrp, signal.SIGTERM)
else:
# https://stackoverflow.com/questions/47016723/windows-equivalent-for-spawning-and-killing-separate-process-group-in-python-3
self._proc.send_signal(signal.CTRL_BREAK_EVENT)
self._proc.kill()
def invoke(self, data, content_type):
import pandas as pd
from mlflow.pyfunc import scoring_server as pyfunc_scoring_server
if isinstance(data, pd.DataFrame):
if content_type == pyfunc_scoring_server.CONTENT_TYPE_CSV:
data = data.to_csv(index=False)
else:
assert content_type == pyfunc_scoring_server.CONTENT_TYPE_JSON
data = json.dumps({"dataframe_split": data.to_dict(orient="split")})
elif type(data) not in {str, dict}:
data = json.dumps({"instances": data})
return requests.post(
url=f"http://localhost:{self._port}/invocations",
data=data,
headers={"Content-Type": content_type},
)
@pytest.fixture(autouse=True)
def set_boto_credentials(monkeypatch):
monkeypatch.setenv("AWS_ACCESS_KEY_ID", "NotARealAccessKey")
monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "NotARealSecretAccessKey")
monkeypatch.setenv("AWS_SESSION_TOKEN", "NotARealSessionToken")
def create_mock_response(status_code, text):
"""
Create a mock response object with the status_code and text
Args:
status_code: HTTP status code.
text: Message from the response.
Returns:
Mock HTTP Response.
"""
response = mock.MagicMock()
response.status_code = status_code
response.text = text
return response
def _read_lines(path):
with open(path) as f:
return f.read().splitlines()
def _compare_logged_code_paths(code_path: str, model_uri: str, flavor_name: str) -> None:
from mlflow.utils.model_utils import FLAVOR_CONFIG_CODE, _get_flavor_configuration
model_path = _download_artifact_from_uri(model_uri)
pyfunc_conf = _get_flavor_configuration(
model_path=model_path, flavor_name=mlflow.pyfunc.FLAVOR_NAME
)
flavor_conf = _get_flavor_configuration(model_path, flavor_name=flavor_name)
assert pyfunc_conf[mlflow.pyfunc.CODE] == flavor_conf[FLAVOR_CONFIG_CODE]
saved_code_path = os.path.join(model_path, pyfunc_conf[mlflow.pyfunc.CODE])
assert os.path.exists(saved_code_path)
with open(os.path.join(saved_code_path, os.path.basename(code_path))) as f1:
with open(code_path) as f2:
assert f1.read() == f2.read()
def _compare_conda_env_requirements(env_path, req_path):
from mlflow.utils.environment import _get_pip_deps
from mlflow.utils.yaml_utils import read_yaml
assert os.path.exists(req_path)
env_root, env_path = os.path.split(env_path)
custom_env_parsed = read_yaml(env_root, env_path)
requirements = _read_lines(req_path)
assert _get_pip_deps(custom_env_parsed) == requirements
def _get_deps_from_requirement_file(model_uri):
"""
Returns a list of pip dependencies for the model at `model_uri` and truncate the version number.
"""
from mlflow.utils.environment import _REQUIREMENTS_FILE_NAME
local_path = _download_artifact_from_uri(model_uri)
pip_packages = _read_lines(os.path.join(local_path, _REQUIREMENTS_FILE_NAME))
return [req.split("==")[0] if "==" in req else req for req in pip_packages]
def assert_register_model_called_with_local_model_path(
register_model_mock, model_uri, registered_model_name
):
register_model_call_args = register_model_mock.call_args
assert register_model_call_args.args == (model_uri, registered_model_name)
assert (
register_model_call_args.kwargs["await_registration_for"] == DEFAULT_AWAIT_MAX_SLEEP_SECONDS
)
local_model_path = register_model_call_args.kwargs["local_model_path"]
assert local_model_path.startswith(tempfile.gettempdir())
def _assert_pip_requirements(model_uri, requirements, constraints=None, strict=False):
"""
Loads the pip requirements (and optionally constraints) from `model_uri` and compares them
to `requirements` (and `constraints`).
If `strict` is True, evaluate `set(requirements) == set(loaded_requirements)`.
Otherwise, evaluate `set(requirements) <= set(loaded_requirements)`.
"""
from mlflow.utils.environment import (
_CONDA_ENV_FILE_NAME,
_CONSTRAINTS_FILE_NAME,
_REQUIREMENTS_FILE_NAME,
_get_pip_deps,
)
from mlflow.utils.yaml_utils import read_yaml
local_path = _download_artifact_from_uri(model_uri)
txt_reqs = _read_lines(os.path.join(local_path, _REQUIREMENTS_FILE_NAME))
conda_reqs = _get_pip_deps(read_yaml(local_path, _CONDA_ENV_FILE_NAME))
compare_func = set.__eq__ if strict else set.__le__
requirements = set(requirements)
assert compare_func(requirements, set(txt_reqs))
assert compare_func(requirements, set(conda_reqs))
if constraints is not None:
assert f"-c {_CONSTRAINTS_FILE_NAME}" in txt_reqs
assert f"-c {_CONSTRAINTS_FILE_NAME}" in conda_reqs
cons = _read_lines(os.path.join(local_path, _CONSTRAINTS_FILE_NAME))
assert compare_func(set(constraints), set(cons))
def _is_available_on_pypi(package, version=None, module=None):
"""
Returns True if the specified package version is available on PyPI.
Args:
package: The name of the package.
version: The version of the package. If None, defaults to the installed version.
module: The name of the top-level module provided by the package. For example,
if `package` is 'scikit-learn', `module` should be 'sklearn'. If None, defaults
to `package`.
"""
from mlflow.utils.requirements_utils import _get_installed_version
url = f"https://pypi.python.org/pypi/{package}/json"
for sec in range(3):
try:
time.sleep(sec)
resp = requests.get(url)
except requests.exceptions.ConnectionError:
continue
if resp.status_code == 404:
return False
if resp.status_code == 200:
break
else:
raise Exception(f"Failed to connect to {url}")
version = version or _get_installed_version(module or package)
dist_files = resp.json()["releases"].get(version)
return (
dist_files is not None # specified version exists
and (len(dist_files) > 0) # at least one distribution file exists
and not dist_files[0].get("yanked", False) # specified version is not yanked
)
def _is_importable(module_name):
try:
__import__(module_name)
return True
except ImportError:
return False
def allow_infer_pip_requirements_fallback_if(condition):
def decorator(f):
return pytest.mark.allow_infer_pip_requirements_fallback(f) if condition else f
return decorator
def mock_method_chain(mock_obj, methods, return_value=None, side_effect=None):
"""
Mock a chain of methods.
Examples
--------
>>> from unittest import mock
>>> m = mock.MagicMock()
>>> mock_method_chain(m, ["a", "b"], return_value=0)
>>> m.a().b()
0
>>> mock_method_chain(m, ["c.d", "e"], return_value=1)
>>> m.c.d().e()
1
>>> mock_method_chain(m, ["f"], side_effect=Exception("side_effect"))
>>> m.f()
Traceback (most recent call last):
...
Exception: side_effect
"""
length = len(methods)
for idx, method in enumerate(methods):
mock_obj = functools.reduce(getattr, method.split("."), mock_obj)
if idx != length - 1:
mock_obj = mock_obj.return_value
else:
mock_obj.return_value = return_value
mock_obj.side_effect = side_effect
| RestEndpoint |
python | django__django | tests/composite_pk/models/tenant.py | {
"start": 1593,
"end": 1832
} | class ____(models.Model):
pk = models.CompositePrimaryKey("id", "created")
id = models.SmallIntegerField(unique=True)
created = models.DateTimeField(auto_now_add=True)
text = models.TextField(default="", blank=True)
| TimeStamped |
python | ethereum__web3.py | web3/exceptions.py | {
"start": 9565,
"end": 9704
} | class ____(Web3RPCError):
"""
Raised when the block id used to look up a block in a jsonrpc call cannot be found.
"""
| BlockNotFound |
python | Pylons__pyramid | docs/quick_tutorial/jinja2/tutorial/tests.py | {
"start": 47,
"end": 675
} | class ____(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def test_home(self):
from .views import TutorialViews
request = testing.DummyRequest()
inst = TutorialViews(request)
response = inst.home()
self.assertEqual('Home View', response['name'])
def test_hello(self):
from .views import TutorialViews
request = testing.DummyRequest()
inst = TutorialViews(request)
response = inst.hello()
self.assertEqual('Hello View', response['name'])
| TutorialViewTests |
python | PyCQA__pylint | tests/functional/i/inherit_non_class.py | {
"start": 630,
"end": 729
} | class ____(object()): # [inherit-non-class]
""" Can't inherit from an instance of object. """
| Bad2 |
python | mlflow__mlflow | tests/dev/test_remove_experimental_decorators.py | {
"start": 2131,
"end": 2221
} | class ____:
def method(self):
pass
def regular_func():
pass
"""
)
| MyClass |
python | encode__django-rest-framework | tests/test_versioning.py | {
"start": 1067,
"end": 1387
} | class ____(RequestVersionView):
def determine_version(self, request, *args, **kwargs):
scheme = self.versioning_class()
scheme.allowed_versions = ('v1', 'v2')
scheme.default_version = 'v2'
return (scheme.determine_version(request, *args, **kwargs), scheme)
| AllowedAndDefaultVersionsView |
python | PrefectHQ__prefect | tests/test_settings.py | {
"start": 84581,
"end": 95575
} | class ____:
def test_init_stores_single_profile(self):
profile = Profile(name="test", settings={})
profiles = ProfilesCollection(profiles=[profile])
assert profiles.profiles_by_name == {"test": profile}
assert profiles.active_name is None
def test_init_stores_multiple_profile(self):
foo = Profile(name="foo", settings={})
bar = Profile(name="bar", settings={})
profiles = ProfilesCollection(profiles=[foo, bar])
assert profiles.profiles_by_name == {"foo": foo, "bar": bar}
assert profiles.active_name is None
def test_init_sets_active_name(self):
foo = Profile(name="foo", settings={})
bar = Profile(name="bar", settings={})
profiles = ProfilesCollection(profiles=[foo, bar], active="foo")
assert profiles.active_name == "foo"
def test_init_sets_active_name_even_if_not_present(self):
foo = Profile(name="foo", settings={})
bar = Profile(name="bar", settings={})
profiles = ProfilesCollection(profiles=[foo, bar], active="foobar")
assert profiles.active_name == "foobar"
def test_getitem_retrieves_profiles(self):
foo = Profile(name="foo", settings={})
bar = Profile(name="bar", settings={})
profiles = ProfilesCollection(profiles=[foo, bar])
assert profiles["foo"] is foo
assert profiles["bar"] is bar
def test_getitem_with_invalid_key(self):
foo = Profile(name="foo", settings={})
bar = Profile(name="bar", settings={})
profiles = ProfilesCollection(profiles=[foo, bar])
with pytest.raises(KeyError):
profiles["test"]
def test_iter_retrieves_profile_names(self):
foo = Profile(name="foo", settings={})
bar = Profile(name="bar", settings={})
profiles = ProfilesCollection(profiles=[foo, bar])
assert tuple(sorted(profiles)) == ("bar", "foo")
def test_names_property(self):
foo = Profile(name="foo", settings={})
bar = Profile(name="bar", settings={})
profiles = ProfilesCollection(profiles=[foo, bar], active="foo")
assert profiles.names == {"foo", "bar"}
def test_active_profile_property(self):
foo = Profile(name="foo", settings={})
bar = Profile(name="bar", settings={})
profiles = ProfilesCollection(profiles=[foo, bar], active="foo")
assert profiles.active_profile == foo
def test_active_profile_property_null_active(self):
foo = Profile(name="foo", settings={})
bar = Profile(name="bar", settings={})
profiles = ProfilesCollection(profiles=[foo, bar], active=None)
assert profiles.active_profile is None
def test_active_profile_property_missing_active(self):
foo = Profile(name="foo", settings={})
bar = Profile(name="bar", settings={})
profiles = ProfilesCollection(profiles=[foo, bar], active="foobar")
with pytest.raises(KeyError):
profiles.active_profile
def test_set_active_profile(self):
foo = Profile(name="foo", settings={})
bar = Profile(name="bar", settings={})
profiles = ProfilesCollection(profiles=[foo, bar], active=None)
assert profiles.set_active("foo") is None
assert profiles.active_name == "foo"
assert profiles.active_profile is foo
def test_set_active_profile_with_missing_name(self):
foo = Profile(name="foo", settings={})
bar = Profile(name="bar", settings={})
profiles = ProfilesCollection(profiles=[foo, bar], active=None)
with pytest.raises(ValueError, match="Unknown profile name"):
profiles.set_active("foobar")
def test_set_active_profile_with_null_name(self):
foo = Profile(name="foo", settings={})
bar = Profile(name="bar", settings={})
profiles = ProfilesCollection(profiles=[foo, bar], active=None)
assert profiles.set_active(None) is None
assert profiles.active_name is None
assert profiles.active_profile is None
def test_add_profile(self):
foo = Profile(name="foo", settings={})
bar = Profile(name="bar", settings={})
profiles = ProfilesCollection(profiles=[foo], active=None)
assert "bar" not in profiles.names
profiles.add_profile(bar)
assert "bar" in profiles.names
assert profiles["bar"] is bar
def test_add_profile_already_exists(self):
foo = Profile(name="foo", settings={})
bar = Profile(name="bar", settings={})
profiles = ProfilesCollection(profiles=[foo, bar], active=None)
with pytest.raises(ValueError, match="already exists in collection"):
profiles.add_profile(bar)
def test_remove_profiles(self):
foo = Profile(name="foo", settings={})
bar = Profile(name="bar", settings={})
profiles = ProfilesCollection(profiles=[foo, bar], active=None)
assert "bar" in profiles
profiles.remove_profile("bar")
assert "bar" not in profiles
def test_remove_profile_does_not_exist(self):
foo = Profile(name="foo", settings={})
Profile(name="bar", settings={})
profiles = ProfilesCollection(profiles=[foo], active=None)
assert "bar" not in profiles.names
with pytest.raises(KeyError):
profiles.remove_profile("bar")
def test_update_profile_adds_key(self):
profiles = ProfilesCollection(profiles=[Profile(name="test", settings={})])
profiles.update_profile(name="test", settings={PREFECT_API_URL: "hello"})
assert profiles["test"].settings == {PREFECT_API_URL: "hello"}
def test_update_profile_updates_key(self):
profiles = ProfilesCollection(profiles=[Profile(name="test", settings={})])
profiles.update_profile(name="test", settings={PREFECT_API_URL: "hello"})
assert profiles["test"].settings == {PREFECT_API_URL: "hello"}
profiles.update_profile(name="test", settings={PREFECT_API_URL: "goodbye"})
assert profiles["test"].settings == {PREFECT_API_URL: "goodbye"}
def test_update_profile_removes_key(self):
profiles = ProfilesCollection(profiles=[Profile(name="test", settings={})])
profiles.update_profile(name="test", settings={PREFECT_API_URL: "hello"})
assert profiles["test"].settings == {PREFECT_API_URL: "hello"}
profiles.update_profile(name="test", settings={PREFECT_API_URL: None})
assert profiles["test"].settings == {}
def test_update_profile_mixed_add_and_update(self):
profiles = ProfilesCollection(profiles=[Profile(name="test", settings={})])
profiles.update_profile(name="test", settings={PREFECT_API_URL: "hello"})
assert profiles["test"].settings == {PREFECT_API_URL: "hello"}
profiles.update_profile(
name="test",
settings={PREFECT_API_URL: "goodbye", PREFECT_LOGGING_LEVEL: "DEBUG"},
)
assert profiles["test"].settings == {
PREFECT_API_URL: "goodbye",
PREFECT_LOGGING_LEVEL: "DEBUG",
}
def test_update_profile_retains_existing_keys(self):
profiles = ProfilesCollection(profiles=[Profile(name="test", settings={})])
profiles.update_profile(name="test", settings={PREFECT_API_URL: "hello"})
assert profiles["test"].settings == {PREFECT_API_URL: "hello"}
profiles.update_profile(name="test", settings={PREFECT_LOGGING_LEVEL: "DEBUG"})
assert profiles["test"].settings == {
PREFECT_API_URL: "hello",
PREFECT_LOGGING_LEVEL: "DEBUG",
}
def test_without_profile_source(self):
foo = Profile(name="foo", settings={}, source=Path("/foo"))
bar = Profile(name="bar", settings={}, source=Path("/bar"))
profiles = ProfilesCollection(profiles=[foo, bar], active=None)
new_profiles = profiles.without_profile_source(Path("/foo"))
assert new_profiles.names == {"bar"}
assert profiles.names == {"foo", "bar"}, "Original object not mutated"
def test_without_profile_source_retains_nulls(self):
foo = Profile(name="foo", settings={}, source=Path("/foo"))
bar = Profile(name="bar", settings={}, source=None)
profiles = ProfilesCollection(profiles=[foo, bar], active=None)
new_profiles = profiles.without_profile_source(Path("/foo"))
assert new_profiles.names == {"bar"}
assert profiles.names == {"foo", "bar"}, "Original object not mutated"
def test_without_profile_source_handles_null_input(self):
foo = Profile(name="foo", settings={}, source=Path("/foo"))
bar = Profile(name="bar", settings={}, source=None)
profiles = ProfilesCollection(profiles=[foo, bar], active=None)
new_profiles = profiles.without_profile_source(None)
assert new_profiles.names == {"foo"}
assert profiles.names == {"foo", "bar"}, "Original object not mutated"
def test_equality(self):
foo = Profile(name="foo", settings={}, source=Path("/foo"))
bar = Profile(name="bar", settings={}, source=Path("/bar"))
assert ProfilesCollection(profiles=[foo, bar]) == ProfilesCollection(
profiles=[foo, bar]
), "Same definition should be equal"
assert ProfilesCollection(
profiles=[foo, bar], active=None
) == ProfilesCollection(profiles=[foo, bar]), (
"Explicit and implicit null active should be equal"
)
assert ProfilesCollection(
profiles=[foo, bar], active="foo"
) != ProfilesCollection(profiles=[foo, bar]), (
"One null active should be inequal"
)
assert ProfilesCollection(
profiles=[foo, bar], active="foo"
) != ProfilesCollection(profiles=[foo, bar], active="bar"), (
"Different active should be inequal"
)
assert ProfilesCollection(profiles=[foo, bar]) == ProfilesCollection(
profiles=[
Profile(name="foo", settings={}, source=Path("/foo")),
Profile(name="bar", settings={}, source=Path("/bar")),
]
), "Comparison of profiles should use equality not identity"
assert ProfilesCollection(profiles=[foo, bar]) != ProfilesCollection(
profiles=[foo]
), "Missing profile should be inequal"
assert ProfilesCollection(profiles=[foo, bar]) != ProfilesCollection(
profiles=[
foo,
Profile(
name="bar", settings={PREFECT_API_KEY: "test"}, source=Path("/bar")
),
]
), "Changed profile settings should be inequal"
assert ProfilesCollection(profiles=[foo, bar]) != ProfilesCollection(
profiles=[
foo,
Profile(name="bar", settings={}, source=Path("/new-path")),
]
), "Changed profile source should be inequal"
| TestProfilesCollection |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_datastore.py | {
"start": 3859,
"end": 4504
} | class ____:
@mock.patch(HOOK_PATH)
def test_execute(self, mock_hook):
partial_keys = [1, 2, 3]
op = CloudDatastoreAllocateIdsOperator(
task_id="test_task",
gcp_conn_id=CONN_ID,
project_id=PROJECT_ID,
partial_keys=partial_keys,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.assert_called_once_with(gcp_conn_id=CONN_ID, impersonation_chain=None)
mock_hook.return_value.allocate_ids.assert_called_once_with(
project_id=PROJECT_ID, partial_keys=partial_keys
)
| TestCloudDatastoreAllocateIds |
python | pytorch__pytorch | torch/testing/_internal/distributed/distributed_test.py | {
"start": 18928,
"end": 438272
} | class ____:
class _DistTestBase:
def _barrier(self, *args, **kwargs):
Barrier.sync(*args, **kwargs)
def _init_group_test(self, **kwargs):
group = [1, 2]
group_id = dist.new_group(group, **kwargs)
rank = dist.get_rank()
if rank not in group:
return ([], None, rank)
return (group, group_id, rank)
def _init_full_group_test(self, **kwargs):
group = list(range(dist.get_world_size()))
group_id = dist.new_group(**kwargs)
rank = dist.get_rank()
return (group, group_id, rank)
def _init_global_test(self):
group = list(range(dist.get_world_size()))
group_id = dist.group.WORLD
rank = dist.get_rank()
return (group, group_id, rank)
def _verify_buffers_equal(self, m1, m2):
# verify buffers across models
m1_buf_dict = dict(m1.module.named_buffers())
for name, buf in m2.module.named_buffers():
self.assertEqual(buf, m1_buf_dict[name])
# Verify buffers across ranks.
m1_buffers = list(m1.buffers())
m2_buffers = list(m2.buffers())
for buf1, buf2 in zip(m1_buffers, m2_buffers, strict=True):
gathered_bufs = [
torch.empty_like(buf1) for _ in range(dist.get_world_size())
]
dist.all_gather(gathered_bufs, buf1)
gathered_bufs_m2 = [
torch.empty_like(buf2) for _ in range(dist.get_world_size())
]
for b in gathered_bufs:
self.assertEqual(b, buf1)
dist.all_gather(gathered_bufs_m2, buf2)
for b in gathered_bufs_m2:
self.assertEqual(b, buf2)
def _sanity_check_profiler_nccl_meta(self, nccl_meta_events):
"""Torch profiler includes nccl metadata in an inserted operator called "record_param_comms"
We test for basic fields in this profiler event that correspond to the nccl communication
collectives"""
per_coll_meta = defaultdict(list)
for e in nccl_meta_events:
args = e.get("args", {})
collname = args.get("Collective name", "")
self.assertNotEqual(collname, "")
self.assertNotEqual(args.get("dtype", ""), "")
per_coll_meta[collname].append(args)
if collname == "wait":
continue
self.assertEqual(args["Process Group Description"], "default_pg")
self.assertNotEqual(args["Process Group Ranks"], "")
self.assertGreaterEqual(args.get("In msg nelems", -1), 0)
self.assertGreaterEqual(args.get("Out msg nelems", -1), 0)
self.assertGreaterEqual(args.get("Group size", -1), 0)
self.assertGreaterEqual(args.get("Global rank start", -1), 0)
self.assertGreaterEqual(args.get("Global rank stride", -1), 0)
# print(per_coll_meta)
return per_coll_meta
def test_dump_DDP_relevant_env_vars(self):
with captured_output() as (out, _):
_dump_DDP_relevant_env_vars()
lines = out.getvalue().splitlines()
def format_line(var):
return f"env:{var}={os.environ.get(var, 'N/A')}"
# Check relevant env vars
vars = [
"MASTER_ADDR",
"MASTER_PORT",
"WORLD_SIZE",
"NCCL_TOPO_DUMP_FILE", # N/A
"TORCH_NCCL_ASYNC_ERROR_HANDLING",
]
for var in vars:
line = format_line(var)
self.assertIn(line, lines)
# Check irrelevant env vars
vars = [
"xxx",
"yyy",
"zzz",
]
for var in vars:
line = format_line(var)
self.assertNotIn(line, lines)
# GET RANK
def test_get_rank(self):
test_dir = os.path.join(os.environ["TEMP_DIR"], "test_dir")
pid = str(os.getpid())
num_processes = dist.get_world_size()
with open(os.path.join(test_dir, pid), "w") as f:
f.write(str(dist.get_rank()))
self._barrier()
all_ranks = set()
for f_name in os.listdir(test_dir):
with open(os.path.join(test_dir, f_name)) as f:
all_ranks.add(int(f.read()))
self.assertEqual(len(all_ranks), num_processes)
self._barrier()
if dist.get_rank() == 0:
for f_name in os.listdir(test_dir):
os.unlink(os.path.join(test_dir, f_name))
self._barrier()
def test_get_backend(self):
if dist.get_world_size() > 2:
group = [1, 2]
else:
group = [0, 1]
group_id = dist.new_group(group)
backend_str = BACKEND.lower()
self.assertEqual(dist.get_backend(), backend_str)
if dist.get_rank() in group:
self.assertEqual(dist.get_backend(group_id), backend_str)
else:
with self.assertRaisesRegex(
ValueError, "Invalid process group specified"
):
dist.get_backend(group_id)
def test_Backend_enum_class(self):
# test parsing
backend = BACKEND.lower()
self.assertEqual(dist.Backend(BACKEND.upper()), backend)
self.assertEqual(dist.Backend(BACKEND), backend)
with self.assertRaises(ValueError):
dist.Backend(None)
with self.assertRaises(ValueError):
dist.Backend(3)
with self.assertRaises(ValueError):
dist.Backend(["gloo"])
# Test destroy
def test_destroy_group(self):
if dist.get_world_size() > 2:
group = [1, 2]
else:
group = [0, 1]
group_id = dist.new_group(group)
self._barrier()
dist.destroy_process_group(group_id)
# Test get rank and size of group
def test_get_rank_size_group(self):
if dist.get_world_size() > 2:
group = [1, 2]
else:
group = [0, 1]
group_id = dist.new_group(group)
if dist.get_rank() in group:
self.assertEqual(dist.get_world_size(group_id), 2)
self.assertTrue(dist.get_rank(group_id) in list(range(2)))
else:
self.assertEqual(dist.get_world_size(group_id), -1)
self.assertEqual(dist.get_rank(group_id), -1)
# Test destroy full groups
def test_destroy_full_group(self):
_, group_id, _ = self._init_full_group_test()
self._barrier()
dist.destroy_process_group(group_id)
# Test get rank and size of full group
def test_get_rank_size_full_group(self):
_, group_id, _ = self._init_full_group_test()
self.assertEqual(dist.get_world_size(group_id), dist.get_world_size())
self.assertEqual(dist.get_rank(group_id), dist.get_rank())
def _test_barrier_timeout(self, group_id, timeout):
local_rank = dist.get_rank(group_id)
# Only execute barrier on rank == 0, causing it to timeout
if local_rank == 0:
expected_time = time.time() + timeout.total_seconds()
# In debug mode, we execute a monitored_barrier before the
# collective, so assert on that.
if dist.get_debug_level() == dist.DebugLevel.DETAIL:
exception_ctx = self.assertRaisesRegex(
Exception, "failed to pass monitoredBarrier"
)
else:
exception_ctx = self.assertRaisesRegex(
Exception, " (Timed out|closed|timeout) "
)
with exception_ctx:
dist.barrier(group_id)
self.assertGreaterAlmostEqual(time.time(), expected_time, delta=0.1)
@skip_but_pass_in_sandcastle_if(
BACKEND != "gloo", "Only gloo backend supports timeouts"
)
@skip_but_pass_in_sandcastle_if(
not INIT_METHOD.startswith("file://"),
"Requires file:// initialization method. "
+ "Both tcp:// and env:// rely on the TCP store for which "
"reinitialization has proven racy.",
)
def test_barrier_timeout_global(self):
dist.destroy_process_group()
# Explicitly pass world size to the barrier because we've
# just destroyed any state in torch.distributed.
self._barrier(wait_for=int(os.environ["WORLD_SIZE"]))
# Reinitialize global process group
timeout = timedelta(seconds=1)
dist.init_process_group(
init_method=INIT_METHOD,
backend=BACKEND,
world_size=int(os.environ["WORLD_SIZE"]),
rank=self.rank,
timeout=timeout,
)
self._test_barrier_timeout(dist.group.WORLD, timeout)
@skip_if_small_worldsize
@skip_but_pass_in_sandcastle_if(
BACKEND != "gloo", "Only gloo backend supports timeouts"
)
def test_barrier_timeout_group(self):
timeout = timedelta(seconds=5)
_, group_id, _ = self._init_group_test(timeout=timeout)
if group_id is not None:
self._test_barrier_timeout(group_id, timeout)
@skip_but_pass_in_sandcastle_if(
BACKEND != "gloo", "Only gloo backend supports timeouts"
)
def test_barrier_timeout_full_group(self):
timeout = timedelta(seconds=1)
_, group_id, _ = self._init_full_group_test(timeout=timeout)
if group_id is not None:
self._test_barrier_timeout(group_id, timeout)
@skip_but_pass_in_sandcastle_if(
BACKEND not in DistTestCases.backend_feature["subgroup"],
f"The {BACKEND} backend does not support creating subgroups on CUDA devices",
)
@require_world_size(4)
@skip_if_lt_x_gpu(2)
def test_new_subgroups(self):
subgroup_size = 2
cur_subgroup, subgroups = dist.new_subgroups(subgroup_size)
world_size = dist.get_world_size()
self.assertEqual(cur_subgroup.size(), subgroup_size)
self.assertEqual(len(subgroups), world_size / subgroup_size)
self.assertFalse(dist._rank_not_in_group(cur_subgroup))
for subgroup in subgroups:
dist.destroy_process_group(subgroup)
@skip_but_pass_in_sandcastle_if(
BACKEND not in DistTestCases.backend_feature["subgroup"],
f"The {BACKEND} backend does not support creating subgroups on CUDA devices",
)
@require_exact_world_size(4)
def test_new_subgroups_with_group_param(self):
# Initialize global test environment
self._init_global_test()
# Set up GPU devices for each rank
init_multigpu_helper(dist.get_world_size(), BACKEND)
# Create two subgroups: one with ranks [0,2] and another with ranks [1,3]
cur_subgroup, subgroups = dist.new_subgroups_by_enumeration(
ranks_per_subgroup_list=[[0, 2], [1, 3]]
)
# Further divide the current subgroup into sub-subgroups of size 1
cur_sub_subgroup, sub_subgroups = dist.new_subgroups(
group_size=1, group=cur_subgroup
)
# Verify we have 2 sub-subgroups (one for each rank in the original subgroup)
self.assertEqual(len(sub_subgroups), 2)
# Verify the current process's sub-subgroup has size 1
self.assertEqual(cur_sub_subgroup.size(), 1)
# Verify the current process is in its assigned sub-subgroup
self.assertFalse(dist._rank_not_in_group(group=cur_sub_subgroup))
# Clean up by destroying all created process groups
for sub_subgroup in sub_subgroups:
dist.destroy_process_group(sub_subgroup)
for subgroup in subgroups:
dist.destroy_process_group(subgroup)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["subgroup"],
        f"The {BACKEND} backend does not support creating subgroups on CUDA devices",
    )
    @skip_if_no_gpu
    def test_new_subgroups_group_size_exceeds_world_size(self):
        """Requesting a subgroup size larger than the world size must raise ValueError."""
        with self.assertRaisesRegex(ValueError, "must not exceed"):
            dist.new_subgroups(100)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["subgroup"],
        f"The {BACKEND} backend does not support creating subgroups on CUDA devices",
    )
    @require_world_size(4)
    @skip_if_lt_x_gpu(4)
    def test_new_subgroups_world_size_not_divisible_by_group_size(self):
        """A group size that does not evenly divide the world size must raise ValueError."""
        expected_msg = f"The world size ({dist.get_world_size()}) must be divisible by 'group_size=3'"
        with self.assertRaisesRegex(
            ValueError,
            re.escape(expected_msg),
        ):
            dist.new_subgroups(3)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["subgroup"],
        f"The {BACKEND} backend does not support creating subgroups on CUDA devices",
    )
    @require_world_size(4)
    @skip_if_lt_x_gpu(4)
    def test_new_subgroups_by_enumeration(self):
        """Enumerated subgroups [[0,2],[1,3]] must assign each rank to the matching group."""
        _group, _group_id, rank = self._init_global_test()
        rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
        device_id = rank_to_GPU[rank][0]
        cur_subgroup, subgroups = dist.new_subgroups_by_enumeration(
            ranks_per_subgroup_list=[[0, 2], [1, 3]]
        )
        # Ranks mapped to devices beyond the enumerated set belong to no subgroup.
        if device_id >= 4:
            self.assertIsNone(cur_subgroup)
        else:
            self.assertEqual(cur_subgroup.size(), 2)
            self.assertEqual(len(subgroups), 2)
            # Even device ids are in the first listed subgroup, odd in the second.
            if device_id == 0 or device_id == 2:
                self.assertEqual(cur_subgroup, subgroups[0])
            else:
                self.assertEqual(cur_subgroup, subgroups[1])
        for subgroup in subgroups:
            dist.destroy_process_group(subgroup)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["subgroup"],
        f"The {BACKEND} backend does not support creating subgroups on CUDA devices",
    )
    @require_world_size(4)
    @skip_if_lt_x_gpu(4)
    def test_new_subgroups_by_enumeration_input_rank_exceeds_world_size(self):
        """A rank >= world_size in the enumeration must raise ValueError."""
        _group, group_id, _rank = self._init_global_test()
        init_multigpu_helper(dist.get_world_size(), BACKEND)
        world_size = get_world_size(group_id)
        with self.assertRaisesRegex(
            ValueError,
            "The new group's rank should be within the world_size set by init_process_group",
        ):
            dist.new_subgroups_by_enumeration(
                ranks_per_subgroup_list=[[0, 1], [world_size, 2]]
            )
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["subgroup"],
        f"The {BACKEND} backend does not support creating subgroups on CUDA devices",
    )
    @skip_if_no_gpu
    def test_new_subgroups_by_enumeration_negative_input_rank(self):
        """Negative ranks in the enumeration must raise ValueError."""
        self._init_global_test()
        with self.assertRaisesRegex(
            ValueError,
            "The new group's rank should be within the world_size set by init_process_group",
        ):
            dist.new_subgroups_by_enumeration(
                ranks_per_subgroup_list=[[-1, -2], [-3, -4]]
            )
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["subgroup"],
        f"The {BACKEND} backend does not support creating subgroups on CUDA devices",
    )
    @skip_if_lt_x_gpu(2)
    def test_new_subgroups_overlap_not_allowed(self):
        """A rank appearing in more than one enumerated subgroup must raise ValueError."""
        with self.assertRaisesRegex(
            ValueError, "Rank 1 has appeared in both subgroup"
        ):
            dist.new_subgroups_by_enumeration(
                ranks_per_subgroup_list=[[0], [1, 2], [1, 3]]
            )
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["subgroup"],
        f"The {BACKEND} backend does not support creating subgroups on CUDA devices",
    )
    @skip_if_lt_x_gpu(2)
    def test_average_parameters(self):
        """average_parameters() must average across the world (group=None) and across an explicit subgroup."""
        rank = dist.get_rank()
        rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
        device_id = rank_to_GPU[rank][0]
        model = nn.Sequential(
            nn.Conv2d(3, 3, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Linear(1, 5, bias=False),
        ).cuda(device_id)
        # Test global model averaging
        for p in model.parameters():
            p.data = torch.ones_like(p.data)
        model_averaging_utils.average_parameters(
            params=model.parameters(), process_group=None
        )
        # Every element will be the same as the input.
        for p in model.parameters():
            self.assertEqual(p.data, torch.ones_like(p.data))
        # Test partial model averaging: seed each rank's params with its rank.
        for p in model.parameters():
            p.data = torch.ones_like(p.data) * rank
        group_nccl = dist.new_group(ranks=[0, 1], backend="nccl")
        model_averaging_utils.average_parameters(
            params=model.parameters(), process_group=group_nccl
        )
        if not dist._rank_not_in_group(group_nccl):
            # Every element on device 0 or 1 should be the average of 0 and 1, i.e., 0.5.
            for p in model.parameters():
                self.assertEqual(p.data, torch.ones_like(p.data) * 0.5)
        else:
            # Every element on device not in the subgroup should remain the same.
            for p in model.parameters():
                self.assertEqual(p.data, torch.ones_like(p.data) * rank)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["subgroup"],
        f"The {BACKEND} backend does not support creating subgroups on CUDA devices",
    )
    @skip_if_lt_x_gpu(2)
    def test_periodic_model_averager(self):
        """PeriodicModelAverager must average every `period` steps after warmup and be a no-op otherwise."""
        rank = dist.get_rank()
        world_size = dist.get_world_size()
        rank_to_GPU = init_multigpu_helper(world_size, BACKEND)
        device_id = rank_to_GPU[rank][0]
        model = nn.Linear(1, 5, bias=False).cuda(device_id)
        param = next(model.parameters())
        tensor = torch.ones_like(param.data) * rank
        # Average of ranks 0..world_size-1 replicated across the parameter.
        expected_avg_tensor = (
            torch.ones_like(param.data) * sum(range(world_size)) / world_size
        )
        period = 4
        for warmup_steps in [12, 13, 14, 15]:
            averager = averagers.PeriodicModelAverager(
                period=period, warmup_steps=warmup_steps
            )
            for step in range(20):
                # Reset the parameters at every step.
                param.data = copy.deepcopy(tensor)
                for params in model.parameters():
                    # mock grad
                    params.grad = torch.ones_like(param.data)
                averager.average_parameters(model.parameters())
                if step >= warmup_steps and (step - warmup_steps) % period == 0:
                    self.assertEqual(param.data, expected_avg_tensor)
                else:
                    # No model averaging, so the parameters are not updated.
                    self.assertEqual(param.data, tensor)
    @skip_if_lt_x_gpu(2)
    def test_periodic_model_averager_param_group(self):
        """PeriodicModelAverager must also accept optimizer param_groups (not just a parameter iterator)."""
        rank = dist.get_rank()
        world_size = dist.get_world_size()
        rank_to_GPU = init_multigpu_helper(world_size, BACKEND)
        device_id = rank_to_GPU[rank][0]
        model = nn.Linear(1, 5, bias=False).cuda(device_id)
        param = next(model.parameters())
        opt = torch.optim.SGD(model.parameters(), lr=0.1)
        period = 4
        for warmup_steps in [12, 13, 14, 15]:
            averager = averagers.PeriodicModelAverager(
                period=period, warmup_steps=warmup_steps
            )
            for step in range(20):
                # Reset the parameters at every step.
                for param_group in opt.param_groups:
                    for params in param_group["params"]:
                        # mock grad
                        params.grad = torch.ones_like(param.data) * rank
                        params.data = torch.ones_like(param.data) * rank
                averager.average_parameters(opt.param_groups)
                if step >= warmup_steps and (step - warmup_steps) % period == 0:
                    # Averaging step: params with grads become the global mean of ranks.
                    for param_group in opt.param_groups:
                        for params in param_group["params"]:
                            if params.grad is None:
                                continue
                            self.assertEqual(
                                param.data,
                                torch.ones_like(param.data)
                                * sum(range(world_size))
                                / world_size,
                            )
                else:
                    # No model averaging, so the parameters are not updated.
                    for param_group in opt.param_groups:
                        for params in param_group["params"]:
                            if params.grad is None:
                                continue
                            self.assertEqual(
                                param.data, torch.ones_like(param.data) * rank
                            )
@skip_but_pass_in_sandcastle_if(
BACKEND not in DistTestCases.backend_feature["subgroup"],
f"The {BACKEND} backend does not support creating subgroups on CUDA devices",
)
@skip_if_lt_x_gpu(2)
def test_1_level_hierarchical_model_averager_equivalent_to_periodic_model_averager(
self,
):
rank = dist.get_rank()
world_size = dist.get_world_size()
rank_to_GPU = init_multigpu_helper(world_size, BACKEND)
device_id = rank_to_GPU[rank][0]
model = nn.Linear(1, 5, bias=False).cuda(device_id)
param = next(model.parameters())
tensor = torch.ones_like(param.data) * rank
expected_avg_tensor = (
torch.ones_like(param.data) * sum(range(world_size)) / world_size
)
period = 4
for warmup_steps in [12, 13, 14, 15]:
averager = hierarchicalSGD.HierarchicalModelAverager(
# Run the global averaging at a period of 4,
# which is equivalent to the above periodic model averaging test case.
period_group_size_dict=OrderedDict([(period, world_size)]),
warmup_steps=warmup_steps,
)
averager = averagers.PeriodicModelAverager(
period=period, warmup_steps=warmup_steps
)
for step in range(20):
# Reset the parameters at every step.
param.data = copy.deepcopy(tensor)
for params in model.parameters():
# mock grad
params.grad = torch.ones_like(param.data)
averager.average_parameters(model.parameters())
if step >= warmup_steps and (step - warmup_steps) % period == 0:
self.assertEqual(param.data, expected_avg_tensor)
else:
# No model averaging, so the parameters are not updated.
self.assertEqual(param.data, tensor)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["subgroup"],
        f"The {BACKEND} backend does not support creating subgroups on CUDA devices",
    )
    @require_exact_world_size(4)
    @skip_if_lt_x_gpu(4)
    def test_3_level_hierarchical_model_averager(self):
        """A 3-level HierarchicalModelAverager must run only the highest-level averaging due at each step."""
        rank = dist.get_rank()
        world_size = dist.get_world_size()
        rank_to_GPU = init_multigpu_helper(world_size, BACKEND)
        device_id = rank_to_GPU[rank][0]
        model = nn.Linear(1, 5, bias=False).cuda(device_id)
        param = next(model.parameters())
        tensor = torch.ones_like(param.data) * rank
        # Set up such a hierarchical model averaging as follows:
        # after the first 10 warmup steps,
        # run model averaging every 2 steps within each subgroup of size 2,
        # run model averaging every 4 steps within each subgroup of size 4,
        # and run the global model averaging every 8 steps.
        # If there is a conflict in model averaging at a step, only run the highest-level model averaging.
        warmup_steps = 10
        subgroup_size1 = 2
        subgroup_avg_period1 = 2
        subgroup_size2 = 4
        subgroup_avg_period2 = 4
        global_avg_period = 8
        period_group_size_dict = OrderedDict(
            [
                (subgroup_avg_period1, subgroup_size1),
                (subgroup_avg_period2, subgroup_size2),
                (global_avg_period, world_size),
            ]
        )
        averager = hierarchicalSGD.HierarchicalModelAverager(
            period_group_size_dict=period_group_size_dict, warmup_steps=warmup_steps
        )
        # One process group per level (the world-sized level reuses the default group).
        self.assertEqual(dist.get_pg_count(), len(period_group_size_dict))
        subgroup1 = averager.period_process_group_dict[subgroup_avg_period1]
        subgroup2 = averager.period_process_group_dict[subgroup_avg_period2]
        real_group_ranks_res1 = _get_pg_config(subgroup1)["ranks"]
        real_group_ranks_res2 = _get_pg_config(subgroup2)["ranks"]
        # Ranks are grouped contiguously: e.g. for size 2, rank r belongs to
        # [r//2*2, r//2*2 + 1].
        expect_group_ranks_res1 = (
            rank // subgroup_size1 * subgroup_size1
            + np.array(list(range(subgroup_size1)))
        ).tolist()
        expect_group_ranks_res2 = (
            rank // subgroup_size2 * subgroup_size2
            + np.array(list(range(subgroup_size2)))
        ).tolist()
        self.assertEqual(real_group_ranks_res1, expect_group_ranks_res1)
        self.assertEqual(real_group_ranks_res2, expect_group_ranks_res2)
        expected_avg_tensor_within_subgroup1 = (
            torch.ones_like(param.data)
            * sum(real_group_ranks_res1)
            / subgroup_size1
        )
        expected_avg_tensor_within_subgroup2 = (
            torch.ones_like(param.data)
            * sum(real_group_ranks_res2)
            / subgroup_size2
        )
        expected_global_avg_tensor = (
            torch.ones_like(param.data) * sum(range(world_size)) / world_size
        )
        for step in range(25):
            # Reset the parameters at every step.
            param.data = copy.deepcopy(tensor)
            for params in model.parameters():
                # mock grad
                params.grad = torch.ones_like(param.data)
            averager.average_parameters(model.parameters())
            if step == 16 or step == 24:
                # Run global model averaging when `step` can be divided by 8.
                self.assertEqual(param.data, expected_global_avg_tensor)
            elif step == 12 or step == 20:
                # Run model averaging within subgroup when `step` can be divided by 4 but not by 8.
                self.assertEqual(param.data, expected_avg_tensor_within_subgroup2)
            elif step == 10 or step == 14 or step == 18 or step == 22:
                # Run model averaging within subgroup when `step` can be divided by 2 but not by 4 or 8.
                self.assertEqual(param.data, expected_avg_tensor_within_subgroup1)
            else:
                # No model averaging, so the parameters are not updated.
                self.assertEqual(param.data, tensor)
    # Coalescing manager (sync mode)
    @skip_if_no_gpu
    @skip_but_pass_in_sandcastle_if(
        BACKEND != "nccl" or IS_FBCODE or IS_SANDCASTLE,
        "Coalescing manager currently tests with NCCL only; internal test flaky",
    )
    def test_coalescing_manager(self):
        """Coalesced all_reduces on small tensors must match a single all_reduce over the concatenation."""
        self._barrier()
        rank = dist.get_rank()
        world_size = dist.get_world_size()
        rank_to_GPU = init_multigpu_helper(world_size, BACKEND)
        device_id = rank_to_GPU[rank][0]
        torch.cuda.set_device(device_id)
        num_colls = 2
        size_per_coll = 8
        small_tensors = [
            torch.ones(size_per_coll, device=device_id) for _ in range(num_colls)
        ]
        # Sync mode: results are ready when the context manager exits.
        with dist._coalescing_manager():
            for i in range(num_colls):
                dist.all_reduce(small_tensors[i])
        # Reference: one big all_reduce over an equivalent concatenated tensor.
        big_tensor = torch.ones(num_colls * size_per_coll, device=device_id)
        dist.all_reduce(big_tensor)
        for i in range(num_colls):
            self.assertEqual(
                small_tensors[i],
                big_tensor[i * size_per_coll : (i + 1) * size_per_coll],
            )
        self._barrier()
    # Coalescing manager (async mode)
    @skip_if_no_gpu
    @skip_but_pass_in_sandcastle_if(
        BACKEND != "nccl" or IS_FBCODE or IS_SANDCASTLE,
        "Coalescing manager currently tests with NCCL only; internal test flaky",
    )
    def test_coalescing_manager_async(self):
        """Async coalescing manager must yield the same results once cm.wait() completes."""
        self._barrier()
        rank = dist.get_rank()
        world_size = dist.get_world_size()
        rank_to_GPU = init_multigpu_helper(world_size, BACKEND)
        device_id = rank_to_GPU[rank][0]
        torch.cuda.set_device(device_id)
        num_colls = 2
        size_per_coll = 8
        small_tensors = [
            torch.ones(size_per_coll, device=device_id) for _ in range(num_colls)
        ]
        # Async mode: the collectives complete only after cm.wait().
        with dist._coalescing_manager(async_ops=True) as cm:
            for i in range(num_colls):
                dist.all_reduce(small_tensors[i])
        cm.wait()
        # Reference: one big all_reduce over an equivalent concatenated tensor.
        big_tensor = torch.ones(num_colls * size_per_coll, device=device_id)
        dist.all_reduce(big_tensor)
        for i in range(num_colls):
            self.assertEqual(
                small_tensors[i],
                big_tensor[i * size_per_coll : (i + 1) * size_per_coll],
            )
        self._barrier()
# NCCL Batch SEND RECV
@skip_if_no_gpu
@skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only")
@requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv")
def test_batch_isend_irecv_nccl(self):
self._barrier()
rank = dist.get_rank()
world_size = dist.get_world_size()
rank_to_GPU = init_multigpu_helper(world_size, BACKEND)
device_id = rank_to_GPU[rank][0]
torch.cuda.set_device(device_id)
p2p_op_list = []
recv_tensors = [None for _ in range(world_size)]
expected_tensors = [None for _ in range(world_size)]
for val in ["1", "0"]:
os.environ["TORCH_NCCL_BLOCKING_WAIT"] = val
for src in range(world_size):
send_tensor = _build_tensor(rank + 1, device_id=device_id).fill_(
src
)
recv_tensors[src] = _build_tensor(
src + 1, value=-1, device_id=device_id
).fill_(-1)
expected_tensors[src] = _build_tensor(
src + 1, value=-1, device_id=device_id
).fill_(rank)
recv_op = dist.P2POp(dist.irecv, recv_tensors[src], src)
p2p_op_list.append(recv_op)
send_op = dist.P2POp(dist.isend, send_tensor, src)
p2p_op_list.append(send_op)
reqs = dist.batch_isend_irecv(p2p_op_list)
for req in reqs:
req.wait()
for src in range(world_size):
self.assertEqual(recv_tensors[src], expected_tensors[src])
self._barrier()
    @skip_if_no_gpu
    @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only")
    @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv")
    def test_batch_isend_irecv_ring_exchange_nccl(self):
        """Batched isend/irecv in a ring (send to rank+1, recv from rank-1) must complete without deadlock."""
        self._barrier()
        rank = dist.get_rank()
        world_size = dist.get_world_size()
        rank_to_GPU = init_multigpu_helper(world_size, BACKEND)
        device_id = rank_to_GPU[rank][0]
        torch.cuda.set_device(device_id)
        send_tensor = _build_tensor(world_size, device_id=device_id)
        recv_tensor = _build_tensor(world_size, value=-1, device_id=device_id)
        send_op = dist.P2POp(dist.isend, send_tensor, (rank + 1) % world_size)
        recv_op = dist.P2POp(
            dist.irecv, recv_tensor, (rank - 1 + world_size) % world_size
        )
        reqs = dist.batch_isend_irecv([send_op, recv_op])
        for req in reqs:
            req.wait()
        self._barrier()
    @skip_if_no_gpu
    @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only")
    @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv")
    def test_batch_isend_irecv_self_nccl(self):
        """Rank 0 must be able to batch-send/recv to itself; other ranks post no ops."""
        self._barrier()
        # Ensure the process group has been fully initialized (needed by
        # the first sub-group batch_isend_irecv call)
        dist.barrier()
        rank = dist.get_rank()
        rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
        device_id = rank_to_GPU[rank][0]
        p2p_op_list = []
        if rank == 0:
            send_tensor = _build_tensor(rank + 1, device_id=device_id)
            recv_tensor = _build_tensor(rank + 1, value=-1, device_id=device_id)
            recv_op = dist.P2POp(dist.irecv, recv_tensor, 0)
            p2p_op_list.append(recv_op)
            send_op = dist.P2POp(dist.isend, send_tensor, 0)
            p2p_op_list.append(send_op)
            reqs = dist.batch_isend_irecv(p2p_op_list)
            for req in reqs:
                req.wait()
        self._barrier()
    @skip_if_no_gpu
    @skip_if_small_worldsize
    @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only")
    @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv")
    def test_batch_isend_irecv_no_rank_zero_nccl(self):
        """Batched isend/irecv between ranks 1 and 2 must work without rank 0 participating."""
        self._barrier()
        # Ensure the process group has been fully initialized (needed by
        # the first sub-group batch_isend_irecv call)
        dist.barrier()
        rank = dist.get_rank()
        rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
        device_id = rank_to_GPU[rank][0]
        torch.cuda.set_device(device_id)
        p2p_op_list = []
        # `peer` is only defined for ranks 1 and 2, matching the guard below;
        # all other ranks post no ops.
        if rank == 1:
            peer = 2
        elif rank == 2:
            peer = 1
        if rank in [1, 2]:
            send_tensor = _build_tensor(rank + 1, device_id=device_id)
            recv_tensor = _build_tensor(peer + 1, value=-1, device_id=device_id)
            recv_op = dist.P2POp(dist.irecv, recv_tensor, peer)
            p2p_op_list.append(recv_op)
            send_op = dist.P2POp(dist.isend, send_tensor, peer)
            p2p_op_list.append(send_op)
            reqs = dist.batch_isend_irecv(p2p_op_list)
            for req in reqs:
                req.wait()
        self._barrier()
    # GLOO Batch SEND RECV CPU
    @skip_but_pass_in_sandcastle_if(BACKEND != "gloo", "GLOO Batch Send Recv CPU")
    def test_batch_isend_irecv_gloo(self):
        """Batched isend/irecv on CPU with gloo: exchange one tensor with every other rank."""
        self._barrier()
        rank = dist.get_rank()
        p2p_op_list = []
        for src in range(dist.get_world_size()):
            if src == rank:
                continue
            send_tensor = _build_tensor(rank + 1)
            recv_tensor = _build_tensor(src + 1, value=-1)
            recv_op = dist.P2POp(dist.irecv, recv_tensor, src)
            p2p_op_list.append(recv_op)
            send_op = dist.P2POp(dist.isend, send_tensor, src)
            p2p_op_list.append(send_op)
        reqs = dist.batch_isend_irecv(p2p_op_list)
        for req in reqs:
            req.wait()
        self._barrier()
    # GLOO Batch SEND RECV CPU with provided tags
    @skip_but_pass_in_sandcastle_if(BACKEND != "gloo", "GLOO Batch Send Recv CPU")
    def test_batch_isend_irecv_gloo_tags(self):
        """Same as test_batch_isend_irecv_gloo, but with explicit matching tags (sender tags with its own rank)."""
        self._barrier()
        rank = dist.get_rank()
        p2p_op_list = []
        for src in range(dist.get_world_size()):
            if src == rank:
                continue
            send_tensor = _build_tensor(rank + 1)
            recv_tensor = _build_tensor(src + 1, value=-1)
            recv_op = dist.P2POp(dist.irecv, recv_tensor, src, tag=src)
            p2p_op_list.append(recv_op)
            send_op = dist.P2POp(dist.isend, send_tensor, src, tag=rank)
            p2p_op_list.append(send_op)
        reqs = dist.batch_isend_irecv(p2p_op_list)
        for req in reqs:
            req.wait()
        self._barrier()
    # NCCL Batch SEND RECV Op Error
    @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only")
    @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv")
    def test_batch_isend_irecv_op_err(self):
        """Passing a non-p2p op (broadcast) to P2POp must raise ValueError."""
        self._barrier()
        rank = dist.get_rank()
        if rank == 0:
            rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
            device_id = rank_to_GPU[rank][0]
            with self.assertRaisesRegex(ValueError, "^Invalid ``op``"):
                send_tensor = _build_tensor(rank + 1, device_id=device_id)
                send_op = dist.P2POp(dist.broadcast, send_tensor, 1)
                dist.batch_isend_irecv([send_op])
    # NCCL Batch SEND RECV p2p_op_list Error
    @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only")
    @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv")
    def test_batch_isend_irecv_op_list_err(self):
        """Passing non-P2POp entries to batch_isend_irecv must raise ValueError."""
        self._barrier()
        rank = dist.get_rank()
        if rank == 0:
            with self.assertRaisesRegex(ValueError, "^Invalid ``p2p_op_list``"):
                dist.batch_isend_irecv([1, 2])
    # NCCL Batch SEND RECV Mixed Backend Error
    @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only")
    @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv")
    def test_batch_isend_irecv_mixed_backend_err(self):
        """Mixing ops from different process groups/backends in one batch must raise ValueError."""
        self._barrier()
        rank = dist.get_rank()
        init_multigpu_helper(dist.get_world_size(), BACKEND)
        group_gloo = dist.new_group(ranks=[0, 1], backend="gloo")
        group_nccl = dist.new_group(ranks=[0, 1], backend="nccl")
        if rank == 0:
            with self.assertRaisesRegex(
                ValueError, "All ops need to use the same group"
            ):
                send_tensor = _build_tensor(rank + 1)
                send_op_gloo = dist.P2POp(dist.isend, send_tensor, 1, group_gloo)
                send_op_nccl = dist.P2POp(dist.isend, send_tensor, 1, group_nccl)
                dist.batch_isend_irecv([send_op_gloo, send_op_nccl])
    # NCCL SEND RECV
    @skip_if_no_gpu
    @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only")
    @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv")
    def _test_send_recv_nccl(self, profiler_ctx=None):
        """Blocking send/recv over NCCL: each rank in turn sends to every other
        rank; optionally verifies send/recv profiler events when a profiler
        context is supplied.
        """
        # TODO: now that nccl send/recv is supported, there does not seem to
        # be a need to have nccl send/recv be tested separately.
        rank = dist.get_rank()
        world_size = dist.get_world_size()
        rank_to_GPU = init_multigpu_helper(world_size, BACKEND)
        device_id = rank_to_GPU[rank][0]
        torch.cuda.set_device(device_id)
        tensor = _build_tensor(rank + 1, device_id=device_id)
        profiler_cls = profiler_ctx if profiler_ctx is not None else nullcontext()
        with profiler_cls as prof:
            for src in range(world_size):
                if src == rank:
                    # Send mode
                    for dst in range(world_size):
                        if dst == rank:
                            continue
                        dist.send(tensor, dst)
                else:
                    # Recv mode
                    expected_tensor = _build_tensor(src + 1)
                    output_tensor = _build_tensor(
                        src + 1, value=-1, device_id=device_id
                    )
                    dist.recv(output_tensor, src)
                    self.assertEqual(output_tensor, expected_tensor)
            self._barrier()
        if profiler_ctx is not None:
            backend = dist.get_backend()
            if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS:
                for event_name in [f"{backend}:send", f"{backend}:recv"]:
                    events = get_profiling_event(
                        event_name, prof, dedup_gpu_user_annotation=True
                    )
                    self.assertTrue(events)
                    # Event order is not deterministic, so simply assert their shape
                    # is found in the following list.
                    expected_shapes = [
                        [[rank + 1] * 3] for rank in range(dist.get_world_size())
                    ]
                    for event in events:
                        self.assertTrue(event.input_shapes in expected_shapes)
    @skip_if_no_gpu
    @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only")
    @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv")
    def test_send_recv_nccl(self):
        """NCCL send/recv without any profiler."""
        self._test_send_recv_nccl()
    @skip_if_no_gpu
    @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only")
    @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv")
    def test_send_recv_nccl_autograd_profiler(self):
        """NCCL send/recv under the legacy autograd profiler."""
        profiler_ctx = torch.autograd.profiler.profile(record_shapes=True)
        self._test_send_recv_nccl(profiler_ctx)
    @skip_if_no_gpu
    @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only")
    @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv")
    @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode causes hang")
    @skip_but_pass_in_sandcastle_if(
        IS_MACOS or IS_WINDOWS,
        "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124",
    )
    def test_send_recv_nccl_torch_profiler(self):
        """NCCL send/recv under the torch.profiler (Kineto) profiler."""
        profiler_ctx = torch.profiler.profile(
            activities=[
                torch.profiler.ProfilerActivity.CPU,
                torch.profiler.ProfilerActivity.CUDA,
            ],
            record_shapes=True,
        )
        self._test_send_recv_nccl(profiler_ctx)
    # SEND RECV
    def _test_send_recv(self, profiler_ctx):
        """Blocking send/recv: each rank in turn sends to every other rank;
        optionally verifies per-backend send/recv profiler event counts and
        shapes when a profiler context is supplied.
        """
        rank = dist.get_rank()
        send_size = rank + 1
        tensor = _build_tensor(send_size)
        ctx = profiler_ctx if profiler_ctx is not None else nullcontext()
        with ctx as prof:
            for src in range(dist.get_world_size()):
                if src == rank:
                    # Send mode
                    for dst in range(dist.get_world_size()):
                        if dst == rank:
                            continue
                        dist.send(tensor, dst)
                else:
                    # Recv mode
                    recv_size = src + 1
                    expected_tensor = _build_tensor(recv_size)
                    output_tensor = _build_tensor(recv_size, value=-1)
                    dist.recv(output_tensor, src)
                    self.assertEqual(output_tensor, expected_tensor)
        if profiler_ctx is not None:
            backend = dist.get_backend()
            if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS:
                for event_name in [f"{backend}:send", f"{backend}:recv"]:
                    events = get_profiling_event(event_name, prof)
                    # Each rank sends/recvs from all other ranks.
                    event_count = sum(e.count for e in events)
                    expected_event_count = dist.get_world_size() - 1
                    self.assertEqual(event_count, expected_event_count)
                    # Event order is not deterministic, so simply assert their shape
                    # is found in the following list.
                    expected_shapes = [
                        [[rank + 1] * 3] for rank in range(dist.get_world_size())
                    ]
                    for event in events:
                        self.assertTrue(event.is_async)
                        self.assertTrue(event.input_shapes in expected_shapes)
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "nccl", "Nccl send/recv tested by test_send_recv_nccl"
    )
    def test_send_recv(self):
        """send/recv without any profiler."""
        self._test_send_recv(profiler_ctx=None)
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl"
    )
    def test_send_recv_autograd_profiler(self):
        """send/recv under the legacy autograd profiler."""
        autograd_profiler_ctx = _create_autograd_profiler()
        self._test_send_recv(profiler_ctx=autograd_profiler_ctx)
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl"
    )
    @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode causes hang")
    @skip_but_pass_in_sandcastle_if(
        IS_MACOS or IS_WINDOWS,
        "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124",
    )
    def test_send_recv_torch_profiler(self):
        """send/recv under the torch.profiler (Kineto) profiler."""
        torch_profiler_ctx = _create_torch_profiler()
        return self._test_send_recv(profiler_ctx=torch_profiler_ctx)
# SEND RECV ANY SOURCE
def _test_send_recv_any_source(self, profiler_ctx):
rank = dist.get_rank()
send_recv_size = 10
tensor = _build_tensor(send_recv_size, value=rank)
recv_ranks = []
irecv_ranks = []
ctx = profiler_ctx if profiler_ctx is not None else nullcontext()
with ctx as prof:
for dst in range(dist.get_world_size()):
if dst == rank:
# Recv mode
for dst in range(dist.get_world_size()):
if dst == rank:
continue
for recv in ["recv", "irecv"]:
output_tensor = _build_tensor(send_recv_size, value=-1)
if recv == "recv":
sender = dist.recv(output_tensor)
recv_ranks.append(sender)
elif recv == "irecv":
work = dist.irecv(output_tensor)
work.wait()
sender = work._source_rank()
irecv_ranks.append(sender)
# Assert the scalar value "sender" that should be
# equal to the rank of the sender is equal to all
# values in the received tensor.
self.assertTrue(output_tensor.eq(sender).all())
else:
# Send mode
dist.send(tensor, dst) # recv
dist.send(tensor, dst) # irecv
if profiler_ctx is not None:
backend = dist.get_backend()
if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS:
for event_name in [f"{backend}:send", f"{backend}:recvAnySource"]:
events = get_profiling_event(event_name, prof)
# Each rank sends/recvs from other rank twice.
self.assertEqual(
sum(event.count for event in events),
2 * (dist.get_world_size() - 1),
)
for event in events:
self.assertTrue(event.is_async)
self.assertEqual(event.input_shapes, [[send_recv_size] * 3])
# Each rank would have 2 * (world_size - 1) sends, verify that
# globally we receive the same amount on the other end.
recv_ranks_tensor = torch.cat(
(torch.tensor(recv_ranks), torch.tensor(irecv_ranks)), 0
)
global_recv_ranks = [
torch.empty_like(recv_ranks_tensor)
for _ in range(dist.get_world_size())
]
dist.all_gather(global_recv_ranks, recv_ranks_tensor)
global_recv_ranks_list = []
for tensor in global_recv_ranks:
global_recv_ranks_list += tensor.tolist()
from itertools import groupby
global_recv_ranks_list.sort()
frequency = [
len(list(group)) for key, group in groupby(global_recv_ranks_list)
]
self.assertEqual(dist.get_world_size(), len(frequency))
self.assertEqual(
[2 * (dist.get_world_size() - 1)] * dist.get_world_size(), frequency
)
self._barrier()
    @skip_but_pass_in_sandcastle_if(
        BACKEND in DistTestCases.skip_collective["sendrecv anysource"],
        f"{BACKEND} does not support send/recv from any source",
    )
    def test_send_recv_any_source(self):
        """recv-from-any-source without any profiler."""
        self._test_send_recv_any_source(profiler_ctx=None)
    @skip_but_pass_in_sandcastle_if(
        BACKEND in DistTestCases.skip_collective["sendrecv anysource"],
        f"{BACKEND} does not support send/recv from any source",
    )
    def test_send_recv_any_source_autograd_profiler(self):
        """recv-from-any-source under the legacy autograd profiler."""
        autograd_profiler_ctx = _create_autograd_profiler()
        self._test_send_recv_any_source(profiler_ctx=autograd_profiler_ctx)
    @skip_but_pass_in_sandcastle_if(
        BACKEND in DistTestCases.skip_collective["sendrecv anysource"],
        f"{BACKEND} does not support send/recv from any source",
    )
    @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang")
    @skip_but_pass_in_sandcastle_if(
        IS_MACOS or IS_WINDOWS,
        "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124",
    )
    def test_send_recv_any_source_torch_profiler(self):
        """recv-from-any-source under the torch.profiler (Kineto) profiler."""
        torch_profiler_ctx = _create_torch_profiler()
        return self._test_send_recv_any_source(profiler_ctx=torch_profiler_ctx)
    # SEND RECV WITH TAG
    def _test_send_recv_with_tag(self, profiler_ctx):
        """send/recv with explicit tags: each sender tags with its own rank and
        each receiver requests that tag, so messages are matched by tag;
        optionally verifies profiler events when a profiler context is given.
        """
        rank = dist.get_rank()
        world_size = dist.get_world_size()
        send_recv_size = 10
        tensor = _build_tensor(send_recv_size, value=rank)
        ctx = profiler_ctx if profiler_ctx is not None else nullcontext()
        with ctx as prof:
            for dst in range(world_size):
                if dst == rank:
                    # Recv mode
                    for src in range(world_size):
                        if src == rank:
                            continue
                        output_tensor = _build_tensor(send_recv_size, value=-1)
                        dist.recv(output_tensor, src, tag=src)
                        self.assertTrue(output_tensor.eq(src).all())
                else:
                    # Send mode
                    dist.send(tensor, dst, tag=rank)
        if profiler_ctx is not None:
            backend = dist.get_backend()
            if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS:
                for event_name in [f"{backend}:send", f"{backend}:recv"]:
                    events = get_profiling_event(event_name, prof)
                    # Each rank sends/recvs from all other ranks
                    event_count = sum(e.count for e in events)
                    expected_event_count = dist.get_world_size() - 1
                    self.assertEqual(event_count, expected_event_count)
                    for event in events:
                        self.assertTrue(event.is_async)
                        self.assertEqual(event.name, event_name)
                        self.assertEqual(event.input_shapes, [[send_recv_size] * 3])
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl"
    )
    def test_send_recv_with_tag(self):
        """Tagged send/recv without any profiler."""
        self._test_send_recv_with_tag(profiler_ctx=None)
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl"
    )
    def test_send_recv_with_tag_autograd_profiler(self):
        """Tagged send/recv under the legacy autograd profiler."""
        autograd_profiler_ctx = _create_autograd_profiler()
        return self._test_send_recv_with_tag(profiler_ctx=autograd_profiler_ctx)
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl"
    )
    @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang")
    @skip_but_pass_in_sandcastle_if(
        IS_MACOS or IS_WINDOWS,
        "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124",
    )
    def test_send_recv_with_tag_torch_profiler(self):
        """Tagged send/recv under the torch.profiler (Kineto) profiler."""
        torch_profiler_ctx = _create_torch_profiler()
        return self._test_send_recv_with_tag(profiler_ctx=torch_profiler_ctx)
    # ISEND
    def _test_isend(self, profiler_ctx):
        """Non-blocking isend from rank 0 to every other rank, matched by
        blocking recv on the receivers; optionally verifies profiler events
        when a profiler context is supplied.
        """
        rank = dist.get_rank()
        world_size = dist.get_world_size()
        ctx = profiler_ctx if profiler_ctx is not None else nullcontext()
        with ctx as prof:
            if rank == 0:
                requests = [
                    dist.isend(_build_tensor(dest, 10), dest)
                    for dest in range(1, world_size)
                ]
                for request in requests:
                    request.wait()
                    self.assertTrue(request.is_completed())
            else:
                tensor = _build_tensor(rank, -1)
                dist.recv(tensor, 0)
                self.assertEqual(tensor, _build_tensor(rank, 10))
            self._barrier()
        if profiler_ctx is not None:
            backend = dist.get_backend()
            if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS:
                expected_event_name = (
                    f"{backend}:send" if rank == 0 else f"{backend}:recv"
                )
                events = get_profiling_event(expected_event_name, prof)
                event_count = sum(e.count for e in events)
                # Rank 0 sends world_size-1 times; every other rank receives once.
                expected_count = dist.get_world_size() - 1 if rank == 0 else 1
                self.assertEqual(expected_count, event_count)
                # Event ordering is not guaranteed, so simply ensure the shapes are
                # found in the following map.
                expected_shapes = {
                    r: [[r] * 3] for r in range(1, dist.get_world_size())
                }
                for event in events:
                    self.assertTrue(event.is_async)
                    self.assertEqual(event.name, expected_event_name)
                    if rank == 0:
                        self.assertTrue(
                            event.input_shapes in expected_shapes.values()
                        )
                    else:
                        self.assertEqual(event.input_shapes, expected_shapes[rank])
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support isend"
)
def test_isend(self):
self._test_isend(profiler_ctx=None)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support isend"
)
def test_isend_autograd_profiler(self):
autograd_profiler_ctx = _create_autograd_profiler()
self._test_isend(profiler_ctx=autograd_profiler_ctx)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support isend"
)
@skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang")
@skip_but_pass_in_sandcastle_if(
IS_MACOS or IS_WINDOWS,
"torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124",
)
def test_isend_torch_profiler(self):
torch_profiler_ctx = _create_torch_profiler()
self._test_isend(profiler_ctx=torch_profiler_ctx)
# IRECV
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support irecv"
)
def test_irecv(self):
rank = dist.get_rank()
world_size = dist.get_world_size()
if rank == 0:
expected_tensors = [
_build_tensor(src, -1) for src in range(1, world_size)
]
requests = [
dist.irecv(expected_tensors[src - 1], src)
for src in range(1, world_size)
]
for src in range(1, world_size):
requests[src - 1].wait()
self.assertTrue(requests[src - 1].is_completed())
self.assertEqual(expected_tensors[src - 1], _build_tensor(src, 10))
else:
tensor = _build_tensor(rank, 10)
dist.send(tensor, 0)
self._barrier()
    # BROADCAST
    def _test_broadcast_helper(
        self,
        group,
        group_id,
        rank,
        cuda=False,
        rank_to_GPU=None,
        with_options=False,
    ):
        """Broadcast tensors of several dtypes from every rank in ``group``.

        Args:
            group: list of participating ranks; each acts as src in turn.
            group_id: process group to broadcast over.
            rank: this process's rank.
            cuda: move tensors to this rank's first GPU before the op.
            rank_to_GPU: rank -> GPU-id-list map; required when cuda=True.
            with_options: use the low-level ``group_id.broadcast`` with
                BroadcastOptions instead of the ``dist.broadcast`` wrapper.
        """
        for dtype, value, requires_cuda in [
            (torch.float, -1e-10, False),
            (torch.double, -1e-100, False),
            (torch.half, -0.1, True),
            (torch.int8, -2, False),
            (torch.uint8, 129, False),
            (torch.int, -1e5, False),
            (torch.long, -1e15, False),
        ]:
            # half precision is only exercised on GPU runs.
            if requires_cuda and not cuda:
                continue
            for src in group:
                expected_tensor = _build_tensor(src + 1, value, dtype)
                if cuda:
                    expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0])
                if rank == src:
                    if with_options:
                        opts = dist.BroadcastOptions()
                        opts.rootTensor = 0
                        opts.rootRank = src
                        self.call_dist_op(
                            ":broadcast",
                            True,
                            group_id.broadcast,
                            [expected_tensor],
                            opts,
                        )
                    else:
                        self.call_dist_op(
                            ":broadcast",
                            False,
                            dist.broadcast,
                            expected_tensor,
                            src,
                            group_id,
                        )
                else:
                    # Non-src ranks start from a sentinel and must end up with
                    # the broadcast value.
                    tensor = _build_tensor(src + 1, -1, dtype)
                    if cuda:
                        tensor = tensor.cuda(rank_to_GPU[rank][0])
                    if with_options:
                        opts = dist.BroadcastOptions()
                        opts.rootTensor = 0
                        opts.rootRank = src
                        self.call_dist_op(
                            ":broadcast", True, group_id.broadcast, [tensor], opts
                        )
                    else:
                        self.call_dist_op(
                            ":broadcast",
                            False,
                            dist.broadcast,
                            tensor,
                            src,
                            group_id,
                        )
                    self.assertEqual(tensor.size(), expected_tensor.size())
                    self.assertEqual(
                        tensor.ne(expected_tensor).max(), torch.tensor(False)
                    )
        self._barrier()
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
def test_broadcast(self):
group, group_id, rank = self._init_global_test()
self._test_broadcast_helper(group, group_id, rank)
@skip_but_pass_in_sandcastle_if(
BACKEND != "gloo" and BACKEND != "nccl",
"Only Gloo and Nccl backend supports CUDA allReduce",
)
@skip_if_no_gpu
def test_broadcast_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
device_id = rank_to_GPU[rank][0]
torch.cuda.set_device(device_id)
self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU)
@skip_if_small_worldsize
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
def test_broadcast_group(self):
group, group_id, rank = self._init_group_test()
self._test_broadcast_helper(group, group_id, rank)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
def test_broadcast_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_broadcast_helper(group, group_id, rank)
    @skip_but_pass_in_sandcastle_if(
        BACKEND != "nccl",
        "Only NCCL backend supports high priority stream",
    )
    @skip_if_no_gpu
    def test_nccl_high_priority_stream(self):
        """Create a fresh NCCL process group with explicit Options and broadcast.

        A second rendezvous on a new port is used so the new ProcessGroupNCCL
        does not collide with the store of the default group.
        """
        group, _, rank = self._init_global_test()
        rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
        device_id = rank_to_GPU[rank][0]
        torch.cuda.set_device(device_id)
        # Rendezvous on MASTER_PORT + 1 to get a store independent of the
        # default process group's.
        new_port = str(MASTER_PORT + 1)
        os.environ["MASTER_PORT"] = new_port
        gen_iterator = dist.rendezvous("env://", rank, dist.get_world_size())
        store, rank, size = next(gen_iterator)
        store = dist.PrefixStore(new_port, store)
        opts = dist.ProcessGroupNCCL.Options()
        opts.is_high_priority_stream = False
        group_id = dist.ProcessGroupNCCL(store, rank, size, opts)
        # with_options=True routes through group_id.broadcast directly.
        self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU, True)
    # REDUCE
    def _test_reduce_helper(
        self,
        group,
        group_id,
        rank,
        op,
        master_value,
        worker_value,
        expected_value,
        cuda=False,
        rank_to_GPU=None,
    ):
        """Reduce to every src rank in turn and verify the result on src.

        The src rank contributes ``master_value`` and all other ranks
        ``worker_value``; only src checks that the reduced tensor equals
        ``expected_value`` (reduce only defines the result on the root).
        """
        for src in group:
            tensor = _build_tensor(src + 1).fill_(
                master_value if rank == src else worker_value
            )
            if cuda:
                tensor = tensor.cuda(rank_to_GPU[rank][0])
            self.call_dist_op(
                ":reduce",
                False,
                dist.reduce,
                tensor,
                src,
                op,
                group_id,
                tensor_shapes=[tensor.shape],
            )
            if rank == src:
                self.assertEqual(tensor, _build_tensor(src + 1, expected_value))
        self._barrier()
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
@skip_but_pass_in_sandcastle_if(
BACKEND in DistTestCases.skip_collective["reduce"],
f"{BACKEND} does not support reduce",
)
def test_reduce_sum(self):
group, group_id, rank = self._init_global_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only Nccl supports CUDA reduce"
)
@skip_but_pass_in_sandcastle_if(
BACKEND in DistTestCases.skip_collective["reduce"],
f"{BACKEND} does not support reduce",
)
@skip_if_no_gpu
def test_reduce_sum_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
device_id = rank_to_GPU[rank][0]
torch.cuda.set_device(device_id)
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + 10 * (len(group) - 1),
True,
rank_to_GPU,
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
@skip_but_pass_in_sandcastle_if(
BACKEND in DistTestCases.skip_collective["reduce"],
f"{BACKEND} does not support reduce",
)
def test_reduce_product(self):
group, group_id, rank = self._init_global_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce(operator.mul, [10] * (len(group) - 1), 2),
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
@skip_but_pass_in_sandcastle_if(
BACKEND in DistTestCases.skip_collective["reduce"],
f"{BACKEND} does not support reduce",
)
def test_reduce_min(self):
group, group_id, rank = self._init_global_test()
self._test_reduce_helper(
group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
@skip_but_pass_in_sandcastle_if(
BACKEND in DistTestCases.skip_collective["reduce"],
f"{BACKEND} does not support reduce",
)
def test_reduce_max(self):
group, group_id, rank = self._init_global_test()
self._test_reduce_helper(
group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
@skip_but_pass_in_sandcastle_if(
BACKEND in DistTestCases.skip_collective["reduce"],
f"{BACKEND} does not support reduce",
)
@skip_if_small_worldsize
def test_reduce_group_sum(self):
group, group_id, rank = self._init_group_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
@skip_but_pass_in_sandcastle_if(
BACKEND in DistTestCases.skip_collective["reduce"],
f"{BACKEND} does not support reduce",
)
@skip_if_small_worldsize
def test_reduce_group_product(self):
group, group_id, rank = self._init_group_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce(operator.mul, [10] * (len(group) - 1), 2),
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
@skip_but_pass_in_sandcastle_if(
BACKEND in DistTestCases.skip_collective["reduce"],
f"{BACKEND} does not support reduce",
)
@skip_if_small_worldsize
def test_reduce_group_min(self):
group, group_id, rank = self._init_group_test()
self._test_reduce_helper(
group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
@skip_but_pass_in_sandcastle_if(
BACKEND in DistTestCases.skip_collective["reduce"],
f"{BACKEND} does not support reduce",
)
@skip_if_small_worldsize
def test_reduce_group_max(self):
group, group_id, rank = self._init_group_test()
self._test_reduce_helper(
group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
@skip_but_pass_in_sandcastle_if(
BACKEND in DistTestCases.skip_collective["reduce"],
f"{BACKEND} does not support reduce",
)
def test_reduce_full_group_sum(self):
group, group_id, rank = self._init_full_group_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
@skip_but_pass_in_sandcastle_if(
BACKEND in DistTestCases.skip_collective["reduce"],
f"{BACKEND} does not support reduce",
)
def test_reduce_full_group_product(self):
group, group_id, rank = self._init_full_group_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce(operator.mul, [10] * (len(group) - 1), 2),
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
@skip_but_pass_in_sandcastle_if(
BACKEND in DistTestCases.skip_collective["reduce"],
f"{BACKEND} does not support reduce",
)
def test_reduce_full_group_min(self):
group, group_id, rank = self._init_full_group_test()
self._test_reduce_helper(
group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
@skip_but_pass_in_sandcastle_if(
BACKEND in DistTestCases.skip_collective["reduce"],
f"{BACKEND} does not support reduce",
)
def test_reduce_full_group_max(self):
group, group_id, rank = self._init_full_group_test()
self._test_reduce_helper(
group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10
)
    # REDUCE TWICE
    def _test_reduce_twice_helper(
        self,
        group,
        group_id,
        rank,
        op,
        master_value,
        worker_value,
        expected_value,
        cuda=False,
        rank_to_GPU=None,
    ):
        """Issue two reduce calls inside one profiling context and verify both.

        Uses ``call_dist_op``'s ``secondary_op_call`` hook so both reductions
        land in the same profiler trace; only the src rank checks the results.
        """
        for src in group:
            tensors = [
                _build_tensor(src + 1).fill_(
                    master_value if rank == src else worker_value
                )
                for i in range(2)
            ]
            if cuda:
                for i in range(2):
                    tensors[i] = tensors[i].cuda(rank_to_GPU[rank][0])
            self.call_dist_op(
                ":reduce",
                False,
                dist.reduce,
                tensors[0],
                src,
                op,
                group_id,
                secondary_op_call=lambda: dist.reduce(
                    tensors[1], src, op, group_id
                ),
                tensor_shapes=[tensors[0].shape],
            )
            if rank == src:
                for tensor in tensors:
                    self.assertEqual(tensor, _build_tensor(src + 1, expected_value))
        self._barrier()
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
@skip_but_pass_in_sandcastle_if(
BACKEND in DistTestCases.skip_collective["reduce"],
f"{BACKEND} does not support reduce",
)
def test_reduce_sum_twice(self):
group, group_id, rank = self._init_global_test()
self._test_reduce_twice_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only Nccl supports CUDA reduce"
)
@skip_but_pass_in_sandcastle_if(
BACKEND in DistTestCases.skip_collective["reduce"],
f"{BACKEND} does not support reduce",
)
@skip_if_no_gpu
def test_reduce_sum_cuda_twice(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
device_id = rank_to_GPU[rank][0]
torch.cuda.set_device(device_id)
self._test_reduce_twice_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + 10 * (len(group) - 1),
True,
rank_to_GPU,
)
    @skip_but_pass_in_sandcastle_if(
        BACKEND != "nccl", "Only Nccl supports reduce_scatter_v"
    )
    @skip_but_pass_in_sandcastle_if(
        BACKEND in DistTestCases.skip_collective["reduce"],
        f"{BACKEND} does not support reduce",
    )
    @skip_if_no_gpu
    def test_reduce_scatter_v_cuda(self):
        """reduce_scatter with uneven split sizes (rank r receives r+1 rows).

        Each rank fills its own destination slice with the master value and
        everything else with the worker value, so every output slice reduces
        to master + worker * (world - 1). Runs both async and sync variants.
        """
        self._barrier()
        group, group_id, rank = self._init_global_test()
        rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
        device_id = rank_to_GPU[rank][0]
        # Uneven split: rank r's output slice has r+1 rows.
        input_split_sizes = [src + 1 for src in group]
        start_len = sum(input_split_sizes[:rank])
        end_len = start_len + input_split_sizes[rank]
        sum_len = sum(input_split_sizes)
        master_value = 2
        worker_value = 10
        for async_val in [True, False]:
            tensor = _build_tensor(sum_len, worker_value, device_id=device_id)
            tensor[start_len:end_len].fill_(master_value)
            out_tensor = (
                torch.empty(
                    input_split_sizes[rank], sum_len, sum_len, dtype=torch.float
                )
                .fill_(-1)
                .cuda(device_id)
            )
            req = dist.reduce_scatter(
                out_tensor,
                list(torch.split(tensor, input_split_sizes)),
                dist.ReduceOp.SUM,
                group_id,
                async_val,
            )
            if async_val:
                req.wait()
            expected_value = 2 + (10 * (len(group) - 1))
            expected_tensor = torch.empty(
                input_split_sizes[rank], sum_len, sum_len, dtype=torch.float
            )
            expected_tensor = expected_tensor.fill_(expected_value).cuda(device_id)
            self.assertEqual(out_tensor, expected_tensor)
        self._barrier()
    # Test reduce_scatter_tensor accepting single tensor as input
    def _reduce_scatter_tensor_helper(
        self, tensor_out, tensor_in, group_id, rank, cuda=True, rank_to_GPU=None
    ):
        """Run reduce_scatter_tensor(SUM) on a single input tensor.

        Returns the (possibly GPU-moved) output tensor; the caller is
        responsible for checking its contents.
        """
        if cuda:
            tensor_in = tensor_in.cuda(rank_to_GPU[rank][0])
            tensor_out = tensor_out.cuda(rank_to_GPU[rank][0])
        tensor_shapes = [tensor_out.shape]
        self.call_dist_op(
            ":reduce_scatter_tensor",
            False,
            dist.reduce_scatter_tensor,
            tensor_out,
            tensor_in,
            dist.ReduceOp.SUM,
            group_id,
            False,
            expect_event=False,
            tensor_shapes=tensor_shapes,
        )
        return tensor_out
    @skip_but_pass_in_sandcastle_if(
        BACKEND != "nccl", "Only Nccl supports CUDA reduce_scatter_tensor"
    )
    @skip_if_no_gpu
    def test_reduce_scatter_tensor_cuda(self):
        """reduce_scatter_tensor with both concatenated and stacked inputs.

        Every rank supplies arange(world * size); rank r's output chunk is
        therefore arange(r*size, (r+1)*size) * world for both input layouts.
        """
        group, group_id, rank = self._init_global_test()
        rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
        size = 2
        tensor_out = torch.zeros(size, dtype=torch.int64)
        # Concatenated input
        tensor_in = torch.arange(len(group) * size)
        tensor_out = self._reduce_scatter_tensor_helper(
            tensor_out, tensor_in, group_id, rank, True, rank_to_GPU
        )
        # Check result
        expected_tensor = torch.arange(rank * size, (rank + 1) * size) * len(group)
        self.assertEqual(tensor_out, expected_tensor)
        self._barrier()
        # Stacked input
        tensor_in = torch.reshape(tensor_in, (len(group), size))
        tensor_out = self._reduce_scatter_tensor_helper(
            tensor_out, tensor_in, group_id, rank, True, rank_to_GPU
        )
        # Check result
        # Should be the same as the result in concatenated case
        self.assertEqual(tensor_out, expected_tensor)
        self._barrier()
    def call_dist_op(
        self,
        profiling_title_postfix,
        is_async,
        op,
        *args,
        expect_event=True,
        secondary_op_call=None,
        profile_cuda=False,
        tensor_shapes=None,
        **kwargs,
    ):
        """Invoke a collective under the autograd profiler and validate events.

        Args:
            profiling_title_postfix: suffix (e.g. ":all_reduce") appended to
                the backend name when looking up profiler events.
            is_async: wait on the work object(s) returned by the call(s).
            op: the collective; invoked as ``op(*args, **kwargs)``.
            expect_event: check profiler events on supported backends.
            secondary_op_call: optional extra zero-arg call issued inside the
                same profiling context (e.g. a second reduce).
            profile_cuda: enable CUDA timing in the autograd profiler.
            tensor_shapes: expected event input shapes, if any.
        """
        op_calls = [lambda: op(*args, **kwargs)]
        if secondary_op_call is not None:
            op_calls.append(secondary_op_call)
        autograd_profiler_ctx = torch.autograd.profiler.profile(
            use_cuda=profile_cuda, record_shapes=True
        )
        # TODO: move this test to use torch.profiler once kineto issues are
        # fixed internally.
        with autograd_profiler_ctx:
            works = [op_call() for op_call in op_calls]
            if is_async:
                for work in works:
                    work.wait()
        if expect_event and dist.get_backend() in PROFILING_SUPPORTED_BACKENDS:
            # We are only interested in the backend's implementation not the dispatcher wrapper.
            events = get_profiling_event(
                dist.get_backend() + profiling_title_postfix, autograd_profiler_ctx
            )
            # DETAIL debug mode can use a pg wrapper that issues more collectives
            # under the hood
            if dist.get_debug_level() != dist.DebugLevel.DETAIL:
                self.assertEqual(len(events), len(op_calls))
            for e in events:
                self.assertTrue(e.is_async)
                self.assertEqual(e.count, 1)
                self.assertGreaterEqual(e.cpu_time, 0)
                # Verify tensor shapes if given
                # DETAIL debug mode can use a pg wrapper that issues more collectives
                # under the hood
                if (
                    tensor_shapes is not None
                    and dist.get_debug_level() != dist.DebugLevel.DETAIL
                ):
                    self.assertEqual(
                        e.input_shapes,
                        tensor_shapes,
                        f"event shape: {e.input_shapes} vs tensor {tensor_shapes}",
                    )
    # ALL REDUCE
    def _test_all_reduce_helper(
        self,
        group,
        group_id,
        rank,
        op,
        master_value,
        worker_value,
        expected_value,
        cuda=False,
        rank_to_GPU=None,
        dtype=torch.float,
        async_op=False,
    ):
        """Run all_reduce from every src rank; profiler events are validated.

        The src rank contributes ``master_value`` and the others
        ``worker_value``. Note the reduced tensor value itself is not
        re-checked here — verification happens through call_dist_op's
        profiler-event assertions (``expected_value`` documents the intended
        result for callers).
        """
        for src in group:
            curr_value = master_value if rank == src else worker_value
            tensor = _build_tensor(src + 1, dtype=dtype).fill_(curr_value)
            if cuda:
                tensor = tensor.cuda(rank_to_GPU[rank][0])
            # Complex tensors are profiled through their real view.
            if tensor.dtype == torch.complex64:
                tensor_shapes = [torch.view_as_real(tensor).shape]
            else:
                tensor_shapes = [tensor.shape]
            self.call_dist_op(
                ":all_reduce",
                async_op,
                dist.all_reduce,
                tensor,
                op,
                group_id,
                async_op=async_op,
                tensor_shapes=tensor_shapes,
            )
            # Currently, only Gloo backend has profiling tested with CUDA enabled.
            # Only run cuda profiling test for one rank to speed up since
            # running with different src_rank does not affect the correctness.
            if (
                src == 0
                and cuda
                and dist.get_backend() in CUDA_PROFILING_SUPPORTED_BACKENDS
            ):
                self.call_dist_op(
                    ":all_reduce",
                    async_op,
                    dist.all_reduce,
                    tensor,
                    op,
                    group_id,
                    async_op=async_op,
                    profile_cuda=True,
                    tensor_shapes=tensor_shapes,
                )
        self._barrier()
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
def test_all_reduce_sum(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
def test_all_reduce_sum_async(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
async_op=True,
)
@skip_but_pass_in_sandcastle_if(
BACKEND != "gloo" and BACKEND != "nccl",
"Only Gloo and NCCL backends will have CUDA allReduce tested",
)
@skip_if_no_gpu
def test_all_reduce_sum_cuda(self):
torch.cuda.set_device(self.rank)
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
True,
rank_to_GPU,
)
@skip_but_pass_in_sandcastle_if(
BACKEND != "gloo" and BACKEND != "nccl",
"Only Gloo and NCCL backends will have CUDA allReduce tested",
)
@skip_if_no_gpu
def test_all_reduce_sum_cuda_async(self):
torch.cuda.set_device(self.rank)
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
True,
rank_to_GPU,
async_op=True,
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
def test_all_reduce_sum_complex(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
complex(2, 3),
complex(10, 11),
complex(2, 3) + (complex(10, 11) * (len(group) - 1)),
dtype=torch.cfloat,
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
def test_all_reduce_complex_unsupported_ops(self):
unsupported_ops = [
dist.ReduceOp.MAX,
dist.ReduceOp.MIN,
dist.ReduceOp.PRODUCT,
dist.ReduceOp.BAND,
dist.ReduceOp.BOR,
dist.ReduceOp.BXOR,
]
_group, group_id, _rank = self._init_global_test()
for unsupported_op in unsupported_ops:
with self.assertRaisesRegex(ValueError, "all_reduce does not support"):
dist.all_reduce(
_build_tensor(1, dtype=torch.cfloat), unsupported_op, group_id
)
@skip_but_pass_in_sandcastle_if(
BACKEND != "gloo" and BACKEND != "nccl",
"Only Gloo and NCCL backends will have CUDA allReduce tested",
)
@skip_if_no_gpu
def test_all_reduce_sum_cuda_complex(self):
torch.cuda.set_device(self.rank)
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
complex(2, 3),
complex(10, 11),
complex(2, 3) + (complex(10, 11) * (len(group) - 1)),
True,
rank_to_GPU,
dtype=torch.cfloat,
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
def test_all_reduce_product(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce(operator.mul, [10] * (len(group) - 1), 2),
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
def test_all_reduce_min(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
def test_all_reduce_max(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10
)
@skip_if_small_worldsize
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
def test_all_reduce_group_sum(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@skip_if_small_worldsize
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
def test_all_reduce_group_product(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce(operator.mul, [10] * (len(group) - 1), 2),
)
@skip_if_small_worldsize
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
def test_all_reduce_group_min(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1
)
@skip_if_small_worldsize
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
def test_all_reduce_group_max(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
def test_all_reduce_full_group_sum(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
def test_all_reduce_full_group_product(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce(operator.mul, [10] * (len(group) - 1), 2),
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
def test_all_reduce_full_group_min(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
def test_all_reduce_full_group_max(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10
)
# SPARSE ALL REDUCE
def _test_sparse_all_reduce_sum(self, fn):
_group, group_id, rank = self._init_global_test()
tests = simple_sparse_reduce_tests(
rank, dist.get_world_size(), num_inputs=1
)
for inputs, outputs in tests:
tensors = [fn(input) for input in inputs]
dist.all_reduce(tensors[0], dist.ReduceOp.SUM, group_id)
self.assertEqual(tensors[0], outputs[0])
@skip_but_pass_in_sandcastle_if(
BACKEND != "gloo", "Only Gloo backend support sparse all reduce"
)
def test_sparse_all_reduce_sum(self):
self._test_sparse_all_reduce_sum(lambda t: t)
@skip_but_pass_in_sandcastle_if(
BACKEND != "gloo", "Only Gloo backend support sparse all reduce"
)
@skip_if_no_gpu
def test_sparse_all_reduce_sum_cuda(self):
self._test_sparse_all_reduce_sum(lambda t: t.clone().cuda())
# ALL REDUCE - COALESCED
@staticmethod
def _all_reduce_coalesced_sum_test_cases(group_size):
return (
[2, 3, complex(2, 3)],
[10, 11, complex(10, 11)],
[
2 + 10 * (group_size - 1),
3 + 11 * (group_size - 1),
complex(2, 3) + complex(10, 11) * (group_size - 1),
],
[torch.float, torch.float, torch.cfloat],
)
@staticmethod
def _all_reduce_coalesced_product_test_cases(group_size):
return (
[1, 2],
[3, 4],
[1 * 3 ** (group_size - 1), 2 * 4 ** (group_size - 1)],
[torch.float, torch.float],
)
@staticmethod
def _all_reduce_coalesced_min_test_cases(group_size):
return (
[1, 4],
[2, 3],
[1, 3],
[torch.float, torch.float],
)
@staticmethod
def _all_reduce_coalesced_max_test_cases(group_size):
return (
[1, 4],
[2, 3],
[2, 4],
[torch.float, torch.float],
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
def test_all_reduce_coalesced_max_complex_unsupported(self):
_group, group_id, _rank = self._init_global_test()
with self.assertRaisesRegex(ValueError, "all_reduce does not support"):
dist.all_reduce_coalesced(
[_build_tensor(1, dtype=torch.cfloat)], dist.ReduceOp.MAX, group_id
)
    def _test_all_reduce_coalesced_helper(
        self,
        group,
        group_id,
        rank,
        op,
        cuda=False,
        rank_to_GPU=None,
    ):
        """Coalesced all_reduce of multiple tensors, one src rank at a time.

        Input/expected values come from the per-op ``_all_reduce_coalesced_*``
        static test-case generators; every rank checks the reduced tensors.
        """
        test_case_func = {
            dist.ReduceOp.SUM: self._all_reduce_coalesced_sum_test_cases,
            dist.ReduceOp.PRODUCT: self._all_reduce_coalesced_product_test_cases,
            dist.ReduceOp.MIN: self._all_reduce_coalesced_min_test_cases,
            dist.ReduceOp.MAX: self._all_reduce_coalesced_max_test_cases,
        }[op]
        master_values, worker_values, expected_values, dtypes = test_case_func(
            len(group)
        )
        for src in group:
            curr_values = master_values if rank == src else worker_values
            tensors = [
                _build_tensor(src + 1, val, dtype=dtype)
                for dtype, val in zip(dtypes, curr_values, strict=True)
            ]
            if cuda:
                tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors]
            # Complex tensors are profiled through their real view.
            tensor_shapes = []
            for tensor in tensors:
                if tensor.dtype == torch.complex64:
                    tensor_shapes.append(torch.view_as_real(tensor).shape)
                else:
                    tensor_shapes.append(tensor.shape)
            self.call_dist_op(
                ":all_reduce",
                False,
                dist.all_reduce_coalesced,
                tensors,
                op,
                group_id,
                tensor_shapes=tensor_shapes,
            )
            expected_tensors = [
                _build_tensor(src + 1, expected_value, dtype=dtype)
                for dtype, expected_value in zip(
                    dtypes, expected_values, strict=True
                )
            ]
            self.assertEqual(tensors, expected_tensors)
        self._barrier()
@require_backend_is_available({"gloo"})
def test_all_reduce_coalesced_sum(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
cuda=False,
rank_to_GPU=None,
)
@require_backend_is_available({"gloo"})
def test_all_reduce_coalesced_product(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
cuda=False,
rank_to_GPU=None,
)
@require_backend_is_available({"gloo"})
def test_all_reduce_coalesced_min(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.MIN,
cuda=False,
rank_to_GPU=None,
)
@require_backend_is_available({"gloo"})
def test_all_reduce_coalesced_max(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_coalesced_helper(
group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None
)
@skip_if_small_worldsize
@require_backend_is_available({"gloo"})
def test_all_reduce_coalesced_group_sum(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_coalesced_helper(
group, group_id, rank, dist.ReduceOp.SUM, cuda=False, rank_to_GPU=None
)
@skip_if_small_worldsize
@require_backend_is_available({"gloo"})
def test_all_reduce_coalesced_group_product(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
cuda=False,
rank_to_GPU=None,
)
@skip_if_small_worldsize
@require_backend_is_available({"gloo"})
def test_all_reduce_coalesced_group_min(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_coalesced_helper(
group, group_id, rank, dist.ReduceOp.MIN, cuda=False, rank_to_GPU=None
)
@skip_if_small_worldsize
@require_backend_is_available({"gloo"})
def test_all_reduce_coalesced_group_max(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_coalesced_helper(
group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None
)
@require_backend_is_available({"gloo"})
def test_all_reduce_coalesced_full_group_sum(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_coalesced_helper(
group, group_id, rank, dist.ReduceOp.SUM, cuda=False, rank_to_GPU=None
)
@require_backend_is_available({"gloo"})
def test_all_reduce_coalesced_full_group_product(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
cuda=False,
rank_to_GPU=None,
)
@require_backend_is_available({"gloo"})
def test_all_reduce_coalesced_full_group_min(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.MIN,
cuda=False,
rank_to_GPU=None,
)
@require_backend_is_available({"gloo"})
def test_all_reduce_coalesced_full_group_max(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_coalesced_helper(
group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None
)
    # SCATTER
    def _test_scatter_helper(
        self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float
    ):
        """Scatter from every dest rank in turn and verify the received chunk.

        The dest rank supplies one tensor per rank filled with that rank's
        index, so every rank should receive a tensor filled with its own rank.
        """
        for dest in group:
            tensor = _build_tensor(dest + 1, -1, dtype=dtype)
            expected_tensor = _build_tensor(dest + 1, rank, dtype=dtype)
            # Only the scattering rank provides the scatter_list.
            tensors = (
                [_build_tensor(dest + 1, i, dtype=dtype) for i in group]
                if rank == dest
                else []
            )
            if cuda:
                tensor = tensor.cuda(rank_to_GPU[rank][0])
                tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors]
            # Complex tensors are profiled through their real view.
            if dtype == torch.complex64:
                tensor_shapes = [torch.view_as_real(t).shape for t in tensors]
            else:
                tensor_shapes = [t.shape for t in tensors]
            self.call_dist_op(
                ":scatter",
                False,
                dist.scatter,
                tensor,
                src=dest,
                scatter_list=tensors,
                group=group_id,
                expect_event=False,
                tensor_shapes=tensor_shapes,
            )
            self.assertEqual(tensor, expected_tensor)
        self._barrier()
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "nccl", "Nccl does not support CPU tensors"
    )
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "ucc", "CPU tensor ops not supported by UCP TL"
    )
    def test_scatter_checks(self):
        """Scatter argument-handling: scatter_list only on src, and default src.

        Both variants must deliver rank i its tensor ``one * i``.
        """
        group, _group_id, rank = self._init_global_test()
        one = torch.ones([1])
        # Specify scatter_list argument only on source rank.
        output = one.clone() * -1
        if rank == 0:
            scatter_list = [one.clone() * i for i in group]
            dist.scatter(output, src=0, scatter_list=scatter_list)
        else:
            dist.scatter(output, src=0)
        self.assertEqual(output, one * rank)
        # Don't specify src argument.
        output = one.clone() * -1
        if rank == 0:
            scatter_list = [one.clone() * i for i in group]
            dist.scatter(output, scatter_list=scatter_list)
        else:
            dist.scatter(output)
        self.assertEqual(output, one * rank)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "ucc", "CPU tensor ops not supported by UCP TL"
)
def test_scatter(self):
group, group_id, rank = self._init_global_test()
self._test_scatter_helper(group, group_id, rank)
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only Nccl supports CUDA gather"
)
@skip_if_no_gpu
def test_scatter_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_scatter_helper(group, group_id, rank, True, rank_to_GPU)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "ucc", "CPU tensor ops not supported by UCP TL"
)
def test_scatter_complex(self):
group, group_id, rank = self._init_global_test()
self._test_scatter_helper(group, group_id, rank, dtype=torch.cfloat)
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only Nccl supports CUDA gather"
)
@skip_if_no_gpu
def test_scatter_cuda_complex(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_scatter_helper(
group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "ucc", "CPU tensor ops not supported by UCP TL"
)
@skip_if_small_worldsize
def test_scatter_group(self):
group, group_id, rank = self._init_group_test()
self._test_scatter_helper(group, group_id, rank)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "ucc", "CPU tensor ops not supported by UCP TL"
)
def test_scatter_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_scatter_helper(group, group_id, rank)
# GATHER
def _test_gather_helper(
self, group, group_id, rank, cuda=False, rank_to_GPU=None
):
for dest in group:
tensor = _build_tensor(dest + 1, rank)
tensors = (
[_build_tensor(dest + 1, -1) for i in group] if rank == dest else []
)
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors]
self.call_dist_op(
":gather",
False,
dist.gather,
tensor,
dst=dest,
gather_list=tensors,
group=group_id,
expect_event=False,
tensor_shapes=[tensors[0].shape] if len(tensors) > 0 else None,
)
if rank == dest:
expected_tensors = [_build_tensor(dest + 1, i) for i in group]
for t1, t2 in zip(tensors, expected_tensors, strict=True):
self.assertEqual(t1, t2)
self._barrier()
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "ucc", "CPU tensor ops not supported by UCP TL"
)
def test_gather_checks(self):
group, _group_id, rank = self._init_global_test()
one = torch.ones([1])
# Specify gather_list argument only on destination rank.
if rank == 0:
gather_list = [one.clone() for _ in group]
dist.gather(one * rank, dst=0, gather_list=gather_list)
for i in group:
self.assertEqual(gather_list[i], one * i)
else:
dist.gather(one * rank, dst=0)
# Don't specify dst argument.
if rank == 0:
gather_list = [one.clone() for _ in group]
dist.gather(one * rank, gather_list=gather_list)
for i in group:
self.assertEqual(gather_list[i], one * i)
else:
dist.gather(one * rank)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "ucc", "CPU tensor ops not supported by UCP TL"
)
def test_gather(self):
group, group_id, rank = self._init_global_test()
self._test_gather_helper(group, group_id, rank)
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only Nccl supports CUDA gather"
)
@skip_if_no_gpu
def test_gather_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_gather_helper(group, group_id, rank, True, rank_to_GPU)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "ucc", "CPU tensor ops not supported by UCP TL"
)
@skip_if_small_worldsize
def test_gather_group(self):
group, group_id, rank = self._init_group_test()
self._test_gather_helper(group, group_id, rank)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "ucc", "CPU tensor ops not supported by UCP TL"
)
def test_gather_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_gather_helper(group, group_id, rank)
# ALL GATHER
def _test_all_gather_helper(
self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float
):
for dest in group:
tensor = _build_tensor(dest + 1, rank, dtype=dtype)
tensors = [_build_tensor(dest + 1, -1, dtype=dtype) for i in group]
allgather = dist.all_gather
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors]
if tensors[0].dtype == torch.complex64:
tensor_shapes = [torch.view_as_real(tensors[0]).shape]
else:
tensor_shapes = [tensors[0].shape]
self.call_dist_op(
":all_gather",
False,
allgather,
tensors,
tensor,
group_id,
False,
tensor_shapes=tensor_shapes,
)
expected_tensors = [
_build_tensor(dest + 1, i, dtype=dtype) for i in group
]
for t1, t2 in zip(tensors, expected_tensors, strict=True):
self.assertEqual(t1, t2)
self._barrier()
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
def test_all_gather(self):
group, group_id, rank = self._init_global_test()
self._test_all_gather_helper(group, group_id, rank)
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only Nccl supports CUDA all gather"
)
@skip_if_no_gpu
def test_all_gather_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_gather_helper(group, group_id, rank, True, rank_to_GPU)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
def test_all_gather_complex(self):
group, group_id, rank = self._init_global_test()
self._test_all_gather_helper(group, group_id, rank, dtype=torch.cfloat)
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only Nccl supports CUDA all gather"
)
@skip_if_no_gpu
def test_all_gather_cuda_complex(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_gather_helper(
group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat
)
@skip_if_small_worldsize
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
def test_all_gather_group(self):
group, group_id, rank = self._init_group_test()
self._test_all_gather_helper(group, group_id, rank)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl", "Nccl does not support CPU tensors"
)
def test_all_gather_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_gather_helper(group, group_id, rank)
    @skip_but_pass_in_sandcastle_if(
        BACKEND != "nccl", "Only Nccl supports all_gather_v"
    )
    @skip_if_no_gpu
    def test_all_gather_v_cuda(self):
        """Vector all_gather: rank r contributes a chunk of r + 1 rows.

        The flat output tensor is viewed as per-rank chunks via torch.split
        and filled by one all_gather call, exercised in both async and sync
        modes.
        """
        self._barrier()
        group, group_id, rank = self._init_global_test()
        rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
        device_id = rank_to_GPU[rank][0]
        # Unequal split: rank r owns r + 1 rows of the gathered result.
        output_split_sizes = [dst + 1 for dst in group]
        sum_len = sum(output_split_sizes)
        value = 2
        for async_val in [True, False]:
            # Every rank sends a constant-filled chunk of its own split size.
            tensor = (
                torch.empty(
                    output_split_sizes[rank], sum_len, sum_len, dtype=torch.float
                )
                .fill_(value)
                .cuda(device_id)
            )
            out_tensor = _build_tensor(sum_len, -1, device_id=device_id)
            # torch.split yields views, so the gather writes into out_tensor.
            req = dist.all_gather(
                list(torch.split(out_tensor, output_split_sizes)),
                tensor,
                group_id,
                async_val,
            )
            if async_val:
                req.wait()
            expected_value = value
            expected_tensor = _build_tensor(
                sum_len, expected_value, device_id=device_id
            )
            self.assertEqual(out_tensor, expected_tensor)
        self._barrier()
# Test all_gather accepting single tensor as output
def _all_gather_into_tensor_helper(
self, tensor_out, tensor_in, group_id, rank, cuda=True, rank_to_GPU=None
):
if cuda:
tensor_in = tensor_in.cuda(rank_to_GPU[rank][0])
tensor_out = tensor_out.cuda(rank_to_GPU[rank][0])
if tensor_out.dtype == torch.complex64:
tensor_shapes = [torch.view_as_real(tensor_in).shape]
else:
tensor_shapes = [tensor_in.shape]
self.call_dist_op(
":all_gather_into_tensor",
False,
dist.all_gather_into_tensor,
tensor_out,
tensor_in,
group_id,
False,
expect_event=False,
tensor_shapes=tensor_shapes,
)
return tensor_out
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only Nccl supports CUDA all_gather_into_tensor"
)
@skip_if_no_gpu
def test_all_gather_into_cat_tensor_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
size = 2
tensor_in = torch.ones([size, size]) * rank
# Concatenated output
tensor_out = torch.ones([len(group) * size, size]) * (-1)
tensor_out = self._all_gather_into_tensor_helper(
tensor_out, tensor_in, group_id, rank, True, rank_to_GPU
)
# Check result
# Concatenate all blocks into a bigger tensor
expected_tensor = torch.cat([torch.ones([size, size]) * i for i in group])
self.assertEqual(tensor_out, expected_tensor)
self._barrier()
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only Nccl supports CUDA all_gather_into_tensor"
)
@skip_if_no_gpu
def test_all_gather_into_stack_tensor_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
size = 2
tensor_in = torch.ones([size, size]) * rank
# Stacked output
tensor_out = torch.ones([len(group), size, size]) * (-1)
tensor_out = self._all_gather_into_tensor_helper(
tensor_out, tensor_in, group_id, rank, True, rank_to_GPU
)
# Check result
# Stack all blocks into a bigger tensor
expected_tensor = torch.stack([torch.ones([size, size]) * i for i in group])
self.assertEqual(tensor_out, expected_tensor)
self._barrier()
def _run_all_gather_coalesced_and_verify(
self, output_tensor_lists, input_tensors, expected_tensors, group_id
):
"""
Helper that runs all_gather_coalesced and returns true if output
matches expectations.
"""
tensor_shapes = []
for input_tensor in input_tensors:
if input_tensor.dtype == torch.complex64:
tensor_shapes.append(torch.view_as_real(input_tensor).shape)
else:
tensor_shapes.append(input_tensor.shape)
self.call_dist_op(
":all_gather",
False,
dist.all_gather_coalesced,
output_tensor_lists,
input_tensors,
group_id,
tensor_shapes=tensor_shapes,
)
for l1, l2 in zip(output_tensor_lists, expected_tensors, strict=True):
for t1, t2 in zip(l1, l2, strict=True):
if not torch.equal(t1, t2):
return False
return True
    def _test_all_gather_coalesced_helper(
        self, group, group_id, rank, dtype=torch.float
    ):
        """Gather batches of differently-shaped tensors in one coalesced op.

        For each test case, rank r sends tensors shaped [1], [2, 2],
        [3, 3, 3], ... filled with r + tensor_id; every rank must then hold
        all ranks' tensors.
        """
        # TODO: Instead we should probably go through _rank_not_in_group
        # mechanism to disable sending tensors
        if group_id is not None:
            for test_case_id in range(2, 5):
                # Make sure we create tensors of incompatible sizes, e.g.
                # [1], [2x2], [3x3x3] ... to be sent in one batch
                input_tensors = [
                    _build_multidim_tensor(
                        tensor_id, tensor_id, rank + tensor_id, dtype=dtype
                    )
                    for tensor_id in range(1, test_case_id)
                ]
                # One output list per rank, pre-filled with -1 sentinels.
                output_tensor_lists = [
                    [
                        _build_multidim_tensor(
                            tensor_id, tensor_id, -1, dtype=dtype
                        )
                        for tensor_id in range(1, test_case_id)
                    ]
                    for _ in group
                ]
                expected_tensors = [
                    [
                        _build_multidim_tensor(
                            tensor_id, tensor_id, rank_iter + tensor_id, dtype=dtype
                        )
                        for tensor_id in range(1, test_case_id)
                    ]
                    for rank_iter in group
                ]
                assert self._run_all_gather_coalesced_and_verify(
                    output_tensor_lists, input_tensors, expected_tensors, group_id
                ), "output tensors do not match expected outputs"
        self._barrier()
@skip_but_pass_in_sandcastle_if(
BACKEND in DistTestCases.skip_collective["allgather_coalesced"],
f"{BACKEND} does not support all_gather_coalesced",
)
def test_all_gather_coalesced_simple(self):
group, group_id, rank = self._init_global_test()
self._test_all_gather_coalesced_helper(group, group_id, rank)
@skip_but_pass_in_sandcastle_if(
BACKEND in DistTestCases.skip_collective["allgather_coalesced"],
f"{BACKEND} does not support all_gather_coalesced",
)
def test_all_gather_coalesced_complex(self):
group, group_id, rank = self._init_global_test()
self._test_all_gather_coalesced_helper(
group, group_id, rank, dtype=torch.cfloat
)
@skip_if_small_worldsize
@skip_but_pass_in_sandcastle_if(
BACKEND in DistTestCases.skip_collective["allgather_coalesced"],
f"{BACKEND} does not support all_gather_coalesced",
)
def test_all_gather_coalesced_group(self):
group, group_id, rank = self._init_group_test()
self._test_all_gather_coalesced_helper(group, group_id, rank)
@skip_but_pass_in_sandcastle_if(
BACKEND in DistTestCases.skip_collective["allgather_coalesced"],
f"{BACKEND} does not support all_gather_coalesced",
)
def test_all_gather_coalesced_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_gather_coalesced_helper(group, group_id, rank)
    @skip_but_pass_in_sandcastle_if(
        BACKEND in DistTestCases.skip_collective["allgather_coalesced"],
        f"{BACKEND} does not support all_gather_coalesced",
    )
    def test_all_gather_coalesced_with_empty(self):
        """all_gather_coalesced must handle zero-element tensors in the batch.

        Empty tensors are interleaved with non-empty ones to check that the
        coalescing logic does not mis-align offsets around them.
        """
        group, group_id, rank = self._init_global_test()
        input_tensors = [
            rank * torch.ones([2, 2]),
            torch.ones([0]),
            (rank + 1) * torch.ones([3, 3]),
            torch.ones([0]),
            torch.ones([0]),
        ]
        # One output list per rank, pre-filled with -1 sentinels.
        output_tensors_lists = [
            [
                -1 * torch.ones([2, 2]),
                -1 * torch.ones([0]),
                -1 * torch.ones([3, 3]),
                -1 * torch.ones([0]),
                -1 * torch.ones([0]),
            ]
            for _ in group
        ]
        expected_tensors = [
            [
                r * torch.ones([2, 2]),
                torch.ones([0]),
                (r + 1) * torch.ones([3, 3]),
                torch.ones([0]),
                torch.ones([0]),
            ]
            for r in group
        ]
        assert self._run_all_gather_coalesced_and_verify(
            output_tensors_lists, input_tensors, expected_tensors, group_id
        )
        self._barrier()
# AllToAll
def _test_all_to_all_single_equal_split_helper(
self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float
):
if group_id is not None:
size = len(group)
in_tensor = torch.ones([size, size], dtype=dtype) * rank
expected_tensor = torch.cat(
[torch.ones([1, size], dtype=dtype) * i for i in group]
)
out_tensor = torch.ones([size, size], dtype=dtype) * -1
if cuda:
in_tensor = in_tensor.cuda(rank_to_GPU[rank][0])
expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0])
out_tensor = out_tensor.cuda(rank_to_GPU[rank][0])
if dtype == torch.complex64:
tensor_shapes = [torch.view_as_real(in_tensor).shape]
else:
tensor_shapes = [in_tensor.shape]
self.call_dist_op(
":all_to_all",
False,
dist.all_to_all_single,
out_tensor,
in_tensor,
group=group_id,
tensor_shapes=tensor_shapes,
)
self.assertEqual(out_tensor, expected_tensor)
self._barrier()
def _test_all_to_all_single_unequal_split_helper(
self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float
):
if group_id is not None:
size = len(group)
in_splits = [i + 1 for i in group]
out_splits = [rank + 1 for _ in group]
in_tensor = torch.ones([sum(in_splits), size], dtype=dtype) * rank
out_tensor = torch.ones([(rank + 1) * size, size], dtype=dtype)
expected_tensor = torch.cat(
[torch.ones([rank + 1, size], dtype=dtype) * i for i in group]
)
if cuda:
in_tensor = in_tensor.cuda(rank_to_GPU[rank][0])
expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0])
out_tensor = out_tensor.cuda(rank_to_GPU[rank][0])
dist.all_to_all_single(
out_tensor, in_tensor, out_splits, in_splits, group=group_id
)
self.assertEqual(out_tensor, expected_tensor)
self._barrier()
def _test_all_to_all_helper(
self,
group,
group_id,
rank,
cuda=False,
rank_to_GPU=None,
dtype=torch.float,
):
if group_id is not None:
size = len(group)
in_splits = [i + 1 for i in group]
in_tensors = [
torch.ones([in_splits[i], size], dtype=dtype) * rank
for i, _ in enumerate(group)
]
out_tensors = [
torch.ones([(rank + 1), size], dtype=dtype) for _ in group
]
expected_tensors = [
torch.ones([rank + 1, size], dtype=dtype) * i for i in group
]
if cuda:
in_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in in_tensors]
expected_tensors = [
t.cuda(rank_to_GPU[rank][0]) for t in expected_tensors
]
out_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in out_tensors]
dist.all_to_all(out_tensors, in_tensors, group=group_id)
for t1, t2 in zip(out_tensors, expected_tensors, strict=True):
self.assertEqual(t1, t2)
self._barrier()
@skip_but_pass_in_sandcastle_if(
BACKEND != "mpi", "Only MPI supports CPU all_to_all_single"
)
def test_all_to_all_single_equal_split(self):
group, group_id, rank = self._init_global_test()
self._test_all_to_all_single_equal_split_helper(group, group_id, rank)
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single"
)
@skip_if_no_gpu
def test_all_to_all_single_equal_split_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_single_equal_split_helper(
group,
group_id,
rank,
True,
rank_to_GPU,
)
@skip_but_pass_in_sandcastle_if(
BACKEND != "mpi", "Only MPI supports CPU all_to_all_single"
)
def test_all_to_all_single_equal_split_complex(self):
group, group_id, rank = self._init_global_test()
self._test_all_to_all_single_equal_split_helper(
group, group_id, rank, dtype=torch.cfloat
)
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single"
)
@skip_if_no_gpu
def test_all_to_all_single_equal_split_cuda_complex(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_single_equal_split_helper(
group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat
)
@skip_but_pass_in_sandcastle_if(
BACKEND != "mpi", "Only MPI supports CPU all_to_all_single"
)
def test_all_to_all_single_unequal_split(self):
group, group_id, rank = self._init_global_test()
self._test_all_to_all_single_unequal_split_helper(group, group_id, rank)
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single"
)
@skip_if_no_gpu
def test_all_to_all_single_unequal_split_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_single_unequal_split_helper(
group,
group_id,
rank,
True,
rank_to_GPU,
)
@skip_but_pass_in_sandcastle_if(
BACKEND != "mpi", "Only MPI supports CPU all_to_all_single"
)
def test_all_to_all_single_unequal_split_complex(self):
group, group_id, rank = self._init_global_test()
self._test_all_to_all_single_unequal_split_helper(
group, group_id, rank, dtype=torch.cfloat
)
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single"
)
@skip_if_no_gpu
def test_all_to_all_single_unequal_split_cuda_complex(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_single_unequal_split_helper(
group,
group_id,
rank,
True,
rank_to_GPU,
dtype=torch.cfloat,
)
@skip_but_pass_in_sandcastle_if(
BACKEND != "mpi", "Only MPI supports all_to_all"
)
def test_all_to_all(self):
group, group_id, rank = self._init_global_test()
self._test_all_to_all_helper(group, group_id, rank)
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only NCCL supports CUDA all_to_all"
)
@skip_if_rocm_multiprocess
def test_all_to_all_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU)
@skip_but_pass_in_sandcastle_if(
BACKEND != "mpi", "Only MPI supports all_to_all"
)
def test_all_to_all_complex(self):
group, group_id, rank = self._init_global_test()
self._test_all_to_all_helper(group, group_id, rank, dtype=torch.cfloat)
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only NCCL supports CUDA all_to_all"
)
@skip_if_rocm_multiprocess
def test_all_to_all_cuda_complex(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_helper(
group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat
)
@skip_but_pass_in_sandcastle_if(
BACKEND != "mpi", "Only MPI supports CPU all_to_all_single"
)
@skip_if_small_worldsize
def test_all_to_all_single_equal_split_group(self):
group, group_id, rank = self._init_group_test()
self._test_all_to_all_single_equal_split_helper(group, group_id, rank)
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single"
)
@skip_if_no_gpu
@skip_if_small_worldsize
def test_all_to_all_single_equal_split_group_cuda(self):
group, group_id, rank = self._init_group_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_single_equal_split_helper(
group,
group_id,
rank,
True,
rank_to_GPU,
)
@skip_but_pass_in_sandcastle_if(
BACKEND != "mpi", "Only MPI supports CPU all_to_all_single"
)
@skip_if_small_worldsize
def test_all_to_all_single_unequal_split_group(self):
group, group_id, rank = self._init_group_test()
self._test_all_to_all_single_unequal_split_helper(group, group_id, rank)
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single"
)
@skip_if_no_gpu
@skip_if_small_worldsize
def test_all_to_all_single_unequal_split_group_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_single_unequal_split_helper(
group,
group_id,
rank,
True,
rank_to_GPU,
)
@skip_but_pass_in_sandcastle_if(
BACKEND != "mpi", "Only MPI supports all_to_all"
)
@skip_if_small_worldsize
def test_all_to_all_group(self):
group, group_id, rank = self._init_group_test()
self._test_all_to_all_helper(group, group_id, rank)
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single"
)
@skip_if_small_worldsize
@skip_if_rocm_multiprocess
def test_all_to_all_group_cuda(self):
group, group_id, rank = self._init_group_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU)
@skip_but_pass_in_sandcastle_if(
BACKEND != "mpi", "Only MPI supports CPU all_to_all_single"
)
def test_all_to_all_single_equal_split_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_to_all_single_equal_split_helper(group, group_id, rank)
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single"
)
@skip_if_no_gpu
def test_all_to_all_single_equal_split_full_group_cuda(self):
group, group_id, rank = self._init_full_group_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_single_equal_split_helper(
group,
group_id,
rank,
True,
rank_to_GPU,
)
@skip_but_pass_in_sandcastle_if(
BACKEND != "mpi", "Only MPI supports CPU all_to_all_single"
)
def test_all_to_all_single_unequal_split_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_to_all_single_unequal_split_helper(group, group_id, rank)
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single"
)
@skip_if_no_gpu
def test_all_to_all_single_unequal_split_full_group_cuda(self):
group, group_id, rank = self._init_full_group_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_single_unequal_split_helper(
group,
group_id,
rank,
True,
rank_to_GPU,
)
@skip_but_pass_in_sandcastle_if(
BACKEND != "mpi", "Only MPI supports all_to_all"
)
def test_all_to_all_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_to_all_helper(group, group_id, rank)
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only NCCL supports CUDA all_to_all"
)
@skip_if_rocm_multiprocess
def test_all_to_all_full_group_cuda(self):
group, group_id, rank = self._init_full_group_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU)
# BARRIER
    def _test_barrier_helper(
        self, group, group_id, rank, cuda=False, rank_to_GPU=None
    ):
        """Verify dist.barrier blocks until the slowest rank arrives.

        For each ``dest`` in turn: ``dest`` broadcasts a wall-clock deadline,
        sleeps past it, then enters the barrier; the other ranks must not get
        past the barrier before that deadline.
        """
        WAIT_TIME = 0.3  # seconds
        for dest in group:
            expected_time = torch.DoubleTensor(1).fill_(0.0)
            if cuda:
                expected_time = expected_time.cuda(rank_to_GPU[rank][0])
            if dest == rank:
                # This rank is the slow one: publish its deadline, then sleep
                # past it before joining the barrier.
                expected_time.fill_(time.time() + WAIT_TIME)
                dist.broadcast(expected_time, dest, group_id)
                time.sleep(WAIT_TIME + 0.1)  # sleep a little bit longer
                dist.barrier(group_id)
            else:
                dist.broadcast(expected_time, dest, group_id)
                dist.barrier(group_id)
            self.assertGreaterAlmostEqual(
                float(time.time()),
                float(expected_time[0]),
                msg=f"destination rank: {dest:d}, my rank: {rank:d}"
                + " (if you see this failure, please report in #14554)",
            )
        # Use higher timeout for the instance where the test runs
        # against a subgroup and uses a CUDA tensor for expected time.
        # The CUDA initialization for the participating processes can
        # take long enough for the barrier timeout to trigger on the
        # process that doesn't participate in the group.
        self._barrier(timeout=20)
@skip_if_no_gpu
@skip_but_pass_in_sandcastle_if(
BACKEND == "mpi", "MPI doesn't supports GPU barrier"
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally"
)
def test_barrier_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)
@skip_if_small_worldsize
@skip_if_no_gpu
@skip_but_pass_in_sandcastle_if(
BACKEND == "mpi", "MPI doesn't supports GPU barrier"
)
def test_barrier_group_cuda(self):
group, group_id, rank = self._init_group_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)
@skip_if_small_worldsize
@skip_if_no_gpu
@skip_but_pass_in_sandcastle_if(
BACKEND == "mpi", "MPI doesn't supports GPU barrier"
)
def test_barrier_full_group_cuda(self):
group, group_id, rank = self._init_full_group_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)
@skip_but_pass_in_sandcastle_if(
BACKEND in DistTestCases.skip_collective["cpu barrier"],
f"{BACKEND} does not support CPU barrier",
)
def test_barrier(self):
group, group_id, rank = self._init_global_test()
self._test_barrier_helper(group, group_id, rank)
@skip_if_small_worldsize
@skip_but_pass_in_sandcastle_if(
BACKEND in DistTestCases.skip_collective["cpu barrier"],
f"{BACKEND} does not support CPU barrier",
)
def test_barrier_group(self):
group, group_id, rank = self._init_group_test()
self._test_barrier_helper(group, group_id, rank)
@skip_but_pass_in_sandcastle_if(
BACKEND in DistTestCases.skip_collective["cpu barrier"],
f"{BACKEND} does not support CPU barrier",
)
def test_barrier_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_barrier_helper(group, group_id, rank)
def _model_step(self, model):
for param in model.parameters():
if param.grad is not None:
with torch.no_grad():
param += param.grad
param.grad = None
def _model_step_with_zero_grad(self, model):
for param in model.parameters():
if param.grad is not None:
with torch.no_grad():
param += param.grad
param.grad.requires_grad_(False)
param.grad.zero_()
def _prepare_dummy_data(self, local_bs):
# global_bs for DDP should be divisible by WORLD_SIZE
world_size = int(os.environ["WORLD_SIZE"])
global_bs = world_size * local_bs
input_cpu = torch.randn(global_bs, 2)
target = torch.randn(global_bs, 4)
loss = nn.MSELoss()
return global_bs, input_cpu, target, loss
# END TO END TEST FOR DISTRIBUTEDDATAPARALLEL
def _test_DDP_helper(
self, model, input_var, target, loss, scale_factor=1.0, memory_format=None
):
model.train()
output = model(input_var)
l = loss(output, target) * scale_factor
l.backward()
if memory_format is not None:
self.assertTrue(output.is_contiguous(memory_format=memory_format))
def _assert_equal_param(self, param_gpu, param_DDP):
self.assertEqual(len(param_gpu), len(param_DDP))
for p_gpu, p_DDP in zip(param_gpu, param_DDP, strict=True):
self.assertEqual(p_gpu, p_DDP)
    def _test_DDP_niter(
        self,
        model_base,
        model_DDP,
        input,
        target,
        loss,
        local_bs,
        rank,
        batch_size,
        test_save,
        offset=None,
        world_size=0,
        zero_grad=False,
        memory_format=None,
        n_iter=5,
    ):
        """Train a plain model and its DDP copy in lockstep for n_iter steps.

        After every step the DDP module's parameters must equal the plain
        model's; the DDP loss is scaled so gradient averaging across ranks
        matches single-process training. Optionally round-trips the DDP
        model through torch.save/torch.load mid-run (iteration 2) and at
        the end.

        NOTE: ``input`` shadows the builtin; kept for signature stability.
        """
        for idx in range(n_iter):
            # single cpu/gpu training
            self._test_DDP_helper(
                model_base, input, target, loss, memory_format=memory_format
            )
            if offset is None:
                offset = rank * local_bs
            # DDP training, DDP scatters subsets of input_cpu to nodes/GPUs
            self._test_DDP_helper(
                model_DDP,
                input[offset : offset + local_bs],
                target[offset : offset + local_bs],
                loss,
                world_size * local_bs / batch_size if world_size != 0 else 1,
                memory_format=memory_format,
            )
            # Update weights and run a second iteration to shake out errors
            if zero_grad:
                self._model_step_with_zero_grad(model_base)
                self._model_step_with_zero_grad(model_DDP)
            else:
                self._model_step(model_base)
                self._model_step(model_DDP)
            self._assert_equal_param(
                list(model_base.parameters()), list(model_DDP.module.parameters())
            )
            # Shuffle the input so that DDP input is different
            input = input[torch.randperm(batch_size)]
            # save the model in the middle and reload
            if test_save and idx == 2 and INIT_METHOD.startswith("file://"):
                with tempfile.NamedTemporaryFile() as tmp:
                    if sys.platform == "win32":
                        torch.save(model_DDP, tmp)
                        tmp.seek(0)
                        # weights_only=False as this is legacy code that saves the model
                        model_DDP = torch.load(tmp, weights_only=False)
                    else:
                        torch.save(model_DDP, tmp.name)
                        # weights_only=False as this is legacy code that saves the model
                        model_DDP = torch.load(tmp.name, weights_only=False)
        # Final save/load round trip: state dict must survive serialization.
        with tempfile.TemporaryFile() as tmp_file:
            torch.save(model_DDP, tmp_file)
            tmp_file.seek(0)
            # weights_only=False as this is legacy code that saves the model
            saved_model = torch.load(tmp_file, weights_only=False)
        for k in model_DDP.state_dict():
            self.assertEqual(model_DDP.state_dict()[k], saved_model.state_dict()[k])
    def _test_DistributedDataParallel(
        self,
        gpu_subset,
        rank,
        output_device=None,
        gradient_as_bucket_view=False,
        static_graph=False,
        set_static_graph_twice=False,
    ):
        """End-to-end DDP on a GPU subset, checked against single-GPU training.

        Trains a single-GPU copy and a DDP-wrapped copy of the same Net for
        5 iterations and asserts their parameters stay equal; also verifies
        the DDP module survives torch.save/torch.load.

        NOTE(review): ``output_device`` is accepted but unused in this body —
        confirm whether callers rely on it.
        """
        # Run a simple end to end DDP model, use result of single node model
        # as baseline
        # cpu training setup
        model = Net()
        # single gpu training setup
        model_gpu = copy.deepcopy(model)
        model_gpu.cuda(gpu_subset[0])
        # DDP training setup
        model_DDP = copy.deepcopy(model)
        model_DDP.cuda(gpu_subset[0])
        model_DDP = nn.parallel.DistributedDataParallel(
            model_DDP,
            device_ids=gpu_subset,
            gradient_as_bucket_view=gradient_as_bucket_view,
            static_graph=static_graph,
        )
        if set_static_graph_twice:
            # Setting the static graph again must be a no-op, not an error.
            model_DDP._set_static_graph()
        # test serializable/unserializable
        with tempfile.NamedTemporaryFile() as tmp:
            if sys.platform == "win32":
                torch.save(model_DDP, tmp)
                tmp.seek(0)
                # weights_only=False as this is legacy code that saves the model
                model_DDP = torch.load(tmp, weights_only=False)
            else:
                torch.save(model_DDP, tmp.name)
                # weights_only=False as this is legacy code that saves the model
                model_DDP = torch.load(tmp.name, weights_only=False)
        # dummy data initialization
        local_bs = len(gpu_subset)
        global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs)
        # check two model parameters over 5 iterations
        self._test_DDP_niter(
            model_gpu,
            model_DDP,
            input_cpu.cuda(gpu_subset[0]),
            target.cuda(gpu_subset[0]),
            loss,
            local_bs,
            rank,
            global_bs,
            True,
        )
        self._barrier()
    def _test_DistributedDataParallelCPU(self, gradient_as_bucket_view=False):
        """End-to-end DDP on CPU, checked against single-process training.

        Trains a plain Net and a DDP-wrapped copy in lockstep for 5
        iterations (zeroing grads each step) and asserts their parameters
        stay equal. Returns the DDP-wrapped model.
        """
        # Run a simple end to end DDP-CPU model, use result of single node
        # model as baseline
        _group, _group_id, rank = self._init_global_test()
        # cpu training setup
        model_base = Net()
        # DDP-CPU training setup
        model_DDP = copy.deepcopy(model_base)
        model_DDP = nn.parallel.DistributedDataParallel(
            model_DDP, gradient_as_bucket_view=gradient_as_bucket_view
        )
        # dummy data initialization
        local_bs = 2
        global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs)
        # check two model parameters over 5 iterations
        self._test_DDP_niter(
            model_base,
            model_DDP,
            input_cpu,
            target,
            loss,
            local_bs,
            rank,
            global_bs,
            False,
            zero_grad=True,
        )
        self._barrier()
        return model_DDP
@skip_but_pass_in_sandcastle_if(
    BACKEND == "nccl", "nccl does not support DDP on CPU models"
)
def test_DistributedDataParallelCPU(self):
    """DDP-on-CPU parity with default (non-bucket-view) gradients."""
    self._test_DistributedDataParallelCPU()
@skip_but_pass_in_sandcastle_if(
    BACKEND == "nccl", "nccl does not support DDP on CPU models"
)
def test_DistributedDataParallelCPU_grad_is_view(self):
    """DDP-on-CPU parity with gradients stored as bucket views."""
    self._test_DistributedDataParallelCPU(gradient_as_bucket_view=True)
@skip_but_pass_in_sandcastle_if(
    BACKEND not in DistTestCases.backend_feature["ddp"],
    f"The {BACKEND} backend does not support DistributedDataParallel",
)
def test_DistributedDataParallel_requires_grad(self):
    """Wrapping a parameter-less module in DDP must raise RuntimeError."""
    # a module without gradients shouldn't be accepted
    self.assertRaises(
        RuntimeError, lambda: nn.parallel.DistributedDataParallel(nn.Module())
    )
    self._barrier()
@skip_but_pass_in_sandcastle_if(
    BACKEND not in DistTestCases.backend_feature["ddp"],
    f"The {BACKEND} backend does not support DistributedDataParallel",
)
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_ddp_zero_output_features(self):
    """DDP construction must tolerate a layer with zero output features."""

    class ToyModel(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.net1 = nn.Linear(10, 10)
            self.relu = nn.ReLU()
            # Zero-width output layer: the interesting edge case.
            self.net2 = nn.Linear(10, 0)

    model = ToyModel().to(self.rank)
    # Constructing DDP is the whole assertion: it must not raise.
    nn.parallel.DistributedDataParallel(model, device_ids=[self.rank])
@skip_but_pass_in_sandcastle_if(BACKEND == "nccl", "Gloo-only test")
def test_ddp_create_graph(self):
    """``backward(create_graph=True)`` through DDP should not throw.

    Double backward through DDP has known caveats (a warning is emitted);
    this only checks it runs and leaves grads requiring grad.
    """

    class Model(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.p = nn.Parameter(torch.tensor(1.0))

        def forward(self):
            return self.p.pow(2)

    model = Model()
    ddp_model = torch.nn.parallel.DistributedDataParallel(model)
    for _ in range(6):
        # Verify DDP doesn't throw when ran with create_graph=True.
        # Although we do warn about potential issues, please see
        # https://github.com/pytorch/pytorch/issues/63929 for details.
        ddp_model().backward(create_graph=True)
        # grad tensors should require grad.
        self.assertTrue(
            all(param.requires_grad for param in ddp_model.parameters())
        )
@skip_but_pass_in_sandcastle_if(
    BACKEND not in DistTestCases.backend_feature["ddp"],
    f"The {BACKEND} backend does not support DistributedDataParallel",
)
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_DistributedDataParallel_non_default_stream(self):
    """Gradient reduction must be correct when DDP runs on a side stream.

    Each rank feeds its own rank value through a 1x1 linear model, so the
    allreduced weight gradient must equal mean(0..world_size-1) every
    iteration; a mismatch means a rank's averaged gradient was not written
    back before the check's allreduce.
    """
    stream = torch.cuda.Stream(self.rank)
    rank = self.rank
    # Everything (construction, fwd, bwd) happens on the non-default stream.
    with torch.cuda.stream(stream):
        net = torch.nn.parallel.DistributedDataParallel(
            torch.nn.Linear(1, 1, bias=False).cuda(rank), device_ids=[rank]
        )
        for i in range(1000):
            # Clear gradients manually
            grad = net.module.weight.grad
            if grad is not None:
                grad.requires_grad_(False)
                grad.zero_()
            # Forward + BW
            batch = torch.tensor([rank]).float().cuda(rank)
            loss = net(batch).sum()
            loss.backward()
            # For each worker, the gradient on the weight should be worker_rank.
            grad = net.module.weight.grad
            avg = grad.clone()
            # All-reducing the gradient averages should give us the gradient
            # average. If not, then one of the workers has not correctly
            # written back the averaged gradient before this all-reduce call.
            dist.all_reduce(avg)
            world_size = int(os.environ["WORLD_SIZE"])
            avg.div_(world_size)
            expected_grad = sum(i for i in range(world_size)) / world_size
            self.assertEqual(
                avg[0, 0],
                expected_grad,
                msg=f"Expected gradient of {expected_grad} but got {avg} on rank {self.rank}",
            )
@skip_but_pass_in_sandcastle_if(
    BACKEND not in DistTestCases.backend_feature["cuda"],
    f"The {BACKEND} backend does not support DDP communication hook on CUDA devices",
)
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_ddp_comm_hook_logging(self):
    """DDP logging data must record which comm hook (if any) is installed.

    Covers Python hooks (logged by qualified name), C++ builtin hooks
    (logged by their enum's string form), and the no-hook case (empty).
    """
    hooks = [
        default.allreduce_hook,
        default.fp16_compress_hook,
        powerSGD.powerSGD_hook,
        powerSGD.batched_powerSGD_hook,
        quantization_hooks.quantization_pertensor_hook,
        quantization_hooks.quantization_perchannel_hook,
    ]
    cpp_builtin_hooks = [
        dist.BuiltinCommHookType.ALLREDUCE,
        dist.BuiltinCommHookType.FP16_COMPRESS,
    ]
    for hook in hooks:
        ddp_model = torch.nn.parallel.DistributedDataParallel(
            torch.nn.Linear(1, 1, bias=False).cuda(self.rank),
            device_ids=[self.rank],
        )
        ddp_logging_data = ddp_model._get_ddp_logging_data()
        # Hook not registered yet, so should be empty
        self.assertEqual(ddp_logging_data.get("comm_hook"), None)
        ddp_model.register_comm_hook(None, hook)
        ddp_logging_data = ddp_model._get_ddp_logging_data()
        # Python hooks are logged by their qualified name.
        self.assertEqual(ddp_logging_data.get("comm_hook"), hook.__qualname__)
    for hook in cpp_builtin_hooks:
        ddp_model = torch.nn.parallel.DistributedDataParallel(
            torch.nn.Linear(1, 1, bias=False).cuda(self.rank),
            device_ids=[self.rank],
        )
        ddp_logging_data = ddp_model._get_ddp_logging_data()
        # Hook not registered yet, so should be empty
        self.assertEqual(ddp_logging_data.get("comm_hook"), None)
        ddp_model._register_builtin_comm_hook(hook)
        ddp_logging_data = ddp_model._get_ddp_logging_data()
        # Builtin hooks are logged via str() of the enum value.
        self.assertEqual(ddp_logging_data.get("comm_hook"), str(hook))
    # No hook registered
    ddp_model = torch.nn.parallel.DistributedDataParallel(
        torch.nn.Linear(1, 1, bias=False).cuda(self.rank),
        device_ids=[self.rank],
    )
    ddp_logging_data = ddp_model._get_ddp_logging_data()
    # Hook not registered yet, so should be empty
    self.assertEqual(ddp_logging_data.get("comm_hook"), None)
    # After second forward pass, hook should still be empty string
    for _ in range(2):
        inp = torch.ones(1, 1, device=self.rank)
        loss = ddp_model(inp).sum()
        loss.backward()
    ddp_logging_data = ddp_model._get_ddp_logging_data()
    # Note: DETAIL debug mode logs DDP logging data to stdout and
    # thus accesses std::map, which fills in a default value for the
    # type if it didn't exist.
    self.assertEqual(ddp_logging_data.get("comm_hook", ""), "")
def _test_ddp_hook_with_optimizer_parity(
    self,
    grad_as_bucket_view,
    static_graph,
    optim_cls,
    optimize_subset,
    *functional_optim_args,
    **functional_optim_kwargs,
):
    """Check DDP's fused optimizer hook matches a regular post-backward step.

    Trains two identically initialized DDP replicas of each test model:
    one with ``_register_fused_optim`` (optimizer runs inside the comm
    hook) and one stepping ``optim_cls`` manually after backward. After 6
    iterations, all parameters must match, and the optimized parameter set
    (all params, or only the first when ``optimize_subset``) must have
    actually moved from its initial values.

    Args:
        grad_as_bucket_view: forwarded to DDP for both replicas.
        static_graph: forwarded to DDP for both replicas.
        optim_cls: optimizer class under test (e.g. torch.optim.SGD).
        optimize_subset: if True, only the first parameter is optimized.
        *functional_optim_args / **functional_optim_kwargs: passed through
            to the optimizer constructor on both sides.
    """
    rank = self.rank
    torch.cuda.set_device(rank)
    # Per-rank seeding keeps inputs/initialization deterministic per rank.
    torch.manual_seed(rank)
    torch.cuda.manual_seed(rank)
    models_to_test = [
        (LargeNet(), torch.randn(1, 1000).cuda()),
    ]
    if HAS_TORCHVISION:
        models_to_test.append(
            (torchvision.models.resnet50(), torch.randn(1, 3, 3, 1000).cuda())
        )
    for model, inp in models_to_test:
        # Enable determinism in cudnn operators
        with torch.backends.cudnn.flags(
            enabled=True, deterministic=True, benchmark=False
        ):
            # Create DDP model that runs optimizer in fused fashion.
            ddp_model_with_optimizer_hook = (
                torch.nn.parallel.DistributedDataParallel(
                    copy.deepcopy(model).cuda(),
                    device_ids=[self.rank],
                    gradient_as_bucket_view=grad_as_bucket_view,
                    static_graph=static_graph,
                )
            )
            # Create DDP model with no hook that does optimizer after
            # backward.
            ddp_model_with_no_hook = torch.nn.parallel.DistributedDataParallel(
                copy.deepcopy(model).cuda(),
                device_ids=[self.rank],
                gradient_as_bucket_view=grad_as_bucket_view,
                static_graph=static_graph,
            )
            hook_params = ddp_model_with_optimizer_hook.parameters()
            no_hook_params = ddp_model_with_no_hook.parameters()
            if optimize_subset:
                # Restrict both sides to just the first parameter.
                hook_params = list(hook_params)
                no_hook_params = list(no_hook_params)
                self.assertGreater(len(hook_params), 0)
                hook_params = [hook_params[0]]
                no_hook_params = [no_hook_params[0]]
            # Register a fused optimizer that will run optimizer in step
            # with allreduce.
            if optimize_subset:
                # API where optim_params is specified.
                ddp_model_with_optimizer_hook._register_fused_optim(
                    optim_cls,
                    *functional_optim_args,
                    optim_params=hook_params,
                    **functional_optim_kwargs,
                )
            else:
                # API where optim_params is omitted
                ddp_model_with_optimizer_hook._register_fused_optim(
                    optim_cls,
                    *functional_optim_args,
                    **functional_optim_kwargs,
                )
            optimizer_no_hook = optim_cls(
                no_hook_params,
                *functional_optim_args,
                **functional_optim_kwargs,
            )
            # Verify parameters are equal initially.
            for hook_param, allreduce_param in zip(
                ddp_model_with_optimizer_hook.parameters(),
                ddp_model_with_no_hook.parameters(),
                strict=True,
            ):
                self.assertEqual(hook_param, allreduce_param)
            # Save old parameters to later verify optimizer modified them.
            opt_hook_init_params = copy.deepcopy(
                list(ddp_model_with_optimizer_hook.parameters())
            )
            # Run optimizer with hook model.
            for _ in range(6):
                ddp_model_with_optimizer_hook.zero_grad()
                out = ddp_model_with_optimizer_hook(inp)
                loss = out.sum()
                loss.backward()
            dist.barrier()
            # Run regular model.
            for _ in range(6):
                ddp_model_with_no_hook.zero_grad()
                out = ddp_model_with_no_hook(inp)
                loss = out.sum()
                loss.backward()
                optimizer_no_hook.step()
            dist.barrier()
            # Now verify parameters are equal.
            for hook_param, allreduce_param in zip(
                ddp_model_with_optimizer_hook.parameters(),
                ddp_model_with_no_hook.parameters(),
                strict=True,
            ):
                self.assertEqual(hook_param, allreduce_param)
            # Verify optimizer modified appropriate parameter set,
            # otherwise they'd be trivially equal above.
            if optimize_subset:
                self.assertNotEqual(
                    opt_hook_init_params[0],
                    next(iter(ddp_model_with_optimizer_hook.parameters())),
                )
                # Untouched params should be equal
                self.assertEqual(
                    opt_hook_init_params[1:],
                    list(ddp_model_with_optimizer_hook.parameters())[1:],
                )
            else:
                self.assertNotEqual(
                    opt_hook_init_params,
                    list(ddp_model_with_optimizer_hook.parameters()),
                )
            dist.barrier()
"""
# Commenting out the following 3 tests as they cause Sandcastle jobs to fail
# Failure signature:
# AttributeError: type object 'TestDistBackendWithSpawn' has no attribute 'test_ddp_hook_with_optimizer_parity_adamw
from torch.testing._internal.common_utils import parametrize
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl" or BACKEND == "ucc",
"Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259",
)
@skip_if_lt_x_gpu(2)
@parametrize("grad_as_bucket_view", [True, False])
@parametrize("static_graph", [True, False])
@parametrize("optimize_subset", [True, False])
def test_ddp_hook_with_optimizer_parity_adamw(
self,
grad_as_bucket_view,
static_graph,
optimize_subset,
):
adamw_lr = 1e-2
adamw_betas = (0.9, 0.99)
adamw_eps = 1e-6
self._test_ddp_hook_with_optimizer_parity(
grad_as_bucket_view,
static_graph,
torch.optim.AdamW,
optimize_subset,
adamw_lr,
betas=adamw_betas,
eps=adamw_eps,
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl" or BACKEND == "ucc",
"Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259",
)
@skip_if_lt_x_gpu(2)
@parametrize("optimize_subset", [True, False])
def test_ddp_hook_with_optimizer_parity_adam(self, optimize_subset):
adam_lr = 1e-2
adam_betas = (0.9, 0.99)
adam_eps = 1e-6
self._test_ddp_hook_with_optimizer_parity(
True, # grad as bucket view
False, # static graph
torch.optim.Adam,
optimize_subset,
adam_lr,
betas=adam_betas,
eps=adam_eps,
)
@skip_but_pass_in_sandcastle_if(
BACKEND == "nccl" or BACKEND == "ucc",
"Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259",
)
@skip_if_lt_x_gpu(2)
@parametrize("optimize_subset", [True, False])
def test_ddp_hook_with_optimizer_parity_sgd(self, optimize_subset):
sgd_lr = 1e-2
sgd_momentum = 0.9
sgd_weight_decay = 0.01
# Not testing grad_as_bucket_view and static_graph as they are
# tested in AdamW test above.
self._test_ddp_hook_with_optimizer_parity(
True, # grad as bucket view
False, # static_graph
torch.optim.SGD,
optimize_subset,
sgd_lr,
momentum=sgd_momentum,
weight_decay=sgd_weight_decay,
)
"""
@skip_if_lt_x_gpu(2)
def test_get_data_parallel_params(self):
    """``_get_data_parallel_params`` must exclude DDP-ignored parameters.

    Checks both the ``named_params=True`` path (ignored name absent) and
    the ``named_params=False`` path (count is total params minus ignored).
    """
    torch.cuda.set_device(self.rank)
    model = TwoLinLayerNet().cuda()
    # Parameters to ignore are in the format {module_name}.{param_name}
    params_to_ignore = ["a.weight"]
    torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
        model, params_to_ignore
    )
    torch.nn.parallel.DistributedDataParallel(model, device_ids=[self.rank])
    dp_params = (
        torch.nn.parallel.DistributedDataParallel._get_data_parallel_params(
            model, named_params=True
        )
    )
    # Names come back prefixed with "module." once DDP-wrapped.
    for name, _ in dp_params:
        self.assertNotEqual(f"module.{params_to_ignore[0]}", name)
    # test named_params=False, just check if returns the expected
    # no of parameters.
    num_ddp_params = len(list(model.parameters())) - 1
    count = 0
    dp_params = (
        torch.nn.parallel.DistributedDataParallel._get_data_parallel_params(
            model, named_params=False
        )
    )
    for _ in dp_params:
        count += 1
    self.assertEqual(count, num_ddp_params)
def _test_ddp_apply_optim_in_backward(
    self,
    optim_cls,
    optim_kwargs,
    init_before,
    gradient_as_bucket_view=True,
):
    """Compare optimizer-in-backward DDP against a conventional DDP+step.

    Trains two identically initialized replicas: one with
    ``_apply_optimizer_in_backward`` (optimizer fires during backward, grads
    end up None) and one stepping ``optim_cls`` manually. Parameters must
    match after every one of 8 iterations.

    Args:
        optim_cls: optimizer class under test.
        optim_kwargs: constructor kwargs shared by both sides.
        init_before: whether ``_apply_optimizer_in_backward`` is called
            before (True) or after (False) DDP wrapping.
        gradient_as_bucket_view: forwarded to both DDP constructions.
    """
    # Need to seed to ensure inputs are unique across rank. Otherwise,
    # allreduce won't have any effect.
    torch.manual_seed(self.rank)
    torch.cuda.manual_seed(self.rank)
    torch.cuda.set_device(self.rank)

    # Test a simple linear as well as a ResNet model.
    models_to_test = [
        nn.Sequential(nn.Linear(3, 3), nn.Linear(3, 3), nn.Linear(3, 3)).cuda()
    ]
    if HAS_TORCHVISION:
        models_to_test.append(torchvision.models.resnet50().cuda())
    for j, model in enumerate(models_to_test):
        model_optim_in_bwd = copy.deepcopy(model)
        model = nn.parallel.DistributedDataParallel(
            model,
            device_ids=[self.rank],
            gradient_as_bucket_view=gradient_as_bucket_view,
        )
        optim = optim_cls(model.parameters(), **optim_kwargs)
        if init_before:
            _apply_optimizer_in_backward(
                optimizer_class=optim_cls,
                params=model_optim_in_bwd.parameters(),
                optimizer_kwargs=optim_kwargs,
            )
        model_optim_in_bwd = nn.parallel.DistributedDataParallel(
            model_optim_in_bwd,
            device_ids=[self.rank],
            gradient_as_bucket_view=gradient_as_bucket_view,
        )
        if not init_before:
            _apply_optimizer_in_backward(
                optimizer_class=optim_cls,
                params=model_optim_in_bwd.parameters(),
                optimizer_kwargs=optim_kwargs,
            )

        for p1, p2 in zip(
            model.parameters(), model_optim_in_bwd.parameters(), strict=True
        ):
            self.assertEqual(p1, p2, "Parameters not initially equal!")
        # Enable determinism in cudnn operators
        with torch.backends.cudnn.flags(
            enabled=True, deterministic=True, benchmark=False
        ):
            for i in range(8):
                # j == 1 is the ResNet, which wants image-shaped input.
                inp = (
                    torch.randn(1, 3, 1000, 1000, device="cuda")
                    if j == 1
                    else torch.randn(10, 3, device="cuda")
                )
                model(inp).sum().backward()
                optim.step()
                model_optim_in_bwd(
                    inp
                ).sum().backward()  # runs optimizer as well
                for p1, p2 in zip(
                    model.parameters(),
                    model_optim_in_bwd.parameters(),
                    strict=True,
                ):
                    self.assertEqual(
                        p1, p2, f"Params not equal at iteration {i}"
                    )
                    # In-backward optimizer consumes grads, leaving None.
                    self.assertTrue(
                        p2.grad is None,
                        f"Optim in backward grad is not None at {i}",
                    )
                # set_to_none for regular optimizer to match in backward
                # case.
                optim.zero_grad(set_to_none=True)
@skipIfRocm
@skip_if_lt_x_gpu(2)
def test_ddp_apply_optim_in_backward(self):
    """Optimizer-in-backward parity for SGD and Adam, both init orders."""
    optimizer_classes = [torch.optim.SGD, torch.optim.Adam]
    for optimizer_cls in optimizer_classes:
        for apply_before_ddp in (True, False):
            with self.subTest(optim_cls=optimizer_cls):
                self._test_ddp_apply_optim_in_backward(
                    optim_cls=optimizer_cls,
                    optim_kwargs={"lr": 0.03},
                    init_before=apply_before_ddp,
                )
@skipIfRocm
@skip_if_lt_x_gpu(2)
def test_ddp_apply_optim_in_backward_grad_as_bucket_view_false(self):
    """Optimizer-in-backward parity with gradient_as_bucket_view disabled."""
    for apply_before_ddp in (True, False):
        self._test_ddp_apply_optim_in_backward(
            optim_cls=torch.optim.SGD,
            optim_kwargs={"lr": 0.03},
            init_before=apply_before_ddp,
            gradient_as_bucket_view=False,
        )
@skipIfRocm
@skip_if_lt_x_gpu(2)
def test_ddp_apply_optim_in_backward_ignored_params(self):
    """In-backward optimizer must act locally on DDP-ignored parameters.

    ``a.weight`` is excluded from DDP allreduce, so after one backward its
    value must diverge across ranks (optimizer applied local gradients),
    while all synchronized parameters stay identical.
    """
    torch.cuda.set_device(self.rank)
    for init_before in [True, False]:
        with self.subTest(init_before=init_before):
            # Re-seed so each subtest starts from the same weights/inputs.
            torch.manual_seed(self.rank)
            torch.cuda.manual_seed(self.rank)
            model = TwoLinLayerNet()
            # Parameters to ignore are in the format {module_name}.{param_name}
            params_to_ignore = ["a.weight"]
            torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
                model, params_to_ignore
            )
            if init_before:
                _apply_optimizer_in_backward(
                    optimizer_class=torch.optim.SGD,
                    params=model.parameters(),
                    optimizer_kwargs={"lr": 0.03},
                )
            net = torch.nn.parallel.DistributedDataParallel(
                model.cuda(self.rank),
                device_ids=[self.rank],
            )
            if not init_before:
                _apply_optimizer_in_backward(
                    optimizer_class=torch.optim.SGD,
                    params=model.parameters(),
                    optimizer_kwargs={"lr": 0.03},
                )
            inp = torch.randn(1, 10)
            a, b = net(inp)
            (a.transpose(0, 1) @ b).sum().backward()
            # a.weight did not go through allreduce, so optimizer acted on local
            # gradient, which should be different across ranks. Remaining params
            # should be equal.
            models = [None for _ in range(dist.get_world_size())]
            dist.all_gather_object(models, model)
            rank0_model, remainder = models[0], models[1:]
            for m in remainder:
                self.assertNotEqual(rank0_model.a.weight, m.a.weight)
                self.assertEqual(
                    list(rank0_model.b.parameters()), list(m.b.parameters())
                )
                self.assertEqual(rank0_model.a.bias, m.a.bias)
def _get_fp16_config(self) -> _MixedPrecision:
    """Return a mixed-precision config using fp16 for params, reductions, and buffers."""
    half = torch.float16
    return _MixedPrecision(
        param_dtype=half,
        reduce_dtype=half,
        buffer_dtype=half,
    )
@skip_if_lt_x_gpu(2)
def test_ddp_native_mixed_precision_ignored_params(self):
    """Mixed-precision DDP must not create low-precision shadows for ignored params/buffers.

    Ignored entries must lack ``_mp_param``/``_fp_param``; every other
    parameter/buffer gets an fp16 ``_mp_param`` and fp32 ``_fp_param``.
    """
    rank = self.rank
    torch.manual_seed(rank)
    torch.cuda.manual_seed(rank)
    torch.cuda.set_device(rank)
    model = TwoLinLayerNet()
    model.register_buffer("buffer", torch.ones(5))
    # Parameters to ignore are in the format {module_name}.{param_name}
    to_ignore = ["a.weight", "buffer"]
    torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
        model,
        to_ignore,
    )
    mp_config = self._get_fp16_config()
    net = torch.nn.parallel.DistributedDataParallel(
        model.to(rank),
        device_ids=[rank],
        mixed_precision=mp_config,
        gradient_as_bucket_view=True,
    )
    # DDP-wrapped names gain the "module." prefix.
    to_ignore = [f"module.{name}" for name in to_ignore]
    expected_ignored = len(to_ignore)
    n_ignored = 0
    # ignored params should not have _mp_param or _fp_param fields.
    for n, p in itertools.chain(net.named_parameters(), net.named_buffers()):
        if n in to_ignore:
            n_ignored += 1
            self.assertFalse(hasattr(p, "_mp_param"))
            self.assertFalse(hasattr(p, "_fp_param"))
        else:
            self.assertEqual(mp_config.param_dtype, p._mp_param.dtype)
            self.assertEqual(torch.float32, p._fp_param.dtype)
    self.assertEqual(expected_ignored, n_ignored)
def _test_ddp_native_mixed_precision(
    self, gradient_as_bucket_view, set_grad_to_none
):
    """End-to-end check of DDP's native mixed-precision training path.

    The model's own forward asserts params/buffers/input arrive in the
    configured low precision; after each backward, this checks params and
    grads are fp32 and that grads agree across all ranks (via all_gather).

    Args:
        gradient_as_bucket_view: forwarded to DDP.
        set_grad_to_none: value used for ``zero_grad(set_to_none=...)``.
    """
    rank = self.rank
    torch.manual_seed(rank)
    torch.cuda.manual_seed(rank)
    torch.cuda.set_device(rank)
    inp = torch.randn(10, 1)
    mp_config = self._get_fp16_config()

    class MyModel(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.m = torch.nn.Linear(1, 5)
            self.register_buffer("buffer", torch.randn(1, 2))
            # Frozen parameter: exercises the no-grad path below.
            self.p = torch.nn.Parameter(torch.randn(10, 5), requires_grad=False)

        # `self_` is the module; `self` (closed over) is the test case, so
        # the forward itself can assert on the cast dtypes.
        def forward(self_, x):  # noqa: B902
            params = self_.m.parameters()
            for p in params:
                self.assertEqual(mp_config.param_dtype, p.dtype)
            self.assertEqual(self_.buffer.dtype, mp_config.buffer_dtype)
            self.assertEqual(mp_config.param_dtype, x.dtype)
            return self_.m(x) + self_.p

    m = MyModel()

    net = torch.nn.parallel.DistributedDataParallel(
        m.to(rank),
        device_ids=[rank],
        mixed_precision=mp_config,
        gradient_as_bucket_view=gradient_as_bucket_view,
    )
    # Buffers are casted in constructor.
    self.assertEqual(net.module.buffer.dtype, mp_config.buffer_dtype)
    # Each param should have an mp_param in the lower precision, and
    # an fp_param in the higher precision.
    for p in net.parameters():
        self.assertEqual(mp_config.param_dtype, p._mp_param.dtype)
        self.assertEqual(torch.float32, p._fp_param.dtype)

    for _ in range(6):
        loss = net(inp).sum()
        loss.backward()
        # Verify gradient synchronization and params and grads are fp32.
        for n, param in net.named_parameters():
            self.assertEqual(param.dtype, torch.float32)
            if param.grad is None:
                assert n == "module.p"  # Only param that doesn't require grad
            else:
                self.assertEqual(param.grad.dtype, torch.float32)
                tensor_list = [
                    torch.zeros_like(param.grad)
                    for _ in range(dist.get_world_size(net.process_group))
                ]
                dist.all_gather(tensor_list, param.grad)
                g, rest = tensor_list[0], tensor_list[1:]
                self.assertEqual(g.dtype, torch.float32)
                for g_ in rest:
                    self.assertEqual(g_.dtype, torch.float32)
                    # Grads must be identical across ranks after allreduce.
                    self.assertEqual(g, g_)
        net.zero_grad(set_to_none=set_grad_to_none)
@skip_if_lt_x_gpu(2)
def test_ddp_native_mixed_precision_no_grad_as_bucket_view_no_set_grad_none(
    self,
):
    """Native mixed precision: bucket-view grads off, zero_grad keeps tensors."""
    self._test_ddp_native_mixed_precision(
        gradient_as_bucket_view=False,
        set_grad_to_none=False,
    )
@skip_if_lt_x_gpu(2)
def test_ddp_native_mixed_precision_grad_as_bucket_view_no_set_grad_none(self):
    """Native mixed precision: bucket-view grads on, zero_grad keeps tensors."""
    self._test_ddp_native_mixed_precision(
        gradient_as_bucket_view=True,
        set_grad_to_none=False,
    )
@skip_if_lt_x_gpu(2)
def test_ddp_native_mixed_precision_grad_as_bucket_view_set_grad_to_none(self):
    """Native mixed precision: bucket-view grads on, zero_grad sets None."""
    self._test_ddp_native_mixed_precision(
        gradient_as_bucket_view=True, set_grad_to_none=True
    )
@skip_if_lt_x_gpu(2)
def test_ddp_native_mixed_precision_no_grad_as_bucket_view_set_grad_to_none(
    self,
):
    """Native mixed precision: bucket-view grads off, zero_grad sets None.

    Bug fix: this previously passed ``gradient_as_bucket_view=True``, which
    contradicted the test name and exactly duplicated
    ``test_ddp_native_mixed_precision_grad_as_bucket_view_set_grad_to_none``,
    leaving the (False, True) combination untested.
    """
    self._test_ddp_native_mixed_precision(
        gradient_as_bucket_view=False, set_grad_to_none=True
    )
def _test_ddp_hook_parity(self, state, hook, num_validated_iters=100):
    """Verify a comm hook yields the same gradients as vanilla allreduce.

    Runs 100 iterations of a 1x5 linear model, one DDP replica with the
    hook registered and one without; for the first ``num_validated_iters``
    iterations the hook's gradient must equal both the analytically
    expected mean-of-ranks value and the vanilla allreduce gradient.

    Args:
        state: hook state; if it carries a ``process_group`` attribute that
            group is also used for DDP, otherwise ``state`` itself is.
        hook: the communication hook under test.
        num_validated_iters: iterations whose gradients are checked.
    """
    rank = self.rank
    m = torch.nn.Linear(1, 5)
    try:
        process_group = state.process_group
    except AttributeError:
        process_group = state

    net_with_hook = torch.nn.parallel.DistributedDataParallel(
        copy.deepcopy(m).to(rank),
        device_ids=[rank],
        process_group=process_group,
    )
    net_with_hook.register_comm_hook(state=state, hook=hook)
    net_without_hook = torch.nn.parallel.DistributedDataParallel(
        copy.deepcopy(m).to(rank),
        device_ids=[rank],
        process_group=process_group,
    )
    for i in range(100):
        # Clear gradients manually.
        for g in [
            net_without_hook.module.weight.grad,
            net_with_hook.module.weight.grad,
        ]:
            if g is not None:
                g.requires_grad_(False)
                g.zero_()
        # Forward + BW
        batch = torch.tensor([rank]).float().cuda(rank)
        loss = net_without_hook(batch).sum()
        loss.backward()
        # For each worker, the gradient on the weight should be worker_rank.
        grad = net_without_hook.module.weight.grad
        avg = grad.clone()
        expected_grad = (
            sum(i for i in range(dist.get_world_size())) / dist.get_world_size()
        )
        loss_hook = net_with_hook(batch).sum()
        loss_hook.backward()
        grad_hook = net_with_hook.module.weight.grad
        avg_hook = grad_hook.clone()

        if i < num_validated_iters:
            # Verify hook grad with expected.
            self.assertEqual(
                avg_hook[0, 0].item(),
                expected_grad,
                msg=f"Expected hook grad of {expected_grad} but got {avg_hook[0, 0]}",
            )
            # Verify hook grad with vanilla allreduce
            self.assertEqual(
                avg_hook[0, 0],
                avg[0, 0],
                msg=f"Expected hook grad to be close to allreduce {avg[0, 0]}, but got {avg_hook[0, 0]}",
            )
@skip_but_pass_in_sandcastle_if(
    BACKEND not in DistTestCases.backend_feature["cuda"],
    f"The {BACKEND} backend does not support DDP communication hook on CUDA devices",
)
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_ddp_hook_parity_allreduce(self):
    """Parity of the default allreduce comm hook against vanilla DDP."""
    self._test_ddp_hook_parity(state=None, hook=default.allreduce_hook)
@skip_but_pass_in_sandcastle_if(
    BACKEND not in DistTestCases.backend_feature["cuda"],
    f"The {BACKEND} backend does not support DDP communication hook on CUDA devices",
)
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_ddp_hook_parity_allreduce_process_group(self):
    """Allreduce hook parity when an explicit process group is supplied.

    The same group is handed both to DDP construction and to the hook
    state inside ``_test_ddp_hook_parity``.
    """
    n_ranks = dist.get_world_size()
    gpu_map = init_multigpu_helper(n_ranks, BACKEND)
    device_ids = []
    for r in range(n_ranks):
        device_ids.append(gpu_map[int(r)][0])
    pg = torch.distributed.new_group(device_ids)
    self._test_ddp_hook_parity(state=pg, hook=default.allreduce_hook)
@skip_but_pass_in_sandcastle_if(
    BACKEND not in DistTestCases.backend_feature["cuda"],
    f"The {BACKEND} backend does not support DDP communication hook on CUDA devices",
)
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_ddp_hook_parity_powerSGD(self):
    """PowerSGD comm-hook parity, exercised with and without warm start."""
    for use_warm_start in (True, False):
        sgd_state = powerSGD.PowerSGDState(
            process_group=None,
            matrix_approximation_rank=1,
            start_powerSGD_iter=2,
            warm_start=use_warm_start,
        )
        self._test_ddp_hook_parity(
            state=sgd_state, hook=powerSGD.powerSGD_hook
        )
@skip_but_pass_in_sandcastle_if(
    BACKEND not in DistTestCases.backend_feature["cuda"],
    f"The {BACKEND} backend does not support DDP communication hook on CUDA devices",
)
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_ddp_hook_parity_post_localSGD(self):
    """Post-LocalSGD comm-hook parity across several state configurations.

    Covers: global subgroup (gradients still globally allreduced after the
    switch), disabled post-local gradient allreduce (only warmup iterations
    validated), default intra-node subgroup, and a start iteration beyond
    the run length (local SGD never triggers).
    """
    # Although we start run local SGD at iteration 10, since we still use the global process group to run it,
    # the post-LocalSGD actually still allreduces gradients globally for the remaining iterations.
    state = post_localSGD.PostLocalSGDState(
        process_group=None, subgroup=dist.group.WORLD, start_localSGD_iter=10
    )
    self._test_ddp_hook_parity(
        state=state, hook=post_localSGD.post_localSGD_hook
    )

    # Only validate the warmup iterations before local SGD is applied,
    # because when `post_local_gradient_allreduce` is disabled, the gradients will not be synchronized at all.
    # Note that in practice a model averager has to be applied to run model averaging,
    # so local gradient averaging is not necessary.
    start_localSGD_iter = 10
    state = post_localSGD.PostLocalSGDState(
        process_group=None,
        subgroup=dist.group.WORLD,
        start_localSGD_iter=start_localSGD_iter,
        post_local_gradient_allreduce=False,
    )
    self._test_ddp_hook_parity(
        state=state,
        hook=post_localSGD.post_localSGD_hook,
        num_validated_iters=start_localSGD_iter,
    )

    # When `subgroup` is None, it is equivalent to the subgroup on the each node.
    # For this single-node test environment, the intra-node process group is equivalent to
    # the global process group.
    if self.world_size == dist.get_world_size():
        state = post_localSGD.PostLocalSGDState(
            process_group=None, subgroup=None, start_localSGD_iter=10
        )
        self._test_ddp_hook_parity(
            state=state, hook=post_localSGD.post_localSGD_hook
        )

    # Since we start local SGD later than the total number of 100 iterations,
    # no local SGD actually is executed, and we don't even need to provide a subgroup for this case.
    state = post_localSGD.PostLocalSGDState(
        process_group=None, subgroup=None, start_localSGD_iter=1000
    )
    self._test_ddp_hook_parity(
        state=state, hook=post_localSGD.post_localSGD_hook
    )
def _prepare_single_device_module(
    self,
    rank,
    process_group,
    devices,
    device_ids,
    global_batch_size,
    gradient_as_bucket_view=False,
):
    """Build a reference model, its DDP twin on one device, and random data.

    Args:
        rank: this process's rank; used to pick ``cuda:<rank>`` when
            ``devices`` is empty/falsy.
        process_group: group handed to the DDP constructor.
        devices: candidate devices; only the first is used.
        device_ids: forwarded to DDP.
        global_batch_size: leading dim of the generated input/target.
        gradient_as_bucket_view: forwarded to DDP.

    Returns:
        (model, ddp_model, input, target) — the non-DDP reference model,
        the DDP-wrapped deep copy, and random (N, 2) input / (N, 4) target
        tensors, all on the chosen device.
    """
    model = Net()
    device = devices[0] if devices else torch.device(f"cuda:{rank:d}")
    # Tiny bucket cap forces multiple buckets even for this small model.
    ddp_model = DistributedDataParallel(
        copy.deepcopy(model).to(device),
        device_ids=device_ids,
        process_group=process_group,
        bucket_cap_mb=0.001,
        gradient_as_bucket_view=gradient_as_bucket_view,
    )

    model.to(device)

    input = torch.randn(global_batch_size, 2).to(device)
    target = torch.randn(global_batch_size, 4).to(device)

    return model, ddp_model, input, target
def _prepare_cpu_module(
    self,
    process_group,
    global_batch_size,
    gradient_as_bucket_view=False,
):
    """Build a reference CPU model, its DDP twin, and random input/target.

    Returns (model, ddp_model, input, target): the non-DDP reference
    model, a DDP wrap of a deep copy (tiny bucket cap to force multiple
    buckets), and random (N, 2) input / (N, 4) target tensors.
    """
    reference = Net()
    replicated = DistributedDataParallel(
        copy.deepcopy(reference),
        process_group=process_group,
        bucket_cap_mb=0.001,
        gradient_as_bucket_view=gradient_as_bucket_view,
    )
    batch_input = torch.randn(global_batch_size, 2)
    batch_target = torch.randn(global_batch_size, 4)
    return reference, replicated, batch_input, batch_target
def _test_accumulate_gradients_no_sync(
    self, num_iters=2, ddp_comm_hook=None, gradient_as_bucket_view=False
):
    """
    This is the recommended way to implement accumulate grads.
    If ``ddp_comm_hook`` input was specified, it will also register that hook
    to the ``ddp_model``. The hook fed into this function should not change
    the resulting gradients.

    On even iterations grads are accumulated locally under ``no_sync()``
    (so DDP grads must *differ* from the reference model's); on odd
    iterations grads are synchronized (so they must match).

    NOTE(review): setup only runs for mpi/gloo/nccl backends; for any
    other backend the local variables below would be unbound — callers
    are expected to be guarded by backend skip decorators.
    """
    _group, group_id, rank = self._init_global_test()
    world_size = get_world_size()

    # FIXME: Add testing for gloo/CUDA
    if BACKEND == "mpi" or BACKEND == "gloo":
        global_batch_size = world_size
        local_batch_size = 1
        model, ddp_model, input, target = self._prepare_cpu_module(
            group_id, global_batch_size, gradient_as_bucket_view
        )

    if BACKEND == "nccl":
        rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
        int_devices = rank_to_GPU[rank][:1]
        devices = [torch.device("cuda:" + str(i)) for i in int_devices]
        global_batch_size = world_size
        local_batch_size = len(devices)
        model, ddp_model, input, target = self._prepare_single_device_module(
            rank,
            group_id,
            devices,
            devices,
            global_batch_size,
            gradient_as_bucket_view,
        )

    if ddp_comm_hook is not None:
        ddp_model.register_comm_hook(group_id, ddp_comm_hook)

    def step_model(model, input, target):
        # One fwd/bwd step; grads accumulate into .grad (no optimizer).
        model.train()
        output = model(input)
        loss = F.mse_loss(output, target.to(output.device))
        loss.backward()

    # ensure accumulate grads works with no_grad => no grads are accumulated.
    with torch.no_grad():
        with ddp_model.no_sync():
            ddp_model.train()
            ddp_model(input)

    # check two model parameters over num_iters iterations
    for iteration in range(num_iters):
        # Reference model sees the full global batch...
        step_model(model, input, target)

        # ...while the DDP model only sees this rank's shard.
        ddp_input = input[
            rank * local_batch_size : (rank + 1) * local_batch_size
        ]
        ddp_target = target[
            rank * local_batch_size : (rank + 1) * local_batch_size
        ]

        if iteration % 2 == 0:
            # accumulate grads locally
            with ddp_model.no_sync():
                step_model(ddp_model, ddp_input, ddp_target)
        else:
            # sync grads
            step_model(ddp_model, ddp_input, ddp_target)

        for i, j in zip(
            model.parameters(), ddp_model.parameters(), strict=True
        ):
            if not i.requires_grad:
                continue
            if iteration % 2 == 0:
                self.assertNotEqual(i.grad, j.grad)
            else:
                self.assertEqual(i.grad, j.grad)

        # Shuffle the input so that DDP input is different
        torch.manual_seed(1337 + iteration)
        input = input[torch.randperm(global_batch_size)]
@skip_but_pass_in_sandcastle_if(
    BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo",
    "get_future is only supported on mpi, nccl and gloo",
)
@nccl_skip_if_lt_x_gpu(BACKEND, 2)
def test_accumulate_gradients_no_sync(self):
    """
    Runs _test_accumulate_gradients_no_sync using default inputs
    """
    self._test_accumulate_gradients_no_sync()
@skip_but_pass_in_sandcastle_if(
    BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo",
    "get_future is only supported on mpi, nccl and gloo",
)
@nccl_skip_if_lt_x_gpu(BACKEND, 2)
def test_accumulate_gradients_no_sync_grad_is_view(self):
    """
    Runs _test_accumulate_gradients_no_sync with gradients stored as
    bucket views (gradient_as_bucket_view=True).
    """
    self._test_accumulate_gradients_no_sync(gradient_as_bucket_view=True)
@skip_but_pass_in_sandcastle_if(
    BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo",
    "get_future is only supported on mpi, nccl and gloo",
)
@nccl_skip_if_lt_x_gpu(BACKEND, 2)
def test_accumulate_gradients_no_sync_allreduce_hook(self):
    """
    Runs multiple iterations on _test_accumulate_gradients_no_sync
    using allreduce hook and validates whether future result was properly
    passed as gradients in reducer.
    """
    world_size = get_world_size()

    def allreduce_hook(
        group_id: object, bucket: dist.GradBucket
    ) -> torch.futures.Future[torch.Tensor]:
        # Pre-divide so the summed allreduce yields the average.
        tensors = [bucket.buffer() / world_size]
        return (
            group_id.allreduce(tensors)
            .get_future()
            .then(lambda fut: fut.value()[0])
        )

    self._test_accumulate_gradients_no_sync(
        num_iters=4, ddp_comm_hook=allreduce_hook
    )
@skip_but_pass_in_sandcastle_if(
    BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo",
    "get_future is only supported on mpi, nccl and gloo",
)
@nccl_skip_if_lt_x_gpu(BACKEND, 2)
def test_accumulate_gradients_no_sync_allreduce_with_then_hook(self):
    """
    Runs multiple iterations on _test_accumulate_gradients_no_sync using allreduce
    hook that also uses then callbacks. In first then callback result is multiplied
    by 2, and the second callback divides the result by 2 * world_size. It validates
    whether final result was properly passed as gradients in reducer.
    """
    world_size = get_world_size()

    def allreduce_with_then_hook(
        group_id: object, bucket: dist.GradBucket
    ) -> torch.futures.Future[torch.Tensor]:
        fut = group_id.allreduce([bucket.buffer()]).get_future()

        def mult(fut):
            # Multiply the result by 2.
            return 2 * fut.wait()[0]

        def div(fut):
            # Divide the result by 2 * world_size.
            return fut.wait() / (2 * world_size)

        # Net effect: sum / world_size == the gradient average.
        return fut.then(mult).then(div)

    self._test_accumulate_gradients_no_sync(
        num_iters=4, ddp_comm_hook=allreduce_with_then_hook
    )
@skip_but_pass_in_sandcastle_if(
    BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo",
    "get_future is only supported on mpi, nccl and gloo",
)
@nccl_skip_if_lt_x_gpu(BACKEND, 2)
def test_get_future(self):
    """Chained ``then`` callbacks on an allreduce future compose correctly.

    After allreducing a tensor of 2s across ``len(group)`` ranks, the
    chained callbacks give 2 * len(group) * 3 + 1 per element.
    """

    def mult(fut):
        return [t * 3 for t in fut.wait()]

    def add(fut):
        return [t + 1 for t in fut.wait()]

    group, group_id, rank = self._init_global_test()
    input = _build_tensor(3, 2)
    if BACKEND == "nccl":
        rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
        device_id = rank_to_GPU[rank][0]
        input = input.to(device_id)
    fut = group_id.allreduce([input]).get_future()
    res = fut.then(mult).then(add).wait()
    expected = _build_tensor(3, 2 * len(group) * 3 + 1)

    self.assertEqual(res[0], expected)
@skip_but_pass_in_sandcastle_if(
    BACKEND not in DistTestCases.backend_feature["ddp"],
    f"The {BACKEND} backend does not support DistributedDataParallel",
)
@skip_if_no_gpu
def test_DistributedDataParallel(self):
    """End-to-end GPU DDP parity over the full option matrix.

    For every combination of gradient_as_bucket_view x static_graph, runs
    ``_test_DistributedDataParallel`` in four variants: plain, with
    ``_set_static_graph`` called twice, with an explicit ``output_device``,
    and with device objects (rather than ints) as the GPU subset.
    """
    _group, _group_id, rank = self._init_global_test()
    rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
    gpus = list(rank_to_GPU[rank])

    for use_bucket_view, static_graph in itertools.product(
        (False, True), (False, True)
    ):
        self._test_DistributedDataParallel(
            gpu_subset=gpus,
            rank=rank,
            gradient_as_bucket_view=use_bucket_view,
            static_graph=static_graph,
        )

        # test set static graph twice
        self._test_DistributedDataParallel(
            gpu_subset=gpus,
            rank=rank,
            gradient_as_bucket_view=use_bucket_view,
            static_graph=static_graph,
            set_static_graph_twice=True,
        )

        # test output_device
        self._test_DistributedDataParallel(
            gpu_subset=gpus,
            rank=rank,
            output_device=torch.device("cuda"),
            gradient_as_bucket_view=use_bucket_view,
            static_graph=static_graph,
        )

        # test device_ids
        gpus_list = [torch.device("cuda:" + str(i)) for i in gpus]
        self._test_DistributedDataParallel(
            gpu_subset=gpus_list,
            rank=rank,
            output_device=torch.device("cuda"),
            gradient_as_bucket_view=use_bucket_view,
            static_graph=static_graph,
        )
    def _test_DistributedDataParallel_with_amp(self, grad_is_view=False):
        """Train a small DDP model for 20 iterations under autocast with a
        GradScaler and return the trained DDP module.

        Asserts gradients are absent before training, then finite (no
        NaN/Inf) after every scaled backward pass.

        Args:
            grad_is_view: passed through as DDP's gradient_as_bucket_view.
        """
        torch.manual_seed(31415)
        # Creates model and optimizer in default precision
        model = Net().cuda()
        optimizer = torch.optim.SGD(model.parameters(), lr=0.03)
        # Creates a GradScaler once at the beginning of training.
        scaler = GradScaler()
        ddp_model = nn.parallel.DistributedDataParallel(
            model, device_ids=[self.rank], gradient_as_bucket_view=grad_is_view
        )
        input = torch.randn(dist.get_world_size() * 2, 2).cuda()
        target = torch.randn(dist.get_world_size() * 2, 4).cuda()
        loss_fn = nn.MSELoss()
        # verify grads are none before training
        for p in ddp_model.parameters():
            self.assertTrue(p is not None)
            self.assertTrue(p.grad is None)
        for idx in range(20):
            optimizer.zero_grad()
            # Runs the forward pass with autocasting.
            with autocast():
                output = ddp_model(input)
                loss = loss_fn(output, target)
            # Scales loss. Calls backward() on scaled loss to create scaled gradients.
            # Backward passes under autocast are not recommended.
            # Backward ops run in the same dtype autocast chose for corresponding forward ops.
            scaler.scale(loss).backward()
            # verify grads are not none and are valid during training
            for p in ddp_model.parameters():
                if p.requires_grad:
                    self.assertTrue(p.grad is not None)
                    self.assertFalse(p.grad.isnan().any())
                    self.assertFalse(p.grad.isinf().any())
            # scaler.step() first unscales the gradients of the optimizer's assigned params.
            # If these gradients do not contain infs or NaNs, optimizer.step() is then called,
            # otherwise, optimizer.step() is skipped.
            scaler.step(optimizer)
            # Updates the scale for next iteration.
            scaler.update()
            # Shuffle the input so that DDP input is different
            torch.manual_seed(1337 + idx)
            input = input[torch.randperm(dist.get_world_size() * 2)]
        return ddp_model
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    @skip_if_no_gpu
    def test_DistributedDataParallel_with_amp_and_grad_is_view(self):
        """AMP training must yield identical parameters whether or not DDP
        uses gradient_as_bucket_view (both runs are seeded identically by
        the helper)."""
        torch.cuda.set_device(self.rank)
        ddp_model_grad_not_view = self._test_DistributedDataParallel_with_amp(
            grad_is_view=False
        )
        ddp_model_grad_is_view = self._test_DistributedDataParallel_with_amp(
            grad_is_view=True
        )
        for i, j in zip(
            ddp_model_grad_not_view.parameters(),
            ddp_model_grad_is_view.parameters(),
            strict=True,
        ):
            self.assertEqual(i, j)
    def _test_DistributedDataParallel_SyncBatchNorm(
        self,
        gpu_subset,
        rank,
        local_bs,
        global_bs,
        offset,
        output_device=None,
        affine=True,
    ):
        """Compare single-GPU BatchNormNet training against its SyncBatchNorm
        DDP conversion, using the single-process model as the baseline.

        Also round-trips the DDP model through torch.save/torch.load to check
        it is (de)serializable.

        Args:
            gpu_subset: GPU ids (ints or torch.device) this rank trains on.
            rank: this process's rank.
            local_bs: per-rank batch size.
            global_bs: total batch size across all ranks.
            offset: this rank's starting index into the global batch.
            output_device: NOTE(review) accepted but unused in this helper.
            affine: whether BatchNorm layers carry affine parameters; the
                non-affine variant runs fewer iterations (2 vs 5).
        """
        # Run a simple end to end DDP model, use result of single node model
        # as baseline
        # cpu training setup
        model = BatchNormNet() if affine else BatchNormNet(affine=False)
        # single gpu training setup
        model_gpu = copy.deepcopy(model)
        model_gpu.cuda(gpu_subset[0])
        # DDP training setup
        model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model))
        model_DDP.cuda(gpu_subset[0])
        model_DDP = nn.parallel.DistributedDataParallel(
            model_DDP, device_ids=gpu_subset
        )
        # test serializable/unserializable
        with tempfile.NamedTemporaryFile() as tmp:
            if sys.platform == "win32":
                torch.save(model_DDP, tmp)
                tmp.seek(0)
                # weights_only=False as this is legacy code that saves the model
                model_DDP = torch.load(tmp, weights_only=False)
            else:
                torch.save(model_DDP, tmp.name)
                # weights_only=False as this is legacy code that saves the model
                model_DDP = torch.load(tmp.name, weights_only=False)
        # data initialization
        input_cpu = torch.randn(global_bs, 2)
        target = torch.randn(global_bs, 4)
        loss = nn.MSELoss()
        # check two model parameters over 5 iterations
        self._test_DDP_niter(
            model_gpu,
            model_DDP,
            input_cpu.cuda(gpu_subset[0]),
            target.cuda(gpu_subset[0]),
            loss,
            local_bs,
            rank,
            global_bs,
            True,
            offset,
            dist.get_world_size(),
            5 if affine else 2,
        )
        self._barrier()
    def _test_post_localSGD_optimizer_parity(self, create_averager, grad_is_view):
        """Check PostLocalSGDOptimizer against manual SGD + explicit averaging.

        Trains two identically-initialized DDP replicas for 20 steps: one
        driven by plain SGD followed by a hand-invoked model averager, the
        other by a PostLocalSGDOptimizer built from an equivalent averager.
        Parameters and averager step counters must match afterwards.

        Args:
            create_averager: zero-arg factory producing a model averager.
            grad_is_view: DDP's gradient_as_bucket_view for both replicas.
        """
        learning_rate = 0.03
        DDP_NET = Net()
        net = torch.nn.parallel.DistributedDataParallel(
            copy.deepcopy(DDP_NET).cuda(),
            device_ids=[self.rank],
            gradient_as_bucket_view=grad_is_view,
        )
        averager = create_averager()
        opt = torch.optim.SGD(net.parameters(), lr=learning_rate)
        net_using_post_localSGD_opt = torch.nn.parallel.DistributedDataParallel(
            copy.deepcopy(DDP_NET).cuda(),
            device_ids=[self.rank],
            gradient_as_bucket_view=grad_is_view,
        )
        # Process group cannot be pickled in some environments,
        # so cannot deep copy an averager. See:
        # https://github.com/pytorch/pytorch/pull/74737#pullrequestreview-922487496
        averager2 = create_averager()
        post_localSGD_opt = self._create_post_localSGD_optimizer(
            net_using_post_localSGD_opt, learning_rate, averager2
        )
        input = torch.randn(dist.get_world_size() * 2, 2).cuda()
        target = torch.randn(dist.get_world_size() * 2, 4).cuda()
        loss_fn = nn.MSELoss()
        for _ in range(20):
            self._perform_a_train_step(opt, net, loss_fn, input, target)
            averager.average_parameters(net.parameters())
            self._perform_a_train_step(
                post_localSGD_opt,
                net_using_post_localSGD_opt,
                loss_fn,
                input,
                target,
            )
        for p1, p2 in zip(
            net.parameters(),
            net_using_post_localSGD_opt.parameters(),
            strict=True,
        ):
            self.assertEqual(p1.data, p2.data)
        # Also check if the built-in step counters are the same to prevent a bug like #74737.
        self.assertEqual(averager.step, averager2.step)
def _create_periodic_model_averager(self):
return averagers.PeriodicModelAverager(period=4, warmup_steps=10)
def _create_post_localSGD_optimizer(self, net, learning_rate, averager):
return post_localSGD_optimizer.PostLocalSGDOptimizer(
optim=torch.optim.SGD(net.parameters(), lr=learning_rate),
averager=averager,
)
def _perform_a_train_step(self, optimizer, net, loss_fn, input, target):
optimizer.zero_grad()
output = net(input)
loss = loss_fn(output, target)
loss.backward()
optimizer.step()
    def _test_post_localSGD_optimizer_step_reload(
        self, create_averager, chkpt_file
    ):
        """Check that the averager step counter in PostLocalSGDOptimizer
        survives a state_dict save/load round trip, and that loading a state
        dict missing the 'step' entry warns and resets the counter to 0.

        Args:
            create_averager: zero-arg factory producing a model averager.
            chkpt_file: path used for the checkpoint (written by rank 0).
        """
        learning_rate = 0.03
        net_using_post_localSGD_opt = torch.nn.parallel.DistributedDataParallel(
            Net().cuda(), device_ids=[self.rank]
        )
        averager = create_averager()
        post_localSGD_opt = self._create_post_localSGD_optimizer(
            net_using_post_localSGD_opt, learning_rate, averager
        )
        averager2 = create_averager()
        dummy_post_localSGD_opt = self._create_post_localSGD_optimizer(
            net_using_post_localSGD_opt, learning_rate, averager2
        )
        input = torch.randn(dist.get_world_size() * 2, 2).cuda()
        target = torch.randn(dist.get_world_size() * 2, 4).cuda()
        loss_fn = nn.MSELoss()
        for _ in range(20):
            self._perform_a_train_step(
                post_localSGD_opt,
                net_using_post_localSGD_opt,
                loss_fn,
                input,
                target,
            )
        if self.rank == 0:
            torch.save(
                {"optimizer_state_dict": post_localSGD_opt.state_dict()}, chkpt_file
            )
        dist.barrier()
        map_location = {"cuda:0": f"cuda:{self.rank:d}"}
        checkpoint = torch.load(chkpt_file, map_location=map_location)
        dummy_post_localSGD_opt.load_state_dict(checkpoint["optimizer_state_dict"])
        # Check that we didn't hit the trivial case
        self.assertNotEqual(averager2.step, 0)
        # Check if dummy averager was initialized to a correct value
        self.assertEqual(averager.step, averager2.step)
        # Remove 'step' entry from a checkpoint.
        # And make sure it is not in the state dictionary
        del checkpoint["optimizer_state_dict"]["step"]
        self.assertNotIn("step", checkpoint["optimizer_state_dict"])
        # Check if checkpoint without a 'step' entry invokes a warning
        with self.assertWarnsRegex(
            expected_warning=UserWarning,
            expected_regex="Loaded state dict does not contain a step counter for an averager. "
            "Setting step counter to 0.",
        ):
            dummy_post_localSGD_opt.load_state_dict(
                checkpoint["optimizer_state_dict"]
            )
        self.assertEqual(averager2.step, 0)
    @skip_if_lt_x_gpu(2)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    def test_post_localSGD_optimizer_parity(self):
        """Parity test with a periodic averager, gradients not as bucket views."""
        torch.cuda.set_device(self.rank)
        self._test_post_localSGD_optimizer_parity(
            self._create_periodic_model_averager,
            grad_is_view=False,
        )
    @skip_if_lt_x_gpu(2)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    def test_post_localSGD_optimizer_parity_grad_is_view(self):
        """Parity test with a periodic averager, gradients as bucket views."""
        torch.cuda.set_device(self.rank)
        self._test_post_localSGD_optimizer_parity(
            self._create_periodic_model_averager,
            grad_is_view=True,
        )
def _create_hierarchical_model_averager(self):
period_group_size_dict = OrderedDict([(2, 2), (4, dist.get_world_size())])
return hierarchicalSGD.HierarchicalModelAverager(
period_group_size_dict=period_group_size_dict, warmup_steps=4
)
    @skip_if_lt_x_gpu(4)
    @skip_if_odd_worldsize
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    def test_post_localSGD_optimizer_parity_with_hierarchical_sgd(self):
        """Parity test with a hierarchical averager, gradients not as bucket views."""
        torch.cuda.set_device(self.rank)
        self._test_post_localSGD_optimizer_parity(
            self._create_hierarchical_model_averager,
            grad_is_view=False,
        )
    @skip_if_lt_x_gpu(4)
    @skip_if_odd_worldsize
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    def test_post_localSGD_optimizer_parity_with_hierarchical_sgd_grad_is_view(
        self,
    ):
        """Parity test with a hierarchical averager, gradients as bucket views."""
        torch.cuda.set_device(self.rank)
        self._test_post_localSGD_optimizer_parity(
            self._create_hierarchical_model_averager,
            grad_is_view=True,
        )
    @skip_if_lt_x_gpu(2)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    def test_post_localSGD_optimizer_step_reload(self):
        """Run the step-counter save/reload check with a periodic averager,
        using a per-rank temporary checkpoint file."""
        torch.cuda.set_device(self.rank)
        with _rank_temp_file() as tmp_file:
            self._test_post_localSGD_optimizer_step_reload(
                self._create_periodic_model_averager, tmp_file
            )
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    @skip_if_no_gpu
    def test_DistributedDataParallel_SyncBatchNorm_Channels_Last(self):
        """Run the SyncBatchNorm memory-format check for both channels_last
        (4D input) and channels_last_3d (5D input)."""
        self._test_DistributedDataParallel_SyncBatchNorm_with_memory_format(
            torch.channels_last
        )
        self._test_DistributedDataParallel_SyncBatchNorm_with_memory_format(
            torch.channels_last_3d
        )
    def _test_DistributedDataParallel_SyncBatchNorm_with_memory_format(
        self, memory_format
    ):
        """Train a SyncBatchNorm layer under DDP with inputs converted to
        *memory_format* and compare against the single-GPU baseline.

        The input is 4D for torch.channels_last and 5D otherwise
        (i.e. channels_last_3d).
        """
        _group, _group_id, rank = self._init_global_test()
        num_processes = dist.get_world_size()
        local_bs = 2
        bs_offset = int(rank * 2)
        global_bs = int(num_processes * 2)
        model = nn.SyncBatchNorm(2, momentum=0.99)
        model_gpu = copy.deepcopy(model).cuda(rank)
        model_DDP = nn.parallel.DistributedDataParallel(
            model_gpu, device_ids=[rank]
        )
        # 4D shape for channels_last, extra trailing dim (5D) for channels_last_3d.
        shapes = [global_bs, 2, 4, 4] + (
            [] if memory_format is torch.channels_last else [4]
        )
        input_gpu = (
            torch.randn(*shapes, dtype=torch.float)
            .cuda(rank)
            .to(memory_format=memory_format)
        )
        target_gpu = (
            torch.randn(*shapes, dtype=torch.float)
            .cuda(rank)
            .to(memory_format=memory_format)
        )
        loss = nn.MSELoss()
        # check two model parameters over 5 iterations
        self._test_DDP_niter(
            model_gpu,
            model_DDP,
            input_gpu,
            target_gpu,
            loss,
            local_bs,
            rank,
            global_bs,
            True,
            bs_offset,
            dist.get_world_size(),
            memory_format=memory_format,
        )
        self._barrier()
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    @skip_if_no_gpu
    def test_DistributedDataParallel_SyncBatchNorm(self):
        """SyncBatchNorm parity with uniform per-rank batch size 2, covering
        default placement, explicit output_device, and torch.device ids."""
        _group, _group_id, rank = self._init_global_test()
        world_size = dist.get_world_size()
        # DDP does not support replicating BN layers within a process, hence
        # testing with one module replica per process
        gpus = [rank]
        local_bs = 2
        bs_offset = int(rank * 2)
        global_bs = int(world_size * 2)
        self._test_DistributedDataParallel_SyncBatchNorm(
            gpu_subset=gpus,
            rank=rank,
            local_bs=local_bs,
            global_bs=global_bs,
            offset=bs_offset,
        )
        # test output_device
        self._test_DistributedDataParallel_SyncBatchNorm(
            gpu_subset=gpus,
            rank=rank,
            local_bs=local_bs,
            global_bs=global_bs,
            offset=bs_offset,
            output_device=torch.device("cuda"),
        )
        # test device_ids
        gpus = [torch.device("cuda:" + str(i)) for i in gpus]
        self._test_DistributedDataParallel_SyncBatchNorm(
            gpu_subset=gpus,
            rank=rank,
            local_bs=local_bs,
            global_bs=global_bs,
            offset=bs_offset,
            output_device=torch.device("cuda"),
        )
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    @skip_if_no_gpu
    def test_DistributedDataParallel_SyncBatchNorm_No_Affine(self):
        """SyncBatchNorm parity when the BatchNorm layers have no affine
        (weight/bias) parameters."""
        _group, _group_id, rank = self._init_global_test()
        world_size = dist.get_world_size()
        # DDP does not support replicating BN layers within a process, hence
        # testing with one module replica per process
        gpus = [rank]
        local_bs = 2
        bs_offset = int(rank * 2)
        global_bs = int(world_size * 2)
        self._test_DistributedDataParallel_SyncBatchNorm(
            gpu_subset=gpus,
            rank=rank,
            local_bs=local_bs,
            global_bs=global_bs,
            offset=bs_offset,
            affine=False,
        )
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    @skip_if_no_gpu
    def test_DistributedDataParallel_SyncBatchNorm_2D_Input(self):
        """SyncBatchNorm parity for a bare BatchNorm1d converted to
        SyncBatchNorm with 2D (N, C) input, cudnn disabled."""
        _group, _group_id, rank = self._init_global_test()
        # DDP does not support replicating BN layers within a process, hence
        # testing with one module replica per process
        gpus = [rank]
        model = nn.BatchNorm1d(2)
        # single gpu training setup
        model_gpu = copy.deepcopy(model)
        model_gpu.cuda(gpus[0])
        # DDP training setup
        model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model))
        model_DDP.cuda(gpus[0])
        model_DDP = nn.parallel.DistributedDataParallel(model_DDP, device_ids=gpus)
        local_bs = len(gpus) * 2
        global_bs = dist.get_world_size() * local_bs
        input_cpu = torch.randn(global_bs, 2)
        target = torch.randn(global_bs, 2)
        loss = nn.MSELoss()
        # disabling cudnn.
        # SyncBatchNorm goes through native_batch_norm kernel, this avoids the
        # numerical issue created by the divergent code path.
        with torch.backends.cudnn.flags(False):
            # check two model parameters over 5 iterations
            self._test_DDP_niter(
                model_gpu,
                model_DDP,
                input_cpu.cuda(gpus[0]),
                target.cuda(gpus[0]),
                loss,
                local_bs,
                rank,
                global_bs,
                True,
            )
        self._barrier()
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    @skip_if_no_gpu
    @require_world_size(2)
    def test_DistributedDataParallel_SyncBatchNorm_Single_Input_Per_Process(self):
        """SyncBatchNorm parity with a local batch size of 1, i.e. statistics
        can only be formed by syncing across processes (hence world size 2)."""
        _group, _group_id, rank = self._init_global_test()
        # DDP does not support replicating BN layers within a process, hence
        # testing with one module replica per process
        gpus = [rank]
        model = nn.BatchNorm1d(2)
        # single gpu training setup
        model_gpu = copy.deepcopy(model)
        model_gpu.cuda(gpus[0])
        # DDP training setup
        model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model))
        model_DDP.cuda(gpus[0])
        model_DDP = nn.parallel.DistributedDataParallel(model_DDP, device_ids=gpus)
        local_bs = 1
        global_bs = dist.get_world_size()
        input_cpu = torch.randn(global_bs, 2)
        target = torch.randn(global_bs, 2)
        loss = nn.MSELoss()
        # disabling cudnn.
        # SyncBatchNorm goes through native_batch_norm kernel, this avoids the
        # numerical issue created by the divergent code path.
        with torch.backends.cudnn.flags(False):
            # check two model parameters over 5 iterations
            self._test_DDP_niter(
                model_gpu,
                model_DDP,
                input_cpu.cuda(gpus[0]),
                target.cuda(gpus[0]),
                loss,
                local_bs,
                rank,
                global_bs,
                True,
            )
        self._barrier()
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    @skip_if_no_gpu
    def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_Running_Value(
        self,
    ):
        """With per-rank inputs of different sizes and scales, the running
        mean/var of a DDP-wrapped SyncBatchNorm (momentum=0.99, 100 steps)
        must converge to the statistics of the concatenated global input."""
        ONLY_SBN_NET = nn.SyncBatchNorm(2, momentum=0.99)
        _group, _group_id, rank = self._init_global_test()
        model = nn.parallel.DistributedDataParallel(
            ONLY_SBN_NET.cuda(rank), device_ids=[rank]
        )
        # Rank i feeds a (2, 2, 10**(i+1)) tensor whose two channels carry
        # distinct constant values, so each rank's contribution differs in
        # both size and magnitude.
        input_var = []
        for i in range(dist.get_world_size()):
            input_var_rank = torch.cat(
                [
                    torch.ones(2, 1, 10 ** (i + 1)) * (0.1 ** (i - 1)),
                    torch.ones(2, 1, 10 ** (i + 1)) * (0.3 ** (i - 1)),
                ],
                dim=1,
            )
            input_var.append(input_var_rank)
        # Flatten every rank's input into one (num_features, total) tensor to
        # compute the reference global statistics per channel.
        all_input_var = torch.cat(
            [
                x.permute(1, 0, 2).contiguous().view(ONLY_SBN_NET.num_features, -1)
                for x in input_var
            ],
            dim=1,
        ).cuda(rank)
        for _ in range(100):
            y = model(input_var[rank].cuda(rank))
            y.mean().backward()
        running_mean, running_var = (
            model.module.running_mean,
            model.module.running_var,
        )
        torch.testing.assert_close(running_mean, all_input_var.mean(1))
        torch.testing.assert_close(running_var, all_input_var.var(1))
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    @skip_if_no_gpu
    def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_gradient(self):
        """SyncBatchNorm parity when each rank uses a different batch size
        (rank + 2); per-rank offsets follow the triangular-number pattern."""
        _group, _group_id, rank = self._init_global_test()
        # only do single GPU per process
        gpus = [rank]
        # cpu training setup
        num_processes = dist.get_world_size()
        local_bs = rank + 2
        bs_offset = int((rank + 3) * rank / 2)
        global_bs = int((num_processes + 3) * num_processes / 2)
        self._test_DistributedDataParallel_SyncBatchNorm(
            gpu_subset=gpus,
            rank=rank,
            local_bs=local_bs,
            global_bs=global_bs,
            offset=bs_offset,
        )
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    @skip_if_no_gpu
    def test_DistributedDataParallel_SyncBatchNorm_half(self):
        """A fully half-precision SyncBatchNorm model under DDP must run
        forward/backward without dtype errors and keep fp16 outputs/grads."""
        _group, _group_id, rank = self._init_global_test()
        model = BatchNormNet()
        model = model.half()
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        model = nn.parallel.DistributedDataParallel(
            model.cuda(rank), device_ids=[rank]
        )
        inp = torch.randn(2, 2, dtype=torch.float16, device=torch.device(rank))
        # Check that forward/backward do not error with dtype mismatch
        out = model(inp)
        self.assertEqual(out.dtype, torch.float16)
        out.sum().backward()
        for param in model.parameters():
            self.assertEqual(param.grad.dtype, torch.float16)
    def _test_ddp_logging_data(self, is_gpu):
        """Train a DDP-wrapped Net for 20 iterations with a runtime-stats
        sampling rate of 2, asserting that latency fields in the DDP logging
        data are populated exactly on sampled iterations.

        Args:
            is_gpu: place the model (and data) on this rank's GPU if True.

        Returns:
            The trained DDP model, so callers can inspect its logging data.
        """
        rank = dist.get_rank()
        model_DDP = Net()
        if is_gpu:
            model_DDP = nn.parallel.DistributedDataParallel(
                model_DDP.cuda(rank), device_ids=[rank]
            )
        else:
            model_DDP = nn.parallel.DistributedDataParallel(model_DDP)
        # dummy data initialization
        local_bs = 2
        batch_size, input, target, loss = self._prepare_dummy_data(local_bs)
        if is_gpu:
            input = input.cuda(rank)
            target = target.cuda(rank)
        model_DDP._set_ddp_runtime_logging_sample_rate(2)
        for idx in range(20):
            offset = rank * local_bs
            # DDP training, DDP scatters subsets of input to nodes/GPUs
            self._test_DDP_helper(
                model_DDP,
                input[offset : offset + local_bs],
                target[offset : offset + local_bs],
                loss,
                1,
            )
            self._model_step_with_zero_grad(model_DDP)
            # Verify DDP logging data is sampled as expected
            # If it has ran more than 10 iterations and this is
            # the sampled iteration for measuring run time stats,
            # the run time stats for this idx-th iteration will not
            # be zeros.
            ddp_logging_data = model_DDP._get_ddp_logging_data()
            if idx > 0 and (idx < 10 or idx % 2 == 0):
                self.assertGreaterEqual(
                    ddp_logging_data.get("forward_compute_time"), 1
                )
                self.assertGreaterEqual(
                    ddp_logging_data.get("backward_compute_time"), 1
                )
                self.assertGreaterEqual(
                    ddp_logging_data.get("backward_comm_time"), 1
                )
                self.assertGreaterEqual(
                    ddp_logging_data.get("backward_compute_time"),
                    ddp_logging_data.get("backward_compute_comm_overlap_time"),
                )
                self.assertGreaterEqual(
                    ddp_logging_data.get("backward_comm_time"),
                    ddp_logging_data.get("backward_compute_comm_overlap_time"),
                )
                self.assertEqual(ddp_logging_data.get("iteration"), idx)
            elif idx > 0:
                # if the idx-th iteration is not sampled to set runtime stats,
                # ddp_logging_data.iteration will not be updated to current
                # iteration.
                self.assertNotEqual(ddp_logging_data.get("iteration"), idx)
            # Shuffle the input so that DDP input is different
            input = input[torch.randperm(batch_size)]
        return model_DDP
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "nccl", "nccl does not support DDP on CPU models"
    )
    def test_ddp_logging_data_cpu(self):
        """Validate the contents of DDP's logging data for a CPU model:
        construction-time fields (world size, bucket config, env vars),
        runtime latency fields and their ordering, and multi-bucket sizing
        for a larger mixed-dtype model."""
        def parse_env(var):
            # Missing env vars are logged as the literal "N/A".
            return os.environ.get(var, "N/A")
        dist.set_debug_level(dist.DebugLevel.INFO)
        _, group_id, _ = self._init_global_test()
        model_DDP = self._test_ddp_logging_data(is_gpu=False)
        ddp_logging_data = model_DDP._get_ddp_logging_data()
        self.assertEqual(ddp_logging_data.get("world_size"), dist.get_world_size())
        self.assertEqual(ddp_logging_data.get("rank"), dist.get_rank())
        self.assertEqual(ddp_logging_data.get("module_name"), "Net")
        self.assertEqual(ddp_logging_data.get("device_ids"), "")
        # output_device is -1 in default if it is not set, e.g.
        # output_device of CPU training is -1.
        self.assertEqual(ddp_logging_data.get("output_device"), -1)
        self.assertEqual(ddp_logging_data.get("broadcast_buffers"), 1)
        self.assertEqual(ddp_logging_data.get("bucket_cap_bytes"), 25 * 1024 * 1024)
        self.assertEqual(ddp_logging_data.get("find_unused_parameters"), 0)
        self.assertEqual(ddp_logging_data.get("gradient_as_bucket_view"), 0)
        self.assertEqual(
            ddp_logging_data.get("backend_name"), dist.get_backend(group_id)
        )
        self.assertEqual(ddp_logging_data.get("iteration"), 18)
        params = list(model_DDP.parameters())
        num_params = 0
        param_size = 0
        params = list(filter(lambda parameter: parameter.requires_grad, params))
        for p in params:
            num_params += 1
            param_size += p.numel() * p.element_size()
        self.assertEqual(ddp_logging_data.get("dtypes"), "float")
        self.assertEqual(
            ddp_logging_data.get("total_parameter_size_bytes"), param_size
        )
        self.assertEqual(ddp_logging_data.get("num_parameter_tensors"), num_params)
        self.assertEqual(ddp_logging_data.get("bucket_sizes"), str(param_size))
        self.assertEqual(
            ddp_logging_data.get("master_port"), parse_env("MASTER_PORT")
        )
        self.assertEqual(
            ddp_logging_data.get("master_addr"), parse_env("MASTER_ADDR")
        )
        self.assertEqual(
            ddp_logging_data.get("torch_distributed_debug"),
            parse_env("TORCH_DISTRIBUTED_DEBUG"),
        )
        self.assertEqual(
            ddp_logging_data.get("cuda_visible_devices"),
            parse_env("CUDA_VISIBLE_DEVICES"),
        )
        if ddp_logging_data.get("backend_name") == "gloo":
            self.assertEqual(
                ddp_logging_data.get("gloo_socket_ifname"),
                parse_env("GLOO_SOCKET_IFNAME"),
            )
            self.assertEqual(
                ddp_logging_data.get("gloo_device_transport"),
                parse_env("GLOO_DEVICE_TRANSPORT"),
            )
            default_gloo_threads = 2
            self.assertEqual(
                ddp_logging_data.get("gloo_num_threads"),
                default_gloo_threads,
            )
        self.assertEqual(ddp_logging_data.get("nccl_socket_ifname"), None)
        self.assertEqual(ddp_logging_data.get("nccl_blocking_wait"), None)
        self.assertEqual(ddp_logging_data.get("nccl_async_error_handling"), None)
        self.assertEqual(ddp_logging_data.get("nccl_debug"), None)
        self.assertEqual(ddp_logging_data.get("nccl_nthreads"), None)
        self.assertEqual(ddp_logging_data.get("nccl_ib_timeout"), None)
        # test runtime logging fields
        # Note: DETAIL debug mode logs DDP logging data to stdout and
        # thus accesses std::map, which fills in a default value for the
        # type if it didn't exist.
        self.assertEqual(ddp_logging_data.get("unused_parameter_size", 0), 0)
        self.assertEqual(ddp_logging_data.get("has_rebuilt_buckets"), 1)
        self.assertEqual(
            ddp_logging_data.get("rebuilt_bucket_sizes"), str(param_size)
        )
        grad_ready_order = ddp_logging_data.get(
            "prev_iteration_grad_ready_order_indices"
        )
        # Gradients become ready in reverse parameter order during backward.
        expected_order = list(reversed([str(x) for x in range(3)]))
        self.assertEqual(grad_ready_order, ", ".join(expected_order))
        bucket_indices = ddp_logging_data.get("rebuilt_per_bucket_param_indices")
        self.assertEqual(bucket_indices, " ".join(expected_order))
        # It is hard to test accurate latency, but it can test whether the latency is
        # a valid value and in the expected range.
        self.assertGreaterEqual(ddp_logging_data.get("avg_forward_compute_time"), 1)
        self.assertGreaterEqual(
            ddp_logging_data.get("avg_backward_compute_time"), 1
        )
        self.assertGreaterEqual(ddp_logging_data.get("avg_backward_comm_time"), 1)
        self.assertGreaterEqual(
            ddp_logging_data.get("avg_backward_compute_time"),
            ddp_logging_data.get("avg_backward_compute_comm_overlap_time"),
        )
        self.assertGreaterEqual(
            ddp_logging_data.get("avg_backward_comm_time"),
            ddp_logging_data.get("avg_backward_compute_comm_overlap_time"),
        )
        # Test host-side times are roughly in the order that we expect
        fwd_host_side_time = ddp_logging_data.get("forward_compute_time_start")
        bwd_comp_start_host_side_time = ddp_logging_data.get(
            "backward_compute_time_start"
        )
        bwd_comp_end_host_side_time = ddp_logging_data.get(
            "backward_compute_time_end"
        )
        bwd_comm_start_host_side_time = ddp_logging_data.get(
            "backward_comm_time_start"
        )
        bwd_comm_end_host_side_time = ddp_logging_data.get("backward_comm_time_end")
        self.assertGreaterEqual(
            bwd_comm_end_host_side_time, bwd_comm_start_host_side_time
        )
        self.assertGreaterEqual(
            bwd_comm_start_host_side_time, bwd_comp_start_host_side_time
        )
        self.assertGreaterEqual(
            bwd_comp_end_host_side_time, bwd_comp_start_host_side_time
        )
        self.assertGreaterEqual(bwd_comp_start_host_side_time, fwd_host_side_time)
        # test larger net with mixed data types, verify multiple bucket sizes
        model = LargeNet()
        model.float()
        model.fc1.double()
        model_DDP = nn.parallel.DistributedDataParallel(model, bucket_cap_mb=1.5)
        ddp_logging_data = model_DDP._get_ddp_logging_data()
        params = list(model_DDP.parameters())
        self.assertEqual(
            ddp_logging_data.get("bucket_cap_bytes"), int(1.5 * 1024 * 1024)
        )
        bucket_sizes = [
            params[1].numel() * params[1].element_size(),
            params[0].numel() * params[0].element_size(),
        ]
        self.assertEqual(
            ddp_logging_data.get("bucket_sizes"),
            ", ".join(str(x) for x in bucket_sizes),
        )
        self.assertEqual(ddp_logging_data.get("dtypes"), "double, float")
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    @skip_if_no_gpu
    def test_ddp_logging_data_gpu(self):
        """Validate DDP logging data for a GPU model: device placement
        fields, gradient-ready order, and that runtime latency fields are
        populated and mutually consistent."""
        _group, _group_id, rank = self._init_global_test()
        model_DDP = self._test_ddp_logging_data(is_gpu=True)
        ddp_logging_data = model_DDP._get_ddp_logging_data()
        self.assertEqual(ddp_logging_data.get("device_ids"), str(rank))
        self.assertEqual(ddp_logging_data.get("output_device"), rank)
        grad_ready_order = ddp_logging_data.get(
            "prev_iteration_grad_ready_order_indices"
        )
        # Gradients become ready in reverse parameter order during backward.
        expected_order = list(reversed([str(x) for x in range(3)]))
        self.assertEqual(grad_ready_order, ", ".join(expected_order))
        bucket_indices = ddp_logging_data.get("rebuilt_per_bucket_param_indices")
        self.assertEqual(bucket_indices, " ".join(expected_order))
        # test runtime logging fields
        # It is hard to test accurate latency, but it can test whether the latency is
        # a valid value and in the expected range.
        self.assertGreaterEqual(ddp_logging_data.get("avg_forward_compute_time"), 1)
        self.assertGreaterEqual(
            ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), 1
        )
        self.assertGreaterEqual(
            ddp_logging_data.get("avg_backward_compute_time"),
            ddp_logging_data.get("avg_backward_compute_comm_overlap_time"),
        )
        self.assertGreaterEqual(
            ddp_logging_data.get("avg_backward_comm_time"),
            ddp_logging_data.get("avg_backward_compute_comm_overlap_time"),
        )
        # Test host-side times are roughly in the order that we expect
        fwd_host_side_time = ddp_logging_data.get("forward_compute_time_start")
        bwd_comp_start_host_side_time = ddp_logging_data.get(
            "backward_compute_time_start"
        )
        bwd_comp_end_host_side_time = ddp_logging_data.get(
            "backward_compute_time_end"
        )
        bwd_comm_start_host_side_time = ddp_logging_data.get(
            "backward_comm_time_start"
        )
        bwd_comm_end_host_side_time = ddp_logging_data.get("backward_comm_time_end")
        self.assertGreaterEqual(
            bwd_comm_end_host_side_time, bwd_comm_start_host_side_time
        )
        self.assertGreaterEqual(
            bwd_comm_start_host_side_time, bwd_comp_start_host_side_time
        )
        self.assertGreaterEqual(
            bwd_comp_end_host_side_time, bwd_comp_start_host_side_time
        )
        self.assertGreaterEqual(bwd_comp_start_host_side_time, fwd_host_side_time)
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "nccl", "nccl does not support DDP on CPU models"
    )
    def test_static_graph_api_cpu(self):
        """Calling _set_static_graph() after training has started must raise
        a RuntimeError, and the error must appear in the DDP logging data."""
        model_DDP = nn.parallel.DistributedDataParallel(Net())
        expected_err = "should be called before training loop starts"
        with self.assertRaisesRegex(RuntimeError, expected_err):
            local_bs = 2
            _batch_size, input, target, loss = self._prepare_dummy_data(local_bs)
            offset = dist.get_rank() * local_bs
            # DDP training, DDP scatters subsets of input to nodes/GPUs
            self._test_DDP_helper(
                model_DDP,
                input[offset : offset + local_bs],
                target[offset : offset + local_bs],
                loss,
                1,
            )
            model_DDP._set_static_graph()
        # Verify error was logged in ddp_logging_data.
        verify_ddp_error_logged(model_DDP, expected_err)
    @skipIfNoTorchVision
    def test_SyncBatchNorm_process_group(self):
        """convert_sync_batchnorm must propagate the given process group into
        SyncBatchNorm layers nested deep inside a module (resnet50)."""
        # When adopting `convert_sync_batchnorm` to convert a `nn.modules`,
        # it need to recursively pass the `process_group` in the module when the `SyncBatchNorm`
        # is nested in a sub-module or sub-sub-module (e.g. resnet50 in torchvision.models).
        process_ids = 0
        process_group = torch.distributed.new_group([process_ids])
        res50_model = torchvision.models.resnet50()
        res50_model_sync = nn.SyncBatchNorm.convert_sync_batchnorm(
            copy.deepcopy(res50_model), process_group
        )
        process_group_sync = res50_model_sync.layer1[0].bn1.process_group
        self.assertEqual(process_group_sync, process_group)
def _run_reduction_test(
self, tensor, expected_tensor, op, reduction_fn=dist.all_reduce, dst=None
):
if reduction_fn is not dist.all_reduce and dst is None:
raise ValueError(f"Reduction fn {reduction_fn} must specify dst!")
if dst is not None:
reduction_fn(tensor, dst, op)
# Only destination rank tensor is expected to have final result.
if dist.get_rank() == dst:
self.assertEqual(tensor, expected_tensor)
else:
reduction_fn(tensor, op)
self.assertEqual(tensor, expected_tensor)
    @require_backend_is_available({"nccl"})
    @skip_if_lt_x_gpu(2)
    def test_nccl_backend_bool_allreduce(self):
        """Boolean tensors under NCCL allreduce: PRODUCT/MIN behave like
        logical AND (any False rank yields False), SUM/MAX like logical OR."""
        torch.cuda.set_device(self.rank)
        # Run all_reduce with PRODUCT
        element = self.rank % 2 == 0
        for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]:
            input_tensor = torch.tensor([element, element]).to(self.rank)
            self._run_reduction_test(
                input_tensor, torch.tensor([False, False]).to(self.rank), op
            )
            # Ensure that all ranks contributing True (cast to 1) results in the
            # correct reduction.
            input_tensor = torch.tensor([True, True]).to(self.rank)
            expected_tensor = input_tensor.clone()
            self._run_reduction_test(input_tensor, expected_tensor, op)
        # Run all_reduce with SUM
        for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]:
            input_tensor = torch.tensor([element, element]).to(self.rank)
            self._run_reduction_test(
                input_tensor, torch.tensor([True, True]).to(self.rank), op
            )
    # TODO: NCCL backend does not work correctly for bitwise reduction ops
    # (see https://github.com/pytorch/pytorch/issues/41362). Add tests for
    # these once it is supported.
    @require_backend_is_available({"nccl"})
    @skip_if_lt_x_gpu(2)
    def test_nccl_backend_bool_allgather(self):
        """Boolean tensors under NCCL all_gather: every rank's values must be
        gathered intact and the input tensor left unmodified."""
        torch.cuda.set_device(self.rank)
        inp = {0: [True, True], 1: [False, True]}
        input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank)
        # Preserve a copy of the tensor to compare against after allgather.
        input_tensor_copy = input_tensor.clone()
        tensor_list = [
            torch.tensor([False, False]).to(self.rank)
            for _ in range(dist.get_world_size())
        ]
        dist.all_gather(tensor_list, input_tensor)
        self.assertEqual(len(tensor_list), dist.get_world_size())
        for i, t in enumerate(tensor_list):
            expected = torch.tensor(inp[i % 2]).to(self.rank)
            self.assertEqual(t, expected)
        # Ensure that the input tensor is not modified, since this collective
        # does not modify its input.
        self.assertEqual(input_tensor_copy, input_tensor)
    @require_backend_is_available({"nccl"})
    @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
    def test_nccl_backend_bool_reduce(self):
        """reduce() on bool tensors with NCCL: PRODUCT/MIN act as logical
        AND, SUM/MAX as logical OR; only dst rank 0 is validated."""
        torch.cuda.set_device(self.rank)
        inp = {0: [True, True], 1: [False, False]}
        # Run reduce() with product op
        for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]:
            # make sure rank 0 gets False if WORLD_SIZE=1 to match expected tensor
            input_tensor = torch.tensor(inp[(self.rank + 1) % 2]).to(self.rank)
            expected = torch.tensor([False, False]).to(self.rank)
            self._run_reduction_test(input_tensor, expected, op, dist.reduce, dst=0)
            # Ensure that all ranks contributing True (cast to 1) results in the
            # correct reduction.
            input_tensor = torch.tensor([True, True]).to(self.rank)
            expected_tensor = input_tensor.clone()
            self._run_reduction_test(
                input_tensor, expected_tensor, op, dist.reduce, dst=0
            )
        for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]:
            input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank)
            # Rank 0 contributes [True, True], so OR-like ops yield True on the
            # destination; non-destination tensors are unchanged.
            expected = (
                torch.tensor([True, True]).to(self.rank)
                if self.rank == 0
                else input_tensor.clone()
            )
            self._run_reduction_test(input_tensor, expected, op, dist.reduce, dst=0)
    @require_backend_is_available({"nccl"})
    @skip_if_lt_x_gpu(2)
    def test_nccl_backend_bool_broadcast(self):
        """broadcast of a bool tensor with NCCL: after broadcasting random
        values from rank 0, an all_gather confirms every rank agrees."""
        tensor_size = 10
        # Rank 0 generates random booleans; all other ranks start with False.
        bcast_tensor = torch.tensor(
            [
                (random.random() < 0.5 if self.rank == 0 else False)
                for _ in range(tensor_size)
            ]
        ).to(self.rank)
        dist.broadcast(bcast_tensor, src=0)
        # Now allgather and ensure the tensors are equal.
        tensor_list = [
            torch.tensor([False for _ in range(tensor_size)]).to(self.rank)
            for _ in range(dist.get_world_size())
        ]
        dist.all_gather(tensor_list, bcast_tensor)
        expected = tensor_list[0]
        for tensor in tensor_list[1:]:
            self.assertEqual(tensor, expected)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
    def test_DistributedSampler_padding(self):
        """DistributedSampler sizing: drop_last=True truncates to a
        world_size-divisible length, drop_last=False pads up to one, and
        padding also applies to datasets smaller than the world size."""
        # Tests padding of distributed sampler.
        world_size = dist.get_world_size()
        # Simulates the 'casual' dataset size
        dataset_size = 100 + world_size + 1
        dataset = [torch.ones(1).to(self.rank) * i for i in range(dataset_size)]
        # Simulates the 'tiny' dataset size
        dataset_tiny_size = max(world_size // 2 - 1, 1)
        dataset_tiny = [
            torch.ones(1).to(self.rank) * i for i in range(dataset_tiny_size)
        ]
        # Specifying drop_last=True will cause the tail of the data to be dropped.
        dist_sampler = DistributedSampler(dataset=dataset, drop_last=True)
        local_num_samples, local_dataset_size = (
            dist_sampler.num_samples,
            dist_sampler.total_size,
        )
        # The effective dataset size should be the greatest integer that is <=
        # dataset_size that is divisible by the world_size. This is to ensure each
        # rank processes the same number of samples.
        effective_dataset_size = (
            math.ceil((dataset_size - world_size) / world_size)
            if dataset_size % world_size != 0
            else dataset_size / world_size
        )
        self.assertEqual(local_num_samples, effective_dataset_size)
        self.assertEqual(local_dataset_size, local_num_samples * world_size)
        indices_list = list(iter(dist_sampler))
        self.assertEqual(len(indices_list), local_num_samples)
        def validate_global_samples(local_num_samples):
            # Ensure that each rank processes the same number of samples.
            world_samples = [
                torch.LongTensor([0]).to(self.rank) for _ in range(world_size)
            ]
            dist.all_gather(
                world_samples, torch.tensor([local_num_samples]).to(self.rank)
            )
            world_samples = [sample.item() for sample in world_samples]
            # All ranks must report the identical sample count.
            self.assertEqual(len(set(world_samples)), 1)
        validate_global_samples(local_num_samples)
        # drop_last=False is the default and will add additional indices to be sampled,
        # increasing the effective dataset size.
        dist_sampler_added_samples = DistributedSampler(dataset=dataset)
        local_num_samples, local_dataset_size = (
            dist_sampler_added_samples.num_samples,
            dist_sampler_added_samples.total_size,
        )
        # The effective dataset size is the smallest integer that is >= dataset_size
        # and divisible by the world size.
        self.assertEqual(local_num_samples, math.ceil(dataset_size / world_size))
        self.assertEqual(local_dataset_size, local_num_samples * world_size)
        indices_list = list(iter(dist_sampler_added_samples))
        self.assertEqual(len(indices_list), local_num_samples)
        # Ensure that each rank processes the same number of samples.
        validate_global_samples(local_num_samples)
        # Ensure additional samples are padded even when
        # the extremely small dataset is given.
        dist_sampler_added_samples_tiny = DistributedSampler(dataset=dataset_tiny)
        local_num_samples, local_dataset_size = (
            dist_sampler_added_samples_tiny.num_samples,
            dist_sampler_added_samples_tiny.total_size,
        )
        self.assertEqual(
            local_num_samples, math.ceil(dataset_tiny_size / world_size)
        )
        self.assertEqual(local_dataset_size, local_num_samples * world_size)
        indices_list = list(iter(dist_sampler_added_samples_tiny))
        self.assertEqual(len(indices_list), local_num_samples)
        validate_global_samples(local_num_samples)
    def _test_allgather_object(self, subgroup=None):
        """Run all_gather_object over picklable test objects and verify each
        rank's contribution round-trips; optionally over *subgroup*."""
        # Only set device for NCCL backend since it must use GPUs.
        gather_objects = create_collectives_object_test_list()
        backend = os.environ["BACKEND"]
        if backend == "nccl":
            # Case where rank != GPU device.
            next_rank = (self.rank + 1) % int(self.world_size)
            torch.cuda.set_device(next_rank)
        # If GPU test, add object with GPU tensor
        if backend == "nccl":
            gather_objects.append(Foo(torch.randn(3, 3, device=0)))
        output_gathered = [None for _ in range(dist.get_world_size())]
        dist.all_gather_object(
            output_gathered,
            gather_objects[self.rank % len(gather_objects)],
            group=subgroup,
        )
        for i, val in enumerate(output_gathered):
            # Rank i contributed gather_objects[i % len(gather_objects)].
            expected = gather_objects[i % len(gather_objects)]
            self.assertEqual(val, expected)
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @require_n_gpus_for_nccl_backend(
        int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"]
    )
    @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"])
    def test_all_gather_object_default_pg(self):
        """all_gather_object over the default process group."""
        return self._test_allgather_object()
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @require_n_gpus_for_nccl_backend(
        int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"]
    )
    @with_dist_debug_levels(levels=["DETAIL", "OFF", "INFO"])
    def test_all_gather_object_subgroup(self):
        """all_gather_object over a freshly created subgroup (same backend
        as the default group; new_group with no ranks spans all ranks)."""
        default = _get_default_group()
        backend = dist.get_backend(default)
        subgroup = dist.new_group(backend=backend)
        return self._test_allgather_object(subgroup=subgroup)
    def _test_gather_object(self, pg=None):
        """Gather picklable objects onto rank 0 via gather_object and verify
        them there; also checks that unpicklable objects raise."""
        # Ensure stateful objects can be gathered
        gather_objects = create_collectives_object_test_list()
        my_rank = dist.get_rank(pg)
        backend = os.environ["BACKEND"]
        if backend == "nccl":
            # Case where rank != GPU device.
            next_rank = (self.rank + 1) % int(self.world_size)
            torch.cuda.set_device(next_rank)
        # If GPU test, add object with GPU tensor
        if backend == "nccl":
            gather_objects.append(Foo(torch.randn(3, 3, device=my_rank)))
        output_gathered = [None for _ in range(dist.get_world_size(pg))]
        gather_on_rank = 0
        dist.gather_object(
            gather_objects[self.rank % len(gather_objects)],
            object_gather_list=output_gathered
            if my_rank == gather_on_rank
            else None,
            dst=gather_on_rank,
            group=pg,
        )
        if my_rank != gather_on_rank:
            # Non-destination ranks must see their gather list untouched.
            # NOTE(review): this uses dist.get_world_size() without `pg`;
            # looks correct only while the subgroup spans all ranks — confirm.
            self.assertEqual(
                output_gathered, [None for _ in range(dist.get_world_size())]
            )
        else:
            for i, val in enumerate(output_gathered):
                expected = gather_objects[i % len(gather_objects)]
                self.assertEqual(val, expected)
        # Validate errors when objects can't be pickled.
        class Bar:
            pass
        b = Bar()
        gather_objects = [b for _ in range(dist.get_world_size())]
        with self.assertRaises(AttributeError):
            dist.all_gather_object(
                [None for _ in range(dist.get_world_size())],
                gather_objects[self.rank],
                group=pg,
            )
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "ucc", "CPU tensor ops not supported by UCP TL"
    )
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @with_dist_debug_levels(levels=["DETAIL", "OFF", "INFO"])
    @require_exact_world_size(4)
    def test_gather_object(self):
        """gather_object over the default process group."""
        return self._test_gather_object()
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "ucc", "CPU tensor ops not supported by UCP TL"
    )
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @with_dist_debug_levels(levels=["DETAIL", "OFF", "INFO"])
    @require_exact_world_size(4)
    def test_gather_object_subgroup(self):
        """gather_object over a freshly created subgroup using the same
        backend as the default group."""
        default = _get_default_group()
        backend = dist.get_backend(default)
        subgroup = dist.new_group(backend=backend)
        return self._test_gather_object(subgroup)
def validate_net_equivalence(self, net):
# Helper to validate synchronization of nets across ranks.
net_module_states = list(net.module.state_dict().values())
# Check that all tensors in module's state_dict() are equal.
for t in net_module_states:
tensor_list = [
torch.zeros_like(t) for _ in range(dist.get_world_size())
]
dist.all_gather(tensor_list, t)
for tensor in tensor_list:
self.assertEqual(tensor, t)
    @skip_if_lt_x_gpu(2)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    def test_ddp_sync_module_states(self):
        """_sync_module_states broadcasts module state from the src rank so
        that every rank ends up with the src rank's parameters."""
        # Test that after calling _sync_module_states, models across ranks
        # are the same and are equal to the model on the input rank.
        dim = 2
        rank = self.rank
        rank_to_broadcast = 1
        # Seed to ensure that ranks are initialized with different initial models.
        torch.manual_seed(rank)
        model = nn.Linear(dim, dim, bias=False)
        net = torch.nn.parallel.DistributedDataParallel(
            model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1
        )
        # Replace the wrapped module after DDP construction so ranks diverge.
        new_model = nn.Linear(dim, dim, bias=False).cuda(rank)
        net.module = copy.deepcopy(new_model)
        # Assert params are different
        net_module_states = list(net.module.state_dict().values())
        for t in net_module_states:
            tensor_list = [
                torch.zeros_like(t) for _ in range(dist.get_world_size())
            ]
            dist.all_gather(tensor_list, t)
            for i, tensor in enumerate(tensor_list):
                if i == rank:
                    self.assertEqual(t, tensor)
                else:
                    # tensor from another rank should be different.
                    self.assertNotEqual(t, tensor)
        _sync_module_states(
            module=net.module,
            process_group=net.process_group,
            broadcast_bucket_size=net.broadcast_bucket_size,
            src=rank_to_broadcast,
            params_and_buffers_to_ignore=net.parameters_to_ignore,
        )
        # Now all model params should be the same.
        self.validate_net_equivalence(net)
        # Since the network params were broadcast from rank_to_broadcast, validate that
        # they are the same as new_model on rank_to_broadcast.
        if rank == rank_to_broadcast:
            expected_states = new_model.state_dict().values()
            for t, expected in zip(net_module_states, expected_states, strict=True):
                self.assertEqual(t, expected)
    @skip_if_lt_x_gpu(2)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    def test_ddp_grad_div_uneven_inputs(self):
        # Test gradient division during training with join() API. If
        # divide_by_initial_world_size=False, we scale by the effective world
        # size when allreducing grads.
        dim = 5
        batch = 1
        grad_scale = 50
        rank = self.rank
        model = nn.Linear(dim, dim, bias=False)
        inp = torch.ones(batch, dim, device=self.rank) * grad_scale
        net = torch.nn.parallel.DistributedDataParallel(
            model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1
        )
        # Ranks > 0 run extra iterations so rank 0 "joins" early.
        n_iters = 3
        if self.rank > 0:
            n_iters += 2
        with net.join(divide_by_initial_world_size=False):
            for _ in range(n_iters):
                loss = net(inp).sum()
                loss.backward()
                # The grad is always expected_grad, since we divide by the number
                # of currently active processes and inactive processes contribute
                # zero gradient. If we kept dividing by static initial world
                # size as processes leave, the grad would be smaller.
                expected_grad = torch.ones(dim, dim, device=self.rank) * grad_scale
                param = next(iter(net.parameters()))
                self.assertEqual(expected_grad, param.grad)
                # Avoid accumulating grads so that it's the same every iteration
                net.zero_grad()
                torch.cuda.synchronize(device=self.rank)
        # If divide_by_initial_world_size=True (default), we always scale grads
        # by the initial world_size.
        with net.join(divide_by_initial_world_size=True):
            for i in range(n_iters):
                loss = net(inp).sum()
                loss.backward()
                # After rank 0 leaves (iteration >= 3) one fewer process
                # contributes, but the divisor stays the initial world size.
                effective_ws = dist.get_world_size()
                if i >= 3:
                    effective_ws -= 1
                expected_grad = (
                    torch.ones(dim, dim, device=self.rank)
                    * grad_scale
                    * effective_ws
                ) / dist.get_world_size()
                param = next(iter(net.parameters()))
                self.assertEqual(expected_grad, param.grad)
                # Avoid accumulating grad so that it's the same every iteration.
                net.zero_grad()
                torch.cuda.synchronize(device=self.rank)
    def _test_ddp_profiling(self, profiler_ctx, profiler_ctx2=None):
        """Runs DDP based model training and captures profiles.

        This test will do two profiler runs.
        1. An initial basic run to check if profiler events are correctly captured.
        2. A second profiling pass after running some iterations of DDP, to check
           robustness of thread local state.

        Args:
            profiler_ctx: Profiler context manager for pass 1.
            profiler_ctx2: Profiler context manager for pass 2.
                This can be left out as None, in which case a deepcopy
                of profiler_ctx is used.
        Returns:
            prof: Instantiated profiler object that can be used for post analysis.
        """
        batch = 3
        dim = 10
        num_iters = 6
        torch.cuda.set_device(self.rank)
        model = nn.Linear(dim, dim, bias=False)
        inp = torch.rand(batch, dim, device=self.rank)
        net = torch.nn.parallel.DistributedDataParallel(
            model.cuda(self.rank),
            device_ids=[self.rank],
        )
        if profiler_ctx2 is None:
            profiler_ctx2 = copy.deepcopy(profiler_ctx)
        # Pass 1: profile a few training iterations.
        with profiler_ctx as prof:
            for _ in range(num_iters):
                loss = net(inp).sum()
                loss.backward()
        # One async all_reduce event is expected per iteration.
        all_reduce_event_name = f"{dist.get_backend()}:all_reduce"
        events = get_profiling_event(
            all_reduce_event_name, prof, dedup_gpu_user_annotation=True
        )
        event_count = sum(e.count for e in events)
        self.assertEqual(event_count, num_iters)
        for event in events:
            self.assertTrue(event.is_async)
            self.assertEqual(event.name, all_reduce_event_name)
        broadcast_event_name = f"{dist.get_backend()}:broadcast"
        broadcast_events = get_profiling_event(
            broadcast_event_name, prof, dedup_gpu_user_annotation=True
        )
        event_count = sum(e.count for e in broadcast_events)
        # Broadcast is called during rebuild_buckets
        self.assertGreaterEqual(event_count, 1)
        for event in broadcast_events:
            self.assertEqual(event.name, broadcast_event_name)
        # Run DDP with profiling for a few iterations, then enable profiling
        # for a single pass, and ensure it is recorded. This tests that the
        # thread local state is correctly updated.
        net = torch.nn.parallel.DistributedDataParallel(
            model.cuda(self.rank),
            device_ids=[self.rank],
            find_unused_parameters=True,
        )
        for _ in range(3):
            loss = net(inp).sum()
            loss.backward()
        # Now enable the profiler.
        with profiler_ctx2 as prof:
            loss = net(inp).sum()
            loss.backward()
        events = get_profiling_event(
            all_reduce_event_name, prof, dedup_gpu_user_annotation=True
        )
        self.assertGreaterEqual(len(events), 1)
        self.assertGreaterEqual(events[0].count, 1)
        self.assertEqual(events[0].name, all_reduce_event_name)
        for event in events:
            self.assertTrue(event.is_async)
        # Ensure searching unused parameters was profiled
        events = get_profiling_event("search_unused_parameters", prof)
        self.assertEqual(len(events), 1)
        return prof
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @skip_if_lt_x_gpu(2)
    @skip_but_pass_in_sandcastle("Currently failing in NVIDIA internal CI")
    def test_ddp_profiling_autograd_profiler(self):
        """DDP profiling via the legacy torch.autograd.profiler.profile API."""
        autograd_profiler_ctx = torch.autograd.profiler.profile()
        return self._test_ddp_profiling(profiler_ctx=autograd_profiler_ctx)
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @skip_if_lt_x_gpu(2)
    @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang")
    @skip_but_pass_in_sandcastle_if(
        IS_MACOS or IS_WINDOWS,
        "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124",
    )
    def test_ddp_profiling_torch_profiler(self):
        """DDP profiling via torch.profiler; for NCCL additionally checks
        the collective metadata recorded by the profiler."""
        cpu_act = torch.profiler.ProfilerActivity.CPU
        cuda_act = torch.profiler.ProfilerActivity.CUDA
        torch_profiler_ctx = torch.profiler.profile(activities=[cpu_act, cuda_act])
        prof = self._test_ddp_profiling(profiler_ctx=torch_profiler_ctx)
        # NCCL-only metadata checks below.
        if dist.get_backend() != "nccl":
            return
        # Note comment out the "os.remove(trace_file)" in `get_profiler_nccl_meta()`
        # to debug any mismatches.
        nccl_meta_events = get_profiler_nccl_meta(prof)
        self.assertGreater(len(nccl_meta_events), 0)
        nccl_meta = self._sanity_check_profiler_nccl_meta(nccl_meta_events)
        # additionally check the specific collectives in this test case
        self.assertEqual(len(nccl_meta["allreduce"]), 2)
        self.assertEqual(len(nccl_meta["wait"]), 1)
        # check allreduce message sizes
        a0 = nccl_meta["allreduce"][0]
        self.assertEqual(a0["Out msg nelems"], 100, msg=f"{a0}")
        self.assertEqual(a0["dtype"], "Float", msg=f"{a0}")
        a1 = nccl_meta["allreduce"][1]
        self.assertEqual(a1["Out msg nelems"], 1, msg=f"{a1}")
        self.assertEqual(a1["dtype"], "Int", msg=f"{a1}")
    def _validate_execution_trace_nccl(self, et_file: str) -> None:
        """Torch profiler includes nccl metadata in an inserted operator called
        "record_param_comms". We test for basic fields in these nodes in the
        Execution Trace (loaded from the JSON file at *et_file*).
        """
        with open(et_file) as f:
            et = json.load(f)
        pg_cfg_node = [
            n for n in et["nodes"] if n["name"] == "## process_group:init ##"
        ]
        self.assertGreaterEqual(len(pg_cfg_node), 1)
        nccl_meta_nodes = [
            n for n in et["nodes"] if n["name"] == "record_param_comms"
        ]
        self.assertEqual(len(nccl_meta_nodes), 3)
        per_coll_meta = defaultdict(list)
        # Sanity check NCCL metadata nodes
        for n in nccl_meta_nodes:
            attrs_list = n.get("attrs", [])
            self.assertGreater(len(attrs_list), 0)
            attrs = {a["name"]: a["value"] for a in attrs_list}
            collname = attrs.get("collective_name", "")
            self.assertNotEqual(collname, "")
            self.assertNotEqual(attrs.get("dtype", ""), "")
            per_coll_meta[collname].append(attrs)
            # "wait" nodes carry no message-size metadata; skip detailed checks.
            if collname == "wait":
                continue
            self.assertEqual(attrs["pg_name"], "0")  # yes this is a string
            self.assertEqual(attrs["pg_desc"], "default_pg")
            self.assertEqual(attrs["pg_size"], 2)
            self.assertGreaterEqual(attrs.get("in_msg_nelems", -1), 0)
            self.assertGreaterEqual(attrs.get("out_msg_nelems", -1), 0)
            self.assertTrue("in_split_size" in attrs)
            self.assertTrue("out_split_size" in attrs)
            self.assertEqual(attrs.get("global_rank_start", -1), 0)
            self.assertEqual(attrs.get("global_rank_stride", -1), 1)
        # print(per_coll_meta)
        self.assertEqual(len(per_coll_meta["allreduce"]), 2)
        self.assertEqual(len(per_coll_meta["wait"]), 1)
        # check allreduce message sizes
        a0 = per_coll_meta["allreduce"][0]
        self.assertEqual(a0["out_msg_nelems"], 100, msg=f"{a0}")
        self.assertEqual(a0["dtype"], "Float", msg=f"{a0}")
        a1 = per_coll_meta["allreduce"][1]
        self.assertEqual(a1["out_msg_nelems"], 1, msg=f"{a1}")
        self.assertEqual(a1["dtype"], "Int", msg=f"{a1}")
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @skip_if_lt_x_gpu(2)
    @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang")
    @skip_but_pass_in_sandcastle_if(
        IS_MACOS or IS_WINDOWS,
        "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124",
    )
    @unittest.skipIf(BACKEND != "nccl", "Tests nccl metadata primarily.")
    def test_ddp_profiling_execution_trace(self):
        """DDP profiling with an ExecutionTraceObserver attached on the second
        profiling pass; validates the NCCL metadata written to the trace."""
        self.assertEqual(dist.get_backend(), "nccl")
        # Create a temp file to save execution trace data
        with TemporaryFileName("w+t", suffix=".et.json") as et_file:
            et = ExecutionTraceObserver().register_callback(et_file)
            # first profiler context need not have ET
            torch_profiler_ctx1 = torch.profiler.profile(
                activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
            )
            # collect ET in second profiler pass
            torch_profiler_ctx2 = torch.profiler.profile(
                activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
                execution_trace_observer=et,
            )
            self._test_ddp_profiling(
                profiler_ctx=torch_profiler_ctx1,
                profiler_ctx2=torch_profiler_ctx2,
            )
            print(f"Execution trace saved at {et_file}")
            self._validate_execution_trace_nccl(et_file)
    @skip_if_lt_x_gpu(2)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    def test_ddp_join_model_equivalence(self):
        # Verifies equivalence with model training locally and with DDP under
        # the join context manager.
        batch = 3
        dim = 10
        learning_rate = 0.03
        model = nn.Linear(dim, dim, bias=False)
        inp = torch.rand(batch, dim, device=self.rank)
        local_model = copy.deepcopy(model)
        local_model = local_model.cuda(self.rank)
        # Each rank trains for a different number of iterations (uneven inputs).
        rank_to_iter_mapping = {
            rank: 2 * (rank + 1) for rank in range(dist.get_world_size())
        }
        # run local model
        local_iters = sum(rank_to_iter_mapping.values())
        local_optim = torch.optim.SGD(local_model.parameters(), lr=learning_rate)
        for _ in range(local_iters):
            local_optim.zero_grad()
            out = local_model(inp)
            loss = out.sum()
            loss.backward()
            local_optim.step()
        # run DDP model with join API
        num_iters = rank_to_iter_mapping[self.rank]
        net = torch.nn.parallel.DistributedDataParallel(
            model.cuda(self.rank), device_ids=[self.rank]
        )
        # lr is scaled by world size since DDP averages grads across ranks.
        ddp_optim = torch.optim.SGD(
            model.parameters(), lr=learning_rate * dist.get_world_size()
        )
        with net.join():
            for _ in range(num_iters):
                ddp_optim.zero_grad()
                out = net(inp)
                loss = out.sum()
                loss.backward()
                torch.cuda.synchronize(device=self.rank)
                ddp_optim.step()
        # Validate model state dicts are equal
        for (_, local_tensor), (_, dist_tensor) in zip(
            local_model.state_dict().items(),
            net.module.state_dict().items(),
            strict=True,
        ):
            self.assertEqual(local_tensor, dist_tensor)
    def _run_uneven_inputs_test(
        self,
        test_case,
        iteration_mapping,
        find_unused_params,
    ):
        """Train *test_case*'s model under DDP's join() with per-rank iteration
        counts from *iteration_mapping*, then validate iteration counts,
        authoritative-rank agreement, and cross-rank model equivalence.
        """
        model = test_case.model
        inp = test_case.inp
        rank = self.rank
        sync_interval = test_case.sync_interval
        torch.cuda.set_device(rank)
        # Ensure all outstanding GPU work is completed so this test runs independently.
        dist.barrier()
        # Bucket_cap_mb is intentionally low to test allreduce scheduling when
        # there are many buckets.
        net = torch.nn.parallel.DistributedDataParallel(
            model.cuda(rank),
            device_ids=[rank],
            bucket_cap_mb=1,
            find_unused_parameters=find_unused_params,
        )
        # Register hook if specified
        if test_case.hook is not None:
            net.register_comm_hook(test_case.state, test_case.hook)
            print(f"registered hook {test_case.hook}")
        # Determine num iters for this rank via the passed in mapping.
        num_iters = iteration_mapping[rank]
        # If we throw when earliest rank terminates, we should ensure
        # that we iterate for that minimum number of times.
        num_iters_tensor = torch.tensor(
            [num_iters], device=torch.cuda.current_device()
        )
        dist.all_reduce(num_iters_tensor, op=dist.ReduceOp.MIN)
        min_num_iters = num_iters_tensor.item()
        total_iters = 0
        if test_case.throw_on_early_termination:
            if min_num_iters == num_iters:
                # Early termination rank(s)
                exception_ctx = self.assertRaisesRegex(
                    RuntimeError, f"Rank {self.rank} exhausted all inputs"
                )
            else:
                # Non early termination rank
                exception_ctx = self.assertRaisesRegex(
                    RuntimeError,
                    "Detected at least one rank that exhausted inputs.",
                )
        else:
            exception_ctx = nullcontext()
        with exception_ctx:
            with net.join(
                throw_on_early_termination=test_case.throw_on_early_termination
            ):
                for i in range(num_iters):
                    # Use model.no_sync() to disable grad synchronization every
                    # sync_interval.
                    if i % sync_interval != 0:
                        context = net.no_sync()
                    else:
                        context = nullcontext()
                    with context:
                        if isinstance(inp, tuple):
                            loss = net(*inp).sum()
                        else:
                            loss = net(inp).sum()
                        loss.backward()
                        self._model_step(net)
                        # Ensure completion of GPU kernels (including allreduce). If the
                        # join API is not properly implemented, then this should hang
                        # since the allreduce will hang.
                        torch.cuda.synchronize(device=rank)
                    total_iters += 1
        if test_case.throw_on_early_termination:
            # Ensure we iterated min_num_iters times.
            self.assertEqual(total_iters, min_num_iters)
        else:
            # Ensure we iterated at least min_num_iters times.
            self.assertGreaterEqual(total_iters, min_num_iters)
        # Ensure completion of all GPU kernels.
        torch.cuda.synchronize(device=rank)
        # When throwing on early rank termination, we do not
        # broadcast model state from an authoritative rank. All models
        # should already be in sync.
        if not test_case.throw_on_early_termination:
            self.assertTrue(net._authoritative_rank)
            # All ranks should have agreed on the same authoritative_rank!
            final_rank_tensor = torch.tensor(
                [net._authoritative_rank], device=self.rank
            )
            tensor_list = [
                torch.zeros_like(final_rank_tensor)
                for _ in range(dist.get_world_size())
            ]
            dist.all_gather(tensor_list, final_rank_tensor)
            max_rank = dist.get_world_size() - 1
            self.assertSetEqual(
                {max_rank}, {tensor.item() for tensor in tensor_list}
            )
        # Ensure that all models are the same across ranks after all have joined.
        self.validate_net_equivalence(net)
        # Ensure that running with DDP uneven inputs was logged.
        ddp_logging_data = net._get_ddp_logging_data()
        self.assertTrue(ddp_logging_data.get("join_uneven_inputs"))
        dist.barrier()
    @skip_if_lt_x_gpu(2)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    def test_ddp_uneven_inputs_stop_iteration_sync_bn(self):
        # Tests that uneven inputs join handler correctly throws StopIteration
        # for models with SyncBN or general collective comm when
        # throw_on_early_termination=True.
        class ModelWithComm(torch.nn.Module):
            # Minimal model whose forward performs a collective (all_reduce).
            def __init__(self) -> None:
                super().__init__()
                self.lin = nn.Linear(2, 40, bias=False)
            def forward(self, x):
                x = self.lin(x)
                dist.all_reduce(x)
                return x
        torch.cuda.set_device(self.rank)
        model_bn = BatchNormNet()
        model_bn = nn.SyncBatchNorm.convert_sync_batchnorm(
            copy.deepcopy(model_bn)
        ).cuda(self.rank)
        comm_model = ModelWithComm().cuda(self.rank)
        model_input = torch.randn(10, 2).cuda(torch.cuda.current_device())
        for model in [model_bn, comm_model]:
            model = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.rank],
            )
            min_num_iters = 5
            if self.rank != 0:
                # Early termination rank(s)
                num_iters = min_num_iters
                exception_ctx = self.assertRaisesRegex(
                    RuntimeError, f"Rank {self.rank} exhausted all inputs"
                )
            else:
                # Non early termination rank
                num_iters = min_num_iters * 2
                exception_ctx = self.assertRaisesRegex(
                    RuntimeError,
                    "Detected at least one rank that exhausted inputs.",
                )
            n = 0
            with exception_ctx:
                with model.join(throw_on_early_termination=True):
                    for _ in range(num_iters):
                        loss = model(model_input).sum()
                        loss.backward()
                        self._model_step(model)
                        n += 1
            # Every rank must have stopped at the earliest exhaustion point.
            self.assertEqual(n, min_num_iters)
            # Verify model equivalence
            self.validate_net_equivalence(model)
    @skip_if_lt_x_gpu(2)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    def test_ddp_uneven_inputs(self):
        """Exercise DDP join() across many model types (batchnorm, conv,
        linear, unused-param, comm-hook, optional resnet), no_sync intervals,
        throw_on_early_termination, and various rank->iteration mappings."""
        dim = 1000
        batch = 1
        # Create a variety of models to run uneven input tests on.
        large_model = nn.Sequential(
            nn.Conv2d(1, 20, 5),
            nn.ReLU(),
            nn.Conv2d(20, 32, 5),
            nn.ReLU(),
            nn.Conv2d(32, 256, 5),
            nn.ReLU(),
        )
        small_model = nn.Linear(dim, dim, bias=False)
        bn_net = BatchNormNet()
        class UnusedParamModule(nn.Module):
            # Skips one task (leaving its params unused) on unused_params_rank.
            def __init__(self, unused_params_rank):
                super().__init__()
                self.t0 = Task()
                self.t1 = Task()
                self.unused_params_rank = unused_params_rank
            def task_parameters(self):
                return (self.t0.p, self.t1.p)
            def forward(self, x, rank):
                return (
                    self.t1(self.t0(x))
                    if rank != self.unused_params_rank
                    else self.t1(x)
                )
        unjoined_rank_with_unused_params_model = UnusedParamModule(1)
        joined_rank_with_unused_params_model = UnusedParamModule(0)
        rank = self.rank
        models_to_test = [
            # Network with batchnorm
            DDPUnevenTestInput(
                name="batch_norm_net",
                model=bn_net,
                inp=torch.ones(batch, 2, device=rank),
                sync_interval=1,
            ),
            DDPUnevenTestInput(
                name="large_conv_model",
                model=large_model,
                inp=torch.ones(batch, batch, dim, dim, device=rank),
                sync_interval=1,
            ),
            DDPUnevenTestInput(
                name="small_model",
                model=small_model,
                inp=torch.ones(batch, dim, device=rank),
                sync_interval=1,
            ),
            # Unused parameter test where rank that does not join early has unused params
            DDPUnevenTestInput(
                name="unjoined_rank_with_unused_params_model",
                model=unjoined_rank_with_unused_params_model,
                inp=(torch.ones(batch, 2, device=rank), rank),
                sync_interval=1,
            ),
            # Unused parameter test where rank that does join early has unused params
            DDPUnevenTestInput(
                name="joined_rank_with_unused_params_model",
                model=joined_rank_with_unused_params_model,
                inp=(torch.ones(batch, 2, device=rank), rank),
                sync_interval=1,
            ),
        ]
        # Test models that have hook installed.
        models_with_hook = [
            DDPUnevenTestInput(
                name="small_model_allreduce_hook",
                model=small_model,
                hook=default.allreduce_hook,
                state=None,
                inp=torch.ones(batch, dim, device=rank),
                sync_interval=1,
            ),
            DDPUnevenTestInput(
                name="small_model_power_sgd_hook",
                model=small_model,
                hook=powerSGD.powerSGD_hook,
                state=powerSGD.PowerSGDState(
                    process_group=None,
                    matrix_approximation_rank=1,
                    # Config so that powerSGD runs immediately instead of
                    # allreduce.
                    start_powerSGD_iter=1,
                    warm_start=False,
                    use_error_feedback=False,
                ),
                inp=torch.ones(batch, dim, device=rank),
                sync_interval=1,
            ),
        ]
        models_to_test.extend(models_with_hook)
        # Add resnet model if we have torchvision installed.
        if HAS_TORCHVISION:
            resnet_model = torchvision.models.resnet50()
            models_to_test.append(
                DDPUnevenTestInput(
                    name="resnet_model",
                    model=resnet_model,
                    inp=torch.ones(1, 3, 1000, 1000),
                    sync_interval=1,
                )
            )
        # Test with no_sync every 2, 3, 4, ... iterations.
        models_with_sync = []
        for i, test_input in enumerate(models_to_test):
            models_with_sync.append(
                DDPUnevenTestInput(
                    name=test_input.name,
                    model=test_input.model,
                    inp=test_input.inp,
                    sync_interval=i + 2,
                )
            )
        throw_on_early_term_tests = []
        for test_input in models_to_test:
            throw_on_early_term_tests.append(
                DDPUnevenTestInput(
                    name=test_input.name,
                    model=test_input.model,
                    inp=test_input.inp,
                    sync_interval=test_input.sync_interval,
                    throw_on_early_termination=True,
                )
            )
        models_to_test.extend(models_with_sync)
        models_to_test.extend(throw_on_early_term_tests)
        # 0 iteration tests for when one process does not train model at all, so
        # we must shadow the broadcast calls made when rebuilding buckets.
        baseline_num_iters = [0, 5]
        iteration_offsets = [2, 3, 10]
        num_uneven_ranks = [1]
        if dist.get_world_size() > 2:
            num_uneven_ranks.append(2)
        iteration_mappings = []
        # Generate rank : num_iters mappings for various uneven input scenarios.
        # This includes cases where rank 0 joins early and all other ranks join
        # later, and scenarios where multiple ranks join early, but at different
        # iterations, and later ranks join later.
        for num_early_join_ranks in num_uneven_ranks:
            for baseline_iter in baseline_num_iters:
                for offset in iteration_offsets:
                    mapping = dict.fromkeys(
                        range(num_early_join_ranks), baseline_iter
                    )
                    # if num_early_join_ranks > 1, ranks > 0 that will join early
                    # iterate offset//2 more times than rank 0, to test nodes
                    # depleting inputs at different times.
                    if num_early_join_ranks > 1:
                        for rank in mapping:
                            if rank > 0:
                                mapping[rank] += offset // 2
                    mapping.update(
                        dict.fromkeys(
                            range(num_early_join_ranks, dist.get_world_size()),
                            baseline_iter + offset,
                        )
                    )
                    iteration_mappings.append(mapping)
        for test_case, iteration_mapping in itertools.product(
            models_to_test, iteration_mappings
        ):
            if self.rank == 0:
                print(
                    f"""Running test: {test_case.name} sync interval
                    {test_case.sync_interval} with iteration mapping
                    {iteration_mapping}"""
                )
            self._run_uneven_inputs_test(
                test_case,
                iteration_mapping,
                find_unused_params=("unused_params_model" in test_case.name),
            )
    @skip_if_lt_x_gpu(2)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    def test_ddp_uneven_input_join_disable(self):
        """Check that ``net.join(enable=False)`` is a no-op for even inputs:
        gradients are still averaged over the full world size every iteration.
        """
        # tests that if net.join() with enable=False is specified, DDP works as
        # expected with even inputs.
        torch.manual_seed(self.rank)
        net = torch.nn.parallel.DistributedDataParallel(
            torch.nn.Linear(1, 1).cuda(self.rank), device_ids=[self.rank]
        )
        # Each rank feeds a distinct input so the expected averaged grad is
        # computable in closed form below.
        inp = torch.ones(1) * self.rank
        n_iters = 5
        world_size = dist.get_world_size()
        with net.join(enable=False):
            for _ in range(n_iters):
                # Clear grads
                grad = net.module.weight.grad
                if grad is not None:
                    grad.requires_grad_(False)
                    grad.zero_()
                out = net(inp)
                loss = out.sum()
                loss.backward()
                # Validate gradients to ensure that we divide by the correct
                # world_size when join mode is disabled.
                expected_grad = sum(i for i in range(world_size)) / world_size
                self.assertEqual(net.module.weight.grad.item(), expected_grad)
        join_config = net._join_config
        self.assertFalse(join_config.enable)
        self.validate_net_equivalence(net)
    @skip_if_lt_x_gpu(2)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    def test_ddp_uneven_input_exception(self):
        """An exception raised inside ``net.join()`` must propagate out of the
        context manager rather than being swallowed.
        """
        # Tests that exceptions during training are correctly propagated by the
        # context manager.
        error_str = "Intentional error"
        class ExceptionModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.param = nn.Parameter(torch.ones(1, requires_grad=True))
            def forward(self, _):
                # Always raise; the test asserts this surfaces to the caller.
                raise ValueError(error_str)
        exception_module = ExceptionModule()
        net = torch.nn.parallel.DistributedDataParallel(
            exception_module.cuda(self.rank), device_ids=[self.rank]
        )
        inp = torch.ones(1)
        with self.assertRaisesRegex(ValueError, error_str):
            with net.join():
                out = net(inp)
                loss = out.sum()
                loss.backward()
    def _test_broadcast_object_list(self, group=None):
        """Exercise ``dist.broadcast_object_list`` from rank 0 over single- and
        multi-object lists, with ``device`` both specified and unspecified.

        Args:
            group: optional process group to broadcast over; ``None`` means the
                default group.
        """
        gather_objects = create_collectives_object_test_list()
        # Only set device for NCCL backend since it must use GPUs.
        # Case where rank != GPU device.
        next_rank = (self.rank + 1) % int(self.world_size)
        backend = os.environ["BACKEND"]
        if backend == "nccl":
            torch.cuda.set_device(next_rank)
        src_rank = 0
        # If GPU test, add object with GPU tensor
        if backend == "nccl":
            gather_objects.append(Foo(torch.randn(3, 3, device=0)))
        if IS_FBCODE:
            # Create Tensor with > 2^31 Bytes storage requirements
            # Only on FBCODE as testing OOMs in OSS
            gather_objects.append(Foo(torch.randn(3, 178956971)))
        # Non-source ranks start with None placeholders that broadcast fills in.
        objects = (
            gather_objects
            if self.rank == src_rank
            else [None for _ in gather_objects]
        )
        # Single object test with device specified. Backend="gloo", device=cpu
        if backend != "nccl":
            single_obj_list = [objects[0]]
            if self.rank != src_rank:
                self.assertNotEqual(single_obj_list[0], gather_objects[0])
            dist.broadcast_object_list(
                single_obj_list, src=0, group=group, device=torch.device("cpu")
            )
            self.assertEqual(single_obj_list[0], gather_objects[0])
        # Single object test with device specified. Backend="gloo", device=current_device+1
        # The test is gated by the fact GPU count is the same as world size to avoid the case
        # when backend is gloo but there is no multiple GPU devices.
        if backend != "nccl" and torch.cuda.device_count() == int(self.world_size):
            single_obj_list = [objects[0]]
            if self.rank != src_rank:
                self.assertNotEqual(single_obj_list[0], gather_objects[0])
            dist.broadcast_object_list(
                single_obj_list, src=0, group=group, device=torch.device(next_rank)
            )
            self.assertEqual(single_obj_list[0], gather_objects[0])
        # Single object test with device specified. Backend="nccl", device=current_device+1
        if backend == "nccl" and torch.cuda.device_count() == int(self.world_size):
            single_obj_list = [objects[0]]
            if self.rank != src_rank:
                self.assertNotEqual(single_obj_list[0], gather_objects[0])
            dist.broadcast_object_list(
                single_obj_list, src=0, group=group, device=torch.device(next_rank)
            )
            self.assertEqual(single_obj_list[0], gather_objects[0])
        # Single object test: backward compatibility with device unspecified
        single_obj_list = [objects[0]]
        if self.rank != src_rank:
            self.assertNotEqual(single_obj_list[0], gather_objects[0])
        dist.broadcast_object_list(single_obj_list, src=0, group=group)
        self.assertEqual(single_obj_list[0], gather_objects[0])
        # Multiple input objects test
        if self.rank != src_rank:
            self.assertNotEqual(objects, gather_objects)
        dist.broadcast_object_list(objects, src=0, group=group)
        self.assertEqual(objects, gather_objects)
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @require_n_gpus_for_nccl_backend(
        int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"]
    )
    @with_dist_debug_levels(levels=["DETAIL"])
    @unittest.skip(
        "Test is failing, see https://github.com/pytorch/pytorch/pull/113620"
    )
    def test_broadcast_object_list(self):
        """Run the broadcast_object_list scenarios on the default group."""
        return self._test_broadcast_object_list()
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @require_n_gpus_for_nccl_backend(
        int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"]
    )
    @with_dist_debug_levels(levels=["DETAIL"])
    def _test_broadcast_object_list_subgroup(self):
        """Run the broadcast_object_list scenarios on a freshly created
        subgroup using the same backend as the default group.

        NOTE(review): the leading underscore means unittest will not collect
        this despite its test decorators — presumably intentionally disabled;
        confirm before renaming.
        """
        default = _get_default_group()
        backend = dist.get_backend(default)
        subgroup = dist.new_group(backend=backend)
        return self._test_broadcast_object_list(subgroup)
    def _test_ddp_ignore_params_arg(self, static_graph=False):
        """Verify ``_set_params_and_buffers_to_ignore_for_model``: ignored
        params/buffers get no DDP hooks or syncing, and params materialized
        after wrapping behave exactly as in local (non-DDP) training.

        Args:
            static_graph: whether to build DDP with ``static_graph=True``.
        """
        class TestModel(nn.Module):
            def __init__(self, rank):
                self.rank = rank
                super().__init__()
                self.fc1 = nn.Linear(1, 1, bias=False)
                # Proxy that will be materialized to another architecture later.
                # (after wrapping model with DDP)
                if self.rank == 0:
                    self.fc2 = nn.Linear(1, 10, bias=False)
                else:
                    self.fc2 = nn.Linear(10, 10, bias=False)
            def forward(self, x):
                x = self.fc1(x)
                x = self.fc2(x)
                return x
        device_id = self.rank
        # Ensure the test works for both find_unused_parameter and broadcast_buffer settings.
        for find_unused, broadcast_buffers in itertools.product(
            [False, True], [False, True]
        ):
            model = TestModel(self.rank).float().to(device_id)
            # Note that the model can have different shape buffers if we pass
            # them in to be ignored as well.
            model.fc2.register_buffer(
                "ignore_buffer", torch.zeros(5 + self.rank, device=self.rank)
            )
            proxy_params = list(model.fc2.parameters())
            model_fc2_name = next(
                module_name
                for module_name, module in model.named_modules()
                if module is model.fc2
            )
            proxy_param_names = [
                f"{model_fc2_name}.{param_name}"
                for param_name, _ in model.fc2.named_parameters()
            ]
            proxy_buffer_names = [
                f"{model_fc2_name}.{buf_name}"
                for buf_name, _ in model.fc2.named_buffers()
            ]
            # Specify that we should ignore proxy_params since it will be
            # materialized later.
            torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
                model, proxy_param_names + proxy_buffer_names
            )
            ddp = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[device_id],
                find_unused_parameters=find_unused,
                broadcast_buffers=broadcast_buffers,
                static_graph=static_graph,
            )
            # Materialize new params. These are not registered in DDP and thus
            # don't have autograd hooks installed on them.
            ddp.module.fc2 = nn.Linear(1, 1, bias=False).to(device_id)
            # local model with the new materialized parameters.
            local_model = copy.deepcopy(ddp.module).cuda(self.rank)
            inp = torch.ones(1, dtype=torch.float).to(device_id) * (self.rank + 1)
            for _ in range(6):
                ddp(inp).sum().backward()
                local_model(inp).sum().backward()
            # materialized param grad is not touched by DDP, so its grad should
            # be the same as if running locally.
            for materialized_param, local_param in zip(
                ddp.module.fc2.parameters(),
                local_model.fc2.parameters(),
                strict=True,
            ):
                self.assertEqual(materialized_param.grad, local_param.grad)
            # fc1 parameter grad should still be different, due to allreduce.
            for synced_param, local_param in zip(
                ddp.module.fc1.parameters(),
                local_model.fc1.parameters(),
                strict=True,
            ):
                self.assertFalse(synced_param.grad == local_param.grad)
            # Proxy module grad should not be touched
            for proxy_param in proxy_params:
                self.assertTrue(proxy_param.grad is None)
            # Synchronize since we run multiple iterations of this test, to
            # isolate failure hangs.
            torch.cuda.synchronize(device=self.rank)
@require_backend_is_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
def test_ddp_ignore_params_arg(self):
self._test_ddp_ignore_params_arg(static_graph=False)
self._test_ddp_ignore_params_arg(static_graph=True)
@with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"])
@require_backend_is_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
def test_ddp_unused_params_rebuild_buckets_exception(self):
class ToyModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.net1 = nn.Linear(10, 10, bias=False)
self.net2 = nn.Linear(10, 10, bias=False)
def forward(self, x):
return self.net1(x)
ddp = torch.nn.parallel.DistributedDataParallel(
ToyModel().cuda(self.rank), device_ids=[self.rank]
)
for i in range(2):
inp = torch.rand(1, 10)
if i > 0:
# On 2nd iteration, this will fail during rebuild_buckets,
# but we should report an error regarding unused parameters
# since that is the underlying root cause.
try:
ddp(inp).sum().backward()
except RuntimeError as e:
msg = str(e)
verify_ddp_error_logged(ddp, msg)
expected_strs = [
ddp_prev_reduction_unfinished_str,
ddp_recommend_find_unused_params_str,
ddp_outputs_not_used_in_loss_str,
]
# In debug mode, should show parameters that weren't reduced.
# Without debug mode, should show suggestion to use debug mode.
if dist.get_debug_level() == dist.DebugLevel.OFF:
expected_strs.append(ddp_suggest_debug_mode_str)
else:
unreduced_params = ", ".join(["net2.weight"])
expected_strs.append(
f"did not receive grad for rank {self.rank}: {unreduced_params}"
)
for s in expected_strs:
self.assertTrue(s in msg, f"Expected {s} to be in {msg}")
self.assertFalse(ddp_find_unused_params_enabled_str in msg)
else:
self.assertFalse(
True, "DDP unused parameters error not raised."
)
else:
ddp(inp).sum().backward()
dist.barrier()
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @skip_if_lt_x_gpu(2)
    def test_ddp_shared_grad_acc_unused_params(self):
        """Unused params that alias another param (shared grad accumulator)
        must still be marked unused when ``find_unused_parameters=True``.
        """
        # When find_unused_parameters=True, ensure we mark unused parameters
        # even if they share gradient accumulators.
        class ToyModel(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                # net1, bias, and net1.bias are all unused params.
                self.net1 = nn.Linear(10, 5, bias=False)
                self.bias = nn.Parameter(torch.zeros(5))
                # net1.bias and self.bias are names for the same underlying
                # parameter, so they share the same grad acc. This caused
                # the bug reported in https://github.com/pytorch/pytorch/issues/41324.
                self.net1.bias = self.bias
                self.net2 = nn.Linear(10, 5)
            def forward(self, x):
                return self.net2(x).sum()
        torch.cuda.set_device(self.rank)
        model = ToyModel().to(torch.cuda.current_device())
        # Exercise both static-graph and dynamic-graph code paths.
        for static in [True, False]:
            ddp_model = torch.nn.parallel.DistributedDataParallel(
                copy.deepcopy(model),
                device_ids=[self.rank],
                find_unused_parameters=True,
                static_graph=static,
            )
            inp = torch.randn(20, 10, device=self.rank)
            for _ in range(6):
                loss = ddp_model(inp)
                # To test https://github.com/pytorch/pytorch/issues/61982
                loss /= 10
                loss.backward()
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @skip_if_lt_x_gpu(2)
    def test_ddp_device(self):
        """DDP must move forward() inputs of various container types (tuple,
        list, namedtuple, dict) to the module's device, while leaving tensors
        inside custom user types untouched (mirroring ``scatter`` semantics).
        """
        expected_len = 2
        class TensorWrapper:
            __slots__ = ["t", "moved_to_gpu"]
            def __init__(self, t):
                self.t = t
                self.moved_to_gpu = False
        # Handlers for specific types of validation we want to do based on
        # the input type.
        def tuple_and_list_validator(x):
            self.assertTrue(len(x), expected_len)
            self.assertEqual(1, len({t.device for t in x}))
            self.assertEqual(x[0].device.index, self.rank)
            return x[0] + x[1]
        def namedtuple_validator(x):
            self.assertEqual(x._fields, EXPECTED_FIELDS)
            self.assertEqual(x.a.device.index, x.b.device.index)
            self.assertEqual(x.a.device.index, self.rank)
            return x.a + x.b
        def custom_type_validator(x):
            # Custom types are passed through as-is: tensor stays on CPU until
            # we move it ourselves here.
            self.assertTrue(x.moved_to_gpu or (str(x.t.device) == "cpu"))
            x.t = x.t.to(self.rank)
            x.moved_to_gpu = True
            return x.t
        def dict_validator(x):
            self.assertTrue(EXPECTED_FIELDS[0] in x)
            self.assertTrue(EXPECTED_FIELDS[1] in x)
            self.assertEqual(1, len({t.device for t in x.values()}))
            self.assertEqual(x[EXPECTED_FIELDS[0]].device.index, self.rank)
            return x[EXPECTED_FIELDS[0]] + x[EXPECTED_FIELDS[1]]
        # Dispatch table: input type -> validator run inside forward().
        validators = {
            TensorWrapper: custom_type_validator,
            tuple: tuple_and_list_validator,
            list: tuple_and_list_validator,
            TestNamedTupleInput_0: namedtuple_validator,
            TestNamedTupleInput_1: namedtuple_validator,
            dict: dict_validator,
        }
        class ToyModel(torch.nn.Module):
            def __init__(self_):  # noqa: B902
                super().__init__()
                self_.lin = nn.Linear(10, 10, bias=False)
            def forward(self_, x, expected_type):  # noqa: B902
                # Similar to scatter, the recursive to in the single-device
                # case does not move tensors if they are in a custom type.
                self.assertTrue(isinstance(x, expected_type))
                fwd_tensor = validators[expected_type](x)
                return self_.lin(fwd_tensor)
        model = torch.nn.parallel.DistributedDataParallel(
            ToyModel().to(self.rank), device_ids=[self.rank]
        )
        def train_iter(inp, input_type):
            for _ in range(4):
                out = model(inp, input_type)
                out.sum().backward()
        # CPU tuple input, should be moved to the proper device before call
        # to forward.
        inp = tuple(torch.randn(10, 10) for _ in range(expected_len))
        train_iter(inp, tuple)
        # List CPU input, should be moved to proper device before call to
        # forward.
        inp = [torch.randn(10, 10) for _ in range(expected_len)]
        train_iter(inp, list)
        # Custom type containing tensor. The type is maintained, but the
        # device is not propagated (which is what happens with scatter too)
        inp = TensorWrapper(torch.randn(10, 10))
        train_iter(inp, TensorWrapper)
        # NamedTuple input. The type should be maintained and tensor inputs
        # should be moved to the correct device as in scatter.
        batch = 5
        dim = 10
        a = torch.rand(batch, dim)
        b = torch.rand(batch, dim)
        inp = TestNamedTupleInput_0(a, b)
        train_iter(inp, type(inp))
        inp = TestNamedTupleInput_1(a, b)
        train_iter(inp, type(inp))
        # dictionary input.
        inp = {
            EXPECTED_FIELDS[0]: a,
            EXPECTED_FIELDS[1]: b,
        }
        train_iter(inp, type(inp))
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @skip_if_lt_x_gpu(2)
    def test_ddp_namedtuple(self):
        """NamedTuple inputs must reach forward() with their type and field
        values intact (not degraded to a plain tuple).
        """
        batch = 5
        dim = 10
        a = torch.rand(batch, dim, device=self.rank)
        b = torch.rand(batch, dim, device=self.rank)
        class NamedTupleModule(torch.nn.Module):
            def __init__(self_):  # noqa: B902
                super().__init__()
                self_.lin = nn.Linear(10, 1)
            def forward(self_, input, expected_type):  # noqa: B902
                # Without NamedTuple support, this would be of type tuple.
                self.assertTrue(
                    isinstance(input, expected_type),
                    f"Expected type {expected_type} but got {type(input)}",
                )
                self.assertEqual(input._fields, EXPECTED_FIELDS)
                self.assertEqual(a, input.a)
                self.assertEqual(b, input.b)
                return self_.lin(torch.mul(input.a, input.b))
        model = torch.nn.parallel.DistributedDataParallel(
            NamedTupleModule().cuda(self.rank), device_ids=[self.rank]
        )
        inp = TestNamedTupleInput_0(a, b)
        # The following would fail if DDP does not propagate NamedTuples correctly.
        model(inp, type(inp))
        inp = TestNamedTupleInput_1(a, b)
        model(inp, type(inp))
    @require_backend_is_available({"gloo"})
    def test_grads_same_across_ranks_with_no_sync(self):
        """After accumulating under ``no_sync()`` and then one synced step,
        all ranks must hold identical gradients even when each rank took a
        different (conditional) path through the model.
        """
        _group, _group_id, rank = self._init_global_test()
        world_size = dist.get_world_size()
        if world_size < 2:
            self.skipTest("This test requires at least two ranks.")
        class SimpleConditionalModel(nn.Module):
            # if rank is 0, uses nn1 on the first pass and nn2 on the second pass.
            # else, uses nn3 on the first pass and nn4 on the second pass.
            def __init__(self, rank):
                super().__init__()
                self.rank = rank
                self.nn1 = nn.Linear(1, 1)
                self.nn2 = nn.Linear(1, 1)
                self.nn3 = nn.Linear(1, 1)
                self.nn4 = nn.Linear(1, 1)
                self.state = 0
            def forward(self, input):
                if self.state == 0:
                    self.state = 1
                    if self.rank == 0:
                        return self.nn1(input)
                    else:
                        return self.nn3(input)
                else:
                    self.state = 0
                    if self.rank == 0:
                        return self.nn2(input)
                    else:
                        return self.nn4(input)
        model = torch.nn.parallel.DistributedDataParallel(
            SimpleConditionalModel(rank), find_unused_parameters=True
        )
        mse_loss = nn.MSELoss()
        grad_accumulation = 2
        for microbatch_idx in range(grad_accumulation):
            # Skip gradient sync on all but the final microbatch.
            if microbatch_idx < grad_accumulation - 1:
                context = model.no_sync
            else:
                context = nullcontext
            with context():
                input = torch.rand((1,))
                output = model.forward(input)
                target = torch.rand((1,))
                loss = mse_loss(output, target)
                loss.backward()
        self.assertTrue(
            not any(p.grad is None for p in model.parameters()),
            "Gradients can't be None for any model parameter.",
        )
        grads = torch.cat([p.grad.view(-1) for p in model.parameters()])
        # Gather all gradients to rank 0.
        if rank == 0:
            gathered_grads = [torch.zeros_like(grads) for _ in range(world_size)]
        else:
            gathered_grads = []
        dist.gather(grads, gather_list=gathered_grads, dst=0)
        if rank == 0:
            for g in gathered_grads[1:]:
                self.assertTrue(
                    torch.allclose(gathered_grads[0], g),
                    "Gradients are not the same for all ranks.",
                )
@with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"])
@require_backend_is_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
def test_ddp_control_flow_same_across_ranks(self):
# Control flow that is the same across ranks.
batch = 20
dim = 10
world_size = dist.get_world_size()
torch.cuda.set_device(self.rank)
model = torch.nn.parallel.DistributedDataParallel(
ControlFlowToyModel().cuda(self.rank),
device_ids=[self.rank],
find_unused_parameters=True,
)
random_input = torch.randn(batch, dim, device=self.rank)
ones_input = torch.ones(batch, dim, device=self.rank)
for i in range(6):
if i % 2 == 0:
out = model(random_input)
else:
out = model(ones_input)
loss = out.sum()
loss.backward()
# On even iterations, 2nd param goes unused, on odd iterations,
# it is used.
local_used_map = model.reducer._get_local_used_map()
if i % 2 == 0:
expected = torch.tensor(
[world_size, 0], device=self.rank, dtype=torch.int32
)
else:
expected = torch.tensor(
[world_size, world_size], device=self.rank, dtype=torch.int32
)
# Validate parameter usage.
variable_usage_tensor = local_used_map
self.assertEqual(variable_usage_tensor, expected)
# Validate appropriate error message when DDP is used with
# find_unused_parameters=False.
model = torch.nn.parallel.DistributedDataParallel(
ControlFlowToyModel().cuda(self.rank),
device_ids=[self.rank],
find_unused_parameters=False,
)
for i in range(2):
if i == 0:
loss = model(random_input).sum()
loss.backward()
else:
try:
loss = model(random_input).sum()
loss.backward()
except RuntimeError as e:
msg = str(e)
verify_ddp_error_logged(model, msg)
# 2nd linear layer is unused
unused_param_index = 1
expected_strs = [
ddp_prev_reduction_unfinished_str,
ddp_recommend_find_unused_params_str,
ddp_outputs_not_used_in_loss_str,
f"Parameter indices which did not receive grad for rank {self.rank}: {unused_param_index}",
]
# In debug mode, should show parameters that weren't reduced.
# Without debug mode, should show suggestion to use debug mode.
if dist.get_debug_level() == dist.DebugLevel.OFF:
expected_strs.append(ddp_suggest_debug_mode_str)
else:
unreduced_params = ", ".join(["lin2.weight"])
expected_strs.append(
f"did not receive grad for rank {self.rank}: {unreduced_params}"
)
for s in expected_strs:
self.assertTrue(s in msg, f"Expected {s} to be in {msg}")
self.assertFalse(ddp_find_unused_params_enabled_str in msg)
else:
self.assertFalse(True, "DDP error not raised")
dist.barrier()
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @skip_if_lt_x_gpu(2)
    def test_invalid_static_graph(self):
        """With ``static_graph=True``, changing which parameters are used
        between iterations (in either direction) must raise a descriptive
        RuntimeError and log it via the DDP logger.
        """
        torch.cuda.set_device(self.rank)
        model = torch.nn.parallel.DistributedDataParallel(
            ControlFlowToyModel().cuda(self.rank),
            device_ids=[self.rank],
            static_graph=True,
        )
        random_input = torch.randn(20, 10, device=self.rank)
        ones_input = torch.ones(20, 10, device=self.rank)
        # unused parameter in the first iteration got used
        # in second iteration.
        expected_err = "Your training graph has changed in this iteration"
        with self.assertRaisesRegex(RuntimeError, expected_err):
            for i in range(2):
                if i % 2 == 0:
                    out = model(random_input)
                else:
                    out = model(ones_input)
                loss = out.sum()
                loss.backward()
        verify_ddp_error_logged(model, expected_err)
        # used parameter in the first iteration got unused
        # in second iteration.
        with self.assertRaisesRegex(
            RuntimeError,
            "Expected to have finished reduction in the prior iteration "
            "before starting a new one. This error indicates that your "
            "training graph has changed in this iteration, "
            "e.g., one parameter is used in first iteration, "
            "but then got unused in the second iteration. "
            "this is not compatible with static_graph set to True.\n"
            "Parameter indices which did not receive grad for",
        ):
            for i in range(2):
                if i % 2 != 0:
                    out = model(random_input)
                else:
                    out = model(ones_input)
                loss = out.sum()
                loss.backward()
        verify_ddp_error_logged(model, "Expected to have finished reduction")
@with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"])
@require_backend_is_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
def test_ddp_control_flow_different_across_ranks(self):
# Control flow that is different across ranks.
batch = 20
dim = 10
class ToyModel(nn.Module):
def __init__(self, rank):
super().__init__()
self.lin1 = nn.Linear(10, 10, bias=False)
self.lin2 = nn.Linear(10, 10, bias=False)
self.rank = rank
def forward(self, x):
# Control-flow that is rank and input dependent for the
# model.
use_second_layer = (
torch.equal(x, torch.ones(batch, dim, device=x.device))
and self.rank == 1
)
if use_second_layer:
return self.lin2(F.relu(self.lin1(x)))
else:
return F.relu(self.lin1(x))
world_size = dist.get_world_size()
torch.cuda.set_device(self.rank)
model = torch.nn.parallel.DistributedDataParallel(
ToyModel(self.rank).cuda(self.rank),
device_ids=[self.rank],
find_unused_parameters=True,
)
random_input = torch.randn(batch, dim, device=self.rank)
ones_input = torch.ones(batch, dim, device=self.rank)
for i in range(6):
if i % 2 == 0:
out = model(random_input)
else:
out = model(ones_input)
loss = out.sum()
loss.backward()
# On even iterations, 2nd param goes unused, on odd iterations,
# it is used only on rank 1.
local_used_map = model.reducer._get_local_used_map()
if i % 2 == 0:
expected = torch.tensor(
[world_size, 0], device=self.rank, dtype=torch.int32
)
else:
expected = torch.tensor(
[world_size, 1], device=self.rank, dtype=torch.int32
)
variable_usage_tensor = local_used_map
# Validate parameter usage. On odd iterations, 2nd param is only
# used on rank 1.
self.assertEqual(variable_usage_tensor, expected)
# Validate appropriate error message when DDP is used with
# find_unused_parameters=False.
model = torch.nn.parallel.DistributedDataParallel(
ToyModel(self.rank).cuda(self.rank),
device_ids=[self.rank],
find_unused_parameters=False,
)
for i in range(2):
if i == 0:
loss = model(random_input).sum()
loss.backward()
else:
try:
loss = model(random_input).sum()
loss.backward()
except RuntimeError as e:
msg = str(e)
verify_ddp_error_logged(model, msg)
unused_param_index = 1
expected_strs = [
ddp_prev_reduction_unfinished_str,
ddp_recommend_find_unused_params_str,
ddp_outputs_not_used_in_loss_str,
f"Parameter indices which did not receive grad for rank {self.rank}: {unused_param_index}",
]
# In debug mode, should show parameters that weren't reduced.
# Without debug mode, should show suggestion to use debug mode.
if dist.get_debug_level() == dist.DebugLevel.OFF:
expected_strs.append(ddp_suggest_debug_mode_str)
else:
unreduced_params = ", ".join(["lin2.weight"])
expected_strs.append(
f"did not receive grad for rank {self.rank}: {unreduced_params}"
)
for s in expected_strs:
self.assertTrue(s in msg, f"Expected {s} to be in {msg}")
self.assertFalse(ddp_find_unused_params_enabled_str in msg)
else:
self.assertFalse(True, "DDP error not raised")
dist.barrier()
    @require_backend_is_available({"gloo"})
    def test_scatter_object_list(self):
        """``dist.scatter_object_list`` must deliver the rank-th object from
        src, and reject an empty output list with a ValueError.
        """
        src_rank = 0
        collectives_object_test_list = create_collectives_object_test_list()
        scatter_list = (
            collectives_object_test_list
            if self.rank == src_rank
            else [None for _ in collectives_object_test_list]
        )
        world_size = dist.get_world_size()
        scatter_list = scatter_list[:world_size]
        # Pad the scatter list (by cycling its own elements) so it has exactly
        # one entry per rank.
        i = 0
        while len(scatter_list) < world_size:
            scatter_list.append(scatter_list[i])
            i += 1
        output_obj_list = [None]
        dist.scatter_object_list(output_obj_list, scatter_list, src=src_rank)
        self.assertEqual(
            output_obj_list[0],
            collectives_object_test_list[
                self.rank % len(collectives_object_test_list)
            ],
        )
        # Ensure errors are raised upon incorrect arguments.
        with self.assertRaisesRegex(
            ValueError,
            "Expected argument scatter_object_output_list to be a list of size at least 1.",
        ):
            dist.scatter_object_list([], scatter_list, src=src_rank)
def _generate_sparse_tensors_for_bucket_assignment_test(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
tensors_sparse = [t.to_sparse() for t in tensors]
return tensors_sparse
    def _test_compute_bucket_assignment_by_size(self, use_logger):
        """``_compute_bucket_assignment_by_size`` must reject sparse tensors;
        with ``use_logger=True`` the error must also land in the DDP logger.

        Args:
            use_logger: whether to pass the DDP logger into the call.
        """
        group_gloo = dist.new_group(
            timeout=timedelta(seconds=60), backend=dist.Backend.GLOO
        )
        # Set TORCH_NCCL_BLOCKING_WAIT and use a new NCCL group to improve test
        # determinism.
        os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1"
        group_to_use = dist.new_group(
            backend=dist.get_backend(), timeout=timedelta(seconds=5)
        )
        torch.cuda.set_device(self.rank)
        # Create a valid model. The constructor initializes the logger that we use later.
        # We never actually use the rest of the model - we only need its logger.
        net = EmbeddingNetDifferentParams(0)
        net = torch.nn.parallel.DistributedDataParallel(
            net.to(self.rank),
            device_ids=[self.rank],
            process_group=group_to_use,
        )
        # if we don't pass a logger then we can only check that an exception was thrown.
        expected_err = "No support for sparse tensors."
        with self.assertRaisesRegex(RuntimeError, expected_err):
            tensors_sparse = (
                self._generate_sparse_tensors_for_bucket_assignment_test()
            )
            if use_logger:
                dist._compute_bucket_assignment_by_size(
                    tensors_sparse, [400], logger=net.logger
                )
            else:
                dist._compute_bucket_assignment_by_size(tensors_sparse, [400])
        if use_logger:
            verify_ddp_error_logged(net, expected_err)
        # Perform gloo-based barrier to ensure one rank doesn't exit test
        # early which causes failure with Barrier.sync.
        dist.barrier(group_gloo)
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @skip_if_lt_x_gpu(2)
    def test_compute_bucket_assignment_by_size_sparse_error_without_logger(self):
        """Sparse-tensor rejection without a DDP logger attached."""
        self._test_compute_bucket_assignment_by_size(use_logger=False)
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @skip_if_lt_x_gpu(2)
    def test_compute_bucket_assignment_by_size_sparse_error_with_logger(self):
        """Sparse-tensor rejection with the error also sent to the DDP logger."""
        self._test_compute_bucket_assignment_by_size(use_logger=True)
    def _test_verify_model_across_rank(self, use_logger):
        """``_verify_param_shape_across_processes`` must raise on at least one
        rank when ranks hold different parameter shapes.

        Args:
            use_logger: whether to pass the DDP logger into the verification.
        """
        group_gloo = dist.new_group(
            timeout=timedelta(seconds=60), backend=dist.Backend.GLOO
        )
        group_to_use = dist.new_group(
            backend=dist.get_backend(), timeout=timedelta(seconds=5)
        )
        torch.cuda.set_device(self.rank)
        # Create a valid model. The constructor initializes the logger that we use later.
        net = EmbeddingNetDifferentParams(0)
        net = torch.nn.parallel.DistributedDataParallel(
            net.to(self.rank),
            device_ids=[self.rank],
            process_group=group_to_use,
        )
        # Modify the model so that the number of parameters are different for each rank.
        # This will cause a RuntimeError to be thrown below in _verify_param_shape_across_processes,
        # so we can check if the correct error is thrown and is logged.
        # We can't do this in the constructor above otherwise the logger will
        # not be properly initialized.
        net.module.lin = nn.Linear(100 if self.rank == 0 else 10, 1)
        # if we pass a logger we can verify that it was logged
        caught = 0
        try:
            if use_logger:
                _verify_param_shape_across_processes(
                    net.process_group, list(net.parameters()), net.logger
                )
            else:
                _verify_param_shape_across_processes(
                    net.process_group, list(net.parameters())
                )
        except Exception:
            caught = 1
        # As long as there is one rank catching the exception
        t = torch.Tensor([caught])
        dist.all_reduce(t, group=group_gloo)
        self.assertGreater(t, 0)
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally"
    )
    @skip_if_lt_x_gpu(2)
    def test_verify_model_across_rank_with_logger(self):
        """Shape-mismatch detection with the DDP logger passed in."""
        self._test_verify_model_across_rank(use_logger=True)
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally"
    )
    @skip_if_lt_x_gpu(2)
    def test_verify_model_across_rank_without_logger(self):
        """Shape-mismatch detection without a DDP logger."""
        self._test_verify_model_across_rank(use_logger=False)
    def _run_test_ddp_model_with_diff_params(self, net, ddp_group, group_gloo):
        """Wrap ``net`` in DDP and verify at least one rank raises because
        parameters differ across ranks; the gloo all_reduce aggregates the
        per-rank "did I catch an exception" flags.
        """
        caught = 0
        try:
            net = torch.nn.parallel.DistributedDataParallel(
                net.to(self.rank), device_ids=[self.rank], process_group=ddp_group
            )
        except Exception:
            caught = 1
        # As long as there is one rank catching the exception
        t = torch.Tensor([caught])
        dist.all_reduce(t, group=group_gloo)
        self.assertGreater(t, 0)
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally"
    )
    @skip_if_lt_x_gpu(2)
    def test_ddp_model_diff_shape_across_ranks(self):
        """DDP init must fail when ranks hold different-shaped parameters."""
        group_gloo = dist.new_group(
            timeout=timedelta(seconds=60), backend=dist.Backend.GLOO
        )
        group_to_use = dist.new_group(
            backend=dist.get_backend(), timeout=timedelta(seconds=10)
        )
        torch.cuda.set_device(self.rank)
        # Creates network with different sized embedding table on different
        # ranks. This should throw an error during DDP init.
        net = EmbeddingNetDifferentParams(self.rank)
        self._run_test_ddp_model_with_diff_params(net, group_to_use, group_gloo)
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally"
    )
    @skip_if_lt_x_gpu(2)
    def test_ddp_model_diff_num_params_across_ranks(self):
        """DDP init must fail when ranks hold different numbers of params."""
        group_gloo = dist.new_group(
            timeout=timedelta(seconds=60), backend=dist.Backend.GLOO
        )
        group_to_use = dist.new_group(
            backend=dist.get_backend(), timeout=timedelta(seconds=10)
        )
        torch.cuda.set_device(self.rank)
        # Creates network with diff # of param across ranks, reducer should
        # recognize this and throw appropriate error.
        net = EmbeddingNetDifferentParams(
            self.rank, diff_num_params=(self.rank == 1)
        )
        self._run_test_ddp_model_with_diff_params(
            net,
            group_to_use,
            group_gloo,
        )
def _test_output_unused_in_loss(self, module_cls, gradient_as_bucket_view):
model = module_cls()
local_net = copy.deepcopy(model)
net = torch.nn.parallel.DistributedDataParallel(
copy.deepcopy(model).cuda(self.rank),
device_ids=[self.rank],
find_unused_parameters=True,
)
# Tests that certain parameters not getting gradient since the
# output is unused in loss computation is supported. Specifically,
# checks that the grads remain unchanged and are the same as local
# training.
inp = torch.randn(10, 10)
# Ensure that if a param is not used in loss computation, its
# gradient is untouched, i.e. if it is None before it is None after,
# not zero.
if module_cls == DictOutputModule:
a, b = local_net(inp)["predictions"]
a_dist, b_dist = net(inp)["predictions"]
else:
a, b = local_net(inp)
a_dist, b_dist = net(inp)
loss_dist = b_dist.sum()
loss_dist.backward()
# Ensure that gradient corresponding to parameter "a" was not
# touched, i.e. it is None and matches the local grad.
if module_cls == DictOutputModule:
self.assertTrue(net.module.module.a.weight.grad is None)
self.assertEqual(
net.module.module.a.weight.grad, local_net.module.a.weight.grad
)
else:
self.assertTrue(net.module.a.weight.grad is None)
self.assertEqual(net.module.a.weight.grad, local_net.a.weight.grad)
saved_a_local_grad = None
saved_a_dist_grad = None
net.zero_grad()
local_net.zero_grad()
for i in range(6):
if module_cls == DictOutputModule:
a, b = local_net(inp)["predictions"]
a_dist, b_dist = net(inp)["predictions"]
else:
a, b = local_net(inp)
a_dist, b_dist = net(inp)
if i < 2:
# Use both params in loss computation. Later, "a" will go
# unused and we check to ensure DDP supports this and
# gradients remain the same as local training.
t = a @ b
t_dist = a_dist @ b_dist
loss = t.sum()
loss_dist = t_dist.sum()
else:
# Model output "a" unused in loss.
loss = b.sum()
loss_dist = b_dist.sum()
loss.backward()
loss_dist.backward()
if i == 1:
# Save grads to compare with them in next iterations.
if module_cls == DictOutputModule:
saved_a_local_grad = local_net.module.a.weight.grad
saved_a_dist_grad = net.module.module.a.weight.grad
else:
saved_a_local_grad = local_net.a.weight.grad
saved_a_dist_grad = net.module.a.weight.grad
self.assertEqual(saved_a_local_grad, saved_a_dist_grad)
elif i >= 2:
# parameter "a" of both models should be the same and not change
if module_cls == DictOutputModule:
self.assertEqual(
net.module.module.a.weight.grad, saved_a_dist_grad
)
self.assertEqual(
local_net.module.a.weight.grad, saved_a_local_grad
)
else:
self.assertEqual(net.module.a.weight.grad, saved_a_dist_grad)
self.assertEqual(local_net.a.weight.grad, saved_a_local_grad)
# Verify grads are the same
for local_param, dist_param in zip(
local_net.parameters(), net.parameters(), strict=True
):
local_grad = local_param.grad
dist_grad = dist_param.grad
self.assertEqual(local_grad, dist_grad)
dist.barrier()
@skip_but_pass_in_sandcastle_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel",
)
@skip_if_lt_x_gpu(2)
def test_output_unused_in_loss_tuple_module(self):
module_cls = UnusedParamTwoLinLayerNet
for grad_as_bucket_view in [True, False]:
self._test_output_unused_in_loss(module_cls, grad_as_bucket_view)
@skip_but_pass_in_sandcastle_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel",
)
@skip_if_lt_x_gpu(2)
def test_output_unused_in_loss_dict_module(self):
module_cls = DictOutputModule
for grad_as_bucket_view in [True, False]:
self._test_output_unused_in_loss(module_cls, grad_as_bucket_view)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    @skip_if_lt_x_gpu(2)
    def test_undefined_grad_parity_unused_parameters(self):
        """Undefined (None) gradients under DDP must match local training."""
        # TODO: enable this for general training use cases:
        # https://github.com/pytorch/pytorch/issues/58511.
        x = torch.ones(1, 2).to(self.rank)
        net = Net().to(self.rank)
        local_net = copy.deepcopy(net)
        net = torch.nn.parallel.DistributedDataParallel(
            net,
            device_ids=[self.rank],
            find_unused_parameters=True,
        )
        out = net(x).sum()
        local_out = local_net(x).sum()
        # Simulates undefined gradients.
        torch._C._functions.UndefinedGrad()(out).backward()
        torch._C._functions.UndefinedGrad()(local_out).backward()
        # Compare grads parameter-by-parameter between DDP and local model.
        for (dist_param_name, dist_param), (local_param_name, local_param) in zip(
            net.named_parameters(), local_net.named_parameters(), strict=True
        ):
            dist_grad = dist_param.grad
            local_grad = local_param.grad
            self.assertEqual(
                dist_grad,
                local_grad,
                f"""DDP param {dist_param_name} with grad {dist_grad}
                does not match local param {local_param_name} with grad
                {local_grad}""",
            )
    def _test_different_graph_across_ranks(
        self, find_unused_parameters=False, static_graph=False
    ):
        """Train a model whose autograd graph differs per rank for 10 steps.

        Rank 0 uses both linear layers; all other ranks use only ``lin1``, so
        the set of used parameters differs across ranks. Returns the trained
        DDP model so callers can inspect its logging data.
        """

        class ToyModel(nn.Module):
            def __init__(self, rank):
                super().__init__()
                self.lin1 = nn.Linear(10, 10, bias=False)
                self.lin2 = nn.Linear(10, 10, bias=False)
                self.rank = rank

            def forward(self, x):
                # Graph shape depends on rank: only rank 0 uses lin2.
                if self.rank == 0:
                    return self.lin2(F.relu(self.lin1(x)))
                else:
                    return F.relu(self.lin1(x))

        torch.manual_seed(31415)
        torch.cuda.set_device(self.rank)
        model = ToyModel(self.rank).cuda(self.rank)
        ddp_model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[self.rank],
            find_unused_parameters=find_unused_parameters,
            gradient_as_bucket_view=True,
            static_graph=static_graph,
        )
        random_input = torch.randn(20, 10, device=self.rank)
        for _ in range(10):
            out = ddp_model(random_input)
            loss = out.sum()
            loss.backward()
        return ddp_model
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @skip_if_lt_x_gpu(2)
    def test_different_graph_across_ranks(self):
        """find_unused_parameters vs static_graph on per-rank-different graphs.

        With find_unused_parameters the reducer must not rebuild buckets;
        with static_graph it must. Final parameters must agree either way.
        """
        base_model = self._test_different_graph_across_ranks(
            find_unused_parameters=True
        )
        self.assertFalse(
            base_model._get_ddp_logging_data().get("has_rebuilt_buckets", 0)
        )
        static_model = self._test_different_graph_across_ranks(static_graph=True)
        self.assertTrue(
            static_model._get_ddp_logging_data().get("has_rebuilt_buckets", 0)
        )
        # Both training modes should converge to identical parameters.
        for i, j in zip(
            base_model.parameters(), static_model.parameters(), strict=True
        ):
            self.assertEqual(i, j)
    @require_backend_is_available({"gloo"})
    @skip_but_pass_in_sandcastle_if(
        IS_MACOS or IS_WINDOWS,
        "MacOS uses uv transport which does not have as robust error handling as tcp transport",
    )
    def test_monitored_barrier_gloo(self):
        """monitored_barrier: success paths, then failure reporting when one
        rank (rank 1) skips the barrier entirely."""
        tensors = [torch.ones(10) * self.rank]
        # Kick off some allreduce work on all ranks
        for _ in range(10):
            dist.all_reduce(torch.cat(tensors))
        # Run monitored barrier and ensure it passes
        timeout = timedelta(seconds=2)
        dist.monitored_barrier(timeout=timeout)
        # Check monitored_barrier success with wait_all_ranks=True
        for _ in range(10):
            dist.all_reduce(torch.cat(tensors))
        dist.monitored_barrier(timeout=timeout, wait_all_ranks=True)
        # All ranks besides 1 call into barrier, rank 0 should report failure
        # while others report gloo error.
        failed_rank = 1
        src_rank = 0
        if self.rank == src_rank:
            with self.assertRaisesRegex(
                RuntimeError, f"Rank {failed_rank} failed to pass monitoredBarrier"
            ):
                dist.monitored_barrier(timeout=timeout)
        elif self.rank != failed_rank:
            # Other ranks should not pass barrier since rank 0 failed.
            err_regex = (
                f"Rank {self.rank} successfully reached monitoredBarrier,"
                f" but received errors while waiting for send/recv from rank"
                f" {src_rank}"
            )
            with self.assertRaisesRegex(RuntimeError, err_regex):
                dist.monitored_barrier(timeout=timeout)

        # We need a barrier since otherwise failed_rank exits too early
        # and cause a timeout.
        self._barrier(timeout=30)
    @require_backend_is_available({"gloo"})
    def test_monitored_barrier_gloo_subgroup(self):
        # Tests that monitored_barrier works as expected on non-default
        # process groups.
        failed_rank = 1
        # NOTE(review): timeout is passed as a bare float here while sibling
        # tests use timedelta — monitored_barrier appears to accept both;
        # confirm against the dist.monitored_barrier API.
        timeout = 0.1
        subgroup = dist.new_group(ranks=[0, 1])

        # failed_rank never calls into the barrier, simulating a hang.
        if self.rank == failed_rank:
            return

        if self.rank == 0:
            with self.assertRaisesRegex(
                RuntimeError, f"Rank {failed_rank} failed to pass monitoredBarrier"
            ):
                dist.monitored_barrier(subgroup, timeout)
        else:
            # Other ranks call into monitored_barrier, but this should be a
            # noop because they are not part of the subgroup. Verify that
            # there are no errors here.
            dist.monitored_barrier(subgroup, timeout)
    def _test_monitored_barrier_allreduce_hang(self, wait_all_ranks):
        """Simulate ranks stuck in an NCCL allreduce while rank 0 runs a gloo
        monitored_barrier; verify the barrier reports the hung rank(s).

        With ``wait_all_ranks=True`` all timed-out ranks are reported,
        otherwise only the first (lowest) one.
        """
        # tests expected behavior when nonzero rank hangs.
        nccl_pg = dist.new_group(
            ranks=list(range(int(self.world_size))),
            # provide sufficient timeout so communicators
            # can be initialized in ctor.
            timeout=timedelta(seconds=15),
            backend=dist.Backend.NCCL,
        )
        gloo_pg = dist.new_group(
            ranks=list(range(int(self.world_size))),
            backend=dist.Backend.GLOO,
        )
        tensors = [torch.ones(10, device=self.rank) * self.rank]
        # Let all ranks call allreduce first to set up communicators etc.
        # Directly simulating error here will run into store issue described
        # in https://github.com/pytorch/pytorch/issues/54524.
        nccl_pg.allreduce(tensors).wait(timedelta(seconds=5))
        # All ranks besides 0 call into allreduce. This is to simulate a
        # desync across the world, where some ranks call into
        # monitored_barrier() and others are stuck in collective comm. In
        # practice, we don't need TORCH_NCCL_BLOCKING_WAIT, but we use it in this
        # test to ensure it exits cleanly.
        if self.rank != 0:
            # Can get different errors here depending on whether gloo-based
            # wrapper PG is enabled or not, since with wrapper pg, it will
            # fail in a collective synchronization check and not actually
            # call into the nccl pg.
            if dist.get_debug_level() == dist.DebugLevel.DETAIL:
                err_regex = "Timed out waiting"
            else:
                err_regex = "caught collective operation timeout"
            with self.assertRaisesRegex(RuntimeError, err_regex):
                nccl_pg.allreduce(tensors).wait(timedelta(seconds=0.1))
        else:
            # Rank 0 should report first (in order) timed out rank or all ranks
            # depending on wait_all_ranks flag passed into monitored_barrier.
            if wait_all_ranks:
                rank_str = ", ".join(
                    [str(i) for i in range(1, int(self.world_size))]
                )
                err_regex = f"Ranks {rank_str} failed to pass monitoredBarrier"
            else:
                expected_first_fail_rank = 1
                err_regex = f"Rank {expected_first_fail_rank} failed to pass monitoredBarrier"
            monitored_barrier_timeout_seconds = timedelta(seconds=0.1)
            with self.assertRaisesRegex(RuntimeError, err_regex):
                gloo_pg.monitored_barrier(
                    monitored_barrier_timeout_seconds, wait_all_ranks=wait_all_ranks
                )

        self._barrier(timeout=30)
    @with_nccl_blocking_wait
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
    def test_monitored_barrier_allreduce_hang(self):
        """Hung nonzero rank: only the first timed-out rank is reported."""
        # tests expected behavior when nonzero rank hangs and we want to
        # report first timed out rank.
        self._test_monitored_barrier_allreduce_hang(wait_all_ranks=False)
    @with_nccl_blocking_wait
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
    def test_monitored_barrier_allreduce_hang_wait_all_ranks(self):
        """Hung nonzero ranks: all timed-out ranks are reported."""
        # Need to disable TORCH_NCCL_DUMP_ON_TIMEOUT otherwise this test times out
        os.environ["TORCH_NCCL_DUMP_ON_TIMEOUT"] = "0"
        # tests expected behavior when nonzero rank hangs and we want to
        # report all timed out ranks.
        self._test_monitored_barrier_allreduce_hang(wait_all_ranks=True)
    @require_backend_is_available({"gloo"})
    def test_monitored_barrier_gloo_rank_0_timeout(self):
        """Rank 0 must report its own timeout when given a zero timeout."""
        # tests error when rank 0 exhausts its given timeout.
        process_group = dist.new_group(ranks=list(range(int(self.world_size))))
        timeout = timedelta(seconds=0)
        # Only rank 0 enters the barrier; with a zero timeout it times out
        # on itself immediately.
        if self.rank == 0:
            with self.assertRaisesRegex(
                RuntimeError, f"Rank {self.rank} timed out in monitoredBarrier"
            ):
                process_group.monitored_barrier(timeout)
    @require_backend_is_available({"gloo"})
    @skip_if_small_worldsize
    @skip_but_pass_in_sandcastle_if(
        IS_MACOS or IS_WINDOWS,
        "MacOS uses uv transport which does not have as robust error handling as tcp transport",
    )
    def test_monitored_barrier_failure_order(self):
        # Ensure that the first (in sorted order) rank is reported when
        # multiple ranks fail to pass the monitored_barrier.
        # TODO(#54879): Provide ability to wait and report all failed ranks
        expected_first_failed_rank = 2
        timeout = timedelta(seconds=2)
        src_rank = 0
        # Ranks >= 2 never enter the barrier; rank 0 should name rank 2
        # (lowest failed rank), while rank 1 sees a send/recv error.
        if self.rank == src_rank:
            with self.assertRaisesRegex(
                RuntimeError, f"Rank {expected_first_failed_rank}"
            ):
                dist.monitored_barrier(timeout=timeout)
        elif self.rank == 1:
            err_regex = (
                f"Rank {self.rank} successfully reached monitoredBarrier,"
                f" but received errors while waiting for send/recv from rank"
                f" {src_rank}"
            )
            with self.assertRaisesRegex(RuntimeError, err_regex):
                dist.monitored_barrier(timeout=timeout)
    @require_backend_is_available({"gloo"})
    @skip_if_small_worldsize
    def test_monitored_barrier_wait_all_ranks(self):
        # Tests simple case where > 1 rank does not call into monitored
        # barrier and verifies all ranks are reported by rank 0.
        if self.rank == 0:
            timeout = timedelta(seconds=0.1)
            # Every rank except 0 is absent, so all of them must be named.
            rank_str = ", ".join([str(i) for i in range(1, int(self.world_size))])
            err_regex = f"Ranks {rank_str} failed to pass monitoredBarrier"
            with self.assertRaisesRegex(RuntimeError, err_regex):
                dist.monitored_barrier(timeout=timeout, wait_all_ranks=True)
@require_backend_is_available(DistTestCases.backend_feature["gpu"])
@with_dist_debug_levels(levels=["INFO"])
@skip_if_lt_x_gpu(2)
def test_ddp_build_debug_param_to_name_mapping(self):
model = TwoLinLayerNet()
net = torch.nn.parallel.DistributedDataParallel(
model.cuda(self.rank),
device_ids=[self.rank],
)
expected_mapping = {0: "a.weight", 1: "b.weight"}
net_params, _ = net._build_params_for_reducer()
param_to_name_mapping = net._build_debug_param_to_name_mapping(net_params)
self.assertDictEqual(expected_mapping, param_to_name_mapping)
# Test when DDP is used with ignored parameters.
model = TwoLinLayerNet()
# Parameters to ignore are in the format {module_name}.{param_name}
params_to_ignore = ["a.weight"]
torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
model, params_to_ignore
)
net = torch.nn.parallel.DistributedDataParallel(
model.cuda(self.rank),
device_ids=[self.rank],
)
expected_mapping = {0: "b.weight"}
net_params, _ = net._build_params_for_reducer()
param_to_name_mapping = net._build_debug_param_to_name_mapping(net_params)
self.assertDictEqual(expected_mapping, param_to_name_mapping)
# Test errors are raised when DDP and module parameters mismatch.
# This generally indicates a bug with DDP and is not expected to
# happen in user applications.
model = TwoLinLayerNet()
net = torch.nn.parallel.DistributedDataParallel(
model.cuda(self.rank),
device_ids=[self.rank],
)
net_params, _ = net._build_params_for_reducer()
if self.rank == 0:
print(type(net_params[0]))
net_params.extend(
[
torch.nn.Parameter(torch.ones(1)),
torch.nn.Parameter(torch.ones(1)),
]
)
with self.assertRaisesRegex(ValueError, "Expected param to name mapping"):
net._build_debug_param_to_name_mapping(net_params)
net_params = net_params[:-3]
with self.assertRaisesRegex(ValueError, "Param with name"):
net._build_debug_param_to_name_mapping(net_params)
net_params.extend(
[
torch.nn.Parameter(torch.ones(1)),
torch.nn.Parameter(torch.ones(1)),
]
)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    @with_dist_debug_levels(levels=["INFO"])
    @skip_if_lt_x_gpu(2)
    def test_ddp_build_debug_param_to_name_mapping_requires_grad(self):
        """Params with requires_grad=False must be absent from the debug mapping."""

        class Net(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.lin = nn.Linear(10, 10)
                # Is not tracked by DDP and should not show up in param to
                # name mapping.
                self.lin.bias.requires_grad_(False)

            def forward(self, x):
                return self.lin(x)

        model = Net()
        net = torch.nn.parallel.DistributedDataParallel(
            model.cuda(self.rank), device_ids=[self.rank]
        )
        expected_mapping = {
            0: "lin.weight",
        }
        net_params, _ = net._build_params_for_reducer()
        param_to_name_mapping = net._build_debug_param_to_name_mapping(net_params)
        self.assertEqual(param_to_name_mapping, expected_mapping)
    def _test_ddp_multiple_nested_unused_params_error(self, ignore_sparse):
        """Verify DDP's unused-parameter error names exactly the unused params.

        Builds a nested module with several unused submodules, runs two
        iterations (the second triggers the unused-param RuntimeError since
        find_unused_parameters is not set), and checks that the error message
        contains every expected unused param FQN and index, no used-param
        FQNs, and — when ``ignore_sparse`` — none of the DDP-ignored params.
        """
        debug_mode_off = dist.get_debug_level() == dist.DebugLevel.OFF

        class SubModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.embedding_net = EmbeddingNetDifferentParams(0)
                self.lin = TwoLinLayerNet()
                self.bn = BatchNormNet()
                self.lin_layer = nn.Linear(4, 10, bias=False)

            def forward(self, x):
                x = self.bn(x)
                x = self.lin_layer(x)
                x = self.lin.a(x)  # self.lin.b param unused
                # EmbeddingNetDifferentParams entirely unused: self.embedding_net.embedding and
                # self.embedding_net.lin unused.
                return x

        class MyModel(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.sub_module = SubModule()

            def forward(self, x):
                return self.sub_module(x)

        model = MyModel()
        sparse_embedding_fqns = []
        if ignore_sparse:
            # Tell DDP to ignore the sparse embedding params entirely.
            for module_name, module in model.named_modules():
                if module == model.sub_module.embedding_net.embedding:
                    for parameter_name, _param in module.named_parameters(
                        recurse=False
                    ):
                        fqn = f"{module_name}.{parameter_name}"
                        sparse_embedding_fqns.append(fqn)

            torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
                model, sparse_embedding_fqns
            )
            unused_modules = [
                model.sub_module.embedding_net.lin,
                model.sub_module.lin.b,
            ]
        else:
            unused_modules = list(model.sub_module.embedding_net.modules()) + [
                model.sub_module.lin.b,
            ]

        expected_unused_param_fqns = []
        used_param_fqns = []  # Validate that these don't mistakenly show up.
        fqn_to_param_index = {}
        index = 0
        # Build the expected FQN/index bookkeeping, mirroring the reducer's
        # indexing (ignored sparse params do not consume an index).
        for module_name, module in model.named_modules():
            for parameter_name, _param in module.named_parameters(recurse=False):
                fqn = f"{module_name}.{parameter_name}"
                fqn_to_param_index[fqn] = index
                if fqn not in sparse_embedding_fqns:
                    index += 1
                if module in unused_modules:
                    expected_unused_param_fqns.append(fqn)
                else:
                    if (
                        not ignore_sparse
                        or module != model.sub_module.embedding_net.embedding
                    ):
                        used_param_fqns.append(fqn)

        net = torch.nn.parallel.DistributedDataParallel(
            model.cuda(self.rank),
            device_ids=[self.rank],
        )
        batch, dim = 10, 2
        inp = torch.ones(batch, dim)
        for i in range(2):
            if i == 0:
                out = net(inp)
                loss = out.sum()
                loss.backward()
            else:
                try:
                    out = net(inp)
                    loss = out.sum()
                    loss.backward()
                except RuntimeError as e:
                    e = str(e)

                    unused_param_substr = e[e.find("did not receive grad") :]
                    # Validate that each unused param fully qualified name
                    # shows up in error logs. We do this instead of
                    # constructing a joined string since order of parameters
                    # can be different in Reducer. In addition, validate
                    # param indices show up as well.
                    for unused_param_fqn in expected_unused_param_fqns:
                        self.assertTrue(
                            unused_param_fqn in unused_param_substr
                            or debug_mode_off
                        )
                        self.assertTrue(
                            str(fqn_to_param_index[unused_param_fqn])
                            in unused_param_substr,
                            f"Did not find index {fqn_to_param_index[unused_param_fqn]} for {unused_param_fqn}",
                        )

                    # Validate that used param fqns don't show up in error
                    # logs.
                    for used_param_fqn in used_param_fqns:
                        self.assertFalse(used_param_fqn in unused_param_substr)
                    # Validate that ignored param fqns don't show up as unused
                    # (since DDP does not track them)
                    for sparse_param_fqn in sparse_embedding_fqns:
                        self.assertFalse(sparse_param_fqn in unused_param_substr)
                else:
                    self.assertTrue(False, "Expected error was not raised!")
    @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"])
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @skip_if_lt_x_gpu(2)
    def test_ddp_multiple_nested_unused_params_error(self):
        """Unused-param error reporting without any ignored params."""
        self._test_ddp_multiple_nested_unused_params_error(ignore_sparse=False)
    @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"])
    @require_backend_is_available(DistTestCases.backend_feature["gpu"])
    @skip_if_lt_x_gpu(2)
    def test_ddp_multiple_nested_unused_params_err_ignore_params(self):
        # Tests unused parameter reporting when DDP is configured to ignore
        # certain parameters.
        self._test_ddp_multiple_nested_unused_params_error(ignore_sparse=True)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    @skip_if_lt_x_gpu(2)
    def test_ddp_inference(self):
        # tests that DDP module can be run on a single node with no_grad
        # or eval setting and there is no hang.
        rank = self.rank
        torch.cuda.set_device(rank)
        model = Net().cuda()
        local_model = copy.deepcopy(model)
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[rank],
        )
        # SyncBN is included because it performs cross-rank stat syncing
        # during training, which must not fire in eval.
        syncbn_model = nn.SyncBatchNorm(
            2, momentum=0.99, track_running_stats=False
        ).cuda()
        local_syncbn_model = copy.deepcopy(syncbn_model)
        syncbn_model = torch.nn.parallel.DistributedDataParallel(
            syncbn_model, device_ids=[rank]
        )
        inp = torch.randn(10, 2, device=rank)
        inp_syncbn = torch.randn(10, 2, 4, 4, device=rank)
        tests = [
            (model, local_model, inp),
            (syncbn_model, local_syncbn_model, inp_syncbn),
        ]
        for test in tests:
            test_model, test_local_model, test_inp = test
            # Only rank 0 runs inference; if DDP/SyncBN issued collectives
            # in eval mode this would hang.
            if self.rank == 0:
                test_model.eval()
                test_local_model.eval()
                for _ in range(6):
                    self.assertEqual(
                        test_model(test_inp), test_local_model(test_inp)
                    )

        # Barrier since only rank 0 runs inference. Test should be
        # much faster than 30s, but this is to avoid flakiness.
        self._barrier(timeout=30)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    @skip_if_lt_x_gpu(2)
    @unittest.skip(
        "Test is failing, see https://github.com/pytorch/pytorch/pull/113620"
    )
    def test_ddp_sync_bn_training_vs_eval(self):
        """SyncBatchNorm must all-gather stats in train mode and not in eval.

        Detected by looking for all_gather events in the autograd profiler
        trace of each phase.
        """
        rank = self.rank
        torch.cuda.set_device(rank)
        # Need to set track_running_stats=False, when track_running_stats=True,
        # bn_training is False and sync could not occur in eval model.
        model = nn.SyncBatchNorm(2, momentum=0.99, track_running_stats=False).cuda(
            rank
        )
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank])
        # Test sync occurs in training mode.
        with torch.autograd.profiler.profile() as prof:
            for _ in range(6):
                inp = torch.randn(10, 2, 4, 4).cuda(rank)
                out = model(inp)
                loss = out.sum()
                loss.backward()

        # SyncBN allgathers stats across all ranks, so verify call to
        # all_gather in profiler.
        if BACKEND == "nccl":
            all_gather_calls = get_profiling_event("_all_gather_base", prof)
        else:
            all_gather_calls = get_profiling_event("all_gather", prof)
        self.assertNotEqual([], all_gather_calls)

        # Only do inference on one rank. If SyncBN did collective stats sync,
        # this would hang/error.
        model_inference = model.module
        if self.rank == 0:
            model_inference.eval()
            with torch.autograd.profiler.profile() as prof:
                for _ in range(6):
                    inp = torch.randn(10, 2, 4, 4).cuda(rank)
                    out = model_inference(inp)
                    loss = out.sum()
                    loss.backward()

            # Ensure sync does not occur in eval() mode.
            if BACKEND == "nccl":
                all_gather_calls = get_profiling_event("_all_gather_base", prof)
            else:
                all_gather_calls = get_profiling_event("all_gather", prof)
            self.assertEqual([], all_gather_calls)
    @skip_if_lt_x_gpu(2)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    def test_ddp_python_error_logged(self):
        """Python-level errors raised after reducer construction must reach
        the DDP logger."""
        # Most python exceptions in DDP are raised during init before
        # reducer is constructed, so we don't have a logger in those cases.
        # However, the below is one example where a python error is thrown
        # after reducer is constructed.
        model = TwoLinLayerNet().cuda(self.rank)
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[self.rank],
        )
        expected_err = "must be callable"
        # Registering a non-callable hook raises after the reducer exists.
        with self.assertRaisesRegex(TypeError, expected_err):
            model.register_comm_hook({}, {})

        verify_ddp_error_logged(model, expected_err)
@skip_if_lt_x_gpu(2)
@skip_but_pass_in_sandcastle_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel",
)
def test_ddp_static_graph_nested_types(self):
# Tests for static graph training when outputs are not just tensors
# but can be (nested) tuple, list, dict, etc.
rank = self.rank
torch.cuda.set_device(rank)
class NestedOutputModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.lin = nn.Linear(100, 1, bias=False)
def forward(self, inp, output_type):
if output_type == "tuple":
return (
self.lin(inp),
(
self.lin(inp),
self.lin(inp),
),
)
elif output_type == "list":
return [
self.lin(inp),
[
self.lin(inp),
self.lin(inp),
],
]
elif output_type == "dict":
return {
"a": self.lin(inp),
"b": {
"c": self.lin(inp),
},
}
def get_loss(model_output):
loss = 0.0
if isinstance(model_output, torch.Tensor):
return model_output.sum()
elif isinstance(model_output, dict):
for value in model_output.values():
loss += get_loss(value)
elif isinstance(model_output, (tuple, list)):
for x in model_output:
loss += get_loss(x)
else:
raise ValueError(f"Unknown model output type {type(model_output)}")
return loss
model = NestedOutputModule().cuda(rank)
model_static_graph = copy.deepcopy(model)
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[rank],
)
model_static_graph = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[rank],
static_graph=True,
)
inp = torch.randn(10, 100)
type_mapping = {
"list": list,
"tuple": tuple,
"dict": dict,
}
for output_type in type_mapping:
for _ in range(6):
out = model(inp, output_type=output_type)
loss = get_loss(out)
loss.backward()
self._model_step(model)
out_static = model_static_graph(inp, output_type=output_type)
self.assertTrue(isinstance(out_static, type_mapping[output_type]))
loss_static = get_loss(out_static)
loss_static.backward()
self._model_step(model_static_graph)
for p, p_static in zip(
model.parameters(), model_static_graph.parameters(), strict=True
):
self.assertEqual(p, p_static)
    @skip_if_lt_x_gpu(2)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    def test_ddp_returns_tensor_with_no_grad(self):
        # Tests case where module returns tensor that does not require grad.
        torch.cuda.set_device(self.rank)

        class MyModel(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.fc1 = nn.Linear(10, 10, bias=False)
                self.fc2 = nn.Linear(10, 10, bias=False)

            def forward(self, x):
                x = self.fc2(F.relu(self.fc1(x)))
                y = x.clone()
                # Return one detached (no-grad) and one grad-tracking output.
                x = x.detach()
                assert not x.requires_grad
                return (x, y)

        model = MyModel().to(self.rank)
        inp = torch.randn(1, 10, device=self.rank)
        # Exercise every combination of unused-param detection x static graph.
        for find_unused, static_graph in itertools.product(
            [True, False], [True, False]
        ):
            ddp = DistributedDataParallel(
                model,
                device_ids=[self.rank],
                output_device=self.rank,
                find_unused_parameters=find_unused,
                static_graph=static_graph,
            )
            for _ in range(6):
                out = ddp(inp)
                self.assertFalse(out[0].requires_grad)
                o = (out[0] + out[1]).sum()
                o.backward()
    @skip_if_lt_x_gpu(2)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    def test_detect_ddp_is_actually_static(self):
        """Reducer's graph-static detection: static when the unused-param set
        is fixed across iterations, non-static when it varies."""

        class ToyModel(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.net1 = nn.Linear(10, 10, bias=False)
                self.net2 = nn.Linear(10, 10)

            def forward(self, x, find_unused, dynamic):
                if find_unused:
                    if dynamic:
                        return self.net2(self.net1(x))
                    else:
                        return self.net2(x)
                else:
                    return self.net2(self.net1(x))

        # Set of unused parameters don't change across iterations
        torch.cuda.set_device(self.rank)
        model = ToyModel().cuda()
        for find_unused in [True, False]:
            ddp = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.rank],
                find_unused_parameters=find_unused,
            )
            inp = torch.randn(1, 10, device="cuda")
            for _ in range(6):
                out = ddp(inp, find_unused=find_unused, dynamic=False)
                loss = out.sum()
                loss.backward()
            self.assertTrue(ddp.reducer._ddp_graph_static())

        # Set of unused parameters dynamically change
        ddp = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[self.rank],
            find_unused_parameters=True,
        )
        inp = torch.randn(1, 10, device="cuda")
        for i in range(6):
            # Alternate graphs every iteration so the reducer must flag
            # the graph as non-static.
            out = ddp(inp, find_unused=True, dynamic=i % 2 == 0)
            loss = out.sum()
            loss.backward()
        self.assertFalse(ddp.reducer._ddp_graph_static())
def _test_ddp_new_tensor_in_fwd(self, static_graph):
# Test from https://github.com/pytorch/pytorch/issues/60733
class MyModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc1 = nn.Linear(10, 10, bias=False)
self.fc2 = nn.Linear(10, 10, bias=False)
self.device = self.fc1.weight.device
def __init_opt(self):
opt = torch.randn(1, 10, device=self.device)
return opt
def forward(self, x, opt_1, opt_2, opt_nested):
x = F.relu(self.fc1(x))
x = self.fc2(x)
if opt_1 is None:
opt_1 = self.__init_opt()
if opt_2 is None:
opt_2 = self.__init_opt()
if opt_nested is None or not torch.is_tensor(opt_nested):
opt_nested = self.__init_opt()
# Test multiple tensors as well as newly created tensors
# within a struct.
return x, opt_1, opt_2, {"tensor": opt_nested}
model = MyModel().to(self.rank)
for find_unused in [True, False]:
ddp = DistributedDataParallel(
model,
device_ids=[self.rank],
output_device=self.rank,
broadcast_buffers=False,
find_unused_parameters=find_unused,
static_graph=static_graph,
)
opt = [None for _ in range(3)]
for i in range(2):
ddp.zero_grad()
x = torch.randn(1, 10, device=self.rank)
out, opt[0], opt[1], opt[2] = ddp(
x, opt_1=opt[0], opt_2=opt[1], opt_nested=opt[2]
)
for i in range(len(opt)):
if torch.is_tensor(opt[i]):
self.assertEqual(opt[i].grad_fn, None)
else:
self.assertEqual(opt[i]["tensor"].grad_fn, None)
out.mean().backward()
    @skip_if_lt_x_gpu(2)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    def test_ddp_new_tensor_in_fwd(self):
        """Fresh tensors created in forward(), non-static graph."""
        return self._test_ddp_new_tensor_in_fwd(static_graph=False)
    @skip_if_lt_x_gpu(2)
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    def test_ddp_new_tensor_in_fwd_static_graph(self):
        """Fresh tensors created in forward(), static graph."""
        return self._test_ddp_new_tensor_in_fwd(static_graph=True)
    def _test_ddp_buffer_hook_allreduce(self, return_futures):
        """Custom buffer comm hook (allreduce) vs. manual allreduce baseline.

        Registers an allreduce buffer hook at both PRE_FORWARD and
        POST_FORWARD locations and checks buffers end up identical to a
        no-hook DDP model whose buffers are all-reduced manually at the
        matching point. With ``return_futures`` the hook returns futures
        that DDP awaits at the end of backward.
        """
        rank = self.rank
        torch.cuda.set_device(rank)
        torch.manual_seed(rank)
        torch.cuda.manual_seed(rank)

        def buffer_comm_hook(ddp, named_buffers):
            buffers = [buffer for (_, buffer) in named_buffers.items()]
            futs = [
                dist.all_reduce(
                    buffer, group=ddp.process_group, async_op=True
                ).get_future()
                for buffer in buffers
            ]
            if return_futures:
                return futs
            else:
                torch.futures.collect_all(futs).wait()

        hook_pre_fwd = (
            torch.nn.parallel.distributed._BufferCommHookLocation.PRE_FORWARD
        )
        hook_post_fwd = (
            torch.nn.parallel.distributed._BufferCommHookLocation.POST_FORWARD
        )
        for hook_run_location in [
            hook_pre_fwd,
            hook_post_fwd,
        ]:
            model = NetWithBuffers().cuda(rank)
            model_ddp = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.rank],
            )
            model_ddp._register_buffer_comm_hook(
                model_ddp, buffer_comm_hook, hook_run_location
            )
            # Baseline model without hook; broadcast disabled so we can
            # replicate the hook's allreduce manually.
            model_ddp_no_hook = torch.nn.parallel.DistributedDataParallel(
                copy.deepcopy(model),
                device_ids=[self.rank],
                broadcast_buffers=False,
            )
            inp = torch.randn(2, 10, device=rank)
            for _ in range(2):
                loss_hook = model_ddp(inp).sum()
                # Since buffer reduction is done pre-forward, simulate it for
                # no hook case here.
                # Simulate allreduce appropriately depending on hook location.
                if hook_run_location == hook_pre_fwd:
                    model_no_hook_buffers = list(model_ddp_no_hook.module.buffers())
                    for tensor in model_no_hook_buffers:
                        dist.all_reduce(tensor)

                loss_no_hook = model_ddp_no_hook(inp).sum()
                if hook_run_location == hook_post_fwd:
                    model_no_hook_buffers = list(model_ddp_no_hook.module.buffers())
                    for tensor in model_no_hook_buffers:
                        dist.all_reduce(tensor)
                torch.cuda.synchronize()

                # if return_futures, they are only awaited on by DDP
                # at the end of the backwards pass for maximum overlap.
                if not return_futures:
                    self._verify_buffers_equal(model_ddp, model_ddp_no_hook)
                loss_hook.backward()
                loss_no_hook.backward()
                # Note that when custom hooks return futures, this
                # comparison is not expected to work when hook run location
                # is pre-forward pass. This is because the hook does async
                # communication and forward pass modifies the buffer without
                # appropriate synchronization. Therefore, if returning
                # futures from custom buffer hooks, it is advised to set
                # hook run location to post forward.
                if return_futures and hook_run_location == hook_post_fwd:
                    self._verify_buffers_equal(model_ddp, model_ddp_no_hook)
            dist.barrier()
        @skip_if_lt_x_gpu(2)
        @skip_but_pass_in_sandcastle_if(
            BACKEND not in DistTestCases.backend_feature["ddp"],
            f"The {BACKEND} backend does not support DistributedDataParallel",
        )
        def test_ddp_buffer_hook_allreduce_return_future(self):
            """Buffer comm hook that returns futures; DDP awaits them at the
            end of the backward pass for maximum compute/comm overlap."""
            self._test_ddp_buffer_hook_allreduce(return_futures=True)
        @skip_if_lt_x_gpu(2)
        @skip_but_pass_in_sandcastle_if(
            BACKEND not in DistTestCases.backend_feature["ddp"],
            f"The {BACKEND} backend does not support DistributedDataParallel",
        )
        def test_ddp_buffer_hook_allreduce(self):
            """Buffer comm hook variant that waits on the allreduce inline
            (no futures returned to DDP)."""
            self._test_ddp_buffer_hook_allreduce(return_futures=False)
        @skip_if_lt_x_gpu(2)
        @skip_but_pass_in_sandcastle_if(
            BACKEND not in DistTestCases.backend_feature["ddp"],
            f"The {BACKEND} backend does not support DistributedDataParallel",
        )
        def test_ddp_broadcast_buffer_via_hook(self):
            """Verify a custom buffer comm hook that calls
            ``_default_broadcast_coalesced`` matches DDP's built-in
            buffer-broadcast behavior."""
            # test that _distributed_broadcast_coalesced via registered hook is
            # equivalent to DDP's default broadcast coalesced.
            rank = self.rank
            torch.cuda.set_device(rank)
            # Per-rank seeds so initial buffers differ across ranks; the
            # broadcast must then make them equal.
            torch.manual_seed(rank)
            torch.cuda.manual_seed(rank)

            def buffer_comm_hook(ddp, named_buffers):
                # named_buffers is a Dict[str, Tensor] representing a mapping
                # from buffer name to buffer.
                buffers = [buffer for (_, buffer) in named_buffers.items()]
                ddp._default_broadcast_coalesced(buffers)

            model = NetWithBuffers().cuda(rank)
            model_ddp = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.rank],
            )
            model_ddp._register_buffer_comm_hook(model_ddp, buffer_comm_hook)
            # Baseline instance relying on DDP's default buffer broadcast.
            model_ddp_no_hook = torch.nn.parallel.DistributedDataParallel(
                copy.deepcopy(model),
                device_ids=[self.rank],
            )
            inp = torch.randn(2, 10, device=rank)
            for _ in range(2):
                loss_hook = model_ddp(inp).sum()
                loss_no_hook = model_ddp_no_hook(inp).sum()
                # Buffers were synchronized pre-forward in both models, so
                # they must agree here.
                self._verify_buffers_equal(model_ddp, model_ddp_no_hook)
                loss_hook.backward()
                loss_no_hook.backward()
        @skip_if_lt_x_gpu(2)
        @skip_but_pass_in_sandcastle_if(
            BACKEND not in DistTestCases.backend_feature["ddp"],
            f"The {BACKEND} backend does not support DistributedDataParallel",
        )
        def test_ddp_remove_autograd_hooks(self):
            """After ``_remove_autograd_hooks`` on a failed DDP instance, the
            same module can be wrapped in a fresh DDP instance and train."""

            class SimulateError(torch.autograd.Function):
                # Identity in forward; unconditionally raises in backward to
                # leave the first DDP instance's reducer in a half-done state.
                @staticmethod
                def forward(ctx, input):
                    return input

                @staticmethod
                def backward(ctx, grad_output):
                    raise RuntimeError

            class MyModel(nn.Module):
                def __init__(self, device):
                    super().__init__()
                    # Toggle controlling whether backward will raise.
                    self.error = True
                    self.fc1 = nn.Linear(10, 10).cuda(device)

                def forward(self, inp):
                    if self.error:
                        return self.fc1(SimulateError.apply(inp))
                    else:
                        return self.fc1(inp)

            # Run with error to trigger backward pass that marks fc1 as being marked
            # ready. If we don't remove autograd hooks before running below it would
            # fail on the old autograd hook.
            model = MyModel(self.rank)
            input = torch.rand(10, 10, requires_grad=True).cuda(self.rank)
            model_ddp1 = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.rank],
            )

            with self.assertRaises(RuntimeError):
                model_ddp1(input).sum().backward()

            # Remove autograd hooks on old instance.
            model_ddp1._remove_autograd_hooks()

            # Try another DDP instance without error now.
            model.error = False
            model_ddp2 = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.rank],
            )
            model_ddp2(input).sum().backward()
        @skip_if_lt_x_gpu(2)
        @skip_but_pass_in_sandcastle_if(
            BACKEND not in DistTestCases.backend_feature["ddp"],
            f"The {BACKEND} backend does not support DistributedDataParallel",
        )
        @unittest.skip(
            "Test is failing, tracking issue at https://github.com/pytorch/pytorch/issues/102751"
        )
        def test_ddp_has_finalized(self):
            """Make rank 0 skip part of the backward (its second output is
            hidden inside a dataclass, so autograd never reaches it) and check
            that ``_check_reducer_finalized`` reports the unfinished reduction
            on that rank only."""

            @dataclass
            class MyClass:
                # Wrapping a tensor in a plain dataclass hides it from DDP's
                # output traversal, so its grad fn is never reached.
                obj: torch.Tensor

            class MyModel(nn.Module):
                def __init__(self, rank):
                    super().__init__()
                    self.rank = rank
                    self.fc1 = nn.Linear(1024, 1024).cuda(rank)
                    self.fc2 = nn.Linear(1024, 2 * 1024).cuda(rank)

                def forward(self, inp):
                    if self.rank == 0:
                        return self.fc1(inp), MyClass(self.fc2(inp))
                    else:
                        return self.fc1(inp), self.fc2(inp)

            model = MyModel(self.rank)
            input = torch.rand(10, 1024, requires_grad=True).cuda(self.rank)
            ddp = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.rank],
                find_unused_parameters=True,
                bucket_cap_mb=(1024 * 4 / 1024 / 1024),  # One bucket per parameter.
            )

            if self.rank == 0:
                # Rank 0 only backprops through fc1; fc2's bucket never fires.
                out1, _ = ddp(input)
                out1.sum().backward()
            else:
                out1, out2 = ddp(input)
                (out1.sum() + out2.sum()).backward()

            if self.rank == 0:
                with self.assertRaisesRegex(
                    RuntimeError,
                    "Expected to have finished reduction in the prior iteration",
                ):
                    ddp._check_reducer_finalized()

                with self.assertRaisesRegex(
                    RuntimeError,
                    "Expected to have finished reduction in the prior iteration",
                ):
                    ddp(input)
            else:
                # Other ranks completed their reduction and may keep going.
                ddp._check_reducer_finalized()
                ddp(input)
"""
        # The set of "test_ddp_update_process_group..." tests below started
        # failing after CI was upgraded from 2 GPUs to 4 GPUs, so they are
        # disabled (kept inside this string literal) for now.
        # TODO: document what each of these tests is meant to verify before
        # re-enabling them.
def _run_ddp_update_process_group(self, new_pg):
def get_num_torch_recompiles():
guard_failures = torch._dynamo.utils.guard_failures
num_recompiles = [len(guard_failures[code]) for code in guard_failures]
return 0 if len(num_recompiles) == 0 else max(num_recompiles)
class SimulateError(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, grad_output):
raise RuntimeError
class MyModel(torch.nn.Module):
def __init__(self, device):
super().__init__()
# 4MB for multiple buckets.
self.fc1 = torch.nn.Linear(1024, 1024).cuda(device)
self.fc2 = torch.nn.Linear(1024, 1024).cuda(device)
self.fc3 = torch.nn.Linear(1024, 1024).cuda(device)
def forward(self, inp, error):
if error:
return self.fc3(self.fc2(self.fc1(SimulateError.apply(inp))))
else:
return self.fc3(self.fc2(self.fc1(inp)))
input = torch.rand(10, 1024, requires_grad=True).cuda(self.rank)
ddp = torch.nn.parallel.DistributedDataParallel(
MyModel(self.rank),
device_ids=[self.rank],
find_unused_parameters=True,
bucket_cap_mb=1,
)
model = torch.compile(ddp)
def run_iteration():
# Run regular iteration.
out = model(input, error=False)
out.sum().backward()
torch.cuda.synchronize()
# Run with error.
with self.assertRaises(RuntimeError):
out = model(input, error=True)
out.sum().backward()
torch.cuda.synchronize()
run_iteration()
assert 0 == get_num_torch_recompiles()
if new_pg:
# Now reduce world_size and run iteration.
group_size_2 = dist.new_group(ranks=[0, 1])
ddp._update_process_group(group_size_2)
if self.rank in [0, 1]:
run_iteration()
# Increase the world size and run iteration.
group_size_3 = dist.new_group(ranks=[1, 2, 3])
ddp._update_process_group(group_size_3)
if self.rank in [1, 2, 3]:
run_iteration()
# Back to default size.
ddp._update_process_group(_get_default_group())
run_iteration()
else:
# Create default pg of smaller size.
dist.destroy_process_group()
if self.rank in [1, 2, 3]:
dist.init_process_group(
init_method=self.init_method,
backend=BACKEND,
world_size=3,
rank=self.rank - 1,
timeout=timedelta(seconds=default_pg_timeout),
)
ddp._update_process_group(_get_default_group())
run_iteration()
dist.destroy_process_group()
# Need a barrier here to ensure ranks 1, 2 and 3 are done.
self._barrier(wait_for=4)
# Need to init pg again for "_barrier" to succeed.
dist.init_process_group(
init_method=self.init_method,
backend=BACKEND,
world_size=4,
rank=self.rank,
timeout=timedelta(seconds=default_pg_timeout),
)
# Validate no more recompiles.
assert 0 == get_num_torch_recompiles()
@skip_if_lt_x_gpu(4)
@require_world_size(4)
@skip_but_pass_in_sandcastle_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel",
)
def test_ddp_update_process_group_new_group(self):
self._run_ddp_update_process_group(new_pg=True)
@skip_if_lt_x_gpu(4)
@require_world_size(4)
@skip_but_pass_in_sandcastle_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel",
)
def test_ddp_update_process_group_default_group(self):
self._run_ddp_update_process_group(new_pg=False)
@skip_if_lt_x_gpu(4)
@require_world_size(4)
@skip_but_pass_in_sandcastle_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel",
)
def test_ddp_update_process_group_grad_undefined(self):
class SimulateError(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, grad_output):
raise RuntimeError
class MyModel(torch.nn.Module):
def __init__(self, device):
super().__init__()
self.fc1 = torch.nn.Linear(10, 10).cuda(device)
self.fc2 = torch.nn.Linear(10, 10).cuda(device)
self.fc3 = torch.nn.Linear(10, 10).cuda(device)
def forward(self, inp, error):
if error:
return self.fc3(self.fc2(self.fc1(SimulateError.apply(inp))))
else:
return self.fc2(self.fc1(inp))
input = torch.rand(10, 10, requires_grad=True).cuda(self.rank)
ddp = torch.nn.parallel.DistributedDataParallel(
MyModel(self.rank),
device_ids=[self.rank],
find_unused_parameters=True,
bucket_cap_mb=1,
)
try:
ddp(input, True).sum().backward()
except RuntimeError:
ddp._update_process_group(_get_default_group())
# Reset grads.
for param in ddp.parameters():
param.grad = None
# Run ddp again.
ddp(input, False).sum().backward()
@skip_if_lt_x_gpu(4)
@require_world_size(4)
@skip_but_pass_in_sandcastle_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel",
)
def test_ddp_update_process_group_no_find_unused(self):
ddp = torch.nn.parallel.DistributedDataParallel(
torch.nn.Linear(10, 10).cuda(self.rank),
device_ids=[self.rank],
find_unused_parameters=False,
)
ddp._update_process_group(_get_default_group())
"""
        @skip_if_lt_x_gpu(2)
        @skip_but_pass_in_sandcastle_if(
            BACKEND not in DistTestCases.backend_feature["ddp"],
            f"The {BACKEND} backend does not support DistributedDataParallel",
        )
        def test_ddp_broadcast_buffer(self):
            """Rank 0 mutates a module buffer between iterations; DDP's
            pre-forward broadcast must propagate it so all ranks end up with
            identical buffers."""
            rank = self.rank
            torch.cuda.set_device(rank)
            # Per-rank seeds so ranks start with different buffer values.
            torch.manual_seed(rank)
            torch.cuda.manual_seed(rank)

            class NetWithBuffers(nn.Module):
                def __init__(self) -> None:
                    super().__init__()
                    self.a = nn.Linear(10, 10, bias=False)
                    self.b = nn.Linear(10, 1, bias=False)
                    self.register_buffer("buffer", torch.randn(1, 2))

                def forward(self, x):
                    return self.b(self.a(x))

            model = NetWithBuffers().cuda(rank)
            model_ddp = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.rank],
            )
            inp = torch.randn(2, 10, device=rank)
            for _ in range(2):
                if rank == 0:
                    # Diverge the authoritative (rank 0) buffer; the next
                    # forward should broadcast it to all other ranks.
                    model_ddp.module.buffer = model_ddp.module.buffer + 1
                loss = model_ddp(inp).sum()
                loss.backward()

            # Ensure all buffers are synchronized.
            bufs = [
                torch.empty_like(model_ddp.module.buffer)
                for _ in range(dist.get_world_size())
            ]
            dist.all_gather(bufs, model_ddp.module.buffer)
            rank_0_buf = bufs[0]
            for buf in bufs[1:]:
                self.assertEqual(rank_0_buf, buf)
        @skip_if_lt_x_gpu(2)
        @skip_but_pass_in_sandcastle_if(
            BACKEND != "nccl" and BACKEND != "gloo",
            "Only Nccl & Gloo backend support DistributedDataParallel",
        )
        def test_static_graph_multi_forward(self):
            """With ``static_graph=True``, running two forwards before a single
            backward must still produce grads matching a local model run
            ``world_size`` times with averaged grads."""

            class Net(nn.Module):
                def __init__(self) -> None:
                    super().__init__()
                    self.lin = nn.Linear(10, 10)
                    self.relu = nn.ReLU()

                def forward(self, x):
                    return self.relu(self.lin(x))

            torch.cuda.set_device(self.rank)
            # NOTE(review): `%` binds tighter than `<<`, so this seeds with
            # 42 << (1337 % (self.rank + 1)). It does yield distinct per-rank
            # seeds, but confirm the precedence is intentional.
            torch.manual_seed(42 << 1337 % (self.rank + 1))
            model = Net().cuda(self.rank)
            local_model = copy.deepcopy(model)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[self.rank], static_graph=True
            )
            inp = torch.ones(2, 10, device="cuda")
            for _ in range(3):
                model.zero_grad()
                local_model.zero_grad()
                # Two forwards feeding one backward — only legal because the
                # graph is declared static.
                a = model(inp)
                b = model(inp)
                loss = a.sum() + b.sum()
                loss.backward()
                # Grads should be equal to a local model that ran through inp
                # `world_size` times and averaged grads
                if self.rank == 0:
                    inp_clone = inp.clone()
                    iters = dist.get_world_size()
                    for _ in range(iters):
                        a = local_model(inp_clone)
                        b = local_model(inp_clone)
                        loss = a.sum() + b.sum()
                        loss.backward()
                    for p in local_model.parameters():
                        p.grad.data = p.grad / iters
                    for p_ddp, p_local in zip(
                        model.parameters(), local_model.parameters(), strict=True
                    ):
                        self.assertTrue(
                            torch.allclose(p_ddp.grad, p_local.grad),
                            f"{p_ddp.grad} vs {p_local.grad}",
                        )
            dist.barrier()
        @skip_if_lt_x_gpu(2)
        @skip_but_pass_in_sandcastle_if(
            BACKEND != "nccl" and BACKEND != "gloo",
            "Only Nccl & Gloo backend support DistributedDataParallel",
        )
        def test_sync_bn_logged(self):
            """DDP logging data must report ``has_sync_bn`` False for a plain
            BatchNorm model and True after ``convert_sync_batchnorm``."""
            model = BatchNormNet()
            rank = self.rank
            # single gpu training setup
            model_gpu = model.cuda(rank)
            no_sync_bn = torch.nn.parallel.DistributedDataParallel(
                copy.deepcopy(model_gpu),
                device_ids=[self.rank],
            )
            # Default True here so a missing key fails assertFalse below.
            ddp_logging_data = no_sync_bn._get_ddp_logging_data()
            sync_bn_logged = ddp_logging_data.get("has_sync_bn", True)
            self.assertFalse(sync_bn_logged)
            model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(model_gpu)
            model_DDP = torch.nn.parallel.DistributedDataParallel(
                model_DDP,
                device_ids=[self.rank],
            )
            # Default False here so a missing key fails assertTrue below.
            ddp_logging_data = model_DDP._get_ddp_logging_data()
            sync_bn_logged = ddp_logging_data.get("has_sync_bn", False)
            self.assertTrue(sync_bn_logged)
        @skip_if_lt_x_gpu(2)
        @skip_but_pass_in_sandcastle_if(
            BACKEND not in DistTestCases.backend_feature["ddp"],
            f"The {BACKEND} backend does not support DistributedDataParallel",
        )
        def test_stateless_api_with_ddp(self):
            """``torch.func.functional_call`` on a DDP-wrapped module: the
            substituted parameters/buffer are used for the call (and receive
            grads), while the module's own state stays untouched."""

            class MockModule(torch.nn.Module):
                def __init__(self) -> None:
                    super().__init__()
                    self.l1 = torch.nn.Linear(1, 1)
                    buffer = torch.ones(1)
                    self.register_buffer("buffer", buffer)

                def forward(self, x):
                    return self.l1(x) + self.buffer

            device = self.rank
            module = MockModule().to(device)
            module = torch.nn.parallel.DistributedDataParallel(
                module, device_ids=[device]
            )
            x = torch.rand((1, 1)).to(device)
            # Substitute weight=1, bias=0, buffer=0 so the call is identity:
            # functional_call(module, parameters, x) == x.
            weight = torch.tensor([[1.0]], device=device, requires_grad=True)
            bias = torch.tensor([0.0], device=device, requires_grad=True)
            buffer = torch.tensor([0.0], device=device)
            # Keys are qualified through the DDP wrapper ("module." prefix).
            parameters = {
                "module.l1.weight": weight,
                "module.l1.bias": bias,
                "module.buffer": buffer,
            }
            prev_weight = module.module.l1.weight.clone()
            prev_buffer = module.module.buffer.clone()

            res = torch.func.functional_call(module, parameters, x)
            self.assertEqual(x, res)
            # check that the weight remain unmodified
            cur_weight = module.module.l1.weight
            cur_buffer = module.module.buffer
            self.assertEqual(cur_weight, prev_weight)
            self.assertEqual(cur_buffer, prev_buffer)
            # run a backward pass and check the gradients
            res.backward()
            self.assertIsNotNone(weight.grad)
            self.assertIsNotNone(bias.grad)
            # Gradient was not calculated for the module stated and buffers
            self.assertIsNone(buffer.grad)
            self.assertIsNone(module.module.l1.weight.grad)
            self.assertIsNone(module.module.l1.bias.grad)
            self.assertIsNone(module.module.buffer.grad)
        @require_backend_is_available(DistTestCases.backend_feature["gpu"])
        @skip_if_lt_x_gpu(2)
        def test_ddp_forward_backward_hook(self):
            """Forward pre-hooks, forward hooks, and backward hooks registered
            on a submodule must behave identically with and without DDP."""

            class DummyTestModel(nn.Module):
                def __init__(self) -> None:
                    super().__init__()
                    # Fixed seed so local and DDP copies start identical.
                    torch.manual_seed(0)
                    self.fc = nn.Linear(2, 2)

                def forward(self, x):
                    return self.fc(x)

            def relu_hook(module, input):
                return nn.functional.relu(input[0])

            def gelu_hook(module, _input, output):
                return nn.functional.gelu(output)

            def celu_hook(module, _input, output):
                return (nn.functional.celu(output[0]),)

            local_model = DummyTestModel()
            ddp_model = DummyTestModel()
            local_model.fc.register_forward_pre_hook(relu_hook)
            local_model.fc.register_forward_hook(gelu_hook)
            ddp_model.fc.register_forward_pre_hook(relu_hook)
            ddp_model.fc.register_forward_hook(gelu_hook)
            # NOTE(review): register_backward_hook is the legacy module hook
            # API — presumably intentional here; confirm before migrating to
            # register_full_backward_hook.
            local_model.fc.register_backward_hook(celu_hook)
            ddp_model.fc.register_backward_hook(celu_hook)
            ddp_model = DistributedDataParallel(
                ddp_model.to(self.rank), device_ids=[self.rank]
            )
            input_data = torch.rand(5, 2)
            output_local = local_model(input_data)
            output_ddp = ddp_model(input_data.to(self.rank))
            self.assertEqual(output_local, output_ddp)
            output_local.sum().backward()
            output_ddp.sum().backward()
            ddp_grads = [p.grad for p in ddp_model.parameters()]
            self.assertEqual(ddp_grads[0], local_model.fc.weight.grad)
            self.assertEqual(ddp_grads[1], local_model.fc.bias.grad)
        def _test_hook_pickling(self, hook, hook_state):
            """Train with a comm hook, checkpoint (state_dict + hook +
            hook_state), reload into a second DDP model, and verify the
            restored hook/state/RNG reproduce identical training.

            Args:
                hook: DDP communication hook callable to register.
                hook_state: hook state object (must define ``__slots__`` and,
                    for PowerSGD, an ``rng`` attribute).
            """
            torch.manual_seed(0)
            learning_rate = 0.01
            chkpt_file = tempfile.gettempdir() + "/checkpoint.pt"
            rank = self.rank

            input = torch.randn(7, 1, device=rank)
            target = torch.randn(7, 5, device=rank)
            net = torch.nn.Linear(1, 5).to(rank)
            ddp_model = DistributedDataParallel(copy.deepcopy(net), device_ids=[rank])
            dummy_ddp_model = DistributedDataParallel(
                copy.deepcopy(net), device_ids=[rank]
            )
            optimizer = torch.optim.SGD(ddp_model.parameters(), lr=learning_rate)
            ddp_model.register_comm_hook(hook_state, hook)
            ddp_model.train()

            for _ in range(10):
                optimizer.zero_grad()
                out = ddp_model(input)
                loss = F.mse_loss(out, target)
                loss.backward()
                optimizer.step()

            state = {
                "state_dict": ddp_model.state_dict(),
                "comm_hook": hook,
                "comm_hook_state": hook_state,
            }

            # Only rank 0 writes the checkpoint; others wait at the barrier.
            if rank == 0:
                with self.assertLogs("torch.distributed") as captured:
                    torch.save(state, chkpt_file)

                # Check that the logger has only one entry
                self.assertEqual(len(captured.records), 1)
                # Check that the logger has an expected entry
                self.assertEqual(
                    captured.records[0].getMessage(),
                    "NOTE: Process group is not serializable and excluded from a saved state.",
                )

            dist.barrier()
            map_location = {"cuda:0": f"cuda:{rank:d}"}
            with self.assertLogs("torch.distributed") as captured:
                checkpoint = torch.load(chkpt_file, map_location=map_location)

            # Check that the logger has only one entry
            self.assertEqual(len(captured.records), 1)
            # Check that the logger has an expected entry
            self.assertEqual(
                captured.records[0].getMessage(),
                "NOTE: Process group will be set to a default group (i.e. the world size).\
If a different group is desired, please set `self.process_group` after PowerSGD state is loaded.",
            )

            dummy_ddp_model.load_state_dict(checkpoint["state_dict"])
            dummy_hook = checkpoint["comm_hook"]
            dummy_hook_state = checkpoint["comm_hook_state"]
            dummy_optimizer = torch.optim.SGD(
                dummy_ddp_model.parameters(), lr=learning_rate
            )

            # Check that loaded function is correct
            self.assertEqual(dummy_hook.__qualname__, hook.__qualname__)

            # Check that all slots' keys were restored correctly
            self.assertEqual(hook_state.__slots__, dummy_hook_state.__slots__)

            # Check that all slots' attributes are restored correctly
            # Excluding ``process_group`` and ``rng``.
            for entry in dummy_hook_state.__slots__:
                if entry != "process_group" and entry != "rng":
                    self.assertEqual(
                        getattr(dummy_hook_state, entry), getattr(hook_state, entry)
                    )

            # Check that ``process_group`` was set to default
            self.assertEqual(dummy_hook_state.process_group, _get_default_group())

            # Check that a random state was restored properly:
            # ``np.random.RandomState.get_state`` returns a tuple with entries:
            # ``bit_generator`` - str,
            # ``state.key`` - ndarray dtype[uint32],
            # ``state.pos`` - int,
            # ``has_gauss`` - int,
            # ``gauss`` - float
            # (refer to https://github.com/numpy/numpy/blob/266aad7478bc7fbcc55eea7f942a0d373b838396/numpy/random/mtrand.pyi)
            # To make sure random state was restored properly, all entries should equal the original
            for entry1, entry2 in zip(
                hook_state.rng.get_state(),
                dummy_hook_state.rng.get_state(),
                strict=True,
            ):
                np.testing.assert_array_equal(entry1, entry2)

            dummy_ddp_model.register_comm_hook(dummy_hook_state, dummy_hook)
            dummy_ddp_model.train()

            # Train both models in lockstep; restored state must keep them
            # bit-identical.
            for _ in range(10):
                optimizer.zero_grad()
                dummy_optimizer.zero_grad()
                out_origin = ddp_model(input)
                out_dummy = dummy_ddp_model(input)
                loss_origin = F.mse_loss(out_origin, target)
                loss_dummy = F.mse_loss(out_dummy, target)
                loss_origin.backward()
                loss_dummy.backward()
                optimizer.step()
                dummy_optimizer.step()

            # Check that gradients after 10 epochs are the same
            for orig_param, dummy_param in zip(
                ddp_model.parameters(), dummy_ddp_model.parameters(), strict=True
            ):
                self.assertEqual(orig_param.grad, dummy_param.grad)

            dist.barrier()
            if rank == 0:
                os.remove(chkpt_file)
        @skip_but_pass_in_sandcastle_if(
            BACKEND not in DistTestCases.backend_feature["cuda"],
            f"The {BACKEND} backend does not support DDP communication hook on CUDA devices",
        )
        @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
        @skip_but_pass_in_sandcastle_if(True, "Skipped due to flakiness")
        def test_ddp_hook_pickling_powerSGD(self):
            """Checkpoint/restore round-trip of the PowerSGD comm hook and its
            state via ``_test_hook_pickling``."""
            hook = powerSGD.powerSGD_hook
            powersgd_state = powerSGD.PowerSGDState(
                process_group=None,
                matrix_approximation_rank=1,
                start_powerSGD_iter=4,
            )
            self._test_hook_pickling(hook, powersgd_state)
        @require_backend_is_available(DistTestCases.backend_feature["gpu"])
        @skip_if_lt_x_gpu(2)
        def test_ddp_device_mesh_initialization(self):
            """
            Test DDP with device_mesh initialization.

            Covers: accepting a 1D mesh, rejecting process_group+device_mesh
            together, and rejecting meshes with more than one dimension.
            """
            world_size = int(os.environ["WORLD_SIZE"])

            from torch.distributed.device_mesh import init_device_mesh

            device_mesh = init_device_mesh("cuda", (world_size,))

            pg = _get_default_group()

            torch.cuda.set_device(self.rank)
            model = TwoLinLayerNet().cuda()
            ddp_model = torch.nn.parallel.DistributedDataParallel(
                model, device_mesh=device_mesh
            )
            self.assertEqual(ddp_model.device_mesh, device_mesh)

            # process_group and device_mesh are mutually exclusive.
            with self.assertRaisesRegex(
                RuntimeError,
                "Cannot specify both process_group and device_mesh arguments.",
            ):
                ddp_model = torch.nn.parallel.DistributedDataParallel(
                    model, process_group=pg, device_mesh=device_mesh
                )

            # Only 1D meshes are accepted by DDP.
            with self.assertRaisesRegex(
                RuntimeError, "Only 1D device mesh is supported,"
            ):
                device_mesh = init_device_mesh("cuda", (2, world_size // 2))
                ddp_model = torch.nn.parallel.DistributedDataParallel(
                    model, device_mesh=device_mesh
                )
        @skip_if_lt_x_gpu(2)
        @require_world_size(2)
        @skip_but_pass_in_sandcastle_if(
            BACKEND not in DistTestCases.backend_feature["ddp"],
            f"The {BACKEND} backend does not support DistributedDataParallel",
        )
        def test_ddp_compile_static_graph(self):
            "Tests that DDP works with torch compile when static_graph=True"
            model = torch.nn.Linear(10, 10).cuda(self.rank)
            # Identical weights so the two DDP instances are comparable.
            model_clone = copy.deepcopy(model)
            ddp = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.rank],
            )
            ddp_static = torch.nn.parallel.DistributedDataParallel(
                model_clone, device_ids=[self.rank], static_graph=True
            )
            ddp = torch.compile(ddp)
            ddp_static = torch.compile(ddp_static)
            input = torch.rand(10, 10).cuda(self.rank)
            # verify output and gradient parity
            for _ in range(6):
                out_ddp = ddp(input).sum()
                out_ddp_static = ddp_static(input).sum()
                self.assertEqual(out_ddp, out_ddp_static)
                out_ddp.backward()
                out_ddp_static.backward()
                for p1, p2 in zip(
                    ddp.parameters(), ddp_static.parameters(), strict=True
                ):
                    self.assertEqual(p1.grad, p2.grad)
        @skip_if_lt_x_gpu(2)
        @require_world_size(2)
        @skip_but_pass_in_sandcastle_if(
            BACKEND not in DistTestCases.backend_feature["ddp"],
            f"The {BACKEND} backend does not support DistributedDataParallel",
        )
        def test_ddp_sink_noclone(self):
            "Tests that we can configure DDP to avoid clone"

            class OpPatcher(TorchDispatchMode):
                # Dispatch mode that turns any aten.clone into a hard error,
                # proving the sink path below never clones.
                def __torch_dispatch__(self, func, types, args=(), kwargs=None):
                    func_packet = func._overloadpacket
                    if func_packet == torch.ops.aten.clone:
                        raise RuntimeError("clone encountered!")
                    kwargs = kwargs if kwargs else {}
                    return func(*args, **kwargs)

            class MyModel(torch.nn.Module):
                def __init__(self) -> None:
                    super().__init__()
                    self.fc = torch.nn.Linear(10, 10)

                def forward(self, input):
                    return self.fc(input)

            model = MyModel().cuda(self.rank)
            ddp = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.rank],
                find_unused_parameters=True,
            )
            # Disable the output-sink clone that find_unused_parameters
            # normally inserts.
            ddp._set_ddp_sink_clone(False)
            input = torch.rand(10, 10).cuda(self.rank)

            with OpPatcher():
                ddp(input).sum().backward()
        def _test_skip_all_reduce_unused_parameters(
            self,
            find_unused_parameters=False,
            static_graph=False,
            skip_all_reduce_unused_params=False,
        ):
            """Train a model with one unused layer for 10 steps and return the
            DDP instance so callers can inspect its logging data.

            Args:
                find_unused_parameters: forwarded to DDP.
                static_graph: forwarded to DDP.
                skip_all_reduce_unused_params: forwarded to DDP; when True the
                    bucket holding the unused fc2 should not be all-reduced.
            """

            class LargeNet(nn.Module):
                def __init__(self) -> None:
                    super().__init__()
                    # Sized so fc1 and fc2 land in separate buckets under
                    # bucket_cap_mb=1.5 below.
                    self.fc1 = nn.Linear(100, 5000, bias=False)
                    # fc2 is unused
                    self.fc2 = nn.Linear(100, 100, bias=False)

                def forward(self, x):
                    y = self.fc1(x)
                    return y

            torch.manual_seed(31415)
            torch.cuda.set_device(self.rank)
            model = LargeNet().cuda(self.rank)
            ddp_model = torch.nn.parallel.DistributedDataParallel(
                model,
                find_unused_parameters=find_unused_parameters,
                static_graph=static_graph,
                bucket_cap_mb=1.5,
                skip_all_reduce_unused_params=skip_all_reduce_unused_params,
            )

            random_input = torch.randn(20, 100, device=self.rank)
            for _ in range(10):
                out = ddp_model(random_input)
                loss = out.sum()
                loss.backward()
            return ddp_model
        @require_backend_is_available(DistTestCases.backend_feature["gpu"])
        @skip_if_lt_x_gpu(2)
        def test_skip_all_reduce_unused_parameters(self):
            """With ``skip_all_reduce_unused_params=True`` only the used
            bucket is reduced (1 vs 2), and parameters still match the
            baseline run."""
            base_model = self._test_skip_all_reduce_unused_parameters(
                find_unused_parameters=True, static_graph=False
            )
            test_model_1 = self._test_skip_all_reduce_unused_parameters(
                find_unused_parameters=True,
                static_graph=False,
                skip_all_reduce_unused_params=True,
            )

            # Baseline reduces both buckets; the skip variant reduces only the
            # bucket containing used parameters.
            self.assertEqual(
                base_model._get_ddp_logging_data().get("num_buckets_reduced"), 2
            )
            self.assertEqual(
                test_model_1._get_ddp_logging_data().get("num_buckets_reduced"), 1
            )

            for i, j in zip(
                base_model.parameters(), test_model_1.parameters(), strict=True
            ):
                self.assertEqual(i, j)
# Expand @parametrize-decorated test methods of _DistTestBase into concrete
# test cases so unittest/pytest discovery can see each parameterization.
instantiate_parametrized_tests(DistributedTest._DistTestBase)
| DistributedTest |
python | sqlalchemy__sqlalchemy | test/sql/test_identity_column.py | {
"start": 4875,
"end": 5025
} | class ____(_IdentityDDLFixture, fixtures.TestBase):
# this uses the connection dialect
__requires__ = ("identity_columns_standard",)
| IdentityDDL |
python | ipython__ipython | IPython/utils/coloransi.py | {
"start": 229,
"end": 293
} | class ____:
Normal = "\033[0m"
Red = "\033[0;31m"
| TermColors |
python | keras-team__keras | keras/src/export/openvino_test.py | {
"start": 456,
"end": 2549
} | class ____(models.Model):
def __init__(self, layer_list):
super().__init__()
self.layer_list = layer_list
def call(self, input):
output = input
for layer in self.layer_list:
output = layer(output)
return output
def get_model(type="sequential", input_shape=(10,), layer_list=None):
layer_list = layer_list or [
layers.Dense(10, activation="relu"),
layers.BatchNormalization(),
layers.Dense(1, activation="sigmoid"),
]
if type == "sequential":
return models.Sequential(layer_list)
elif type == "functional":
input = output = tree.map_shape_structure(layers.Input, input_shape)
for layer in layer_list:
output = layer(output)
return models.Model(inputs=input, outputs=output)
elif type == "subclass":
return CustomModel(layer_list)
elif type == "lstm":
# https://github.com/keras-team/keras/issues/21390
inputs = layers.Input((4, 10))
x = layers.Bidirectional(
layers.LSTM(
10,
kernel_initializer="he_normal",
return_sequences=True,
kernel_regularizer=None,
),
merge_mode="sum",
)(inputs)
outputs = layers.Bidirectional(
layers.LSTM(
10,
kernel_initializer="he_normal",
return_sequences=True,
kernel_regularizer=None,
),
merge_mode="concat",
)(x)
return models.Model(inputs=inputs, outputs=outputs)
@pytest.mark.skipif(ov is None, reason="OpenVINO is not installed")
@pytest.mark.skipif(
backend.backend() not in ("tensorflow", "openvino", "jax", "torch"),
reason=(
"`export_openvino` only currently supports"
"the tensorflow, jax, torch and openvino backends."
),
)
@pytest.mark.skipif(testing.jax_uses_gpu(), reason="Leads to core dumps on CI")
@pytest.mark.skipif(
testing.tensorflow_uses_gpu(), reason="Leads to core dumps on CI"
)
| CustomModel |
python | numba__numba | numba/core/targetconfig.py | {
"start": 3696,
"end": 9817
} | class ____(metaclass=_MetaTargetConfig):
"""Base class for ``TargetConfig``.
Subclass should fill class members with ``Option``. For example:
>>> class MyTargetConfig(TargetConfig):
>>> a_bool_option = Option(type=bool, default=False, doc="a bool")
>>> an_int_option = Option(type=int, default=0, doc="an int")
The metaclass will insert properties for each ``Option``. For example:
>>> tc = MyTargetConfig()
>>> tc.a_bool_option = True # invokes the setter
>>> print(tc.an_int_option) # print the default
"""
__slots__ = ["_values"]
# Used for compression in mangling.
# Set to -15 to disable the header and checksum for smallest output.
_ZLIB_CONFIG = {"wbits": -15}
def __init__(self, copy_from=None):
"""
Parameters
----------
copy_from : TargetConfig or None
if None, creates an empty ``TargetConfig``.
Otherwise, creates a copy.
"""
self._values = {}
if copy_from is not None:
assert isinstance(copy_from, TargetConfig)
self._values.update(copy_from._values)
def __repr__(self):
# NOTE: default options will be placed at the end and grouped inside
# a square bracket; i.e. [optname=optval, ...]
args = []
defs = []
for k in self.options:
msg = f"{k}={getattr(self, k)}"
if not self.is_set(k):
defs.append(msg)
else:
args.append(msg)
clsname = self.__class__.__name__
return f"{clsname}({', '.join(args)}, [{', '.join(defs)}])"
def __hash__(self):
return hash(tuple(sorted(self.values())))
def __eq__(self, other):
if isinstance(other, TargetConfig):
return self.values() == other.values()
else:
return NotImplemented
def values(self):
"""Returns a dict of all the values
"""
return {k: getattr(self, k) for k in self.options}
def is_set(self, name):
"""Is the option set?
"""
self._guard_option(name)
return name in self._values
def discard(self, name):
"""Remove the option by name if it is defined.
After this, the value for the option will be set to its default value.
"""
self._guard_option(name)
self._values.pop(name, None)
def inherit_if_not_set(self, name, default=_NotSet):
"""Inherit flag from ``ConfigStack``.
Parameters
----------
name : str
Option name.
default : optional
When given, it overrides the default value.
It is only used when the flag is not defined locally and there is
no entry in the ``ConfigStack``.
"""
self._guard_option(name)
if not self.is_set(name):
cstk = ConfigStack()
if cstk:
# inherit
top = cstk.top()
setattr(self, name, getattr(top, name))
elif default is not _NotSet:
setattr(self, name, default)
def copy(self):
"""Clone this instance.
"""
return type(self)(self)
def summary(self) -> str:
"""Returns a ``str`` that summarizes this instance.
In contrast to ``__repr__``, only options that are explicitly set will
be shown.
"""
args = [f"{k}={v}" for k, v in self._summary_args()]
clsname = self.__class__.__name__
return f"{clsname}({', '.join(args)})"
def _guard_option(self, name):
if name not in self.options:
msg = f"{name!r} is not a valid option for {type(self)}"
raise ValueError(msg)
def _summary_args(self):
"""returns a sorted sequence of 2-tuple containing the
``(flag_name, flag_value)`` for flag that are set with a non-default
value.
"""
args = []
for k in sorted(self.options):
opt = self.options[k]
if self.is_set(k):
flagval = getattr(self, k)
if opt.default != flagval:
v = (k, flagval)
args.append(v)
return args
@classmethod
def _make_compression_dictionary(cls) -> bytes:
"""Returns a ``bytes`` object suitable for use as a dictionary for
compression.
"""
buf = []
# include package name
buf.append("numba")
# include class name
buf.append(cls.__class__.__name__)
# include common values
buf.extend(["True", "False"])
# include all options name and their default value
for k, opt in cls.options.items():
buf.append(k)
buf.append(str(opt.default))
return ''.join(buf).encode()
def get_mangle_string(self) -> str:
"""Return a string suitable for symbol mangling.
"""
zdict = self._make_compression_dictionary()
comp = zlib.compressobj(zdict=zdict, level=zlib.Z_BEST_COMPRESSION,
**self._ZLIB_CONFIG)
# The mangled string is a compressed and base64 encoded version of the
# summary
buf = [comp.compress(self.summary().encode())]
buf.append(comp.flush())
return base64.b64encode(b''.join(buf)).decode()
@classmethod
def demangle(cls, mangled: str) -> str:
"""Returns the demangled result from ``.get_mangle_string()``
"""
# unescape _XX sequence
def repl(x):
return chr(int('0x' + x.group(0)[1:], 16))
unescaped = re.sub(r"_[a-zA-Z0-9][a-zA-Z0-9]", repl, mangled)
# decode base64
raw = base64.b64decode(unescaped)
# decompress
zdict = cls._make_compression_dictionary()
dc = zlib.decompressobj(zdict=zdict, **cls._ZLIB_CONFIG)
buf = []
while raw:
buf.append(dc.decompress(raw))
raw = dc.unconsumed_tail
buf.append(dc.flush())
return b''.join(buf).decode()
| TargetConfig |
python | huggingface__transformers | src/transformers/models/conditional_detr/modeling_conditional_detr.py | {
"start": 18645,
"end": 21204
} | class ____(nn.Module):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, embedding_dim=256):
super().__init__()
self.row_embeddings = nn.Embedding(50, embedding_dim)
self.column_embeddings = nn.Embedding(50, embedding_dim)
def forward(self, pixel_values, pixel_mask=None):
height, width = pixel_values.shape[-2:]
width_values = torch.arange(width, device=pixel_values.device)
height_values = torch.arange(height, device=pixel_values.device)
x_emb = self.column_embeddings(width_values)
y_emb = self.row_embeddings(height_values)
pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1)
pos = pos.permute(2, 0, 1)
pos = pos.unsqueeze(0)
pos = pos.repeat(pixel_values.shape[0], 1, 1, 1)
return pos
# Copied from transformers.models.detr.modeling_detr.build_position_encoding with Detr->ConditionalDetr
def build_position_encoding(config):
n_steps = config.d_model // 2
if config.position_embedding_type == "sine":
# TODO find a better way of exposing other arguments
position_embedding = ConditionalDetrSinePositionEmbedding(n_steps, normalize=True)
elif config.position_embedding_type == "learned":
position_embedding = ConditionalDetrLearnedPositionEmbedding(n_steps)
else:
raise ValueError(f"Not supported {config.position_embedding_type}")
return position_embedding
# function to generate sine positional embedding for 2d coordinates
def gen_sine_position_embeddings(pos_tensor, d_model):
scale = 2 * math.pi
dim = d_model // 2
dim_t = torch.arange(dim, dtype=torch.float32, device=pos_tensor.device)
dim_t = 10000 ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / dim)
x_embed = pos_tensor[:, :, 0] * scale
y_embed = pos_tensor[:, :, 1] * scale
pos_x = x_embed[:, :, None] / dim_t
pos_y = y_embed[:, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2)
pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2)
pos = torch.cat((pos_y, pos_x), dim=2)
return pos.to(pos_tensor.dtype)
def inverse_sigmoid(x, eps=1e-5):
x = x.clamp(min=0, max=1)
x1 = x.clamp(min=eps)
x2 = (1 - x).clamp(min=eps)
return torch.log(x1 / x2)
# Copied from transformers.models.detr.modeling_detr.DetrAttention
| ConditionalDetrLearnedPositionEmbedding |
python | getsentry__sentry | src/sentry/web/frontend/debug/debug_auth_views.py | {
"start": 903,
"end": 1515
} | class ____(View):
def get(self, request: HttpRequest) -> HttpResponse:
auth_identity = {"id": "bar@example.com", "email": "test1@example.com"}
return render_to_response(
"sentry/auth-confirm-link.html",
context={
"provider": "Okta",
"existing_user": User(email="test1@example.com", avatar_type=2),
"identity": auth_identity,
"identity_display_name": auth_identity["email"],
"identity_identifier": auth_identity["id"],
},
request=request,
)
| DebugAuthConfirmLink |
python | python-pillow__Pillow | src/PIL/PngImagePlugin.py | {
"start": 7611,
"end": 8260
} | class ____(str):
"""
Subclass of string to allow iTXt chunks to look like strings while
keeping their extra information
"""
lang: str | bytes | None
tkey: str | bytes | None
@staticmethod
def __new__(
cls, text: str, lang: str | None = None, tkey: str | None = None
) -> iTXt:
"""
:param cls: the class to use when creating the instance
:param text: value for this key
:param lang: language code
:param tkey: UTF-8 version of the key name
"""
self = str.__new__(cls, text)
self.lang = lang
self.tkey = tkey
return self
| iTXt |
python | numpy__numpy | numpy/distutils/fcompiler/__init__.py | {
"start": 1371,
"end": 1682
} | class ____(Exception):
pass
def flaglist(s):
if is_string(s):
return split_quoted(s)
else:
return s
def str2bool(s):
if is_string(s):
return strtobool(s)
return bool(s)
def is_sequence_of_strings(seq):
return is_sequence(seq) and all_strings(seq)
| CompilerNotFound |
python | wandb__wandb | wandb/vendor/pygments/lexers/templates.py | {
"start": 39463,
"end": 40092
} | class ____(DelegatingLexer):
"""
Subclass of the `SmartyLexer` that highlights unlexed data with the
`HtmlLexer`.
Nested Javascript and CSS is highlighted too.
"""
name = 'HTML+Smarty'
aliases = ['html+smarty']
alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.tpl']
mimetypes = ['text/html+smarty']
def __init__(self, **options):
super(HtmlSmartyLexer, self).__init__(HtmlLexer, SmartyLexer, **options)
def analyse_text(text):
rv = SmartyLexer.analyse_text(text) - 0.01
if html_doctype_matches(text):
rv += 0.5
return rv
| HtmlSmartyLexer |
python | sympy__sympy | sympy/matrices/expressions/trace.py | {
"start": 365,
"end": 5367
} | class ____(Expr):
"""Matrix Trace
Represents the trace of a matrix expression.
Examples
========
>>> from sympy import MatrixSymbol, Trace, eye
>>> A = MatrixSymbol('A', 3, 3)
>>> Trace(A)
Trace(A)
>>> Trace(eye(3))
Trace(Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]))
>>> Trace(eye(3)).simplify()
3
"""
is_Trace = True
is_commutative = True
def __new__(cls, mat):
mat = sympify(mat)
if not mat.is_Matrix:
raise TypeError("input to Trace, %s, is not a matrix" % str(mat))
if mat.is_square is False:
raise NonSquareMatrixError("Trace of a non-square matrix")
return Basic.__new__(cls, mat)
def _eval_transpose(self):
return self
def _eval_derivative(self, v):
from sympy.concrete.summations import Sum
from .matexpr import MatrixElement
if isinstance(v, MatrixElement):
return self.rewrite(Sum).diff(v)
expr = self.doit()
if isinstance(expr, Trace):
# Avoid looping infinitely:
return trace(expr.arg.diff(v))
return expr._eval_derivative(v)
def _eval_derivative_matrix_lines(self, x):
from sympy.tensor.array.expressions.array_expressions import ArrayTensorProduct, ArrayContraction
r = self.args[0]._eval_derivative_matrix_lines(x)
for lr in r:
if lr.higher == 1:
lr.higher = ExprBuilder(
ArrayContraction,
[
ExprBuilder(
ArrayTensorProduct,
[
lr._lines[0],
lr._lines[1],
]
),
(1, 3),
],
validator=ArrayContraction._validate
)
else:
# This is not a matrix line:
lr.higher = ExprBuilder(
ArrayContraction,
[
ExprBuilder(
ArrayTensorProduct,
[
lr._lines[0],
lr._lines[1],
lr.higher,
]
),
(1, 3), (0, 2)
]
)
lr._lines = [S.One, S.One]
lr._first_pointer_parent = lr._lines
lr._second_pointer_parent = lr._lines
lr._first_pointer_index = 0
lr._second_pointer_index = 1
return r
@property
def arg(self):
return self.args[0]
def doit(self, **hints):
if hints.get('deep', True):
arg = self.arg.doit(**hints)
result = arg._eval_trace()
if result is not None:
return result
else:
return Trace(arg)
else:
# _eval_trace would go too deep here
if isinstance(self.arg, MatrixBase):
return trace(self.arg)
else:
return Trace(self.arg)
def as_explicit(self):
return Trace(self.arg.as_explicit()).doit()
def _normalize(self):
# Normalization of trace of matrix products. Use transposition and
# cyclic properties of traces to make sure the arguments of the matrix
# product are sorted and the first argument is not a transposition.
from sympy.matrices.expressions.matmul import MatMul
from sympy.matrices.expressions.transpose import Transpose
trace_arg = self.arg
if isinstance(trace_arg, MatMul):
def get_arg_key(x):
a = trace_arg.args[x]
if isinstance(a, Transpose):
a = a.arg
return default_sort_key(a)
indmin = min(range(len(trace_arg.args)), key=get_arg_key)
if isinstance(trace_arg.args[indmin], Transpose):
trace_arg = Transpose(trace_arg).doit()
indmin = min(range(len(trace_arg.args)), key=lambda x: default_sort_key(trace_arg.args[x]))
trace_arg = MatMul.fromiter(trace_arg.args[indmin:] + trace_arg.args[:indmin])
return Trace(trace_arg)
return self
def _eval_rewrite_as_Sum(self, expr, **kwargs):
from sympy.concrete.summations import Sum
i = uniquely_named_symbol('i', [expr])
s = Sum(self.arg[i, i], (i, 0, self.arg.rows - 1))
return s.doit()
def trace(expr):
"""Trace of a Matrix. Sum of the diagonal elements.
Examples
========
>>> from sympy import trace, Symbol, MatrixSymbol, eye
>>> n = Symbol('n')
>>> X = MatrixSymbol('X', n, n) # A square matrix
>>> trace(2*X)
2*Trace(X)
>>> trace(eye(3))
3
"""
return Trace(expr).doit()
| Trace |
python | pypa__pip | src/pip/_internal/utils/logging.py | {
"start": 3759,
"end": 4178
} | class ____:
renderable: RenderableType
indent: int
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
segments = console.render(self.renderable, options)
lines = Segment.split_lines(segments)
for line in lines:
yield Segment(" " * self.indent)
yield from line
yield Segment("\n")
| IndentedRenderable |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.