Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- pllava/lib/python3.10/site-packages/sympy/solvers/__pycache__/solvers.cpython-310.pyc +3 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/__init__.py +109 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/_trace_wrapped_higher_order_op.py +127 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/common.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/cudagraphs.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/distributed.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/inductor.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/onnxrt.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/tensorrt.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/torchxla.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/tvm.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/backends/common.py +128 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/backends/cudagraphs.py +256 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/backends/debugging.py +336 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/backends/distributed.py +552 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/backends/inductor.py +12 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/backends/onnxrt.py +38 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/backends/registry.py +125 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/backends/tensorrt.py +14 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/backends/torchxla.py +47 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/backends/tvm.py +194 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/bytecode_analysis.py +257 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/callback.py +83 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/code_context.py +30 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/config.py +490 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/create_parameter_op.py +60 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/current_scope_id.py +25 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/decorators.py +580 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/device_interface.py +330 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/distributed.py +25 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py +1717 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/exc.py +454 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/external_utils.py +144 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/funcname_cache.py +57 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/guards.py +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/hooks.py +12 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/output_graph.py +2190 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/replay_record.py +112 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_dynamo.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/repro/after_dynamo.py +585 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/side_effects.py +701 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/tensor_version_op.py +59 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/test_case.py +75 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/test_minifier_common.py +249 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/types.py +96 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__init__.py +179 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/variables/base.py +385 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/variables/builder.py +0 -0
.gitattributes
CHANGED
|
@@ -307,3 +307,4 @@ pllava/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/ode.cpython-31
|
|
| 307 |
pllava/lib/python3.10/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=lfs -text
|
| 308 |
pllava/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 309 |
pllava/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_wester.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 307 |
pllava/lib/python3.10/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=lfs -text
|
| 308 |
pllava/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 309 |
pllava/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_wester.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 310 |
+
pllava/lib/python3.10/site-packages/sympy/solvers/__pycache__/solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
pllava/lib/python3.10/site-packages/sympy/solvers/__pycache__/solvers.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:360708b5090b8c8964825df333a30572d63b2f98024aa59f221ff62015db0aa4
|
| 3 |
+
size 100347
|
pllava/lib/python3.10/site-packages/torch/_dynamo/__init__.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
from . import convert_frame, eval_frame, resume_execution
|
| 4 |
+
from .backends.registry import list_backends, lookup_backend, register_backend
|
| 5 |
+
from .callback import callback_handler, on_compile_end, on_compile_start
|
| 6 |
+
from .code_context import code_context
|
| 7 |
+
from .convert_frame import replay
|
| 8 |
+
from .decorators import (
|
| 9 |
+
allow_in_graph,
|
| 10 |
+
assume_constant_result,
|
| 11 |
+
disable,
|
| 12 |
+
disallow_in_graph,
|
| 13 |
+
forbid_in_graph,
|
| 14 |
+
graph_break,
|
| 15 |
+
mark_dynamic,
|
| 16 |
+
mark_static,
|
| 17 |
+
mark_static_address,
|
| 18 |
+
maybe_mark_dynamic,
|
| 19 |
+
run,
|
| 20 |
+
substitute_in_graph,
|
| 21 |
+
)
|
| 22 |
+
from .eval_frame import (
|
| 23 |
+
_reset_guarded_backend_cache,
|
| 24 |
+
explain,
|
| 25 |
+
export,
|
| 26 |
+
is_dynamo_supported,
|
| 27 |
+
is_inductor_supported,
|
| 28 |
+
optimize,
|
| 29 |
+
optimize_assert,
|
| 30 |
+
OptimizedModule,
|
| 31 |
+
reset_code,
|
| 32 |
+
)
|
| 33 |
+
from .external_utils import is_compiling
|
| 34 |
+
from .mutation_guard import GenerationTracker
|
| 35 |
+
from .utils import graph_break_reasons, guard_failures, orig_code_map, reset_frame_count
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
# Register polyfill functions
|
| 39 |
+
from .polyfills import loader as _ # usort: skip # noqa: F401
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
__all__ = [
|
| 43 |
+
"allow_in_graph",
|
| 44 |
+
"assume_constant_result",
|
| 45 |
+
"disallow_in_graph",
|
| 46 |
+
"forbid_in_graph",
|
| 47 |
+
"substitute_in_graph",
|
| 48 |
+
"graph_break",
|
| 49 |
+
"mark_dynamic",
|
| 50 |
+
"maybe_mark_dynamic",
|
| 51 |
+
"mark_static",
|
| 52 |
+
"mark_static_address",
|
| 53 |
+
"optimize",
|
| 54 |
+
"optimize_assert",
|
| 55 |
+
"export",
|
| 56 |
+
"explain",
|
| 57 |
+
"run",
|
| 58 |
+
"replay",
|
| 59 |
+
"disable",
|
| 60 |
+
"reset",
|
| 61 |
+
"OptimizedModule",
|
| 62 |
+
"is_compiling",
|
| 63 |
+
"register_backend",
|
| 64 |
+
"list_backends",
|
| 65 |
+
"lookup_backend",
|
| 66 |
+
]
|
| 67 |
+
|
| 68 |
+
if torch.manual_seed is torch.random.manual_seed:
|
| 69 |
+
import torch.jit._builtins
|
| 70 |
+
|
| 71 |
+
# Wrap manual_seed with the disable decorator.
|
| 72 |
+
# Can't do it at its implementation due to dependency issues.
|
| 73 |
+
torch.manual_seed = torch._disable_dynamo(torch.manual_seed)
|
| 74 |
+
# Add the new manual_seed to the builtin registry.
|
| 75 |
+
torch.jit._builtins._register_builtin(torch.manual_seed, "aten::manual_seed")
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def reset() -> None:
|
| 79 |
+
"""Clear all compile caches and restore initial state"""
|
| 80 |
+
with convert_frame.compile_lock:
|
| 81 |
+
reset_code_caches()
|
| 82 |
+
convert_frame.input_codes.clear()
|
| 83 |
+
convert_frame.output_codes.clear()
|
| 84 |
+
orig_code_map.clear()
|
| 85 |
+
guard_failures.clear()
|
| 86 |
+
graph_break_reasons.clear()
|
| 87 |
+
resume_execution.ContinueExecutionCache.cache.clear()
|
| 88 |
+
_reset_guarded_backend_cache()
|
| 89 |
+
reset_frame_count()
|
| 90 |
+
torch._C._dynamo.compiled_autograd.clear_cache()
|
| 91 |
+
convert_frame.FRAME_COUNTER = 0
|
| 92 |
+
convert_frame.FRAME_COMPILE_COUNTER.clear()
|
| 93 |
+
callback_handler.clear()
|
| 94 |
+
GenerationTracker.clear()
|
| 95 |
+
torch._dynamo.utils.warn_once_cache.clear()
|
| 96 |
+
torch._dynamo.utils.user_obj_id_to_weakref.clear()
|
| 97 |
+
torch._C._autograd._saved_tensors_hooks_set_tracing(False)
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def reset_code_caches() -> None:
|
| 101 |
+
"""Clear compile caches that are keyed by code objects"""
|
| 102 |
+
with convert_frame.compile_lock:
|
| 103 |
+
for weak_code in (
|
| 104 |
+
convert_frame.input_codes.seen + convert_frame.output_codes.seen
|
| 105 |
+
):
|
| 106 |
+
code = weak_code()
|
| 107 |
+
if code:
|
| 108 |
+
reset_code(code)
|
| 109 |
+
code_context.clear()
|
pllava/lib/python3.10/site-packages/torch/_dynamo/_trace_wrapped_higher_order_op.py
ADDED
|
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch
|
| 3 |
+
from torch._C import DispatchKey
|
| 4 |
+
from torch._higher_order_ops.utils import autograd_not_implemented
|
| 5 |
+
from torch._ops import HigherOrderOperator
|
| 6 |
+
from torch._subclasses import FakeTensorMode
|
| 7 |
+
from torch.fx.experimental._backward_state import BackwardState
|
| 8 |
+
from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode, track_tensor_tree
|
| 9 |
+
from torch.utils._python_dispatch import _get_current_dispatch_mode
|
| 10 |
+
from torch.utils._pytree import tree_map_only
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
__all__ = ["trace_wrapped"]
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
# trace_wrapped(*args, fn) is equivalent to fn(*args), but with a twist:
|
| 17 |
+
# if you make_fx trace through this call, we will not actually trace into fn; instead,
|
| 18 |
+
# we will directly insert it as a call_function to fn in the graph.
|
| 19 |
+
# (Unlike make_fx, Dynamo WILL inline into fn.)
|
| 20 |
+
# You can think of this as a one off allow_in_graph equivalent for proxy tensor tracing.
|
| 21 |
+
#
|
| 22 |
+
# Because proxy tensor tracing does not actually run the function, there are
|
| 23 |
+
# requirements on the behavior of fn. We are still figuring it out, but here is the current state:
|
| 24 |
+
#
|
| 25 |
+
# 1) fn SHOULD only take a single argument, which must be a tensor
|
| 26 |
+
# 2) fn MUST return a new tensor with the same metadata as the original tensor
|
| 27 |
+
# (e.g., zeros_like(input) is a permissible implementation of fn).
|
| 28 |
+
# This is verified via an extra assert that is inserted into the traced graph.
|
| 29 |
+
# 3) fn MAY have side effects, but it MAY NOT perform metadata mutation on other tensors
|
| 30 |
+
# participating in proxy tensor tracing (it MAY mutate other tensors, it MAY mutate Python state)
|
| 31 |
+
# These requirements stem from the requirement that we need to continue performing proxy tensor tracing,
|
| 32 |
+
# which assumes accurate fake tensor metadata, without actually running fn.
|
| 33 |
+
# In the future, we may allow for a "meta" function associated with fn to allow for more interesting input-output patterns.
|
| 34 |
+
#
|
| 35 |
+
# Note that tensors / Python state are allowed to be mutated.
|
| 36 |
+
# This is relaxed constraint is not always sound, but it is sound for backward tracing with fake
|
| 37 |
+
# tensors as it takes place in AOTAutograd, as the backward pass is guaranteed not to depend on concrete
|
| 38 |
+
# tensor values (via fake tensor) or Python state (because the autograd engine doesn't depend on Python).
|
| 39 |
+
#
|
| 40 |
+
# The intended use case for this function is to allow AOTAutograd to defer complex
|
| 41 |
+
# backward hooks to compiled autograd. AOTAutograd performs a make_fx trace which preserves
|
| 42 |
+
# the function call as is in the graph, and only when we Dynamo through the backward graph in
|
| 43 |
+
# compiled autograd do we inline into the function.
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def trace_wrapped(*args, **kwargs):
|
| 47 |
+
with torch.no_grad():
|
| 48 |
+
return _trace_wrapped_op(*args, **kwargs)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class TraceWrapped(HigherOrderOperator):
|
| 52 |
+
def __init__(self):
|
| 53 |
+
super().__init__("trace_wrapped")
|
| 54 |
+
|
| 55 |
+
def __call__(self, *args, **kwargs):
|
| 56 |
+
return super().__call__(*args, **kwargs)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
# TODO(jansel): need to ensure this does not get DCEed
|
| 60 |
+
_trace_wrapped_op = TraceWrapped()
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def _assert_meta(grad, size, stride, dtype):
|
| 64 |
+
assert grad.size() == size, "size mismatch"
|
| 65 |
+
assert grad.stride() == stride, "stride mismatch"
|
| 66 |
+
assert grad.dtype == dtype, "dtype mismatch"
|
| 67 |
+
return grad
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
@_trace_wrapped_op.py_impl(ProxyTorchDispatchMode)
|
| 71 |
+
def inner_trace(mode, *args, bw_state=None, **kwargs):
|
| 72 |
+
def self_invoke(*args, **dyn_kwargs):
|
| 73 |
+
with torch.no_grad():
|
| 74 |
+
return _trace_wrapped_op(*args, **dyn_kwargs, **kwargs)
|
| 75 |
+
|
| 76 |
+
def unwrap_proxies(x):
|
| 77 |
+
if isinstance(x, torch.Tensor):
|
| 78 |
+
return mode.tracer.unwrap_proxy(x)
|
| 79 |
+
if isinstance(x, (list, tuple)):
|
| 80 |
+
return type(x)(map(unwrap_proxies, x))
|
| 81 |
+
if x is None:
|
| 82 |
+
return None
|
| 83 |
+
raise AssertionError(f"unhandled type: {type(x)}")
|
| 84 |
+
|
| 85 |
+
proxy_kwargs = {}
|
| 86 |
+
if bw_state is not None:
|
| 87 |
+
assert isinstance(bw_state, BackwardState) and bw_state.proxy is not None
|
| 88 |
+
proxy_kwargs["bw_state"] = bw_state.proxy
|
| 89 |
+
out_proxy = mode.tracer.create_proxy(
|
| 90 |
+
"call_function",
|
| 91 |
+
self_invoke,
|
| 92 |
+
unwrap_proxies(args),
|
| 93 |
+
proxy_kwargs,
|
| 94 |
+
name="trace_wrapped",
|
| 95 |
+
)
|
| 96 |
+
|
| 97 |
+
if args[0] is None:
|
| 98 |
+
grad = args[1] # module backward hooks
|
| 99 |
+
else:
|
| 100 |
+
grad = args[0] # other backward hooks
|
| 101 |
+
grad = tree_map_only(torch.Tensor, torch.empty_like, grad)
|
| 102 |
+
track_tensor_tree(grad, out_proxy, constant=None, tracer=mode.tracer)
|
| 103 |
+
return grad
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
@_trace_wrapped_op.py_impl(FakeTensorMode)
|
| 107 |
+
def inner_fake(*args, **kwargs):
|
| 108 |
+
raise RuntimeError("This op should never be invoked here")
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
@_trace_wrapped_op.py_impl(DispatchKey.CompositeExplicitAutograd)
|
| 112 |
+
def _trace_wrapped_op_dense(*args, fn, **kwargs):
|
| 113 |
+
mode = _get_current_dispatch_mode()
|
| 114 |
+
assert mode is None, "Mode should never be enabled for CPU/CUDA key"
|
| 115 |
+
return fn(*args, **kwargs)
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
_trace_wrapped_op.py_impl(DispatchKey.Autograd)(
|
| 119 |
+
autograd_not_implemented(_trace_wrapped_op, deferred_error=True)
|
| 120 |
+
)
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
@_trace_wrapped_op.py_functionalize_impl
|
| 124 |
+
def _trace_wrapped_functionalized(ctx, *args, **kwargs):
|
| 125 |
+
unwrapped_args = ctx.unwrap_tensors(args)
|
| 126 |
+
with ctx.redispatch_to_next():
|
| 127 |
+
return ctx.wrap_tensors(_trace_wrapped_op(*unwrapped_args, **kwargs))
|
pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/common.cpython-310.pyc
ADDED
|
Binary file (3.8 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/cudagraphs.cpython-310.pyc
ADDED
|
Binary file (7.25 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/distributed.cpython-310.pyc
ADDED
|
Binary file (17.4 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/inductor.cpython-310.pyc
ADDED
|
Binary file (426 Bytes). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/onnxrt.cpython-310.pyc
ADDED
|
Binary file (1.27 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/tensorrt.cpython-310.pyc
ADDED
|
Binary file (250 Bytes). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/torchxla.cpython-310.pyc
ADDED
|
Binary file (1.41 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/tvm.cpython-310.pyc
ADDED
|
Binary file (5.51 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/backends/common.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
import contextlib
|
| 4 |
+
import functools
|
| 5 |
+
import logging
|
| 6 |
+
from unittest.mock import patch
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
from torch._dynamo import disable
|
| 10 |
+
from torch._dynamo.utils import counters, defake, flatten_graph_inputs
|
| 11 |
+
from torch._functorch.aot_autograd import aot_module_simplified
|
| 12 |
+
from torch.utils._python_dispatch import _disable_current_modes
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
log = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class AotAutograd:
|
| 19 |
+
def __init__(self, **kwargs) -> None:
|
| 20 |
+
self.__name__ = "compiler_fn"
|
| 21 |
+
self.kwargs = kwargs
|
| 22 |
+
|
| 23 |
+
def __call__(self, gm: torch.fx.GraphModule, example_inputs, **kwargs):
|
| 24 |
+
if kwargs:
|
| 25 |
+
log.warning("aot_autograd-based backend ignoring extra kwargs %s", kwargs)
|
| 26 |
+
|
| 27 |
+
if any(isinstance(x, (list, tuple, dict)) for x in example_inputs):
|
| 28 |
+
return flatten_graph_inputs(
|
| 29 |
+
gm,
|
| 30 |
+
example_inputs,
|
| 31 |
+
self,
|
| 32 |
+
)
|
| 33 |
+
|
| 34 |
+
# Hack to get around circular import problems with aot_eager_decomp_partition
|
| 35 |
+
if callable(self.kwargs.get("decompositions")):
|
| 36 |
+
self.kwargs["decompositions"] = self.kwargs["decompositions"]()
|
| 37 |
+
|
| 38 |
+
# NB: dont delete counter increment
|
| 39 |
+
counters["aot_autograd"]["total"] += 1
|
| 40 |
+
use_fallback = False
|
| 41 |
+
|
| 42 |
+
if use_fallback:
|
| 43 |
+
log.debug("Unable to use AOT Autograd because graph has mutation")
|
| 44 |
+
counters["aot_autograd"]["not_ok"] += 1
|
| 45 |
+
return gm
|
| 46 |
+
|
| 47 |
+
# OK attempt to compile
|
| 48 |
+
|
| 49 |
+
def _wrapped_bw_compiler(*args, **kwargs):
|
| 50 |
+
# stop TorchDynamo from trying to compile our generated backwards pass
|
| 51 |
+
return disable(disable(bw_compiler)(*args, **kwargs))
|
| 52 |
+
|
| 53 |
+
bw_compiler = self.kwargs.get("bw_compiler") or self.kwargs["fw_compiler"]
|
| 54 |
+
self.kwargs["bw_compiler"] = _wrapped_bw_compiler
|
| 55 |
+
self.kwargs["inference_compiler"] = (
|
| 56 |
+
self.kwargs.get("inference_compiler") or self.kwargs["fw_compiler"]
|
| 57 |
+
)
|
| 58 |
+
|
| 59 |
+
from functorch.compile import nop
|
| 60 |
+
from torch._inductor.debug import enable_aot_logging
|
| 61 |
+
|
| 62 |
+
# debug asserts slow down compile time noticeably,
|
| 63 |
+
# So only default them on when the aot_eager backend is used.
|
| 64 |
+
if self.kwargs.get("fw_compiler", None) == nop:
|
| 65 |
+
patch_config = patch("functorch.compile.config.debug_assert", True)
|
| 66 |
+
else:
|
| 67 |
+
patch_config = contextlib.nullcontext()
|
| 68 |
+
|
| 69 |
+
try:
|
| 70 |
+
# NB: NOT cloned!
|
| 71 |
+
with enable_aot_logging(), patch_config:
|
| 72 |
+
cg = aot_module_simplified(gm, example_inputs, **self.kwargs)
|
| 73 |
+
counters["aot_autograd"]["ok"] += 1
|
| 74 |
+
return disable(cg)
|
| 75 |
+
except Exception:
|
| 76 |
+
counters["aot_autograd"]["not_ok"] += 1
|
| 77 |
+
raise
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def aot_autograd(**kwargs):
|
| 81 |
+
return AotAutograd(**kwargs)
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def mem_efficient_fusion_kwargs(use_decomps):
|
| 85 |
+
from functorch.compile import (
|
| 86 |
+
default_decompositions,
|
| 87 |
+
min_cut_rematerialization_partition,
|
| 88 |
+
ts_compile,
|
| 89 |
+
)
|
| 90 |
+
|
| 91 |
+
kwargs = {
|
| 92 |
+
# these are taken from memory_efficient_fusion()
|
| 93 |
+
"fw_compiler": ts_compile,
|
| 94 |
+
"bw_compiler": ts_compile,
|
| 95 |
+
"partition_fn": min_cut_rematerialization_partition,
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
if use_decomps:
|
| 99 |
+
kwargs["decompositions"] = default_decompositions
|
| 100 |
+
|
| 101 |
+
return kwargs
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def fake_tensor_unsupported(fn):
|
| 105 |
+
"""
|
| 106 |
+
Decorator for backends that need real inputs. We swap out fake
|
| 107 |
+
tensors for zero tensors.
|
| 108 |
+
"""
|
| 109 |
+
|
| 110 |
+
@functools.wraps(fn)
|
| 111 |
+
def wrapper(model, inputs, **kwargs):
|
| 112 |
+
with _disable_current_modes():
|
| 113 |
+
inputs = list(map(defake, inputs))
|
| 114 |
+
return fn(model, inputs, **kwargs)
|
| 115 |
+
|
| 116 |
+
return wrapper
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def device_from_inputs(example_inputs) -> torch.device:
|
| 120 |
+
for x in example_inputs:
|
| 121 |
+
if hasattr(x, "device"):
|
| 122 |
+
return x.device
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def dtype_from_inputs(example_inputs) -> torch.dtype:
|
| 126 |
+
for x in example_inputs:
|
| 127 |
+
if hasattr(x, "dtype"):
|
| 128 |
+
return x.dtype
|
pllava/lib/python3.10/site-packages/torch/_dynamo/backends/cudagraphs.py
ADDED
|
@@ -0,0 +1,256 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
import functools
|
| 4 |
+
from collections import defaultdict
|
| 5 |
+
from typing import Dict, List, Optional
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
from torch._dynamo import config
|
| 9 |
+
from torch._dynamo.backends.common import aot_autograd
|
| 10 |
+
from torch._dynamo.backends.debugging import boxed_nop
|
| 11 |
+
from torch._inductor.cudagraph_utils import (
|
| 12 |
+
BoxedDeviceIndex,
|
| 13 |
+
check_multiple_devices_or_any_cpu_nodes,
|
| 14 |
+
format_default_skip_message,
|
| 15 |
+
get_mutation_stack_trace,
|
| 16 |
+
get_placeholder_info,
|
| 17 |
+
log_cudagraph_skip_and_bump_counter,
|
| 18 |
+
)
|
| 19 |
+
from torch._inductor.utils import (
|
| 20 |
+
BoxedBool,
|
| 21 |
+
count_tangents,
|
| 22 |
+
get_first_incompatible_cudagraph_node,
|
| 23 |
+
num_fw_fixed_arguments,
|
| 24 |
+
output_node,
|
| 25 |
+
)
|
| 26 |
+
from torch.multiprocessing.reductions import StorageWeakRef
|
| 27 |
+
|
| 28 |
+
from .registry import register_backend
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def find_input_mutations(g):
|
| 32 |
+
def meta_fk(meta):
|
| 33 |
+
return meta["val"] if "val" in meta else meta["fake_result"]
|
| 34 |
+
|
| 35 |
+
inputs = defaultdict(set)
|
| 36 |
+
input_idx = 0
|
| 37 |
+
mutated_inputs = set()
|
| 38 |
+
for n in g.nodes:
|
| 39 |
+
if n.op == "placeholder":
|
| 40 |
+
if isinstance(meta_fk(n.meta), torch.Tensor):
|
| 41 |
+
inputs[StorageWeakRef(meta_fk(n.meta)._typed_storage())].add(input_idx)
|
| 42 |
+
input_idx += 1
|
| 43 |
+
elif n.op == "call_function":
|
| 44 |
+
if not hasattr(n.target, "_schema"):
|
| 45 |
+
continue
|
| 46 |
+
|
| 47 |
+
schema = n.target._schema
|
| 48 |
+
for i, arg in enumerate(schema.arguments):
|
| 49 |
+
if i < len(n.args):
|
| 50 |
+
argument = n.args[i]
|
| 51 |
+
else:
|
| 52 |
+
if arg.name not in n.kwargs:
|
| 53 |
+
continue
|
| 54 |
+
argument = n.kwargs[arg.name]
|
| 55 |
+
mut_arg = False
|
| 56 |
+
if arg.alias_info:
|
| 57 |
+
if arg.alias_info.is_write:
|
| 58 |
+
mut_arg = True
|
| 59 |
+
if mut_arg:
|
| 60 |
+
# TODO: not correct for args that contain tensors in a struct
|
| 61 |
+
# like list
|
| 62 |
+
mutated_inputs |= inputs[
|
| 63 |
+
StorageWeakRef(meta_fk(argument.meta)._typed_storage())
|
| 64 |
+
]
|
| 65 |
+
|
| 66 |
+
# TODO: error on unrecognized nodes
|
| 67 |
+
return mutated_inputs
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def get_device_node_mapping(gm: torch.fx.GraphModule):
|
| 71 |
+
device_node_mapping: Dict[torch.device, torch.fx.Node] = {}
|
| 72 |
+
for n in gm.graph.nodes:
|
| 73 |
+
t = n.meta.get("val", None)
|
| 74 |
+
if isinstance(t, torch.Tensor) and t.device not in device_node_mapping:
|
| 75 |
+
device_node_mapping[t.device] = n
|
| 76 |
+
return device_node_mapping
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def check_for_mutation_ignore_cuda_graph_managed_tensor(
|
| 80 |
+
aot_model: torch.fx.GraphModule, num_fixed
|
| 81 |
+
) -> Optional[str]:
|
| 82 |
+
mutation_indices = find_input_mutations(aot_model.graph) - set(range(num_fixed))
|
| 83 |
+
if not mutation_indices:
|
| 84 |
+
return None
|
| 85 |
+
|
| 86 |
+
placeholders = get_placeholder_info(aot_model.graph)
|
| 87 |
+
return get_mutation_stack_trace(placeholders, mutation_indices)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def check_for_skip(aot_model: torch.fx.GraphModule, num_fixed) -> Optional[str]:
|
| 91 |
+
if not config.cudagraph_backend_support_input_mutation:
|
| 92 |
+
if mut_skip := check_for_mutation_ignore_cuda_graph_managed_tensor(
|
| 93 |
+
aot_model, num_fixed
|
| 94 |
+
):
|
| 95 |
+
return mut_skip
|
| 96 |
+
|
| 97 |
+
if skip := check_multiple_devices_or_any_cpu_nodes(
|
| 98 |
+
get_device_node_mapping(aot_model)
|
| 99 |
+
):
|
| 100 |
+
return skip
|
| 101 |
+
|
| 102 |
+
if node := get_first_incompatible_cudagraph_node(aot_model):
|
| 103 |
+
return format_default_skip_message(f"incompatible op ({node.name})")
|
| 104 |
+
|
| 105 |
+
return None
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def get_device_index(gm) -> int:
|
| 109 |
+
device = next(iter(get_device_node_mapping(gm)))
|
| 110 |
+
assert device.type == "cuda"
|
| 111 |
+
return device.index
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def get_stack_traces(gm) -> List[Optional[str]]:
|
| 115 |
+
output = output_node(gm)
|
| 116 |
+
assert len(output.args) == 1
|
| 117 |
+
return [
|
| 118 |
+
(arg.stack_trace if isinstance(arg, torch.fx.node.Node) else None)
|
| 119 |
+
for arg in output.args[0]
|
| 120 |
+
]
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def cudagraphs(dynamo_model, dynamo_inputs):
    """CUDA-graphs backend: compile via AOTAutograd and record the resulting
    forward/backward callables into cudagraph trees."""
    from torch._inductor.cudagraph_trees import cudagraphify_impl

    # Mutable boxes shared between the forward and backward compilers: a
    # forward-side skip disables cudagraphs for the backward too, and the
    # backward reuses the device index discovered during the forward compile.
    do_cudagraphs = BoxedBool(True)
    boxed_device_index = BoxedDeviceIndex(None)

    def forward_cudagraphs(aot_model, aot_inputs, is_inference=False):
        interp = boxed_nop(aot_model, aot_inputs)
        fixed = num_fw_fixed_arguments(len(dynamo_inputs), len(aot_inputs))
        if skip_msg := check_for_skip(aot_model, fixed):
            # Disable for the backward as well, then fall back to plain interp.
            BoxedBool.disable(do_cudagraphs)
            log_cudagraph_skip_and_bump_counter(
                f"skipping cudagraphs due to {skip_msg}"
            )
            return interp

        boxed_device_index.set(get_device_index(aot_model))
        out = cudagraphify_impl(
            interp,
            aot_inputs,
            range(fixed),
            device_index=boxed_device_index.value,
            is_backward=False,
            is_inference=False,
            stack_traces=get_stack_traces(aot_model),
            placeholders=get_placeholder_info(aot_model.graph),
            mutated_input_idxs=find_input_mutations(aot_model.graph),
        )
        out._boxed_call = True
        return out

    def backward_cudagraphs(aot_model, aot_inputs):
        interp = boxed_nop(aot_model, aot_inputs)
        if not do_cudagraphs:
            # The forward compile already opted out of cudagraphs.
            return aot_model

        fixed = count_tangents(aot_model)
        if skip_msg := check_for_skip(aot_model, fixed):
            log_cudagraph_skip_and_bump_counter(
                "skipping cudagraphs due to %s", skip_msg
            )

            # See [Backward Generation Handling]
            manager = torch._inductor.cudagraph_trees.get_manager(
                boxed_device_index.value, create_if_none_exists=False
            )
            assert manager is not None

            def fn(inputs):
                # Tell the existing manager we are in the backward pass even
                # though this backward itself runs uncaptured.
                manager.set_to_running_backward()
                return aot_model(inputs)

            fn._boxed_call = True
            return fn

        out = cudagraphify_impl(
            interp,
            aot_inputs,
            range(fixed),
            device_index=get_device_index(aot_model),
            is_backward=True,
            is_inference=False,
            stack_traces=get_stack_traces(aot_model),
            placeholders=get_placeholder_info(aot_model.graph),
            mutated_input_idxs=find_input_mutations(aot_model.graph),
        )
        out._boxed_call = True
        return out

    aot_cudagraphs = aot_autograd(
        fw_compiler=forward_cudagraphs,
        bw_compiler=backward_cudagraphs,
        inference_compiler=functools.partial(forward_cudagraphs, is_inference=True),
        keep_inference_input_mutations=torch._dynamo.config.cudagraph_backend_keep_input_mutation,
    )
    return aot_cudagraphs(dynamo_model, dynamo_inputs)
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
class CudagraphsBackend:
|
| 202 |
+
compiler_name = "cudagraphs"
|
| 203 |
+
|
| 204 |
+
@staticmethod
|
| 205 |
+
def reset():
|
| 206 |
+
from torch._inductor.cudagraph_trees import reset_cudagraph_trees
|
| 207 |
+
|
| 208 |
+
reset_cudagraph_trees()
|
| 209 |
+
|
| 210 |
+
@staticmethod
|
| 211 |
+
def __call__(model, inputs):
|
| 212 |
+
return cudagraphs(model, inputs)
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
# The "cudagraphs" backend only applies CUDA graphs to the graph (no other
# compilation). It is also helpful for debugging and can serve as a perf baseline.
register_backend(name="cudagraphs", compiler_fn=CudagraphsBackend())
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
def cudagraphs_inner(model, inputs, copy_outputs=True, copy_inputs=True):
    """This isn't registered as a backend, but is used in some benchmarks"""
    assert isinstance(inputs, (list, tuple))
    # Static buffers the replayed graph will read from: either dedicated
    # zero-initialized copies, or the caller's own tensors.
    if copy_inputs:
        static_inputs = [torch.zeros_like(x) for x in inputs]
    else:
        static_inputs = list(inputs)

    # warmup: run once on a side stream before capture
    torch.cuda.synchronize()
    stream = torch.cuda.Stream()
    stream.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(stream):
        model(*inputs)
    stream.synchronize()
    torch.cuda.current_stream().wait_stream(stream)
    torch.cuda.synchronize()

    # record one execution of the model into a CUDA graph
    graph = torch.cuda.CUDAGraph()
    with torch.cuda.graph(graph, stream=stream):
        static_outputs = model(*static_inputs)
    if not isinstance(static_outputs, (list, tuple)):
        static_outputs = (static_outputs,)

    def run(*new_inputs):
        # Copy fresh inputs into the static buffers (if requested), replay
        # the captured graph, then optionally clone outputs so callers can't
        # alias the static output buffers.
        assert len(static_inputs) == len(new_inputs)
        if copy_inputs:
            for dst, src in zip(static_inputs, new_inputs):
                dst.copy_(src)
        graph.replay()
        if copy_outputs:
            return [x.clone() for x in static_outputs]
        else:
            return static_outputs

    return run
|
pllava/lib/python3.10/site-packages/torch/_dynamo/backends/debugging.py
ADDED
|
@@ -0,0 +1,336 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
import dataclasses
|
| 4 |
+
import functools
|
| 5 |
+
import logging
|
| 6 |
+
from importlib import import_module
|
| 7 |
+
from typing import Any, List, Optional
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from functorch.compile import min_cut_rematerialization_partition
|
| 11 |
+
from torch import _guards
|
| 12 |
+
from torch._functorch import config as functorch_config
|
| 13 |
+
from torch._functorch.compilers import ts_compile
|
| 14 |
+
|
| 15 |
+
from .common import aot_autograd
|
| 16 |
+
from .registry import register_debug_backend as register_backend
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
log = logging.getLogger(__name__)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
"""
|
| 23 |
+
This file contains TorchDynamo backends intended for debugging uses.
|
| 24 |
+
"""
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@register_backend
def eager(gm, fake_tensor_inputs, **kwargs):
    """Debug backend: run the captured GraphModule directly in eager mode."""
    if kwargs:
        log.warning("eager backend ignoring extra kwargs %s", kwargs)
    return gm.forward
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@register_backend
def eager_noexcept(gm, fake_tensor_inputs, **kwargs):
    """Like ``eager``, but re-raises any runtime error as a TorchDynamoException."""
    if kwargs:
        log.warning("eager_noexcept backend ignoring extra kwargs %s", kwargs)

    # This backend is intended to check that dynamo-generated GraphModules
    # do not cause errors.
    def inner(*args):
        try:
            return gm(*args)
        except Exception as e:
            raise torch._dynamo.exc.TorchDynamoException(
                "Unexpected exception when running generated GraphModule"
            ) from e

    return inner
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
@register_backend
def pre_dispatch_eager(gm, fake_tensor_inputs, **kwargs):
    """Debug backend: re-trace the graph with pre-dispatch make_fx and run that."""
    if kwargs:
        log.warning("pre_dispatch_eager backend ignoring extra kwargs %s", kwargs)

    from torch.fx.experimental.proxy_tensor import make_fx

    def runnable_gm(*args):
        return torch.fx.Interpreter(gm).run(*args)

    # Re-trace through make_fx at the pre-dispatch level and print it for
    # inspection before returning it as the compiled callable.
    pre_dispatch_gm = make_fx(runnable_gm, pre_dispatch=True)(*fake_tensor_inputs)
    pre_dispatch_gm.print_readable()

    return pre_dispatch_gm
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
@register_backend
def eager_debug(gm, fake_tensor_inputs, **kwargs):
    """Debug backend: run eagerly under SchemaCheckMode to catch bad op schemas."""
    if kwargs:
        log.warning("eager_debug backend ignoring extra kwargs %s", kwargs)

    from torch._subclasses.schema_check_mode import SchemaCheckMode

    # We could add more debugging bits here.
    # Right now, this backend can be used to check for and error on
    # custom dispatcher ops that have incorrect schemas.
    def inner(*args):
        with SchemaCheckMode():
            return torch.fx.Interpreter(gm).run(*args)

    return inner
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
@register_backend(name="ts")
def torchscript(gm, fake_tensor_inputs):
    """Debug backend "ts": script the captured GraphModule with TorchScript."""
    return torch.jit.script(gm)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
# used boxed call to discard inputs when they are no longer needed
|
| 91 |
+
def boxed_nop(fx_g, example_inputs):
|
| 92 |
+
def run(args):
|
| 93 |
+
return torch.fx.Interpreter(fx_g).boxed_run(args)
|
| 94 |
+
|
| 95 |
+
run._boxed_call = True
|
| 96 |
+
return run
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
# Useful for debugging purpose
# aot_eager uses AOT Autograd backend with nop compiler. It is helpful in debugging.
aot_eager = aot_autograd(
    fw_compiler=boxed_nop,
    partition_fn=min_cut_rematerialization_partition,
    keep_inference_input_mutations=True,
)
register_backend(name="aot_eager", compiler_fn=aot_eager)

# Same as aot_eager but with AOTAutograd's default partitioner instead of
# min-cut rematerialization.
aot_eager_default_partitioner = aot_autograd(
    fw_compiler=boxed_nop, keep_inference_input_mutations=True
)
register_backend(
    name="aot_eager_default_partitioner", compiler_fn=aot_eager_default_partitioner
)
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
# Uses TorchInductor AOT Autograd decomps and partitioner to isolate aot vs
# inductor problems.
# aot_eager_decomp_partition just replaces the inductor compiler with nop to help
# isolate inductor vs aot_eager errors
def aot_eager_decomp_partition(gm, fake_tensor_inputs, **kwargs):
    if kwargs:
        log.warning(
            "aot_eager_decomp_partition backend ignoring extra kwargs %s", kwargs
        )

    with functorch_config.patch(unlift_effect_tokens=True):
        return aot_autograd(
            # these are taken from memory_efficient_fusion()
            fw_compiler=boxed_nop,
            bw_compiler=boxed_nop,
            # NB: lambda here is to delay import of inductor
            decompositions=lambda: import_module(
                "torch._inductor.compile_fx"
            ).select_decomp_table(),
            partition_fn=functools.partial(
                min_cut_rematerialization_partition, compiler="inductor"
            ),
        )(gm, fake_tensor_inputs)
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
register_backend(
    name="aot_eager_decomp_partition", compiler_fn=aot_eager_decomp_partition
)

# AOT Autograd with torchscript backend. Default partitioner.
# aot_ts uses torchscript backend. We can use this with both nnc and nvfuser
# by using the relevant fuser with torch.jit.fuser(...)
aot_ts = aot_autograd(fw_compiler=ts_compile)
register_backend(name="aot_ts", compiler_fn=aot_ts)
|
| 151 |
+
|
| 152 |
+
# These buggy backends are used for inducing bugs so that we can test
|
| 153 |
+
# our repro extraction / minifier scripts
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
class ReluCompileError(Exception):
    """Raised by testing-only backends to simulate a compile-time failure."""
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
class TestingOnlyCompileError(Exception):
    """Raised by testing-only backends to exercise repro/minifier tooling."""
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
@register_backend
def relu_compile_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs):
    """Testing-only backend: fail compilation if the graph contains torch.relu."""
    for node in gm.graph.nodes:
        if node.target == torch.relu:
            raise ReluCompileError
    return gm
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
@register_backend
def relu_runtime_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs):
    """Testing-only backend: rewrite torch.relu into a failing assert at runtime."""
    for node in gm.graph.nodes:
        if node.target == torch.relu:
            node.target = torch._assert
            node.args = (False, "ReluRuntimeError")
    gm.recompile()
    return gm
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
@register_backend
def relu_accuracy_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs):
    """Testing-only backend: replace torch.relu with add(x, 1) to induce wrong results."""
    for node in gm.graph.nodes:
        if node.target == torch.relu:
            node.target = torch.add
            node.args = (node.args[0], 1)
    gm.recompile()

    return gm
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
@register_backend
def non_leaf_compile_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs):
    """Testing-only backend: fail compilation when any example input is a non-leaf tensor."""
    # Require at least one non-trivial thing in the graph,
    # see https://github.com/pytorch/pytorch/issues/102898
    for node in gm.graph.nodes:
        if node.op == "call_function":
            break
    else:
        return gm
    for t in example_inputs:
        if not t.is_leaf:
            raise TestingOnlyCompileError
    return gm
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
@dataclasses.dataclass
class ExplainOutput:
    """
    This is the output of :func:`torch._dynamo.explain()`
    There is no reason to create this class directly.
    """

    graphs: List[torch.fx.GraphModule]
    graph_count: int
    graph_break_count: int
    break_reasons: List[
        Any
    ]  # Type is GraphCompileReason but doesn't matter for this purpose
    op_count: int
    ops_per_graph: Optional[List[torch.fx.Node]] = None
    out_guards: Optional[List[_guards.Guard]] = None
    compile_times: Optional[str] = None

    def __str__(self) -> str:
        # Build the human-readable report section by section. The literals
        # below are part of the textual format users see; keep them stable.
        output = f"Graph Count: {self.graph_count}\n"
        output += f"Graph Break Count: {self.graph_break_count}\n"
        output += f"Op Count: {self.op_count}\n"

        output += "Break Reasons:\n"
        for idx, break_reason in enumerate(self.break_reasons):
            output += f"  Break Reason {idx+1}:\n"
            output += f"    Reason: {break_reason.reason}\n"
            output += "    User Stack:\n"
            for frame_summary in break_reason.user_stack:
                output += f"      {frame_summary}\n"

        if self.ops_per_graph is not None:
            output += "Ops per Graph:\n"
            for idx, ops in enumerate(self.ops_per_graph):
                output += f"  Ops {idx+1}:\n"
                for op in ops:
                    output += f"    {op}\n"

        if self.out_guards is not None:
            output += "Out Guards:\n"
            for i, guard in enumerate(self.out_guards):
                output += f"  Guard {i+1}:\n"
                output += f"    {str(guard)}"

        if self.compile_times is not None:
            output += f"Compile Times: {self.compile_times}\n"
        return output
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
def _explain_graph_detail(
|
| 258 |
+
gm: torch.fx.GraphModule, graphs, op_count, ops_per_graph, break_reasons
|
| 259 |
+
):
|
| 260 |
+
"""
|
| 261 |
+
This function is a utility which processes a torch.fx.GraphModule and
|
| 262 |
+
accumulates information about its ops, graph breaks, and other details. It
|
| 263 |
+
is intended to be used by the ExplainWithBackend class and
|
| 264 |
+
`torch._dynamo.explain()` to provide details from Dynamo's graph capture.
|
| 265 |
+
|
| 266 |
+
Parameters:
|
| 267 |
+
gm (torch.fx.GraphModule): The GraphModule to be processed.
|
| 268 |
+
graphs (list): A list that accumulates all the GraphModules processed.
|
| 269 |
+
op_count (int): The total count of operations in all GraphModules processed so far.
|
| 270 |
+
ops_per_graph (list): A list that accumulates the operations of each GraphModule.
|
| 271 |
+
break_reasons (list): A list that accumulates the reasons for breaks in each GraphModule.
|
| 272 |
+
|
| 273 |
+
Returns:
|
| 274 |
+
tuple: A tuple containing the processed GraphModule, the updated lists of graphs,
|
| 275 |
+
operations per graph, and break reasons, and the updated operation count.
|
| 276 |
+
"""
|
| 277 |
+
graphs.append(gm)
|
| 278 |
+
ops = [node.target for node in gm.graph.nodes if node.op == "call_function"]
|
| 279 |
+
op_count += len(ops)
|
| 280 |
+
ops_per_graph.append(ops)
|
| 281 |
+
if gm.compile_subgraph_reason.graph_break:
|
| 282 |
+
break_reasons.append(gm.compile_subgraph_reason)
|
| 283 |
+
|
| 284 |
+
return gm, graphs, op_count, ops_per_graph, break_reasons
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
class ExplainWithBackend:
    """
    This class is intended to be used as a backend for `torch.compile`. It is
    composable with other backends. When used in this way, it accumulates
    information about graph breaks, ops, and other info and provides a string
    representation summarizing this information.

    Attributes:
        backend (str): The name of the backend to use for optimization.
        graphs (list): A list of the graphs captured by TorchDynamo.
        op_count (int): The total number of operations in all optimized graphs.
        break_reasons (list): A list of graph break reasons with stack traces.

    Example Usage:
        def fn(x):
            x = torch.sigmoid(x)
            return x

        torch._dynamo.reset()
        eb = ExplainWithBackend("inductor")
        optimized_fn = torch.compile(fn, backend=eb)
        result = optimized_fn(torch.randn(5))
        print(eb.output())
    """

    def __init__(self, backend) -> None:
        from .registry import lookup_backend

        self.backend = lookup_backend(backend)
        self.graphs = []
        self.op_count = 0
        self.break_reasons = []

    def __call__(self, gm: torch.fx.GraphModule, example_inputs):
        # Record stats for this subgraph, then hand it to the wrapped backend.
        gm, self.graphs, self.op_count, _, self.break_reasons = _explain_graph_detail(
            gm, self.graphs, self.op_count, [], self.break_reasons
        )
        return self.backend(gm, example_inputs)

    def output(self) -> ExplainOutput:
        # N captured graphs implies N - 1 graph breaks.
        graph_count = len(self.graphs)
        return ExplainOutput(
            self.graphs,
            graph_count,
            graph_count - 1,
            self.break_reasons,
            self.op_count,
        )
|
pllava/lib/python3.10/site-packages/torch/_dynamo/backends/distributed.py
ADDED
|
@@ -0,0 +1,552 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
import traceback
|
| 5 |
+
from dataclasses import dataclass, field
|
| 6 |
+
from typing import Any, List, Optional
|
| 7 |
+
from unittest import mock
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from torch import fx
|
| 11 |
+
from torch._dynamo.output_graph import GraphCompileReason
|
| 12 |
+
from torch._dynamo.utils import deepcopy_to_fake_tensor, detect_fake_mode
|
| 13 |
+
from torch._logging import trace_structured
|
| 14 |
+
from torch.fx.node import Node
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
# Regular log messages should go through 'log'.
|
| 18 |
+
# ddp_graph_log is a separate artifact logger reserved for dumping graphs.
|
| 19 |
+
# See docs/source/logging.rst for more info.
|
| 20 |
+
log = logging.getLogger(__name__)
|
| 21 |
+
ddp_graph_log = torch._logging.getArtifactLogger(__name__, "ddp_graphs")
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def args_str(args):
    # a debug helper: render possibly-nested args compactly
    if torch.is_tensor(args):
        return f"T[{args.shape}]"
    if isinstance(args, tuple):
        inner = ", ".join(args_str(x) for x in args)
        return f"tuple({inner})"
    if isinstance(args, list):
        inner = ", ".join(args_str(x) for x in args)
        return f"list({inner})"
    return str(args)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
@dataclass
class Bucket:
    # Running byte total of the parameters assigned to this bucket.
    size: int = 0
    # Fully-qualified names of the parameters in this bucket.
    params: List[str] = field(default_factory=list)
    # Graph nodes assigned to this bucket.
    nodes: List[fx.Node] = field(default_factory=list)

    # param_ids is just used for unit testing
    param_ids: List = field(default_factory=list)

    # keep track of any buckets that were extended for logging purposes
    opcount_increased_to_capture_external_output: int = 0
    paramsize_before_opcount_increase: int = 0
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def bucket_has_external_output(bucket: Bucket) -> bool:
    """Return True if any node in the bucket has a user outside the bucket."""
    seen = set()
    # we want to iterate in reverse order, but clumsi-luckily the bucket.nodes list was already created backwards
    # so we don't reverse it here
    for node in bucket.nodes:
        # assume node.op != output, since those are filtered in the original iteration
        seen.add(node)
        for user in node.users:
            if user not in seen:
                return True
    return False
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def pretty_print_buckets(buckets: List[Bucket], bucket_bytes_cap: int):
    """Log a summary of DDPOptimizer's bucket assignments.

    Emits an info-level one-liner, a debug-level table of parameters per
    bucket (requires `tabulate`), and warnings for buckets that had to be
    extended beyond the byte cap.
    """
    headers = ("Index", "Size (b)", "Param Names")
    rows = []
    extended_buckets = []
    # Buckets are stored backwards; report them in forward order.
    for idx, bucket in enumerate(reversed(buckets)):
        if len(bucket.params) > 0:
            rows.append((idx, bucket.size, bucket.params[0]))
            for param in bucket.params[1:]:
                rows.append((None, None, param))
        if bucket.opcount_increased_to_capture_external_output > 0:
            extended_buckets.append(
                (
                    idx,
                    bucket.opcount_increased_to_capture_external_output,
                    bucket.size - bucket.paramsize_before_opcount_increase,
                )
            )

    if len(rows):
        log.info(
            "\nDDPOptimizer used bucket cap %s and created %d buckets. Enable debug logs for detailed bucket info.",
            bucket_bytes_cap,
            len(buckets),
        )

        if len(extended_buckets):
            log.warning(
                "Some buckets were extended beyond their requested parameter capacities"
                " in order to ensure each subgraph has an output node, required for fx graph partitioning."
                " This can be the case when a subgraph would have only contained nodes performing inplace mutation,"
                " and returning no logical outputs. This should not be a problem, unless it results in too few graph"
                " partitions for optimal DDP performance."
            )

        try:
            # tabulate is optional; detailed tables are best-effort.
            from tabulate import tabulate

            log.debug(
                "\nDDPOptimizer produced the following bucket assignments:\n%s",
                tabulate(rows, headers=headers, tablefmt="simple_grid"),
            )

            if len(extended_buckets):
                log.warning(
                    "DDPOptimizer extended these buckets to ensure per-subgraph output nodes:\n%s",
                    tabulate(
                        extended_buckets,
                        headers=("Index", "Extra Ops", "Extra Param Size (b)"),
                        tablefmt="simple_grid",
                    ),
                )
        except ImportError:
            log.debug(
                "Please `pip install tabulate` in order to display ddp bucket sizes and diagnostic information."
            )
    else:
        log.debug("DDPOptimizer captured no parameters and did not split this graph.")
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def has_higher_order_op(gm):
    # Check if there is a higher order op in the graph: a get_attr node whose
    # target resolves to a nested GraphModule.
    for node in gm.graph.nodes:
        if node.op == "get_attr":
            maybe_param = getattr(gm, node.target)
            if isinstance(maybe_param, torch.fx.GraphModule):
                return True
    return False
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
# compile each of the partitioned submodules using the user-provided compiler
|
| 133 |
+
class SubmodCompiler(torch.fx.interpreter.Interpreter):
|
| 134 |
+
def __init__(self, module, compiler, fake_mode) -> None:
|
| 135 |
+
super().__init__(module)
|
| 136 |
+
self.compiler = compiler
|
| 137 |
+
self.fake_mode = fake_mode
|
| 138 |
+
|
| 139 |
+
def compile_submod(self, input_mod, args, kwargs):
|
| 140 |
+
"""
|
| 141 |
+
Compile the submodule,
|
| 142 |
+
using a wrapper to make sure its output is always a tuple,
|
| 143 |
+
which is required by AotAutograd based compilers
|
| 144 |
+
"""
|
| 145 |
+
assert len(kwargs) == 0, "We assume only args for these modules"
|
| 146 |
+
|
| 147 |
+
class WrapperModule(torch.nn.Module):
|
| 148 |
+
def __init__(self, submod, unwrap_singleton_tuple) -> None:
|
| 149 |
+
super().__init__()
|
| 150 |
+
self.submod = submod
|
| 151 |
+
self.unwrap_singleton_tuple = unwrap_singleton_tuple
|
| 152 |
+
|
| 153 |
+
def forward(self, *args):
|
| 154 |
+
x = self.submod(*args)
|
| 155 |
+
# TODO(whc)
|
| 156 |
+
# for some reason the isinstance check is necessary if I split one node per submod
|
| 157 |
+
# - even though I supposedly wrapped the output in a tuple in those cases, the real
|
| 158 |
+
# compiled module was still returning a tensor
|
| 159 |
+
if self.unwrap_singleton_tuple and isinstance(x, (tuple, list)):
|
| 160 |
+
return x[0]
|
| 161 |
+
return x
|
| 162 |
+
|
| 163 |
+
unwrap_singleton_tuple = False
|
| 164 |
+
for sn in input_mod.graph.nodes:
|
| 165 |
+
if sn.op == "output":
|
| 166 |
+
if not isinstance(sn.args[0], tuple):
|
| 167 |
+
unwrap_singleton_tuple = True
|
| 168 |
+
sn.args = (sn.args,)
|
| 169 |
+
|
| 170 |
+
input_mod.recompile()
|
| 171 |
+
input_mod.compile_subgraph_reason = GraphCompileReason(
|
| 172 |
+
"DDPOptimizer intentional graph-break (See Note [DDPOptimizer])."
|
| 173 |
+
" Set `torch._dynamo.config.optimize_ddp = False` to disable.",
|
| 174 |
+
[
|
| 175 |
+
# it's close to useless to get a real stacktrace here, and quite verbose.
|
| 176 |
+
traceback.FrameSummary(__file__, 0, DDPOptimizer),
|
| 177 |
+
],
|
| 178 |
+
)
|
| 179 |
+
|
| 180 |
+
wrapper = WrapperModule(
|
| 181 |
+
self.compiler(input_mod, args),
|
| 182 |
+
unwrap_singleton_tuple,
|
| 183 |
+
)
|
| 184 |
+
return wrapper
|
| 185 |
+
|
| 186 |
+
# Note:
|
| 187 |
+
#
|
| 188 |
+
# The way distributed works today around fake tensors can be somewhat confusing.
|
| 189 |
+
# Some of these codepaths are shared in both runtime, and compile time. The presence
|
| 190 |
+
# of a fake_mode, read off of fake tensor inputs, dictates how we will operate.
|
| 191 |
+
#
|
| 192 |
+
# A few things to keep in mind:
|
| 193 |
+
#
|
| 194 |
+
# 1) We invoke `compile_submod` with a real module. The output of that gets stored
|
| 195 |
+
# on the graph via `self.module.add_submodule(n.target, compiled_submod_real)`.
|
| 196 |
+
#
|
| 197 |
+
# 2) When running a call_module targeted node, if we have a fake_mode, we fakify the
|
| 198 |
+
# module we got from self.fetch_attr(n.target). Regardless of fake_mode, we then execute it.
|
| 199 |
+
#
|
| 200 |
+
# 3) Fake tensors should always be around during compile time.
|
| 201 |
+
#
|
| 202 |
+
# 4) Fake tensors should never be around at runtime.
|
| 203 |
+
#
|
| 204 |
+
# 5) We end up with a compilation mode that takes a real submodule and fake tensors,
|
| 205 |
+
# to match what aot_autograd expects. See Note: [Fake Modules and AOTAutograd]
|
| 206 |
+
def run_node(self, n: Node) -> Any:
|
| 207 |
+
args, kwargs = self.fetch_args_kwargs_from_env(n)
|
| 208 |
+
new_args = []
|
| 209 |
+
assert self.fake_mode
|
| 210 |
+
for arg in args:
|
| 211 |
+
if isinstance(arg, torch.Tensor) and not isinstance(
|
| 212 |
+
arg, torch._subclasses.FakeTensor
|
| 213 |
+
):
|
| 214 |
+
new_args.append(torch._dynamo.utils.to_fake_tensor(arg, self.fake_mode))
|
| 215 |
+
else:
|
| 216 |
+
new_args.append(arg)
|
| 217 |
+
|
| 218 |
+
log.debug("run_node %s, %s got args %s", n.op, n.target, args_str(args))
|
| 219 |
+
assert isinstance(args, tuple)
|
| 220 |
+
assert isinstance(kwargs, dict)
|
| 221 |
+
|
| 222 |
+
if n.op == "call_module":
|
| 223 |
+
real_mod = self.fetch_attr(n.target)
|
| 224 |
+
if self.fake_mode:
|
| 225 |
+
curr_submod = deepcopy_to_fake_tensor(real_mod, self.fake_mode)
|
| 226 |
+
else:
|
| 227 |
+
curr_submod = real_mod
|
| 228 |
+
|
| 229 |
+
ddp_graph_log.debug("\n---%s graph---\n%s", n.target, curr_submod.graph)
|
| 230 |
+
|
| 231 |
+
# When calling the compiler on the submod, inputs (new_args) are expected to
|
| 232 |
+
# be FakeTensors already since Dynamo would have made them FakeTensors in the
|
| 233 |
+
# non-DDP flow. However, the parameters are _not_ expected to be FakeTensors,
|
| 234 |
+
# since this wrapping happens during compilation
|
| 235 |
+
|
| 236 |
+
# Note: Returning Fake Tensors on First AOT Autograd Call
|
| 237 |
+
#
|
| 238 |
+
# Inductor will optimize strides of outputs when it deems it profitable.
|
| 239 |
+
# For instance, converting to channels last. When we split the graph here
|
| 240 |
+
# into multiple inductor compilations, we need to make sure that the
|
| 241 |
+
# output strides of one compilation is appropriately passed to the subsequent
|
| 242 |
+
# compilations. However, the mapping from inductor output to dynamo output
|
| 243 |
+
# is non-trivial due to aot_autograd's deduping, de-aliasing, mutation, re-writing,
|
| 244 |
+
# subclass handling, etc. In order to replay all this logic we set a flag such that
|
| 245 |
+
# the first invocation of inductor in aot_autograd will return Fake Tensors with
|
| 246 |
+
# appropriate strides. Then, all of aot autograd's runtime logic is replayed.
|
| 247 |
+
# This gives us the appropriately strided outputs here which will reflect runtime strides.
|
| 248 |
+
|
| 249 |
+
class FakeifyFirstAOTInvocationGuard:
|
| 250 |
+
def __init__(self) -> None:
|
| 251 |
+
self.tc = torch._guards.TracingContext.try_get()
|
| 252 |
+
assert self.tc
|
| 253 |
+
torch._guards.TracingContext.try_get().fakify_first_call = True
|
| 254 |
+
|
| 255 |
+
def __del__(self) -> None:
|
| 256 |
+
self.tc.fakify_first_call = False
|
| 257 |
+
|
| 258 |
+
# For aot_eager and other backends, tracing context is not set
|
| 259 |
+
has_tracing_context = torch._guards.TracingContext.try_get() is not None
|
| 260 |
+
if has_tracing_context:
|
| 261 |
+
g = FakeifyFirstAOTInvocationGuard()
|
| 262 |
+
|
| 263 |
+
from torch._dynamo.utils import counters
|
| 264 |
+
|
| 265 |
+
init = counters["aot_autograd"]["total"]
|
| 266 |
+
compiled_submod_real = self.compile_submod(real_mod, new_args, kwargs)
|
| 267 |
+
|
| 268 |
+
# TODO - better way of doing this?
|
| 269 |
+
# Only aot autograd handles fakifying first call
|
| 270 |
+
invoked_aot_autograd = init != counters["aot_autograd"]["total"]
|
| 271 |
+
|
| 272 |
+
# We update the original (outer) graph with a call into the compiled module
|
| 273 |
+
# instead of the uncompiled one.
|
| 274 |
+
self.module.delete_submodule(n.target)
|
| 275 |
+
n.target = "compiled_" + n.target
|
| 276 |
+
self.module.add_submodule(n.target, compiled_submod_real)
|
| 277 |
+
|
| 278 |
+
# Finally, we have to produce inputs for use compiling the next submodule,
|
| 279 |
+
# and these need to be FakeTensors, so we execute the module under fake_mode
|
| 280 |
+
# Because parameters are not fake we patch fake tensor mode to allow non fake inputs
|
| 281 |
+
with self.fake_mode, mock.patch.object(
|
| 282 |
+
self.fake_mode, "allow_non_fake_inputs", True
|
| 283 |
+
):
|
| 284 |
+
if has_tracing_context and invoked_aot_autograd:
|
| 285 |
+
out = compiled_submod_real(*new_args, **kwargs)
|
| 286 |
+
# output should be fake or subclass
|
| 287 |
+
assert all(
|
| 288 |
+
(not isinstance(t, torch.Tensor) or type(t) is not torch.Tensor)
|
| 289 |
+
for t in (out if isinstance(out, (list, tuple)) else [out])
|
| 290 |
+
)
|
| 291 |
+
return out
|
| 292 |
+
else:
|
| 293 |
+
return curr_submod(*new_args, **kwargs)
|
| 294 |
+
else:
|
| 295 |
+
# placeholder or output nodes don't need to get compiled, just executed
|
| 296 |
+
return getattr(self, n.op)(n.target, new_args, kwargs)
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
class DDPOptimizer:
|
| 300 |
+
"""Note [DDPOptimizer]
|
| 301 |
+
DDPOptimizer applies when dynamo compiles models wrapped in DistributedDataParallel (DDP),
|
| 302 |
+
breaking the dynamo graph into chunks to compile separately, with the breaks aligning to
|
| 303 |
+
the boundaries of gradient-allreduce buckets chosen by DDP.
|
| 304 |
+
|
| 305 |
+
Background/Motivation
|
| 306 |
+
- DDP uses allreduce collectives to synchronize partial gradients computed on different workers
|
| 307 |
+
- DDP groups gradient allreduces into 'buckets' to optimize communication efficiency of all-reduce
|
| 308 |
+
- Parameters grouped into buckets are assumed to be adjacent in time, so they become ready
|
| 309 |
+
at around the same time during backward and thus can share the same allreduce efficiently
|
| 310 |
+
- Allreduces must overlap with backward compute for optimal training performance
|
| 311 |
+
- DDP schedules allreduces using 'hooks' fired from the c++ autograd engine in pytorch, which
|
| 312 |
+
operates when individual grads become 'ready'
|
| 313 |
+
- Dynamo+AOTAutograd produces a single fused graph that runs 'atomically' from the perspective of the
|
| 314 |
+
autograd engine, such that all gradients become 'ready' at the same time. Hooks fire after the whole
|
| 315 |
+
fused backward function executes, preventing any overlap of compute and communication
|
| 316 |
+
|
| 317 |
+
Algorithm
|
| 318 |
+
- DDPOptimizer starts off with an FX graph traced by dynamo which represents forward. It can traverse
|
| 319 |
+
this graph in reverse order to determine the true order that gradients will become ready during backward.
|
| 320 |
+
- Parameter sizes are counted in reverse order, up to a bucket size limit, at which point a new bucket is started
|
| 321 |
+
and a graph break introduced
|
| 322 |
+
- Each of the subgraphs is compiled by the compiler provided to dynamo by the user, and then fused back together
|
| 323 |
+
into an outer module that is returned to the user
|
| 324 |
+
|
| 325 |
+
Notes
|
| 326 |
+
- It would be better to enforce (by adding an API to DDP) that the bucket splits chosen here are used by DDP,
|
| 327 |
+
and that DDP does not need to detect or optimize bucket order by observing execution at runtime, as it does
|
| 328 |
+
in eager.
|
| 329 |
+
- If Dynamo can't capture a whole graph for the portion of the model wrapped by DDP, this algorithm will currently
|
| 330 |
+
produce splits that do not necessarily align with the buckets used by DDP. This should result in performance
|
| 331 |
+
degradation approaching the baseline case where graph-splits are not used, but not worse.
|
| 332 |
+
- If the backend compiler fails to compile a single subgraph, it will execute eagerly despite the rest of the
|
| 333 |
+
subgraphs being compiled
|
| 334 |
+
- DDP has a 'parameters_and_buffers_to_ignore' field, which DDPOptimizer attempts to honor by reading markers
|
| 335 |
+
left by DDP on individual parameters. In cases where other transformations, such as reparameterization, are
|
| 336 |
+
also used, the ignore markers could be lost. If DDPOptimizer fails to ignore a parameter ignored by DDP,
|
| 337 |
+
it is not catastrophic but could impact performance by choosing sub-optimal bucket splits.
|
| 338 |
+
- DDPOptimizer always ignores all buffers, regardless of their ignore flag, since buffers do not require gradients,
|
| 339 |
+
and therefore aren't allreduced by DDP. (They are broadcast during forward, but this is not covered by
|
| 340 |
+
DDPOptimizer)
|
| 341 |
+
|
| 342 |
+
Debugging
|
| 343 |
+
- Generally, it is easiest to debug DDPOptimizer in a single process program, using pdb.
|
| 344 |
+
- In many cases, the log messages are helpful (they show bucket size assignments)-
|
| 345 |
+
just set TORCH_LOGS env to include any of 'dynamo', 'distributed', or 'dist_ddp'.
|
| 346 |
+
- See `benchmarks/dynamo/distributed.py` for a simple harness that will run a toy model or a torchbench model
|
| 347 |
+
in a single process (or with torchrun, in multiple processes)
|
| 348 |
+
|
| 349 |
+
Args:
|
| 350 |
+
bucket_bytes_cap (int): Controls the size of buckets, in bytes, used to determine graphbreaks. Should be
|
| 351 |
+
set to match the equivalent parameter on the original DDP module.
|
| 352 |
+
|
| 353 |
+
backend_compile_fn (callable): A dynamo compiler function, to be invoked to compile each subgraph.
|
| 354 |
+
|
| 355 |
+
first_bucket_cap (int): Controls the size of the first bucket. Should match DDP's first bucket cap. DDP
|
| 356 |
+
special-cases the first bucket size since it is sometimes optimal to start a small allreduce early.
|
| 357 |
+
|
| 358 |
+
"""
|
| 359 |
+
|
| 360 |
+
def __init__(
|
| 361 |
+
self,
|
| 362 |
+
bucket_bytes_cap: int,
|
| 363 |
+
backend_compile_fn,
|
| 364 |
+
first_bucket_cap: Optional[int] = None,
|
| 365 |
+
) -> None:
|
| 366 |
+
if first_bucket_cap is not None:
|
| 367 |
+
self.first_bucket_cap = first_bucket_cap
|
| 368 |
+
elif torch.distributed.is_available():
|
| 369 |
+
# this constant comes from C10D lib which is not always built
|
| 370 |
+
self.first_bucket_cap = torch.distributed._DEFAULT_FIRST_BUCKET_BYTES
|
| 371 |
+
else:
|
| 372 |
+
self.first_bucket_cap = bucket_bytes_cap
|
| 373 |
+
|
| 374 |
+
self.bucket_bytes_cap = bucket_bytes_cap
|
| 375 |
+
assert (
|
| 376 |
+
self.first_bucket_cap <= self.bucket_bytes_cap
|
| 377 |
+
), "First bucket should be smaller/equal to other buckets to get comms warmed up ASAP"
|
| 378 |
+
|
| 379 |
+
self.backend_compile_fn = backend_compile_fn
|
| 380 |
+
|
| 381 |
+
def _ignore_parameter(self, parameter):
|
| 382 |
+
return hasattr(parameter, "_ddp_ignored") and parameter._ddp_ignored
|
| 383 |
+
|
| 384 |
+
def add_param(self, bucket, param, name):
|
| 385 |
+
bucket.size += param.untyped_storage().nbytes()
|
| 386 |
+
bucket.params.append(name)
|
| 387 |
+
bucket.param_ids.append(id(param))
|
| 388 |
+
|
| 389 |
+
def add_module_params_to_bucket(self, mod, bucket, processed_modules, prefix):
|
| 390 |
+
processed_modules.add(mod)
|
| 391 |
+
for name, param in mod.named_parameters():
|
| 392 |
+
if param.requires_grad and not self._ignore_parameter(param):
|
| 393 |
+
self.add_param(bucket, param, f"{prefix}_{name}")
|
| 394 |
+
|
| 395 |
+
def add_param_args(self, bucket, node):
|
| 396 |
+
for arg in node.args:
|
| 397 |
+
if not isinstance(arg, torch.fx.node.Node):
|
| 398 |
+
continue
|
| 399 |
+
if arg.op != "placeholder":
|
| 400 |
+
continue
|
| 401 |
+
param = arg.meta["example_value"]
|
| 402 |
+
if (
|
| 403 |
+
isinstance(param, torch.nn.Parameter)
|
| 404 |
+
and param.requires_grad
|
| 405 |
+
and not self._ignore_parameter(param)
|
| 406 |
+
):
|
| 407 |
+
self.add_param(bucket, param, arg.target)
|
| 408 |
+
|
| 409 |
+
def compile_fn(self, gm: fx.GraphModule, example_inputs: List[torch.Tensor]):
|
| 410 |
+
"""
|
| 411 |
+
Implements graph splitting, first determining a set of of buckets by counting
|
| 412 |
+
parameter sizes in reverse graph order, then invoking the user/backend compiler
|
| 413 |
+
to compile each subgraph. Finally, stiches compiled graphs into one graphmodule
|
| 414 |
+
and returns its callable.
|
| 415 |
+
"""
|
| 416 |
+
if has_higher_order_op(gm):
|
| 417 |
+
# This indicates presence of a higher order op. For now, we
|
| 418 |
+
# have no way to break the higher order op into two buckets.
|
| 419 |
+
# Allowing higher order ops in the graph also requires
|
| 420 |
+
# changes in the split_module, becuase graph splitter
|
| 421 |
+
# currently assumes that all the args of all ops are
|
| 422 |
+
# tensors, but in the case of higher order ops, it could be
|
| 423 |
+
# a graph module. As a workaround, we are shortcircuiting
|
| 424 |
+
raise NotImplementedError(
|
| 425 |
+
"DDPOptimizer backend: Found a higher order op in the graph. "
|
| 426 |
+
"This is not supported. Please turn off DDP optimizer using "
|
| 427 |
+
"torch._dynamo.config.optimize_ddp=False. Note that this can "
|
| 428 |
+
"cause performance degradation because there will be one bucket "
|
| 429 |
+
"for the entire Dynamo graph. Please refer to this issue - "
|
| 430 |
+
"https://github.com/pytorch/pytorch/issues/104674."
|
| 431 |
+
)
|
| 432 |
+
|
| 433 |
+
# 1: compute the partition map according to DDP bucket logic
|
| 434 |
+
buckets = [Bucket()] # (size, param_names)
|
| 435 |
+
processed_modules = set()
|
| 436 |
+
for node in reversed(gm.graph.nodes):
|
| 437 |
+
if node.op in ("output", "placeholder"):
|
| 438 |
+
continue
|
| 439 |
+
|
| 440 |
+
if (
|
| 441 |
+
buckets[0].size >= self.bucket_bytes_cap
|
| 442 |
+
or len(buckets) == 1
|
| 443 |
+
and buckets[0].size >= self.first_bucket_cap
|
| 444 |
+
):
|
| 445 |
+
if bucket_has_external_output(buckets[0]):
|
| 446 |
+
buckets.insert(0, Bucket())
|
| 447 |
+
else:
|
| 448 |
+
# continue building this bucket past the point of filling its parameter capacity,
|
| 449 |
+
# to increase chances it contains at least one node that is either a global output or
|
| 450 |
+
# passed as input to a subsequent graph
|
| 451 |
+
|
| 452 |
+
if buckets[0].opcount_increased_to_capture_external_output == 0:
|
| 453 |
+
buckets[0].paramsize_before_opcount_increase = buckets[0].size
|
| 454 |
+
buckets[0].opcount_increased_to_capture_external_output += 1
|
| 455 |
+
|
| 456 |
+
if node.op == "call_function":
|
| 457 |
+
self.add_param_args(buckets[0], node)
|
| 458 |
+
|
| 459 |
+
elif node.op == "call_module":
|
| 460 |
+
target_mod = gm.get_submodule(node.target)
|
| 461 |
+
if target_mod not in processed_modules:
|
| 462 |
+
self.add_module_params_to_bucket(
|
| 463 |
+
target_mod, buckets[0], processed_modules, node.target
|
| 464 |
+
)
|
| 465 |
+
elif node.op == "call_method":
|
| 466 |
+
if isinstance(node.args[0].target, str):
|
| 467 |
+
target_mod = None
|
| 468 |
+
try:
|
| 469 |
+
target_mod = gm.get_submodule(node.args[0].target)
|
| 470 |
+
except AttributeError:
|
| 471 |
+
pass
|
| 472 |
+
if target_mod is not None and target_mod not in processed_modules:
|
| 473 |
+
self.add_module_params_to_bucket(
|
| 474 |
+
target_mod, buckets[0], processed_modules, node.target
|
| 475 |
+
)
|
| 476 |
+
# This handles situations like tmp = torch.mm(x, self.weight.t())
|
| 477 |
+
# t: "f32[512, 512]" = l_self_seq_2_weight.t(); l_self_seq_2_weight = None
|
| 478 |
+
# tmp: "f32[512, 512]" = torch.mm(input_2, t); input_2 = t = None
|
| 479 |
+
self.add_param_args(buckets[0], node)
|
| 480 |
+
|
| 481 |
+
elif node.op == "get_attr":
|
| 482 |
+
maybe_param = getattr(gm, node.target)
|
| 483 |
+
if (
|
| 484 |
+
isinstance(maybe_param, torch.nn.Parameter)
|
| 485 |
+
and maybe_param.requires_grad
|
| 486 |
+
and not self._ignore_parameter(maybe_param)
|
| 487 |
+
):
|
| 488 |
+
self.add_param(buckets[0], maybe_param, node.target)
|
| 489 |
+
|
| 490 |
+
# All nodes have to be mapped to a bucket, even if they don't have their own params
|
| 491 |
+
# Ignored params still end up in buckets, we just don't count them towards the capacity
|
| 492 |
+
buckets[0].nodes.append(node)
|
| 493 |
+
|
| 494 |
+
if len(buckets) > 1 and buckets[0].size == 0:
|
| 495 |
+
# we collected a small preamble graph with ops that don't include parameters, fuse it back
|
| 496 |
+
buckets[1].nodes.extend(buckets[0].nodes)
|
| 497 |
+
assert len(buckets[0].params) == 0, "Params should be empty if size is 0"
|
| 498 |
+
del buckets[0]
|
| 499 |
+
|
| 500 |
+
# stash buckets for testing/debugging purposes
|
| 501 |
+
self.buckets = buckets
|
| 502 |
+
pretty_print_buckets(buckets, self.bucket_bytes_cap)
|
| 503 |
+
|
| 504 |
+
if len(buckets) == 1:
|
| 505 |
+
# bypass split/fuse logic if there is only one bucket
|
| 506 |
+
return self.backend_compile_fn(gm, example_inputs)
|
| 507 |
+
|
| 508 |
+
# 2: partition the graphmodule according to bucket capacity
|
| 509 |
+
partition_map = {}
|
| 510 |
+
for idx, b in enumerate(buckets):
|
| 511 |
+
for node in b.nodes:
|
| 512 |
+
partition_map[node] = idx
|
| 513 |
+
|
| 514 |
+
split_gm = fx.passes.split_module.split_module(
|
| 515 |
+
gm, None, lambda node: partition_map[node]
|
| 516 |
+
)
|
| 517 |
+
|
| 518 |
+
debug_str = (
|
| 519 |
+
f"\n---orig graph---\n{gm.graph}\n"
|
| 520 |
+
+ f"\n---split graph---\n{split_gm.graph}\n"
|
| 521 |
+
)
|
| 522 |
+
for name, module in split_gm.named_modules():
|
| 523 |
+
if "." not in name and len(name):
|
| 524 |
+
# only print the submod graphs, not their children
|
| 525 |
+
debug_str += f"\n---{name} graph---\n{module.graph}\n"
|
| 526 |
+
debug_str += "\n---------------\n"
|
| 527 |
+
ddp_graph_log.debug(debug_str)
|
| 528 |
+
|
| 529 |
+
trace_structured(
|
| 530 |
+
"optimize_ddp_split_graph",
|
| 531 |
+
payload_fn=lambda: split_gm.print_readable(print_output=False),
|
| 532 |
+
)
|
| 533 |
+
for name, module in split_gm.named_modules():
|
| 534 |
+
if "." not in name and len(name):
|
| 535 |
+
trace_structured(
|
| 536 |
+
"optimize_ddp_split_child",
|
| 537 |
+
lambda: {"name": name},
|
| 538 |
+
payload_fn=lambda: module.print_readable(print_output=False),
|
| 539 |
+
)
|
| 540 |
+
|
| 541 |
+
fake_mode = detect_fake_mode(example_inputs)
|
| 542 |
+
if fake_mode is None:
|
| 543 |
+
fake_mode = torch._subclasses.fake_tensor.FakeTensorMode()
|
| 544 |
+
|
| 545 |
+
submod_compiler = SubmodCompiler(split_gm, self.backend_compile_fn, fake_mode)
|
| 546 |
+
submod_compiler.run(*example_inputs)
|
| 547 |
+
split_gm.recompile()
|
| 548 |
+
|
| 549 |
+
ddp_graph_log.debug(
|
| 550 |
+
"\n---final graph---\n%s\n---------------\n", split_gm.graph
|
| 551 |
+
)
|
| 552 |
+
return split_gm
|
pllava/lib/python3.10/site-packages/torch/_dynamo/backends/inductor.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
from torch._dynamo import register_backend
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@register_backend
|
| 8 |
+
def inductor(*args, **kwargs):
|
| 9 |
+
# do import here to avoid loading inductor into memory when it is not used
|
| 10 |
+
from torch._inductor.compile_fx import compile_fx
|
| 11 |
+
|
| 12 |
+
return compile_fx(*args, **kwargs)
|
pllava/lib/python3.10/site-packages/torch/_dynamo/backends/onnxrt.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
# This backend is maintained by ONNX team. To direct issues
|
| 4 |
+
# to the right people, please tag related GitHub issues with `module: onnx`.
|
| 5 |
+
#
|
| 6 |
+
# Maintainers' Github IDs: wschin, xadupre
|
| 7 |
+
from torch.onnx._internal.onnxruntime import (
|
| 8 |
+
is_onnxrt_backend_supported,
|
| 9 |
+
torch_compile_backend,
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
from .registry import register_backend
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def has_onnxruntime():
|
| 16 |
+
# FIXME: update test/dynamo/test_backends.py to call is_onnxrt_backend_supported()
|
| 17 |
+
return is_onnxrt_backend_supported()
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
if is_onnxrt_backend_supported():
|
| 21 |
+
register_backend(name="onnxrt", compiler_fn=torch_compile_backend)
|
| 22 |
+
else:
|
| 23 |
+
|
| 24 |
+
def information_displaying_backend(*args, **kwargs):
|
| 25 |
+
raise ImportError(
|
| 26 |
+
"onnxrt is not registered as a backend. "
|
| 27 |
+
"Please make sure all dependencies such as "
|
| 28 |
+
"numpy, onnx, onnxscript, and onnxruntime-training are installed. "
|
| 29 |
+
"Suggested procedure to fix dependency problem:\n"
|
| 30 |
+
" (1) pip or conda install numpy onnx onnxscript onnxruntime-training.\n"
|
| 31 |
+
" (2) Open a new python terminal.\n"
|
| 32 |
+
" (3) Call the API `torch.onnx.is_onnxrt_backend_supported()`:\n"
|
| 33 |
+
" (4) If it returns `True`, then you can use `onnxrt` backend.\n"
|
| 34 |
+
" (5) If it returns `False`, please execute the package importing section in "
|
| 35 |
+
"torch/onnx/_internal/onnxruntime.py under pdb line-by-line to see which import fails."
|
| 36 |
+
)
|
| 37 |
+
|
| 38 |
+
register_backend(name="onnxrt", compiler_fn=information_displaying_backend)
|
pllava/lib/python3.10/site-packages/torch/_dynamo/backends/registry.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
import functools
|
| 4 |
+
import logging
|
| 5 |
+
import sys
|
| 6 |
+
from importlib.metadata import EntryPoint
|
| 7 |
+
from typing import Callable, Dict, List, Optional, Protocol, Sequence, Tuple
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from torch import fx
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
log = logging.getLogger(__name__)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class CompiledFn(Protocol):
|
| 17 |
+
def __call__(self, *args: torch.Tensor) -> Tuple[torch.Tensor, ...]:
|
| 18 |
+
...
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
CompilerFn = Callable[[fx.GraphModule, List[torch.Tensor]], CompiledFn]
|
| 22 |
+
|
| 23 |
+
_BACKENDS: Dict[str, Optional[EntryPoint]] = {}
|
| 24 |
+
_COMPILER_FNS: Dict[str, CompilerFn] = {}
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def register_backend(
|
| 28 |
+
compiler_fn: Optional[CompilerFn] = None,
|
| 29 |
+
name: Optional[str] = None,
|
| 30 |
+
tags: Sequence[str] = (),
|
| 31 |
+
):
|
| 32 |
+
"""
|
| 33 |
+
Decorator to add a given compiler to the registry to allow calling
|
| 34 |
+
`torch.compile` with string shorthand. Note: for projects not
|
| 35 |
+
imported by default, it might be easier to pass a function directly
|
| 36 |
+
as a backend and not use a string.
|
| 37 |
+
|
| 38 |
+
Args:
|
| 39 |
+
compiler_fn: Callable taking a FX graph and fake tensor inputs
|
| 40 |
+
name: Optional name, defaults to `compiler_fn.__name__`
|
| 41 |
+
tags: Optional set of string tags to categorize backend with
|
| 42 |
+
"""
|
| 43 |
+
if compiler_fn is None:
|
| 44 |
+
# @register_backend(name="") syntax
|
| 45 |
+
return functools.partial(register_backend, name=name, tags=tags)
|
| 46 |
+
assert callable(compiler_fn)
|
| 47 |
+
name = name or compiler_fn.__name__
|
| 48 |
+
assert name not in _COMPILER_FNS, f"duplicate name: {name}"
|
| 49 |
+
if compiler_fn not in _BACKENDS:
|
| 50 |
+
_BACKENDS[name] = None
|
| 51 |
+
_COMPILER_FNS[name] = compiler_fn
|
| 52 |
+
compiler_fn._tags = tuple(tags)
|
| 53 |
+
return compiler_fn
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
register_debug_backend = functools.partial(register_backend, tags=("debug",))
|
| 57 |
+
register_experimental_backend = functools.partial(
|
| 58 |
+
register_backend, tags=("experimental",)
|
| 59 |
+
)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def lookup_backend(compiler_fn):
|
| 63 |
+
"""Expand backend strings to functions"""
|
| 64 |
+
if isinstance(compiler_fn, str):
|
| 65 |
+
if compiler_fn not in _BACKENDS:
|
| 66 |
+
_lazy_import()
|
| 67 |
+
if compiler_fn not in _BACKENDS:
|
| 68 |
+
from ..exc import InvalidBackend
|
| 69 |
+
|
| 70 |
+
raise InvalidBackend(name=compiler_fn)
|
| 71 |
+
|
| 72 |
+
if compiler_fn not in _COMPILER_FNS:
|
| 73 |
+
entry_point = _BACKENDS[compiler_fn]
|
| 74 |
+
register_backend(compiler_fn=entry_point.load(), name=compiler_fn)
|
| 75 |
+
compiler_fn = _COMPILER_FNS[compiler_fn]
|
| 76 |
+
return compiler_fn
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def list_backends(exclude_tags=("debug", "experimental")) -> List[str]:
|
| 80 |
+
"""
|
| 81 |
+
Return valid strings that can be passed to:
|
| 82 |
+
|
| 83 |
+
torch.compile(..., backend="name")
|
| 84 |
+
"""
|
| 85 |
+
_lazy_import()
|
| 86 |
+
exclude_tags = set(exclude_tags or ())
|
| 87 |
+
|
| 88 |
+
backends = [
|
| 89 |
+
name
|
| 90 |
+
for name in _BACKENDS.keys()
|
| 91 |
+
if name not in _COMPILER_FNS
|
| 92 |
+
or not exclude_tags.intersection(_COMPILER_FNS[name]._tags)
|
| 93 |
+
]
|
| 94 |
+
return sorted(backends)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
@functools.lru_cache(None)
|
| 98 |
+
def _lazy_import():
|
| 99 |
+
from .. import backends
|
| 100 |
+
from ..utils import import_submodule
|
| 101 |
+
|
| 102 |
+
import_submodule(backends)
|
| 103 |
+
|
| 104 |
+
from ..repro.after_dynamo import dynamo_minifier_backend
|
| 105 |
+
|
| 106 |
+
assert dynamo_minifier_backend is not None
|
| 107 |
+
|
| 108 |
+
_discover_entrypoint_backends()
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
@functools.lru_cache(None)
|
| 112 |
+
def _discover_entrypoint_backends():
|
| 113 |
+
# importing here so it will pick up the mocked version in test_backends.py
|
| 114 |
+
from importlib.metadata import entry_points
|
| 115 |
+
|
| 116 |
+
group_name = "torch_dynamo_backends"
|
| 117 |
+
if sys.version_info < (3, 10):
|
| 118 |
+
eps = entry_points()
|
| 119 |
+
eps = eps[group_name] if group_name in eps else []
|
| 120 |
+
eps = {ep.name: ep for ep in eps}
|
| 121 |
+
else:
|
| 122 |
+
eps = entry_points(group=group_name)
|
| 123 |
+
eps = {name: eps[name] for name in eps.names}
|
| 124 |
+
for backend_name in eps:
|
| 125 |
+
_BACKENDS[backend_name] = eps[backend_name]
|
pllava/lib/python3.10/site-packages/torch/_dynamo/backends/tensorrt.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
# import torch # type: ignore[import]
|
| 4 |
+
# from .common import device_from_inputs, fake_tensor_unsupported # type: ignore[import]
|
| 5 |
+
# from .registry import register_backend # type: ignore[import]
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
Placeholder for TensorRT backend for dynamo via torch-tensorrt
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
# @register_backend
|
| 12 |
+
# def tensorrt(gm, example_inputs):
|
| 13 |
+
# import torch_tensorrt # type: ignore[import]
|
| 14 |
+
# pass
|
pllava/lib/python3.10/site-packages/torch/_dynamo/backends/torchxla.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
|
| 5 |
+
from functorch.compile import make_boxed_func
|
| 6 |
+
|
| 7 |
+
from ..backends.common import aot_autograd
|
| 8 |
+
from .registry import register_backend, register_experimental_backend
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
log = logging.getLogger(__name__)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@register_experimental_backend
|
| 15 |
+
def openxla_eval(model, fake_tensor_inputs):
|
| 16 |
+
return xla_backend_helper(model, fake_tensor_inputs, boxed=False)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def openxla_eval_boxed(model, fake_tensor_inputs):
|
| 20 |
+
return xla_backend_helper(model, fake_tensor_inputs, boxed=True)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def xla_backend_helper(model, fake_tensor_inputs, boxed=False):
|
| 24 |
+
try:
|
| 25 |
+
import torch_xla.core.dynamo_bridge as bridge
|
| 26 |
+
except ImportError as e:
|
| 27 |
+
raise ImportError(
|
| 28 |
+
"Please follow the instruction in https://github.com/pytorch/xla#pytorchxla to install torch_xla"
|
| 29 |
+
) from e
|
| 30 |
+
|
| 31 |
+
compiled_graph = None
|
| 32 |
+
|
| 33 |
+
def fwd(*args):
|
| 34 |
+
nonlocal model
|
| 35 |
+
nonlocal compiled_graph
|
| 36 |
+
if compiled_graph is None:
|
| 37 |
+
compiled_graph = bridge.extract_compiled_graph(model, args)
|
| 38 |
+
del model
|
| 39 |
+
return compiled_graph(*args)
|
| 40 |
+
|
| 41 |
+
return make_boxed_func(fwd) if boxed else fwd
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
openxla = aot_autograd(
|
| 45 |
+
fw_compiler=openxla_eval_boxed,
|
| 46 |
+
)
|
| 47 |
+
register_backend(name="openxla", compiler_fn=openxla)
|
pllava/lib/python3.10/site-packages/torch/_dynamo/backends/tvm.py
ADDED
|
@@ -0,0 +1,194 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
import functools
|
| 4 |
+
import importlib
|
| 5 |
+
import logging
|
| 6 |
+
import os
|
| 7 |
+
import sys
|
| 8 |
+
import tempfile
|
| 9 |
+
from types import MappingProxyType
|
| 10 |
+
from typing import Optional
|
| 11 |
+
|
| 12 |
+
import torch
|
| 13 |
+
|
| 14 |
+
from .common import device_from_inputs, fake_tensor_unsupported
|
| 15 |
+
from .registry import register_backend
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
log = logging.getLogger(__name__)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
@register_backend
|
| 22 |
+
@fake_tensor_unsupported
|
| 23 |
+
def tvm(
|
| 24 |
+
gm,
|
| 25 |
+
example_inputs,
|
| 26 |
+
*,
|
| 27 |
+
options: Optional[MappingProxyType] = MappingProxyType(
|
| 28 |
+
{"scheduler": None, "trials": 20000, "opt_level": 3}
|
| 29 |
+
),
|
| 30 |
+
):
|
| 31 |
+
import tvm # type: ignore[import]
|
| 32 |
+
from tvm import relay # type: ignore[import]
|
| 33 |
+
from tvm.contrib import graph_executor # type: ignore[import]
|
| 34 |
+
|
| 35 |
+
jit_mod = torch.jit.trace(gm, example_inputs)
|
| 36 |
+
device = device_from_inputs(example_inputs)
|
| 37 |
+
shape_list = [(f"inp_{idx}", i.shape) for idx, i in enumerate(example_inputs)]
|
| 38 |
+
example_outputs = gm(*example_inputs)
|
| 39 |
+
if len(example_outputs) == 0:
|
| 40 |
+
log.warning("Explicitly fall back to eager due to zero output")
|
| 41 |
+
return gm.forward
|
| 42 |
+
mod, params = relay.frontend.from_pytorch(jit_mod, shape_list)
|
| 43 |
+
if device.type == "cuda":
|
| 44 |
+
dev = tvm.cuda(device.index)
|
| 45 |
+
target = tvm.target.cuda()
|
| 46 |
+
else:
|
| 47 |
+
dev = tvm.cpu(0)
|
| 48 |
+
target = tvm.target.Target(llvm_target())
|
| 49 |
+
|
| 50 |
+
scheduler = options.get("scheduler", None)
|
| 51 |
+
if scheduler is None:
|
| 52 |
+
scheduler = os.environ.get("TVM_SCHEDULER", None)
|
| 53 |
+
|
| 54 |
+
trials = options.get("trials", 20000)
|
| 55 |
+
opt_level = options.get("opt_level", 3)
|
| 56 |
+
|
| 57 |
+
if scheduler == "auto_scheduler":
|
| 58 |
+
from tvm import auto_scheduler
|
| 59 |
+
|
| 60 |
+
log_file = tempfile.NamedTemporaryFile()
|
| 61 |
+
|
| 62 |
+
if not os.path.exists(log_file):
|
| 63 |
+
tasks, task_weights = auto_scheduler.extract_tasks(
|
| 64 |
+
mod["main"], params, target
|
| 65 |
+
)
|
| 66 |
+
for task in tasks:
|
| 67 |
+
print(task.compute_dag)
|
| 68 |
+
else:
|
| 69 |
+
print("No tasks")
|
| 70 |
+
if len(tasks) != 0:
|
| 71 |
+
tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
|
| 72 |
+
if not os.path.exists(log_file):
|
| 73 |
+
assert trials > 0
|
| 74 |
+
tune_option = auto_scheduler.TuningOptions(
|
| 75 |
+
num_measure_trials=trials,
|
| 76 |
+
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
|
| 77 |
+
early_stopping=2000,
|
| 78 |
+
)
|
| 79 |
+
try:
|
| 80 |
+
tuner.tune(tune_option)
|
| 81 |
+
except Exception:
|
| 82 |
+
if os.path.exists(log_file):
|
| 83 |
+
os.unlink(log_file)
|
| 84 |
+
raise
|
| 85 |
+
|
| 86 |
+
with auto_scheduler.ApplyHistoryBest(log_file):
|
| 87 |
+
with tvm.transform.PassContext(
|
| 88 |
+
opt_level=opt_level, config={"relay.backend.use_auto_scheduler": True}
|
| 89 |
+
):
|
| 90 |
+
lib = relay.build(mod, target=target, params=params)
|
| 91 |
+
elif scheduler == "meta_schedule":
|
| 92 |
+
from tvm import meta_schedule as ms
|
| 93 |
+
|
| 94 |
+
with tempfile.TemporaryDirectory() as work_dir:
|
| 95 |
+
if device.type != "cuda":
|
| 96 |
+
# meta_schedule needs num-cores to be specified
|
| 97 |
+
# here we use the maximum core count
|
| 98 |
+
target = tvm.target.Target(
|
| 99 |
+
f"{llvm_target()} --num-cores {ms.utils.cpu_count(logical=False)}"
|
| 100 |
+
)
|
| 101 |
+
# TODO(shingjan): This could be replaced by tvm.contrib.torch.optimize_torch
|
| 102 |
+
# once USE_PT_TVMDSOOP is updated and turned on by default in TVM.
|
| 103 |
+
assert trials > 0
|
| 104 |
+
database = ms.relay_integration.tune_relay(
|
| 105 |
+
mod=mod,
|
| 106 |
+
target=target,
|
| 107 |
+
work_dir=work_dir,
|
| 108 |
+
max_trials_global=trials,
|
| 109 |
+
num_trials_per_iter=64,
|
| 110 |
+
params=params,
|
| 111 |
+
strategy="evolutionary",
|
| 112 |
+
opt_level=opt_level,
|
| 113 |
+
)
|
| 114 |
+
lib = ms.relay_integration.compile_relay(
|
| 115 |
+
database=database,
|
| 116 |
+
mod=mod,
|
| 117 |
+
target=target,
|
| 118 |
+
params=params,
|
| 119 |
+
opt_level=opt_level,
|
| 120 |
+
)
|
| 121 |
+
elif scheduler == "default" or not scheduler:
|
| 122 |
+
# no autotuning
|
| 123 |
+
with tvm.transform.PassContext(opt_level=opt_level):
|
| 124 |
+
lib = relay.build(mod, target=target, params=params)
|
| 125 |
+
else:
|
| 126 |
+
raise NotImplementedError(
|
| 127 |
+
"This tuning option is invalid/not implemented for torchdynamo's TVM-related backend. "
|
| 128 |
+
"There are three available options: default, auto_scheduler and meta_schedule."
|
| 129 |
+
)
|
| 130 |
+
m = graph_executor.GraphModule(lib["default"](dev))
|
| 131 |
+
|
| 132 |
+
def to_torch_tensor(nd_tensor):
|
| 133 |
+
"""A helper function to transfer a NDArray to torch.tensor."""
|
| 134 |
+
if nd_tensor.dtype == "bool":
|
| 135 |
+
# DLPack does not support boolean so it can't be handled by
|
| 136 |
+
# torch.utils.dlpack.from_pack. Workaround by going through
|
| 137 |
+
# numpy, although this brings additional data copy overhead.
|
| 138 |
+
return torch.from_numpy(nd_tensor.numpy())
|
| 139 |
+
return torch.utils.dlpack.from_dlpack(nd_tensor.to_dlpack())
|
| 140 |
+
|
| 141 |
+
def to_tvm_tensor(torch_tensor):
|
| 142 |
+
"""A helper function to transfer a torch.tensor to NDArray."""
|
| 143 |
+
if torch_tensor.dtype == torch.bool:
|
| 144 |
+
# same reason as above, fallback to numpy conversion which
|
| 145 |
+
# could introduce data copy overhead
|
| 146 |
+
return tvm.nd.array(torch_tensor.cpu().numpy())
|
| 147 |
+
return tvm.nd.from_dlpack(torch_tensor)
|
| 148 |
+
|
| 149 |
+
def exec_tvm(*i_args):
|
| 150 |
+
args = [a.contiguous() for a in i_args]
|
| 151 |
+
shape_info, _ = m.get_input_info()
|
| 152 |
+
active_inputs = {name for name, _ in shape_info.items()}
|
| 153 |
+
for idx, arg in enumerate(args, 0):
|
| 154 |
+
if arg.dim() != 0:
|
| 155 |
+
if arg.requires_grad:
|
| 156 |
+
arg = arg.detach()
|
| 157 |
+
inp_name = f"inp_{idx}"
|
| 158 |
+
if inp_name not in active_inputs:
|
| 159 |
+
log.warning(
|
| 160 |
+
"input %s skipped as not found in tvm's runtime library",
|
| 161 |
+
inp_name,
|
| 162 |
+
)
|
| 163 |
+
continue
|
| 164 |
+
m.set_input(
|
| 165 |
+
inp_name,
|
| 166 |
+
to_tvm_tensor(arg),
|
| 167 |
+
)
|
| 168 |
+
m.run()
|
| 169 |
+
return [to_torch_tensor(m.get_output(i)) for i in range(m.get_num_outputs())]
|
| 170 |
+
|
| 171 |
+
return exec_tvm
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
# Convenience backends: the ``tvm`` backend pre-bound to a specific
# autotuning scheduler via functools.partial.
tvm_meta_schedule = functools.partial(tvm, scheduler="meta_schedule")
tvm_auto_scheduler = functools.partial(tvm, scheduler="auto_scheduler")
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def has_tvm():
    """Return ``True`` when the ``tvm`` package is importable."""
    try:
        importlib.import_module("tvm")
    except ImportError:
        return False
    return True
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
@functools.lru_cache(None)
def llvm_target():
    """Pick an LLVM target string for TVM based on host CPU features.

    On Linux, inspects ``/proc/cpuinfo`` for AVX-512/AVX2 support and
    selects a matching ``-mcpu``; elsewhere falls back to a generic
    ``llvm`` target.  Cached for the process lifetime since the host CPU
    cannot change.

    Returns:
        str: an LLVM target string suitable for ``tvm.target.Target``.
    """
    if sys.platform == "linux":
        # Fix: use a context manager so the file handle is closed promptly;
        # the original left the handle to the garbage collector
        # (ResourceWarning under -X dev).
        with open("/proc/cpuinfo") as f:
            cpuinfo = f.read()
        if "avx512" in cpuinfo:
            return "llvm -mcpu=skylake-avx512"
        if "avx2" in cpuinfo:
            return "llvm -mcpu=core-avx2"
    return "llvm"
|
pllava/lib/python3.10/site-packages/torch/_dynamo/bytecode_analysis.py
ADDED
|
@@ -0,0 +1,257 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import bisect
|
| 3 |
+
import dataclasses
|
| 4 |
+
import dis
|
| 5 |
+
import sys
|
| 6 |
+
from typing import Any, Set, Union
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
# Opcodes after which control never falls through to the next instruction.
TERMINAL_OPCODES = {
    dis.opmap["RETURN_VALUE"],
    dis.opmap["JUMP_FORWARD"],
    dis.opmap["RAISE_VARARGS"],
    # TODO(jansel): double check exception handling
}
if sys.version_info >= (3, 9):
    TERMINAL_OPCODES.add(dis.opmap["RERAISE"])
if sys.version_info >= (3, 11):
    TERMINAL_OPCODES.add(dis.opmap["JUMP_BACKWARD"])
    TERMINAL_OPCODES.add(dis.opmap["JUMP_FORWARD"])
else:
    TERMINAL_OPCODES.add(dis.opmap["JUMP_ABSOLUTE"])
if sys.version_info >= (3, 12):
    TERMINAL_OPCODES.add(dis.opmap["RETURN_CONST"])
# All opcodes carrying a jump target (relative or absolute).
JUMP_OPCODES = set(dis.hasjrel + dis.hasjabs)
JUMP_OPNAMES = {dis.opname[opcode] for opcode in JUMP_OPCODES}
# Opcodes that access local variables / free (cell) variables; used by the
# liveness analysis below.
HASLOCAL = set(dis.haslocal)
HASFREE = set(dis.hasfree)

# Re-exported alias so callers can use this module's stack-effect helper.
stack_effect = dis.stack_effect
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def get_indexof(insts):
    """
    Get a mapping from instruction memory address to index in instruction list.
    Additionally checks that each instruction only appears once in the list.
    """
    index_map = {inst: pos for pos, inst in enumerate(insts)}
    # Duplicate instructions would collapse into one key.
    assert len(index_map) == len(insts)
    return index_map
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def remove_dead_code(instructions):
    """Dead code elimination.

    Marks every instruction reachable from index 0 (following fall-through,
    jump targets, and exception-table targets), then drops the rest.  On
    Python 3.11+ also repairs exception-table start/end pointers that would
    otherwise reference removed instructions.
    """
    indexof = get_indexof(instructions)
    live_code = set()

    def find_live_code(start):
        # Depth-first reachability walk over instruction indices.
        for i in range(start, len(instructions)):
            if i in live_code:
                return
            live_code.add(i)
            inst = instructions[i]
            if inst.exn_tab_entry:
                find_live_code(indexof[inst.exn_tab_entry.target])
            if inst.opcode in JUMP_OPCODES:
                find_live_code(indexof[inst.target])
            if inst.opcode in TERMINAL_OPCODES:
                # control never falls through past this instruction
                return

    find_live_code(0)

    # change exception table entries if start/end instructions are dead
    # assumes that exception table entries have been propagated,
    # e.g. with bytecode_transformation.propagate_inst_exn_table_entries,
    # and that instructions with an exn_tab_entry lies within its start/end.
    if sys.version_info >= (3, 11):
        live_idx = sorted(live_code)
        for i, inst in enumerate(instructions):
            if i in live_code and inst.exn_tab_entry:
                # find leftmost live instruction >= start
                start_idx = bisect.bisect_left(
                    live_idx, indexof[inst.exn_tab_entry.start]
                )
                assert start_idx < len(live_idx)
                # find rightmost live instruction <= end
                end_idx = (
                    bisect.bisect_right(live_idx, indexof[inst.exn_tab_entry.end]) - 1
                )
                assert end_idx >= 0
                # the live instruction must still lie within its entry's span
                assert live_idx[start_idx] <= i <= live_idx[end_idx]
                inst.exn_tab_entry.start = instructions[live_idx[start_idx]]
                inst.exn_tab_entry.end = instructions[live_idx[end_idx]]

    return [inst for i, inst in enumerate(instructions) if i in live_code]
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def remove_pointless_jumps(instructions):
    """Eliminate jumps to the next instruction"""
    useless = set()
    for cur, nxt in zip(instructions, instructions[1:]):
        # An absolute jump whose target is simply the following instruction
        # is a no-op and can be dropped.
        if cur.opname == "JUMP_ABSOLUTE" and cur.target is nxt:
            useless.add(id(cur))
    return [inst for inst in instructions if id(inst) not in useless]
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def propagate_line_nums(instructions):
    """Ensure every instruction has line number set in case some are removed"""
    last_seen = None
    for inst in instructions:
        # Remember the most recent explicit line number and stamp it onto
        # every following instruction that lacks one.
        if inst.starts_line:
            last_seen = inst.starts_line
        inst.starts_line = last_seen
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def remove_extra_line_nums(instructions):
    """Remove extra starts line properties before packing bytecode"""
    prev_line = None
    for inst in instructions:
        line = inst.starts_line
        if line is None:
            # nothing recorded here; leave untouched
            continue
        if line == prev_line:
            # redundant repeat of the current line marker
            inst.starts_line = None
        else:
            prev_line = line
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
@dataclasses.dataclass
class ReadsWrites:
    """Accumulator for the liveness walk in ``livevars_analysis``."""

    # Variable names read on this path.
    reads: Set[Any]
    # Variable names written on this path.
    writes: Set[Any]
    # Walk start indices already visited (cycle guard).
    visited: Set[Any]
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def livevars_analysis(instructions, instruction):
    """Return the set of variable names possibly read at or after *instruction*.

    Walks forward from *instruction* over locals/free-variable opcodes.
    ``must`` accumulates the straight-line path; ``may`` accumulates paths
    reached through jump or exception edges.
    """
    indexof = get_indexof(instructions)
    must = ReadsWrites(set(), set(), set())
    may = ReadsWrites(set(), set(), set())

    def walk(state, start):
        if start in state.visited:
            return
        state.visited.add(start)

        for i in range(start, len(instructions)):
            inst = instructions[i]
            if inst.opcode in HASLOCAL or inst.opcode in HASFREE:
                if "LOAD" in inst.opname or "DELETE" in inst.opname:
                    # only writes seen on the definite (must) path kill a read
                    if inst.argval not in must.writes:
                        state.reads.add(inst.argval)
                elif "STORE" in inst.opname:
                    state.writes.add(inst.argval)
                elif inst.opname == "MAKE_CELL":
                    pass
                else:
                    raise NotImplementedError(f"unhandled {inst.opname}")
            if inst.exn_tab_entry:
                walk(may, indexof[inst.exn_tab_entry.target])
            if inst.opcode in JUMP_OPCODES:
                walk(may, indexof[inst.target])
                # past a jump, fall-through execution is no longer certain
                state = may
            if inst.opcode in TERMINAL_OPCODES:
                return

    walk(must, indexof[instruction])
    return must.reads | may.reads
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
@dataclasses.dataclass
class FixedPointBox:
    """Mutable flag shared by all ``StackSize`` entries in one analysis run."""

    # Set to False whenever any interval changes, requesting another pass.
    value: bool = True
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
@dataclasses.dataclass
class StackSize:
    """Interval [low, high] of possible stack depths at one instruction."""

    low: Union[int, float]
    high: Union[int, float]
    fixed_point: FixedPointBox

    def zero(self):
        """Pin the range to exactly zero (function entry) and force a pass."""
        self.low = 0
        self.high = 0
        self.fixed_point.value = False

    def _widen(self, new_low, new_high):
        # Grow the interval to cover [new_low, new_high]; if anything moved,
        # clear the shared fixed-point flag so analysis iterates again.
        before = (self.low, self.high)
        self.low = min(self.low, new_low)
        self.high = max(self.high, new_high)
        if (self.low, self.high) != before:
            self.fixed_point.value = False

    def offset_of(self, other, n):
        """Widen to include ``other``'s range shifted by stack effect ``n``."""
        self._widen(other.low + n, other.high + n)

    def exn_tab_jump(self, depth):
        """Widen to include the fixed ``depth`` of an exception-table edge."""
        self._widen(depth, depth)
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def stacksize_analysis(instructions) -> Union[int, float]:
    """Compute the maximum stack depth required by *instructions*.

    Iteratively propagates [low, high] stack-depth intervals along
    fall-through, jump, and exception-table edges until a fixed point
    (or the 100-iteration cap) is reached, then returns the maximum.
    """
    assert instructions
    fixed_point = FixedPointBox()
    stack_sizes = {
        inst: StackSize(float("inf"), float("-inf"), fixed_point)
        for inst in instructions
    }
    # Execution enters the first instruction with an empty stack.
    stack_sizes[instructions[0]].zero()

    for _ in range(100):
        if fixed_point.value:
            break
        fixed_point.value = True

        for inst, next_inst in zip(instructions, instructions[1:] + [None]):
            stack_size = stack_sizes[inst]
            # CALL_FINALLY in Python 3.8 is handled differently when determining stack depth.
            # See https://github.com/python/cpython/blob/3.8/Python/compile.c#L5450.
            # Essentially, the stack effect of CALL_FINALLY is computed with jump=True,
            # but the resulting stack depth is propagated to the next instruction, not the
            # jump target.
            is_call_finally = (
                sys.version_info < (3, 9) and inst.opcode == dis.opmap["CALL_FINALLY"]
            )
            if inst.opcode not in TERMINAL_OPCODES:
                assert next_inst is not None, f"missing next inst: {inst}"
                # total stack effect of CALL_FINALLY and END_FINALLY in 3.8 is 0
                eff = (
                    0
                    if is_call_finally
                    else stack_effect(inst.opcode, inst.arg, jump=False)
                )
                stack_sizes[next_inst].offset_of(stack_size, eff)
            if inst.opcode in JUMP_OPCODES and not is_call_finally:
                stack_sizes[inst.target].offset_of(
                    stack_size, stack_effect(inst.opcode, inst.arg, jump=True)
                )
            if inst.exn_tab_entry:
                # see https://github.com/python/cpython/blob/3.11/Objects/exception_handling_notes.txt
                # on why depth is computed this way.
                depth = inst.exn_tab_entry.depth + int(inst.exn_tab_entry.lasti) + 1
                stack_sizes[inst.exn_tab_entry.target].exn_tab_jump(depth)

    if False:  # debugging aid: flip to dump per-instruction depth ranges
        for inst in instructions:
            stack_size = stack_sizes[inst]
            print(stack_size.low, stack_size.high, inst)

    low = min(x.low for x in stack_sizes.values())
    high = max(x.high for x in stack_sizes.values())

    assert fixed_point.value, "failed to reach fixed point"
    assert low >= 0
    return high
|
pllava/lib/python3.10/site-packages/torch/_dynamo/callback.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
class CompilationCallbackHandler:
    """Registry of hooks fired at compilation start and end."""

    def __init__(self):
        # Callbacks run in registration order.
        self.start_callbacks = []
        self.end_callbacks = []

    def register_start_callback(self, callback):
        """
        Register a callback function to be called when the compilation starts.

        Args:
        - callback (callable): The callback function to register.
        """
        self.start_callbacks.append(callback)
        return callback

    def register_end_callback(self, callback):
        """
        Register a callback function to be called when the compilation ends.

        Args:
        - callback (callable): The callback function to register.
        """
        self.end_callbacks.append(callback)
        return callback

    def remove_start_callback(self, callback):
        """
        Remove a registered start callback function.

        Args:
        - callback (callable): The callback function to remove.
        """
        self.start_callbacks.remove(callback)

    def remove_end_callback(self, callback):
        """
        Remove a registered end callback function.

        Args:
        - callback (callable): The callback function to remove.
        """
        self.end_callbacks.remove(callback)

    def run_start_callbacks(self):
        """
        Execute all registered start callbacks.
        """
        for cb in self.start_callbacks:
            cb()

    def run_end_callbacks(self):
        """
        Execute all registered end callbacks.
        """
        for cb in self.end_callbacks:
            cb()

    def clear(self):
        """
        Clear all registered callbacks.
        """
        self.start_callbacks.clear()
        self.end_callbacks.clear()
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
# Process-wide singleton used by the registration decorators in this module.
callback_handler = CompilationCallbackHandler()
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def on_compile_start(callback):
    """
    Decorator to register a callback function for the start of the compilation.
    """
    # register_start_callback already returns its argument, so the decorator
    # can simply forward its return value.
    return callback_handler.register_start_callback(callback)
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def on_compile_end(callback):
    """
    Decorator to register a callback function for the end of the compilation.
    """
    # register_end_callback already returns its argument, so the decorator
    # can simply forward its return value.
    return callback_handler.register_end_callback(callback)
|
pllava/lib/python3.10/site-packages/torch/_dynamo/code_context.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import types
|
| 3 |
+
|
| 4 |
+
from .utils import ExactWeakKeyDictionary
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class CodeContextDict:
    """Per-code-object context dicts, weakly keyed so entries die with the code."""

    def __init__(self) -> None:
        self.code_context = ExactWeakKeyDictionary()

    def has_context(self, code: types.CodeType):
        """Return whether *code* already has a context entry."""
        return code in self.code_context

    def get_context(self, code: types.CodeType):
        """Return the context dict for *code*, creating it on first access."""
        if (ctx := self.code_context.get(code)) is None:
            self.code_context[code] = ctx = {}
        return ctx

    def pop_context(self, code: types.CodeType):
        """Remove the entry for *code* and return its context dict."""
        ctx = self.get_context(code)
        self.code_context._remove_id(id(code))
        return ctx

    def clear(self):
        """Drop every stored context."""
        self.code_context.clear()
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# Process-wide singleton instance.
code_context = CodeContextDict()
|
pllava/lib/python3.10/site-packages/torch/_dynamo/config.py
ADDED
|
@@ -0,0 +1,490 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import getpass
|
| 3 |
+
import inspect
|
| 4 |
+
import os
|
| 5 |
+
import re
|
| 6 |
+
import sys
|
| 7 |
+
import tempfile
|
| 8 |
+
from os.path import abspath, dirname
|
| 9 |
+
from typing import Any, Callable, Dict, Optional, Set, Type, TYPE_CHECKING, Union
|
| 10 |
+
|
| 11 |
+
import torch
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def is_fbcode():
|
| 15 |
+
return not hasattr(torch.version, "git_version")
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# to configure logging for dynamo, aot, and inductor
|
| 19 |
+
# use the following API in the torch._logging module
|
| 20 |
+
# torch._logging.set_logs(dynamo=<level>, aot=<level>, inductor<level>)
|
| 21 |
+
# or use the environment variable TORCH_LOGS="dynamo,aot,inductor" (use a prefix + to indicate higher verbosity)
|
| 22 |
+
# see this design doc for more detailed info
|
| 23 |
+
# Design doc: https://docs.google.com/document/d/1ZRfTWKa8eaPq1AxaiHrq4ASTPouzzlPiuquSBEJYwS8/edit#
|
| 24 |
+
# the name of a file to write the logs to
|
| 25 |
+
# [@compile_ignored: debug]
|
| 26 |
+
log_file_name: Optional[str] = None
|
| 27 |
+
|
| 28 |
+
# [@compile_ignored: debug] Verbose will print full stack traces on warnings and errors
|
| 29 |
+
verbose = os.environ.get("TORCHDYNAMO_VERBOSE", "0") == "1"
|
| 30 |
+
|
| 31 |
+
# [@compile_ignored: runtime_behaviour] verify the correctness of optimized backend
|
| 32 |
+
verify_correctness = False
|
| 33 |
+
|
| 34 |
+
# need this many ops to create an FX graph
|
| 35 |
+
minimum_call_count = 1
|
| 36 |
+
|
| 37 |
+
# turn on/off DCE pass
|
| 38 |
+
dead_code_elimination = True
|
| 39 |
+
|
| 40 |
+
# disable (for a function) when cache reaches this size
|
| 41 |
+
|
| 42 |
+
# controls the maximum number of cache entries with a guard on same ID_MATCH'd
|
| 43 |
+
# object. It also controls the maximum size of cache entries if they don't have
|
| 44 |
+
# any ID_MATCH'd guards.
|
| 45 |
+
# [@compile_ignored: runtime_behaviour]
|
| 46 |
+
cache_size_limit = 8
|
| 47 |
+
|
| 48 |
+
# [@compile_ignored: runtime_behaviour] safeguarding to prevent horrible recomps
|
| 49 |
+
accumulated_cache_size_limit = 256
|
| 50 |
+
|
| 51 |
+
# [@compile_ignored: runtime_behaviour] skip tracing recursively if cache limit is hit
|
| 52 |
+
skip_code_recursive_on_cache_limit_hit = True
|
| 53 |
+
|
| 54 |
+
# whether or not to specialize on int inputs. This only has an effect with
|
| 55 |
+
# dynamic_shapes; when dynamic_shapes is False, we ALWAYS specialize on int
|
| 56 |
+
# inputs. Note that assume_static_by_default will also cause ints to get
|
| 57 |
+
# specialized, so this is mostly useful for export, where we want inputs
|
| 58 |
+
# to be dynamic, but accesses to ints should NOT get promoted into inputs.
|
| 59 |
+
specialize_int = False
|
| 60 |
+
|
| 61 |
+
# Whether or not to specialize on float inputs. Dynamo will always promote
|
| 62 |
+
# float inputs into Tensor inputs, but at the moment, backends inconsistently
|
| 63 |
+
# support codegen on float (this is to be fixed).
|
| 64 |
+
specialize_float = True
|
| 65 |
+
|
| 66 |
+
# legacy config, does nothing now!
|
| 67 |
+
dynamic_shapes = True
|
| 68 |
+
|
| 69 |
+
use_lazy_graph_module = (
|
| 70 |
+
os.environ.get("TORCH_COMPILE_USE_LAZY_GRAPH_MODULE", "1") == "1"
|
| 71 |
+
)
|
| 72 |
+
|
| 73 |
+
# This is a temporarily flag, which changes the behavior of dynamic_shapes=True.
|
| 74 |
+
# When assume_static_by_default is True, we only allocate symbols for shapes marked dynamic via mark_dynamic.
|
| 75 |
+
# NOTE - this flag can be removed once we can run dynamic_shapes=False w/ the mark_dynamic API
|
| 76 |
+
# see [Note - on the state of mark_dynamic]
|
| 77 |
+
assume_static_by_default = True
|
| 78 |
+
|
| 79 |
+
# This flag changes how dynamic_shapes=True works, and is meant to be used in conjunction
|
| 80 |
+
# with assume_static_by_default=True.
|
| 81 |
+
# With this flag enabled, we always compile a frame as fully static for the first time, and, if we fail
|
| 82 |
+
# any guards due to wobbles in shape, we recompile with *all* the wobbled shapes as being marked dynamic.
|
| 83 |
+
automatic_dynamic_shapes = True
|
| 84 |
+
|
| 85 |
+
# This flag changes how the shapes of parameters are treated.
|
| 86 |
+
# If this flag is set to True, then the shapes of torch.nn.Parameter as well as of torch.Tensor are attempted to be dynamic
|
| 87 |
+
# If this flag is set to False, then the shapes of torch.nn.Parameter are assumed to be static,
|
| 88 |
+
# while the shapes of torch.Tensor are assumed to be dynamic.
|
| 89 |
+
force_parameter_static_shapes = True
|
| 90 |
+
|
| 91 |
+
# This flag ensures that the shapes of a nn module are always assumed to be static
|
| 92 |
+
# If the flag is set to True, then the shapes of a nn.module are assumed to be static
|
| 93 |
+
# If the flag is set to False, then the shapes of a nn.module can be dynamic
|
| 94 |
+
force_nn_module_property_static_shapes = True
|
| 95 |
+
|
| 96 |
+
# Typically, if you mark_dynamic a dimension, we will error if the dimension
|
| 97 |
+
# actually ended up getting specialized. This knob changes the behavior so
|
| 98 |
+
# that we don't error at all. This is helpful for our CI where I'm using a
|
| 99 |
+
# heuristic to mark batch dimensions as dynamic and the heuristic may get it
|
| 100 |
+
# wrong.
|
| 101 |
+
allow_ignore_mark_dynamic = False
|
| 102 |
+
|
| 103 |
+
# Set this to False to assume nn.Modules() contents are immutable (similar assumption as freezing)
|
| 104 |
+
guard_nn_modules = True
|
| 105 |
+
|
| 106 |
+
# Uses CPython internal dictionary tags to detect mutation. There is some
|
| 107 |
+
# overlap between guard_nn_modules_using_dict_tags and guard_nn_modules flag.
|
| 108 |
+
# guard_nn_modules unspecializes the nn module instance and adds guard for each
|
| 109 |
+
# relevant member of the nn modules. On the other hand,
|
| 110 |
+
# guard_nn_modules_using_dict_tags specializes on each nn module instance but
|
| 111 |
+
# uses low overhead dict version matching to detect mutations, obviating the
|
| 112 |
+
# need to guard on members of the nn modules. With
|
| 113 |
+
# guard_nn_modules_using_dict_tags, the guard_nn_modules is not really required
|
| 114 |
+
# but kept around for debugging and discussing unspecializing nn module
|
| 115 |
+
# variables.
|
| 116 |
+
# TODO(janimesh, voz): Remove both of these flags (or atleast guard_nn_modules)
|
| 117 |
+
# once we have reached stability for the guard_nn_modules_using_dict_tags.
|
| 118 |
+
guard_nn_modules_using_dict_tags = True
|
| 119 |
+
|
| 120 |
+
# This feature doesn't really work. We offer this flag for experimental
|
| 121 |
+
# purposes / if you want to help us build out support.
|
| 122 |
+
#
|
| 123 |
+
# torchdynamo has limited support for tensor subclasses that implement
|
| 124 |
+
# __torch_function__ see [Note: __torch_function__] in torch_function.py.
|
| 125 |
+
# Our current support is limited to tensor subclasses
|
| 126 |
+
# that DO NOT store metadata on the tensor (in general, dynamo does not
|
| 127 |
+
# support Python code that stores extra attributes on tensors at present).
|
| 128 |
+
# If your tensor subclass purely changes function call behavior via
|
| 129 |
+
# __torch_function__, you can allow torchdynamo to trace into it by
|
| 130 |
+
# adding it to traceable_tensor_subclasses. We don't do any safety checks,
|
| 131 |
+
# so it is up to you to ensure that your subclass is well behaved. See also
|
| 132 |
+
# https://github.com/pytorch/torchdynamo/issues/1948
|
| 133 |
+
#
|
| 134 |
+
# We do NOT currently support __torch_dispatch__. The implementation is
|
| 135 |
+
# currently buggy, the main show stopper for nontrivial use is
|
| 136 |
+
# https://github.com/pytorch/torchdynamo/issues/1952
|
| 137 |
+
traceable_tensor_subclasses: Set[Type[Any]] = set()
|
| 138 |
+
|
| 139 |
+
# Suppress errors in torch._dynamo.optimize, instead forcing a fallback to eager.
|
| 140 |
+
# This is a good way to get your model to work one way or another, but you may
|
| 141 |
+
# lose optimization opportunities this way. Devs, if your benchmark model is failing
|
| 142 |
+
# this way, you should figure out why instead of suppressing it.
|
| 143 |
+
suppress_errors = bool(os.environ.get("TORCHDYNAMO_SUPPRESS_ERRORS", False))
|
| 144 |
+
|
| 145 |
+
# Record and write an execution record of the current frame to a file
|
| 146 |
+
# if an exception is encountered
|
| 147 |
+
# @compile_ignored[debug]
|
| 148 |
+
replay_record_enabled = os.environ.get("TORCH_COMPILE_REPLAY_RECORD", "0") == "1"
|
| 149 |
+
|
| 150 |
+
# Rewrite assert statement in python with torch._assert
|
| 151 |
+
rewrite_assert_with_torch_assert = True
|
| 152 |
+
|
| 153 |
+
# Disable dynamo
|
| 154 |
+
disable = os.environ.get("TORCH_COMPILE_DISABLE", False)
|
| 155 |
+
|
| 156 |
+
# [@compile_ignored: runtime_behaviour] Get a cprofile trace of Dynamo
|
| 157 |
+
cprofile = os.environ.get("TORCH_COMPILE_CPROFILE", False)
|
| 158 |
+
|
| 159 |
+
# legacy config, does nothing now!
|
| 160 |
+
skipfiles_inline_module_allowlist: Dict[Any, Any] = {}
|
| 161 |
+
|
| 162 |
+
# If a string representing a PyTorch module is in this ignorelist,
|
| 163 |
+
# the `allowed_functions.is_allowed` function will not consider it
|
| 164 |
+
# when creating a list of PyTorch functions that will appear in
|
| 165 |
+
# FX IR.
|
| 166 |
+
allowed_functions_module_string_ignorelist = {
|
| 167 |
+
"torch.distributions",
|
| 168 |
+
"torch.testing",
|
| 169 |
+
"torch._refs",
|
| 170 |
+
"torch._prims",
|
| 171 |
+
"torch._decomp",
|
| 172 |
+
}
|
| 173 |
+
|
| 174 |
+
# Debug Flag to try minifier at different stages. Possible values are {None, "aot", "dynamo"}
|
| 175 |
+
# None - Minifier is switched off
|
| 176 |
+
# dynamo - Runs minifier on the TorchDynamo produced graphs, if compilation fails
|
| 177 |
+
# aot - Runs minifier on the Aot Autograd produced graphs, if compilation fails
|
| 178 |
+
# [@compile_ignored: debug]
|
| 179 |
+
repro_after = os.environ.get("TORCHDYNAMO_REPRO_AFTER", None)
|
| 180 |
+
|
| 181 |
+
# Compiler compilation debug info
|
| 182 |
+
# 1: Dumps the original graph out to repro.py if compilation fails
|
| 183 |
+
# 2: Dumps a minifier_launcher.py if compilation fails.
|
| 184 |
+
# 3: Always dumps a minifier_launcher.py. Good for segfaults.
|
| 185 |
+
# 4: Dumps a minifier_launcher.py if the accuracy fails.
|
| 186 |
+
# [@compile_ignored: debug]
|
| 187 |
+
repro_level = int(os.environ.get("TORCHDYNAMO_REPRO_LEVEL", 2))
|
| 188 |
+
|
| 189 |
+
# By default, we try to detect accuracy failure by running both forward
|
| 190 |
+
# and backward of a torchdynamo produced graph (if you are using repro_after
|
| 191 |
+
# 'dynamo'). This setting forces us to only test the forward graph and
|
| 192 |
+
# not the backward graph. This can be helpful if you're trying to debug
|
| 193 |
+
# an inference only problem, but the minifier seems to be choking on the
|
| 194 |
+
# backwards step
|
| 195 |
+
# TODO: Detect this situation automatically so the user doesn't need
|
| 196 |
+
# to manually configure this
|
| 197 |
+
# [@compile_ignored: debug]
|
| 198 |
+
repro_forward_only = os.environ.get("TORCHDYNAMO_REPRO_FORWARD_ONLY") == "1"
|
| 199 |
+
|
| 200 |
+
# The tolerance we should use when testing if a compiled graph
|
| 201 |
+
# has diverged so that we should treat it as an accuracy failure
|
| 202 |
+
# [@compile_ignored: debug]
|
| 203 |
+
repro_tolerance = 1e-3
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
# Whether to ignore non-floating point values when checking accuracy.
|
| 207 |
+
# Checking accuracy of non-floating point values such as boolean tensors
|
| 208 |
+
# can lead to false positives.
|
| 209 |
+
# [@compile_ignored: debug]
|
| 210 |
+
repro_ignore_non_fp = os.environ.get("TORCHDYNAMO_REPRO_IGNORE_NON_FP") == "1"
|
| 211 |
+
|
| 212 |
+
# If True, when testing if two models are the same, we will test them against
|
| 213 |
+
# a third fp64 reference and only report a problem if the RMSE relative to the
|
| 214 |
+
# fp64 is greater. However, this will use more memory; you may disable this
|
| 215 |
+
# if memory usage is too high.
|
| 216 |
+
# [@compile_ignored: runtime_behaviour]
|
| 217 |
+
same_two_models_use_fp64 = True
|
| 218 |
+
|
| 219 |
+
# Not all backends support scalars. Some calls on torch.Tensor (like .item()) return a scalar type.
|
| 220 |
+
# When this flag is set to False, we introduce a graph break instead of capturing.
|
| 221 |
+
# This requires dynamic_shapes to be True.
|
| 222 |
+
capture_scalar_outputs = os.environ.get("TORCHDYNAMO_CAPTURE_SCALAR_OUTPUTS") == "1"
|
| 223 |
+
|
| 224 |
+
# Not all backends support operators that have dynamic output shape (e.g.,
|
| 225 |
+
# nonzero, unique). When this flag is set to False, we introduce a graph
|
| 226 |
+
# break instead of capturing. This requires dynamic_shapes to be True.
|
| 227 |
+
# If you set this to True, you probably also want capture_scalar_outputs
|
| 228 |
+
# (these are separated for historical reasons).
|
| 229 |
+
capture_dynamic_output_shape_ops = (
|
| 230 |
+
os.environ.get("TORCHDYNAMO_CAPTURE_DYNAMIC_OUTPUT_SHAPE_OPS", "0") == "1"
|
| 231 |
+
)
|
| 232 |
+
|
| 233 |
+
# hybrid backed unbacked symints
|
| 234 |
+
prefer_deferred_runtime_asserts_over_guards = False
|
| 235 |
+
|
| 236 |
+
# For complex dynamic shapes guards that we're unable to specify with dynamo/export's
|
| 237 |
+
# range constraints + dims + derived dims language, we raise constraint violation
|
| 238 |
+
# errors or specialize by default. If set to True, this flag avoids crashing/specialization,
|
| 239 |
+
# and allows complex guards as runtime assertions in the graph.
|
| 240 |
+
allow_complex_guards_as_runtime_asserts = False
|
| 241 |
+
|
| 242 |
+
# By default, dynamo will treat all ints as backed SymInts, which means (1) it
|
| 243 |
+
# will wait to see the int change over multiple runs before generalizing and
|
| 244 |
+
# (2) it will still always 0/1 specialize an int. When true, this knob
|
| 245 |
+
# forces dynamo to treat _length_per_key and _offset_per_key on
|
| 246 |
+
# KeyedJaggedTensor from torchrec as size-like unbacked SymInts, so that
|
| 247 |
+
# they (1) generalize immediately and (2) unsoundly never compare equal to
|
| 248 |
+
# 0/1. This is not on by default as AOTAutograd/Inductor cannot currently
|
| 249 |
+
# compile this code; however, this can be useful for export.
|
| 250 |
+
force_unspec_int_unbacked_size_like_on_torchrec_kjt = False
|
| 251 |
+
|
| 252 |
+
# Should almost always be true in prod. This relaxes the requirement that cond's true_fn and
|
| 253 |
+
# false_fn produces code with identical guards.
|
| 254 |
+
enforce_cond_guards_match = True
|
| 255 |
+
|
| 256 |
+
# Specify how to optimize a compiled DDP module. The flag accepts a boolean
|
| 257 |
+
# value or a string. There are 4 modes.
|
| 258 |
+
# 1. "ddp_optimizer" (or True): with "ddp_ptimizer", Dynamo will automatically
|
| 259 |
+
# split model graph into pieces to match DDP bucket sizes to allow DDP
|
| 260 |
+
# comm/compute overlap.
|
| 261 |
+
# 2. "python_reducer" (experimental): this optimization requires the usage
|
| 262 |
+
# of compiled_autograd. With "python_reducer", DDP will disable the C++ reducer
|
| 263 |
+
# and use the Python reducer to allow compiled_autograd to trace the
|
| 264 |
+
# communication and allow comm/compute overlap without graph-breaks.
|
| 265 |
+
# 3. "python_reducer_without_compiled_forward" (experimental): this mode is
|
| 266 |
+
# similar to "python_reducer". One should only use this optimization mode
|
| 267 |
+
# when compiled_autograd is used but the DDP module is not compiled.
|
| 268 |
+
# 4. "no_optimization" (or False): Dynamo won't split the model graph, nor
|
| 269 |
+
# will Python reducer be used. With this mode, there will be no graph-breaks
|
| 270 |
+
# and the original DDP C++ reducer will be used. There will no comm/compute
|
| 271 |
+
# overlap. This mode CANNOT be used with compiled_autograd.
|
| 272 |
+
# Note that to avoid breaking the existing usage, mode 1 and mode 4 can be
|
| 273 |
+
# specified with a boolean value. True is using ddp_optimizer and False is
|
| 274 |
+
# no optimization.
|
| 275 |
+
optimize_ddp: Union[bool, str] = True
|
| 276 |
+
|
| 277 |
+
# By default, Dynamo emits runtime asserts (e.g. torch._check, torch._check_is_size) in the graph.
|
| 278 |
+
# In some cases those asserts could be performance costly
|
| 279 |
+
# E.g. torch._check(tensor[0].item() > 2) for tensor on cuda will require cuda sync.
|
| 280 |
+
# Setting this to True keeps them hinting to symbolic shapes engine,
|
| 281 |
+
# but not be emitted in the graph.
|
| 282 |
+
do_not_emit_runtime_asserts: bool = (
|
| 283 |
+
os.environ.get("TORCH_DYNAMO_DO_NOT_EMIT_RUNTIME_ASSERTS", "0") == "1"
|
| 284 |
+
)
|
| 285 |
+
|
| 286 |
+
_ddp_optimization_mode = [
|
| 287 |
+
"ddp_optimizer",
|
| 288 |
+
"python_reducer", # experimental mode
|
| 289 |
+
"python_reducer_without_compiled_forward", # experimental mode
|
| 290 |
+
"no_optimization",
|
| 291 |
+
]
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
def _get_optimize_ddp_mode():
|
| 295 |
+
m = sys.modules[__name__]
|
| 296 |
+
if isinstance(m.optimize_ddp, bool):
|
| 297 |
+
if m.optimize_ddp:
|
| 298 |
+
mode = "ddp_optimizer"
|
| 299 |
+
else:
|
| 300 |
+
mode = "no_optimization"
|
| 301 |
+
elif isinstance(m.optimize_ddp, str):
|
| 302 |
+
mode = m.optimize_ddp
|
| 303 |
+
else:
|
| 304 |
+
raise ValueError(f"Invalid type, {type(optimize_ddp)=}")
|
| 305 |
+
|
| 306 |
+
assert mode in m._ddp_optimization_mode, f"Invalid mode {mode=}"
|
| 307 |
+
return mode
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
# Skip tracing the torchrec files added to trace_rules.FBCODE_SKIP_DIRS
|
| 311 |
+
skip_torchrec = True
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
# No longer used
|
| 315 |
+
optimize_ddp_lazy_compile = False
|
| 316 |
+
|
| 317 |
+
# Whether to skip guarding on FSDP-managed modules
|
| 318 |
+
skip_fsdp_guards = True
|
| 319 |
+
# Whether to apply torch._dynamo.disable() to FSDP2 hooks.
|
| 320 |
+
# Defaults to True. If Traceable FSDP2 is used, set this to False.
|
| 321 |
+
skip_fsdp_hooks = True
|
| 322 |
+
|
| 323 |
+
# Make dynamo skip guarding on hooks on nn modules
|
| 324 |
+
# Note: unsafe: if your model actually has hooks and you remove them, or doesn't and you add them,
|
| 325 |
+
# dynamo will not notice and will execute whichever version you first compiled.
|
| 326 |
+
skip_nnmodule_hook_guards = True
|
| 327 |
+
|
| 328 |
+
# If True, raises exception if TorchDynamo is called with a context manager
|
| 329 |
+
raise_on_ctx_manager_usage = True
|
| 330 |
+
|
| 331 |
+
# If True, raise when aot autograd is unsafe to use
|
| 332 |
+
raise_on_unsafe_aot_autograd = False
|
| 333 |
+
|
| 334 |
+
# If true, error if you torch.jit.trace over a dynamo-optimized function.
|
| 335 |
+
# If false, silently suppress dynamo
|
| 336 |
+
error_on_nested_jit_trace = True
|
| 337 |
+
|
| 338 |
+
# If true, error with a better message if we symbolically trace over a
|
| 339 |
+
# dynamo-optimized function. If false, silently suppress dynamo.
|
| 340 |
+
error_on_nested_fx_trace = True
|
| 341 |
+
|
| 342 |
+
# Disables graph breaking on rnn. YMMV with backends.
|
| 343 |
+
allow_rnn = False
|
| 344 |
+
|
| 345 |
+
# If true, enables feature that captures PyTorch sparsity in the
|
| 346 |
+
# exported FX graph. This flag should become the default eventually
|
| 347 |
+
# and be removed, but currently provides a way to fall back to old
|
| 348 |
+
# graph breaking behavior.
|
| 349 |
+
capture_sparse_compute = False if is_fbcode() else True
|
| 350 |
+
|
| 351 |
+
# If true, error if we try to compile a function that has
|
| 352 |
+
# been seen before.
|
| 353 |
+
# [@compile_ignored: runtime_behaviour]
|
| 354 |
+
error_on_recompile = False
|
| 355 |
+
|
| 356 |
+
# [@compile_ignored: debug] Whether to report any guard failures (deprecated: does not do anything)
|
| 357 |
+
report_guard_failures = True
|
| 358 |
+
|
| 359 |
+
# [@compile_ignored: debug] root folder of the project
|
| 360 |
+
base_dir = dirname(dirname(dirname(abspath(__file__))))
|
| 361 |
+
|
| 362 |
+
# Trace through NumPy or graphbreak
|
| 363 |
+
trace_numpy = True
|
| 364 |
+
|
| 365 |
+
# Default NumPy dtypes when tracing with torch.compile
|
| 366 |
+
# We default to 64bits. For efficiency, one may want to change these to float32
|
| 367 |
+
numpy_default_float = "float64"
|
| 368 |
+
numpy_default_complex = "complex128"
|
| 369 |
+
numpy_default_int = "int64"
|
| 370 |
+
|
| 371 |
+
# use numpy's PRNG if True, pytorch otherwise
|
| 372 |
+
use_numpy_random_stream = False
|
| 373 |
+
|
| 374 |
+
# Use C++ guard manager
|
| 375 |
+
enable_cpp_guard_manager = os.environ.get("TORCHDYNAMO_CPP_GUARD_MANAGER", "1") == "1"
|
| 376 |
+
|
| 377 |
+
# Inline inbuilt nn modules
|
| 378 |
+
inline_inbuilt_nn_modules = not is_fbcode()
|
| 379 |
+
|
| 380 |
+
# When set, total compile time instruction count is recorded using
|
| 381 |
+
# torch._dynamo.utilsCompileTimeInstructionCounter.
|
| 382 |
+
record_compile_time_instruction_count = False
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
def default_debug_dir_root():
|
| 386 |
+
# [@compile_ignored: debug]
|
| 387 |
+
DEBUG_DIR_VAR_NAME = "TORCH_COMPILE_DEBUG_DIR"
|
| 388 |
+
if DEBUG_DIR_VAR_NAME in os.environ:
|
| 389 |
+
return os.path.join(os.environ[DEBUG_DIR_VAR_NAME], "torch_compile_debug")
|
| 390 |
+
elif is_fbcode():
|
| 391 |
+
return os.path.join(
|
| 392 |
+
tempfile.gettempdir(), getpass.getuser(), "torch_compile_debug"
|
| 393 |
+
)
|
| 394 |
+
else:
|
| 395 |
+
return os.path.join(os.getcwd(), "torch_compile_debug")
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
# [@compile_ignored: debug]
|
| 399 |
+
debug_dir_root = default_debug_dir_root()
|
| 400 |
+
|
| 401 |
+
# [@compile_ignored: debug]
|
| 402 |
+
_save_config_ignore = {
|
| 403 |
+
"repro_after",
|
| 404 |
+
"repro_level",
|
| 405 |
+
# workaround: "cannot pickle PyCapsule"
|
| 406 |
+
"constant_functions",
|
| 407 |
+
# workaround: "cannot pickle module"
|
| 408 |
+
"skipfiles_inline_module_allowlist",
|
| 409 |
+
}
|
| 410 |
+
|
| 411 |
+
# for backend="cudagraphs", mutations on input be sent to the cudagraph backend
|
| 412 |
+
# or replayed in aot_autograd epilogue. default is False because mutation on inputs
|
| 413 |
+
# can prevent cudagraphing.
|
| 414 |
+
cudagraph_backend_keep_input_mutation = False
|
| 415 |
+
|
| 416 |
+
# enable cudagraph support for mutated inputs from prior cudagraph pool
|
| 417 |
+
cudagraph_backend_support_input_mutation = False
|
| 418 |
+
|
| 419 |
+
# When True, only ops that have the torch.Tag.pt2_compliant tag
|
| 420 |
+
# will be allowed into the graph; all other ops will be disallowed
|
| 421 |
+
# and will fall back to eager-mode PyTorch. Useful to ensure
|
| 422 |
+
# correctness of custom ops.
|
| 423 |
+
only_allow_pt2_compliant_ops = False
|
| 424 |
+
|
| 425 |
+
capture_autograd_function = True
|
| 426 |
+
|
| 427 |
+
# enable/disable dynamo tracing for `torch.func` transforms
|
| 428 |
+
capture_func_transforms = True
|
| 429 |
+
|
| 430 |
+
# If to log Dynamo compilation metrics into log files (for OSS) and Scuba tables (for fbcode).
|
| 431 |
+
log_compilation_metrics = True
|
| 432 |
+
|
| 433 |
+
# A set of logging functions which will be reordered to the end of graph breaks,
|
| 434 |
+
# allowing dynamo to construct larget graph. Note that there are some
|
| 435 |
+
# limitations to this, such as how it does not correctly print objects that were
|
| 436 |
+
# mutated after the print statement.
|
| 437 |
+
reorderable_logging_functions: Set[Callable[[Any], None]] = set()
|
| 438 |
+
|
| 439 |
+
# simulates what would happen if we didn't have support for BUILD_SET opcode,
|
| 440 |
+
# used for testing
|
| 441 |
+
inject_BUILD_SET_unimplemented_TESTING_ONLY = False
|
| 442 |
+
|
| 443 |
+
_autograd_backward_strict_mode_banned_ops = [
|
| 444 |
+
"stride",
|
| 445 |
+
"requires_grad",
|
| 446 |
+
"storage_offset",
|
| 447 |
+
"layout",
|
| 448 |
+
"data",
|
| 449 |
+
]
|
| 450 |
+
|
| 451 |
+
_autograd_backward_strict_mode_banned_ops.extend(
|
| 452 |
+
[name for name, _ in inspect.getmembers(torch.Tensor) if re.match(r"^is_.*", name)]
|
| 453 |
+
)
|
| 454 |
+
|
| 455 |
+
# Enables caching of dispatches to fake tensors.
|
| 456 |
+
fake_tensor_cache_enabled = (
|
| 457 |
+
os.environ.get("TORCH_FAKE_TENSOR_DISPATCH_CACHE", "1") == "1"
|
| 458 |
+
)
|
| 459 |
+
|
| 460 |
+
# Enables cross checking between the fake tensor cache and dispatch.
|
| 461 |
+
fake_tensor_cache_crosscheck_enabled = (
|
| 462 |
+
os.environ.get("TORCH_FAKE_TENSOR_DISPATCH_CACHE_CROSSCHECK", "0") == "1"
|
| 463 |
+
)
|
| 464 |
+
|
| 465 |
+
# Enables the Compiled Autograd engine to trace .backward() calls made under torch.compile().
|
| 466 |
+
# Note: AOT Autograd will still trace joint graphs.
|
| 467 |
+
compiled_autograd = False
|
| 468 |
+
|
| 469 |
+
# Enables use of collectives *during* compilation to synchronize behavior
|
| 470 |
+
# across ranks. Today, this is used solely to modify automatic_dynamic_shapes
|
| 471 |
+
# behavior, making it so that we infer that if an input is dynamic by
|
| 472 |
+
# inspecting whether or not its input size varies across ranks. Because
|
| 473 |
+
# this synchronization uses collectives, all ranks must run compilation at
|
| 474 |
+
# the same time; ranks must not diverge with graph breaks. This can be most
|
| 475 |
+
# reliably achieved by ensuring PT2 only is run on SPMD programs. If this
|
| 476 |
+
# invariant is inviolated, you will likely deadlock NCCL and encounter a
|
| 477 |
+
# NCCL timeout.
|
| 478 |
+
enable_compiler_collectives = os.environ.get("TORCH_COMPILER_COLLECTIVES", "0") == "1"
|
| 479 |
+
|
| 480 |
+
if TYPE_CHECKING:
|
| 481 |
+
from torch.utils._config_typing import * # noqa: F401, F403
|
| 482 |
+
|
| 483 |
+
def _make_closure_patcher(**changes):
|
| 484 |
+
...
|
| 485 |
+
|
| 486 |
+
|
| 487 |
+
from torch.utils._config_module import install_config_module
|
| 488 |
+
|
| 489 |
+
|
| 490 |
+
install_config_module(sys.modules[__name__])
|
pllava/lib/python3.10/site-packages/torch/_dynamo/create_parameter_op.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import threading
|
| 3 |
+
from contextlib import contextmanager
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
doc = """
|
| 9 |
+
This is used when dynamo traces torch.nn.Parameter, which normally would not trace properly
|
| 10 |
+
with AOTAutograd. We instead create a placeholder torch.nn.Parameter before the graph, which
|
| 11 |
+
becomes a graph arg and has no storage backing it. At the point in the graph where the parameter
|
| 12 |
+
actually should be created we mutate this sacrificial placeholder into it. This allows gradients
|
| 13 |
+
to flow into the parameter as if it were an input to the graph (which is the only thing we are
|
| 14 |
+
allowed to compute gradients on).
|
| 15 |
+
""".strip()
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class TracableCreateParameter(torch.autograd.Function):
|
| 19 |
+
@staticmethod
|
| 20 |
+
def forward(ctx, tensor, placeholder):
|
| 21 |
+
assert not tensor.requires_grad
|
| 22 |
+
return placeholder.set_(tensor)
|
| 23 |
+
|
| 24 |
+
@staticmethod
|
| 25 |
+
def backward(ctx, grad):
|
| 26 |
+
return None, grad # grad flows to placeholder
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def tracable_create_parameter(tensor, placeholder):
|
| 30 |
+
with torch.set_grad_enabled(placeholder.requires_grad):
|
| 31 |
+
out = TracableCreateParameter.apply(tensor, placeholder)
|
| 32 |
+
return out
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def new_parameter_placeholder(size, dtype, device, requires_grad):
|
| 36 |
+
"""Create a placeholder to be passed to the above functions"""
|
| 37 |
+
result = torch.nn.Parameter(
|
| 38 |
+
torch.empty(size, dtype=dtype, device=device), requires_grad=requires_grad
|
| 39 |
+
)
|
| 40 |
+
# TODO(jansel): alloc followed by free is inefficient, need a way to allocate an unbacked tensor.
|
| 41 |
+
# Allocating a zero tensor would causes assert failures in autograd.
|
| 42 |
+
result.untyped_storage().resize_(0)
|
| 43 |
+
return result
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
_TLS = threading.local()
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
@contextmanager
|
| 50 |
+
def do_not_convert_to_tracable_parameter():
|
| 51 |
+
old_flag = getattr(_TLS, "convert_tracable_parameter", True)
|
| 52 |
+
_TLS.convert_tracable_parameter = False
|
| 53 |
+
try:
|
| 54 |
+
yield False
|
| 55 |
+
finally:
|
| 56 |
+
_TLS.convert_tracable_parameter = old_flag
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def can_convert_to_tracable_parameter():
|
| 60 |
+
return getattr(_TLS, "convert_tracable_parameter", True)
|
pllava/lib/python3.10/site-packages/torch/_dynamo/current_scope_id.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import contextlib
|
| 3 |
+
import threading
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
# Global variable to identify which SubgraphTracer we are in.
|
| 7 |
+
# It is sometimes difficult to find an InstructionTranslator to use.
|
| 8 |
+
_current_scope_id = threading.local()
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def current_scope_id():
|
| 12 |
+
global _current_scope_id
|
| 13 |
+
if not hasattr(_current_scope_id, "value"):
|
| 14 |
+
_current_scope_id.value = 1
|
| 15 |
+
return _current_scope_id.value
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
@contextlib.contextmanager
|
| 19 |
+
def enter_new_scope():
|
| 20 |
+
global _current_scope_id
|
| 21 |
+
try:
|
| 22 |
+
_current_scope_id.value = current_scope_id() + 1
|
| 23 |
+
yield
|
| 24 |
+
finally:
|
| 25 |
+
_current_scope_id.value = current_scope_id() - 1
|
pllava/lib/python3.10/site-packages/torch/_dynamo/decorators.py
ADDED
|
@@ -0,0 +1,580 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
# ruff: noqa: TCH004
|
| 3 |
+
import functools
|
| 4 |
+
import inspect
|
| 5 |
+
from dataclasses import dataclass
|
| 6 |
+
from typing import Any, Callable, Dict, Type, TYPE_CHECKING, TypeVar
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
from torch.utils._python_dispatch import is_traceable_wrapper_subclass
|
| 10 |
+
|
| 11 |
+
from . import trace_rules, variables
|
| 12 |
+
from .comptime import comptime
|
| 13 |
+
from .eval_frame import DisableContext, innermost_fn, RunOnlyContext
|
| 14 |
+
from .exc import IncorrectUsage
|
| 15 |
+
from .external_utils import is_compiling
|
| 16 |
+
from .utils import is_function
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
if TYPE_CHECKING:
|
| 20 |
+
from types import FunctionType
|
| 21 |
+
|
| 22 |
+
from torch._C._dynamo.eval_frame import ( # noqa: F401
|
| 23 |
+
reset_code,
|
| 24 |
+
set_eval_frame,
|
| 25 |
+
set_guard_error_hook,
|
| 26 |
+
skip_code,
|
| 27 |
+
unsupported,
|
| 28 |
+
)
|
| 29 |
+
|
| 30 |
+
from .variables import VariableTracker
|
| 31 |
+
else:
|
| 32 |
+
for name in dir(torch._C._dynamo.eval_frame):
|
| 33 |
+
if name.startswith("__"):
|
| 34 |
+
continue
|
| 35 |
+
globals()[name] = getattr(torch._C._dynamo.eval_frame, name)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
_F = TypeVar("_F", bound=Callable[..., Any])
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def run(fn=None):
    """Run with dynamic compilation turned off, reusing only prior optimizations.

    Usable both as a decorator (``fn`` given) and as a context manager
    (``fn`` omitted).
    """
    context = RunOnlyContext()
    if fn is None:
        return context
    target = innermost_fn(fn)
    assert callable(target)
    return context(target)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def disable(fn=None, recursive=True):
    """
    Decorator and context manager to disable TorchDynamo.

    If recursive=True, Dynamo is completely skipped on the decorated function
    frame as well as on all recursively invoked functions.

    If recursive=False, Dynamo skips frames associated with the function code,
    but still processes recursively invoked frames.
    """
    if not recursive:
        # Non-recursive disable is exactly what skip() implements.
        return skip(fn)
    if fn is None:
        # Used as a context manager / bare decorator factory.
        return DisableContext()
    target = innermost_fn(fn)
    assert callable(target)
    return DisableContext()(target)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def skip(fn=None):
|
| 71 |
+
"""
|
| 72 |
+
Skip frames associated with the function code, but still process recursively
|
| 73 |
+
invoked frames
|
| 74 |
+
"""
|
| 75 |
+
if fn is None:
|
| 76 |
+
return skip
|
| 77 |
+
fn = innermost_fn(fn)
|
| 78 |
+
assert callable(fn)
|
| 79 |
+
skip_code(fn.__code__)
|
| 80 |
+
fn._torchdynamo_disable = True
|
| 81 |
+
return fn
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def assume_constant_result(fn):
    """Tag *fn* so Dynamo assumes its result is constant during tracing."""
    setattr(fn, "_dynamo_marked_constant", True)
    return fn
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def allow_in_graph(fn):
|
| 90 |
+
"""
|
| 91 |
+
Tells the compiler frontend (Dynamo) to skip symbolic introspection of the function
|
| 92 |
+
and instead directly write it to the graph when encountered.
|
| 93 |
+
|
| 94 |
+
See :func:`torch.compiler.allow_in_graph`'s docstring for the full documentation
|
| 95 |
+
|
| 96 |
+
WARNING: this API can be a footgun, please read the documentation carefully.
|
| 97 |
+
"""
|
| 98 |
+
if isinstance(fn, (list, tuple)):
|
| 99 |
+
return [allow_in_graph(x) for x in fn]
|
| 100 |
+
assert callable(fn), "allow_in_graph expects a callable"
|
| 101 |
+
if trace_rules.lookup_callable(fn) != variables.TorchInGraphFunctionVariable:
|
| 102 |
+
trace_rules._disallowed_callable_ids.remove(id(fn))
|
| 103 |
+
trace_rules._allowed_callable_ids.add(id(fn))
|
| 104 |
+
return fn
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def _disallow_in_graph_helper(throw_if_not_allowed):
|
| 108 |
+
def inner(fn):
|
| 109 |
+
if isinstance(fn, (list, tuple)):
|
| 110 |
+
return [disallow_in_graph(x) for x in fn]
|
| 111 |
+
assert callable(fn), "disallow_in_graph expects a callable"
|
| 112 |
+
if (
|
| 113 |
+
throw_if_not_allowed
|
| 114 |
+
and trace_rules.lookup_callable(fn)
|
| 115 |
+
!= variables.TorchInGraphFunctionVariable
|
| 116 |
+
and trace_rules.lookup(fn) != variables.TorchInGraphFunctionVariable
|
| 117 |
+
):
|
| 118 |
+
raise IncorrectUsage(
|
| 119 |
+
"disallow_in_graph is expected to be used on an already allowed callable (like torch.* ops). "
|
| 120 |
+
"Allowed callables means callables that TorchDynamo puts as-is in the extracted graph."
|
| 121 |
+
)
|
| 122 |
+
trace_rules._allowed_callable_ids.remove(id(fn))
|
| 123 |
+
trace_rules._disallowed_callable_ids.add(id(fn))
|
| 124 |
+
return fn
|
| 125 |
+
|
| 126 |
+
return inner
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def disallow_in_graph(fn):
|
| 130 |
+
"""
|
| 131 |
+
Customize which functions TorchDynamo will exclude in the generated
|
| 132 |
+
graph and force a graph break on.
|
| 133 |
+
::
|
| 134 |
+
|
| 135 |
+
torch._dynamo.disallow_in_graph(torch.sub)
|
| 136 |
+
|
| 137 |
+
@torch._dynamo.optimize(...)
|
| 138 |
+
def fn(a):
|
| 139 |
+
x = torch.add(x, 1)
|
| 140 |
+
x = torch.sub(x, 1)
|
| 141 |
+
x = torch.add(x, 1)
|
| 142 |
+
return x
|
| 143 |
+
|
| 144 |
+
fn(...)
|
| 145 |
+
|
| 146 |
+
Will break the graph on `torch.sub`, and give two graphs each with a
|
| 147 |
+
single `torch.add()` op.
|
| 148 |
+
"""
|
| 149 |
+
return _disallow_in_graph_helper(throw_if_not_allowed=True)(fn)
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
# Registering via _disallow_in_graph_helper (without the allowed-set check)
# makes any traced call to graph_break() split the graph at the call site.
@_disallow_in_graph_helper(throw_if_not_allowed=False)
def graph_break():
    """Force a graph break at the call site when traced by TorchDynamo."""
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def forbid_in_graph(fn):
    """
    Customize which functions TorchDynamo will assert are not present while tracing.

    If you want a graph break on this function instead, use disallow_in_graph.
    TODO(voz): We now have allow_in_graph, disallow_in_graph, forbid_in_graph - some more robust
    documentation would not be amiss.
    """
    if isinstance(fn, (list, tuple)):
        # Forbid a batch of callables at once.
        return [forbid_in_graph(each) for each in fn]
    assert callable(fn), "forbid_in_graph applies only to callables"
    setattr(fn, "_dynamo_forbidden", True)
    return fn
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def substitute_in_graph(
    original_fn: _F,
    *,
    can_constant_fold_through: bool = False,
    skip_signature_check: bool = False,
    # type that is embedded in the Python interpreter
    is_embedded_type: bool = False,  # internal use only
) -> Callable[[_F], _F]:
    """
    Register a polyfill handler for a function, usually a C function from the C extension, to be
    used in place of the original function when inlining the original function in the graph.

    .. note::

        The polyfill handler is only used when inlining the original function. It is not used when
        the original function is called directly. In the eager mode, the decorated function calls
        the performant C function rather than the polyfill handler.

    The polyfill handler is a function that will be called in place of the original function when
    inlining the original function. The polyfill handler should have the same signature and the same
    behavior as the original function.

    Args:
        original_fn (callable): The original function, usually a C function, to register a polyfill
            handler for.
        can_constant_fold_through (bool, optional): Whether the polyfill handler can be constant
            folded through. That is, if the polyfill handler is a pure function and its arguments
            are constant, the result of the polyfill handler can be constant folded during the
            compilation. Defaults to ``False``.
        skip_signature_check (bool, optional): Whether to skip the signature check between the
            original function and the polyfill handler. Defaults to ``False``.

    Returns:
        A decorator that registers the polyfill handler for the original function.

    Raises:
        TypeError: if ``original_fn`` (or the decorated handler) is not a function,
            or if their signatures do not match.
        ValueError: if ``original_fn`` already has a dispatch rule, trace rule,
            or polyfill handler registered.

    Example::

        >>> # xdoctest: +SKIP("conflict with the tests: duplicate polyfill handlers")
        >>> import operator
        >>> operator.indexOf([1, 2, 3, 4, 5], 3)
        2
        >>> torch.compile(operator.indexOf, fullgraph=True)([1, 2, 3, 4, 5], 3)
        Traceback (most recent call last):
        ...
        torch._dynamo.exc.Unsupported: ...

        >>> @torch.compiler.substitute_in_graph(operator.indexOf)
        ... def indexOf(a, b, /):
        ...     for i, item in enumerate(a):
        ...         if item is b or item == b:
        ...             return i
        ...     raise ValueError("sequence.index(x): x not in sequence")
        >>>
        >>> torch.compile(operator.indexOf, fullgraph=True)([1, 2, 3, 4, 5], 3)
        2
    """
    if not is_function(original_fn) and not (
        is_embedded_type and inspect.isclass(original_fn)
    ):
        raise TypeError(
            f"substitute_in_graph expects a function but got {type(original_fn)!r}"
        )
    if is_embedded_type:
        if not inspect.isclass(original_fn):
            raise TypeError(
                f"substitute_in_graph expects a class but got {type(original_fn)!r}"
            )

        from .variables.builder import ITERTOOLS_POLYFILLED_TYPE_IDS, ITERTOOLS_TYPE_IDS

        # Record that this itertools-embedded type now has a polyfill.
        if id(original_fn) in ITERTOOLS_TYPE_IDS:
            ITERTOOLS_POLYFILLED_TYPE_IDS.add(id(original_fn))

    def wrapper(traceable_fn: _F) -> _F:
        if not is_function(traceable_fn):
            raise TypeError(
                f"@substitute_in_graph(...) expects a function but got {type(traceable_fn)!r}"
            )

        if not skip_signature_check:
            try:
                original_sig = inspect.signature(original_fn)
            except ValueError:
                # Some C functions expose no signature; nothing to compare.
                pass
            else:
                traceable_sig = inspect.signature(traceable_fn)

                def sig_ident(sig):
                    # Ignore annotations for parameters and return type
                    return (
                        tuple(
                            p.name
                            for p in sig.parameters.values()
                            if (
                                p.kind
                                not in {
                                    p.KEYWORD_ONLY,
                                    # the name of *args and **kwargs is not important
                                    p.VAR_POSITIONAL,
                                    p.VAR_KEYWORD,
                                }
                            )
                        ),
                        {
                            p.name
                            for p in sig.parameters.values()
                            if p.kind == p.KEYWORD_ONLY
                        },
                        {
                            p.name: p.default
                            for p in sig.parameters.values()
                            # the name of *args and **kwargs is not important
                            if p.kind not in {p.VAR_POSITIONAL, p.VAR_KEYWORD}
                        },
                    )

                # A bare (*args, **kwargs) signature is accepted on either side.
                wildcard_sig = inspect.signature(lambda *args, **kwargs: None)

                if (
                    sig_ident(original_sig) != sig_ident(traceable_sig)
                    and sig_ident(original_sig) != sig_ident(wildcard_sig)
                    and sig_ident(traceable_sig) != sig_ident(wildcard_sig)
                ):
                    raise TypeError(
                        f"Signature mismatch between {original_fn} and {traceable_fn}: "
                        f"{original_sig} != {traceable_sig}"
                    )

        from torch._dynamo.guards import GuardBuilder
        from torch._dynamo.trace_rules import get_torch_obj_rule_map
        from torch._dynamo.variables import PolyfilledFunctionVariable
        from torch._dynamo.variables.builder import VariableBuilder

        # Refuse double-registration in any of the three lookup structures.
        id_dispatch_map = VariableBuilder._id_dispatch()
        if id(original_fn) in id_dispatch_map:
            raise ValueError(
                f"Duplicate dispatch rule for {original_fn}: "
                "already registered in VariableBuilder's id dispatch map"
            )

        rule_map: Dict[Any, Type[VariableTracker]] = get_torch_obj_rule_map()
        if original_fn in rule_map:
            raise ValueError(
                f"Duplicate object {original_fn} with different rules: "
                f"{PolyfilledFunctionVariable}, {rule_map[original_fn]}"
            )

        polyfill_handlers: Dict[Callable[..., Any], FunctionType]
        polyfill_handlers = PolyfilledFunctionVariable._get_polyfill_handlers()
        if original_fn in polyfill_handlers:
            raise ValueError(
                f"Duplicate polyfill handlers for {original_fn}: "
                f"already handled by {polyfill_handlers[original_fn]}"
            )

        # Wrap the function because we may not be able to assign
        # __torch_dynamo_polyfill__ (and friends) on a C function.
        @functools.wraps(traceable_fn)
        def wrapped(*args, **kwargs):
            # Eager calls go to the (fast) original implementation.
            return original_fn(*args, **kwargs)

        def dispatch_fn(self, value: _F) -> PolyfilledFunctionVariable:
            return PolyfilledFunctionVariable(
                value,
                source=self.source,
                **self.install_guards(GuardBuilder.FUNCTION_MATCH),
            )

        # Register both the original and the wrapper so either resolves to
        # the polyfill during tracing.
        id_dispatch_map[id(original_fn)] = id_dispatch_map[id(wrapped)] = dispatch_fn
        rule_map[original_fn] = rule_map[wrapped] = PolyfilledFunctionVariable
        polyfill_handlers[original_fn] = polyfill_handlers[wrapped] = wrapped  # type: ignore[assignment]

        wrapped.__torch_dynamo_original__ = original_fn  # type: ignore[attr-defined]
        wrapped.__torch_dynamo_polyfill__ = traceable_fn  # type: ignore[attr-defined]
        wrapped.__torch_dynamo_can_constant_fold_through__ = can_constant_fold_through  # type: ignore[attr-defined]

        return wrapped  # type: ignore[return-value]

    return wrapper
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
# Helper used by the marking APIs below to avoid duplicating subclass
# handling: flatten a traceable tensor subclass and re-apply the marking
# function to every inner tensor whose dim matches the outer tensor's.
def _apply_func_to_inner_tensors_of_same_dim(func, t, *args, **kwargs):
    assert is_traceable_wrapper_subclass(t)

    inner_attr_names, _ctx = t.__tensor_flatten__()
    assert isinstance(t, torch.Tensor)
    for attr_name in inner_attr_names:
        inner_tensor = getattr(t, attr_name)
        if inner_tensor.dim() == t.dim():
            func(inner_tensor, *args, **kwargs)
|
| 365 |
+
|
| 366 |
+
|
| 367 |
+
@dataclass(frozen=True)
class _DimRange:
    """
    This represents a dimension of a tensor and the corresponding
    min and max values it can take.  Don't create this
    class directly; instead, use :func:`mark_dynamic`.
    """

    # Index of the tensor dimension this range constrains.
    dim: int
    # Lower bound for the dimension's size.
    # NOTE(review): mark_dynamic defaults min/max to None, so despite the int
    # annotation these may be None (meaning unbounded) — confirm with callers.
    min: int
    # Upper bound for the dimension's size (same None caveat as ``min``).
    max: int
|
| 378 |
+
|
| 379 |
+
|
| 380 |
+
@forbid_in_graph
|
| 381 |
+
def mark_unbacked(t, index):
|
| 382 |
+
"""
|
| 383 |
+
Mark a tensor as having an unbacked dim. This changes the semantics of operations,
|
| 384 |
+
we will always report the size does not equal zero/one, we will turn asserts
|
| 385 |
+
on this index into runtime asserts, and if you try to get the real value we will
|
| 386 |
+
raise an exception. In other words, we will treat this dimension as if it was
|
| 387 |
+
data dependent (we do not know anything about its value.)
|
| 388 |
+
"""
|
| 389 |
+
# You could have copied the mark_dynamic behavior but I'm not convinced
|
| 390 |
+
# it's what you want
|
| 391 |
+
assert not is_traceable_wrapper_subclass(t), "not implemented yet"
|
| 392 |
+
|
| 393 |
+
if isinstance(index, int):
|
| 394 |
+
if not hasattr(t, "_dynamo_unbacked_indices"):
|
| 395 |
+
t._dynamo_unbacked_indices = set()
|
| 396 |
+
t._dynamo_unbacked_indices.add(index)
|
| 397 |
+
return
|
| 398 |
+
|
| 399 |
+
assert isinstance(index, (list, tuple))
|
| 400 |
+
for i in index:
|
| 401 |
+
mark_unbacked(t, i)
|
| 402 |
+
|
| 403 |
+
|
| 404 |
+
@forbid_in_graph
|
| 405 |
+
def mark_dynamic(t, index, *, min=None, max=None):
|
| 406 |
+
"""
|
| 407 |
+
Mark a tensor as having a dynamic dim and set corresponding min and max range for the dim.
|
| 408 |
+
|
| 409 |
+
[Note - on the state of mark_dynamic]
|
| 410 |
+
|
| 411 |
+
The behavior of having a dynamic dimension on a tensor is governed by a few factors:
|
| 412 |
+
|
| 413 |
+
1) torch._dynamo.config dynamic_shapes True or False.
|
| 414 |
+
a) dynamic_shapes=True - dynamic_shapes must be True for mark_dynamic to work.
|
| 415 |
+
a) dynamic_shapes=False - This config will raise an exception when used in conjunction with
|
| 416 |
+
mark_dynamic. We will eventually support this.
|
| 417 |
+
|
| 418 |
+
2) If the dimension is fully constrained - as in, it does not allow more than a single value
|
| 419 |
+
in both eager (torch.compile, torch._dynamo.optimize) mode and export mode (torch._dynamo.export),
|
| 420 |
+
we will raise an error
|
| 421 |
+
|
| 422 |
+
3) If the dimension is partially constrained - allowing at least 2 values but not the full unbounded
|
| 423 |
+
range of shapes, in eager we will pass it through, but export will raise an error.
|
| 424 |
+
|
| 425 |
+
4) Attempts to trace this function will explicitly raise. As such, all calls to mark_dynamic must be made
|
| 426 |
+
before torch.compile.
|
| 427 |
+
|
| 428 |
+
"""
|
| 429 |
+
if is_traceable_wrapper_subclass(t):
|
| 430 |
+
# default behavior: mirror mark_dynamic() on all inner tensors with same dim as t
|
| 431 |
+
# TODO: Make this configurable via a supported public API
|
| 432 |
+
_apply_func_to_inner_tensors_of_same_dim(
|
| 433 |
+
mark_dynamic, t, index, min=min, max=max
|
| 434 |
+
)
|
| 435 |
+
|
| 436 |
+
if isinstance(index, int):
|
| 437 |
+
if not hasattr(t, "_dynamo_dynamic_indices"):
|
| 438 |
+
t._dynamo_dynamic_indices = set()
|
| 439 |
+
t._dynamo_dynamic_range = set()
|
| 440 |
+
# TODO(voz): Should we bounds check?
|
| 441 |
+
t._dynamo_dynamic_indices.add(index)
|
| 442 |
+
t._dynamo_dynamic_range.add(_DimRange(index, min, max))
|
| 443 |
+
return
|
| 444 |
+
|
| 445 |
+
assert isinstance(index, (list, tuple))
|
| 446 |
+
for i in index:
|
| 447 |
+
mark_dynamic(t, i, min=min, max=max)
|
| 448 |
+
|
| 449 |
+
|
| 450 |
+
@forbid_in_graph
|
| 451 |
+
def maybe_mark_dynamic(t, index):
|
| 452 |
+
"""
|
| 453 |
+
Mark a tensor as having a dynamic dim, but don't enforce it (i.e., if this
|
| 454 |
+
dimension ends up getting specialized, don't error).
|
| 455 |
+
"""
|
| 456 |
+
if is_traceable_wrapper_subclass(t):
|
| 457 |
+
# default behavior: mirror maybe_mark_dynamic() on all inner tensors with same dim as t
|
| 458 |
+
# TODO: Make this configurable via a supported public API
|
| 459 |
+
_apply_func_to_inner_tensors_of_same_dim(maybe_mark_dynamic, t, index)
|
| 460 |
+
|
| 461 |
+
if isinstance(index, int):
|
| 462 |
+
if not hasattr(t, "_dynamo_weak_dynamic_indices"):
|
| 463 |
+
t._dynamo_weak_dynamic_indices = set()
|
| 464 |
+
# TODO(voz): Should we bounds check?
|
| 465 |
+
t._dynamo_weak_dynamic_indices.add(index)
|
| 466 |
+
return
|
| 467 |
+
|
| 468 |
+
assert isinstance(index, (list, tuple))
|
| 469 |
+
for i in index:
|
| 470 |
+
maybe_mark_dynamic(t, i)
|
| 471 |
+
|
| 472 |
+
|
| 473 |
+
def mark_static(t, index=None):
|
| 474 |
+
"""
|
| 475 |
+
Mark a tensor as having a static dim or mark a nn module class as static.
|
| 476 |
+
|
| 477 |
+
For tensors
|
| 478 |
+
===========
|
| 479 |
+
This will prevent us from attempting to compile it dynamically
|
| 480 |
+
when dynamic=True; this can improve trace-time performance.
|
| 481 |
+
|
| 482 |
+
This has lower precedence than mark_dynamic.
|
| 483 |
+
|
| 484 |
+
Unlike mark_dynamic, this can be done inside a graph, in which case it
|
| 485 |
+
induces specialization on the tensor.
|
| 486 |
+
|
| 487 |
+
For nn.Module classes
|
| 488 |
+
=====================
|
| 489 |
+
For static nn.Module classes, TorchDynamo assumes that the module instance
|
| 490 |
+
attributes will not be modified after compilation. This will ensure that
|
| 491 |
+
TorchDynamo keeps integer attributes CONSTANT and not symints.
|
| 492 |
+
|
| 493 |
+
From TorchDynamo implementation side, the instances of static-marked
|
| 494 |
+
nn.Module class will be converted to UnspecializedBuiltinNNModuleVariable,
|
| 495 |
+
which have the same properties.
|
| 496 |
+
|
| 497 |
+
Note that we still have to guard on the attributes, because different
|
| 498 |
+
instances of the nn.Module can have different values of the attributes. The
|
| 499 |
+
key point here is that the attributes are static.
|
| 500 |
+
"""
|
| 501 |
+
if is_compiling():
|
| 502 |
+
if index is None:
|
| 503 |
+
for s in t.size():
|
| 504 |
+
comptime.force_static(s)
|
| 505 |
+
else:
|
| 506 |
+
comptime.force_static(t.size(index))
|
| 507 |
+
return
|
| 508 |
+
|
| 509 |
+
if is_traceable_wrapper_subclass(t):
|
| 510 |
+
# default behavior: mirror mark_static() on all inner tensors with same dim as t
|
| 511 |
+
# TODO: Make this configurable via a supported public API
|
| 512 |
+
_apply_func_to_inner_tensors_of_same_dim(mark_static, t, index)
|
| 513 |
+
|
| 514 |
+
if not isinstance(t, torch.Tensor) and issubclass(t, torch.nn.Module):
|
| 515 |
+
t._dynamo_marked_static = True
|
| 516 |
+
return t
|
| 517 |
+
|
| 518 |
+
if not isinstance(t, torch.Tensor):
|
| 519 |
+
raise TypeError(
|
| 520 |
+
f"mark_static expects a tensor/nn.Module class but recieved {type(t)}"
|
| 521 |
+
)
|
| 522 |
+
|
| 523 |
+
if isinstance(index, int):
|
| 524 |
+
if not hasattr(t, "_dynamo_static_indices"):
|
| 525 |
+
t._dynamo_static_indices = set() # type: ignore[attr-defined]
|
| 526 |
+
# TODO(voz): Should we bounds check?
|
| 527 |
+
t._dynamo_static_indices.add(index) # type: ignore[attr-defined]
|
| 528 |
+
elif index is None:
|
| 529 |
+
for i in range(t.dim()):
|
| 530 |
+
mark_static(t, i)
|
| 531 |
+
else:
|
| 532 |
+
assert isinstance(index, (list, tuple))
|
| 533 |
+
for i in index:
|
| 534 |
+
mark_static(t, i)
|
| 535 |
+
|
| 536 |
+
|
| 537 |
+
@forbid_in_graph
|
| 538 |
+
def mark_static_address(t, guard=True):
    """
    Marks an input tensor whose data_ptr will not change across multiple calls
    to a dynamo-compiled function. This indicates to cudagraphs that an extra
    allocation is not needed for this input. The data_ptr will be guarded if
    guard=True. Note: Tensors marked in this way will be kept alive until
    `torch._dynamo.reset()` is called.

    Raises:
        TypeError: if ``t`` is not a tensor.
    """
    if not isinstance(t, torch.Tensor):
        # Typo fix: "recieved" -> "received"
        raise TypeError(f"mark_static_address expects a tensor but received {type(t)}")

    t._dynamo_static_input_type = "guarded" if guard else "unguarded"  # type: ignore[attr-defined]
|
| 552 |
+
|
| 553 |
+
|
| 554 |
+
# Note: this carefully avoids eagerly import einops.
|
| 555 |
+
# TODO: we should delete this whole _allow_in_graph_einops logic by approximately 2024 Q2
|
| 556 |
+
def _allow_in_graph_einops():
|
| 557 |
+
import einops
|
| 558 |
+
|
| 559 |
+
try:
|
| 560 |
+
# requires einops > 0.6.1, torch >= 2.0
|
| 561 |
+
from einops._torch_specific import ( # type: ignore[attr-defined] # noqa: F401
|
| 562 |
+
_ops_were_registered_in_torchdynamo,
|
| 563 |
+
)
|
| 564 |
+
|
| 565 |
+
# einops > 0.6.1 will call the op registration logic as it is imported.
|
| 566 |
+
except ImportError:
|
| 567 |
+
# einops <= 0.6.1
|
| 568 |
+
allow_in_graph(einops.rearrange)
|
| 569 |
+
allow_in_graph(einops.reduce)
|
| 570 |
+
if hasattr(einops, "repeat"):
|
| 571 |
+
allow_in_graph(einops.repeat) # available since einops 0.2.0
|
| 572 |
+
if hasattr(einops, "einsum"):
|
| 573 |
+
allow_in_graph(einops.einsum) # available since einops 0.5.0
|
| 574 |
+
if hasattr(einops, "pack"):
|
| 575 |
+
allow_in_graph(einops.pack) # available since einops 0.6.0
|
| 576 |
+
if hasattr(einops, "unpack"):
|
| 577 |
+
allow_in_graph(einops.unpack) # available since einops 0.6.0
|
| 578 |
+
|
| 579 |
+
|
| 580 |
+
trace_rules.add_module_init_func("einops", _allow_in_graph_einops)
|
pllava/lib/python3.10/site-packages/torch/_dynamo/device_interface.py
ADDED
|
@@ -0,0 +1,330 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import inspect
|
| 3 |
+
from typing import Any, Callable, Dict, Iterable, Optional, Tuple, Type, Union
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from torch._streambase import _EventBase, _StreamBase
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
get_cuda_stream: Optional[Callable[[int], int]]
|
| 10 |
+
if torch.cuda._is_compiled():
|
| 11 |
+
from torch._C import _cuda_getCurrentRawStream as get_cuda_stream
|
| 12 |
+
else:
|
| 13 |
+
get_cuda_stream = None
|
| 14 |
+
|
| 15 |
+
_device_t = Union[torch.device, str, int, None]
|
| 16 |
+
|
| 17 |
+
# Recording the device properties in the main process but used in worker process.
|
| 18 |
+
caching_worker_device_properties: Dict[str, Any] = {}
|
| 19 |
+
caching_worker_current_devices: Dict[str, int] = {}
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class DeviceInterfaceMeta(type):
    """Metaclass that validates DeviceInterface subclasses at class-creation
    time: if a subclass defines ``Event``/``Stream`` members, they must be
    classes deriving from ``_EventBase``/``_StreamBase`` respectively."""

    def __new__(metacls, *args, **kwargs):
        namespace = args[2]
        if "Event" in namespace:
            event_cls = namespace["Event"]
            assert inspect.isclass(event_cls) and issubclass(
                event_cls, _EventBase
            ), "DeviceInterface member Event should be inherit from _EventBase"
        if "Stream" in namespace:
            stream_cls = namespace["Stream"]
            assert inspect.isclass(stream_cls) and issubclass(
                stream_cls, _StreamBase
            ), "DeviceInterface member Stream should be inherit from _StreamBase"
        return super().__new__(metacls, *args, **kwargs)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class DeviceInterface(metaclass=DeviceInterfaceMeta):
    """
    This is a simple device runtime interface for Inductor. It enables custom
    backends to be integrated with Inductor in a device-agnostic semantic.
    """

    class device:
        # Placeholder for the backend's device context object; concrete
        # interfaces replace this attribute (e.g. CudaInterface binds
        # torch.cuda.device).
        def __new__(cls, device: _device_t):
            raise NotImplementedError

    class Worker:
        """
        Worker API to query device properties that will work in multi processing
        workers that cannot use the GPU APIs (due to processing fork() and
        initialization time issues). Properties are recorded in the main process
        before we fork the workers.
        """

        @staticmethod
        def set_device(device: int):
            raise NotImplementedError

        @staticmethod
        def current_device() -> int:
            raise NotImplementedError

        @staticmethod
        def get_device_properties(device: _device_t = None):
            raise NotImplementedError

    # Everything below is an abstract stub; concrete backends override these,
    # typically by binding the corresponding torch.<backend> functions
    # (see CudaInterface, which uses staticmethod(torch.cuda.*)).

    @staticmethod
    def current_device():
        raise NotImplementedError

    @staticmethod
    def set_device(device: _device_t):
        raise NotImplementedError

    @staticmethod
    def maybe_exchange_device(device: int) -> int:
        raise NotImplementedError

    @staticmethod
    def exchange_device(device: int) -> int:
        raise NotImplementedError

    @staticmethod
    def device_count():
        raise NotImplementedError

    @staticmethod
    def is_available() -> bool:
        raise NotImplementedError

    @staticmethod
    def stream(stream: torch.Stream):
        raise NotImplementedError

    @staticmethod
    def current_stream():
        raise NotImplementedError

    @staticmethod
    def set_stream(stream: torch.Stream):
        raise NotImplementedError

    @staticmethod
    def _set_stream_by_id(stream_id: int, device_index: int, device_type: int):
        raise NotImplementedError

    @staticmethod
    def get_raw_stream(device_idx: int) -> int:
        raise NotImplementedError

    @staticmethod
    def synchronize(device: _device_t = None):
        raise NotImplementedError

    @staticmethod
    def get_device_properties(device: _device_t = None):
        raise NotImplementedError

    @staticmethod
    def get_compute_capability(device: _device_t = None):
        raise NotImplementedError

    @staticmethod
    def is_bf16_supported(including_emulation: bool = False):
        raise NotImplementedError
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
class DeviceGuard:
    """
    This class provides a context manager for device switching. This is a stripped
    down version of torch.{device_name}.device.

    The context manager changes the current device to the given device index
    on entering the context and restores the original device on exiting.
    The device is switched using the provided device interface.
    """

    def __init__(
        self, device_interface: Type[DeviceInterface], index: Optional[int]
    ) -> None:
        self.device_interface = device_interface
        # index is None => the guard is a no-op on enter and exit.
        self.idx = index
        # -1 sentinel: no device has been exchanged yet.
        self.prev_idx = -1

    def __enter__(self):
        if self.idx is not None:
            # Remember the device that was current so __exit__ can restore it.
            self.prev_idx = self.device_interface.exchange_device(self.idx)

    def __exit__(self, type: Any, value: Any, traceback: Any):
        if self.idx is not None:
            # Restore the previous device; the return value (the device that
            # was current inside the block) is stashed back into self.idx.
            self.idx = self.device_interface.maybe_exchange_device(self.prev_idx)
        # Never suppress exceptions raised inside the with-block.
        return False
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
class CudaInterface(DeviceInterface):
    """DeviceInterface implementation backed by torch.cuda (and ROCm/HIP)."""

    device = torch.cuda.device

    # register Event and Stream class into the backend interface
    # make sure Event and Stream are implemented and inherited from the _EventBase and _StreamBase
    Event = torch.cuda.Event
    Stream = torch.cuda.Stream

    class Worker:
        """Process-local caching variants used by compile workers."""

        @staticmethod
        def set_device(device: int):
            # Record the device in the worker-local cache instead of touching
            # the real CUDA runtime.
            caching_worker_current_devices["cuda"] = device

        @staticmethod
        def current_device() -> int:
            # Prefer the cached value set via Worker.set_device.
            if "cuda" in caching_worker_current_devices:
                return caching_worker_current_devices["cuda"]
            return torch.cuda.current_device()

        @staticmethod
        def get_device_properties(device: _device_t = None):
            # Normalize str / torch.device / int / None down to an int index.
            if device is not None:
                if isinstance(device, str):
                    device = torch.device(device)
                    assert device.type == "cuda"
                if isinstance(device, torch.device):
                    device = device.index
            if device is None:
                device = CudaInterface.Worker.current_device()

            # Populate the per-process properties cache once for all devices.
            if "cuda" not in caching_worker_device_properties:
                device_prop = [
                    torch.cuda.get_device_properties(i)
                    for i in range(torch.cuda.device_count())
                ]
                caching_worker_device_properties["cuda"] = device_prop

            return caching_worker_device_properties["cuda"][device]

    current_device = staticmethod(torch.cuda.current_device)
    set_device = staticmethod(torch.cuda.set_device)
    device_count = staticmethod(torch.cuda.device_count)
    stream = staticmethod(torch.cuda.stream)  # type: ignore[assignment]
    current_stream = staticmethod(torch.cuda.current_stream)
    set_stream = staticmethod(torch.cuda.set_stream)  # type: ignore[assignment]
    _set_stream_by_id = staticmethod(torch.cuda._set_stream_by_id)  # type: ignore[assignment]
    synchronize = staticmethod(torch.cuda.synchronize)
    get_device_properties = staticmethod(torch.cuda.get_device_properties)  # type: ignore[assignment]
    get_raw_stream = staticmethod(get_cuda_stream)  # type: ignore[assignment, arg-type]
    exchange_device = staticmethod(torch.cuda._exchange_device)  # type: ignore[arg-type]
    maybe_exchange_device = staticmethod(torch.cuda._maybe_exchange_device)  # type: ignore[arg-type]
    is_bf16_supported = staticmethod(torch.cuda.is_bf16_supported)  # type: ignore[arg-type]

    # Can be mock patched by @patch decorator.
    @staticmethod
    def is_available() -> bool:
        return torch.cuda.is_available()

    @staticmethod
    def get_compute_capability(device: _device_t = None):
        # CUDA: encode (major, minor) as a single int, e.g. (8, 6) -> 86.
        # (Renamed the second element from `min` to `minor`: it shadowed the
        # builtin min().)
        if torch.version.hip is None:
            major, minor = torch.cuda.get_device_capability(device)
            return major * 10 + minor
        else:
            # ROCm: return the gcn arch name (e.g. "gfx90a"), dropping any
            # ":xnack"-style feature suffix.
            return torch.cuda.get_device_properties(device).gcnArchName.split(":", 1)[0]
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
# Raw-stream getter for XPU; only available when PyTorch was built with
# XPU support, otherwise left as None (XpuInterface.get_raw_stream then
# binds staticmethod(None), which would fail only if actually called).
get_xpu_stream: Optional[Callable[[int], int]]
if torch.xpu._is_compiled():
    from torch._C import _xpu_getCurrentRawStream as get_xpu_stream
else:
    get_xpu_stream = None
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
class XpuInterface(DeviceInterface):
    """DeviceInterface implementation backed by torch.xpu (Intel GPUs)."""

    device = torch.xpu.device
    Event = torch.xpu.Event
    Stream = torch.xpu.Stream

    class Worker:
        # Process-local caching variants mirroring CudaInterface.Worker.
        @staticmethod
        def set_device(device: int):
            caching_worker_current_devices["xpu"] = device

        @staticmethod
        def current_device() -> int:
            # Prefer the cached value set via Worker.set_device.
            if "xpu" in caching_worker_current_devices:
                return caching_worker_current_devices["xpu"]
            return torch.xpu.current_device()

        @staticmethod
        def get_device_properties(device: _device_t = None):
            # Normalize str / torch.device / int / None down to an int index.
            if device is not None:
                if isinstance(device, str):
                    device = torch.device(device)
                    assert device.type == "xpu"
                if isinstance(device, torch.device):
                    device = device.index
            if device is None:
                device = XpuInterface.Worker.current_device()

            # Populate the per-process properties cache once for all devices.
            if "xpu" not in caching_worker_device_properties:
                device_prop = [
                    torch.xpu.get_device_properties(i)
                    for i in range(torch.xpu.device_count())
                ]
                caching_worker_device_properties["xpu"] = device_prop

            return caching_worker_device_properties["xpu"][device]

    current_device = staticmethod(torch.xpu.current_device)
    set_device = staticmethod(torch.xpu.set_device)
    device_count = staticmethod(torch.xpu.device_count)
    stream = staticmethod(torch.xpu.stream)  # type: ignore[assignment]
    current_stream = staticmethod(torch.xpu.current_stream)
    set_stream = staticmethod(torch.xpu.set_stream)  # type: ignore[assignment]
    _set_stream_by_id = staticmethod(torch.xpu._set_stream_by_id)  # type: ignore[assignment]
    synchronize = staticmethod(torch.xpu.synchronize)
    get_device_properties = staticmethod(torch.xpu.get_device_properties)  # type: ignore[assignment]
    get_raw_stream = staticmethod(get_xpu_stream)  # type: ignore[assignment, arg-type]
    exchange_device = staticmethod(torch.xpu._exchange_device)  # type: ignore[arg-type]
    maybe_exchange_device = staticmethod(torch.xpu._maybe_exchange_device)  # type: ignore[arg-type]

    # Can be mock patched by @patch decorator.
    @staticmethod
    def is_available() -> bool:
        return torch.xpu.is_available()

    @staticmethod
    def get_compute_capability(device: _device_t = None):
        # Unlike CudaInterface, the capability is returned as-is (not folded
        # into a single int).
        cc = torch.xpu.get_device_capability(device)
        return cc

    @staticmethod
    def is_bf16_supported(including_emulation: bool = False) -> bool:
        # including_emulation is accepted for interface compatibility but
        # ignored by the xpu backend.
        return torch.xpu.is_bf16_supported()
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
# Registry mapping device strings ("cuda", "cuda:0", "xpu", ...) to their
# DeviceInterface class; populated lazily by init_device_reg().
device_interfaces: Dict[str, Type[DeviceInterface]] = {}
# Guards one-time lazy initialization of the registry above.
_device_initialized = False
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
def register_interface_for_device(
    device: Union[str, torch.device], device_interface: Type[DeviceInterface]
):
    """Register ``device_interface`` as the handler for ``device``.

    ``device`` may be a device string or a torch.device; it is stored under
    its string form.
    """
    key = str(device) if isinstance(device, torch.device) else device
    device_interfaces[key] = device_interface
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
def get_interface_for_device(device: Union[str, torch.device]) -> Type[DeviceInterface]:
    """Return the registered DeviceInterface class for ``device``.

    Lazily initializes the registry on first use; raises NotImplementedError
    for unknown devices.
    """
    key = str(device) if isinstance(device, torch.device) else device
    if not _device_initialized:
        init_device_reg()
    interface = device_interfaces.get(key)
    if interface is None:
        raise NotImplementedError(f"No interface for device {key}")
    return interface
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
def get_registered_device_interfaces() -> Iterable[Tuple[str, Type[DeviceInterface]]]:
    # Return (device string, interface class) pairs for every registered
    # device, initializing the registry lazily on first call.
    if not _device_initialized:
        init_device_reg()
    return device_interfaces.items()
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
def init_device_reg():
    """Populate the device-interface registry for all known backends.

    Registers the bare backend name plus one "name:i" entry per visible
    device, then marks the registry initialized.
    """
    global _device_initialized
    for name, interface, count in (
        ("cuda", CudaInterface, torch.cuda.device_count()),
        ("xpu", XpuInterface, torch.xpu.device_count()),
    ):
        register_interface_for_device(name, interface)
        for i in range(count):
            register_interface_for_device(f"{name}:{i}", interface)

    _device_initialized = True
|
pllava/lib/python3.10/site-packages/torch/_dynamo/distributed.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional
|
| 2 |
+
|
| 3 |
+
import torch.distributed as dist
|
| 4 |
+
|
| 5 |
+
from . import config
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
# Lazily-created process group shared by compiler-collective participants;
# see get_compile_pg() below.
_COMPILE_PG: Optional[dist.ProcessGroup] = None
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def get_compile_pg() -> Optional[dist.ProcessGroup]:
    """Return the process group used for compiler collectives, or None.

    Returns None when compiler collectives are disabled in config or when
    torch.distributed is unavailable/uninitialized; otherwise creates the
    tagged group once and caches it in the module-level _COMPILE_PG.
    """
    enabled = (
        config.enable_compiler_collectives
        and dist.is_available()
        and dist.is_initialized()
    )
    if not enabled:
        return None

    global _COMPILE_PG
    if _COMPILE_PG is None:
        # , timeout=datetime.timedelta(seconds=2)
        _COMPILE_PG = dist.distributed_c10d._new_group_with_tag(
            pg_tag="pt2_compile_pg"
        )
    return _COMPILE_PG
|
pllava/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py
ADDED
|
@@ -0,0 +1,1717 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
# mypy: disable-error-code="method-assign"
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
Functions in this file are responsible for modifying the eval frame
|
| 6 |
+
handler at RUNTIME. Therefore, all functions in this file are hot.
|
| 7 |
+
Functions that only execute at compile time should be placed
|
| 8 |
+
in torch._dynamo.convert_frame.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from __future__ import annotations
|
| 12 |
+
|
| 13 |
+
import contextlib
|
| 14 |
+
import functools
|
| 15 |
+
import inspect
|
| 16 |
+
import logging
|
| 17 |
+
import os
|
| 18 |
+
import sys
|
| 19 |
+
import textwrap
|
| 20 |
+
import traceback
|
| 21 |
+
import types
|
| 22 |
+
import warnings
|
| 23 |
+
import weakref
|
| 24 |
+
from enum import Enum
|
| 25 |
+
from os.path import dirname, join
|
| 26 |
+
from typing import (
|
| 27 |
+
Any,
|
| 28 |
+
Callable,
|
| 29 |
+
Dict,
|
| 30 |
+
List,
|
| 31 |
+
NamedTuple,
|
| 32 |
+
Optional,
|
| 33 |
+
Set,
|
| 34 |
+
Tuple,
|
| 35 |
+
TYPE_CHECKING,
|
| 36 |
+
Union,
|
| 37 |
+
)
|
| 38 |
+
from unittest.mock import patch
|
| 39 |
+
|
| 40 |
+
import sympy
|
| 41 |
+
|
| 42 |
+
import torch
|
| 43 |
+
import torch.fx
|
| 44 |
+
import torch.utils._pytree as pytree
|
| 45 |
+
import torch.utils.checkpoint
|
| 46 |
+
from torch import _guards
|
| 47 |
+
|
| 48 |
+
# see discussion at https://github.com/pytorch/pytorch/issues/120699
|
| 49 |
+
from torch._C._dynamo.eval_frame import ( # noqa: F401
|
| 50 |
+
reset_code,
|
| 51 |
+
set_guard_error_hook,
|
| 52 |
+
skip_code,
|
| 53 |
+
unsupported,
|
| 54 |
+
)
|
| 55 |
+
from torch._dispatch.python import enable_python_dispatcher
|
| 56 |
+
from torch._subclasses.fake_tensor import unset_fake_temporarily
|
| 57 |
+
from torch._utils_internal import justknobs_check, log_export_usage
|
| 58 |
+
from torch.export.dynamic_shapes import _combine_args, _process_dynamic_shapes
|
| 59 |
+
from torch.fx import GraphModule
|
| 60 |
+
from torch.fx.experimental.proxy_tensor import make_fx
|
| 61 |
+
from torch.fx.experimental.symbolic_shapes import (
|
| 62 |
+
ConstraintViolationError,
|
| 63 |
+
DimDynamic,
|
| 64 |
+
ShapeEnv,
|
| 65 |
+
StatelessSymbolicContext,
|
| 66 |
+
)
|
| 67 |
+
from torch.fx.graph import _PyTreeCodeGen, _PyTreeInfo
|
| 68 |
+
|
| 69 |
+
from . import config, convert_frame, external_utils, trace_rules, utils
|
| 70 |
+
from .backends.registry import CompilerFn, lookup_backend
|
| 71 |
+
from .code_context import code_context
|
| 72 |
+
from .exc import CondOpArgsMismatchError, UserError, UserErrorType
|
| 73 |
+
from .hooks import Hooks
|
| 74 |
+
from .mutation_guard import install_generation_tagging_init
|
| 75 |
+
from .utils import common_constant_types, compile_times
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
if TYPE_CHECKING:
|
| 79 |
+
from torch._subclasses import fake_tensor
|
| 80 |
+
|
| 81 |
+
from .types import CacheEntry, DynamoCallback
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
log = logging.getLogger(__name__)


# Code objects that should always be optimized, keyed weakly by identity.
always_optimize_code_objects = utils.ExactWeakKeyDictionary()
# Alias kept for the default backend_ctx_ctor argument below.
null_context = contextlib.nullcontext
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
# See https://github.com/python/typing/pull/240
class Unset(Enum):
    """Single-member enum used as a typed sentinel for "not yet set"."""

    token = 0
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
# Backends seen by _TorchDynamoContext, keyed by id(), so torch._dynamo.reset
# can call reset() on each (see _reset_guarded_backend_cache).
cached_backends: Dict[int, CompilerFn] = {}

# The module-wide sentinel instance of Unset.
unset = Unset.token
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def _maybe_set_eval_frame(callback: DynamoCallback):
    """Install ``callback`` as the eval-frame handler, gated by a Justknob.

    When the killswitch is off, the frame handler is left untouched and the
    callback itself is returned (so callers still get a "prior" value).
    """
    # A wrapper on set_eval_frame that is guarded by a Justknob.
    # Users can disable torchDynamo by setting the JK to False.
    from torch._C._dynamo.eval_frame import set_eval_frame

    if justknobs_check("pytorch/compiler:enable_compiler_set_eval_frame"):
        return set_eval_frame(callback)

    torch._dynamo.utils.warn_once(
        "Dynamo disabled by Justknob: enable_compiler_set_eval_frame, skipping set_eval_frame"
    )
    return callback
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def _reset_guarded_backend_cache():
    """Call reset() on every cached backend (if it has one), then clear the cache."""
    global cached_backends
    for backend in cached_backends.values():
        reset = getattr(backend, "reset", None)
        if callable(reset):
            reset()
    cached_backends.clear()
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
# Source files whose functions must not be wrapped by dynamo's context.
DONT_WRAP_FILES = {
    # For tracing into fx modules
    inspect.getsourcefile(GraphModule),
    join(dirname(dirname(__file__)), "onnx/_internal/fx/dynamo_graph_extractor.py"),
}
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def _debug_get_cache_entry_list(
    code: Union[types.CodeType, Callable[..., Any]]
) -> List[CacheEntry]:
    """
    Given a code object or a callable object, retrieve the cache entries
    stored in this code.
    """
    code_obj = code.__code__ if callable(code) else code
    return torch._C._dynamo.eval_frame._debug_get_cache_entry_list(code_obj)
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
class OptimizedModule(torch.nn.Module):
    """
    Wraps the original nn.Module object and later patches its
    forward method to optimized self.forward method.
    """

    _torchdynamo_orig_callable: Callable[..., Any]
    get_compiler_config: Callable[[], Any]

    # Attribute names stored on the wrapper itself; everything else is
    # forwarded to the wrapped module (see __setattr__/__getattr__).
    _opt_mod_attributes = {
        "_orig_mod",
        "dynamo_ctx",
        "_torchdynamo_orig_callable",
        "get_compiler_config",
        "forward",
        "_forward",
        "__dict__",
        "named_children_walk",
    }

    def __init__(self, mod: torch.nn.Module, dynamo_ctx) -> None:
        super().__init__()
        # Installs the params/buffer
        self._orig_mod = mod
        self.dynamo_ctx = dynamo_ctx
        self._initialize()
        self.training = self._orig_mod.training

    def _initialize(self):
        """Pick and install the wrapped forward; also called by __setstate__."""
        # Do this stuff in constructor to lower overhead slightly
        if isinstance(self.dynamo_ctx, DisableContext):
            # No need to check trace rules
            self.forward = self.dynamo_ctx(self._orig_mod.__call__)
        elif isinstance(self._orig_mod.forward, types.MethodType) and (
            trace_rules.check(self._orig_mod.forward)
            or getattr(self._orig_mod, "_is_fsdp_managed_module", False)
        ):
            # This may be a torch.nn.* instance in trace_rules.py which
            # won't trigger a frame evaluation workaround to add an extra
            # frame we can capture
            self.forward = self.dynamo_ctx(external_utils.wrap_inline(self._orig_mod))
        else:
            # Invoke hooks outside of dynamo then pickup the inner frame
            self.forward = self.dynamo_ctx(self._orig_mod.__call__)

        if hasattr(self._orig_mod, "_initialize_hook"):
            # Lazy module: route calls through _call_lazy_check first.
            self._forward = self.forward
            self.forward = self._call_lazy_check

    def __reduce__(self):
        # Pickle as (class, ctor args); forward is rebuilt on load.
        return (self.__class__, (self._orig_mod, self.dynamo_ctx))

    def __getstate__(self):
        # Drop the (unpicklable) patched callables; __setstate__ restores them.
        state = dict(self.__dict__)
        state.pop("forward", None)
        state.pop("__call__", None)
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        self._initialize()

    @property
    def training(self):
        # training always mirrors the wrapped module's flag.
        return self._orig_mod.training

    @training.setter
    def training(self, value):
        try:
            # Probe whether _orig_mod has been installed yet (nn.Module's
            # __getattr__ raises AttributeError during early construction).
            super().__getattr__("_orig_mod")
            self._orig_mod.training = value
        except AttributeError:
            # still initializing
            pass

    def __getattr__(self, name):
        if name == "_orig_mod":
            return self._modules["_orig_mod"]
        # Everything else delegates to the wrapped module.
        return getattr(self._orig_mod, name)

    def __setattr__(self, name, val) -> None:
        # Allow patching over class attributes
        if hasattr(type(self), name):
            return super().__setattr__(name, val)

        if name in OptimizedModule._opt_mod_attributes:
            return super().__setattr__(name, val)
        # Everything else is set on the wrapped module.
        return setattr(self._orig_mod, name, val)

    def _call_lazy_check(self, *args, **kwargs):
        if hasattr(self._orig_mod, "_initialize_hook"):
            # In the case of a lazy module, we want to run
            # the pre-hooks which initialize it.
            # Afterwards, lazy module deletes its pre-hooks
            # to avoid treating it as lazy on subsequent recompile.
            self._orig_mod._infer_parameters(self._orig_mod, args, kwargs)
        return self._forward(*args, **kwargs)

    def __dir__(self):
        # Merge the wrapped module's attributes with the wrapper's own,
        # keeping the wrapped module's ordering first.
        orig_mod_attrs = self._orig_mod.__dir__()
        return orig_mod_attrs + [
            attr for attr in super().__dir__() if attr not in orig_mod_attrs
        ]
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
def remove_from_cache(f):
    """
    Make sure f.__code__ is not cached to force a recompile
    """
    if isinstance(f, types.CodeType):
        reset_code(f)
        return
    if hasattr(f, "__code__"):
        reset_code(f.__code__)
        return
    fwd = getattr(f, "forward", None)
    if hasattr(fwd, "__code__"):
        reset_code(fwd.__code__)
        return
    # Last resort: wipe everything and complain.
    from . import reset  # type: ignore[attr-defined]

    reset()
    log.warning("could not determine __code__ for %s", f)
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
def nothing():
    """No-op; default value for on_enter/patch_fn hooks below."""
    pass
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
def always_false():
    """Constant-False predicate placeholder."""
    return False
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
def innermost_fn(fn):
    """
    In case of nesting of _TorchDynamoContext calls, find the innermost
    function. TorchDynamo caches on fn.__code__ object, so its necessary to find
    the innermost function to pass on the optimize, run, disable etc.
    """
    _missing = object()
    current = fn
    # Walk the chain of wrappers until the attribute is absent.
    while (
        inner := getattr(current, "_torchdynamo_orig_callable", _missing)
    ) is not _missing:
        current = inner
    assert callable(current)
    return current
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
def make_set_enable_dynamic(enable: bool):
    """Return a config patcher toggling dynamic-shape assumptions.

    enable=True assumes everything dynamic; enable=False forces static
    shapes and disables automatic dynamic shapes.
    """
    assert isinstance(enable, bool)
    if enable:
        # Assume everything is dynamic by default
        overrides = {"assume_static_by_default": False}
    else:
        overrides = {
            "automatic_dynamic_shapes": False,
            "assume_static_by_default": True,
        }
    return config._make_closure_patcher(**overrides)
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
class _TorchDynamoContext:
    """Base context manager / decorator that installs a dynamo callback.

    Instances can be used either as a context manager (discouraged — see
    ``__enter__``) or as a decorator via ``__call__``, which wraps a function,
    ``nn.Module``, or class so that its frames are evaluated with ``callback``
    installed in the eval-frame hook.
    """

    def __init__(
        self,
        callback: DynamoCallback,
        on_enter=nothing,
        backend_ctx_ctor=null_context,
        patch_fn=nothing,
        first_ctx=False,
        *,
        export=False,
        dynamic=None,
        compiler_config=None,
    ) -> None:
        super().__init__()
        # callback may be a callable (compile), False (run-only) or None (disable)
        assert callable(callback) or callback is False or callback is None
        self.callback: DynamoCallback = callback
        self._backend_ctx_ctor = backend_ctx_ctor
        self.prior: Union[Unset, DynamoCallback] = unset
        self.first_ctx = first_ctx
        self.export = export
        self._dynamic = dynamic
        self.compiler_config = compiler_config
        self.cleanup_fns: List[Callable[[], Any]] = []
        # Each hook is a zero-arg callable run on enter; it returns the
        # corresponding cleanup callable to run on exit.
        self.enter_exit_hooks = []
        patch_fn()

        # Save the backends so that we can reset them during torch._dynamo.reset
        backend = innermost_fn(callback)
        cached_backends.setdefault(id(backend), backend)

        if dynamic is not None:
            self.enter_exit_hooks.append(make_set_enable_dynamic(dynamic))

        if on_enter is not nothing:
            # this case is not common
            def call_on_enter():
                on_enter()
                return nothing

            self.enter_exit_hooks.append(call_on_enter)

        if backend_ctx_ctor is not contextlib.nullcontext:
            # this case is not common
            def call_backend_ctx():
                ctx = backend_ctx_ctor()
                ctx.__enter__()
                return functools.partial(ctx.__exit__, None, None, None)

            self.enter_exit_hooks.append(call_backend_ctx)

    def __enter__(self):
        # Context-manager usage is deliberately rejected when configured;
        # decorator usage via __call__ is the supported path.
        if config.raise_on_ctx_manager_usage:
            raise RuntimeError(
                "torch._dynamo.optimize(...) is used with a context manager. "
                "Please refer to https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html "
                "to use torch._dynamo.optimize(...) as an annotation/decorator. "
            )
        self.cleanup_fns = [enter() for enter in self.enter_exit_hooks]
        self.prior = _maybe_set_eval_frame(self.callback)

    def __exit__(self, exc_type, exc_val, exc_tb):
        assert self.prior is not unset
        # Restore the previous eval-frame callback, then run cleanups.
        _maybe_set_eval_frame(self.prior)
        self.prior = unset
        for cleanup in self.cleanup_fns:
            cleanup()
        self.cleanup_fns.clear()

    def __call__(self, fn):
        """Wrap *fn* (callable / nn.Module / class) for dynamo-optimized execution."""

        # public api for compiler config/options
        def get_compiler_config():
            return self.compiler_config

        fn = innermost_fn(fn)

        # add context containing GraphModule to any GraphModule forward functions
        if isinstance(fn, GraphModule):
            # add context containing GraphModule to any GraphModule forward functions
            code_context.get_context(fn.forward.__code__)[
                "orig_graphmodule"
            ] = weakref.ref(fn)

        # Optimize the forward method of torch.nn.Module object
        if isinstance(fn, torch.nn.Module):
            mod = fn
            new_mod = OptimizedModule(mod, self)
            # Save the function pointer to find the original callable while nesting
            # of decorators.
            new_mod._torchdynamo_orig_callable = mod.forward

            # when compiling torch.nn.Module,
            # provide public api OptimizedModule.get_compiler_config()
            assert not hasattr(new_mod, "get_compiler_config")
            new_mod.get_compiler_config = get_compiler_config

            return new_mod

        if inspect.isclass(fn):
            # User has wrapped the class with compile/disable decorator. Apply
            # disable to init/call method.
            cls_obj = fn
            cls_obj.__call__ = self(cls_obj.__call__)
            if issubclass(cls_obj, torch.nn.Module):
                # NN module variable tracker directly inlines the _call_impl.
                cls_obj._call_impl = self(cls_obj._call_impl)
            return cls_obj

        assert callable(fn)

        try:
            filename = inspect.getsourcefile(fn)
        except TypeError:
            filename = None
        if (
            (filename is None or trace_rules.check(fn))
            and (
                getattr(fn, "__name__", "")
                not in ["_call_impl", "_wrapped_call_impl", "_lazy_forward"]
            )
            and filename not in DONT_WRAP_FILES
        ):
            # call to a builtin without a frame for us to capture
            fn = external_utils.wrap_inline(fn)

        def do_nothing(*arg, **kwargs):
            pass

        if hasattr(self, "callback"):
            callback = self.callback
        else:
            callback = do_nothing

        # Bind these lookups once outside the wrapper (hot path).
        is_jit_tracing = torch._C._is_tracing
        is_fx_tracing = torch.fx._symbolic_trace.is_fx_tracing

        @functools.wraps(fn)
        def _fn(*args, **kwargs):
            # Nested symbolic/jit tracing of a dynamo-optimized function is
            # either rejected (configurable) or falls back to eager `fn`.
            if is_fx_tracing():
                if config.error_on_nested_fx_trace:
                    raise RuntimeError(
                        "Detected that you are using FX to symbolically trace "
                        "a dynamo-optimized function. This is not supported at the moment."
                    )
                else:
                    return fn(*args, **kwargs)

            if is_jit_tracing():
                if config.error_on_nested_jit_trace:
                    raise RuntimeError(
                        "Detected that you are using FX to torch.jit.trace "
                        "a dynamo-optimized function. This is not supported at the moment."
                    )
                else:
                    return fn(*args, **kwargs)

            cleanups = [enter() for enter in self.enter_exit_hooks]
            prior = _maybe_set_eval_frame(callback)

            # Ensure that if an assertion occurs after graph pushes
            # something onto the DynamicLayerStack then we pop it off (the
            # constructed graph code isn't guarded with try/finally).
            #
            # This used to be a context but putting a `with` here is a noticible
            # perf regression (#126293)
            saved_dynamic_layer_stack_depth = (
                torch._C._functorch.get_dynamic_layer_stack_depth()
            )

            try:
                return fn(*args, **kwargs)
            finally:
                # Restore the dynamic layer stack depth if necessary.
                torch._C._functorch.pop_dynamic_layer_stack_and_undo_to_depth(
                    saved_dynamic_layer_stack_depth
                )

                _maybe_set_eval_frame(prior)
                for cleanup in cleanups:
                    cleanup()

        # hooks to properly handle inlining
        _fn._torchdynamo_inline = fn  # type: ignore[attr-defined]

        # Save the function pointer to find the original callable while nesting
        # of decorators.
        _fn._torchdynamo_orig_callable = fn  # type: ignore[attr-defined]

        # when compiling user function instead of nn.Module
        # provide public api _fn.get_compiler_config()
        assert not hasattr(_fn, "get_compiler_config")
        _fn.get_compiler_config = get_compiler_config  # type: ignore[attr-defined]

        # If the function is called using torch._dynamo.optimize decorator, we
        # should prevent any type of skipping.
        if callback not in (None, False):
            if not hasattr(fn, "__code__"):
                raise RuntimeError(
                    textwrap.dedent(
                        """

                        torch._dynamo.optimize is called on a non function object.
                        If this is a callable class, please wrap the relevant code into a function and optimize the
                        wrapper function.

                        >> class CallableClass:
                        >>     def __init__(self) -> None:
                        >>         super().__init__()
                        >>         self.relu = torch.nn.ReLU()
                        >>
                        >>     def __call__(self, x):
                        >>         return self.relu(torch.sin(x))
                        >>
                        >>     def print_hello(self):
                        >>         print("Hello world")
                        >>
                        >> mod = CallableClass()

                        If you want to optimize the __call__ function and other code, wrap that up in a function

                        >> def wrapper_fn(x):
                        >>     y = mod(x)
                        >>     return y.sum()

                        and then optimize the wrapper_fn

                        >> opt_wrapper_fn = torch._dynamo.optimize(wrapper_fn)
                        """
                    )
                )
            always_optimize_code_objects[fn.__code__] = True

        return _fn
class OptimizeContext(_TorchDynamoContext):
    """Context returned by ``torch._dynamo.optimize``: compile via *callback*.

    Adds generation tagging on enter and, when compiled autograd is enabled in
    config, an enter/exit hook that turns on compiled autograd using a freshly
    rebuilt compiler from *rebuild_ctx*.
    """

    def __init__(
        self,
        callback,
        backend_ctx_ctor,
        first_ctx=False,
        *,
        export=False,
        dynamic=None,
        compiler_config=None,
        rebuild_ctx: Optional[
            Callable[[], Union[OptimizeContext, _NullDecorator]]
        ] = None,
    ) -> None:
        def on_enter():
            install_generation_tagging_init()

        super().__init__(
            callback=callback,
            on_enter=on_enter,
            backend_ctx_ctor=backend_ctx_ctor,
            patch_fn=TorchPatcher.patch,
            first_ctx=first_ctx,
            export=export,
            dynamic=dynamic,
            compiler_config=compiler_config,
        )

        if config.compiled_autograd:

            def call_compiled_autograd():
                # rebuild_ctx is required when compiled autograd is on; it
                # recreates the compiler so autograd uses a fresh instance.
                assert rebuild_ctx is not None
                compiler_fn = rebuild_ctx()
                ctx = torch._dynamo.compiled_autograd.enable(compiler_fn)
                ctx.__enter__()
                return functools.partial(ctx.__exit__, None, None, None)

            self.enter_exit_hooks.append(call_compiled_autograd)

    def __reduce__(self):
        # Support pickling by reconstructing from constructor arguments.
        return (
            self.__class__,
            (self.callback, self._backend_ctx_ctor, self.first_ctx),
            {
                "export": self.export,
                "dynamic": self._dynamic,
                "compiler_config": self.compiler_config,
            },
        )
class RunOnlyContext(_TorchDynamoContext):
    """Context that runs previously-compiled code only (callback=False)."""

    def __init__(self) -> None:
        # cudagraph trees relies on generation increment
        def on_enter():
            torch._dynamo.mutation_guard.GenerationTracker.generation += 1

        super().__init__(callback=False, on_enter=on_enter)

    def __reduce__(self):
        # Stateless: pickle reconstructs with no arguments.
        return (self.__class__, ())
class DisableContext(_TorchDynamoContext):
    """Context that disables dynamo entirely for the wrapped target (callback=None)."""

    def __init__(self) -> None:
        super().__init__(callback=None)

    def __call__(self, fn):
        # Earlier this code was in the base class _TorchDynamoContext. But we
        # moved it here to have better code organization. For disable, we just
        # want the callback to be None. We don't have to check trace_rules or
        # create any wrapper.
        fn = innermost_fn(fn)

        if isinstance(fn, torch.nn.Module):
            mod = fn
            new_mod = OptimizedModule(mod, self)
            # Keep a pointer to the original forward for decorator nesting.
            new_mod._torchdynamo_orig_callable = mod.forward
            return new_mod

        if inspect.isclass(fn):
            # User has wrapped the class with compile/disable decorator. Apply
            # disable to init/call method.
            cls_obj = fn
            # Disable on init is useful for reconstruction of bytecodes where we
            # want to prevent Dynamo from tracing into the init function. Check
            # test_reconstruction in test_model_output.py.
            cls_obj.__init__ = self(cls_obj.__init__)
            cls_obj.__call__ = self(cls_obj.__call__)
            if issubclass(cls_obj, torch.nn.Module):
                # NN module variable tracker directly inlines the _call_impl. Disable it.
                cls_obj._call_impl = self(cls_obj._call_impl)
            return cls_obj

        assert callable(fn)

        callback = self.callback

        @functools.wraps(fn)
        def _fn(*args, **kwargs):
            # Install the None callback (disable) for the duration of the call.
            prior = _maybe_set_eval_frame(callback)
            try:
                return fn(*args, **kwargs)
            finally:
                _maybe_set_eval_frame(prior)

        _fn._torchdynamo_disable = True  # type: ignore[attr-defined]

        # Save the function pointer to find the original callable while nesting
        # of decorators.
        _fn._torchdynamo_orig_callable = fn  # type: ignore[attr-defined]

        return _fn

    def __reduce__(self):
        # Stateless: pickle reconstructs with no arguments.
        return (self.__class__, ())
def _optimize_catch_errors(
    compile_fn,
    hooks: Hooks,
    backend_ctx_ctor=null_context,
    export=False,
    dynamic=None,
    compiler_config=None,
    rebuild_ctx=None,
):
    """Build an OptimizeContext whose callback wraps *compile_fn* with error catching.

    ``convert_frame.catch_errors_wrapper`` handles compile-time failures so
    they surface consistently instead of crashing frame evaluation.
    """
    return OptimizeContext(
        convert_frame.catch_errors_wrapper(compile_fn, hooks),
        backend_ctx_ctor=backend_ctx_ctor,
        first_ctx=True,
        export=export,
        dynamic=dynamic,
        compiler_config=compiler_config,
        rebuild_ctx=rebuild_ctx,
    )
def get_compiler_fn(compiler_fn):
    """Resolve *compiler_fn* (callable or backend-name string) to a debug-wrapped backend.

    The human-readable name is taken from a ``compiler_name`` attribute when
    present, otherwise from the string itself; it is passed to the repro
    machinery for minifier/debug output.
    """
    from .repro.after_dynamo import wrap_backend_debug

    if hasattr(compiler_fn, "compiler_name"):
        name = compiler_fn.compiler_name
    else:
        # Plain strings are their own name; anything else has no name.
        name = compiler_fn if isinstance(compiler_fn, str) else None
    return wrap_backend_debug(lookup_backend(compiler_fn), name)
class _NullDecorator(contextlib.nullcontext): # type: ignore[type-arg]
|
| 682 |
+
def __call__(self, fn):
|
| 683 |
+
assert callable(fn)
|
| 684 |
+
return fn
|
| 685 |
+
|
| 686 |
+
|
| 687 |
+
def check_if_dynamo_supported():
    """Raise ``RuntimeError`` if this interpreter cannot run dynamo.

    Currently the only hard requirement is a Python version below 3.13.
    """
    too_new = sys.version_info >= (3, 13)
    if too_new:
        raise RuntimeError("Python 3.13+ not yet supported for torch.compile")
def is_dynamo_supported():
    """Return True iff ``check_if_dynamo_supported`` does not raise."""
    try:
        check_if_dynamo_supported()
    except Exception:
        return False
    return True
def check_if_inductor_supported():
    """Raise if inductor cannot run here; currently identical to the dynamo check."""
    check_if_dynamo_supported()
def is_inductor_supported():
    """Return True iff ``check_if_inductor_supported`` does not raise."""
    try:
        check_if_inductor_supported()
    except Exception:
        return False
    return True
def optimize(*args, **kwargs):
    """Public wrapper around ``_optimize`` that supplies a self-rebuilding context.

    ``rebuild_ctx`` lets machinery such as compiled autograd recreate an
    equivalent optimize context (same args) on demand.
    """

    def rebuild_ctx():
        return optimize(*args, **kwargs)

    return _optimize(rebuild_ctx, *args, **kwargs)
def _optimize(
    rebuild_ctx: Callable[[], Union[OptimizeContext, _NullDecorator]],
    backend="inductor",
    *,
    nopython=False,
    guard_export_fn=None,
    guard_fail_fn=None,
    disable=False,
    dynamic=None,
) -> Union[OptimizeContext, _NullDecorator]:
    """
    The main entrypoint of TorchDynamo. Do graph capture and call
    backend() to optimize extracted graphs.

    Args:
        backend: One of the two things:
            - Either, a function/callable taking a torch.fx.GraphModule and
            example_inputs and returning a python callable that runs the
            graph faster.
            One can also provide additional context for the backend, like
            torch.jit.fuser("fuser2"), by setting the backend_ctx_ctor attribute.
            See AOTAutogradMemoryEfficientFusionWithContext for the usage.
            - Or, a string backend name in `torch._dynamo.list_backends()`
        nopython: If True, graph breaks will be errors and there will
            be a single whole-program graph.
        disable: If True, turn this decorator into a no-op
        dynamic: If True, upfront compile as dynamic a kernel as possible. If False,
            disable all dynamic shapes support (always specialize). If None, automatically
            detect when sizes vary and generate dynamic kernels upon recompile.

    Example Usage::

        @torch._dynamo.optimize()
        def toy_example(a, b):
            ...
    """
    check_if_dynamo_supported()
    # Note: The hooks object could be global instead of passed around, *however* that would make
    # for a confusing API usage and plumbing story wherein we nest multiple .optimize calls.
    # There is some prior art around this, w/r/t nesting backend calls are enforced to be the same
    # compiler, however, this feels onerous for callback and hooks, and it feels better to give our users an
    # easier to understand UX at the cost of a little more plumbing on our end.
    hooks = Hooks(guard_export_fn=guard_export_fn, guard_fail_fn=guard_fail_fn)
    torch._C._log_api_usage_once("torch._dynamo.optimize")
    # Dynamo can be switched off via argument, env var, or killswitch.
    if (
        disable
        or os.environ.get("TORCHDYNAMO_DISABLE", "") == "1"
        or (not justknobs_check("pytorch/compiler:enable_dynamo"))
    ):
        return _NullDecorator()

    backend = get_compiler_fn(backend)

    # Find if backend has any extra context manager
    backend_ctx_ctor = getattr(backend, "backend_ctx_ctor", null_context)

    if nopython:
        return optimize_assert(
            backend,
            dynamic=dynamic,
            hooks=hooks,
            rebuild_ctx=rebuild_ctx,
        )
    # The backend function is stashed in the callable returned by
    # _optimize_catch_errors in the field _torchdynamo_orig_callable. This can
    # be used by eval_frame.c to insert a guard on the backend.
    return _optimize_catch_errors(
        convert_frame.convert_frame(backend, hooks=hooks),
        hooks,
        backend_ctx_ctor,
        dynamic=dynamic,
        compiler_config=backend.get_compiler_config()
        if hasattr(backend, "get_compiler_config")
        else None,
        rebuild_ctx=rebuild_ctx,
    )
# TODO(voz): Consider making "explain" output alongside a run / part of a run
@patch("torch._dynamo.symbolic_convert.explain", True)
def explain(f, *extra_args, **extra_kwargs):
    """Run *f* under dynamo with an accumulating compiler and report what happened.

    Returns an ``ExplainOutput`` summarizing captured graphs, graph breaks,
    op counts, guards and compile times. Calling with extra args
    (``explain(f, *args)``) is deprecated in favor of ``explain(f)(*args)``.
    """

    def inner(*args, **kwargs):
        # TODO(voz): Do we want a decorator for this?
        from . import reset  # type: ignore[attr-defined]

        reset()

        graphs: List[torch.fx.GraphModule] = []
        break_reasons: List[Any] = []
        op_count: int = 0
        ops_per_graph: List[torch.fx.Node] = []
        out_guards: List[_guards.Guard] = []

        def dynamo_graph_accumulating_compiler(
            gm: torch.fx.GraphModule, example_inputs
        ):
            from .backends.debugging import _explain_graph_detail

            nonlocal graphs
            nonlocal op_count
            nonlocal ops_per_graph
            nonlocal break_reasons

            # Fold this graph's stats into the accumulators.
            gm, graphs, op_count, ops_per_graph, break_reasons = _explain_graph_detail(
                gm, graphs, op_count, ops_per_graph, break_reasons
            )

            # Run the captured graph eagerly — explain only observes.
            return gm.forward

        def guard_export_print(guards):
            nonlocal out_guards
            out_guards.extend(guards)

        opt_f = optimize(
            dynamo_graph_accumulating_compiler,
            nopython=False,
            guard_export_fn=guard_export_print,
        )(f)
        # TODO(voz): We may have instances of `f` that mutate inputs, we should track sideeffects and reject.
        opt_f(*args, **kwargs)

        graph_count = len(graphs)
        # Each break splits the program into one more graph.
        graph_break_count = graph_count - 1
        compile_time = compile_times(repr="str")

        # TODO(voz): Do we want a decorator for this?
        reset()
        from .backends.debugging import ExplainOutput

        return ExplainOutput(
            graphs,
            graph_count,
            graph_break_count,
            break_reasons,
            op_count,
            ops_per_graph,
            out_guards,
            compile_time,
        )

    if extra_args or extra_kwargs:
        warnings.warn(
            "explain(f, *args, **kwargs) is deprecated, use explain(f)(*args, **kwargs) instead. "
            "If you don't migrate, we may break your explain call in the future if your user defined kwargs "
            "conflict with future kwargs added to explain(f).",
            FutureWarning,
            stacklevel=2,
        )
        return inner(*extra_args, **extra_kwargs)
    else:
        return inner
class FlattenInputOutputSignature(torch.fx.interpreter.Transformer):
    """FX transformer that rewrites a dynamo-captured graph to a flat signature.

    Produces one placeholder per flattened user input (``arg0..argN``) and an
    output matching the user's flattened results, mapping dynamo's matched
    input/output positions onto them and propagating node metadata.
    """

    def __init__(
        self,
        m: torch.fx.GraphModule,
        flat_args: Tuple[Any],
        matched_input_elements_positions: List[int],
        flat_results: List[Any],
        matched_output_elements_positions: List[int],
        example_fake_inputs: List[torch.Tensor],
        flat_args_dynamic_dims: List[Set[int]],
        fake_mode: Optional[fake_tensor.FakeTensorMode] = None,
    ) -> None:
        super().__init__(m)

        assert len(flat_args_dynamic_dims) == len(flat_args)
        # Map flat-arg index -> fake tensor for inputs the graph actually uses.
        matched_input_elements_to_fake = {
            val: example_fake_inputs[ix]
            for ix, val in enumerate(matched_input_elements_positions)
        }

        self.new_args = []
        for i in range(0, len(flat_args)):
            arg = super().placeholder(f"arg{i}", (), {})
            if i in matched_input_elements_to_fake:
                arg.node.meta["val"] = matched_input_elements_to_fake[i]
            else:
                # Fill node.mata["val"] with faketensor from the input,
                # if it's not found in matched_input_elements_positions
                if fake_mode is not None and isinstance(flat_args[i], torch.Tensor):
                    # TODO(zhxchen17) Also preserve all the user constraints here.
                    arg.node.meta["val"] = fake_mode.from_tensor(
                        flat_args[i],
                        symbolic_context=StatelessSymbolicContext(
                            dynamic_sizes=[
                                DimDynamic.DYNAMIC
                                if d in flat_args_dynamic_dims[i]
                                else DimDynamic.STATIC
                                for d in range(len(flat_args[i].shape))
                            ],
                            constraint_sizes=[None] * len(flat_args[i].shape),
                        ),
                    )
            self.new_args.append(arg)
        # Generator yielding the new placeholders in the order the old
        # placeholders are visited.
        self.old_args_gen = (self.new_args[i] for i in matched_input_elements_positions)
        self.matched_output_elements_positions = matched_output_elements_positions
        self.flat_results = flat_results

    def placeholder(self, target, args, kwargs):
        # Replace each original placeholder with the corresponding new arg,
        # carrying over metadata from the node being visited.
        arg = next(self.old_args_gen)
        if "val" in self.current_node.meta:
            arg.node.meta["val"] = self.current_node.meta["val"]
        if "tensor_dict" in self.current_node.meta:
            arg.node.meta["tensor_dict"] = self.current_node.meta["tensor_dict"]
        if "example_value" in self.current_node.meta:
            # NB: intentionally do not use set_example_value
            arg.node.meta["example_value"] = self.current_node.meta["example_value"]
        if "unbacked_bindings" in self.current_node.meta:
            arg.node.meta["unbacked_bindings"] = self.current_node.meta[
                "unbacked_bindings"
            ]
        return arg

    def output(self, target, args, kwargs):
        # Rebuild the flat output: positions index into [graph results, inputs];
        # None positions mean the user returned a constant directly.
        dynamo_result_flat = args[0]
        lookup = [*dynamo_result_flat, *self.new_args]
        new_results_flat = []
        for i in range(len(self.flat_results)):
            if self.matched_output_elements_positions[i] is not None:
                new_results_flat.append(
                    lookup[self.matched_output_elements_positions[i]]
                )
            else:
                const_val = self.flat_results[i]
                assert isinstance(const_val, tuple(common_constant_types))
                new_results_flat.append(const_val)
        return super().output(target, (new_results_flat,), {})

    def run_node(self, n):
        # Track the node being transformed and propagate its metadata and name
        # onto the produced proxy.
        self.current_node = n
        result_proxy = super().run_node(n)
        if "val" in self.current_node.meta:
            result_proxy.node.meta["val"] = self.current_node.meta["val"]
        if "example_value" in self.current_node.meta:
            # NB: intentionally do not use set_example_value
            result_proxy.node.meta["example_value"] = self.current_node.meta[
                "example_value"
            ]
        if "unbacked_bindings" in self.current_node.meta:
            result_proxy.node.meta["unbacked_bindings"] = self.current_node.meta[
                "unbacked_bindings"
            ]
        if self.current_node.op != "output":
            result_proxy.node._rename(
                getattr(self.current_node, "name", result_proxy.node.name)
            )
        return result_proxy

    def transform(self):
        result_gm = super().transform()
        # Preserve the flat-name -> original-FQN mapping on the result module.
        if "dynamo_flat_name_to_original_fqn" in self.module.meta:
            result_gm.meta["dynamo_flat_name_to_original_fqn"] = self.module.meta[
                "dynamo_flat_name_to_original_fqn"
            ]
        return result_gm
class ExportResult(NamedTuple):
    """Result of a dynamo export: the captured graph plus its guard set."""

    graph_module: torch.fx.GraphModule
    guards: _guards.GuardsSet
    # NB: Do not add new fields without overriding __iter__; people are
    # destructuring so it is BC-breaking
def check_signature_rewritable(graph):
    """Raise UserError if *graph* references tensors that cannot be rewritten as inputs.

    Each placeholder carries a ``_dynamo_source``; any source that has recorded
    user stacks corresponds to a tensor that is neither a parameter/buffer/
    constant nor a direct input. All offenders are collected and reported
    together with the most useful available access stack.
    """
    input_errors = []
    for node in graph.graph.find_nodes(op="placeholder"):
        assert hasattr(node, "_dynamo_source")
        source = node._dynamo_source
        user_stacks = graph._source_to_user_stacks.get(source)
        if user_stacks is None:
            continue
        assert len(user_stacks) > 0
        # In some cases we may not have a useful stack. Look for a
        # useful stack
        stack = None
        for s in user_stacks:
            if len(s) == 0:
                continue
            stack = s
            break
        if stack is None:
            msg = f"{source.name()}, a closed over free variable"
        else:
            tb = "".join(traceback.format_list(stack))
            extra = ""
            if len(user_stacks) > 1:
                extra = f"(elided {len(user_stacks) - 1} more accesses)"
            msg = f"{source.name()}, accessed at:\n{tb}{extra}"
        # TODO: option to print ALL of the stack traces at once
        input_errors.append(msg)

    if input_errors:
        raise UserError(
            UserErrorType.INVALID_INPUT,
            "Cannot export model which references tensors that are neither "
            "buffers/parameters/constants nor are direct inputs. For each tensor, if you'd "
            "like this tensor to be an explicit input, add it as a dummy argument "
            "to the top-level model definition you are exporting; if you would "
            "like its value to be embedded as an exported constant, wrap its access "
            "in a function marked with @assume_constant_result.\n\n"
            + "\n\n".join(input_errors),
        )
def rewrite_signature(
|
| 1027 |
+
f_sig,
|
| 1028 |
+
graph,
|
| 1029 |
+
fake_mode,
|
| 1030 |
+
flat_args,
|
| 1031 |
+
in_spec,
|
| 1032 |
+
example_fake_inputs,
|
| 1033 |
+
graph_captured_input,
|
| 1034 |
+
graph_captured_output,
|
| 1035 |
+
dynamo_traced_result,
|
| 1036 |
+
flat_args_dynamic_dims,
|
| 1037 |
+
):
|
| 1038 |
+
orig_args, orig_kwargs = pytree.tree_unflatten(flat_args, in_spec)
|
| 1039 |
+
|
| 1040 |
+
def check_user_input_output(flat_values, error_type):
|
| 1041 |
+
supported_types = [
|
| 1042 |
+
torch.Tensor,
|
| 1043 |
+
torch.SymInt,
|
| 1044 |
+
torch.SymFloat,
|
| 1045 |
+
torch.SymBool,
|
| 1046 |
+
torch._C.ScriptObject,
|
| 1047 |
+
] + list(common_constant_types)
|
| 1048 |
+
|
| 1049 |
+
def is_supported_type(val):
|
| 1050 |
+
return isinstance(val, tuple(supported_types))
|
| 1051 |
+
|
| 1052 |
+
value_type = "input" if error_type == UserErrorType.INVALID_INPUT else "output"
|
| 1053 |
+
# We only check that the outputs are not None. Inputs can be None.
|
| 1054 |
+
for v in flat_values:
|
| 1055 |
+
if not is_supported_type(v):
|
| 1056 |
+
if error_type == UserErrorType.INVALID_INPUT and v is None:
|
| 1057 |
+
continue
|
| 1058 |
+
|
| 1059 |
+
raise UserError(
|
| 1060 |
+
error_type,
|
| 1061 |
+
f"It looks like one of the {value_type}s with type `{type(v)}` "
|
| 1062 |
+
"is not supported or pytree-flattenable. \n"
|
| 1063 |
+
f"Exported graphs {value_type}s can only contain the "
|
| 1064 |
+
f"following supported types: {supported_types}. \n"
|
| 1065 |
+
"If you are using a custom class object, "
|
| 1066 |
+
"please register a pytree_flatten/unflatten function "
|
| 1067 |
+
"using `torch.utils._pytree.register_pytree_node` or "
|
| 1068 |
+
"`torch.export.register_dataclass`.",
|
| 1069 |
+
)
|
| 1070 |
+
|
| 1071 |
+
check_user_input_output(flat_args, UserErrorType.INVALID_INPUT)
|
| 1072 |
+
flat_results_traced, out_spec_traced = pytree.tree_flatten(dynamo_traced_result)
|
| 1073 |
+
check_user_input_output(flat_results_traced, UserErrorType.INVALID_OUTPUT)
|
| 1074 |
+
|
| 1075 |
+
def check_optional_input_and_error(f_sig: inspect.Signature):
|
| 1076 |
+
# Check if function has optional input.
|
| 1077 |
+
for name, param in f_sig.parameters.items():
|
| 1078 |
+
if param.default is not inspect.Parameter.empty:
|
| 1079 |
+
from torch._dynamo.exc import Unsupported
|
| 1080 |
+
|
| 1081 |
+
log.error(
|
| 1082 |
+
"Parameter %s is optional with a default value of %s",
|
| 1083 |
+
name,
|
| 1084 |
+
param.default,
|
| 1085 |
+
)
|
| 1086 |
+
raise Unsupported(
|
| 1087 |
+
"Tracing through optional input is not supported yet",
|
| 1088 |
+
case_name="optional_input",
|
| 1089 |
+
)
|
| 1090 |
+
|
| 1091 |
+
def produce_matching(debug_type, sources, candidates):
|
| 1092 |
+
matched_elements_positions: List[Optional[int]] = []
|
| 1093 |
+
dict_of_source_vals = {}
|
| 1094 |
+
for i, val in enumerate(sources):
|
| 1095 |
+
dict_of_source_vals[id(val)] = i
|
| 1096 |
+
|
| 1097 |
+
for i, val in enumerate(candidates):
|
| 1098 |
+
if isinstance(val, tuple(common_constant_types)):
|
| 1099 |
+
matched_elements_positions.append(None)
|
| 1100 |
+
elif id(val) not in dict_of_source_vals:
|
| 1101 |
+
if debug_type == "inputs":
|
| 1102 |
+
check_optional_input_and_error(f_sig)
|
| 1103 |
+
raise AssertionError(
|
| 1104 |
+
f"Unexpectedly found a {type(val)} in the {debug_type}.\n"
|
| 1105 |
+
'Please file an issue along with a paste of the logs from TORCH_LOGS="+export"',
|
| 1106 |
+
)
|
| 1107 |
+
else:
|
| 1108 |
+
matched_elements_positions.append(dict_of_source_vals[id(val)])
|
| 1109 |
+
|
| 1110 |
+
return matched_elements_positions
|
| 1111 |
+
|
| 1112 |
+
matched_input_elements_positions = produce_matching(
|
| 1113 |
+
"inputs", flat_args, graph_captured_input
|
| 1114 |
+
)
|
| 1115 |
+
|
| 1116 |
+
assert graph_captured_output is not None
|
| 1117 |
+
matched_output_elements_positions = produce_matching(
|
| 1118 |
+
"outputs", list(graph_captured_output) + flat_args, flat_results_traced
|
| 1119 |
+
)
|
| 1120 |
+
|
| 1121 |
+
new_graph = FlattenInputOutputSignature(
|
| 1122 |
+
graph,
|
| 1123 |
+
flat_args,
|
| 1124 |
+
matched_input_elements_positions,
|
| 1125 |
+
flat_results_traced,
|
| 1126 |
+
matched_output_elements_positions,
|
| 1127 |
+
example_fake_inputs,
|
| 1128 |
+
flat_args_dynamic_dims,
|
| 1129 |
+
fake_mode,
|
| 1130 |
+
).transform()
|
| 1131 |
+
|
| 1132 |
+
# Make dynamo graph to have same input/output spec as user code
|
| 1133 |
+
def argument_names(f_sig, args, kwargs) -> List[str]:
|
| 1134 |
+
def signature_to_fullargspec(sig: inspect.Signature):
|
| 1135 |
+
# Get a list of Parameter objects from the Signature object
|
| 1136 |
+
params = list(sig.parameters.values())
|
| 1137 |
+
# Separate positional arguments, keyword-only arguments and varargs/varkw
|
| 1138 |
+
args = [
|
| 1139 |
+
p.name
|
| 1140 |
+
for p in params
|
| 1141 |
+
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
|
| 1142 |
+
]
|
| 1143 |
+
kwonlyargs = [
|
| 1144 |
+
p.name for p in params if p.kind == inspect.Parameter.KEYWORD_ONLY
|
| 1145 |
+
]
|
| 1146 |
+
varargs = next(
|
| 1147 |
+
(p.name for p in params if p.kind == inspect.Parameter.VAR_POSITIONAL),
|
| 1148 |
+
None,
|
| 1149 |
+
)
|
| 1150 |
+
varkw = next(
|
| 1151 |
+
(p.name for p in params if p.kind == inspect.Parameter.VAR_KEYWORD),
|
| 1152 |
+
None,
|
| 1153 |
+
)
|
| 1154 |
+
# Get default values for positional arguments and keyword-only arguments
|
| 1155 |
+
defaults = tuple(
|
| 1156 |
+
p.default
|
| 1157 |
+
for p in params
|
| 1158 |
+
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
|
| 1159 |
+
and p.default is not inspect.Parameter.empty
|
| 1160 |
+
)
|
| 1161 |
+
kwonlydefaults = {
|
| 1162 |
+
p.name: p.default
|
| 1163 |
+
for p in params
|
| 1164 |
+
if p.kind == inspect.Parameter.KEYWORD_ONLY
|
| 1165 |
+
and p.default is not inspect.Parameter.empty
|
| 1166 |
+
}
|
| 1167 |
+
# Get annotations for parameters and return value
|
| 1168 |
+
annotations = {}
|
| 1169 |
+
if sig.return_annotation:
|
| 1170 |
+
annotations = {"return": sig.return_annotation}
|
| 1171 |
+
for parameter in params:
|
| 1172 |
+
annotations[parameter.name] = parameter.annotation
|
| 1173 |
+
# Return a FullArgSpec object with the extracted attributes
|
| 1174 |
+
return inspect.FullArgSpec(
|
| 1175 |
+
args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations
|
| 1176 |
+
)
|
| 1177 |
+
|
| 1178 |
+
fullargspec = signature_to_fullargspec(f_sig)
|
| 1179 |
+
|
| 1180 |
+
# 1. Map `args` 1-to-1 to positional arguments in original signature.
|
| 1181 |
+
input_strs = fullargspec.args[: len(args)]
|
| 1182 |
+
|
| 1183 |
+
if len(args) > len(fullargspec.args):
|
| 1184 |
+
# 2. If there are more arguments left in `args`, they map to varargs in original
|
| 1185 |
+
# signature. Assign names as {varargs}_0, {varargs}_1, ...
|
| 1186 |
+
assert fullargspec.varargs is not None, "More arguments than expected"
|
| 1187 |
+
input_strs += [
|
| 1188 |
+
f"{fullargspec.varargs}_{i}"
|
| 1189 |
+
for i in range(0, len(args) - len(input_strs))
|
| 1190 |
+
]
|
| 1191 |
+
elif len(args) < len(fullargspec.args):
|
| 1192 |
+
# 3. If there are fewer arguments in `args` than `fullargspec.args`,
|
| 1193 |
+
# it implies these are arguments either with default values, or provided in
|
| 1194 |
+
# `kwargs`. The former can be safely ignored. Because Dynamo.export does not
|
| 1195 |
+
# export them as part of the function signature. The latter will be handled
|
| 1196 |
+
# in the next step.
|
| 1197 |
+
for unprovided_arg in fullargspec.args[
|
| 1198 |
+
len(args) : -len(fullargspec.defaults or [])
|
| 1199 |
+
]:
|
| 1200 |
+
assert unprovided_arg in kwargs, f"Missing argument {unprovided_arg}"
|
| 1201 |
+
|
| 1202 |
+
# 4. Keyword arguments provided in `kwargs`.
|
| 1203 |
+
input_strs += list(kwargs.keys())
|
| 1204 |
+
|
| 1205 |
+
# 5. Keyword-only arguments with default values if not provided are not exported
|
| 1206 |
+
# as part of the function signature.
|
| 1207 |
+
for kwonly_arg in fullargspec.kwonlyargs:
|
| 1208 |
+
kwonlydefaults = fullargspec.kwonlydefaults or {}
|
| 1209 |
+
assert (
|
| 1210 |
+
kwonly_arg in kwargs or kwonly_arg in kwonlydefaults
|
| 1211 |
+
), f"Missing keyword only argument {kwonly_arg}"
|
| 1212 |
+
|
| 1213 |
+
return input_strs
|
| 1214 |
+
|
| 1215 |
+
new_graph.graph._codegen = _PyTreeCodeGen(
|
| 1216 |
+
_PyTreeInfo(
|
| 1217 |
+
argument_names(f_sig, orig_args, orig_kwargs),
|
| 1218 |
+
in_spec,
|
| 1219 |
+
out_spec_traced,
|
| 1220 |
+
)
|
| 1221 |
+
)
|
| 1222 |
+
new_graph.recompile()
|
| 1223 |
+
return new_graph
|
| 1224 |
+
|
| 1225 |
+
|
| 1226 |
+
def export(
|
| 1227 |
+
f: Callable[..., Any],
|
| 1228 |
+
*extra_args,
|
| 1229 |
+
aten_graph: bool = False,
|
| 1230 |
+
pre_dispatch: bool = False,
|
| 1231 |
+
decomposition_table: Optional[
|
| 1232 |
+
Dict[torch._ops.OpOverload, Callable[..., Any]]
|
| 1233 |
+
] = None,
|
| 1234 |
+
tracing_mode: str = "symbolic",
|
| 1235 |
+
dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any], List[Any]]] = None,
|
| 1236 |
+
assume_static_by_default: bool = False,
|
| 1237 |
+
same_signature: bool = True,
|
| 1238 |
+
disable_constraint_solver: bool = False,
|
| 1239 |
+
prefer_deferred_runtime_asserts_over_guards: bool = False,
|
| 1240 |
+
allow_complex_guards_as_runtime_asserts: bool = False,
|
| 1241 |
+
_log_export_usage: bool = True,
|
| 1242 |
+
**extra_kwargs,
|
| 1243 |
+
) -> Callable[..., ExportResult]:
|
| 1244 |
+
"""
|
| 1245 |
+
Export an input function f to a format that can be executed outside of PyTorch using the FX graph.
|
| 1246 |
+
|
| 1247 |
+
Args:
|
| 1248 |
+
f (callable): A PyTorch function to be exported.
|
| 1249 |
+
|
| 1250 |
+
aten_graph (bool): If True, exports a graph with ATen operators.
|
| 1251 |
+
If False, exports a graph with Python operators. Default is False.
|
| 1252 |
+
|
| 1253 |
+
pre_dispatch (bool): If True, exports a graph with ATen operators,
|
| 1254 |
+
but before any logic in the PyTorch dispatcher has run.
|
| 1255 |
+
This can be useful if you want to apply further transformations on a graph before running it
|
| 1256 |
+
through autograd, autocast, or any other functionalities that are integrated into the dispatcher.
|
| 1257 |
+
This flag is only valid if aten_graph=True is set.
|
| 1258 |
+
Default is False.
|
| 1259 |
+
|
| 1260 |
+
decomposition_table (dict): A dictionary that maps operators to their decomposition functions.
|
| 1261 |
+
Required if aten_graph or tracing_mode is specified. Default is None.
|
| 1262 |
+
|
| 1263 |
+
tracing_mode (str): If "symbolic", turn on dynamic shapes support. Default is "symbolic".
|
| 1264 |
+
|
| 1265 |
+
dynamic_shapes:
|
| 1266 |
+
An optional argument where the type should either be:
|
| 1267 |
+
1) a dict from argument names of ``f`` to their dynamic shape specifications,
|
| 1268 |
+
2) a tuple that specifies dynamic shape specifications for each input in original order.
|
| 1269 |
+
If you are specifying dynamism on keyword args, you will need to pass them in the order that
|
| 1270 |
+
is defined in the original function signature.
|
| 1271 |
+
|
| 1272 |
+
The dynamic shape of a tensor argument can be specified as either
|
| 1273 |
+
(1) a dict from dynamic dimension indices to :func:`Dim` types, where it is
|
| 1274 |
+
not required to include static dimension indices in this dict, but when they are,
|
| 1275 |
+
they should be mapped to None; or (2) a tuple / list of :func:`Dim` types or None,
|
| 1276 |
+
where the :func:`Dim` types correspond to dynamic dimensions, and static dimensions
|
| 1277 |
+
are denoted by None. Arguments that are dicts or tuples / lists of tensors are
|
| 1278 |
+
recursively specified by using mappings or sequences of contained specifications.
|
| 1279 |
+
|
| 1280 |
+
same_signature (bool): If True, rewrite the returned graph's signature to be the same as f.
|
| 1281 |
+
|
| 1282 |
+
disable_constraint_solver (bool): Whether the dim constraint solver must be disabled.
|
| 1283 |
+
|
| 1284 |
+
Returns:
|
| 1285 |
+
A function that given args and kwargs, returns a tuple of (graph, guards)
|
| 1286 |
+
Graph: An FX graph representing the execution of the input PyTorch function with the provided arguments and options.
|
| 1287 |
+
Guards: The guards we accumulated during tracing f above
|
| 1288 |
+
|
| 1289 |
+
Raises:
|
| 1290 |
+
AssertionError: If decomposition_table is specified without setting aten_graph=True,
|
| 1291 |
+
or if graph breaks during tracing in export.
|
| 1292 |
+
|
| 1293 |
+
AssertionError: If Dynamo input and output is not consistent with traced input/output.
|
| 1294 |
+
|
| 1295 |
+
Note - this headerdoc was authored by ChatGPT, with slight modifications by the author.
|
| 1296 |
+
"""
|
| 1297 |
+
if _log_export_usage:
|
| 1298 |
+
log_export_usage(event="export.private_api", flags={"_dynamo"})
|
| 1299 |
+
|
| 1300 |
+
# Deal with "local variable referenced before assignment"
|
| 1301 |
+
_f = f
|
| 1302 |
+
_assume_static_by_default = assume_static_by_default
|
| 1303 |
+
|
| 1304 |
+
def inner(*args, **kwargs):
|
| 1305 |
+
combined_args = _combine_args(_f, args, kwargs)
|
| 1306 |
+
constraints = _process_dynamic_shapes(combined_args, dynamic_shapes)
|
| 1307 |
+
f = _f
|
| 1308 |
+
assume_static_by_default = _assume_static_by_default
|
| 1309 |
+
check_if_dynamo_supported()
|
| 1310 |
+
torch._C._log_api_usage_once("torch._dynamo.export")
|
| 1311 |
+
if decomposition_table is not None:
|
| 1312 |
+
assert (
|
| 1313 |
+
aten_graph
|
| 1314 |
+
), "Specifying a decomposition_table table or tracing mode is illegal without setting aten_graph=True"
|
| 1315 |
+
if pre_dispatch:
|
| 1316 |
+
assert aten_graph, "pre_dispatch=True can only be used when aten_graph=True"
|
| 1317 |
+
f = innermost_fn(f)
|
| 1318 |
+
call_to_inspect = f.forward if isinstance(f, torch.nn.Module) else f
|
| 1319 |
+
original_signature = inspect.signature(call_to_inspect)
|
| 1320 |
+
graph = None
|
| 1321 |
+
out_guards = None
|
| 1322 |
+
graph_captured_input = None
|
| 1323 |
+
graph_captured_result: Optional[Tuple[torch.Tensor, ...]] = None
|
| 1324 |
+
fake_mode = None
|
| 1325 |
+
result_traced = None
|
| 1326 |
+
|
| 1327 |
+
def guard_export_print(guards: _guards.GuardsSet):
|
| 1328 |
+
nonlocal out_guards
|
| 1329 |
+
assert (
|
| 1330 |
+
out_guards is None
|
| 1331 |
+
), "whole graph export entails exactly one guard export"
|
| 1332 |
+
out_guards = guards
|
| 1333 |
+
|
| 1334 |
+
example_inputs = []
|
| 1335 |
+
|
| 1336 |
+
def dynamo_normalization_capturing_compiler(
|
| 1337 |
+
gm: torch.fx.GraphModule, inner_example_inputs
|
| 1338 |
+
):
|
| 1339 |
+
nonlocal graph
|
| 1340 |
+
assert (
|
| 1341 |
+
graph is None
|
| 1342 |
+
), "Tried to emit a second graph during export. Tracing through 'f' must produce a single graph."
|
| 1343 |
+
graph = gm
|
| 1344 |
+
|
| 1345 |
+
nonlocal fake_mode, example_inputs
|
| 1346 |
+
# NB: do NOT pass inner_example_inputs here, we are detecting the
|
| 1347 |
+
# Dynamo allocated fake mode, which should be DISTINCT from a
|
| 1348 |
+
# potential outer ambient fake mode which the user provided.
|
| 1349 |
+
# example_inputs is always the user specified inputs, so they
|
| 1350 |
+
# would have the wrong fake mode attached to them
|
| 1351 |
+
fake_mode = _guards.detect_fake_mode()
|
| 1352 |
+
example_inputs = inner_example_inputs
|
| 1353 |
+
|
| 1354 |
+
def result_capturing_wrapper(*graph_inputs):
|
| 1355 |
+
nonlocal graph_captured_result
|
| 1356 |
+
nonlocal graph_captured_input
|
| 1357 |
+
|
| 1358 |
+
graph_captured_input = graph_inputs
|
| 1359 |
+
assert graph is not None
|
| 1360 |
+
|
| 1361 |
+
named_parameters = dict(graph.named_parameters(remove_duplicate=False))
|
| 1362 |
+
named_buffers = dict(graph.named_buffers(remove_duplicate=False))
|
| 1363 |
+
|
| 1364 |
+
ambient_fake_mode = (
|
| 1365 |
+
_guards.detect_fake_mode(graph_inputs)
|
| 1366 |
+
if _guards.detect_fake_mode(graph_inputs) is not None
|
| 1367 |
+
else fake_mode
|
| 1368 |
+
)
|
| 1369 |
+
|
| 1370 |
+
# We reran fake tensor propagation, but we didn't do
|
| 1371 |
+
# anything with the resulting unbacked SymInts. Drop them
|
| 1372 |
+
# from the pending list.
|
| 1373 |
+
# NB: this is wrong if graph_captured_result has
|
| 1374 |
+
# data-dependent output size!
|
| 1375 |
+
ignore_fresh_unbacked = null_context()
|
| 1376 |
+
if shape_env := ambient_fake_mode.shape_env:
|
| 1377 |
+
ignore_fresh_unbacked = shape_env.ignore_fresh_unbacked_symbols()
|
| 1378 |
+
|
| 1379 |
+
with (
|
| 1380 |
+
ambient_fake_mode
|
| 1381 |
+
), enable_python_dispatcher(), ignore_fresh_unbacked:
|
| 1382 |
+
params_and_buffers = {
|
| 1383 |
+
**named_parameters,
|
| 1384 |
+
**named_buffers,
|
| 1385 |
+
}
|
| 1386 |
+
fake_params_buffers = {}
|
| 1387 |
+
|
| 1388 |
+
for name, value in params_and_buffers.items():
|
| 1389 |
+
fake_params_buffers[name] = ambient_fake_mode.from_tensor(
|
| 1390 |
+
value, static_shapes=True
|
| 1391 |
+
)
|
| 1392 |
+
|
| 1393 |
+
fake_graph_inputs = pytree.tree_map(
|
| 1394 |
+
ambient_fake_mode.from_tensor, graph_inputs
|
| 1395 |
+
)
|
| 1396 |
+
graph_captured_result = torch.func.functional_call(
|
| 1397 |
+
graph, fake_params_buffers, fake_graph_inputs
|
| 1398 |
+
)
|
| 1399 |
+
|
| 1400 |
+
return graph_captured_result
|
| 1401 |
+
|
| 1402 |
+
return result_capturing_wrapper
|
| 1403 |
+
|
| 1404 |
+
# Note: This is needed by rewrite_signature. We need to put it before
|
| 1405 |
+
# optimize_assert since user program may mutate the inputs.
|
| 1406 |
+
flat_args, in_spec = pytree.tree_flatten((args, kwargs))
|
| 1407 |
+
|
| 1408 |
+
remove_from_cache(f)
|
| 1409 |
+
constraint_violation_error = None
|
| 1410 |
+
if tracing_mode != "symbolic":
|
| 1411 |
+
assume_static_by_default = True
|
| 1412 |
+
with config.patch(
|
| 1413 |
+
specialize_int=True,
|
| 1414 |
+
assume_static_by_default=assume_static_by_default,
|
| 1415 |
+
automatic_dynamic_shapes=False,
|
| 1416 |
+
capture_dynamic_output_shape_ops=True,
|
| 1417 |
+
capture_scalar_outputs=True,
|
| 1418 |
+
prefer_deferred_runtime_asserts_over_guards=prefer_deferred_runtime_asserts_over_guards,
|
| 1419 |
+
allow_complex_guards_as_runtime_asserts=allow_complex_guards_as_runtime_asserts,
|
| 1420 |
+
):
|
| 1421 |
+
opt_f = optimize_assert(
|
| 1422 |
+
dynamo_normalization_capturing_compiler,
|
| 1423 |
+
hooks=Hooks(
|
| 1424 |
+
guard_export_fn=guard_export_print,
|
| 1425 |
+
guard_fail_fn=None,
|
| 1426 |
+
),
|
| 1427 |
+
export=True,
|
| 1428 |
+
export_constraints=constraints,
|
| 1429 |
+
)(f)
|
| 1430 |
+
# TODO(voz): We may have instances of `f` that mutate inputs, we should track sideeffects and reject.
|
| 1431 |
+
try:
|
| 1432 |
+
result_traced = opt_f(*args, **kwargs)
|
| 1433 |
+
except ConstraintViolationError as e:
|
| 1434 |
+
constraint_violation_error = e
|
| 1435 |
+
remove_from_cache(f)
|
| 1436 |
+
|
| 1437 |
+
if (
|
| 1438 |
+
not disable_constraint_solver
|
| 1439 |
+
and (shape_env := getattr(fake_mode, "shape_env", None)) is not None
|
| 1440 |
+
and (dim_constraints := shape_env.dim_constraints) is not None
|
| 1441 |
+
and not isinstance(
|
| 1442 |
+
call_to_inspect, (torch._ops.OpOverloadPacket, torch._ops.OpOverload)
|
| 1443 |
+
)
|
| 1444 |
+
and not trace_rules.check(call_to_inspect)
|
| 1445 |
+
):
|
| 1446 |
+
dim_constraints.solve()
|
| 1447 |
+
forced_specializations = dim_constraints.forced_specializations()
|
| 1448 |
+
msg = dim_constraints.prettify_results(
|
| 1449 |
+
original_signature,
|
| 1450 |
+
dynamic_shapes,
|
| 1451 |
+
constraint_violation_error,
|
| 1452 |
+
forced_specializations,
|
| 1453 |
+
)
|
| 1454 |
+
if constraint_violation_error:
|
| 1455 |
+
constraint_violation_error.args = (
|
| 1456 |
+
constraint_violation_error.args[0] + msg,
|
| 1457 |
+
)
|
| 1458 |
+
else:
|
| 1459 |
+
if forced_specializations:
|
| 1460 |
+
constraint_violation_error = ConstraintViolationError(msg)
|
| 1461 |
+
else:
|
| 1462 |
+
log.info(
|
| 1463 |
+
"Summary of dimension constraints:%s",
|
| 1464 |
+
msg,
|
| 1465 |
+
)
|
| 1466 |
+
|
| 1467 |
+
# Error if we have any constraints on static values
|
| 1468 |
+
for k in shape_env.var_to_range.keys():
|
| 1469 |
+
if isinstance(k, sympy.Integer):
|
| 1470 |
+
constraint_violation_error = ConstraintViolationError(
|
| 1471 |
+
f"{''.join(traceback.format_list(shape_env.var_to_stack[k]))}\n"
|
| 1472 |
+
"It appears that you're trying to set a constraint on a "
|
| 1473 |
+
f"value which we evaluated to have a static value of {k}. "
|
| 1474 |
+
'Set TORCH_LOGS="+export" for more information.'
|
| 1475 |
+
)
|
| 1476 |
+
if constraint_violation_error:
|
| 1477 |
+
raise constraint_violation_error
|
| 1478 |
+
|
| 1479 |
+
if graph is None:
|
| 1480 |
+
assert (
|
| 1481 |
+
same_signature
|
| 1482 |
+
), "Failed to produce a graph during tracing as no tensor operations were found and same_signature is False."
|
| 1483 |
+
# If the module does not contain any tensor computation, we would create a graph with inputs and outputs.
|
| 1484 |
+
# To be consitant with the graph traced by dynano, `graph` will have only tensor inputs as placeholders
|
| 1485 |
+
# and tensor outputs as output nodes. non-tensor inputs and outputs will be added when rewriting signature.
|
| 1486 |
+
# We will also construct the `example_inputs`, `graph_captured_input`, and `graph_captured_result` corresponding
|
| 1487 |
+
# to `graph`.
|
| 1488 |
+
example_inputs = []
|
| 1489 |
+
graph_captured_input = ()
|
| 1490 |
+
graph_captured_result = ()
|
| 1491 |
+
fake_mode = torch._subclasses.FakeTensorMode(
|
| 1492 |
+
shape_env=ShapeEnv(), export=True
|
| 1493 |
+
)
|
| 1494 |
+
if out_guards is None:
|
| 1495 |
+
out_guards = _guards.GuardsSet()
|
| 1496 |
+
assert out_guards is not None # suppress mypy error
|
| 1497 |
+
parameter_names = list(original_signature.parameters.keys())
|
| 1498 |
+
fx_graph = torch.fx.Graph()
|
| 1499 |
+
for i, name in enumerate(parameter_names):
|
| 1500 |
+
if torch.is_tensor(flat_args[i]):
|
| 1501 |
+
node = fx_graph.placeholder(name)
|
| 1502 |
+
node.meta["val"] = fake_mode.from_tensor(
|
| 1503 |
+
flat_args[i], static_shapes=True
|
| 1504 |
+
)
|
| 1505 |
+
graph_captured_input = graph_captured_input + (flat_args[i],)
|
| 1506 |
+
example_inputs.append(flat_args[i])
|
| 1507 |
+
fx_graph.output(graph_captured_result)
|
| 1508 |
+
module = torch.nn.Module()
|
| 1509 |
+
graph = torch.fx.GraphModule(module, fx_graph)
|
| 1510 |
+
log.info(
|
| 1511 |
+
"Failed to capture a graph during tracing as no tensor operations were found.:\n\n%s",
|
| 1512 |
+
graph.print_readable(print_output=False, colored=True),
|
| 1513 |
+
)
|
| 1514 |
+
else:
|
| 1515 |
+
assert hasattr(graph, "_source_to_user_stacks")
|
| 1516 |
+
assert out_guards is not None, "Failed to produce guards during tracing"
|
| 1517 |
+
assert fake_mode is not None
|
| 1518 |
+
|
| 1519 |
+
log.info(
|
| 1520 |
+
"Dynamo captured graph:\n\n%s",
|
| 1521 |
+
graph.print_readable(print_output=False, colored=True),
|
| 1522 |
+
)
|
| 1523 |
+
|
| 1524 |
+
# This check need to happened before aten_graph
|
| 1525 |
+
# because placeholder's _source_node attribute is not preserved by make_fx
|
| 1526 |
+
if same_signature:
|
| 1527 |
+
check_signature_rewritable(graph)
|
| 1528 |
+
|
| 1529 |
+
# NB: This is mostly hitting the cache; Dynamo already converted these
|
| 1530 |
+
example_fake_inputs = [fake_mode.from_tensor(t) for t in example_inputs]
|
| 1531 |
+
|
| 1532 |
+
if aten_graph:
|
| 1533 |
+
# Running graph with interpreter is needed for propagating the stack_trace
|
| 1534 |
+
def graph_with_interpreter(*args):
|
| 1535 |
+
with torch.fx.traceback.preserve_node_meta():
|
| 1536 |
+
return torch.fx.Interpreter(graph).run(*args) # type: ignore[arg-type]
|
| 1537 |
+
|
| 1538 |
+
with unset_fake_temporarily(), enable_python_dispatcher(), fake_mode:
|
| 1539 |
+
try:
|
| 1540 |
+
graph = make_fx(
|
| 1541 |
+
graph_with_interpreter,
|
| 1542 |
+
decomposition_table=decomposition_table,
|
| 1543 |
+
tracing_mode="real",
|
| 1544 |
+
_allow_non_fake_inputs=True,
|
| 1545 |
+
pre_dispatch=pre_dispatch,
|
| 1546 |
+
_allow_fake_constant=False,
|
| 1547 |
+
)(*example_fake_inputs)
|
| 1548 |
+
except CondOpArgsMismatchError as e:
|
| 1549 |
+
# Wrap the internal error to the user-facing error
|
| 1550 |
+
raise UserError( # noqa: B904
|
| 1551 |
+
UserErrorType.DYNAMIC_CONTROL_FLOW,
|
| 1552 |
+
str(e),
|
| 1553 |
+
case_name="cond_operands",
|
| 1554 |
+
)
|
| 1555 |
+
|
| 1556 |
+
assert graph is not None
|
| 1557 |
+
for node in graph.graph.find_nodes(op="get_attr"):
|
| 1558 |
+
if isinstance(getattr(graph, node.target), torch.Tensor): # type: ignore[arg-type]
|
| 1559 |
+
node.meta["val"] = fake_mode.from_tensor(
|
| 1560 |
+
getattr(graph, node.target), static_shapes=True # type: ignore[arg-type]
|
| 1561 |
+
)
|
| 1562 |
+
|
| 1563 |
+
if same_signature:
|
| 1564 |
+
flat_args_dynamic_dims = [
|
| 1565 |
+
{
|
| 1566 |
+
c.dim
|
| 1567 |
+
for c in (constraints or ())
|
| 1568 |
+
if (
|
| 1569 |
+
c.t_id == id(x)
|
| 1570 |
+
and c.constraint_range.vr.lower != c.constraint_range.vr.upper
|
| 1571 |
+
)
|
| 1572 |
+
}
|
| 1573 |
+
for x in flat_args
|
| 1574 |
+
]
|
| 1575 |
+
graph = rewrite_signature(
|
| 1576 |
+
original_signature,
|
| 1577 |
+
graph,
|
| 1578 |
+
fake_mode,
|
| 1579 |
+
flat_args,
|
| 1580 |
+
in_spec,
|
| 1581 |
+
example_fake_inputs,
|
| 1582 |
+
graph_captured_input,
|
| 1583 |
+
graph_captured_result,
|
| 1584 |
+
result_traced, # type: ignore[possibly-undefined]
|
| 1585 |
+
flat_args_dynamic_dims,
|
| 1586 |
+
)
|
| 1587 |
+
return ExportResult(graph, out_guards) # type: ignore[arg-type]
|
| 1588 |
+
|
| 1589 |
+
if extra_args or extra_kwargs:
|
| 1590 |
+
warnings.warn(
|
| 1591 |
+
"export(f, *args, **kwargs) is deprecated, use export(f)(*args, **kwargs) instead. "
|
| 1592 |
+
"If you don't migrate, we may break your export call in the future if your user defined kwargs "
|
| 1593 |
+
"conflict with future kwargs added to export(f).",
|
| 1594 |
+
FutureWarning,
|
| 1595 |
+
stacklevel=2,
|
| 1596 |
+
)
|
| 1597 |
+
return inner(*extra_args, **extra_kwargs)
|
| 1598 |
+
else:
|
| 1599 |
+
return inner
|
| 1600 |
+
|
| 1601 |
+
|
| 1602 |
+
def optimize_assert(
|
| 1603 |
+
backend,
|
| 1604 |
+
*,
|
| 1605 |
+
hooks=Hooks(None, None),
|
| 1606 |
+
export=False,
|
| 1607 |
+
export_constraints=None,
|
| 1608 |
+
dynamic=None,
|
| 1609 |
+
rebuild_ctx=None,
|
| 1610 |
+
):
|
| 1611 |
+
"""
|
| 1612 |
+
The same as `torch._dynamo.optimize(backend, nopython=True)`
|
| 1613 |
+
"""
|
| 1614 |
+
backend = get_compiler_fn(backend)
|
| 1615 |
+
|
| 1616 |
+
# Find if backend has any extra context manager
|
| 1617 |
+
backend_ctx_ctor = getattr(backend, "backend_ctx_ctor", null_context)
|
| 1618 |
+
|
| 1619 |
+
return _optimize_catch_errors(
|
| 1620 |
+
convert_frame.convert_frame_assert(
|
| 1621 |
+
backend, export=export, export_constraints=export_constraints
|
| 1622 |
+
),
|
| 1623 |
+
hooks,
|
| 1624 |
+
backend_ctx_ctor,
|
| 1625 |
+
export=export,
|
| 1626 |
+
dynamic=dynamic,
|
| 1627 |
+
rebuild_ctx=rebuild_ctx,
|
| 1628 |
+
)
|
| 1629 |
+
|
| 1630 |
+
|
| 1631 |
+
class TorchPatcher:
|
| 1632 |
+
@staticmethod
|
| 1633 |
+
@functools.lru_cache(None)
|
| 1634 |
+
def patch():
|
| 1635 |
+
# A better way to disable the following would be decorate the source
|
| 1636 |
+
# functions with @torch._disable_dynamo. However, this causes issues
|
| 1637 |
+
# with torch.deploy internally.
|
| 1638 |
+
from .decorators import disable
|
| 1639 |
+
|
| 1640 |
+
torch.jit.trace = disable(torch.jit.trace)
|
| 1641 |
+
torch.jit.trace_module = disable(torch.jit.trace_module)
|
| 1642 |
+
torch.jit._get_trace_graph = disable(torch.jit._get_trace_graph)
|
| 1643 |
+
torch.fx._symbolic_trace.Tracer.trace = disable(
|
| 1644 |
+
torch.fx._symbolic_trace.Tracer.trace
|
| 1645 |
+
)
|
| 1646 |
+
torch.distributions.Distribution.set_default_validate_args(False)
|
| 1647 |
+
|
| 1648 |
+
from torch.optim import (
|
| 1649 |
+
adadelta,
|
| 1650 |
+
adagrad,
|
| 1651 |
+
adam,
|
| 1652 |
+
adamax,
|
| 1653 |
+
adamw,
|
| 1654 |
+
asgd,
|
| 1655 |
+
lbfgs,
|
| 1656 |
+
nadam,
|
| 1657 |
+
radam,
|
| 1658 |
+
rmsprop,
|
| 1659 |
+
rprop,
|
| 1660 |
+
sgd,
|
| 1661 |
+
sparse_adam,
|
| 1662 |
+
)
|
| 1663 |
+
|
| 1664 |
+
optimizer_modules = {
|
| 1665 |
+
adadelta,
|
| 1666 |
+
adagrad,
|
| 1667 |
+
adam,
|
| 1668 |
+
adamax,
|
| 1669 |
+
adamw,
|
| 1670 |
+
asgd,
|
| 1671 |
+
lbfgs,
|
| 1672 |
+
nadam,
|
| 1673 |
+
radam,
|
| 1674 |
+
rmsprop,
|
| 1675 |
+
rprop,
|
| 1676 |
+
sgd,
|
| 1677 |
+
sparse_adam,
|
| 1678 |
+
}
|
| 1679 |
+
|
| 1680 |
+
for opt_mod in optimizer_modules:
|
| 1681 |
+
opt_name = opt_mod.__name__.split(".")[-1]
|
| 1682 |
+
fused_fn_name = f"_fused_{opt_name}"
|
| 1683 |
+
single_tensor_fn_name = f"_single_tensor_{opt_name}"
|
| 1684 |
+
|
| 1685 |
+
if hasattr(opt_mod, fused_fn_name):
|
| 1686 |
+
setattr(
|
| 1687 |
+
opt_mod, fused_fn_name, disable(getattr(opt_mod, fused_fn_name))
|
| 1688 |
+
)
|
| 1689 |
+
|
| 1690 |
+
optimizer_classes = [
|
| 1691 |
+
opt
|
| 1692 |
+
for opt in torch.optim.__dict__.values()
|
| 1693 |
+
if inspect.isclass(opt) and issubclass(opt, torch.optim.Optimizer)
|
| 1694 |
+
]
|
| 1695 |
+
|
| 1696 |
+
# Note: we don't support sparsity or tracing through backwards
|
| 1697 |
+
excluded_optimizer_classes = {
|
| 1698 |
+
torch.optim.SparseAdam,
|
| 1699 |
+
torch.optim.LBFGS,
|
| 1700 |
+
}
|
| 1701 |
+
|
| 1702 |
+
for opt in optimizer_classes:
|
| 1703 |
+
if opt in excluded_optimizer_classes:
|
| 1704 |
+
opt.step = disable(opt.step)
|
| 1705 |
+
|
| 1706 |
+
if hasattr(opt, "_init_group"):
|
| 1707 |
+
opt._init_group = disable(opt._init_group)
|
| 1708 |
+
|
| 1709 |
+
@staticmethod
|
| 1710 |
+
def suppress_torch_distributed_warnings(fn):
|
| 1711 |
+
def inner_fn(*args, **kwargs):
|
| 1712 |
+
warnings.filterwarnings(
|
| 1713 |
+
"ignore", category=UserWarning, module="torch.distributed"
|
| 1714 |
+
)
|
| 1715 |
+
return fn(*args, **kwargs)
|
| 1716 |
+
|
| 1717 |
+
return inner_fn
|
pllava/lib/python3.10/site-packages/torch/_dynamo/exc.py
ADDED
|
@@ -0,0 +1,454 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import os
|
| 3 |
+
import textwrap
|
| 4 |
+
from enum import auto, Enum
|
| 5 |
+
from traceback import extract_stack, format_exc, format_list, StackSummary
|
| 6 |
+
from typing import Any, cast, NoReturn, Optional, Tuple, TYPE_CHECKING
|
| 7 |
+
|
| 8 |
+
import torch._guards
|
| 9 |
+
|
| 10 |
+
from . import config
|
| 11 |
+
from .utils import counters
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
if TYPE_CHECKING:
|
| 15 |
+
from torch._guards import CompileId
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def exportdb_error_message(case_name):
|
| 19 |
+
return (
|
| 20 |
+
"For more information about this error, see: "
|
| 21 |
+
+ "https://pytorch.org/docs/main/generated/exportdb/index.html#"
|
| 22 |
+
+ case_name.replace("_", "-")
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
import logging
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
log = logging.getLogger(__name__)
|
| 30 |
+
graph_breaks_log = torch._logging.getArtifactLogger(__name__, "graph_breaks")
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class TorchDynamoException(RuntimeError):
|
| 34 |
+
pass
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class InternalTorchDynamoError(TorchDynamoException):
|
| 38 |
+
pass
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class RestartAnalysis(TorchDynamoException):
|
| 42 |
+
restart_reason: str
|
| 43 |
+
|
| 44 |
+
def __init__(self, *args, restart_reason=None) -> None:
|
| 45 |
+
self.restart_reason = restart_reason
|
| 46 |
+
super().__init__(*args)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class SpeculationRestartAnalysis(RestartAnalysis):
|
| 50 |
+
pass
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class UnspecializeRestartAnalysis(RestartAnalysis):
|
| 54 |
+
pass
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class CompileCollectiveRestartAnalysis(RestartAnalysis):
|
| 58 |
+
pass
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
class SkipFrame(TorchDynamoException):
|
| 62 |
+
pass
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class TorchRuntimeError(TorchDynamoException):
|
| 66 |
+
pass
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
class InvalidBackend(TorchDynamoException):
|
| 70 |
+
def __init__(self, name) -> None:
|
| 71 |
+
super().__init__(
|
| 72 |
+
f"Invalid backend: {name!r}, see `torch._dynamo.list_backends()` for available backends."
|
| 73 |
+
)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
class ResetRequired(TorchDynamoException):
|
| 77 |
+
def __init__(self) -> None:
|
| 78 |
+
super().__init__(
|
| 79 |
+
textwrap.dedent(
|
| 80 |
+
"""
|
| 81 |
+
Must call `torch._dynamo.reset()` before changing backends. Detected two calls to
|
| 82 |
+
`torch.compile()` with a different backend compiler arguments.
|
| 83 |
+
"""
|
| 84 |
+
)
|
| 85 |
+
)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class BackendCompilerFailed(TorchDynamoException):
|
| 89 |
+
def __init__(self, backend_fn, inner_exception) -> None:
|
| 90 |
+
self.backend_name = getattr(backend_fn, "__name__", "?")
|
| 91 |
+
self.inner_exception = inner_exception
|
| 92 |
+
msg = f"backend={self.backend_name!r} raised:\n{type(inner_exception).__name__}: {inner_exception}"
|
| 93 |
+
super().__init__(msg)
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
class Unsupported(TorchDynamoException):
|
| 97 |
+
def __init__(self, msg, *, case_name=None) -> None:
|
| 98 |
+
super().__init__(msg)
|
| 99 |
+
self.real_stack = torch._guards.TracingContext.extract_stack()
|
| 100 |
+
self.msg = msg
|
| 101 |
+
self.category: Optional[str] = None
|
| 102 |
+
self.add_to_stats()
|
| 103 |
+
self.case_name: Optional[str] = case_name
|
| 104 |
+
|
| 105 |
+
def remove_from_stats(self):
|
| 106 |
+
assert self.category is not None
|
| 107 |
+
counters[self.category][self.msg] -= 1
|
| 108 |
+
if counters[self.category][self.msg] <= 0:
|
| 109 |
+
del counters[self.category][self.msg]
|
| 110 |
+
|
| 111 |
+
def add_to_stats(self, category="unimplemented"):
|
| 112 |
+
self.category = category
|
| 113 |
+
counters[category][self.msg] += 1
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
class RecompileError(TorchDynamoException):
|
| 117 |
+
pass
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
class ArgsMismatchError(Unsupported):
|
| 121 |
+
def __init__(self, msg) -> None:
|
| 122 |
+
super().__init__(msg)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
class AttributeMutationError(Unsupported):
|
| 126 |
+
def __init__(self, msg) -> None:
|
| 127 |
+
super().__init__(msg)
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
class CondOpArgsMismatchError(ArgsMismatchError):
|
| 131 |
+
"""
|
| 132 |
+
Internal error from cond() due to arguments mismatch.
|
| 133 |
+
"""
|
| 134 |
+
|
| 135 |
+
def __init__(self, msg) -> None:
|
| 136 |
+
super().__init__(msg)
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
class UserErrorType(Enum):
|
| 140 |
+
DYNAMIC_CONTROL_FLOW = auto()
|
| 141 |
+
ANTI_PATTERN = auto()
|
| 142 |
+
STANDARD_LIBRARY = auto()
|
| 143 |
+
CONSTRAINT_VIOLATION = auto()
|
| 144 |
+
DYNAMIC_DIM = auto()
|
| 145 |
+
INVALID_INPUT = auto()
|
| 146 |
+
INVALID_OUTPUT = auto()
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
class UserError(Unsupported):
|
| 150 |
+
def __init__(self, error_type: UserErrorType, msg, case_name=None) -> None:
|
| 151 |
+
"""
|
| 152 |
+
Type of errors that would be valid in Eager, but not supported in TorchDynamo.
|
| 153 |
+
The error message should tell user about next actions.
|
| 154 |
+
|
| 155 |
+
error_type: Type of user error
|
| 156 |
+
msg: Actionable error message
|
| 157 |
+
case_name: (Optional) Unique name (snake case) for the usage example in exportdb.
|
| 158 |
+
"""
|
| 159 |
+
if case_name is not None:
|
| 160 |
+
assert isinstance(case_name, str)
|
| 161 |
+
if msg.endswith("."):
|
| 162 |
+
msg += " "
|
| 163 |
+
else:
|
| 164 |
+
msg += "\n"
|
| 165 |
+
msg += exportdb_error_message(case_name)
|
| 166 |
+
super().__init__(msg)
|
| 167 |
+
self.error_type = error_type
|
| 168 |
+
self.message = msg
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
class SkipCodeRecursiveException(TorchDynamoException):
|
| 172 |
+
pass
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
class CacheLimitExceeded(SkipCodeRecursiveException, Unsupported):
|
| 176 |
+
pass
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
class UnsafeScriptObjectError(TorchDynamoException):
|
| 180 |
+
pass
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
class UncapturedHigherOrderOpError(TorchDynamoException):
|
| 184 |
+
pass
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
class IncorrectUsage(Exception):
|
| 188 |
+
pass
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
class ObservedException(TorchDynamoException):
|
| 192 |
+
# An exception observed during the tracing. This exception is used by Dynamo to handle exceptions.
|
| 193 |
+
pass
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
class ObservedUserStopIteration(ObservedException):
|
| 197 |
+
# An UserStopIteraion exception observed during the Dynamo tracing (e.g Dynamo tracing __next__)
|
| 198 |
+
value: Optional[Any]
|
| 199 |
+
|
| 200 |
+
# Reference `StopIteration_init` in CPython
|
| 201 |
+
# https://github.com/python/cpython/blob/3.11/Objects/exceptions.c#L568-L584
|
| 202 |
+
def __init__(self, *args, **kwargs) -> None:
|
| 203 |
+
super().__init__("unhandled `raise StopIteration`")
|
| 204 |
+
if len(args) > 0:
|
| 205 |
+
self.value = args[0]
|
| 206 |
+
else:
|
| 207 |
+
self.value = None
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
class ObservedKeyError(ObservedException):
|
| 211 |
+
# A KeyError exception to be raised from inside Dynamo tracing. This can happen on dict __getitem__
|
| 212 |
+
pass
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
class ObservedAttributeError(ObservedException):
|
| 216 |
+
# An AttributeError exception to be raised from inside Dynamo tracing. This can happen on user defined object __getattr__
|
| 217 |
+
pass
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
observed_exception_map = {
|
| 221 |
+
StopIteration: ObservedUserStopIteration,
|
| 222 |
+
KeyError: ObservedKeyError,
|
| 223 |
+
AttributeError: ObservedAttributeError,
|
| 224 |
+
}
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
def raise_observed_exception(e, tx, vt):
|
| 228 |
+
from .variables import BuiltinVariable
|
| 229 |
+
|
| 230 |
+
# CPython here raises an exception. Since there is no python code, we have to manually setup the exception
|
| 231 |
+
# stack and raise the exception.
|
| 232 |
+
exception_vt = BuiltinVariable(e).call_function(vt, [], {})
|
| 233 |
+
tx.exn_vt_stack.append(exception_vt)
|
| 234 |
+
raise observed_exception_map[e]
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
def handle_observed_exception(tx):
|
| 238 |
+
# This is essentially exception handling code, equivalent of this pseudo code
|
| 239 |
+
#
|
| 240 |
+
# try:
|
| 241 |
+
# ... somebody raising StopIteration
|
| 242 |
+
# except StopIteration
|
| 243 |
+
# pass
|
| 244 |
+
#
|
| 245 |
+
# If this was going through the python code, we would have called exception_handler method, but FOR_ITER
|
| 246 |
+
# handles the exception completely in CPython. For example for 3.11, the resulting bytecode is
|
| 247 |
+
#
|
| 248 |
+
#
|
| 249 |
+
# 6 46 LOAD_GLOBAL 2 (StopIteration)
|
| 250 |
+
# 58 RAISE_VARARGS 1
|
| 251 |
+
# >> 60 PUSH_EXC_INFO
|
| 252 |
+
|
| 253 |
+
# 7 62 LOAD_GLOBAL 2 (StopIteration)
|
| 254 |
+
# 74 CHECK_EXC_MATCH
|
| 255 |
+
# 76 POP_JUMP_FORWARD_IF_FALSE 3 (to 84)
|
| 256 |
+
# 78 POP_TOP
|
| 257 |
+
|
| 258 |
+
# 8 80 POP_EXCEPT
|
| 259 |
+
#
|
| 260 |
+
|
| 261 |
+
# Fortunately this translates to a simple pop from the exn_vt_stack
|
| 262 |
+
tx.exn_vt_stack.pop()
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
# These exceptions are ok to fallback to eager/graph_break.
|
| 266 |
+
exceptions_allowed_to_be_fallback = (
|
| 267 |
+
torch._subclasses.fake_tensor.DataDependentOutputException,
|
| 268 |
+
torch._subclasses.fake_tensor.DynamicOutputShapeException,
|
| 269 |
+
torch._subclasses.fake_tensor.UnsupportedOperatorException,
|
| 270 |
+
torch._subclasses.fake_tensor.UnsupportedFakeTensorException,
|
| 271 |
+
)
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
def unimplemented_with_warning(e: Exception, code, msg: str) -> NoReturn:
|
| 275 |
+
# This function calls unimplemented internally and eventually graph breaks
|
| 276 |
+
# or falls to eager. unimplemented itself does not print any user warnings,
|
| 277 |
+
# i.e., its very silent. This helper function is intended when an error is
|
| 278 |
+
# encountered in the torch.compile stack which is worth showing as warning
|
| 279 |
+
# to the user. For example, if AOT Autograd backend fails with a fake tensor
|
| 280 |
+
# exception, its ok to fallback to eager but not silently. Here, we can use
|
| 281 |
+
# this function to log the message and the stack trace.
|
| 282 |
+
graph_break_msg = format_error_msg_verbose(e, code)
|
| 283 |
+
graph_breaks_log.debug("%s", graph_break_msg)
|
| 284 |
+
log.warning(msg)
|
| 285 |
+
unimplemented(msg, from_exc=e)
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
_NOTHING = object()
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
def unimplemented(
|
| 292 |
+
msg: str, *, from_exc: Any = _NOTHING, case_name: Optional[str] = None
|
| 293 |
+
) -> NoReturn:
|
| 294 |
+
assert msg != os.environ.get("BREAK", False)
|
| 295 |
+
if from_exc is not _NOTHING:
|
| 296 |
+
raise Unsupported(msg, case_name=case_name) from from_exc
|
| 297 |
+
raise Unsupported(msg, case_name=case_name)
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
def warning(msg: str) -> None:
|
| 301 |
+
counters["warnings"][msg] += 1
|
| 302 |
+
assert msg != os.environ.get("BREAK", False)
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
# KeyError has special handling for its args
|
| 306 |
+
# see https://github.com/python/cpython/blob/3.11/Objects/exceptions.c#L2534 for details
|
| 307 |
+
class KeyErrorMsg:
|
| 308 |
+
def __init__(self, value) -> None:
|
| 309 |
+
self.value = value
|
| 310 |
+
|
| 311 |
+
def __str__(self) -> str:
|
| 312 |
+
return str(self.value)
|
| 313 |
+
|
| 314 |
+
def __repr__(self) -> str:
|
| 315 |
+
return self.__str__()
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
def augment_exc_message(exc: Exception, msg: str = "\n", export: bool = False) -> None:
|
| 319 |
+
import traceback
|
| 320 |
+
|
| 321 |
+
exc.innermost_user_frame_summary = None # type: ignore[attr-defined]
|
| 322 |
+
|
| 323 |
+
real_stack = get_real_stack(exc)
|
| 324 |
+
if real_stack is not None and len(real_stack) > 0:
|
| 325 |
+
exc.innermost_user_frame_summary = real_stack[-1] # type: ignore[attr-defined]
|
| 326 |
+
msg += f"\nfrom user code:\n {''.join(traceback.format_list(real_stack))}"
|
| 327 |
+
|
| 328 |
+
if config.replay_record_enabled and hasattr(exc, "record_filename"):
|
| 329 |
+
msg += f"\nLast frame execution written to {exc.record_filename}. To run only this frame while debugging, run\
|
| 330 |
+
torch._dynamo.replay('{exc.record_filename}').\n"
|
| 331 |
+
|
| 332 |
+
if not config.verbose and hasattr(exc, "real_stack"):
|
| 333 |
+
msg += '\nSet TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information\n'
|
| 334 |
+
|
| 335 |
+
if hasattr(exc, "inner_exception") and hasattr(
|
| 336 |
+
exc.inner_exception, "minifier_path"
|
| 337 |
+
):
|
| 338 |
+
if hasattr(exc.inner_exception, "buck_command"):
|
| 339 |
+
msg += (
|
| 340 |
+
f"\nMinifier script written to {exc.inner_exception.minifier_path}. Run "
|
| 341 |
+
f"this buck command to find the smallest traced graph "
|
| 342 |
+
f"which reproduces this error: {exc.inner_exception.buck_command}\n"
|
| 343 |
+
)
|
| 344 |
+
else:
|
| 345 |
+
msg += (
|
| 346 |
+
f"\nMinifier script written to {exc.inner_exception.minifier_path}. Run "
|
| 347 |
+
"this script to find the smallest traced graph which reproduces this error.\n"
|
| 348 |
+
)
|
| 349 |
+
|
| 350 |
+
if not config.suppress_errors and not export:
|
| 351 |
+
msg += (
|
| 352 |
+
"\n\n"
|
| 353 |
+
"You can suppress this exception and fall back to eager by setting:\n"
|
| 354 |
+
" import torch._dynamo\n"
|
| 355 |
+
" torch._dynamo.config.suppress_errors = True\n"
|
| 356 |
+
)
|
| 357 |
+
|
| 358 |
+
old_msg = "" if len(exc.args) == 0 else str(exc.args[0])
|
| 359 |
+
|
| 360 |
+
if isinstance(exc, KeyError):
|
| 361 |
+
exc.args = (KeyErrorMsg(old_msg + msg),) + exc.args[1:]
|
| 362 |
+
else:
|
| 363 |
+
new_msg = old_msg + msg
|
| 364 |
+
exc.args = (new_msg,) + exc.args[1:]
|
| 365 |
+
|
| 366 |
+
|
| 367 |
+
def get_exc_message(
|
| 368 |
+
e: Exception, compile_id: "CompileId"
|
| 369 |
+
) -> Tuple[Optional[str], Optional[int]]:
|
| 370 |
+
filename = None
|
| 371 |
+
lineno = None
|
| 372 |
+
if e.innermost_user_frame_summary is not None: # type: ignore[attr-defined]
|
| 373 |
+
filename = e.innermost_user_frame_summary.filename # type: ignore[attr-defined]
|
| 374 |
+
lineno = e.innermost_user_frame_summary.lineno # type: ignore[attr-defined]
|
| 375 |
+
e.compile_id = compile_id # type: ignore[attr-defined]
|
| 376 |
+
return filename, lineno
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
def get_real_stack(exc: Exception, frame=None) -> Optional[StackSummary]:
|
| 380 |
+
real_stack = getattr(exc, "real_stack", None)
|
| 381 |
+
if real_stack is None:
|
| 382 |
+
return None
|
| 383 |
+
|
| 384 |
+
# NB: it's possible for real_stack to be []; we still attempt to
|
| 385 |
+
# report a stack anyway because the stack_above_dynamo may still
|
| 386 |
+
# be useful for debugging
|
| 387 |
+
|
| 388 |
+
stack_above_dynamo = []
|
| 389 |
+
if frame is not None:
|
| 390 |
+
# NB: frame is PyInterpreterFrame on Python 3.11 and later,
|
| 391 |
+
# not a TRUE frame object. You can't actually feed it
|
| 392 |
+
# to traceback because it doesn't have enough information.
|
| 393 |
+
# To solve this problem, we technically should just materialize
|
| 394 |
+
# the frame, the same way _PyFrame_GetFrameObject would do
|
| 395 |
+
# (but we cannot actually do this, because this populates
|
| 396 |
+
# frame_obj field, which default eval frame doesn't like).
|
| 397 |
+
#
|
| 398 |
+
# Fortunately, in this case, we can hack it: there's no need
|
| 399 |
+
# to actually use the truly top frame, we can just extract
|
| 400 |
+
# from where we are right now and rely on filter_stack to
|
| 401 |
+
# get rid of all the dynamo frames. For ease of testing
|
| 402 |
+
# we apply this behavior to ALL Python versions
|
| 403 |
+
stack_above_dynamo = filter_stack(extract_stack())
|
| 404 |
+
|
| 405 |
+
return cast(StackSummary, stack_above_dynamo + real_stack)
|
| 406 |
+
|
| 407 |
+
|
| 408 |
+
# filter out all frames after entering dynamo
|
| 409 |
+
def filter_stack(stack):
|
| 410 |
+
user_stack = []
|
| 411 |
+
for frame in stack:
|
| 412 |
+
if "convert_frame" in frame.filename:
|
| 413 |
+
break
|
| 414 |
+
if "eval_frame" in frame.filename or "torch._dynamo.optimize(" in frame.line:
|
| 415 |
+
continue
|
| 416 |
+
user_stack.append(frame)
|
| 417 |
+
|
| 418 |
+
return user_stack
|
| 419 |
+
|
| 420 |
+
|
| 421 |
+
def format_error_msg_verbose(
|
| 422 |
+
exc: Exception, code, record_filename=None, frame=None
|
| 423 |
+
) -> str:
|
| 424 |
+
msg = (
|
| 425 |
+
f"WON'T CONVERT {code.co_name} {code.co_filename} line {code.co_firstlineno}\n"
|
| 426 |
+
)
|
| 427 |
+
msg += "=" * 10 + " TorchDynamo Stack Trace " + "=" * 10 + "\n"
|
| 428 |
+
msg += format_exc()
|
| 429 |
+
real_stack = get_real_stack(exc, frame)
|
| 430 |
+
if real_stack is not None:
|
| 431 |
+
msg += (
|
| 432 |
+
"\n"
|
| 433 |
+
+ "=" * 10
|
| 434 |
+
+ " The above exception occurred while processing the following code "
|
| 435 |
+
+ "=" * 10
|
| 436 |
+
+ "\n\n"
|
| 437 |
+
)
|
| 438 |
+
msg += "".join(format_list(real_stack))
|
| 439 |
+
msg += "\n"
|
| 440 |
+
msg += "=" * 10
|
| 441 |
+
|
| 442 |
+
return msg
|
| 443 |
+
|
| 444 |
+
|
| 445 |
+
def format_error_msg(exc: Exception, code, record_filename=None, frame=None) -> str:
|
| 446 |
+
msg = os.linesep * 2
|
| 447 |
+
|
| 448 |
+
if config.verbose:
|
| 449 |
+
msg = format_error_msg_verbose(exc, code, record_filename, frame)
|
| 450 |
+
else:
|
| 451 |
+
msg = f"WON'T CONVERT {code.co_name} {code.co_filename}\
|
| 452 |
+
line {code.co_firstlineno} \ndue to: \n{format_exc()}"
|
| 453 |
+
|
| 454 |
+
return msg
|
pllava/lib/python3.10/site-packages/torch/_dynamo/external_utils.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
# This module contains functions that *will be allowed* by dynamo
|
| 3 |
+
|
| 4 |
+
import functools
|
| 5 |
+
import warnings
|
| 6 |
+
from typing import List
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
import torch.utils._pytree as pytree
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
try:
|
| 13 |
+
import numpy as np
|
| 14 |
+
except ModuleNotFoundError:
|
| 15 |
+
np = None # type: ignore[assignment]
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def is_compiling() -> bool:
|
| 19 |
+
"""
|
| 20 |
+
Indicates whether we are tracing/compiling with torch.compile() or torch.export().
|
| 21 |
+
|
| 22 |
+
If need to check specifically that TorchDynamo is used, then use
|
| 23 |
+
torch.compiler.is_dynamo_compiling().
|
| 24 |
+
|
| 25 |
+
TODO(khabinov): we should deprecate this function and use one of these two:
|
| 26 |
+
* torch.compiler.is_compiling(),
|
| 27 |
+
* torch.compiler.is_dynamo_compiling().
|
| 28 |
+
It will depend on the context where to use what.
|
| 29 |
+
"""
|
| 30 |
+
return torch.compiler.is_compiling()
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def wrap_inline(fn):
|
| 34 |
+
"""
|
| 35 |
+
Create an extra frame around fn that is not in skipfiles
|
| 36 |
+
"""
|
| 37 |
+
|
| 38 |
+
@functools.wraps(fn)
|
| 39 |
+
def inner(*args, **kwargs):
|
| 40 |
+
return fn(*args, **kwargs)
|
| 41 |
+
|
| 42 |
+
return inner
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def call_hook(hook, *args, **kwargs):
|
| 46 |
+
"""
|
| 47 |
+
Used by compiled autograd to handle hook returning None
|
| 48 |
+
"""
|
| 49 |
+
result = hook(*args)
|
| 50 |
+
if result is None:
|
| 51 |
+
return args[0]
|
| 52 |
+
elif kwargs["hook_type"] == "post_acc_grad_hook":
|
| 53 |
+
raise RuntimeError("Tensor post accumulate grad hooks should return None.")
|
| 54 |
+
return result
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def wrap_numpy(f):
|
| 58 |
+
r"""Decorator that turns a function from ``np.ndarray``s to ``np.ndarray``s into a function
|
| 59 |
+
from ``torch.Tensor``s to ``torch.Tensor``s.
|
| 60 |
+
"""
|
| 61 |
+
if not np:
|
| 62 |
+
return f
|
| 63 |
+
|
| 64 |
+
@functools.wraps(f)
|
| 65 |
+
def wrap(*args, **kwargs):
|
| 66 |
+
args, kwargs = pytree.tree_map_only(
|
| 67 |
+
torch.Tensor, lambda x: x.numpy(), (args, kwargs)
|
| 68 |
+
)
|
| 69 |
+
out = f(*args, **kwargs)
|
| 70 |
+
return pytree.tree_map_only(np.ndarray, lambda x: torch.as_tensor(x), out)
|
| 71 |
+
|
| 72 |
+
return wrap
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
class FakeBackwardCFunction:
|
| 76 |
+
def __init__(
|
| 77 |
+
self,
|
| 78 |
+
real: torch.autograd.function.BackwardCFunction,
|
| 79 |
+
saved_tensors: List[torch.Tensor],
|
| 80 |
+
) -> None:
|
| 81 |
+
self.real = real
|
| 82 |
+
self.saved_tensors = saved_tensors
|
| 83 |
+
|
| 84 |
+
def __getattr__(self, name):
|
| 85 |
+
if name == "saved_variables":
|
| 86 |
+
warnings.warn(
|
| 87 |
+
"'saved_variables' is deprecated; use 'saved_tensors'",
|
| 88 |
+
DeprecationWarning,
|
| 89 |
+
)
|
| 90 |
+
return self.saved_tensors
|
| 91 |
+
|
| 92 |
+
# route any attribute that isn't defined on this obj
|
| 93 |
+
return getattr(self.real, name)
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
# This function corresponds to the "eager" implementation of a lifted autograd.Function.backward
|
| 97 |
+
def call_backward(backward_c_function, saved_tensors, *args):
|
| 98 |
+
fake = FakeBackwardCFunction(backward_c_function, saved_tensors)
|
| 99 |
+
grads = fake._forward_cls.backward(fake, *args) # type: ignore[attr-defined]
|
| 100 |
+
|
| 101 |
+
# in eager, we wrap in a tuple when there's only one grad output
|
| 102 |
+
if type(grads) is not tuple:
|
| 103 |
+
grads = (grads,)
|
| 104 |
+
|
| 105 |
+
return grads
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def untyped_storage_size(x: torch.Tensor):
|
| 109 |
+
return x.untyped_storage().size()
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
class FakeCompiledAutogradEngine:
|
| 113 |
+
@staticmethod
|
| 114 |
+
def queue_callback(final_callbacks, cb):
|
| 115 |
+
final_callbacks.append(cb)
|
| 116 |
+
|
| 117 |
+
@staticmethod
|
| 118 |
+
def exec_final_callbacks(final_callbacks):
|
| 119 |
+
i = 0
|
| 120 |
+
while i < len(final_callbacks):
|
| 121 |
+
cb = final_callbacks[i]
|
| 122 |
+
cb()
|
| 123 |
+
i += 1
|
| 124 |
+
final_callbacks.clear()
|
| 125 |
+
|
| 126 |
+
@staticmethod
|
| 127 |
+
def _exec_final_callbacks_stub():
|
| 128 |
+
pass
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def call_hook_from_backward_state(*args, bw_state, hook_name: str, **kwargs):
|
| 132 |
+
return getattr(bw_state, hook_name)(*args, **kwargs)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def call_module_hooks_from_backward_state(
|
| 136 |
+
_, result, *args, bw_state, hooks_name: str, module_name: str
|
| 137 |
+
):
|
| 138 |
+
module = getattr(bw_state, module_name)
|
| 139 |
+
hooks = getattr(bw_state, hooks_name)
|
| 140 |
+
for hook in hooks:
|
| 141 |
+
new_result = hook(module, result, *args)
|
| 142 |
+
if new_result is not None:
|
| 143 |
+
result = new_result
|
| 144 |
+
return result
|
pllava/lib/python3.10/site-packages/torch/_dynamo/funcname_cache.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import tokenize
|
| 2 |
+
from typing import Dict, List, Optional
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
cache: Dict[str, Dict[int, str]] = {}
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def clearcache() -> None:
|
| 9 |
+
cache.clear()
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def _add_file(filename: str) -> None:
|
| 13 |
+
try:
|
| 14 |
+
with tokenize.open(filename) as f:
|
| 15 |
+
tokens = list(tokenize.generate_tokens(f.readline))
|
| 16 |
+
except OSError:
|
| 17 |
+
cache[filename] = {}
|
| 18 |
+
return
|
| 19 |
+
|
| 20 |
+
# NOTE: undefined behavior if file is not valid Python source,
|
| 21 |
+
# since tokenize will have undefined behavior.
|
| 22 |
+
result: Dict[int, str] = {}
|
| 23 |
+
# current full funcname, e.g. xxx.yyy.zzz
|
| 24 |
+
cur_name = ""
|
| 25 |
+
cur_indent = 0
|
| 26 |
+
significant_indents: List[int] = []
|
| 27 |
+
|
| 28 |
+
for i, token in enumerate(tokens):
|
| 29 |
+
if token.type == tokenize.INDENT:
|
| 30 |
+
cur_indent += 1
|
| 31 |
+
elif token.type == tokenize.DEDENT:
|
| 32 |
+
cur_indent -= 1
|
| 33 |
+
# possible end of function or class
|
| 34 |
+
if significant_indents and cur_indent == significant_indents[-1]:
|
| 35 |
+
significant_indents.pop()
|
| 36 |
+
# pop the last name
|
| 37 |
+
cur_name = cur_name.rpartition(".")[0]
|
| 38 |
+
elif (
|
| 39 |
+
token.type == tokenize.NAME
|
| 40 |
+
and i + 1 < len(tokens)
|
| 41 |
+
and tokens[i + 1].type == tokenize.NAME
|
| 42 |
+
and (token.string == "class" or token.string == "def")
|
| 43 |
+
):
|
| 44 |
+
# name of class/function always follows class/def token
|
| 45 |
+
significant_indents.append(cur_indent)
|
| 46 |
+
if cur_name:
|
| 47 |
+
cur_name += "."
|
| 48 |
+
cur_name += tokens[i + 1].string
|
| 49 |
+
result[token.start[0]] = cur_name
|
| 50 |
+
|
| 51 |
+
cache[filename] = result
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def get_funcname(filename: str, lineno: int) -> Optional[str]:
|
| 55 |
+
if filename not in cache:
|
| 56 |
+
_add_file(filename)
|
| 57 |
+
return cache[filename].get(lineno, None)
|
pllava/lib/python3.10/site-packages/torch/_dynamo/guards.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/hooks.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import dataclasses
|
| 2 |
+
from typing import Callable, Optional
|
| 3 |
+
|
| 4 |
+
from torch._guards import GuardsSet
|
| 5 |
+
|
| 6 |
+
from .types import GuardFail
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@dataclasses.dataclass
|
| 10 |
+
class Hooks:
|
| 11 |
+
guard_export_fn: Optional[Callable[[GuardsSet], None]] = None
|
| 12 |
+
guard_fail_fn: Optional[Callable[[GuardFail], None]] = None
|
pllava/lib/python3.10/site-packages/torch/_dynamo/output_graph.py
ADDED
|
@@ -0,0 +1,2190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import collections
|
| 3 |
+
import contextlib
|
| 4 |
+
import copy
|
| 5 |
+
import dataclasses
|
| 6 |
+
import functools
|
| 7 |
+
import itertools
|
| 8 |
+
import json
|
| 9 |
+
import logging
|
| 10 |
+
import operator
|
| 11 |
+
import re
|
| 12 |
+
import sys
|
| 13 |
+
import traceback
|
| 14 |
+
import weakref
|
| 15 |
+
from dataclasses import dataclass
|
| 16 |
+
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, TYPE_CHECKING, Union
|
| 17 |
+
|
| 18 |
+
import sympy
|
| 19 |
+
|
| 20 |
+
import torch._guards
|
| 21 |
+
import torch._logging
|
| 22 |
+
import torch.distributed as dist
|
| 23 |
+
import torch.nn
|
| 24 |
+
import torch.utils._pytree as pytree
|
| 25 |
+
from torch import fx
|
| 26 |
+
from torch._guards import GlobalContextCheckpointState, Source, TracingContext
|
| 27 |
+
from torch._utils_internal import signpost_event
|
| 28 |
+
from torch.fx._lazy_graph_module import _make_graph_module # type: ignore[attr-defined]
|
| 29 |
+
from torch.fx.experimental._backward_state import BackwardState
|
| 30 |
+
from torch.fx.experimental.symbolic_shapes import free_symbols, is_symbolic, ShapeEnv
|
| 31 |
+
from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts
|
| 32 |
+
from torch.utils._python_dispatch import is_traceable_wrapper_subclass
|
| 33 |
+
|
| 34 |
+
from . import config, exc, logging as torchdynamo_logging, variables
|
| 35 |
+
from .backends.registry import CompiledFn, CompilerFn
|
| 36 |
+
from .bytecode_transformation import (
|
| 37 |
+
create_call_function,
|
| 38 |
+
create_instruction,
|
| 39 |
+
Instruction,
|
| 40 |
+
unique_id,
|
| 41 |
+
)
|
| 42 |
+
from .code_context import code_context
|
| 43 |
+
from .codegen import PyCodegen
|
| 44 |
+
from .current_scope_id import enter_new_scope
|
| 45 |
+
from .exc import (
|
| 46 |
+
BackendCompilerFailed,
|
| 47 |
+
exceptions_allowed_to_be_fallback,
|
| 48 |
+
SkipFrame,
|
| 49 |
+
unimplemented,
|
| 50 |
+
unimplemented_with_warning,
|
| 51 |
+
)
|
| 52 |
+
from .guards import GuardBuilder, install_guard
|
| 53 |
+
from .mutation_guard import is_dynamic_nn_module
|
| 54 |
+
from .side_effects import AttributeMutationExisting, SideEffects
|
| 55 |
+
from .source import (
|
| 56 |
+
AttrSource,
|
| 57 |
+
BackwardStateSource,
|
| 58 |
+
ConstantSource,
|
| 59 |
+
GetItemSource,
|
| 60 |
+
GlobalStateSource,
|
| 61 |
+
is_constant_source,
|
| 62 |
+
is_from_local_source,
|
| 63 |
+
LocalSource,
|
| 64 |
+
ParamBufferSource,
|
| 65 |
+
ShapeEnvSource,
|
| 66 |
+
SyntheticLocalSource,
|
| 67 |
+
TensorProperty,
|
| 68 |
+
TensorPropertySource,
|
| 69 |
+
)
|
| 70 |
+
from .utils import (
|
| 71 |
+
_extract_tensor_dict,
|
| 72 |
+
checkpoint_params,
|
| 73 |
+
CleanupHook,
|
| 74 |
+
clone_inputs,
|
| 75 |
+
count_calls,
|
| 76 |
+
counters,
|
| 77 |
+
dynamo_timed,
|
| 78 |
+
get_instruction_source_311,
|
| 79 |
+
get_locals_to_steal,
|
| 80 |
+
get_static_address_type,
|
| 81 |
+
get_torch_function_mode_stack,
|
| 82 |
+
graph_break_reasons,
|
| 83 |
+
increment_op_count,
|
| 84 |
+
lazy_format_graph_code,
|
| 85 |
+
LazyString,
|
| 86 |
+
nn_module_proxy,
|
| 87 |
+
same,
|
| 88 |
+
set_example_value,
|
| 89 |
+
)
|
| 90 |
+
from .variables.base import VariableTracker
|
| 91 |
+
from .variables.builder import (
|
| 92 |
+
BackwardStateGraphArg,
|
| 93 |
+
GraphArg,
|
| 94 |
+
TrackedFake,
|
| 95 |
+
VariableBuilder,
|
| 96 |
+
wrap_fx_proxy,
|
| 97 |
+
)
|
| 98 |
+
from .variables.lists import BaseListVariable
|
| 99 |
+
from .variables.misc import NullVariable
|
| 100 |
+
from .variables.nn_module import NNModuleVariable
|
| 101 |
+
from .variables.tensor import (
|
| 102 |
+
NumpyNdarrayVariable,
|
| 103 |
+
SymNodeVariable,
|
| 104 |
+
TensorVariable,
|
| 105 |
+
UnspecializedPythonVariable,
|
| 106 |
+
)
|
| 107 |
+
from .variables.torch_function import TensorWithTFOverrideVariable
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
if TYPE_CHECKING:
|
| 111 |
+
from torch._dynamo.symbolic_convert import InstructionTranslatorBase
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
# Module-level logger plus "artifact" loggers. Artifact loggers are gated by
# torch._logging configuration, so graph dumps are only emitted when the
# corresponding artifact ("graph", "graph_code", ...) is enabled.
log = logging.getLogger(__name__)
graph_tabular_log = torch._logging.getArtifactLogger(__name__, "graph")
graph_code_log = torch._logging.getArtifactLogger(__name__, "graph_code")
graph_sizes_log = torch._logging.getArtifactLogger(__name__, "graph_sizes")
trace_call_log = torch._logging.getArtifactLogger(__name__, "trace_call")
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
@dataclass(frozen=True)
class VariableTrackerCacheKey:
    """Hashable cache key for VariableTrackerCache: (object identity, source)."""

    # id() of the Python object whose VariableTracker is cached.
    vt_id: int
    # Two different source can point to the same object. However, Dynamo handles
    # globals and local source differently when it comes to guards and possibly
    # some other parts as well. So, cache also relies on the source.
    source: Source
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
class VariableTrackerCache:
    """Memoizes VariableTrackers keyed by (object identity, source).

    Makes repeated symbolic analysis of LOAD_GLOBAL / LOAD_ATTR on the same
    Python object effectively free.
    """

    def __init__(self):
        self.cache = {}

    def lookup(self, value, source):
        """Return the cached tracker for (value, source), or None if absent."""
        return self.cache.get(VariableTrackerCacheKey(id(value), source))

    def add(self, value, source, vt):
        """Record `vt` as the tracker for (value, source)."""
        self.cache[VariableTrackerCacheKey(id(value), source)] = vt

    def clone(self):
        """Return a shallow copy; needed for copy and restore of graph state."""
        duplicate = VariableTrackerCache()
        duplicate.cache.update(self.cache)
        return duplicate

    def clear(self):
        """Drop every cached tracker."""
        self.cache.clear()
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
@functools.lru_cache(None)
def _step_logger():
    """Return the (memoized) step logger bound to this module's logger."""
    return torchdynamo_logging.get_step_logger(log)
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
@dataclass
class GraphCompileReason:
    """Stores why a given output graph was compiled; i.e. what caused the graph break."""

    # Human-readable description of the cause.
    reason: str
    # User-code frames that were active when the reason was recorded.
    user_stack: List[traceback.FrameSummary]

    # Indicates if this was a graph compile reason due to graph break.
    graph_break: bool = True

    def __post_init__(self):
        # Side effect: every graph-break reason is accumulated in the
        # module-global `graph_break_reasons` list for later reporting.
        if self.graph_break:
            graph_break_reasons.append(self)
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def _get_gen_rand_values_fn(random_calls):
|
| 175 |
+
def _gen_rand_values():
|
| 176 |
+
return [fn(*args, **kwargs) for fn, args, kwargs in random_calls]
|
| 177 |
+
|
| 178 |
+
return _gen_rand_values
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
class FakeRootModule(torch.nn.Module):
    """Trick the constructor of fx.GraphModule"""

    def __init__(self, nn_modules: Dict[str, torch.nn.Module]):
        super().__init__()
        # Attach each submodule under its flat name so fx.GraphModule's
        # constructor can resolve call_module targets via getattr.
        for name, module in nn_modules.items():
            setattr(self, name, module)

    def __repr__(self):
        return "FakeRootModule(...)"
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
class WrapperBackend:
    """Wraps a compiler backend and optionally verifies its output.

    When ``config.verify_correctness`` is set, the compiled candidate is run
    against the original GraphModule on cloned inputs and rejected (by
    raising ``RuntimeError``) if the results diverge.
    """

    def __init__(self, backend: CompilerFn):
        self.backend: CompilerFn = backend

    def __call__(self, gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]):
        """Compile ``gm`` with the wrapped backend; return a callable.

        Falls back to ``gm.forward`` when the backend declines to compile
        (returns None or the original forward).
        """
        # Snapshot parameters so any mutation during verification is undone.
        self.restore = checkpoint_params(gm)
        self.gm = gm
        # Compile a deep copy so the backend cannot mutate the original graph.
        copy_gm = copy.deepcopy(self.gm)
        self.candidate = self.backend(copy_gm, example_inputs)

        if self.candidate is None or self.candidate is self.gm.forward:
            return self.gm.forward

        if not config.verify_correctness:
            return self.candidate

        # if verify_correctness=True
        try:
            correct = self.gm.forward(*clone_inputs(example_inputs))
            result = self.candidate(*clone_inputs(example_inputs))

            # TODO: replace `same` function with the one in testing
            if same(correct, result):
                return self.candidate

            # Bug fix: the original had an unreachable `return self.gm.forward`
            # immediately after this raise; the dead line has been removed.
            raise RuntimeError(f"incorrect results of backend {self}")
        except Exception:
            log.exception("error in verify_correctness")
            raise
        finally:
            self.restore()
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
# Type alias: a variable scope (an f_locals / f_globals style name->value mapping).
Scope = Dict[str, object]
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
class OutputGraph:
|
| 232 |
+
"""
|
| 233 |
+
Wrapper class to hold outputs of InstructionTranslator. Mainly the
|
| 234 |
+
generated fx.Graph.
|
| 235 |
+
|
| 236 |
+
OutputGraph is 1:1 with a frame being processed. Each frame is associated
|
| 237 |
+
with some root InstructionTranslator. When user code calls a function,
|
| 238 |
+
we construct a InliningInstructionTranslator that continues to write into
|
| 239 |
+
the root InstructionTranslator's OutputGraph.
|
| 240 |
+
"""
|
| 241 |
+
|
| 242 |
+
def __init__(
    self,
    code_options: Dict[str, Any],
    compiler_fn: Optional[CompilerFn],
    root_tx,
    export: bool,
    export_constraints,
    frame_state,
    local_scope: Scope,
    global_scope: Scope,
    f_code,
):
    """Set up all per-frame tracing state.

    Args:
        code_options: code-object options dict (copied defensively below).
        compiler_fn: backend compiler invoked on the finished graph (may be None).
        root_tx: the root InstructionTranslator for this frame.
        export: whether we are tracing for export.
        export_constraints: user-provided constraints used during export.
        frame_state: mutable per-frame state shared across recompiles.
        local_scope / global_scope: the frame's f_locals / f_globals mappings.
        f_code: the frame's code object (used for co_* metadata).
    """
    super().__init__()
    self.tracers = [SubgraphTracer(self, export_root=export)]
    # Map from graph input's `Source` to its `VariableTracker` to
    # de-duplicate graph inputs by source and reuse the tracker
    self.input_source_to_var: Dict[Source, VariableTracker] = {}
    self.export = export
    self.export_constraints = export_constraints
    self.frame_state = frame_state
    # Map from graph input's `Source` to sizes / strides metadata
    self.input_source_to_sizes_strides: Dict[Source, Dict[str, Any]] = {}
    self.cleanup_hooks: List[Callable[[], Any]] = []
    # compile_id is an id number for the current torch.compile
    self.compile_id: int = next(_compile_id_counter)
    # Set of globals installed via install_global* APIs
    self.installed_globals: Set[str] = set()

    # TODO: maybe should just pass the entire f_code in here? Not
    # sure...
    self.co_fields = {
        "co_name": f_code.co_name,
        "co_filename": f_code.co_filename,
        "co_firstlineno": f_code.co_firstlineno,
    }

    # tracked_fakes says where any tensor that was wrapped to fake came
    # from. It is similar to GraphArg, in that all GraphArgs will get
    # added to TrackedFakes, but TrackedFakes also contains
    # GraphArgs that got pruned, and things like Tensor attributes which
    # aren't explicit graph inputs. Used by shape guard
    self.tracked_fakes: List[TrackedFake] = []

    # List of symbols for which we have exact bindings in the arguments
    # already
    self.bound_symbols: Set[sympy.Symbol] = set()

    shape_env = ShapeEnv(
        # Reference Cycle!
        # Share a reference to the list of TrackedFake.
        #
        # ShapeEnv needs this in order to be able to reproduce the call
        # to produce_guards at an arbitrary time point. That is because
        # TrackedFake instances may have its metadata changed throughout
        # the program execution.
        tracked_fakes=self.tracked_fakes,
        allow_scalar_outputs=config.capture_scalar_outputs,
        allow_dynamic_output_shape_ops=config.capture_dynamic_output_shape_ops,
        prefer_deferred_runtime_asserts_over_guards=config.prefer_deferred_runtime_asserts_over_guards,
        allow_complex_guards_as_runtime_asserts=config.allow_complex_guards_as_runtime_asserts,
        co_fields=self.co_fields,
    )

    # In export mode, we force the shape_env to strictly disallow any constraining
    # of the user marked dynamic dims
    import torch._functorch.config as _config

    with _config.patch(fake_tensor_allow_unsafe_data_ptr_access=False):
        fake_mode = torch._subclasses.FakeTensorMode(
            shape_env=shape_env,
            # TODO (tmanlaibaatar) Remove this once we always lift params and buffers
            allow_non_fake_inputs=True if self.export else False,
            export=self.export,
        )
    self.tracing_context: TracingContext = TracingContext(fake_mode)
    self.init_ambient_guards()

    # Map each tensor id to a list of sources. This is necessary because
    # tensor ids cannot be recovered from tracked fakes (in general).
    # We use this map to interpret (i.e., check for violations of) constraints,
    # specifically equality constraints, which have shared tensor ids in them.
    # This map should also be generally useful, e.g., for (de)serialization.
    self.tracked_fakes_id_to_source: Dict[
        int, List[Source]
    ] = collections.defaultdict(list)
    # Stores the full fqn of a param or buffer to the relevant source.
    self.param_name_to_source: Optional[Dict[str, Source]] = {}
    self.side_effects = SideEffects()
    # Cached variable trackers. This makes symbolic analysis of LOAD_GLOBAL
    # and LOAD_ATTR for same python objects free.
    self.variable_tracker_cache = VariableTrackerCache()
    self.unique_var_id = itertools.count()
    self.code_options = dict(code_options)
    self.output_instructions: List[Instruction] = []
    # used to track nodes that are added between calls of copy_graphstate
    # and restore_graphstate
    self.timestamp = 0

    # A list of register_finalizer_fns to apply to the output graph module
    self.register_finalizer_fns: List[Callable[[fx.GraphModule], None]] = []

    # Not checkpointed
    self.compiler_fn: Optional[CompilerFn] = compiler_fn
    self.global_scope = global_scope
    self.local_scope = local_scope
    self.root_tx = root_tx

    # Given a source, what are the user stacks of all locations that
    # accessed it?
    #
    # For efficiency, we only populate this:
    # - During export, and
    # - If the source could potentially lead to a spurious export input
    #
    # Feel free to populate this more frequently if other use-cases arise,
    # but be aware that we have to generate full stacks for each
    # recording!
    self.source_to_user_stacks: Dict[Source, List[traceback.StackSummary]] = {}

    self._current_tx: List[InstructionTranslatorBase] = []
    self.cleanups: List[CleanupHook] = []
    self.should_exit = False
    self.unspec_variable_map: Dict[str, UnspecializedPythonVariable] = {}

    # Note this returns true iff TF Mode and TF Subclasses are enabled
    self.torch_function_enabled = torch._C._is_torch_function_enabled()
    # This returns false if TF Overall (both mode and subclass) is disabled OR that TF Mode stack is empty
    self.torch_function_mode_enabled = torch._C._is_torch_function_mode_enabled()
    # This records the initial torch function mode stack for guarding
    self.torch_function_mode_stack = get_torch_function_mode_stack()

    # Tracks if the output graph has a user defined allowed function in the
    # graph. This is used later to determine if we should fallback to eager
    # for certain exceptions. THe idea is that if the user has applied
    # allow_in_graph, they would like to see the error instead of falling
    # back for backend errors.
    self.has_user_defined_allowed_in_graph = False

    # Tracks a list of called ops that were not tagged with "pt2_compliant_tag".
    # This information is useful for logging.
    self.non_compliant_ops: Set[torch._ops.OpOverload] = set({})

    # Tracks a list of called custom ops that were tagged with "pt2_compliant_tag".
    # This information is useful for logging.
    self.compliant_custom_ops: Set[torch._ops.OpOverload] = set({})

    # We save the global torch state here to be restored in case of graph
    # breaks. The relevant issue is seen here
    # https://github.com/pytorch/pytorch/pull/100570#issuecomment-1543427086
    # where inlining of a function changes the global state (because of the
    # presence of torch.no_grad) and there is a graph break.
    self.save_global_state()

    # Tracks the original FQNs of the constant tensors from the original graph,
    # i.e. buffers and parameters.
    self.dynamo_flat_name_to_original_fqn: Dict[str, str] = {}

    # All calls to random() are replaced with a single call to __gen_rand_values
    # functions that returns a tuple of random values for each original call.
    # random_calls tracks calls to random() and random_values_var stores the name of
    # the variable that stores __gen_rand_values results.
    self.random_calls: List[
        Tuple[Callable[..., object], Tuple[object, ...], Dict[str, object]]
    ] = []
    self.random_values_var = None

    # Bytecode to insert right before we call the graph
    self.pregraph_bytecode: List[Instruction] = []

    # Use to pass values to backward hooks when using compiled autograd
    self.backward_state: Dict[str, VariableTracker] = {}
    self.backward_state_proxy: Optional[torch.fx.Proxy] = None
    self.backward_state_var: Optional[str] = None

    self.name_of_builtins_dict_key_in_fglobals: str = (
        self.install_builtins_dict_in_fglobals()
    )

    # Sources for which dict key ordering must be guarded.
    self.guard_on_key_order: Set[str] = set()
|
| 421 |
+
|
| 422 |
+
def install_builtins_dict_in_fglobals(self):
    """Install the builtins mapping into f_globals under a unique key.

    f_globals["__builtins__"] may be either a dict or a module — a CPython
    implementation detail (https://docs.python.org/3/library/builtins.html).
    That would force every builtin guard to branch on the container type
    (getattr vs getitem).  We normalize to the underlying dict and install
    it into f_globals under a fresh, collision-free key via install_global,
    so guards can always use plain getitem.

    Returns:
        The unique key under which the builtins dict was installed.
    """
    builtins_obj = self.global_scope["__builtins__"]
    normalized = (
        builtins_obj if isinstance(builtins_obj, dict) else builtins_obj.__dict__
    )
    return self.install_global("__builtins_dict__", normalized)
|
| 440 |
+
|
| 441 |
+
def add_backward_state_hook(self, hook: VariableTracker, prefix="hook"):
    """Register `hook` in backward_state under a fresh name.

    Returns:
        (name, proxy) — the generated key and the BackwardState graph proxy.
    """
    hook_name = f"{prefix}{len(self.backward_state)}"
    # Names are derived from the current dict size, so a collision would
    # indicate state corruption.
    assert hook_name not in self.backward_state
    self.backward_state[hook_name] = hook
    return hook_name, self.get_backward_state_proxy()
|
| 446 |
+
|
| 447 |
+
def get_backward_state_proxy(self):
    """Return the graph-input proxy for BackwardState, creating it lazily.

    Created at most once per OutputGraph; export mode is rejected because
    backward_state requires a synthetic graph input.
    """
    if self.backward_state_proxy is None:
        if self.export:
            unimplemented("backward_state does not support export")
        self.backward_state_proxy = self.root_tracer.create_graph_input(
            "dynamo_backward_state", BackwardState, source=BackwardStateSource()
        )
        self.backward_state_proxy.node.meta["grapharg"] = BackwardStateGraphArg()
        set_example_value(self.backward_state_proxy.node, BackwardState())
        # Reserve a local variable name to hold the BackwardState at runtime.
        self.backward_state_var = self.new_var()
    return self.backward_state_proxy
|
| 458 |
+
|
| 459 |
+
# This gets its own helper function so guards DEBUG logs are more informative
|
| 460 |
+
def init_ambient_guards(self):
    """Install guards on ambient global state (separate helper so guards
    DEBUG logs are more informative)."""
    # Register a SHAPE_ENV guard to make sure we setup shape guards
    # that show up in ShapeEnv
    self.guards.add(ShapeEnvSource().make_guard(GuardBuilder.SHAPE_ENV))

    self.guards.add(
        GlobalStateSource().make_guard(GuardBuilder.DETERMINISTIC_ALGORITHMS)
    )

    self.guards.add(GlobalStateSource().make_guard(GuardBuilder.GRAD_MODE))

    self.guards.add(GlobalStateSource().make_guard(GuardBuilder.DEFAULT_DEVICE))

    self.guards.add(
        GlobalStateSource().make_guard(GuardBuilder.TORCH_FUNCTION_STATE)
    )

    # Only guard the functorch interpreter stack if one is currently active.
    ci = torch._C._functorch.peek_interpreter_stack()
    if ci is not None:
        self.guards.add(
            GlobalStateSource().make_guard(GuardBuilder.FUNCTORCH_STACK_MATCH)
        )
|
| 482 |
+
|
| 483 |
+
def synthetic_graph_input(self, fn, args):
    """
    call fn(*args) before the graph runs and turn the result into a fake input.
    """
    example_value = fn(*args)
    varname = self.new_var()
    # Emit bytecode (run before the graph) that imports fn, calls it with
    # the constant args, and stores the result into `varname`.
    cg = PyCodegen(self.root_tx)
    cg.add_push_null(
        lambda: cg.load_import_from(
            fn.__module__,
            fn.__name__,
        )
    )
    cg.foreach(map(variables.ConstantVariable.create, args))
    cg.call_function(len(args), False)
    cg.store(varname)
    self.pregraph_bytecode.extend(cg.get_instructions())
    source = SyntheticLocalSource(varname)
    result = VariableBuilder(self.root_tx, source)(example_value)
    # Drop any guards the builder installed against the synthetic local —
    # it does not exist until the pregraph bytecode runs.
    TracingContext.get().guards_context.dynamo_guards.remove_guards_with_source(
        source
    )
    return result
|
| 506 |
+
|
| 507 |
+
def add_cleanup_hook(self, fn: Callable[[], Any]):
    """Register a zero-arg callable to run in call_cleanup_hooks()."""
    self.cleanup_hooks.append(fn)
|
| 509 |
+
|
| 510 |
+
def call_cleanup_hooks(self):
    """Run registered cleanup hooks in reverse (LIFO) order, then clear them."""
    for hook in reversed(self.cleanup_hooks):
        hook()
    self.cleanup_hooks.clear()
|
| 514 |
+
|
| 515 |
+
@property
def root_tracer(self):
    """The outermost SubgraphTracer (bottom of the tracer stack)."""
    return self.tracers[0]
|
| 518 |
+
|
| 519 |
+
@property
def current_tracer(self):
    """The innermost SubgraphTracer (top of the tracer stack)."""
    return self.tracers[-1]
|
| 522 |
+
|
| 523 |
+
def is_root_tracer(self):
    # Helper to tell if we are inside the higher order operator tracing.
    return len(self.tracers) == 1
|
| 526 |
+
|
| 527 |
+
@property
def graph(self):
    """The fx.Graph being built by the current (innermost) tracer."""
    return self.current_tracer.graph

# TODO(rzou): can delete after we refactor speculate_subgraph to use nested GraphTracer.
@graph.setter
def graph(self, value):
    self.current_tracer.graph = value
|
| 535 |
+
|
| 536 |
+
@property
def input_name_to_proxy(self):
    """Delegates to the current tracer's input-name -> proxy mapping."""
    return self.current_tracer.input_name_to_proxy
|
| 539 |
+
|
| 540 |
+
@property
def real_value_cache(self):
    """Delegates to the current tracer's real-value cache."""
    return self.current_tracer.real_value_cache
|
| 543 |
+
|
| 544 |
+
# If you are here, and you're looking for create_graph_input,
|
| 545 |
+
# to avoid ambiguity, please call one of the following:
|
| 546 |
+
# - self.current_tracer.create_graph_input
|
| 547 |
+
# - self.root_tracer.create_graph_input
|
| 548 |
+
# See NOTE [HigherOrderOperator tracing design] for more context.
|
| 549 |
+
|
| 550 |
+
def create_proxy(self, *args, **kwargs):
    """Forward to the current tracer's create_proxy."""
    return self.current_tracer.create_proxy(*args, **kwargs)
|
| 552 |
+
|
| 553 |
+
def create_node(self, *args, **kwargs):
    """Forward to the current tracer's create_node."""
    return self.current_tracer.create_node(*args, **kwargs)
|
| 555 |
+
|
| 556 |
+
def remove_node(self, *args, **kwargs):
    """Forward to the current tracer's remove_node."""
    return self.current_tracer.remove_node(*args, **kwargs)
|
| 558 |
+
|
| 559 |
+
@contextlib.contextmanager
def subtracer(self, source_target, prior_tracer):
    """Context manager pushing a SubgraphTracer for higher-order-op tracing.

    Reuses `prior_tracer` when given (its parent must be the current
    tracer), otherwise creates a fresh child tracer.  The tracer and scope
    are popped/exited on the way out, even on exception.
    """
    new_scope_ctx = enter_new_scope()
    try:
        if prior_tracer:
            # Lineage MUST stay preserved
            assert prior_tracer.parent is self.current_tracer
        new_scope_ctx.__enter__()
        tracer = (
            prior_tracer
            if prior_tracer
            else SubgraphTracer(
                self, parent=self.current_tracer, source_target=source_target
            )
        )
        self.tracers.append(tracer)
        yield tracer
    finally:
        new_scope_ctx.__exit__(None, None, None)
        self.tracers.pop()
|
| 579 |
+
|
| 580 |
+
@property
def output(self):
    """Self-reference, so OutputGraph can stand in where a tx.output is expected."""
    return self
|
| 583 |
+
|
| 584 |
+
@property
def fake_mode(self):
    """The FakeTensorMode stored on the tracing context."""
    return self.tracing_context.fake_mode
|
| 587 |
+
|
| 588 |
+
@property
def shape_env(self):
    """The ShapeEnv attached to the fake mode."""
    return self.tracing_context.fake_mode.shape_env
|
| 591 |
+
|
| 592 |
+
@property
def guards(self) -> torch._guards.GuardsSet:
    """The dynamo guards set accumulated on the tracing context."""
    return self.tracing_context.guards_context.dynamo_guards
|
| 595 |
+
|
| 596 |
+
@property
def nn_modules(self) -> Dict[str, Any]:
    """The nn.Module registry held by the tracing context's module context."""
    return self.tracing_context.module_context.nn_modules
|
| 599 |
+
|
| 600 |
+
def save_global_state(self, out=None):
    """
    Saves to out if it is provided. Else saves to the tracing context's global_state.

    Each entry maps a state name to a (setter, current_value) pair so the
    state can later be restored (e.g. after a graph break).
    """
    global_state = (
        out if out is not None else self.tracing_context.global_context.global_state
    )

    # TODO - Consider having a torch level API for torch_function_state. As
    # of now, we create a ref cycle by passing the
    # output.set_torch_function_state to
    # output.tracing_context.global_context.global_state. In the interim,
    # the problem can be solved by manually set
    # output.tracing_context.global_context.global_state to None at cleanup.
    global_state["torch_function_enabled"] = (
        self.set_torch_function_state,
        self.torch_function_enabled,
    )
    global_state["grad_enabled"] = (torch.set_grad_enabled, torch.is_grad_enabled())

    # Autocast state is tracked separately per device type (cuda / cpu).
    global_state["autocast_enabled"] = (
        functools.partial(torch.set_autocast_enabled, "cuda"),
        torch.is_autocast_enabled("cuda"),
    )
    global_state["autocast_cpu_enabled"] = (
        functools.partial(torch.set_autocast_enabled, "cpu"),
        torch.is_autocast_enabled("cpu"),
    )
    global_state["autocast_gpu_dtype"] = (
        functools.partial(torch.set_autocast_dtype, "cuda"),
        torch.get_autocast_dtype("cuda"),
    )
    global_state["autocast_cpu_dtype"] = (
        functools.partial(torch.set_autocast_dtype, "cpu"),
        torch.get_autocast_dtype("cpu"),
    )
    global_state["autocast_cache_enabled"] = (
        torch.set_autocast_cache_enabled,
        torch.is_autocast_cache_enabled(),
    )
|
| 640 |
+
|
| 641 |
+
def push_tx(self, tx):
    """Record *tx* as the innermost active translator (see current_tx)."""
    self._current_tx += [tx]
|
| 643 |
+
|
| 644 |
+
def pop_tx(self):
    """Remove and return the most recently pushed translator."""
    innermost = self._current_tx.pop()
    return innermost
|
| 646 |
+
|
| 647 |
+
@property
def current_tx(self):
    # Innermost translator pushed via push_tx; falls back to root_tx when
    # the stack is empty (i.e. not inside a nested trace).
    return self.root_tx if not self._current_tx else self._current_tx[-1]
|
| 650 |
+
|
| 651 |
+
def add_symbol_bindings(self, arg: GraphArg):
    """Bind every symbolic size/stride/offset of ``arg`` as an explicit SymInt graph input.

    Walks the fake tensor attached to ``arg`` and, for each symbolic
    dimension not yet in ``self.bound_symbols``, creates a graph input
    carrying that symbol, so downstream transforms never have to extract
    sizevars implicitly from tensors. No-op under export.
    """
    # Insert implicit size vars as necessary. With dynamic shapes, we
    # maintain the invariant that every sizevar gets a direct SymInt input
    # into the graph. This means downstream graph transforms can assume
    # every size variable is explicitly bound and accessible, instead of
    # having to pull it out implicitly from tensors.

    if self.export:
        return

    assert arg.fake_tensor is not None

    def bind_symint(s, prop):
        # Only plain symbols are bound; compound expressions and concrete
        # ints are skipped.
        if not (is_symbolic(s) and isinstance(s.node.expr, sympy.Symbol)):
            return
        s0 = s.node.expr
        # Each symbol is bound at most once per graph.
        if s0 in self.bound_symbols:
            return
        self.bound_symbols.add(s0)
        log.debug("bind_symint %s %s", s, prop.name())
        # TODO: don't readd symint if we already have it in graph
        # (this is harmless because we do remove the unused ones later)
        proxy = self.root_tracer.create_graph_input(
            str(s0),
            torch.SymInt,
            before=True,
            source=prop,
        )
        set_example_value(proxy.node, s)
        proxy.node.meta["grapharg"] = GraphArg(
            prop,
            s,
            pass_arg_as_tensor=False,
            fake_tensor=None,
            is_tensor=False,
        )

    def handle_tensor(t, src):
        # Recurses into sparse components and traceable wrapper subclasses
        # so their inner tensors' symbols are bound too.
        for i, s in enumerate(t.size()):
            bind_symint(s, TensorPropertySource(src, TensorProperty.SIZE, i))
        if t.layout is torch.strided:
            for i, s in enumerate(t.stride()):
                bind_symint(s, TensorPropertySource(src, TensorProperty.STRIDE, i))
            bind_symint(
                t.storage_offset(),
                TensorPropertySource(src, TensorProperty.STORAGE_OFFSET),
            )
        elif t.layout is torch.sparse_coo:
            handle_tensor(t._indices(), src)
            handle_tensor(t._values(), src)
        elif t.layout in {torch.sparse_csr, torch.sparse_bsr}:
            handle_tensor(t.crow_indices(), src)
            handle_tensor(t.col_indices(), src)
        elif t.layout in {torch.sparse_csc, torch.sparse_bsc}:
            handle_tensor(t.ccol_indices(), src)
            handle_tensor(t.row_indices(), src)
        if is_traceable_wrapper_subclass(t):
            attrs, ctx = t.__tensor_flatten__()
            for attr in attrs:
                inner_t = getattr(t, attr)
                handle_tensor(inner_t, AttrSource(src, attr))

    handle_tensor(arg.fake_tensor, arg.source)
|
| 714 |
+
|
| 715 |
+
def count_calls(self):
    # Number of call_* nodes currently in self.graph; delegates to the
    # module-level count_calls helper (same name, different arity).
    return count_calls(self.graph)
|
| 717 |
+
|
| 718 |
+
def is_empty_graph(self):
    """Return True when the FX graph currently holds no nodes at all."""
    node_list = list(self.graph.nodes)
    return len(node_list) == 0
|
| 720 |
+
|
| 721 |
+
def get_submodule(self, keys):
    """Resolve a dotted path like ``"a.b.c"`` against self.nn_modules.

    Dict levels are indexed by key; everything else is traversed with
    getattr.
    """
    assert keys
    node: Union[torch.nn.Module, Dict[str, torch.nn.Module]] = self.nn_modules
    for part in keys.split("."):
        node = node[part] if isinstance(node, dict) else getattr(node, part)
    return node
|
| 730 |
+
|
| 731 |
+
def new_var(self, name="tmp"):
    """Allocate a fresh local variable name and register it in co_varnames.

    Appends a monotonically increasing id until the name is unused; in the
    common case this succeeds on the first attempt.
    """
    taken = set(self.code_options["co_varnames"])
    while True:
        candidate = f"{name}_{next(self.unique_var_id)}"
        if candidate in taken:
            continue
        self.code_options["co_varnames"] += (candidate,)
        return candidate
|
| 739 |
+
|
| 740 |
+
def update_co_names(self, name):
    """Ensure self.code_options.co_names contains name"""
    existing = self.code_options["co_names"]
    if name in existing:
        return
    self.code_options["co_names"] = existing + (name,)
|
| 744 |
+
|
| 745 |
+
@staticmethod
|
| 746 |
+
def module_key_name(*names):
|
| 747 |
+
# create a new unique name
|
| 748 |
+
name = "_".join(map(str, names))
|
| 749 |
+
# Strip the guard lookup L/G access
|
| 750 |
+
name = re.sub(r"^[GL]\['?(.*?)'?\]$", r"\1", name)
|
| 751 |
+
# e.g. replace abc.xyz[123].qkv with abc.xyz_123.qkv
|
| 752 |
+
name = re.sub(r"\[(\d+)\]", r"_\g<1>", name)
|
| 753 |
+
# e.g. replace abc.xyz_123.qkv with abc_xyz_123_qkv
|
| 754 |
+
name = re.sub(r"[^a-zA-Z0-9]", "_", name)
|
| 755 |
+
|
| 756 |
+
if not name or not name[0].isalpha():
|
| 757 |
+
name = "sub" + name
|
| 758 |
+
|
| 759 |
+
return name
|
| 760 |
+
|
| 761 |
+
def register_attr_or_module(
    self,
    target: Union[torch.nn.Module, torch.Tensor, Any],
    *names,
    **options,
):
    """Register ``target`` (tensor / nn.Module / SymInt / other) under self.nn_modules.

    Picks a type-specific ``wrap_name`` closure that turns a module key
    into the right VariableTracker, deduplicates against already-registered
    objects (identity match), then installs ``target`` under a unique,
    flattened key derived from ``names``.
    """
    if is_dynamic_nn_module(target, self.root_tx.export):
        # Instead of returning UnspecializedNNModuleVariable, call
        # VariableBuilder so that it is tracked for mutation.
        return VariableBuilder(self.current_tx, **options)(target)

    options = dict(options)
    assert "source" in options
    source = options["source"]
    assert not isinstance(source, ParamBufferSource)

    if isinstance(target, torch.Tensor):
        tracer = self.current_tracer
        if not self.is_root_tracer():
            # For higher order ops, we don't want to insert the get_attr in
            # innermost graph. Instead, we want to raise the params/buffers
            # as inputs to the higher-order graph, and register them as
            # get_attrs in the root tracer.

            # Note that Dynamo will still call lift_tracked_freevar_to_input
            # when these inputs are encountered for the inner graph. The
            # only difference is what happens at the root tracer for
            # nn.Parameters vs free inputs. The free inputs are registered
            # as placeholders in the root graph, whereas the nn.Parameters
            # are registered as get_attr nodes in the root graph.
            tracer = self.root_tracer

        def wrap_name(module_key):
            assert self.param_name_to_source is not None
            self.param_name_to_source[module_key] = source

            # Check if the attr has already been registered. This can happen
            # when two different sources point to the same tensor.
            if target in self.root_tx.output.side_effects:
                return self.root_tx.output.side_effects[target]

            if get_static_address_type(target) == "guarded":
                install_guard(source.make_guard(GuardBuilder.ID_MATCH))
            elif not is_constant_source(source):
                install_guard(source.make_guard(GuardBuilder.TENSOR_MATCH))

            vt = wrap_fx_proxy(
                self.root_tx,
                tracer.create_proxy("get_attr", module_key, (), {}),
                example_value=target,
                **options,
            )

            # Track the object so to avoid duplicate registration in case of
            # different sources pointing to the same tensor object.
            vt = self.root_tx.output.side_effects.track_object_existing(target, vt)

            assert "tensor_dict" not in vt.proxy.node.meta
            vt.proxy.node.meta["tensor_dict"] = _extract_tensor_dict(target)

            return vt

    elif isinstance(target, torch.nn.Module):
        assert isinstance(target, torch.nn.Module)

        if source:
            install_guard(source.make_guard(GuardBuilder.NN_MODULE))

            def wrap_name(module_key):
                return NNModuleVariable(type(target), module_key, target, **options)

        else:
            # This is Dynamo created graph module, e.g., graph module coming
            # from higher order ops. NNModuleVariable tracker can't be
            # sourceless, so let's return a unspecializedNNModule variable
            # tracker.
            def wrap_name(module_key):
                return variables.UnspecializedNNModuleVariable(target, **options)

    elif isinstance(target, (torch.SymInt, torch.SymFloat)):
        # HACKY CODE REGION BEGIN
        # WE ARE PIGGYBACKING ON EXISTING INFRA TO REGISTER ATTRS
        # This ultimately gets written to self.nn_modules, which is unfortunate
        # Attrs that are tenors and symints and such need to be migrated to have their
        # own storage
        # alas, this is like this for now

        def wrap_name(module_key):
            return SymNodeVariable.create(
                self,
                self.create_proxy("get_attr", module_key, (), {}),
                sym_num=target,
                **options,
            )

        # HACKY CODE REGION END
    else:
        # Fallback: stash the value in the global scope under its module
        # key and build a tracker for it from a ConstantSource.
        def wrap_name(module_key):
            self.output.update_co_names(module_key)
            self.global_scope[module_key] = target
            return VariableBuilder(self, ConstantSource(source_name=module_key))(
                target
            )

    # Identity-based dedup: reuse the existing key when this exact object
    # was already registered.
    for k, v in self.nn_modules.items():
        if v is target:
            # it already exists
            return wrap_name(k)

    name = OutputGraph.module_key_name(*names)

    # Probe name, name_0, name_1, ... until an unused key is found.
    base = name
    for i in itertools.count():
        if name not in self.nn_modules:
            self.nn_modules[name] = target
            if isinstance(target, torch.nn.Module):

                def register_leaf_name(leaf_name):
                    assert self.param_name_to_source is not None
                    new_source = ParamBufferSource(source, leaf_name)
                    new_name = f"{name}.{leaf_name}"
                    self.param_name_to_source[new_name] = new_source
                    if isinstance(source, LocalSource):
                        self.dynamo_flat_name_to_original_fqn[
                            OutputGraph.module_key_name(new_source.name())
                        ] = leaf_name

                # annoying, but there are cases when we do not have parameters
                # see test_nn_moduledict_contains
                if hasattr(target, "_parameters"):
                    for leaf_name, _ in target.named_parameters():
                        register_leaf_name(leaf_name)
                if hasattr(target, "_buffers"):
                    for leaf_name, _ in target.named_buffers():
                        register_leaf_name(leaf_name)

            return wrap_name(name)
        name = f"{base}_{i}"

    raise AssertionError("unreachable")
|
| 902 |
+
|
| 903 |
+
def handle_aliases_for_stolen_lists(self, tx):
    """Emit bytecode aliases for elements of stolen list inputs still in use.

    Returns a list of Instructions of the form ``alias = stolen_list[idx]``
    and repoints each affected VariableTracker's source at the alias local,
    so the values survive the compiled function clearing the list.
    Returns ``[]`` when nothing is stolen.
    """
    # If list inputs are stolen, but still needed after the function call, create aliases to keep them alive
    maybe_gm = self.local_scope.get("self")
    stolen_list_names = get_locals_to_steal(maybe_gm)
    if not stolen_list_names:
        return []

    alias_insts = []
    # stolen list name -> trackers whose source indexes into that list
    needs_alias: Dict[
        str, List[Union[VariableTracker, AttributeMutationExisting]]
    ] = {}

    # Scan everything still live: the stack, symbolic locals, and objects
    # with pending attribute mutations.
    queue = [
        *tx.stack,
        *tx.symbolic_locals.values(),
        *self.side_effects.store_attr_mutations.keys(),
    ]

    while queue:
        x = queue.pop()
        if isinstance(x, BaseListVariable):
            assert isinstance(x.items, List)
            queue += x.items
            continue

        # Only trackers sourced as stolen_list[idx] need aliasing.
        if not (
            isinstance(x, (VariableTracker, AttributeMutationExisting))
            and isinstance(x.source, GetItemSource)
            and isinstance(x.source.base, LocalSource)
            and x.source.base.local_name in stolen_list_names
        ):
            continue

        stolen_name = x.source.base.local_name
        if stolen_name not in needs_alias:
            needs_alias[stolen_name] = []
        needs_alias[stolen_name].append(x)

    visited = {}
    for arg in self.graphargs:
        if not (
            isinstance(arg._example, list)
            and isinstance(arg.source, LocalSource)
            and arg.source.local_name in needs_alias
        ):
            continue

        # arg is a list that will be cleared by the compiled function
        list_name = arg.source.local_name
        assert list_name in self.code_options["co_varnames"]
        for x in needs_alias[list_name]:
            list_idx = x.source.index
            # One alias per distinct index, shared by all its trackers.
            if list_idx not in visited:
                alias_name = self.new_var(
                    f"{list_name}_ref"
                )  # self.new_var already adds unique id suffix

                visited[list_idx] = alias_name
                # bytecode of `alias_name = list_name[list_idx]`
                alias_insts.extend(
                    [
                        create_instruction("LOAD_FAST", argval=list_name),
                        create_instruction("LOAD_CONST", argval=list_idx),
                        create_instruction("BINARY_SUBSCR"),
                        create_instruction("STORE_FAST", argval=alias_name),
                    ]
                )

            # operate on alias, handled by suffix codegen
            x.source = LocalSource(visited[list_idx])

    return alias_insts
|
| 975 |
+
|
| 976 |
+
def compile_subgraph(
    self, tx, partial_convert=False, reason: Optional[GraphCompileReason] = None
):
    """
    Generate a subgraph to continue execution on user code.
    Automatically restore live variables.

    Compiles the traced FX graph, emits bytecode that calls it, and emits
    bytecode restoring the Python stack and live locals around the call.
    """
    assert reason is not None

    from .decorators import disable

    self.partial_convert = partial_convert
    self.compile_subgraph_reason = reason
    self.should_exit = True

    log.debug("COMPILING GRAPH due to %s", reason)

    if not all(block.can_restore() for block in tx.block_stack):
        unimplemented("compile_subgraph with block_depth != 0")

    prefix_insts: List[Instruction] = []
    if sys.version_info >= (3, 11):
        # prefix instructions (Python 3.11+)
        for inst in tx.prefix_insts:
            if inst.opname == "MAKE_CELL":
                prefix_insts.append(
                    create_instruction("MAKE_CELL", argval=inst.argval)
                )
            elif inst.opname == "COPY_FREE_VARS":
                prefix_insts.append(
                    create_instruction(
                        "COPY_FREE_VARS", arg=len(tx.code_options["co_freevars"])
                    )
                )
            else:
                prefix_insts.append(copy.copy(inst))
    assert not (
        self.pregraph_bytecode and self.export
    ), "export does not support pregraph_bytecode"
    prefix_insts.extend(self.pregraph_bytecode)
    prefix_insts.extend(self.handle_aliases_for_stolen_lists(tx))

    def append_prefix_insts():
        # Flush the accumulated prefix exactly once; later calls are no-ops.
        self.add_output_instructions(prefix_insts)
        prefix_insts.clear()

    for block in reversed(tx.block_stack):
        block.exit(tx)

    self.cleanup_graph()
    tx.prune_dead_locals()
    stack_values = list(tx.stack)

    # realize any unrealized tensor VTs in case they
    # need to be added to self.nn_modules as attributes
    for value in stack_values:
        value.realize()

    # Use nn.Module "proxies" in the constructed GraphModule so that
    # the resulting GM does not hold additional strong references to the original modules.
    # This prevents a strong ref cycle where Dynamo created code holds on to references
    # to modules that also have Dynamo code cache invalidation checks.
    # When cache invalidation runs, the generated GM will be invalidated, which also deletes
    # the proxies.
    nn_modules_proxies = {
        name: nn_module_proxy(mod) for name, mod in self.nn_modules.items()
    }
    root = FakeRootModule(nn_modules_proxies)
    # Add all the local vars to the "stack" so restore at the end
    restore_vars = []
    val_to_names: Dict[VariableTracker, List[str]] = {}
    if stack_values:
        val_to_names[stack_values[-1]] = []
    # NB: Typically (i.e., for graph compile from RETURN_VALUE),
    # symbolic_locals will be empty at this point, as prune_dead_locals
    # will clear out all of symbolic_locals because RETURN_VALUE is the
    # last instruction and no more locals are used. The fanciness here
    # is only needed for partial graphs.
    for k, v in tx.symbolic_locals.items():
        # Note! this explicitly uses .local_name for matching
        # Failure to do so will cause spurious registrations in val_to_names.
        # This will in turn result in spurious variables showing up in the graph.
        # This was very tricky to debug. For an example, dump the graph at call_user_compiler
        # while running test_subgraphs.py
        if isinstance(v.source, LocalSource) and v.source.local_name == k:
            continue  # no need to restore initial state
        # Do not load variable if it is NULL.
        if sys.version_info >= (3, 12):
            # Continuation function will load the NULL for v.
            if type.__instancecheck__(NullVariable, v):
                continue
        else:
            # A variable should never be NULL in < 3.12
            assert not type.__instancecheck__(NullVariable, v)
        if v not in val_to_names:
            val_to_names[v] = []
        val_to_names[v].append(k)
    for v in val_to_names.keys():
        restore_vars.extend(val_to_names[v])
        stack_values.extend([v] * len(val_to_names[v]))

    # to handle random calls
    if len(self.random_calls) > 0:
        append_prefix_insts()
        random_calls_instructions = []
        self.random_values_var = self.new_var("random_values")
        rand_fn = disable(_get_gen_rand_values_fn(self.random_calls))
        rand_fn_name = self.install_global("__gen_rand_values", rand_fn)
        codegen = PyCodegen(tx, root)
        random_calls_instructions.extend(
            codegen.load_function_name(rand_fn_name, True)
        )
        random_calls_instructions.extend(create_call_function(0, False))
        random_calls_instructions.append(
            codegen.create_store(tx.output.random_values_var),
        )
        self.add_output_instructions(random_calls_instructions)

    # Fast path: every stack value is a distinct plain TensorVariable and
    # there are no side effects / debug locals / backward state, so the
    # graph outputs can be UNPACK_SEQUENCE'd straight onto the stack.
    if (
        stack_values
        and all(
            not isinstance(
                v,
                (
                    UnspecializedPythonVariable,
                    NumpyNdarrayVariable,
                    TensorWithTFOverrideVariable,
                ),
            )
            and not (isinstance(v, SymNodeVariable) and v.python_type() is float)
            for v in stack_values
        )
        and all(isinstance(x, TensorVariable) for x in stack_values)
        and len(set(stack_values)) == len(stack_values)
        and self.side_effects.is_empty()
        and not len(tx.debug_locals) != 0
        and not self.backward_state
    ):
        append_prefix_insts()
        # optimization to generate better code in a common case
        self.add_output_instructions(
            self.compile_and_call_fx_graph(tx, list(reversed(stack_values)), root)
            + [create_instruction("UNPACK_SEQUENCE", arg=len(stack_values))]
        )
        # restore all the live local vars
        self.add_output_instructions(
            [PyCodegen(tx).create_store(var) for var in reversed(restore_vars)]
        )
    else:
        # General path: run codegen twice — pass1 to discover which values
        # are used more than once, pass2 with those values as tempvars.
        graph_output_var = self.new_var("graph_out")
        pass1 = PyCodegen(tx, root, graph_output_var)
        self.codegen_suffix(tx, stack_values, pass1)

        # one more time now that we have established tempvars
        pass2 = PyCodegen(
            tx,
            root,
            graph_output_var,
            tempvars={val: None for val, count in pass1.uses.items() if count > 1},
        )
        self.codegen_suffix(tx, stack_values, pass2)

        stored_graph_output_var = False
        output = []
        if count_calls(self.graph) != 0 or len(pass2.graph_outputs) != 0:
            output.extend(
                self.compile_and_call_fx_graph(tx, pass2.graph_output_vars(), root)
            )

            if len(pass2.graph_outputs) != 0:
                output.append(pass2.create_store(graph_output_var))
                stored_graph_output_var = True
            else:
                output.append(create_instruction("POP_TOP"))
        else:
            # NB: Important to run compiler collective even when there is
            # a graph break
            self.run_compiler_collective(tx)
        append_prefix_insts()
        self.add_output_instructions(output + pass2.get_instructions())

        # restore all the live local vars
        self.add_output_instructions(
            [PyCodegen(tx).create_store(var) for var in reversed(restore_vars)]
        )

        if stored_graph_output_var:
            self.add_output_instructions(
                [PyCodegen(tx).create_delete(graph_output_var)]
            )
|
| 1166 |
+
|
| 1167 |
+
def codegen_suffix(self, tx, stack_values, cg):
    """Emit the post-graph bytecode: backward-state stores, side-effect
    hooks/tempvars, debug-local logging calls, stack restoration, and
    mutation replay — all via the given PyCodegen ``cg``."""
    if self.backward_state:
        assert not self.export
        # backward_state entries become attribute stores on the
        # backward_state object.
        for name, val in self.backward_state.items():
            cg(val)
            cg.append_output(cg.create_load(self.backward_state_var))
            cg.store_attr(name)
    self.side_effects.codegen_hooks(cg)
    self.side_effects.codegen_save_tempvars(cg)

    # Return variables used for logging at the end
    for debug_var, args in tx.debug_locals:
        cg.add_push_null(lambda: cg(debug_var))
        for arg in args:
            cg(arg)
        cg.extend_output(create_call_function(len(args), False))
        # The debug call's return value is discarded.
        cg.extend_output([create_instruction("POP_TOP")])

    cg.restore_stack(stack_values, value_from_source=not tx.export)
    self.side_effects.codegen_update_mutated(cg)
|
| 1187 |
+
|
| 1188 |
+
def cleanup_graph(self):
    """
    Remove "creation_timestamp" from node meta

    Remove this pattern from the graph:
        torch._C._set_grad_enabled(False)
        torch._C._set_grad_enabled(True)
    """
    assert self.should_exit
    nodes = list(self.graph.nodes)
    for node in nodes:
        node.meta.pop("creation_timestamp", None)

    # Walk adjacent node pairs tracking the effective grad mode; a toggle
    # immediately followed by its inverse is a no-op pair and both nodes
    # are erased.
    grad_enabled = torch.is_grad_enabled()
    for node1, node2 in zip(nodes, nodes[1:]):
        if (
            node1.target is torch._C._set_grad_enabled
            and tuple(node1.args) == (not grad_enabled,)
            and not node1._erased
        ):
            grad_enabled = node1.args[0]
            if (
                node2.target is torch._C._set_grad_enabled
                and tuple(node2.args) == (not grad_enabled,)
                and not node2._erased
            ):
                grad_enabled = node2.args[0]
                self.graph.erase_node(node1)
                self.graph.erase_node(node2)
|
| 1217 |
+
|
| 1218 |
+
def get_graph_sizes_structured(self):
    """Map each FakeTensor-backed node's name to its example-value sizes.

    Concrete dims are kept as ints; symbolic dims are stored as repr
    strings. Nodes without a FakeTensor example_value are skipped.
    """
    sizes_by_name = {}
    for node in self.graph.nodes:
        ev = node.meta.get("example_value", None)
        if not isinstance(ev, torch._subclasses.FakeTensor):
            continue
        sizes_by_name[node.name] = [
            dim if isinstance(dim, int) else repr(dim) for dim in ev.size()
        ]
    return sizes_by_name
|
| 1226 |
+
|
| 1227 |
+
def get_graph_sizes(self, name: str):
    """Render a human-readable dump of traced tensor sizes for frame *name*.

    For each FakeTensor-backed node one line with its (possibly symbolic)
    size is emitted; when every dim is an int or a SymInt and at least one
    was symbolic, a second "(concrete)" line with the SymInt hints follows.
    """
    graph_sizes_str = "TRACED GRAPH TENSOR SIZES\n"
    graph_sizes_str += f"===== {name} =====\n"
    for node in self.graph.nodes:
        example_value = node.meta.get("example_value", None)
        if not isinstance(example_value, torch._subclasses.FakeTensor):
            continue
        size = example_value.size()
        graph_sizes_str += f"{node.name}: {tuple(size)}\n"
        concrete_size = []
        has_symint = False
        all_dims_known = True
        for sz in size:
            if isinstance(sz, int):
                concrete_size.append(sz)
            elif isinstance(sz, torch.SymInt):
                has_symint = True
                concrete_size.append(sz.node.hint)
            else:
                # Dim of unknown type: no concrete line for this node.
                all_dims_known = False
                break
        if all_dims_known and has_symint:
            graph_sizes_str += (
                f"{node.name} (concrete): {tuple(concrete_size)}\n"
            )
    return graph_sizes_str
|
| 1251 |
+
|
| 1252 |
+
@contextlib.contextmanager
def restore_global_state(self):
    """
    Momentarily restores the global state to what it was prior to tracing the current output

    On exit the state captured at entry (via save_global_state) is put
    back, even if the body raises.
    """
    prior_global_state = self.tracing_context.global_context.copy_graphstate()
    current_global_state: Dict[str, Tuple[Any, bool]] = {}
    self.save_global_state(out=current_global_state)
    try:
        # Set to state prior to tracing the graph
        self.tracing_context.global_context.restore_graphstate(prior_global_state)
        yield
    finally:
        # Reset to state at the current time (e.g. before calling the user compiler)
        self.tracing_context.global_context.restore_graphstate(
            GlobalContextCheckpointState(current_global_state)
        )
|
| 1269 |
+
|
| 1270 |
+
def run_compiler_collective(self, tx):
    """All-gather per-rank local compile state across the compile process group.

    Only runs once per distributed_state (when ``all_states`` is unset).
    After gathering it clears the speculation log and raises
    CompileCollectiveRestartAnalysis so tracing restarts with the gathered
    information.
    """
    if (ds := tx.distributed_state) is not None and ds.all_states is None:
        compile_pg = ds.compile_pg
        log.info("compiler_collective %s", ds.local_state)
        torch._logging.trace_structured(
            "artifact",
            metadata_fn=lambda: {
                "name": "compiler_collective",
                "encoding": "json",
            },
            payload_fn=lambda: json.dumps(
                dataclasses.asdict(ds.local_state),
            ),
        )
        # Pin the collective to this rank's device before gathering.
        with torch.cuda.device(compile_pg.rank() % torch.cuda.device_count()):
            all_states = [None] * compile_pg.size()
            dist.all_gather_object(all_states, ds.local_state, group=compile_pg)
            ds.all_states = all_states
        # Clear speculation log, because are tracing may diverge due to
        # this information from the compiler collective
        tx.speculation_log.clear()
        raise exc.CompileCollectiveRestartAnalysis
|
| 1292 |
+
|
| 1293 |
+
def compile_and_call_fx_graph(self, tx, rv, root):
    """
    Generate code from self.graph and return the Instruction()s to
    call that generated code.

    Finalizes the graph (output node, runtime asserts, pruning), builds a
    GraphModule, hands it to the user backend compiler, installs the
    compiled function as a unique global, and emits the call bytecode.
    """
    with torch._guards.TracingContext.clear_frame():
        from .decorators import disable

        assert self.should_exit

        self.run_compiler_collective(tx)

        name = unique_id("__compiled_fn")

        assert isinstance(rv, list)
        assert isinstance(root, FakeRootModule)
        # The FX output node returns every value in rv as a tuple.
        output_node = self.create_node(
            "output",
            "output",
            (self.current_tracer.create_arg(tuple(x.as_proxy() for x in rv)),),
            {},
        )
        tx.output.current_tracer._maybe_preserve_original_meta(tx, output_node)
        if not config.do_not_emit_runtime_asserts:
            insert_deferred_runtime_asserts(
                fx.GraphModule(root, self.graph),
                self.shape_env,
                name,
            )
        # NB: deferred runtime asserts can keep graphargs live, so make sure
        # those are inserted before pruning
        self.remove_unused_graphargs()
        ncalls = count_calls(self.graph)
        counters["stats"]["calls_captured"] += ncalls

        # free a bit of memory
        self.real_value_cache.clear()

        gm = _make_graph_module(root, self.graph)
        for register_finalizer in self.register_finalizer_fns:
            register_finalizer(gm)

        gm.compile_subgraph_reason = self.compile_subgraph_reason
        gm.meta[
            "dynamo_flat_name_to_original_fqn"
        ] = self.dynamo_flat_name_to_original_fqn.copy()

        graph_code_log.debug(
            "%s",
            lazy_format_graph_code(
                name, gm, include_stride=True, include_device=True, colored=True
            ),
        )
        torch._logging.trace_structured(
            "dynamo_output_graph",
            lambda: {"sizes": self.get_graph_sizes_structured()},
            payload_fn=lambda: gm.print_readable(
                print_output=False, include_stride=True, include_device=True
            ),
        )
        self.call_cleanup_hooks()
        old_fake_mode = self.tracing_context.fake_mode
        if not self.export:
            import torch._functorch.config as _config

            with _config.patch(fake_tensor_allow_unsafe_data_ptr_access=False):
                # TODO(voz): The way export uses gm, and fake tensors, is not supported with us resetting
                backend_fake_mode = torch._subclasses.FakeTensorMode(
                    shape_env=old_fake_mode.shape_env,
                )
            # TODO(voz): Ostensibily, this should be scoped and
            # restore back to old_fake_mode, but doing so currently violates
            # a lot of fake_tensor ownership assumptions and runs afoul of detect_fake_mode
            self.tracing_context.fake_mode = backend_fake_mode

        with self.restore_global_state():
            compiled_fn = self.call_user_compiler(gm)

        from torch.fx._lazy_graph_module import _LazyGraphModule

        if isinstance(compiled_fn, _LazyGraphModule) or (
            isinstance(getattr(compiled_fn, "__self__", None), _LazyGraphModule)
            and compiled_fn.__name__ == "_lazy_forward"  # type: ignore[attr-defined]
        ):
            # Since dynamo will run the forward method for the GraphModule shortly
            # anyways, it does not hurt to do the real recompilation here if
            # this is a _LazyGraphModule. This makes it easier for dynamo to
            # optimize a _LazyGraphModule.

            lazy_gm = (
                compiled_fn
                if isinstance(compiled_fn, _LazyGraphModule)
                else compiled_fn.__self__  # type: ignore[attr-defined]
            )

            _LazyGraphModule.force_recompile(lazy_gm)

            if not isinstance(compiled_fn, _LazyGraphModule):
                # replace compiled_fn with the real forward method
                compiled_fn = lazy_gm.forward

        # Prevent dynamo from re-tracing the compiled function itself.
        compiled_fn = disable(compiled_fn)

        counters["stats"]["unique_graphs"] += 1
        # This is safe because we pre-process name to be unique
        self.install_global_unsafe(name, compiled_fn)

        cg = PyCodegen(tx)
        cg.make_call_generated_code(name)
        return cg.get_instructions()
|
| 1403 |
+
|
| 1404 |
+
@property
def placeholders(self) -> List[fx.Node]:
    """All placeholder (graph-input) nodes of the FX graph, in insertion order."""
    return self.graph.find_nodes(op="placeholder")
|
| 1407 |
+
|
| 1408 |
+
@property
def graphargs(self) -> List[GraphArg]:
    """The GraphArg recorded for each placeholder, in placeholder order."""
    return [ph.meta["grapharg"] for ph in self.placeholders]
|
| 1411 |
+
|
| 1412 |
+
def call_user_compiler(self, gm: fx.GraphModule) -> CompiledFn:
    """Compile ``gm`` with the user backend, timing the call for dynamo metrics."""
    timer = dynamo_timed(
        "OutputGraph.call_user_compiler", phase_name="backend_compile"
    )
    with timer:
        return self._call_user_compiler(gm)
|
| 1417 |
+
|
| 1418 |
+
def _call_user_compiler(self, gm: fx.GraphModule) -> CompiledFn:
    """Run the user-supplied backend compiler on ``gm`` and return the callable.

    Counts ops, attaches source metadata to placeholders, and translates
    backend failures into dynamo's exception types.
    """
    assert self.compiler_fn is not None
    # Count "real" ops (calls) and collect placeholders in one pass.
    tot = 0
    placeholders = []
    for node in gm.graph.nodes:
        if node.op in ("call_function", "call_method", "call_module"):
            tot += 1
        if node.op == "placeholder":
            placeholders.append(node)
    increment_op_count(tot)
    for pl in placeholders:
        arg = pl.meta["grapharg"]
        # TODO: Why isn't this stored in meta :think:
        pl._dynamo_source = arg.source

    # Hand source-tracking maps to the GraphModule so the backend can
    # produce better diagnostics.
    gm._param_name_to_source = self.param_name_to_source  # type: ignore[assignment]
    gm._source_to_user_stacks = self.source_to_user_stacks  # type: ignore[assignment]

    try:
        name = (
            self.compiler_fn.__name__
            if hasattr(self.compiler_fn, "__name__")
            else ""
        )
        _step_logger()(logging.INFO, f"calling compiler function {name}")
        compiler_fn = self.compiler_fn
        if config.verify_correctness:
            # Wrap the backend so its output is checked against eager results.
            compiler_fn = WrapperBackend(compiler_fn)
        compiled_fn = compiler_fn(gm, self.example_inputs())
        _step_logger()(logging.INFO, f"done compiler function {name}")
        assert callable(compiled_fn), "compiler_fn did not return callable"
    except exceptions_allowed_to_be_fallback as e:
        # Fake-tensor exceptions may be recovered from with a graph break —
        # unless the user explicitly allowed custom ops in the graph, in
        # which case we surface a hard failure.
        if self.has_user_defined_allowed_in_graph:
            raise BackendCompilerFailed(self.compiler_fn, e).with_traceback(
                e.__traceback__
            ) from None
        msg = (
            "Backend compiler failed with a fake tensor exception at \n"
            f"{self.root_tx.format_frame_summary()}"
            "Adding a graph break."
        )
        # NOTE(review): this call raises (graph break), so compiled_fn is
        # never read while unbound on this path — TODO confirm.
        unimplemented_with_warning(e, self.root_tx.f_code, msg)
    except SkipFrame as e:
        # The backend compiler has requested that we skip the frame, instead of
        # aborting execution.
        raise e
    except Exception as e:
        raise BackendCompilerFailed(self.compiler_fn, e) from e

    # Emit a telemetry event with graph-size statistics.
    signpost_event(
        "dynamo",
        "OutputGraph.call_user_compiler",
        {
            **self.co_fields,
            "op_count": tot,
            "node_count": len(gm.graph.nodes),
            "input_count": len(placeholders),
        },
    )

    return compiled_fn
|
| 1479 |
+
|
| 1480 |
+
def example_inputs(self) -> List[torch.Tensor]:
    """Return the non-fake example value of every graph input, in order.

    Idiom fix: replaces a manual append loop with a list comprehension
    (same elements, same order).
    NOTE(review): the annotation says Tensor, but grapharg examples may
    include non-tensor values (e.g. SymInt) — annotation kept for
    interface compatibility.
    """
    return [arg.example for arg in self.graphargs]
|
| 1485 |
+
|
| 1486 |
+
def remove_unused_graphargs(self) -> None:
    """Drop graph inputs (and trivially dead nodes) that nothing uses.

    Runs a miniature DCE pass first, then removes unused placeholders,
    taking care to keep placeholders that bind symbols still referenced
    by the remaining inputs' shapes.
    """
    # NB: It's always OK to drop GraphArg for symbols that ended up being
    # specialized.  You don't even have to make a guard for it, because
    # ShapeEnv produce_guards operates on tracked_fakes, which never gets
    # pruned.  That being said, you'll get marginally better generated
    # guard code if you promote the guard into a Dynamo guard (since that
    # allows for the guard to be done using C++ guards.)  If we get
    # ShapeEnv guards to go into C++ guards, this will stop being a thing
    # though!

    assert self.should_exit

    # Miniature DCE pass, but only for obviously trivial operations
    def is_static_true(b_node: fx.node.Argument):
        # Returns True only when the argument is statically known to be
        # the boolean True (literal, example_value, or resolvable SymBool).
        if b_node is True:
            return True
        if not isinstance(b_node, fx.Node):
            return False
        b = b_node.meta.get("example_value")
        if b is None:
            return False
        if b is True:
            return True
        if (
            isinstance(b, torch.SymBool)
            and (r := b.node.maybe_as_bool()) is not None
        ):
            return r
        # TODO: We can also technically remove all cases when the input
        # doesn't have unbacked inputs, since it's all in the ShapeEnv
        return False

    def is_symnode_arg(a: fx.node.Argument):
        # True for plain numbers/bools and for nodes whose example value
        # is a symbolic scalar (SymInt/SymFloat/SymBool).
        from torch.fx.experimental.sym_node import SymTypes

        if isinstance(a, (int, float, bool)):
            return True
        if isinstance(a, fx.Node):
            return isinstance(a.meta.get("example_value"), SymTypes)
        return False

    # NB: We assume that you cannot do mutations on int/float/bool,
    # because they are immutable types, and therefore is always safe to
    # DCE.
    def is_symnode_compute_node(node):
        from torch.fx.experimental.sym_node import SymTypes

        if node.op != "call_function":
            return False
        # TODO: I don't think it's possible to have a bare int/float here?
        if not isinstance(node.meta.get("example_value"), SymTypes):
            return False
        # TODO: This will bail here if you ever end up with a more complicated
        # computation function, like sum(list_of_ints), even though it
        # should be DCE'able
        if not all(is_symnode_arg(a) for a in node.args):
            return False
        if not all(is_symnode_arg(a) for a in node.kwargs.values()):
            return False
        return True

    from torch.fx.experimental.symbolic_shapes import is_accessor_node

    # Reverse order so that removing a user frees its producers in the
    # same sweep.
    for node in reversed(list(self.graph.nodes)):
        if len(list(node.users)) == 0:
            if (
                node.op == "get_attr"
                or (node.op == "call_function" and node.target is operator.getitem)
                or (
                    node.op == "call_function"
                    and node.target is torch._check
                    and is_static_true(node.args[0])
                )
                or is_symnode_compute_node(node)
                or is_accessor_node(node)
            ):
                self.remove_node(node)

    def placeholder_binds_symbol(node):
        # Returns the sympy.Symbol a placeholder binds, or None.
        arg = node.meta["grapharg"]
        example = arg.example
        if isinstance(example, torch.SymInt) and isinstance(
            example.node.expr, sympy.Symbol
        ):
            return example.node.expr
        return None

    def remove_unused(node):
        log.debug("REMOVE UNUSED GRAPHARG %s", node.meta["grapharg"].source.name())
        # I'm not really sure why you need to delete these from the
        # node since the node is going to get removed
        del node.meta["grapharg"]
        self.remove_node(node)
        self.real_value_cache.pop(node, None)

    used_symbols: Set[sympy.Symbol] = set()

    def update_used_symbols(used_symbols, fake: Union[torch.SymInt, torch.Tensor]):
        used_symbols |= free_symbols(fake)

    recheck_placeholders = []
    for node in self.placeholders:
        binds_symbol = placeholder_binds_symbol(node) is not None
        # Don't delete symbol bindings yet
        if binds_symbol:
            if not node.users:
                recheck_placeholders.append(node)
        else:
            if not node.users and not isinstance(
                node.meta["grapharg"], BackwardStateGraphArg
            ):
                remove_unused(node)
            else:
                # Register the free symbols as uses
                arg = node.meta["grapharg"]
                if isinstance(arg, BackwardStateGraphArg):
                    continue
                if isinstance(node.meta["grapharg"].example, torch.ScriptObject):
                    real_script_obj = node.meta["grapharg"].example
                    fake_script_obj = node.meta["grapharg"].example_strong_ref
                    if not torch._library.fake_class_registry.tracing_with_real(
                        real_script_obj
                    ):
                        # Walk the flattened script object's fake attributes
                        # and record any symbols they reference.
                        flat_dict = dict(real_script_obj.__obj_flatten__())  # type: ignore[attr-defined]
                        for attr in flat_dict.keys():
                            fake_attr_val = getattr(
                                fake_script_obj.wrapped_obj, attr
                            )
                            pytree.tree_map_only(
                                (torch.SymInt, torch.Tensor),
                                lambda t: update_used_symbols(used_symbols, t),
                                fake_attr_val,
                            )
                    continue
                fake = (
                    arg.fake_tensor if arg.fake_tensor is not None else arg.example
                )
                update_used_symbols(used_symbols, fake)

    # After removing unused graphargs, prune unused binds_symbol
    for node in recheck_placeholders:
        symbol = placeholder_binds_symbol(node)
        if symbol is not None:
            if symbol not in used_symbols:
                remove_unused(node)
            else:
                # Make sure we delete later occurrences of the same symbol
                used_symbols.remove(symbol)
|
| 1634 |
+
|
| 1635 |
+
def add_output_instructions(self, prefix: List[Instruction]) -> None:
    """Append bytecode for a newly compiled subgraph and mark tracing done.

    Called when a compiled subgraph is created and inserted before the
    remaining user code.
    """
    self.output_instructions += prefix
    self.should_exit = True
|
| 1642 |
+
|
| 1643 |
+
def install_global_unsafe(self, name, value) -> None:
    """Install ``value`` into the global scope under ``name`` (no mangling).

    WARNING: prefer the safer `install_global_by_id/install_global`.
    torch.compile instances should be independent of each other;
    one footgun is to have one instance depend on the existence of
    a global installed by another instance. This can happen if we mangle
    a global the same way across both instances.
    """
    assert name not in self.installed_globals
    self.installed_globals.add(name)
    hook = CleanupHook.create(self.global_scope, name, value)
    self.cleanups.append(hook)
|
| 1654 |
+
|
| 1655 |
+
def install_global_by_id(self, prefix, value) -> str:
    """Install a global keyed by (prefix, id(value)), at most once.

    Returns the mangled name of the (possibly pre-existing) global.
    """
    # NB: need self.compile_id to distinguish this global
    # from another global created in a different torch.compile instance
    mangled = f"{prefix}_{id(value)}_c{self.compile_id}"
    if mangled not in self.installed_globals:
        self.install_global_unsafe(mangled, value)
    return mangled
|
| 1669 |
+
|
| 1670 |
+
def install_global(self, prefix, value) -> str:
    """Install ``value`` as a global under a freshly generated unique name.

    Returns the name of the newly installed global.
    """
    # NB: unique_id is unique, even across torch.compile instances
    global_name = unique_id(prefix)
    self.install_global_unsafe(global_name, value)
    return global_name
|
| 1680 |
+
|
| 1681 |
+
def cleanup(self) -> None:
    """Break reference cycles and drop cached state after compilation.

    There is a reference cycle between tracer and OutputGraph, causing
    some of the tensor objects to be held alive for longer than necessary.
    """
    self.root_tx = None
    self.nn_modules.clear()
    self.param_name_to_source = None

    for node in self.graph.nodes:
        node.meta.pop("grapharg", None)

    for container in (
        self.real_value_cache,
        self.input_name_to_proxy,
        self.side_effects,
        self.variable_tracker_cache,
        self.register_finalizer_fns,
        self.dynamo_flat_name_to_original_fqn,
        self.tracing_context,
    ):
        container.clear()
|
| 1698 |
+
|
| 1699 |
+
def set_torch_function_state(self, enabled: bool) -> None:
    """Record whether __torch_function__ dispatch is currently enabled."""
    self.torch_function_enabled = enabled
|
| 1701 |
+
|
| 1702 |
+
def add_graph_finalizer(
    self, register_finalizer: Callable[[fx.GraphModule], None]
) -> None:
    """Queue a callback to run against the produced fx.GraphModule."""
    self.register_finalizer_fns.append(register_finalizer)
|
| 1706 |
+
|
| 1707 |
+
def example_value_from_input_node(self, node: torch.fx.Node):
    """Extract the non-fake example value for a graph-input node.

    Placeholders carry it in their GraphArg; anything else must be a
    get_attr resolved through the tracked nn_modules.
    """
    if node.op == "placeholder":
        return node.meta["grapharg"].example
    assert node.op == "get_attr"
    return self.nn_modules[node.target]  # type: ignore[index]
|
| 1713 |
+
|
| 1714 |
+
|
| 1715 |
+
# Appended to PT2-compliance error messages: explains the graph-break
# fallback behavior and points at the docs for marking custom ops compliant.
# Fix: original text read "on all ops that have do not have the ..." —
# a grammatical defect in a user-facing message.
err_epilogue = (
    "With the current config, we will graph break "
    "(and fall back to eager-mode PyTorch) on all ops "
    "that do not have the 'pt2_compliant_tag'. "
    "Please see the following doc for how to mark this op as PT2 compliant "
    "https://pytorch.org/tutorials/advanced/custom_ops_landing_page.html"
)
|
| 1722 |
+
|
| 1723 |
+
|
| 1724 |
+
def check_pt2_compliant_op(output_graph, kind, target, args, kwargs):
    """Classify a call_function target as PT2 compliant or not.

    Records the result on ``output_graph`` (compliant_custom_ops /
    non_compliant_ops) and graph-breaks when config requires compliance.
    Non-call_function nodes are ignored.
    """
    if kind != "call_function":
        return

    def encountered_compliant_op(target):
        # Builtin namespaces are compliant by construction; only track
        # custom ops.
        if target.namespace in {"prim", "prims", "aten"}:
            return
        output_graph.compliant_custom_ops.add(target)

    def encountered_non_compliant_op(target, msg):
        output_graph.non_compliant_ops.add(target)
        if config.only_allow_pt2_compliant_ops:
            unimplemented(msg + " " + err_epilogue)

    # Case 1: a concrete overload — check its tags directly.
    if isinstance(target, torch._ops.OpOverload):
        if torch.Tag.pt2_compliant_tag in target.tags:
            encountered_compliant_op(target)
            return
        encountered_non_compliant_op(
            target,
            f"Encountered the torch.ops.OpOverload {target} "
            f"that is not PT2 compliant.",
        )
        return

    # Case 2: an overload packet — resolve which overload would be called.
    if isinstance(target, torch._ops.OpOverloadPacket):
        overloads = tuple(target.overloads())
        # Optimization: Overload resolution is expensive.
        # If there's only one overload, we know what it will resolve to.
        if len(overloads) == 1:
            op = getattr(target, overloads[0])
            if torch.Tag.pt2_compliant_tag in op.tags:
                encountered_compliant_op(op)
                return
            encountered_non_compliant_op(
                op,
                f"Encountered the non-overloaded "
                f"torch.ops.OpOverloadPacket {target} "
                f"that is not PT2 compliant. ",
            )
            return

        # Multiple overloads: resolve against fake values of the args.
        args, kwargs = torch._dynamo.utils.get_fake_values_from_nodes(
            output_graph.current_tx, (args, kwargs), False
        )
        try:
            overload = torch._C._jit_resolve_packet(
                target._qualified_op_name, *args, **kwargs
            )
        except RuntimeError as e:
            # Resolution failure: graph break with the resolver's message.
            unimplemented(str(e))

        op = getattr(target, overload)
        if torch.Tag.pt2_compliant_tag in op.tags:
            encountered_compliant_op(op)
        else:
            encountered_non_compliant_op(
                op,
                f"Encountered the torch.ops.OpOverloadPacket {target} "
                f"which resolves to the overload ({overload}) that is "
                f"not PT2 compliant.",
            )
|
| 1786 |
+
|
| 1787 |
+
|
| 1788 |
+
# Process-wide monotonically increasing counter; presumably used to assign
# unique compile ids — consumer not visible in this chunk, confirm at call sites.
_compile_id_counter = itertools.count()
|
| 1789 |
+
|
| 1790 |
+
|
| 1791 |
+
class SubgraphTracer(fx.Tracer):
    """
    Holds an FX graph that is being traced. OutputGraph owns a SubgraphTracer
    and the separation of responsibilities is that SubgraphTracer is
    responsible for building the graph while OutputGraph is responsible for
    compiling and executing the graph.
    """

    def __init__(
        self, output_graph, parent=None, export_root=False, source_target=None
    ):
        """Create a tracer owned by ``output_graph``.

        ``parent`` is the enclosing SubgraphTracer when tracing a
        HigherOrderOperator body; ``export_root`` is only valid on the
        root tracer.
        """
        super().__init__()
        # weakref.proxy avoids a strong reference cycle with OutputGraph.
        self.output_graph = weakref.proxy(output_graph)
        self.graph = torch.fx.Graph()

        # The export is only ever set for the ROOT tracer. It controls
        # whether or not certain inputs are allowed to be added or not.
        # Look at call sites of create_graph_input to see how it is used.
        if export_root:
            assert parent is None
        self.export_root = export_root
        # Map from graph input name to its placeholder proxy object, where the
        # map's keys give all current placeholder node names and can be used to
        # create unique node names
        self.input_name_to_proxy: Dict[str, fx.Proxy] = {}
        # Node => computed real value (see utils.get_real_value)
        self.real_value_cache: Dict[fx.Node, torch.Tensor] = {}

        # SubgraphTracers can be nested. See NOTE [HigherOrderOperator tracing design]
        self.parent = parent
        # A dict mapping previously free variables (Proxy objects)
        # to new Proxy objects that wrap inputs to this subgraph.
        #
        # This dict serves two purposes:
        # - Proxies are associated with VariableTrackers. If we see
        #   the same VariableTracker twice (and it is a free variable),
        #   then we want to use the same Proxy in the current subgraph to
        #   record the tracing.
        # - If we are tracing a HigherOrderOperator's body_fn, then we
        #   need to keep track of what free variables were lifted so we can
        #   rewrite the HigherOrderOperator call using the traced body_fn.
        # Dicts maintain the order of args for the HigherOrderOperator call.
        self.lifted_freevars = {}
        self.prev_inst = None

        # Cached references to the code object / GraphModule metadata last
        # seen by create_proxy; used to propagate original node meta when
        # retracing a GraphModule.
        self._cur_code = None
        self._orig_gm_meta = None
        self._orig_gm_lineno_map = None
        self._orig_gm_firstlineno = None
        # Each SubgraphTracer is associated with a source target, which indicates
        # which operator this subgraph is attached to. We compute a source_fn_stack
        # based on the source target. For the root tracer, it's set to [].
        # This is useful for debugging and transforming the exported graph.
        if self.parent is None:
            self.source_fn_stack = []
        else:
            self.source_fn_stack = self.parent.source_fn_stack + [
                (self.graph._target_to_str(source_target), source_target)
            ]

    # preserve original meta if it is available
    def _maybe_preserve_original_meta(self, tx, node):
        """Copy meta fields from the original GraphModule's node onto ``node``,
        matched by source line number, when retracing metadata is present."""
        if (
            self._orig_gm_meta
            and self._orig_gm_lineno_map
            and self._orig_gm_firstlineno
        ):
            lineno = tx.current_instruction.starts_line
            node_idx = None
            if lineno is not None:
                node_idx = self._orig_gm_lineno_map.get(
                    lineno - self._orig_gm_firstlineno, None
                )
            if node_idx is not None:
                meta = self._orig_gm_meta[node_idx]
                for field in fx.proxy._COPY_META_FIELDS:
                    if field in meta:
                        node.meta[field] = meta[field]
                if "stack_trace" in meta:
                    node.meta["stack_trace"] = meta["stack_trace"]

    def create_proxy(
        self,
        kind,
        target,
        args,
        kwargs,
        name=None,
        type_expr=None,
        proxy_factory_fn=None,
    ):
        """Create a proxy node, lifting free variables into subgraph inputs
        and attaching dynamo-specific metadata (nn_module_stack,
        source_fn_stack, stack_trace)."""
        # NOTE: [Nested SubgraphTracer and free_variable handling]
        # --------------------------------------------------------
        # Read NOTE [HigherOrderOperator tracing design] first.
        #
        # Let's say we're in the middle of introspecting the body of a possibly
        # nested HigherOrderOperator, and we see a free variable.
        #
        # There are two cases:
        # 1. We see a free variable that is already tracked by Dynamo.
        # 2. We see a free variable that has not been tracked by Dynamo
        #
        # In case 1, we call `maybe_lift_tracked_freevar_to_input` (below)
        # which will lift the freevar to be an input of this subgraph
        # and also recursively lift it to be an input on the parent(s).
        #
        # In case 2, before the call to `create_proxy`, the InstructionTranslator
        # will see the freevar when it gets loaded by Python bytecode.
        # E.g. for Python 3.11 the bytecodes that may do this are LOAD_DEREF or
        # LOAD_GLOBAL.
        # There, the InstructionTranslator asks Dynamo to begin tracking the
        # freevar by building a new Variable.
        # Building a new Variable automatically lifts the freevar to be an
        # input of the root SubgraphTracer.
        #
        # The implications for the code below are:
        # - We will always be in Case 1 when we get to this code.
        # - Any "free variable" we encounter here is guaranteed to already be
        #   bound, that is, it is either a graph input of the root graph, or
        #   some local variable of the root graph or a subgraph.
        # - The additional work we need to do here is *only* that we need to
        #   lift this free variable into inputs (recursively) of each nested
        #   higher-order-op subgraph until we hit the subgraph where the free
        #   variable is bound
        if self.parent is not None:
            flat_args, tree_spec = pytree.tree_flatten((args, kwargs))
            new_flat_args = []
            for arg in flat_args:
                maybe_new_arg = self.maybe_lift_tracked_freevar_to_input(arg)
                new_flat_args.append(maybe_new_arg)

            args, kwargs = pytree.tree_unflatten(new_flat_args, tree_spec)

        rv = super().create_proxy(
            kind, target, args, kwargs, name, type_expr, proxy_factory_fn
        )

        # append stack trace to fx node
        tx = self.output_graph.current_tx

        # log detailed location of line of code in 3.11
        if sys.version_info >= (3, 11) and kind in (
            "call_function",
            "call_method",
            "call_module",
        ):
            cur_inst = tx.current_instruction
            if (
                cur_inst is not self.prev_inst
                and cur_inst.positions is not None
                and cur_inst.positions.lineno is not None
            ):
                tx_code = tx.f_code
                header = tx.get_line_of_code_header(lineno=cur_inst.positions.lineno)

                def get_trace_call_log_str():
                    line = get_instruction_source_311(tx_code, cur_inst).rstrip()
                    return f"TRACE FX call {rv.node.name} from {header}\n{line}"

                trace_call_log.debug("%s", LazyString(get_trace_call_log_str))
                self.prev_inst = cur_inst

        # update reference to original meta if we're tracing a new code object
        is_retracing = False
        if tx.f_code is not self._cur_code:
            orig_graphmodule_maybe = code_context.get_context(tx.f_code).get(
                "orig_graphmodule", lambda: None
            )()
            if isinstance(orig_graphmodule_maybe, torch.fx.GraphModule):
                is_retracing = True
                self._orig_gm_meta = [
                    nd.meta for nd in orig_graphmodule_maybe.graph.nodes
                ]
                self._orig_gm_lineno_map = orig_graphmodule_maybe._lineno_map
                self._orig_gm_firstlineno = (
                    orig_graphmodule_maybe.forward.__code__.co_firstlineno
                )
            else:
                self._orig_gm_meta = None
                self._orig_gm_lineno_map = None
                self._orig_gm_firstlineno = None
        nn_module_stack = tx.nn_module_stack
        if nn_module_stack:
            rv.node.meta["nn_module_stack"] = nn_module_stack.copy()

        if kind in {"call_function", "call_method"}:
            rv.node.meta["source_fn_stack"] = self.source_fn_stack + [
                (rv.node.name, target)
            ]
        elif kind == "call_module":
            if self.parent is not None:
                unimplemented("Invoking an nn.Module inside HigherOrderOperator")
            # For modules we store the class
            rv.node.meta["source_fn_stack"] = self.source_fn_stack + [
                (
                    rv.node.name,
                    rv.node.meta["nn_module_stack"][target][1],
                )
            ]

        self._maybe_preserve_original_meta(tx, rv.node)

        # When not retracing, fill in any metadata the steps above did not
        # already provide.
        if not is_retracing:
            if "nn_module_stack" not in rv.node.meta:
                nn_module_stack = tx.nn_module_stack
                if nn_module_stack:
                    rv.node.meta["nn_module_stack"] = nn_module_stack.copy()

            if "source_fn_stack" not in rv.node.meta:
                if kind in {"call_function", "call_method"}:
                    rv.node.meta["source_fn_stack"] = self.source_fn_stack + [
                        (rv.node.name, target)
                    ]
                elif kind == "call_module":
                    if self.parent is not None:
                        unimplemented(
                            "Invoking an nn.Module inside HigherOrderOperator"
                        )
                    # For modules we store the class
                    rv.node.meta["source_fn_stack"] = self.source_fn_stack + [
                        (
                            rv.node.name,
                            rv.node.meta["nn_module_stack"][target][1],
                        )
                    ]

        if "stack_trace" not in rv.node.meta:
            frame_summaries: List[traceback.FrameSummary] = []
            while tx:
                # Avoid frame summaries from inside the torch/nn/modules. This ensures that we keep the stack trace of
                # the user code.
                if not tx.is_co_filename_from_nn_modules():
                    frame_summaries.append(tx.frame_summary())
                tx = getattr(tx, "parent", None)
            # Reverse the frame_summaries, such that the innermost frame is at the last
            frame_summaries.reverse()

            # official from_list stub doesn't have new-style type
            msgs = traceback.StackSummary.from_list(frame_summaries).format()
            rv.node.stack_trace = "".join(msgs)

        return rv

    def create_node(
        self, op, target, args=None, kwargs=None, name=None, type_expr=None
    ):
        """Create a node after PT2-compliance checking; nested tracers also
        verify all node args belong to this tracer's graph."""
        check_pt2_compliant_op(self.output_graph, op, target, args, kwargs)
        if self.parent is not None:
            flat_args = pytree.arg_tree_leaves(*args, **kwargs)
            for arg in flat_args:
                if not isinstance(arg, torch.fx.Node):
                    continue
                assert (
                    arg.graph == self.graph
                ), "create_node using arg not from this SubgraphTracer"

        node = super().create_node(op, target, args, kwargs, name, type_expr)
        node.meta["creation_timestamp"] = self.output_graph.timestamp
        return node

    # Note: we did not override erase_node since
    # we call self.graph.erase_node elsewhere
    def remove_node(self, node):
        """Erase ``node`` from the graph, first erasing users that live in
        nested graphs (which would otherwise make the erase raise)."""
        if len(node.users) > 0:
            user_graph_nodes: List[torch.fx.Node] = []
            for user in node.users.keys():
                # For the case where user.graph == self.graph, that is a real bug and will raise
                # properly.
                if user.graph != self.graph:
                    # This is a nested graph, which needs to be deleted.
                    # If we do not do this, we will raise on attempting to remove this.
                    # As we only get here during restoration cleanup, this is sound.
                    user_graph_nodes.extend(reversed(list(user.graph.nodes)))
            for other_graph_node in user_graph_nodes:
                other_graph_node.graph.erase_node(other_graph_node)
        self.graph.erase_node(node)
        self.input_name_to_proxy.pop(node.name, None)

    # when before=True, we will insert this input before the most recent
    # inserted proxy. This is a hack to get around an ordering problem,
    # where we first insert a tensor argument, and then insert bindings
    # for SymInts that may occur in the tensor argument.
    # Remove this if https://github.com/pytorch/pytorch/issues/99007 gets
    # fixed.
    def create_graph_input(self, name, type_expr=None, before=False, source=None):
        """Add a placeholder named ``name`` (uniquified if taken) and return
        its proxy, keeping input_name_to_proxy ordering consistent."""
        log.debug(
            "create_graph_input %s %s",
            name,
            source.name() if source is not None else "(none)",
        )
        if source is None:
            assert (
                self.parent is not None
            ), "you are required to provide a source for inputs on the root tracer"

        # In eager, we are generally OK with adding graph inputs whenever we
        # want, because we take care of writing the bytecode that knows how
        # to source all the inputs.
        #
        # In export, this is bad, because you want a self-contained export
        # object which only depends on the inputs you explicitly passed to it.
        # So we are a bit more strict about what sources can become inputs
        # in export
        if self.export_root:
            if not is_from_local_source(source, allow_cell_or_freevar=False):
                self.output_graph.source_to_user_stacks.setdefault(source, []).append(
                    TracingContext.extract_stack()
                )

        # unique
        if name in self.input_name_to_proxy:
            for i in itertools.count():
                candidate_name = f"{name}_{i}"
                if candidate_name not in self.input_name_to_proxy:
                    name = candidate_name
                    break

        # Insert relative to the most recently inserted placeholder (or at
        # the front if there are none yet).
        if self.input_name_to_proxy:
            prev_name = next(reversed(self.input_name_to_proxy))
            node = self.input_name_to_proxy[prev_name].node
            if before:
                ctx = self.graph.inserting_before(node)
            else:
                ctx = self.graph.inserting_after(node)
        else:
            ctx = self.graph.inserting_before(None)
        with ctx:
            proxy = self.create_proxy("placeholder", name, (), {}, type_expr=type_expr)
            if self.input_name_to_proxy and before:
                # Keep dict order matching graph order: re-insert the last
                # entry after the new one.
                k, v = self.input_name_to_proxy.popitem()
                self.input_name_to_proxy[name] = proxy
                self.input_name_to_proxy[k] = v
            else:
                self.input_name_to_proxy[name] = proxy
            return proxy

    # See NOTE: [Nested SubgraphTracer and free_variable handling] for more details
    def lift_tracked_freevar_to_input(self, proxy):
        """Lift a free variable's proxy to be an input of this subgraph,
        recursing into parents until the binding tracer is reached."""
        # You're doing something wrong if we are the root SubgraphTracer because
        # Dynamo adds tensors to graph inputs before creating a proxy for them.
        assert (
            self.parent is not None
        ), "lift_tracked_freevar_to_input should not be called on root SubgraphTracer"
        # Proxys are associated with VariableTracker.
        # It is possible that we've already lifted the Proxy to be an input.
        # If that is the case, just return the already lifted Proxy.
        if proxy in self.lifted_freevars:
            return self.lifted_freevars[proxy]
        new_proxy = self.create_graph_input(proxy.node.name)
        set_example_value(new_proxy.node, proxy.node.meta["example_value"])
        self.lifted_freevars[proxy] = new_proxy
        if self.parent is not None and proxy.tracer != self.parent:
            self.parent.lift_tracked_freevar_to_input(proxy)
        return new_proxy

    def maybe_lift_tracked_freevar_to_input(self, arg):
        """
        If arg is a free variable, then lift it to be an input.
        Returns the new lifted arg (if arg was a freevar), else the
        original arg.
        """
        if not isinstance(arg, torch.fx.Proxy):
            return arg
        elif arg.tracer == self:
            return arg
        return self.lift_tracked_freevar_to_input(arg)
|
| 2157 |
+
|
| 2158 |
+
|
| 2159 |
+
# NOTE: [HigherOrderOperator tracing design]
|
| 2160 |
+
# Ignoring HigherOrderOperators for a moment,
|
| 2161 |
+
# OutputGraph represents the graph being built by Dynamo that may be compiled
|
| 2162 |
+
# and executed. It holds a root SubgraphTracer where the FX graph is built.
|
| 2163 |
+
#
|
| 2164 |
+
# HigherOrderOperators are operators that take functions as their arguments.
|
| 2165 |
+
# When Dynamo encounters a HigherOrderOperator, then it attempts to introspect
|
| 2166 |
+
# the function passed to it (call this the "body function"), capture it into a
|
| 2167 |
+
# GraphModule, and rewrite the call to the HigherOrderOperator to use the
|
| 2168 |
+
# GraphModule.
|
| 2169 |
+
#
|
| 2170 |
+
# The way we handle the capture of body functions is through having
|
| 2171 |
+
# (possibly nested) SubgraphTracers, one per body function.
|
| 2172 |
+
#
|
| 2173 |
+
# Mechanically, we do the introspection by:
|
| 2174 |
+
# - Creating a new SubgraphTracer via OutputGraph.subtracer
|
| 2175 |
+
# - Executing the body function.
|
| 2176 |
+
# This constructs the graph of the body function in the new SubgraphTracer
|
| 2177 |
+
# while modifying the state of the OutputGraph. For example:
|
| 2178 |
+
# - the OutputGraph can receive new GraphArgs (if we discover any new
|
| 2179 |
+
# untracked Tensors)
|
| 2180 |
+
# - side effects from the body function get accumulated into
|
| 2181 |
+
# OutputGraph.side_effects
|
| 2182 |
+
# - guards produced by the body function get accumulated into OutputGraph.guards
|
| 2183 |
+
#
|
| 2184 |
+
# The traced function has some special properties that make it easier for us
|
| 2185 |
+
# to transform later down the line:
|
| 2186 |
+
# - we lift all free variables to being inputs.
|
| 2187 |
+
#
|
| 2188 |
+
# If the introspection fails (due to the existence of graph breaks), then
|
| 2189 |
+
# we roll back the current OutputGraph state and graph break on the
|
| 2190 |
+
# HigherOrderOperator.
|
pllava/lib/python3.10/site-packages/torch/_dynamo/replay_record.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import dataclasses
|
| 3 |
+
from dataclasses import field
|
| 4 |
+
from types import CodeType, ModuleType
|
| 5 |
+
from typing import Any, Dict
|
| 6 |
+
|
| 7 |
+
from torch.utils._import_utils import import_dill
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
dill = import_dill()
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@dataclasses.dataclass
|
| 14 |
+
class ModuleRecord:
|
| 15 |
+
module: ModuleType
|
| 16 |
+
accessed_attrs: Dict[str, Any] = field(default_factory=dict)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
@dataclasses.dataclass
|
| 20 |
+
class DummyModule:
|
| 21 |
+
name: str
|
| 22 |
+
is_torch: bool = False
|
| 23 |
+
|
| 24 |
+
@property
|
| 25 |
+
def __name__(self):
|
| 26 |
+
return self.name
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@dataclasses.dataclass
|
| 30 |
+
class ExecutionRecord:
|
| 31 |
+
code: CodeType
|
| 32 |
+
globals: Dict[str, Any] = field(default_factory=dict)
|
| 33 |
+
locals: Dict[str, Any] = field(default_factory=dict)
|
| 34 |
+
builtins: Dict[str, Any] = field(default_factory=dict)
|
| 35 |
+
code_options: Dict[str, Any] = field(default_factory=dict)
|
| 36 |
+
|
| 37 |
+
def dump(self, f):
|
| 38 |
+
assert dill is not None, "replay_record requires `pip install dill`"
|
| 39 |
+
dill.dump(self, f)
|
| 40 |
+
|
| 41 |
+
@classmethod
|
| 42 |
+
def load(cls, f):
|
| 43 |
+
assert dill is not None, "replay_record requires `pip install dill`"
|
| 44 |
+
return dill.load(f)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@dataclasses.dataclass
|
| 48 |
+
class ExecutionRecorder:
|
| 49 |
+
LOCAL_MOD_PREFIX = "___local_mod_"
|
| 50 |
+
|
| 51 |
+
code: CodeType
|
| 52 |
+
globals: Dict[str, Any] = field(default_factory=dict)
|
| 53 |
+
locals: Dict[str, Any] = field(default_factory=dict)
|
| 54 |
+
builtins: Dict[str, Any] = field(default_factory=dict)
|
| 55 |
+
code_options: Dict[str, Any] = field(default_factory=dict)
|
| 56 |
+
name_to_modrec: Dict[str, Any] = field(default_factory=dict)
|
| 57 |
+
|
| 58 |
+
def add_local_var(self, name, var):
|
| 59 |
+
if isinstance(var, ModuleType):
|
| 60 |
+
self.locals[name] = self._add_mod(var)
|
| 61 |
+
else:
|
| 62 |
+
self.locals[name] = var
|
| 63 |
+
|
| 64 |
+
def add_global_var(self, name, var):
|
| 65 |
+
if isinstance(var, ModuleType):
|
| 66 |
+
self.globals[name] = self._add_mod(var)
|
| 67 |
+
else:
|
| 68 |
+
self.globals[name] = var
|
| 69 |
+
|
| 70 |
+
def add_local_mod(self, name, mod):
|
| 71 |
+
assert isinstance(mod, ModuleType)
|
| 72 |
+
|
| 73 |
+
self.add_global_var(name, mod)
|
| 74 |
+
|
| 75 |
+
def record_module_access(self, mod, name, val):
|
| 76 |
+
if isinstance(val, ModuleType):
|
| 77 |
+
self.name_to_modrec[mod.__name__].accessed_attrs[name] = self._add_mod(val)
|
| 78 |
+
return
|
| 79 |
+
|
| 80 |
+
if mod.__name__ in self.name_to_modrec:
|
| 81 |
+
self.name_to_modrec[mod.__name__].accessed_attrs[name] = val
|
| 82 |
+
|
| 83 |
+
def get_record(self):
|
| 84 |
+
return ExecutionRecord(
|
| 85 |
+
self.code,
|
| 86 |
+
ExecutionRecorder._resolve_modules(self.globals),
|
| 87 |
+
ExecutionRecorder._resolve_modules(self.locals),
|
| 88 |
+
self.builtins.copy(),
|
| 89 |
+
self.code_options.copy(),
|
| 90 |
+
)
|
| 91 |
+
|
| 92 |
+
def _add_mod(self, mod):
|
| 93 |
+
if mod.__name__ not in self.name_to_modrec:
|
| 94 |
+
self.name_to_modrec[mod.__name__] = ModuleRecord(mod)
|
| 95 |
+
|
| 96 |
+
return self.name_to_modrec[mod.__name__]
|
| 97 |
+
|
| 98 |
+
# Convert ModuleRecords -> DummyModule tree
|
| 99 |
+
@classmethod
|
| 100 |
+
def _resolve_modules(cls, vars):
|
| 101 |
+
def resolve_module(var):
|
| 102 |
+
if not isinstance(var, ModuleRecord):
|
| 103 |
+
return var
|
| 104 |
+
|
| 105 |
+
dummy_mod = DummyModule(var.module.__name__)
|
| 106 |
+
for attr_name, attr_value in var.accessed_attrs.items():
|
| 107 |
+
attr_value = resolve_module(attr_value)
|
| 108 |
+
dummy_mod.__setattr__(attr_name, attr_value)
|
| 109 |
+
|
| 110 |
+
return dummy_mod
|
| 111 |
+
|
| 112 |
+
return {k: resolve_module(v) for k, v in vars.items()}
|
pllava/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_dynamo.cpython-310.pyc
ADDED
|
Binary file (13.7 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/repro/after_dynamo.py
ADDED
|
@@ -0,0 +1,585 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import argparse
|
| 3 |
+
import copy
|
| 4 |
+
import functools
|
| 5 |
+
import logging
|
| 6 |
+
import os
|
| 7 |
+
import shutil
|
| 8 |
+
import sys
|
| 9 |
+
import textwrap
|
| 10 |
+
from importlib import import_module
|
| 11 |
+
from typing import Union
|
| 12 |
+
|
| 13 |
+
import torch
|
| 14 |
+
import torch.fx as fx
|
| 15 |
+
from torch._dynamo.debug_utils import (
|
| 16 |
+
AccuracyError,
|
| 17 |
+
backend_accuracy_fails,
|
| 18 |
+
BUCK_CMD_PREFIX,
|
| 19 |
+
BuckTargetWriter,
|
| 20 |
+
extra_imports,
|
| 21 |
+
generate_config_string,
|
| 22 |
+
helper_for_dump_minify,
|
| 23 |
+
InputReader,
|
| 24 |
+
InputWriter,
|
| 25 |
+
minifier_dir,
|
| 26 |
+
NNModuleToString,
|
| 27 |
+
NopInputReader,
|
| 28 |
+
run_fwd_maybe_bwd,
|
| 29 |
+
same_two_models,
|
| 30 |
+
)
|
| 31 |
+
from torch.fx.experimental.symbolic_shapes import fx_placeholder_targets
|
| 32 |
+
from torch.hub import tqdm
|
| 33 |
+
|
| 34 |
+
from .. import config
|
| 35 |
+
from ..backends.registry import lookup_backend, register_debug_backend
|
| 36 |
+
from ..debug_utils import clone_inputs_retaining_gradness
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
log = logging.getLogger(__name__)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
inductor_config = import_module("torch._inductor.config")
|
| 43 |
+
use_buck = inductor_config.is_fbcode()
|
| 44 |
+
|
| 45 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 46 |
+
# MAIN ENTRY POINT
|
| 47 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def _accuracy_fails(gm, example_inputs, compiler_fn):
|
| 51 |
+
return backend_accuracy_fails(
|
| 52 |
+
gm,
|
| 53 |
+
example_inputs,
|
| 54 |
+
compiler_fn,
|
| 55 |
+
only_fwd=config.repro_forward_only,
|
| 56 |
+
ignore_non_fp=config.repro_ignore_non_fp,
|
| 57 |
+
)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class WrapBackendDebug:
|
| 61 |
+
def __init__(self, unconfigured_compiler_fn, compiler_name: str) -> None:
|
| 62 |
+
functools.wraps(unconfigured_compiler_fn)(self)
|
| 63 |
+
self._torchdynamo_orig_callable = unconfigured_compiler_fn # type: ignore[attr-defined]
|
| 64 |
+
self._compiler_name = compiler_name
|
| 65 |
+
if hasattr(unconfigured_compiler_fn, "__name__"):
|
| 66 |
+
self.__name__ = unconfigured_compiler_fn.__name__
|
| 67 |
+
if hasattr(unconfigured_compiler_fn, "compiler_name"):
|
| 68 |
+
self.__name__ = unconfigured_compiler_fn.compiler_name
|
| 69 |
+
if hasattr(unconfigured_compiler_fn, "get_compiler_config"):
|
| 70 |
+
self.get_compiler_config = unconfigured_compiler_fn.get_compiler_config # type: ignore[attr-defined]
|
| 71 |
+
|
| 72 |
+
def __call__(self, gm, example_inputs, **kwargs):
|
| 73 |
+
compiler_fn = functools.partial(self._torchdynamo_orig_callable, **kwargs)
|
| 74 |
+
assert config.repro_after in ("dynamo", "aot", None)
|
| 75 |
+
|
| 76 |
+
if config.repro_after == "dynamo":
|
| 77 |
+
|
| 78 |
+
def add_paths(exc):
|
| 79 |
+
exc.minifier_path = os.path.join(minifier_dir(), "minifier_launcher.py")
|
| 80 |
+
if use_buck:
|
| 81 |
+
exc.buck_command = " ".join(
|
| 82 |
+
BUCK_CMD_PREFIX
|
| 83 |
+
+ [BuckTargetWriter(exc.minifier_path).cmd_line_path]
|
| 84 |
+
)
|
| 85 |
+
|
| 86 |
+
if config.repro_level == 3:
|
| 87 |
+
dump_to_minify_after_dynamo(gm, example_inputs, self._compiler_name)
|
| 88 |
+
|
| 89 |
+
# Check for either accuracy (level 4) or other type of failures.
|
| 90 |
+
if config.repro_level == 4:
|
| 91 |
+
# Check Accuracy
|
| 92 |
+
compiled_gm = compiler_fn(copy.deepcopy(gm), example_inputs)
|
| 93 |
+
if _accuracy_fails(gm, example_inputs, compiler_fn):
|
| 94 |
+
log.warning(
|
| 95 |
+
"Accuracy failed for the TorchDynamo produced graph. Creating script to minify the error."
|
| 96 |
+
)
|
| 97 |
+
dump_to_minify_after_dynamo(
|
| 98 |
+
fx.GraphModule(gm, copy.deepcopy(gm.graph)),
|
| 99 |
+
example_inputs,
|
| 100 |
+
self._compiler_name,
|
| 101 |
+
)
|
| 102 |
+
exc = AccuracyError("Bad accuracy detected.")
|
| 103 |
+
add_paths(exc)
|
| 104 |
+
raise exc
|
| 105 |
+
else:
|
| 106 |
+
try:
|
| 107 |
+
compiled_gm = compiler_fn(copy.deepcopy(gm), example_inputs)
|
| 108 |
+
run_fwd_maybe_bwd(compiled_gm, example_inputs)
|
| 109 |
+
except Exception as exc:
|
| 110 |
+
log.warning(
|
| 111 |
+
"Compiled Fx GraphModule failed. Creating script to minify the error."
|
| 112 |
+
)
|
| 113 |
+
if config.repro_level == 1:
|
| 114 |
+
dump_state_fn = functools.partial(
|
| 115 |
+
dump_backend_state, compiler_name=self._compiler_name
|
| 116 |
+
)
|
| 117 |
+
dump_state_fn(
|
| 118 |
+
fx.GraphModule(gm, copy.deepcopy(gm.graph)), example_inputs
|
| 119 |
+
)
|
| 120 |
+
elif config.repro_level == 2:
|
| 121 |
+
dump_to_minify_after_dynamo(
|
| 122 |
+
fx.GraphModule(gm, copy.deepcopy(gm.graph)),
|
| 123 |
+
example_inputs,
|
| 124 |
+
self._compiler_name,
|
| 125 |
+
)
|
| 126 |
+
add_paths(exc)
|
| 127 |
+
raise
|
| 128 |
+
else:
|
| 129 |
+
compiled_gm = compiler_fn(gm, example_inputs)
|
| 130 |
+
|
| 131 |
+
return compiled_gm
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def wrap_backend_debug(unconfigured_compiler_fn, compiler_name: str):
|
| 135 |
+
"""
|
| 136 |
+
A minifier decorator that wraps the TorchDynamo produced Fx graph modules.
|
| 137 |
+
As opposed to wrap_compiler_debug, this wrapper intercepts at the
|
| 138 |
+
TorchDynamo produced Fx Graph Module. This makes it backend-agnostic to some
|
| 139 |
+
level, e.g., it is useful for minifying issues related to Aot Autograd
|
| 140 |
+
tracing. If an error is found, we minify and save the minified repro in
|
| 141 |
+
repro.tar.gz.
|
| 142 |
+
"""
|
| 143 |
+
return WrapBackendDebug(unconfigured_compiler_fn, compiler_name)
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 147 |
+
# REPRO DUMPERS
|
| 148 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def generate_dynamo_fx_repro_string(
|
| 152 |
+
gm,
|
| 153 |
+
args,
|
| 154 |
+
compiler_name,
|
| 155 |
+
check_accuracy=False,
|
| 156 |
+
*,
|
| 157 |
+
stable_output=False,
|
| 158 |
+
save_dir=None,
|
| 159 |
+
command="run",
|
| 160 |
+
):
|
| 161 |
+
"""
|
| 162 |
+
Generate a repro string for backend-agnostic minified version.
|
| 163 |
+
"""
|
| 164 |
+
|
| 165 |
+
model_str = NNModuleToString.convert(gm)
|
| 166 |
+
|
| 167 |
+
# TODO: Figure out why torch.compile'd hash isn't work on this codepath
|
| 168 |
+
writer = InputWriter(save_dir, stable_hash=True)
|
| 169 |
+
for placeholder, arg in zip(fx_placeholder_targets(gm), args):
|
| 170 |
+
if isinstance(arg, (int, torch.SymInt)):
|
| 171 |
+
writer.symint(placeholder, arg)
|
| 172 |
+
elif isinstance(arg, torch.Tensor):
|
| 173 |
+
# TODO: improve these names with FQN
|
| 174 |
+
writer.tensor(placeholder, arg)
|
| 175 |
+
else:
|
| 176 |
+
raise TypeError(f"arg is neither SymInt/int nor torch.Tensor, {arg}")
|
| 177 |
+
load_args = "\n".join(writer.lines())
|
| 178 |
+
|
| 179 |
+
return textwrap.dedent(
|
| 180 |
+
f"""
|
| 181 |
+
from math import inf
|
| 182 |
+
import torch
|
| 183 |
+
from torch import tensor, device
|
| 184 |
+
import torch.fx as fx
|
| 185 |
+
import torch._dynamo
|
| 186 |
+
from torch._dynamo.testing import rand_strided
|
| 187 |
+
from torch._dynamo.debug_utils import run_fwd_maybe_bwd
|
| 188 |
+
|
| 189 |
+
{generate_config_string(stable_output=stable_output)}
|
| 190 |
+
|
| 191 |
+
{extra_imports}
|
| 192 |
+
|
| 193 |
+
{model_str}
|
| 194 |
+
mod = Repro()
|
| 195 |
+
|
| 196 |
+
{load_args}
|
| 197 |
+
|
| 198 |
+
if __name__ == '__main__':
|
| 199 |
+
from torch._dynamo.repro.after_dynamo import run_repro
|
| 200 |
+
run_repro(mod, load_args, accuracy={check_accuracy!r}, command={command!r},
|
| 201 |
+
save_dir={save_dir!r}, autocast={torch.is_autocast_enabled()!r}, backend={compiler_name!r})
|
| 202 |
+
"""
|
| 203 |
+
)
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def dump_backend_repro_as_file(gm, args, compiler_name, check_accuracy=False):
|
| 207 |
+
"""
|
| 208 |
+
Saves the repro to a repro.py file
|
| 209 |
+
"""
|
| 210 |
+
curdir = os.getcwd()
|
| 211 |
+
subdir = os.path.join(os.getcwd(), "checkpoints")
|
| 212 |
+
if not os.path.exists(subdir):
|
| 213 |
+
os.makedirs(subdir, exist_ok=True)
|
| 214 |
+
file_name = os.path.join(subdir, f"minified_{len(gm.graph.nodes)}_nodes.py")
|
| 215 |
+
log.warning(
|
| 216 |
+
"Writing checkpoint with %s nodes to %s", len(gm.graph.nodes), file_name
|
| 217 |
+
)
|
| 218 |
+
|
| 219 |
+
with open(file_name, "w") as fd:
|
| 220 |
+
fd.write(
|
| 221 |
+
generate_dynamo_fx_repro_string(
|
| 222 |
+
gm, args, compiler_name, check_accuracy, save_dir=subdir
|
| 223 |
+
)
|
| 224 |
+
)
|
| 225 |
+
latest_repro = os.path.join(curdir, "repro.py")
|
| 226 |
+
log.warning("Copying %s to %s for convenience", file_name, latest_repro)
|
| 227 |
+
|
| 228 |
+
if use_buck:
|
| 229 |
+
BuckTargetWriter(latest_repro).write()
|
| 230 |
+
|
| 231 |
+
shutil.copyfile(file_name, latest_repro)
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def dump_backend_state(gm, args, compiler_name, check_accuracy=False):
|
| 235 |
+
"""
|
| 236 |
+
Dumps the dynamo graph to repro the issue.
|
| 237 |
+
1) It tries to convert Fx GraphModule to a string. If we can, it writes to a
|
| 238 |
+
repro.py file.
|
| 239 |
+
2) If we can't convert Fx GraphModule to a string, we use to_folder to save
|
| 240 |
+
the module and save a tar file.
|
| 241 |
+
"""
|
| 242 |
+
assert NNModuleToString.can_convert_to_string(gm)
|
| 243 |
+
return dump_backend_repro_as_file(gm, args, compiler_name, check_accuracy)
|
| 244 |
+
# return dump_backend_repro_as_tarfile(gm, args, compiler_name)
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 248 |
+
# MINIFIER DUMPER
|
| 249 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
def dump_to_minify_after_dynamo(gm, args, compiler_name):
|
| 253 |
+
# TODO: factor this out
|
| 254 |
+
subdir = os.path.join(minifier_dir(), "checkpoints")
|
| 255 |
+
if not os.path.exists(subdir):
|
| 256 |
+
os.makedirs(subdir, exist_ok=True)
|
| 257 |
+
helper_for_dump_minify(
|
| 258 |
+
generate_dynamo_fx_repro_string(
|
| 259 |
+
gm,
|
| 260 |
+
args,
|
| 261 |
+
compiler_name,
|
| 262 |
+
check_accuracy=config.repro_level == 4,
|
| 263 |
+
save_dir=subdir,
|
| 264 |
+
command="minify",
|
| 265 |
+
)
|
| 266 |
+
)
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 270 |
+
# MINIFIER BACKENDS
|
| 271 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
@register_debug_backend
|
| 275 |
+
def dynamo_minifier_backend(gm, example_inputs, compiler_name):
|
| 276 |
+
from functorch.compile import minifier
|
| 277 |
+
|
| 278 |
+
compiler_fn = lookup_backend(compiler_name)
|
| 279 |
+
|
| 280 |
+
# TODO: It's inconsistent to pass SymInt inputs but REAL tensors.
|
| 281 |
+
# We should pass ints and look at the GraphModule placeholders
|
| 282 |
+
# to resolve them to SymInt (if necessary)
|
| 283 |
+
example_inputs = [
|
| 284 |
+
i.node.hint if isinstance(i, torch.SymInt) else i for i in example_inputs
|
| 285 |
+
]
|
| 286 |
+
|
| 287 |
+
try:
|
| 288 |
+
compiled_gm = compiler_fn(gm, example_inputs)
|
| 289 |
+
run_fwd_maybe_bwd(compiled_gm, example_inputs)
|
| 290 |
+
raise ValueError("No issue was detected")
|
| 291 |
+
except Exception as exc:
|
| 292 |
+
orig_failure = str(exc)
|
| 293 |
+
log.warning(
|
| 294 |
+
"Compiled Fx GraphModule failed. Creating script to minify the error."
|
| 295 |
+
)
|
| 296 |
+
dump_state_fn = functools.partial(
|
| 297 |
+
dump_backend_state, compiler_name=compiler_name
|
| 298 |
+
)
|
| 299 |
+
dump_state_fn(fx.GraphModule(gm, copy.deepcopy(gm.graph)), example_inputs)
|
| 300 |
+
fails_fn = functools.partial(
|
| 301 |
+
backend_fails,
|
| 302 |
+
compiler_fn=compiler_fn,
|
| 303 |
+
orig_failure=orig_failure,
|
| 304 |
+
)
|
| 305 |
+
minifier(
|
| 306 |
+
gm,
|
| 307 |
+
example_inputs,
|
| 308 |
+
module_fails=fails_fn,
|
| 309 |
+
dump_state=dump_state_fn,
|
| 310 |
+
)
|
| 311 |
+
return gm
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
@register_debug_backend
|
| 315 |
+
def dynamo_accuracy_minifier_backend(gm, example_inputs, compiler_name):
|
| 316 |
+
from functorch.compile import minifier
|
| 317 |
+
|
| 318 |
+
compiler_fn = lookup_backend(compiler_name)
|
| 319 |
+
|
| 320 |
+
# Set the eval mode to remove randomness.
|
| 321 |
+
gm.eval()
|
| 322 |
+
|
| 323 |
+
# Check Accuracy
|
| 324 |
+
if _accuracy_fails(gm, example_inputs, compiler_fn):
|
| 325 |
+
log.warning("Accuracy failed for the TorchDynamo produced graph")
|
| 326 |
+
dump_state_fn = functools.partial(
|
| 327 |
+
dump_backend_state, compiler_name=compiler_name, check_accuracy=True
|
| 328 |
+
)
|
| 329 |
+
fails_fn = functools.partial(
|
| 330 |
+
_accuracy_fails,
|
| 331 |
+
compiler_fn=compiler_fn,
|
| 332 |
+
)
|
| 333 |
+
dump_state_fn(fx.GraphModule(gm, copy.deepcopy(gm.graph)), example_inputs)
|
| 334 |
+
minifier(
|
| 335 |
+
gm,
|
| 336 |
+
example_inputs,
|
| 337 |
+
module_fails=fails_fn,
|
| 338 |
+
dump_state=dump_state_fn,
|
| 339 |
+
)
|
| 340 |
+
else:
|
| 341 |
+
log.error("Input graph does not fail accuracy testing")
|
| 342 |
+
return gm
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
def backend_fails(gm, example_inputs, compiler_fn, orig_failure):
|
| 346 |
+
"""
|
| 347 |
+
Minifier uses this function to identify if the minified graph module fails
|
| 348 |
+
with the same error.
|
| 349 |
+
|
| 350 |
+
One caveat is that minifier can potentially go into a wrong direction when
|
| 351 |
+
the resulting graph module fails for a different reason. To avoid this, we
|
| 352 |
+
save the string for the original exception and check similarity between new
|
| 353 |
+
and old exception. They can be somewhat different in some cases, when the
|
| 354 |
+
exception string depends on the failing node information. So, we have a
|
| 355 |
+
loose similarity metric to guide the minifier path.
|
| 356 |
+
"""
|
| 357 |
+
from difflib import SequenceMatcher
|
| 358 |
+
|
| 359 |
+
try:
|
| 360 |
+
# Run the original gm to check eager validity
|
| 361 |
+
run_fwd_maybe_bwd(gm, clone_inputs_retaining_gradness(example_inputs))
|
| 362 |
+
compiled_gm = compiler_fn(gm, example_inputs)
|
| 363 |
+
run_fwd_maybe_bwd(compiled_gm, clone_inputs_retaining_gradness(example_inputs))
|
| 364 |
+
except Exception as e:
|
| 365 |
+
new_failure = str(e)
|
| 366 |
+
if SequenceMatcher(None, orig_failure, new_failure).ratio() > 0.5:
|
| 367 |
+
return True
|
| 368 |
+
return False
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 372 |
+
# REPRO MAIN
|
| 373 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
def run_load_args(options, mod, load_args):
|
| 377 |
+
if not hasattr(load_args, "_version"):
|
| 378 |
+
log.warning(
|
| 379 |
+
"load_args does not have a _version attribute, please file a bug to PyTorch "
|
| 380 |
+
"and describe how you generate this repro script"
|
| 381 |
+
)
|
| 382 |
+
else:
|
| 383 |
+
if load_args._version > 0:
|
| 384 |
+
log.warning(
|
| 385 |
+
"load_args is version %s, but this version of PyTorch only supports "
|
| 386 |
+
"version 0. We will try to run it anyway but there may be an incompatibility; "
|
| 387 |
+
"if so, try upgrading your version of PyTorch.",
|
| 388 |
+
load_args._version,
|
| 389 |
+
)
|
| 390 |
+
|
| 391 |
+
nop_reader = NopInputReader()
|
| 392 |
+
load_args(nop_reader)
|
| 393 |
+
|
| 394 |
+
with tqdm(desc="Loading inputs", total=nop_reader.total) as pbar:
|
| 395 |
+
input_reader = InputReader(save_dir=options.save_dir, pbar=pbar)
|
| 396 |
+
load_args(input_reader)
|
| 397 |
+
args = input_reader.args
|
| 398 |
+
|
| 399 |
+
return args
|
| 400 |
+
|
| 401 |
+
|
| 402 |
+
def repro_minify(options, mod, load_args):
|
| 403 |
+
args = run_load_args(options, mod, load_args)
|
| 404 |
+
|
| 405 |
+
# Setup debug minifier compiler
|
| 406 |
+
if not options.accuracy:
|
| 407 |
+
compiler_fn = lookup_backend("dynamo_minifier_backend")
|
| 408 |
+
else:
|
| 409 |
+
compiler_fn = lookup_backend("dynamo_accuracy_minifier_backend")
|
| 410 |
+
|
| 411 |
+
if options.backend is None:
|
| 412 |
+
raise RuntimeError(
|
| 413 |
+
"Compiler name is None - this likely means that a custom compiler "
|
| 414 |
+
"was called by torchdynamo. Please remove this error, import your "
|
| 415 |
+
"custom compiler function, and replace the backend=None "
|
| 416 |
+
"line in run_repro to backend=<my_imported_custom_function>"
|
| 417 |
+
)
|
| 418 |
+
|
| 419 |
+
dynamo_minifier_backend = functools.partial(
|
| 420 |
+
compiler_fn,
|
| 421 |
+
compiler_name=options.backend,
|
| 422 |
+
)
|
| 423 |
+
opt_mod = torch._dynamo.optimize(dynamo_minifier_backend)(mod)
|
| 424 |
+
|
| 425 |
+
with torch.amp.autocast("cuda", enabled=options.autocast):
|
| 426 |
+
opt_mod(*args)
|
| 427 |
+
|
| 428 |
+
|
| 429 |
+
def repro_run(options, mod, load_args):
|
| 430 |
+
opt_mod = torch._dynamo.optimize(options.backend)(mod)
|
| 431 |
+
|
| 432 |
+
if options.accuracy != "":
|
| 433 |
+
mod.eval()
|
| 434 |
+
opt_mod.eval()
|
| 435 |
+
|
| 436 |
+
with torch.amp.autocast("cuda", enabled=options.autocast):
|
| 437 |
+
# TODO: disable clone
|
| 438 |
+
args = run_load_args(options, mod, load_args)
|
| 439 |
+
assert same_two_models(mod, mod, args), "Eager itself failed"
|
| 440 |
+
if not same_two_models(
|
| 441 |
+
mod,
|
| 442 |
+
opt_mod,
|
| 443 |
+
args,
|
| 444 |
+
only_fwd=config.repro_forward_only,
|
| 445 |
+
ignore_non_fp=config.repro_ignore_non_fp,
|
| 446 |
+
):
|
| 447 |
+
raise AccuracyError("Dynamo failed")
|
| 448 |
+
else:
|
| 449 |
+
with torch.amp.autocast("cuda", enabled=options.autocast):
|
| 450 |
+
args = run_load_args(options, mod, load_args)
|
| 451 |
+
ref = run_fwd_maybe_bwd(
|
| 452 |
+
mod, args, only_fwd=options.only_fwd, disable_clone=True
|
| 453 |
+
)
|
| 454 |
+
del args
|
| 455 |
+
|
| 456 |
+
args = run_load_args(options, mod, load_args)
|
| 457 |
+
res = run_fwd_maybe_bwd(
|
| 458 |
+
opt_mod, args, only_fwd=options.only_fwd, disable_clone=True
|
| 459 |
+
)
|
| 460 |
+
|
| 461 |
+
|
def run_repro(
    mod,
    load_args,
    *,
    command="run",
    accuracy: Union[bool, str] = "",
    save_dir=None,
    autocast=False,
    backend="inductor",
    **kwargs,
):
    """Entry point of a generated after_dynamo repro script.

    Builds an argparse CLI with two subcommands ("run" and "minify"),
    defaulting to ``command`` when the script is invoked with no arguments,
    then dispatches to repro_run / repro_minify.

    Unrecognized ``kwargs`` are warned about (not errors) so that repro
    scripts produced by a newer PyTorch still run on an older one.
    """
    for k in kwargs:
        log.warning(
            "Unrecognized kwarg %s; perhaps this repro was made on a newer version of PyTorch",
            k,
        )

    # Normalize the legacy boolean form of ``accuracy`` to its string form.
    if accuracy is True:
        accuracy = "accuracy"
    elif accuracy is False:
        accuracy = ""

    parser = argparse.ArgumentParser(
        description=f"""\
An after_dynamo repro script, typically triggering a bug in Dynamo or
AOTAutograd. When run with no arguments, this script defaults to running
'{command}'. Extra flags may be available; to find out more, try '{command}
--help'. There are also alternate subcommands available, see below.

default settings on this script:
  {accuracy=}
  {save_dir=}
""",
        formatter_class=argparse.RawTextHelpFormatter,
    )

    def common_flags(parser):
        # Flags shared by every subcommand.  ``accuracy`` is a mutually
        # exclusive pair so only one of --accuracy/--no-accuracy may be given.
        accuracy_group = parser.add_mutually_exclusive_group()
        accuracy_group.add_argument(
            "--no-accuracy",
            dest="accuracy",
            action="store_const",
            const="",
            default=accuracy,
            help="do not test accuracy, just run the module and see if it errors",
        )
        accuracy_group.add_argument(
            "--accuracy",
            action="store_const",
            const="accuracy",
            default=accuracy,
            help="test accuracy",
        )
        parser.add_argument(
            "--save-dir",
            type=str,
            default=save_dir,
            metavar="DIR",
            help="directory where saved inputs live",
        )
        parser.add_argument(
            "--no-save-dir",
            dest="save_dir",
            action="store_const",
            const=None,
            help="don't use any directory for saved inputs",
        )
        parser.add_argument(
            "--no-isolate",
            dest="isolate",
            action="store_false",
            default=False,
            help="no isolate (doesn't do anything for after_dynamo)",
        )
        parser.add_argument(
            "--autocast",
            default=autocast,
            action="store_true",
            help="use torch.cuda.amp.autocast",
        )
        parser.add_argument(
            "--no-autocast",
            dest="autocast",
            action="store_false",
            help="don't use torch.cuda.amp.autocast",
        )
        parser.add_argument(
            "--backend",
            type=str,
            default=backend,
            metavar="BACKEND",
            help="torch.compile backend to use",
        )

    subparsers = parser.add_subparsers(
        dest="command", metavar="{run,minify}", required=True
    )

    parser_run = subparsers.add_parser(
        "run",
        help="just run the repro",
    )
    common_flags(parser_run)
    parser_run.add_argument(
        "--only-fwd",
        action="store_true",
        help="don't run backwards compilation for testing",
    )

    parser_minify = subparsers.add_parser(
        "minify", help="run the minifier on the repro"
    )
    common_flags(parser_minify)

    # With no CLI args at all, synthesize the default subcommand so the
    # script "does the right thing" when simply executed.
    args = None
    if len(sys.argv) <= 1:
        args = [command, *sys.argv[1:]]

    options = parser.parse_args(args)
    COMMAND_FNS = {
        "minify": repro_minify,
        "run": repro_run,
    }
    COMMAND_FNS[options.command](options, mod, load_args)
pllava/lib/python3.10/site-packages/torch/_dynamo/side_effects.py
ADDED
|
@@ -0,0 +1,701 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import functools
|
| 3 |
+
import inspect
|
| 4 |
+
import warnings
|
| 5 |
+
from collections.abc import MutableMapping
|
| 6 |
+
from typing import Any, Dict, List, Optional, Type, Union
|
| 7 |
+
|
| 8 |
+
import torch.nn
|
| 9 |
+
|
| 10 |
+
from . import utils, variables
|
| 11 |
+
from .bytecode_transformation import (
|
| 12 |
+
bytecode_from_template,
|
| 13 |
+
create_call_function,
|
| 14 |
+
create_call_method,
|
| 15 |
+
create_instruction,
|
| 16 |
+
)
|
| 17 |
+
from .codegen import PyCodegen
|
| 18 |
+
from .exc import unimplemented
|
| 19 |
+
from .source import GlobalSource, LocalSource, Source
|
| 20 |
+
from .utils import is_frozen_dataclass, nn_module_new, object_new
|
| 21 |
+
from .variables.base import (
|
| 22 |
+
is_side_effect_safe,
|
| 23 |
+
MutableLocalBase,
|
| 24 |
+
MutableLocalSource,
|
| 25 |
+
VariableTracker,
|
| 26 |
+
)
|
| 27 |
+
from .variables.user_defined import FrozenDataClassVariable
|
| 28 |
+
|
| 29 |
+
|
class MutableSideEffects(MutableLocalBase):
    """
    VariableTracker.mutable_local marker to indicate a list passed as
    an input that if we mutate we need to re-apply those mutations after
    the graph runs.
    """

    def __init__(self, source: Source, is_modified: bool = False):
        super().__init__(MutableLocalSource.Existing)
        # Where the pre-existing object came from (needed to re-apply mutations).
        self.source = source
        # Whether a mutation has actually been recorded yet.
        self.is_modified = is_modified
| 41 |
+
|
| 42 |
+
|
class AttributeMutation(MutableLocalBase):
    """
    VariableTracker.mutable_local marker to track changes to attributes
    """

    def __init__(self, typ: MutableLocalSource, source: Optional[Source]):
        super().__init__(typ)
        # source is None for objects created inside the compiled region.
        self.source = source
| 51 |
+
|
| 52 |
+
|
class AttributeMutationExisting(AttributeMutation):
    """Marker for attribute mutation on an object that existed before tracing.

    Such objects always have a ``Source`` (they are reachable from outside the
    compiled region), so ``source`` is required here, unlike the base class.
    """

    def __init__(self, source: Source):
        # The base class constructor already stores ``source``; the previous
        # version assigned it a second time redundantly.
        super().__init__(MutableLocalSource.Existing, source)
| 57 |
+
|
| 58 |
+
|
class AttributeMutationNew(AttributeMutation):
    """Marker for attribute mutation on an object created during tracing.

    ``source`` starts as None and may be filled in later once codegen has
    materialized the object into a temp variable; ``cls_source`` points at the
    class used to reconstruct the object.
    """

    def __init__(self, source: Optional[Source], cls_source: Optional[Source]):
        super().__init__(MutableLocalSource.Local, source)
        self.cls_source = cls_source
| 63 |
+
|
| 64 |
+
|
def _manual_update_dict(dict_from, dict_to):
    # NOTE: this function is used as a *bytecode template* by
    # SideEffects.codegen_update_mutated via bytecode_from_template, and the
    # parameter/loop variable names here are looked up through
    # __code__.co_varnames.  Do not rename variables or restructure the loop
    # (e.g. into dict_to.update(...)) — the whole point is to avoid calling a
    # possibly-overridden ``update`` method.
    for k, v in dict_from.items():
        dict_to[k] = v
| 68 |
+
|
| 69 |
+
|
| 70 |
+
class SideEffects:
|
| 71 |
+
"""
|
| 72 |
+
Track side effects (list mutation, setattr, etc) that need to be
|
| 73 |
+
applied after an FX graph is run.
|
| 74 |
+
"""
|
| 75 |
+
|
| 76 |
+
id_to_variable: Dict[int, VariableTracker]
|
| 77 |
+
store_attr_mutations: Dict[MutableLocalBase, Dict[str, VariableTracker]]
|
| 78 |
+
keepalive: List[Any]
|
| 79 |
+
|
    def __init__(
        self,
        id_to_variable=None,
        store_attr_mutations=None,
        keepalive=None,
        save_for_backward=None,
        tensor_hooks=None,
    ):
        """Initialize side-effect tracking state.

        All container arguments default to fresh empty containers; passing the
        containers explicitly is how ``clone()`` shares / copies state.
        NOTE: the ``or`` pattern means an explicitly-passed *empty* container
        is replaced by a fresh one — callers (clone) always pass non-empty or
        rely on that behavior, so this is left as-is.
        """
        super().__init__()
        # id(obj) -> VariableTracker modeling that object.
        self.id_to_variable = id_to_variable or {}
        # mutable_local -> {attr name -> new VariableTracker value}.
        self.store_attr_mutations = store_attr_mutations or {}
        # Strong references preventing tracked objects from being GC'd
        # (which would allow id() reuse and corrupt id_to_variable).
        self.keepalive = keepalive or []
        # Pending (ctx, args) pairs for autograd.Function ctx.save_for_backward.
        self.save_for_backward = save_for_backward or []
        # idx -> (tensor, hook, handle, name) for tensor hook registration.
        self.tensor_hooks = tensor_hooks or {}
        # Track Compiled Autograd final callbacks that must be called at the end of Compiled Autograd backward graph.
        # Only applicable if this graph is created from Dynamo tracing in Compiled Autograd.
        self.ca_final_callbacks_var = None
| 97 |
+
|
    def __eq__(self, other: object) -> bool:
        """Structural equality on tracked state (used to verify that a
        re-trace produced identical side effects).  Asserting the type rather
        than returning NotImplemented is deliberate: comparing against a
        non-SideEffects object is an internal bug, not a valid question."""
        assert isinstance(other, SideEffects)
        # NB: do NOT test keepalive
        return (
            self.id_to_variable == other.id_to_variable
            and self.store_attr_mutations == other.store_attr_mutations
            and self.save_for_backward == other.save_for_backward
            and self.tensor_hooks == other.tensor_hooks
        )
| 107 |
+
|
| 108 |
+
def diff(self, other: "SideEffects") -> Optional[str]:
|
| 109 |
+
if self.id_to_variable != other.id_to_variable:
|
| 110 |
+
sk_itv = self.id_to_variable.keys()
|
| 111 |
+
ok_itv = other.id_to_variable.keys()
|
| 112 |
+
if sk_itv != ok_itv:
|
| 113 |
+
return f"id_to_variable keys: {sk_itv} != {ok_itv}"
|
| 114 |
+
# Feel free to augment this with more fancy diffing logic
|
| 115 |
+
# if needed for debugging
|
| 116 |
+
return "id_to_variable: unknown diff"
|
| 117 |
+
elif self.store_attr_mutations != other.store_attr_mutations:
|
| 118 |
+
sk_sam = self.store_attr_mutations.keys()
|
| 119 |
+
ok_sam = other.store_attr_mutations.keys()
|
| 120 |
+
if sk_sam != ok_sam:
|
| 121 |
+
return f"store_attr_mutations keys: {sk_sam} != {ok_sam}"
|
| 122 |
+
return "store_attr_mutations: unknown diff"
|
| 123 |
+
elif self.save_for_backward != other.save_for_backward:
|
| 124 |
+
return "save_for_backward"
|
| 125 |
+
elif self.tensor_hooks != other.tensor_hooks:
|
| 126 |
+
return "tensor_hooks"
|
| 127 |
+
else:
|
| 128 |
+
return None
|
| 129 |
+
|
    def clone(self):
        """Create a shallow copy.

        id_to_variable / store_attr_mutations / keepalive get new top-level
        containers; NOTE(review): save_for_backward and tensor_hooks are shared
        by reference with the original — presumably intentional, confirm before
        relying on independent mutation of a clone.
        """
        return self.__class__(
            id_to_variable=dict(self.id_to_variable),
            store_attr_mutations={
                k: dict(v) for k, v in self.store_attr_mutations.items()
            },
            keepalive=list(self.keepalive),
            save_for_backward=self.save_for_backward,
            tensor_hooks=self.tensor_hooks,
        )
| 141 |
+
|
    def __contains__(self, item):
        # Membership is by object identity, not equality.
        return id(item) in self.id_to_variable
| 144 |
+
|
    def __getitem__(self, item):
        # Lookup is by object identity; raises KeyError if ``item`` is untracked.
        return self.id_to_variable[id(item)]
| 147 |
+
|
    def check_allowed_side_effect(self, item):
        """Graph-break unless mutating ``item`` is safe in the current scope
        (HigherOrderOperator subgraphs may not mutate outer-scope state)."""
        from torch._dynamo.variables.misc import AutogradFunctionContextVariable

        # People do things like self.dim = dim inside autograd.Function.
        # These are benign.
        if isinstance(item, AutogradFunctionContextVariable):
            return True
        if not is_side_effect_safe(item.mutable_local):
            unimplemented(
                "HigherOrderOperator: Mutating a variable not in the current scope (SideEffects)"
            )
| 159 |
+
|
| 160 |
+
def store_attr(self, item: VariableTracker, name: str, value: VariableTracker):
|
| 161 |
+
assert self.is_attribute_mutation(item)
|
| 162 |
+
self.check_allowed_side_effect(item)
|
| 163 |
+
if item.mutable_local not in self.store_attr_mutations:
|
| 164 |
+
self.store_attr_mutations[item.mutable_local] = {}
|
| 165 |
+
self.store_attr_mutations[item.mutable_local][name] = value
|
| 166 |
+
|
    def load_attr(self, item, name, deleted_ok=False):
        """Read back a pending attribute mutation recorded by store_attr.

        Raises (via unimplemented/graph break) when the attribute was deleted,
        unless ``deleted_ok`` is set.
        """
        assert self.is_attribute_mutation(item)
        result = self.store_attr_mutations[item.mutable_local][name]
        if not deleted_ok and isinstance(result, variables.DeletedVariable):
            unimplemented("read deleted attribute")
        return result
| 173 |
+
|
    def store_cell(self, cellvar, value):
        # A cell write is modeled as an attribute mutation of "cell_contents",
        # mirroring how CPython exposes cells.
        assert isinstance(cellvar, variables.NewCellVariable)
        assert isinstance(value, variables.VariableTracker)
        self.store_attr(cellvar, "cell_contents", value)
| 178 |
+
|
    def load_cell(self, cellvar):
        # Counterpart of store_cell: cells are read via their "cell_contents".
        assert isinstance(cellvar, variables.NewCellVariable)
        return self.load_attr(cellvar, "cell_contents")
| 182 |
+
|
    def load_global(self, gvar: VariableTracker, name: str):
        # Globals reuse the attribute-mutation machinery keyed by name.
        assert isinstance(gvar, variables.VariableTracker)
        return self.load_attr(gvar, name)
| 186 |
+
|
    def store_global(self, gvar: VariableTracker, name: str, value: VariableTracker):
        # Counterpart of load_global: record a pending global write.
        assert isinstance(gvar, variables.VariableTracker)
        assert isinstance(value, variables.VariableTracker)
        self.store_attr(gvar, name, value)
| 191 |
+
|
    @staticmethod
    def cls_supports_mutation_side_effects(cls):
        """True when ``cls`` keeps the default object.__getattribute__;
        a custom __getattribute__ could observe attribute reads, so deferring
        mutations to after the graph would not be sound."""
        return (
            inspect.getattr_static(cls, "__getattribute__", None)
            is object.__getattribute__
        )
| 198 |
+
|
    def is_attribute_mutation(self, item):
        # True when ``item`` is tracked via the attribute-mutation markers
        # (existing or newly created objects), as opposed to MutableSideEffects.
        return isinstance(item.mutable_local, AttributeMutation)
| 201 |
+
|
    def has_pending_mutation(self, item):
        # Any recorded (non-empty) attribute mutation dict counts as pending.
        return self.is_attribute_mutation(item) and bool(
            self.store_attr_mutations.get(item.mutable_local)
        )
| 206 |
+
|
    def has_pending_mutation_of_attr(self, item, name):
        # () as the .get default is just an empty container for the
        # ``name in ...`` membership test.
        return self.is_attribute_mutation(
            item
        ) and name in self.store_attr_mutations.get(item.mutable_local, ())
| 211 |
+
|
    def is_modified(self, item):
        """Whether ``item`` needs post-graph replay: newly created objects
        always do; attribute-mutated objects do iff a mutation was recorded;
        MutableSideEffects objects carry their own is_modified flag."""
        if isinstance(item.mutable_local, AttributeMutationNew):
            return True
        if self.is_attribute_mutation(item):
            return item.mutable_local in self.store_attr_mutations
        return item.mutable_local.is_modified
| 218 |
+
|
    def _track_obj(
        self,
        item: Any,
        variable: VariableTracker,
        mutable_cls=MutableSideEffects,
    ):
        """Start tracking a new variable for mutation"""
        assert variable.source is not None

        # Double-tracking the same python object would let two trackers
        # disagree about its state, so it is a hard error.
        if id(item) in self.id_to_variable:
            raise AssertionError(
                f"{variable} is already tracked for mutation. This could be "
                "because you are not using VariableBuilder to construct "
                "the variable tracker. "
                f"Source of new object: {variable.source}. "
                f"Source of previously tracked object: {self.id_to_variable[id(item)].source}."
            )

        variable.mutable_local = mutable_cls(variable.source)
        self.id_to_variable[id(item)] = variable
        # Keep the object alive so its id() cannot be reused.
        self.keepalive.append(item)
        return variable

    # Public alias: tracking a mutable container uses the default
    # MutableSideEffects marker.
    track_mutable = _track_obj
| 243 |
+
|
    def track_object_existing(
        self,
        item: Any,
        variable: VariableTracker,
    ):
        # Same as track_mutable but with the attribute-mutation marker for
        # pre-existing objects.
        return self._track_obj(item, variable, mutable_cls=AttributeMutationExisting)
| 250 |
+
|
    def track_object_new(
        self,
        cls_source: Source,
        user_cls: Any,
        variable_cls: Any,
        options,
    ):
        """Allocate a real (uninitialized) instance of ``user_cls`` and track
        it as a newly-created object (AttributeMutationNew, no source yet)."""
        if user_cls is torch.autograd.function.FunctionCtx:
            # FunctionCtx is special-cased; suppress any construction warnings.
            with warnings.catch_warnings(record=True):
                obj = torch.autograd.Function()
        elif issubclass(user_cls, torch.nn.Module):
            obj = nn_module_new(user_cls)
        else:
            # object_new bypasses __init__ — Dynamo traces __init__ separately.
            obj = object_new(user_cls)
        variable = variable_cls(
            obj,
            mutable_local=AttributeMutationNew(None, cls_source),
            **options,
        )
        self.id_to_variable[id(obj)] = variable
        self.keepalive.append(obj)
        return variable
| 273 |
+
|
| 274 |
+
def track_object_new_from_user_defined_class(
|
| 275 |
+
self,
|
| 276 |
+
cls_variable: "variables.UserDefinedClassVariable",
|
| 277 |
+
):
|
| 278 |
+
cls_source = cls_variable.source
|
| 279 |
+
user_cls = cls_variable.value
|
| 280 |
+
|
| 281 |
+
# Find the variable class
|
| 282 |
+
variable_cls: Type[
|
| 283 |
+
variables.UserDefinedObjectVariable
|
| 284 |
+
] = variables.UserDefinedObjectVariable
|
| 285 |
+
if issubclass(user_cls, torch.nn.Module):
|
| 286 |
+
variable_cls = variables.UnspecializedNNModuleVariable
|
| 287 |
+
elif issubclass(user_cls, MutableMapping):
|
| 288 |
+
variable_cls = variables.MutableMappingVariable
|
| 289 |
+
elif is_frozen_dataclass(user_cls):
|
| 290 |
+
variable_cls = FrozenDataClassVariable
|
| 291 |
+
else:
|
| 292 |
+
variable_cls = variables.UserDefinedObjectVariable
|
| 293 |
+
|
| 294 |
+
assert issubclass(variable_cls, variables.UserDefinedObjectVariable)
|
| 295 |
+
|
| 296 |
+
variable_cls = functools.partial(variable_cls, cls_source=cls_source)
|
| 297 |
+
|
| 298 |
+
return self.track_object_new(cls_source, user_cls, variable_cls, {})
|
| 299 |
+
|
    def track_cell_new(
        self,
    ):
        """Track a brand-new cell (closure variable) created during tracing.

        A plain ``object()`` serves as the identity placeholder for the cell;
        the real cell is materialized by codegen later.
        """
        obj = object()
        variable = variables.NewCellVariable(
            mutable_local=AttributeMutationNew(None, None),
        )
        self.id_to_variable[id(obj)] = variable
        self.keepalive.append(obj)
        return variable
| 310 |
+
|
    def track_cell_existing(self, source: Source, item: Any):
        # Track a cell object that existed before tracing; mutations must be
        # written back through ``source``.
        variable = variables.NewCellVariable(
            mutable_local=AttributeMutationExisting(source),
        )
        self.id_to_variable[id(item)] = variable
        self.keepalive.append(item)
        return variable
| 318 |
+
|
    def track_global_existing(self, source: Source, item: Any):
        # Track a pre-existing module-level global for write-back, analogous
        # to track_cell_existing.
        variable = variables.NewGlobalVariable(
            mutable_local=AttributeMutationExisting(source),
        )
        self.id_to_variable[id(item)] = variable
        self.keepalive.append(item)
        return variable
| 326 |
+
|
    def track_save_for_backward(self, ctx, args):
        # Defer ctx.save_for_backward(*args); replayed in codegen_save_tempvars.
        assert isinstance(ctx, variables.AutogradFunctionContextVariable)
        self.save_for_backward.append((ctx, args))
| 330 |
+
|
    def track_tensor_variables_from_runahead_side_effects(self, other):
        # In higher order ops we want to keep track of tensors seen in the
        # speculate_subgraph so that we don't lift them again as a new input in
        # other speculate_subgraph or in the root tracer.
        for other_item in other.keepalive:
            other_id = id(other_item)
            other_variable = other.id_to_variable[other_id]
            # Only tensors not already tracked here are adopted.
            if other_id not in self.id_to_variable and isinstance(
                other_variable, variables.TensorVariable
            ):
                self.track_object_existing(other_item, other_variable)
| 342 |
+
|
    def prune_dead_object_new(self, tx):
        """Garbage-collect tracking state for objects created during tracing
        (AttributeMutationNew) that are no longer reachable from any live
        value, so dead intermediates don't get reconstructed in the residual
        bytecode.  Pre-existing objects are never pruned."""
        live_new_objects = set()

        # use this to avoid cycles in mutable_local (though I'm not sure if that
        # can actually happen).
        visited: Any = set({})

        def visit(var: VariableTracker):
            # Mark var's mutable_local live and recurse into any values its
            # pending attribute mutations reference.
            mutable_local = var.mutable_local
            if mutable_local is None:
                return
            if mutable_local in visited:
                return
            visited.add(mutable_local)
            # Newly-created objects reachable from a live root stay alive.
            if isinstance(mutable_local, AttributeMutationNew):
                live_new_objects.add(mutable_local)
            # It's possible that we have mutated the value of this variable
            # to be another one. The new value is in store_attr_mutations.
            # Also recurse through the new value to detect alive AttributeMutationNew.
            if var.mutable_local in self.store_attr_mutations:
                VariableTracker.visit(
                    visit, self.store_attr_mutations[var.mutable_local]
                )

        def is_live(var: Union[MutableLocalBase, VariableTracker]):
            # Only AttributeMutationNew entries can be dead; everything else
            # (pre-existing objects, plain markers) is always kept.
            if isinstance(var, AttributeMutationNew):
                return var in live_new_objects
            if isinstance(var, VariableTracker):
                return is_live(var.mutable_local)
            return True

        pre_existing_vars = [
            var
            for var in self.id_to_variable.values()
            if not isinstance(var.mutable_local, AttributeMutationNew)
        ]

        # The only live side effects come from returns (tx.stack), any intermediates
        # during a graph break (tx.symbolic_locals), and mutation on pre-existing variables.
        # Recursively visit Variables and see if any of them have been mutated.
        VariableTracker.visit(visit, (tx.stack, tx.symbolic_locals, pre_existing_vars))

        # NB: cell variable handling is tricky.
        # cell variables must stay alive if any NestedUserFunctionVariable
        # are live. "visit"-ing the NestedUserFunctionVariable visits
        # the .closures field, from which we will see if we need to keep
        # any mutations to cell variables alive.

        self.id_to_variable = {
            k: v for k, v in self.id_to_variable.items() if is_live(v)
        }
        self.store_attr_mutations = {
            k: v for k, v in self.store_attr_mutations.items() if is_live(k)
        }
| 398 |
+
|
    def mutation(self, var):
        """Flag ``var`` as mutated (after checking the mutation is allowed in
        the current scope).  For MutableSideEffects markers this swaps in a new
        marker with is_modified=True rather than mutating the old one."""
        self.check_allowed_side_effect(var)
        if isinstance(var.mutable_local, MutableSideEffects):
            var.mutable_local = MutableSideEffects(var.mutable_local.source, True)
| 403 |
+
|
| 404 |
+
def _get_modified_vars(self):
|
| 405 |
+
return [var for var in self.id_to_variable.values() if self.is_modified(var)]
|
| 406 |
+
|
    def codegen_save_tempvars(self, cg: PyCodegen):
        """Emit bytecode that materializes every modified object into a temp
        variable before mutations are replayed: new cells via make_cell, other
        new objects via object_new, pre-existing objects from their source.
        Also replays deferred ctx.save_for_backward calls."""
        for var in self._get_modified_vars():
            if isinstance(
                var.mutable_local, (AttributeMutationExisting, AttributeMutationNew)
            ) and isinstance(var, variables.NewCellVariable):
                # Cells (new or existing) are rebuilt with utils.make_cell().
                # NOTE: the lambda is invoked inside this iteration by
                # add_push_null, so closing over the loop is safe here.
                cg.add_push_null(
                    lambda: cg.load_import_from(utils.__name__, "make_cell")
                )
                cg.extend_output(create_call_function(0, False))
                cg.add_cache(var)
                if isinstance(var.mutable_local, AttributeMutationNew):
                    # From now on the object lives in the temp local.
                    var.mutable_local.source = LocalSource(cg.tempvars[var])  # type: ignore[attr-defined]
            elif isinstance(var.mutable_local, AttributeMutationNew):
                # Non-cell objects created during tracing: allocate with
                # object_new(cls) (bypasses __init__) into a temp local.
                if isinstance(var, variables.AutogradFunctionContextVariable):
                    unimplemented("AutogradFunctionContextVariable escaped")
                cg.add_push_null(
                    lambda: cg.load_import_from(utils.__name__, "object_new")
                )
                cg(var.mutable_local.cls_source)
                cg.extend_output(create_call_function(1, False))
                cg.add_cache(var)
                var.mutable_local.source = LocalSource(cg.tempvars[var])
            elif var in cg.tempvars:
                assert cg.tempvars.get(var) is None
                # subsequent usage should point to the original variable
                cg(var.mutable_local.source)
                cg.add_cache(var)

        # Replay deferred autograd.Function ctx.save_for_backward(*args) calls.
        for ctx, args in self.save_for_backward:
            cg(ctx.source)
            cg.load_method("save_for_backward")
            for arg in args:
                cg(arg)
            cg.extend_output(
                [
                    *create_call_method(len(args)),
                    create_instruction("POP_TOP"),
                ]
            )
| 446 |
+
|
    def register_hook(self, tensor, hook, handle, name):
        """Record a tensor hook registration (e.g. register_hook /
        register_post_accumulate_grad_hook — ``name`` is the torch.Tensor
        method) to be re-issued in the residual bytecode."""
        assert isinstance(tensor, variables.TensorVariable)
        assert isinstance(hook, variables.VariableTracker)
        assert (
            isinstance(handle, variables.RemovableHandleVariable)
            and handle.mutable_local
        )
        assert hasattr(torch.Tensor, name)
        idx = len(self.tensor_hooks.keys())
        # duplicate index possible because of self.remove_hook()
        while idx in self.tensor_hooks:
            idx += 1
        self.tensor_hooks[idx] = (tensor, hook, handle, name)
        # NOTE(review): ``assert not handle.idx`` would also pass for idx == 0;
        # relies on handles never being registered twice — confirm.
        assert not handle.idx
        handle.idx = idx
| 462 |
+
|
    def remove_hook(self, idx):
        # Unregister a pending hook; its index may later be reused (see the
        # duplicate-index loop in register_hook).
        del self.tensor_hooks[idx]
| 465 |
+
|
    def codegen_hooks(self, cg):
        """Emit residual bytecode that re-registers recorded tensor hooks
        (input/param tensors only — see the note below) and associates each
        returned handle with its user-visible variable."""
        for (
            tensor,
            hook,
            handle,
            name,
        ) in self.tensor_hooks.values():
            # Note: [On tensor.register_hook]
            #
            # register_hook on a tensor, AKA backward hooks, have slightly nuanced differences in how they are implemented
            # when it comes to hooks on objects with sources (inputs, params) vs objects without sources (intermediaries).
            #
            # For tensors with a source, we bypass direct inclusion of register_hook calls in the graph.
            # Instead, these are tracked and stashed as a global variable, enabling their association with tensors in
            # the residuals. During dynamo's frame creation, these hooks are invoked seamlessly on known reconstructible/fetch-able
            # tensors. Because a source indicates knowledge of this object outside the torch compile region, and
            # because we are running residuals firmly before .backward() can be run, it is sound to invoke
            # `register_hook` on a known tensor.
            #
            # For tensors without a source, we support a limited subset of hooks. Global functions only, and
            # compiled_autograd must be enabled or we will graph break.
            #
            # Handling the Handle: When a user retains the register_hook result in a handle, we intercept the
            # STORE_FAST operation to record the user-designated local variable name. This ensures the reconstructed
            # bytecode retains this name. If no handle is defined, we simply pop the generated value to keep the
            # stack intact.
            #
            # Dynamo Tensor Hooks Workflow:
            # - Functions passed to register_hook are lifted globally.
            # - For tensors with sources:
            #   - In the "side_effects" phase of codegen, we iterate over tensors with hooks to:
            #     - Generate the tensor.
            #     - Issue a register_hook call on the tensor, linking to the globally stored function.
            #     - Incorporate a handle if one was established in the eager phase.
            # - For tensors without sources:
            #   - We don't generate any instructions for registering a hook.
            #   - Handles from intermediary hooks are NYI.
            #   - We produce a call function that utilizes the trace_wrapped higher order op, closing over it.
            #   - We then manually insert the call function above into the graph.
            # - The handle's exact user-specified name, "user_code_variable_name", is discerned and associated during STORE_FAST.
            assert tensor.source, "Hooks on non input tensors NYI - should not get here"

            # gen_fn closes over the current tensor/name; it is called within
            # this same iteration (via add_push_null), so the late-binding
            # closure pitfall does not apply.
            def gen_fn():
                cg(tensor)
                cg.extend_output([cg.create_load_attr(name)])

            cg.add_push_null(gen_fn)
            cg(hook)
            cg.extend_output(create_call_function(1, False))

            # Adding the handle to the cache means RemovableHandleVariable().reconstruct() will
            # be associated with the return value of register_hook(). This consumes the top of stack.
            cg.add_cache(handle)
| 519 |
+
|
    def get_ca_final_callbacks_var(self):
        """Lazily create (and cache) the ListVariable holding Compiled
        Autograd final callbacks for this graph."""
        from .variables.base import MutableLocal

        if self.ca_final_callbacks_var is None:
            self.ca_final_callbacks_var = variables.ListVariable(
                [], mutable_local=MutableLocal()
            )
        return self.ca_final_callbacks_var
| 528 |
+
|
| 529 |
+
def codegen_update_mutated(self, cg: PyCodegen):
|
| 530 |
+
suffixes = []
|
| 531 |
+
for var in self._get_modified_vars():
|
| 532 |
+
if isinstance(var, variables.ListVariable):
|
| 533 |
+
# old[:] = new
|
| 534 |
+
cg(var, allow_cache=False)
|
| 535 |
+
cg(var.mutable_local.source) # type: ignore[attr-defined]
|
| 536 |
+
cg.extend_output(
|
| 537 |
+
[
|
| 538 |
+
cg.create_load_const(None),
|
| 539 |
+
cg.create_load_const(None),
|
| 540 |
+
create_instruction("BUILD_SLICE", arg=2),
|
| 541 |
+
]
|
| 542 |
+
)
|
| 543 |
+
suffixes.append([create_instruction("STORE_SUBSCR")])
|
| 544 |
+
elif isinstance(var, variables.CustomizedDictVariable):
|
| 545 |
+
# need to update the dict manually since update method may be invalid
|
| 546 |
+
varname_map = {}
|
| 547 |
+
for name in _manual_update_dict.__code__.co_varnames:
|
| 548 |
+
varname_map[name] = cg.tx.output.new_var()
|
| 549 |
+
|
| 550 |
+
cg(var.mutable_local.source) # type: ignore[attr-defined]
|
| 551 |
+
cg.extend_output(
|
| 552 |
+
[create_instruction("STORE_FAST", argval=varname_map["dict_to"])]
|
| 553 |
+
)
|
| 554 |
+
|
| 555 |
+
cg(var, allow_cache=False)
|
| 556 |
+
cg.extend_output(
|
| 557 |
+
[create_instruction("STORE_FAST", argval=varname_map["dict_from"])]
|
| 558 |
+
)
|
| 559 |
+
|
| 560 |
+
cg(var.mutable_local.source) # type: ignore[attr-defined]
|
| 561 |
+
cg.load_method("clear")
|
| 562 |
+
|
| 563 |
+
# unfortunately can't just use DICT_MERGE due to possible custom behaviors
|
| 564 |
+
dict_update_insts = bytecode_from_template(
|
| 565 |
+
_manual_update_dict, varname_map=varname_map
|
| 566 |
+
)
|
| 567 |
+
|
| 568 |
+
suffixes.append(
|
| 569 |
+
[
|
| 570 |
+
*create_call_method(0), # clear
|
| 571 |
+
create_instruction("POP_TOP"),
|
| 572 |
+
*dict_update_insts,
|
| 573 |
+
create_instruction("POP_TOP"),
|
| 574 |
+
]
|
| 575 |
+
)
|
| 576 |
+
|
| 577 |
+
elif isinstance(var, variables.ConstDictVariable):
|
| 578 |
+
cg(var.mutable_local.source) # type: ignore[attr-defined]
|
| 579 |
+
cg.load_method("update")
|
| 580 |
+
cg(var, allow_cache=False)
|
| 581 |
+
|
| 582 |
+
cg(var.mutable_local.source) # type: ignore[attr-defined]
|
| 583 |
+
cg.load_method("clear")
|
| 584 |
+
|
| 585 |
+
suffixes.append(
|
| 586 |
+
[
|
| 587 |
+
*create_call_method(0), # clear
|
| 588 |
+
create_instruction("POP_TOP"),
|
| 589 |
+
*create_call_method(1), # update
|
| 590 |
+
create_instruction("POP_TOP"),
|
| 591 |
+
]
|
| 592 |
+
)
|
| 593 |
+
elif isinstance(
|
| 594 |
+
var, variables.torch_function.TorchFunctionModeStackVariable
|
| 595 |
+
):
|
| 596 |
+
cg.add_push_null(
|
| 597 |
+
lambda: cg.load_import_from(
|
| 598 |
+
utils.__name__, "set_torch_function_mode_stack"
|
| 599 |
+
)
|
| 600 |
+
)
|
| 601 |
+
cg.foreach(var.symbolic_stack)
|
| 602 |
+
cg.append_output(
|
| 603 |
+
create_instruction("BUILD_LIST", arg=len(var.symbolic_stack))
|
| 604 |
+
)
|
| 605 |
+
cg.call_function(1, False)
|
| 606 |
+
cg.append_output(create_instruction("POP_TOP"))
|
| 607 |
+
elif self.is_attribute_mutation(var):
|
| 608 |
+
# Applying mutations involves two steps: 1) Push all
|
| 609 |
+
# reconstructed objects onto the stack. 2) Call STORE_ATTR to
|
| 610 |
+
# apply the mutations.
|
| 611 |
+
#
|
| 612 |
+
# Dynamo must ensure that mutations are applied in the same
|
| 613 |
+
# order as in the original program. Therefore, two reverse
|
| 614 |
+
# operations occur below.
|
| 615 |
+
#
|
| 616 |
+
# The first reverse operation concerns `suffixes`. We apply
|
| 617 |
+
# suffixes in reverse order due to the way Python handles the
|
| 618 |
+
# stack. In Step 1, we push all reconstructed objects onto the
|
| 619 |
+
# stack, but the item at the top of the stack refers to the last
|
| 620 |
+
# attribute in the mutation order. If not fixed, this will apply
|
| 621 |
+
# the mutations of attributes in the reverse order. To account
|
| 622 |
+
# for this reversal, we iterate through the mutable attributes
|
| 623 |
+
# in reverse order.
|
| 624 |
+
for name, value in reversed(
|
| 625 |
+
self.store_attr_mutations.get(var.mutable_local, {}).items()
|
| 626 |
+
):
|
| 627 |
+
if isinstance(var, variables.NewGlobalVariable):
|
| 628 |
+
cg.tx.output.update_co_names(name)
|
| 629 |
+
cg(value)
|
| 630 |
+
assert isinstance(var.mutable_local.source, GlobalSource) # type: ignore[attr-defined]
|
| 631 |
+
suffixes.append(
|
| 632 |
+
[create_instruction("STORE_GLOBAL", argval=name)]
|
| 633 |
+
)
|
| 634 |
+
elif isinstance(value, variables.DeletedVariable):
|
| 635 |
+
if isinstance(
|
| 636 |
+
var.mutable_local, AttributeMutationExisting
|
| 637 |
+
) and hasattr(getattr(var, "value", None), name):
|
| 638 |
+
cg.tx.output.update_co_names(name)
|
| 639 |
+
cg(var.mutable_local.source)
|
| 640 |
+
suffixes.append(
|
| 641 |
+
[create_instruction("DELETE_ATTR", argval=name)]
|
| 642 |
+
)
|
| 643 |
+
elif (
|
| 644 |
+
isinstance(var, variables.UserDefinedObjectVariable)
|
| 645 |
+
and var.needs_slow_setattr()
|
| 646 |
+
):
|
| 647 |
+
# __setattr__ is defined on this object, so call object.__setattr__ directly
|
| 648 |
+
cg.load_import_from("builtins", "object")
|
| 649 |
+
cg.load_method("__setattr__")
|
| 650 |
+
cg(var.mutable_local.source) # type: ignore[attr-defined]
|
| 651 |
+
cg(variables.ConstantVariable(name))
|
| 652 |
+
cg(value)
|
| 653 |
+
suffixes.append(
|
| 654 |
+
[*create_call_method(3), create_instruction("POP_TOP")]
|
| 655 |
+
)
|
| 656 |
+
else:
|
| 657 |
+
cg.tx.output.update_co_names(name)
|
| 658 |
+
cg(value)
|
| 659 |
+
cg(var.mutable_local.source)
|
| 660 |
+
suffixes.append([create_instruction("STORE_ATTR", argval=name)])
|
| 661 |
+
elif isinstance(var, variables.TupleIteratorVariable):
|
| 662 |
+
for _ in range(var.index):
|
| 663 |
+
cg.add_push_null(
|
| 664 |
+
lambda: cg.load_import_from(utils.__name__, "iter_next")
|
| 665 |
+
)
|
| 666 |
+
cg(var.mutable_local.source) # type: ignore[attr-defined]
|
| 667 |
+
cg.call_function(1, False)
|
| 668 |
+
cg.pop_top()
|
| 669 |
+
elif isinstance(var, variables.RandomVariable):
|
| 670 |
+
# set correct random seed state
|
| 671 |
+
def gen_fn():
|
| 672 |
+
cg(var.mutable_local.source) # type: ignore[attr-defined]
|
| 673 |
+
cg.load_attr("setstate")
|
| 674 |
+
|
| 675 |
+
cg.add_push_null(gen_fn)
|
| 676 |
+
cg(var.wrap_state(var.random.getstate()))
|
| 677 |
+
|
| 678 |
+
suffixes.append(
|
| 679 |
+
[
|
| 680 |
+
*create_call_function(1, False), # setstate
|
| 681 |
+
create_instruction("POP_TOP"),
|
| 682 |
+
]
|
| 683 |
+
)
|
| 684 |
+
else:
|
| 685 |
+
raise AssertionError(type(var))
|
| 686 |
+
|
| 687 |
+
# do all the actual mutations at the very end to handle dependencies
|
| 688 |
+
for suffix in reversed(suffixes):
|
| 689 |
+
cg.extend_output(suffix)
|
| 690 |
+
|
| 691 |
+
def is_empty(self):
|
| 692 |
+
return not (
|
| 693 |
+
any(map(self.is_modified, self.id_to_variable.values()))
|
| 694 |
+
or self.tensor_hooks
|
| 695 |
+
or self.save_for_backward
|
| 696 |
+
or self.tensor_hooks
|
| 697 |
+
)
|
| 698 |
+
|
| 699 |
+
def clear(self):
|
| 700 |
+
self.keepalive.clear()
|
| 701 |
+
self.id_to_variable.clear()
|
pllava/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/tensor_version_op.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch
|
| 3 |
+
from torch._prims import _make_prim, RETURN_TYPE
|
| 4 |
+
from torch._subclasses import FakeTensorMode
|
| 5 |
+
from torch._subclasses.functional_tensor import FunctionalTensorMode
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
_tensor_version = _make_prim(
|
| 9 |
+
schema="_tensor_version(Tensor self) -> SymInt",
|
| 10 |
+
return_type=RETURN_TYPE.NEW,
|
| 11 |
+
meta=torch.ops.aten._version.default,
|
| 12 |
+
impl_aten=torch.ops.aten._version.default,
|
| 13 |
+
doc="Tracable unbacked SymInt version of torch.Tensor._version",
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@_tensor_version.py_impl(FakeTensorMode)
|
| 18 |
+
def _tensor_version_fake(fake_mode, self_tensor):
|
| 19 |
+
"""
|
| 20 |
+
The initial dynamo capture of _tensor_version + _unsafe_set_version_counter turns the
|
| 21 |
+
`._version` into an unbacked SymInt so that we don't need to specialize on the `._version`
|
| 22 |
+
of input tensors to the graph.
|
| 23 |
+
"""
|
| 24 |
+
return fake_mode.shape_env.create_unbacked_symint()
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
_unsafe_set_version_counter = _make_prim(
|
| 28 |
+
schema="_unsafe_set_version_counter(Tensor self, SymInt version) -> ()",
|
| 29 |
+
return_type=RETURN_TYPE.NEW,
|
| 30 |
+
meta=lambda self, version: None,
|
| 31 |
+
impl_aten=torch._C._autograd._unsafe_set_version_counter,
|
| 32 |
+
doc="Tracable+SymInt version of torch._C._autograd._unsafe_set_version_counter",
|
| 33 |
+
)
|
| 34 |
+
torch.fx.node.has_side_effect(_unsafe_set_version_counter)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
"""
|
| 38 |
+
When we functionalize _tensor_version + _unsafe_set_version_counter,
|
| 39 |
+
the ops disappear from the traced graph. We run them eagerly on the
|
| 40 |
+
fake tensors used for tracing, in order to get past asserts that would
|
| 41 |
+
fail in autograd.
|
| 42 |
+
|
| 43 |
+
Why is this ok?
|
| 44 |
+
1) Versions on functional tensors don't make any sense since you can't mutate a functional tensor.
|
| 45 |
+
2) The whole point of version munging is to trick autograd into doing what we want, and after
|
| 46 |
+
AotAtuograd there is no longer any need for these ops.
|
| 47 |
+
|
| 48 |
+
Note this is similar to how no_grad is handled.
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
@_tensor_version.py_impl(FunctionalTensorMode)
|
| 53 |
+
def _tensor_version_functional(mode, self):
|
| 54 |
+
return self._version
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
@_unsafe_set_version_counter.py_impl(FunctionalTensorMode)
|
| 58 |
+
def _unsafe_set_version_counter_functional(ctx, self, version):
|
| 59 |
+
torch._C._autograd._unsafe_set_version_counter(self, version)
|
pllava/lib/python3.10/site-packages/torch/_dynamo/test_case.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import contextlib
|
| 3 |
+
import importlib
|
| 4 |
+
import logging
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import torch.testing
|
| 8 |
+
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
|
| 9 |
+
IS_WINDOWS,
|
| 10 |
+
TEST_WITH_CROSSREF,
|
| 11 |
+
TEST_WITH_TORCHDYNAMO,
|
| 12 |
+
TestCase as TorchTestCase,
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
from . import config, reset, utils
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
log = logging.getLogger(__name__)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def run_tests(needs=()):
|
| 22 |
+
from torch.testing._internal.common_utils import run_tests
|
| 23 |
+
|
| 24 |
+
if TEST_WITH_TORCHDYNAMO or IS_WINDOWS or TEST_WITH_CROSSREF:
|
| 25 |
+
return # skip testing
|
| 26 |
+
|
| 27 |
+
if isinstance(needs, str):
|
| 28 |
+
needs = (needs,)
|
| 29 |
+
for need in needs:
|
| 30 |
+
if need == "cuda":
|
| 31 |
+
if not torch.cuda.is_available():
|
| 32 |
+
return
|
| 33 |
+
else:
|
| 34 |
+
try:
|
| 35 |
+
importlib.import_module(need)
|
| 36 |
+
except ImportError:
|
| 37 |
+
return
|
| 38 |
+
run_tests()
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class TestCase(TorchTestCase):
|
| 42 |
+
_exit_stack: contextlib.ExitStack
|
| 43 |
+
|
| 44 |
+
@classmethod
|
| 45 |
+
def tearDownClass(cls):
|
| 46 |
+
cls._exit_stack.close()
|
| 47 |
+
super().tearDownClass()
|
| 48 |
+
|
| 49 |
+
@classmethod
|
| 50 |
+
def setUpClass(cls):
|
| 51 |
+
super().setUpClass()
|
| 52 |
+
cls._exit_stack = contextlib.ExitStack() # type: ignore[attr-defined]
|
| 53 |
+
cls._exit_stack.enter_context( # type: ignore[attr-defined]
|
| 54 |
+
config.patch(
|
| 55 |
+
raise_on_ctx_manager_usage=True,
|
| 56 |
+
suppress_errors=False,
|
| 57 |
+
log_compilation_metrics=False,
|
| 58 |
+
),
|
| 59 |
+
)
|
| 60 |
+
|
| 61 |
+
def setUp(self):
|
| 62 |
+
self._prior_is_grad_enabled = torch.is_grad_enabled()
|
| 63 |
+
super().setUp()
|
| 64 |
+
reset()
|
| 65 |
+
utils.counters.clear()
|
| 66 |
+
|
| 67 |
+
def tearDown(self):
|
| 68 |
+
for k, v in utils.counters.items():
|
| 69 |
+
print(k, v.most_common())
|
| 70 |
+
reset()
|
| 71 |
+
utils.counters.clear()
|
| 72 |
+
super().tearDown()
|
| 73 |
+
if self._prior_is_grad_enabled is not torch.is_grad_enabled():
|
| 74 |
+
log.warning("Running test changed grad mode")
|
| 75 |
+
torch.set_grad_enabled(self._prior_is_grad_enabled)
|
pllava/lib/python3.10/site-packages/torch/_dynamo/test_minifier_common.py
ADDED
|
@@ -0,0 +1,249 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import dataclasses
|
| 3 |
+
import io
|
| 4 |
+
import logging
|
| 5 |
+
import os
|
| 6 |
+
import re
|
| 7 |
+
import shutil
|
| 8 |
+
import subprocess
|
| 9 |
+
import sys
|
| 10 |
+
import tempfile
|
| 11 |
+
import traceback
|
| 12 |
+
from typing import Optional
|
| 13 |
+
from unittest.mock import patch
|
| 14 |
+
|
| 15 |
+
import torch
|
| 16 |
+
import torch._dynamo
|
| 17 |
+
import torch._dynamo.test_case
|
| 18 |
+
from torch._dynamo.trace_rules import _as_posix_path
|
| 19 |
+
from torch.utils._traceback import report_compile_source_on_error
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@dataclasses.dataclass
|
| 23 |
+
class MinifierTestResult:
|
| 24 |
+
minifier_code: str
|
| 25 |
+
repro_code: str
|
| 26 |
+
|
| 27 |
+
def _get_module(self, t):
|
| 28 |
+
match = re.search(r"class Repro\(torch\.nn\.Module\):\s+([ ].*\n| *\n)+", t)
|
| 29 |
+
assert match is not None, "failed to find module"
|
| 30 |
+
r = match.group(0)
|
| 31 |
+
r = re.sub(r"\s+$", "\n", r, flags=re.MULTILINE)
|
| 32 |
+
r = re.sub(r"\n{3,}", "\n\n", r)
|
| 33 |
+
return r.strip()
|
| 34 |
+
|
| 35 |
+
def minifier_module(self):
|
| 36 |
+
return self._get_module(self.minifier_code)
|
| 37 |
+
|
| 38 |
+
def repro_module(self):
|
| 39 |
+
return self._get_module(self.repro_code)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class MinifierTestBase(torch._dynamo.test_case.TestCase):
|
| 43 |
+
DEBUG_DIR = tempfile.mkdtemp()
|
| 44 |
+
|
| 45 |
+
@classmethod
|
| 46 |
+
def setUpClass(cls):
|
| 47 |
+
super().setUpClass()
|
| 48 |
+
cls._exit_stack.enter_context( # type: ignore[attr-defined]
|
| 49 |
+
torch._dynamo.config.patch(debug_dir_root=cls.DEBUG_DIR)
|
| 50 |
+
)
|
| 51 |
+
# These configurations make new process startup slower. Disable them
|
| 52 |
+
# for the minification tests to speed them up.
|
| 53 |
+
cls._exit_stack.enter_context( # type: ignore[attr-defined]
|
| 54 |
+
torch._inductor.config.patch(
|
| 55 |
+
{
|
| 56 |
+
# https://github.com/pytorch/pytorch/issues/100376
|
| 57 |
+
"pattern_matcher": False,
|
| 58 |
+
# multiprocess compilation takes a long time to warmup
|
| 59 |
+
"compile_threads": 1,
|
| 60 |
+
# https://github.com/pytorch/pytorch/issues/100378
|
| 61 |
+
"cpp.vec_isa_ok": False,
|
| 62 |
+
}
|
| 63 |
+
)
|
| 64 |
+
)
|
| 65 |
+
|
| 66 |
+
@classmethod
|
| 67 |
+
def tearDownClass(cls):
|
| 68 |
+
if os.getenv("PYTORCH_KEEP_TMPDIR", "0") != "1":
|
| 69 |
+
shutil.rmtree(cls.DEBUG_DIR)
|
| 70 |
+
else:
|
| 71 |
+
print(f"test_minifier_common tmpdir kept at: {cls.DEBUG_DIR}")
|
| 72 |
+
cls._exit_stack.close() # type: ignore[attr-defined]
|
| 73 |
+
|
| 74 |
+
def _gen_codegen_fn_patch_code(self, device, bug_type):
|
| 75 |
+
assert bug_type in ("compile_error", "runtime_error", "accuracy")
|
| 76 |
+
return f"""\
|
| 77 |
+
{torch._dynamo.config.codegen_config()}
|
| 78 |
+
{torch._inductor.config.codegen_config()}
|
| 79 |
+
torch._inductor.config.{"cpp" if device == "cpu" else "triton"}.inject_relu_bug_TESTING_ONLY = {bug_type!r}
|
| 80 |
+
"""
|
| 81 |
+
|
| 82 |
+
def _maybe_subprocess_run(self, args, *, isolate, cwd=None):
|
| 83 |
+
if not isolate:
|
| 84 |
+
assert len(args) >= 2, args
|
| 85 |
+
assert args[0] == "python3", args
|
| 86 |
+
if args[1] == "-c":
|
| 87 |
+
assert len(args) == 3, args
|
| 88 |
+
code = args[2]
|
| 89 |
+
args = ["-c"]
|
| 90 |
+
else:
|
| 91 |
+
assert len(args) >= 2, args
|
| 92 |
+
with open(args[1]) as f:
|
| 93 |
+
code = f.read()
|
| 94 |
+
args = args[1:]
|
| 95 |
+
|
| 96 |
+
# WARNING: This is not a perfect simulation of running
|
| 97 |
+
# the program out of tree. We only interpose on things we KNOW we
|
| 98 |
+
# need to handle for tests. If you need more stuff, you will
|
| 99 |
+
# need to augment this appropriately.
|
| 100 |
+
|
| 101 |
+
# NB: Can't use save_config because that will omit some fields,
|
| 102 |
+
# but we must save and reset ALL fields
|
| 103 |
+
dynamo_config = torch._dynamo.config.shallow_copy_dict()
|
| 104 |
+
inductor_config = torch._inductor.config.shallow_copy_dict()
|
| 105 |
+
try:
|
| 106 |
+
stderr = io.StringIO()
|
| 107 |
+
log_handler = logging.StreamHandler(stderr)
|
| 108 |
+
log = logging.getLogger("torch._dynamo")
|
| 109 |
+
log.addHandler(log_handler)
|
| 110 |
+
try:
|
| 111 |
+
prev_cwd = _as_posix_path(os.getcwd())
|
| 112 |
+
if cwd is not None:
|
| 113 |
+
cwd = _as_posix_path(cwd)
|
| 114 |
+
os.chdir(cwd)
|
| 115 |
+
with patch("sys.argv", args), report_compile_source_on_error():
|
| 116 |
+
exec(code, {"__name__": "__main__", "__compile_source__": code})
|
| 117 |
+
rc = 0
|
| 118 |
+
except Exception:
|
| 119 |
+
rc = 1
|
| 120 |
+
traceback.print_exc(file=stderr)
|
| 121 |
+
finally:
|
| 122 |
+
log.removeHandler(log_handler)
|
| 123 |
+
if cwd is not None:
|
| 124 |
+
os.chdir(prev_cwd) # type: ignore[possibly-undefined]
|
| 125 |
+
# Make sure we don't leave buggy compiled frames lying
|
| 126 |
+
# around
|
| 127 |
+
torch._dynamo.reset()
|
| 128 |
+
finally:
|
| 129 |
+
torch._dynamo.config.load_config(dynamo_config)
|
| 130 |
+
torch._inductor.config.load_config(inductor_config)
|
| 131 |
+
|
| 132 |
+
# TODO: return a more appropriate data structure here
|
| 133 |
+
return subprocess.CompletedProcess(
|
| 134 |
+
args,
|
| 135 |
+
rc,
|
| 136 |
+
b"",
|
| 137 |
+
stderr.getvalue().encode("utf-8"),
|
| 138 |
+
)
|
| 139 |
+
else:
|
| 140 |
+
if cwd is not None:
|
| 141 |
+
cwd = _as_posix_path(cwd)
|
| 142 |
+
return subprocess.run(args, capture_output=True, cwd=cwd, check=False)
|
| 143 |
+
|
| 144 |
+
# Run `code` in a separate python process.
|
| 145 |
+
# Returns the completed process state and the directory containing the
|
| 146 |
+
# minifier launcher script, if `code` outputted it.
|
| 147 |
+
def _run_test_code(self, code, *, isolate):
|
| 148 |
+
proc = self._maybe_subprocess_run(
|
| 149 |
+
["python3", "-c", code], isolate=isolate, cwd=self.DEBUG_DIR
|
| 150 |
+
)
|
| 151 |
+
|
| 152 |
+
print("test stdout:", proc.stdout.decode("utf-8"))
|
| 153 |
+
print("test stderr:", proc.stderr.decode("utf-8"))
|
| 154 |
+
repro_dir_match = re.search(
|
| 155 |
+
r"(\S+)minifier_launcher.py", proc.stderr.decode("utf-8")
|
| 156 |
+
)
|
| 157 |
+
if repro_dir_match is not None:
|
| 158 |
+
return proc, repro_dir_match.group(1)
|
| 159 |
+
return proc, None
|
| 160 |
+
|
| 161 |
+
# Runs the minifier launcher script in `repro_dir`
|
| 162 |
+
def _run_minifier_launcher(self, repro_dir, isolate, *, minifier_args=()):
|
| 163 |
+
self.assertIsNotNone(repro_dir)
|
| 164 |
+
launch_file = _as_posix_path(os.path.join(repro_dir, "minifier_launcher.py"))
|
| 165 |
+
with open(launch_file) as f:
|
| 166 |
+
launch_code = f.read()
|
| 167 |
+
self.assertTrue(os.path.exists(launch_file))
|
| 168 |
+
|
| 169 |
+
args = ["python3", launch_file, "minify", *minifier_args]
|
| 170 |
+
if not isolate:
|
| 171 |
+
args.append("--no-isolate")
|
| 172 |
+
launch_proc = self._maybe_subprocess_run(args, isolate=isolate, cwd=repro_dir)
|
| 173 |
+
print("minifier stdout:", launch_proc.stdout.decode("utf-8"))
|
| 174 |
+
stderr = launch_proc.stderr.decode("utf-8")
|
| 175 |
+
print("minifier stderr:", stderr)
|
| 176 |
+
self.assertNotIn("Input graph did not fail the tester", stderr)
|
| 177 |
+
|
| 178 |
+
return launch_proc, launch_code
|
| 179 |
+
|
| 180 |
+
# Runs the repro script in `repro_dir`
|
| 181 |
+
def _run_repro(self, repro_dir, *, isolate=True):
|
| 182 |
+
self.assertIsNotNone(repro_dir)
|
| 183 |
+
repro_file = _as_posix_path(os.path.join(repro_dir, "repro.py"))
|
| 184 |
+
with open(repro_file) as f:
|
| 185 |
+
repro_code = f.read()
|
| 186 |
+
self.assertTrue(os.path.exists(repro_file))
|
| 187 |
+
|
| 188 |
+
repro_proc = self._maybe_subprocess_run(
|
| 189 |
+
["python3", repro_file], isolate=isolate, cwd=repro_dir
|
| 190 |
+
)
|
| 191 |
+
print("repro stdout:", repro_proc.stdout.decode("utf-8"))
|
| 192 |
+
print("repro stderr:", repro_proc.stderr.decode("utf-8"))
|
| 193 |
+
return repro_proc, repro_code
|
| 194 |
+
|
| 195 |
+
# Template for testing code.
|
| 196 |
+
# `run_code` is the code to run for the test case.
|
| 197 |
+
# `patch_code` is the code to be patched in every generated file; usually
|
| 198 |
+
# just use this to turn on bugs via the config
|
| 199 |
+
def _gen_test_code(self, run_code, repro_after, repro_level):
|
| 200 |
+
return f"""\
|
| 201 |
+
import torch
|
| 202 |
+
import torch._dynamo
|
| 203 |
+
{_as_posix_path(torch._dynamo.config.codegen_config())}
|
| 204 |
+
{_as_posix_path(torch._inductor.config.codegen_config())}
|
| 205 |
+
torch._dynamo.config.repro_after = "{repro_after}"
|
| 206 |
+
torch._dynamo.config.repro_level = {repro_level}
|
| 207 |
+
torch._dynamo.config.debug_dir_root = "{_as_posix_path(self.DEBUG_DIR)}"
|
| 208 |
+
{run_code}
|
| 209 |
+
"""
|
| 210 |
+
|
| 211 |
+
# Runs a full minifier test.
|
| 212 |
+
# Minifier tests generally consist of 3 stages:
|
| 213 |
+
# 1. Run the problematic code
|
| 214 |
+
# 2. Run the generated minifier launcher script
|
| 215 |
+
# 3. Run the generated repro script
|
| 216 |
+
#
|
| 217 |
+
# If possible, you should run the test with isolate=False; use
|
| 218 |
+
# isolate=True only if the bug you're testing would otherwise
|
| 219 |
+
# crash the process
|
| 220 |
+
def _run_full_test(
|
| 221 |
+
self, run_code, repro_after, expected_error, *, isolate, minifier_args=()
|
| 222 |
+
) -> Optional[MinifierTestResult]:
|
| 223 |
+
if isolate:
|
| 224 |
+
repro_level = 3
|
| 225 |
+
elif expected_error is None or expected_error == "AccuracyError":
|
| 226 |
+
repro_level = 4
|
| 227 |
+
else:
|
| 228 |
+
repro_level = 2
|
| 229 |
+
test_code = self._gen_test_code(run_code, repro_after, repro_level)
|
| 230 |
+
print("running test", file=sys.stderr)
|
| 231 |
+
test_proc, repro_dir = self._run_test_code(test_code, isolate=isolate)
|
| 232 |
+
if expected_error is None:
|
| 233 |
+
# Just check that there was no error
|
| 234 |
+
self.assertEqual(test_proc.returncode, 0)
|
| 235 |
+
self.assertIsNone(repro_dir)
|
| 236 |
+
return None
|
| 237 |
+
# NB: Intentionally do not test return code; we only care about
|
| 238 |
+
# actually generating the repro, we don't have to crash
|
| 239 |
+
self.assertIn(expected_error, test_proc.stderr.decode("utf-8"))
|
| 240 |
+
self.assertIsNotNone(repro_dir)
|
| 241 |
+
print("running minifier", file=sys.stderr)
|
| 242 |
+
minifier_proc, minifier_code = self._run_minifier_launcher(
|
| 243 |
+
repro_dir, isolate=isolate, minifier_args=minifier_args
|
| 244 |
+
)
|
| 245 |
+
print("running repro", file=sys.stderr)
|
| 246 |
+
repro_proc, repro_code = self._run_repro(repro_dir, isolate=isolate)
|
| 247 |
+
self.assertIn(expected_error, repro_proc.stderr.decode("utf-8"))
|
| 248 |
+
self.assertNotEqual(repro_proc.returncode, 0)
|
| 249 |
+
return MinifierTestResult(minifier_code=minifier_code, repro_code=repro_code)
|
pllava/lib/python3.10/site-packages/torch/_dynamo/types.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import dataclasses
|
| 2 |
+
import sys
|
| 3 |
+
import types
|
| 4 |
+
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Protocol, Union
|
| 5 |
+
|
| 6 |
+
# CacheEntry has a `check_fn` field for the guard, and a `code` field for the code object.
|
| 7 |
+
from torch._C._dynamo.eval_frame import (
|
| 8 |
+
_CacheEntry as CacheEntry,
|
| 9 |
+
_ExtraState as ExtraState,
|
| 10 |
+
)
|
| 11 |
+
from torch._guards import CompileId
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
if sys.version_info >= (3, 11):
|
| 15 |
+
from torch._C._dynamo.eval_frame import _PyInterpreterFrame as DynamoFrameType
|
| 16 |
+
else:
|
| 17 |
+
from types import FrameType as DynamoFrameType
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# We use a dict to store additional data per frame.
|
| 21 |
+
FrameState = Dict[Any, Any]
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class GuardFail(NamedTuple):
|
| 25 |
+
# A string repr of the piece of failed guard code we eval-ed
|
| 26 |
+
reason: str
|
| 27 |
+
# A code object where we failed a guard
|
| 28 |
+
orig_code: types.CodeType
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class GuardFn(Protocol):
|
| 32 |
+
closure_vars: Dict[str, object]
|
| 33 |
+
args: List[str]
|
| 34 |
+
code_parts: List[str]
|
| 35 |
+
verbose_code_parts: List[str]
|
| 36 |
+
global_scope: Dict[str, object]
|
| 37 |
+
guard_fail_fn: Optional[Callable[[GuardFail], None]]
|
| 38 |
+
cache_entry: Optional[CacheEntry]
|
| 39 |
+
extra_state: Optional[ExtraState]
|
| 40 |
+
|
| 41 |
+
# maps locals of user function to bool
|
| 42 |
+
def __call__(self, f_locals: Dict[str, object]) -> bool:
|
| 43 |
+
...
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
@dataclasses.dataclass
|
| 47 |
+
class GuardedCode:
|
| 48 |
+
code: types.CodeType
|
| 49 |
+
check_fn: GuardFn
|
| 50 |
+
compile_id: CompileId
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class DynamoCallbackFn(Protocol):
|
| 54 |
+
def __call__(
|
| 55 |
+
self,
|
| 56 |
+
frame: DynamoFrameType,
|
| 57 |
+
cache_entry: Optional[CacheEntry],
|
| 58 |
+
frame_state: FrameState,
|
| 59 |
+
) -> Optional[GuardedCode]:
|
| 60 |
+
...
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
DynamoCallback = Union[DynamoCallbackFn, None, bool]
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
class DynamoGuardHook(Protocol):
|
| 67 |
+
def __call__(
|
| 68 |
+
self,
|
| 69 |
+
guard_fn: GuardFn,
|
| 70 |
+
code: types.CodeType,
|
| 71 |
+
f_locals: Dict[str, object],
|
| 72 |
+
index: int,
|
| 73 |
+
last: bool,
|
| 74 |
+
) -> None:
|
| 75 |
+
...
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
class ProfilerStartHook(Protocol):
|
| 79 |
+
def __call__(
|
| 80 |
+
self,
|
| 81 |
+
name: str,
|
| 82 |
+
# TODO(whc) how do I annotate a _RecordFunction here?
|
| 83 |
+
) -> Any:
|
| 84 |
+
...
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
class ProfilerEndHook(Protocol):
|
| 88 |
+
def __call__(self, record: Any) -> None:
|
| 89 |
+
...
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
class BytecodeHook(Protocol):
|
| 93 |
+
def __call__(
|
| 94 |
+
self, code: types.CodeType, new_code: types.CodeType
|
| 95 |
+
) -> Optional[types.CodeType]:
|
| 96 |
+
...
|
pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__init__.py
ADDED
|
@@ -0,0 +1,179 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .base import VariableTracker
|
| 2 |
+
from .builtin import BuiltinVariable
|
| 3 |
+
from .constant import ConstantVariable, EnumVariable
|
| 4 |
+
from .ctx_manager import (
|
| 5 |
+
CatchWarningsCtxManagerVariable,
|
| 6 |
+
ContextWrappingVariable,
|
| 7 |
+
CUDADeviceVariable,
|
| 8 |
+
DeterministicAlgorithmsVariable,
|
| 9 |
+
DisabledSavedTensorsHooksVariable,
|
| 10 |
+
DualLevelContextManager,
|
| 11 |
+
FSDPParamGroupUseTrainingStateVariable,
|
| 12 |
+
GradIncrementNestingCtxManagerVariable,
|
| 13 |
+
GradInplaceRequiresGradCtxManagerVariable,
|
| 14 |
+
GradModeVariable,
|
| 15 |
+
InferenceModeVariable,
|
| 16 |
+
JvpIncrementNestingCtxManagerVariable,
|
| 17 |
+
SetFwdGradEnabledContextManager,
|
| 18 |
+
StreamContextVariable,
|
| 19 |
+
StreamVariable,
|
| 20 |
+
VmapIncrementNestingCtxManagerVariable,
|
| 21 |
+
WithExitFunctionVariable,
|
| 22 |
+
)
|
| 23 |
+
from .dicts import (
|
| 24 |
+
ConstDictVariable,
|
| 25 |
+
CustomizedDictVariable,
|
| 26 |
+
DefaultDictVariable,
|
| 27 |
+
FrozensetVariable,
|
| 28 |
+
SetVariable,
|
| 29 |
+
)
|
| 30 |
+
from .distributed import BackwardHookVariable, DistributedVariable, PlacementVariable
|
| 31 |
+
from .functions import (
|
| 32 |
+
FunctoolsPartialVariable,
|
| 33 |
+
NestedUserFunctionVariable,
|
| 34 |
+
PolyfilledFunctionVariable,
|
| 35 |
+
SkipFunctionVariable,
|
| 36 |
+
UserFunctionVariable,
|
| 37 |
+
UserMethodVariable,
|
| 38 |
+
)
|
| 39 |
+
from .higher_order_ops import (
|
| 40 |
+
FunctionalCallVariable,
|
| 41 |
+
FunctorchHigherOrderVariable,
|
| 42 |
+
TorchHigherOrderOperatorVariable,
|
| 43 |
+
)
|
| 44 |
+
from .iter import (
|
| 45 |
+
CountIteratorVariable,
|
| 46 |
+
CycleIteratorVariable,
|
| 47 |
+
IteratorVariable,
|
| 48 |
+
ItertoolsVariable,
|
| 49 |
+
MapVariable,
|
| 50 |
+
RepeatIteratorVariable,
|
| 51 |
+
ZipVariable,
|
| 52 |
+
)
|
| 53 |
+
from .lazy import LazyVariableTracker
|
| 54 |
+
from .lists import (
|
| 55 |
+
BaseListVariable,
|
| 56 |
+
ListIteratorVariable,
|
| 57 |
+
ListVariable,
|
| 58 |
+
NamedTupleVariable,
|
| 59 |
+
RangeVariable,
|
| 60 |
+
RestrictedListSubclassVariable,
|
| 61 |
+
SliceVariable,
|
| 62 |
+
TupleIteratorVariable,
|
| 63 |
+
TupleVariable,
|
| 64 |
+
)
|
| 65 |
+
from .misc import (
|
| 66 |
+
AutogradFunctionContextVariable,
|
| 67 |
+
AutogradFunctionVariable,
|
| 68 |
+
ClosureVariable,
|
| 69 |
+
DeletedVariable,
|
| 70 |
+
ExceptionVariable,
|
| 71 |
+
GetAttrVariable,
|
| 72 |
+
InspectSignatureVariable,
|
| 73 |
+
LambdaVariable,
|
| 74 |
+
MethodWrapperVariable,
|
| 75 |
+
NewCellVariable,
|
| 76 |
+
NewGlobalVariable,
|
| 77 |
+
NumpyVariable,
|
| 78 |
+
PythonModuleVariable,
|
| 79 |
+
RandomClassVariable,
|
| 80 |
+
RandomVariable,
|
| 81 |
+
RegexPatternVariable,
|
| 82 |
+
StringFormatVariable,
|
| 83 |
+
SuperVariable,
|
| 84 |
+
TorchVersionVariable,
|
| 85 |
+
TypingVariable,
|
| 86 |
+
UnknownVariable,
|
| 87 |
+
)
|
| 88 |
+
from .nn_module import (
|
| 89 |
+
FSDPManagedNNModuleVariable,
|
| 90 |
+
NNModuleVariable,
|
| 91 |
+
UnspecializedBuiltinNNModuleVariable,
|
| 92 |
+
UnspecializedNNModuleVariable,
|
| 93 |
+
)
|
| 94 |
+
from .optimizer import OptimizerVariable
|
| 95 |
+
from .sdpa import SDPAParamsVariable
|
| 96 |
+
from .tensor import (
|
| 97 |
+
FakeItemVariable,
|
| 98 |
+
NumpyNdarrayVariable,
|
| 99 |
+
SymNodeVariable,
|
| 100 |
+
TensorVariable,
|
| 101 |
+
UnspecializedPythonVariable,
|
| 102 |
+
UntypedStorageVariable,
|
| 103 |
+
)
|
| 104 |
+
from .torch import TorchCtxManagerClassVariable, TorchInGraphFunctionVariable
|
| 105 |
+
from .user_defined import (
|
| 106 |
+
MutableMappingVariable,
|
| 107 |
+
RemovableHandleVariable,
|
| 108 |
+
UserDefinedClassVariable,
|
| 109 |
+
UserDefinedObjectVariable,
|
| 110 |
+
WeakRefVariable,
|
| 111 |
+
)
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
# Public re-export list for torch._dynamo.variables.
# NOTE: this is intentionally a subset of the names imported above — many
# VariableTracker subclasses (e.g. SetVariable, SymNodeVariable) are imported
# for internal use but not re-exported here.
__all__ = [
    "AutogradFunctionContextVariable",
    "AutogradFunctionVariable",
    "BackwardHookVariable",
    "BaseListVariable",
    "BuiltinVariable",
    "CatchWarningsCtxManagerVariable",
    "ClosureVariable",
    "ConstantVariable",
    "ConstDictVariable",
    "ContextWrappingVariable",
    "CountIteratorVariable",
    "CUDADeviceVariable",
    "CustomizedDictVariable",
    "CycleIteratorVariable",
    "DefaultDictVariable",
    "DeletedVariable",
    "DeterministicAlgorithmsVariable",
    "EnumVariable",
    "FakeItemVariable",
    "GetAttrVariable",
    "GradModeVariable",
    "InspectSignatureVariable",
    "IteratorVariable",
    "ItertoolsVariable",
    "LambdaVariable",
    "LazyVariableTracker",
    "ListIteratorVariable",
    "ListVariable",
    "NamedTupleVariable",
    "NestedUserFunctionVariable",
    "NewCellVariable",
    "NewGlobalVariable",
    "NNModuleVariable",
    "NumpyNdarrayVariable",
    "NumpyVariable",
    "OptimizerVariable",
    "PlacementVariable",
    "PolyfilledFunctionVariable",
    "PythonModuleVariable",
    "RangeVariable",
    "RegexPatternVariable",
    "RemovableHandleVariable",
    "RepeatIteratorVariable",
    "RestrictedListSubclassVariable",
    "SDPAParamsVariable",
    "SkipFunctionVariable",
    "SliceVariable",
    "StringFormatVariable",
    "SuperVariable",
    "TensorVariable",
    "TorchCtxManagerClassVariable",
    "TorchInGraphFunctionVariable",
    "TorchVersionVariable",
    "TupleVariable",
    "UnknownVariable",
    "UnspecializedNNModuleVariable",
    "UnspecializedPythonVariable",
    "UntypedStorageVariable",
    "UserDefinedClassVariable",
    "UserDefinedObjectVariable",
    "UserFunctionVariable",
    "UserMethodVariable",
    "VariableTracker",
    "WithExitFunctionVariable",
]
|
pllava/lib/python3.10/site-packages/torch/_dynamo/variables/base.py
ADDED
|
@@ -0,0 +1,385 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
import collections
|
| 4 |
+
from enum import Enum
|
| 5 |
+
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING
|
| 6 |
+
|
| 7 |
+
from .. import variables
|
| 8 |
+
from ..current_scope_id import current_scope_id
|
| 9 |
+
from ..exc import unimplemented
|
| 10 |
+
from ..source import AttrSource, Source
|
| 11 |
+
from ..utils import istype
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
if TYPE_CHECKING:
|
| 15 |
+
from torch._dynamo.symbolic_convert import InstructionTranslator
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class MutableLocalSource(Enum):
    """Origin of the object behind a ``VariableTracker.mutable_local``.

    ``Existing``: the variable already existed before tracing and Dynamo
    began tracking it during introspection.
    ``Local``: the variable was freshly created while Dynamo was
    introspecting a function.
    """

    Existing = 0
    Local = 1
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class MutableLocalBase:
    """Base class for ``Variable.mutable_local`` markers."""

    def __init__(self, typ: MutableLocalSource) -> None:
        # When tracing HigherOrderOperators we must tell MutableLocals
        # created inside the operator apart from those created outside it.
        # For example, mutating `a` below is unsafe because it was
        # constructed in a different scope than the one `g` runs in:
        #
        #   def f(x):
        #       a = 1
        #       def g(x):
        #           nonlocal a
        #           a = 2
        #           return x
        #       return wrap(g, x) + a
        #
        # self.scope encodes this:
        #   scope == 0: the object pre-existed tracing
        #   scope == 1: created during Dynamo introspection at the top
        #               level (no HigherOrderOps involved)
        #   scope >= 2: created while introspecting a HigherOrderOp; the
        #               value is the HigherOrderOp nesting depth
        if typ is MutableLocalSource.Existing:
            self.scope = 0
        elif typ is MutableLocalSource.Local:
            self.scope = current_scope_id()
        else:
            unimplemented(f"Unsupported MutableLocalSource: {typ}")
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
class MutableLocal(MutableLocalBase):
    """Marker indicating the tagged object (list, iter, etc.) was built in
    local scope, so analysis may mutate it safely without leaking state.
    """

    def __init__(self) -> None:
        super().__init__(MutableLocalSource.Local)

    def __hash__(self):
        # Identity-based: every marker instance is distinct.
        return id(self)

    def __eq__(self, other):
        # Markers compare equal only to themselves.
        return other is self
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def _is_top_level_scope(scope_id):
|
| 84 |
+
return scope_id == 1
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def is_side_effect_safe(m: MutableLocalBase):
    """Return True if mutating the object tagged with *m* is permitted in
    the currently-active scope."""
    scope_id = current_scope_id()
    # At the top level (no HigherOrderOperators involved) we may mutate
    # variables created in this scope as well as pre-existing ones; inside
    # a HigherOrderOp, only variables created in that same scope may be
    # mutated locally.
    return _is_top_level_scope(scope_id) or m.scope == scope_id
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
class VariableTrackerMeta(type):
    # Every class created with this metaclass is recorded here, in
    # definition order.
    all_subclasses = []

    def __instancecheck__(cls, instance) -> bool:
        """Make ``isinstance`` work transparently with LazyVariableTracker."""
        lazy_cls = variables.LazyVariableTracker
        # Realize a lazy tracker only when checking against a concrete
        # subclass; isinstance checks against VariableTracker or
        # LazyVariableTracker themselves must not force realization.
        needs_realize = (
            type.__instancecheck__(lazy_cls, instance)
            and cls is not VariableTracker
            and cls is not lazy_cls
        )
        if needs_realize:
            instance = instance.realize()
        return type.__instancecheck__(cls, instance)

    def __init__(cls, name, bases, attrs) -> None:
        super().__init__(name, bases, attrs)
        VariableTrackerMeta.all_subclasses.append(cls)
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
class VariableTracker(metaclass=VariableTrackerMeta):
    """
    Base class for tracked locals and stack values

    VariableTracker instances are immutable and should be copied in
    order to change them.
    """

    # fields to leave unmodified in apply()
    _nonvar_fields = {
        "value",
        "guards",
        "source",
        "mutable_local",
        "parents_tracker",
        "user_code_variable_name",
    }

    def clone(self, **kwargs):
        """Shallow copy with some (optional) changes"""
        # Re-invoke the subclass constructor with the current instance
        # attributes, overridden by any kwargs supplied by the caller.
        args = dict(self.__dict__)
        args.update(kwargs)
        return self.__class__(**args)

    @classmethod
    def visit(
        cls,
        fn: Callable[["VariableTracker"], None],
        value: Any,
        cache: Optional[Dict[int, Any]] = None,
    ) -> None:
        """
        Walk value and call fn on all the VariableTracker instances

        ``value`` may be a VariableTracker, a list/tuple, or a dict; nested
        containers are traversed recursively.  ``cache`` (keyed by id())
        prevents revisiting shared sub-objects and cycles.
        """
        if cache is None:
            cache = {}

        idx = id(value)
        if idx in cache:
            return
        # save `value` to keep it alive and ensure id() isn't reused
        cache[idx] = value

        if isinstance(value, VariableTracker):
            value = value.unwrap()
            fn(value)
            value = value.unwrap()  # calling fn() might have realized it
            nonvars = value._nonvar_fields
            # Recurse into variable-holding attributes only; _nonvar_fields
            # are bookkeeping slots that never contain VariableTrackers.
            for key, subvalue in value.__dict__.items():
                if key not in nonvars:
                    cls.visit(fn, subvalue, cache)
        elif istype(value, (list, tuple)):
            for subvalue in value:
                cls.visit(fn, subvalue, cache)
        elif istype(value, (dict, collections.OrderedDict)):
            for subvalue in value.values():
                cls.visit(fn, subvalue, cache)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}()"

    def debug_repr(self):
        # Intended to be overridden to provide more info
        try:
            return repr(self.as_python_constant())
        except NotImplementedError:
            return repr(self)

    def python_type(self):
        """
        Abstract method to be implemented by subclasses of VariableTracker.

        This method should return the type represented by the instance of the subclass.
        The purpose is to provide a standardized way to retrieve the Python type information
        of the variable being tracked.

        Returns:
            type: The Python type (such as int, str, list, etc.) of the variable tracked by
                the subclass. If the type cannot be determined or is not relevant,
                leaving it undefined or invoking super() is always sound.

        Note:
            This is an abstract method and may be overridden in subclasses.

        Example:
            class SetVariable(VariableTracker):
                def python_type(self):
                    return set

        Raises:
            NotImplementedError: If the method is not implemented in a subclass.
        """
        try:
            return type(self.as_python_constant())
        except NotImplementedError:
            # `from None` hides the less informative inner traceback.
            raise NotImplementedError(f"{self} has no type") from None

    def as_python_constant(self):
        """For constants"""
        raise NotImplementedError(f"{self} is not a constant")

    def guard_as_python_constant(self):
        """Similar to as_python_constant(), but add ID_MATCH guards to try to force things to become constants"""
        try:
            return self.as_python_constant()
        except NotImplementedError as e:
            unimplemented(str(e))

    def is_python_constant(self):
        """Return True if as_python_constant() would succeed."""
        try:
            self.as_python_constant()
            return True
        except NotImplementedError:
            return False

    def make_guard(self, fn):
        """Build a guard from *fn* via this variable's source.

        Raises NotImplementedError when the variable has no source.
        """
        if self.source:
            return self.source.make_guard(fn)
        raise NotImplementedError

    def const_getattr(self, tx: "InstructionTranslator", name: str) -> Any:
        """getattr(self, name) returning a python constant"""
        raise NotImplementedError

    def var_getattr(self, tx: "InstructionTranslator", name: str) -> "VariableTracker":
        """getattr(self, name) returning a new variable"""
        value = self.const_getattr(tx, name)
        # Only literal constants may be wrapped; anything else needs a
        # subclass-specific var_getattr override.
        if not variables.ConstantVariable.is_literal(value):
            raise NotImplementedError
        source = None
        if self.source:
            source = AttrSource(self.source, name)
        return variables.ConstantVariable.create(value, source=source)

    def is_proxy(self):
        """Return True if as_proxy() would succeed."""
        try:
            self.as_proxy()
            return True
        except NotImplementedError:
            return False

    def as_proxy(self):
        raise NotImplementedError(str(self))

    def maybe_fx_node(self):
        """Return the underlying torch.fx Node, or None if there isn't one."""
        try:
            proxy = self.as_proxy()
            import torch.fx

            if isinstance(proxy, torch.fx.Proxy):
                return proxy.node
            return None
        except NotImplementedError:
            return None

    def reconstruct(self, codegen):
        raise NotImplementedError

    def can_reconstruct(self, tx):
        """If it is possible to reconstruct the Python object this
        VariableTracker represents."""
        assert tx is tx.output.root_tx, "Only root tx can reconstruct"
        try:
            from ..codegen import PyCodegen

            # Attempt a trial codegen; success means reconstruction works.
            cg = PyCodegen(tx)
            self.reconstruct(cg)
            return True
        except NotImplementedError:
            return False

    def unpack_var_sequence(self, tx) -> List["VariableTracker"]:
        raise NotImplementedError

    def force_unpack_var_sequence(self, tx) -> List["VariableTracker"]:
        # like unpack_var_sequence, but should only be used when it is
        # safe to eagerly (vs. lazily) unpack this variable.
        # e.g. map(f, x) is normally evaluated lazily but sometimes
        # we want to force eager unpacking, e.g. when converting to a list.
        # NOTE: this method is allowed to mutate the VariableTracker, so
        # it should only be called once.
        return self.unpack_var_sequence(tx)

    def has_unpack_var_sequence(self, tx) -> bool:
        """Return True if unpack_var_sequence() would succeed."""
        try:
            self.unpack_var_sequence(tx)
            return True
        except NotImplementedError:
            return False

    # NB: don't call force_unpack_var_sequence, especially if it mutates!
    def has_force_unpack_var_sequence(self, tx) -> bool:
        return self.has_unpack_var_sequence(tx)

    def inspect_parameter_names(self) -> List[str]:
        unimplemented(f"inspect_parameter_names: {self}")

    def call_hasattr(self, tx: "InstructionTranslator", name: str) -> "VariableTracker":
        unimplemented(f"hasattr {self.__class__.__name__} {name}")

    def call_function(
        self,
        tx: "InstructionTranslator",
        args: "List[VariableTracker]",
        kwargs: "Dict[str, VariableTracker]",
    ) -> "VariableTracker":
        unimplemented(f"call_function {self} {args} {kwargs}")

    def call_method(
        self,
        tx,
        name,
        args: "List[VariableTracker]",
        kwargs: "Dict[str, VariableTracker]",
    ) -> "VariableTracker":
        # Generic fallbacks for __len__ and __getattr__; everything else
        # must be handled by a subclass override.
        if name == "__len__" and self.has_unpack_var_sequence(tx):
            assert not (args or kwargs)
            return variables.ConstantVariable.create(len(self.unpack_var_sequence(tx)))
        elif (
            name == "__getattr__"
            and len(args) == 1
            and args[0].is_python_constant()
            and not kwargs
        ):
            return self.var_getattr(tx, args[0].as_python_constant())
        unimplemented(f"call_method {self} {name} {args} {kwargs}")

    def set_name_hint(self, name):
        # No-op by default; subclasses may record a suggested variable name.
        pass

    def realize(self) -> "VariableTracker":
        """Used by LazyVariableTracker to build the real VariableTracker"""
        return self

    def unwrap(self) -> "VariableTracker":
        """Used by LazyVariableTracker to return the real VariableTracker if it already exists"""
        return self

    def is_realized(self):
        """Used by LazyVariableTracker to indicate an unrealized node"""
        return True

    def next_variable(self, tx):
        unimplemented(f"next({self})")

    def is_strict_mode(self, tx):
        return tx.strict_checks_fn and tx.strict_checks_fn(self)

    def __init__(
        self,
        *,
        source: Source = None,
        mutable_local: MutableLocal = None,
    ) -> None:
        super().__init__()
        # source: where this value came from in user code (guards/codegen);
        # mutable_local: non-None iff analysis may mutate this variable.
        self.source = source
        self.mutable_local = mutable_local
|
| 375 |
+
|
| 376 |
+
|
| 377 |
+
def typestr(*objs):
    """Return a compact human-readable description of *objs*.

    A single VariableTracker is rendered via ``str``; any other single
    object is rendered by its type name; several objects are rendered
    space-separated, each formatted by the same rule.
    """
    if len(objs) != 1:
        return " ".join(map(typestr, objs))
    obj = objs[0]
    if isinstance(obj, VariableTracker):
        return str(obj)
    return type(obj).__name__
|
pllava/lib/python3.10/site-packages/torch/_dynamo/variables/builder.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|