Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- valley/lib/python3.10/site-packages/torch/_dynamo/__pycache__/callback.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/__pycache__/code_context.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/__pycache__/compiled_autograd.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/__pycache__/comptime.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/__pycache__/exc.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/__pycache__/external_utils.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/__pycache__/guards.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/__pycache__/mutation_guard.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/__pycache__/polyfill.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/__pycache__/replay_record.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/__pycache__/types.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/backends/__init__.py +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/__init__.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/common.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/cudagraphs.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/debugging.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/distributed.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/inductor.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/onnxrt.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/registry.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/tensorrt.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/torchxla.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/tvm.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/backends/common.py +125 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/backends/cudagraphs.py +256 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/backends/debugging.py +315 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/backends/distributed.py +632 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/backends/inductor.py +16 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/backends/onnxrt.py +37 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/backends/registry.py +115 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/backends/tensorrt.py +14 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/backends/torchxla.py +46 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/backends/tvm.py +193 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/cache_size.py +173 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py +1634 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/repro/__init__.py +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/__init__.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_aot.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_dynamo.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/repro/after_aot.py +958 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/repro/after_dynamo.py +586 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/test_minifier_common.py +245 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/trace_rules.py +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/variables/__init__.py +167 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/base.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builder.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/constant.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/ctx_manager.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/functions.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/higher_order_ops.cpython-310.pyc +0 -0
valley/lib/python3.10/site-packages/torch/_dynamo/__pycache__/callback.cpython-310.pyc
ADDED
|
Binary file (2.94 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/__pycache__/code_context.cpython-310.pyc
ADDED
|
Binary file (1.28 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/__pycache__/compiled_autograd.cpython-310.pyc
ADDED
|
Binary file (12.8 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/__pycache__/comptime.cpython-310.pyc
ADDED
|
Binary file (16 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/__pycache__/exc.cpython-310.pyc
ADDED
|
Binary file (11.6 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/__pycache__/external_utils.cpython-310.pyc
ADDED
|
Binary file (3.8 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/__pycache__/guards.cpython-310.pyc
ADDED
|
Binary file (58.3 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/__pycache__/mutation_guard.cpython-310.pyc
ADDED
|
Binary file (4.45 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/__pycache__/polyfill.cpython-310.pyc
ADDED
|
Binary file (1.94 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/__pycache__/replay_record.cpython-310.pyc
ADDED
|
Binary file (4.2 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/__pycache__/types.cpython-310.pyc
ADDED
|
Binary file (3.57 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/backends/__init__.py
ADDED
|
File without changes
|
valley/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (174 Bytes). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/common.cpython-310.pyc
ADDED
|
Binary file (3.7 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/cudagraphs.cpython-310.pyc
ADDED
|
Binary file (7.45 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/debugging.cpython-310.pyc
ADDED
|
Binary file (9.43 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/distributed.cpython-310.pyc
ADDED
|
Binary file (18.9 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/inductor.cpython-310.pyc
ADDED
|
Binary file (539 Bytes). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/onnxrt.cpython-310.pyc
ADDED
|
Binary file (1.27 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/registry.cpython-310.pyc
ADDED
|
Binary file (3.71 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/tensorrt.cpython-310.pyc
ADDED
|
Binary file (250 Bytes). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/torchxla.cpython-310.pyc
ADDED
|
Binary file (1.41 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/tvm.cpython-310.pyc
ADDED
|
Binary file (5.51 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/backends/common.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
import contextlib
|
| 4 |
+
import functools
|
| 5 |
+
import logging
|
| 6 |
+
from unittest.mock import patch
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
from torch._dynamo import disable
|
| 10 |
+
from torch._dynamo.utils import counters, defake, flatten_graph_inputs
|
| 11 |
+
from torch._functorch.aot_autograd import aot_module_simplified
|
| 12 |
+
from torch.utils._python_dispatch import _disable_current_modes
|
| 13 |
+
|
| 14 |
+
log = logging.getLogger(__name__)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class AotAutograd:
|
| 18 |
+
def __init__(self, **kwargs):
|
| 19 |
+
self.__name__ = "compiler_fn"
|
| 20 |
+
self.kwargs = kwargs
|
| 21 |
+
|
| 22 |
+
def __call__(self, gm: torch.fx.GraphModule, example_inputs):
|
| 23 |
+
if any(isinstance(x, (list, tuple, dict)) for x in example_inputs):
|
| 24 |
+
return flatten_graph_inputs(
|
| 25 |
+
gm,
|
| 26 |
+
example_inputs,
|
| 27 |
+
self,
|
| 28 |
+
)
|
| 29 |
+
|
| 30 |
+
# Hack to get around circular import problems with aot_eager_decomp_partition
|
| 31 |
+
if callable(self.kwargs.get("decompositions")):
|
| 32 |
+
self.kwargs["decompositions"] = self.kwargs["decompositions"]()
|
| 33 |
+
|
| 34 |
+
# NB: dont delete counter increment
|
| 35 |
+
counters["aot_autograd"]["total"] += 1
|
| 36 |
+
use_fallback = False
|
| 37 |
+
|
| 38 |
+
if use_fallback:
|
| 39 |
+
log.debug("Unable to use AOT Autograd because graph has mutation")
|
| 40 |
+
counters["aot_autograd"]["not_ok"] += 1
|
| 41 |
+
return gm
|
| 42 |
+
|
| 43 |
+
# OK attempt to compile
|
| 44 |
+
|
| 45 |
+
def _wrapped_bw_compiler(*args, **kwargs):
|
| 46 |
+
# stop TorchDynamo from trying to compile our generated backwards pass
|
| 47 |
+
return disable(disable(bw_compiler)(*args, **kwargs))
|
| 48 |
+
|
| 49 |
+
bw_compiler = self.kwargs.get("bw_compiler") or self.kwargs["fw_compiler"]
|
| 50 |
+
self.kwargs["bw_compiler"] = _wrapped_bw_compiler
|
| 51 |
+
self.kwargs["inference_compiler"] = (
|
| 52 |
+
self.kwargs.get("inference_compiler") or self.kwargs["fw_compiler"]
|
| 53 |
+
)
|
| 54 |
+
|
| 55 |
+
from functorch.compile import nop
|
| 56 |
+
|
| 57 |
+
from torch._inductor.debug import enable_aot_logging
|
| 58 |
+
|
| 59 |
+
# debug asserts slow down compile time noticeably,
|
| 60 |
+
# So only default them on when the aot_eager backend is used.
|
| 61 |
+
if self.kwargs.get("fw_compiler", None) == nop:
|
| 62 |
+
patch_config = patch("functorch.compile.config.debug_assert", True)
|
| 63 |
+
else:
|
| 64 |
+
patch_config = contextlib.nullcontext()
|
| 65 |
+
|
| 66 |
+
try:
|
| 67 |
+
# NB: NOT cloned!
|
| 68 |
+
with enable_aot_logging(), patch_config:
|
| 69 |
+
cg = aot_module_simplified(gm, example_inputs, **self.kwargs)
|
| 70 |
+
counters["aot_autograd"]["ok"] += 1
|
| 71 |
+
return disable(cg)
|
| 72 |
+
except Exception:
|
| 73 |
+
counters["aot_autograd"]["not_ok"] += 1
|
| 74 |
+
raise
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def aot_autograd(**kwargs):
|
| 78 |
+
return AotAutograd(**kwargs)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def mem_efficient_fusion_kwargs(use_decomps):
|
| 82 |
+
from functorch.compile import (
|
| 83 |
+
default_decompositions,
|
| 84 |
+
min_cut_rematerialization_partition,
|
| 85 |
+
ts_compile,
|
| 86 |
+
)
|
| 87 |
+
|
| 88 |
+
kwargs = {
|
| 89 |
+
# these are taken from memory_efficient_fusion()
|
| 90 |
+
"fw_compiler": ts_compile,
|
| 91 |
+
"bw_compiler": ts_compile,
|
| 92 |
+
"partition_fn": min_cut_rematerialization_partition,
|
| 93 |
+
}
|
| 94 |
+
|
| 95 |
+
if use_decomps:
|
| 96 |
+
kwargs["decompositions"] = default_decompositions
|
| 97 |
+
|
| 98 |
+
return kwargs
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def fake_tensor_unsupported(fn):
|
| 102 |
+
"""
|
| 103 |
+
Decorator for backends that need real inputs. We swap out fake
|
| 104 |
+
tensors for zero tensors.
|
| 105 |
+
"""
|
| 106 |
+
|
| 107 |
+
@functools.wraps(fn)
|
| 108 |
+
def wrapper(model, inputs, **kwargs):
|
| 109 |
+
with _disable_current_modes():
|
| 110 |
+
inputs = list(map(defake, inputs))
|
| 111 |
+
return fn(model, inputs, **kwargs)
|
| 112 |
+
|
| 113 |
+
return wrapper
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def device_from_inputs(example_inputs) -> torch.device:
|
| 117 |
+
for x in example_inputs:
|
| 118 |
+
if hasattr(x, "device"):
|
| 119 |
+
return x.device
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def dtype_from_inputs(example_inputs) -> torch.dtype:
|
| 123 |
+
for x in example_inputs:
|
| 124 |
+
if hasattr(x, "dtype"):
|
| 125 |
+
return x.dtype
|
valley/lib/python3.10/site-packages/torch/_dynamo/backends/cudagraphs.py
ADDED
|
@@ -0,0 +1,256 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
import functools
|
| 4 |
+
import operator
|
| 5 |
+
from collections import defaultdict
|
| 6 |
+
from typing import Dict, List, Optional
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
from torch._dynamo import config
|
| 10 |
+
from torch._dynamo.backends.common import aot_autograd
|
| 11 |
+
from torch._dynamo.backends.debugging import boxed_nop
|
| 12 |
+
from torch._inductor.cudagraph_utils import (
|
| 13 |
+
BoxedDeviceIndex,
|
| 14 |
+
check_multiple_devices_or_any_cpu_nodes,
|
| 15 |
+
format_default_skip_message,
|
| 16 |
+
get_mutation_stack_trace,
|
| 17 |
+
get_placeholders,
|
| 18 |
+
log_cudagraph_skip_and_bump_counter,
|
| 19 |
+
)
|
| 20 |
+
from torch._inductor.utils import (
|
| 21 |
+
BoxedBool,
|
| 22 |
+
count_tangents,
|
| 23 |
+
get_first_incompatible_cudagraph_node,
|
| 24 |
+
num_fw_fixed_arguments,
|
| 25 |
+
output_node,
|
| 26 |
+
)
|
| 27 |
+
|
| 28 |
+
from torch.multiprocessing.reductions import StorageWeakRef
|
| 29 |
+
from .registry import register_backend
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def find_input_mutations(g):
|
| 33 |
+
def meta_fk(meta):
|
| 34 |
+
return meta["val"] if "val" in meta else meta["fake_result"]
|
| 35 |
+
|
| 36 |
+
inputs = defaultdict(set)
|
| 37 |
+
input_idx = 0
|
| 38 |
+
mutated_inputs = set()
|
| 39 |
+
for n in g.nodes:
|
| 40 |
+
if n.op == "placeholder":
|
| 41 |
+
if isinstance(meta_fk(n.meta), torch.Tensor):
|
| 42 |
+
inputs[StorageWeakRef(meta_fk(n.meta)._typed_storage())].add(input_idx)
|
| 43 |
+
input_idx += 1
|
| 44 |
+
elif n.op == "call_function":
|
| 45 |
+
if n.target is operator.getitem:
|
| 46 |
+
continue
|
| 47 |
+
schema = n.target._schema
|
| 48 |
+
for i, arg in enumerate(schema.arguments):
|
| 49 |
+
if i < len(n.args):
|
| 50 |
+
argument = n.args[i]
|
| 51 |
+
else:
|
| 52 |
+
if arg.name not in n.kwargs:
|
| 53 |
+
continue
|
| 54 |
+
argument = n.kwargs[arg.name]
|
| 55 |
+
mut_arg = False
|
| 56 |
+
if arg.alias_info:
|
| 57 |
+
if arg.alias_info.is_write:
|
| 58 |
+
mut_arg = True
|
| 59 |
+
if mut_arg:
|
| 60 |
+
# TODO: not correct for args that contain tensors in a struct
|
| 61 |
+
# like list
|
| 62 |
+
mutated_inputs |= inputs[
|
| 63 |
+
StorageWeakRef(meta_fk(argument.meta)._typed_storage())
|
| 64 |
+
]
|
| 65 |
+
|
| 66 |
+
# TODO: error on unrecognized nodes
|
| 67 |
+
return mutated_inputs
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def get_device_node_mapping(gm: torch.fx.GraphModule):
|
| 71 |
+
device_node_mapping: Dict[torch.device, torch.fx.Node] = {}
|
| 72 |
+
for n in gm.graph.nodes:
|
| 73 |
+
t = n.meta.get("val", None)
|
| 74 |
+
if isinstance(t, torch.Tensor) and t.device not in device_node_mapping:
|
| 75 |
+
device_node_mapping[t.device] = n
|
| 76 |
+
return device_node_mapping
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def check_for_mutation_ignore_cuda_graph_managed_tensor(
|
| 80 |
+
aot_model: torch.fx.GraphModule, num_fixed
|
| 81 |
+
) -> Optional[str]:
|
| 82 |
+
mutation_indices = find_input_mutations(aot_model.graph) - set(range(num_fixed))
|
| 83 |
+
if not mutation_indices:
|
| 84 |
+
return None
|
| 85 |
+
|
| 86 |
+
placeholders = [node for node in aot_model.graph.nodes if node.op == "placeholder"]
|
| 87 |
+
return get_mutation_stack_trace(placeholders, mutation_indices)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def check_for_skip(aot_model: torch.fx.GraphModule, num_fixed) -> Optional[str]:
|
| 91 |
+
if not config.cudagraph_backend_support_input_mutation:
|
| 92 |
+
if mut_skip := check_for_mutation_ignore_cuda_graph_managed_tensor(
|
| 93 |
+
aot_model, num_fixed
|
| 94 |
+
):
|
| 95 |
+
return mut_skip
|
| 96 |
+
|
| 97 |
+
if skip := check_multiple_devices_or_any_cpu_nodes(
|
| 98 |
+
get_device_node_mapping(aot_model)
|
| 99 |
+
):
|
| 100 |
+
return skip
|
| 101 |
+
|
| 102 |
+
if node := get_first_incompatible_cudagraph_node(aot_model):
|
| 103 |
+
return format_default_skip_message(f"incompatible op ({node.name})")
|
| 104 |
+
|
| 105 |
+
return None
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def get_device_index(gm) -> int:
|
| 109 |
+
device = next(iter(get_device_node_mapping(gm)))
|
| 110 |
+
assert device.type == "cuda"
|
| 111 |
+
return device.index
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def get_stack_traces(gm) -> List[Optional[str]]:
|
| 115 |
+
output = output_node(gm)
|
| 116 |
+
assert len(output.args) == 1
|
| 117 |
+
return [
|
| 118 |
+
(arg.stack_trace if isinstance(arg, torch.fx.node.Node) else None)
|
| 119 |
+
for arg in output.args[0]
|
| 120 |
+
]
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def cudagraphs(dynamo_model, dynamo_inputs):
|
| 124 |
+
from torch._inductor.cudagraph_trees import cudagraphify_impl
|
| 125 |
+
|
| 126 |
+
do_cudagraphs = BoxedBool(True)
|
| 127 |
+
boxed_device_index = BoxedDeviceIndex(None)
|
| 128 |
+
|
| 129 |
+
def forward_cudagraphs(aot_model, aot_inputs, is_inference=False):
|
| 130 |
+
interp = boxed_nop(aot_model, aot_inputs)
|
| 131 |
+
fixed = num_fw_fixed_arguments(len(dynamo_inputs), len(aot_inputs))
|
| 132 |
+
if skip_msg := check_for_skip(aot_model, fixed):
|
| 133 |
+
BoxedBool.disable(do_cudagraphs)
|
| 134 |
+
log_cudagraph_skip_and_bump_counter(
|
| 135 |
+
f"skipping cudagraphs due to {skip_msg}"
|
| 136 |
+
)
|
| 137 |
+
return interp
|
| 138 |
+
|
| 139 |
+
boxed_device_index.set(get_device_index(aot_model))
|
| 140 |
+
out = cudagraphify_impl(
|
| 141 |
+
interp,
|
| 142 |
+
aot_inputs,
|
| 143 |
+
range(fixed),
|
| 144 |
+
device_index=boxed_device_index.value,
|
| 145 |
+
is_backward=False,
|
| 146 |
+
is_inference=False,
|
| 147 |
+
stack_traces=get_stack_traces(aot_model),
|
| 148 |
+
placeholders=get_placeholders(aot_model.graph),
|
| 149 |
+
mutated_input_idxs=find_input_mutations(aot_model.graph),
|
| 150 |
+
)
|
| 151 |
+
out._boxed_call = True
|
| 152 |
+
return out
|
| 153 |
+
|
| 154 |
+
def backward_cudagraphs(aot_model, aot_inputs):
|
| 155 |
+
interp = boxed_nop(aot_model, aot_inputs)
|
| 156 |
+
if not do_cudagraphs:
|
| 157 |
+
return aot_model
|
| 158 |
+
|
| 159 |
+
fixed = count_tangents(aot_model)
|
| 160 |
+
if skip_msg := check_for_skip(aot_model, fixed):
|
| 161 |
+
log_cudagraph_skip_and_bump_counter(
|
| 162 |
+
"skipping cudagraphs due to %s", skip_msg
|
| 163 |
+
)
|
| 164 |
+
|
| 165 |
+
# See [Backward Generation Handling]
|
| 166 |
+
manager = torch._inductor.cudagraph_trees.get_manager(
|
| 167 |
+
boxed_device_index.value, create_if_none_exists=False
|
| 168 |
+
)
|
| 169 |
+
assert manager is not None
|
| 170 |
+
|
| 171 |
+
def fn(inputs):
|
| 172 |
+
manager.set_to_running_backward()
|
| 173 |
+
return aot_model(inputs)
|
| 174 |
+
|
| 175 |
+
fn._boxed_call = True
|
| 176 |
+
return fn
|
| 177 |
+
|
| 178 |
+
out = cudagraphify_impl(
|
| 179 |
+
interp,
|
| 180 |
+
aot_inputs,
|
| 181 |
+
range(fixed),
|
| 182 |
+
device_index=get_device_index(aot_model),
|
| 183 |
+
is_backward=True,
|
| 184 |
+
is_inference=False,
|
| 185 |
+
stack_traces=get_stack_traces(aot_model),
|
| 186 |
+
placeholders=get_placeholders(aot_model.graph),
|
| 187 |
+
mutated_input_idxs=find_input_mutations(aot_model.graph),
|
| 188 |
+
)
|
| 189 |
+
out._boxed_call = True
|
| 190 |
+
return out
|
| 191 |
+
|
| 192 |
+
aot_cudagraphs = aot_autograd(
|
| 193 |
+
fw_compiler=forward_cudagraphs,
|
| 194 |
+
bw_compiler=backward_cudagraphs,
|
| 195 |
+
inference_compiler=functools.partial(forward_cudagraphs, is_inference=True),
|
| 196 |
+
keep_inference_input_mutations=torch._dynamo.config.cudagraph_backend_keep_input_mutation,
|
| 197 |
+
)
|
| 198 |
+
return aot_cudagraphs(dynamo_model, dynamo_inputs)
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
class CudagraphsBackend:
|
| 202 |
+
compiler_name = "cudagraphs"
|
| 203 |
+
|
| 204 |
+
@staticmethod
|
| 205 |
+
def reset():
|
| 206 |
+
from torch._inductor.cudagraph_trees import reset_cudagraph_trees
|
| 207 |
+
|
| 208 |
+
reset_cudagraph_trees()
|
| 209 |
+
|
| 210 |
+
@staticmethod
|
| 211 |
+
def __call__(model, inputs):
|
| 212 |
+
return cudagraphs(model, inputs)
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
# aot_cudagraphs only applies CUDA graphs to the graph. It is also helpful
|
| 216 |
+
# for debugging and can serve as a perf baseline.
|
| 217 |
+
register_backend(name="cudagraphs", compiler_fn=CudagraphsBackend())
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
def cudagraphs_inner(model, inputs, copy_outputs=True, copy_inputs=True):
|
| 221 |
+
"""This isn't registered as a backend, but is used in some benchmarks"""
|
| 222 |
+
assert isinstance(inputs, (list, tuple))
|
| 223 |
+
if copy_inputs:
|
| 224 |
+
static_inputs = [torch.zeros_like(x) for x in inputs]
|
| 225 |
+
else:
|
| 226 |
+
static_inputs = list(inputs)
|
| 227 |
+
|
| 228 |
+
# warmup
|
| 229 |
+
torch.cuda.synchronize()
|
| 230 |
+
stream = torch.cuda.Stream()
|
| 231 |
+
stream.wait_stream(torch.cuda.current_stream())
|
| 232 |
+
with torch.cuda.stream(stream):
|
| 233 |
+
model(*inputs)
|
| 234 |
+
stream.synchronize()
|
| 235 |
+
torch.cuda.current_stream().wait_stream(stream)
|
| 236 |
+
torch.cuda.synchronize()
|
| 237 |
+
|
| 238 |
+
# record
|
| 239 |
+
graph = torch.cuda.CUDAGraph()
|
| 240 |
+
with torch.cuda.graph(graph, stream=stream):
|
| 241 |
+
static_outputs = model(*static_inputs)
|
| 242 |
+
if not isinstance(static_outputs, (list, tuple)):
|
| 243 |
+
static_outputs = (static_outputs,)
|
| 244 |
+
|
| 245 |
+
def run(*new_inputs):
|
| 246 |
+
assert len(static_inputs) == len(new_inputs)
|
| 247 |
+
if copy_inputs:
|
| 248 |
+
for dst, src in zip(static_inputs, new_inputs):
|
| 249 |
+
dst.copy_(src)
|
| 250 |
+
graph.replay()
|
| 251 |
+
if copy_outputs:
|
| 252 |
+
return [x.clone() for x in static_outputs]
|
| 253 |
+
else:
|
| 254 |
+
return static_outputs
|
| 255 |
+
|
| 256 |
+
return run
|
valley/lib/python3.10/site-packages/torch/_dynamo/backends/debugging.py
ADDED
|
@@ -0,0 +1,315 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
import dataclasses
|
| 4 |
+
import functools
|
| 5 |
+
from importlib import import_module
|
| 6 |
+
from typing import Any, List, Optional
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
|
| 10 |
+
from functorch.compile import min_cut_rematerialization_partition
|
| 11 |
+
from torch import _guards
|
| 12 |
+
from torch._functorch import config as functorch_config
|
| 13 |
+
from torch._functorch.compilers import ts_compile
|
| 14 |
+
from .common import aot_autograd
|
| 15 |
+
from .registry import register_debug_backend as register_backend
|
| 16 |
+
|
| 17 |
+
"""
|
| 18 |
+
This file contains TorchDynamo backends intended for debugging uses.
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@register_backend
|
| 23 |
+
def eager(gm, fake_tensor_inputs):
|
| 24 |
+
return gm.forward
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@register_backend
|
| 28 |
+
def eager_noexcept(gm, fake_tensor_inputs):
|
| 29 |
+
# This backend is intended to check that dynamo-generated GraphModules
|
| 30 |
+
# do not cause errors.
|
| 31 |
+
def inner(*args):
|
| 32 |
+
try:
|
| 33 |
+
return gm(*args)
|
| 34 |
+
except Exception as e:
|
| 35 |
+
raise torch._dynamo.exc.TorchDynamoException(
|
| 36 |
+
"Unexpected exception when running generated GraphModule"
|
| 37 |
+
) from e
|
| 38 |
+
|
| 39 |
+
return inner
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
@register_backend
|
| 43 |
+
def pre_dispatch_eager(gm, fake_tensor_inputs):
|
| 44 |
+
from torch.fx.experimental.proxy_tensor import make_fx
|
| 45 |
+
|
| 46 |
+
def runnable_gm(*args):
|
| 47 |
+
return torch.fx.Interpreter(gm).run(*args)
|
| 48 |
+
|
| 49 |
+
pre_dispatch_gm = make_fx(runnable_gm, pre_dispatch=True)(*fake_tensor_inputs)
|
| 50 |
+
pre_dispatch_gm.print_readable()
|
| 51 |
+
|
| 52 |
+
return pre_dispatch_gm
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
@register_backend
|
| 56 |
+
def eager_debug(gm, fake_tensor_inputs):
|
| 57 |
+
from torch._subclasses.schema_check_mode import SchemaCheckMode
|
| 58 |
+
|
| 59 |
+
# We could add more debugging bits here.
|
| 60 |
+
# Right now, this backend can be used to check for and error on
|
| 61 |
+
# custom dispatcher ops that have incorrect schemas.
|
| 62 |
+
def inner(*args):
|
| 63 |
+
with SchemaCheckMode():
|
| 64 |
+
return torch.fx.Interpreter(gm).run(*args)
|
| 65 |
+
|
| 66 |
+
return inner
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
@register_backend(name="ts")
|
| 70 |
+
def torchscript(gm, fake_tensor_inputs):
|
| 71 |
+
return torch.jit.script(gm)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
# used boxed call to discard inputs when they are no longer needed
|
| 75 |
+
def boxed_nop(fx_g, example_inputs):
|
| 76 |
+
def run(args):
|
| 77 |
+
return torch.fx.Interpreter(fx_g).boxed_run(args)
|
| 78 |
+
|
| 79 |
+
run._boxed_call = True
|
| 80 |
+
return run
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
# Useful for debugging purpose
|
| 84 |
+
# aot_eager uses AOT Autograd backend with nop compiler. It is helpful in debugging.
|
| 85 |
+
aot_eager = aot_autograd(
|
| 86 |
+
fw_compiler=boxed_nop,
|
| 87 |
+
partition_fn=min_cut_rematerialization_partition,
|
| 88 |
+
keep_inference_input_mutations=True,
|
| 89 |
+
)
|
| 90 |
+
register_backend(name="aot_eager", compiler_fn=aot_eager)
|
| 91 |
+
|
| 92 |
+
aot_eager_default_partitioner = aot_autograd(
|
| 93 |
+
fw_compiler=boxed_nop, keep_inference_input_mutations=True
|
| 94 |
+
)
|
| 95 |
+
register_backend(
|
| 96 |
+
name="aot_eager_default_partitioner", compiler_fn=aot_eager_default_partitioner
|
| 97 |
+
)
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
# Uses TorchInductor AOT Autograd decomps and partitioner to isolate aot vs
|
| 101 |
+
# inductor problems.
|
| 102 |
+
# aot_eager_decomp_partition just replaces the inductor compiler with nop to help
|
| 103 |
+
# isolate inductor vs aot_eager errors
|
| 104 |
+
def aot_eager_decomp_partition(gm, fake_tensor_inputs):
|
| 105 |
+
with functorch_config.patch(unlift_effect_tokens=True):
|
| 106 |
+
return aot_autograd(
|
| 107 |
+
# these are taken from memory_efficient_fusion()
|
| 108 |
+
fw_compiler=boxed_nop,
|
| 109 |
+
bw_compiler=boxed_nop,
|
| 110 |
+
# NB: lambda here is to delay import of inductor
|
| 111 |
+
decompositions=lambda: import_module(
|
| 112 |
+
"torch._inductor.compile_fx"
|
| 113 |
+
).select_decomp_table(),
|
| 114 |
+
partition_fn=functools.partial(
|
| 115 |
+
min_cut_rematerialization_partition, compiler="inductor"
|
| 116 |
+
),
|
| 117 |
+
)(gm, fake_tensor_inputs)
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
register_backend(
|
| 121 |
+
name="aot_eager_decomp_partition", compiler_fn=aot_eager_decomp_partition
|
| 122 |
+
)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
# AOT Autograd with torchscript backend. Default partitioner.
|
| 126 |
+
# aot_ts uses torchscript backend. We can use this with both nnc and nvfuser
|
| 127 |
+
# by using the relevant fuser with torch.jit.fuser(...)
|
| 128 |
+
aot_ts = aot_autograd(fw_compiler=ts_compile)
|
| 129 |
+
register_backend(name="aot_ts", compiler_fn=aot_ts)
|
| 130 |
+
|
| 131 |
+
# These buggy backends are used for inducing bugs so that we can test
|
| 132 |
+
# our repro extraction / minifier scripts
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
class ReluCompileError(Exception):
|
| 136 |
+
pass
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
class TestingOnlyCompileError(Exception):
    """Generic compile-time failure raised by the TESTING_ONLY backends below
    to simulate a backend compiler error."""

    pass
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
@register_backend
def relu_compile_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs):
    """Buggy backend (testing only): fail compilation iff the graph contains
    a ``torch.relu`` node; otherwise pass the graph through unchanged."""
    if any(node.target == torch.relu for node in gm.graph.nodes):
        raise ReluCompileError
    return gm
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
@register_backend
def relu_runtime_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs):
    """Buggy backend (testing only): rewrite every ``torch.relu`` node into a
    failing ``torch._assert`` so the compiled graph raises at runtime."""
    for node in gm.graph.nodes:
        if node.target != torch.relu:
            continue
        node.target = torch._assert
        node.args = (False, "ReluRuntimeError")
    gm.recompile()
    return gm
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
@register_backend
def relu_accuracy_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs):
    """Buggy backend (testing only): silently replace ``relu(x)`` with
    ``x + 1`` so the compiled graph produces wrong (but well-formed) values,
    inducing an accuracy mismatch for the minifier to find."""
    for node in gm.graph.nodes:
        if node.target != torch.relu:
            continue
        node.target = torch.add
        node.args = (node.args[0], 1)
    gm.recompile()

    return gm
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
@register_backend
def non_leaf_compile_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs):
    """Buggy backend (testing only): fail compilation when any example input
    is a non-leaf tensor.

    Graphs containing no ``call_function`` node are passed through untouched,
    so that trivial graphs never trip the error
    (see https://github.com/pytorch/pytorch/issues/102898).
    """
    has_real_op = any(node.op == "call_function" for node in gm.graph.nodes)
    if not has_real_op:
        return gm
    if any(not t.is_leaf for t in example_inputs):
        raise TestingOnlyCompileError
    return gm
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
@dataclasses.dataclass
class ExplainOutput:
    """
    This is the output of :func:`torch._dynamo.explain()`
    There is no reason to create this class directly; ``explain()`` builds it.
    """

    # All graphs Dynamo captured for the compiled function.
    graphs: List[torch.fx.GraphModule]
    graph_count: int
    graph_break_count: int
    break_reasons: List[
        Any
    ]  # Type is GraphCompileReason but doesn't matter for this purpose
    op_count: int
    ops_per_graph: Optional[List[torch.fx.Node]] = None
    out_guards: Optional[List[_guards.Guard]] = None
    compile_times: Optional[str] = None

    def __str__(self):
        """Render a human-readable, line-oriented summary of the capture:
        counts, break reasons with user stacks, and (when populated)
        per-graph ops, guards, and compile times."""
        output = f"Graph Count: {self.graph_count}\n"
        output += f"Graph Break Count: {self.graph_break_count}\n"
        output += f"Op Count: {self.op_count}\n"

        output += "Break Reasons:\n"
        for idx, break_reason in enumerate(self.break_reasons):
            output += f"  Break Reason {idx+1}:\n"
            output += f"    Reason: {break_reason.reason}\n"
            output += "    User Stack:\n"
            for frame_summary in break_reason.user_stack:
                output += f"      {frame_summary}\n"

        if self.ops_per_graph is not None:
            output += "Ops per Graph:\n"
            for idx, ops in enumerate(self.ops_per_graph):
                output += f"  Ops {idx+1}:\n"
                for op in ops:
                    output += f"    {op}\n"

        if self.out_guards is not None:
            output += "Out Guards:\n"
            for i, guard in enumerate(self.out_guards):
                output += f"  Guard {i+1}:\n"
                # BUGFIX: previously the guard line lacked a trailing newline,
                # so consecutive guards (and the following section) ran
                # together on one line.
                output += f"    {str(guard)}\n"

        if self.compile_times is not None:
            output += f"Compile Times: {self.compile_times}\n"
        return output
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
def _explain_graph_detail(
|
| 237 |
+
gm: torch.fx.GraphModule, graphs, op_count, ops_per_graph, break_reasons
|
| 238 |
+
):
|
| 239 |
+
"""
|
| 240 |
+
This function is a utility which processes a torch.fx.GraphModule and
|
| 241 |
+
accumulates information about its ops, graph breaks, and other details. It
|
| 242 |
+
is intended to be used by the ExplainWithBackend class and
|
| 243 |
+
`torch._dynamo.explain()` to provide details from Dynamo's graph capture.
|
| 244 |
+
|
| 245 |
+
Parameters:
|
| 246 |
+
gm (torch.fx.GraphModule): The GraphModule to be processed.
|
| 247 |
+
graphs (list): A list that accumulates all the GraphModules processed.
|
| 248 |
+
op_count (int): The total count of operations in all GraphModules processed so far.
|
| 249 |
+
ops_per_graph (list): A list that accumulates the operations of each GraphModule.
|
| 250 |
+
break_reasons (list): A list that accumulates the reasons for breaks in each GraphModule.
|
| 251 |
+
|
| 252 |
+
Returns:
|
| 253 |
+
tuple: A tuple containing the processed GraphModule, the updated lists of graphs,
|
| 254 |
+
operations per graph, and break reasons, and the updated operation count.
|
| 255 |
+
"""
|
| 256 |
+
graphs.append(gm)
|
| 257 |
+
ops = [node.target for node in gm.graph.nodes if node.op == "call_function"]
|
| 258 |
+
op_count += len(ops)
|
| 259 |
+
ops_per_graph.append(ops)
|
| 260 |
+
if gm.compile_subgraph_reason.graph_break:
|
| 261 |
+
break_reasons.append(gm.compile_subgraph_reason)
|
| 262 |
+
|
| 263 |
+
return gm, graphs, op_count, ops_per_graph, break_reasons
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
class ExplainWithBackend:
|
| 267 |
+
"""
|
| 268 |
+
This class is intended to be used as a backend for `torch.compile`. It is
|
| 269 |
+
composable with other backends. When used in this way, it accumulates
|
| 270 |
+
information about graph breaks, ops, and other info and provides a string
|
| 271 |
+
representation summarizing this information.
|
| 272 |
+
|
| 273 |
+
Attributes:
|
| 274 |
+
backend (str): The name of the backend to use for optimization.
|
| 275 |
+
graphs (list): A list of the graphs captured by TorchDynamo.
|
| 276 |
+
op_count (int): The total number of operations in all optimized graphs.
|
| 277 |
+
break_reasons (list): A list of graph break reasons with stack traces.
|
| 278 |
+
|
| 279 |
+
Example Usage:
|
| 280 |
+
def fn(x):
|
| 281 |
+
x = torch.sigmoid(x)
|
| 282 |
+
return x
|
| 283 |
+
|
| 284 |
+
torch._dynamo.reset()
|
| 285 |
+
eb = ExplainWithBackend("inductor")
|
| 286 |
+
optimized_fn = torch.compile(fn, backend=eb)
|
| 287 |
+
result = optimized_fn(torch.randn(5))
|
| 288 |
+
print(eb.output())
|
| 289 |
+
"""
|
| 290 |
+
|
| 291 |
+
def __init__(self, backend):
|
| 292 |
+
from .registry import lookup_backend
|
| 293 |
+
|
| 294 |
+
self.backend = lookup_backend(backend)
|
| 295 |
+
self.graphs = []
|
| 296 |
+
self.op_count = 0
|
| 297 |
+
self.break_reasons = []
|
| 298 |
+
|
| 299 |
+
def __call__(self, gm: torch.fx.GraphModule, example_inputs):
|
| 300 |
+
gm, self.graphs, self.op_count, _, self.break_reasons = _explain_graph_detail(
|
| 301 |
+
gm, self.graphs, self.op_count, [], self.break_reasons
|
| 302 |
+
)
|
| 303 |
+
return self.backend(gm, example_inputs)
|
| 304 |
+
|
| 305 |
+
def output(self) -> ExplainOutput:
|
| 306 |
+
graph_count = len(self.graphs)
|
| 307 |
+
output = ExplainOutput(
|
| 308 |
+
self.graphs,
|
| 309 |
+
graph_count,
|
| 310 |
+
graph_count - 1,
|
| 311 |
+
self.break_reasons,
|
| 312 |
+
self.op_count,
|
| 313 |
+
)
|
| 314 |
+
|
| 315 |
+
return output
|
valley/lib/python3.10/site-packages/torch/_dynamo/backends/distributed.py
ADDED
|
@@ -0,0 +1,632 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
import traceback
|
| 5 |
+
from dataclasses import dataclass, field
|
| 6 |
+
from typing import Any, List, Optional
|
| 7 |
+
from unittest import mock
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from torch import fx
|
| 11 |
+
from torch._dynamo.output_graph import GraphCompileReason
|
| 12 |
+
from torch._dynamo.utils import deepcopy_to_fake_tensor, detect_fake_mode
|
| 13 |
+
from torch._logging import trace_structured
|
| 14 |
+
from torch.fx.node import Node
|
| 15 |
+
|
| 16 |
+
# Regular log messages should go through 'log'.
|
| 17 |
+
# ddp_graph_log is a separate artifact logger reserved for dumping graphs.
|
| 18 |
+
# See docs/source/logging.rst for more info.
|
| 19 |
+
log = logging.getLogger(__name__)
|
| 20 |
+
ddp_graph_log = torch._logging.getArtifactLogger(__name__, "ddp_graphs")
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def args_str(args):
    """Debug helper: render tensors and (nested) tuple/list containers
    compactly, e.g. ``tuple(T[torch.Size([2, 3])], list(1, 2))``."""
    if torch.is_tensor(args):
        return f"T[{args.shape}]"
    if isinstance(args, tuple):
        inner = ", ".join(args_str(item) for item in args)
        return f"tuple({inner})"
    if isinstance(args, list):
        inner = ", ".join(args_str(item) for item in args)
        return f"list({inner})"
    return str(args)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
@dataclass
class Bucket:
    """One DDP gradient-allreduce bucket, accumulated by DDPOptimizer while
    walking the graph; holds the parameters and graph nodes assigned to it."""

    # Total bytes of parameter storage assigned to this bucket.
    size: int = 0
    # Qualified names of the parameters in this bucket.
    params: List[str] = field(default_factory=list)
    # fx graph nodes belonging to this bucket.
    nodes: List[fx.Node] = field(default_factory=list)

    # param_ids is just used for unit testing
    param_ids: List = field(default_factory=list)

    # keep track of any buckets that were extended for logging purposes
    opcount_increased_to_capture_external_output: int = 0
    paramsize_before_opcount_increase: int = 0
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def bucket_has_external_output(bucket: Bucket) -> bool:
    """Return True iff some node in the bucket has a user outside the bucket's
    node set (i.e. the subgraph would need a logical output).

    NOTE: we want to scan in reverse order, and conveniently bucket.nodes was
    already built backwards, so no reversal is done here.
    """
    seen = set()
    for node in bucket.nodes:
        # node.op != "output" is assumed; output nodes were filtered upstream.
        seen.add(node)
        if any(user not in seen for user in node.users):
            return True
    return False
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def pretty_print_buckets(buckets: List[Bucket], bucket_bytes_cap: int):
    """Log a human-readable summary of DDPOptimizer's bucket assignments.

    Emits an info line with the bucket count/cap, a warning if any bucket was
    extended past its cap to guarantee a subgraph output node, and (when
    ``tabulate`` is installed) a debug-level table of per-bucket parameters.
    Purely diagnostic: nothing is returned or mutated.
    """
    headers = ("Index", "Size (b)", "Param Names")
    rows = []
    extended_buckets = []
    # Buckets were built in reverse graph order; re-reverse for display.
    for idx, bucket in enumerate(reversed(buckets)):
        if len(bucket.params) > 0:
            # First param shares the row with the bucket index/size;
            # remaining params get continuation rows.
            rows.append((idx, bucket.size, bucket.params[0]))
            for param in bucket.params[1:]:
                rows.append((None, None, param))
        if bucket.opcount_increased_to_capture_external_output > 0:
            extended_buckets.append(
                (
                    idx,
                    bucket.opcount_increased_to_capture_external_output,
                    bucket.size - bucket.paramsize_before_opcount_increase,
                )
            )

    if len(rows):
        log.info(
            "\nDDPOptimizer used bucket cap %s and created %d buckets. Enable debug logs for detailed bucket info.",
            bucket_bytes_cap,
            len(buckets),
        )

        if len(extended_buckets):
            log.warning(
                "Some buckets were extended beyond their requested parameter capacities"
                " in order to ensure each subgraph has an output node, required for fx graph partitioning."
                " This can be the case when a subgraph would have only contained nodes performing inplace mutation,"
                " and returning no logical outputs. This should not be a problem, unless it results in too few graph"
                " partitions for optimal DDP performance."
            )

        try:
            # tabulate is an optional dependency; fall back gracefully.
            from tabulate import tabulate

            log.debug(
                "\nDDPOptimizer produced the following bucket assignments:\n%s",
                tabulate(rows, headers=headers, tablefmt="simple_grid"),
            )

            if len(extended_buckets):
                log.warning(
                    "DDPOptimizer extended these buckets to ensure per-subgraph output nodes:\n%s",
                    tabulate(
                        extended_buckets,
                        headers=("Index", "Extra Ops", "Extra Param Size (b)"),
                        tablefmt="simple_grid",
                    ),
                )
        except ImportError:
            log.debug(
                "Please `pip install tabulate` in order to display ddp bucket sizes and diagnostic information."
            )
    else:
        log.debug("DDPOptimizer captured no parameters and did not split this graph.")
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def has_higher_order_op(gm):
    """Return True iff the graph contains a higher-order op, detected as a
    ``get_attr`` node whose target resolves to a nested GraphModule on gm."""
    return any(
        node.op == "get_attr"
        and isinstance(getattr(gm, node.target), torch.fx.GraphModule)
        for node in gm.graph.nodes
    )
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
# 3 (lazy compile): Replace submodules with lazily compiling submodule
|
| 132 |
+
class SubmoduleReplacer(torch.fx.interpreter.Interpreter):
    """Interpreter pass that replaces every ``call_module`` submodule of the
    outer graph with a wrapper that lazily compiles the submodule (using the
    user-provided ``compiler``) on its first invocation with real inputs."""

    def __init__(self, module, compiler):
        super().__init__(module)
        # compiler: user/backend compile_fn invoked on (submod, args).
        self.compiler = compiler

    def lazily_compiled_submod(self, input_mod):
        """
        Create a wrapper around submodules which:
        - lazily compiles each of the partitioned submodules using the user-provided compiler
        - unpacks singleton tuples/lists into flat arg
        """

        class LazilyCompiledModule(torch.nn.Module):
            # Defers self.compiler(submod, args) until the first forward(),
            # so the example inputs used for compilation are real runtime args.
            def __init__(self, submod, compiler, unwrap_singleton_tuple):
                super().__init__()
                self.submod = submod
                self.compiler = compiler
                self.compiled = False
                self.unwrap_singleton_tuple = unwrap_singleton_tuple

            def forward(self, *args):
                if not self.compiled:
                    # First compile with args as example_inputs
                    # These args will be fakeified if using Inductor/AOTAutograd
                    new_submod = self.compiler(self.submod, args)
                    del self.submod
                    self.submod = new_submod
                    self.compiled = True
                    self.compiler = None

                x = self.submod(*args)
                # we must let 'input_mod' return a tuple, to make AOT happy.
                # (aot_autograd compile_fn literally requires that the output of a graph it compiles is a tuple).
                # however, we don't actually want this tuple to be returned, since the fx logic that calls the submod
                # will again wrap outputs from the submod in a tuple. So we unwrap it, and count on it being re-wrapped
                if self.unwrap_singleton_tuple and isinstance(x, (tuple, list)):
                    return x[0]
                return x

        # Force the submodule's output node to be a tuple (AOT requirement),
        # remembering whether we must unwrap it again at runtime.
        unwrap_singleton_tuple = False
        for sn in input_mod.graph.nodes:
            if sn.op == "output":
                if not isinstance(sn.args[0], tuple):
                    unwrap_singleton_tuple = True
                    sn.args = (sn.args,)

        input_mod.recompile()
        input_mod.compile_subgraph_reason = GraphCompileReason(
            "DDPOptimizer intentional graph-break (See Note [DDPOptimizer])."
            " Set `torch._dynamo.config.optimize_ddp = False` to disable.",
            [
                # it's close to useless to get a real stacktrace here, and quite verbose.
                traceback.FrameSummary(__file__, 0, DDPOptimizer),
            ],
        )
        wrapper = LazilyCompiledModule(
            input_mod,
            self.compiler,
            unwrap_singleton_tuple,
        )
        return wrapper

    # We replace the submodules with lazy submodules which compile
    # the corresponding submodules when they are run with real values
    # Always returns `None` - we do not need to propagate values in order
    # to replace submodules.
    def run_node(self, n: Node) -> Any:
        if n.op == "call_module":
            real_mod = self.fetch_attr(n.target)

            ddp_graph_log.debug("\n---%s graph---\n%s", n.target, real_mod.graph)

            assert len(n.kwargs) == 0, "We assume only args for these modules"
            lazily_compiled_submod = self.lazily_compiled_submod(real_mod)

            # We update the original (outer) graph with a call into the compiled module
            # instead of the uncompiled one.
            self.module.delete_submodule(n.target)
            n.target = "compiled_" + n.target
            self.module.add_submodule(n.target, lazily_compiled_submod)
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
# 3 (no lazy compile): compile each of the partitioned submodules using the user-provided compiler
|
| 215 |
+
class SubmodCompiler(torch.fx.interpreter.Interpreter):
    """Interpreter pass that eagerly compiles each ``call_module`` submodule
    with the user compiler, propagating fake tensors between subgraphs (via
    ``fake_mode``) so later submodules see correctly-shaped/strided inputs."""

    def __init__(self, module, compiler, fake_mode):
        super().__init__(module)
        self.compiler = compiler
        # fake_mode: FakeTensorMode detected from the example inputs;
        # used to execute submodules at compile time without real data.
        self.fake_mode = fake_mode

    def compile_submod(self, input_mod, args, kwargs):
        """
        Compile the submodule,
        using a wrapper to make sure its output is always a tuple,
        which is required by AotAutograd based compilers
        """
        assert len(kwargs) == 0, "We assume only args for these modules"

        class WrapperModule(torch.nn.Module):
            def __init__(self, submod, unwrap_singleton_tuple):
                super().__init__()
                self.submod = submod
                self.unwrap_singleton_tuple = unwrap_singleton_tuple

            def forward(self, *args):
                x = self.submod(*args)
                # TODO(whc)
                # for some reason the isinstance check is necessary if I split one node per submod
                # - even though I supposedly wrapped the output in a tuple in those cases, the real
                # compiled module was still returning a tensor
                if self.unwrap_singleton_tuple and isinstance(x, (tuple, list)):
                    return x[0]
                return x

        # Force the submodule's output node to be a tuple (AOT requirement),
        # remembering whether we must unwrap it again at runtime.
        unwrap_singleton_tuple = False
        for sn in input_mod.graph.nodes:
            if sn.op == "output":
                if not isinstance(sn.args[0], tuple):
                    unwrap_singleton_tuple = True
                    sn.args = (sn.args,)

        input_mod.recompile()
        input_mod.compile_subgraph_reason = GraphCompileReason(
            "DDPOptimizer intentional graph-break (See Note [DDPOptimizer])."
            " Set `torch._dynamo.config.optimize_ddp = False` to disable.",
            [
                # it's close to useless to get a real stacktrace here, and quite verbose.
                traceback.FrameSummary(__file__, 0, DDPOptimizer),
            ],
        )

        wrapper = WrapperModule(
            self.compiler(input_mod, args),
            unwrap_singleton_tuple,
        )
        return wrapper

    # Note:
    #
    # The way distributed works today around fake tensors can be somewhat confusing.
    # Some of these codepaths are shared in both runtime, and compile time. The presence
    # of a fake_mode, read off of fake tensor inputs, dictates how we will operate.
    #
    # A few things to keep in mind:
    #
    # 1) We invoke `compile_submod` with a real module. The output of that gets stored
    # on the graph via `self.module.add_submodule(n.target, compiled_submod_real)`.
    #
    # 2) When running a call_module targeted node, if we have a fake_mode, we fakify the
    # module we got from self.fetch_attr(n.target). Regardless of fake_mode, we then execute it.
    #
    # 3) Fake tensors should always be around during compile time.
    #
    # 4) Fake tensors should never be around at runtime.
    #
    # 5) We end up with a compilation mode that takes a real submodule and fake tensors,
    # to match what aot_autograd expects. See Note: [Fake Modules and AOTAutograd]
    def run_node(self, n: Node) -> Any:
        args, kwargs = self.fetch_args_kwargs_from_env(n)
        new_args = []
        assert self.fake_mode
        # Fakify any real tensor args so submodule execution stays data-free.
        for arg in args:
            if isinstance(arg, torch.Tensor) and not isinstance(
                arg, torch._subclasses.FakeTensor
            ):
                new_args.append(torch._dynamo.utils.to_fake_tensor(arg, self.fake_mode))
            else:
                new_args.append(arg)

        log.debug("run_node %s, %s got args %s", n.op, n.target, args_str(args))
        assert isinstance(args, tuple)
        assert isinstance(kwargs, dict)

        if n.op == "call_module":
            real_mod = self.fetch_attr(n.target)
            if self.fake_mode:
                curr_submod = deepcopy_to_fake_tensor(real_mod, self.fake_mode)
            else:
                curr_submod = real_mod

            ddp_graph_log.debug("\n---%s graph---\n%s", n.target, curr_submod.graph)

            # When calling the compiler on the submod, inputs (new_args) are expected to
            # be FakeTensors already since Dynamo would have made them FakeTensors in the
            # non-DDP flow. However, the parameters are _not_ expected to be FakeTensors,
            # since this wrapping happens during compilation

            # Note: Returning Fake Tensors on First AOT Autograd Call
            #
            # Inductor will optimize strides of outputs when it deems it profitable.
            # For instance, converting to channels last. When we split the graph here
            # into multiple inductor compilations, we need to make sure that the
            # output strides of one compilation is appropriately passed to the subsequent
            # compilations. However, the mapping from inductor output to dynamo output
            # is non-trivial due to aot_autograd's deduping, de-aliasing, mutation, re-writing,
            # subclass handling, etc. In order to replay all this logic we set a flag such that
            # the first invocation of inductor in aot_autograd will return Fake Tensors with
            # appropriate strides. Then, all of aot autograd's runtime logic is replayed.
            # This gives us the appropriately strided outputs here which will reflect runtime strides.

            class FakeifyFirstAOTInvocationGuard:
                # RAII-style toggle of TracingContext.fakify_first_call; reset
                # when the guard object is garbage collected.
                def __init__(self):
                    self.tc = torch._guards.TracingContext.try_get()
                    assert self.tc
                    torch._guards.TracingContext.try_get().fakify_first_call = True

                def __del__(self):
                    self.tc.fakify_first_call = False

            # For aot_eager and other backends, tracing context is not set
            has_tracing_context = torch._guards.TracingContext.try_get() is not None
            if has_tracing_context:
                g = FakeifyFirstAOTInvocationGuard()

            from torch._dynamo.utils import counters

            init = counters["aot_autograd"]["total"]
            compiled_submod_real = self.compile_submod(real_mod, new_args, kwargs)

            # TODO - better way of doing this?
            # Only aot autograd handles fakifying first call
            invoked_aot_autograd = init != counters["aot_autograd"]["total"]

            # We update the original (outer) graph with a call into the compiled module
            # instead of the uncompiled one.
            self.module.delete_submodule(n.target)
            n.target = "compiled_" + n.target
            self.module.add_submodule(n.target, compiled_submod_real)

            # Finally, we have to produce inputs for use compiling the next submodule,
            # and these need to be FakeTensors, so we execute the module under fake_mode
            # Because parameters are not fake we patch fake tensor mode to allow non fake inputs
            with self.fake_mode, mock.patch.object(
                self.fake_mode, "allow_non_fake_inputs", True
            ):
                if has_tracing_context and invoked_aot_autograd:
                    out = compiled_submod_real(*new_args, **kwargs)
                    # output should be fake or subclass
                    assert all(
                        (not isinstance(t, torch.Tensor) or type(t) is not torch.Tensor)
                        for t in (out if isinstance(out, (list, tuple)) else [out])
                    )
                    return out
                else:
                    return curr_submod(*new_args, **kwargs)
        else:
            # placeholder or output nodes don't need to get compiled, just executed
            return getattr(self, n.op)(n.target, new_args, kwargs)
|
| 379 |
+
|
| 380 |
+
|
| 381 |
+
class DDPOptimizer:
|
| 382 |
+
|
| 383 |
+
"""Note [DDPOptimizer]
|
| 384 |
+
DDPOptimizer applies when dynamo compiles models wrapped in DistributedDataParallel (DDP),
|
| 385 |
+
breaking the dynamo graph into chunks to compile separately, with the breaks aligning to
|
| 386 |
+
the boundaries of gradient-allreduce buckets chosen by DDP.
|
| 387 |
+
|
| 388 |
+
Background/Motivation
|
| 389 |
+
- DDP uses allreduce collectives to synchronize partial gradients computed on different workers
|
| 390 |
+
- DDP groups gradient allreduces into 'buckets' to optimize communication efficiency of all-reduce
|
| 391 |
+
- Parameters grouped into buckets are assumed to be adjacent in time, so they become ready
|
| 392 |
+
at around the same time during backward and thus can share the same allreduce efficiently
|
| 393 |
+
- Allreduces must overlap with backward compute for optimal training performance
|
| 394 |
+
- DDP schedules allreduces using 'hooks' fired from the c++ autograd engine in pytorch, which
|
| 395 |
+
operates when individual grads become 'ready'
|
| 396 |
+
- Dynamo+AOTAutograd produces a single fused graph that runs 'atomically' from the perspective of the
|
| 397 |
+
autograd engine, such that all gradients become 'ready' at the same time. Hooks fire after the whole
|
| 398 |
+
fused backward function executes, preventing any overlap of compute and communication
|
| 399 |
+
|
| 400 |
+
Algorithm
|
| 401 |
+
- DDPOptimizer starts off with an FX graph traced by dynamo which represents forward. It can traverse
|
| 402 |
+
this graph in reverse order to determine the true order that gradients will become ready during backward.
|
| 403 |
+
- Parameter sizes are counted in reverse order, up to a bucket size limit, at which point a new bucket is started
|
| 404 |
+
and a graph break introduced
|
| 405 |
+
- Each of the subgraphs is compiled by the compiler provided to dynamo by the user, and then fused back together
|
| 406 |
+
into an outer module that is returned to the user
|
| 407 |
+
|
| 408 |
+
Notes
|
| 409 |
+
- It would be better to enforce (by adding an API to DDP) that the bucket splits chosen here are used by DDP,
|
| 410 |
+
and that DDP does not need to detect or optimize bucket order by observing execution at runtime, as it does
|
| 411 |
+
in eager.
|
| 412 |
+
- If Dynamo can't capture a whole graph for the portion of the model wrapped by DDP, this algorithm will currently
|
| 413 |
+
produce splits that do not necessarily align with the buckets used by DDP. This should result in performance
|
| 414 |
+
degradation approaching the baseline case where graph-splits are not used, but not worse.
|
| 415 |
+
- If the backend compiler fails to compile a single subgraph, it will execute eagerly despite the rest of the
|
| 416 |
+
subgraphs being compiled
|
| 417 |
+
- DDP has a 'parameters_and_buffers_to_ignore' field, which DDPOptimizer attempts to honor by reading markers
|
| 418 |
+
left by DDP on individual parameters. In cases where other transformations, such as reparameterization, are
|
| 419 |
+
also used, the ignore markers could be lost. If DDPOptimizer fails to ignore a parameter ignored by DDP,
|
| 420 |
+
it is not catastrophic but could impact performance by choosing sub-optimal bucket splits.
|
| 421 |
+
- DDPOptimizer always ignores all buffers, regardless of their ignore flag, since buffers do not require gradients,
|
| 422 |
+
and therefore aren't allreduced by DDP. (They are broadcast during forward, but this is not covered by
|
| 423 |
+
DDPOptimizer)
|
| 424 |
+
|
| 425 |
+
Debugging
|
| 426 |
+
- Generally, it is easiest to debug DDPOptimizer in a single process program, using pdb.
|
| 427 |
+
- In many cases, the log messages are helpful (they show bucket size assignments)-
|
| 428 |
+
just set TORCH_LOGS env to include any of 'dynamo', 'distributed', or 'dist_ddp'.
|
| 429 |
+
- See `benchmarks/dynamo/distributed.py` for a simple harness that will run a toy model or a torchbench model
|
| 430 |
+
in a single process (or with torchrun, in multiple processes)
|
| 431 |
+
|
| 432 |
+
Args:
|
| 433 |
+
bucket_bytes_cap (int): Controls the size of buckets, in bytes, used to determine graphbreaks. Should be
|
| 434 |
+
set to match the equivalent parameter on the original DDP module.
|
| 435 |
+
|
| 436 |
+
backend_compile_fn (callable): A dynamo compiler function, to be invoked to compile each subgraph.
|
| 437 |
+
|
| 438 |
+
first_bucket_cap (int): Controls the size of the first bucket. Should match DDP's first bucket cap. DDP
|
| 439 |
+
special-cases the first bucket size since it is sometimes optimal to start a small allreduce early.
|
| 440 |
+
|
| 441 |
+
"""
|
| 442 |
+
|
| 443 |
+
def __init__(
    self,
    bucket_bytes_cap: int,
    backend_compile_fn,
    first_bucket_cap: Optional[int] = None,
):
    """Configure bucket sizing and remember the backend compiler.

    When ``first_bucket_cap`` is not given, fall back to the C10D default
    (mirroring DDP's special-casing of a smaller first bucket), or to
    ``bucket_bytes_cap`` itself when torch.distributed is unavailable.
    """
    if first_bucket_cap is None:
        if torch.distributed.is_available():
            # this constant comes from the C10D lib, which is not always built
            first_bucket_cap = torch.distributed._DEFAULT_FIRST_BUCKET_BYTES
        else:
            first_bucket_cap = bucket_bytes_cap
    self.first_bucket_cap = first_bucket_cap
    self.bucket_bytes_cap = bucket_bytes_cap
    assert (
        self.first_bucket_cap <= self.bucket_bytes_cap
    ), "First bucket should be smaller/equal to other buckets to get comms warmed up ASAP"

    self.backend_compile_fn = backend_compile_fn
def _ignore_parameter(self, parameter):
    """Return True when DDP marked this parameter to be skipped for allreduce."""
    # `_ddp_ignored` is the marker DDP leaves on parameters listed in
    # `parameters_and_buffers_to_ignore`; absent marker means "don't ignore".
    return getattr(parameter, "_ddp_ignored", False)
def add_module_params_to_bucket(self, mod, bucket, processed_modules, prefix):
    """Account every trainable, non-ignored parameter of ``mod`` in ``bucket``.

    Marks ``mod`` as processed so the same submodule is never counted twice,
    then grows the bucket's byte size and records the qualified parameter
    names and object ids for later bucket bookkeeping.
    """
    processed_modules.add(mod)
    for param_name, param in mod.named_parameters():
        if not param.requires_grad:
            continue
        if self._ignore_parameter(param):
            continue
        bucket.size += param.untyped_storage().nbytes()
        bucket.params.append(f"{prefix}_{param_name}")
        bucket.param_ids.append(id(param))
def compile_fn(self, gm: fx.GraphModule, example_inputs: List[torch.Tensor]):
    """
    Implements graph splitting, first determining a set of buckets by counting
    parameter sizes in reverse graph order, then invoking the user/backend compiler
    to compile each subgraph. Finally, stitches compiled graphs into one graphmodule
    and returns its callable.
    """
    if has_higher_order_op(gm):
        # This indicates presence of a higher order op. For now, we
        # have no way to break the higher order op into two buckets.
        # Allowing higher order ops in the graph also requires
        # changes in the split_module, because the graph splitter
        # currently assumes that all the args of all ops are
        # tensors, but in the case of higher order ops, it could be
        # a graph module. As a workaround, we are short-circuiting.
        raise NotImplementedError(
            "DDPOptimizer backend: Found a higher order op in the graph. "
            "This is not supported. Please turn off DDP optimizer using "
            "torch._dynamo.config.optimize_ddp=False. Note that this can "
            "cause performance degradation because there will be one bucket "
            "for the entire Dynamo graph. Please refer to this issue - "
            "https://github.com/pytorch/pytorch/issues/104674."
        )

    # 1: compute the partition map according to DDP bucket logic.
    # Walk nodes in reverse (backward-pass order) so buckets line up with the
    # order in which DDP launches allreduces.
    buckets = [Bucket()]  # (size, param_names)
    processed_modules = set()
    for node in reversed(gm.graph.nodes):
        if node.op in ("output", "placeholder"):
            continue

        if (
            buckets[0].size >= self.bucket_bytes_cap
            or len(buckets) == 1
            and buckets[0].size >= self.first_bucket_cap
        ):
            if bucket_has_external_output(buckets[0]):
                buckets.insert(0, Bucket())
            else:
                # continue building this bucket past the point of filling its parameter capacity,
                # to increase chances it contains at least one node that is either a global output or
                # passed as input to a subsequent graph

                if buckets[0].opcount_increased_to_capture_external_output == 0:
                    buckets[0].paramsize_before_opcount_increase = buckets[0].size
                buckets[0].opcount_increased_to_capture_external_output += 1
        if node.op == "call_module":
            target_mod = gm.get_submodule(node.target)
            if target_mod not in processed_modules:
                self.add_module_params_to_bucket(
                    target_mod, buckets[0], processed_modules, node.target
                )
        elif node.op == "call_method":
            # A method call whose receiver is a submodule lookup: resolve the
            # submodule if possible and count its parameters once.
            if isinstance(node.args[0].target, str):
                target_mod = None
                try:
                    target_mod = gm.get_submodule(node.args[0].target)
                except AttributeError:
                    pass
                if target_mod is not None and target_mod not in processed_modules:
                    self.add_module_params_to_bucket(
                        target_mod, buckets[0], processed_modules, node.target
                    )
        elif node.op == "get_attr":
            maybe_param = getattr(gm, node.target)
            if (
                isinstance(maybe_param, torch.nn.Parameter)
                and maybe_param.requires_grad
                and not self._ignore_parameter(maybe_param)
            ):
                buckets[0].size += maybe_param.untyped_storage().nbytes()
                buckets[0].params.append(node.target)
                buckets[0].param_ids.append(id(maybe_param))

        # All nodes have to be mapped to a bucket, even if they don't have their own params
        # Ignored params still end up in buckets, we just don't count them towards the capacity
        buckets[0].nodes.append(node)

    if len(buckets) > 1 and buckets[0].size == 0:
        # we collected a small preamble graph with ops that don't include parameters, fuse it back
        buckets[1].nodes.extend(buckets[0].nodes)
        assert len(buckets[0].params) == 0, "Params should be empty if size is 0"
        del buckets[0]

    # stash buckets for testing/debugging purposes
    self.buckets = buckets
    pretty_print_buckets(buckets, self.bucket_bytes_cap)

    if len(buckets) == 1:
        # bypass split/fuse logic if there is only one bucket
        return self.backend_compile_fn(gm, example_inputs)

    # 2: partition the graphmodule according to bucket capacity
    partition_map = {}
    for idx, b in enumerate(buckets):
        for node in b.nodes:
            partition_map[node] = idx

    split_gm = fx.passes.split_module.split_module(
        gm, None, lambda node: partition_map[node]
    )

    debug_str = (
        f"\n---orig graph---\n{gm.graph}\n"
        + f"\n---split graph---\n{split_gm.graph}\n"
    )
    for name, module in split_gm.named_modules():
        if "." not in name and len(name):
            # only print the submod graphs, not their children
            debug_str += f"\n---{name} graph---\n{module.graph}\n"
    debug_str += "\n---------------\n"
    ddp_graph_log.debug(debug_str)

    trace_structured(
        "optimize_ddp_split_graph",
        payload_fn=lambda: split_gm.print_readable(print_output=False),
    )
    for name, module in split_gm.named_modules():
        if "." not in name and len(name):
            trace_structured(
                "optimize_ddp_split_child",
                lambda: {"name": name},
                payload_fn=lambda: module.print_readable(print_output=False),
            )

    # NOTE: we want to enable `optimize_ddp_lazy_compile` by default as soon as possible,
    # because it will fix stride mismatch errors (see motivation: https://github.com/pytorch/pytorch/pull/114154).
    # However, lazy compile currently causes shape mismatch in other cases (`test_graph_split_inductor_transpose`)
    # and we need to fix them before we can enable it by default.
    if not torch._dynamo.config.optimize_ddp_lazy_compile:
        # Today, optimize_ddp=True and keep_output_stride=False can lead to silent
        # correctness issues. The problem is that ddp_optimizer works by partitioning
        # the dynamo graph, sending each subgraph through aot autograd to inductor,
        # and creates example inputs by eagerly interpreting each subgraph to get
        # an output that with the same metadata that we'd get from eager mode.
        # This is a problem though, for torch._inductor.config.keep_output_stride.
        # The above config can cause the outputs of the first graph to have
        # **different** strides from eager, causing the inputs that we pass
        # to the second graph to be wrong.
        # To really fix this, we would need to faithfully ask inductor
        # what the outputs to each graph it expects are.
        fake_mode = detect_fake_mode(example_inputs)
        if fake_mode is None:
            fake_mode = torch._subclasses.fake_tensor.FakeTensorMode()

    if torch._dynamo.config.optimize_ddp_lazy_compile:
        submod_compiler = SubmoduleReplacer(split_gm, self.backend_compile_fn)
    else:
        submod_compiler = SubmodCompiler(
            split_gm, self.backend_compile_fn, fake_mode
        )
    submod_compiler.run(*example_inputs)
    split_gm.recompile()

    ddp_graph_log.debug(
        "\n---final graph---\n%s\n---------------\n", split_gm.graph
    )
    return split_gm
|
valley/lib/python3.10/site-packages/torch/_dynamo/backends/inductor.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
import sys
|
| 4 |
+
|
| 5 |
+
from torch._dynamo import register_backend
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@register_backend
def inductor(*args, **kwargs):
    """Dynamo backend that compiles the captured graph with TorchInductor.

    Thin pass-through to ``torch._inductor.compile_fx.compile_fx``.
    Raises RuntimeError on Windows, which inductor does not support.
    """
    if sys.platform == "win32":
        raise RuntimeError("Windows not yet supported for inductor")

    # do import here to avoid loading inductor into memory when it is not used
    from torch._inductor.compile_fx import compile_fx

    return compile_fx(*args, **kwargs)
|
valley/lib/python3.10/site-packages/torch/_dynamo/backends/onnxrt.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
# This backend is maintained by ONNX team. To direct issues
|
| 4 |
+
# to the right people, please tag related GitHub issues with `module: onnx`.
|
| 5 |
+
#
|
| 6 |
+
# Maintainers' Github IDs: wschin, thiagocrepaldi, BowenBao
|
| 7 |
+
from torch.onnx._internal.onnxruntime import (
|
| 8 |
+
is_onnxrt_backend_supported,
|
| 9 |
+
torch_compile_backend,
|
| 10 |
+
)
|
| 11 |
+
from .registry import register_backend
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def has_onnxruntime():
    # Back-compat capability probe; prefer is_onnxrt_backend_supported() directly.
    # FIXME: update test/dynamo/test_backends.py to call is_onnxrt_backend_supported()
    return is_onnxrt_backend_supported()


if is_onnxrt_backend_supported():
    # All runtime dependencies are present: expose the real ONNX Runtime backend.
    register_backend(name="onnxrt", compiler_fn=torch_compile_backend)
else:
    # Register a stub under the same name so that selecting backend="onnxrt"
    # produces an actionable error message instead of an unknown-backend error.
    def information_displaying_backend(*args, **kwargs):
        raise ImportError(
            "onnxrt is not registered as a backend. "
            "Please make sure all dependencies such as "
            "numpy, onnx, onnxscript, and onnxruntime-training are installed. "
            "Suggested procedure to fix dependency problem:\n"
            " (1) pip or conda install numpy onnx onnxscript onnxruntime-training.\n"
            " (2) Open a new python terminal.\n"
            " (3) Call the API `torch.onnx.is_onnxrt_backend_supported()`:\n"
            " (4) If it returns `True`, then you can use `onnxrt` backend.\n"
            " (5) If it returns `False`, please execute the package importing section in "
            "torch/onnx/_internal/onnxruntime.py under pdb line-by-line to see which import fails."
        )

    register_backend(name="onnxrt", compiler_fn=information_displaying_backend)
|
valley/lib/python3.10/site-packages/torch/_dynamo/backends/registry.py
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
import functools
|
| 4 |
+
import sys
|
| 5 |
+
from typing import Callable, Dict, List, Optional, Protocol, Sequence, Tuple
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
from torch import fx
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class CompiledFn(Protocol):
|
| 12 |
+
def __call__(self, *args: torch.Tensor) -> Tuple[torch.Tensor, ...]:
|
| 13 |
+
...
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
CompilerFn = Callable[[fx.GraphModule, List[torch.Tensor]], CompiledFn]
|
| 17 |
+
|
| 18 |
+
_BACKENDS: Dict[str, CompilerFn] = dict()
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def register_backend(
|
| 22 |
+
compiler_fn: Optional[CompilerFn] = None,
|
| 23 |
+
name: Optional[str] = None,
|
| 24 |
+
tags: Sequence[str] = (),
|
| 25 |
+
):
|
| 26 |
+
"""
|
| 27 |
+
Decorator to add a given compiler to the registry to allow calling
|
| 28 |
+
`torch.compile` with string shorthand. Note: for projects not
|
| 29 |
+
imported by default, it might be easier to pass a function directly
|
| 30 |
+
as a backend and not use a string.
|
| 31 |
+
|
| 32 |
+
Args:
|
| 33 |
+
compiler_fn: Callable taking a FX graph and fake tensor inputs
|
| 34 |
+
name: Optional name, defaults to `compiler_fn.__name__`
|
| 35 |
+
tags: Optional set of string tags to categorize backend with
|
| 36 |
+
"""
|
| 37 |
+
if compiler_fn is None:
|
| 38 |
+
# @register_backend(name="") syntax
|
| 39 |
+
return functools.partial(register_backend, name=name, tags=tags)
|
| 40 |
+
assert callable(compiler_fn)
|
| 41 |
+
name = name or compiler_fn.__name__
|
| 42 |
+
assert name not in _BACKENDS, f"duplicate name: {name}"
|
| 43 |
+
_BACKENDS[name] = compiler_fn
|
| 44 |
+
compiler_fn._tags = tuple(tags)
|
| 45 |
+
return compiler_fn
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
# Convenience wrappers that pre-apply categorization tags; backends registered
# through these are hidden from list_backends() by its default exclude_tags.
register_debug_backend = functools.partial(register_backend, tags=("debug",))
register_experimental_backend = functools.partial(
    register_backend, tags=("experimental",)
)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def lookup_backend(compiler_fn):
    """Expand backend strings to functions"""
    if not isinstance(compiler_fn, str):
        # Already a callable backend; pass it through untouched.
        return compiler_fn
    name = compiler_fn
    if name not in _BACKENDS:
        # First populate the registry from torch._dynamo.backends.* modules...
        _lazy_import()
    if name not in _BACKENDS:
        # ...then probe third-party "torch_dynamo_backends" entry points.
        _lazy_import_entry_point(name)
    if name not in _BACKENDS:
        from ..exc import InvalidBackend

        raise InvalidBackend(name=name)
    return _BACKENDS[name]
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def list_backends(exclude_tags=("debug", "experimental")) -> List[str]:
    """
    Return valid strings that can be passed to:

        torch.compile(..., backend="name")
    """
    # Make sure every built-in backend has had a chance to register itself.
    _lazy_import()
    excluded = set(exclude_tags or ())
    names = [
        name
        for name, backend in _BACKENDS.items()
        if excluded.isdisjoint(backend._tags)
    ]
    return sorted(names)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
@functools.lru_cache(None)
def _lazy_import():
    """Import every module under torch._dynamo.backends exactly once so their
    @register_backend decorators populate _BACKENDS."""
    from .. import backends
    from ..utils import import_submodule

    import_submodule(backends)

    # Also force the minifier backend module to load (it registers itself).
    from ..repro.after_dynamo import dynamo_minifier_backend

    assert dynamo_minifier_backend is not None
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
@functools.lru_cache(None)
def _lazy_import_entry_point(backend_name: str):
    """Look up ``backend_name`` among installed "torch_dynamo_backends" entry
    points and register it when found. Cached so each name is probed once."""
    from importlib.metadata import entry_points

    group_name = "torch_dynamo_backends"
    compiler_fn = None
    if sys.version_info < (3, 10):
        # Pre-3.10 importlib.metadata returns a mapping of group -> list.
        matches = [
            ep
            for ep in entry_points().get(group_name, ())
            if ep.name == backend_name
        ]
        if matches:
            compiler_fn = matches[0].load()
    else:
        # 3.10+ supports selecting by group with named access.
        eps = entry_points(group=group_name)
        if backend_name in eps.names:
            compiler_fn = eps[backend_name].load()

    if compiler_fn is not None and backend_name not in list_backends(tuple()):
        register_backend(compiler_fn=compiler_fn, name=backend_name)
|
valley/lib/python3.10/site-packages/torch/_dynamo/backends/tensorrt.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
# import torch # type: ignore[import]
|
| 4 |
+
# from .common import device_from_inputs, fake_tensor_unsupported # type: ignore[import]
|
| 5 |
+
# from .registry import register_backend # type: ignore[import]
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
Placeholder for TensorRT backend for dynamo via torch-tensorrt
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
# @register_backend
|
| 12 |
+
# def tensorrt(gm, example_inputs):
|
| 13 |
+
# import torch_tensorrt # type: ignore[import]
|
| 14 |
+
# pass
|
valley/lib/python3.10/site-packages/torch/_dynamo/backends/torchxla.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
|
| 5 |
+
from functorch.compile import make_boxed_func
|
| 6 |
+
|
| 7 |
+
from ..backends.common import aot_autograd
|
| 8 |
+
from .registry import register_backend, register_experimental_backend
|
| 9 |
+
|
| 10 |
+
log = logging.getLogger(__name__)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@register_experimental_backend
def openxla_eval(model, fake_tensor_inputs):
    # Inference-only XLA backend; returns an unboxed callable.
    return xla_backend_helper(model, fake_tensor_inputs, boxed=False)


def openxla_eval_boxed(model, fake_tensor_inputs):
    # Same as openxla_eval, but boxed for use as an aot_autograd fw_compiler.
    return xla_backend_helper(model, fake_tensor_inputs, boxed=True)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def xla_backend_helper(model, fake_tensor_inputs, boxed=False):
    """Bridge ``model`` to torch_xla, compiling lazily on first invocation."""
    try:
        import torch_xla.core.dynamo_bridge as bridge
    except ImportError as e:
        raise ImportError(
            "Please follow the instruction in https://github.com/pytorch/xla#pytorchxla to install torch_xla"
        ) from e

    compiled_graph = None

    def forward(*args):
        nonlocal model
        nonlocal compiled_graph
        if compiled_graph is None:
            # Compile once on first call, then drop the model reference so it
            # can be garbage collected.
            compiled_graph = bridge.extract_compiled_graph(model, args)
            del model
        return compiled_graph(*args)

    if boxed:
        return make_boxed_func(forward)
    return forward
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
# Training-capable entry point: route the forward graph through aot_autograd
# with the boxed XLA compiler, then register it under the public name "openxla".
openxla = aot_autograd(
    fw_compiler=openxla_eval_boxed,
)
register_backend(name="openxla", compiler_fn=openxla)
|
valley/lib/python3.10/site-packages/torch/_dynamo/backends/tvm.py
ADDED
|
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
import functools
|
| 4 |
+
import importlib
|
| 5 |
+
import logging
|
| 6 |
+
import os
|
| 7 |
+
import sys
|
| 8 |
+
import tempfile
|
| 9 |
+
from types import MappingProxyType
|
| 10 |
+
from typing import Optional
|
| 11 |
+
|
| 12 |
+
import torch
|
| 13 |
+
from .common import device_from_inputs, fake_tensor_unsupported
|
| 14 |
+
|
| 15 |
+
from .registry import register_backend
|
| 16 |
+
|
| 17 |
+
log = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@register_backend
@fake_tensor_unsupported
def tvm(
    gm,
    example_inputs,
    *,
    options: Optional[MappingProxyType] = MappingProxyType(
        {"scheduler": None, "trials": 20000, "opt_level": 3}
    ),
):
    """Compile ``gm`` with Apache TVM and return a callable executor.

    The graph is traced with torch.jit, converted through the Relay frontend,
    optionally autotuned ("auto_scheduler" or "meta_schedule"), built, and
    wrapped so tensors are marshalled to/from TVM NDArrays via DLPack.
    """
    import tvm  # type: ignore[import]
    from tvm import relay  # type: ignore[import]
    from tvm.contrib import graph_executor  # type: ignore[import]

    jit_mod = torch.jit.trace(gm, example_inputs)
    device = device_from_inputs(example_inputs)
    # Relay needs named input shapes; positions map to "inp_<idx>" names below.
    shape_list = [(f"inp_{idx}", i.shape) for idx, i in enumerate(example_inputs)]
    example_outputs = gm(*example_inputs)
    if len(example_outputs) == 0:
        log.warning("Explicitly fall back to eager due to zero output")
        return gm.forward
    mod, params = relay.frontend.from_pytorch(jit_mod, shape_list)
    if device.type == "cuda":
        dev = tvm.cuda(device.index)
        target = tvm.target.cuda()
    else:
        dev = tvm.cpu(0)
        target = tvm.target.Target(llvm_target())

    # Scheduler can come from options or the TVM_SCHEDULER env var.
    scheduler = options.get("scheduler", None)
    if scheduler is None:
        scheduler = os.environ.get("TVM_SCHEDULER", None)

    trials = options.get("trials", 20000)
    opt_level = options.get("opt_level", 3)

    if scheduler == "auto_scheduler":
        from tvm import auto_scheduler

        log_file = tempfile.NamedTemporaryFile()

        if not os.path.exists(log_file):
            tasks, task_weights = auto_scheduler.extract_tasks(
                mod["main"], params, target
            )
            for task in tasks:
                print(task.compute_dag)
        else:
            print("No tasks")
        if len(tasks) != 0:
            tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
            if not os.path.exists(log_file):
                assert trials > 0
                tune_option = auto_scheduler.TuningOptions(
                    num_measure_trials=trials,
                    measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
                    early_stopping=2000,
                )
                try:
                    tuner.tune(tune_option)
                except Exception:
                    # Don't leave a partial tuning log behind on failure.
                    if os.path.exists(log_file):
                        os.unlink(log_file)
                    raise

        with auto_scheduler.ApplyHistoryBest(log_file):
            with tvm.transform.PassContext(
                opt_level=opt_level, config={"relay.backend.use_auto_scheduler": True}
            ):
                lib = relay.build(mod, target=target, params=params)
    elif scheduler == "meta_schedule":
        from tvm import meta_schedule as ms

        with tempfile.TemporaryDirectory() as work_dir:
            if device.type != "cuda":
                # meta_schedule needs num-cores to be specified
                # here we use the maximum core count
                target = tvm.target.Target(
                    f"{llvm_target()} --num-cores {ms.utils.cpu_count(logical=False)}"
                )
            # TODO(shingjan): This could be replaced by tvm.contrib.torch.optimize_torch
            # once USE_PT_TVMDSOOP is updated and turned on by default in TVM.
            assert trials > 0
            database = ms.relay_integration.tune_relay(
                mod=mod,
                target=target,
                work_dir=work_dir,
                max_trials_global=trials,
                num_trials_per_iter=64,
                params=params,
                strategy="evolutionary",
                opt_level=opt_level,
            )
            lib = ms.relay_integration.compile_relay(
                database=database,
                mod=mod,
                target=target,
                params=params,
                opt_level=opt_level,
            )
    elif scheduler == "default" or not scheduler:
        # no autotuning
        with tvm.transform.PassContext(opt_level=opt_level):
            lib = relay.build(mod, target=target, params=params)
    else:
        raise NotImplementedError(
            "This tuning option is invalid/not implemented for torchdynamo's TVM-related backend. "
            "There are three available options: default, auto_scheduler and meta_schedule."
        )
    m = graph_executor.GraphModule(lib["default"](dev))

    def to_torch_tensor(nd_tensor):
        """A helper function to transfer a NDArray to torch.tensor."""
        if nd_tensor.dtype == "bool":
            # DLPack does not support boolean so it can't be handled by
            # torch.utils.dlpack.from_pack. Workaround by going through
            # numpy, although this brings additional data copy overhead.
            return torch.from_numpy(nd_tensor.numpy())
        return torch.utils.dlpack.from_dlpack(nd_tensor.to_dlpack())

    def to_tvm_tensor(torch_tensor):
        """A helper function to transfer a torch.tensor to NDArray."""
        if torch_tensor.dtype == torch.bool:
            # same reason as above, fallback to numpy conversion which
            # could introduce data copy overhead
            return tvm.nd.array(torch_tensor.cpu().numpy())
        return tvm.nd.from_dlpack(torch_tensor)

    def exec_tvm(*i_args):
        # Marshal torch tensors into the TVM executor, run, and marshal back.
        args = [a.contiguous() for a in i_args]
        shape_info, _ = m.get_input_info()
        active_inputs = {name for name, _ in shape_info.items()}
        for idx, arg in enumerate(args, 0):
            if arg.dim() != 0:
                if arg.requires_grad:
                    arg = arg.detach()
                inp_name = f"inp_{idx}"
                if inp_name not in active_inputs:
                    log.warning(
                        "input %s skipped as not found in tvm's runtime library",
                        inp_name,
                    )
                    continue
                m.set_input(
                    inp_name,
                    to_tvm_tensor(arg),
                )
        m.run()
        return [to_torch_tensor(m.get_output(i)) for i in range(m.get_num_outputs())]

    return exec_tvm
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
# NOTE(review): these aliases previously did functools.partial(tvm, scheduler=...),
# but `tvm` accepts no `scheduler` keyword (only `options`), so calling either
# alias raised TypeError. Route the scheduler choice through `options`; the
# remaining keys fall back to tvm's own defaults via options.get(...).
tvm_meta_schedule = functools.partial(
    tvm, options=MappingProxyType({"scheduler": "meta_schedule"})
)
tvm_auto_scheduler = functools.partial(
    tvm, options=MappingProxyType({"scheduler": "auto_scheduler"})
)
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
def has_tvm():
    """Return True when the ``tvm`` package can be imported."""
    try:
        importlib.import_module("tvm")
    except ImportError:
        return False
    return True
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
@functools.lru_cache(None)
def llvm_target():
    """Pick an LLVM target string for TVM based on host CPU features.

    On Linux, inspects /proc/cpuinfo for AVX-512/AVX2 support; elsewhere
    (or with no recognized features) returns plain "llvm". Cached, so the
    file is read at most once per process.
    """
    if sys.platform == "linux":
        # Use a context manager so the file handle is closed promptly
        # (the previous version leaked it until garbage collection).
        with open("/proc/cpuinfo") as f:
            cpuinfo = f.read()
        if "avx512" in cpuinfo:
            return "llvm -mcpu=skylake-avx512"
        elif "avx2" in cpuinfo:
            return "llvm -mcpu=core-avx2"
    return "llvm"
|
valley/lib/python3.10/site-packages/torch/_dynamo/cache_size.py
ADDED
|
@@ -0,0 +1,173 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import logging
|
| 3 |
+
import types
|
| 4 |
+
import weakref
|
| 5 |
+
from dataclasses import dataclass
|
| 6 |
+
from typing import Tuple
|
| 7 |
+
|
| 8 |
+
from . import config
|
| 9 |
+
|
| 10 |
+
log = logging.getLogger(__name__)
|
| 11 |
+
"""
|
| 12 |
+
[Note on cache size limit]
|
| 13 |
+
|
| 14 |
+
Background - TorchDynamo cache is a linked list. Each cache entry is a
|
| 15 |
+
(check_fn, out_code, next pointer). These are stored on the f_code's co_extra
|
| 16 |
+
scratch space. When a frame is invoked, we walk this linked list and run
|
| 17 |
+
check_fn in each cache_entry to decide if the frame needs recompilation. If none
|
| 18 |
+
of the check_fn's returns True, we recompile and add a new entry. To ensure we
|
| 19 |
+
don't end up recompiling infinitely, we put limits on the cache size.
|
| 20 |
+
|
| 21 |
+
There are two limits
|
| 22 |
+
1) cache_size_limit
|
| 23 |
+
2) accumulated_cache_size_limit
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
Earlier we used to have only limit - maximum number of entries in 1 cache line
|
| 27 |
+
(which is now represented by (2) above). So, why do we need two limits? Lets try
|
| 28 |
+
to understand that.
|
| 29 |
+
|
| 30 |
+
In general, we want our cache limit value to be a small number (e.g. 8 or even
|
| 31 |
+
lower). This ensures that for frames that cause too many recompilation fall to
|
| 32 |
+
eager quickly. However, there is another problem that prevents us from lowering
|
| 33 |
+
the value of cache_size_limit. This is due to ID_MATCH'd guards. Today, we put
|
| 34 |
+
ID_MATCH guards on nn module if there is a graph break. This means we will have
|
| 35 |
+
many recompilations for the same code object because the ID_MATCH guard fails
|
| 36 |
+
for different instances of the nn module. This is a common pattern in how models
|
| 37 |
+
are authored. Therefore, this requires us to keep the cache_size_limit high.
|
| 38 |
+
|
| 39 |
+
We resolve this by introducing these two limits. The first limit (1) limits the
|
| 40 |
+
number of cache entries that have an ID_MATCH'd guard for an nn module instance.
|
| 41 |
+
And, (2)nd limit becomes a safeguard mechanism to have a maximum compilations
|
| 42 |
+
for a code object. One important question is - what is the limit for the code
|
| 43 |
+
object that does not have any ID_MATCH guard? For such code objects, we choose
|
| 44 |
+
(1) as the cache size limit.
|
| 45 |
+
|
| 46 |
+
Let's take an example to understand how these limits help. Suppose we have 16
|
| 47 |
+
instances of a nn module and we ID_MATCH on the self object. Further, suppose
|
| 48 |
+
the inputs to these functions have varying batch size, leading to one
|
| 49 |
+
recompilation. In total, there will be 32 recompilations, and therefore 32 cache
|
| 50 |
+
entries on the forward code object. In the older case when we had only 1 limit,
|
| 51 |
+
our cache size limit must be >= 32 to capture all these recompilations. Now,
|
| 52 |
+
suppose there is a separate function in the same program which is very dynamic
|
| 53 |
+
and unsuitable for compilation. Such a function will need to undergo 32
|
| 54 |
+
compilations to burst the cache and fallback to eager. These 32 recompilations
|
| 55 |
+
are too many and we want to fallback for these compilation-unfriendly functions
|
| 56 |
+
sooner.
|
| 57 |
+
|
| 58 |
+
In the new scenario, we can have (1) cache_size_limit = 2, (2)
|
| 59 |
+
accumulated_cache_size_limit = 32. This means that each ID_MATCH'd object can
|
| 60 |
+
have maximum of two cache entries, and the maximum number of cache entries
|
| 61 |
+
(irrespective of ID_MATCH obj) is 32. This covers the case of forward code
|
| 62 |
+
object which has 32 recompilations. For the other function, the one unsuitable
|
| 63 |
+
for recompilation, our limit is 2. So, we will burst the cache in just 2
|
| 64 |
+
recompilations. In this manner, these 2 limits help us resolve the tension
|
| 65 |
+
mentioned earlier.
|
| 66 |
+
"""
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
@dataclass
class CacheSizeRelevantForFrame:
    """
    Per-frame cache-size bookkeeping: how many cache entries exist in total,
    and how many of them share the same ID_MATCH'd objects as this frame.

    TODO(janimesh) - Consider adding a map from tuple_of_match_ids to count -
    https://github.com/pytorch/pytorch/pull/107496#discussion_r1304564682 - this
    could be useful for debugging as well.
    """

    # Total number of CacheEntry objects in the Dynamo linked list.
    num_cache_entries: int = 0

    # Number of CacheEntry objects having same ID_MATCH'd objects as the frame.
    num_cache_entries_with_same_id_matched_objs: int = 0

    def will_compilation_exceed(self, limit: int) -> bool:
        """A new compilation busts the cache when either limit is hit (thats why >=)."""
        hit_accumulated = self.will_compilation_exceed_accumulated_limit()
        return hit_accumulated or self.will_compilation_exceed_specific_limit(limit)

    def will_compilation_exceed_accumulated_limit(self) -> bool:
        """Check the global cap on total cache entries for the code object."""
        return self.num_cache_entries >= config.accumulated_cache_size_limit

    def will_compilation_exceed_specific_limit(self, limit: int) -> bool:
        """Check the per-ID_MATCH'd-object cap against ``limit``."""
        return self.num_cache_entries_with_same_id_matched_objs >= limit
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def _get_weakref_from_f_locals(frame: types.FrameType, local_name: str):
|
| 101 |
+
obj = frame.f_locals.get(local_name, None)
|
| 102 |
+
weak_id = None
|
| 103 |
+
try:
|
| 104 |
+
weak_id = weakref.ref(obj)
|
| 105 |
+
except TypeError:
|
| 106 |
+
pass # cannot weakref bool object
|
| 107 |
+
return weak_id
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def _has_same_id_matched_objs(frame: types.FrameType, cache_entry) -> bool:
|
| 111 |
+
"""
|
| 112 |
+
Checks if the ID_MATCH'd objects saved on cache_entry are same as the ones
|
| 113 |
+
in frame.f_locals.
|
| 114 |
+
"""
|
| 115 |
+
if not cache_entry:
|
| 116 |
+
return False
|
| 117 |
+
|
| 118 |
+
for (
|
| 119 |
+
local_name,
|
| 120 |
+
weakref_from_cache_entry,
|
| 121 |
+
) in cache_entry.check_fn.id_matched_objs.items():
|
| 122 |
+
if weakref_from_cache_entry() is not None:
|
| 123 |
+
weakref_from_frame = _get_weakref_from_f_locals(frame, local_name)
|
| 124 |
+
if weakref_from_frame != weakref_from_cache_entry:
|
| 125 |
+
return False
|
| 126 |
+
|
| 127 |
+
# Also covers the case where no ID_MATCH objects are saved in frame.f_locals
|
| 128 |
+
return True
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def compute_cache_size(
    frame: types.FrameType, cache_entry
) -> "CacheSizeRelevantForFrame":
    """
    Walk the cache-entry linked list starting at ``cache_entry`` and count
    (a) the total number of entries and (b) how many entries have the same
    ID_MATCH'd objects as ``frame.f_locals``. The latter count is compared
    against cache_size_limit by the caller.
    """
    total = 0
    matching = 0

    node = cache_entry
    while node:
        total += 1
        if _has_same_id_matched_objs(frame, node):
            matching += 1
        node = node.next

    return CacheSizeRelevantForFrame(total, matching)
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def is_recompilation(cache_size: "CacheSizeRelevantForFrame") -> bool:
    """
    If the frame (earlier parsed by compute_cache_size) has more than 1 cache
    entry with same ID_MATCH'd objects, then its a recompilation.

    Note that you can have multiple entries in the cache but still not a
    recompile, e.g., you can have 64 nn module instances, each one having an
    ID_MATCH guard, and each one having just 1 cache entry in the cache. In
    this case, we can have 64 entries in the cache, but no recompilation
    because there is only one entry for each id_matched_obj.
    """
    # A per-ID_MATCH limit of 1 means "any second entry is a recompile".
    return cache_size.will_compilation_exceed(1)
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def exceeds_cache_size_limit(cache_size: "CacheSizeRelevantForFrame") -> Tuple[bool, str]:
    """
    Checks if we are exceeding the cache size limit.

    Returns ``(True, <limit name>)`` naming the limit that was hit, or
    ``(False, "")`` when another compilation is still allowed.
    """
    if cache_size.will_compilation_exceed_accumulated_limit():
        return (True, "accumulated_cache_size_limit")
    hit_specific = cache_size.will_compilation_exceed_specific_limit(
        config.cache_size_limit
    )
    if hit_specific:
        return (True, "cache_size_limit")
    return (False, "")
|
valley/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py
ADDED
|
@@ -0,0 +1,1634 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
# mypy: disable-error-code="method-assign"
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
Functions in this file are responsible for modifying the eval frame
|
| 6 |
+
handler at RUNTIME. Therefore, all functions in this file are hot.
|
| 7 |
+
Functions that only execute at compile time should be placed
|
| 8 |
+
in torch._dynamo.convert_frame.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from __future__ import annotations
|
| 12 |
+
|
| 13 |
+
import contextlib
|
| 14 |
+
import functools
|
| 15 |
+
import inspect
|
| 16 |
+
import logging
|
| 17 |
+
import os
|
| 18 |
+
import sys
|
| 19 |
+
import textwrap
|
| 20 |
+
import traceback
|
| 21 |
+
import types
|
| 22 |
+
import warnings
|
| 23 |
+
import weakref
|
| 24 |
+
from enum import Enum
|
| 25 |
+
from os.path import dirname, join
|
| 26 |
+
from typing import (
|
| 27 |
+
Any,
|
| 28 |
+
Callable,
|
| 29 |
+
Dict,
|
| 30 |
+
List,
|
| 31 |
+
NamedTuple,
|
| 32 |
+
Optional,
|
| 33 |
+
Set,
|
| 34 |
+
Tuple,
|
| 35 |
+
TYPE_CHECKING,
|
| 36 |
+
Union,
|
| 37 |
+
)
|
| 38 |
+
from unittest.mock import patch
|
| 39 |
+
|
| 40 |
+
import torch
|
| 41 |
+
import torch.fx
|
| 42 |
+
import torch.utils._pytree as pytree
|
| 43 |
+
import torch.utils.checkpoint
|
| 44 |
+
from torch import _guards
|
| 45 |
+
from torch._utils_internal import log_export_usage
|
| 46 |
+
from torch.export.dynamic_shapes import _process_dynamic_shapes
|
| 47 |
+
from torch.fx.experimental.proxy_tensor import make_fx, maybe_disable_fake_tensor_mode
|
| 48 |
+
from torch.fx.experimental.symbolic_shapes import (
|
| 49 |
+
ConstraintViolationError,
|
| 50 |
+
DimDynamic,
|
| 51 |
+
StatelessSymbolicContext,
|
| 52 |
+
)
|
| 53 |
+
from torch.fx.graph import _PyTreeCodeGen, _PyTreeInfo
|
| 54 |
+
|
| 55 |
+
from ..fx import GraphModule
|
| 56 |
+
from .backends.registry import CompilerFn, lookup_backend
|
| 57 |
+
|
| 58 |
+
from .hooks import Hooks
|
| 59 |
+
|
| 60 |
+
# see discussion at https://github.com/pytorch/pytorch/issues/120699
|
| 61 |
+
reset_code = torch._C._dynamo.eval_frame.reset_code # noqa: F401
|
| 62 |
+
set_eval_frame = torch._C._dynamo.eval_frame.set_eval_frame # noqa: F401
|
| 63 |
+
set_guard_error_hook = torch._C._dynamo.eval_frame.set_guard_error_hook # noqa: F401
|
| 64 |
+
skip_code = torch._C._dynamo.eval_frame.skip_code # noqa: F401
|
| 65 |
+
unsupported = torch._C._dynamo.eval_frame.unsupported # noqa: F401
|
| 66 |
+
|
| 67 |
+
from . import config, convert_frame, external_utils, trace_rules, utils
|
| 68 |
+
from .code_context import code_context
|
| 69 |
+
from .exc import CondOpArgsMismatchError, UserError, UserErrorType
|
| 70 |
+
from .mutation_guard import install_generation_tagging_init
|
| 71 |
+
from .utils import common_constant_types, compile_times
|
| 72 |
+
|
| 73 |
+
log = logging.getLogger(__name__)
|
| 74 |
+
|
| 75 |
+
from torch._dispatch.python import enable_python_dispatcher
|
| 76 |
+
|
| 77 |
+
always_optimize_code_objects = utils.ExactWeakKeyDictionary()
|
| 78 |
+
null_context = contextlib.nullcontext
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
import sympy
|
| 82 |
+
|
| 83 |
+
if TYPE_CHECKING:
|
| 84 |
+
from torch._subclasses import fake_tensor
|
| 85 |
+
from .types import CacheEntry, DynamoCallback
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
# See https://github.com/python/typing/pull/240
|
| 89 |
+
class Unset(Enum):
|
| 90 |
+
token = 0
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
cached_backends: Dict[int, CompilerFn] = {}
|
| 94 |
+
|
| 95 |
+
unset = Unset.token
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def _reset_guarded_backend_cache():
    """Call ``reset()`` on every cached compiler backend that supports it,
    then empty the module-level ``cached_backends`` registry."""
    global cached_backends
    for compiler in list(cached_backends.values()):
        if hasattr(compiler, "reset"):
            compiler.reset()
    cached_backends.clear()
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
DONT_WRAP_FILES = {
|
| 107 |
+
# For tracing into fx modules
|
| 108 |
+
inspect.getsourcefile(GraphModule),
|
| 109 |
+
join(dirname(dirname(__file__)), "onnx/_internal/fx/dynamo_graph_extractor.py"),
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def _debug_get_cache_entry_list(
|
| 114 |
+
code: Union[types.CodeType, Callable[..., Any]]
|
| 115 |
+
) -> List[CacheEntry]:
|
| 116 |
+
"""
|
| 117 |
+
Given a code object or a callable object, retrieve the cache entries
|
| 118 |
+
stored in this code.
|
| 119 |
+
"""
|
| 120 |
+
if callable(code):
|
| 121 |
+
code = code.__code__
|
| 122 |
+
return torch._C._dynamo.eval_frame._debug_get_cache_entry_list(code)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
class OptimizedModule(torch.nn.Module):
    """
    Wraps the original nn.Module object and later patches its
    forward method to optimized self.forward method.

    Attribute reads/writes that are not part of this wrapper are forwarded
    to the wrapped module, so the wrapper is mostly transparent to callers.
    """

    _torchdynamo_orig_callable: Callable[..., Any]
    get_compiler_config: Callable[[], Any]

    # Attribute names owned by the wrapper itself; __setattr__ forwards
    # everything else to the wrapped module.
    _opt_mod_attributes = {
        "_orig_mod",
        "dynamo_ctx",
        "_torchdynamo_orig_callable",
        "get_compiler_config",
        "forward",
        "_forward",
        "__dict__",
        "named_children_walk",
    }

    def __init__(self, mod: torch.nn.Module, dynamo_ctx):
        """Wrap ``mod`` and install the compiled forward via ``dynamo_ctx``."""
        super().__init__()
        # Installs the params/buffer
        self._orig_mod = mod
        self.dynamo_ctx = dynamo_ctx
        self._initialize()

    def _initialize(self):
        """Pick and install the appropriate compiled ``forward`` wrapper."""
        # Do this stuff in constructor to lower overhead slightly
        if isinstance(self.dynamo_ctx, DisableContext):
            # No need to check trace rules
            self.forward = self.dynamo_ctx(self._orig_mod.__call__)
        elif isinstance(self._orig_mod.forward, types.MethodType) and trace_rules.check(
            self._orig_mod.forward
        ):
            # This may be a torch.nn.* instance in trace_rules.py which
            # won't trigger a frame evaluation workaround to add an extra
            # frame we can capture
            self.forward = self.dynamo_ctx(external_utils.wrap_inline(self._orig_mod))
        else:
            # Invoke hooks outside of dynamo then pickup the inner frame
            self.forward = self.dynamo_ctx(self._orig_mod.__call__)

        if hasattr(self._orig_mod, "_initialize_hook"):
            # Lazy module: route calls through _call_lazy_check so the module
            # gets materialized before the compiled forward runs.
            self._forward = self.forward
            self.forward = self._call_lazy_check

    def __reduce__(self):
        """Pickle as (class, (wrapped module, dynamo context))."""
        return (self.__class__, (self._orig_mod, self.dynamo_ctx))

    def __getstate__(self):
        # Drop the installed forward/__call__; they are re-created on load.
        state = dict(self.__dict__)
        state.pop("forward", None)
        state.pop("__call__", None)
        return state

    def __setstate__(self, state):
        # Restore attributes, then re-install the compiled forward.
        self.__dict__ = state
        self._initialize()

    def __getattr__(self, name):
        # Only reached for names not found on the wrapper; forward to the
        # wrapped module (with a special case to avoid infinite recursion).
        if name == "_orig_mod":
            return self._modules["_orig_mod"]
        return getattr(self._orig_mod, name)

    def __setattr__(self, name, val):
        # Allow patching over class attributes
        if hasattr(type(self), name):
            return super().__setattr__(name, val)

        if name in OptimizedModule._opt_mod_attributes:
            return super().__setattr__(name, val)
        return setattr(self._orig_mod, name, val)

    def _call_lazy_check(self, *args, **kwargs):
        """Initialize a lazy wrapped module (if still lazy) before calling it."""
        if hasattr(self._orig_mod, "_initialize_hook"):
            # In the case of a lazy module, we want to run
            # the pre-hooks which initialize it.
            # Afterwards, lazy module deletes its pre-hooks
            # to avoid treating it as lazy on subsequent recompile.
            self._orig_mod._infer_parameters(self._orig_mod, args, kwargs)
        return self._forward(*args, **kwargs)

    def __dir__(self):
        # Merge the wrapped module's attributes with the wrapper's own,
        # preferring the wrapped module's ordering.
        orig_mod_attrs = self._orig_mod.__dir__()
        return orig_mod_attrs + [
            attr for attr in super().__dir__() if attr not in orig_mod_attrs
        ]
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
def remove_from_cache(f):
    """
    Make sure f.__code__ is not cached to force a recompile.

    Accepts a raw code object, any callable with ``__code__``, or an
    nn.Module-like object exposing ``forward``; falls back to a full dynamo
    reset when no code object can be located.
    """
    if isinstance(f, types.CodeType):
        reset_code(f)
        return
    if hasattr(f, "__code__"):
        reset_code(f.__code__)
        return
    if hasattr(getattr(f, "forward", None), "__code__"):
        reset_code(f.forward.__code__)
        return

    from . import reset  # type: ignore[attr-defined]

    reset()
    log.warning("could not determine __code__ for %s", f)
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
def nothing():
    """No-op placeholder used as a default hook/patch function."""
    pass
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
def always_false():
    """Placeholder predicate that always reports False."""
    return False
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
def innermost_fn(fn):
    """
    In case of nesting of _TorchDynamoContext calls, find the innermost
    function. TorchDynamo caches on fn.__code__ object, so its necessary to find
    the innermost function to pass on the optimize, run, disable etc.
    """
    current = fn
    # Each decorator layer records the callable it wrapped; peel them all off.
    while hasattr(current, "_torchdynamo_orig_callable"):
        current = current._torchdynamo_orig_callable
    assert callable(current)
    return current
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
def make_set_enable_dynamic(enable: bool):
    """Build a config patcher that toggles dynamic-shape behavior.

    ``enable=True`` assumes everything is dynamic by default;
    ``enable=False`` forces fully static shapes.
    """
    assert isinstance(enable, bool)
    if enable:
        # Assume everything is dynamic by default
        return config._make_closure_patcher(assume_static_by_default=False)
    return config._make_closure_patcher(
        automatic_dynamic_shapes=False, assume_static_by_default=True
    )
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
class _TorchDynamoContext:
    """
    Base context/decorator that installs a dynamo eval-frame callback.

    Usable as a decorator (``__call__``) on functions, nn.Modules and
    classes; direct context-manager usage may be disabled by config.
    The callback is installed via ``set_eval_frame`` and restored on exit,
    with a list of enter/exit hooks run around it.
    """

    def __init__(
        self,
        callback: DynamoCallback,
        on_enter=nothing,
        backend_ctx_ctor=null_context,
        patch_fn=nothing,
        first_ctx=False,
        *,
        export=False,
        dynamic=None,
        compiler_config=None,
    ):
        """Record the callback/config and register enter/exit hooks.

        ``callback`` may be a callable, ``False`` (run-only) or ``None``
        (disabled). ``on_enter``/``backend_ctx_ctor`` become hooks only when
        they differ from the cheap defaults.
        """
        super().__init__()
        assert callable(callback) or callback is False or callback is None
        self.callback: DynamoCallback = callback
        self._backend_ctx_ctor = backend_ctx_ctor
        # ``unset`` sentinel distinguishes "never entered" from a prior of None.
        self.prior: Union[Unset, DynamoCallback] = unset
        self.first_ctx = first_ctx
        self.export = export
        self._dynamic = dynamic
        self.compiler_config = compiler_config
        self.cleanup_fns: List[Callable[[], Any]] = []
        self.enter_exit_hooks = []
        patch_fn()

        # Save the backends so that we can reset them during torch._dynamo.reset
        backend = innermost_fn(callback)
        cached_backends.setdefault(id(backend), backend)

        if dynamic is not None:
            self.enter_exit_hooks.append(make_set_enable_dynamic(dynamic))

        if on_enter is not nothing:
            # this case is not common
            def call_on_enter():
                on_enter()
                return nothing

            self.enter_exit_hooks.append(call_on_enter)

        if backend_ctx_ctor is not contextlib.nullcontext:
            # this case is not common
            def call_backend_ctx():
                ctx = backend_ctx_ctor()
                ctx.__enter__()
                return functools.partial(ctx.__exit__, None, None, None)

            self.enter_exit_hooks.append(call_backend_ctx)

    def __enter__(self):
        """Run enter hooks and install the dynamo callback (if allowed)."""
        if config.raise_on_ctx_manager_usage:
            raise RuntimeError(
                "torch._dynamo.optimize(...) is used with a context manager. "
                "Please refer to https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html "
                "to use torch._dynamo.optimize(...) as an annotation/decorator. "
            )
        self.cleanup_fns = [enter() for enter in self.enter_exit_hooks]
        self.prior = set_eval_frame(self.callback)

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Restore the previous eval-frame callback and run cleanups."""
        assert self.prior is not unset
        set_eval_frame(self.prior)
        self.prior = unset
        for cleanup in self.cleanup_fns:
            cleanup()
        self.cleanup_fns.clear()

    def __call__(self, fn):
        """Decorate ``fn`` (function, nn.Module or class) with this context.

        Returns an OptimizedModule for modules, the (patched) class for
        classes, and a wrapping function for plain callables.
        """
        # public api for compiler config/options
        def get_compiler_config():
            return self.compiler_config

        fn = innermost_fn(fn)

        # add context containing GraphModule to any GraphModule forward functions
        if isinstance(fn, GraphModule):
            # add context containing GraphModule to any GraphModule forward functions
            code_context.get_context(fn.forward.__code__)[
                "orig_graphmodule"
            ] = weakref.ref(fn)

        # Optimize the forward method of torch.nn.Module object
        if isinstance(fn, torch.nn.Module):
            mod = fn
            new_mod = OptimizedModule(mod, self)
            # Save the function pointer to find the original callable while nesting
            # of decorators.
            new_mod._torchdynamo_orig_callable = mod.forward

            # when compiling torch.nn.Module,
            # provide public api OptimizedModule.get_compiler_config()
            assert not hasattr(new_mod, "get_compiler_config")
            new_mod.get_compiler_config = get_compiler_config

            return new_mod

        if inspect.isclass(fn):
            # User has wrapped the class with compile/disable decorator. Apply
            # disable to init/call method.
            cls_obj = fn
            cls_obj.__call__ = self(cls_obj.__call__)
            if issubclass(cls_obj, torch.nn.Module):
                # NN module variable tracker directly inlines the _call_impl.
                cls_obj._call_impl = self(cls_obj._call_impl)
            return cls_obj

        assert callable(fn)

        try:
            filename = inspect.getsourcefile(fn)
        except TypeError:
            filename = None
        if (
            (filename is None or trace_rules.check(fn))
            and (
                getattr(fn, "__name__", "")
                not in ["_call_impl", "_wrapped_call_impl", "_lazy_forward"]
            )
            and filename not in DONT_WRAP_FILES
        ):
            # call to a builtin without a frame for us to capture
            fn = external_utils.wrap_inline(fn)

        def do_nothing(*arg, **kwargs):
            pass

        if hasattr(self, "callback"):
            callback = self.callback
        else:
            callback = do_nothing

        # Bind the tracing predicates once, outside the hot wrapper.
        is_jit_tracing = torch._C._is_tracing
        is_fx_tracing = torch.fx._symbolic_trace.is_fx_tracing

        @functools.wraps(fn)
        def _fn(*args, **kwargs):
            # Nested fx/jit tracing of a dynamo-optimized function either
            # errors (per config) or falls back to the original function.
            if is_fx_tracing():
                if config.error_on_nested_fx_trace:
                    raise RuntimeError(
                        "Detected that you are using FX to symbolically trace "
                        "a dynamo-optimized function. This is not supported at the moment."
                    )
                else:
                    return fn(*args, **kwargs)

            if is_jit_tracing():
                if config.error_on_nested_jit_trace:
                    raise RuntimeError(
                        "Detected that you are using FX to torch.jit.trace "
                        "a dynamo-optimized function. This is not supported at the moment."
                    )
                else:
                    return fn(*args, **kwargs)

            cleanups = [enter() for enter in self.enter_exit_hooks]
            prior = set_eval_frame(callback)

            # Ensure that if an assertion occurs after graph pushes
            # something onto the DynamicLayerStack then we pop it off (the
            # constructed graph code isn't guarded with try/finally).
            #
            # This used to be a context but putting a `with` here is a noticible
            # perf regression (#126293)
            saved_dynamic_layer_stack_depth = (
                torch._C._functorch.get_dynamic_layer_stack_depth()
            )

            try:
                return fn(*args, **kwargs)
            finally:
                # Restore the dynamic layer stack depth if necessary.
                torch._C._functorch.pop_dynamic_layer_stack_and_undo_to_depth(
                    saved_dynamic_layer_stack_depth
                )

                set_eval_frame(prior)
                for cleanup in cleanups:
                    cleanup()

        # hooks to properly handle inlining
        _fn._torchdynamo_inline = fn  # type: ignore[attr-defined]

        # Save the function pointer to find the original callable while nesting
        # of decorators.
        _fn._torchdynamo_orig_callable = fn  # type: ignore[attr-defined]

        # when compiling user function instead of nn.Module
        # provide public api _fn.get_compiler_config()
        assert not hasattr(_fn, "get_compiler_config")
        _fn.get_compiler_config = get_compiler_config  # type: ignore[attr-defined]

        # If the function is called using torch._dynamo.optimize decorator, we
        # should prevent any type of skipping.
        if callback not in (None, False):
            if not hasattr(fn, "__code__"):
                raise RuntimeError(
                    textwrap.dedent(
                        """

                        torch._dynamo.optimize is called on a non function object.
                        If this is a callable class, please wrap the relevant code into a function and optimize the
                        wrapper function.

                        >> class CallableClass:
                        >>     def __init__(self):
                        >>         super().__init__()
                        >>         self.relu = torch.nn.ReLU()
                        >>
                        >>     def __call__(self, x):
                        >>         return self.relu(torch.sin(x))
                        >>
                        >>     def print_hello(self):
                        >>         print("Hello world")
                        >>
                        >> mod = CallableClass()

                        If you want to optimize the __call__ function and other code, wrap that up in a function

                        >> def wrapper_fn(x):
                        >>     y = mod(x)
                        >>     return y.sum()

                        and then optimize the wrapper_fn

                        >> opt_wrapper_fn = torch._dynamo.optimize(wrapper_fn)
                        """
                    )
                )
            always_optimize_code_objects[fn.__code__] = True

        return _fn
|
| 496 |
+
|
| 497 |
+
|
| 498 |
+
class OptimizeContext(_TorchDynamoContext):
|
| 499 |
+
def __init__(
|
| 500 |
+
self,
|
| 501 |
+
callback,
|
| 502 |
+
backend_ctx_ctor,
|
| 503 |
+
first_ctx=False,
|
| 504 |
+
*,
|
| 505 |
+
export=False,
|
| 506 |
+
dynamic=None,
|
| 507 |
+
compiler_config=None,
|
| 508 |
+
rebuild_ctx: Optional[
|
| 509 |
+
Callable[[], Union[OptimizeContext, _NullDecorator]]
|
| 510 |
+
] = None,
|
| 511 |
+
):
|
| 512 |
+
def on_enter():
|
| 513 |
+
install_generation_tagging_init()
|
| 514 |
+
|
| 515 |
+
super().__init__(
|
| 516 |
+
callback=callback,
|
| 517 |
+
on_enter=on_enter,
|
| 518 |
+
backend_ctx_ctor=backend_ctx_ctor,
|
| 519 |
+
patch_fn=TorchPatcher.patch,
|
| 520 |
+
first_ctx=first_ctx,
|
| 521 |
+
export=export,
|
| 522 |
+
dynamic=dynamic,
|
| 523 |
+
compiler_config=compiler_config,
|
| 524 |
+
)
|
| 525 |
+
|
| 526 |
+
if config.compiled_autograd:
|
| 527 |
+
|
| 528 |
+
def call_compiled_autograd():
|
| 529 |
+
assert rebuild_ctx is not None
|
| 530 |
+
compiler_fn = rebuild_ctx()
|
| 531 |
+
ctx = torch._dynamo.compiled_autograd.enable(compiler_fn)
|
| 532 |
+
ctx.__enter__()
|
| 533 |
+
return functools.partial(ctx.__exit__, None, None, None)
|
| 534 |
+
|
| 535 |
+
self.enter_exit_hooks.append(call_compiled_autograd)
|
| 536 |
+
|
| 537 |
+
def __reduce__(self):
|
| 538 |
+
return (
|
| 539 |
+
self.__class__,
|
| 540 |
+
(self.callback, self._backend_ctx_ctor, self.first_ctx),
|
| 541 |
+
{
|
| 542 |
+
"export": self.export,
|
| 543 |
+
"dynamic": self._dynamic,
|
| 544 |
+
"compiler_config": self.compiler_config,
|
| 545 |
+
},
|
| 546 |
+
)
|
| 547 |
+
|
| 548 |
+
|
| 549 |
+
class RunOnlyContext(_TorchDynamoContext):
|
| 550 |
+
def __init__(self):
|
| 551 |
+
# cudagraph trees relies on generation increment
|
| 552 |
+
def on_enter():
|
| 553 |
+
torch._dynamo.mutation_guard.GenerationTracker.generation += 1
|
| 554 |
+
|
| 555 |
+
super().__init__(callback=False, on_enter=on_enter)
|
| 556 |
+
|
| 557 |
+
def __reduce__(self):
|
| 558 |
+
return (self.__class__, ())
|
| 559 |
+
|
| 560 |
+
|
| 561 |
+
class DisableContext(_TorchDynamoContext):
|
| 562 |
+
def __init__(self):
|
| 563 |
+
super().__init__(callback=None)
|
| 564 |
+
|
| 565 |
+
def __call__(self, fn):
|
| 566 |
+
# Earlier this code was in the base class _TorchDynamoContext. But we
|
| 567 |
+
# moved it here to have better code organization. For disable, we just
|
| 568 |
+
# want the callback to be None. We don't have to check trace_rules or
|
| 569 |
+
# create any wrapper.
|
| 570 |
+
fn = innermost_fn(fn)
|
| 571 |
+
|
| 572 |
+
if isinstance(fn, torch.nn.Module):
|
| 573 |
+
mod = fn
|
| 574 |
+
new_mod = OptimizedModule(mod, self)
|
| 575 |
+
new_mod._torchdynamo_orig_callable = mod.forward
|
| 576 |
+
return new_mod
|
| 577 |
+
|
| 578 |
+
if inspect.isclass(fn):
|
| 579 |
+
# User has wrapped the class with compile/disable decorator. Apply
|
| 580 |
+
# disable to init/call method.
|
| 581 |
+
cls_obj = fn
|
| 582 |
+
# Disable on init is useful for reconstruction of bytecodes where we
|
| 583 |
+
# want to prevent Dynamo from tracing into the init function. Check
|
| 584 |
+
# test_reconstruction in test_model_output.py.
|
| 585 |
+
cls_obj.__init__ = self(cls_obj.__init__)
|
| 586 |
+
cls_obj.__call__ = self(cls_obj.__call__)
|
| 587 |
+
if issubclass(cls_obj, torch.nn.Module):
|
| 588 |
+
# NN module variable tracker directly inlines the _call_impl. Disable it.
|
| 589 |
+
cls_obj._call_impl = self(cls_obj._call_impl)
|
| 590 |
+
return cls_obj
|
| 591 |
+
|
| 592 |
+
assert callable(fn)
|
| 593 |
+
|
| 594 |
+
callback = self.callback
|
| 595 |
+
|
| 596 |
+
@functools.wraps(fn)
|
| 597 |
+
def _fn(*args, **kwargs):
|
| 598 |
+
prior = set_eval_frame(callback)
|
| 599 |
+
try:
|
| 600 |
+
return fn(*args, **kwargs)
|
| 601 |
+
finally:
|
| 602 |
+
set_eval_frame(prior)
|
| 603 |
+
|
| 604 |
+
_fn._torchdynamo_disable = True # type: ignore[attr-defined]
|
| 605 |
+
|
| 606 |
+
# Save the function pointer to find the original callable while nesting
|
| 607 |
+
# of decorators.
|
| 608 |
+
_fn._torchdynamo_orig_callable = fn # type: ignore[attr-defined]
|
| 609 |
+
|
| 610 |
+
return _fn
|
| 611 |
+
|
| 612 |
+
def __reduce__(self):
|
| 613 |
+
return (self.__class__, ())
|
| 614 |
+
|
| 615 |
+
|
| 616 |
+
def _optimize_catch_errors(
|
| 617 |
+
compile_fn,
|
| 618 |
+
hooks: Hooks,
|
| 619 |
+
backend_ctx_ctor=null_context,
|
| 620 |
+
export=False,
|
| 621 |
+
dynamic=None,
|
| 622 |
+
compiler_config=None,
|
| 623 |
+
rebuild_ctx=None,
|
| 624 |
+
):
|
| 625 |
+
return OptimizeContext(
|
| 626 |
+
convert_frame.catch_errors_wrapper(compile_fn, hooks),
|
| 627 |
+
backend_ctx_ctor=backend_ctx_ctor,
|
| 628 |
+
first_ctx=True,
|
| 629 |
+
export=export,
|
| 630 |
+
dynamic=dynamic,
|
| 631 |
+
compiler_config=compiler_config,
|
| 632 |
+
rebuild_ctx=rebuild_ctx,
|
| 633 |
+
)
|
| 634 |
+
|
| 635 |
+
|
| 636 |
+
def get_compiler_fn(compiler_fn):
|
| 637 |
+
from .repro.after_dynamo import wrap_backend_debug
|
| 638 |
+
|
| 639 |
+
if hasattr(compiler_fn, "compiler_name"):
|
| 640 |
+
compiler_str = compiler_fn.compiler_name
|
| 641 |
+
elif isinstance(compiler_fn, str):
|
| 642 |
+
compiler_str = compiler_fn
|
| 643 |
+
else:
|
| 644 |
+
compiler_str = None
|
| 645 |
+
compiler_fn = lookup_backend(compiler_fn)
|
| 646 |
+
return wrap_backend_debug(compiler_fn, compiler_str)
|
| 647 |
+
|
| 648 |
+
|
| 649 |
+
class _NullDecorator(contextlib.nullcontext): # type: ignore[type-arg]
|
| 650 |
+
def __call__(self, fn):
|
| 651 |
+
assert callable(fn)
|
| 652 |
+
return fn
|
| 653 |
+
|
| 654 |
+
|
| 655 |
+
def check_if_dynamo_supported():
|
| 656 |
+
if sys.version_info >= (3, 13):
|
| 657 |
+
raise RuntimeError("Python 3.13+ not yet supported for torch.compile")
|
| 658 |
+
|
| 659 |
+
|
| 660 |
+
def is_dynamo_supported():
|
| 661 |
+
try:
|
| 662 |
+
check_if_dynamo_supported()
|
| 663 |
+
return True
|
| 664 |
+
except Exception:
|
| 665 |
+
return False
|
| 666 |
+
|
| 667 |
+
|
| 668 |
+
def check_if_inductor_supported():
|
| 669 |
+
check_if_dynamo_supported()
|
| 670 |
+
|
| 671 |
+
if sys.platform == "win32":
|
| 672 |
+
raise RuntimeError("Windows not yet supported for inductor")
|
| 673 |
+
|
| 674 |
+
|
| 675 |
+
def is_inductor_supported():
|
| 676 |
+
try:
|
| 677 |
+
check_if_inductor_supported()
|
| 678 |
+
return True
|
| 679 |
+
except Exception:
|
| 680 |
+
return False
|
| 681 |
+
|
| 682 |
+
|
| 683 |
+
def optimize(*args, **kwargs):
|
| 684 |
+
def rebuild_ctx():
|
| 685 |
+
return optimize(*args, **kwargs)
|
| 686 |
+
|
| 687 |
+
return _optimize(rebuild_ctx, *args, **kwargs)
|
| 688 |
+
|
| 689 |
+
|
| 690 |
+
def _optimize(
|
| 691 |
+
rebuild_ctx: Callable[[], Union[OptimizeContext, _NullDecorator]],
|
| 692 |
+
backend="inductor",
|
| 693 |
+
*,
|
| 694 |
+
nopython=False,
|
| 695 |
+
guard_export_fn=None,
|
| 696 |
+
guard_fail_fn=None,
|
| 697 |
+
disable=False,
|
| 698 |
+
dynamic=None,
|
| 699 |
+
) -> Union[OptimizeContext, _NullDecorator]:
|
| 700 |
+
"""
|
| 701 |
+
The main entrypoint of TorchDynamo. Do graph capture and call
|
| 702 |
+
backend() to optimize extracted graphs.
|
| 703 |
+
|
| 704 |
+
Args:
|
| 705 |
+
backend: One of the two things:
|
| 706 |
+
- Either, a function/callable taking a torch.fx.GraphModule and
|
| 707 |
+
example_inputs and returning a python callable that runs the
|
| 708 |
+
graph faster.
|
| 709 |
+
One can also provide additional context for the backend, like
|
| 710 |
+
torch.jit.fuser("fuser2"), by setting the backend_ctx_ctor attribute.
|
| 711 |
+
See AOTAutogradMemoryEfficientFusionWithContext for the usage.
|
| 712 |
+
- Or, a string backend name in `torch._dynamo.list_backends()`
|
| 713 |
+
nopython: If True, graph breaks will be errors and there will
|
| 714 |
+
be a single whole-program graph.
|
| 715 |
+
disable: If True, turn this decorator into a no-op
|
| 716 |
+
dynamic: If True, upfront compile as dynamic a kernel as possible. If False,
|
| 717 |
+
disable all dynamic shapes support (always specialize). If None, automatically
|
| 718 |
+
detect when sizes vary and generate dynamic kernels upon recompile.
|
| 719 |
+
|
| 720 |
+
Example Usage::
|
| 721 |
+
|
| 722 |
+
@torch._dynamo.optimize()
|
| 723 |
+
def toy_example(a, b):
|
| 724 |
+
...
|
| 725 |
+
"""
|
| 726 |
+
check_if_dynamo_supported()
|
| 727 |
+
# Note: The hooks object could be global instead of passed around, *however* that would make
|
| 728 |
+
# for a confusing API usage and plumbing story wherein we nest multiple .optimize calls.
|
| 729 |
+
# There is some prior art around this, w/r/t nesting backend calls are enforced to be the same
|
| 730 |
+
# compiler, however, this feels onerous for callback and hooks, and it feels better to give our users an
|
| 731 |
+
# easier to understand UX at the cost of a little more plumbing on our end.
|
| 732 |
+
hooks = Hooks(guard_export_fn=guard_export_fn, guard_fail_fn=guard_fail_fn)
|
| 733 |
+
torch._C._log_api_usage_once("torch._dynamo.optimize")
|
| 734 |
+
if disable or os.environ.get("TORCHDYNAMO_DISABLE", "") == "1":
|
| 735 |
+
return _NullDecorator()
|
| 736 |
+
|
| 737 |
+
backend = get_compiler_fn(backend)
|
| 738 |
+
|
| 739 |
+
# Find if backend has any extra context manager
|
| 740 |
+
backend_ctx_ctor = getattr(backend, "backend_ctx_ctor", null_context)
|
| 741 |
+
|
| 742 |
+
if nopython:
|
| 743 |
+
return optimize_assert(
|
| 744 |
+
backend,
|
| 745 |
+
dynamic=dynamic,
|
| 746 |
+
hooks=hooks,
|
| 747 |
+
rebuild_ctx=rebuild_ctx,
|
| 748 |
+
)
|
| 749 |
+
# The backend function is stashed in the callable returned by
|
| 750 |
+
# _optimize_catch_errors in the field _torchdynamo_orig_callable. This can
|
| 751 |
+
# be used by eval_frame.c to insert a guard on the backend.
|
| 752 |
+
return _optimize_catch_errors(
|
| 753 |
+
convert_frame.convert_frame(backend, hooks=hooks),
|
| 754 |
+
hooks,
|
| 755 |
+
backend_ctx_ctor,
|
| 756 |
+
dynamic=dynamic,
|
| 757 |
+
compiler_config=backend.get_compiler_config()
|
| 758 |
+
if hasattr(backend, "get_compiler_config")
|
| 759 |
+
else None,
|
| 760 |
+
rebuild_ctx=rebuild_ctx,
|
| 761 |
+
)
|
| 762 |
+
|
| 763 |
+
|
| 764 |
+
# TODO(voz): Consider making "explain" output alongside a run / part of a run
|
| 765 |
+
@patch("torch._dynamo.symbolic_convert.explain", True)
|
| 766 |
+
def explain(f, *extra_args, **extra_kwargs):
|
| 767 |
+
def inner(*args, **kwargs):
|
| 768 |
+
# TODO(voz): Do we want a decorator for this?
|
| 769 |
+
from . import reset # type: ignore[attr-defined]
|
| 770 |
+
|
| 771 |
+
reset()
|
| 772 |
+
|
| 773 |
+
graphs: List[torch.fx.GraphModule] = []
|
| 774 |
+
break_reasons: List[Any] = []
|
| 775 |
+
op_count: int = 0
|
| 776 |
+
ops_per_graph: List[torch.fx.Node] = []
|
| 777 |
+
out_guards: List[_guards.Guard] = []
|
| 778 |
+
|
| 779 |
+
def dynamo_graph_accumulating_compiler(
|
| 780 |
+
gm: torch.fx.GraphModule, example_inputs
|
| 781 |
+
):
|
| 782 |
+
from .backends.debugging import _explain_graph_detail
|
| 783 |
+
|
| 784 |
+
nonlocal graphs
|
| 785 |
+
nonlocal op_count
|
| 786 |
+
nonlocal ops_per_graph
|
| 787 |
+
nonlocal break_reasons
|
| 788 |
+
|
| 789 |
+
gm, graphs, op_count, ops_per_graph, break_reasons = _explain_graph_detail(
|
| 790 |
+
gm, graphs, op_count, ops_per_graph, break_reasons
|
| 791 |
+
)
|
| 792 |
+
|
| 793 |
+
return gm.forward
|
| 794 |
+
|
| 795 |
+
def guard_export_print(guards):
|
| 796 |
+
nonlocal out_guards
|
| 797 |
+
out_guards.extend(guards)
|
| 798 |
+
|
| 799 |
+
opt_f = optimize(
|
| 800 |
+
dynamo_graph_accumulating_compiler,
|
| 801 |
+
nopython=False,
|
| 802 |
+
guard_export_fn=guard_export_print,
|
| 803 |
+
)(f)
|
| 804 |
+
# TODO(voz): We may have instances of `f` that mutate inputs, we should track sideeffects and reject.
|
| 805 |
+
opt_f(*args, **kwargs)
|
| 806 |
+
|
| 807 |
+
graph_count = len(graphs)
|
| 808 |
+
graph_break_count = graph_count - 1
|
| 809 |
+
compile_time = compile_times(repr="str")
|
| 810 |
+
|
| 811 |
+
# TODO(voz): Do we want a decorator for this?
|
| 812 |
+
reset()
|
| 813 |
+
from .backends.debugging import ExplainOutput
|
| 814 |
+
|
| 815 |
+
return ExplainOutput(
|
| 816 |
+
graphs,
|
| 817 |
+
graph_count,
|
| 818 |
+
graph_break_count,
|
| 819 |
+
break_reasons,
|
| 820 |
+
op_count,
|
| 821 |
+
ops_per_graph,
|
| 822 |
+
out_guards,
|
| 823 |
+
compile_time,
|
| 824 |
+
)
|
| 825 |
+
|
| 826 |
+
if extra_args or extra_kwargs:
|
| 827 |
+
warnings.warn(
|
| 828 |
+
"explain(f, *args, **kwargs) is deprecated, use explain(f)(*args, **kwargs) instead. "
|
| 829 |
+
"If you don't migrate, we may break your explain call in the future if your user defined kwargs "
|
| 830 |
+
"conflict with future kwargs added to explain(f).",
|
| 831 |
+
FutureWarning,
|
| 832 |
+
stacklevel=2,
|
| 833 |
+
)
|
| 834 |
+
return inner(*extra_args, **extra_kwargs)
|
| 835 |
+
else:
|
| 836 |
+
return inner
|
| 837 |
+
|
| 838 |
+
|
| 839 |
+
class FlattenInputOutputSignature(torch.fx.interpreter.Transformer):
|
| 840 |
+
def __init__(
|
| 841 |
+
self,
|
| 842 |
+
m: torch.fx.GraphModule,
|
| 843 |
+
flat_args: Tuple[Any],
|
| 844 |
+
matched_input_elements_positions: List[int],
|
| 845 |
+
flat_results: List[Any],
|
| 846 |
+
matched_output_elements_positions: List[int],
|
| 847 |
+
example_fake_inputs: List[torch.Tensor],
|
| 848 |
+
flat_args_dynamic_dims: List[Set[int]],
|
| 849 |
+
fake_mode: Optional[fake_tensor.FakeTensorMode] = None,
|
| 850 |
+
):
|
| 851 |
+
super().__init__(m)
|
| 852 |
+
|
| 853 |
+
assert len(flat_args_dynamic_dims) == len(flat_args)
|
| 854 |
+
matched_input_elements_to_fake = {
|
| 855 |
+
val: example_fake_inputs[ix]
|
| 856 |
+
for ix, val in enumerate(matched_input_elements_positions)
|
| 857 |
+
}
|
| 858 |
+
|
| 859 |
+
self.new_args = []
|
| 860 |
+
for i in range(0, len(flat_args)):
|
| 861 |
+
arg = super().placeholder(f"arg{i}", (), {})
|
| 862 |
+
if i in matched_input_elements_to_fake:
|
| 863 |
+
arg.node.meta["val"] = matched_input_elements_to_fake[i]
|
| 864 |
+
else:
|
| 865 |
+
# Fill node.mata["val"] with faketensor from the input,
|
| 866 |
+
# if it's not found in matched_input_elements_positions
|
| 867 |
+
if fake_mode is not None and isinstance(flat_args[i], torch.Tensor):
|
| 868 |
+
# TODO(zhxchen17) Also preserve all the user constraints here.
|
| 869 |
+
arg.node.meta["val"] = fake_mode.from_tensor(
|
| 870 |
+
flat_args[i],
|
| 871 |
+
symbolic_context=StatelessSymbolicContext(
|
| 872 |
+
dynamic_sizes=[
|
| 873 |
+
DimDynamic.DYNAMIC
|
| 874 |
+
if d in flat_args_dynamic_dims[i]
|
| 875 |
+
else DimDynamic.STATIC
|
| 876 |
+
for d in range(len(flat_args[i].shape))
|
| 877 |
+
],
|
| 878 |
+
constraint_sizes=[None] * len(flat_args[i].shape),
|
| 879 |
+
),
|
| 880 |
+
)
|
| 881 |
+
self.new_args.append(arg)
|
| 882 |
+
self.old_args_gen = (self.new_args[i] for i in matched_input_elements_positions)
|
| 883 |
+
self.matched_output_elements_positions = matched_output_elements_positions
|
| 884 |
+
self.flat_results = flat_results
|
| 885 |
+
|
| 886 |
+
def placeholder(self, target, args, kwargs):
|
| 887 |
+
arg = next(self.old_args_gen)
|
| 888 |
+
if "val" in self.current_node.meta:
|
| 889 |
+
arg.node.meta["val"] = self.current_node.meta["val"]
|
| 890 |
+
if "tensor_dict" in self.current_node.meta:
|
| 891 |
+
arg.node.meta["tensor_dict"] = self.current_node.meta["tensor_dict"]
|
| 892 |
+
if "example_value" in self.current_node.meta:
|
| 893 |
+
# NB: intentionally do not use set_example_value
|
| 894 |
+
arg.node.meta["example_value"] = self.current_node.meta["example_value"]
|
| 895 |
+
if "unbacked_bindings" in self.current_node.meta:
|
| 896 |
+
arg.node.meta["unbacked_bindings"] = self.current_node.meta[
|
| 897 |
+
"unbacked_bindings"
|
| 898 |
+
]
|
| 899 |
+
return arg
|
| 900 |
+
|
| 901 |
+
def output(self, target, args, kwargs):
|
| 902 |
+
dynamo_result_flat = args[0]
|
| 903 |
+
lookup = [*dynamo_result_flat, *self.new_args]
|
| 904 |
+
new_results_flat = []
|
| 905 |
+
for i in range(len(self.flat_results)):
|
| 906 |
+
if self.matched_output_elements_positions[i] is not None:
|
| 907 |
+
new_results_flat.append(
|
| 908 |
+
lookup[self.matched_output_elements_positions[i]]
|
| 909 |
+
)
|
| 910 |
+
else:
|
| 911 |
+
const_val = self.flat_results[i]
|
| 912 |
+
assert isinstance(const_val, tuple(common_constant_types))
|
| 913 |
+
new_results_flat.append(const_val)
|
| 914 |
+
return super().output(target, (new_results_flat,), {})
|
| 915 |
+
|
| 916 |
+
def run_node(self, n):
|
| 917 |
+
self.current_node = n
|
| 918 |
+
result_proxy = super().run_node(n)
|
| 919 |
+
if "val" in self.current_node.meta:
|
| 920 |
+
result_proxy.node.meta["val"] = self.current_node.meta["val"]
|
| 921 |
+
if "example_value" in self.current_node.meta:
|
| 922 |
+
# NB: intentionally do not use set_example_value
|
| 923 |
+
result_proxy.node.meta["example_value"] = self.current_node.meta[
|
| 924 |
+
"example_value"
|
| 925 |
+
]
|
| 926 |
+
if "unbacked_bindings" in self.current_node.meta:
|
| 927 |
+
result_proxy.node.meta["unbacked_bindings"] = self.current_node.meta[
|
| 928 |
+
"unbacked_bindings"
|
| 929 |
+
]
|
| 930 |
+
if self.current_node.op != "output":
|
| 931 |
+
result_proxy.node._rename(
|
| 932 |
+
getattr(self.current_node, "name", result_proxy.node.name)
|
| 933 |
+
)
|
| 934 |
+
return result_proxy
|
| 935 |
+
|
| 936 |
+
def transform(self):
|
| 937 |
+
result_gm = super().transform()
|
| 938 |
+
if "dynamo_flat_name_to_original_fqn" in self.module.meta:
|
| 939 |
+
result_gm.meta["dynamo_flat_name_to_original_fqn"] = self.module.meta[
|
| 940 |
+
"dynamo_flat_name_to_original_fqn"
|
| 941 |
+
]
|
| 942 |
+
return result_gm
|
| 943 |
+
|
| 944 |
+
|
| 945 |
+
class ExportResult(NamedTuple):
|
| 946 |
+
graph_module: torch.fx.GraphModule
|
| 947 |
+
guards: _guards.GuardsSet
|
| 948 |
+
# NB: Do not add new fields without overriding __iter__; people are
|
| 949 |
+
# destructuring so it is BC-breaking
|
| 950 |
+
|
| 951 |
+
|
| 952 |
+
def check_signature_rewritable(graph):
|
| 953 |
+
input_errors = []
|
| 954 |
+
for node in graph.graph.find_nodes(op="placeholder"):
|
| 955 |
+
assert hasattr(node, "_dynamo_source")
|
| 956 |
+
source = node._dynamo_source
|
| 957 |
+
user_stacks = graph._source_to_user_stacks.get(source)
|
| 958 |
+
if user_stacks is None:
|
| 959 |
+
continue
|
| 960 |
+
assert len(user_stacks) > 0
|
| 961 |
+
# In some cases we may not have a useful stack. Look for a
|
| 962 |
+
# useful stack
|
| 963 |
+
stack = None
|
| 964 |
+
for s in user_stacks:
|
| 965 |
+
if len(s) == 0:
|
| 966 |
+
continue
|
| 967 |
+
stack = s
|
| 968 |
+
break
|
| 969 |
+
if stack is None:
|
| 970 |
+
msg = f"{source.name()}, a closed over free variable"
|
| 971 |
+
else:
|
| 972 |
+
tb = "".join(traceback.format_list(stack))
|
| 973 |
+
extra = ""
|
| 974 |
+
if len(user_stacks) > 1:
|
| 975 |
+
extra = f"(elided {len(user_stacks) - 1} more accesses)"
|
| 976 |
+
msg = f"{source.name()}, accessed at:\n{tb}{extra}"
|
| 977 |
+
# TODO: option to print ALL of the stack traces at once
|
| 978 |
+
input_errors.append(msg)
|
| 979 |
+
|
| 980 |
+
if input_errors:
|
| 981 |
+
raise UserError(
|
| 982 |
+
UserErrorType.INVALID_INPUT,
|
| 983 |
+
"Cannot export model which references tensors that are neither "
|
| 984 |
+
"buffers/parameters/constants nor are direct inputs. For each tensor, if you'd "
|
| 985 |
+
"like this tensor to be an explicit input, add it as a dummy argument "
|
| 986 |
+
"to the top-level model definition you are exporting; if you would "
|
| 987 |
+
"like its value to be embedded as an exported constant, wrap its access "
|
| 988 |
+
"in a function marked with @assume_constant_result.\n\n"
|
| 989 |
+
+ "\n\n".join(input_errors),
|
| 990 |
+
)
|
| 991 |
+
|
| 992 |
+
|
| 993 |
+
def rewrite_signature(
|
| 994 |
+
f_sig,
|
| 995 |
+
graph,
|
| 996 |
+
fake_mode,
|
| 997 |
+
flat_args,
|
| 998 |
+
in_spec,
|
| 999 |
+
example_fake_inputs,
|
| 1000 |
+
graph_captured_input,
|
| 1001 |
+
graph_captured_output,
|
| 1002 |
+
dynamo_traced_result,
|
| 1003 |
+
flat_args_dynamic_dims,
|
| 1004 |
+
):
|
| 1005 |
+
orig_args, orig_kwargs = pytree.tree_unflatten(flat_args, in_spec)
|
| 1006 |
+
|
| 1007 |
+
def check_user_input_output(flat_values, error_type):
|
| 1008 |
+
supported_types = [
|
| 1009 |
+
torch.Tensor,
|
| 1010 |
+
torch.SymInt,
|
| 1011 |
+
torch.SymFloat,
|
| 1012 |
+
torch.SymBool,
|
| 1013 |
+
torch._C.ScriptObject,
|
| 1014 |
+
] + list(common_constant_types)
|
| 1015 |
+
|
| 1016 |
+
def is_supported_type(val):
|
| 1017 |
+
return isinstance(val, tuple(supported_types))
|
| 1018 |
+
|
| 1019 |
+
value_type = "input" if error_type == UserErrorType.INVALID_INPUT else "output"
|
| 1020 |
+
# We only check that the outputs are not None. Inputs can be None.
|
| 1021 |
+
for v in flat_values:
|
| 1022 |
+
if not is_supported_type(v):
|
| 1023 |
+
if error_type == UserErrorType.INVALID_INPUT and v is None:
|
| 1024 |
+
continue
|
| 1025 |
+
|
| 1026 |
+
raise UserError(
|
| 1027 |
+
error_type,
|
| 1028 |
+
f"It looks like one of the {value_type}s with type `{type(v)}` "
|
| 1029 |
+
"is not supported or pytree-flattenable. \n"
|
| 1030 |
+
f"Exported graphs {value_type}s can only contain the "
|
| 1031 |
+
f"following supported types: {supported_types}. \n"
|
| 1032 |
+
"If you are using a custom class object, "
|
| 1033 |
+
"please register a pytree_flatten/unflatten function "
|
| 1034 |
+
"using `torch.utils._pytree.register_pytree_node` or "
|
| 1035 |
+
"`torch.export.register_dataclass`.",
|
| 1036 |
+
)
|
| 1037 |
+
|
| 1038 |
+
check_user_input_output(flat_args, UserErrorType.INVALID_INPUT)
|
| 1039 |
+
flat_results_traced, out_spec_traced = pytree.tree_flatten(dynamo_traced_result)
|
| 1040 |
+
check_user_input_output(flat_results_traced, UserErrorType.INVALID_OUTPUT)
|
| 1041 |
+
|
| 1042 |
+
def produce_matching(debug_type, sources, candidates):
|
| 1043 |
+
matched_elements_positions: List[Optional[int]] = []
|
| 1044 |
+
dict_of_source_vals = {}
|
| 1045 |
+
for i, val in enumerate(sources):
|
| 1046 |
+
dict_of_source_vals[id(val)] = i
|
| 1047 |
+
|
| 1048 |
+
for i, val in enumerate(candidates):
|
| 1049 |
+
if isinstance(val, tuple(common_constant_types)):
|
| 1050 |
+
matched_elements_positions.append(None)
|
| 1051 |
+
elif id(val) not in dict_of_source_vals:
|
| 1052 |
+
raise AssertionError(
|
| 1053 |
+
f"Unexpectedly found a {type(val)} in the {debug_type}.\n"
|
| 1054 |
+
'Please file an issue along with a paste of the logs from TORCH_LOGS="+export"'
|
| 1055 |
+
)
|
| 1056 |
+
else:
|
| 1057 |
+
matched_elements_positions.append(dict_of_source_vals[id(val)])
|
| 1058 |
+
|
| 1059 |
+
return matched_elements_positions
|
| 1060 |
+
|
| 1061 |
+
matched_input_elements_positions = produce_matching(
|
| 1062 |
+
"inputs", flat_args, graph_captured_input
|
| 1063 |
+
)
|
| 1064 |
+
|
| 1065 |
+
assert graph_captured_output is not None
|
| 1066 |
+
matched_output_elements_positions = produce_matching(
|
| 1067 |
+
"outputs", list(graph_captured_output) + flat_args, flat_results_traced
|
| 1068 |
+
)
|
| 1069 |
+
|
| 1070 |
+
new_graph = FlattenInputOutputSignature(
|
| 1071 |
+
graph,
|
| 1072 |
+
flat_args,
|
| 1073 |
+
matched_input_elements_positions,
|
| 1074 |
+
flat_results_traced,
|
| 1075 |
+
matched_output_elements_positions,
|
| 1076 |
+
example_fake_inputs,
|
| 1077 |
+
flat_args_dynamic_dims,
|
| 1078 |
+
fake_mode,
|
| 1079 |
+
).transform()
|
| 1080 |
+
|
| 1081 |
+
# Make dynamo graph to have same input/output spec as user code
|
| 1082 |
+
def argument_names(f_sig, args, kwargs) -> List[str]:
|
| 1083 |
+
def signature_to_fullargspec(sig: inspect.Signature):
|
| 1084 |
+
# Get a list of Parameter objects from the Signature object
|
| 1085 |
+
params = list(sig.parameters.values())
|
| 1086 |
+
# Separate positional arguments, keyword-only arguments and varargs/varkw
|
| 1087 |
+
args = [
|
| 1088 |
+
p.name
|
| 1089 |
+
for p in params
|
| 1090 |
+
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
|
| 1091 |
+
]
|
| 1092 |
+
kwonlyargs = [
|
| 1093 |
+
p.name for p in params if p.kind == inspect.Parameter.KEYWORD_ONLY
|
| 1094 |
+
]
|
| 1095 |
+
varargs = next(
|
| 1096 |
+
(p.name for p in params if p.kind == inspect.Parameter.VAR_POSITIONAL),
|
| 1097 |
+
None,
|
| 1098 |
+
)
|
| 1099 |
+
varkw = next(
|
| 1100 |
+
(p.name for p in params if p.kind == inspect.Parameter.VAR_KEYWORD),
|
| 1101 |
+
None,
|
| 1102 |
+
)
|
| 1103 |
+
# Get default values for positional arguments and keyword-only arguments
|
| 1104 |
+
defaults = tuple(
|
| 1105 |
+
p.default
|
| 1106 |
+
for p in params
|
| 1107 |
+
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
|
| 1108 |
+
and p.default is not inspect.Parameter.empty
|
| 1109 |
+
)
|
| 1110 |
+
kwonlydefaults = {
|
| 1111 |
+
p.name: p.default
|
| 1112 |
+
for p in params
|
| 1113 |
+
if p.kind == inspect.Parameter.KEYWORD_ONLY
|
| 1114 |
+
and p.default is not inspect.Parameter.empty
|
| 1115 |
+
}
|
| 1116 |
+
# Get annotations for parameters and return value
|
| 1117 |
+
annotations = {}
|
| 1118 |
+
if sig.return_annotation:
|
| 1119 |
+
annotations = {"return": sig.return_annotation}
|
| 1120 |
+
for parameter in params:
|
| 1121 |
+
annotations[parameter.name] = parameter.annotation
|
| 1122 |
+
# Return a FullArgSpec object with the extracted attributes
|
| 1123 |
+
return inspect.FullArgSpec(
|
| 1124 |
+
args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations
|
| 1125 |
+
)
|
| 1126 |
+
|
| 1127 |
+
fullargspec = signature_to_fullargspec(f_sig)
|
| 1128 |
+
|
| 1129 |
+
# 1. Map `args` 1-to-1 to positional arguments in original signature.
|
| 1130 |
+
input_strs = fullargspec.args[: len(args)]
|
| 1131 |
+
|
| 1132 |
+
if len(args) > len(fullargspec.args):
|
| 1133 |
+
# 2. If there are more arguments left in `args`, they map to varargs in original
|
| 1134 |
+
# signature. Assign names as {varargs}_0, {varargs}_1, ...
|
| 1135 |
+
assert fullargspec.varargs is not None, "More arguments than expected"
|
| 1136 |
+
input_strs += [
|
| 1137 |
+
f"{fullargspec.varargs}_{i}"
|
| 1138 |
+
for i in range(0, len(args) - len(input_strs))
|
| 1139 |
+
]
|
| 1140 |
+
elif len(args) < len(fullargspec.args):
|
| 1141 |
+
# 3. If there are fewer arguments in `args` than `fullargspec.args`,
|
| 1142 |
+
# it implies these are arguments either with default values, or provided in
|
| 1143 |
+
# `kwargs`. The former can be safely ignored. Because Dynamo.export does not
|
| 1144 |
+
# export them as part of the function signature. The latter will be handled
|
| 1145 |
+
# in the next step.
|
| 1146 |
+
for unprovided_arg in fullargspec.args[
|
| 1147 |
+
len(args) : -len(fullargspec.defaults or [])
|
| 1148 |
+
]:
|
| 1149 |
+
assert unprovided_arg in kwargs, f"Missing argument {unprovided_arg}"
|
| 1150 |
+
|
| 1151 |
+
# 4. Keyword arguments provided in `kwargs`.
|
| 1152 |
+
input_strs += list(kwargs.keys())
|
| 1153 |
+
|
| 1154 |
+
# 5. Keyword-only arguments with default values if not provided are not exported
|
| 1155 |
+
# as part of the function signature.
|
| 1156 |
+
for kwonly_arg in fullargspec.kwonlyargs:
|
| 1157 |
+
kwonlydefaults = fullargspec.kwonlydefaults or {}
|
| 1158 |
+
assert (
|
| 1159 |
+
kwonly_arg in kwargs or kwonly_arg in kwonlydefaults
|
| 1160 |
+
), f"Missing keyword only argument {kwonly_arg}"
|
| 1161 |
+
|
| 1162 |
+
return input_strs
|
| 1163 |
+
|
| 1164 |
+
new_graph.graph._codegen = _PyTreeCodeGen(
|
| 1165 |
+
_PyTreeInfo(
|
| 1166 |
+
argument_names(f_sig, orig_args, orig_kwargs),
|
| 1167 |
+
in_spec,
|
| 1168 |
+
out_spec_traced,
|
| 1169 |
+
)
|
| 1170 |
+
)
|
| 1171 |
+
new_graph.recompile()
|
| 1172 |
+
return new_graph
|
| 1173 |
+
|
| 1174 |
+
|
| 1175 |
+
def export(
|
| 1176 |
+
f: Callable[..., Any],
|
| 1177 |
+
*extra_args,
|
| 1178 |
+
aten_graph: bool = False,
|
| 1179 |
+
pre_dispatch: bool = False,
|
| 1180 |
+
decomposition_table: Optional[
|
| 1181 |
+
Dict[torch._ops.OpOverload, Callable[..., Any]]
|
| 1182 |
+
] = None,
|
| 1183 |
+
tracing_mode: str = "symbolic",
|
| 1184 |
+
dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any], List[Any]]] = None,
|
| 1185 |
+
assume_static_by_default: bool = False,
|
| 1186 |
+
same_signature: bool = True,
|
| 1187 |
+
disable_constraint_solver: bool = False,
|
| 1188 |
+
prefer_deferred_runtime_asserts_over_guards: bool = False,
|
| 1189 |
+
_allow_complex_guards_as_runtime_asserts: bool = False,
|
| 1190 |
+
_log_export_usage: bool = True,
|
| 1191 |
+
**extra_kwargs,
|
| 1192 |
+
) -> Callable[..., ExportResult]:
|
| 1193 |
+
"""
|
| 1194 |
+
Export an input function f to a format that can be executed outside of PyTorch using the FX graph.
|
| 1195 |
+
|
| 1196 |
+
Args:
|
| 1197 |
+
f (callable): A PyTorch function to be exported.
|
| 1198 |
+
|
| 1199 |
+
aten_graph (bool): If True, exports a graph with ATen operators.
|
| 1200 |
+
If False, exports a graph with Python operators. Default is False.
|
| 1201 |
+
|
| 1202 |
+
pre_dispatch (bool): If True, exports a graph with ATen operators,
|
| 1203 |
+
but before any logic in the PyTorch dispatcher has run.
|
| 1204 |
+
This can be useful if you want to apply further transformations on a graph before running it
|
| 1205 |
+
through autograd, autocast, or any other functionalities that are integrated into the dispatcher.
|
| 1206 |
+
This flag is only valid if aten_graph=True is set.
|
| 1207 |
+
Default is False.
|
| 1208 |
+
|
| 1209 |
+
decomposition_table (dict): A dictionary that maps operators to their decomposition functions.
|
| 1210 |
+
Required if aten_graph or tracing_mode is specified. Default is None.
|
| 1211 |
+
|
| 1212 |
+
tracing_mode (str): If "symbolic", turn on dynamic shapes support. Default is "symbolic".
|
| 1213 |
+
|
| 1214 |
+
dynamic_shapes:
|
| 1215 |
+
An optional argument where the type should either be:
|
| 1216 |
+
1) a dict from argument names of ``f`` to their dynamic shape specifications,
|
| 1217 |
+
2) a tuple that specifies dynamic shape specifications for each input in original order.
|
| 1218 |
+
If you are specifying dynamism on keyword args, you will need to pass them in the order that
|
| 1219 |
+
is defined in the original function signature.
|
| 1220 |
+
|
| 1221 |
+
The dynamic shape of a tensor argument can be specified as either
|
| 1222 |
+
(1) a dict from dynamic dimension indices to :func:`Dim` types, where it is
|
| 1223 |
+
not required to include static dimension indices in this dict, but when they are,
|
| 1224 |
+
they should be mapped to None; or (2) a tuple / list of :func:`Dim` types or None,
|
| 1225 |
+
where the :func:`Dim` types correspond to dynamic dimensions, and static dimensions
|
| 1226 |
+
are denoted by None. Arguments that are dicts or tuples / lists of tensors are
|
| 1227 |
+
recursively specified by using mappings or sequences of contained specifications.
|
| 1228 |
+
|
| 1229 |
+
same_signature (bool): If True, rewrite the returned graph's signature to be the same as f.
|
| 1230 |
+
|
| 1231 |
+
disable_constraint_solver (bool): Whether the dim constraint solver must be disabled.
|
| 1232 |
+
|
| 1233 |
+
Returns:
|
| 1234 |
+
A function that given args and kwargs, returns a tuple of (graph, guards)
|
| 1235 |
+
Graph: An FX graph representing the execution of the input PyTorch function with the provided arguments and options.
|
| 1236 |
+
Guards: The guards we accumulated during tracing f above
|
| 1237 |
+
|
| 1238 |
+
Raises:
|
| 1239 |
+
AssertionError: If decomposition_table is specified without setting aten_graph=True,
|
| 1240 |
+
or if graph breaks during tracing in export.
|
| 1241 |
+
|
| 1242 |
+
AssertionError: If Dynamo input and output is not consistent with traced input/output.
|
| 1243 |
+
|
| 1244 |
+
Note - this headerdoc was authored by ChatGPT, with slight modifications by the author.
|
| 1245 |
+
"""
|
| 1246 |
+
if _log_export_usage:
|
| 1247 |
+
log_export_usage(event="export.private_api", flags={"_dynamo"})
|
| 1248 |
+
|
| 1249 |
+
# Deal with "local variable referenced before assignment"
|
| 1250 |
+
_f = f
|
| 1251 |
+
_assume_static_by_default = assume_static_by_default
|
| 1252 |
+
|
| 1253 |
+
def inner(*args, **kwargs):
|
| 1254 |
+
constraints = _process_dynamic_shapes(_f, args, kwargs, dynamic_shapes)
|
| 1255 |
+
f = _f
|
| 1256 |
+
assume_static_by_default = _assume_static_by_default
|
| 1257 |
+
check_if_dynamo_supported()
|
| 1258 |
+
torch._C._log_api_usage_once("torch._dynamo.export")
|
| 1259 |
+
if decomposition_table is not None:
|
| 1260 |
+
assert (
|
| 1261 |
+
aten_graph
|
| 1262 |
+
), "Specifying a decomposition_table table or tracing mode is illegal without setting aten_graph=True"
|
| 1263 |
+
if pre_dispatch:
|
| 1264 |
+
assert aten_graph, "pre_dispatch=True can only be used when aten_graph=True"
|
| 1265 |
+
f = innermost_fn(f)
|
| 1266 |
+
call_to_inspect = f.forward if isinstance(f, torch.nn.Module) else f
|
| 1267 |
+
original_signature = inspect.signature(call_to_inspect)
|
| 1268 |
+
graph = None
|
| 1269 |
+
out_guards = None
|
| 1270 |
+
graph_captured_input = None
|
| 1271 |
+
graph_captured_result: Optional[Tuple[torch.Tensor, ...]] = None
|
| 1272 |
+
fake_mode = None
|
| 1273 |
+
|
| 1274 |
+
def guard_export_print(guards: _guards.GuardsSet):
|
| 1275 |
+
nonlocal out_guards
|
| 1276 |
+
assert (
|
| 1277 |
+
out_guards is None
|
| 1278 |
+
), "whole graph export entails exactly one guard export"
|
| 1279 |
+
out_guards = guards
|
| 1280 |
+
|
| 1281 |
+
example_inputs = []
|
| 1282 |
+
|
| 1283 |
+
def dynamo_normalization_capturing_compiler(
|
| 1284 |
+
gm: torch.fx.GraphModule, inner_example_inputs
|
| 1285 |
+
):
|
| 1286 |
+
nonlocal graph
|
| 1287 |
+
assert (
|
| 1288 |
+
graph is None
|
| 1289 |
+
), "Tried to emit a second graph during export. Tracing through 'f' must produce a single graph."
|
| 1290 |
+
graph = gm
|
| 1291 |
+
|
| 1292 |
+
nonlocal fake_mode, example_inputs
|
| 1293 |
+
# NB: do NOT pass inner_example_inputs here, we are detecting the
|
| 1294 |
+
# Dynamo allocated fake mode, which should be DISTINCT from a
|
| 1295 |
+
# potential outer ambient fake mode which the user provided.
|
| 1296 |
+
# example_inputs is always the user specified inputs, so they
|
| 1297 |
+
# would have the wrong fake mode attached to them
|
| 1298 |
+
fake_mode = _guards.detect_fake_mode()
|
| 1299 |
+
example_inputs = inner_example_inputs
|
| 1300 |
+
|
| 1301 |
+
def result_capturing_wrapper(*graph_inputs):
|
| 1302 |
+
nonlocal graph_captured_result
|
| 1303 |
+
nonlocal graph_captured_input
|
| 1304 |
+
|
| 1305 |
+
graph_captured_input = graph_inputs
|
| 1306 |
+
assert graph is not None
|
| 1307 |
+
|
| 1308 |
+
named_parameters = dict(graph.named_parameters(remove_duplicate=False))
|
| 1309 |
+
named_buffers = dict(graph.named_buffers(remove_duplicate=False))
|
| 1310 |
+
|
| 1311 |
+
ambient_fake_mode = (
|
| 1312 |
+
_guards.detect_fake_mode(graph_inputs)
|
| 1313 |
+
if _guards.detect_fake_mode(graph_inputs) is not None
|
| 1314 |
+
else fake_mode
|
| 1315 |
+
)
|
| 1316 |
+
|
| 1317 |
+
# We reran fake tensor propagation, but we didn't do
|
| 1318 |
+
# anything with the resulting unbacked SymInts. Drop them
|
| 1319 |
+
# from the pending list.
|
| 1320 |
+
# NB: this is wrong if graph_captured_result has
|
| 1321 |
+
# data-dependent output size!
|
| 1322 |
+
ignore_fresh_unbacked = null_context()
|
| 1323 |
+
if shape_env := ambient_fake_mode.shape_env:
|
| 1324 |
+
ignore_fresh_unbacked = shape_env.ignore_fresh_unbacked_symbols()
|
| 1325 |
+
|
| 1326 |
+
with (
|
| 1327 |
+
ambient_fake_mode
|
| 1328 |
+
), enable_python_dispatcher(), ignore_fresh_unbacked:
|
| 1329 |
+
params_and_buffers = {
|
| 1330 |
+
**named_parameters,
|
| 1331 |
+
**named_buffers,
|
| 1332 |
+
}
|
| 1333 |
+
fake_params_buffers = dict()
|
| 1334 |
+
|
| 1335 |
+
for name, value in params_and_buffers.items():
|
| 1336 |
+
fake_params_buffers[name] = ambient_fake_mode.from_tensor(
|
| 1337 |
+
value, static_shapes=True
|
| 1338 |
+
)
|
| 1339 |
+
|
| 1340 |
+
fake_graph_inputs = pytree.tree_map(
|
| 1341 |
+
ambient_fake_mode.from_tensor, graph_inputs
|
| 1342 |
+
)
|
| 1343 |
+
graph_captured_result = torch.func.functional_call(
|
| 1344 |
+
graph, fake_params_buffers, fake_graph_inputs
|
| 1345 |
+
)
|
| 1346 |
+
|
| 1347 |
+
return graph_captured_result
|
| 1348 |
+
|
| 1349 |
+
return result_capturing_wrapper
|
| 1350 |
+
|
| 1351 |
+
# Note: This is needed by rewrite_signature. We need to put it before
|
| 1352 |
+
# optimize_assert since user program may mutate the inputs.
|
| 1353 |
+
flat_args, in_spec = pytree.tree_flatten((args, kwargs))
|
| 1354 |
+
|
| 1355 |
+
remove_from_cache(f)
|
| 1356 |
+
constraint_violation_error = None
|
| 1357 |
+
if tracing_mode != "symbolic":
|
| 1358 |
+
assume_static_by_default = True
|
| 1359 |
+
with config.patch(
|
| 1360 |
+
specialize_int=True,
|
| 1361 |
+
assume_static_by_default=assume_static_by_default,
|
| 1362 |
+
automatic_dynamic_shapes=False,
|
| 1363 |
+
capture_dynamic_output_shape_ops=True,
|
| 1364 |
+
capture_scalar_outputs=True,
|
| 1365 |
+
prefer_deferred_runtime_asserts_over_guards=prefer_deferred_runtime_asserts_over_guards,
|
| 1366 |
+
_allow_complex_guards_as_runtime_asserts=_allow_complex_guards_as_runtime_asserts,
|
| 1367 |
+
):
|
| 1368 |
+
opt_f = optimize_assert(
|
| 1369 |
+
dynamo_normalization_capturing_compiler,
|
| 1370 |
+
hooks=Hooks(
|
| 1371 |
+
guard_export_fn=guard_export_print,
|
| 1372 |
+
guard_fail_fn=None,
|
| 1373 |
+
),
|
| 1374 |
+
export=True,
|
| 1375 |
+
export_constraints=constraints,
|
| 1376 |
+
)(f)
|
| 1377 |
+
# TODO(voz): We may have instances of `f` that mutate inputs, we should track sideeffects and reject.
|
| 1378 |
+
try:
|
| 1379 |
+
result_traced = opt_f(*args, **kwargs)
|
| 1380 |
+
except ConstraintViolationError as e:
|
| 1381 |
+
constraint_violation_error = e
|
| 1382 |
+
remove_from_cache(f)
|
| 1383 |
+
|
| 1384 |
+
if (
|
| 1385 |
+
not disable_constraint_solver
|
| 1386 |
+
and (shape_env := getattr(fake_mode, "shape_env", None)) is not None
|
| 1387 |
+
and (dim_constraints := shape_env.dim_constraints) is not None
|
| 1388 |
+
and not isinstance(
|
| 1389 |
+
call_to_inspect, (torch._ops.OpOverloadPacket, torch._ops.OpOverload)
|
| 1390 |
+
)
|
| 1391 |
+
and not trace_rules.check(call_to_inspect)
|
| 1392 |
+
):
|
| 1393 |
+
dim_constraints.solve()
|
| 1394 |
+
dim_constraints.remove_redundant_dynamic_results()
|
| 1395 |
+
forced_specializations = dim_constraints.forced_specializations()
|
| 1396 |
+
msg = dim_constraints.prettify_results(
|
| 1397 |
+
original_signature,
|
| 1398 |
+
dynamic_shapes,
|
| 1399 |
+
constraint_violation_error,
|
| 1400 |
+
forced_specializations,
|
| 1401 |
+
)
|
| 1402 |
+
if constraint_violation_error:
|
| 1403 |
+
constraint_violation_error.args = (
|
| 1404 |
+
constraint_violation_error.args[0] + msg,
|
| 1405 |
+
)
|
| 1406 |
+
else:
|
| 1407 |
+
if forced_specializations:
|
| 1408 |
+
constraint_violation_error = ConstraintViolationError(msg)
|
| 1409 |
+
else:
|
| 1410 |
+
log.info(
|
| 1411 |
+
"Summary of dimension constraints:%s",
|
| 1412 |
+
msg,
|
| 1413 |
+
)
|
| 1414 |
+
|
| 1415 |
+
# Error if we have any constraints on static values
|
| 1416 |
+
for k in shape_env.var_to_range.keys():
|
| 1417 |
+
if isinstance(k, sympy.Integer):
|
| 1418 |
+
constraint_violation_error = ConstraintViolationError(
|
| 1419 |
+
f"{''.join(traceback.format_list(shape_env.var_to_stack[k]))}\n"
|
| 1420 |
+
"It appears that you're trying to set a constraint on a "
|
| 1421 |
+
f"value which we evaluated to have a static value of {k}. "
|
| 1422 |
+
'Set TORCH_LOGS="+export" for more information.'
|
| 1423 |
+
)
|
| 1424 |
+
if constraint_violation_error:
|
| 1425 |
+
raise constraint_violation_error
|
| 1426 |
+
|
| 1427 |
+
assert (
|
| 1428 |
+
graph is not None
|
| 1429 |
+
), "Failed to produce a graph during tracing as no tensor operations were found."
|
| 1430 |
+
assert hasattr(graph, "_source_to_user_stacks")
|
| 1431 |
+
assert out_guards is not None, "Failed to produce guards during tracing"
|
| 1432 |
+
assert fake_mode is not None
|
| 1433 |
+
|
| 1434 |
+
log.info(
|
| 1435 |
+
"Dynamo captured graph:\n\n%s", graph.print_readable(print_output=False)
|
| 1436 |
+
)
|
| 1437 |
+
|
| 1438 |
+
# This check need to happened before aten_graph
|
| 1439 |
+
# because placeholder's _source_node attribute is not preserved by make_fx
|
| 1440 |
+
if same_signature:
|
| 1441 |
+
check_signature_rewritable(graph)
|
| 1442 |
+
|
| 1443 |
+
# NB: This is mostly hitting the cache; Dynamo already converted these
|
| 1444 |
+
example_fake_inputs = [fake_mode.from_tensor(t) for t in example_inputs]
|
| 1445 |
+
|
| 1446 |
+
if aten_graph:
|
| 1447 |
+
# Running graph with interpreter is needed for propagating the stack_trace
|
| 1448 |
+
def graph_with_interpreter(*args):
|
| 1449 |
+
with torch.fx.traceback.preserve_node_meta():
|
| 1450 |
+
return torch.fx.Interpreter(graph).run(*args)
|
| 1451 |
+
|
| 1452 |
+
with maybe_disable_fake_tensor_mode(), enable_python_dispatcher(), (
|
| 1453 |
+
fake_mode
|
| 1454 |
+
):
|
| 1455 |
+
try:
|
| 1456 |
+
graph = make_fx(
|
| 1457 |
+
graph_with_interpreter,
|
| 1458 |
+
decomposition_table=decomposition_table,
|
| 1459 |
+
tracing_mode="real",
|
| 1460 |
+
_allow_non_fake_inputs=True,
|
| 1461 |
+
pre_dispatch=pre_dispatch,
|
| 1462 |
+
_allow_fake_constant=False,
|
| 1463 |
+
)(*example_fake_inputs)
|
| 1464 |
+
except CondOpArgsMismatchError as e:
|
| 1465 |
+
# Wrap the internal error to the user-facing error
|
| 1466 |
+
raise UserError( # noqa: B904
|
| 1467 |
+
UserErrorType.DYNAMIC_CONTROL_FLOW,
|
| 1468 |
+
str(e),
|
| 1469 |
+
case_name="cond_operands",
|
| 1470 |
+
)
|
| 1471 |
+
|
| 1472 |
+
assert graph is not None
|
| 1473 |
+
for node in graph.graph.find_nodes(op="get_attr"):
|
| 1474 |
+
if isinstance(getattr(graph, node.target), torch.Tensor):
|
| 1475 |
+
node.meta["val"] = fake_mode.from_tensor(
|
| 1476 |
+
getattr(graph, node.target), static_shapes=True
|
| 1477 |
+
)
|
| 1478 |
+
|
| 1479 |
+
if same_signature:
|
| 1480 |
+
flat_args_dynamic_dims = [
|
| 1481 |
+
{c.dim for c in (constraints or ()) if c.w_tensor() is x}
|
| 1482 |
+
for x in flat_args
|
| 1483 |
+
]
|
| 1484 |
+
graph = rewrite_signature(
|
| 1485 |
+
original_signature,
|
| 1486 |
+
graph,
|
| 1487 |
+
fake_mode,
|
| 1488 |
+
flat_args,
|
| 1489 |
+
in_spec,
|
| 1490 |
+
example_fake_inputs,
|
| 1491 |
+
graph_captured_input,
|
| 1492 |
+
graph_captured_result,
|
| 1493 |
+
result_traced, # type: ignore[possibly-undefined]
|
| 1494 |
+
flat_args_dynamic_dims,
|
| 1495 |
+
)
|
| 1496 |
+
# Store constraints and inputs as metadata for user passes, e.g. turn constraints to runtime check
|
| 1497 |
+
assert graph is not None
|
| 1498 |
+
graph.meta["input_shape_constraints"] = (
|
| 1499 |
+
[constraint.serializable_spec for constraint in constraints]
|
| 1500 |
+
if constraints
|
| 1501 |
+
else []
|
| 1502 |
+
)
|
| 1503 |
+
|
| 1504 |
+
return ExportResult(graph, out_guards)
|
| 1505 |
+
|
| 1506 |
+
if extra_args or extra_kwargs:
|
| 1507 |
+
warnings.warn(
|
| 1508 |
+
"export(f, *args, **kwargs) is deprecated, use export(f)(*args, **kwargs) instead. "
|
| 1509 |
+
"If you don't migrate, we may break your export call in the future if your user defined kwargs "
|
| 1510 |
+
"conflict with future kwargs added to export(f).",
|
| 1511 |
+
FutureWarning,
|
| 1512 |
+
stacklevel=2,
|
| 1513 |
+
)
|
| 1514 |
+
return inner(*extra_args, **extra_kwargs)
|
| 1515 |
+
else:
|
| 1516 |
+
return inner
|
| 1517 |
+
|
| 1518 |
+
|
| 1519 |
+
def optimize_assert(
|
| 1520 |
+
backend,
|
| 1521 |
+
*,
|
| 1522 |
+
hooks=Hooks(None, None),
|
| 1523 |
+
export=False,
|
| 1524 |
+
export_constraints=None,
|
| 1525 |
+
dynamic=None,
|
| 1526 |
+
rebuild_ctx=None,
|
| 1527 |
+
):
|
| 1528 |
+
"""
|
| 1529 |
+
The same as `torch._dynamo.optimize(backend, nopython=True)`
|
| 1530 |
+
"""
|
| 1531 |
+
backend = get_compiler_fn(backend)
|
| 1532 |
+
|
| 1533 |
+
# Find if backend has any extra context manager
|
| 1534 |
+
backend_ctx_ctor = getattr(backend, "backend_ctx_ctor", null_context)
|
| 1535 |
+
|
| 1536 |
+
return _optimize_catch_errors(
|
| 1537 |
+
convert_frame.convert_frame_assert(
|
| 1538 |
+
backend, export=export, export_constraints=export_constraints
|
| 1539 |
+
),
|
| 1540 |
+
hooks,
|
| 1541 |
+
backend_ctx_ctor,
|
| 1542 |
+
export=export,
|
| 1543 |
+
dynamic=dynamic,
|
| 1544 |
+
rebuild_ctx=rebuild_ctx,
|
| 1545 |
+
)
|
| 1546 |
+
|
| 1547 |
+
|
| 1548 |
+
class TorchPatcher:
|
| 1549 |
+
@staticmethod
|
| 1550 |
+
@functools.lru_cache(None)
|
| 1551 |
+
def patch():
|
| 1552 |
+
# A better way to disable the following would be decorate the source
|
| 1553 |
+
# functions with @torch._disable_dynamo. However, this causes issues
|
| 1554 |
+
# with torch.deploy internally.
|
| 1555 |
+
from .decorators import disable
|
| 1556 |
+
|
| 1557 |
+
torch.jit.trace = disable(torch.jit.trace)
|
| 1558 |
+
torch.jit.trace_module = disable(torch.jit.trace_module)
|
| 1559 |
+
torch.jit._get_trace_graph = disable(torch.jit._get_trace_graph)
|
| 1560 |
+
torch.fx._symbolic_trace.Tracer.trace = disable(
|
| 1561 |
+
torch.fx._symbolic_trace.Tracer.trace
|
| 1562 |
+
)
|
| 1563 |
+
torch.distributions.Distribution.set_default_validate_args(False)
|
| 1564 |
+
|
| 1565 |
+
from ..optim import (
|
| 1566 |
+
adadelta,
|
| 1567 |
+
adagrad,
|
| 1568 |
+
adam,
|
| 1569 |
+
adamax,
|
| 1570 |
+
adamw,
|
| 1571 |
+
asgd,
|
| 1572 |
+
lbfgs,
|
| 1573 |
+
nadam,
|
| 1574 |
+
radam,
|
| 1575 |
+
rmsprop,
|
| 1576 |
+
rprop,
|
| 1577 |
+
sgd,
|
| 1578 |
+
sparse_adam,
|
| 1579 |
+
)
|
| 1580 |
+
|
| 1581 |
+
optimizer_modules = {
|
| 1582 |
+
adadelta,
|
| 1583 |
+
adagrad,
|
| 1584 |
+
adam,
|
| 1585 |
+
adamax,
|
| 1586 |
+
adamw,
|
| 1587 |
+
asgd,
|
| 1588 |
+
lbfgs,
|
| 1589 |
+
nadam,
|
| 1590 |
+
radam,
|
| 1591 |
+
rmsprop,
|
| 1592 |
+
rprop,
|
| 1593 |
+
sgd,
|
| 1594 |
+
sparse_adam,
|
| 1595 |
+
}
|
| 1596 |
+
|
| 1597 |
+
for opt_mod in optimizer_modules:
|
| 1598 |
+
opt_name = opt_mod.__name__.split(".")[-1]
|
| 1599 |
+
fused_fn_name = f"_fused_{opt_name}"
|
| 1600 |
+
single_tensor_fn_name = f"_single_tensor_{opt_name}"
|
| 1601 |
+
|
| 1602 |
+
if hasattr(opt_mod, fused_fn_name):
|
| 1603 |
+
setattr(
|
| 1604 |
+
opt_mod, fused_fn_name, disable(getattr(opt_mod, fused_fn_name))
|
| 1605 |
+
)
|
| 1606 |
+
|
| 1607 |
+
optimizer_classes = [
|
| 1608 |
+
opt
|
| 1609 |
+
for opt in torch.optim.__dict__.values()
|
| 1610 |
+
if inspect.isclass(opt) and issubclass(opt, torch.optim.Optimizer)
|
| 1611 |
+
]
|
| 1612 |
+
|
| 1613 |
+
# Note: we don't support sparsity or tracing through backwards
|
| 1614 |
+
excluded_optimizer_classes = {
|
| 1615 |
+
torch.optim.SparseAdam,
|
| 1616 |
+
torch.optim.LBFGS,
|
| 1617 |
+
}
|
| 1618 |
+
|
| 1619 |
+
for opt in optimizer_classes:
|
| 1620 |
+
if opt in excluded_optimizer_classes:
|
| 1621 |
+
opt.step = disable(opt.step)
|
| 1622 |
+
|
| 1623 |
+
if hasattr(opt, "_init_group"):
|
| 1624 |
+
opt._init_group = disable(opt._init_group)
|
| 1625 |
+
|
| 1626 |
+
@staticmethod
|
| 1627 |
+
def suppress_torch_distributed_warnings(fn):
|
| 1628 |
+
def inner_fn(*args, **kwargs):
|
| 1629 |
+
warnings.filterwarnings(
|
| 1630 |
+
"ignore", category=UserWarning, module="torch.distributed"
|
| 1631 |
+
)
|
| 1632 |
+
return fn(*args, **kwargs)
|
| 1633 |
+
|
| 1634 |
+
return inner_fn
|
valley/lib/python3.10/site-packages/torch/_dynamo/repro/__init__.py
ADDED
|
File without changes
|
valley/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (171 Bytes). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_aot.cpython-310.pyc
ADDED
|
Binary file (24.7 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_dynamo.cpython-310.pyc
ADDED
|
Binary file (13.7 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/repro/after_aot.py
ADDED
|
@@ -0,0 +1,958 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import argparse
|
| 3 |
+
import copy
|
| 4 |
+
import functools
|
| 5 |
+
import io
|
| 6 |
+
import logging
|
| 7 |
+
import os
|
| 8 |
+
import shutil
|
| 9 |
+
import subprocess
|
| 10 |
+
import sys
|
| 11 |
+
import textwrap
|
| 12 |
+
import uuid
|
| 13 |
+
from importlib import import_module
|
| 14 |
+
from tempfile import TemporaryFile
|
| 15 |
+
from typing import Any, Callable, Dict, Union
|
| 16 |
+
|
| 17 |
+
import torch
|
| 18 |
+
import torch.fx as fx
|
| 19 |
+
import torch.nn as nn
|
| 20 |
+
from torch._dynamo.debug_utils import (
|
| 21 |
+
_cuda_system_info_comment,
|
| 22 |
+
AccuracyError,
|
| 23 |
+
backend_accuracy_fails,
|
| 24 |
+
BuckTargetWriter,
|
| 25 |
+
cast_to_fp64,
|
| 26 |
+
extra_imports,
|
| 27 |
+
generate_config_string,
|
| 28 |
+
helper_for_dump_minify,
|
| 29 |
+
InputReader,
|
| 30 |
+
InputWriter,
|
| 31 |
+
MAX_CONSTANT_NUMEL_INLINE,
|
| 32 |
+
minifier_dir,
|
| 33 |
+
NNModuleToString,
|
| 34 |
+
NopInputReader,
|
| 35 |
+
same_two_models,
|
| 36 |
+
)
|
| 37 |
+
from torch._dynamo.utils import clone_inputs, counters, same
|
| 38 |
+
from torch.fx.experimental.proxy_tensor import make_fx
|
| 39 |
+
from torch.fx.experimental.symbolic_shapes import (
|
| 40 |
+
fx_placeholder_targets,
|
| 41 |
+
has_free_symbols,
|
| 42 |
+
)
|
| 43 |
+
from torch.hub import tqdm
|
| 44 |
+
|
| 45 |
+
from .. import config
|
| 46 |
+
|
| 47 |
+
log = logging.getLogger(__name__)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
inductor_config = import_module("torch._inductor.config")
|
| 51 |
+
use_buck = inductor_config.is_fbcode()
|
| 52 |
+
|
| 53 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 54 |
+
# MAIN ENTRY POINT
|
| 55 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def wrap_compiler_debug(unconfigured_compiler_fn, compiler_name: str):
|
| 59 |
+
"""
|
| 60 |
+
Minifier for Fx Graph modules after Aot Autograd has finished. We wrap both
|
| 61 |
+
forward and backward call separately with the backend compiler_fn - like
|
| 62 |
+
inductor or nvfuser. Intercepting after Aot Autograd presents neat
|
| 63 |
+
abstraction, where all the params are lifted as graph inputs, making it easy
|
| 64 |
+
to save the graph as a string.
|
| 65 |
+
"""
|
| 66 |
+
|
| 67 |
+
@functools.wraps(unconfigured_compiler_fn)
|
| 68 |
+
def debug_wrapper(gm, example_inputs, **kwargs):
|
| 69 |
+
from torch._subclasses import FakeTensorMode
|
| 70 |
+
|
| 71 |
+
compiler_fn = functools.partial(unconfigured_compiler_fn, **kwargs)
|
| 72 |
+
|
| 73 |
+
from torch._functorch.aot_autograd import get_aot_graph_name
|
| 74 |
+
|
| 75 |
+
graph_name = get_aot_graph_name()
|
| 76 |
+
|
| 77 |
+
# TODO: why do we need to deepcopy the original graph?
|
| 78 |
+
orig_graph = copy.deepcopy(gm.graph)
|
| 79 |
+
assert config.repro_after in ("dynamo", "aot", None)
|
| 80 |
+
|
| 81 |
+
try:
|
| 82 |
+
# Call the compiler_fn - which is either aot_autograd or inductor
|
| 83 |
+
# with fake inputs
|
| 84 |
+
inner_compiled_fn = compiler_fn(gm, example_inputs)
|
| 85 |
+
except Exception as e:
|
| 86 |
+
# TODO: Failures here are troublesome because no real inputs,
|
| 87 |
+
# need a different serialization strategy
|
| 88 |
+
if config.repro_after == "aot":
|
| 89 |
+
if config.repro_level == 1:
|
| 90 |
+
dump_compiler_graph_state(
|
| 91 |
+
fx.GraphModule(gm, orig_graph),
|
| 92 |
+
example_inputs,
|
| 93 |
+
compiler_name,
|
| 94 |
+
)
|
| 95 |
+
elif config.repro_level == 2:
|
| 96 |
+
dump_to_minify(
|
| 97 |
+
fx.GraphModule(gm, orig_graph),
|
| 98 |
+
example_inputs,
|
| 99 |
+
compiler_name,
|
| 100 |
+
)
|
| 101 |
+
log.error("CompilerError")
|
| 102 |
+
raise
|
| 103 |
+
|
| 104 |
+
# We may run regular PyTorch compute that may trigger Dynamo, do NOT
|
| 105 |
+
# recursively attempt to accuracy minify in that case!
|
| 106 |
+
def deferred_for_real_inputs(real_inputs):
|
| 107 |
+
# This is a bit obscure: if we recursively try to accuracy minify
|
| 108 |
+
# the SAME function, this would trigger. But most of the time
|
| 109 |
+
# we should never hit this branch
|
| 110 |
+
if config.repro_after != "aot":
|
| 111 |
+
return inner_compiled_fn(real_inputs)
|
| 112 |
+
with config.patch(repro_after=None):
|
| 113 |
+
return inner_debug_fn(real_inputs)
|
| 114 |
+
|
| 115 |
+
def inner_debug_fn(real_inputs):
|
| 116 |
+
"""
|
| 117 |
+
Aot Autograd fw_compiler and bw_compiler can have fake tensors. So,
|
| 118 |
+
example_inputs can be fake tensors. We can call compiler_fn (which is
|
| 119 |
+
inductor or nvfuser) with fake tensors but the actually compiled_fn
|
| 120 |
+
should be called with real tensors. Therefore, the actual invocation
|
| 121 |
+
is deferred.
|
| 122 |
+
"""
|
| 123 |
+
# Copy the tensor attrs like shape, stride etc by converting to Fake Tensor
|
| 124 |
+
# because inductor clears the tensor list in its codegen. And example_inputs
|
| 125 |
+
# are available only for the first invocation.
|
| 126 |
+
fake_mode = FakeTensorMode()
|
| 127 |
+
copy_tensor_attrs = [
|
| 128 |
+
fake_mode.from_tensor(x) if isinstance(x, torch.Tensor) else x
|
| 129 |
+
for x in real_inputs
|
| 130 |
+
]
|
| 131 |
+
if config.repro_level == 3:
|
| 132 |
+
# Always dump the original module in case we have segfaults
|
| 133 |
+
dump_to_minify(
|
| 134 |
+
fx.GraphModule(gm, orig_graph), real_inputs, compiler_name
|
| 135 |
+
)
|
| 136 |
+
|
| 137 |
+
if config.repro_level == 4:
|
| 138 |
+
if compiler_name != "inductor":
|
| 139 |
+
raise NotImplementedError(
|
| 140 |
+
"Accuracy minification is supported for inductor only"
|
| 141 |
+
)
|
| 142 |
+
failed = not same_two_models(
|
| 143 |
+
gm,
|
| 144 |
+
inner_compiled_fn,
|
| 145 |
+
real_inputs,
|
| 146 |
+
only_fwd=True,
|
| 147 |
+
ignore_non_fp=config.repro_ignore_non_fp,
|
| 148 |
+
)
|
| 149 |
+
|
| 150 |
+
if failed:
|
| 151 |
+
log.warning(
|
| 152 |
+
"Accuracy failed for the AOT Autograd graph %s", graph_name
|
| 153 |
+
)
|
| 154 |
+
dump_compiler_graph_state(
|
| 155 |
+
fx.GraphModule(gm, orig_graph),
|
| 156 |
+
real_inputs,
|
| 157 |
+
f"{compiler_name}_accuracy",
|
| 158 |
+
)
|
| 159 |
+
dump_to_minify(
|
| 160 |
+
fx.GraphModule(gm, orig_graph),
|
| 161 |
+
real_inputs,
|
| 162 |
+
f"{compiler_name}_accuracy",
|
| 163 |
+
)
|
| 164 |
+
raise AccuracyError("Bad accuracy detected")
|
| 165 |
+
else:
|
| 166 |
+
# Call the compiled function with real inputs
|
| 167 |
+
return inner_compiled_fn(real_inputs)
|
| 168 |
+
else:
|
| 169 |
+
try:
|
| 170 |
+
# Call the compiled function with real inputs
|
| 171 |
+
out = inner_compiled_fn(real_inputs)
|
| 172 |
+
# sync cuda kernels to ensure IMA detection
|
| 173 |
+
for arg in example_inputs:
|
| 174 |
+
if isinstance(arg, torch.Tensor) and arg.is_cuda:
|
| 175 |
+
torch.cuda.synchronize()
|
| 176 |
+
break
|
| 177 |
+
return out
|
| 178 |
+
except Exception as e:
|
| 179 |
+
if config.repro_level == 1:
|
| 180 |
+
dump_compiler_graph_state(
|
| 181 |
+
fx.GraphModule(gm, orig_graph),
|
| 182 |
+
copy_tensor_attrs,
|
| 183 |
+
compiler_name,
|
| 184 |
+
)
|
| 185 |
+
elif config.repro_level == 2:
|
| 186 |
+
dump_to_minify(
|
| 187 |
+
fx.GraphModule(gm, orig_graph),
|
| 188 |
+
copy_tensor_attrs,
|
| 189 |
+
compiler_name,
|
| 190 |
+
)
|
| 191 |
+
raise
|
| 192 |
+
|
| 193 |
+
if config.repro_after == "aot":
|
| 194 |
+
compiled_fn = deferred_for_real_inputs
|
| 195 |
+
compiled_fn._boxed_call = True # type: ignore[attr-defined]
|
| 196 |
+
return compiled_fn
|
| 197 |
+
else:
|
| 198 |
+
return inner_compiled_fn
|
| 199 |
+
|
| 200 |
+
return debug_wrapper
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 204 |
+
# DUMP REPROS
|
| 205 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
def generate_compiler_repro_string(gm, args, *, stable_output=False, save_dir=None):
|
| 209 |
+
model_str = textwrap.dedent(
|
| 210 |
+
f"""
|
| 211 |
+
import torch
|
| 212 |
+
from torch import tensor, device
|
| 213 |
+
import torch.fx as fx
|
| 214 |
+
from torch._dynamo.testing import rand_strided
|
| 215 |
+
from math import inf
|
| 216 |
+
import torch._inductor.inductor_prims
|
| 217 |
+
|
| 218 |
+
{generate_config_string(stable_output=stable_output)}
|
| 219 |
+
|
| 220 |
+
isolate_fails_code_str = None
|
| 221 |
+
|
| 222 |
+
{extra_imports}
|
| 223 |
+
|
| 224 |
+
"""
|
| 225 |
+
)
|
| 226 |
+
if not stable_output:
|
| 227 |
+
model_str += f"# torch version: {torch.version.__version__}\n"
|
| 228 |
+
if hasattr(torch.version, "cuda"):
|
| 229 |
+
model_str += f"# torch cuda version: {torch.version.cuda}\n"
|
| 230 |
+
if hasattr(torch.version, "git_version"):
|
| 231 |
+
model_str += f"# torch git version: {torch.version.git_version}\n\n\n"
|
| 232 |
+
model_str += _cuda_system_info_comment()
|
| 233 |
+
|
| 234 |
+
model_str += NNModuleToString.convert(gm)
|
| 235 |
+
|
| 236 |
+
# get hint shape/stride when dynamic shape enabled
|
| 237 |
+
def hint_if_symint(x):
|
| 238 |
+
return tuple(i.node.hint if isinstance(i, torch.SymInt) else i for i in x)
|
| 239 |
+
|
| 240 |
+
writer = InputWriter(save_dir)
|
| 241 |
+
for placeholder, arg in zip(fx_placeholder_targets(gm), args):
|
| 242 |
+
if isinstance(arg, (int, torch.SymInt)):
|
| 243 |
+
writer.symint(placeholder, arg)
|
| 244 |
+
elif isinstance(arg, torch.Tensor):
|
| 245 |
+
# TODO: improve these names with FQN
|
| 246 |
+
writer.tensor(placeholder, arg)
|
| 247 |
+
else:
|
| 248 |
+
raise TypeError(f"arg is neither SymInt/int nor torch.Tensor, {arg}")
|
| 249 |
+
|
| 250 |
+
model_str += "\n".join(writer.lines()) + "\n"
|
| 251 |
+
|
| 252 |
+
model_str += "mod = Repro()\n"
|
| 253 |
+
return model_str
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
def save_graph_repro(
    fd,
    gm,
    args,
    compiler_name,
    *,
    stable_output=False,
    save_dir=None,
    command="run",
    accuracy=None,
    tracing_mode=None,
    check_str=None,
):
    """Write a runnable repro script for ``gm`` to file object ``fd``.

    Emits the module/inputs (via generate_compiler_repro_string) followed by a
    ``__main__`` stanza that invokes ``run_repro`` with the given settings.

    Args:
        fd: writable text file object.
        gm, args: graph module and its flat inputs.
        compiler_name: used to infer ``accuracy`` when it is None
            (any name containing "_accuracy" enables accuracy checking).
        stable_output / save_dir: forwarded to the string generator.
        command: default subcommand the generated script runs.
        accuracy / tracing_mode / check_str: run_repro knobs; tracing_mode
            defaults to "real", upgraded to "symbolic" when any input carries
            free symbols.
    """
    fd.write(
        generate_compiler_repro_string(
            gm,
            args,
            stable_output=stable_output,
            save_dir=save_dir,
        )
    )
    if accuracy is None:
        accuracy = "_accuracy" in compiler_name
    if tracing_mode is None:
        tracing_mode = "real"
        if any(has_free_symbols(a) for a in args):
            tracing_mode = "symbolic"
    fd.write("if __name__ == '__main__':\n")
    fd.write("    from torch._dynamo.repro.after_aot import run_repro\n")
    fd.write(
        f"    with torch.no_grad():\n"
        f"        run_repro(mod, load_args, accuracy={accuracy!r}, command={command!r}, "
        f"save_dir={save_dir!r}, tracing_mode={tracing_mode!r}, check_str={check_str!r})\n"
        f"        # To run it separately, do \n"
        f"        # mod, args = run_repro(mod, load_args, accuracy={accuracy!r}, command='get_args', "
        f"save_dir={save_dir!r}, tracing_mode={tracing_mode!r}, check_str={check_str!r})\n"
        f"        # mod(*args)"
    )
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
def dump_compiler_graph_state(gm, args, compiler_name, *, accuracy=None):
    """Checkpoint a repro script for ``gm`` under the minifier directory.

    The file is named by the node count of the graph, and a convenience copy
    is placed at ``./repro.py`` (best effort; permission errors are logged,
    not raised). Under buck, a buck target is also written.
    """
    subdir = os.path.join(minifier_dir(), "checkpoints")
    os.makedirs(subdir, exist_ok=True)
    file_name = os.path.join(subdir, f"{len(gm.graph.nodes)}.py")
    log.warning(
        "Writing checkpoint with %s nodes to %s", len(gm.graph.nodes), file_name
    )
    with open(file_name, "w") as fd:
        save_graph_repro(
            fd, gm, args, compiler_name, save_dir=subdir, accuracy=accuracy
        )
    repro_path = os.path.join(os.getcwd(), "repro.py")
    try:
        shutil.copyfile(file_name, repro_path)
        log.warning("Copying repro file for convenience to %s", repro_path)
        if use_buck:
            BuckTargetWriter(file_name).write()
    except OSError:
        # Best effort: CWD may be read-only; the checkpoint itself is saved.
        log.warning("No write permissions for %s", repro_path)
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 321 |
+
# DUMP MINIFIER
|
| 322 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
def dump_to_minify(gm, args, compiler_name: str):
    """Generate a repro script whose default command is ``minify`` and hand it
    to ``helper_for_dump_minify`` for writing/execution."""
    buf = io.StringIO()
    # TODO: factor this out
    subdir = os.path.join(minifier_dir(), "checkpoints")
    os.makedirs(subdir, exist_ok=True)
    save_graph_repro(buf, gm, args, compiler_name, save_dir=subdir, command="minify")
    return helper_for_dump_minify(buf.getvalue())
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
def isolate_fails(
    fx_g,
    args,
    compiler_name: str,
    env=None,
    save_dir=None,
    accuracy=None,
    tracing_mode=None,
    check_str=None,
):
    """Run the repro for ``fx_g`` in a fresh subprocess and report failure.

    A repro script (command="minifier-query") is written to ``./isolate/`` and
    executed in a child process whose environment is ``os.environ`` overlaid
    with ``env``. The child's stdout/stderr are echoed with a ``>> `` prefix.

    Returns:
        True iff the subprocess exited with a nonzero status (i.e. it failed).
    """
    if env is None:
        env = {}
    subdir = os.path.join(os.getcwd(), "isolate")
    os.makedirs(subdir, exist_ok=True)
    # Short random name; collisions across runs are acceptable for scratch files.
    file_name = os.path.join(subdir, f"{str(uuid.uuid4())[:5]}.py")
    with open(file_name, "w") as fd:
        save_graph_repro(
            fd,
            fx_g,
            args,
            compiler_name,
            save_dir=save_dir,
            command="minifier-query",
            accuracy=accuracy,
            tracing_mode=tracing_mode,
            check_str=check_str,
        )
    # with open(file_name, "r") as fd:
    #     print(fd.read())
    child_env = {**os.environ.copy(), **env}
    stdout, stderr = TemporaryFile(), TemporaryFile()

    if use_buck:
        cmd = BuckTargetWriter(file_name).write(print_msg=False)
    else:
        cmd = ["python", file_name]

    proc = subprocess.Popen(
        cmd,
        cwd=subdir,
        stdout=stdout,
        stderr=stderr,
        env=child_env,
    )
    proc.wait()

    stdout.seek(0)
    stderr.seek(0)
    print(
        textwrap.indent(stdout.read().decode("utf-8"), prefix=">> "), file=sys.stdout
    )
    print(
        textwrap.indent(stderr.read().decode("utf-8"), prefix=">> "), file=sys.stderr
    )
    # print(f"Isolated test failed - {file_name}")
    return proc.returncode != 0
|
| 393 |
+
|
| 394 |
+
|
| 395 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 396 |
+
# MINIFIER TOOLS
|
| 397 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
def inductor_fails(fx_g, args, check_str=None):
    """Return True iff compiling/running ``fx_g`` with inductor raises.

    First sanity-runs the graph eagerly; an eager failure (or a result that is
    not a flat tuple/list) returns False, since the minifier should only keep
    graphs that fail *in inductor*. When ``check_str`` is given, only
    exceptions whose repr contains it count as failures. CUDA is synchronized
    around compiled execution so device-side crashes surface here.
    """
    has_cuda = any(
        isinstance(arg, torch.Tensor) and arg.is_cuda for arg in args
    )

    def sync():
        if has_cuda:
            # Ensures that segfaults are surfaced
            torch.cuda.synchronize()

    from torch._inductor.compile_fx import compile_fx_inner

    try:
        result = fx_g(*args)
        assert isinstance(result, (tuple, list))
        assert not any(isinstance(x, (tuple, list)) for x in result)
    except Exception:
        # Graph is broken even without inductor: not an inductor failure.
        return False

    sync()

    try:
        compile_mod = compile_fx_inner(fx_g, args)
        compile_mod(args)
        sync()
    except Exception as e:
        if check_str is not None and check_str not in repr(e):
            return False
        print(repr(e))
        return True
    return False
|
| 433 |
+
|
| 434 |
+
|
| 435 |
+
def inductor_accuracy_fails(
    fx_g, args, check_str=None, *, require_fp64=False, ignore_non_fp=False
):
    """Accuracy-minifier predicate: True iff inductor's forward output for
    ``fx_g`` diverges from eager per ``backend_aot_accuracy_fails``.

    ``check_str`` is accepted for signature compatibility with the error-based
    predicate but is not used for accuracy comparison.
    """
    from torch._inductor.compile_fx import compile_fx_inner

    return backend_aot_accuracy_fails(
        fx_g,
        args,
        compile_fx_inner,
        require_fp64=require_fp64,
        ignore_non_fp=ignore_non_fp,
    )
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
# Forward-only variant of the generic accuracy predicate (AOT graphs carry no
# backward here, so comparison is restricted to the forward pass).
backend_aot_accuracy_fails = functools.partial(backend_accuracy_fails, only_fwd=True)
|
| 450 |
+
|
| 451 |
+
|
| 452 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 453 |
+
# REPRO MAIN
|
| 454 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
def repro_common(options, mod, load_args):
    """Shared setup for every repro subcommand.

    Validates the serialized module, loads the saved inputs (with a progress
    bar sized by a dry-run pass), retraces ``mod`` into a GraphModule via
    make_fx, and enables inductor intermediate hooks.

    Returns:
        (traced_mod, args) ready to be compiled/executed.
    """
    # Invariant for graphs we generate with the repro script
    assert not any(mod.named_parameters())
    for n, b in mod.named_buffers():
        if b.numel() > MAX_CONSTANT_NUMEL_INLINE:
            log.warning(
                "Constant %s was not serialized, generated random data instead. "
                "If you think this is affecting you, please comment on "
                "https://github.com/pytorch/pytorch/issues/100468",
                n,
            )

    if not hasattr(load_args, "_version"):
        log.warning(
            "load_args does not have a _version attribute, please file a bug to PyTorch "
            "and describe how you generate this repro script"
        )
    else:
        if load_args._version > 0:
            log.warning(
                "load_args is version %s, but this version of PyTorch only supports "
                "version 0. We will try to run it anyway but there may be an incompatibility; "
                "if so, try upgrading your version of PyTorch.",
                load_args._version,
            )

    # Dry run to learn the total input count, so the real load has a pbar.
    nop_reader = NopInputReader()
    load_args(nop_reader)

    with tqdm(desc="Loading inputs", total=nop_reader.total) as pbar:
        input_reader = InputReader(save_dir=options.save_dir, pbar=pbar)
        load_args(input_reader)
        args = input_reader.args

    # Turn mod into a GraphModule the slow way
    # TODO: speed this up
    mod = make_fx(mod, tracing_mode=options.tracing_mode)(*args)

    torch._inductor.config.generate_intermediate_hooks = True

    return mod, args
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
# Maps the --accuracy mode string to the failure predicate the minifier uses.
ACCURACY_FAILS: Dict[str, Callable[[nn.Module, Any], bool]] = {
    # No accuracy mode: fail only when inductor errors out.
    "": inductor_fails,
    # This might look inverted but it's not.  strict_accuracy means "we will
    # minify any time we see anything that diverges", whereas accuracy is more
    # conservative, and will only minify if there is a meaningful fp64
    # divergence
    "accuracy": functools.partial(
        inductor_accuracy_fails, require_fp64=True, ignore_non_fp=True
    ),
    "strict_accuracy": inductor_accuracy_fails,
}
|
| 511 |
+
|
| 512 |
+
|
| 513 |
+
def repro_minifier_query(options, mod, load_args):
    """Subcommand used by isolate_fails: exit 1 if the repro fails, else 0.

    Note the exit-code meaning is inverted relative to normal scripts — a
    "failing" repro is the interesting (success) case for the minifier.
    """
    mod, args = repro_common(options, mod, load_args)
    fail_fn = functools.partial(
        ACCURACY_FAILS[options.accuracy], check_str=options.check_str
    )
    sys.exit(1 if fail_fn(mod, args) else 0)
|
| 522 |
+
|
| 523 |
+
|
| 524 |
+
def repro_minify(options, mod, load_args):
    """Run the functorch minifier over the repro graph.

    With --isolate (the default) each candidate graph is tried in a separate
    subprocess (pinned to the second CUDA device when available, so the main
    process keeps device 0); otherwise the failure predicate runs in-process.
    """
    from functorch.compile import minifier

    mod, args = repro_common(options, mod, load_args)
    compiler_name = "inductor_accuracy" if options.accuracy != "" else "inductor"

    # Prefer a secondary GPU for child processes when one exists.
    favored_device = 1 if torch.cuda.device_count() >= 2 else 0
    env_variables = {"CUDA_VISIBLE_DEVICES": str(favored_device)}

    module_fails: Any
    if options.isolate:
        module_fails = functools.partial(
            isolate_fails,
            env=env_variables,
            compiler_name=compiler_name,
            save_dir=options.save_dir,
            accuracy=options.accuracy,
            tracing_mode=options.tracing_mode,
        )
    else:
        module_fails = ACCURACY_FAILS[options.accuracy]

    minifier(
        mod,
        args,
        module_fails=functools.partial(module_fails, check_str=options.check_str),
        dump_state=functools.partial(
            dump_compiler_graph_state, compiler_name=compiler_name
        ),
        save_dir=options.save_dir,
        offload_to_disk=options.offload_to_disk,
        skip_offload=options.skip_saving_eager_intermediates,
        skip_sanity=options.skip_sanity,
        max_granularity=options.max_granularity,
    )
|
| 559 |
+
|
| 560 |
+
|
| 561 |
+
def repro_analyze(options, mod, load_args):
    """Accuracy analyzer: locate where inductor diverges from an fp64 reference.

    Pipeline:
      1. compile the graph with inductor and record every intermediate via
         the inductor intermediate hook (saved to a ContentStore);
      2. optionally re-run to verify inductor is deterministic;
      3. interpret an fp64 copy of the graph with fx.Interpreter, saving and
         determinism-checking those intermediates the same way;
      4. interpret the eager graph and report, per node, whether eager vs
         inductor divergence exceeds the fp64-referenced tolerance.
    Divergence/nondeterminism findings are printed through the progress bars.
    """
    from torch._inductor.compile_fx import compile_fx_inner
    from torch._inductor.hooks import intermediate_hook

    mod, args = repro_common(options, mod, load_args)

    # TODO: The logic for cloning inputs/models here is intentionally
    # modeled off of run_fwd_maybe_bwd, but arguably it is better not to
    # clone inputs (as you are doubling your effective GPU memory usage).
    # It is certainly faster though!  It probably makes sense to let the
    # user specify the offload strategy.

    with tqdm(desc="Compiling"):
        compiled = compile_fx_inner(mod, args)
    total = counters["inductor"]["intermediate_hooks"]

    known_names = set()

    def save_hook(name, val):
        # Record every hooked intermediate name; optionally persist the value.
        known_names.add(name)
        if not options.skip_saving_inductor_intermediates:
            writer.write_tensor(os.path.join("inductor", name), val)
        pbar.update(1)  # type: ignore[has-type]

    writer = torch.utils._content_store.ContentStoreWriter(
        options.save_dir, stable_hash=options.stable_hash
    )
    reader = torch.utils._content_store.ContentStoreReader(options.save_dir)

    new_args = clone_inputs(args)
    with intermediate_hook(save_hook), tqdm(
        desc="Saving inductor intermediates", total=total
    ) as pbar:
        compiled(new_args)
        assert not new_args

    def compare_tuples(tuple1, tuple2):
        # Human-readable diff of two metadata tuples; None when identical.
        diff_indices = [i for i in range(len(tuple1)) if tuple1[i] != tuple2[i]]
        diff_values = [(tuple1[i], tuple2[i]) for i in diff_indices]
        if not diff_values:
            return None
        else:
            return " and ".join(f"{a} != {b}" for a, b in diff_values)

    def check_hook(name, val):
        # Compare a fresh run's intermediate metadata against the saved copy.
        meta = writer.compute_tensor_metadata(val)
        meta2 = reader.read_tensor_metadata(os.path.join("inductor", name))
        reason = compare_tuples(meta, meta2)
        if reason is not None:
            pbar.write(f"NONDETERMINISTIC INDUCTOR at {name} ({reason})")
        pbar.update(1)

    if not options.skip_check_deterministic:
        new_args = clone_inputs(args)
        with intermediate_hook(check_hook), tqdm(
            desc="Checking inductor determinism", total=total
        ) as pbar:
            compiled(new_args)
            assert not new_args

    class WriterInterp(fx.Interpreter):
        # Interprets the graph eagerly, persisting hooked intermediates
        # under the given content-store subdirectory.
        def __init__(self, mod, subdir):
            super().__init__(mod)
            self.subdir = subdir

        def run_node(self, n):
            r = super().run_node(n)
            name = n.name
            if name in known_names:
                pbar.update(1)
                writer.write_tensor(os.path.join(self.subdir, name), r)
            return r

    # NB: the module cast doesn't actually do anything, since there are no
    # parameters/buffers on the module
    if not options.skip_saving_float64_intermediates:
        new_mod, new_args = cast_to_fp64(copy.deepcopy(mod), clone_inputs(args))
        with tqdm(desc="Saving float64 intermediates", total=total) as pbar:
            WriterInterp(new_mod, "float64").boxed_run(new_args)
        assert not new_args

    class ExactReaderInterp(fx.Interpreter):
        # Determinism check for the float64 pass: metadata-compare against
        # the intermediates saved above.
        def run_node(self, n):
            r = super().run_node(n)
            name = n.name
            if name in known_names:
                meta = writer.compute_tensor_metadata(r)
                meta2 = reader.read_tensor_metadata(os.path.join("float64", name))
                reason = compare_tuples(meta, meta2)
                if reason is not None:
                    pbar.write(f"NONDETERMINISTIC FLOAT64 at {name} ({reason})")
                pbar.update(1)
            return r

    # TODO: check eager determinism

    if not options.skip_check_deterministic:
        new_mod, new_args = cast_to_fp64(copy.deepcopy(mod), clone_inputs(args))
        with tqdm(desc="Checking float64 determinism", total=total) as pbar:
            ExactReaderInterp(new_mod).boxed_run(new_args)
            assert not new_args

    # Now that we've saved everything, interp through the eager graph
    # and do comparisons
    class ReaderInterp(fx.Interpreter):
        def run_node(self, n):
            r = super().run_node(n)
            name = n.name
            if name in known_names:
                inductor = reader.read_tensor(os.path.join("inductor", name))
                float64 = reader.read_tensor(os.path.join("float64", name))
                logged = False

                def log_error(msg, *args):
                    nonlocal logged
                    logged = True
                    pbar.write(f"DIVERGED at {name}: {msg % args}")

                if not same(
                    r,
                    inductor,
                    float64,
                    tol=torch._dynamo.config.repro_tolerance,
                    equal_nan=True,
                    log_error=log_error,
                ):
                    assert logged
                pbar.update(1)
            return r

    with tqdm(desc="Checking divergence", total=total) as pbar:
        ReaderInterp(mod).boxed_run(args)
    assert not args
|
| 695 |
+
|
| 696 |
+
|
| 697 |
+
def repro_get_args(options, mod, load_args):
    """Subcommand 'get_args': return the traced module and loaded inputs."""
    return repro_common(options, mod, load_args)
|
| 700 |
+
|
| 701 |
+
|
| 702 |
+
def repro_run(options, mod, load_args):
    """Subcommand 'run': compile with inductor and execute once.

    In accuracy mode, compares the compiled module against eager via
    same_two_models and raises AccuracyError on mismatch; otherwise just runs
    the compiled module (synchronizing CUDA so device crashes surface).

    Returns:
        A zero-arg callable that re-runs the compiled module.
    """
    from torch._inductor.compile_fx import compile_fx_inner

    mod, args = repro_common(options, mod, load_args)

    from torch.cuda import synchronize

    compiled = compile_fx_inner(mod, args)

    if options.accuracy != "":
        # We don't really respect --accuracy vs --strict-accuracy here, it
        # seems counterintuitive
        if not same_two_models(
            mod,
            compiled,
            args,
            only_fwd=True,
            ignore_non_fp=config.repro_ignore_non_fp,
        ):
            raise AccuracyError("Bad accuracy detected")
    else:
        need_sync = any(
            isinstance(arg, torch.Tensor) and arg.is_cuda for arg in args
        )
        # compiled() consumes its (boxed) argument list, hence the copy.
        compiled(list(args))
        if need_sync:
            synchronize()  # ensure segfaults are surfaced
    return lambda: compiled(list(args))
|
| 732 |
+
|
| 733 |
+
|
| 734 |
+
# TODO: lazily load the inputs or something, rather than cloning them
|
| 735 |
+
# TODO: lazily load the inputs or something, rather than cloning them
def run_repro(
    mod,
    load_args,
    *,
    command="run",
    accuracy: Union[bool, str] = "",
    save_dir=None,
    tracing_mode=None,
    patch_code=None,
    check_str=None,
    **kwargs,
):
    """Entry point invoked by generated repro scripts.

    Builds an argparse CLI with subcommands (run/minify/analyze/get_args/
    minifier-query), defaulting to ``command`` when the script is launched
    with no arguments, then dispatches to the matching ``repro_*`` function.

    Unknown kwargs (from repro scripts generated by newer PyTorch) and the
    retired ``patch_code`` argument are warned about and ignored. Boolean
    ``accuracy`` values are normalized to the string modes "" / "accuracy".
    """
    for k in kwargs:
        log.warning(
            "Unrecognized kwarg %s; perhaps this repro was made on a newer version of PyTorch",
            k,
        )

    if accuracy is True:
        accuracy = "accuracy"
    elif accuracy is False:
        accuracy = ""

    if patch_code is not None:
        log.warning(
            "patch_code no longer works on this version of PyTorch, silently ignoring"
        )

    parser = argparse.ArgumentParser(
        description=f"""\
An after_aot repro script, typically triggering a bug in PyTorch Inductor.
When run with no arguments, this script defaults to running '{command}'.
Extra flags may be available; to find out more, try '{command} --help'.
There are also alternate subcommands available, see below.

default settings on this script:
  {accuracy=}
  {tracing_mode=}
  {save_dir=}
  {check_str=}
""",
        formatter_class=argparse.RawTextHelpFormatter,
    )

    def common_flags(parser):
        # Flags shared by every subcommand.
        accuracy_group = parser.add_mutually_exclusive_group()
        accuracy_group.add_argument(
            "--no-accuracy",
            dest="accuracy",
            action="store_const",
            const="",
            default=accuracy,
            help="do not test accuracy, just run the module and see if it errors",
        )
        accuracy_group.add_argument(
            "--accuracy",
            action="store_const",
            const="accuracy",
            default=accuracy,
            help="""\
test if the RMSE between the compiled module and the fp64 reference is greater
than eager and the fp64 reference. This is usually more reliable than the
standard allclose test, as we expect numeric differences from compiling, often
improving accuracy over eager. RMSE test allows for compiled module to
diverge greatly from eager, as long as this divergence moves it closer to the
'true' mathematical value of the network. Caveats: (1) double precision can
still suffer from rounding error, so it is not a perfect reference (see for
example 'Herbie: Automatically Improving Floating Point Accuracy') for
approaches that detect the necessary working precision and compute it in
arbitrary precision floating point; unfortunately, this is not practical for
tensor computation; (2) if there are not enough samples in the output being
compared, we may get unlucky and have an unlucky greater RMSE than eager; this
could be overcome by applying a more rigorous statistical test at some
p-value, which we leave for future work.
""",
        )
        accuracy_group.add_argument(
            "--strict-accuracy",
            dest="accuracy",
            action="store_const",
            const="strict_accuracy",
            default=accuracy,
            help="""\
by default, when doing accuracy minification we will reject reductions which
change the divergence from a floating point divergence to a integral/boolean
divergence. This is because some operations like ReLU involve temporarily
sharp boundaries that smooth out again afterwards; without requiring
divergence on floating point, the minifier will often fixate on divergent
boolean tensor even though this is not the true source of the divergence.
However, rejecting these reductions makes it more difficult for the minifier
to make process. Using this option will let the minifier progress for ALL
divergences--you just might not end up with a useful repro in the end.""",
        )

        parser.add_argument(
            "--save-dir",
            type=str,
            default=save_dir,
            metavar="DIR",
            help="directory where saved inputs live",
        )
        parser.add_argument(
            "--no-save-dir",
            dest="save_dir",
            action="store_const",
            const=None,
            help="don't use any directory for saved inputs",
        )
        parser.add_argument(
            "--tracing-mode",
            type=str,
            metavar="{real,fake,symbolic}",
            default=tracing_mode,
            help="how to trace the repro module into a GraphModule with metadata",
        )

    subparsers = parser.add_subparsers(
        dest="command", metavar="{run,minify,analyze}", required=True
    )

    parser_run = subparsers.add_parser(
        "run",
        help="just run the repro",
    )
    common_flags(parser_run)

    parser_minify = subparsers.add_parser(
        "minify", help="run the minifier on the repro"
    )
    common_flags(parser_minify)
    parser_get_args = subparsers.add_parser("get_args", help="get the args")
    common_flags(parser_get_args)
    parser_minify_isolate = parser_minify.add_mutually_exclusive_group()
    parser_minify_isolate.add_argument(
        "--isolate",
        action="store_true",
        default=True,
        help="run in separate processes to avoid interference (default)",
    )
    parser_minify_isolate.add_argument(
        "--no-isolate",
        dest="isolate",
        action="store_false",
        help="speed up by running all compilation in same process",
    )
    parser_minify.add_argument(
        "--skip-saving-eager-intermediates",
        action="store_true",
        help="skip saving eager intermediates on --minify",
    )
    # TODO: make this an option for --analyze too
    parser_minify.add_argument(
        "--offload-to-disk",
        action="store_true",
        help="during minification, offload delta debugging intermediates to disk. Use if you're OOMing",
    )
    parser_minify.add_argument(
        "--skip-sanity",
        action="store_true",
        help="skip sanity check at beginning of minification on original graph",
    )
    parser_minify.add_argument(
        "--max-granularity",
        type=int,
        default=None,
        help="start at this granularity and work down; must be power of 2",
    )
    parser_minify.add_argument(
        "--check-str",
        type=str,
        default=check_str,
        help="require minified program to fail with error containing this string",
    )

    parser_analyze = subparsers.add_parser(
        "analyze", help="run the accuracy analyzer on the repro"
    )
    common_flags(parser_analyze)
    parser_analyze.add_argument(
        "--skip-saving-inductor-intermediates",
        action="store_true",
        help="skip saving inductor intermediates on --analyze",
    )
    parser_analyze.add_argument(
        "--skip-saving-float64-intermediates",
        action="store_true",
        help="skip saving float64 intermediates",
    )
    parser_analyze.add_argument(
        "--skip-check-deterministic",
        action="store_true",
        help="skip checking that the network is deterministic",
    )
    parser_analyze.add_argument(
        "--stable-hash",
        action="store_true",
        help="use SHA-1 checksum instead of fast (but possibly unsound) hash",
    )

    # Run the repro in the context of minification, inverting exit code meaning
    parser_minifier_query = subparsers.add_parser(
        "minifier-query",
    )
    common_flags(parser_minifier_query)
    parser_minifier_query.add_argument(
        "--check-str",
        type=str,
        default=check_str,
        help="require minified program to fail with error containing this string",
    )

    # With no CLI arguments, fall back to the baked-in default command.
    args = None
    if len(sys.argv) <= 1:
        args = [command, *sys.argv[1:]]

    options = parser.parse_args(args)
    COMMAND_FNS = {
        "minify": repro_minify,
        "analyze": repro_analyze,
        "minifier-query": repro_minifier_query,
        "run": repro_run,
        "get_args": repro_get_args,
    }
    return COMMAND_FNS[options.command](options, mod, load_args)
|
valley/lib/python3.10/site-packages/torch/_dynamo/repro/after_dynamo.py
ADDED
|
@@ -0,0 +1,586 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import argparse
|
| 3 |
+
import copy
|
| 4 |
+
import functools
|
| 5 |
+
import logging
|
| 6 |
+
import os
|
| 7 |
+
import shutil
|
| 8 |
+
import sys
|
| 9 |
+
import textwrap
|
| 10 |
+
from importlib import import_module
|
| 11 |
+
from typing import Union
|
| 12 |
+
|
| 13 |
+
import torch
|
| 14 |
+
import torch.fx as fx
|
| 15 |
+
|
| 16 |
+
from torch._dynamo.debug_utils import (
|
| 17 |
+
AccuracyError,
|
| 18 |
+
backend_accuracy_fails,
|
| 19 |
+
BUCK_CMD_PREFIX,
|
| 20 |
+
BuckTargetWriter,
|
| 21 |
+
extra_imports,
|
| 22 |
+
generate_config_string,
|
| 23 |
+
helper_for_dump_minify,
|
| 24 |
+
InputReader,
|
| 25 |
+
InputWriter,
|
| 26 |
+
minifier_dir,
|
| 27 |
+
NNModuleToString,
|
| 28 |
+
NopInputReader,
|
| 29 |
+
run_fwd_maybe_bwd,
|
| 30 |
+
same_two_models,
|
| 31 |
+
)
|
| 32 |
+
from torch.fx.experimental.symbolic_shapes import fx_placeholder_targets
|
| 33 |
+
from torch.hub import tqdm
|
| 34 |
+
|
| 35 |
+
from .. import config
|
| 36 |
+
from ..backends.registry import lookup_backend, register_debug_backend
|
| 37 |
+
from ..debug_utils import clone_inputs_retaining_gradness
|
| 38 |
+
|
| 39 |
+
log = logging.getLogger(__name__)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
inductor_config = import_module("torch._inductor.config")
|
| 43 |
+
use_buck = inductor_config.is_fbcode()
|
| 44 |
+
|
| 45 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 46 |
+
# MAIN ENTRY POINT
|
| 47 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def _accuracy_fails(gm, example_inputs, compiler_fn):
|
| 51 |
+
return backend_accuracy_fails(
|
| 52 |
+
gm,
|
| 53 |
+
example_inputs,
|
| 54 |
+
compiler_fn,
|
| 55 |
+
only_fwd=config.repro_forward_only,
|
| 56 |
+
ignore_non_fp=config.repro_ignore_non_fp,
|
| 57 |
+
)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class WrapBackendDebug:
    """Wraps a dynamo backend so failures are captured for minification.

    Depending on ``config.repro_after`` / ``config.repro_level``, compilation
    of each TorchDynamo-produced graph is checked for crashes (or, at level 4,
    accuracy failures) and a repro / minifier launcher script is dumped when
    one occurs before the exception is re-raised.
    """

    def __init__(self, unconfigured_compiler_fn, compiler_name: str):
        # Make this wrapper look like the wrapped compiler (name, docs, ...).
        functools.wraps(unconfigured_compiler_fn)(self)
        self._torchdynamo_orig_callable = unconfigured_compiler_fn  # type: ignore[attr-defined]
        self._compiler_name = compiler_name
        if hasattr(unconfigured_compiler_fn, "__name__"):
            self.__name__ = unconfigured_compiler_fn.__name__
        # ``compiler_name``, when present, takes precedence over ``__name__``.
        if hasattr(unconfigured_compiler_fn, "compiler_name"):
            self.__name__ = unconfigured_compiler_fn.compiler_name
        if hasattr(unconfigured_compiler_fn, "get_compiler_config"):
            self.get_compiler_config = unconfigured_compiler_fn.get_compiler_config  # type: ignore[attr-defined]

    def __call__(self, gm, example_inputs, **kwargs):
        """Compile ``gm``; on failure dump repro/minifier artifacts and re-raise."""
        compiler_fn = functools.partial(self._torchdynamo_orig_callable, **kwargs)
        assert config.repro_after in ("dynamo", "aot", None)

        if config.repro_after == "dynamo":

            def add_paths(exc):
                # Attach the minifier script location (and, in fbcode, the
                # buck command to run it) to the raised exception.
                exc.minifier_path = os.path.join(minifier_dir(), "minifier_launcher.py")
                if use_buck:
                    exc.buck_command = " ".join(
                        BUCK_CMD_PREFIX
                        + [BuckTargetWriter(exc.minifier_path).cmd_line_path]
                    )

            if config.repro_level == 3:
                # Level 3: always dump a minifier launcher, even on success.
                dump_to_minify_after_dynamo(gm, example_inputs, self._compiler_name)

            # Check for either accuracy (level 4) or other type of failures.
            if config.repro_level == 4:
                # Check Accuracy
                compiled_gm = compiler_fn(copy.deepcopy(gm), example_inputs)
                if _accuracy_fails(gm, example_inputs, compiler_fn):
                    log.warning(
                        "Accuracy failed for the TorchDynamo produced graph. Creating script to minify the error."
                    )
                    dump_to_minify_after_dynamo(
                        fx.GraphModule(gm, copy.deepcopy(gm.graph)),
                        example_inputs,
                        self._compiler_name,
                    )
                    exc = AccuracyError("Bad accuracy detected.")
                    add_paths(exc)
                    raise exc
            else:
                try:
                    compiled_gm = compiler_fn(copy.deepcopy(gm), example_inputs)
                    run_fwd_maybe_bwd(compiled_gm, example_inputs)
                except Exception as exc:
                    log.warning(
                        "Compiled Fx GraphModule failed. Creating script to minify the error."
                    )
                    if config.repro_level == 1:
                        # Level 1: dump the full failing graph as a repro script.
                        dump_state_fn = functools.partial(
                            dump_backend_state, compiler_name=self._compiler_name
                        )
                        dump_state_fn(
                            fx.GraphModule(gm, copy.deepcopy(gm.graph)), example_inputs
                        )
                    elif config.repro_level == 2:
                        # Level 2: dump a minifier launcher script instead.
                        dump_to_minify_after_dynamo(
                            fx.GraphModule(gm, copy.deepcopy(gm.graph)),
                            example_inputs,
                            self._compiler_name,
                        )
                    add_paths(exc)
                    raise
        else:
            # Minification happens at a different stage (or not at all):
            # compile without any interposition.
            compiled_gm = compiler_fn(gm, example_inputs)

        return compiled_gm
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def wrap_backend_debug(unconfigured_compiler_fn, compiler_name: str):
    """Decorate ``unconfigured_compiler_fn`` with dynamo-level minification.

    Unlike ``wrap_compiler_debug``, interception happens at the TorchDynamo
    produced Fx GraphModule, which keeps the wrapper mostly backend-agnostic
    (useful, e.g., for minifying AOT Autograd tracing issues).  When a failure
    is detected, a minified repro is saved in repro.tar.gz.
    """
    return WrapBackendDebug(unconfigured_compiler_fn, compiler_name)
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 147 |
+
# REPRO DUMPERS
|
| 148 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def generate_dynamo_fx_repro_string(
    gm,
    args,
    compiler_name,
    check_accuracy=False,
    *,
    stable_output=False,
    save_dir=None,
    command="run",
):
    """
    Generate a repro string for backend-agnostic minified version.

    Serializes ``args`` to disk via ``InputWriter`` (under ``save_dir``) and
    returns the full text of a standalone script that rebuilds the module,
    reloads the inputs, and dispatches to ``run_repro`` with ``command``.
    """

    model_str = NNModuleToString.convert(gm)

    # TODO: Figure out why torch.compile'd hash isn't work on this codepath
    writer = InputWriter(save_dir, stable_hash=True)
    for placeholder, arg in zip(fx_placeholder_targets(gm), args):
        if isinstance(arg, (int, torch.SymInt)):
            writer.symint(placeholder, arg)
        elif isinstance(arg, torch.Tensor):
            # TODO: improve these names with FQN
            writer.tensor(placeholder, arg)
        else:
            raise TypeError(f"arg is neither SymInt/int nor torch.Tensor, {arg}")
    load_args = "\n".join(writer.lines())

    return textwrap.dedent(
        f"""
from math import inf
import torch
from torch import tensor, device
import torch.fx as fx
import torch._dynamo
from torch._dynamo.testing import rand_strided
from torch._dynamo.debug_utils import run_fwd_maybe_bwd

{generate_config_string(stable_output=stable_output)}

{extra_imports}

{model_str}
mod = Repro()

{load_args}

if __name__ == '__main__':
    from torch._dynamo.repro.after_dynamo import run_repro
    run_repro(mod, load_args, accuracy={check_accuracy!r}, command={command!r},
        save_dir={save_dir!r}, autocast={torch.is_autocast_enabled()!r}, backend={compiler_name!r})
"""
    )
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def dump_backend_repro_as_file(gm, args, compiler_name, check_accuracy=False):
    """Save a standalone repro script for ``gm`` to disk.

    Writes the script to ``./checkpoints/minified_<n>_nodes.py`` and copies it
    to ``./repro.py`` for convenience.  Inputs are serialized next to the
    script (inside ``checkpoints``) so the repro is self-contained.

    Args:
        gm: TorchDynamo-produced Fx GraphModule to reproduce.
        args: example inputs matching ``gm``'s placeholders.
        compiler_name: backend name to embed in the generated script.
        check_accuracy: if True, the generated script compares against eager.
    """
    curdir = os.getcwd()
    subdir = os.path.join(curdir, "checkpoints")
    # exist_ok=True already tolerates an existing directory; the previous
    # os.path.exists() pre-check was redundant (and racy), so it is dropped.
    os.makedirs(subdir, exist_ok=True)
    file_name = os.path.join(subdir, f"minified_{len(gm.graph.nodes)}_nodes.py")
    log.warning(
        "Writing checkpoint with %s nodes to %s", len(gm.graph.nodes), file_name
    )

    with open(file_name, "w") as fd:
        fd.write(
            generate_dynamo_fx_repro_string(
                gm, args, compiler_name, check_accuracy, save_dir=subdir
            )
        )
    latest_repro = os.path.join(curdir, "repro.py")
    log.warning("Copying %s to %s for convenience", file_name, latest_repro)

    if use_buck:
        # In fbcode, also emit a buck TARGETS entry so the repro is runnable.
        BuckTargetWriter(latest_repro).write()

    shutil.copyfile(file_name, latest_repro)
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def dump_backend_state(gm, args, compiler_name, check_accuracy=False):
    """Persist the dynamo graph so the failure can be reproduced later.

    The GraphModule is converted to source text and written out as a
    ``repro.py`` script (see ``dump_backend_repro_as_file``); graphs that
    cannot be stringified are not supported here.
    """
    assert NNModuleToString.can_convert_to_string(gm)
    return dump_backend_repro_as_file(gm, args, compiler_name, check_accuracy)
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 248 |
+
# MINIFIER DUMPER
|
| 249 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
def dump_to_minify_after_dynamo(gm, args, compiler_name):
    """Write a ``minifier_launcher.py`` script for ``gm`` under the minifier dir.

    The generated script re-runs the failure and, because ``command`` is set
    to ``minify``, launches the minifier when executed.  Accuracy checking is
    enabled when the configured repro level is 4 (accuracy minification).
    """
    # TODO: factor this out
    subdir = os.path.join(minifier_dir(), "checkpoints")
    # exist_ok=True already tolerates an existing directory; the previous
    # os.path.exists() pre-check was redundant (and racy), so it is dropped.
    os.makedirs(subdir, exist_ok=True)
    helper_for_dump_minify(
        generate_dynamo_fx_repro_string(
            gm,
            args,
            compiler_name,
            check_accuracy=config.repro_level == 4,
            save_dir=subdir,
            command="minify",
        )
    )
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 270 |
+
# MINIFIER BACKENDS
|
| 271 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
@register_debug_backend
def dynamo_minifier_backend(gm, example_inputs, compiler_name):
    """Debug backend that runs the minifier when compilation/execution fails.

    Compiles ``gm`` with the backend named ``compiler_name`` and runs it once;
    if that raises, the minifier shrinks ``gm`` to the smallest subgraph that
    still produces a "similar" failure (see ``backend_fails``).
    """
    from functorch.compile import minifier

    compiler_fn = lookup_backend(compiler_name)

    # TODO: It's inconsistent to pass SymInt inputs but REAL tensors.
    # We should pass ints and look at the GraphModule placeholders
    # to resolve them to SymInt (if necessary)
    example_inputs = [
        i.node.hint if isinstance(i, torch.SymInt) else i for i in example_inputs
    ]

    try:
        compiled_gm = compiler_fn(gm, example_inputs)
        run_fwd_maybe_bwd(compiled_gm, example_inputs)
        # NOTE(review): this ValueError is raised *inside* the try block, so
        # it is immediately caught by the except below and treated as the
        # "original failure" — the minifier then runs against the message
        # "No issue was detected".  Looks unintentional; confirm before
        # relying on this path.
        raise ValueError("No issue was detected")
    except Exception as exc:
        orig_failure = str(exc)
        log.warning(
            "Compiled Fx GraphModule failed. Creating script to minify the error."
        )
        dump_state_fn = functools.partial(
            dump_backend_state, compiler_name=compiler_name
        )
        dump_state_fn(fx.GraphModule(gm, copy.deepcopy(gm.graph)), example_inputs)
        # A candidate "fails" only if its failure message is similar enough
        # to the original one.
        fails_fn = functools.partial(
            backend_fails,
            compiler_fn=compiler_fn,
            orig_failure=orig_failure,
        )
        minifier(
            gm,
            example_inputs,
            module_fails=fails_fn,
            dump_state=dump_state_fn,
        )
    return gm
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
@register_debug_backend
def dynamo_accuracy_minifier_backend(gm, example_inputs, compiler_name):
    """Debug backend that minifies graphs whose compiled outputs diverge from eager."""
    from functorch.compile import minifier

    backend = lookup_backend(compiler_name)

    # Eval mode removes randomness (e.g. dropout) from the comparison.
    gm.eval()

    # Bail out early when the input graph already matches eager.
    if not _accuracy_fails(gm, example_inputs, backend):
        log.error("Input graph does not fail accuracy testing")
        return gm

    log.warning("Accuracy failed for the TorchDynamo produced graph")
    dumper = functools.partial(
        dump_backend_state, compiler_name=compiler_name, check_accuracy=True
    )
    checker = functools.partial(
        _accuracy_fails,
        compiler_fn=backend,
    )
    dumper(fx.GraphModule(gm, copy.deepcopy(gm.graph)), example_inputs)
    minifier(
        gm,
        example_inputs,
        module_fails=checker,
        dump_state=dumper,
    )
    return gm
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
def backend_fails(gm, example_inputs, compiler_fn, orig_failure):
|
| 346 |
+
"""
|
| 347 |
+
Minifier uses this function to identify if the minified graph module fails
|
| 348 |
+
with the same error.
|
| 349 |
+
|
| 350 |
+
One caveat is that minifier can potentially go into a wrong direction when
|
| 351 |
+
the resulting graph module fails for a different reason. To avoid this, we
|
| 352 |
+
save the string for the original exception and check similarity between new
|
| 353 |
+
and old exception. They can be somewhat different in some cases, when the
|
| 354 |
+
exception string depends on the failing node information. So, we have a
|
| 355 |
+
loose similarity metric to guide the minifier path.
|
| 356 |
+
"""
|
| 357 |
+
from difflib import SequenceMatcher
|
| 358 |
+
|
| 359 |
+
try:
|
| 360 |
+
# Run the original gm to check eager validity
|
| 361 |
+
run_fwd_maybe_bwd(gm, clone_inputs_retaining_gradness(example_inputs))
|
| 362 |
+
compiled_gm = compiler_fn(gm, example_inputs)
|
| 363 |
+
run_fwd_maybe_bwd(compiled_gm, clone_inputs_retaining_gradness(example_inputs))
|
| 364 |
+
return False
|
| 365 |
+
except Exception as e:
|
| 366 |
+
new_failure = str(e)
|
| 367 |
+
if SequenceMatcher(None, orig_failure, new_failure).ratio() > 0.5:
|
| 368 |
+
return True
|
| 369 |
+
return False
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 373 |
+
# REPRO MAIN
|
| 374 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 375 |
+
|
| 376 |
+
|
| 377 |
+
def run_load_args(options, mod, load_args):
|
| 378 |
+
if not hasattr(load_args, "_version"):
|
| 379 |
+
log.warning(
|
| 380 |
+
"load_args does not have a _version attribute, please file a bug to PyTorch "
|
| 381 |
+
"and describe how you generate this repro script"
|
| 382 |
+
)
|
| 383 |
+
else:
|
| 384 |
+
if load_args._version > 0:
|
| 385 |
+
log.warning(
|
| 386 |
+
"load_args is version %s, but this version of PyTorch only supports "
|
| 387 |
+
"version 0. We will try to run it anyway but there may be an incompatibility; "
|
| 388 |
+
"if so, try upgrading your version of PyTorch.",
|
| 389 |
+
load_args._version,
|
| 390 |
+
)
|
| 391 |
+
|
| 392 |
+
nop_reader = NopInputReader()
|
| 393 |
+
load_args(nop_reader)
|
| 394 |
+
|
| 395 |
+
with tqdm(desc="Loading inputs", total=nop_reader.total) as pbar:
|
| 396 |
+
input_reader = InputReader(save_dir=options.save_dir, pbar=pbar)
|
| 397 |
+
load_args(input_reader)
|
| 398 |
+
args = input_reader.args
|
| 399 |
+
|
| 400 |
+
return args
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
def repro_minify(options, mod, load_args):
    """Entry point for the ``minify`` command of a generated repro script.

    Re-runs ``mod`` under dynamo with one of the minifier debug backends so
    the failing (or inaccurate) graph is shrunk and re-dumped.
    """
    args = run_load_args(options, mod, load_args)

    # Setup debug minifier compiler
    if not options.accuracy:
        compiler_fn = lookup_backend("dynamo_minifier_backend")
    else:
        compiler_fn = lookup_backend("dynamo_accuracy_minifier_backend")

    if options.backend is None:
        raise RuntimeError(
            "Compiler name is None - this likely means that a custom compiler "
            "was called by torchdynamo. Please remove this error, import your "
            "custom compiler function, and replace the backend=None "
            "line in run_repro to backend=<my_imported_custom_function>"
        )

    # Bind the real backend's name; the minifier backend looks it up itself.
    dynamo_minifier_backend = functools.partial(
        compiler_fn,
        compiler_name=options.backend,
    )
    opt_mod = torch._dynamo.optimize(dynamo_minifier_backend)(mod)

    with torch.cuda.amp.autocast(enabled=options.autocast):
        opt_mod(*args)
|
| 428 |
+
|
| 429 |
+
|
| 430 |
+
def repro_run(options, mod, load_args):
    """Entry point for the ``run`` command of a generated repro script.

    In accuracy mode (``options.accuracy`` non-empty) the eager and compiled
    modules are compared; otherwise the compiled module is simply executed to
    reproduce the crash.
    """
    opt_mod = torch._dynamo.optimize(options.backend)(mod)

    if options.accuracy != "":
        # Eval mode removes randomness (e.g. dropout) before comparing.
        mod.eval()
        opt_mod.eval()

        with torch.cuda.amp.autocast(enabled=options.autocast):
            # TODO: disable clone
            args = run_load_args(options, mod, load_args)
            # Sanity check: eager must agree with itself before blaming dynamo.
            assert same_two_models(mod, mod, args), "Eager itself failed"
            if not same_two_models(
                mod,
                opt_mod,
                args,
                only_fwd=config.repro_forward_only,
                ignore_non_fp=config.repro_ignore_non_fp,
            ):
                raise AccuracyError("Dynamo failed")
    else:
        with torch.cuda.amp.autocast(enabled=options.autocast):
            # Inputs are reloaded between the eager and compiled runs so each
            # run sees pristine tensors.  The results (ref/res) are run for
            # effect only — crash reproduction — and are intentionally unused.
            args = run_load_args(options, mod, load_args)
            ref = run_fwd_maybe_bwd(
                mod, args, only_fwd=options.only_fwd, disable_clone=True
            )
            del args

            args = run_load_args(options, mod, load_args)
            res = run_fwd_maybe_bwd(
                opt_mod, args, only_fwd=options.only_fwd, disable_clone=True
            )
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
def run_repro(
    mod,
    load_args,
    *,
    command="run",
    accuracy: Union[bool, str] = "",
    save_dir=None,
    autocast=False,
    backend="inductor",
    **kwargs,
):
    """Main entry point invoked at the bottom of a generated repro script.

    Parses command-line arguments (falling back to ``command`` when the script
    is run with no CLI arguments) and dispatches to ``repro_run`` or
    ``repro_minify``.  Unknown keyword arguments are tolerated with a warning
    so scripts generated by newer PyTorch versions still run.
    """
    for k in kwargs:
        log.warning(
            "Unrecognized kwarg %s; perhaps this repro was made on a newer version of PyTorch",
            k,
        )

    # Normalize the legacy boolean form of ``accuracy`` to its string form.
    if accuracy is True:
        accuracy = "accuracy"
    elif accuracy is False:
        accuracy = ""

    parser = argparse.ArgumentParser(
        description=f"""\
An after_dynamo repro script, typically triggering a bug in Dynamo or
AOTAutograd. When run with no arguments, this script defaults to running
'{command}'. Extra flags may be available; to find out more, try '{command}
--help'. There are also alternate subcommands available, see below.

default settings on this script:
  {accuracy=}
  {save_dir=}
""",
        formatter_class=argparse.RawTextHelpFormatter,
    )

    def common_flags(parser):
        # Flags shared by both the `run` and `minify` subcommands.
        accuracy_group = parser.add_mutually_exclusive_group()
        accuracy_group.add_argument(
            "--no-accuracy",
            dest="accuracy",
            action="store_const",
            const="",
            default=accuracy,
            help="do not test accuracy, just run the module and see if it errors",
        )
        accuracy_group.add_argument(
            "--accuracy",
            action="store_const",
            const="accuracy",
            default=accuracy,
            help="test accuracy",
        )
        parser.add_argument(
            "--save-dir",
            type=str,
            default=save_dir,
            metavar="DIR",
            help="directory where saved inputs live",
        )
        parser.add_argument(
            "--no-save-dir",
            dest="save_dir",
            action="store_const",
            const=None,
            help="don't use any directory for saved inputs",
        )
        parser.add_argument(
            "--no-isolate",
            dest="isolate",
            action="store_false",
            default=False,
            help="no isolate (doesn't do anything for after_dynamo)",
        )
        parser.add_argument(
            "--autocast",
            default=autocast,
            action="store_true",
            help="use torch.cuda.amp.autocast",
        )
        parser.add_argument(
            "--no-autocast",
            dest="autocast",
            action="store_false",
            help="don't use torch.cuda.amp.autocast",
        )
        parser.add_argument(
            "--backend",
            type=str,
            default=backend,
            metavar="BACKEND",
            help="torch.compile backend to use",
        )

    subparsers = parser.add_subparsers(
        dest="command", metavar="{run,minify}", required=True
    )

    parser_run = subparsers.add_parser(
        "run",
        help="just run the repro",
    )
    common_flags(parser_run)
    parser_run.add_argument(
        "--only-fwd",
        action="store_true",
        help="don't run backwards compilation for testing",
    )

    parser_minify = subparsers.add_parser(
        "minify", help="run the minifier on the repro"
    )
    common_flags(parser_minify)

    args = None
    if len(sys.argv) <= 1:
        # No CLI arguments: use the default command baked into the script.
        args = [command, *sys.argv[1:]]

    options = parser.parse_args(args)
    COMMAND_FNS = {
        "minify": repro_minify,
        "run": repro_run,
    }
    COMMAND_FNS[options.command](options, mod, load_args)
|
valley/lib/python3.10/site-packages/torch/_dynamo/test_minifier_common.py
ADDED
|
@@ -0,0 +1,245 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import dataclasses
|
| 3 |
+
import io
|
| 4 |
+
import logging
|
| 5 |
+
import os
|
| 6 |
+
import re
|
| 7 |
+
import shutil
|
| 8 |
+
import subprocess
|
| 9 |
+
import sys
|
| 10 |
+
import tempfile
|
| 11 |
+
import traceback
|
| 12 |
+
from typing import Optional
|
| 13 |
+
from unittest.mock import patch
|
| 14 |
+
|
| 15 |
+
import torch
|
| 16 |
+
import torch._dynamo
|
| 17 |
+
import torch._dynamo.test_case
|
| 18 |
+
from torch.utils._traceback import report_compile_source_on_error
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
@dataclasses.dataclass
class MinifierTestResult:
    """Captured source of the minifier launcher and final repro scripts."""

    minifier_code: str
    repro_code: str

    def _get_module(self, t):
        """Extract and normalize the ``Repro`` nn.Module definition from *t*."""
        match = re.search(r"class Repro\(torch\.nn\.Module\):\s+([ ].*\n| *\n)+", t)
        assert match is not None, "failed to find module"
        snippet = match.group(0)
        # Collapse trailing whitespace and squeeze runs of blank lines.
        snippet = re.sub(r"\s+$", "\n", snippet, flags=re.MULTILINE)
        snippet = re.sub(r"\n{3,}", "\n\n", snippet)
        return snippet.strip()

    def minifier_module(self):
        """The Repro module as it appears in the minifier launcher script."""
        return self._get_module(self.minifier_code)

    def repro_module(self):
        """The Repro module as it appears in the generated repro script."""
        return self._get_module(self.repro_code)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class MinifierTestBase(torch._dynamo.test_case.TestCase):
    """Shared harness for dynamo/inductor minifier integration tests.

    Drives the full three-stage flow (run failing code -> run the generated
    minifier launcher -> run the generated repro), either in-process or in a
    real subprocess, inside a throwaway debug directory.
    """

    # Shared scratch directory for all generated scripts and checkpoints.
    DEBUG_DIR = tempfile.mkdtemp()

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._exit_stack.enter_context(  # type: ignore[attr-defined]
            torch._dynamo.config.patch(debug_dir_root=cls.DEBUG_DIR)
        )
        # These configurations make new process startup slower. Disable them
        # for the minification tests to speed them up.
        cls._exit_stack.enter_context(  # type: ignore[attr-defined]
            torch._inductor.config.patch(
                {
                    # https://github.com/pytorch/pytorch/issues/100376
                    "pattern_matcher": False,
                    # multiprocess compilation takes a long time to warmup
                    "compile_threads": 1,
                    # https://github.com/pytorch/pytorch/issues/100378
                    "cpp.vec_isa_ok": False,
                }
            )
        )

    @classmethod
    def tearDownClass(cls):
        # Keep the scratch dir around for post-mortem debugging on request.
        if os.getenv("PYTORCH_KEEP_TMPDIR", "0") != "1":
            shutil.rmtree(cls.DEBUG_DIR)
        else:
            print(f"test_minifier_common tmpdir kept at: {cls.DEBUG_DIR}")
        cls._exit_stack.close()  # type: ignore[attr-defined]

    def _gen_codegen_fn_patch_code(self, device, bug_type):
        """Return a config snippet that injects an artificial relu bug for ``device``."""
        assert bug_type in ("compile_error", "runtime_error", "accuracy")
        return f"""\
{torch._dynamo.config.codegen_config()}
{torch._inductor.config.codegen_config()}
torch._inductor.config.{"cpp" if device == "cpu" else "triton"}.inject_relu_bug_TESTING_ONLY = {bug_type!r}
"""

    def _maybe_subprocess_run(self, args, *, isolate, cwd=None):
        """Run a ``python3 ...`` command either in-process or as a subprocess.

        Returns a ``subprocess.CompletedProcess`` either way; in the
        in-process case stdout is empty and stderr carries the captured
        torch._dynamo log output plus any traceback.
        """
        if not isolate:
            assert len(args) >= 2, args
            assert args[0] == "python3", args
            if args[1] == "-c":
                assert len(args) == 3, args
                code = args[2]
                args = ["-c"]
            else:
                assert len(args) >= 2, args
                with open(args[1]) as f:
                    code = f.read()
                args = args[1:]

            # WARNING: This is not a perfect simulation of running
            # the program out of tree. We only interpose on things we KNOW we
            # need to handle for tests. If you need more stuff, you will
            # need to augment this appropriately.

            # NB: Can't use save_config because that will omit some fields,
            # but we must save and reset ALL fields
            dynamo_config = torch._dynamo.config.shallow_copy_dict()
            inductor_config = torch._inductor.config.shallow_copy_dict()
            try:
                stderr = io.StringIO()
                log_handler = logging.StreamHandler(stderr)
                log = logging.getLogger("torch._dynamo")
                log.addHandler(log_handler)
                try:
                    prev_cwd = os.getcwd()
                    if cwd is not None:
                        os.chdir(cwd)
                    # Simulate the child's argv and execute the script text in
                    # a fresh "__main__"-like namespace.
                    with patch("sys.argv", args), report_compile_source_on_error():
                        exec(code, {"__name__": "__main__", "__compile_source__": code})
                    rc = 0
                except Exception:
                    rc = 1
                    traceback.print_exc(file=stderr)
                finally:
                    log.removeHandler(log_handler)
                    if cwd is not None:
                        os.chdir(prev_cwd)  # type: ignore[possibly-undefined]
                    # Make sure we don't leave buggy compiled frames lying
                    # around
                    torch._dynamo.reset()
            finally:
                # Restore the configs the executed code may have mutated.
                torch._dynamo.config.load_config(dynamo_config)
                torch._inductor.config.load_config(inductor_config)

            # TODO: return a more appropriate data structure here
            return subprocess.CompletedProcess(
                args,
                rc,
                b"",
                stderr.getvalue().encode("utf-8"),
            )
        else:
            return subprocess.run(args, capture_output=True, cwd=cwd, check=False)

    # Run `code` in a separate python process.
    # Returns the completed process state and the directory containing the
    # minifier launcher script, if `code` outputted it.
    def _run_test_code(self, code, *, isolate):
        proc = self._maybe_subprocess_run(
            ["python3", "-c", code], isolate=isolate, cwd=self.DEBUG_DIR
        )

        print("test stdout:", proc.stdout.decode("utf-8"))
        print("test stderr:", proc.stderr.decode("utf-8"))
        # The launcher's path is printed on stderr; recover its directory.
        repro_dir_match = re.search(
            r"(\S+)minifier_launcher.py", proc.stderr.decode("utf-8")
        )
        if repro_dir_match is not None:
            return proc, repro_dir_match.group(1)
        return proc, None

    # Runs the minifier launcher script in `repro_dir`
    def _run_minifier_launcher(self, repro_dir, isolate, *, minifier_args=()):
        self.assertIsNotNone(repro_dir)
        launch_file = os.path.join(repro_dir, "minifier_launcher.py")
        with open(launch_file) as f:
            launch_code = f.read()
        # NOTE(review): this existence assertion runs after the open() above,
        # so the open would already have raised if the file were missing.
        self.assertTrue(os.path.exists(launch_file))

        args = ["python3", launch_file, "minify", *minifier_args]
        if not isolate:
            args.append("--no-isolate")
        launch_proc = self._maybe_subprocess_run(args, isolate=isolate, cwd=repro_dir)
        print("minifier stdout:", launch_proc.stdout.decode("utf-8"))
        stderr = launch_proc.stderr.decode("utf-8")
        print("minifier stderr:", stderr)
        self.assertNotIn("Input graph did not fail the tester", stderr)

        return launch_proc, launch_code

    # Runs the repro script in `repro_dir`
    def _run_repro(self, repro_dir, *, isolate=True):
        self.assertIsNotNone(repro_dir)
        repro_file = os.path.join(repro_dir, "repro.py")
        with open(repro_file) as f:
            repro_code = f.read()
        self.assertTrue(os.path.exists(repro_file))

        repro_proc = self._maybe_subprocess_run(
            ["python3", repro_file], isolate=isolate, cwd=repro_dir
        )
        print("repro stdout:", repro_proc.stdout.decode("utf-8"))
        print("repro stderr:", repro_proc.stderr.decode("utf-8"))
        return repro_proc, repro_code

    # Template for testing code.
    # `run_code` is the code to run for the test case.
    # `patch_code` is the code to be patched in every generated file; usually
    # just use this to turn on bugs via the config
    def _gen_test_code(self, run_code, repro_after, repro_level):
        return f"""\
import torch
import torch._dynamo
{torch._dynamo.config.codegen_config()}
{torch._inductor.config.codegen_config()}
torch._dynamo.config.repro_after = "{repro_after}"
torch._dynamo.config.repro_level = {repro_level}
torch._dynamo.config.debug_dir_root = "{self.DEBUG_DIR}"
{run_code}
"""

    # Runs a full minifier test.
    # Minifier tests generally consist of 3 stages:
    # 1. Run the problematic code
    # 2. Run the generated minifier launcher script
    # 3. Run the generated repro script
    #
    # If possible, you should run the test with isolate=False; use
    # isolate=True only if the bug you're testing would otherwise
    # crash the process
    def _run_full_test(
        self, run_code, repro_after, expected_error, *, isolate, minifier_args=()
    ) -> Optional[MinifierTestResult]:
        if isolate:
            repro_level = 3
        elif expected_error is None or expected_error == "AccuracyError":
            repro_level = 4
        else:
            repro_level = 2
        test_code = self._gen_test_code(run_code, repro_after, repro_level)
        print("running test", file=sys.stderr)
        test_proc, repro_dir = self._run_test_code(test_code, isolate=isolate)
        if expected_error is None:
            # Just check that there was no error
            self.assertEqual(test_proc.returncode, 0)
            self.assertIsNone(repro_dir)
            return None
        # NB: Intentionally do not test return code; we only care about
        # actually generating the repro, we don't have to crash
        self.assertIn(expected_error, test_proc.stderr.decode("utf-8"))
        self.assertIsNotNone(repro_dir)
        print("running minifier", file=sys.stderr)
        minifier_proc, minifier_code = self._run_minifier_launcher(
            repro_dir, isolate=isolate, minifier_args=minifier_args
        )
        print("running repro", file=sys.stderr)
        repro_proc, repro_code = self._run_repro(repro_dir, isolate=isolate)
        self.assertIn(expected_error, repro_proc.stderr.decode("utf-8"))
        self.assertNotEqual(repro_proc.returncode, 0)
        return MinifierTestResult(minifier_code=minifier_code, repro_code=repro_code)
|
valley/lib/python3.10/site-packages/torch/_dynamo/trace_rules.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/variables/__init__.py
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
from .base import VariableTracker
|
| 4 |
+
from .builtin import BuiltinVariable
|
| 5 |
+
from .constant import ConstantVariable, EnumVariable
|
| 6 |
+
from .ctx_manager import (
|
| 7 |
+
CatchWarningsCtxManagerVariable,
|
| 8 |
+
ContextWrappingVariable,
|
| 9 |
+
DeterministicAlgorithmsVariable,
|
| 10 |
+
DisabledSavedTensorsHooksVariable,
|
| 11 |
+
DualLevelContextManager,
|
| 12 |
+
GradIncrementNestingCtxManagerVariable,
|
| 13 |
+
GradInplaceRequiresGradCtxManagerVariable,
|
| 14 |
+
GradModeVariable,
|
| 15 |
+
InferenceModeVariable,
|
| 16 |
+
JvpIncrementNestingCtxManagerVariable,
|
| 17 |
+
SetFwdGradEnabledContextManager,
|
| 18 |
+
StreamContextVariable,
|
| 19 |
+
StreamVariable,
|
| 20 |
+
VmapIncrementNestingCtxManagerVariable,
|
| 21 |
+
WithExitFunctionVariable,
|
| 22 |
+
)
|
| 23 |
+
from .dicts import (
|
| 24 |
+
ConstDictVariable,
|
| 25 |
+
CustomizedDictVariable,
|
| 26 |
+
DataClassVariable,
|
| 27 |
+
DefaultDictVariable,
|
| 28 |
+
SetVariable,
|
| 29 |
+
)
|
| 30 |
+
from .distributed import BackwardHookVariable, DistributedVariable, PlacementVariable
|
| 31 |
+
from .functions import (
|
| 32 |
+
FunctoolsPartialVariable,
|
| 33 |
+
NestedUserFunctionVariable,
|
| 34 |
+
SkipFunctionVariable,
|
| 35 |
+
UserFunctionVariable,
|
| 36 |
+
UserMethodVariable,
|
| 37 |
+
)
|
| 38 |
+
from .higher_order_ops import (
|
| 39 |
+
FunctorchHigherOrderVariable,
|
| 40 |
+
TorchHigherOrderOperatorVariable,
|
| 41 |
+
)
|
| 42 |
+
from .iter import (
|
| 43 |
+
CountIteratorVariable,
|
| 44 |
+
CycleIteratorVariable,
|
| 45 |
+
IteratorVariable,
|
| 46 |
+
ItertoolsVariable,
|
| 47 |
+
RepeatIteratorVariable,
|
| 48 |
+
)
|
| 49 |
+
from .lazy import LazyVariableTracker
|
| 50 |
+
from .lists import (
|
| 51 |
+
BaseListVariable,
|
| 52 |
+
ListIteratorVariable,
|
| 53 |
+
ListVariable,
|
| 54 |
+
NamedTupleVariable,
|
| 55 |
+
RangeVariable,
|
| 56 |
+
RestrictedListSubclassVariable,
|
| 57 |
+
SliceVariable,
|
| 58 |
+
TupleIteratorVariable,
|
| 59 |
+
TupleVariable,
|
| 60 |
+
)
|
| 61 |
+
from .misc import (
|
| 62 |
+
AutogradFunctionContextVariable,
|
| 63 |
+
AutogradFunctionVariable,
|
| 64 |
+
ClosureVariable,
|
| 65 |
+
DeletedVariable,
|
| 66 |
+
ExceptionVariable,
|
| 67 |
+
GetAttrVariable,
|
| 68 |
+
InspectSignatureVariable,
|
| 69 |
+
LambdaVariable,
|
| 70 |
+
MethodWrapperVariable,
|
| 71 |
+
NewCellVariable,
|
| 72 |
+
NewGlobalVariable,
|
| 73 |
+
NumpyVariable,
|
| 74 |
+
PythonModuleVariable,
|
| 75 |
+
RegexPatternVariable,
|
| 76 |
+
StopIterationVariable,
|
| 77 |
+
StringFormatVariable,
|
| 78 |
+
SuperVariable,
|
| 79 |
+
TorchVersionVariable,
|
| 80 |
+
TypingVariable,
|
| 81 |
+
UnknownVariable,
|
| 82 |
+
)
|
| 83 |
+
from .nn_module import NNModuleVariable, UnspecializedNNModuleVariable
|
| 84 |
+
|
| 85 |
+
from .optimizer import OptimizerVariable
|
| 86 |
+
from .sdpa import SDPAParamsVariable
|
| 87 |
+
from .tensor import (
|
| 88 |
+
FakeItemVariable,
|
| 89 |
+
NumpyNdarrayVariable,
|
| 90 |
+
SymNodeVariable,
|
| 91 |
+
TensorVariable,
|
| 92 |
+
UnspecializedPythonVariable,
|
| 93 |
+
UntypedStorageVariable,
|
| 94 |
+
)
|
| 95 |
+
from .torch import TorchCtxManagerClassVariable, TorchInGraphFunctionVariable
|
| 96 |
+
from .user_defined import (
|
| 97 |
+
RemovableHandleVariable,
|
| 98 |
+
UserDefinedClassVariable,
|
| 99 |
+
UserDefinedObjectVariable,
|
| 100 |
+
)
|
| 101 |
+
|
| 102 |
+
__all__ = [
|
| 103 |
+
"AutogradFunctionContextVariable",
|
| 104 |
+
"AutogradFunctionVariable",
|
| 105 |
+
"BackwardHookVariable",
|
| 106 |
+
"BaseListVariable",
|
| 107 |
+
"BuiltinVariable",
|
| 108 |
+
"CatchWarningsCtxManagerVariable",
|
| 109 |
+
"ClosureVariable",
|
| 110 |
+
"ConstantVariable",
|
| 111 |
+
"ConstDictVariable",
|
| 112 |
+
"ContextWrappingVariable",
|
| 113 |
+
"CountIteratorVariable",
|
| 114 |
+
"CustomizedDictVariable",
|
| 115 |
+
"CycleIteratorVariable",
|
| 116 |
+
"DataClassVariable",
|
| 117 |
+
"DefaultDictVariable",
|
| 118 |
+
"DeletedVariable",
|
| 119 |
+
"DeterministicAlgorithmsVariable",
|
| 120 |
+
"EnumVariable",
|
| 121 |
+
"FakeItemVariable",
|
| 122 |
+
"GetAttrVariable",
|
| 123 |
+
"GradModeVariable",
|
| 124 |
+
"InspectSignatureVariable",
|
| 125 |
+
"IteratorVariable",
|
| 126 |
+
"ItertoolsVariable",
|
| 127 |
+
"LambdaVariable",
|
| 128 |
+
"LazyVariableTracker",
|
| 129 |
+
"ListIteratorVariable",
|
| 130 |
+
"ListVariable",
|
| 131 |
+
"NamedTupleVariable",
|
| 132 |
+
"NestedUserFunctionVariable",
|
| 133 |
+
"NewCellVariable",
|
| 134 |
+
"NewGlobalVariable",
|
| 135 |
+
"NNModuleVariable",
|
| 136 |
+
"NumpyNdarrayVariable",
|
| 137 |
+
"NumpyVariable",
|
| 138 |
+
"OptimizerVariable",
|
| 139 |
+
"PlacementVariable",
|
| 140 |
+
"PythonModuleVariable",
|
| 141 |
+
"RangeVariable",
|
| 142 |
+
"RegexPatternVariable",
|
| 143 |
+
"RemovableHandleVariable",
|
| 144 |
+
"RepeatIteratorVariable",
|
| 145 |
+
"RestrictedListSubclassVariable",
|
| 146 |
+
"SDPAParamsVariable",
|
| 147 |
+
"SkipFunctionVariable",
|
| 148 |
+
"SliceVariable",
|
| 149 |
+
"StopIterationVariable",
|
| 150 |
+
"StringFormatVariable",
|
| 151 |
+
"SuperVariable",
|
| 152 |
+
"TensorVariable",
|
| 153 |
+
"TorchCtxManagerClassVariable",
|
| 154 |
+
"TorchInGraphFunctionVariable",
|
| 155 |
+
"TorchVersionVariable",
|
| 156 |
+
"TupleVariable",
|
| 157 |
+
"UnknownVariable",
|
| 158 |
+
"UnspecializedNNModuleVariable",
|
| 159 |
+
"UnspecializedPythonVariable",
|
| 160 |
+
"UntypedStorageVariable",
|
| 161 |
+
"UserDefinedClassVariable",
|
| 162 |
+
"UserDefinedObjectVariable",
|
| 163 |
+
"UserFunctionVariable",
|
| 164 |
+
"UserMethodVariable",
|
| 165 |
+
"VariableTracker",
|
| 166 |
+
"WithExitFunctionVariable",
|
| 167 |
+
]
|
valley/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/base.cpython-310.pyc
ADDED
|
Binary file (11.6 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builder.cpython-310.pyc
ADDED
|
Binary file (48.8 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/constant.cpython-310.pyc
ADDED
|
Binary file (8.4 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/ctx_manager.cpython-310.pyc
ADDED
|
Binary file (32.4 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/functions.cpython-310.pyc
ADDED
|
Binary file (30.1 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/higher_order_ops.cpython-310.pyc
ADDED
|
Binary file (41.6 kB). View file
|
|
|