Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +2 -0
- pllava/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophantine.cpython-310.pyc +3 -0
- pllava/lib/python3.10/site-packages/sympy/solvers/ode/tests/__pycache__/test_systems.cpython-310.pyc +3 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/__pycache__/__init__.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/__pycache__/guards.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/__pycache__/mutation_guard.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__init__.py +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/__init__.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/debugging.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/registry.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/cache_size.py +185 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/codegen.py +511 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/comptime.py +401 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/logging.py +59 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/__pycache__/functools.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/__pycache__/loader.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/__pycache__/os.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/builtins.py +48 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/functools.py +6 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/itertools.py +85 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/loader.py +35 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/os.py +36 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/profiler.py +156 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/repro/__init__.py +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/__init__.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_aot.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/repro/after_aot.py +966 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/resume_execution.py +720 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/source.py +759 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/trace_rules.py +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/utils.py +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/__init__.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/base.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builder.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builtin.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/constant.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/ctx_manager.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/dicts.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/distributed.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/functions.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/higher_order_ops.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/iter.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/lazy.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/lists.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/misc.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/nn_module.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/optimizer.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/script_object.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/sdpa.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/tensor.cpython-310.pyc +0 -0
.gitattributes
CHANGED
|
@@ -308,3 +308,5 @@ pllava/lib/python3.10/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=l
|
|
| 308 |
pllava/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 309 |
pllava/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_wester.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 310 |
pllava/lib/python3.10/site-packages/sympy/solvers/__pycache__/solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
| 308 |
pllava/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 309 |
pllava/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_wester.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 310 |
pllava/lib/python3.10/site-packages/sympy/solvers/__pycache__/solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 311 |
+
pllava/lib/python3.10/site-packages/sympy/solvers/ode/tests/__pycache__/test_systems.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 312 |
+
pllava/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophantine.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
pllava/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophantine.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8cec32ae3a222bc4422bccf19ad22cbd790c72380a493add8286b0ac9426d4f2
|
| 3 |
+
size 106614
|
pllava/lib/python3.10/site-packages/sympy/solvers/ode/tests/__pycache__/test_systems.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:aec3146ddca95163efdce7fc532f74276fd61b5eb913b4d7f0de225ae548bfd2
|
| 3 |
+
size 112034
|
pllava/lib/python3.10/site-packages/torch/_dynamo/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (2.74 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/__pycache__/guards.cpython-310.pyc
ADDED
|
Binary file (63.2 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/__pycache__/mutation_guard.cpython-310.pyc
ADDED
|
Binary file (4.5 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__init__.py
ADDED
|
File without changes
|
pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (174 Bytes). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/debugging.cpython-310.pyc
ADDED
|
Binary file (9.93 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/registry.cpython-310.pyc
ADDED
|
Binary file (3.93 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/cache_size.py
ADDED
|
@@ -0,0 +1,185 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import logging
|
| 3 |
+
import types
|
| 4 |
+
import weakref
|
| 5 |
+
from dataclasses import dataclass
|
| 6 |
+
from typing import Tuple
|
| 7 |
+
|
| 8 |
+
from torch._guards import CompileId
|
| 9 |
+
|
| 10 |
+
from . import config
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
log = logging.getLogger(__name__)
|
| 14 |
+
"""
|
| 15 |
+
[Note on cache size limit]
|
| 16 |
+
|
| 17 |
+
Background - TorchDynamo cache is a linked list. Each cache entry is a
|
| 18 |
+
(check_fn, out_code, next pointer). These are stored on the f_code's co_extra
|
| 19 |
+
scratch space. When a frame is invoked, we walk this linked list and run
|
| 20 |
+
check_fn in each cache_entry to decide if the frame needs recompilation. If none
|
| 21 |
+
of the check_fn's returns True, we recompile and add a new entry. To ensure we
|
| 22 |
+
don't end up recompiling infinitely, we put limits on the cache size.
|
| 23 |
+
|
| 24 |
+
There are two limits
|
| 25 |
+
1) cache_size_limit
|
| 26 |
+
2) accumulated_cache_size_limit
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
Earlier we used to have only limit - maximum number of entries in 1 cache line
|
| 30 |
+
(which is now represented by (2) above). So, why do we need two limits? Lets try
|
| 31 |
+
to understand that.
|
| 32 |
+
|
| 33 |
+
In general, we want our cache limit value to be a small number (e.g. 8 or even
|
| 34 |
+
lower). This ensures that for frames that cause too many recompilation fall to
|
| 35 |
+
eager quickly. However, there is another problem that prevents us from lowering
|
| 36 |
+
the value of cache_size_limit. This is due to ID_MATCH'd guards. Today, we put
|
| 37 |
+
ID_MATCH guards on nn module if there is a graph break. This means we will have
|
| 38 |
+
many recompilations for the same code object because the ID_MATCH guard fails
|
| 39 |
+
for different instances of the nn module. This is a common pattern in how models
|
| 40 |
+
are authored. Therefore, this requires us to keep the cache_size_limit high.
|
| 41 |
+
|
| 42 |
+
We resolve this by introducing these two limits. The first limit (1) limits the
|
| 43 |
+
number of cache entries that have an ID_MATCH'd guard for an nn module instance.
|
| 44 |
+
And, (2)nd limit becomes a safeguard mechanism to have a maximum compilations
|
| 45 |
+
for a code object. One important question is - what is the limit for the code
|
| 46 |
+
object that does not have any ID_MATCH guard? For such code objects, we choose
|
| 47 |
+
(1) as the cache size limit.
|
| 48 |
+
|
| 49 |
+
Lets take an example to understand how these limits help. Suppose, we have 16
|
| 50 |
+
instances of a nn module and we ID_MATCH on the self object. Further, suppose
|
| 51 |
+
the inputs to these functions have varying batch size, leading to one
|
| 52 |
+
recompilation. In total, there will be 32 recompilations, and therefore 32 cache
|
| 53 |
+
entries on the forward code object. In the older case when we had only 1 limit,
|
| 54 |
+
our cache size limit must be >= 32 to capture all these recompilations. Now,
|
| 55 |
+
suppose there is a separate function in the same program which is very dynamic
|
| 56 |
+
and unsuitable for compilation. Such a function will need to undergo 32
|
| 57 |
+
compilations to burst the cache and fallback to eager. These 32 recompilations
|
| 58 |
+
are too many and we want to fallback for these compilation-unfriendly functions
|
| 59 |
+
sooner.
|
| 60 |
+
|
| 61 |
+
In the new scenario, we can have (1) cache_size_limit = 2, (2)
|
| 62 |
+
accumulated_cache_size_limit = 32. This means that each ID_MATCH'd object can
|
| 63 |
+
have maximum of two cache entries, and the maximum number of cache entries
|
| 64 |
+
(irrespective of ID_MATCH obj) is 32. This covers the case of forward code
|
| 65 |
+
object which has 32 recompilations. For the other function, the one unsuitable
|
| 66 |
+
for recompilation, our limit is 2. So, we will burst the cache in just 2
|
| 67 |
+
recompilations. In this manner, these 2 limits help us resolve the tension
|
| 68 |
+
mentioned earlier.
|
| 69 |
+
"""
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
@dataclass
|
| 73 |
+
class CacheSizeRelevantForFrame:
|
| 74 |
+
"""
|
| 75 |
+
We track the number of cache entries that have same id_match objects as the
|
| 76 |
+
given frame.
|
| 77 |
+
|
| 78 |
+
TODO(janimesh) - Consider adding a map from tuple_of_match_ids to count -
|
| 79 |
+
https://github.com/pytorch/pytorch/pull/107496#discussion_r1304564682 - this
|
| 80 |
+
could be useful for debugging as well.
|
| 81 |
+
"""
|
| 82 |
+
|
| 83 |
+
# Total number of CacheEntry objects in the Dynamo linked list
|
| 84 |
+
num_cache_entries: int = 0
|
| 85 |
+
|
| 86 |
+
# Number of CacheEntry objects having same ID_MATCH'd objects as given frame.
|
| 87 |
+
num_cache_entries_with_same_id_matched_objs: int = 0
|
| 88 |
+
|
| 89 |
+
def will_compilation_exceed(self, limit: int) -> bool:
|
| 90 |
+
# Checks if a compilation will exceed the given limit (thats why >=).
|
| 91 |
+
return (
|
| 92 |
+
self.will_compilation_exceed_accumulated_limit()
|
| 93 |
+
or self.will_compilation_exceed_specific_limit(limit)
|
| 94 |
+
)
|
| 95 |
+
|
| 96 |
+
def will_compilation_exceed_accumulated_limit(self) -> bool:
|
| 97 |
+
return self.num_cache_entries >= config.accumulated_cache_size_limit
|
| 98 |
+
|
| 99 |
+
def will_compilation_exceed_specific_limit(self, limit: int) -> bool:
|
| 100 |
+
return self.num_cache_entries_with_same_id_matched_objs >= limit
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def _get_weakref_from_f_locals(frame: types.FrameType, local_name: str):
|
| 104 |
+
obj = frame.f_locals.get(local_name, None)
|
| 105 |
+
weak_id = None
|
| 106 |
+
try:
|
| 107 |
+
weak_id = weakref.ref(obj)
|
| 108 |
+
except TypeError:
|
| 109 |
+
pass # cannot weakref bool object
|
| 110 |
+
return weak_id
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def _has_same_id_matched_objs(frame: types.FrameType, cache_entry) -> bool:
|
| 114 |
+
"""
|
| 115 |
+
Checks if the ID_MATCH'd objects saved on cache_entry are same as the ones
|
| 116 |
+
in frame.f_locals.
|
| 117 |
+
"""
|
| 118 |
+
if not cache_entry:
|
| 119 |
+
return False
|
| 120 |
+
|
| 121 |
+
for (
|
| 122 |
+
local_name,
|
| 123 |
+
weakref_from_cache_entry,
|
| 124 |
+
) in cache_entry.check_fn.id_matched_objs.items():
|
| 125 |
+
if weakref_from_cache_entry() is not None:
|
| 126 |
+
weakref_from_frame = _get_weakref_from_f_locals(frame, local_name)
|
| 127 |
+
if weakref_from_frame != weakref_from_cache_entry:
|
| 128 |
+
return False
|
| 129 |
+
|
| 130 |
+
# Also covers the case where no ID_MATCH objects are saved in frame.f_locals
|
| 131 |
+
return True
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def compute_cache_size(
|
| 135 |
+
frame: types.FrameType, cache_entry
|
| 136 |
+
) -> CacheSizeRelevantForFrame:
|
| 137 |
+
# Walk the linked list to calculate the cache size
|
| 138 |
+
num_cache_entries = 0
|
| 139 |
+
num_cache_entries_with_same_id_matched_objs = 0
|
| 140 |
+
|
| 141 |
+
while cache_entry:
|
| 142 |
+
num_cache_entries += 1
|
| 143 |
+
# Track the number of cache entries having same ID_MATCH'd objects as
|
| 144 |
+
# that of frame.f_locals. This will be used later to compare against the
|
| 145 |
+
# cache_size_limit.
|
| 146 |
+
if _has_same_id_matched_objs(frame, cache_entry):
|
| 147 |
+
num_cache_entries_with_same_id_matched_objs += 1
|
| 148 |
+
cache_entry = cache_entry.next
|
| 149 |
+
|
| 150 |
+
return CacheSizeRelevantForFrame(
|
| 151 |
+
num_cache_entries, num_cache_entries_with_same_id_matched_objs
|
| 152 |
+
)
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def is_recompilation(cache_size: CacheSizeRelevantForFrame) -> bool:
|
| 156 |
+
"""
|
| 157 |
+
If the frame (earlier parsed by compute_cache_size) has more than 1 cache
|
| 158 |
+
entry with same ID_MATCH'd objects, then its a recompilation.
|
| 159 |
+
"""
|
| 160 |
+
# Note that you can have multiple entries in the cache but still not a
|
| 161 |
+
# recompile, e.g., you can have 64 nn module instances, each one having an
|
| 162 |
+
# ID_MATCH guard, and each one having just 1 cache entry in the cache. In
|
| 163 |
+
# this case, we can have 64 entries in the cache, but no recompilation
|
| 164 |
+
# because there is only one entry for each id_matched_obj.
|
| 165 |
+
return cache_size.will_compilation_exceed(1)
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
def exceeds_cache_size_limit(
|
| 169 |
+
cache_size: CacheSizeRelevantForFrame, compile_id: CompileId
|
| 170 |
+
) -> Tuple[bool, str]:
|
| 171 |
+
"""
|
| 172 |
+
Checks if we are exceeding the cache size limit.
|
| 173 |
+
"""
|
| 174 |
+
if cache_size.will_compilation_exceed_accumulated_limit():
|
| 175 |
+
return True, "accumulated_cache_size_limit"
|
| 176 |
+
if cache_size.will_compilation_exceed_specific_limit(config.cache_size_limit):
|
| 177 |
+
return True, "cache_size_limit"
|
| 178 |
+
# NOTE this check is needed in the case that the frame's cache doesn't grow
|
| 179 |
+
# and we keep recompiling. This can happen if the guard check_fn becomes invalidated,
|
| 180 |
+
# e.g. due to guarded objects being freed. This technically makes the
|
| 181 |
+
# will_compilation_exceed_accumulated_limit check unnecessary, but we will keep the
|
| 182 |
+
# check in case we have a better fix in the future.
|
| 183 |
+
if compile_id.frame_compile_id >= config.accumulated_cache_size_limit:
|
| 184 |
+
return True, "accumulated_cache_size_limit"
|
| 185 |
+
return False, ""
|
pllava/lib/python3.10/site-packages/torch/_dynamo/codegen.py
ADDED
|
@@ -0,0 +1,511 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import collections
|
| 3 |
+
import dataclasses
|
| 4 |
+
import re
|
| 5 |
+
import sys
|
| 6 |
+
import types
|
| 7 |
+
from typing import Counter, Dict, List, Optional
|
| 8 |
+
|
| 9 |
+
import torch.nn
|
| 10 |
+
|
| 11 |
+
from . import utils
|
| 12 |
+
from .bytecode_transformation import (
|
| 13 |
+
add_push_null,
|
| 14 |
+
add_push_null_call_function_ex,
|
| 15 |
+
create_call_function,
|
| 16 |
+
create_call_method,
|
| 17 |
+
create_dup_top,
|
| 18 |
+
create_instruction,
|
| 19 |
+
create_load_method,
|
| 20 |
+
create_rot_n,
|
| 21 |
+
Instruction,
|
| 22 |
+
)
|
| 23 |
+
from .exc import unimplemented
|
| 24 |
+
from .source import AttrSource, Source
|
| 25 |
+
from .utils import is_safe_constant, rot_n_helper
|
| 26 |
+
from .variables.base import VariableTracker
|
| 27 |
+
from .variables.nn_module import NNModuleVariable
|
| 28 |
+
from .variables.tensor import (
|
| 29 |
+
NumpyNdarrayVariable,
|
| 30 |
+
SymNodeVariable,
|
| 31 |
+
TensorVariable,
|
| 32 |
+
UnspecializedPythonVariable,
|
| 33 |
+
)
|
| 34 |
+
from .variables.torch_function import TensorWithTFOverrideVariable
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
@dataclasses.dataclass
|
| 38 |
+
class GraphOutputEntry:
|
| 39 |
+
index: int
|
| 40 |
+
variable: VariableTracker
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class PyCodegen:
|
| 44 |
+
"""
|
| 45 |
+
Helper class uses for constructing Python bytecode
|
| 46 |
+
"""
|
| 47 |
+
|
| 48 |
+
def __init__(
|
| 49 |
+
self,
|
| 50 |
+
tx=None,
|
| 51 |
+
root: Optional[torch.nn.Module] = None,
|
| 52 |
+
graph_output_var: Optional[str] = None,
|
| 53 |
+
tempvars=None,
|
| 54 |
+
) -> None:
|
| 55 |
+
self.root = root
|
| 56 |
+
self.top_of_stack: Optional[VariableTracker] = None
|
| 57 |
+
self.uses: Counter[VariableTracker] = collections.Counter()
|
| 58 |
+
self.graph_outputs: Dict[int, GraphOutputEntry] = {}
|
| 59 |
+
self._output: List[Instruction] = []
|
| 60 |
+
self.tempvars = tempvars or {}
|
| 61 |
+
self.tx = tx
|
| 62 |
+
self.graph_output_var = graph_output_var
|
| 63 |
+
self.code_options = self.tx.output.code_options
|
| 64 |
+
self.cell_and_freevars = self.tx.cell_and_freevars
|
| 65 |
+
self.new_var = self.tx.output.new_var
|
| 66 |
+
self.mutable_side_effects_from_source = False
|
| 67 |
+
self.value_from_source: bool = True
|
| 68 |
+
|
| 69 |
+
def restore_stack(self, stack_values, *, value_from_source=True):
|
| 70 |
+
prior = self.mutable_side_effects_from_source
|
| 71 |
+
self.mutable_side_effects_from_source = True
|
| 72 |
+
prev = self.value_from_source
|
| 73 |
+
self.value_from_source &= value_from_source
|
| 74 |
+
try:
|
| 75 |
+
self.foreach(stack_values)
|
| 76 |
+
finally:
|
| 77 |
+
self.mutable_side_effects_from_source = prior
|
| 78 |
+
self.value_from_source = prev
|
| 79 |
+
|
| 80 |
+
def graph_output_vars(self):
|
| 81 |
+
return [x.variable for x in self.graph_outputs.values()]
|
| 82 |
+
|
| 83 |
+
def call_reconstruct(self, value):
|
| 84 |
+
res = value.reconstruct(self)
|
| 85 |
+
assert res is None, f"reconstruct!=None {value}"
|
| 86 |
+
|
| 87 |
+
def add_push_null(self, gen_fn, call_function_ex=False):
|
| 88 |
+
"""
|
| 89 |
+
`gen_fn` generates instructions via PyCodegen methods
|
| 90 |
+
that push a single callable to the stack.
|
| 91 |
+
|
| 92 |
+
`add_push_null` pushes a NULL to the stack before or after the
|
| 93 |
+
instructions generated by `gen_fn`, depending on Python version.
|
| 94 |
+
|
| 95 |
+
Will attempt to use the NULL push bit for instructions
|
| 96 |
+
with such bits (LOAD_GLOBAL 3.11+, LOAD_ATTR 3.12+, LOAD_SUPER_ATTR).
|
| 97 |
+
"""
|
| 98 |
+
old_len = len(self._output)
|
| 99 |
+
if sys.version_info < (3, 13):
|
| 100 |
+
# gen_fn may DUP_TOP instead if TOS is not cleared.
|
| 101 |
+
# Will cause problems since NULL will be pushed right
|
| 102 |
+
# before the generated instructions in <= 3.12
|
| 103 |
+
self.clear_tos()
|
| 104 |
+
gen_fn()
|
| 105 |
+
# inplace modify self._output
|
| 106 |
+
added_insts = self._output[old_len:]
|
| 107 |
+
del self._output[old_len:]
|
| 108 |
+
if call_function_ex:
|
| 109 |
+
self._output.extend(add_push_null_call_function_ex(added_insts))
|
| 110 |
+
else:
|
| 111 |
+
self._output.extend(add_push_null(added_insts))
|
| 112 |
+
if sys.version_info >= (3, 13):
|
| 113 |
+
# NULL will be at top of stack
|
| 114 |
+
self.clear_tos()
|
| 115 |
+
|
| 116 |
+
def __call__(self, value, allow_cache=True):
|
| 117 |
+
"""Generate code such that top-of-stack (TOS) is set to value"""
|
| 118 |
+
if isinstance(value, Source):
|
| 119 |
+
self.call_reconstruct(value)
|
| 120 |
+
self.clear_tos()
|
| 121 |
+
return
|
| 122 |
+
|
| 123 |
+
assert isinstance(value, VariableTracker)
|
| 124 |
+
output = self._output
|
| 125 |
+
graph_outputs = self.graph_outputs
|
| 126 |
+
|
| 127 |
+
if self.top_of_stack is value and allow_cache:
|
| 128 |
+
output.append(create_dup_top())
|
| 129 |
+
return
|
| 130 |
+
|
| 131 |
+
if self.mutable_side_effects_from_source:
|
| 132 |
+
# this is needed to get aliasing relationships right
|
| 133 |
+
# value.mutable_local.source will get mutated to hold `value`
|
| 134 |
+
# mutable_side_effects_from_source=False is used to codegen the mutation
|
| 135 |
+
# mutable_side_effects_from_source=True is used to codegen a reference
|
| 136 |
+
from .side_effects import MutableSideEffects
|
| 137 |
+
|
| 138 |
+
if isinstance(value.mutable_local, MutableSideEffects):
|
| 139 |
+
self(value.mutable_local.source)
|
| 140 |
+
return
|
| 141 |
+
|
| 142 |
+
if allow_cache:
|
| 143 |
+
if value.mutable_local and value.mutable_local in self.tempvars:
|
| 144 |
+
output.append(self.create_load(self.tempvars[value.mutable_local]))
|
| 145 |
+
self.top_of_stack = value
|
| 146 |
+
return
|
| 147 |
+
if self.tempvars.get(value) is not None:
|
| 148 |
+
output.append(self.create_load(self.tempvars[value]))
|
| 149 |
+
self.top_of_stack = value
|
| 150 |
+
return
|
| 151 |
+
|
| 152 |
+
if value.source is not None and allow_cache and self.value_from_source:
|
| 153 |
+
self.call_reconstruct(value.source)
|
| 154 |
+
elif value.is_python_constant() and is_safe_constant(
|
| 155 |
+
value.as_python_constant()
|
| 156 |
+
):
|
| 157 |
+
output.append(self.create_load_const(value.as_python_constant()))
|
| 158 |
+
elif isinstance(value, TensorWithTFOverrideVariable):
|
| 159 |
+
graph_outputs_key = self.add_graph_output(value)
|
| 160 |
+
|
| 161 |
+
self.add_push_null(
|
| 162 |
+
lambda: self.load_import_from(utils.__name__, "to_subclass")
|
| 163 |
+
)
|
| 164 |
+
self.load_graph_output(graph_outputs[graph_outputs_key].index)
|
| 165 |
+
output.append(
|
| 166 |
+
self.create_load_global(
|
| 167 |
+
value.global_mangled_class_name(self.tx), add=True
|
| 168 |
+
)
|
| 169 |
+
)
|
| 170 |
+
output.extend(create_call_function(2, False))
|
| 171 |
+
elif (
|
| 172 |
+
isinstance(value, SymNodeVariable)
|
| 173 |
+
and value.python_type() == float
|
| 174 |
+
and not self.tx.export
|
| 175 |
+
):
|
| 176 |
+
# This is a little unusual; force the output convention to be a
|
| 177 |
+
# Tensor here. Don't do this for export because this is
|
| 178 |
+
# apparently load bearing for export tests (but I am a bit
|
| 179 |
+
# doubtful it actually works in the real world)
|
| 180 |
+
# NB: It works to add_graph_output on a computed expression
|
| 181 |
+
# as_tensor here, because we memoize as_tensor calls on
|
| 182 |
+
# SymNodeVariable!
|
| 183 |
+
graph_outputs_key = self.add_graph_output(value.as_tensor(self.tx))
|
| 184 |
+
|
| 185 |
+
def gen_fn():
|
| 186 |
+
self.load_graph_output(graph_outputs[graph_outputs_key].index)
|
| 187 |
+
output.append(self.create_load_attr("item"))
|
| 188 |
+
|
| 189 |
+
self.add_push_null(gen_fn)
|
| 190 |
+
output.extend(create_call_function(0, False))
|
| 191 |
+
elif isinstance(
|
| 192 |
+
value,
|
| 193 |
+
(
|
| 194 |
+
TensorVariable,
|
| 195 |
+
SymNodeVariable,
|
| 196 |
+
UnspecializedPythonVariable,
|
| 197 |
+
NumpyNdarrayVariable,
|
| 198 |
+
),
|
| 199 |
+
):
|
| 200 |
+
graph_outputs_key = self.add_graph_output(value)
|
| 201 |
+
|
| 202 |
+
if isinstance(value, NumpyNdarrayVariable):
|
| 203 |
+
self.add_push_null(
|
| 204 |
+
lambda: self.load_import_from(utils.__name__, "to_numpy_helper")
|
| 205 |
+
)
|
| 206 |
+
self.load_graph_output(graph_outputs[graph_outputs_key].index)
|
| 207 |
+
output.extend(create_call_function(1, False))
|
| 208 |
+
elif isinstance(value, UnspecializedPythonVariable) and value.need_unwrap:
|
| 209 |
+
|
| 210 |
+
def gen_fn():
|
| 211 |
+
self.load_graph_output(graph_outputs[graph_outputs_key].index)
|
| 212 |
+
output.append(self.create_load_attr("item"))
|
| 213 |
+
|
| 214 |
+
self.add_push_null(gen_fn)
|
| 215 |
+
output.extend(create_call_function(0, False))
|
| 216 |
+
else:
|
| 217 |
+
self.load_graph_output(graph_outputs[graph_outputs_key].index)
|
| 218 |
+
elif isinstance(value, NNModuleVariable):
|
| 219 |
+
parts = value.module_key.split(".")
|
| 220 |
+
if parts[0] in self.code_options["co_varnames"]:
|
| 221 |
+
output.append(self.create_load(parts[0]))
|
| 222 |
+
parts = parts[1:]
|
| 223 |
+
else:
|
| 224 |
+
assert self.root is not None
|
| 225 |
+
output.append(self.create_load_output(self.root))
|
| 226 |
+
for part in parts:
|
| 227 |
+
output.append(self.create_load_attr(part))
|
| 228 |
+
else:
|
| 229 |
+
self.uses[value] += 1
|
| 230 |
+
try:
|
| 231 |
+
self.call_reconstruct(value)
|
| 232 |
+
except NotImplementedError:
|
| 233 |
+
unimplemented(f"reconstruct: {value}")
|
| 234 |
+
if allow_cache and value in self.tempvars:
|
| 235 |
+
self._output.append(create_dup_top())
|
| 236 |
+
self.add_cache(value)
|
| 237 |
+
|
| 238 |
+
self.top_of_stack = value
|
| 239 |
+
|
| 240 |
+
def add_graph_output(self, value):
|
| 241 |
+
graph_outputs_key = id(value.as_proxy())
|
| 242 |
+
if graph_outputs_key not in self.graph_outputs:
|
| 243 |
+
self.graph_outputs[graph_outputs_key] = GraphOutputEntry(
|
| 244 |
+
len(self.graph_outputs), value
|
| 245 |
+
)
|
| 246 |
+
return graph_outputs_key
|
| 247 |
+
|
| 248 |
+
def load_graph_output(self, index):
|
| 249 |
+
output = self._output
|
| 250 |
+
output.append(self.create_load(self.graph_output_var))
|
| 251 |
+
output.append(self._create_load_const(index))
|
| 252 |
+
output.append(create_instruction("BINARY_SUBSCR"))
|
| 253 |
+
|
| 254 |
+
def add_cache(self, value):
|
| 255 |
+
var = self.new_var()
|
| 256 |
+
self.tempvars[value] = var
|
| 257 |
+
if value.mutable_local:
|
| 258 |
+
self.tempvars[value.mutable_local] = var
|
| 259 |
+
self._output.append(self.create_store(var))
|
| 260 |
+
|
| 261 |
+
def foreach(self, items):
    """Codegen each item in turn (the instance itself is callable on a value)."""
    for i in items:
        self(i)

def setup_globally_cached(self, name, value):
    """Store value in a new global"""
    # Sanitize into a valid Python identifier before using it as a global name.
    name = re.sub(r"[^a-zA-Z0-9_]+", "_", name)
    f_globals = self.tx.f_globals
    if name in f_globals:
        # A pre-existing binding must refer to the very same object.
        assert id(f_globals[name]) == id(value)
    else:
        f_globals[name] = value
    return [self.create_load_global(name, add=True)]

def clear_tos(self):
    # Invalidate the cached top-of-stack value; subsequent codegen must not
    # assume anything about what is currently on the stack.
    self.top_of_stack = None
|
| 277 |
+
|
| 278 |
+
def append_output(self, inst):
    """Append a single instruction; invalidates the top-of-stack cache."""
    assert isinstance(inst, Instruction)
    self._output.append(inst)
    self.clear_tos()

def extend_output(self, insts):
    """Append a sequence of instructions; invalidates the top-of-stack cache."""
    assert all(isinstance(x, Instruction) for x in insts)
    self._output.extend(insts)
    self.clear_tos()

def get_instructions(self) -> List[Instruction]:
    """Return the instructions generated so far (the live list, not a copy)."""
    return self._output
|
| 290 |
+
|
| 291 |
+
def create_load(self, name) -> Instruction:
    """Load a local variable: LOAD_DEREF for cell/free vars, else LOAD_FAST."""
    if name in self.cell_and_freevars():
        return create_instruction("LOAD_DEREF", argval=name)
    assert name in self.code_options["co_varnames"], f"{name} missing"
    return create_instruction("LOAD_FAST", argval=name)

def create_load_closure(self, name) -> Instruction:
    """Load a closure cell for `name` (must be a cell or free variable)."""
    assert name in self.cell_and_freevars()
    # 3.13 removed LOAD_CLOSURE; LOAD_FAST plays its role there.
    inst_name = "LOAD_FAST" if sys.version_info >= (3, 13) else "LOAD_CLOSURE"
    return create_instruction(inst_name, argval=name)

def create_store(self, name) -> Instruction:
    """Store into a local: STORE_DEREF for cell/free vars, else STORE_FAST."""
    if name in self.cell_and_freevars():
        return create_instruction("STORE_DEREF", argval=name)
    assert name in self.code_options["co_varnames"]
    return create_instruction("STORE_FAST", argval=name)

def create_load_global(self, name, add=False) -> Instruction:
    """LOAD_GLOBAL for `name`; with add=True, register it in co_names first."""
    if add:
        self.tx.output.update_co_names(name)
    assert name in self.code_options["co_names"], f"{name} not in co_names"
    return create_instruction("LOAD_GLOBAL", argval=name)

def create_load_const(self, value) -> Instruction:
    """LOAD_CONST, restricted to values that are safe to embed in code objects."""
    assert is_safe_constant(value), f"unsafe constant {value}"
    return self._create_load_const(value)

def _create_load_const(self, value) -> Instruction:
    # Unchecked variant; the caller is responsible for constant-safety.
    return create_instruction("LOAD_CONST", argval=value)

# Graph outputs are loaded via the unchecked const path.
create_load_output = _create_load_const
|
| 322 |
+
|
| 323 |
+
def load_method(self, name):
    """Emit a LOAD_METHOD-style load of attribute `name` (registered in co_names)."""
    self.tx.output.update_co_names(name)
    self.append_output(create_load_method(name))

def call_method(self, nargs):
    """Emit the call sequence for a previously loaded method with nargs args."""
    self.extend_output(create_call_method(nargs))

def create_load_attr(self, name) -> Instruction:
    # Ensure the attribute name exists in co_names before referencing it.
    if name not in self.code_options["co_names"]:
        self.code_options["co_names"] += (name,)
    return create_instruction("LOAD_ATTR", argval=name)

def load_attr(self, name):
    self.append_output(self.create_load_attr(name))

def create_load_attrs(self, names):
    """LOAD_ATTR chain for a dotted path, e.g. "a.b.c" -> three instructions."""
    return [self.create_load_attr(name) for name in names.split(".")]

def create_store_attr(self, name) -> Instruction:
    # Ensure the attribute name exists in co_names before referencing it.
    if name not in self.code_options["co_names"]:
        self.code_options["co_names"] += (name,)
    return create_instruction("STORE_ATTR", argval=name)

def store_attr(self, name):
    self.append_output(self.create_store_attr(name))
|
| 348 |
+
|
| 349 |
+
def load_function_name(self, fn_name, push_null, num_on_stack=0):
    """Load the global fn_name on the stack num_on_stack down"""
    output = []
    if push_null and sys.version_info >= (3, 11):
        output.extend(add_push_null(self.create_load_global(fn_name, add=True)))
        if num_on_stack > 0:
            output.extend(
                [
                    # Rotate twice: both the pushed NULL and the function
                    # must sink below the num_on_stack existing entries.
                    *self.rot_n(num_on_stack + 2),
                    *self.rot_n(num_on_stack + 2),
                ]
            )
    else:
        output.extend(
            [
                self.create_load_global(fn_name, add=True),
                *self.rot_n(num_on_stack + 1),
            ]
        )
    return output

def rot_n(self, n):
    """Rotate the top n stack entries, emulating when no native rotate exists."""
    try:
        return create_rot_n(n)
    except AttributeError:
        # desired rotate bytecode doesn't exist, generate equivalent bytecode:
        # pack the top n values into a tuple, call a helper that reorders
        # them, then unpack back onto the stack.
        return [
            create_instruction("BUILD_TUPLE", arg=n),
            self._create_load_const(rot_n_helper(n)),
            *create_rot_n(2),
            create_instruction("CALL_FUNCTION_EX", arg=0),
            create_instruction("UNPACK_SEQUENCE", arg=n),
        ]
|
| 382 |
+
|
| 383 |
+
def pop_null(self):
    # POP_TOP doesn't work for null, so we pop nulls by pushing in a
    # nop function, calling it (which consumes the null), and popping the result.
    assert sys.version_info >= (3, 11)
    return [
        self._create_load_const(lambda: None),
        # 3.13 swapped NULL and callable
        *(
            (create_instruction("SWAP", arg=2),)
            if sys.version_info >= (3, 13)
            else ()
        ),
        *create_call_function(0, False),
        create_instruction("POP_TOP"),
    ]

def pop_top(self):
    """Discard the value on top of the stack."""
    self.append_output(create_instruction("POP_TOP"))

def call_function(self, nargs: int, push_null: bool):
    """Emit a call of the function on the stack with nargs positional args."""
    self.extend_output(create_call_function(nargs, push_null=push_null))

def dup_top(self):
    """Duplicate the value on top of the stack."""
    self.append_output(create_dup_top())

def store(self, varname):
    """Store top-of-stack into local `varname`."""
    self.append_output(self.create_store(varname))
|
| 410 |
+
|
| 411 |
+
def make_function_with_closure(
    self, fn_name: str, code: types.CodeType, push_null: bool, num_on_stack=0
):
    """Emit bytecode that builds a closure-carrying function object for `code`
    and leaves it num_on_stack entries down from the top of the stack.
    """
    freevars = code.co_freevars
    assert freevars
    output = self._output

    def gen_fn():
        # Push each closure cell, pack into a tuple, then MAKE_FUNCTION.
        for var in freevars:
            assert var in self.cell_and_freevars()
            # 3.13 removed LOAD_CLOSURE; LOAD_FAST plays its role there.
            inst_name = (
                "LOAD_FAST" if sys.version_info >= (3, 13) else "LOAD_CLOSURE"
            )
            output.append(create_instruction(inst_name, argval=var))
        output.append(create_instruction("BUILD_TUPLE", arg=len(freevars)))
        output.append(self.create_load_const(code))
        # Before 3.11 MAKE_FUNCTION also expects the qualified name on the stack.
        if sys.version_info < (3, 11):
            output.append(self.create_load_const(fn_name))
        if sys.version_info >= (3, 13):
            # 3.13 split closure attachment out of MAKE_FUNCTION into
            # SET_FUNCTION_ATTRIBUTE (0x08 == closure tuple).
            output.extend(
                [
                    create_instruction("MAKE_FUNCTION"),
                    create_instruction("SET_FUNCTION_ATTRIBUTE", arg=0x08),
                ]
            )
        else:
            output.append(create_instruction("MAKE_FUNCTION", arg=0x08))

    if push_null and sys.version_info >= (3, 11):
        self.add_push_null(gen_fn)
        # Two rotations: sink both the NULL and the new function object
        # below the num_on_stack existing entries.
        output.extend(self.rot_n(num_on_stack + 2))
        output.extend(self.rot_n(num_on_stack + 2))
    else:
        gen_fn()
        output.extend(self.rot_n(num_on_stack + 1))
    self.clear_tos()
|
| 447 |
+
|
| 448 |
+
def create_load_python_module(self, mod) -> Instruction:
    """
    Generate a LOAD_GLOBAL instruction to fetch a given python module.
    """
    output = self.tx.output
    global_scope = output.global_scope
    # Strip the package prefix; reuse an existing global binding when the
    # short name already refers to this exact module object.
    name = re.sub(r"^.*[.]", "", mod.__name__)
    if global_scope.get(name, None) is mod:
        return self.create_load_global(name, add=True)
    prefix = f"___module_{name}"
    global_name = self.tx.output.install_global_by_id(prefix, mod)
    return self.create_load_global(global_name, add=True)

def make_call_generated_code(self, fn_name: str) -> None:
    """Call the generated code function stored in fn_name"""
    self.extend_output(self.load_function_name(fn_name, True))

    graphargs = self.tx.output.graphargs
    for arg in graphargs:
        if arg.pass_arg_as_tensor:
            # Wrap the reconstructed arg: torch.as_tensor(<arg>)
            self.add_push_null(
                lambda: self.extend_output(
                    [
                        self.create_load_python_module(torch),
                        self.create_load_attr("as_tensor"),
                    ]
                )
            )
            self.call_reconstruct(arg)
            self.extend_output(create_call_function(1, False))
        else:
            self.call_reconstruct(arg)

    self.extend_output(create_call_function(len(graphargs), False))
|
| 482 |
+
|
| 483 |
+
def load_import_from(self, module_name, object_name) -> None:
    """Codegen a load of module_name.object_name via an import source."""
    self(AttrSource(self.tx.import_source(module_name), object_name))

def create_call_function_kw(self, nargs, kw_names, push_null) -> List[Instruction]:
    """Create a call with keyword names, handling per-version bytecode shapes."""
    if sys.version_info >= (3, 13):
        # 3.13: push kw_names as a const and replace CALL with CALL_KW.
        output = create_call_function(nargs, push_null)
        assert output[-1].opname == "CALL"
        output.insert(-1, self.create_load_const(kw_names))
        output[-1] = create_instruction("CALL_KW", arg=nargs)
        return output
    elif sys.version_info >= (3, 11):
        output = create_call_function(nargs, push_null)
        # 3.11 emits PRECALL+CALL; 3.12 dropped PRECALL, leaving just CALL.
        if sys.version_info >= (3, 12):
            idx = -1
            expected_inst = "CALL"
        else:
            idx = -2
            expected_inst = "PRECALL"
        assert output[idx].opname == expected_inst
        kw_names_inst = create_instruction("KW_NAMES", argval=kw_names)
        output.insert(idx, kw_names_inst)
        return output
    # <= 3.10: single dedicated opcode.
    return [
        self.create_load_const(kw_names),
        create_instruction("CALL_FUNCTION_KW", arg=nargs),
    ]

def create_delete(self, value) -> Instruction:
    """DELETE_FAST for local `value`."""
    return create_instruction("DELETE_FAST", argval=value)
|
pllava/lib/python3.10/site-packages/torch/_dynamo/comptime.py
ADDED
|
@@ -0,0 +1,401 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
# This file establishes the public comptime interface to Dynamo.
|
| 3 |
+
# This allows Dynamo users to execute arbitrary Python code while
|
| 4 |
+
# Dynamo is symbolically evaluating their original programs.
|
| 5 |
+
#
|
| 6 |
+
# The goal of the public API is to give users rope, without actually
|
| 7 |
+
# leaking private implementation details of Dynamo.
|
| 8 |
+
|
| 9 |
+
import builtins
|
| 10 |
+
import dis
|
| 11 |
+
import time
|
| 12 |
+
import traceback
|
| 13 |
+
from typing import Optional, Union
|
| 14 |
+
|
| 15 |
+
import torch
|
| 16 |
+
from torch.fx.experimental.symbolic_shapes import free_symbols
|
| 17 |
+
|
| 18 |
+
from .exc import unimplemented
|
| 19 |
+
from .variables import NewCellVariable
|
| 20 |
+
from .variables.constant import ConstantVariable
|
| 21 |
+
from .variables.misc import ClosureVariable
|
| 22 |
+
from .variables.tensor import SymNodeVariable
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class ComptimeVar:
    """
    A ComptimeVar represents a Python value, at some particular point
    in time, in the Python code we are symbolically evaluating with
    torchdynamo.  This must be distinguished from a runtime value, as
    at compile-time there are some properties of the variable we
    do not know (for example, if the ComptimeVar represents a Tensor,
    we only know metadata about the tensor; we do NOT know what the
    actual data in the Tensor is.)
    """

    def __init__(self, v) -> None:
        # Name-mangled on purpose: users are expected to go through the
        # public accessor methods below.
        self.__variable = v

    def as_proxy(self):
        """
        Returns an fx.Proxy (or tuple/list of fx.Proxy) representing
        this variable in the FX graph we are assembling to pass
        to the user compiler.

        This method only works for variables we actually track in
        the FX graph, aka Tensors (and ints, if you are compiling
        with dynamic shapes).  In particular, if you have a list
        or tuple of tensors, you will get a list/tuple of proxies
        (not a single proxy representing the entire list/tuple).
        """
        return self.__variable.as_proxy()

    def is_proxy(self):
        """
        Returns True if as_proxy() would succeed.
        """
        return self.__variable.is_proxy()

    def as_fake(self):
        """
        Returns a "fake" value (either a FakeTensor or a SymInt)
        representing the variable in question.  This only works
        for variables that denote Tensor or int.  You can use
        this to query metadata; e.g., v.as_fake().size(0) will
        tell you the compile-time known size of the tensor.

        WARNING: Do NOT mutate the returned tensor.
        """
        return self.__variable.as_proxy().node.meta["example_value"]

    def size(self, dim: Optional[int] = None) -> Union[int, torch.SymInt]:
        """
        Returns the size of the tensor (if dim is None) or the size
        at the dimension dim.  The returned size may be a SymInt.
        """
        return self.as_fake().size(dim)

    def python_type(self):
        """
        Returns what type(v) would have returned for the variable
        at compile time.
        """
        return self.__variable.python_type()

    def as_python_constant(self):
        """
        Returns the Python value this variable would have, but only if it is
        completely known at compile-time (e.g., it is constant).

        WARNING: Do NOT mutate the returned constant.  The returned constant
        may or may not correspond to the actual value this variable may take
        on at runtime; for example, if the variable in question is a constant
        list, we may return a copy of that list.
        """
        return self.__variable.as_python_constant()

    def is_python_constant(self):
        """
        Returns True if as_python_constant would succeed.
        """
        return self.__variable.is_python_constant()

    def is_dynamic(self):
        """Returns True if this is a SymNode-backed value with free symbols."""
        if isinstance(self.__variable, SymNodeVariable):
            fs = free_symbols(self.__variable.sym_num)
            return bool(fs)
        return False

    def force_static(self):
        """
        Forces that a value is static, inducing a guard on its specific value
        """
        if isinstance(self.__variable, SymNodeVariable):
            self.__variable.evaluate_expr()
        elif isinstance(self.__variable, ConstantVariable):
            # TODO: Maybe complain if this isn't a int/bool/float variable
            pass
        else:
            raise AssertionError(
                f"cannot force {self.__variable} ({type(self.__variable)}) static"
            )

    def _i_will_not_complain_if_bc_breaks_VariableTracker(self):
        """
        Returns the internal data structure VariableTracker that Dynamo uses
        to represent variables at compile time.  There are no BC guarantees on
        this API and WE RESERVE THE RIGHT TO BREAK YOUR CODE if you rely on
        it.
        """
        return self.__variable

    def __repr__(self) -> str:
        return self.__variable.debug_repr()

    # TODO: API for adding a custom guard
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
class ComptimeContext:
    """
    This context class provides access to a public API for Dynamo's internals.
    If there is something here you would find useful that is missing, please
    file a feature request at https://github.com/pytorch/pytorch/
    """

    def __init__(self, tx) -> None:
        # Name-mangled on purpose: tx is the InstructionTranslator and is
        # not part of the public API.
        self.__tx = tx

    def get_local(self, name: str, *, stacklevel=0) -> ComptimeVar:
        """
        Retrieve the compile-time known information about a local.
        """
        tx = self.__get_tx(stacklevel)

        # This is analogous to LOAD_DEREF
        if hasattr(tx, "closure_cells") and name in tx.closure_cells:
            cell = tx.closure_cells[name]
            if isinstance(cell, ClosureVariable):
                return ComptimeVar(tx.output.root_tx.symbolic_locals[cell.name])
            else:
                return ComptimeVar(tx.output.side_effects.load_cell(cell))
        else:
            r = tx.symbolic_locals[name]
            if isinstance(r, NewCellVariable):
                return ComptimeVar(tx.output.side_effects.load_cell(r))
            else:
                return ComptimeVar(r)

    def graph_break(self, msg="ComptimeContext.graph_break"):
        """
        Manually trigger a graph break
        """
        unimplemented(msg)

    def graph(self):
        """
        Retrieve the partially constructed FX graph that would be
        passed to the user compiler after compilation.
        """
        return self.__tx.output.graph

    def assert_static(self, val):
        """
        Asserts that the int is static (and not dynamic, per dynamic shapes)
        """
        assert (
            not val.is_dynamic()
        ), "expected static but got dynamic (run with TORCH_LOGS=dynamic for more info)"

    def print_graph(self, *, verbose=True, file=None):
        """
        Print the partially constructed FX graph that would be passed
        to the user compiler after compilation.
        """
        print(
            self.__tx.output.graph.python_code("self", verbose=verbose).src, file=file
        )

    def parent(self):
        """Return a ComptimeContext for the parent (caller) frame."""
        return ComptimeContext(self.__tx.parent)

    def __get_tx(self, stacklevel):
        # Walk `stacklevel` frames up the inlining stack.
        tx = self.__tx
        for _ in range(stacklevel):
            tx = tx.parent
        return tx

    def print(self, val, *, file=None):
        """Print the repr of an arbitrary value (e.g., a ComptimeVar)."""
        print(repr(val), file=file)

    def print_disas(self, *, file=None, stacklevel=0):
        """
        Print the current series of opcodes being executed (not including
        parent frames), including where you are in the particular opcode
        stream.
        """
        tx = self.__get_tx(stacklevel)
        print(
            dis.Bytecode(
                tx.f_code,
                current_offset=tx.instructions[tx.instruction_pointer].offset,
            ).dis(),
            file=file,
        )

    def print_value_stack(self, *, file=None, stacklevel=0):
        """
        Print the current Python value stack.  Note that this is NOT the same
        as the traceback; use print_bt() to print that.  Note that at
        stacklevel=0, this will typically be empty, as comptime cannot
        currently be used in an expression context where there would be
        intermediates on the stack.  If you would find this useful, please
        file a bug at https://github.com/pytorch/pytorch/

        NB: Stack grows downwards in our print
        """
        tx = self.__get_tx(stacklevel)
        for s in tx.stack:
            print(f"- {s.debug_repr()}", file=file)

    def print_locals(self, *, file=None, stacklevel=0):
        """
        Print all of the locals available in the current context.
        By default this view is very limited; you can get more information
        about any individual local using get_local().
        """
        tx = self.__get_tx(stacklevel)
        for k, v in tx.symbolic_locals.items():
            print(f"{k} = {v.debug_repr()}", file=file)

    def print_bt(self, *, file=None, stacklevel=0):
        """
        Print the user code backtrace, starting at the beginning of the
        frame Dynamo started evaluating.  Note that this MAY NOT go all
        the way to the torch.compile invocation, as we may have done
        a graph break and are compiling an intermediate frame as the
        starting point.  If you think the other behavior would be better,
        file a bug at https://github.com/pytorch/pytorch/
        """
        stack = []
        tx = self.__get_tx(stacklevel)
        while tx is not None:
            stack.append(tx.frame_summary())
            tx = getattr(tx, "parent", None)
        print(
            "".join(traceback.StackSummary.from_list(reversed(stack)).format()),
            file=file,
        )

    def print_guards(self, *, file=None):
        """
        Print the currently installed guards for the Dynamo context.
        This does NOT include guards associated with variables that
        may or may not be installed in the future if those variables
        are used.
        """
        # TODO: improve print format, current guard format is extremely
        # verbose
        print(
            "\n".join(f"{repr(guard)}" for guard in sorted(self.__tx.output.guards)),
            file=file,
        )

    def _i_will_not_complain_if_bc_breaks_InstructionTranslator(self):
        """
        Returns the internal data structure InstructionTranslator that Dynamo
        uses to track state of symbolic evaluation.  There are no BC
        guarantees on this API and WE RESERVE THE RIGHT TO BREAK YOUR CODE if
        you rely on it.
        """
        return self.__tx

    def sleep(self, sec):
        """Sleep at compile time (useful for testing compile-time behavior)."""
        time.sleep(sec)
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
class _Comptime:
    """Callable singleton providing the public `comptime` entry points."""

    @staticmethod
    def __call__(fn, fallback_fn=lambda: None):
        """fn gets called at compile time in TorchDynamo, calls fallback_fn otherwise"""
        # Outside of Dynamo tracing, only the fallback runs; Dynamo intercepts
        # this call during symbolic evaluation and invokes fn with a
        # ComptimeContext instead.
        fallback_fn()

    # Convenience wrappers that are more compact to use

    @staticmethod
    def graph_break():
        comptime(lambda ctx: ctx.graph_break())

    @staticmethod
    def print(e):
        comptime(lambda ctx: ctx.print(ctx.get_local("e")), lambda: print(e))

    @staticmethod
    def print_graph():
        comptime(lambda ctx: ctx.print_graph())

    @staticmethod
    def print_disas(*, stacklevel=0):
        # +1 accounts for this wrapper's own frame.
        comptime(
            lambda ctx: ctx.print_disas(
                stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1
            )
        )

    @staticmethod
    def print_value_stack(*, stacklevel=0):
        comptime(
            lambda ctx: ctx.print_value_stack(
                stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1
            )
        )

    # This is a more useful variant of print_value_stack that can be used
    # in an expression context; e.g., x + print_value_stack_and_return(y + z),
    # you will see x on the stack prior to the addition operation
    @staticmethod
    def print_value_stack_and_return(e, *, stacklevel=0):
        comptime(
            lambda ctx: ctx.print_value_stack(
                stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1
            )
        )
        return e

    @staticmethod
    def print_locals(*, stacklevel=0):
        comptime(
            lambda ctx: ctx.print_locals(
                stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1
            )
        )

    @staticmethod
    def print_bt(*, stacklevel=0):
        comptime(
            lambda ctx: ctx.print_bt(
                stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1
            )
        )

    @staticmethod
    def print_guards():
        comptime(lambda ctx: ctx.print_guards())

    @staticmethod
    def assert_static(val):
        comptime(lambda ctx: ctx.assert_static(ctx.get_local("val")))

    @staticmethod
    def force_static(val):
        comptime(lambda ctx: ctx.get_local("val").force_static())

    @staticmethod
    def breakpoint():
        """
        Like pdb breakpoint(), but drop into pdb whenever this line
        of code is compiled by dynamo.  Use it by putting
        this in your model code::

            from torch._dynamo.comptime import comptime
            comptime.breakpoint()

        And then, inside pdb, you can access 'ctx' to query things
        about the compilation context::

            (Pdb) !ctx.print_bt()
            (Pdb) !ctx.print_locals()
            (Pdb) p ctx.get_local("attention").as_fake()
        """

        def inner(inner_ctx):
            # `ctx` is bound here so it is visible inside the pdb session
            # (see the docstring above).
            ctx = inner_ctx.parent()
            builtins.breakpoint()

        comptime(inner)

    @staticmethod
    def sleep(sec):
        comptime(lambda ctx: ctx.sleep(ctx.get_local("sec").as_python_constant()))


# Public singleton; Dynamo special-cases calls to this object during tracing.
comptime = _Comptime()
|
pllava/lib/python3.10/site-packages/torch/_dynamo/logging.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import itertools
|
| 3 |
+
import logging
|
| 4 |
+
|
| 5 |
+
from torch.hub import _Faketqdm, tqdm
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
# Disable progress bar by default, not in dynamo config because otherwise get a circular import
disable_progress = True


# Return all loggers that torchdynamo/torchinductor is responsible for
def get_loggers():
    """Return the logger objects owned by torchdynamo/torchinductor."""
    owned_logger_names = (
        "torch.fx.experimental.symbolic_shapes",
        "torch._dynamo",
        "torch._inductor",
    )
    return [logging.getLogger(logger_name) for logger_name in owned_logger_names]
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
# Creates a logging function that logs a message with a step # prepended.
# get_step_logger should be lazily called (i.e. at runtime, not at module-load time)
# so that step numbers are initialized properly. e.g.:

# @functools.lru_cache(None)
# def _step_logger():
#     return get_step_logger(logging.getLogger(...))

# def fn():
#     _step_logger()(logging.INFO, "msg")

# Global monotonically increasing compilation step number, starting at 1.
_step_counter = itertools.count(1)

# Update num_steps if more phases are added: Dynamo, AOT, Backend
# This is very inductor centric
# _inductor.utils.has_triton() gives a circular import error here

if not disable_progress:
    try:
        import triton  # noqa: F401

        num_steps = 3
    except ImportError:
        # No triton backend available -> one fewer phase to show.
        num_steps = 2
    pbar = tqdm(total=num_steps, desc="torch.compile()", delay=0)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def get_step_logger(logger):
    """Return a log(level, msg, **kwargs) callable that prefixes a global step number.

    Must be called lazily (at runtime, not module load) so the step number
    reflects the actual phase order; see the module-level comment above
    _step_counter.
    """
    if not disable_progress:
        pbar.update(1)
        # _Faketqdm (no real tqdm installed) lacks set_postfix_str.
        if not isinstance(pbar, _Faketqdm):
            pbar.set_postfix_str(f"{logger.name}")

    # Capture the step number once; every message from this closure shares it.
    step = next(_step_counter)

    def log(level, msg, **kwargs):
        # Lazy %-style args so formatting cost is only paid when emitted.
        logger.log(level, "Step %s: %s", step, msg, **kwargs)

    return log
|
pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/__pycache__/functools.cpython-310.pyc
ADDED
|
Binary file (235 Bytes). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/__pycache__/loader.cpython-310.pyc
ADDED
|
Binary file (957 Bytes). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/__pycache__/os.cpython-310.pyc
ADDED
|
Binary file (990 Bytes). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/builtins.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Python polyfills for builtins
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from __future__ import annotations
|
| 6 |
+
|
| 7 |
+
import builtins
|
| 8 |
+
from typing import Iterable, TypeVar
|
| 9 |
+
|
| 10 |
+
from ..decorators import substitute_in_graph
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
__all__ = [
|
| 14 |
+
"all",
|
| 15 |
+
"any",
|
| 16 |
+
"enumerate",
|
| 17 |
+
]
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
_T = TypeVar("_T")
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
@substitute_in_graph(builtins.all, can_constant_fold_through=True)
def all(iterable: Iterable[object], /) -> bool:
    """Pure-Python replacement for :func:`all`.

    Stops consuming *iterable* at the first falsy element, like the builtin.
    """
    outcome = True
    for item in iterable:
        if not item:
            outcome = False
            break
    return outcome
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
@substitute_in_graph(builtins.any, can_constant_fold_through=True)
def any(iterable: Iterable[object], /) -> bool:
    """Pure-Python replacement for :func:`any`.

    Stops consuming *iterable* at the first truthy element, like the builtin.
    """
    outcome = False
    for item in iterable:
        if item:
            outcome = True
            break
    return outcome
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
@substitute_in_graph(builtins.enumerate, is_embedded_type=True)  # type: ignore[arg-type]
def enumerate(iterable: Iterable[_T], start: int = 0) -> Iterable[tuple[int, _T]]:
    """Pure-Python replacement for :func:`enumerate`.

    Rejects a non-``int`` *start* with the builtin's TypeError message, then
    lazily yields ``(index, element)`` pairs.
    """
    if not isinstance(start, int):
        raise TypeError(
            f"{type(start).__name__!r} object cannot be interpreted as an integer"
        )

    index = start
    for element in iterable:
        yield index, element
        index += 1
|
pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/functools.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Python polyfills for functools
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
# No functools polyfills are registered yet; an empty __all__ keeps the
# polyfill loader's registration loop a no-op for this module.
__all__ = []  # type: ignore[var-annotated]
|
pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/itertools.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Python polyfills for itertools
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from __future__ import annotations
|
| 6 |
+
|
| 7 |
+
import itertools
|
| 8 |
+
from typing import Iterable, Iterator, TypeVar
|
| 9 |
+
|
| 10 |
+
from ..decorators import substitute_in_graph
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
__all__ = [
|
| 14 |
+
"chain",
|
| 15 |
+
"chain_from_iterable",
|
| 16 |
+
"islice",
|
| 17 |
+
"tee",
|
| 18 |
+
]
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
_T = TypeVar("_T")
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# Reference: https://docs.python.org/3/library/itertools.html#itertools.chain
|
| 25 |
+
@substitute_in_graph(itertools.chain, is_embedded_type=True)  # type: ignore[arg-type]
def chain(*iterables: Iterable[_T]) -> Iterator[_T]:
    """Pure-Python replacement for :func:`itertools.chain`.

    Yields every element of each iterable in turn, exhausting one before
    starting the next.
    """
    for inner in iterables:
        for element in inner:
            yield element
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
@substitute_in_graph(itertools.chain.from_iterable)  # type: ignore[arg-type]
def chain_from_iterable(iterable: Iterable[Iterable[_T]], /) -> Iterator[_T]:
    """Pure-Python replacement for ``itertools.chain.from_iterable``.

    The ``*`` unpacking consumes the outer iterable eagerly, matching the
    original implementation's behavior here.
    """
    inner_iterables = tuple(iterable)
    return itertools.chain(*inner_iterables)


# Attach the polyfill so ``chain.from_iterable`` resolves like the real API.
chain.from_iterable = chain_from_iterable  # type: ignore[method-assign]
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
# Reference: https://docs.python.org/3/library/itertools.html#itertools.islice
|
| 40 |
+
@substitute_in_graph(itertools.islice, is_embedded_type=True)  # type: ignore[arg-type]
def islice(iterable: Iterable[_T], /, *args: int | None) -> Iterator[_T]:
    """Pure-Python replacement for :func:`itertools.islice`.

    Accepts the same ``(stop)`` / ``(start, stop[, step])`` argument forms by
    normalizing through a ``slice`` object; negative indices and non-positive
    steps are rejected, as in the C implementation.
    """
    selector = slice(*args)
    start = selector.start if selector.start is not None else 0
    stop = selector.stop
    step = selector.step if selector.step is not None else 1
    if start < 0 or step <= 0 or (stop is not None and stop < 0):
        raise ValueError(
            "Indices for islice() must be None or an integer: 0 <= x <= sys.maxsize.",
        )

    wanted = start
    if stop is None:
        # Unbounded slice: walk the whole iterable.
        # TODO: use indices = itertools.count() and merge with the bounded
        # branch once infinite iterators are supported.
        for position, element in enumerate(iterable):
            if position == wanted:
                yield element
                wanted += step
    else:
        # Bounded slice: zip against a range so iteration stops at ``stop``.
        for position, element in zip(range(max(start, stop)), iterable):
            if position == wanted:
                yield element
                wanted += step
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
# Reference: https://docs.python.org/3/library/itertools.html#itertools.tee
|
| 69 |
+
@substitute_in_graph(itertools.tee)
def tee(iterable: Iterable[_T], n: int = 2, /) -> tuple[Iterator[_T], ...]:
    """Pure-Python replacement for :func:`itertools.tee`.

    Returns *n* independent iterators over one pass of *iterable*, using a
    shared singly-linked list of ``[value, next_cell]`` cells so each returned
    iterator keeps its own cursor and replays values already pulled from the
    source.
    """
    iterator = iter(iterable)
    # Sentinel head cell; link[1] is None exactly when this cursor is at the
    # front and must pull a fresh value from the shared source iterator.
    shared_link = [None, None]

    def _tee(link) -> Iterator[_T]:  # type: ignore[no-untyped-def]
        try:
            while True:
                if link[1] is None:
                    # This cursor is at the head: fetch the next source value
                    # and append an empty cell for any iterators behind us.
                    link[0] = next(iterator)
                    link[1] = [None, None]
                # Unpack the cell: take its value and advance to the next cell.
                value, link = link
                yield value
        except StopIteration:
            # Source exhausted; this iterator simply ends.
            return

    return tuple(_tee(shared_link) for _ in range(n))
|
pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/loader.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Used to load and initialize polyfill handlers when importing torch._dynamo
|
| 2 |
+
# Please add a new import when adding a new polyfill module.
|
| 3 |
+
|
| 4 |
+
import importlib
|
| 5 |
+
from typing import Tuple, TYPE_CHECKING
|
| 6 |
+
|
| 7 |
+
from .. import polyfills, trace_rules
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
if TYPE_CHECKING:
|
| 11 |
+
from types import ModuleType
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# See also the TYPE_CHECKING block in torch/_dynamo/polyfills/__init__.py
|
| 15 |
+
# Names of the submodules under torch._dynamo.polyfills that provide
# pure-Python replacements for C-level functions.
POLYFILLED_MODULE_NAMES: Tuple[str, ...] = (
    "builtins",
    "functools",
    "itertools",
    "os",
    "sys",
)
# Import each polyfill submodule eagerly so its @substitute_in_graph
# registrations run as a side effect of importing this loader.
POLYFILLED_MODULES: Tuple["ModuleType", ...] = tuple(
    importlib.import_module(f".{submodule}", package=polyfills.__name__)
    for submodule in POLYFILLED_MODULE_NAMES
)


# Unregister the builtin functions from _builtin_function_ids to let them to be
# dispatched with the appropriate VariableTracker type. Otherwise, they will be
# dispatched with BuiltinVariable if present in _builtin_function_ids.
for polyfill_module in POLYFILLED_MODULES:
    for polyfill_name in polyfill_module.__all__:
        polyfill_handler = getattr(polyfill_module, polyfill_name)
        # __torch_dynamo_original__ points at the C function this handler
        # replaces (attached by the @substitute_in_graph decorator).
        original_fn = polyfill_handler.__torch_dynamo_original__
        trace_rules._builtin_function_ids.remove(id(original_fn))
|
pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/os.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Python polyfills for os
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from __future__ import annotations
|
| 6 |
+
|
| 7 |
+
import os
|
| 8 |
+
from typing import AnyStr
|
| 9 |
+
|
| 10 |
+
from ..decorators import substitute_in_graph
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
__all__ = ["fspath"]
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
# Copied from os.py in the standard library
@substitute_in_graph(os.fspath, can_constant_fold_through=True)
def fspath(path: AnyStr | os.PathLike[AnyStr]) -> AnyStr:
    """Pure-Python replacement for :func:`os.fspath`.

    Returns *path* unchanged when it is already ``str``/``bytes``; otherwise
    calls the type's ``__fspath__`` and validates that it returned
    ``str``/``bytes``, raising ``TypeError`` otherwise.
    """
    if isinstance(path, (str, bytes)):
        return path

    path_type = type(path)
    try:
        # Look the protocol method up on the type, not the instance, matching
        # how CPython resolves dunder methods.
        path_repr = path_type.__fspath__(path)  # type: ignore[arg-type]
    except AttributeError:
        # Distinguish "no __fspath__ at all" (→ TypeError below) from an
        # AttributeError raised *inside* a __fspath__ implementation
        # (→ propagate unchanged).
        if hasattr(path_type, "__fspath__"):
            raise
        raise TypeError(
            f"expected str, bytes or os.PathLike object, not {path_type.__name__}",
        ) from None
    if isinstance(path_repr, (str, bytes)):
        return path_repr  # type: ignore[return-value]
    raise TypeError(
        f"expected {path_type.__name__}.__fspath__() to return str or bytes, "
        f"not {type(path_repr).__name__}",
    )
|
pllava/lib/python3.10/site-packages/torch/_dynamo/profiler.py
ADDED
|
@@ -0,0 +1,156 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import dataclasses
|
| 3 |
+
import os
|
| 4 |
+
from typing import Any, List
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from .utils import print_once
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@dataclasses.dataclass
class ProfileMetrics:
    """Operator/timing counters for one profiled region.

    ``graphs`` is carried alongside the other counters but is not folded into
    ``+``, ``+=``, or ``/`` (matching the original accounting).
    """

    microseconds: float = 0.0
    operators: int = 0
    fusions: int = 0
    graphs: int = 0

    def __iadd__(self, other: "ProfileMetrics"):
        # In-place accumulation of time/op/fusion counters only.
        self.microseconds += other.microseconds
        self.operators += other.operators
        self.fusions += other.fusions
        return self

    def __add__(self, other: "ProfileMetrics"):
        assert isinstance(other, ProfileMetrics)
        combined = ProfileMetrics(
            self.microseconds + other.microseconds,
            self.operators + other.operators,
            self.fusions + other.fusions,
        )
        return combined

    def __truediv__(self, other):
        # Dividing by an int treats it as a uniform metrics triple; the
        # max(1, ...) guards avoid division by zero.
        divisor = ProfileMetrics(other, other, other) if isinstance(other, int) else other
        return ProfileMetrics(
            self.microseconds / max(1, divisor.microseconds),
            self.operators / max(1, divisor.operators),
            self.fusions / max(1, divisor.fusions),
        )

    def __str__(self) -> str:
        # Values are rendered as percentages, so this is meant for ratio
        # metrics (e.g. the result of __truediv__).
        return f"{self.operators:4.0%} ops {self.microseconds:4.0%} time"

    def tocsv(self):
        # CSV column order: operators first, then microseconds.
        return [self.operators, self.microseconds]
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class ProfileResult:
    """Captured-vs-total profile metrics for a run, plus a unique-graph count."""

    def __init__(self, captured, total, unique_graphs) -> None:
        # Falsy (e.g. None) metrics default to fresh empty ProfileMetrics.
        self.captured: ProfileMetrics = captured or ProfileMetrics()
        self.total: ProfileMetrics = total or ProfileMetrics()
        self.unique_graphs: int = unique_graphs

    def __iadd__(self, other: "ProfileResult"):
        # Merge another result into this one in place.
        self.captured += other.captured
        self.total += other.total
        self.unique_graphs += other.unique_graphs
        return self

    def percent(self):
        # Fraction of the total that was captured (element-wise division).
        return self.captured / self.total

    def __str__(self) -> str:
        header = f"{self.unique_graphs:2} graphs {self.captured.graphs:2} graph calls "
        ratio = f"{self.captured.operators:4}/{self.total.operators:4} = "
        return header + ratio + str(self.percent())

    def tocsv(self):
        counts = [
            self.unique_graphs,
            self.captured.graphs,
            self.captured.operators,
            self.total.operators,
        ]
        return counts + self.percent().tocsv()
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def should_print_missing():
    """True iff the TORCHDYNAMO_PRINT_MISSING env var is set to exactly "1"."""
    flag = os.environ.get("TORCHDYNAMO_PRINT_MISSING")
    return flag == "1"
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def print_missing(stack):
    """Report (once) the last few user frames of a stack dynamo did not capture."""
    # Frames inside the autograd profiler itself are expected; skip those stacks.
    if any("/torch/autograd/profiler.py" in frame for frame in stack):
        return
    user_frames = [
        frame
        for frame in stack
        if "<built-in" not in frame and "site-packages/torch/" not in frame
    ]
    # Only the innermost three user frames are shown to keep output readable.
    print_once("MISSING", " >> ".join(user_frames[-3:]))
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
class Profiler:
    """Wraps torch.profiler to split CPU op time into captured vs. total.

    "Captured" ops are those whose interval ends inside a TORCHDYNAMO
    record_function region (inserted by fx_insert_profiling below).
    """

    # Number of distinct FX graphs instrumented since the last results() call;
    # incremented by fx_insert_profiling and reset by results().
    unique_graphs = 0

    def __init__(self) -> None:
        # Stacks are only collected when missing-op printing was requested,
        # since with_stack adds profiling overhead.
        self.prof = torch.profiler.profile(
            activities=[torch.profiler.ProfilerActivity.CPU],
            with_stack=should_print_missing(),
        )

    def results(self):
        """Summarize recorded events into a ProfileResult (captured vs. total)."""
        captured_regions = 0
        captured_ops = 0
        captured_microseconds = 0
        total_ops = 0
        total_microseconds = 0

        # last_op_end_time tracks the end of the most recent top-level op so
        # nested (recursively-called) ops can be skipped; captured_region_end_time
        # is the end of the most recent TORCHDYNAMO region.
        last_op_end_time = -1
        captured_region_end_time = -1
        events = sorted(self.prof.events(), key=lambda x: x.time_range.start)
        for e in events:
            if e.name == "TORCHDYNAMO":
                # Marker for a compiled-graph call: ops ending before this
                # region's end are counted as captured.
                captured_region_end_time = e.time_range.end
                captured_regions += 1
                # ignore `handle = torch.zeros(1)` in record_function.__init__()
                total_ops -= 1
            elif e.time_range.start >= last_op_end_time:
                # Top-level op (starts after the previous top-level op ended).
                last_op_end_time = e.time_range.end
                if e.time_range.end <= captured_region_end_time:
                    captured_ops += 1
                    captured_microseconds += e.time_range.elapsed_us()
                elif should_print_missing():
                    print_missing(e.stack)
                total_ops += 1
                total_microseconds += e.time_range.elapsed_us()
            else:
                pass  # ops recursively called from other ops (ignored)

        unique_graphs = Profiler.unique_graphs
        Profiler.unique_graphs = 0
        # we counted one extra op that is part of the profiler setup code
        total_ops -= 1

        return ProfileResult(
            captured=ProfileMetrics(
                microseconds=captured_microseconds,
                operators=captured_ops,
                # Each region's first op isn't a fusion win, hence the subtraction.
                fusions=captured_ops - captured_regions,
                graphs=captured_regions,
            ),
            total=ProfileMetrics(
                microseconds=total_microseconds,
                operators=total_ops,
                fusions=total_ops - 1,
            ),
            unique_graphs=unique_graphs,
        )
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def fx_insert_profiling(gm: torch.fx.GraphModule, example_inputs: List[Any]):
    """Backend hook: wrap a graph module's forward in a TORCHDYNAMO profiler region."""
    Profiler.unique_graphs += 1

    def _profiled_forward(*args):
        # The named region lets Profiler.results() attribute the ops executed
        # inside this compiled graph to a captured region.
        with torch.profiler.record_function("TORCHDYNAMO"):
            return gm.forward(*args)

    return _profiled_forward
|
pllava/lib/python3.10/site-packages/torch/_dynamo/repro/__init__.py
ADDED
|
File without changes
|
pllava/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (171 Bytes). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_aot.cpython-310.pyc
ADDED
|
Binary file (25.1 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/repro/after_aot.py
ADDED
|
@@ -0,0 +1,966 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import argparse
|
| 3 |
+
import copy
|
| 4 |
+
import functools
|
| 5 |
+
import io
|
| 6 |
+
import logging
|
| 7 |
+
import os
|
| 8 |
+
import shutil
|
| 9 |
+
import subprocess
|
| 10 |
+
import sys
|
| 11 |
+
import textwrap
|
| 12 |
+
import uuid
|
| 13 |
+
from importlib import import_module
|
| 14 |
+
from tempfile import TemporaryFile
|
| 15 |
+
from typing import Any, Callable, Dict, Union
|
| 16 |
+
|
| 17 |
+
import torch
|
| 18 |
+
import torch.fx as fx
|
| 19 |
+
import torch.nn as nn
|
| 20 |
+
from torch._dynamo.debug_utils import (
|
| 21 |
+
_cuda_system_info_comment,
|
| 22 |
+
AccuracyError,
|
| 23 |
+
backend_accuracy_fails,
|
| 24 |
+
BuckTargetWriter,
|
| 25 |
+
cast_to_fp64,
|
| 26 |
+
extra_imports,
|
| 27 |
+
generate_config_string,
|
| 28 |
+
helper_for_dump_minify,
|
| 29 |
+
InputReader,
|
| 30 |
+
InputWriter,
|
| 31 |
+
MAX_CONSTANT_NUMEL_INLINE,
|
| 32 |
+
minifier_dir,
|
| 33 |
+
NNModuleToString,
|
| 34 |
+
NopInputReader,
|
| 35 |
+
same_two_models,
|
| 36 |
+
)
|
| 37 |
+
from torch._dynamo.utils import clone_inputs, counters, same
|
| 38 |
+
from torch.fx.experimental.proxy_tensor import make_fx
|
| 39 |
+
from torch.fx.experimental.symbolic_shapes import (
|
| 40 |
+
fx_placeholder_targets,
|
| 41 |
+
has_free_symbols,
|
| 42 |
+
)
|
| 43 |
+
from torch.hub import tqdm
|
| 44 |
+
|
| 45 |
+
from .. import config
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
log = logging.getLogger(__name__)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
inductor_config = import_module("torch._inductor.config")
|
| 52 |
+
use_buck = inductor_config.is_fbcode()
|
| 53 |
+
|
| 54 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 55 |
+
# MAIN ENTRY POINT
|
| 56 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def wrap_compiler_debug(unconfigured_compiler_fn, compiler_name: str):
    """
    Minifier for Fx Graph modules after Aot Autograd has finished. We wrap both
    forward and backward call separately with the backend compiler_fn - like
    inductor or nvfuser. Intercepting after Aot Autograd presents neat
    abstraction, where all the params are lifted as graph inputs, making it easy
    to save the graph as a string.

    Args:
        unconfigured_compiler_fn: backend compiler; invoked as
            ``unconfigured_compiler_fn(gm, example_inputs, **kwargs)``.
        compiler_name: label used when naming dumped repro artifacts.

    Returns:
        A compiler function with the same call signature whose failures (and,
        depending on ``config.repro_level``, accuracy mismatches) trigger repro
        dumping / minification.
    """

    @functools.wraps(unconfigured_compiler_fn)
    def debug_wrapper(gm, example_inputs, **kwargs):
        from torch._subclasses import FakeTensorMode

        compiler_fn = functools.partial(unconfigured_compiler_fn, **kwargs)

        from torch._functorch.aot_autograd import get_aot_graph_name

        graph_name = get_aot_graph_name()

        # TODO: why do we need to deepcopy the original graph?
        orig_graph = copy.deepcopy(gm.graph)
        assert config.repro_after in ("dynamo", "aot", None)

        try:
            # Call the compiler_fn - which is either aot_autograd or inductor
            # with fake inputs
            inner_compiled_fn = compiler_fn(gm, example_inputs)
        except Exception as e:
            # TODO: Failures here are troublesome because no real inputs,
            # need a different serialization strategy
            if config.repro_after == "aot":
                if config.repro_level == 1:
                    dump_compiler_graph_state(
                        fx.GraphModule(gm, orig_graph),
                        example_inputs,
                        compiler_name,
                    )
                elif config.repro_level == 2:
                    dump_to_minify(
                        fx.GraphModule(gm, orig_graph),
                        example_inputs,
                        compiler_name,
                    )
                log.error("CompilerError")
            raise

        # We may run regular PyTorch compute that may trigger Dynamo, do NOT
        # recursively attempt to accuracy minify in that case!
        def deferred_for_real_inputs(real_inputs):
            # This is a bit obscure: if we recursively try to accuracy minify
            # the SAME function, this would trigger. But most of the time
            # we should never hit this branch
            if config.repro_after != "aot":
                return inner_compiled_fn(real_inputs)
            # Disable repro-on-this-path while running, so nested compilations
            # triggered during debugging don't re-enter the minifier.
            with config.patch(repro_after=None):
                return inner_debug_fn(real_inputs)

        def inner_debug_fn(real_inputs):
            """
            Aot Autograd fw_compiler and bw_compiler can have fake tensors. So,
            example_inputs can be fake tensors. We can call compiler_fn (which is
            inductor or nvfuser) with fake tensors but the actually compiled_fn
            should be called with real tensors. Therefore, the actual invocation
            is deferred.
            """
            # Copy the tensor attrs like shape, stride etc by converting to Fake Tensor
            # because inductor clears the tensor list in its codegen. And example_inputs
            # are available only for the first invocation.
            fake_mode = FakeTensorMode()
            copy_tensor_attrs = [
                fake_mode.from_tensor(x) if isinstance(x, torch.Tensor) else x
                for x in real_inputs
            ]
            if config.repro_level == 3:
                # Always dump the original module in case we have segfaults
                dump_to_minify(
                    fx.GraphModule(gm, orig_graph), real_inputs, compiler_name
                )

            if config.repro_level == 4:
                # Accuracy-minification mode: compare compiled output against
                # the eager module before trusting the compiled function.
                if compiler_name != "inductor":
                    raise NotImplementedError(
                        "Accuracy minification is supported for inductor only"
                    )
                failed = not same_two_models(
                    gm,
                    inner_compiled_fn,
                    real_inputs,
                    only_fwd=True,
                    ignore_non_fp=config.repro_ignore_non_fp,
                )

                if failed:
                    log.warning(
                        "Accuracy failed for the AOT Autograd graph %s", graph_name
                    )
                    dump_compiler_graph_state(
                        fx.GraphModule(gm, orig_graph),
                        real_inputs,
                        f"{compiler_name}_accuracy",
                    )
                    dump_to_minify(
                        fx.GraphModule(gm, orig_graph),
                        real_inputs,
                        f"{compiler_name}_accuracy",
                    )
                    raise AccuracyError("Bad accuracy detected")
                else:
                    # Call the compiled function with real inputs
                    return inner_compiled_fn(real_inputs)
            else:
                try:
                    # Call the compiled function with real inputs
                    out = inner_compiled_fn(real_inputs)
                    # sync cuda kernels to ensure IMA detection
                    for arg in example_inputs:
                        if isinstance(arg, torch.Tensor) and arg.is_cuda:
                            torch.cuda.synchronize()
                            break
                    return out
                except Exception as e:
                    # Dump using the fake-tensor copies: real_inputs may have
                    # been cleared by inductor's codegen (see comment above).
                    if config.repro_level == 1:
                        dump_compiler_graph_state(
                            fx.GraphModule(gm, orig_graph),
                            copy_tensor_attrs,
                            compiler_name,
                        )
                    elif config.repro_level == 2:
                        dump_to_minify(
                            fx.GraphModule(gm, orig_graph),
                            copy_tensor_attrs,
                            compiler_name,
                        )
                    raise

        if config.repro_after == "aot":
            compiled_fn = deferred_for_real_inputs
            compiled_fn._boxed_call = True  # type: ignore[attr-defined]
            return compiled_fn
        else:
            return inner_compiled_fn

    return debug_wrapper
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 205 |
+
# DUMP REPROS
|
| 206 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def generate_compiler_repro_string(gm, args, *, stable_output=False, save_dir=None):
    """Build the Python source of a standalone repro script for ``gm``.

    The returned string contains imports, the serialized module (as class
    ``Repro``), and input-loading code; callers append the run_repro footer.
    ``stable_output`` suppresses environment-specific comments so outputs are
    diffable; ``save_dir`` is where large tensors are stored by InputWriter.
    """
    model_str = textwrap.dedent(
        f"""
import torch
from torch import tensor, device
import torch.fx as fx
from torch._dynamo.testing import rand_strided
from math import inf
import torch._inductor.inductor_prims

{generate_config_string(stable_output=stable_output)}

isolate_fails_code_str = None

{extra_imports}

        """
    )
    if not stable_output:
        model_str += f"# torch version: {torch.version.__version__}\n"
        if hasattr(torch.version, "cuda"):
            model_str += f"# torch cuda version: {torch.version.cuda}\n"
        if hasattr(torch.version, "git_version"):
            model_str += f"# torch git version: {torch.version.git_version}\n\n\n"
        model_str += _cuda_system_info_comment()

    model_str += NNModuleToString.convert(gm)

    # get hint shape/stride when dynamic shape enabled
    # NOTE(review): hint_if_symint is defined but never used below —
    # presumably leftover from an earlier version; confirm before removing.
    def hint_if_symint(x):
        return tuple(i.node.hint if isinstance(i, torch.SymInt) else i for i in x)

    writer = InputWriter(save_dir)
    for placeholder, arg in zip(fx_placeholder_targets(gm), args):
        if isinstance(arg, (int, torch.SymInt)):
            writer.symint(placeholder, arg)
        elif isinstance(arg, torch.Tensor):
            # TODO: improve these names with FQN
            writer.tensor(placeholder, arg)
        else:
            raise TypeError(f"arg is neither SymInt/int nor torch.Tensor, {arg}")

    model_str += "\n".join(writer.lines()) + "\n"

    model_str += "mod = Repro()\n"
    return model_str
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
def save_graph_repro(
    fd,
    gm,
    args,
    compiler_name,
    *,
    stable_output=False,
    save_dir=None,
    command="run",
    accuracy=None,
    tracing_mode=None,
    check_str=None,
):
    """Write a complete, runnable repro script for ``gm`` to ``fd``.

    The script body comes from ``generate_compiler_repro_string``; a
    ``__main__`` block is appended that calls ``run_repro`` with the
    settings baked in via ``!r`` formatting.

    Args:
        fd: writable text file-like object.
        gm: fx.GraphModule to reproduce.
        args: graph inputs (ints/SymInts/Tensors).
        compiler_name: e.g. "inductor" / "inductor_accuracy"; used to
            default ``accuracy`` when it is None.
        stable_output: forwarded to the body generator (diffable output).
        save_dir: where tensor payloads are stored; baked into the script.
        command: default subcommand the generated script runs.
        accuracy: accuracy-check mode; defaults from ``compiler_name``.
        tracing_mode: defaults to "real", or "symbolic" if any input has
            free symbols.
        check_str: substring the failure must contain (minifier filtering).
    """
    # BackwardState inputs cannot be serialized into a repro; bail out with
    # an explanatory message instead of emitting a broken script.
    if any(
        isinstance(arg, torch.fx.experimental._backward_state.BackwardState)
        for arg in args
    ):
        fd.write(
            "Repro is not generated due to existence of BackwardState in graph input"
        )
        return
    fd.write(
        generate_compiler_repro_string(
            gm,
            args,
            stable_output=stable_output,
            save_dir=save_dir,
        )
    )
    if accuracy is None:
        accuracy = "_accuracy" in compiler_name
    if tracing_mode is None:
        tracing_mode = "real"
        if any(has_free_symbols(a) for a in args):
            tracing_mode = "symbolic"
    # Entry-point stanza: runs the repro under no_grad, plus a commented-out
    # recipe for fetching (mod, args) separately.
    fd.write("if __name__ == '__main__':\n")
    fd.write("    from torch._dynamo.repro.after_aot import run_repro\n")
    fd.write(
        f"    with torch.no_grad():\n"
        f"        run_repro(mod, load_args, accuracy={accuracy!r}, command={command!r}, "
        f"save_dir={save_dir!r}, tracing_mode={tracing_mode!r}, check_str={check_str!r})\n"
        f"        # To run it separately, do \n"
        f"        # mod, args = run_repro(mod, load_args, accuracy={accuracy!r}, command='get_args', "
        f"save_dir={save_dir!r}, tracing_mode={tracing_mode!r}, check_str={check_str!r})\n"
        f"        # mod(*args)"
    )
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
def dump_compiler_graph_state(gm, args, compiler_name, *, accuracy=None):
    """Checkpoint ``gm`` as a repro script under the minifier directory,
    and best-effort copy it to ``./repro.py`` for convenience."""
    checkpoint_dir = os.path.join(minifier_dir(), "checkpoints")
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir, exist_ok=True)

    # Name the checkpoint by node count so progressively smaller graphs
    # produced during minification get distinct files.
    node_count = len(gm.graph.nodes)
    checkpoint_path = os.path.join(checkpoint_dir, f"{node_count}.py")
    log.warning(
        "Writing checkpoint with %s nodes to %s", node_count, checkpoint_path
    )

    with open(checkpoint_path, "w") as out_fd:
        save_graph_repro(
            out_fd, gm, args, compiler_name, save_dir=checkpoint_dir, accuracy=accuracy
        )

    # Convenience copy into the current working directory; failure to write
    # there is non-fatal.
    repro_path = os.path.join(os.getcwd(), "repro.py")
    try:
        shutil.copyfile(checkpoint_path, repro_path)
        log.warning("Copying repro file for convenience to %s", repro_path)
        if use_buck:
            BuckTargetWriter(checkpoint_path).write()
    except OSError:
        log.warning("No write permissions for %s", repro_path)
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 329 |
+
# DUMP MINIFIER
|
| 330 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
def dump_to_minify(gm, args, compiler_name: str):
    """Generate a 'minify'-command repro script for ``gm`` in memory and
    hand its text to ``helper_for_dump_minify``."""
    buffer = io.StringIO()
    # TODO: factor this out
    checkpoint_dir = os.path.join(minifier_dir(), "checkpoints")
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir, exist_ok=True)
    save_graph_repro(
        buffer, gm, args, compiler_name, save_dir=checkpoint_dir, command="minify"
    )
    return helper_for_dump_minify(buffer.getvalue())
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
def isolate_fails(
    fx_g,
    args,
    compiler_name: str,
    env=None,
    save_dir=None,
    accuracy=None,
    tracing_mode=None,
    check_str=None,
):
    """Run the repro for ``fx_g`` in a fresh subprocess and report failure.

    Writes a 'minifier-query' repro script to ``./isolate/<rand>.py``,
    executes it (via buck or plain ``python``) with ``env`` overlaid on the
    current environment, echoes the child's stdout/stderr prefixed with
    ">> ", and returns True iff the child exited non-zero.

    Args:
        fx_g: fx.GraphModule under test.
        args: graph inputs serialized into the script.
        compiler_name: compiler label baked into the script.
        env: extra environment variables for the child process.
        save_dir / accuracy / tracing_mode / check_str: forwarded into the
            generated script via ``save_graph_repro``.

    Returns:
        bool: True if the isolated run failed (non-zero exit code).
    """
    if env is None:
        env = {}
    subdir = os.path.join(os.getcwd(), "isolate")
    if not os.path.exists(subdir):
        os.makedirs(subdir, exist_ok=True)
    # Short random name so repeated queries don't clobber each other.
    file_name = os.path.join(subdir, f"{str(uuid.uuid4())[:5]}.py")
    with open(file_name, "w") as fd:
        save_graph_repro(
            fd,
            fx_g,
            args,
            compiler_name,
            save_dir=save_dir,
            command="minifier-query",
            accuracy=accuracy,
            tracing_mode=tracing_mode,
            check_str=check_str,
        )
    # with open(file_name, "r") as fd:
    #     print(fd.read())
    new_env = os.environ.copy()
    new_env = {**new_env, **env}
    stdout, stderr = TemporaryFile(), TemporaryFile()

    if use_buck:
        cmd = BuckTargetWriter(file_name).write(print_msg=False)
    else:
        cmd = ["python", file_name]

    p = subprocess.Popen(
        cmd,
        cwd=subdir,
        stdout=stdout,
        stderr=stderr,
        env=new_env,
    )
    p.wait()

    # Replay the child's captured output into our own streams for visibility.
    stdout.seek(0)
    stderr.seek(0)
    print(
        textwrap.indent(stdout.read().decode("utf-8"), prefix=">> "), file=sys.stdout
    )
    print(
        textwrap.indent(stderr.read().decode("utf-8"), prefix=">> "), file=sys.stderr
    )
    # print(f"Isolated test failed - {file_name}")
    return p.returncode != 0
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 404 |
+
# MINIFIER TOOLS
|
| 405 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 406 |
+
|
| 407 |
+
|
| 408 |
+
def inductor_fails(fx_g, args, check_str=None):
    """Return True iff compiling/running ``fx_g`` with inductor raises.

    First sanity-runs the graph eagerly; if eager itself fails (or returns
    a nested structure), the graph is considered uninteresting and False is
    returned. Then compiles with ``compile_fx_inner`` and runs the result;
    an exception there counts as a failure — optionally only when its repr
    contains ``check_str``.
    """
    has_cuda = False
    for arg in args:
        if isinstance(arg, torch.Tensor) and arg.is_cuda:
            has_cuda = True
            break

    def sync():
        if has_cuda:
            # Ensures that segfaults are surfaced
            torch.cuda.synchronize()

    from torch._inductor.compile_fx import compile_fx_inner

    try:
        # Eager sanity run: the minifier only wants graphs that work eagerly
        # and return a flat tuple/list of tensors.
        result = fx_g(*args)
        assert isinstance(result, (tuple, list))
        assert not any(isinstance(x, (tuple, list)) for x in result)
    except Exception:
        # Deliberate broad catch: any eager failure means "not a repro".
        return False

    sync()

    try:
        compile_mod = compile_fx_inner(fx_g, args)
        compile_mod(args)
        sync()
    except Exception as e:
        # Only count as failing if the error matches the requested substring.
        if check_str is not None and check_str not in repr(e):
            return False
        print(repr(e))
        return True
    return False
|
| 441 |
+
|
| 442 |
+
|
| 443 |
+
def inductor_accuracy_fails(
    fx_g, args, check_str=None, *, require_fp64=False, ignore_non_fp=False
):
    """Return True iff inductor-compiled ``fx_g`` diverges from eager.

    Thin forward-only wrapper around ``backend_aot_accuracy_fails`` using
    ``compile_fx_inner`` as the compiler.

    NOTE(review): ``check_str`` is accepted but not forwarded — presumably
    kept only so this has the same signature as other fail predicates used
    by the minifier; confirm before relying on it.
    """
    from torch._inductor.compile_fx import compile_fx_inner

    return backend_aot_accuracy_fails(
        fx_g,
        args,
        compile_fx_inner,
        require_fp64=require_fp64,
        ignore_non_fp=ignore_non_fp,
    )
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
# Forward-only variant of the generic accuracy predicate (AOT graphs here
# are forward graphs, so the backward pass is not exercised).
backend_aot_accuracy_fails = functools.partial(backend_accuracy_fails, only_fwd=True)
|
| 458 |
+
|
| 459 |
+
|
| 460 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 461 |
+
# REPRO MAIN
|
| 462 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 463 |
+
|
| 464 |
+
|
| 465 |
+
def repro_common(options, mod, load_args):
    """Shared setup for every repro subcommand.

    Loads the serialized inputs via ``load_args`` (with a progress bar),
    re-traces ``mod`` with ``make_fx`` into a GraphModule, and enables
    inductor intermediate hooks.

    Args:
        options: parsed argparse namespace (uses ``save_dir``,
            ``tracing_mode``).
        mod: the ``Repro`` module from the generated script; must have no
            parameters.
        load_args: callable from the generated script that feeds inputs to
            an InputReader; expected to carry a ``_version`` attribute.

    Returns:
        (traced GraphModule, list of loaded args).
    """
    # Invariant for graphs we generate with the repro script
    assert not any(mod.named_parameters())
    for n, b in mod.named_buffers():
        if b.numel() > MAX_CONSTANT_NUMEL_INLINE:
            log.warning(
                "Constant %s was not serialized, generated random data instead. "
                "If you think this is affecting you, please comment on "
                "https://github.com/pytorch/pytorch/issues/100468",
                n,
            )

    # Version-check the serialized input format; warn but keep going.
    if not hasattr(load_args, "_version"):
        log.warning(
            "load_args does not have a _version attribute, please file a bug to PyTorch "
            "and describe how you generate this repro script"
        )
    else:
        if load_args._version > 0:
            log.warning(
                "load_args is version %s, but this version of PyTorch only supports "
                "version 0. We will try to run it anyway but there may be an incompatibility; "
                "if so, try upgrading your version of PyTorch.",
                load_args._version,
            )

    # First pass with a no-op reader just to count inputs for the progress bar.
    nop_reader = NopInputReader()
    load_args(nop_reader)

    with tqdm(desc="Loading inputs", total=nop_reader.total) as pbar:
        input_reader = InputReader(save_dir=options.save_dir, pbar=pbar)
        load_args(input_reader)
        args = input_reader.args

    # Turn mod into a GraphModule the slow way
    # TODO: speed this up
    mod = make_fx(mod, tracing_mode=options.tracing_mode)(*args)

    # Intermediate hooks are needed by the analyze flow to capture
    # per-node inductor outputs.
    torch._inductor.config.generate_intermediate_hooks = True

    return mod, args
|
| 506 |
+
|
| 507 |
+
|
| 508 |
+
# Maps the --accuracy mode string ("" / "accuracy" / "strict_accuracy") to
# the predicate the minifier uses to decide whether a candidate graph
# still "fails". Each predicate has signature (mod, args, check_str=...) -> bool.
ACCURACY_FAILS: Dict[str, Callable[[nn.Module, Any], bool]] = {
    # "" means no accuracy checking: fail = compile/run raises.
    "": inductor_fails,
    # This might look inverted but it's not.  strict_accuracy means "we will
    # minify any time we see anything that diverges", whereas accuracy is more
    # conservative, and will only minify if there is a meaningful fp64
    # divergence
    "accuracy": functools.partial(
        inductor_accuracy_fails, require_fp64=True, ignore_non_fp=True
    ),
    "strict_accuracy": inductor_accuracy_fails,
}
|
| 519 |
+
|
| 520 |
+
|
| 521 |
+
def repro_minifier_query(options, mod, load_args):
    """Subcommand used by isolate_fails: exit 1 if the graph still fails
    under the selected accuracy predicate, 0 otherwise."""
    mod, args = repro_common(options, mod, load_args)
    predicate = functools.partial(
        ACCURACY_FAILS[options.accuracy], check_str=options.check_str
    )
    # Exit code is the protocol: non-zero signals "still failing" to the
    # parent minifier process.
    sys.exit(1 if predicate(mod, args) else 0)
|
| 530 |
+
|
| 531 |
+
|
| 532 |
+
def repro_minify(options, mod, load_args):
    """'minify' subcommand: shrink the failing graph with functorch's minifier.

    Chooses the failure predicate from ``ACCURACY_FAILS`` (or wraps it in
    ``isolate_fails`` to run each candidate in a subprocess when
    ``--isolate``), then drives ``minifier`` which checkpoints progressively
    smaller repros via ``dump_compiler_graph_state``.
    """
    from functorch.compile import minifier

    mod, args = repro_common(options, mod, load_args)
    compiler_name = "inductor_accuracy" if options.accuracy != "" else "inductor"

    # Prefer a second GPU if available so minification doesn't fight the
    # original workload for device 0.
    favored_device = 1 if torch.cuda.device_count() >= 2 else 0
    env_variables = {"CUDA_VISIBLE_DEVICES": str(favored_device)}

    module_fails: Any
    if options.isolate:
        # Subprocess isolation: survives segfaults/aborts in the compiler.
        module_fails = functools.partial(
            isolate_fails,
            env=env_variables,
            compiler_name=compiler_name,
            save_dir=options.save_dir,
            accuracy=options.accuracy,
            tracing_mode=options.tracing_mode,
        )
    else:
        module_fails = ACCURACY_FAILS[options.accuracy]

    minifier(
        mod,
        args,
        module_fails=functools.partial(module_fails, check_str=options.check_str),
        dump_state=functools.partial(
            dump_compiler_graph_state, compiler_name=compiler_name
        ),
        save_dir=options.save_dir,
        offload_to_disk=options.offload_to_disk,
        skip_offload=options.skip_saving_eager_intermediates,
        skip_sanity=options.skip_sanity,
        max_granularity=options.max_granularity,
    )
|
| 567 |
+
|
| 568 |
+
|
| 569 |
+
def repro_analyze(options, mod, load_args):
    """'analyze' subcommand: pinpoint where inductor diverges from eager.

    Pipeline (each phase has its own tqdm bar):
      1. compile with inductor and record per-node intermediates via
         ``intermediate_hook`` into a ContentStore;
      2. optionally re-run to check inductor's own determinism;
      3. interpret a deepcopied fp64 cast of the graph, saving float64
         reference intermediates (and optionally re-checking determinism);
      4. interpret the eager graph, comparing each known node against the
         saved inductor and float64 values with ``same`` and reporting
         "DIVERGED at <node>" lines.

    Note: the boxed_run calls consume their args lists in place (hence the
    ``assert not new_args`` checks).
    """
    from torch._inductor.compile_fx import compile_fx_inner
    from torch._inductor.hooks import intermediate_hook

    mod, args = repro_common(options, mod, load_args)

    # TODO: The logic for cloning inputs/models here is intentionally
    # modeled off of run_fwd_maybe_bwd, but arguably it is better not to
    # clone inputs (as you are doubling your effective GPU memory usage).
    # It is certainly faster though! It probably makes sense to let the
    # user specify the offload strategy.

    with tqdm(desc="Compiling"):
        compiled = compile_fx_inner(mod, args)
    # Number of intermediate hooks = total progress units for each phase.
    total = counters["inductor"]["intermediate_hooks"]

    known_names = set()

    def save_hook(name, val):
        # Phase 1 hook: remember every node name and persist its value.
        known_names.add(name)
        if not options.skip_saving_inductor_intermediates:
            writer.write_tensor(os.path.join("inductor", name), val)
        pbar.update(1)  # type: ignore[has-type]

    writer = torch.utils._content_store.ContentStoreWriter(
        options.save_dir, stable_hash=options.stable_hash
    )
    reader = torch.utils._content_store.ContentStoreReader(options.save_dir)

    new_args = clone_inputs(args)
    with intermediate_hook(save_hook), tqdm(
        desc="Saving inductor intermediates", total=total
    ) as pbar:
        compiled(new_args)
        assert not new_args

    def compare_tuples(tuple1, tuple2):
        # Returns None if equal, else a human-readable "a != b and c != d".
        diff_indices = [i for i in range(len(tuple1)) if tuple1[i] != tuple2[i]]
        diff_values = [(tuple1[i], tuple2[i]) for i in diff_indices]

        if not diff_values:
            return None
        else:
            return " and ".join(f"{a} != {b}" for a, b in diff_values)

    def check_hook(name, val):
        # Phase 2 hook: compare fresh metadata against the saved run.
        meta = writer.compute_tensor_metadata(val)
        meta2 = reader.read_tensor_metadata(os.path.join("inductor", name))
        reason = compare_tuples(meta, meta2)
        if reason is not None:
            pbar.write(f"NONDETERMINISTIC INDUCTOR at {name} ({reason})")
        pbar.update(1)

    if not options.skip_check_deterministic:
        new_args = clone_inputs(args)
        with intermediate_hook(check_hook), tqdm(
            desc="Checking inductor determinism", total=total
        ) as pbar:
            compiled(new_args)
            assert not new_args

    class WriterInterp(fx.Interpreter):
        # Interpreter that saves every hooked node's value under `subdir`.
        def __init__(self, mod, subdir) -> None:
            super().__init__(mod)
            self.subdir = subdir

        def run_node(self, n):
            r = super().run_node(n)
            name = n.name
            if name in known_names:
                pbar.update(1)
                writer.write_tensor(os.path.join(self.subdir, name), r)
            return r

    # NB: the module cast doesn't actually do anything, since there are no
    # parameters/buffers on the module
    if not options.skip_saving_float64_intermediates:
        new_mod, new_args = cast_to_fp64(copy.deepcopy(mod), clone_inputs(args))
        with tqdm(desc="Saving float64 intermediates", total=total) as pbar:
            WriterInterp(new_mod, "float64").boxed_run(new_args)
        assert not new_args

    class ExactReaderInterp(fx.Interpreter):
        # Interpreter that re-runs fp64 and checks metadata determinism
        # against the saved float64 intermediates.
        def run_node(self, n):
            r = super().run_node(n)
            name = n.name
            if name in known_names:
                meta = writer.compute_tensor_metadata(r)
                meta2 = reader.read_tensor_metadata(os.path.join("float64", name))
                reason = compare_tuples(meta, meta2)
                if reason is not None:
                    pbar.write(f"NONDETERMINISTIC FLOAT64 at {name} ({reason})")
                pbar.update(1)
            return r

    # TODO: check eager determinism

    if not options.skip_check_deterministic:
        new_mod, new_args = cast_to_fp64(copy.deepcopy(mod), clone_inputs(args))
        with tqdm(desc="Checking float64 determinism", total=total) as pbar:
            ExactReaderInterp(new_mod).boxed_run(new_args)
            assert not new_args

    # Now that we've saved everything, interp through the eager graph
    # and do comparisons
    class ReaderInterp(fx.Interpreter):
        def run_node(self, n):
            r = super().run_node(n)
            name = n.name
            if name in known_names:
                inductor = reader.read_tensor(os.path.join("inductor", name))
                float64 = reader.read_tensor(os.path.join("float64", name))
                logged = False

                def log_error(msg, *args):
                    nonlocal logged
                    logged = True
                    pbar.write(f"DIVERGED at {name}: {msg % args}")

                if not same(
                    r,
                    inductor,
                    float64,
                    tol=torch._dynamo.config.repro_tolerance,
                    equal_nan=True,
                    log_error=log_error,
                ):
                    # `same` must have reported via log_error when it fails.
                    assert logged
                pbar.update(1)
            return r

    with tqdm(desc="Checking divergence", total=total) as pbar:
        ReaderInterp(mod).boxed_run(args)
    assert not args
|
| 703 |
+
|
| 704 |
+
|
| 705 |
+
def repro_get_args(options, mod, load_args):
    """'get_args' subcommand: return the traced module and loaded inputs
    without running anything."""
    return repro_common(options, mod, load_args)
|
| 708 |
+
|
| 709 |
+
|
| 710 |
+
def repro_run(options, mod, load_args):
    """'run' subcommand: compile with inductor and execute once.

    In accuracy mode, compare the compiled module against eager/fp64 via
    ``same_two_models`` and raise ``AccuracyError`` on divergence.
    Otherwise just run the compiled module (synchronizing afterwards on
    CUDA so segfaults surface here rather than later).

    Returns:
        A zero-arg callable that re-runs the compiled module.
    """
    from torch._inductor.compile_fx import compile_fx_inner

    mod, args = repro_common(options, mod, load_args)

    from torch.cuda import synchronize

    compiled = compile_fx_inner(mod, args)

    if options.accuracy != "":
        # We don't really respect --accuracy vs --strict-accuracy here, it
        # seems counterintuitive
        if not same_two_models(
            mod,
            compiled,
            args,
            only_fwd=True,
            ignore_non_fp=config.repro_ignore_non_fp,
        ):
            raise AccuracyError("Bad accuracy detected")
    else:
        need_sync = False
        for arg in args:
            if isinstance(arg, torch.Tensor) and arg.is_cuda:
                need_sync = True
                break
        # Run once for its side effects / to surface errors.
        # (fix: the result used to be bound to an unused `ref` variable.)
        compiled(list(args))
        if need_sync:
            synchronize()  # ensure segfaults are surfaced
    return lambda: compiled(list(args))
|
| 740 |
+
|
| 741 |
+
|
| 742 |
+
# TODO: lazily load the inputs or something, rather than cloning them
|
| 743 |
+
# TODO: lazily load the inputs or something, rather than cloning them
def run_repro(
    mod,
    load_args,
    *,
    command="run",
    accuracy: Union[bool, str] = "",
    save_dir=None,
    tracing_mode=None,
    patch_code=None,
    check_str=None,
    **kwargs,
):
    """Entry point called by generated repro scripts.

    Builds an argparse CLI with subcommands (run / minify / analyze /
    minifier-query / get_args) whose defaults come from the keyword
    arguments baked into the script, parses ``sys.argv``, and dispatches to
    the matching ``repro_*`` function.

    Args:
        mod: the Repro module from the generated script.
        load_args: input-loading callable from the generated script.
        command: subcommand to run when the script is invoked with no CLI
            arguments.
        accuracy: "" (none), "accuracy", "strict_accuracy", or a bool
            (normalized below).
        save_dir / tracing_mode / check_str: defaults for the CLI flags.
        patch_code: deprecated; warned about and ignored.
        **kwargs: unknown kwargs from newer-version scripts; warned about
            and ignored.

    Returns:
        Whatever the dispatched repro_* function returns (e.g. a rerun
        callable from repro_run, (mod, args) from repro_get_args).
    """
    # Tolerate kwargs from repro scripts generated by newer PyTorch versions.
    for k in kwargs:
        log.warning(
            "Unrecognized kwarg %s; perhaps this repro was made on a newer version of PyTorch",
            k,
        )

    # Normalize legacy boolean accuracy values to mode strings.
    if accuracy is True:
        accuracy = "accuracy"
    elif accuracy is False:
        accuracy = ""

    if patch_code is not None:
        log.warning(
            "patch_code no longer works on this version of PyTorch, silently ignoring"
        )

    parser = argparse.ArgumentParser(
        description=f"""\
An after_aot repro script, typically triggering a bug in PyTorch Inductor.
When run with no arguments, this script defaults to running '{command}'.
Extra flags may be available; to find out more, try '{command} --help'.
There are also alternate subcommands available, see below.

default settings on this script:
  {accuracy=}
  {tracing_mode=}
  {save_dir=}
  {check_str=}
""",
        formatter_class=argparse.RawTextHelpFormatter,
    )

    def common_flags(parser):
        # Flags shared by every subcommand (accuracy mode, save dir,
        # tracing mode), defaulting to the values baked into the script.
        accuracy_group = parser.add_mutually_exclusive_group()
        accuracy_group.add_argument(
            "--no-accuracy",
            dest="accuracy",
            action="store_const",
            const="",
            default=accuracy,
            help="do not test accuracy, just run the module and see if it errors",
        )
        accuracy_group.add_argument(
            "--accuracy",
            action="store_const",
            const="accuracy",
            default=accuracy,
            help="""\
test if the RMSE between the compiled module and the fp64 reference is greater
than eager and the fp64 reference. This is usually more reliable than the
standard allclose test, as we expect numeric differences from compiling, often
improving accuracy over eager. RMSE test allows for compiled module to
diverge greatly from eager, as long as this divergence moves it closer to the
'true' mathematical value of the network. Caveats: (1) double precision can
still suffer from rounding error, so it is not a perfect reference (see for
example 'Herbie: Automatically Improving Floating Point Accuracy') for
approaches that detect the necessary working precision and compute it in
arbitrary precision floating point; unfortunately, this is not practical for
tensor computation; (2) if there are not enough samples in the output being
compared, we may get unlucky and have an unlucky greater RMSE than eager; this
could be overcome by applying a more rigorous statistical test at some
p-value, which we leave for future work.
""",
        )
        accuracy_group.add_argument(
            "--strict-accuracy",
            dest="accuracy",
            action="store_const",
            const="strict_accuracy",
            default=accuracy,
            help="""\
by default, when doing accuracy minification we will reject reductions which
change the divergence from a floating point divergence to a integral/boolean
divergence. This is because some operations like ReLU involve temporarily
sharp boundaries that smooth out again afterwards; without requiring
divergence on floating point, the minifier will often fixate on divergent
boolean tensor even though this is not the true source of the divergence.
However, rejecting these reductions makes it more difficult for the minifier
to make process. Using this option will let the minifier progress for ALL
divergences--you just might not end up with a useful repro in the end.""",
        )

        parser.add_argument(
            "--save-dir",
            type=str,
            default=save_dir,
            metavar="DIR",
            help="directory where saved inputs live",
        )
        parser.add_argument(
            "--no-save-dir",
            dest="save_dir",
            action="store_const",
            const=None,
            help="don't use any directory for saved inputs",
        )
        parser.add_argument(
            "--tracing-mode",
            type=str,
            metavar="{real,fake,symbolic}",
            default=tracing_mode,
            help="how to trace the repro module into a GraphModule with metadata",
        )

    subparsers = parser.add_subparsers(
        dest="command", metavar="{run,minify,analyze}", required=True
    )

    parser_run = subparsers.add_parser(
        "run",
        help="just run the repro",
    )
    common_flags(parser_run)

    parser_minify = subparsers.add_parser(
        "minify", help="run the minifier on the repro"
    )
    common_flags(parser_minify)
    parser_get_args = subparsers.add_parser("get_args", help="get the args")
    common_flags(parser_get_args)
    parser_minify_isolate = parser_minify.add_mutually_exclusive_group()
    parser_minify_isolate.add_argument(
        "--isolate",
        action="store_true",
        default=True,
        help="run in separate processes to avoid interference (default)",
    )
    parser_minify_isolate.add_argument(
        "--no-isolate",
        dest="isolate",
        action="store_false",
        help="speed up by running all compilation in same process",
    )
    parser_minify.add_argument(
        "--skip-saving-eager-intermediates",
        action="store_true",
        help="skip saving eager intermediates on --minify",
    )
    # TODO: make this an option for --analyze too
    parser_minify.add_argument(
        "--offload-to-disk",
        action="store_true",
        help="during minification, offload delta debugging intermediates to disk. Use if you're OOMing",
    )
    parser_minify.add_argument(
        "--skip-sanity",
        action="store_true",
        help="skip sanity check at beginning of minification on original graph",
    )
    parser_minify.add_argument(
        "--max-granularity",
        type=int,
        default=None,
        help="start at this granularity and work down; must be power of 2",
    )
    parser_minify.add_argument(
        "--check-str",
        type=str,
        default=check_str,
        help="require minified program to fail with error containing this string",
    )

    parser_analyze = subparsers.add_parser(
        "analyze", help="run the accuracy analyzer on the repro"
    )
    common_flags(parser_analyze)
    parser_analyze.add_argument(
        "--skip-saving-inductor-intermediates",
        action="store_true",
        help="skip saving inductor intermediates on --analyze",
    )
    parser_analyze.add_argument(
        "--skip-saving-float64-intermediates",
        action="store_true",
        help="skip saving float64 intermediates",
    )
    parser_analyze.add_argument(
        "--skip-check-deterministic",
        action="store_true",
        help="skip checking that the network is deterministic",
    )
    parser_analyze.add_argument(
        "--stable-hash",
        action="store_true",
        help="use SHA-1 checksum instead of fast (but possibly unsound) hash",
    )

    # Run the repro in the context of minification, inverting exit code meaning
    parser_minifier_query = subparsers.add_parser(
        "minifier-query",
    )
    common_flags(parser_minifier_query)
    parser_minifier_query.add_argument(
        "--check-str",
        type=str,
        default=check_str,
        help="require minified program to fail with error containing this string",
    )

    # With no CLI arguments, fall back to the default subcommand baked into
    # the script; otherwise let argparse consume sys.argv as usual.
    args = None
    if len(sys.argv) <= 1:
        args = [command, *sys.argv[1:]]

    options = parser.parse_args(args)
    COMMAND_FNS = {
        "minify": repro_minify,
        "analyze": repro_analyze,
        "minifier-query": repro_minifier_query,
        "run": repro_run,
        "get_args": repro_get_args,
    }
    return COMMAND_FNS[options.command](options, mod, load_args)
|
pllava/lib/python3.10/site-packages/torch/_dynamo/resume_execution.py
ADDED
|
@@ -0,0 +1,720 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import copy
|
| 3 |
+
import dataclasses
|
| 4 |
+
import sys
|
| 5 |
+
import types
|
| 6 |
+
from typing import Any, cast, Dict, List, Optional, Tuple
|
| 7 |
+
|
| 8 |
+
from .bytecode_transformation import (
|
| 9 |
+
create_call_function,
|
| 10 |
+
create_call_method,
|
| 11 |
+
create_dup_top,
|
| 12 |
+
create_instruction,
|
| 13 |
+
create_jump_absolute,
|
| 14 |
+
create_load_method,
|
| 15 |
+
Instruction,
|
| 16 |
+
InstructionExnTabEntry,
|
| 17 |
+
transform_code_object,
|
| 18 |
+
unique_id,
|
| 19 |
+
)
|
| 20 |
+
from .utils import ExactWeakKeyDictionary
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
# taken from code.h in cpython
|
| 24 |
+
CO_OPTIMIZED = 0x0001
|
| 25 |
+
CO_NEWLOCALS = 0x0002
|
| 26 |
+
CO_VARARGS = 0x0004
|
| 27 |
+
CO_VARKEYWORDS = 0x0008
|
| 28 |
+
CO_NESTED = 0x0010
|
| 29 |
+
CO_GENERATOR = 0x0020
|
| 30 |
+
CO_NOFREE = 0x0040
|
| 31 |
+
CO_COROUTINE = 0x0080
|
| 32 |
+
CO_ITERABLE_COROUTINE = 0x0100
|
| 33 |
+
CO_ASYNC_GENERATOR = 0x0200
|
| 34 |
+
|
| 35 |
+
# trace_rules.py import this constant for consistency
|
| 36 |
+
TORCH_DYNAMO_RESUME_IN_PREFIX = "torch_dynamo_resume_in"
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def _initial_push_null(insts):
|
| 40 |
+
if sys.version_info >= (3, 11):
|
| 41 |
+
insts.append(create_instruction("PUSH_NULL"))
|
| 42 |
+
if sys.version_info < (3, 13):
|
| 43 |
+
insts.append(create_instruction("SWAP", arg=2))
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
@dataclasses.dataclass(frozen=True)
|
| 47 |
+
class ReenterWith:
|
| 48 |
+
stack_index: int
|
| 49 |
+
target_values: Optional[Tuple[Any, ...]] = None
|
| 50 |
+
|
| 51 |
+
# If we do not want to destroy the stack, we can do the same thing as a
|
| 52 |
+
# `SETUP_WITH` block, only that we store the context manager in a local_symbol
|
| 53 |
+
def try_except(self, code_options, cleanup: List[Instruction]):
|
| 54 |
+
"""
|
| 55 |
+
Codegen based off of:
|
| 56 |
+
load args
|
| 57 |
+
enter context
|
| 58 |
+
try:
|
| 59 |
+
(rest)
|
| 60 |
+
finally:
|
| 61 |
+
exit context
|
| 62 |
+
"""
|
| 63 |
+
# NOTE: we assume that TOS is a context manager CLASS!
|
| 64 |
+
load_args = []
|
| 65 |
+
if self.target_values:
|
| 66 |
+
load_args = [
|
| 67 |
+
create_instruction("LOAD_CONST", argval=val)
|
| 68 |
+
for val in self.target_values
|
| 69 |
+
]
|
| 70 |
+
ctx_name = unique_id(f"___context_manager_{self.stack_index}")
|
| 71 |
+
if ctx_name not in code_options["co_varnames"]:
|
| 72 |
+
code_options["co_varnames"] += (ctx_name,)
|
| 73 |
+
for name in ["__enter__", "__exit__"]:
|
| 74 |
+
if name not in code_options["co_names"]:
|
| 75 |
+
code_options["co_names"] += (name,)
|
| 76 |
+
|
| 77 |
+
except_jump_target = create_instruction(
|
| 78 |
+
"NOP" if sys.version_info < (3, 11) else "PUSH_EXC_INFO"
|
| 79 |
+
)
|
| 80 |
+
cleanup_complete_jump_target = create_instruction("NOP")
|
| 81 |
+
|
| 82 |
+
setup_finally: List[Instruction] = []
|
| 83 |
+
_initial_push_null(setup_finally)
|
| 84 |
+
|
| 85 |
+
# TODO(williamwen42) call method order is wrong for 3.13+ - will fix later
|
| 86 |
+
setup_finally.extend(
|
| 87 |
+
[
|
| 88 |
+
*load_args,
|
| 89 |
+
*create_call_function(len(load_args), False),
|
| 90 |
+
create_instruction("STORE_FAST", argval=ctx_name),
|
| 91 |
+
create_instruction("LOAD_FAST", argval=ctx_name),
|
| 92 |
+
create_load_method("__enter__"),
|
| 93 |
+
*create_call_method(0),
|
| 94 |
+
create_instruction("POP_TOP"),
|
| 95 |
+
]
|
| 96 |
+
)
|
| 97 |
+
|
| 98 |
+
if sys.version_info < (3, 11):
|
| 99 |
+
setup_finally.append(
|
| 100 |
+
create_instruction("SETUP_FINALLY", target=except_jump_target)
|
| 101 |
+
)
|
| 102 |
+
else:
|
| 103 |
+
exn_tab_begin = create_instruction("NOP")
|
| 104 |
+
exn_tab_end = create_instruction("NOP")
|
| 105 |
+
exn_tab_begin.exn_tab_entry = InstructionExnTabEntry(
|
| 106 |
+
exn_tab_begin,
|
| 107 |
+
exn_tab_end,
|
| 108 |
+
except_jump_target,
|
| 109 |
+
self.stack_index + 1,
|
| 110 |
+
False,
|
| 111 |
+
)
|
| 112 |
+
setup_finally.append(exn_tab_begin)
|
| 113 |
+
|
| 114 |
+
def create_reset():
|
| 115 |
+
return [
|
| 116 |
+
create_instruction("LOAD_FAST", argval=ctx_name),
|
| 117 |
+
create_load_method("__exit__"),
|
| 118 |
+
create_instruction("LOAD_CONST", argval=None),
|
| 119 |
+
create_dup_top(),
|
| 120 |
+
create_dup_top(),
|
| 121 |
+
*create_call_method(3),
|
| 122 |
+
create_instruction("POP_TOP"),
|
| 123 |
+
]
|
| 124 |
+
|
| 125 |
+
if sys.version_info < (3, 9):
|
| 126 |
+
epilogue = [
|
| 127 |
+
create_instruction("POP_BLOCK"),
|
| 128 |
+
create_instruction("BEGIN_FINALLY"),
|
| 129 |
+
except_jump_target,
|
| 130 |
+
*create_reset(),
|
| 131 |
+
create_instruction("END_FINALLY"),
|
| 132 |
+
]
|
| 133 |
+
elif sys.version_info < (3, 11):
|
| 134 |
+
epilogue = [
|
| 135 |
+
create_instruction("POP_BLOCK"),
|
| 136 |
+
*create_reset(),
|
| 137 |
+
create_instruction("JUMP_FORWARD", target=cleanup_complete_jump_target),
|
| 138 |
+
except_jump_target,
|
| 139 |
+
*create_reset(),
|
| 140 |
+
create_instruction("RERAISE"),
|
| 141 |
+
cleanup_complete_jump_target,
|
| 142 |
+
]
|
| 143 |
+
else:
|
| 144 |
+
finally_exn_tab_end = create_instruction("RERAISE", arg=0)
|
| 145 |
+
finally_exn_tab_target = create_instruction("COPY", arg=3)
|
| 146 |
+
except_jump_target.exn_tab_entry = InstructionExnTabEntry(
|
| 147 |
+
except_jump_target,
|
| 148 |
+
finally_exn_tab_end,
|
| 149 |
+
finally_exn_tab_target,
|
| 150 |
+
self.stack_index + 2,
|
| 151 |
+
True,
|
| 152 |
+
)
|
| 153 |
+
epilogue = [
|
| 154 |
+
exn_tab_end,
|
| 155 |
+
*create_reset(),
|
| 156 |
+
create_instruction("JUMP_FORWARD", target=cleanup_complete_jump_target),
|
| 157 |
+
except_jump_target, # PUSH_EXC_INFO
|
| 158 |
+
*create_reset(),
|
| 159 |
+
finally_exn_tab_end, # RERAISE 0
|
| 160 |
+
finally_exn_tab_target, # COPY 3
|
| 161 |
+
create_instruction("POP_EXCEPT"),
|
| 162 |
+
create_instruction("RERAISE", arg=1),
|
| 163 |
+
cleanup_complete_jump_target,
|
| 164 |
+
]
|
| 165 |
+
|
| 166 |
+
cleanup[:] = epilogue + cleanup
|
| 167 |
+
return setup_finally
|
| 168 |
+
|
| 169 |
+
def __call__(self, code_options, cleanup):
|
| 170 |
+
"""
|
| 171 |
+
Codegen based off of:
|
| 172 |
+
with ctx(args):
|
| 173 |
+
(rest)
|
| 174 |
+
"""
|
| 175 |
+
# NOTE: we assume that TOS is a context manager CLASS!
|
| 176 |
+
load_args = []
|
| 177 |
+
if self.target_values:
|
| 178 |
+
load_args = [
|
| 179 |
+
create_instruction("LOAD_CONST", argval=val)
|
| 180 |
+
for val in self.target_values
|
| 181 |
+
]
|
| 182 |
+
if sys.version_info < (3, 9):
|
| 183 |
+
with_cleanup_start = create_instruction("WITH_CLEANUP_START")
|
| 184 |
+
begin_finally = create_instruction("BEGIN_FINALLY")
|
| 185 |
+
cleanup[:] = [
|
| 186 |
+
create_instruction("POP_BLOCK"),
|
| 187 |
+
begin_finally,
|
| 188 |
+
with_cleanup_start,
|
| 189 |
+
create_instruction("WITH_CLEANUP_FINISH"),
|
| 190 |
+
create_instruction("END_FINALLY"),
|
| 191 |
+
] + cleanup
|
| 192 |
+
|
| 193 |
+
return [
|
| 194 |
+
*load_args,
|
| 195 |
+
create_instruction("CALL_FUNCTION", arg=len(load_args)),
|
| 196 |
+
create_instruction("SETUP_WITH", target=with_cleanup_start),
|
| 197 |
+
create_instruction("POP_TOP"),
|
| 198 |
+
], None
|
| 199 |
+
elif sys.version_info < (3, 11):
|
| 200 |
+
with_except_start = create_instruction("WITH_EXCEPT_START")
|
| 201 |
+
pop_top_after_with_except_start = create_instruction("POP_TOP")
|
| 202 |
+
|
| 203 |
+
cleanup_complete_jump_target = create_instruction("NOP")
|
| 204 |
+
|
| 205 |
+
cleanup[:] = [
|
| 206 |
+
create_instruction("POP_BLOCK"),
|
| 207 |
+
create_instruction("LOAD_CONST", argval=None),
|
| 208 |
+
create_instruction("DUP_TOP"),
|
| 209 |
+
create_instruction("DUP_TOP"),
|
| 210 |
+
create_instruction("CALL_FUNCTION", arg=3),
|
| 211 |
+
create_instruction("POP_TOP"),
|
| 212 |
+
create_instruction("JUMP_FORWARD", target=cleanup_complete_jump_target),
|
| 213 |
+
with_except_start,
|
| 214 |
+
create_instruction(
|
| 215 |
+
"POP_JUMP_IF_TRUE", target=pop_top_after_with_except_start
|
| 216 |
+
),
|
| 217 |
+
create_instruction("RERAISE"),
|
| 218 |
+
pop_top_after_with_except_start,
|
| 219 |
+
create_instruction("POP_TOP"),
|
| 220 |
+
create_instruction("POP_TOP"),
|
| 221 |
+
create_instruction("POP_EXCEPT"),
|
| 222 |
+
create_instruction("POP_TOP"),
|
| 223 |
+
cleanup_complete_jump_target,
|
| 224 |
+
] + cleanup
|
| 225 |
+
|
| 226 |
+
return [
|
| 227 |
+
*load_args,
|
| 228 |
+
create_instruction("CALL_FUNCTION", arg=len(load_args)),
|
| 229 |
+
create_instruction("SETUP_WITH", target=with_except_start),
|
| 230 |
+
create_instruction("POP_TOP"),
|
| 231 |
+
], None
|
| 232 |
+
else:
|
| 233 |
+
pop_top_after_with_except_start = create_instruction("POP_TOP")
|
| 234 |
+
cleanup_complete_jump_target = create_instruction("NOP")
|
| 235 |
+
|
| 236 |
+
def create_load_none():
|
| 237 |
+
return create_instruction("LOAD_CONST", argval=None)
|
| 238 |
+
|
| 239 |
+
exn_tab_1_begin = create_instruction("POP_TOP")
|
| 240 |
+
exn_tab_1_end = create_instruction("NOP")
|
| 241 |
+
exn_tab_1_target = create_instruction("PUSH_EXC_INFO")
|
| 242 |
+
exn_tab_2_end = create_instruction("RERAISE", arg=2)
|
| 243 |
+
exn_tab_2_target = create_instruction("COPY", arg=3)
|
| 244 |
+
|
| 245 |
+
exn_tab_1_begin.exn_tab_entry = InstructionExnTabEntry(
|
| 246 |
+
exn_tab_1_begin,
|
| 247 |
+
exn_tab_1_end,
|
| 248 |
+
exn_tab_1_target,
|
| 249 |
+
self.stack_index + 1,
|
| 250 |
+
True,
|
| 251 |
+
)
|
| 252 |
+
exn_tab_1_target.exn_tab_entry = InstructionExnTabEntry(
|
| 253 |
+
exn_tab_1_target,
|
| 254 |
+
exn_tab_2_end,
|
| 255 |
+
exn_tab_2_target,
|
| 256 |
+
self.stack_index + 3,
|
| 257 |
+
True,
|
| 258 |
+
)
|
| 259 |
+
pop_top_after_with_except_start.exn_tab_entry = InstructionExnTabEntry(
|
| 260 |
+
pop_top_after_with_except_start,
|
| 261 |
+
pop_top_after_with_except_start,
|
| 262 |
+
exn_tab_2_target,
|
| 263 |
+
self.stack_index + 3,
|
| 264 |
+
True,
|
| 265 |
+
)
|
| 266 |
+
|
| 267 |
+
cleanup[:] = [
|
| 268 |
+
exn_tab_1_end,
|
| 269 |
+
create_load_none(),
|
| 270 |
+
create_load_none(),
|
| 271 |
+
create_load_none(),
|
| 272 |
+
*create_call_function(2, False),
|
| 273 |
+
create_instruction("POP_TOP"),
|
| 274 |
+
create_instruction("JUMP_FORWARD", target=cleanup_complete_jump_target),
|
| 275 |
+
exn_tab_1_target, # PUSH_EXC_INFO
|
| 276 |
+
create_instruction("WITH_EXCEPT_START"),
|
| 277 |
+
create_instruction(
|
| 278 |
+
"POP_JUMP_FORWARD_IF_TRUE"
|
| 279 |
+
if sys.version_info < (3, 12)
|
| 280 |
+
else "POP_JUMP_IF_TRUE",
|
| 281 |
+
target=pop_top_after_with_except_start,
|
| 282 |
+
),
|
| 283 |
+
exn_tab_2_end, # RERAISE 2
|
| 284 |
+
exn_tab_2_target, # COPY 3
|
| 285 |
+
create_instruction("POP_EXCEPT"),
|
| 286 |
+
create_instruction("RERAISE", arg=1),
|
| 287 |
+
pop_top_after_with_except_start,
|
| 288 |
+
create_instruction("POP_EXCEPT"),
|
| 289 |
+
create_instruction("POP_TOP"),
|
| 290 |
+
create_instruction("POP_TOP"),
|
| 291 |
+
cleanup_complete_jump_target,
|
| 292 |
+
] + cleanup
|
| 293 |
+
|
| 294 |
+
ret: List[Instruction] = []
|
| 295 |
+
_initial_push_null(ret)
|
| 296 |
+
ret.extend(
|
| 297 |
+
[
|
| 298 |
+
*load_args,
|
| 299 |
+
*create_call_function(len(load_args), False),
|
| 300 |
+
create_instruction("BEFORE_WITH"),
|
| 301 |
+
exn_tab_1_begin, # POP_TOP
|
| 302 |
+
]
|
| 303 |
+
)
|
| 304 |
+
return ret, exn_tab_1_target
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
@dataclasses.dataclass
|
| 308 |
+
class ResumeFunctionMetadata:
|
| 309 |
+
code: types.CodeType
|
| 310 |
+
instructions: List[Instruction] = dataclasses.field(default_factory=list)
|
| 311 |
+
# Python 3.11+ fields
|
| 312 |
+
# NOTE: Python 3.11 removed blocks, but for our purposes, a "block" consists
|
| 313 |
+
# of instructions of all exception table entries that have the same target.
|
| 314 |
+
|
| 315 |
+
# map from PUSH_EXC_INFO's in the prefix to original block target offset
|
| 316 |
+
prefix_block_target_offset_remap: List[int] = dataclasses.field(
|
| 317 |
+
default_factory=list
|
| 318 |
+
)
|
| 319 |
+
# map from new block target offsets to original block target offsets
|
| 320 |
+
block_target_offset_remap: Optional[Dict[int, int]] = None
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
def _filter_iter(l1, l2, cond):
|
| 324 |
+
"""
|
| 325 |
+
Two-pointer conditional filter.
|
| 326 |
+
e.g. _filter_iter(insts, sorted_offsets, lambda i, o: i.offset == o)
|
| 327 |
+
returns the instructions with offsets in sorted_offsets
|
| 328 |
+
"""
|
| 329 |
+
it = iter(l2)
|
| 330 |
+
res: List[Instruction] = []
|
| 331 |
+
try:
|
| 332 |
+
cur = next(it)
|
| 333 |
+
for val in l1:
|
| 334 |
+
if cond(val, cur):
|
| 335 |
+
res.append(val)
|
| 336 |
+
cur = next(it)
|
| 337 |
+
except StopIteration:
|
| 338 |
+
pass
|
| 339 |
+
return res
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
def _load_tuple_and_call(tup):
|
| 343 |
+
insts: List[Instruction] = []
|
| 344 |
+
_initial_push_null(insts)
|
| 345 |
+
for val in tup:
|
| 346 |
+
insts.append(create_instruction("LOAD_CONST", argval=val))
|
| 347 |
+
insts.extend(create_call_function(len(tup), False))
|
| 348 |
+
return insts
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
class ContinueExecutionCache:
|
| 352 |
+
cache = ExactWeakKeyDictionary()
|
| 353 |
+
generated_code_metadata = ExactWeakKeyDictionary()
|
| 354 |
+
|
| 355 |
+
@classmethod
|
| 356 |
+
def lookup(cls, code, lineno, *key):
|
| 357 |
+
if code not in cls.cache:
|
| 358 |
+
cls.cache[code] = {}
|
| 359 |
+
key = tuple(key)
|
| 360 |
+
if key not in cls.cache[code]:
|
| 361 |
+
cls.cache[code][key] = cls.generate(code, lineno, *key)
|
| 362 |
+
return cls.cache[code][key]
|
| 363 |
+
|
| 364 |
+
@classmethod
|
| 365 |
+
def generate(
|
| 366 |
+
cls,
|
| 367 |
+
code,
|
| 368 |
+
lineno,
|
| 369 |
+
offset: int,
|
| 370 |
+
setup_fn_target_offsets: Tuple[int], # only used in Python 3.11+
|
| 371 |
+
nstack: int,
|
| 372 |
+
argnames: Tuple[str],
|
| 373 |
+
argnames_null: Tuple[str],
|
| 374 |
+
setup_fns: Tuple[ReenterWith],
|
| 375 |
+
stack_ctx_vars: Tuple[int, Tuple[Any]],
|
| 376 |
+
argnames_ctx_vars: Tuple[str, Tuple[Any]],
|
| 377 |
+
null_idxes: Tuple[int],
|
| 378 |
+
) -> types.CodeType:
|
| 379 |
+
assert offset is not None
|
| 380 |
+
assert not (
|
| 381 |
+
code.co_flags
|
| 382 |
+
& (CO_GENERATOR | CO_COROUTINE | CO_ITERABLE_COROUTINE | CO_ASYNC_GENERATOR)
|
| 383 |
+
)
|
| 384 |
+
assert code.co_flags & CO_OPTIMIZED
|
| 385 |
+
if code in ContinueExecutionCache.generated_code_metadata:
|
| 386 |
+
return cls.generate_based_on_original_code_object(
|
| 387 |
+
code,
|
| 388 |
+
lineno,
|
| 389 |
+
offset,
|
| 390 |
+
setup_fn_target_offsets,
|
| 391 |
+
nstack,
|
| 392 |
+
argnames,
|
| 393 |
+
argnames_null,
|
| 394 |
+
setup_fns,
|
| 395 |
+
stack_ctx_vars,
|
| 396 |
+
argnames_ctx_vars,
|
| 397 |
+
null_idxes,
|
| 398 |
+
)
|
| 399 |
+
|
| 400 |
+
is_py311_plus = sys.version_info >= (3, 11)
|
| 401 |
+
meta = ResumeFunctionMetadata(code)
|
| 402 |
+
|
| 403 |
+
def update(instructions: List[Instruction], code_options: Dict[str, Any]):
|
| 404 |
+
meta.instructions = copy.deepcopy(instructions)
|
| 405 |
+
|
| 406 |
+
args = [f"___stack{i}" for i in range(nstack)]
|
| 407 |
+
args.extend(v for v in argnames if v not in args)
|
| 408 |
+
freevars = tuple(code_options["co_cellvars"] or []) + tuple(
|
| 409 |
+
code_options["co_freevars"] or []
|
| 410 |
+
)
|
| 411 |
+
freevars = tuple(sorted(freevars))
|
| 412 |
+
code_options[
|
| 413 |
+
"co_name"
|
| 414 |
+
] = f"{TORCH_DYNAMO_RESUME_IN_PREFIX}_{code_options['co_name']}_at_{lineno}"
|
| 415 |
+
if is_py311_plus:
|
| 416 |
+
qualified_path = code_options["co_qualname"].rsplit(".", maxsplit=1)
|
| 417 |
+
if len(qualified_path) == 1:
|
| 418 |
+
code_options["co_qualname"] = code_options["co_name"]
|
| 419 |
+
else:
|
| 420 |
+
assert len(qualified_path) == 2
|
| 421 |
+
module_name, co_name = qualified_path
|
| 422 |
+
code_options[
|
| 423 |
+
"co_qualname"
|
| 424 |
+
] = f"{module_name}.{TORCH_DYNAMO_RESUME_IN_PREFIX}_{co_name}_at_{lineno}"
|
| 425 |
+
code_options["co_firstlineno"] = lineno
|
| 426 |
+
code_options["co_cellvars"] = ()
|
| 427 |
+
code_options["co_freevars"] = freevars
|
| 428 |
+
code_options["co_argcount"] = len(args)
|
| 429 |
+
code_options["co_posonlyargcount"] = 0
|
| 430 |
+
code_options["co_kwonlyargcount"] = 0
|
| 431 |
+
code_options["co_varnames"] = tuple(
|
| 432 |
+
args
|
| 433 |
+
+ [v for v in argnames_null if v not in args]
|
| 434 |
+
+ [v for v in code_options["co_varnames"] if v not in args]
|
| 435 |
+
)
|
| 436 |
+
code_options["co_flags"] = code_options["co_flags"] & ~(
|
| 437 |
+
CO_VARARGS | CO_VARKEYWORDS
|
| 438 |
+
)
|
| 439 |
+
target = next(i for i in instructions if i.offset == offset)
|
| 440 |
+
|
| 441 |
+
prefix = []
|
| 442 |
+
if is_py311_plus:
|
| 443 |
+
if freevars:
|
| 444 |
+
prefix.append(
|
| 445 |
+
create_instruction("COPY_FREE_VARS", arg=len(freevars))
|
| 446 |
+
)
|
| 447 |
+
prefix.append(create_instruction("RESUME", arg=0))
|
| 448 |
+
|
| 449 |
+
cleanup: List[Instruction] = []
|
| 450 |
+
hooks = {fn.stack_index: fn for fn in setup_fns}
|
| 451 |
+
hook_target_offsets = {
|
| 452 |
+
fn.stack_index: setup_fn_target_offsets[i]
|
| 453 |
+
for i, fn in enumerate(setup_fns)
|
| 454 |
+
}
|
| 455 |
+
offset_to_inst = {inst.offset: inst for inst in instructions}
|
| 456 |
+
# map old hook targets to new targets generated by the hook
|
| 457 |
+
old_hook_target_remap = {}
|
| 458 |
+
null_idxes_i = 0
|
| 459 |
+
stack_ctx_vars_d = dict(stack_ctx_vars) # type: ignore[var-annotated,arg-type]
|
| 460 |
+
for i in range(nstack):
|
| 461 |
+
while (
|
| 462 |
+
null_idxes_i < len(null_idxes)
|
| 463 |
+
and null_idxes[null_idxes_i] == i + null_idxes_i
|
| 464 |
+
):
|
| 465 |
+
prefix.append(create_instruction("PUSH_NULL"))
|
| 466 |
+
null_idxes_i += 1
|
| 467 |
+
prefix.append(create_instruction("LOAD_FAST", argval=f"___stack{i}"))
|
| 468 |
+
if i in hooks:
|
| 469 |
+
hook = hooks.pop(i)
|
| 470 |
+
hook_insts, exn_target = hook(code_options, cleanup)
|
| 471 |
+
prefix.extend(hook_insts)
|
| 472 |
+
if is_py311_plus:
|
| 473 |
+
hook_target_offset = hook_target_offsets.pop(i)
|
| 474 |
+
old_hook_target = offset_to_inst[hook_target_offset]
|
| 475 |
+
meta.prefix_block_target_offset_remap.append(hook_target_offset)
|
| 476 |
+
old_hook_target_remap[old_hook_target] = exn_target
|
| 477 |
+
real_i = i + null_idxes_i
|
| 478 |
+
if real_i in stack_ctx_vars_d:
|
| 479 |
+
# NOTE: we assume that current stack var is a context manager CLASS!
|
| 480 |
+
# Load args for context variable and construct it
|
| 481 |
+
prefix.extend(_load_tuple_and_call(stack_ctx_vars_d[real_i]))
|
| 482 |
+
|
| 483 |
+
if is_py311_plus:
|
| 484 |
+
# reverse the mapping since targets of later/nested contexts are inserted
|
| 485 |
+
# into the mapping later, but show up earlier in the prefix.
|
| 486 |
+
meta.prefix_block_target_offset_remap = list(
|
| 487 |
+
reversed(meta.prefix_block_target_offset_remap)
|
| 488 |
+
)
|
| 489 |
+
|
| 490 |
+
assert not hooks
|
| 491 |
+
|
| 492 |
+
# NOTE: we assume that local var is a context manager CLASS!
|
| 493 |
+
# initialize inactive context vars in argnames
|
| 494 |
+
for name, vals in argnames_ctx_vars:
|
| 495 |
+
prefix.append(create_instruction("LOAD_FAST", argval=name))
|
| 496 |
+
prefix.extend(_load_tuple_and_call(vals))
|
| 497 |
+
prefix.append(create_instruction("STORE_FAST", argval=name))
|
| 498 |
+
|
| 499 |
+
# 3.12+: store NULL into variables that were NULL
|
| 500 |
+
if argnames_null:
|
| 501 |
+
assert sys.version_info >= (3, 12)
|
| 502 |
+
for v in argnames_null:
|
| 503 |
+
assert v not in args
|
| 504 |
+
prefix.extend(
|
| 505 |
+
[
|
| 506 |
+
create_instruction("PUSH_NULL"),
|
| 507 |
+
create_instruction("STORE_FAST", argval=v),
|
| 508 |
+
]
|
| 509 |
+
)
|
| 510 |
+
|
| 511 |
+
prefix.append(create_jump_absolute(target))
|
| 512 |
+
|
| 513 |
+
# because the line number table monotonically increases from co_firstlineno
|
| 514 |
+
# remove starts_line for any instructions before the graph break instruction
|
| 515 |
+
# this will ensure the instructions after the break have the correct line numbers
|
| 516 |
+
for inst in instructions:
|
| 517 |
+
if inst.offset == target.offset:
|
| 518 |
+
break
|
| 519 |
+
inst.starts_line = None
|
| 520 |
+
if sys.version_info >= (3, 11):
|
| 521 |
+
inst.positions = None
|
| 522 |
+
|
| 523 |
+
if cleanup:
|
| 524 |
+
prefix.extend(cleanup)
|
| 525 |
+
prefix.extend(cls.unreachable_codes(code_options))
|
| 526 |
+
|
| 527 |
+
# remap original instructions' exception table entries
|
| 528 |
+
if old_hook_target_remap:
|
| 529 |
+
assert is_py311_plus
|
| 530 |
+
for inst in instructions:
|
| 531 |
+
if (
|
| 532 |
+
inst.exn_tab_entry
|
| 533 |
+
and inst.exn_tab_entry.target in old_hook_target_remap
|
| 534 |
+
):
|
| 535 |
+
inst.exn_tab_entry.target = old_hook_target_remap[
|
| 536 |
+
inst.exn_tab_entry.target
|
| 537 |
+
]
|
| 538 |
+
|
| 539 |
+
# TODO(jansel): add dead code elimination here
|
| 540 |
+
instructions[:] = prefix + instructions
|
| 541 |
+
|
| 542 |
+
new_code = transform_code_object(code, update)
|
| 543 |
+
ContinueExecutionCache.generated_code_metadata[new_code] = meta
|
| 544 |
+
return new_code
|
| 545 |
+
|
| 546 |
+
@staticmethod
|
| 547 |
+
def unreachable_codes(code_options) -> List[Instruction]:
|
| 548 |
+
"""Codegen a `raise None` to make analysis work for unreachable code"""
|
| 549 |
+
return [
|
| 550 |
+
create_instruction("LOAD_CONST", argval=None),
|
| 551 |
+
create_instruction("RAISE_VARARGS", arg=1),
|
| 552 |
+
]
|
| 553 |
+
|
| 554 |
+
@classmethod
|
| 555 |
+
def generate_based_on_original_code_object(
|
| 556 |
+
cls, code, lineno, offset: int, setup_fn_target_offsets: Tuple[int, ...], *args
|
| 557 |
+
):
|
| 558 |
+
"""
|
| 559 |
+
This handles the case of generating a resume into code generated
|
| 560 |
+
to resume something else. We want to always generate starting
|
| 561 |
+
from the original code object so that if control flow paths
|
| 562 |
+
converge we only generated 1 resume function (rather than 2^n
|
| 563 |
+
resume functions).
|
| 564 |
+
"""
|
| 565 |
+
|
| 566 |
+
meta: ResumeFunctionMetadata = ContinueExecutionCache.generated_code_metadata[
|
| 567 |
+
code
|
| 568 |
+
]
|
| 569 |
+
new_offset = None
|
| 570 |
+
|
| 571 |
+
def find_new_offset(
|
| 572 |
+
instructions: List[Instruction], code_options: Dict[str, Any]
|
| 573 |
+
):
|
| 574 |
+
nonlocal new_offset
|
| 575 |
+
(target,) = (i for i in instructions if i.offset == offset)
|
| 576 |
+
# match the functions starting at the last instruction as we have added a prefix
|
| 577 |
+
(new_target,) = (
|
| 578 |
+
i2
|
| 579 |
+
for i1, i2 in zip(reversed(instructions), reversed(meta.instructions))
|
| 580 |
+
if i1 is target
|
| 581 |
+
)
|
| 582 |
+
assert target.opcode == new_target.opcode
|
| 583 |
+
new_offset = new_target.offset
|
| 584 |
+
|
| 585 |
+
transform_code_object(code, find_new_offset)
|
| 586 |
+
|
| 587 |
+
if sys.version_info >= (3, 11):
|
| 588 |
+
# setup_fn_target_offsets currently contains the target offset of
|
| 589 |
+
# each setup_fn, based on `code`. When we codegen the resume function
|
| 590 |
+
# based on the original code object, `meta.code`, the offsets in
|
| 591 |
+
# setup_fn_target_offsets must be based on `meta.code` instead.
|
| 592 |
+
if not meta.block_target_offset_remap:
|
| 593 |
+
block_target_offset_remap = meta.block_target_offset_remap = {}
|
| 594 |
+
|
| 595 |
+
def remap_block_offsets(
|
| 596 |
+
instructions: List[Instruction], code_options: Dict[str, Any]
|
| 597 |
+
):
|
| 598 |
+
# NOTE: each prefix block generates exactly one PUSH_EXC_INFO,
|
| 599 |
+
# so we can tell which block a prefix PUSH_EXC_INFO belongs to,
|
| 600 |
+
# by counting. Then we can use meta.prefix_block-target_offset_remap
|
| 601 |
+
# to determine where in the original code the PUSH_EXC_INFO offset
|
| 602 |
+
# replaced.
|
| 603 |
+
prefix_blocks: List[Instruction] = []
|
| 604 |
+
for inst in instructions:
|
| 605 |
+
if len(prefix_blocks) == len(
|
| 606 |
+
meta.prefix_block_target_offset_remap
|
| 607 |
+
):
|
| 608 |
+
break
|
| 609 |
+
if inst.opname == "PUSH_EXC_INFO":
|
| 610 |
+
prefix_blocks.append(inst)
|
| 611 |
+
|
| 612 |
+
# offsets into prefix
|
| 613 |
+
for inst, o in zip(
|
| 614 |
+
prefix_blocks, meta.prefix_block_target_offset_remap
|
| 615 |
+
):
|
| 616 |
+
block_target_offset_remap[cast(int, inst.offset)] = o
|
| 617 |
+
|
| 618 |
+
# old bytecode targets are after the prefix PUSH_EXC_INFO's
|
| 619 |
+
old_start_offset = (
|
| 620 |
+
cast(int, prefix_blocks[-1].offset) if prefix_blocks else -1
|
| 621 |
+
)
|
| 622 |
+
# offsets into old bytecode
|
| 623 |
+
old_inst_offsets = sorted(
|
| 624 |
+
n for n in setup_fn_target_offsets if n > old_start_offset
|
| 625 |
+
)
|
| 626 |
+
targets = _filter_iter(
|
| 627 |
+
instructions, old_inst_offsets, lambda inst, o: inst.offset == o
|
| 628 |
+
)
|
| 629 |
+
new_targets = _filter_iter(
|
| 630 |
+
zip(reversed(instructions), reversed(meta.instructions)),
|
| 631 |
+
targets,
|
| 632 |
+
lambda v1, v2: v1[0] is v2,
|
| 633 |
+
)
|
| 634 |
+
for new, old in zip(new_targets, targets):
|
| 635 |
+
block_target_offset_remap[old.offset] = new[1].offset
|
| 636 |
+
|
| 637 |
+
transform_code_object(code, remap_block_offsets)
|
| 638 |
+
|
| 639 |
+
# if offset is not in setup_fn_target_offsets, it is an error
|
| 640 |
+
setup_fn_target_offsets = tuple(
|
| 641 |
+
meta.block_target_offset_remap[n] for n in setup_fn_target_offsets
|
| 642 |
+
)
|
| 643 |
+
return ContinueExecutionCache.lookup(
|
| 644 |
+
meta.code, lineno, new_offset, setup_fn_target_offsets, *args
|
| 645 |
+
)
|
| 646 |
+
|
| 647 |
+
|
| 648 |
+
"""
|
| 649 |
+
# partially finished support for with statements
|
| 650 |
+
|
| 651 |
+
def convert_locals_to_cells(
|
| 652 |
+
instructions: List[Instruction],
|
| 653 |
+
code_options: Dict[str, Any]):
|
| 654 |
+
|
| 655 |
+
code_options["co_cellvars"] = tuple(
|
| 656 |
+
var
|
| 657 |
+
for var in code_options["co_varnames"]
|
| 658 |
+
if var not in code_options["co_freevars"]
|
| 659 |
+
and not var.startswith("___stack")
|
| 660 |
+
)
|
| 661 |
+
cell_and_free = code_options["co_cellvars"] + code_options["co_freevars"]
|
| 662 |
+
for inst in instructions:
|
| 663 |
+
if str(inst.argval).startswith("___stack"):
|
| 664 |
+
continue
|
| 665 |
+
elif inst.opname == "LOAD_FAST":
|
| 666 |
+
inst.opname = "LOAD_DEREF"
|
| 667 |
+
elif inst.opname == "STORE_FAST":
|
| 668 |
+
inst.opname = "STORE_DEREF"
|
| 669 |
+
elif inst.opname == "DELETE_FAST":
|
| 670 |
+
inst.opname = "DELETE_DEREF"
|
| 671 |
+
else:
|
| 672 |
+
continue
|
| 673 |
+
inst.opcode = dis.opmap[inst.opname]
|
| 674 |
+
assert inst.argval in cell_and_free, inst.argval
|
| 675 |
+
inst.arg = cell_and_free.index(inst.argval)
|
| 676 |
+
|
| 677 |
+
def patch_setup_with(
|
| 678 |
+
instructions: List[Instruction],
|
| 679 |
+
code_options: Dict[str, Any]
|
| 680 |
+
):
|
| 681 |
+
nonlocal need_skip
|
| 682 |
+
need_skip = True
|
| 683 |
+
target_index = next(
|
| 684 |
+
idx for idx, i in enumerate(instructions) if i.offset == offset
|
| 685 |
+
)
|
| 686 |
+
assert instructions[target_index].opname == "SETUP_WITH"
|
| 687 |
+
convert_locals_to_cells(instructions, code_options)
|
| 688 |
+
|
| 689 |
+
stack_depth_before = nstack + stack_effect(instructions[target_index].opcode,
|
| 690 |
+
instructions[target_index].arg)
|
| 691 |
+
|
| 692 |
+
inside_with = []
|
| 693 |
+
inside_with_resume_at = None
|
| 694 |
+
stack_depth = stack_depth_before
|
| 695 |
+
idx = target_index + 1
|
| 696 |
+
for idx in range(idx, len(instructions)):
|
| 697 |
+
inst = instructions[idx]
|
| 698 |
+
if inst.opname == "BEGIN_FINALLY":
|
| 699 |
+
inside_with_resume_at = inst
|
| 700 |
+
break
|
| 701 |
+
elif inst.target is not None:
|
| 702 |
+
unimplemented("jump from with not supported")
|
| 703 |
+
elif inst.opname in ("BEGIN_FINALLY", "WITH_CLEANUP_START", "WITH_CLEANUP_FINISH", "END_FINALLY",
|
| 704 |
+
"POP_FINALLY", "POP_EXCEPT",
|
| 705 |
+
"POP_BLOCK", "END_ASYNC_FOR"):
|
| 706 |
+
unimplemented("block ops not supported")
|
| 707 |
+
inside_with.append(inst)
|
| 708 |
+
stack_depth += stack_effect(inst.opcode, inst.arg)
|
| 709 |
+
assert inside_with_resume_at
|
| 710 |
+
|
| 711 |
+
instructions = [
|
| 712 |
+
create_instruction("LOAD_FAST", f"___stack{i}") for i in range(nstack)
|
| 713 |
+
] + [
|
| 714 |
+
create_instruction("SETUP_WITH", target=instructions[target_index].target)
|
| 715 |
+
... call the function ...
|
| 716 |
+
unpack_tuple
|
| 717 |
+
] + [
|
| 718 |
+
create_instruction("JUMP_ABSOLUTE", target=inside_with_resume_at)
|
| 719 |
+
]
|
| 720 |
+
"""
|
pllava/lib/python3.10/site-packages/torch/_dynamo/source.py
ADDED
|
@@ -0,0 +1,759 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import collections
|
| 3 |
+
import dataclasses
|
| 4 |
+
import enum
|
| 5 |
+
from typing import Any, Optional, Union
|
| 6 |
+
|
| 7 |
+
from torch._guards import ChainedSource, GuardSource, Source
|
| 8 |
+
|
| 9 |
+
from . import utils
|
| 10 |
+
from .bytecode_transformation import create_call_function, create_instruction
|
| 11 |
+
from .utils import enum_repr
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# It shouldn't be supported to construct an NNModuleVariable inside an FSDP module,
# so those cases are omitted intentionally

# The tables below remap a base GuardSource (e.g. LOCAL/GLOBAL) to the variant
# recording how the enclosing nn.Module is tracked.  Each table also contains
# identity entries so remapping an already-remapped source is a no-op.

# represents nn.Modules tracked with NNModuleVariable (specialized is implicit in the variable name)
_GUARD_SOURCE_SPECIALIZED_NN_MODULE = {
    GuardSource.LOCAL: GuardSource.LOCAL_SPECIALIZED_NN_MODULE,
    GuardSource.GLOBAL: GuardSource.GLOBAL_SPECIALIZED_NN_MODULE,
    GuardSource.LOCAL_SPECIALIZED_NN_MODULE: GuardSource.LOCAL_SPECIALIZED_NN_MODULE,
    GuardSource.GLOBAL_SPECIALIZED_NN_MODULE: GuardSource.GLOBAL_SPECIALIZED_NN_MODULE,
    # Just to ensure that guard_source() works
    GuardSource.LOCAL_UNSPECIALIZED_NN_MODULE: GuardSource.LOCAL_UNSPECIALIZED_NN_MODULE,
    GuardSource.GLOBAL_UNSPECIALIZED_NN_MODULE: GuardSource.GLOBAL_UNSPECIALIZED_NN_MODULE,
    GuardSource.LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE: GuardSource.LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE,
    GuardSource.GLOBAL_UNSPECIALIZED_BUILTIN_NN_MODULE: GuardSource.GLOBAL_UNSPECIALIZED_BUILTIN_NN_MODULE,
}

# represents nn.Modules tracked with UnspecializedNNModuleVariable
_GUARD_SOURCE_UNSPECIALIZED_NN_MODULE = {
    GuardSource.LOCAL: GuardSource.LOCAL_UNSPECIALIZED_NN_MODULE,
    GuardSource.GLOBAL: GuardSource.GLOBAL_UNSPECIALIZED_NN_MODULE,
    GuardSource.LOCAL_UNSPECIALIZED_NN_MODULE: GuardSource.LOCAL_UNSPECIALIZED_NN_MODULE,
    GuardSource.GLOBAL_UNSPECIALIZED_NN_MODULE: GuardSource.GLOBAL_UNSPECIALIZED_NN_MODULE,
    # this happens for an UnspecializedNNModule submodule on a NNModuleVariable
    GuardSource.LOCAL_SPECIALIZED_NN_MODULE: GuardSource.LOCAL_UNSPECIALIZED_NN_MODULE,
    GuardSource.GLOBAL_SPECIALIZED_NN_MODULE: GuardSource.GLOBAL_UNSPECIALIZED_NN_MODULE,
    # Just to ensure that guard_source() works
    GuardSource.LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE: GuardSource.LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE,
    GuardSource.GLOBAL_UNSPECIALIZED_BUILTIN_NN_MODULE: GuardSource.GLOBAL_UNSPECIALIZED_BUILTIN_NN_MODULE,
}

# represents nn.Modules tracked with UnspecializedBuiltinNNModuleVariable
_GUARD_SOURCE_UNSPECIALIZED_BUILTIN_NN_MODULE = {
    GuardSource.LOCAL: GuardSource.LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE,
    GuardSource.GLOBAL: GuardSource.GLOBAL_UNSPECIALIZED_BUILTIN_NN_MODULE,
    GuardSource.LOCAL_UNSPECIALIZED_NN_MODULE: GuardSource.LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE,
    GuardSource.GLOBAL_UNSPECIALIZED_NN_MODULE: GuardSource.GLOBAL_UNSPECIALIZED_BUILTIN_NN_MODULE,
    GuardSource.LOCAL_SPECIALIZED_NN_MODULE: GuardSource.LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE,
    GuardSource.GLOBAL_SPECIALIZED_NN_MODULE: GuardSource.GLOBAL_UNSPECIALIZED_BUILTIN_NN_MODULE,
    # Just to ensure that guard_source() works
    GuardSource.LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE: GuardSource.LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE,
    GuardSource.GLOBAL_UNSPECIALIZED_BUILTIN_NN_MODULE: GuardSource.GLOBAL_UNSPECIALIZED_BUILTIN_NN_MODULE,
}

# remaps any tracked-module guard source onto its FSDP variant
_GUARD_SOURCE_FSDP_MODULE = {
    GuardSource.LOCAL: GuardSource.LOCAL_FSDP_MODULE,
    GuardSource.GLOBAL: GuardSource.GLOBAL_FSDP_MODULE,
    GuardSource.LOCAL_SPECIALIZED_NN_MODULE: GuardSource.LOCAL_FSDP_MODULE,
    GuardSource.GLOBAL_SPECIALIZED_NN_MODULE: GuardSource.GLOBAL_FSDP_MODULE,
    GuardSource.LOCAL_FSDP_MODULE: GuardSource.LOCAL_FSDP_MODULE,
    GuardSource.GLOBAL_FSDP_MODULE: GuardSource.GLOBAL_FSDP_MODULE,
    GuardSource.LOCAL_UNSPECIALIZED_NN_MODULE: GuardSource.LOCAL_FSDP_MODULE,
    GuardSource.GLOBAL_UNSPECIALIZED_NN_MODULE: GuardSource.GLOBAL_FSDP_MODULE,
    GuardSource.LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE: GuardSource.LOCAL_FSDP_MODULE,
    GuardSource.GLOBAL_UNSPECIALIZED_BUILTIN_NN_MODULE: GuardSource.GLOBAL_FSDP_MODULE,
}
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def is_constant_source(source):
    """Return True if *source* is treated as a constant (never guarded).

    A source counts as constant when it is a ConstantSource instance or when
    its guard_source() reports GuardSource.CONSTANT; sources whose
    guard_source() raises NotImplementedError are not constant.
    """
    if isinstance(source, ConstantSource):
        return True
    try:
        return source.guard_source() == GuardSource.CONSTANT
    except NotImplementedError:
        return False
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def reconstruct_getitem(
    source: Union["GetItemSource", "ODictGetItemSource"], codegen, index_is_slice
):
    """Emit bytecode pushing `source.base` followed by its index.

    The caller appends the actual subscript (or OrderedDict.__getitem__ call)
    instruction itself; this helper only lays out the two operands, so it can
    be shared by GetItemSource and ODictGetItemSource.
    """
    source.base.reconstruct(codegen)
    if isinstance(source.index, Source):
        # The index is itself a tracked value; rebuild it from its own source.
        source.index.reconstruct(codegen)
    else:
        if index_is_slice:
            # Slices are stored in reduced (hashable) form; rebuild the real
            # slice object before loading it as a constant.
            assert isinstance(source, GetItemSource)
            codegen.append_output(codegen.create_load_const(source.unpack_slice()))
        else:
            codegen.append_output(codegen.create_load_const(source.index))
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
@dataclasses.dataclass(frozen=True)
|
| 98 |
+
class LocalSource(Source):
|
| 99 |
+
local_name: str
|
| 100 |
+
cell_or_freevar: bool = False
|
| 101 |
+
|
| 102 |
+
def reconstruct(self, codegen):
|
| 103 |
+
codegen.append_output(codegen.create_load(self.local_name))
|
| 104 |
+
|
| 105 |
+
def guard_source(self):
|
| 106 |
+
return GuardSource.LOCAL
|
| 107 |
+
|
| 108 |
+
def name(self):
|
| 109 |
+
return f"L[{repr(self.local_name)}]"
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
@dataclasses.dataclass(frozen=True)
|
| 113 |
+
class SyntheticLocalSource(Source):
|
| 114 |
+
local_name: str
|
| 115 |
+
|
| 116 |
+
def reconstruct(self, codegen):
|
| 117 |
+
codegen.append_output(codegen.create_load(self.local_name))
|
| 118 |
+
|
| 119 |
+
def guard_source(self):
|
| 120 |
+
return GuardSource.SYNTHETIC_LOCAL
|
| 121 |
+
|
| 122 |
+
def name(self):
|
| 123 |
+
return f"SYNTHETIC_LOCAL[{self.local_name!r}]"
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
@dataclasses.dataclass(frozen=True)
class RandomValueSource(Source):
    """The i-th value drawn from the traced frame's replayed random calls."""

    # Position of this value within the per-frame random-values list.
    random_call_index: int

    def guard_source(self):
        return GuardSource.RANDOM_VALUE

    def reconstruct(self, codegen):
        # Index into the random-values variable the output graph materializes:
        # random_values_var[random_call_index]
        codegen.append_output(codegen.create_load(codegen.tx.output.random_values_var))
        codegen.append_output(codegen.create_load_const(self.random_call_index))
        codegen.append_output(create_instruction("BINARY_SUBSCR"))

    def name(self):
        return f"random_value_{self.random_call_index}"
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
@dataclasses.dataclass(frozen=True)
|
| 143 |
+
class GlobalSource(Source):
|
| 144 |
+
global_name: str
|
| 145 |
+
|
| 146 |
+
def reconstruct(self, codegen):
|
| 147 |
+
codegen.append_output(codegen.create_load_global(self.global_name, add=True))
|
| 148 |
+
|
| 149 |
+
def guard_source(self):
|
| 150 |
+
return GuardSource.GLOBAL
|
| 151 |
+
|
| 152 |
+
def name(self):
|
| 153 |
+
return f"G[{repr(self.global_name)}]"
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
@dataclasses.dataclass(frozen=True)
class GlobalWeakRefSource(Source):
    """The object obtained by *calling* a weakref stored in a global."""

    # Name of the global holding the weakref object.
    global_name: str

    def reconstruct(self, codegen):
        # Push NULL + the weakref, then call it with zero arguments.
        codegen.add_push_null(
            lambda: codegen.append_output(
                codegen.create_load_global(self.global_name, add=True)
            )
        )
        codegen.extend_output(create_call_function(0, False))

    def guard_source(self):
        return GuardSource.GLOBAL

    def name(self):
        return f"G[{repr(self.global_name)}]()"
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
@dataclasses.dataclass(frozen=True)
class WeakRefCallSource(ChainedSource):
    """The object obtained by calling the base source's weakref value."""

    def reconstruct(self, codegen):
        # Push NULL + the weakref from the base source, then call it.
        codegen.add_push_null(lambda: self.base.reconstruct(codegen))
        codegen.extend_output(create_call_function(0, False))

    def guard_source(self):
        return self.base.guard_source()

    def name(self):
        return f"{self.base.name()}()"
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
@dataclasses.dataclass(frozen=True)
|
| 189 |
+
class AttrSource(ChainedSource):
|
| 190 |
+
member: str
|
| 191 |
+
|
| 192 |
+
def __post_init__(self):
|
| 193 |
+
assert self.base, "Can't construct an AttrSource without a valid base source"
|
| 194 |
+
if "." in self.member:
|
| 195 |
+
member_parts = self.member.split(".")
|
| 196 |
+
object.__setattr__(
|
| 197 |
+
self, "base", AttrSource(self.base, ".".join(member_parts[:-1]))
|
| 198 |
+
)
|
| 199 |
+
object.__setattr__(self, "member", member_parts[-1])
|
| 200 |
+
|
| 201 |
+
def reconstruct(self, codegen):
|
| 202 |
+
self.base.reconstruct(codegen)
|
| 203 |
+
codegen.extend_output(codegen.create_load_attrs(self.member))
|
| 204 |
+
|
| 205 |
+
def guard_source(self):
|
| 206 |
+
return self.base.guard_source()
|
| 207 |
+
|
| 208 |
+
def name(self):
|
| 209 |
+
if not self.member.isidentifier():
|
| 210 |
+
return f"getattr({self.base.name()}, {self.member!r})"
|
| 211 |
+
return f"{self.base.name()}.{self.member}"
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
# Represents tensor.grad source. It could be represented by AttrSource as well.
# But, we could access grad field on tensor directly in C++ without going
# through the Python bytecodes. Therefore, we use a separate source for grad
# field.
@dataclasses.dataclass(frozen=True)
class GradSource(ChainedSource):
    """Access to a tensor's `.grad` attribute (distinct from AttrSource so
    guards can read it directly in C++)."""

    member: str = "grad"

    def reconstruct(self, codegen):
        self.base.reconstruct(codegen)
        codegen.extend_output(codegen.create_load_attrs(self.member))

    def guard_source(self):
        return self.base.guard_source()

    def name(self):
        return f"{self.base.name()}.{self.member}"
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
@dataclasses.dataclass(frozen=True)
class ParamBufferSource(AttrSource):
    """AttrSource for a parameter/buffer of a specialized nn.Module."""

    def guard_source(self):
        # Remap the base's guard source into its specialized-NN-module variant.
        return _GUARD_SOURCE_SPECIALIZED_NN_MODULE[self.base.guard_source()]
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
# Special AttrSource to differentiate module._buffers or module._parameters
@dataclasses.dataclass(frozen=True)
class UnspecializedParamBufferSource(AttrSource):
    """Marker subclass of AttrSource; behavior is identical, the type alone
    tags accesses into module._parameters/module._buffers."""

    pass
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
# This source is intended to be used in places where a source is needed but it is expected
|
| 246 |
+
# that the symbol will be simplified out later on. Symbols with ephemeral sources are
|
| 247 |
+
# prioritized to be simplified out when e.g. compared against a symbol without an ephemeral
|
| 248 |
+
# source. Guarding on this source is an error.
|
| 249 |
+
#
|
| 250 |
+
# Example: During subclass view fake-ification, any close-over ViewFunc state should be
|
| 251 |
+
# symbolicized / fake-ified to avoid invalid specialization during view replay. This source
|
| 252 |
+
# is useful for symbols utilized in the middle of the view chain that are not expected to be
|
| 253 |
+
# present within the final view shape metadata.
|
| 254 |
+
@dataclasses.dataclass(frozen=True)
|
| 255 |
+
class EphemeralSource(Source):
|
| 256 |
+
desc: Optional[str] = None
|
| 257 |
+
|
| 258 |
+
def guard_source(self):
|
| 259 |
+
return GuardSource.EPHEMERAL
|
| 260 |
+
|
| 261 |
+
def name(self):
|
| 262 |
+
return f"<ephemeral{': ' + self.desc if self.desc is not None else ''}>"
|
| 263 |
+
|
| 264 |
+
def make_guard(self):
|
| 265 |
+
raise NotImplementedError
|
| 266 |
+
|
| 267 |
+
def is_ephemeral(self):
|
| 268 |
+
return True
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
class TensorProperty(enum.Enum):
    """Tensor attributes that a TensorPropertySource can query."""

    SIZE = 0
    STRIDE = 1
    STORAGE_OFFSET = 2

    def method_name(self):
        """Return the torch.Tensor method name used to read this property."""
        if self is TensorProperty.SIZE:
            return "size"
        elif self is TensorProperty.STRIDE:
            return "stride"
        elif self is TensorProperty.STORAGE_OFFSET:
            return "storage_offset"
        # Fix: previously fell through and implicitly returned None; fail
        # loudly instead so adding an enum member can't silently emit `None`
        # into generated code (mirrors TensorPropertySource.name()).
        raise AssertionError(f"unhandled {self}")
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
@dataclasses.dataclass(frozen=True)
class TensorPropertySource(ChainedSource):
    """A size/stride/storage_offset value of the base tensor source."""

    prop: TensorProperty
    # Dimension index for SIZE/STRIDE; must be None for STORAGE_OFFSET.
    idx: Optional[int] = None  # None for STORAGE_OFFSET

    def __post_init__(self):
        assert self.base is not None
        if self.prop is TensorProperty.STORAGE_OFFSET:
            assert self.idx is None
        else:
            assert self.idx is not None

    def reconstruct(self, codegen):
        def gen_fn():
            self.base.reconstruct(codegen)
            codegen.append_output(codegen.create_load_attr(self.prop.method_name()))

        # Emit `tensor.size(idx)` / `tensor.stride(idx)` /
        # `tensor.storage_offset()` — one positional arg iff idx is set.
        codegen.add_push_null(gen_fn)
        if self.idx is not None:
            codegen.append_output(codegen.create_load_const(self.idx))
        codegen.extend_output(
            create_call_function(1 if self.idx is not None else 0, False)
        )

    def guard_source(self):
        return self.base.guard_source()

    def name(self):
        if self.prop is TensorProperty.SIZE:
            return f"{self.base.name()}.size()[{self.idx}]"
        elif self.prop is TensorProperty.STRIDE:
            return f"{self.base.name()}.stride()[{self.idx}]"
        elif self.prop is TensorProperty.STORAGE_OFFSET:
            assert self.idx is None
            return f"{self.base.name()}.storage_offset()"
        else:
            raise AssertionError(f"unhandled {self.prop}")
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
@dataclasses.dataclass(frozen=True)
|
| 325 |
+
class NegateSource(ChainedSource):
|
| 326 |
+
def __post_init__(self):
|
| 327 |
+
assert self.base is not None
|
| 328 |
+
|
| 329 |
+
def reconstruct(self, codegen):
|
| 330 |
+
raise NotImplementedError
|
| 331 |
+
|
| 332 |
+
def guard_source(self):
|
| 333 |
+
return self.base.guard_source()
|
| 334 |
+
|
| 335 |
+
def name(self):
|
| 336 |
+
# NB: use method call so that function stripping regexes work
|
| 337 |
+
return f"{self.base.name()}.__neg__()"
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
@dataclasses.dataclass(frozen=True)
|
| 341 |
+
class ConvertIntSource(ChainedSource):
|
| 342 |
+
def __post_init__(self):
|
| 343 |
+
assert self.base is not None
|
| 344 |
+
|
| 345 |
+
def reconstruct(self, codegen):
|
| 346 |
+
self.base.reconstruct(codegen)
|
| 347 |
+
|
| 348 |
+
def guard_source(self):
|
| 349 |
+
return self.base.guard_source()
|
| 350 |
+
|
| 351 |
+
def name(self):
|
| 352 |
+
return f"cast_symbool_to_symint_guardless({self.base.name()})"
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
@dataclasses.dataclass(frozen=True)
|
| 356 |
+
class FlattenScriptObjectSource(ChainedSource):
|
| 357 |
+
def __post_init__(self):
|
| 358 |
+
assert self.base is not None
|
| 359 |
+
|
| 360 |
+
def reconstruct(self, codegen):
|
| 361 |
+
self.base.reconstruct(codegen)
|
| 362 |
+
|
| 363 |
+
def guard_source(self):
|
| 364 |
+
return self.base.guard_source()
|
| 365 |
+
|
| 366 |
+
def name(self):
|
| 367 |
+
return f"{self.base.name()}.__obj_flatten__()"
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
@dataclasses.dataclass(frozen=True)
class ScriptObjectQualifiedNameSource(ChainedSource):
    """The qualified type name of a TorchScript object source."""

    def __post_init__(self):
        assert self.base is not None

    def reconstruct(self, codegen):
        self.base.reconstruct(codegen)

    def guard_source(self):
        return self.base.guard_source()

    def name(self):
        return f"{self.base.name()}._type().qualified_name()"
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
# NOTE: unlike its siblings this class carries no @dataclass decorator; it
# relies on the fields/__init__ inherited from ChainedSource.
class AttrProxySource(ChainedSource):
    """The underlying module behind an attribute proxy (`.get_base()`)."""

    def reconstruct(self, codegen):
        self.base.reconstruct(codegen)

    def guard_source(self):
        return self.base.guard_source()

    def name(self):
        return f"{self.base.name()}.get_base()"
|
| 394 |
+
|
| 395 |
+
|
| 396 |
+
@dataclasses.dataclass(frozen=True)
class DefaultsSource(ChainedSource):
    """An entry of a function's `__defaults__` / `__kwdefaults__`.

    `idx_key` is an int index into __defaults__, or a str key into
    __kwdefaults__ when `is_kw` is True.
    """

    idx_key: Union[int, str]
    is_kw: bool = False
    # Derived in __post_init__; excluded from init/repr/comparison.
    field: str = dataclasses.field(init=False, repr=False, compare=False)
    _name: str = dataclasses.field(init=False, repr=False, compare=False)

    def __post_init__(self):
        assert (
            self.base
        ), "Base must be a valid source in order to properly track and guard this Defaults to its origin."
        # object.__setattr__ is required because the dataclass is frozen.
        if self.is_kw:
            assert isinstance(self.idx_key, str)
            object.__setattr__(self, "field", "__kwdefaults__")
            object.__setattr__(
                self, "_name", f"{self.base.name()}.{self.field}['{self.idx_key}']"
            )
        else:
            assert isinstance(self.idx_key, int)
            object.__setattr__(self, "field", "__defaults__")
            object.__setattr__(
                self, "_name", f"{self.base.name()}.{self.field}[{self.idx_key}]"
            )

    def reconstruct(self, codegen):
        # Emit `base.__defaults__[idx]` (or `base.__kwdefaults__[key]`).
        self.base.reconstruct(codegen)
        codegen.extend_output(codegen.create_load_attrs(self.field))
        codegen.append_output(codegen.create_load_const(self.idx_key))
        codegen.append_output(create_instruction("BINARY_SUBSCR"))

    def guard_source(self):
        return self.base.guard_source()

    def name(self):
        return self._name
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
@dataclasses.dataclass(frozen=True)
class GetItemSource(ChainedSource):
    """Subscript access `base[index]` on the base source's value."""

    index: Any
    # True when `index` holds a slice in reduced form (see __post_init__).
    index_is_slice: bool = False

    def __post_init__(self):
        assert self.base is not None
        if isinstance(self.index, slice):
            # store the hashable version of the slice so the whole GetItemSource is hashable
            super().__setattr__("index", self.index.__reduce__())
            super().__setattr__("index_is_slice", True)

    def reconstruct(self, codegen):
        reconstruct_getitem(self, codegen, index_is_slice=self.index_is_slice)
        codegen.append_output(create_instruction("BINARY_SUBSCR"))

    def guard_source(self):
        return self.base.guard_source()

    def unpack_slice(self):
        # Rebuild the original slice object from its stored __reduce__ form.
        assert self.index_is_slice
        slice_class, slice_args = self.index
        return slice_class(*slice_args)

    def name(self):
        # Index can be of following types
        # 1) ConstDictKeySource
        # 2) enum.Enum
        # 3) index is a slice - example 1:4
        # 4) index is a constant - example string, integer
        if isinstance(self.index, Source):
            if not isinstance(self.index, ConstDictKeySource):
                raise ValueError(
                    "GetItemSource index must be a constant, enum or ConstDictKeySource"
                )
            return f"{self.base.name()}[{self.index.name()}]"
        elif self.index_is_slice:
            return f"{self.base.name()}[{self.unpack_slice()!r}]"
        elif isinstance(self.index, enum.Enum):
            return f"{self.base.name()}[{enum_repr(self.index, self.guard_source().is_local())}]"
        else:
            return f"{self.base.name()}[{self.index!r}]"
|
| 475 |
+
|
| 476 |
+
|
| 477 |
+
@dataclasses.dataclass(frozen=True)
class ConstDictKeySource(GetItemSource):
    """The i-th *key* of a dict source (index is a position, not a key)."""

    def is_dict_key(self):
        return True

    def reconstruct(self, codegen):
        # Calls utils.dict_keys_getitem(base, index) rather than subscripting.
        codegen.add_push_null(
            lambda: codegen.load_import_from(utils.__name__, "dict_keys_getitem")
        )
        self.base.reconstruct(codegen)
        codegen.append_output(codegen.create_load_const(self.index))
        codegen.extend_output(create_call_function(2, False))

    def name(self):
        # The list creation will be CSE'd by PyExprCSEPass
        return f"list({self.base.name()}.keys())[{self.index!r}]"
|
| 493 |
+
|
| 494 |
+
|
| 495 |
+
@dataclasses.dataclass(frozen=True)
class TupleIteratorGetItemSource(GetItemSource):
    """The i-th remaining element of a tuple iterator source."""

    def reconstruct(self, codegen):
        # Calls utils.tuple_iterator_getitem(base, index); plain subscripting
        # would consume the iterator.
        codegen.add_push_null(
            lambda: codegen.load_import_from(utils.__name__, "tuple_iterator_getitem")
        )
        self.base.reconstruct(codegen)
        codegen.append_output(codegen.create_load_const(self.index))
        codegen.extend_output(create_call_function(2, False))

    def name(self):
        return f"___tuple_iterator_getitem({self.base.name()}, {self.index!r})"
|
| 507 |
+
|
| 508 |
+
|
| 509 |
+
@dataclasses.dataclass(frozen=True)
|
| 510 |
+
class TypeSource(ChainedSource):
|
| 511 |
+
def __post_init__(self):
|
| 512 |
+
assert self.base is not None
|
| 513 |
+
|
| 514 |
+
def reconstruct(self, codegen):
|
| 515 |
+
codegen.add_push_null(lambda: codegen.load_import_from("builtins", "type"))
|
| 516 |
+
self.base.reconstruct(codegen)
|
| 517 |
+
codegen.extend_output(create_call_function(1, False))
|
| 518 |
+
|
| 519 |
+
def guard_source(self):
|
| 520 |
+
return self.base.guard_source()
|
| 521 |
+
|
| 522 |
+
def name(self):
|
| 523 |
+
return f"type({self.base.name()})"
|
| 524 |
+
|
| 525 |
+
|
| 526 |
+
@dataclasses.dataclass(frozen=True)
class ODictGetItemSource(ChainedSource):
    """`OrderedDict.__getitem__(base, index)` — bypasses any overridden
    __getitem__ on dict subclasses."""

    index: Any

    def __post_init__(self):
        assert self.base is not None

    def reconstruct(self, codegen):
        # Load the unbound OrderedDict.__getitem__, then base and index, and
        # call it with two arguments.
        codegen.add_push_null(
            lambda: codegen.append_output(
                codegen._create_load_const(collections.OrderedDict.__getitem__)
            )
        )
        reconstruct_getitem(self, codegen, index_is_slice=False)
        codegen.extend_output(create_call_function(2, False))

    def guard_source(self):
        return self.base.guard_source()

    def name(self):
        if isinstance(self.index, type):
            # Type-valued keys are spelled via a module load so the guard
            # expression can resolve them.
            rep = f'__load_module("{self.index.__module__}").{self.index.__qualname__}'
            return f"___odict_getitem({self.base.name()}, {rep})"
        elif isinstance(self.index, Source):
            return f"___odict_getitem({self.base.name()}, {self.index.name()})"
        else:
            return f"___odict_getitem({self.base.name()}, {self.index!r})"
|
| 553 |
+
|
| 554 |
+
|
| 555 |
+
@dataclasses.dataclass(frozen=True)
|
| 556 |
+
class OptimizerSource(ChainedSource):
|
| 557 |
+
def reconstruct(self, codegen):
|
| 558 |
+
self.base.reconstruct(codegen)
|
| 559 |
+
|
| 560 |
+
def guard_source(self):
|
| 561 |
+
return self.base.guard_source()
|
| 562 |
+
|
| 563 |
+
def name(self):
|
| 564 |
+
return self.base.name()
|
| 565 |
+
|
| 566 |
+
|
| 567 |
+
@dataclasses.dataclass(frozen=True)
|
| 568 |
+
class NNModuleSource(ChainedSource):
|
| 569 |
+
def reconstruct(self, codegen):
|
| 570 |
+
self.base.reconstruct(codegen)
|
| 571 |
+
|
| 572 |
+
def guard_source(self):
|
| 573 |
+
return _GUARD_SOURCE_SPECIALIZED_NN_MODULE[self.base.guard_source()]
|
| 574 |
+
|
| 575 |
+
def name(self):
|
| 576 |
+
return self.base.name()
|
| 577 |
+
|
| 578 |
+
|
| 579 |
+
@dataclasses.dataclass(frozen=True)
class UnspecializedNNModuleSource(NNModuleSource):
    """NNModuleSource for modules tracked with UnspecializedNNModuleVariable."""

    def guard_source(self):
        return _GUARD_SOURCE_UNSPECIALIZED_NN_MODULE[self.base.guard_source()]
|
| 583 |
+
|
| 584 |
+
|
| 585 |
+
@dataclasses.dataclass(frozen=True)
|
| 586 |
+
class UnspecializedBuiltinNNModuleSource(UnspecializedNNModuleSource):
|
| 587 |
+
def guard_source(self):
|
| 588 |
+
return _GUARD_SOURCE_UNSPECIALIZED_BUILTIN_NN_MODULE[self.base.guard_source()]
|
| 589 |
+
|
| 590 |
+
|
| 591 |
+
@dataclasses.dataclass(frozen=True)
class FSDPNNModuleSource(NNModuleSource):
    """NNModuleSource for modules wrapped by FSDP."""

    def guard_source(self):
        return _GUARD_SOURCE_FSDP_MODULE[self.base.guard_source()]
|
| 595 |
+
|
| 596 |
+
|
| 597 |
+
@dataclasses.dataclass(frozen=True)
|
| 598 |
+
class GlobalStateSource(Source):
|
| 599 |
+
def name(self):
|
| 600 |
+
return ""
|
| 601 |
+
|
| 602 |
+
def guard_source(self):
|
| 603 |
+
return GuardSource.GLOBAL
|
| 604 |
+
|
| 605 |
+
|
| 606 |
+
@dataclasses.dataclass(frozen=True)
class TorchFunctionModeStackSource(Source):
    """The mode at position `ind` of the torch-function mode stack."""

    ind: int

    def name(self):
        return ""

    def _get_index(self):
        # Imported lazily to avoid a circular import at module load time.
        from .variables.torch_function import TorchFunctionModeStackVariable

        return TorchFunctionModeStackVariable.get_mode_index(self.ind)

    def reconstruct(self, codegen):
        # Emit get_torch_function_mode_stack_at(<resolved index>).
        codegen.add_push_null(
            lambda: codegen.load_import_from(
                utils.__name__, "get_torch_function_mode_stack_at"
            )
        )
        codegen.extend_output([codegen.create_load_const(self._get_index())])
        codegen.extend_output(create_call_function(1, False))

    def guard_source(self):
        return GuardSource.GLOBAL
|
| 629 |
+
|
| 630 |
+
|
| 631 |
+
@dataclasses.dataclass(frozen=True)
|
| 632 |
+
class ConstantSource(Source):
|
| 633 |
+
source_name: str
|
| 634 |
+
|
| 635 |
+
def reconstruct(self, codegen):
|
| 636 |
+
codegen.append_output(codegen.create_load_global(self.source_name, add=False))
|
| 637 |
+
|
| 638 |
+
def guard_source(self):
|
| 639 |
+
return GuardSource.CONSTANT
|
| 640 |
+
|
| 641 |
+
def name(self):
|
| 642 |
+
return self.source_name
|
| 643 |
+
|
| 644 |
+
def make_guard(self, fn):
|
| 645 |
+
raise NotImplementedError
|
| 646 |
+
|
| 647 |
+
|
| 648 |
+
@dataclasses.dataclass(frozen=True)
|
| 649 |
+
class NumpyTensorSource(ChainedSource):
|
| 650 |
+
def name(self) -> str:
|
| 651 |
+
return f"___from_numpy({self.base.name()})"
|
| 652 |
+
|
| 653 |
+
def guard_source(self):
|
| 654 |
+
return self.base.guard_source()
|
| 655 |
+
|
| 656 |
+
def reconstruct(self, codegen):
|
| 657 |
+
codegen.add_push_null(lambda: codegen.load_import_from("torch", "as_tensor"))
|
| 658 |
+
self.base.reconstruct(codegen)
|
| 659 |
+
codegen.extend_output(create_call_function(1, False))
|
| 660 |
+
|
| 661 |
+
|
| 662 |
+
@dataclasses.dataclass(frozen=True)
|
| 663 |
+
class SubclassAttrListSource(ChainedSource):
|
| 664 |
+
def name(self) -> str:
|
| 665 |
+
return f"{self.base.name()}.__tensor_flatten__()[0]"
|
| 666 |
+
|
| 667 |
+
def guard_source(self):
|
| 668 |
+
return self.base.guard_source()
|
| 669 |
+
|
| 670 |
+
|
| 671 |
+
# NB: We don't expect you to actually ever generate guards against this
# source, it is ephemeral
@dataclasses.dataclass(frozen=True)
class FloatTensorSource(ChainedSource):
    """A Python float viewed as a tensor (`___as_tensor(base)`); ephemeral."""

    def name(self) -> str:
        return f"___as_tensor({self.base.name()})"

    def guard_source(self):
        return self.base.guard_source()
|
| 680 |
+
|
| 681 |
+
|
| 682 |
+
@dataclasses.dataclass(frozen=True)
|
| 683 |
+
class CallMethodItemSource(ChainedSource):
|
| 684 |
+
def name(self) -> str:
|
| 685 |
+
return f"{self.base.name()}.item()"
|
| 686 |
+
|
| 687 |
+
def guard_source(self):
|
| 688 |
+
return self.base.guard_source()
|
| 689 |
+
|
| 690 |
+
|
| 691 |
+
# This is a synthetic source that is associated with the singleton
# shape env guard we always register for all frames. We get the actual
# guard contents from the ambient ShapeEnv
@dataclasses.dataclass(frozen=True)
class ShapeEnvSource(Source):
    """Synthetic source for the per-frame ShapeEnv guard; has no name."""

    def name(self):
        return ""

    def guard_source(self):
        return GuardSource.SHAPE_ENV
|
| 701 |
+
|
| 702 |
+
|
| 703 |
+
@dataclasses.dataclass(frozen=True)
class BackwardStateSource(Source):
    """Synthetic source for compiled-autograd backward state; has no name."""

    def name(self):
        return ""

    def guard_source(self):
        return GuardSource.BACKWARD_STATE
|
| 710 |
+
|
| 711 |
+
|
| 712 |
+
def is_from_local_source(source: Source, *, allow_cell_or_freevar=True):
|
| 713 |
+
if isinstance(source, ChainedSource):
|
| 714 |
+
return is_from_local_source(
|
| 715 |
+
source.base, allow_cell_or_freevar=allow_cell_or_freevar
|
| 716 |
+
)
|
| 717 |
+
if not isinstance(source, LocalSource):
|
| 718 |
+
return False
|
| 719 |
+
if not allow_cell_or_freevar and source.cell_or_freevar:
|
| 720 |
+
return False
|
| 721 |
+
return True
|
| 722 |
+
|
| 723 |
+
|
| 724 |
+
def is_from_unspecialized_param_buffer_source(source: Source):
    """True if any link in the source chain is an UnspecializedParamBufferSource."""
    while True:
        if isinstance(source, UnspecializedParamBufferSource):
            return True
        if not isinstance(source, ChainedSource):
            return False
        source = source.base
|
| 730 |
+
|
| 731 |
+
|
| 732 |
+
def is_from_flatten_script_object_source(source: Source):
    """True if any link in the source chain is a FlattenScriptObjectSource."""
    while True:
        if isinstance(source, FlattenScriptObjectSource):
            return True
        if not isinstance(source, ChainedSource):
            return False
        source = source.base
|
| 738 |
+
|
| 739 |
+
|
| 740 |
+
def is_from_optimizer_source(source: Source):
    """True if any link in the source chain is an OptimizerSource."""
    while True:
        if isinstance(source, OptimizerSource):
            return True
        if not isinstance(source, ChainedSource):
            return False
        source = source.base
|
| 746 |
+
|
| 747 |
+
|
| 748 |
+
# TODO: can probably write a generic "test this on everything in the chain"
# helper
def is_from_defaults(source: Source):
    """True if any link in the source chain is a DefaultsSource."""
    while True:
        if isinstance(source, DefaultsSource):
            return True
        if not isinstance(source, ChainedSource):
            return False
        source = source.base
|
| 756 |
+
|
| 757 |
+
|
| 758 |
+
def is_cell_contents(source: Source):
    """Return True if *source* is an attribute access of a cell object's
    ``cell_contents`` field."""
    if not isinstance(source, AttrSource):
        return False
    return source.member == "cell_contents"
|
pllava/lib/python3.10/site-packages/torch/_dynamo/trace_rules.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/utils.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (3.93 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/base.cpython-310.pyc
ADDED
|
Binary file (12.3 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builder.cpython-310.pyc
ADDED
|
Binary file (58 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builtin.cpython-310.pyc
ADDED
|
Binary file (52.7 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/constant.cpython-310.pyc
ADDED
|
Binary file (8.56 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/ctx_manager.cpython-310.pyc
ADDED
|
Binary file (37.3 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/dicts.cpython-310.pyc
ADDED
|
Binary file (33.1 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/distributed.cpython-310.pyc
ADDED
|
Binary file (13.3 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/functions.cpython-310.pyc
ADDED
|
Binary file (35.1 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/higher_order_ops.cpython-310.pyc
ADDED
|
Binary file (49.4 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/iter.cpython-310.pyc
ADDED
|
Binary file (14.3 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/lazy.cpython-310.pyc
ADDED
|
Binary file (6.16 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/lists.cpython-310.pyc
ADDED
|
Binary file (31 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/misc.cpython-310.pyc
ADDED
|
Binary file (50.4 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/nn_module.cpython-310.pyc
ADDED
|
Binary file (29.2 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/optimizer.cpython-310.pyc
ADDED
|
Binary file (9.23 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/script_object.cpython-310.pyc
ADDED
|
Binary file (3.29 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/sdpa.cpython-310.pyc
ADDED
|
Binary file (3.31 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/tensor.cpython-310.pyc
ADDED
|
Binary file (38 kB). View file
|
|
|