diff --git a/.gitattributes b/.gitattributes index 1c2371099a11d2cbf7c40b0da771837597d63ff6..24017d9c75c771bf2aafc7a8878205bb78e5a6eb 100644 --- a/.gitattributes +++ b/.gitattributes @@ -308,3 +308,5 @@ pllava/lib/python3.10/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=l pllava/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text pllava/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_wester.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text pllava/lib/python3.10/site-packages/sympy/solvers/__pycache__/solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +pllava/lib/python3.10/site-packages/sympy/solvers/ode/tests/__pycache__/test_systems.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +pllava/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophantine.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text diff --git a/pllava/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophantine.cpython-310.pyc b/pllava/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophantine.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff98cca6ba7ec9a5410288dcd77e9e789e5f69ce --- /dev/null +++ b/pllava/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophantine.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8cec32ae3a222bc4422bccf19ad22cbd790c72380a493add8286b0ac9426d4f2 +size 106614 diff --git a/pllava/lib/python3.10/site-packages/sympy/solvers/ode/tests/__pycache__/test_systems.cpython-310.pyc b/pllava/lib/python3.10/site-packages/sympy/solvers/ode/tests/__pycache__/test_systems.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b05398e86b839e8b7be0fe09b5145fb1da3c950a --- /dev/null +++ b/pllava/lib/python3.10/site-packages/sympy/solvers/ode/tests/__pycache__/test_systems.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aec3146ddca95163efdce7fc532f74276fd61b5eb913b4d7f0de225ae548bfd2 +size 112034 diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/__pycache__/__init__.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d985d55f77881e1fc3b398c886b70d155d76048 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/__pycache__/__init__.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/__pycache__/guards.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/__pycache__/guards.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..357b0954568c1c9fa61a2dd9e7cad2a50d34a5eb Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/__pycache__/guards.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/__pycache__/mutation_guard.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/__pycache__/mutation_guard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..405e1f06cd991d4123bc2a25e78258bd19be2220 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/__pycache__/mutation_guard.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__init__.py 
b/pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/__init__.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81a24bded898647363472a9ec0b92ee6849277ac Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/__init__.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/debugging.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/debugging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3418cb93774f47b5b3b57eb1537fa172e8e93041 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/debugging.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/registry.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/registry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc7eb812bf9d0e80f42216659499a1250c77bf3d Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/registry.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/cache_size.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/cache_size.py new file mode 100644 index 0000000000000000000000000000000000000000..c5a793aa06c47ebf800ae705d85cbcc484de9c46 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/cache_size.py @@ -0,0 +1,185 @@ +# mypy: allow-untyped-defs +import logging +import types +import weakref +from dataclasses import dataclass +from typing import Tuple + +from torch._guards import CompileId + +from . import config + + +log = logging.getLogger(__name__) +""" +[Note on cache size limit] + +Background - TorchDynamo cache is a linked list. Each cache entry is a +(check_fn, out_code, next pointer). These are stored on the f_code's co_extra +scratch space. When a frame is invoked, we walk this linked list and run +check_fn in each cache_entry to decide if the frame needs recompilation. If none +of the check_fn's returns True, we recompile and add a new entry. To ensure we +don't end up recompiling infinitely, we put limits on the cache size. + +There are two limits +1) cache_size_limit +2) accumulated_cache_size_limit + + +Earlier we used to have only limit - maximum number of entries in 1 cache line +(which is now represented by (2) above). So, why do we need two limits? Lets try +to understand that. + +In general, we want our cache limit value to be a small number (e.g. 8 or even +lower). This ensures that for frames that cause too many recompilation fall to +eager quickly. However, there is another problem that prevents us from lowering +the value of cache_size_limit. This is due to ID_MATCH'd guards. Today, we put +ID_MATCH guards on nn module if there is a graph break. This means we will have +many recompilations for the same code object because the ID_MATCH guard fails +for different instances of the nn module. This is a common pattern in how models +are authored. Therefore, this requires us to keep the cache_size_limit high. 
+ +We resolve this by introducing these two limits. The first limit (1) limits the +number of cache entries that have an ID_MATCH'd guard for an nn module instance. +And, (2)nd limit becomes a safeguard mechanism to have a maximum compilations +for a code object. One important question is - what is the limit for the code +object that does not have any ID_MATCH guard? For such code objects, we choose +(1) as the cache size limit. + +Lets take an example to understand how these limits help. Suppose, we have 16 +instances of a nn module and we ID_MATCH on the self object. Further, suppose +the inputs to these functions have varying batch size, leading to one +recompilation. In total, there will be 32 recompilations, and therefore 32 cache +entries on the forward code object. In the older case when we had only 1 limit, +our cache size limit must be >= 32 to capture all these recompilations. Now, +suppose there is a separate function in the same program which is very dynamic +and unsuitable for compilation. Such a function will need to undergo 32 +compilations to burst the cache and fallback to eager. These 32 recompilations +are too many and we want to fallback for these compilation-unfriendly functions +sooner. + +In the new scenario, we can have (1) cache_size_limit = 2, (2) +accumulated_cache_size_limit = 32. This means that each ID_MATCH'd object can +have maximum of two cache entries, and the maximum number of cache entries +(irrespective of ID_MATCH obj) is 32. This covers the case of forward code +object which has 32 recompilations. For the other function, the one unsuitable +for recompilation, our limit is 2. So, we will burst the cache in just 2 +recompilations. In this manner, these 2 limits help us resolve the tension +mentioned earlier. +""" + + +@dataclass +class CacheSizeRelevantForFrame: + """ + We track the number of cache entries that have same id_match objects as the + given frame. + + TODO(janimesh) - Consider adding a map from tuple_of_match_ids to count - + https://github.com/pytorch/pytorch/pull/107496#discussion_r1304564682 - this + could be useful for debugging as well. + """ + + # Total number of CacheEntry objects in the Dynamo linked list + num_cache_entries: int = 0 + + # Number of CacheEntry objects having same ID_MATCH'd objects as given frame. + num_cache_entries_with_same_id_matched_objs: int = 0 + + def will_compilation_exceed(self, limit: int) -> bool: + # Checks if a compilation will exceed the given limit (thats why >=). + return ( + self.will_compilation_exceed_accumulated_limit() + or self.will_compilation_exceed_specific_limit(limit) + ) + + def will_compilation_exceed_accumulated_limit(self) -> bool: + return self.num_cache_entries >= config.accumulated_cache_size_limit + + def will_compilation_exceed_specific_limit(self, limit: int) -> bool: + return self.num_cache_entries_with_same_id_matched_objs >= limit + + +def _get_weakref_from_f_locals(frame: types.FrameType, local_name: str): + obj = frame.f_locals.get(local_name, None) + weak_id = None + try: + weak_id = weakref.ref(obj) + except TypeError: + pass # cannot weakref bool object + return weak_id + + +def _has_same_id_matched_objs(frame: types.FrameType, cache_entry) -> bool: + """ + Checks if the ID_MATCH'd objects saved on cache_entry are same as the ones + in frame.f_locals. 
+ """ + if not cache_entry: + return False + + for ( + local_name, + weakref_from_cache_entry, + ) in cache_entry.check_fn.id_matched_objs.items(): + if weakref_from_cache_entry() is not None: + weakref_from_frame = _get_weakref_from_f_locals(frame, local_name) + if weakref_from_frame != weakref_from_cache_entry: + return False + + # Also covers the case where no ID_MATCH objects are saved in frame.f_locals + return True + + +def compute_cache_size( + frame: types.FrameType, cache_entry +) -> CacheSizeRelevantForFrame: + # Walk the linked list to calculate the cache size + num_cache_entries = 0 + num_cache_entries_with_same_id_matched_objs = 0 + + while cache_entry: + num_cache_entries += 1 + # Track the number of cache entries having same ID_MATCH'd objects as + # that of frame.f_locals. This will be used later to compare against the + # cache_size_limit. + if _has_same_id_matched_objs(frame, cache_entry): + num_cache_entries_with_same_id_matched_objs += 1 + cache_entry = cache_entry.next + + return CacheSizeRelevantForFrame( + num_cache_entries, num_cache_entries_with_same_id_matched_objs + ) + + +def is_recompilation(cache_size: CacheSizeRelevantForFrame) -> bool: + """ + If the frame (earlier parsed by compute_cache_size) has more than 1 cache + entry with same ID_MATCH'd objects, then its a recompilation. + """ + # Note that you can have multiple entries in the cache but still not a + # recompile, e.g., you can have 64 nn module instances, each one having an + # ID_MATCH guard, and each one having just 1 cache entry in the cache. In + # this case, we can have 64 entries in the cache, but no recompilation + # because there is only one entry for each id_matched_obj. + return cache_size.will_compilation_exceed(1) + + +def exceeds_cache_size_limit( + cache_size: CacheSizeRelevantForFrame, compile_id: CompileId +) -> Tuple[bool, str]: + """ + Checks if we are exceeding the cache size limit. + """ + if cache_size.will_compilation_exceed_accumulated_limit(): + return True, "accumulated_cache_size_limit" + if cache_size.will_compilation_exceed_specific_limit(config.cache_size_limit): + return True, "cache_size_limit" + # NOTE this check is needed in the case that the frame's cache doesn't grow + # and we keep recompiling. This can happen if the guard check_fn becomes invalidated, + # e.g. due to guarded objects being freed. This technically makes the + # will_compilation_exceed_accumulated_limit check unnecessary, but we will keep the + # check in case we have a better fix in the future. + if compile_id.frame_compile_id >= config.accumulated_cache_size_limit: + return True, "accumulated_cache_size_limit" + return False, "" diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/codegen.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/codegen.py new file mode 100644 index 0000000000000000000000000000000000000000..5cc8361a974eb65de316009be7dcae05626056eb --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/codegen.py @@ -0,0 +1,511 @@ +# mypy: allow-untyped-defs +import collections +import dataclasses +import re +import sys +import types +from typing import Counter, Dict, List, Optional + +import torch.nn + +from . 
import utils +from .bytecode_transformation import ( + add_push_null, + add_push_null_call_function_ex, + create_call_function, + create_call_method, + create_dup_top, + create_instruction, + create_load_method, + create_rot_n, + Instruction, +) +from .exc import unimplemented +from .source import AttrSource, Source +from .utils import is_safe_constant, rot_n_helper +from .variables.base import VariableTracker +from .variables.nn_module import NNModuleVariable +from .variables.tensor import ( + NumpyNdarrayVariable, + SymNodeVariable, + TensorVariable, + UnspecializedPythonVariable, +) +from .variables.torch_function import TensorWithTFOverrideVariable + + +@dataclasses.dataclass +class GraphOutputEntry: + index: int + variable: VariableTracker + + +class PyCodegen: + """ + Helper class uses for constructing Python bytecode + """ + + def __init__( + self, + tx=None, + root: Optional[torch.nn.Module] = None, + graph_output_var: Optional[str] = None, + tempvars=None, + ) -> None: + self.root = root + self.top_of_stack: Optional[VariableTracker] = None + self.uses: Counter[VariableTracker] = collections.Counter() + self.graph_outputs: Dict[int, GraphOutputEntry] = {} + self._output: List[Instruction] = [] + self.tempvars = tempvars or {} + self.tx = tx + self.graph_output_var = graph_output_var + self.code_options = self.tx.output.code_options + self.cell_and_freevars = self.tx.cell_and_freevars + self.new_var = self.tx.output.new_var + self.mutable_side_effects_from_source = False + self.value_from_source: bool = True + + def restore_stack(self, stack_values, *, value_from_source=True): + prior = self.mutable_side_effects_from_source + self.mutable_side_effects_from_source = True + prev = self.value_from_source + self.value_from_source &= value_from_source + try: + self.foreach(stack_values) + finally: + self.mutable_side_effects_from_source = prior + self.value_from_source = prev + + def graph_output_vars(self): + return [x.variable for x in self.graph_outputs.values()] + + def call_reconstruct(self, value): + res = value.reconstruct(self) + assert res is None, f"reconstruct!=None {value}" + + def add_push_null(self, gen_fn, call_function_ex=False): + """ + `gen_fn` generates instructions via PyCodegen methods + that push a single callable to the stack. + + `add_push_null` pushes a NULL to the stack before or after the + instructions generated by `gen_fn`, depending on Python version. + + Will attempt to use the NULL push bit for instructions + with such bits (LOAD_GLOBAL 3.11+, LOAD_ATTR 3.12+, LOAD_SUPER_ATTR). + """ + old_len = len(self._output) + if sys.version_info < (3, 13): + # gen_fn may DUP_TOP instead if TOS is not cleared. 
+ # Will cause problems since NULL will be pushed right + # before the generated instructions in <= 3.12 + self.clear_tos() + gen_fn() + # inplace modify self._output + added_insts = self._output[old_len:] + del self._output[old_len:] + if call_function_ex: + self._output.extend(add_push_null_call_function_ex(added_insts)) + else: + self._output.extend(add_push_null(added_insts)) + if sys.version_info >= (3, 13): + # NULL will be at top of stack + self.clear_tos() + + def __call__(self, value, allow_cache=True): + """Generate code such that top-of-stack (TOS) is set to value""" + if isinstance(value, Source): + self.call_reconstruct(value) + self.clear_tos() + return + + assert isinstance(value, VariableTracker) + output = self._output + graph_outputs = self.graph_outputs + + if self.top_of_stack is value and allow_cache: + output.append(create_dup_top()) + return + + if self.mutable_side_effects_from_source: + # this is needed to get aliasing relationships right + # value.mutable_local.source will get mutated to hold `value` + # mutable_side_effects_from_source=False is used to codegen the mutation + # mutable_side_effects_from_source=True is used to codegen a reference + from .side_effects import MutableSideEffects + + if isinstance(value.mutable_local, MutableSideEffects): + self(value.mutable_local.source) + return + + if allow_cache: + if value.mutable_local and value.mutable_local in self.tempvars: + output.append(self.create_load(self.tempvars[value.mutable_local])) + self.top_of_stack = value + return + if self.tempvars.get(value) is not None: + output.append(self.create_load(self.tempvars[value])) + self.top_of_stack = value + return + + if value.source is not None and allow_cache and self.value_from_source: + self.call_reconstruct(value.source) + elif value.is_python_constant() and is_safe_constant( + value.as_python_constant() + ): + output.append(self.create_load_const(value.as_python_constant())) + elif isinstance(value, TensorWithTFOverrideVariable): + graph_outputs_key = self.add_graph_output(value) + + self.add_push_null( + lambda: self.load_import_from(utils.__name__, "to_subclass") + ) + self.load_graph_output(graph_outputs[graph_outputs_key].index) + output.append( + self.create_load_global( + value.global_mangled_class_name(self.tx), add=True + ) + ) + output.extend(create_call_function(2, False)) + elif ( + isinstance(value, SymNodeVariable) + and value.python_type() == float + and not self.tx.export + ): + # This is a little unusual; force the output convention to be a + # Tensor here. Don't do this for export because this is + # apparently load bearing for export tests (but I am a bit + # doubtful it actually works in the real world) + # NB: It works to add_graph_output on a computed expression + # as_tensor here, because we memoize as_tensor calls on + # SymNodeVariable! 
+ graph_outputs_key = self.add_graph_output(value.as_tensor(self.tx)) + + def gen_fn(): + self.load_graph_output(graph_outputs[graph_outputs_key].index) + output.append(self.create_load_attr("item")) + + self.add_push_null(gen_fn) + output.extend(create_call_function(0, False)) + elif isinstance( + value, + ( + TensorVariable, + SymNodeVariable, + UnspecializedPythonVariable, + NumpyNdarrayVariable, + ), + ): + graph_outputs_key = self.add_graph_output(value) + + if isinstance(value, NumpyNdarrayVariable): + self.add_push_null( + lambda: self.load_import_from(utils.__name__, "to_numpy_helper") + ) + self.load_graph_output(graph_outputs[graph_outputs_key].index) + output.extend(create_call_function(1, False)) + elif isinstance(value, UnspecializedPythonVariable) and value.need_unwrap: + + def gen_fn(): + self.load_graph_output(graph_outputs[graph_outputs_key].index) + output.append(self.create_load_attr("item")) + + self.add_push_null(gen_fn) + output.extend(create_call_function(0, False)) + else: + self.load_graph_output(graph_outputs[graph_outputs_key].index) + elif isinstance(value, NNModuleVariable): + parts = value.module_key.split(".") + if parts[0] in self.code_options["co_varnames"]: + output.append(self.create_load(parts[0])) + parts = parts[1:] + else: + assert self.root is not None + output.append(self.create_load_output(self.root)) + for part in parts: + output.append(self.create_load_attr(part)) + else: + self.uses[value] += 1 + try: + self.call_reconstruct(value) + except NotImplementedError: + unimplemented(f"reconstruct: {value}") + if allow_cache and value in self.tempvars: + self._output.append(create_dup_top()) + self.add_cache(value) + + self.top_of_stack = value + + def add_graph_output(self, value): + graph_outputs_key = id(value.as_proxy()) + if graph_outputs_key not in self.graph_outputs: + self.graph_outputs[graph_outputs_key] = GraphOutputEntry( + len(self.graph_outputs), value + ) + return graph_outputs_key + + def load_graph_output(self, index): + output = self._output + output.append(self.create_load(self.graph_output_var)) + output.append(self._create_load_const(index)) + output.append(create_instruction("BINARY_SUBSCR")) + + def add_cache(self, value): + var = self.new_var() + self.tempvars[value] = var + if value.mutable_local: + self.tempvars[value.mutable_local] = var + self._output.append(self.create_store(var)) + + def foreach(self, items): + for i in items: + self(i) + + def setup_globally_cached(self, name, value): + """Store value in a new global""" + name = re.sub(r"[^a-zA-Z0-9_]+", "_", name) + f_globals = self.tx.f_globals + if name in f_globals: + assert id(f_globals[name]) == id(value) + else: + f_globals[name] = value + return [self.create_load_global(name, add=True)] + + def clear_tos(self): + self.top_of_stack = None + + def append_output(self, inst): + assert isinstance(inst, Instruction) + self._output.append(inst) + self.clear_tos() + + def extend_output(self, insts): + assert all(isinstance(x, Instruction) for x in insts) + self._output.extend(insts) + self.clear_tos() + + def get_instructions(self) -> List[Instruction]: + return self._output + + def create_load(self, name) -> Instruction: + if name in self.cell_and_freevars(): + return create_instruction("LOAD_DEREF", argval=name) + assert name in self.code_options["co_varnames"], f"{name} missing" + return create_instruction("LOAD_FAST", argval=name) + + def create_load_closure(self, name) -> Instruction: + assert name in self.cell_and_freevars() + inst_name = "LOAD_FAST" if 
sys.version_info >= (3, 13) else "LOAD_CLOSURE" + return create_instruction(inst_name, argval=name) + + def create_store(self, name) -> Instruction: + if name in self.cell_and_freevars(): + return create_instruction("STORE_DEREF", argval=name) + assert name in self.code_options["co_varnames"] + return create_instruction("STORE_FAST", argval=name) + + def create_load_global(self, name, add=False) -> Instruction: + if add: + self.tx.output.update_co_names(name) + assert name in self.code_options["co_names"], f"{name} not in co_names" + return create_instruction("LOAD_GLOBAL", argval=name) + + def create_load_const(self, value) -> Instruction: + assert is_safe_constant(value), f"unsafe constant {value}" + return self._create_load_const(value) + + def _create_load_const(self, value) -> Instruction: + return create_instruction("LOAD_CONST", argval=value) + + create_load_output = _create_load_const + + def load_method(self, name): + self.tx.output.update_co_names(name) + self.append_output(create_load_method(name)) + + def call_method(self, nargs): + self.extend_output(create_call_method(nargs)) + + def create_load_attr(self, name) -> Instruction: + if name not in self.code_options["co_names"]: + self.code_options["co_names"] += (name,) + return create_instruction("LOAD_ATTR", argval=name) + + def load_attr(self, name): + self.append_output(self.create_load_attr(name)) + + def create_load_attrs(self, names): + return [self.create_load_attr(name) for name in names.split(".")] + + def create_store_attr(self, name) -> Instruction: + if name not in self.code_options["co_names"]: + self.code_options["co_names"] += (name,) + return create_instruction("STORE_ATTR", argval=name) + + def store_attr(self, name): + self.append_output(self.create_store_attr(name)) + + def load_function_name(self, fn_name, push_null, num_on_stack=0): + """Load the global fn_name on the stack num_on_stack down""" + output = [] + if push_null and sys.version_info >= (3, 11): + output.extend(add_push_null(self.create_load_global(fn_name, add=True))) + if num_on_stack > 0: + output.extend( + [ + *self.rot_n(num_on_stack + 2), + *self.rot_n(num_on_stack + 2), + ] + ) + else: + output.extend( + [ + self.create_load_global(fn_name, add=True), + *self.rot_n(num_on_stack + 1), + ] + ) + return output + + def rot_n(self, n): + try: + return create_rot_n(n) + except AttributeError: + # desired rotate bytecode doesn't exist, generate equivalent bytecode + return [ + create_instruction("BUILD_TUPLE", arg=n), + self._create_load_const(rot_n_helper(n)), + *create_rot_n(2), + create_instruction("CALL_FUNCTION_EX", arg=0), + create_instruction("UNPACK_SEQUENCE", arg=n), + ] + + def pop_null(self): + # POP_TOP doesn't work for null, so we pop nulls by pushing in a + # nop function, calling it (which consumes the null), and popping the result. 
+ assert sys.version_info >= (3, 11) + return [ + self._create_load_const(lambda: None), + # 3.13 swapped NULL and callable + *( + (create_instruction("SWAP", arg=2),) + if sys.version_info >= (3, 13) + else () + ), + *create_call_function(0, False), + create_instruction("POP_TOP"), + ] + + def pop_top(self): + self.append_output(create_instruction("POP_TOP")) + + def call_function(self, nargs: int, push_null: bool): + self.extend_output(create_call_function(nargs, push_null=push_null)) + + def dup_top(self): + self.append_output(create_dup_top()) + + def store(self, varname): + self.append_output(self.create_store(varname)) + + def make_function_with_closure( + self, fn_name: str, code: types.CodeType, push_null: bool, num_on_stack=0 + ): + freevars = code.co_freevars + assert freevars + output = self._output + + def gen_fn(): + for var in freevars: + assert var in self.cell_and_freevars() + inst_name = ( + "LOAD_FAST" if sys.version_info >= (3, 13) else "LOAD_CLOSURE" + ) + output.append(create_instruction(inst_name, argval=var)) + output.append(create_instruction("BUILD_TUPLE", arg=len(freevars))) + output.append(self.create_load_const(code)) + if sys.version_info < (3, 11): + output.append(self.create_load_const(fn_name)) + if sys.version_info >= (3, 13): + output.extend( + [ + create_instruction("MAKE_FUNCTION"), + create_instruction("SET_FUNCTION_ATTRIBUTE", arg=0x08), + ] + ) + else: + output.append(create_instruction("MAKE_FUNCTION", arg=0x08)) + + if push_null and sys.version_info >= (3, 11): + self.add_push_null(gen_fn) + output.extend(self.rot_n(num_on_stack + 2)) + output.extend(self.rot_n(num_on_stack + 2)) + else: + gen_fn() + output.extend(self.rot_n(num_on_stack + 1)) + self.clear_tos() + + def create_load_python_module(self, mod) -> Instruction: + """ + Generate a LOAD_GLOBAL instruction to fetch a given python module. 
+ """ + output = self.tx.output + global_scope = output.global_scope + name = re.sub(r"^.*[.]", "", mod.__name__) + if global_scope.get(name, None) is mod: + return self.create_load_global(name, add=True) + prefix = f"___module_{name}" + global_name = self.tx.output.install_global_by_id(prefix, mod) + return self.create_load_global(global_name, add=True) + + def make_call_generated_code(self, fn_name: str) -> None: + """Call the generated code function stored in fn_name""" + self.extend_output(self.load_function_name(fn_name, True)) + + graphargs = self.tx.output.graphargs + for arg in graphargs: + if arg.pass_arg_as_tensor: + self.add_push_null( + lambda: self.extend_output( + [ + self.create_load_python_module(torch), + self.create_load_attr("as_tensor"), + ] + ) + ) + self.call_reconstruct(arg) + self.extend_output(create_call_function(1, False)) + else: + self.call_reconstruct(arg) + + self.extend_output(create_call_function(len(graphargs), False)) + + def load_import_from(self, module_name, object_name) -> None: + self(AttrSource(self.tx.import_source(module_name), object_name)) + + def create_call_function_kw(self, nargs, kw_names, push_null) -> List[Instruction]: + if sys.version_info >= (3, 13): + output = create_call_function(nargs, push_null) + assert output[-1].opname == "CALL" + output.insert(-1, self.create_load_const(kw_names)) + output[-1] = create_instruction("CALL_KW", arg=nargs) + return output + elif sys.version_info >= (3, 11): + output = create_call_function(nargs, push_null) + if sys.version_info >= (3, 12): + idx = -1 + expected_inst = "CALL" + else: + idx = -2 + expected_inst = "PRECALL" + assert output[idx].opname == expected_inst + kw_names_inst = create_instruction("KW_NAMES", argval=kw_names) + output.insert(idx, kw_names_inst) + return output + return [ + self.create_load_const(kw_names), + create_instruction("CALL_FUNCTION_KW", arg=nargs), + ] + + def create_delete(self, value) -> Instruction: + return create_instruction("DELETE_FAST", argval=value) diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/comptime.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/comptime.py new file mode 100644 index 0000000000000000000000000000000000000000..972d79d48fa8b2e841aa491e315798c7e56e0a51 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/comptime.py @@ -0,0 +1,401 @@ +# mypy: allow-untyped-defs +# This file establishes the public comptime interface to Dynamo. +# This allows Dynamo users to execute arbitrary Python code while +# Dynamo is symbolically evaluating their original programs. +# +# The goal of the public API is to give users rope, without actually +# leaking private implementation details of Dynamo. + +import builtins +import dis +import time +import traceback +from typing import Optional, Union + +import torch +from torch.fx.experimental.symbolic_shapes import free_symbols + +from .exc import unimplemented +from .variables import NewCellVariable +from .variables.constant import ConstantVariable +from .variables.misc import ClosureVariable +from .variables.tensor import SymNodeVariable + + +class ComptimeVar: + """ + A ComptimeVar represents a Python value, at some particular point + in time, in the Python code we are symbolically evaluating with + torchdynamo. 
This must be distinguished from a runtime value, as + at compile-time there are some properties of the variable we + do not know (for example, if the ComptimeVar represents a Tensor, + we only know metadata about the tensor; we do NOT know what the + actual data in the Tensor is.) + """ + + def __init__(self, v) -> None: + self.__variable = v + + def as_proxy(self): + """ + Returns an fx.Proxy (or tuple/list of fx.Proxy) representing + this variable in the FX graph we are assembling to pass + to the user compiler. + + This method only works for variables we actually track in + the FX graph, aka Tensors (and ints, if you are compiling + with dynamic shapes). In particular, if you have a list + or tuple of tensors, you will get a list/tuple of proxies + (not a single proxy representing the entire list/tuple). + """ + return self.__variable.as_proxy() + + def is_proxy(self): + """ + Returns True if as_proxy() would succeed. + """ + return self.__variable.is_proxy() + + def as_fake(self): + """ + Returns a "fake" value (either a FakeTensor or a SymInt) + representing the variable in question. This only works + for variables that denote Tensor or int. You can use + this to query metadata; e.g., v.as_fake().size(0) will + tell you the compile-time known size of the tensor. + + WARNING: Do NOT mutate the returned tensor. + """ + return self.__variable.as_proxy().node.meta["example_value"] + + def size(self, dim: Optional[int] = None) -> Union[int, torch.SymInt]: + """ + Returns the size of the tensor (if dim is None) or the size + at the dimension dim. The returned size may be a SymInt. + """ + return self.as_fake().size(dim) + + def python_type(self): + """ + Returns what type(v) would have returned for the variable + at compile time. + """ + return self.__variable.python_type() + + def as_python_constant(self): + """ + Returns the Python value this variable would have, but only if it is + completely known at compile-time (e.g., it is constant). + + WARNING: Do NOT mutate the returned constant. The returned constant + may or may not correspond to the actual value this variable may take + on at runtime; for example, if the variable in question is a constant + list, we may return a copy of that list. + """ + return self.__variable.as_python_constant() + + def is_python_constant(self): + """ + Returns True if as_python_constant would succeed. + """ + return self.__variable.is_python_constant() + + def is_dynamic(self): + if isinstance(self.__variable, SymNodeVariable): + fs = free_symbols(self.__variable.sym_num) + return bool(fs) + return False + + def force_static(self): + """ + Forces that a value is static, inducing a guard on its specific value + """ + if isinstance(self.__variable, SymNodeVariable): + self.__variable.evaluate_expr() + elif isinstance(self.__variable, ConstantVariable): + # TODO: Maybe complain if this isn't a int/bool/float variable + pass + else: + raise AssertionError( + f"cannot force {self.__variable} ({type(self.__variable)}) static" + ) + + def _i_will_not_complain_if_bc_breaks_VariableTracker(self): + """ + Returns the internal data structure VariableTracker that Dynamo uses + to represent variables at compile time. There are no BC guarantees on + this API and WE RESERVE THE RIGHT TO BREAK YOUR CODE if you rely on + it. + """ + return self.__variable + + def __repr__(self) -> str: + return self.__variable.debug_repr() + + # TODO: API for adding a custom guard + + +class ComptimeContext: + """ + This context class provides access to a public API for Dynamo's internals. 
+ If there is something here you would find useful that is missing, please + file a feature request at https://github.com/pytorch/pytorch/ + """ + + def __init__(self, tx) -> None: + self.__tx = tx + + def get_local(self, name: str, *, stacklevel=0) -> ComptimeVar: + """ + Retrieve the compile-time known information about a local. + """ + tx = self.__get_tx(stacklevel) + + # This is analogous to LOAD_DEREF + if hasattr(tx, "closure_cells") and name in tx.closure_cells: + cell = tx.closure_cells[name] + if isinstance(cell, ClosureVariable): + return ComptimeVar(tx.output.root_tx.symbolic_locals[cell.name]) + else: + return ComptimeVar(tx.output.side_effects.load_cell(cell)) + else: + r = tx.symbolic_locals[name] + if isinstance(r, NewCellVariable): + return ComptimeVar(tx.output.side_effects.load_cell(r)) + else: + return ComptimeVar(r) + + def graph_break(self, msg="ComptimeContext.graph_break"): + """ + Manually trigger a graph break + """ + unimplemented(msg) + + def graph(self): + """ + Retrieve the partially constructed FX graph that would be + passed to the user compiler after compilation. + """ + return self.__tx.output.graph + + def assert_static(self, val): + """ + Asserts that the int is static (and not dynamic, per dynamic shapes) + """ + assert ( + not val.is_dynamic() + ), "expected static but got dynamic (run with TORCH_LOGS=dynamic for more info)" + + def print_graph(self, *, verbose=True, file=None): + """ + Print the partially constructed FX graph that would be passed + to the user compiler after compilation. + """ + print( + self.__tx.output.graph.python_code("self", verbose=verbose).src, file=file + ) + + def parent(self): + return ComptimeContext(self.__tx.parent) + + def __get_tx(self, stacklevel): + tx = self.__tx + for _ in range(stacklevel): + tx = tx.parent + return tx + + def print(self, val, *, file=None): + print(repr(val), file=file) + + def print_disas(self, *, file=None, stacklevel=0): + """ + Print the current series of opcodes being executed (not including + parent frames), including where you are in the particular opcode + stream. + """ + tx = self.__get_tx(stacklevel) + print( + dis.Bytecode( + tx.f_code, + current_offset=tx.instructions[tx.instruction_pointer].offset, + ).dis(), + file=file, + ) + + def print_value_stack(self, *, file=None, stacklevel=0): + """ + Print the current Python value stack. Note that this is NOT the same + as the traceback; use print_bt() to print that. Note that at + stacklevel=0, this will typically be empty, as comptime cannot + currently be used in an expression context where there would be + intermediates on the stack. If you would find this useful, please + file a bug at https://github.com/pytorch/pytorch/ + + NB: Stack grows downwards in our print + """ + tx = self.__get_tx(stacklevel) + for s in tx.stack: + print(f"- {s.debug_repr()}", file=file) + + def print_locals(self, *, file=None, stacklevel=0): + """ + Print all of the locals available in the current context. + By default this view is very limited; you can get more information + about any individual local using get_local(). + """ + tx = self.__get_tx(stacklevel) + for k, v in tx.symbolic_locals.items(): + print(f"{k} = {v.debug_repr()}", file=file) + + def print_bt(self, *, file=None, stacklevel=0): + """ + Print the user code backtrace, starting at the beginning of the + frame Dynamo started evaluating. 
Note that this MAY NOT go all + the way to the torch.compile invocation, as we may have done + a graph break and are compiling an intermediate frame as the + starting point. If you think the other behavior would be better, + file a bug at https://github.com/pytorch/pytorch/ + """ + stack = [] + tx = self.__get_tx(stacklevel) + while tx is not None: + stack.append(tx.frame_summary()) + tx = getattr(tx, "parent", None) + print( + "".join(traceback.StackSummary.from_list(reversed(stack)).format()), + file=file, + ) + + def print_guards(self, *, file=None): + """ + Print the currently installed guards for the Dynamo context. + This does NOT include guards associated with variables that + may or may not be installed in the future if those variables + are used. + """ + # TODO: improve print format, current guard format is extremely + # verbose + print( + "\n".join(f"{repr(guard)}" for guard in sorted(self.__tx.output.guards)), + file=file, + ) + + def _i_will_not_complain_if_bc_breaks_InstructionTranslator(self): + """ + Returns the internal data structure InstructionTranslator that Dynamo + uses to track state of symbolic evaluation. There are no BC + guarantees on this API and WE RESERVE THE RIGHT TO BREAK YOUR CODE if + you rely on it. + """ + return self.__tx + + def sleep(self, sec): + time.sleep(sec) + + +class _Comptime: + @staticmethod + def __call__(fn, fallback_fn=lambda: None): + """fn gets called at compile time in TorchDynamo, calls fallback_fn otherwise""" + fallback_fn() + + # Convenience wrappers that are more compact to use + + @staticmethod + def graph_break(): + comptime(lambda ctx: ctx.graph_break()) + + @staticmethod + def print(e): + comptime(lambda ctx: ctx.print(ctx.get_local("e")), lambda: print(e)) + + @staticmethod + def print_graph(): + comptime(lambda ctx: ctx.print_graph()) + + @staticmethod + def print_disas(*, stacklevel=0): + comptime( + lambda ctx: ctx.print_disas( + stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1 + ) + ) + + @staticmethod + def print_value_stack(*, stacklevel=0): + comptime( + lambda ctx: ctx.print_value_stack( + stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1 + ) + ) + + # This is a more useful variant of print_value_stack that can be used + # in an expression context; e.g., x + print_value_stack_and_return(y + z), + # you will see x on the stack prior to the addition operation + @staticmethod + def print_value_stack_and_return(e, *, stacklevel=0): + comptime( + lambda ctx: ctx.print_value_stack( + stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1 + ) + ) + return e + + @staticmethod + def print_locals(*, stacklevel=0): + comptime( + lambda ctx: ctx.print_locals( + stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1 + ) + ) + + @staticmethod + def print_bt(*, stacklevel=0): + comptime( + lambda ctx: ctx.print_bt( + stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1 + ) + ) + + @staticmethod + def print_guards(): + comptime(lambda ctx: ctx.print_guards()) + + @staticmethod + def assert_static(val): + comptime(lambda ctx: ctx.assert_static(ctx.get_local("val"))) + + @staticmethod + def force_static(val): + comptime(lambda ctx: ctx.get_local("val").force_static()) + + @staticmethod + def breakpoint(): + """ + Like pdb breakpoint(), but drop into pdb whenever this line + of code is compiled by dynamo. 
Use it by putting + this in your model code:: + + from torch._dynamo.comptime import comptime + comptime.breakpoint() + + And then, inside pdb, you can access 'ctx' to query things + about the compilation context:: + + (Pdb) !ctx.print_bt() + (Pdb) !ctx.print_locals() + (Pdb) p ctx.get_local("attention").as_fake() + """ + + def inner(inner_ctx): + ctx = inner_ctx.parent() + builtins.breakpoint() + + comptime(inner) + + @staticmethod + def sleep(sec): + comptime(lambda ctx: ctx.sleep(ctx.get_local("sec").as_python_constant())) + + +comptime = _Comptime() diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/logging.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..55bf1b1d199a518a4d225d65f889bd655caf69c9 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/logging.py @@ -0,0 +1,59 @@ +# mypy: allow-untyped-defs +import itertools +import logging + +from torch.hub import _Faketqdm, tqdm + + +# Disable progress bar by default, not in dynamo config because otherwise get a circular import +disable_progress = True + + +# Return all loggers that torchdynamo/torchinductor is responsible for +def get_loggers(): + return [ + logging.getLogger("torch.fx.experimental.symbolic_shapes"), + logging.getLogger("torch._dynamo"), + logging.getLogger("torch._inductor"), + ] + + +# Creates a logging function that logs a message with a step # prepended. +# get_step_logger should be lazily called (i.e. at runtime, not at module-load time) +# so that step numbers are initialized properly. e.g.: + +# @functools.lru_cache(None) +# def _step_logger(): +# return get_step_logger(logging.getLogger(...)) + +# def fn(): +# _step_logger()(logging.INFO, "msg") + +_step_counter = itertools.count(1) + +# Update num_steps if more phases are added: Dynamo, AOT, Backend +# This is very inductor centric +# _inductor.utils.has_triton() gives a circular import error here + +if not disable_progress: + try: + import triton # noqa: F401 + + num_steps = 3 + except ImportError: + num_steps = 2 + pbar = tqdm(total=num_steps, desc="torch.compile()", delay=0) + + +def get_step_logger(logger): + if not disable_progress: + pbar.update(1) + if not isinstance(pbar, _Faketqdm): + pbar.set_postfix_str(f"{logger.name}") + + step = next(_step_counter) + + def log(level, msg, **kwargs): + logger.log(level, "Step %s: %s", step, msg, **kwargs) + + return log diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/__pycache__/functools.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/__pycache__/functools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f72b7e900e8236a6bf50b2fb952228c8b3704f18 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/__pycache__/functools.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/__pycache__/loader.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/__pycache__/loader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..102e365a073ebd4ab1a498976f78d71745e2b3e7 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/__pycache__/loader.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/__pycache__/os.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/__pycache__/os.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..ecd2753c2d28953e2755d9f814d22a95b10e5cbc Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/__pycache__/os.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/builtins.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/builtins.py new file mode 100644 index 0000000000000000000000000000000000000000..737b7c138091830904768af0e8468146b19188e8 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/builtins.py @@ -0,0 +1,48 @@ +""" +Python polyfills for builtins +""" + +from __future__ import annotations + +import builtins +from typing import Iterable, TypeVar + +from ..decorators import substitute_in_graph + + +__all__ = [ + "all", + "any", + "enumerate", +] + + +_T = TypeVar("_T") + + +@substitute_in_graph(builtins.all, can_constant_fold_through=True) +def all(iterable: Iterable[object], /) -> bool: + for elem in iterable: + if not elem: + return False + return True + + +@substitute_in_graph(builtins.any, can_constant_fold_through=True) +def any(iterable: Iterable[object], /) -> bool: + for elem in iterable: + if elem: + return True + return False + + +@substitute_in_graph(builtins.enumerate, is_embedded_type=True) # type: ignore[arg-type] +def enumerate(iterable: Iterable[_T], start: int = 0) -> Iterable[tuple[int, _T]]: + if not isinstance(start, int): + raise TypeError( + f"{type(start).__name__!r} object cannot be interpreted as an integer" + ) + + for x in iterable: + yield start, x + start += 1 diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/functools.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/functools.py new file mode 100644 index 0000000000000000000000000000000000000000..98fd9597773b35833f9ae20bc2b5c98b2c407556 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/functools.py @@ -0,0 +1,6 @@ +""" +Python polyfills for functools +""" + + +__all__ = [] # type: ignore[var-annotated] diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/itertools.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/itertools.py new file mode 100644 index 0000000000000000000000000000000000000000..a829d2e9b8808135f891e1ae5fdba7630b104f94 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/itertools.py @@ -0,0 +1,85 @@ +""" +Python polyfills for itertools +""" + +from __future__ import annotations + +import itertools +from typing import Iterable, Iterator, TypeVar + +from ..decorators import substitute_in_graph + + +__all__ = [ + "chain", + "chain_from_iterable", + "islice", + "tee", +] + + +_T = TypeVar("_T") + + +# Reference: https://docs.python.org/3/library/itertools.html#itertools.chain +@substitute_in_graph(itertools.chain, is_embedded_type=True) # type: ignore[arg-type] +def chain(*iterables: Iterable[_T]) -> Iterator[_T]: + for iterable in iterables: + yield from iterable + + +@substitute_in_graph(itertools.chain.from_iterable) # type: ignore[arg-type] +def chain_from_iterable(iterable: Iterable[Iterable[_T]], /) -> Iterator[_T]: + return itertools.chain(*iterable) + + +chain.from_iterable = chain_from_iterable # type: ignore[method-assign] + + +# Reference: https://docs.python.org/3/library/itertools.html#itertools.islice +@substitute_in_graph(itertools.islice, is_embedded_type=True) # type: ignore[arg-type] +def islice(iterable: Iterable[_T], /, *args: int | None) -> Iterator[_T]: + s 
= slice(*args) + start = 0 if s.start is None else s.start + stop = s.stop + step = 1 if s.step is None else s.step + if start < 0 or (stop is not None and stop < 0) or step <= 0: + raise ValueError( + "Indices for islice() must be None or an integer: 0 <= x <= sys.maxsize.", + ) + + if stop is None: + # TODO: use indices = itertools.count() and merge implementation with the else branch + # when we support infinite iterators + next_i = start + for i, element in enumerate(iterable): + if i == next_i: + yield element + next_i += step + else: + indices = range(max(start, stop)) + next_i = start + for i, element in zip(indices, iterable): + if i == next_i: + yield element + next_i += step + + +# Reference: https://docs.python.org/3/library/itertools.html#itertools.tee +@substitute_in_graph(itertools.tee) +def tee(iterable: Iterable[_T], n: int = 2, /) -> tuple[Iterator[_T], ...]: + iterator = iter(iterable) + shared_link = [None, None] + + def _tee(link) -> Iterator[_T]: # type: ignore[no-untyped-def] + try: + while True: + if link[1] is None: + link[0] = next(iterator) + link[1] = [None, None] + value, link = link + yield value + except StopIteration: + return + + return tuple(_tee(shared_link) for _ in range(n)) diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/loader.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/loader.py new file mode 100644 index 0000000000000000000000000000000000000000..24478e1b5a0f965aacd7a6c04b561fccdf3f11b6 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/loader.py @@ -0,0 +1,35 @@ +# Used to load and initialize polyfill handlers when importing torch._dynamo +# Please add a new import when adding a new polyfill module. + +import importlib +from typing import Tuple, TYPE_CHECKING + +from .. import polyfills, trace_rules + + +if TYPE_CHECKING: + from types import ModuleType + + +# See also the TYPE_CHECKING block in torch/_dynamo/polyfills/__init__.py +POLYFILLED_MODULE_NAMES: Tuple[str, ...] = ( + "builtins", + "functools", + "itertools", + "os", + "sys", +) +POLYFILLED_MODULES: Tuple["ModuleType", ...] = tuple( + importlib.import_module(f".{submodule}", package=polyfills.__name__) + for submodule in POLYFILLED_MODULE_NAMES +) + + +# Unregister the builtin functions from _builtin_function_ids to let them to be +# dispatched with the appropriate VariableTracker type. Otherwise, they will be +# dispatched with BuiltinVariable if present in _builtin_function_ids. 
+for polyfill_module in POLYFILLED_MODULES: + for polyfill_name in polyfill_module.__all__: + polyfill_handler = getattr(polyfill_module, polyfill_name) + original_fn = polyfill_handler.__torch_dynamo_original__ + trace_rules._builtin_function_ids.remove(id(original_fn)) diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/os.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/os.py new file mode 100644 index 0000000000000000000000000000000000000000..5388816b82674215ef683778e7425217c76e0c17 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/polyfills/os.py @@ -0,0 +1,36 @@ +""" +Python polyfills for os +""" + +from __future__ import annotations + +import os +from typing import AnyStr + +from ..decorators import substitute_in_graph + + +__all__ = ["fspath"] + + +# Copied from os.py in the standard library +@substitute_in_graph(os.fspath, can_constant_fold_through=True) +def fspath(path: AnyStr | os.PathLike[AnyStr]) -> AnyStr: + if isinstance(path, (str, bytes)): + return path + + path_type = type(path) + try: + path_repr = path_type.__fspath__(path) # type: ignore[arg-type] + except AttributeError: + if hasattr(path_type, "__fspath__"): + raise + raise TypeError( + f"expected str, bytes or os.PathLike object, not {path_type.__name__}", + ) from None + if isinstance(path_repr, (str, bytes)): + return path_repr # type: ignore[return-value] + raise TypeError( + f"expected {path_type.__name__}.__fspath__() to return str or bytes, " + f"not {type(path_repr).__name__}", + ) diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/profiler.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..b06fead4c845e790a667d655294b836b87c1160b --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/profiler.py @@ -0,0 +1,156 @@ +# mypy: allow-untyped-defs +import dataclasses +import os +from typing import Any, List + +import torch + +from .utils import print_once + + +@dataclasses.dataclass +class ProfileMetrics: + microseconds: float = 0.0 + operators: int = 0 + fusions: int = 0 + graphs: int = 0 + + def __iadd__(self, other: "ProfileMetrics"): + self.microseconds += other.microseconds + self.operators += other.operators + self.fusions += other.fusions + return self + + def __add__(self, other: "ProfileMetrics"): + assert isinstance(other, ProfileMetrics) + return ProfileMetrics( + self.microseconds + other.microseconds, + self.operators + other.operators, + self.fusions + other.fusions, + ) + + def __truediv__(self, other): + if isinstance(other, int): + other = ProfileMetrics(other, other, other) + return ProfileMetrics( + self.microseconds / max(1, other.microseconds), + self.operators / max(1, other.operators), + self.fusions / max(1, other.fusions), + ) + + def __str__(self) -> str: + return f"{self.operators:4.0%} ops {self.microseconds:4.0%} time" + + def tocsv(self): + return [self.operators, self.microseconds] + + +class ProfileResult: + def __init__(self, captured, total, unique_graphs) -> None: + self.captured: ProfileMetrics = captured or ProfileMetrics() + self.total: ProfileMetrics = total or ProfileMetrics() + self.unique_graphs: int = unique_graphs + + def __iadd__(self, other: "ProfileResult"): + self.captured += other.captured + self.total += other.total + self.unique_graphs += other.unique_graphs + return self + + def percent(self): + return self.captured / self.total + + def __str__(self) -> str: + return ( + f"{self.unique_graphs:2} 
graphs {self.captured.graphs:2} graph calls " + f"{self.captured.operators:4}/{self.total.operators:4} = " + + str(self.percent()) + ) + + def tocsv(self): + return [ + self.unique_graphs, + self.captured.graphs, + self.captured.operators, + self.total.operators, + ] + self.percent().tocsv() + + +def should_print_missing(): + return os.environ.get("TORCHDYNAMO_PRINT_MISSING") == "1" + + +def print_missing(stack): + if any("/torch/autograd/profiler.py" in x for x in stack): + return + stack = [ + x for x in stack if ("<built-in" not in x and "site-packages/torch/" not in x) + ] + print_once("MISSING", " >> ".join(stack[-3:])) + + +class Profiler: + unique_graphs = 0 + + def __init__(self) -> None: + self.prof = torch.profiler.profile( + activities=[torch.profiler.ProfilerActivity.CPU], + with_stack=should_print_missing(), + ) + + def results(self): + captured_regions = 0 + captured_ops = 0 + captured_microseconds = 0 + total_ops = 0 + total_microseconds = 0 + + last_op_end_time = -1 + captured_region_end_time = -1 + events = sorted(self.prof.events(), key=lambda x: x.time_range.start) + for e in events: + if e.name == "TORCHDYNAMO": + captured_region_end_time = e.time_range.end + captured_regions += 1 + # ignore `handle = torch.zeros(1)` in record_function.__init__() + total_ops -= 1 + elif e.time_range.start >= last_op_end_time: + last_op_end_time = e.time_range.end + if e.time_range.end <= captured_region_end_time: + captured_ops += 1 + captured_microseconds += e.time_range.elapsed_us() + elif should_print_missing(): + print_missing(e.stack) + total_ops += 1 + total_microseconds += e.time_range.elapsed_us() + else: + pass # ops recursively called from other ops (ignored) + + unique_graphs = Profiler.unique_graphs + Profiler.unique_graphs = 0 + # we counted one extra op that is part of the profiler setup code + total_ops -= 1 + + return ProfileResult( + captured=ProfileMetrics( + microseconds=captured_microseconds, + operators=captured_ops, + fusions=captured_ops - captured_regions, + graphs=captured_regions, + ), + total=ProfileMetrics( + microseconds=total_microseconds, + operators=total_ops, + fusions=total_ops - 1, + ), + unique_graphs=unique_graphs, + ) + + +def fx_insert_profiling(gm: torch.fx.GraphModule, example_inputs: List[Any]): + def _wrapped(*args): + with torch.profiler.record_function("TORCHDYNAMO"): + return gm.forward(*args) + + Profiler.unique_graphs += 1 + return _wrapped diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/repro/__init__.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/repro/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/__init__.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..860e23b56863be6fdfaee7b797b85988d1cc75a3 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/__init__.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_aot.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_aot.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed8757b6a151bb79960a595671a38acd25c83524 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_aot.cpython-310.pyc differ diff --git
a/pllava/lib/python3.10/site-packages/torch/_dynamo/repro/after_aot.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/repro/after_aot.py new file mode 100644 index 0000000000000000000000000000000000000000..a21a20cdf2e6118d9eb540cdc727fb2339617533 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/repro/after_aot.py @@ -0,0 +1,966 @@ +# mypy: allow-untyped-defs +import argparse +import copy +import functools +import io +import logging +import os +import shutil +import subprocess +import sys +import textwrap +import uuid +from importlib import import_module +from tempfile import TemporaryFile +from typing import Any, Callable, Dict, Union + +import torch +import torch.fx as fx +import torch.nn as nn +from torch._dynamo.debug_utils import ( + _cuda_system_info_comment, + AccuracyError, + backend_accuracy_fails, + BuckTargetWriter, + cast_to_fp64, + extra_imports, + generate_config_string, + helper_for_dump_minify, + InputReader, + InputWriter, + MAX_CONSTANT_NUMEL_INLINE, + minifier_dir, + NNModuleToString, + NopInputReader, + same_two_models, +) +from torch._dynamo.utils import clone_inputs, counters, same +from torch.fx.experimental.proxy_tensor import make_fx +from torch.fx.experimental.symbolic_shapes import ( + fx_placeholder_targets, + has_free_symbols, +) +from torch.hub import tqdm + +from .. import config + + +log = logging.getLogger(__name__) + + +inductor_config = import_module("torch._inductor.config") +use_buck = inductor_config.is_fbcode() + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# MAIN ENTRY POINT +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +def wrap_compiler_debug(unconfigured_compiler_fn, compiler_name: str): + """ + Minifier for Fx Graph modules after Aot Autograd has finished. We wrap both + forward and backward call separately with the backend compiler_fn - like + inductor or nvfuser. Intercepting after Aot Autograd presents neat + abstraction, where all the params are lifted as graph inputs, making it easy + to save the graph as a string. + """ + + @functools.wraps(unconfigured_compiler_fn) + def debug_wrapper(gm, example_inputs, **kwargs): + from torch._subclasses import FakeTensorMode + + compiler_fn = functools.partial(unconfigured_compiler_fn, **kwargs) + + from torch._functorch.aot_autograd import get_aot_graph_name + + graph_name = get_aot_graph_name() + + # TODO: why do we need to deepcopy the original graph? + orig_graph = copy.deepcopy(gm.graph) + assert config.repro_after in ("dynamo", "aot", None) + + try: + # Call the compiler_fn - which is either aot_autograd or inductor + # with fake inputs + inner_compiled_fn = compiler_fn(gm, example_inputs) + except Exception as e: + # TODO: Failures here are troublesome because no real inputs, + # need a different serialization strategy + if config.repro_after == "aot": + if config.repro_level == 1: + dump_compiler_graph_state( + fx.GraphModule(gm, orig_graph), + example_inputs, + compiler_name, + ) + elif config.repro_level == 2: + dump_to_minify( + fx.GraphModule(gm, orig_graph), + example_inputs, + compiler_name, + ) + log.error("CompilerError") + raise + + # We may run regular PyTorch compute that may trigger Dynamo, do NOT + # recursively attempt to accuracy minify in that case! + def deferred_for_real_inputs(real_inputs): + # This is a bit obscure: if we recursively try to accuracy minify + # the SAME function, this would trigger. 
But most of the time + # we should never hit this branch + if config.repro_after != "aot": + return inner_compiled_fn(real_inputs) + with config.patch(repro_after=None): + return inner_debug_fn(real_inputs) + + def inner_debug_fn(real_inputs): + """ + Aot Autograd fw_compiler and bw_compiler can have fake tensors. So, + example_inputs can be fake tensors. We can call compiler_fn (which is + inductor or nvfuser) with fake tensors but the actually compiled_fn + should be called with real tensors. Therefore, the actual invocation + is deferred. + """ + # Copy the tensor attrs like shape, stride etc by converting to Fake Tensor + # because inductor clears the tensor list in its codegen. And example_inputs + # are available only for the first invocation. + fake_mode = FakeTensorMode() + copy_tensor_attrs = [ + fake_mode.from_tensor(x) if isinstance(x, torch.Tensor) else x + for x in real_inputs + ] + if config.repro_level == 3: + # Always dump the original module in case we have segfaults + dump_to_minify( + fx.GraphModule(gm, orig_graph), real_inputs, compiler_name + ) + + if config.repro_level == 4: + if compiler_name != "inductor": + raise NotImplementedError( + "Accuracy minification is supported for inductor only" + ) + failed = not same_two_models( + gm, + inner_compiled_fn, + real_inputs, + only_fwd=True, + ignore_non_fp=config.repro_ignore_non_fp, + ) + + if failed: + log.warning( + "Accuracy failed for the AOT Autograd graph %s", graph_name + ) + dump_compiler_graph_state( + fx.GraphModule(gm, orig_graph), + real_inputs, + f"{compiler_name}_accuracy", + ) + dump_to_minify( + fx.GraphModule(gm, orig_graph), + real_inputs, + f"{compiler_name}_accuracy", + ) + raise AccuracyError("Bad accuracy detected") + else: + # Call the compiled function with real inputs + return inner_compiled_fn(real_inputs) + else: + try: + # Call the compiled function with real inputs + out = inner_compiled_fn(real_inputs) + # sync cuda kernels to ensure IMA detection + for arg in example_inputs: + if isinstance(arg, torch.Tensor) and arg.is_cuda: + torch.cuda.synchronize() + break + return out + except Exception as e: + if config.repro_level == 1: + dump_compiler_graph_state( + fx.GraphModule(gm, orig_graph), + copy_tensor_attrs, + compiler_name, + ) + elif config.repro_level == 2: + dump_to_minify( + fx.GraphModule(gm, orig_graph), + copy_tensor_attrs, + compiler_name, + ) + raise + + if config.repro_after == "aot": + compiled_fn = deferred_for_real_inputs + compiled_fn._boxed_call = True # type: ignore[attr-defined] + return compiled_fn + else: + return inner_compiled_fn + + return debug_wrapper + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# DUMP REPROS +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +def generate_compiler_repro_string(gm, args, *, stable_output=False, save_dir=None): + model_str = textwrap.dedent( + f""" +import torch +from torch import tensor, device +import torch.fx as fx +from torch._dynamo.testing import rand_strided +from math import inf +import torch._inductor.inductor_prims + +{generate_config_string(stable_output=stable_output)} + +isolate_fails_code_str = None + +{extra_imports} + + """ + ) + if not stable_output: + model_str += f"# torch version: {torch.version.__version__}\n" + if hasattr(torch.version, "cuda"): + model_str += f"# torch cuda version: {torch.version.cuda}\n" + if hasattr(torch.version, "git_version"): + model_str += f"# torch git version: {torch.version.git_version}\n\n\n" + model_str += 
_cuda_system_info_comment() + + model_str += NNModuleToString.convert(gm) + + # get hint shape/stride when dynamic shape enabled + def hint_if_symint(x): + return tuple(i.node.hint if isinstance(i, torch.SymInt) else i for i in x) + + writer = InputWriter(save_dir) + for placeholder, arg in zip(fx_placeholder_targets(gm), args): + if isinstance(arg, (int, torch.SymInt)): + writer.symint(placeholder, arg) + elif isinstance(arg, torch.Tensor): + # TODO: improve these names with FQN + writer.tensor(placeholder, arg) + else: + raise TypeError(f"arg is neither SymInt/int nor torch.Tensor, {arg}") + + model_str += "\n".join(writer.lines()) + "\n" + + model_str += "mod = Repro()\n" + return model_str + + +def save_graph_repro( + fd, + gm, + args, + compiler_name, + *, + stable_output=False, + save_dir=None, + command="run", + accuracy=None, + tracing_mode=None, + check_str=None, +): + if any( + isinstance(arg, torch.fx.experimental._backward_state.BackwardState) + for arg in args + ): + fd.write( + "Repro is not generated due to existence of BackwardState in graph input" + ) + return + fd.write( + generate_compiler_repro_string( + gm, + args, + stable_output=stable_output, + save_dir=save_dir, + ) + ) + if accuracy is None: + accuracy = "_accuracy" in compiler_name + if tracing_mode is None: + tracing_mode = "real" + if any(has_free_symbols(a) for a in args): + tracing_mode = "symbolic" + fd.write("if __name__ == '__main__':\n") + fd.write(" from torch._dynamo.repro.after_aot import run_repro\n") + fd.write( + f" with torch.no_grad():\n" + f" run_repro(mod, load_args, accuracy={accuracy!r}, command={command!r}, " + f"save_dir={save_dir!r}, tracing_mode={tracing_mode!r}, check_str={check_str!r})\n" + f" # To run it separately, do \n" + f" # mod, args = run_repro(mod, load_args, accuracy={accuracy!r}, command='get_args', " + f"save_dir={save_dir!r}, tracing_mode={tracing_mode!r}, check_str={check_str!r})\n" + f" # mod(*args)" + ) + + +def dump_compiler_graph_state(gm, args, compiler_name, *, accuracy=None): + subdir = os.path.join(minifier_dir(), "checkpoints") + if not os.path.exists(subdir): + os.makedirs(subdir, exist_ok=True) + file_name = os.path.join(subdir, f"{len(gm.graph.nodes)}.py") + log.warning( + "Writing checkpoint with %s nodes to %s", len(gm.graph.nodes), file_name + ) + with open(file_name, "w") as fd: + save_graph_repro( + fd, gm, args, compiler_name, save_dir=subdir, accuracy=accuracy + ) + curdir = os.getcwd() + repro_path = os.path.join(curdir, "repro.py") + try: + shutil.copyfile(file_name, repro_path) + log.warning("Copying repro file for convenience to %s", repro_path) + if use_buck: + BuckTargetWriter(file_name).write() + except OSError: + log.warning("No write permissions for %s", repro_path) + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# DUMP MINIFIER +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +def dump_to_minify(gm, args, compiler_name: str): + out = io.StringIO() + # TODO: factor this out + subdir = os.path.join(minifier_dir(), "checkpoints") + if not os.path.exists(subdir): + os.makedirs(subdir, exist_ok=True) + save_graph_repro(out, gm, args, compiler_name, save_dir=subdir, command="minify") + return helper_for_dump_minify(out.getvalue()) + + +def isolate_fails( + fx_g, + args, + compiler_name: str, + env=None, + save_dir=None, + accuracy=None, + tracing_mode=None, + check_str=None, +): + if env is None: + env = {} + subdir = os.path.join(os.getcwd(), "isolate") + if not os.path.exists(subdir): + 
os.makedirs(subdir, exist_ok=True) + file_name = os.path.join(subdir, f"{str(uuid.uuid4())[:5]}.py") + with open(file_name, "w") as fd: + save_graph_repro( + fd, + fx_g, + args, + compiler_name, + save_dir=save_dir, + command="minifier-query", + accuracy=accuracy, + tracing_mode=tracing_mode, + check_str=check_str, + ) + # with open(file_name, "r") as fd: + # print(fd.read()) + new_env = os.environ.copy() + new_env = {**new_env, **env} + stdout, stderr = TemporaryFile(), TemporaryFile() + + if use_buck: + cmd = BuckTargetWriter(file_name).write(print_msg=False) + else: + cmd = ["python", file_name] + + p = subprocess.Popen( + cmd, + cwd=subdir, + stdout=stdout, + stderr=stderr, + env=new_env, + ) + p.wait() + + stdout.seek(0) + stderr.seek(0) + print( + textwrap.indent(stdout.read().decode("utf-8"), prefix=">> "), file=sys.stdout + ) + print( + textwrap.indent(stderr.read().decode("utf-8"), prefix=">> "), file=sys.stderr + ) + # print(f"Isolated test failed - {file_name}") + return p.returncode != 0 + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# MINIFIER TOOLS +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +def inductor_fails(fx_g, args, check_str=None): + has_cuda = False + for arg in args: + if isinstance(arg, torch.Tensor) and arg.is_cuda: + has_cuda = True + break + + def sync(): + if has_cuda: + # Ensures that segfaults are surfaced + torch.cuda.synchronize() + + from torch._inductor.compile_fx import compile_fx_inner + + try: + result = fx_g(*args) + assert isinstance(result, (tuple, list)) + assert not any(isinstance(x, (tuple, list)) for x in result) + except Exception: + return False + + sync() + + try: + compile_mod = compile_fx_inner(fx_g, args) + compile_mod(args) + sync() + except Exception as e: + if check_str is not None and check_str not in repr(e): + return False + print(repr(e)) + return True + return False + + +def inductor_accuracy_fails( + fx_g, args, check_str=None, *, require_fp64=False, ignore_non_fp=False +): + from torch._inductor.compile_fx import compile_fx_inner + + return backend_aot_accuracy_fails( + fx_g, + args, + compile_fx_inner, + require_fp64=require_fp64, + ignore_non_fp=ignore_non_fp, + ) + + +backend_aot_accuracy_fails = functools.partial(backend_accuracy_fails, only_fwd=True) + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # +# REPRO MAIN +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # + + +def repro_common(options, mod, load_args): + # Invariant for graphs we generate with the repro script + assert not any(mod.named_parameters()) + for n, b in mod.named_buffers(): + if b.numel() > MAX_CONSTANT_NUMEL_INLINE: + log.warning( + "Constant %s was not serialized, generated random data instead. " + "If you think this is affecting you, please comment on " + "https://github.com/pytorch/pytorch/issues/100468", + n, + ) + + if not hasattr(load_args, "_version"): + log.warning( + "load_args does not have a _version attribute, please file a bug to PyTorch " + "and describe how you generate this repro script" + ) + else: + if load_args._version > 0: + log.warning( + "load_args is version %s, but this version of PyTorch only supports " + "version 0. 
We will try to run it anyway but there may be an incompatibility; " + "if so, try upgrading your version of PyTorch.", + load_args._version, + ) + + nop_reader = NopInputReader() + load_args(nop_reader) + + with tqdm(desc="Loading inputs", total=nop_reader.total) as pbar: + input_reader = InputReader(save_dir=options.save_dir, pbar=pbar) + load_args(input_reader) + args = input_reader.args + + # Turn mod into a GraphModule the slow way + # TODO: speed this up + mod = make_fx(mod, tracing_mode=options.tracing_mode)(*args) + + torch._inductor.config.generate_intermediate_hooks = True + + return mod, args + + +ACCURACY_FAILS: Dict[str, Callable[[nn.Module, Any], bool]] = { + "": inductor_fails, + # This might look inverted but it's not. strict_accuracy means "we will + # minify any time we see anything that diverges", whereas accuracy is more + # conservative, and will only minify if there is a meaningful fp64 + # divergence + "accuracy": functools.partial( + inductor_accuracy_fails, require_fp64=True, ignore_non_fp=True + ), + "strict_accuracy": inductor_accuracy_fails, +} + + +def repro_minifier_query(options, mod, load_args): + mod, args = repro_common(options, mod, load_args) + fail_fn = functools.partial( + ACCURACY_FAILS[options.accuracy], check_str=options.check_str + ) + if fail_fn(mod, args): + sys.exit(1) + else: + sys.exit(0) + + +def repro_minify(options, mod, load_args): + from functorch.compile import minifier + + mod, args = repro_common(options, mod, load_args) + compiler_name = "inductor_accuracy" if options.accuracy != "" else "inductor" + + favored_device = 1 if torch.cuda.device_count() >= 2 else 0 + env_variables = {"CUDA_VISIBLE_DEVICES": str(favored_device)} + + module_fails: Any + if options.isolate: + module_fails = functools.partial( + isolate_fails, + env=env_variables, + compiler_name=compiler_name, + save_dir=options.save_dir, + accuracy=options.accuracy, + tracing_mode=options.tracing_mode, + ) + else: + module_fails = ACCURACY_FAILS[options.accuracy] + + minifier( + mod, + args, + module_fails=functools.partial(module_fails, check_str=options.check_str), + dump_state=functools.partial( + dump_compiler_graph_state, compiler_name=compiler_name + ), + save_dir=options.save_dir, + offload_to_disk=options.offload_to_disk, + skip_offload=options.skip_saving_eager_intermediates, + skip_sanity=options.skip_sanity, + max_granularity=options.max_granularity, + ) + + +def repro_analyze(options, mod, load_args): + from torch._inductor.compile_fx import compile_fx_inner + from torch._inductor.hooks import intermediate_hook + + mod, args = repro_common(options, mod, load_args) + + # TODO: The logic for cloning inputs/models here is intentionally + # modeled off of run_fwd_maybe_bwd, but arguably it is better not to + # clone inputs (as you are doubling your effective GPU memory usage). + # It is certainly faster though! It probably makes sense to let the + # user specify the offload strategy. 
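# Editor's note: a minimal, self-contained sketch of the fx.Interpreter
# pattern that repro_analyze uses below (WriterInterp / ReaderInterp override
# run_node to capture per-node intermediates). RecordingInterp and the traced
# lambda are illustrative stand-ins, not part of the vendored file.
import torch
import torch.fx as fx

class RecordingInterp(fx.Interpreter):
    def __init__(self, gm):
        super().__init__(gm)
        self.records = {}  # node name -> (shape, dtype) for tensor-valued nodes

    def run_node(self, n):
        r = super().run_node(n)  # execute the node normally
        if isinstance(r, torch.Tensor):
            self.records[n.name] = (tuple(r.shape), r.dtype)
        return r

gm = fx.symbolic_trace(lambda x: torch.relu(x) + 1)
interp = RecordingInterp(gm)
interp.run(torch.randn(3))
print(interp.records)  # metadata for every tensor-valued node in the graph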
+ + with tqdm(desc="Compiling"): + compiled = compile_fx_inner(mod, args) + total = counters["inductor"]["intermediate_hooks"] + + known_names = set() + + def save_hook(name, val): + known_names.add(name) + if not options.skip_saving_inductor_intermediates: + writer.write_tensor(os.path.join("inductor", name), val) + pbar.update(1) # type: ignore[has-type] + + writer = torch.utils._content_store.ContentStoreWriter( + options.save_dir, stable_hash=options.stable_hash + ) + reader = torch.utils._content_store.ContentStoreReader(options.save_dir) + + new_args = clone_inputs(args) + with intermediate_hook(save_hook), tqdm( + desc="Saving inductor intermediates", total=total + ) as pbar: + compiled(new_args) + assert not new_args + + def compare_tuples(tuple1, tuple2): + diff_indices = [i for i in range(len(tuple1)) if tuple1[i] != tuple2[i]] + diff_values = [(tuple1[i], tuple2[i]) for i in diff_indices] + + if not diff_values: + return None + else: + return " and ".join(f"{a} != {b}" for a, b in diff_values) + + def check_hook(name, val): + meta = writer.compute_tensor_metadata(val) + meta2 = reader.read_tensor_metadata(os.path.join("inductor", name)) + reason = compare_tuples(meta, meta2) + if reason is not None: + pbar.write(f"NONDETERMINISTIC INDUCTOR at {name} ({reason})") + pbar.update(1) + + if not options.skip_check_deterministic: + new_args = clone_inputs(args) + with intermediate_hook(check_hook), tqdm( + desc="Checking inductor determinism", total=total + ) as pbar: + compiled(new_args) + assert not new_args + + class WriterInterp(fx.Interpreter): + def __init__(self, mod, subdir) -> None: + super().__init__(mod) + self.subdir = subdir + + def run_node(self, n): + r = super().run_node(n) + name = n.name + if name in known_names: + pbar.update(1) + writer.write_tensor(os.path.join(self.subdir, name), r) + return r + + # NB: the module cast doesn't actually do anything, since there are no + # parameters/buffers on the module + if not options.skip_saving_float64_intermediates: + new_mod, new_args = cast_to_fp64(copy.deepcopy(mod), clone_inputs(args)) + with tqdm(desc="Saving float64 intermediates", total=total) as pbar: + WriterInterp(new_mod, "float64").boxed_run(new_args) + assert not new_args + + class ExactReaderInterp(fx.Interpreter): + def run_node(self, n): + r = super().run_node(n) + name = n.name + if name in known_names: + meta = writer.compute_tensor_metadata(r) + meta2 = reader.read_tensor_metadata(os.path.join("float64", name)) + reason = compare_tuples(meta, meta2) + if reason is not None: + pbar.write(f"NONDETERMINISTIC FLOAT64 at {name} ({reason})") + pbar.update(1) + return r + + # TODO: check eager determinism + + if not options.skip_check_deterministic: + new_mod, new_args = cast_to_fp64(copy.deepcopy(mod), clone_inputs(args)) + with tqdm(desc="Checking float64 determinism", total=total) as pbar: + ExactReaderInterp(new_mod).boxed_run(new_args) + assert not new_args + + # Now that we've saved everything, interp through the eager graph + # and do comparisons + class ReaderInterp(fx.Interpreter): + def run_node(self, n): + r = super().run_node(n) + name = n.name + if name in known_names: + inductor = reader.read_tensor(os.path.join("inductor", name)) + float64 = reader.read_tensor(os.path.join("float64", name)) + logged = False + + def log_error(msg, *args): + nonlocal logged + logged = True + pbar.write(f"DIVERGED at {name}: {msg % args}") + + if not same( + r, + inductor, + float64, + tol=torch._dynamo.config.repro_tolerance, + equal_nan=True, + log_error=log_error, + 
): + assert logged + pbar.update(1) + return r + + with tqdm(desc="Checking divergence", total=total) as pbar: + ReaderInterp(mod).boxed_run(args) + assert not args + + +def repro_get_args(options, mod, load_args): + mod, args = repro_common(options, mod, load_args) + return mod, args + + +def repro_run(options, mod, load_args): + from torch._inductor.compile_fx import compile_fx_inner + + mod, args = repro_common(options, mod, load_args) + + from torch.cuda import synchronize + + compiled = compile_fx_inner(mod, args) + + if options.accuracy != "": + # We don't really respect --accuracy vs --strict-accuracy here, it + # seems counterintuitive + if not same_two_models( + mod, + compiled, + args, + only_fwd=True, + ignore_non_fp=config.repro_ignore_non_fp, + ): + raise AccuracyError("Bad accuracy detected") + else: + need_sync = False + for arg in args: + if isinstance(arg, torch.Tensor) and arg.is_cuda: + need_sync = True + break + ref = compiled(list(args)) + if need_sync: + synchronize() # ensure segfaults are surfaced + return lambda: compiled(list(args)) + + +# TODO: lazily load the inputs or something, rather than cloning them +def run_repro( + mod, + load_args, + *, + command="run", + accuracy: Union[bool, str] = "", + save_dir=None, + tracing_mode=None, + patch_code=None, + check_str=None, + **kwargs, +): + for k in kwargs: + log.warning( + "Unrecognized kwarg %s; perhaps this repro was made on a newer version of PyTorch", + k, + ) + + if accuracy is True: + accuracy = "accuracy" + elif accuracy is False: + accuracy = "" + + if patch_code is not None: + log.warning( + "patch_code no longer works on this version of PyTorch, silently ignoring" + ) + + parser = argparse.ArgumentParser( + description=f"""\ +An after_aot repro script, typically triggering a bug in PyTorch Inductor. +When run with no arguments, this script defaults to running '{command}'. +Extra flags may be available; to find out more, try '{command} --help'. +There are also alternate subcommands available, see below. + +default settings on this script: + {accuracy=} + {tracing_mode=} + {save_dir=} + {check_str=} +""", + formatter_class=argparse.RawTextHelpFormatter, + ) + + def common_flags(parser): + accuracy_group = parser.add_mutually_exclusive_group() + accuracy_group.add_argument( + "--no-accuracy", + dest="accuracy", + action="store_const", + const="", + default=accuracy, + help="do not test accuracy, just run the module and see if it errors", + ) + accuracy_group.add_argument( + "--accuracy", + action="store_const", + const="accuracy", + default=accuracy, + help="""\ +test if the RMSE between the compiled module and the fp64 reference is greater +than eager and the fp64 reference. This is usually more reliable than the +standard allclose test, as we expect numeric differences from compiling, often +improving accuracy over eager. RMSE test allows for compiled module to +diverge greatly from eager, as long as this divergence moves it closer to the +'true' mathematical value of the network. 
Caveats: (1) double precision can +still suffer from rounding error, so it is not a perfect reference (see for +example 'Herbie: Automatically Improving Floating Point Accuracy') for +approaches that detect the necessary working precision and compute it in +arbitrary precision floating point; unfortunately, this is not practical for +tensor computation; (2) if there are not enough samples in the output being +compared, we may get unlucky and have an unlucky greater RMSE than eager; this +could be overcome by applying a more rigorous statistical test at some +p-value, which we leave for future work. +""", + ) + accuracy_group.add_argument( + "--strict-accuracy", + dest="accuracy", + action="store_const", + const="strict_accuracy", + default=accuracy, + help="""\ +by default, when doing accuracy minification we will reject reductions which +change the divergence from a floating point divergence to a integral/boolean +divergence. This is because some operations like ReLU involve temporarily +sharp boundaries that smooth out again afterwards; without requiring +divergence on floating point, the minifier will often fixate on divergent +boolean tensor even though this is not the true source of the divergence. +However, rejecting these reductions makes it more difficult for the minifier +to make process. Using this option will let the minifier progress for ALL +divergences--you just might not end up with a useful repro in the end.""", + ) + + parser.add_argument( + "--save-dir", + type=str, + default=save_dir, + metavar="DIR", + help="directory where saved inputs live", + ) + parser.add_argument( + "--no-save-dir", + dest="save_dir", + action="store_const", + const=None, + help="don't use any directory for saved inputs", + ) + parser.add_argument( + "--tracing-mode", + type=str, + metavar="{real,fake,symbolic}", + default=tracing_mode, + help="how to trace the repro module into a GraphModule with metadata", + ) + + subparsers = parser.add_subparsers( + dest="command", metavar="{run,minify,analyze}", required=True + ) + + parser_run = subparsers.add_parser( + "run", + help="just run the repro", + ) + common_flags(parser_run) + + parser_minify = subparsers.add_parser( + "minify", help="run the minifier on the repro" + ) + common_flags(parser_minify) + parser_get_args = subparsers.add_parser("get_args", help="get the args") + common_flags(parser_get_args) + parser_minify_isolate = parser_minify.add_mutually_exclusive_group() + parser_minify_isolate.add_argument( + "--isolate", + action="store_true", + default=True, + help="run in separate processes to avoid interference (default)", + ) + parser_minify_isolate.add_argument( + "--no-isolate", + dest="isolate", + action="store_false", + help="speed up by running all compilation in same process", + ) + parser_minify.add_argument( + "--skip-saving-eager-intermediates", + action="store_true", + help="skip saving eager intermediates on --minify", + ) + # TODO: make this an option for --analyze too + parser_minify.add_argument( + "--offload-to-disk", + action="store_true", + help="during minification, offload delta debugging intermediates to disk. 
Use if you're OOMing", + ) + parser_minify.add_argument( + "--skip-sanity", + action="store_true", + help="skip sanity check at beginning of minification on original graph", + ) + parser_minify.add_argument( + "--max-granularity", + type=int, + default=None, + help="start at this granularity and work down; must be power of 2", + ) + parser_minify.add_argument( + "--check-str", + type=str, + default=check_str, + help="require minified program to fail with error containing this string", + ) + + parser_analyze = subparsers.add_parser( + "analyze", help="run the accuracy analyzer on the repro" + ) + common_flags(parser_analyze) + parser_analyze.add_argument( + "--skip-saving-inductor-intermediates", + action="store_true", + help="skip saving inductor intermediates on --analyze", + ) + parser_analyze.add_argument( + "--skip-saving-float64-intermediates", + action="store_true", + help="skip saving float64 intermediates", + ) + parser_analyze.add_argument( + "--skip-check-deterministic", + action="store_true", + help="skip checking that the network is deterministic", + ) + parser_analyze.add_argument( + "--stable-hash", + action="store_true", + help="use SHA-1 checksum instead of fast (but possibly unsound) hash", + ) + + # Run the repro in the context of minification, inverting exit code meaning + parser_minifier_query = subparsers.add_parser( + "minifier-query", + ) + common_flags(parser_minifier_query) + parser_minifier_query.add_argument( + "--check-str", + type=str, + default=check_str, + help="require minified program to fail with error containing this string", + ) + + args = None + if len(sys.argv) <= 1: + args = [command, *sys.argv[1:]] + + options = parser.parse_args(args) + COMMAND_FNS = { + "minify": repro_minify, + "analyze": repro_analyze, + "minifier-query": repro_minifier_query, + "run": repro_run, + "get_args": repro_get_args, + } + return COMMAND_FNS[options.command](options, mod, load_args) diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/resume_execution.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/resume_execution.py new file mode 100644 index 0000000000000000000000000000000000000000..132e9e4081bceb4ac53d2ec5627839f8b9b807c9 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/resume_execution.py @@ -0,0 +1,720 @@ +# mypy: allow-untyped-defs +import copy +import dataclasses +import sys +import types +from typing import Any, cast, Dict, List, Optional, Tuple + +from .bytecode_transformation import ( + create_call_function, + create_call_method, + create_dup_top, + create_instruction, + create_jump_absolute, + create_load_method, + Instruction, + InstructionExnTabEntry, + transform_code_object, + unique_id, +) +from .utils import ExactWeakKeyDictionary + + +# taken from code.h in cpython +CO_OPTIMIZED = 0x0001 +CO_NEWLOCALS = 0x0002 +CO_VARARGS = 0x0004 +CO_VARKEYWORDS = 0x0008 +CO_NESTED = 0x0010 +CO_GENERATOR = 0x0020 +CO_NOFREE = 0x0040 +CO_COROUTINE = 0x0080 +CO_ITERABLE_COROUTINE = 0x0100 +CO_ASYNC_GENERATOR = 0x0200 + +# trace_rules.py import this constant for consistency +TORCH_DYNAMO_RESUME_IN_PREFIX = "torch_dynamo_resume_in" + + +def _initial_push_null(insts): + if sys.version_info >= (3, 11): + insts.append(create_instruction("PUSH_NULL")) + if sys.version_info < (3, 13): + insts.append(create_instruction("SWAP", arg=2)) + + +@dataclasses.dataclass(frozen=True) +class ReenterWith: + stack_index: int + target_values: Optional[Tuple[Any, ...]] = None + + # If we do not want to destroy the stack, we can do the same thing as a + # 
`SETUP_WITH` block, only that we store the context manager in a local_symbol + def try_except(self, code_options, cleanup: List[Instruction]): + """ + Codegen based off of: + load args + enter context + try: + (rest) + finally: + exit context + """ + # NOTE: we assume that TOS is a context manager CLASS! + load_args = [] + if self.target_values: + load_args = [ + create_instruction("LOAD_CONST", argval=val) + for val in self.target_values + ] + ctx_name = unique_id(f"___context_manager_{self.stack_index}") + if ctx_name not in code_options["co_varnames"]: + code_options["co_varnames"] += (ctx_name,) + for name in ["__enter__", "__exit__"]: + if name not in code_options["co_names"]: + code_options["co_names"] += (name,) + + except_jump_target = create_instruction( + "NOP" if sys.version_info < (3, 11) else "PUSH_EXC_INFO" + ) + cleanup_complete_jump_target = create_instruction("NOP") + + setup_finally: List[Instruction] = [] + _initial_push_null(setup_finally) + + # TODO(williamwen42) call method order is wrong for 3.13+ - will fix later + setup_finally.extend( + [ + *load_args, + *create_call_function(len(load_args), False), + create_instruction("STORE_FAST", argval=ctx_name), + create_instruction("LOAD_FAST", argval=ctx_name), + create_load_method("__enter__"), + *create_call_method(0), + create_instruction("POP_TOP"), + ] + ) + + if sys.version_info < (3, 11): + setup_finally.append( + create_instruction("SETUP_FINALLY", target=except_jump_target) + ) + else: + exn_tab_begin = create_instruction("NOP") + exn_tab_end = create_instruction("NOP") + exn_tab_begin.exn_tab_entry = InstructionExnTabEntry( + exn_tab_begin, + exn_tab_end, + except_jump_target, + self.stack_index + 1, + False, + ) + setup_finally.append(exn_tab_begin) + + def create_reset(): + return [ + create_instruction("LOAD_FAST", argval=ctx_name), + create_load_method("__exit__"), + create_instruction("LOAD_CONST", argval=None), + create_dup_top(), + create_dup_top(), + *create_call_method(3), + create_instruction("POP_TOP"), + ] + + if sys.version_info < (3, 9): + epilogue = [ + create_instruction("POP_BLOCK"), + create_instruction("BEGIN_FINALLY"), + except_jump_target, + *create_reset(), + create_instruction("END_FINALLY"), + ] + elif sys.version_info < (3, 11): + epilogue = [ + create_instruction("POP_BLOCK"), + *create_reset(), + create_instruction("JUMP_FORWARD", target=cleanup_complete_jump_target), + except_jump_target, + *create_reset(), + create_instruction("RERAISE"), + cleanup_complete_jump_target, + ] + else: + finally_exn_tab_end = create_instruction("RERAISE", arg=0) + finally_exn_tab_target = create_instruction("COPY", arg=3) + except_jump_target.exn_tab_entry = InstructionExnTabEntry( + except_jump_target, + finally_exn_tab_end, + finally_exn_tab_target, + self.stack_index + 2, + True, + ) + epilogue = [ + exn_tab_end, + *create_reset(), + create_instruction("JUMP_FORWARD", target=cleanup_complete_jump_target), + except_jump_target, # PUSH_EXC_INFO + *create_reset(), + finally_exn_tab_end, # RERAISE 0 + finally_exn_tab_target, # COPY 3 + create_instruction("POP_EXCEPT"), + create_instruction("RERAISE", arg=1), + cleanup_complete_jump_target, + ] + + cleanup[:] = epilogue + cleanup + return setup_finally + + def __call__(self, code_options, cleanup): + """ + Codegen based off of: + with ctx(args): + (rest) + """ + # NOTE: we assume that TOS is a context manager CLASS! 
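# Editor's note: a rough Python-level equivalent of the bytecode this
# __call__ emits, per the docstring above: the prefix calls the context
# manager class with constant args and enters it, and the cleanup
# instructions mirror the normal and exceptional exits of a `with` block.
# ctx_class and rest are illustrative stand-ins, not names from this file.
import contextlib

@contextlib.contextmanager
def ctx_class(*target_values):  # stand-in for the manager class assumed on TOS
    print("enter", target_values)
    try:
        yield
    finally:
        print("exit")  # what the generated __exit__ call sequence does

def rest():
    print("resumed bytecode runs here")

with ctx_class(1, 2):  # SETUP_WITH (<= 3.10) / BEFORE_WITH (3.11+)
    rest()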
+ load_args = [] + if self.target_values: + load_args = [ + create_instruction("LOAD_CONST", argval=val) + for val in self.target_values + ] + if sys.version_info < (3, 9): + with_cleanup_start = create_instruction("WITH_CLEANUP_START") + begin_finally = create_instruction("BEGIN_FINALLY") + cleanup[:] = [ + create_instruction("POP_BLOCK"), + begin_finally, + with_cleanup_start, + create_instruction("WITH_CLEANUP_FINISH"), + create_instruction("END_FINALLY"), + ] + cleanup + + return [ + *load_args, + create_instruction("CALL_FUNCTION", arg=len(load_args)), + create_instruction("SETUP_WITH", target=with_cleanup_start), + create_instruction("POP_TOP"), + ], None + elif sys.version_info < (3, 11): + with_except_start = create_instruction("WITH_EXCEPT_START") + pop_top_after_with_except_start = create_instruction("POP_TOP") + + cleanup_complete_jump_target = create_instruction("NOP") + + cleanup[:] = [ + create_instruction("POP_BLOCK"), + create_instruction("LOAD_CONST", argval=None), + create_instruction("DUP_TOP"), + create_instruction("DUP_TOP"), + create_instruction("CALL_FUNCTION", arg=3), + create_instruction("POP_TOP"), + create_instruction("JUMP_FORWARD", target=cleanup_complete_jump_target), + with_except_start, + create_instruction( + "POP_JUMP_IF_TRUE", target=pop_top_after_with_except_start + ), + create_instruction("RERAISE"), + pop_top_after_with_except_start, + create_instruction("POP_TOP"), + create_instruction("POP_TOP"), + create_instruction("POP_EXCEPT"), + create_instruction("POP_TOP"), + cleanup_complete_jump_target, + ] + cleanup + + return [ + *load_args, + create_instruction("CALL_FUNCTION", arg=len(load_args)), + create_instruction("SETUP_WITH", target=with_except_start), + create_instruction("POP_TOP"), + ], None + else: + pop_top_after_with_except_start = create_instruction("POP_TOP") + cleanup_complete_jump_target = create_instruction("NOP") + + def create_load_none(): + return create_instruction("LOAD_CONST", argval=None) + + exn_tab_1_begin = create_instruction("POP_TOP") + exn_tab_1_end = create_instruction("NOP") + exn_tab_1_target = create_instruction("PUSH_EXC_INFO") + exn_tab_2_end = create_instruction("RERAISE", arg=2) + exn_tab_2_target = create_instruction("COPY", arg=3) + + exn_tab_1_begin.exn_tab_entry = InstructionExnTabEntry( + exn_tab_1_begin, + exn_tab_1_end, + exn_tab_1_target, + self.stack_index + 1, + True, + ) + exn_tab_1_target.exn_tab_entry = InstructionExnTabEntry( + exn_tab_1_target, + exn_tab_2_end, + exn_tab_2_target, + self.stack_index + 3, + True, + ) + pop_top_after_with_except_start.exn_tab_entry = InstructionExnTabEntry( + pop_top_after_with_except_start, + pop_top_after_with_except_start, + exn_tab_2_target, + self.stack_index + 3, + True, + ) + + cleanup[:] = [ + exn_tab_1_end, + create_load_none(), + create_load_none(), + create_load_none(), + *create_call_function(2, False), + create_instruction("POP_TOP"), + create_instruction("JUMP_FORWARD", target=cleanup_complete_jump_target), + exn_tab_1_target, # PUSH_EXC_INFO + create_instruction("WITH_EXCEPT_START"), + create_instruction( + "POP_JUMP_FORWARD_IF_TRUE" + if sys.version_info < (3, 12) + else "POP_JUMP_IF_TRUE", + target=pop_top_after_with_except_start, + ), + exn_tab_2_end, # RERAISE 2 + exn_tab_2_target, # COPY 3 + create_instruction("POP_EXCEPT"), + create_instruction("RERAISE", arg=1), + pop_top_after_with_except_start, + create_instruction("POP_EXCEPT"), + create_instruction("POP_TOP"), + create_instruction("POP_TOP"), + cleanup_complete_jump_target, + ] + cleanup + + 
ret: List[Instruction] = [] + _initial_push_null(ret) + ret.extend( + [ + *load_args, + *create_call_function(len(load_args), False), + create_instruction("BEFORE_WITH"), + exn_tab_1_begin, # POP_TOP + ] + ) + return ret, exn_tab_1_target + + +@dataclasses.dataclass +class ResumeFunctionMetadata: + code: types.CodeType + instructions: List[Instruction] = dataclasses.field(default_factory=list) + # Python 3.11+ fields + # NOTE: Python 3.11 removed blocks, but for our purposes, a "block" consists + # of instructions of all exception table entries that have the same target. + + # map from PUSH_EXC_INFO's in the prefix to original block target offset + prefix_block_target_offset_remap: List[int] = dataclasses.field( + default_factory=list + ) + # map from new block target offsets to original block target offsets + block_target_offset_remap: Optional[Dict[int, int]] = None + + +def _filter_iter(l1, l2, cond): + """ + Two-pointer conditional filter. + e.g. _filter_iter(insts, sorted_offsets, lambda i, o: i.offset == o) + returns the instructions with offsets in sorted_offsets + """ + it = iter(l2) + res: List[Instruction] = [] + try: + cur = next(it) + for val in l1: + if cond(val, cur): + res.append(val) + cur = next(it) + except StopIteration: + pass + return res + + +def _load_tuple_and_call(tup): + insts: List[Instruction] = [] + _initial_push_null(insts) + for val in tup: + insts.append(create_instruction("LOAD_CONST", argval=val)) + insts.extend(create_call_function(len(tup), False)) + return insts + + +class ContinueExecutionCache: + cache = ExactWeakKeyDictionary() + generated_code_metadata = ExactWeakKeyDictionary() + + @classmethod + def lookup(cls, code, lineno, *key): + if code not in cls.cache: + cls.cache[code] = {} + key = tuple(key) + if key not in cls.cache[code]: + cls.cache[code][key] = cls.generate(code, lineno, *key) + return cls.cache[code][key] + + @classmethod + def generate( + cls, + code, + lineno, + offset: int, + setup_fn_target_offsets: Tuple[int], # only used in Python 3.11+ + nstack: int, + argnames: Tuple[str], + argnames_null: Tuple[str], + setup_fns: Tuple[ReenterWith], + stack_ctx_vars: Tuple[int, Tuple[Any]], + argnames_ctx_vars: Tuple[str, Tuple[Any]], + null_idxes: Tuple[int], + ) -> types.CodeType: + assert offset is not None + assert not ( + code.co_flags + & (CO_GENERATOR | CO_COROUTINE | CO_ITERABLE_COROUTINE | CO_ASYNC_GENERATOR) + ) + assert code.co_flags & CO_OPTIMIZED + if code in ContinueExecutionCache.generated_code_metadata: + return cls.generate_based_on_original_code_object( + code, + lineno, + offset, + setup_fn_target_offsets, + nstack, + argnames, + argnames_null, + setup_fns, + stack_ctx_vars, + argnames_ctx_vars, + null_idxes, + ) + + is_py311_plus = sys.version_info >= (3, 11) + meta = ResumeFunctionMetadata(code) + + def update(instructions: List[Instruction], code_options: Dict[str, Any]): + meta.instructions = copy.deepcopy(instructions) + + args = [f"___stack{i}" for i in range(nstack)] + args.extend(v for v in argnames if v not in args) + freevars = tuple(code_options["co_cellvars"] or []) + tuple( + code_options["co_freevars"] or [] + ) + freevars = tuple(sorted(freevars)) + code_options[ + "co_name" + ] = f"{TORCH_DYNAMO_RESUME_IN_PREFIX}_{code_options['co_name']}_at_{lineno}" + if is_py311_plus: + qualified_path = code_options["co_qualname"].rsplit(".", maxsplit=1) + if len(qualified_path) == 1: + code_options["co_qualname"] = code_options["co_name"] + else: + assert len(qualified_path) == 2 + module_name, co_name = qualified_path 
+ code_options[ + "co_qualname" + ] = f"{module_name}.{TORCH_DYNAMO_RESUME_IN_PREFIX}_{co_name}_at_{lineno}" + code_options["co_firstlineno"] = lineno + code_options["co_cellvars"] = () + code_options["co_freevars"] = freevars + code_options["co_argcount"] = len(args) + code_options["co_posonlyargcount"] = 0 + code_options["co_kwonlyargcount"] = 0 + code_options["co_varnames"] = tuple( + args + + [v for v in argnames_null if v not in args] + + [v for v in code_options["co_varnames"] if v not in args] + ) + code_options["co_flags"] = code_options["co_flags"] & ~( + CO_VARARGS | CO_VARKEYWORDS + ) + target = next(i for i in instructions if i.offset == offset) + + prefix = [] + if is_py311_plus: + if freevars: + prefix.append( + create_instruction("COPY_FREE_VARS", arg=len(freevars)) + ) + prefix.append(create_instruction("RESUME", arg=0)) + + cleanup: List[Instruction] = [] + hooks = {fn.stack_index: fn for fn in setup_fns} + hook_target_offsets = { + fn.stack_index: setup_fn_target_offsets[i] + for i, fn in enumerate(setup_fns) + } + offset_to_inst = {inst.offset: inst for inst in instructions} + # map old hook targets to new targets generated by the hook + old_hook_target_remap = {} + null_idxes_i = 0 + stack_ctx_vars_d = dict(stack_ctx_vars) # type: ignore[var-annotated,arg-type] + for i in range(nstack): + while ( + null_idxes_i < len(null_idxes) + and null_idxes[null_idxes_i] == i + null_idxes_i + ): + prefix.append(create_instruction("PUSH_NULL")) + null_idxes_i += 1 + prefix.append(create_instruction("LOAD_FAST", argval=f"___stack{i}")) + if i in hooks: + hook = hooks.pop(i) + hook_insts, exn_target = hook(code_options, cleanup) + prefix.extend(hook_insts) + if is_py311_plus: + hook_target_offset = hook_target_offsets.pop(i) + old_hook_target = offset_to_inst[hook_target_offset] + meta.prefix_block_target_offset_remap.append(hook_target_offset) + old_hook_target_remap[old_hook_target] = exn_target + real_i = i + null_idxes_i + if real_i in stack_ctx_vars_d: + # NOTE: we assume that current stack var is a context manager CLASS! + # Load args for context variable and construct it + prefix.extend(_load_tuple_and_call(stack_ctx_vars_d[real_i])) + + if is_py311_plus: + # reverse the mapping since targets of later/nested contexts are inserted + # into the mapping later, but show up earlier in the prefix. + meta.prefix_block_target_offset_remap = list( + reversed(meta.prefix_block_target_offset_remap) + ) + + assert not hooks + + # NOTE: we assume that local var is a context manager CLASS! 
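# Editor's note: _load_tuple_and_call(tup), defined earlier in this file,
# emits the bytecode for "call top-of-stack with tup as constant positional
# args"; the surrounding prefix uses it to rebuild context managers from
# their saved target values. A hypothetical function showing the same
# LOAD_CONST / call shape under dis:
import dis

def rebuild(ctx_class):
    # roughly what _load_tuple_and_call((10, 20)) produces, with ctx_class on the stack
    return ctx_class(10, 20)

dis.dis(rebuild)  # shows the constants being loaded and the call being emitted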
+ # initialize inactive context vars in argnames + for name, vals in argnames_ctx_vars: + prefix.append(create_instruction("LOAD_FAST", argval=name)) + prefix.extend(_load_tuple_and_call(vals)) + prefix.append(create_instruction("STORE_FAST", argval=name)) + + # 3.12+: store NULL into variables that were NULL + if argnames_null: + assert sys.version_info >= (3, 12) + for v in argnames_null: + assert v not in args + prefix.extend( + [ + create_instruction("PUSH_NULL"), + create_instruction("STORE_FAST", argval=v), + ] + ) + + prefix.append(create_jump_absolute(target)) + + # because the line number table monotonically increases from co_firstlineno + # remove starts_line for any instructions before the graph break instruction + # this will ensure the instructions after the break have the correct line numbers + for inst in instructions: + if inst.offset == target.offset: + break + inst.starts_line = None + if sys.version_info >= (3, 11): + inst.positions = None + + if cleanup: + prefix.extend(cleanup) + prefix.extend(cls.unreachable_codes(code_options)) + + # remap original instructions' exception table entries + if old_hook_target_remap: + assert is_py311_plus + for inst in instructions: + if ( + inst.exn_tab_entry + and inst.exn_tab_entry.target in old_hook_target_remap + ): + inst.exn_tab_entry.target = old_hook_target_remap[ + inst.exn_tab_entry.target + ] + + # TODO(jansel): add dead code elimination here + instructions[:] = prefix + instructions + + new_code = transform_code_object(code, update) + ContinueExecutionCache.generated_code_metadata[new_code] = meta + return new_code + + @staticmethod + def unreachable_codes(code_options) -> List[Instruction]: + """Codegen a `raise None` to make analysis work for unreachable code""" + return [ + create_instruction("LOAD_CONST", argval=None), + create_instruction("RAISE_VARARGS", arg=1), + ] + + @classmethod + def generate_based_on_original_code_object( + cls, code, lineno, offset: int, setup_fn_target_offsets: Tuple[int, ...], *args + ): + """ + This handles the case of generating a resume into code generated + to resume something else. We want to always generate starting + from the original code object so that if control flow paths + converge we only generated 1 resume function (rather than 2^n + resume functions). + """ + + meta: ResumeFunctionMetadata = ContinueExecutionCache.generated_code_metadata[ + code + ] + new_offset = None + + def find_new_offset( + instructions: List[Instruction], code_options: Dict[str, Any] + ): + nonlocal new_offset + (target,) = (i for i in instructions if i.offset == offset) + # match the functions starting at the last instruction as we have added a prefix + (new_target,) = ( + i2 + for i1, i2 in zip(reversed(instructions), reversed(meta.instructions)) + if i1 is target + ) + assert target.opcode == new_target.opcode + new_offset = new_target.offset + + transform_code_object(code, find_new_offset) + + if sys.version_info >= (3, 11): + # setup_fn_target_offsets currently contains the target offset of + # each setup_fn, based on `code`. When we codegen the resume function + # based on the original code object, `meta.code`, the offsets in + # setup_fn_target_offsets must be based on `meta.code` instead. 
+ if not meta.block_target_offset_remap: + block_target_offset_remap = meta.block_target_offset_remap = {} + + def remap_block_offsets( + instructions: List[Instruction], code_options: Dict[str, Any] + ): + # NOTE: each prefix block generates exactly one PUSH_EXC_INFO, + # so we can tell which block a prefix PUSH_EXC_INFO belongs to, + # by counting. Then we can use meta.prefix_block-target_offset_remap + # to determine where in the original code the PUSH_EXC_INFO offset + # replaced. + prefix_blocks: List[Instruction] = [] + for inst in instructions: + if len(prefix_blocks) == len( + meta.prefix_block_target_offset_remap + ): + break + if inst.opname == "PUSH_EXC_INFO": + prefix_blocks.append(inst) + + # offsets into prefix + for inst, o in zip( + prefix_blocks, meta.prefix_block_target_offset_remap + ): + block_target_offset_remap[cast(int, inst.offset)] = o + + # old bytecode targets are after the prefix PUSH_EXC_INFO's + old_start_offset = ( + cast(int, prefix_blocks[-1].offset) if prefix_blocks else -1 + ) + # offsets into old bytecode + old_inst_offsets = sorted( + n for n in setup_fn_target_offsets if n > old_start_offset + ) + targets = _filter_iter( + instructions, old_inst_offsets, lambda inst, o: inst.offset == o + ) + new_targets = _filter_iter( + zip(reversed(instructions), reversed(meta.instructions)), + targets, + lambda v1, v2: v1[0] is v2, + ) + for new, old in zip(new_targets, targets): + block_target_offset_remap[old.offset] = new[1].offset + + transform_code_object(code, remap_block_offsets) + + # if offset is not in setup_fn_target_offsets, it is an error + setup_fn_target_offsets = tuple( + meta.block_target_offset_remap[n] for n in setup_fn_target_offsets + ) + return ContinueExecutionCache.lookup( + meta.code, lineno, new_offset, setup_fn_target_offsets, *args + ) + + +""" +# partially finished support for with statements + +def convert_locals_to_cells( + instructions: List[Instruction], + code_options: Dict[str, Any]): + + code_options["co_cellvars"] = tuple( + var + for var in code_options["co_varnames"] + if var not in code_options["co_freevars"] + and not var.startswith("___stack") + ) + cell_and_free = code_options["co_cellvars"] + code_options["co_freevars"] + for inst in instructions: + if str(inst.argval).startswith("___stack"): + continue + elif inst.opname == "LOAD_FAST": + inst.opname = "LOAD_DEREF" + elif inst.opname == "STORE_FAST": + inst.opname = "STORE_DEREF" + elif inst.opname == "DELETE_FAST": + inst.opname = "DELETE_DEREF" + else: + continue + inst.opcode = dis.opmap[inst.opname] + assert inst.argval in cell_and_free, inst.argval + inst.arg = cell_and_free.index(inst.argval) + +def patch_setup_with( + instructions: List[Instruction], + code_options: Dict[str, Any] +): + nonlocal need_skip + need_skip = True + target_index = next( + idx for idx, i in enumerate(instructions) if i.offset == offset + ) + assert instructions[target_index].opname == "SETUP_WITH" + convert_locals_to_cells(instructions, code_options) + + stack_depth_before = nstack + stack_effect(instructions[target_index].opcode, + instructions[target_index].arg) + + inside_with = [] + inside_with_resume_at = None + stack_depth = stack_depth_before + idx = target_index + 1 + for idx in range(idx, len(instructions)): + inst = instructions[idx] + if inst.opname == "BEGIN_FINALLY": + inside_with_resume_at = inst + break + elif inst.target is not None: + unimplemented("jump from with not supported") + elif inst.opname in ("BEGIN_FINALLY", "WITH_CLEANUP_START", "WITH_CLEANUP_FINISH", 
"END_FINALLY", + "POP_FINALLY", "POP_EXCEPT", + "POP_BLOCK", "END_ASYNC_FOR"): + unimplemented("block ops not supported") + inside_with.append(inst) + stack_depth += stack_effect(inst.opcode, inst.arg) + assert inside_with_resume_at + + instructions = [ + create_instruction("LOAD_FAST", f"___stack{i}") for i in range(nstack) + ] + [ + create_instruction("SETUP_WITH", target=instructions[target_index].target) + ... call the function ... + unpack_tuple + ] + [ + create_instruction("JUMP_ABSOLUTE", target=inside_with_resume_at) + ] +""" diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/source.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/source.py new file mode 100644 index 0000000000000000000000000000000000000000..2d3a4424167da9a75d2f62bd51e2813de0baeadd --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/source.py @@ -0,0 +1,759 @@ +# mypy: allow-untyped-defs +import collections +import dataclasses +import enum +from typing import Any, Optional, Union + +from torch._guards import ChainedSource, GuardSource, Source + +from . import utils +from .bytecode_transformation import create_call_function, create_instruction +from .utils import enum_repr + + +# It shouldn't be supported to construct an NNModuleVariable inside an FSDP module, +# so those cases are omitted intentionally + +# represents nn.Modules tracked with NNModuleVariable (specialized is implicit in the variable name) +_GUARD_SOURCE_SPECIALIZED_NN_MODULE = { + GuardSource.LOCAL: GuardSource.LOCAL_SPECIALIZED_NN_MODULE, + GuardSource.GLOBAL: GuardSource.GLOBAL_SPECIALIZED_NN_MODULE, + GuardSource.LOCAL_SPECIALIZED_NN_MODULE: GuardSource.LOCAL_SPECIALIZED_NN_MODULE, + GuardSource.GLOBAL_SPECIALIZED_NN_MODULE: GuardSource.GLOBAL_SPECIALIZED_NN_MODULE, + # Just to ensure that guard_source() works + GuardSource.LOCAL_UNSPECIALIZED_NN_MODULE: GuardSource.LOCAL_UNSPECIALIZED_NN_MODULE, + GuardSource.GLOBAL_UNSPECIALIZED_NN_MODULE: GuardSource.GLOBAL_UNSPECIALIZED_NN_MODULE, + GuardSource.LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE: GuardSource.LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE, + GuardSource.GLOBAL_UNSPECIALIZED_BUILTIN_NN_MODULE: GuardSource.GLOBAL_UNSPECIALIZED_BUILTIN_NN_MODULE, +} + +# represents nn.Modules tracked with UnspecializedNNModuleVariable +_GUARD_SOURCE_UNSPECIALIZED_NN_MODULE = { + GuardSource.LOCAL: GuardSource.LOCAL_UNSPECIALIZED_NN_MODULE, + GuardSource.GLOBAL: GuardSource.GLOBAL_UNSPECIALIZED_NN_MODULE, + GuardSource.LOCAL_UNSPECIALIZED_NN_MODULE: GuardSource.LOCAL_UNSPECIALIZED_NN_MODULE, + GuardSource.GLOBAL_UNSPECIALIZED_NN_MODULE: GuardSource.GLOBAL_UNSPECIALIZED_NN_MODULE, + # this happens for an UnspecializedNNModule submodule on a NNModuleVariable + GuardSource.LOCAL_SPECIALIZED_NN_MODULE: GuardSource.LOCAL_UNSPECIALIZED_NN_MODULE, + GuardSource.GLOBAL_SPECIALIZED_NN_MODULE: GuardSource.GLOBAL_UNSPECIALIZED_NN_MODULE, + # Just to ensure that guard_source() works + GuardSource.LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE: GuardSource.LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE, + GuardSource.GLOBAL_UNSPECIALIZED_BUILTIN_NN_MODULE: GuardSource.GLOBAL_UNSPECIALIZED_BUILTIN_NN_MODULE, +} + +# represents nn.Modules tracked with UnspecializedBuiltinNNModuleVariable +_GUARD_SOURCE_UNSPECIALIZED_BUILTIN_NN_MODULE = { + GuardSource.LOCAL: GuardSource.LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE, + GuardSource.GLOBAL: GuardSource.GLOBAL_UNSPECIALIZED_BUILTIN_NN_MODULE, + GuardSource.LOCAL_UNSPECIALIZED_NN_MODULE: GuardSource.LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE, + 
GuardSource.GLOBAL_UNSPECIALIZED_NN_MODULE: GuardSource.GLOBAL_UNSPECIALIZED_BUILTIN_NN_MODULE, + GuardSource.LOCAL_SPECIALIZED_NN_MODULE: GuardSource.LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE, + GuardSource.GLOBAL_SPECIALIZED_NN_MODULE: GuardSource.GLOBAL_UNSPECIALIZED_BUILTIN_NN_MODULE, + # Just to ensure that guard_source() works + GuardSource.LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE: GuardSource.LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE, + GuardSource.GLOBAL_UNSPECIALIZED_BUILTIN_NN_MODULE: GuardSource.GLOBAL_UNSPECIALIZED_BUILTIN_NN_MODULE, +} + +_GUARD_SOURCE_FSDP_MODULE = { + GuardSource.LOCAL: GuardSource.LOCAL_FSDP_MODULE, + GuardSource.GLOBAL: GuardSource.GLOBAL_FSDP_MODULE, + GuardSource.LOCAL_SPECIALIZED_NN_MODULE: GuardSource.LOCAL_FSDP_MODULE, + GuardSource.GLOBAL_SPECIALIZED_NN_MODULE: GuardSource.GLOBAL_FSDP_MODULE, + GuardSource.LOCAL_FSDP_MODULE: GuardSource.LOCAL_FSDP_MODULE, + GuardSource.GLOBAL_FSDP_MODULE: GuardSource.GLOBAL_FSDP_MODULE, + GuardSource.LOCAL_UNSPECIALIZED_NN_MODULE: GuardSource.LOCAL_FSDP_MODULE, + GuardSource.GLOBAL_UNSPECIALIZED_NN_MODULE: GuardSource.GLOBAL_FSDP_MODULE, + GuardSource.LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE: GuardSource.LOCAL_FSDP_MODULE, + GuardSource.GLOBAL_UNSPECIALIZED_BUILTIN_NN_MODULE: GuardSource.GLOBAL_FSDP_MODULE, +} + + +def is_constant_source(source): + if isinstance(source, ConstantSource): + return True + try: + if source.guard_source() == GuardSource.CONSTANT: + return True + except NotImplementedError: + pass + + return False + + +def reconstruct_getitem( + source: Union["GetItemSource", "ODictGetItemSource"], codegen, index_is_slice +): + source.base.reconstruct(codegen) + if isinstance(source.index, Source): + source.index.reconstruct(codegen) + else: + if index_is_slice: + assert isinstance(source, GetItemSource) + codegen.append_output(codegen.create_load_const(source.unpack_slice())) + else: + codegen.append_output(codegen.create_load_const(source.index)) + + +@dataclasses.dataclass(frozen=True) +class LocalSource(Source): + local_name: str + cell_or_freevar: bool = False + + def reconstruct(self, codegen): + codegen.append_output(codegen.create_load(self.local_name)) + + def guard_source(self): + return GuardSource.LOCAL + + def name(self): + return f"L[{repr(self.local_name)}]" + + +@dataclasses.dataclass(frozen=True) +class SyntheticLocalSource(Source): + local_name: str + + def reconstruct(self, codegen): + codegen.append_output(codegen.create_load(self.local_name)) + + def guard_source(self): + return GuardSource.SYNTHETIC_LOCAL + + def name(self): + return f"SYNTHETIC_LOCAL[{self.local_name!r}]" + + +@dataclasses.dataclass(frozen=True) +class RandomValueSource(Source): + random_call_index: int + + def guard_source(self): + return GuardSource.RANDOM_VALUE + + def reconstruct(self, codegen): + codegen.append_output(codegen.create_load(codegen.tx.output.random_values_var)) + codegen.append_output(codegen.create_load_const(self.random_call_index)) + codegen.append_output(create_instruction("BINARY_SUBSCR")) + + def name(self): + return f"random_value_{self.random_call_index}" + + +@dataclasses.dataclass(frozen=True) +class GlobalSource(Source): + global_name: str + + def reconstruct(self, codegen): + codegen.append_output(codegen.create_load_global(self.global_name, add=True)) + + def guard_source(self): + return GuardSource.GLOBAL + + def name(self): + return f"G[{repr(self.global_name)}]" + + +@dataclasses.dataclass(frozen=True) +class GlobalWeakRefSource(Source): + global_name: str + + def reconstruct(self, codegen): + 
codegen.add_push_null( + lambda: codegen.append_output( + codegen.create_load_global(self.global_name, add=True) + ) + ) + codegen.extend_output(create_call_function(0, False)) + + def guard_source(self): + return GuardSource.GLOBAL + + def name(self): + return f"G[{repr(self.global_name)}]()" + + +@dataclasses.dataclass(frozen=True) +class WeakRefCallSource(ChainedSource): + def reconstruct(self, codegen): + codegen.add_push_null(lambda: self.base.reconstruct(codegen)) + codegen.extend_output(create_call_function(0, False)) + + def guard_source(self): + return self.base.guard_source() + + def name(self): + return f"{self.base.name()}()" + + +@dataclasses.dataclass(frozen=True) +class AttrSource(ChainedSource): + member: str + + def __post_init__(self): + assert self.base, "Can't construct an AttrSource without a valid base source" + if "." in self.member: + member_parts = self.member.split(".") + object.__setattr__( + self, "base", AttrSource(self.base, ".".join(member_parts[:-1])) + ) + object.__setattr__(self, "member", member_parts[-1]) + + def reconstruct(self, codegen): + self.base.reconstruct(codegen) + codegen.extend_output(codegen.create_load_attrs(self.member)) + + def guard_source(self): + return self.base.guard_source() + + def name(self): + if not self.member.isidentifier(): + return f"getattr({self.base.name()}, {self.member!r})" + return f"{self.base.name()}.{self.member}" + + +# Represents tensor.grad source. It could be represented by AttrSource as well. +# But we could access the grad field on the tensor directly in C++ without going +# through the Python bytecodes. Therefore, we use a separate source for the grad +# field. +@dataclasses.dataclass(frozen=True) +class GradSource(ChainedSource): + member: str = "grad" + + def reconstruct(self, codegen): + self.base.reconstruct(codegen) + codegen.extend_output(codegen.create_load_attrs(self.member)) + + def guard_source(self): + return self.base.guard_source() + + def name(self): + return f"{self.base.name()}.{self.member}" + + +@dataclasses.dataclass(frozen=True) +class ParamBufferSource(AttrSource): + def guard_source(self): + return _GUARD_SOURCE_SPECIALIZED_NN_MODULE[self.base.guard_source()] + + +# Special AttrSource to differentiate module._buffers or module._parameters +@dataclasses.dataclass(frozen=True) +class UnspecializedParamBufferSource(AttrSource): + pass + + +# This source is intended to be used in places where a source is needed but it is expected +# that the symbol will be simplified out later on. Symbols with ephemeral sources are +# prioritized to be simplified out when e.g. compared against a symbol without an ephemeral +# source. Guarding on this source is an error. +# +# Example: During subclass view fake-ification, any closed-over ViewFunc state should be +# symbolicized / fake-ified to avoid invalid specialization during view replay. This source +# is useful for symbols utilized in the middle of the view chain that are not expected to be +# present within the final view shape metadata.
+@dataclasses.dataclass(frozen=True) +class EphemeralSource(Source): + desc: Optional[str] = None + + def guard_source(self): + return GuardSource.EPHEMERAL + + def name(self): + return f"<ephemeral{': ' + self.desc if self.desc is not None else ''}>" + + def make_guard(self): + raise NotImplementedError + + def is_ephemeral(self): + return True + + +class TensorProperty(enum.Enum): + SIZE = 0 + STRIDE = 1 + STORAGE_OFFSET = 2 + + def method_name(self): + if self is TensorProperty.SIZE: + return "size" + elif self is TensorProperty.STRIDE: + return "stride" + elif self is TensorProperty.STORAGE_OFFSET: + return "storage_offset" + + +@dataclasses.dataclass(frozen=True) +class TensorPropertySource(ChainedSource): + prop: TensorProperty + idx: Optional[int] = None # None for STORAGE_OFFSET + + def __post_init__(self): + assert self.base is not None + if self.prop is TensorProperty.STORAGE_OFFSET: + assert self.idx is None + else: + assert self.idx is not None + + def reconstruct(self, codegen): + def gen_fn(): + self.base.reconstruct(codegen) + codegen.append_output(codegen.create_load_attr(self.prop.method_name())) + + codegen.add_push_null(gen_fn) + if self.idx is not None: + codegen.append_output(codegen.create_load_const(self.idx)) + codegen.extend_output( + create_call_function(1 if self.idx is not None else 0, False) + ) + + def guard_source(self): + return self.base.guard_source() + + def name(self): + if self.prop is TensorProperty.SIZE: + return f"{self.base.name()}.size()[{self.idx}]" + elif self.prop is TensorProperty.STRIDE: + return f"{self.base.name()}.stride()[{self.idx}]" + elif self.prop is TensorProperty.STORAGE_OFFSET: + assert self.idx is None + return f"{self.base.name()}.storage_offset()" + else: + raise AssertionError(f"unhandled {self.prop}") + + +@dataclasses.dataclass(frozen=True) +class NegateSource(ChainedSource): + def __post_init__(self): + assert self.base is not None + + def reconstruct(self, codegen): + raise NotImplementedError + + def guard_source(self): + return self.base.guard_source() + + def name(self): + # NB: use method call so that function stripping regexes work + return f"{self.base.name()}.__neg__()" + + +@dataclasses.dataclass(frozen=True) +class ConvertIntSource(ChainedSource): + def __post_init__(self): + assert self.base is not None + + def reconstruct(self, codegen): + self.base.reconstruct(codegen) + + def guard_source(self): + return self.base.guard_source() + + def name(self): + return f"cast_symbool_to_symint_guardless({self.base.name()})" + + +@dataclasses.dataclass(frozen=True) +class FlattenScriptObjectSource(ChainedSource): + def __post_init__(self): + assert self.base is not None + + def reconstruct(self, codegen): + self.base.reconstruct(codegen) + + def guard_source(self): + return self.base.guard_source() + + def name(self): + return f"{self.base.name()}.__obj_flatten__()" + + +@dataclasses.dataclass(frozen=True) +class ScriptObjectQualifiedNameSource(ChainedSource): + def __post_init__(self): + assert self.base is not None + + def reconstruct(self, codegen): + self.base.reconstruct(codegen) + + def guard_source(self): + return self.base.guard_source() + + def name(self): + return f"{self.base.name()}._type().qualified_name()" + + +class AttrProxySource(ChainedSource): + def reconstruct(self, codegen): + self.base.reconstruct(codegen) + + def guard_source(self): + return self.base.guard_source() + + def name(self): + return f"{self.base.name()}.get_base()" + + +@dataclasses.dataclass(frozen=True) +class DefaultsSource(ChainedSource): + idx_key: Union[int, str] + is_kw: bool = False + field:
str = dataclasses.field(init=False, repr=False, compare=False) + _name: str = dataclasses.field(init=False, repr=False, compare=False) + + def __post_init__(self): + assert ( + self.base + ), "Base must be a valid source in order to properly track and guard this Defaults to its origin." + if self.is_kw: + assert isinstance(self.idx_key, str) + object.__setattr__(self, "field", "__kwdefaults__") + object.__setattr__( + self, "_name", f"{self.base.name()}.{self.field}['{self.idx_key}']" + ) + else: + assert isinstance(self.idx_key, int) + object.__setattr__(self, "field", "__defaults__") + object.__setattr__( + self, "_name", f"{self.base.name()}.{self.field}[{self.idx_key}]" + ) + + def reconstruct(self, codegen): + self.base.reconstruct(codegen) + codegen.extend_output(codegen.create_load_attrs(self.field)) + codegen.append_output(codegen.create_load_const(self.idx_key)) + codegen.append_output(create_instruction("BINARY_SUBSCR")) + + def guard_source(self): + return self.base.guard_source() + + def name(self): + return self._name + + +@dataclasses.dataclass(frozen=True) +class GetItemSource(ChainedSource): + index: Any + index_is_slice: bool = False + + def __post_init__(self): + assert self.base is not None + if isinstance(self.index, slice): + # store the hashable version of the slice so the whole GetItemSource is hashable + super().__setattr__("index", self.index.__reduce__()) + super().__setattr__("index_is_slice", True) + + def reconstruct(self, codegen): + reconstruct_getitem(self, codegen, index_is_slice=self.index_is_slice) + codegen.append_output(create_instruction("BINARY_SUBSCR")) + + def guard_source(self): + return self.base.guard_source() + + def unpack_slice(self): + assert self.index_is_slice + slice_class, slice_args = self.index + return slice_class(*slice_args) + + def name(self): + # Index can be of following types + # 1) ConstDictKeySource + # 2) enum.Enum + # 3) index is a slice - example 1:4 + # 4) index is a constant - example string, integer + if isinstance(self.index, Source): + if not isinstance(self.index, ConstDictKeySource): + raise ValueError( + "GetItemSource index must be a constant, enum or ConstDictKeySource" + ) + return f"{self.base.name()}[{self.index.name()}]" + elif self.index_is_slice: + return f"{self.base.name()}[{self.unpack_slice()!r}]" + elif isinstance(self.index, enum.Enum): + return f"{self.base.name()}[{enum_repr(self.index, self.guard_source().is_local())}]" + else: + return f"{self.base.name()}[{self.index!r}]" + + +@dataclasses.dataclass(frozen=True) +class ConstDictKeySource(GetItemSource): + def is_dict_key(self): + return True + + def reconstruct(self, codegen): + codegen.add_push_null( + lambda: codegen.load_import_from(utils.__name__, "dict_keys_getitem") + ) + self.base.reconstruct(codegen) + codegen.append_output(codegen.create_load_const(self.index)) + codegen.extend_output(create_call_function(2, False)) + + def name(self): + # The list creation will be CSE'd by PyExprCSEPass + return f"list({self.base.name()}.keys())[{self.index!r}]" + + +@dataclasses.dataclass(frozen=True) +class TupleIteratorGetItemSource(GetItemSource): + def reconstruct(self, codegen): + codegen.add_push_null( + lambda: codegen.load_import_from(utils.__name__, "tuple_iterator_getitem") + ) + self.base.reconstruct(codegen) + codegen.append_output(codegen.create_load_const(self.index)) + codegen.extend_output(create_call_function(2, False)) + + def name(self): + return f"___tuple_iterator_getitem({self.base.name()}, {self.index!r})" + + 
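+# A minimal illustrative sketch of how chained sources compose (the variable
+# names below are hypothetical, not part of this file): name() builds the
+# guard expression outward-in, while reconstruct() emits the matching bytecode
+# (LOAD_FAST, then LOAD_ATTR / BINARY_SUBSCR):
+#
+#   base = LocalSource("mod")          # name() == "L['mod']"
+#   attr = AttrSource(base, "layers")  # name() == "L['mod'].layers"
+#   item = GetItemSource(attr, 0)      # name() == "L['mod'].layers[0]"
+#
+# guard_source() is delegated down the chain, so `item` inherits it from `base`.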
+@dataclasses.dataclass(frozen=True) +class TypeSource(ChainedSource): + def __post_init__(self): + assert self.base is not None + + def reconstruct(self, codegen): + codegen.add_push_null(lambda: codegen.load_import_from("builtins", "type")) + self.base.reconstruct(codegen) + codegen.extend_output(create_call_function(1, False)) + + def guard_source(self): + return self.base.guard_source() + + def name(self): + return f"type({self.base.name()})" + + +@dataclasses.dataclass(frozen=True) +class ODictGetItemSource(ChainedSource): + index: Any + + def __post_init__(self): + assert self.base is not None + + def reconstruct(self, codegen): + codegen.add_push_null( + lambda: codegen.append_output( + codegen._create_load_const(collections.OrderedDict.__getitem__) + ) + ) + reconstruct_getitem(self, codegen, index_is_slice=False) + codegen.extend_output(create_call_function(2, False)) + + def guard_source(self): + return self.base.guard_source() + + def name(self): + if isinstance(self.index, type): + rep = f'__load_module("{self.index.__module__}").{self.index.__qualname__}' + return f"___odict_getitem({self.base.name()}, {rep})" + elif isinstance(self.index, Source): + return f"___odict_getitem({self.base.name()}, {self.index.name()})" + else: + return f"___odict_getitem({self.base.name()}, {self.index!r})" + + +@dataclasses.dataclass(frozen=True) +class OptimizerSource(ChainedSource): + def reconstruct(self, codegen): + self.base.reconstruct(codegen) + + def guard_source(self): + return self.base.guard_source() + + def name(self): + return self.base.name() + + +@dataclasses.dataclass(frozen=True) +class NNModuleSource(ChainedSource): + def reconstruct(self, codegen): + self.base.reconstruct(codegen) + + def guard_source(self): + return _GUARD_SOURCE_SPECIALIZED_NN_MODULE[self.base.guard_source()] + + def name(self): + return self.base.name() + + +@dataclasses.dataclass(frozen=True) +class UnspecializedNNModuleSource(NNModuleSource): + def guard_source(self): + return _GUARD_SOURCE_UNSPECIALIZED_NN_MODULE[self.base.guard_source()] + + +@dataclasses.dataclass(frozen=True) +class UnspecializedBuiltinNNModuleSource(UnspecializedNNModuleSource): + def guard_source(self): + return _GUARD_SOURCE_UNSPECIALIZED_BUILTIN_NN_MODULE[self.base.guard_source()] + + +@dataclasses.dataclass(frozen=True) +class FSDPNNModuleSource(NNModuleSource): + def guard_source(self): + return _GUARD_SOURCE_FSDP_MODULE[self.base.guard_source()] + + +@dataclasses.dataclass(frozen=True) +class GlobalStateSource(Source): + def name(self): + return "" + + def guard_source(self): + return GuardSource.GLOBAL + + +@dataclasses.dataclass(frozen=True) +class TorchFunctionModeStackSource(Source): + ind: int + + def name(self): + return "" + + def _get_index(self): + from .variables.torch_function import TorchFunctionModeStackVariable + + return TorchFunctionModeStackVariable.get_mode_index(self.ind) + + def reconstruct(self, codegen): + codegen.add_push_null( + lambda: codegen.load_import_from( + utils.__name__, "get_torch_function_mode_stack_at" + ) + ) + codegen.extend_output([codegen.create_load_const(self._get_index())]) + codegen.extend_output(create_call_function(1, False)) + + def guard_source(self): + return GuardSource.GLOBAL + + +@dataclasses.dataclass(frozen=True) +class ConstantSource(Source): + source_name: str + + def reconstruct(self, codegen): + codegen.append_output(codegen.create_load_global(self.source_name, add=False)) + + def guard_source(self): + return GuardSource.CONSTANT + + def name(self): + return 
self.source_name + + def make_guard(self, fn): + raise NotImplementedError + + +@dataclasses.dataclass(frozen=True) +class NumpyTensorSource(ChainedSource): + def name(self) -> str: + return f"___from_numpy({self.base.name()})" + + def guard_source(self): + return self.base.guard_source() + + def reconstruct(self, codegen): + codegen.add_push_null(lambda: codegen.load_import_from("torch", "as_tensor")) + self.base.reconstruct(codegen) + codegen.extend_output(create_call_function(1, False)) + + +@dataclasses.dataclass(frozen=True) +class SubclassAttrListSource(ChainedSource): + def name(self) -> str: + return f"{self.base.name()}.__tensor_flatten__()[0]" + + def guard_source(self): + return self.base.guard_source() + + +# NB: We don't expect you to actually ever generate guards against this +# source, it is ephemeral +@dataclasses.dataclass(frozen=True) +class FloatTensorSource(ChainedSource): + def name(self) -> str: + return f"___as_tensor({self.base.name()})" + + def guard_source(self): + return self.base.guard_source() + + +@dataclasses.dataclass(frozen=True) +class CallMethodItemSource(ChainedSource): + def name(self) -> str: + return f"{self.base.name()}.item()" + + def guard_source(self): + return self.base.guard_source() + + +# This is a synthetic source that is associated with the singleton +# shape env guard we always register for all frames. We get the actual +# guard contents from the ambient ShapeEnv +@dataclasses.dataclass(frozen=True) +class ShapeEnvSource(Source): + def name(self): + return "" + + def guard_source(self): + return GuardSource.SHAPE_ENV + + +@dataclasses.dataclass(frozen=True) +class BackwardStateSource(Source): + def name(self): + return "" + + def guard_source(self): + return GuardSource.BACKWARD_STATE + + +def is_from_local_source(source: Source, *, allow_cell_or_freevar=True): + if isinstance(source, ChainedSource): + return is_from_local_source( + source.base, allow_cell_or_freevar=allow_cell_or_freevar + ) + if not isinstance(source, LocalSource): + return False + if not allow_cell_or_freevar and source.cell_or_freevar: + return False + return True + + +def is_from_unspecialized_param_buffer_source(source: Source): + if isinstance(source, UnspecializedParamBufferSource): + return True + if isinstance(source, ChainedSource): + return is_from_unspecialized_param_buffer_source(source.base) + return False + + +def is_from_flatten_script_object_source(source: Source): + if isinstance(source, FlattenScriptObjectSource): + return True + elif isinstance(source, ChainedSource): + return is_from_flatten_script_object_source(source.base) + return False + + +def is_from_optimizer_source(source: Source): + if isinstance(source, OptimizerSource): + return True + if isinstance(source, ChainedSource): + return is_from_optimizer_source(source.base) + return False + + +# TODO: can probably write a generic "test this on everything in the chain" +# helper +def is_from_defaults(source: Source): + if isinstance(source, DefaultsSource): + return True + if isinstance(source, ChainedSource): + return is_from_defaults(source.base) + return False + + +def is_cell_contents(source: Source): + return isinstance(source, AttrSource) and source.member == "cell_contents" diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/trace_rules.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/trace_rules.py new file mode 100644 index 0000000000000000000000000000000000000000..01cc4dc51be129bf09c6c5235bfacae9583919d9 --- /dev/null +++ 
b/pllava/lib/python3.10/site-packages/torch/_dynamo/trace_rules.py @@ -0,0 +1,3637 @@ +# mypy: allow-untyped-defs +import _collections_abc +import _weakrefset +import abc +import builtins +import collections +import contextlib +import copy +import copyreg +import dataclasses +import enum +import functools +import importlib +import inspect +import linecache +import logging +import multiprocessing +import operator +import os +import posixpath +import random +import re +import selectors +import signal +import sys +import tempfile +import threading +import tokenize +import traceback +import types +import typing +import unittest +import weakref +from collections import defaultdict +from pathlib import Path +from typing import Any, Callable, cast, Dict, List, Optional, Set, Type, Union + +import torch +import torch._inductor.test_operators +import torch.distributed +import torch.utils._content_store +from torch.utils import _config_module + +from .resume_execution import TORCH_DYNAMO_RESUME_IN_PREFIX +from .utils import getfile, hashable, NP_SUPPORTED_MODULES, unwrap_if_wrapper +from .variables import ( + BuiltinVariable, + FunctionalCallVariable, + FunctorchHigherOrderVariable, + NestedUserFunctionVariable, + PolyfilledFunctionVariable, + SkipFunctionVariable, + TorchInGraphFunctionVariable, + UserFunctionVariable, + UserMethodVariable, +) + + +np: Optional[types.ModuleType] = None +try: + import numpy as np +except ModuleNotFoundError: + pass + + +if typing.TYPE_CHECKING: + from .variables.base import VariableTracker + + +""" +A note on skip/inline rules: + +Dynamo consults this file to determine whether a function should be inlined or skipped. + +A skip applies at the frame boundary, meaning dynamo either triggers a graph break +at the beginning of the frame or attempts to trace/inline the whole frame. When skipping +a frame, recursively called frames are still traced by dynamo unless also skipped. + +Skipfiles (skipped at the file level instead of the function level) still apply on a +frame-by-frame boundary as dynamo traces, but apply to all functions in that file. + +@skip is a helper decorator that can be applied to your function to cause it to be +included here. + +Dynamo skip/inline rules & priorities are defined as follows: +* Inline is the default behavior and will be used unless explicitly skipped. +* Dynamo has two SKIPLISTs: BUILTIN_SKIPLIST and THIRDPARTY_SKIPLIST. + * BUILTIN_SKIPLIST contains builtin python modules, such as abc, collections, etc. + * THIRDPARTY_SKIPLIST contains common third party libraries, such as numpy, pandas, etc. +* Functions in these two SKIPLISTs are always skipped, except: + * They have an explicitly defined rule in `manual_torch_name_rule_map`; + * The corresponding python module has been put into MOD_INLINELIST. +* PyTorch (torch) is in the BUILTIN_SKIPLIST by default, but there are many cases + where we want to inline functions under the torch namespace. + We should specify an inline rule for such functions in `manual_torch_name_rule_map` or + put the corresponding python module into MOD_INLINELIST to make dynamo inline them. +* If you call functions under skipped modules/files, Dynamo will wrap these functions + as SkipFunctionVariable. There are a few functions (e.g., collections.OrderedDict) for which + we have special handling in SkipFunctionVariable.call_function.
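+
+As a rough sketch (illustrative pseudo-code only, not the actual implementation;
+`qualname_of` and `module_of` are hypothetical helpers), the rules above combine as:
+
+    def lookup(fn):
+        if qualname_of(fn) in manual_torch_name_rule_map:  # highest priority
+            return manual_torch_name_rule_map[qualname_of(fn)]
+        if module_of(fn) in MOD_INLINELIST:
+            return "inline"
+        if module_of(fn) in BUILTIN_SKIPLIST or module_of(fn) in THIRDPARTY_SKIPLIST:
+            return "skip"
+        return "inline"  # default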
+ +Overall: *_INLINELIST has precedence over *_SKIPLIST, which has precedence over the DEFAULT (inline). + +To figure out what the behavior is, check the following list in order: +* `manual_torch_name_rule_map` (Inline if YES) +* MOD_INLINELIST (Inline if YES) +* BUILTIN_SKIPLIST & THIRDPARTY_SKIPLIST (Skip if YES) +* Inline by default + +In general, if you want to force inline a function or module, please consider adding +the function's python module to MOD_INLINELIST first. +Use the `manual_torch_name_rule_map` only when there are other functions under the same module that +you don't want to inline. +""" + +""" +Map of function objects to their tracing rules (Dynamo variables). +* TorchInGraphFunctionVariable: The functions should be put into the FX graph or can be constant folded. E.g., + - torch.add: should be put into the FX graph. + - torch.is_floating_point: constant folded. +* SkipFunctionVariable: The objects should be skipped from tracing. +* UserFunctionVariable: The functions should be inlined. + +For developers: If you add/remove a torch level API, it may trigger failures from +test/dynamo/test_trace_rules.py:test_torch_name_rule_map_updated. To fix the failures: +If you are adding a new torch level API or Dynamo implementation: +* Add the name with the corresponding tracing rule to this map + if you are adding a new in graph function or Dynamo implementation for an existing function. +* Remove the object name from test/dynamo/test_trace_rules.ignored_c_binding_in_graph_function_names if it's there. + +If you are removing an existing torch level API: +* Remove the entry representing the API from this map or from test/dynamo/test_trace_rules.ignored_c_binding_in_graph_function_names, + depending on where it is. + + +""" +manual_torch_name_rule_map = { + "torch.onnx.is_in_onnx_export": TorchInGraphFunctionVariable, + "torch.onnx.operators.shape_as_tensor": TorchInGraphFunctionVariable, + "torch.overrides.is_tensor_like": TorchInGraphFunctionVariable, + "torch.jit.is_scripting": TorchInGraphFunctionVariable, + "torch.jit.is_tracing": TorchInGraphFunctionVariable, + "torch.jit.annotate": TorchInGraphFunctionVariable, + "torch.distributed.is_available": TorchInGraphFunctionVariable, + "torch.distributed.is_initialized": TorchInGraphFunctionVariable, + "torch.distributed.get_rank": TorchInGraphFunctionVariable, + "torch.distributed.get_world_size": TorchInGraphFunctionVariable, + "torch.distributed.tensor._api.DTensor#from_local": TorchInGraphFunctionVariable, + "torch.distributed.distributed_c10d._get_group_size_by_name": TorchInGraphFunctionVariable, + "torch.distributed.distributed_c10d._resolve_group_name_by_ranks_and_tag": TorchInGraphFunctionVariable, + "torch.distributed.distributed_c10d._get_group_tag": TorchInGraphFunctionVariable, + "torch.distributed.distributed_c10d.get_process_group_ranks": TorchInGraphFunctionVariable, + "torch._utils.is_compiling": TorchInGraphFunctionVariable, + "torch.fx._symbolic_trace.is_fx_tracing": TorchInGraphFunctionVariable, + "torch._dynamo.external_utils.is_compiling": TorchInGraphFunctionVariable, + "torch.compiler.is_compiling": TorchInGraphFunctionVariable, + "torch.compiler.is_dynamo_compiling": TorchInGraphFunctionVariable, + "torch.autograd._profiler_enabled": SkipFunctionVariable, + "torch._C._to_dlpack": SkipFunctionVariable, + "torch.to_dlpack": SkipFunctionVariable, + # We graph break on RNG state setters or getters like + # `torch.get_rng_state` or `torch.set_rng_state`.
These functions + # are not aten operations and therefore they are completely ignored + # by the AOT dispatcher. As a result, the AOT graph does not have + # these setter or getter functions, producing an incorrect graph + # when it comes to rng states. + "torch.default_generator#get_state": SkipFunctionVariable, + "torch._C.Generator#get_state": SkipFunctionVariable, + "torch.get_rng_state": SkipFunctionVariable, + "torch.cuda.get_rng_state": SkipFunctionVariable, + "torch.default_generator#set_state": SkipFunctionVariable, + "torch._C.Generator#set_state": SkipFunctionVariable, + "torch.set_rng_state": SkipFunctionVariable, + "torch.cuda.set_rng_state": SkipFunctionVariable, + # https://github.com/pytorch/pytorch/issues/107187 + "torch.manual_seed": SkipFunctionVariable, + # https://github.com/pytorch/pytorch/issues/93501 + "torch.nn.utils.rnn.pack_padded_sequence": SkipFunctionVariable, + "torch.nn.Parameter": TorchInGraphFunctionVariable, + "torch.nn.Buffer": TorchInGraphFunctionVariable, + "torch._nested_tensor_from_mask": SkipFunctionVariable, + "torch._nested_from_padded": SkipFunctionVariable, + "torch.nested.nested_tensor_from_jagged": UserFunctionVariable, + # symbol operators implemented in Python + "torch.sym_not": TorchInGraphFunctionVariable, + "torch.sym_float": TorchInGraphFunctionVariable, + "torch.sym_int": TorchInGraphFunctionVariable, + "torch.sym_max": TorchInGraphFunctionVariable, + "torch.sym_min": TorchInGraphFunctionVariable, + "torch.sym_sqrt": TorchInGraphFunctionVariable, + "torch.sym_ite": TorchInGraphFunctionVariable, + "torch.Tensor#_make_wrapper_subclass": SkipFunctionVariable, + "torch.Tensor#__init__": SkipFunctionVariable, + "torch.cuda.set_device": SkipFunctionVariable, + "torch.cuda.current_device": SkipFunctionVariable, + "torch._C.autocast_decrement_nesting": SkipFunctionVariable, + "torch._C.autocast_increment_nesting": SkipFunctionVariable, + "torch.autograd.grad": SkipFunctionVariable, + "torch.autograd.backward": SkipFunctionVariable, + "torch._C.clear_autocast_cache": SkipFunctionVariable, + "torch.distributions.constraints.is_dependent": SkipFunctionVariable, + "torch.jit.isinstance": SkipFunctionVariable, + "torch._C.set_anomaly_enabled": SkipFunctionVariable, + "torch._C.set_autocast_cache_enabled": SkipFunctionVariable, + "torch._C.set_autocast_cpu_dtype": SkipFunctionVariable, + "torch._C.set_autocast_cpu_enabled": SkipFunctionVariable, + "torch._C.set_autocast_enabled": SkipFunctionVariable, + "torch._C.set_autocast_gpu_dtype": SkipFunctionVariable, + "torch._C.set_autocast_ipu_dtype": SkipFunctionVariable, + "torch._C.set_autocast_ipu_enabled": SkipFunctionVariable, + "torch._C.set_autocast_xla_dtype": SkipFunctionVariable, + "torch._C.set_autocast_xla_enabled": SkipFunctionVariable, + "torch.resize_as_": SkipFunctionVariable, + "torch.resize_as_sparse_": SkipFunctionVariable, + "torch.get_default_device": TorchInGraphFunctionVariable, + # functorch/vmap + "torch._functorch.vmap._check_int_or_none": UserFunctionVariable, + "torch._functorch.vmap._check_out_dims_is_int_or_int_pytree": UserFunctionVariable, + "torch._functorch.vmap._check_randomness_arg": UserFunctionVariable, + "torch._functorch.vmap._chunked_vmap": UserFunctionVariable, + "torch._functorch.vmap._concat_chunked_outputs": UserFunctionVariable, + "torch._functorch.vmap._create_batched_inputs": UserFunctionVariable, + "torch._functorch.vmap._flat_vmap": UserFunctionVariable, + "torch._functorch.vmap._flatten_chunks_output": UserFunctionVariable, + 
"torch._functorch.vmap._get_chunked_inputs": UserFunctionVariable, + "torch._functorch.vmap._get_name": UserFunctionVariable, + "torch._functorch.vmap._maybe_remove_batch_dim": UserFunctionVariable, + "torch._functorch.vmap._num_outputs": UserFunctionVariable, + "torch._functorch.vmap._process_batched_inputs": UserFunctionVariable, + "torch._functorch.vmap._unwrap_batched": UserFunctionVariable, + "torch._functorch.vmap._validate_and_get_batch_size": UserFunctionVariable, + "torch._functorch.vmap.doesnt_support_saved_tensors_hooks": UserFunctionVariable, + "torch._functorch.vmap.get_chunk_sizes": UserFunctionVariable, + # lazy_load_decompositions uses a lock that is not supported yet in dynamo + # "torch._functorch.vmap.lazy_load_decompositions": UserFunctionVariable, + "torch._functorch.vmap.restore_vmap": UserFunctionVariable, + "torch._functorch.apis.vmap": UserFunctionVariable, + "torch._functorch.vmap.unwrap_batched": UserFunctionVariable, + "torch._functorch.vmap.vmap_impl": FunctorchHigherOrderVariable, + "torch._functorch.vmap.wrap_batched": UserFunctionVariable, + # functorch/grad + "torch._functorch.eager_transforms.grad_impl": FunctorchHigherOrderVariable, + "torch._functorch.apis.grad_and_value": UserFunctionVariable, + "torch._functorch.eager_transforms._as_tuple": UserFunctionVariable, + "torch._functorch.eager_transforms._check_unique_non_empty": UserFunctionVariable, + "torch._functorch.eager_transforms._create_differentiable": UserFunctionVariable, + "torch._functorch.eager_transforms._slice_argnums": UserFunctionVariable, + "torch._functorch.eager_transforms._undo_create_differentiable": UserFunctionVariable, + "torch._functorch.eager_transforms._validate_and_wrap_argnum": UserFunctionVariable, + "torch._functorch.eager_transforms._validate_and_wrap_argnums": UserFunctionVariable, + "torch._functorch.eager_transforms._wrap_all_tensors": UserFunctionVariable, + "torch._functorch.eager_transforms._wrap_tensor_for_grad": UserFunctionVariable, + # functorch/jacrev + "torch._functorch.eager_transforms.jacrev": FunctorchHigherOrderVariable, + "torch._functorch.eager_transforms.error_if_complex": UserFunctionVariable, + "torch._functorch.eager_transforms._chunked_standard_basis_for_": UserFunctionVariable, + "torch._functorch.eager_transforms._safe_zero_index": UserFunctionVariable, + # functorch/vjp + "torch._functorch.eager_transforms.vjp": FunctorchHigherOrderVariable, + "torch._functorch.eager_transforms._vjp_with_argnums": UserFunctionVariable, + "torch._functorch.eager_transforms.assert_non_empty_tensor_output": UserFunctionVariable, + # functorch/jvp + "torch._functorch.eager_transforms._jvp_with_argnums": UserFunctionVariable, + "torch._functorch.eager_transforms.jvp": FunctorchHigherOrderVariable, + "torch._functorch.eager_transforms._replace_args": UserFunctionVariable, + "torch._functorch.eager_transforms.safe_unpack_dual": UserFunctionVariable, + "torch._functorch.eager_transforms.assert_non_empty_list_of_tensors": UserFunctionVariable, + "torch._functorch.eager_transforms.assert_output_is_tensor_or_tensors": UserFunctionVariable, + "torch.autograd.forward_ad.enter_dual_level": UserFunctionVariable, + "torch.autograd.forward_ad.exit_dual_level": UserFunctionVariable, + "torch.autograd.forward_ad.make_dual": UserFunctionVariable, + "torch.autograd.forward_ad.unpack_dual": UserFunctionVariable, + # functorch/linearize + "torch._functorch.eager_transforms.linearize": FunctorchHigherOrderVariable, + # functorch/jacfwd + "torch._functorch.eager_transforms.jacfwd": 
FunctorchHigherOrderVariable, + "torch._functorch.eager_transforms._construct_standard_basis_for": UserFunctionVariable, + "torch._functorch.eager_transforms.safe_unflatten": UserFunctionVariable, + # functorch/hessian + "torch._functorch.eager_transforms.hessian": FunctorchHigherOrderVariable, + # functional_call + "torch._functorch.functional_call.functional_call": FunctionalCallVariable, + "torch.nn.utils.stateless._groupby_tensor": TorchInGraphFunctionVariable, + # functorch/deprecated + "torch._functorch.deprecated.jvp": UserFunctionVariable, + "torch._functorch.deprecated.hessian": UserFunctionVariable, + "torch._functorch.deprecated.jacfwd": UserFunctionVariable, + "torch._functorch.deprecated.jacrev": UserFunctionVariable, + "torch._functorch.deprecated.grad": UserFunctionVariable, + "torch._functorch.deprecated.grad_and_value": UserFunctionVariable, + "torch._functorch.deprecated.vjp": UserFunctionVariable, + # everything else + "torch._constrain_as_size": UserFunctionVariable, + "torch._tensor._convert": UserFunctionVariable, + "torch.jit._unwrap_optional": UserFunctionVariable, + "torch.backends.mha.get_fastpath_enabled": UserFunctionVariable, + "torch._C._functorch._add_batch_dim": TorchInGraphFunctionVariable, + "torch._C._functorch._remove_batch_dim": TorchInGraphFunctionVariable, + "torch._C._functorch._wrap_for_grad": TorchInGraphFunctionVariable, + "torch._C._functorch._unwrap_for_grad": TorchInGraphFunctionVariable, + "torch._C._functorch.maybe_current_level": TorchInGraphFunctionVariable, + "torch._C._functorch.is_batchedtensor": TorchInGraphFunctionVariable, + "torch._dynamo.mark_static": UserFunctionVariable, + "torch.fx.experimental.symbolic_shapes.guard_size_oblivious": TorchInGraphFunctionVariable, + "torch.cuda._get_device_properties": TorchInGraphFunctionVariable, + "torch.utils.hooks.BackwardHook": TorchInGraphFunctionVariable, + "torch.sparse_bsc_tensor": SkipFunctionVariable, + "torch.sparse_bsr_tensor": SkipFunctionVariable, + "torch.sparse_csc_tensor": SkipFunctionVariable, + "torch.sparse_csr_tensor": SkipFunctionVariable, + "torch.sparse_compressed_tensor": SkipFunctionVariable, + "torch._C._autograd._unsafe_set_version_counter": TorchInGraphFunctionVariable, + # avoid skipping user defined modules in distributed unit tests + "torch/testing/_internal/common_fsdp.py#forward": UserFunctionVariable, + f"torch/testing/_internal/common_fsdp.py#{TORCH_DYNAMO_RESUME_IN_PREFIX}": UserFunctionVariable, + "torch/testing/_internal/distributed/_tensor/common_dtensor.py#forward": UserFunctionVariable, + f"torch/testing/_internal/distributed/_tensor/common_dtensor.py#{TORCH_DYNAMO_RESUME_IN_PREFIX}": UserFunctionVariable, + "torch/testing/_internal/common_distributed.py#forward": UserFunctionVariable, + f"torch/testing/_internal/common_distributed.py#{TORCH_DYNAMO_RESUME_IN_PREFIX}": UserFunctionVariable, +} + + +# In graph functions (including constant folding) that are C bindings +torch_c_binding_in_graph_functions = dict.fromkeys( + [ + "math.acos", + "math.acosh", + "math.asin", + "math.asinh", + "math.atan", + "math.atan2", + "math.atanh", + "math.ceil", + "math.comb", + "math.copysign", + "math.cos", + "math.cosh", + "math.degrees", + "math.dist", + "math.erf", + "math.erfc", + "math.exp", + "math.expm1", + "math.fabs", + "math.factorial", + "math.floor", + "math.fmod", + "math.frexp", + "math.fsum", + "math.gamma", + "math.gcd", + "math.hypot", + "math.isclose", + "math.isfinite", + "math.isinf", + "math.isnan", + "math.isqrt", + "math.ldexp", + "math.lgamma", + 
"math.log", + "math.log10", + "math.log1p", + "math.log2", + "math.modf", + "math.nextafter", + "math.perm", + "math.pow", + "math.prod", + "math.radians", + "math.remainder", + "math.sin", + "math.sinh", + "math.tan", + "math.tanh", + "math.trunc", + "math.ulp", + "torch._adaptive_avg_pool2d", + "torch._adaptive_avg_pool3d", + "torch._add_batch_dim", + "torch._add_relu_", + "torch._add_relu", + "torch._addmm_activation", + "torch._aminmax", + "torch._amp_foreach_non_finite_check_and_unscale_", + "torch._amp_update_scale_", + "torch._assert_async", + "torch._assert_tensor_metadata", + "torch._batch_norm_impl_index", + "torch._C._activate_gpu_trace", + "torch._C._add_cached_tensor", + "torch._C._add_docstr", + "torch._C._are_functorch_transforms_active", + "torch._C._autograd_init", + "torch._C._awaitable_nowait", + "torch._C._awaitable_wait", + "torch._C._awaitable", + "torch._C._backport_for_mobile_from_buffer_to_buffer", + "torch._C._backport_for_mobile_from_buffer", + "torch._C._backport_for_mobile_to_buffer", + "torch._C._backport_for_mobile", + "torch._C._broadcast_coalesced", + "torch._C._broadcast_out", + "torch._C._broadcast", + "torch._C._c10d_init", + "torch._C._calculate_package_version_based_on_upgraders", + "torch._C._can_use_flash_attention", + "torch._C._can_use_mem_efficient_attention", + "torch._C._can_use_cudnn_attention", + "torch._C._check_onnx_proto", + "torch._C._check_sparse_tensor_invariants", + "torch._C._collect_all", + "torch._C._commit_update", + "torch._C._compile_graph_to_code_table", + "torch._C._construct_CUDA_Tensor_From_Storage_And_Metadata", + "torch._C._construct_storage_from_data_pointer", + "torch._C._conv_determine_backend_memory_format", + "torch._C._cpu._is_avx2_supported", + "torch._C._cpu._is_avx512_supported", + "torch._C._cpu._is_avx512_vnni_supported", + "torch._C._cpu._is_avx512_bf16_supported", + "torch._C._cpu._is_amx_tile_supported", + "torch._C._cpu._init_amx", + "torch._C._crash_if_aten_asan", + "torch._C._crash_if_csrc_asan", + "torch._C._crash_if_csrc_ubsan", + "torch._C._crash_if_debug_asserts_fail", + "torch._C._crash_if_vptr_ubsan", + "torch._C._create_function_from_graph", + "torch._C._create_function_from_trace_with_dict", + "torch._C._create_function_from_trace", + "torch._C._create_graph_by_tracing", + "torch._C._create_module_with_type", + "torch._C._create_object_with_type", + "torch._C._cuda_attach_out_of_memory_observer", + "torch._C._cuda_beginAllocateCurrentStreamToPool", + "torch._C._cuda_canDeviceAccessPeer", + "torch._C._cuda_changeCurrentAllocator", + "torch._C._cuda_checkPoolLiveAllocations", + "torch._C._cuda_clearCublasWorkspaces", + "torch._C._cuda_cudaCachingAllocator_raw_alloc", + "torch._C._cuda_cudaCachingAllocator_raw_delete", + "torch._C._cuda_cudaCachingAllocator_set_allocator_settings", + "torch._C._cuda_cudaHostAllocator", + "torch._C._cuda_customAllocator", + "torch._C._cuda_emptyCache", + "torch._C._cuda_endAllocateCurrentStreamToPool", + "torch._C._cuda_exchangeDevice", + "torch._C._cuda_get_conv_benchmark_empty_cache", + "torch._C._cuda_get_cudnn_benchmark_limit", + "torch._C._cuda_get_sync_debug_mode", + "torch._C._cuda_getAllocator", + "torch._C._cuda_getAllocatorBackend", + "torch._C._cuda_getArchFlags", + "torch._C._cuda_getCheckpointState", + "torch._C._cuda_getCompiledVersion", + "torch._C._cuda_getCurrentBlasHandle", + "torch._C._cuda_getCurrentRawStream", + "torch._C._cuda_getCurrentStream", + "torch._C._cuda_getDefaultStream", + "torch._C._cuda_getDevice", + "torch._C._cuda_getDeviceCount", + 
"torch._C._cuda_hasPrimaryContext", + "torch._C._cuda_init", + "torch._C._cuda_ipc_collect", + "torch._C._cuda_isCurrentStreamCapturing", + "torch._C._cuda_isHistoryEnabled", + "torch._C._cuda_isInBadFork", + "torch._C._cuda_jiterator_compile_and_launch_kernel", + "torch._C._cuda_lock_mutex", + "torch._C._cuda_maybeExchangeDevice", + "torch._C._cuda_memorySnapshot", + "torch._C._cuda_memoryStats", + "torch._C._cuda_record_memory_history_legacy", + "torch._C._cuda_record_memory_history", + "torch._C._cuda_releasePool", + "torch._C._cuda_resetAccumulatedMemoryStats", + "torch._C._cuda_resetPeakMemoryStats", + "torch._C._cuda_set_cudnn_benchmark_limit", + "torch._C._cuda_set_sync_debug_mode", + "torch._C._cuda_setCheckpointPoolState", + "torch._C._cuda_setDevice", + "torch._C._cuda_setMemoryFraction", + "torch._C._cuda_setStream", + "torch._C._cuda_sleep", + "torch._C._cuda_synchronize", + "torch._C._cuda_unlock_mutex", + "torch._C._cudnn_set_conv_benchmark_empty_cache", + "torch._C._cudnn.getCompileVersion", + "torch._C._cudnn.getRuntimeVersion", + "torch._C._cudnn.getVersionInt", + "torch._C._current_autograd_node", + "torch._C._current_graph_task_execution_order", + "torch._C._current_graph_task_id", + "torch._C._cxx_flags", + "torch._C._debug_get_fusion_group_inlining", + "torch._C._debug_only_are_vmap_fallback_warnings_enabled", + "torch._C._debug_only_display_vmap_fallback_warnings", + "torch._C._debug_set_autodiff_subgraph_inlining", + "torch._C._debug_set_fusion_group_inlining", + "torch._C._demangle", + "torch._C._disabled_torch_dispatch_impl", + "torch._C._disabled_torch_function_impl", + "torch._C._dispatch_call_boxed", + "torch._C._dispatch_check_all_invariants", + "torch._C._dispatch_check_invariants", + "torch._C._dispatch_dump_table", + "torch._C._dispatch_dump", + "torch._C._dispatch_find_dangling_impls", + "torch._C._dispatch_find_schema_or_throw", + "torch._C._dispatch_get_all_op_names", + "torch._C._dispatch_get_backend_keyset_from_autograd", + "torch._C._dispatch_get_registrations_for_dispatch_key", + "torch._C._dispatch_has_backend_fallback", + "torch._C._dispatch_has_computed_kernel_for_dispatch_key", + "torch._C._dispatch_has_kernel_for_any_dispatch_key", + "torch._C._dispatch_has_kernel_for_dispatch_key", + "torch._C._dispatch_has_kernel", + "torch._C._dispatch_is_alias_key", + "torch._C._dispatch_is_included_in_alias", + "torch._C._dispatch_is_main_interpreter", + "torch._C._dispatch_isTensorSubclassLike", + "torch._C._dispatch_key_for_device", + "torch._C._dispatch_key_name", + "torch._C._dispatch_key_parse", + "torch._C._dispatch_key_set", + "torch._C._dispatch_keys", + "torch._C._dispatch_keyset_full_after", + "torch._C._dispatch_keyset_full", + "torch._C._dispatch_keyset_to_string", + "torch._C._dispatch_library", + "torch._C._dispatch_num_backends", + "torch._C._dispatch_print_registrations_for_dispatch_key", + "torch._C._dispatch_pystub", + "torch._C._dispatch_set_report_error_callback", + "torch._C._dispatch_tls_is_dispatch_key_excluded", + "torch._C._dispatch_tls_is_dispatch_key_included", + "torch._C._dispatch_tls_local_exclude_set", + "torch._C._dispatch_tls_local_include_set", + "torch._C._dispatch_tls_set_dispatch_key_excluded", + "torch._C._dispatch_tls_set_dispatch_key_included", + "torch._C._dist_autograd_init", + "torch._C._dump_local_tls_set", + "torch._C._dump_upgraders_map", + "torch._C._enable_mobile_interface_call_export", + "torch._C._enter_dual_level", + "torch._C._error_if_any_worker_fails", + "torch._C._exit_dual_level", + 
"torch._C._export_operator_list", + "torch._C._export_opnames", + "torch._C._faulty_agent_init", + "torch._C._fft.fft_fft", + "torch._C._fft.fft_fft2", + "torch._C._fft.fft_fftfreq", + "torch._C._fft.fft_fftn", + "torch._C._fft.fft_fftshift", + "torch._C._fft.fft_hfft", + "torch._C._fft.fft_hfft2", + "torch._C._fft.fft_hfftn", + "torch._C._fft.fft_ifft", + "torch._C._fft.fft_ifft2", + "torch._C._fft.fft_ifftn", + "torch._C._fft.fft_ifftshift", + "torch._C._fft.fft_ihfft", + "torch._C._fft.fft_ihfft2", + "torch._C._fft.fft_ihfftn", + "torch._C._fft.fft_irfft", + "torch._C._fft.fft_irfft2", + "torch._C._fft.fft_irfftn", + "torch._C._fft.fft_rfft", + "torch._C._fft.fft_rfft2", + "torch._C._fft.fft_rfftfreq", + "torch._C._fft.fft_rfftn", + "torch._C._free_And_Remove_DeleterFn", + "torch._C._freeze_module", + "torch._C._from_dlpack", + "torch._C._functionality_to_backend_keys", + "torch._C._functionalization_reapply_views_tls", + "torch._C._fuse_to_static_module", + "torch._C._gather_out", + "torch._C._gather", + "torch._C._generate_upgraders_graph", + "torch._C._get_autograd_fallback_mode", + "torch._C._get_backcompat_broadcast_warn", + "torch._C._get_backcompat_keepdim_warn", + "torch._C._get_blas_preferred_backend", + "torch._C._get_caught_jit_exception_class_name", + "torch._C._get_caught_jit_exception_original_msg", + "torch._C._get_constant_bool_symnode", + "torch._C._get_cpp_backtrace", + "torch._C._get_cpu_capability", + "torch._C._get_cublas_allow_bf16_reduced_precision_reduction", + "torch._C._get_cublas_allow_fp16_reduced_precision_reduction", + "torch._C._get_cublas_allow_tf32", + "torch._C._get_cudnn_allow_tf32", + "torch._C._get_cudnn_benchmark", + "torch._C._get_cudnn_deterministic", + "torch._C._get_cudnn_enabled", + "torch._C._get_custom_class_python_wrapper", + "torch._C._get_default_device", + "torch._C._get_deterministic_algorithms_warn_only", + "torch._C._get_deterministic_algorithms", + "torch._C._get_deterministic_fill_uninitialized_memory", + "torch._C._get_dispatch_mode", + "torch._C._get_dispatch_stack_at", + "torch._C._get_file_format", + "torch._C._get_flash_sdp_enabled", + "torch._C._get_float32_matmul_precision", + "torch._C._get_function_stack_at", + "torch._C._get_graph_executor_optimize", + "torch._C._get_linalg_preferred_backend", + "torch._C._get_math_sdp_enabled", + "torch._C._get_math_sdp_allow_fp16_bf16_reduction", + "torch._C._get_max_operator_version", + "torch._C._get_mem_efficient_sdp_enabled", + "torch._C._get_mkldnn_enabled", + "torch._C._get_cudnn_sdp_enabled", + "torch._C._set_sdp_use_cudnn", + "torch._C._get_mobile_model_contained_types_from_buffer", + "torch._C._get_mobile_model_contained_types", + "torch._C._get_model_bytecode_version_from_buffer", + "torch._C._get_model_bytecode_version", + "torch._C._get_model_extra_files_from_buffer", + "torch._C._get_model_extra_files", + "torch._C._get_model_ops_and_info_from_buffer", + "torch._C._get_model_ops_and_info", + "torch._C._get_module_info_from_flatbuffer", + "torch._C._get_nnpack_enabled", + "torch._C._get_obj_in_tls", + "torch._C._get_operation_overload", + "torch._C._get_operator_version_map", + "torch._C._get_privateuse1_backend_name", + "torch._C._get_qengine", + "torch._C._get_schema", + "torch._C._get_nested_int", + "torch._C._get_tensor_metadata", + "torch._C._get_tracing_state", + "torch._C._get_upgrader_ranges", + "torch._C._get_upgraders_entry_map", + "torch._C._get_upgraders_map_size", + "torch._C._get_value_trace", + "torch._C._get_version_calculator_flag", + 
"torch._C._get_warnAlways", + "torch._C._graph_pool_handle", + "torch._C._group_tensors_by_device_and_dtype", + "torch._C._hack_do_not_use_clone_module_with_class", + "torch._C._has_distributed", + "torch._C._has_Standard_Deleter", + "torch._C._has_storage", + "torch._C._has_tensorexpr_cpp_tests", + "torch._C._run_tensorexpr_cpp_tests", + "torch._C._has_torch_function_unary", + "torch._C._has_torch_function_variadic", + "torch._C._has_torch_function", + "torch._C._import_ir_module_from_package", + "torch._C._increment_version", + "torch._C._infer_size", + "torch._C._init_names", + "torch._C._initExtension", + "torch._C._is_alias_of", + "torch._C._is_any_autocast_enabled", + "torch._C._is_cached_tensor", + "torch._C._is_flash_attention_available", + "torch._C._is_fwd_grad_enabled", + "torch._C._is_key_in_tls", + "torch._C._is_multithreading_enabled", + "torch._C._is_torch_function_enabled", + "torch._C._is_torch_function_mode_enabled", + "torch._C._is_tracing", + "torch._C._is_view_replay_enabled", + "torch._C._is_xnnpack_enabled", + "torch._C._itt.is_available", + "torch._C._itt.mark", + "torch._C._itt.rangePop", + "torch._C._itt.rangePush", + "torch._C._ivalue_debug_python_object", + "torch._C._ivalue_tags_match", + "torch._C._jit_assert_is_instance", + "torch._C._jit_can_fuse_on_cpu_legacy", + "torch._C._jit_can_fuse_on_cpu", + "torch._C._jit_can_fuse_on_gpu", + "torch._C._jit_cat_wo_conditionals", + "torch._C._jit_check_alias_annotation", + "torch._C._jit_clear_class_registry", + "torch._C._jit_debug_fuser_num_cached_kernel_specs", + "torch._C._jit_debug_module_iterators", + "torch._C._jit_decay_packed_param_input_types", + "torch._C._jit_decomposition_graph_for_node", + "torch._C._jit_differentiate", + "torch._C._jit_erase_non_input_shape_information", + "torch._C._jit_flatten", + "torch._C._jit_fuser_get_fused_kernel_code", + "torch._C._jit_get_all_schemas", + "torch._C._jit_get_custom_class_schemas", + "torch._C._jit_get_emit_hooks", + "torch._C._jit_get_inline_everything_mode", + "torch._C._jit_get_logging_option", + "torch._C._jit_get_num_profiled_runs", + "torch._C._jit_get_operation", + "torch._C._jit_get_schemas_for_operator", + "torch._C._jit_get_te_cuda_pointwise_block_count", + "torch._C._jit_get_te_cuda_pointwise_block_size", + "torch._C._jit_get_te_cuda_pointwise_loop_levels", + "torch._C._jit_get_te_generate_block_code", + "torch._C._jit_get_te_must_use_llvm_cpu", + "torch._C._jit_get_tracer_state_warn", + "torch._C._jit_has_cpp_tests", + "torch._C._jit_init", + "torch._C._jit_interpret_graph", + "torch._C._jit_is_onnx_log_enabled", + "torch._C._jit_is_script_object", + "torch._C._jit_llga_enabled", + "torch._C._jit_nvfuser_can_be_enabled", + "torch._C._jit_nvfuser_clear_comparison_callback", + "torch._C._jit_nvfuser_enabled", + "torch._C._jit_nvfuser_horizontal_mode", + "torch._C._jit_nvfuser_set_comparison_callback", + "torch._C._jit_nvfuser_single_node_mode", + "torch._C._jit_object_is_non_holding", + "torch._C._jit_onnx_convert_pattern_from_subblock", + "torch._C._jit_onnx_create_full_scope_name", + "torch._C._jit_onnx_list_model_parameters", + "torch._C._jit_onnx_log", + "torch._C._jit_opt_conditionals", + "torch._C._jit_override_can_fuse_on_cpu_legacy", + "torch._C._jit_override_can_fuse_on_cpu", + "torch._C._jit_override_can_fuse_on_gpu", + "torch._C._jit_pass_autocast", + "torch._C._jit_pass_batch_mm", + "torch._C._jit_pass_canonicalize_graph_fuser_ops", + "torch._C._jit_pass_canonicalize", + "torch._C._jit_pass_complete_shape_analysis", + 
"torch._C._jit_pass_concat_frozen_linear", + "torch._C._jit_pass_constant_loop_unrolling", + "torch._C._jit_pass_constant_pooling", + "torch._C._jit_pass_constant_propagation_immutable_types", + "torch._C._jit_pass_constant_propagation", + "torch._C._jit_pass_convert_frozen_ops_to_mkldnn", + "torch._C._jit_pass_create_autodiff_subgraphs", + "torch._C._jit_pass_create_functional_graphs", + "torch._C._jit_pass_cse", + "torch._C._jit_pass_custom_pattern_based_rewrite_graph", + "torch._C._jit_pass_custom_pattern_based_rewrite", + "torch._C._jit_pass_dbr_quant_remove_redundant_aliases", + "torch._C._jit_pass_dce_allow_deleting_nodes_with_side_effects", + "torch._C._jit_pass_dce", + "torch._C._jit_pass_decompose_ops", + "torch._C._jit_pass_dedup_module_uses", + "torch._C._jit_pass_erase_number_types", + "torch._C._jit_pass_erase_shape_information", + "torch._C._jit_pass_filter_non_tensor_arguments", + "torch._C._jit_pass_fixup_onnx_controlflow_node", + "torch._C._jit_pass_fold_convbn", + "torch._C._jit_pass_fold_frozen_conv_add_or_sub", + "torch._C._jit_pass_fold_frozen_conv_bn", + "torch._C._jit_pass_fold_frozen_conv_mul_or_div", + "torch._C._jit_pass_fold_frozen_linear_bn", + "torch._C._jit_pass_fold_prepacking_ops", + "torch._C._jit_pass_functional_to_inplace_activation", + "torch._C._jit_pass_fuse_add_relu", + "torch._C._jit_pass_fuse_addmm", + "torch._C._jit_pass_fuse_clamp_w_prepacked_linear_conv", + "torch._C._jit_pass_fuse_frozen_conv_add_relu", + "torch._C._jit_pass_fuse_linear", + "torch._C._jit_pass_fuse_quantized_add_relu", + "torch._C._jit_pass_fuse_tensorexprs", + "torch._C._jit_pass_fuse", + "torch._C._jit_pass_inline_fork_wait", + "torch._C._jit_pass_inline_functional_graphs", + "torch._C._jit_pass_inline", + "torch._C._jit_pass_inplace_to_functional_activation", + "torch._C._jit_pass_insert_observer_method_for_ondevice_ptq", + "torch._C._jit_pass_insert_observers", + "torch._C._jit_pass_insert_prepack_unpack", + "torch._C._jit_pass_insert_prepacked_ops", + "torch._C._jit_pass_insert_quant_dequant_for_ondevice_ptq", + "torch._C._jit_pass_insert_quant_dequant", + "torch._C._jit_pass_integer_value_refinement", + "torch._C._jit_pass_lint", + "torch._C._jit_pass_loop_unrolling", + "torch._C._jit_pass_lower_all_tuples", + "torch._C._jit_pass_lower_graph", + "torch._C._jit_pass_metal_fold_prepacking_ops", + "torch._C._jit_pass_metal_fuse_clamp_w_prepacked_conv", + "torch._C._jit_pass_metal_insert_prepacked_ops", + "torch._C._jit_pass_metal_optimize_for_mobile", + "torch._C._jit_pass_onnx_assign_output_shape", + "torch._C._jit_pass_onnx_assign_scoped_names_for_node_and_value", + "torch._C._jit_pass_onnx_autograd_function_process", + "torch._C._jit_pass_onnx_block", + "torch._C._jit_pass_onnx_cast_all_constant_to_floating", + "torch._C._jit_pass_onnx_clear_scope_records", + "torch._C._jit_pass_onnx_constant_fold", + "torch._C._jit_pass_onnx_deduplicate_initializers", + "torch._C._jit_pass_onnx_eliminate_unused_items", + "torch._C._jit_pass_onnx_eval_peephole", + "torch._C._jit_pass_onnx_function_extraction", + "torch._C._jit_pass_onnx_function_substitution", + "torch._C._jit_pass_onnx_graph_shape_type_inference", + "torch._C._jit_pass_onnx_lint", + "torch._C._jit_pass_onnx_node_shape_type_inference", + "torch._C._jit_pass_onnx_peephole", + "torch._C._jit_pass_onnx_preprocess_caffe2", + "torch._C._jit_pass_onnx_preprocess", + "torch._C._jit_pass_onnx_quantization_insert_permutes", + "torch._C._jit_pass_onnx_remove_inplace_ops_for_onnx", + "torch._C._jit_pass_onnx_remove_print", + 
"torch._C._jit_pass_onnx_scalar_type_analysis", + "torch._C._jit_pass_onnx_set_dynamic_input_shape", + "torch._C._jit_pass_onnx_track_scope_attributes", + "torch._C._jit_pass_onnx_unpack_quantized_weights", + "torch._C._jit_pass_onnx", + "torch._C._jit_pass_optimize_for_inference", + "torch._C._jit_pass_optimize_for_mobile", + "torch._C._jit_pass_optimize_frozen_graph", + "torch._C._jit_pass_pattern_based_rewrite", + "torch._C._jit_pass_peephole_list_idioms", + "torch._C._jit_pass_peephole", + "torch._C._jit_pass_prepare_division_for_onnx", + "torch._C._jit_pass_propagate_device", + "torch._C._jit_pass_propagate_dtype", + "torch._C._jit_pass_propagate_shapes_on_graph_and_build_compute", + "torch._C._jit_pass_propagate_shapes_on_graph", + "torch._C._jit_pass_quant_finalize_for_ondevice_ptq", + "torch._C._jit_pass_quant_finalize", + "torch._C._jit_pass_quant_fusion", + "torch._C._jit_pass_refine_integer_values", + "torch._C._jit_pass_refine_tuple_types", + "torch._C._jit_pass_remove_dropout", + "torch._C._jit_pass_remove_expands", + "torch._C._jit_pass_remove_inplace_ops", + "torch._C._jit_pass_remove_mutation", + "torch._C._jit_pass_replace_old_ops_with_upgraders", + "torch._C._jit_pass_replicate_dequantize", + "torch._C._jit_pass_run_decompositions", + "torch._C._jit_pass_specialize_autogradzero", + "torch._C._jit_pass_swap_functional_linear", + "torch._C._jit_pass_transform_conv1d_to_conv2d", + "torch._C._jit_pass_transpose_frozen_linear", + "torch._C._jit_pass_vulkan_fold_prepacking_ops", + "torch._C._jit_pass_vulkan_fuse_clamp_w_prepacked_conv", + "torch._C._jit_pass_vulkan_insert_prepacked_ops", + "torch._C._jit_pass_vulkan_optimize_for_mobile", + "torch._C._jit_register_decomposition_for_schema", + "torch._C._jit_register_shape_compute_graph_for_node", + "torch._C._jit_resolve_packet", + "torch._C._jit_run_cpp_tests", + "torch._C._jit_script_class_compile", + "torch._C._jit_script_compile_overload", + "torch._C._jit_script_compile", + "torch._C._jit_script_interface_compile", + "torch._C._jit_set_autocast_mode", + "torch._C._jit_set_bailout_depth", + "torch._C._jit_set_emit_hooks", + "torch._C._jit_set_fusion_strategy", + "torch._C._jit_set_inline_everything_mode", + "torch._C._jit_set_llga_enabled", + "torch._C._jit_set_logging_option", + "torch._C._jit_set_logging_stream", + "torch._C._jit_set_num_profiled_runs", + "torch._C._jit_set_nvfuser_enabled", + "torch._C._jit_set_nvfuser_guard_mode", + "torch._C._jit_set_nvfuser_horizontal_mode", + "torch._C._jit_set_nvfuser_single_node_mode", + "torch._C._jit_set_nvfuser_skip_node_kind", + "torch._C._jit_set_onnx_log_enabled", + "torch._C._jit_set_onnx_log_output_stream", + "torch._C._jit_set_profiling_executor", + "torch._C._jit_set_profiling_mode", + "torch._C._jit_set_symbolic_shapes_test_mode", + "torch._C._jit_set_te_cuda_pointwise_block_count", + "torch._C._jit_set_te_cuda_pointwise_block_size", + "torch._C._jit_set_te_cuda_pointwise_loop_levels", + "torch._C._jit_set_te_generate_block_code", + "torch._C._jit_set_te_must_use_llvm_cpu", + "torch._C._jit_set_texpr_dynamic_shape_enabled", + "torch._C._jit_set_texpr_fuser_enabled", + "torch._C._jit_set_texpr_reductions_enabled", + "torch._C._jit_set_tracer_state_warn", + "torch._C._jit_set_utf8_decoding_ignore", + "torch._C._jit_shape_compute_graph_for_node", + "torch._C._jit_symbolic_shapes_test_mode_enabled", + "torch._C._jit_texpr_dynamic_shape_enabled", + "torch._C._jit_texpr_fallback_allowed", + "torch._C._jit_texpr_fuser_enabled", + "torch._C._jit_texpr_reductions_enabled", + 
"torch._C._jit_texpr_set_fallback_allowed", + "torch._C._jit_to_backend_selective", + "torch._C._jit_to_backend", + "torch._C._jit_to_static_module", + "torch._C._jit_trace_graph", + "torch._C._jit_trace_module", + "torch._C._jit_tree_views.FalseLiteral", + "torch._C._jit_tree_views.NoneLiteral", + "torch._C._jit_tree_views.TrueLiteral", + "torch._C._jit_try_infer_type", + "torch._C._jit_unflatten", + "torch._C._last_executed_optimized_graph", + "torch._C._len_torch_dispatch_stack", + "torch._C._len_torch_function_stack", + "torch._C._linalg._linalg_eigvals", + "torch._C._linalg.linalg_cholesky_ex", + "torch._C._linalg.linalg_cholesky", + "torch._C._linalg.linalg_cond", + "torch._C._linalg.linalg_cross", + "torch._C._linalg.linalg_det", + "torch._C._linalg.linalg_diagonal", + "torch._C._linalg.linalg_eig", + "torch._C._linalg.linalg_eigh", + "torch._C._linalg.linalg_eigvals", + "torch._C._linalg.linalg_eigvalsh", + "torch._C._linalg.linalg_householder_product", + "torch._C._linalg.linalg_inv_ex", + "torch._C._linalg.linalg_inv", + "torch._C._linalg.linalg_ldl_factor_ex", + "torch._C._linalg.linalg_ldl_factor", + "torch._C._linalg.linalg_ldl_solve", + "torch._C._linalg.linalg_lstsq", + "torch._C._linalg.linalg_lu_factor_ex", + "torch._C._linalg.linalg_lu_factor", + "torch._C._linalg.linalg_lu_solve", + "torch._C._linalg.linalg_lu", + "torch._C._linalg.linalg_matmul", + "torch._C._linalg.linalg_matrix_exp", + "torch._C._linalg.linalg_matrix_norm", + "torch._C._linalg.linalg_matrix_power", + "torch._C._linalg.linalg_matrix_rank", + "torch._C._linalg.linalg_multi_dot", + "torch._C._linalg.linalg_norm", + "torch._C._linalg.linalg_pinv", + "torch._C._linalg.linalg_qr", + "torch._C._linalg.linalg_slogdet", + "torch._C._linalg.linalg_solve_ex", + "torch._C._linalg.linalg_solve_triangular", + "torch._C._linalg.linalg_solve", + "torch._C._linalg.linalg_svd", + "torch._C._linalg.linalg_svdvals", + "torch._C._linalg.linalg_tensorinv", + "torch._C._linalg.linalg_tensorsolve", + "torch._C._linalg.linalg_vander", + "torch._C._linalg.linalg_vecdot", + "torch._C._linalg.linalg_vector_norm", + "torch._C._llvm_enabled", + "torch._C._load_for_lite_interpreter_from_buffer", + "torch._C._load_for_lite_interpreter", + "torch._C._load_jit_module_from_bytes", + "torch._C._load_jit_module_from_file", + "torch._C._load_mobile_module_from_bytes", + "torch._C._load_mobile_module_from_file", + "torch._C._log_api_usage_metadata", + "torch._C._log_api_usage_once", + "torch._C._logging_set_logger", + "torch._C._meta_in_tls_dispatch_include", + "torch._C._mps_acquireEvent", + "torch._C._mps_currentAllocatedMemory", + "torch._C._mps_deviceSynchronize", + "torch._C._mps_driverAllocatedMemory", + "torch._C._mps_recommendedMaxMemory", + "torch._C._mps_elapsedTimeOfEvents", + "torch._C._mps_emptyCache", + "torch._C._mps_get_default_generator", + "torch._C._mps_is_available", + "torch._C._mps_is_in_bad_fork", + "torch._C._mps_is_on_macos_13_or_newer", + "torch._C._mps_profilerStartTrace", + "torch._C._mps_profilerStopTrace", + "torch._C._mps_queryEvent", + "torch._C._mps_recordEvent", + "torch._C._mps_releaseEvent", + "torch._C._mps_setMemoryFraction", + "torch._C._mps_synchronizeEvent", + "torch._C._mps_waitForEvent", + "torch._C._multiprocessing_init", + "torch._C._nccl_all_gather", + "torch._C._nccl_all_reduce", + "torch._C._nccl_broadcast", + "torch._C._nccl_init_rank", + "torch._C._nccl_reduce_scatter", + "torch._C._nccl_reduce", + "torch._C._nccl_unique_id", + "torch._C._nccl_version_suffix", + "torch._C._nccl_version", + 
"torch._C._nested.nested_tensor", + "torch._C._nested.nested_to_padded_tensor", + "torch._C._new_symbolic_shape_symbol", + "torch._C._nn_module_to_mobile", + "torch._C._nn._conv_depthwise2d", + "torch._C._nn._pad_circular", + "torch._C._nn._pad_enum", + "torch._C._nn._parse_to", + "torch._C._nn._test_ambiguous_defaults", + "torch._C._nn._test_optional_filled_intlist", + "torch._C._nn._test_optional_floatlist", + "torch._C._nn._test_optional_intlist", + "torch._C._nn._test_string_default", + "torch._C._nn._test_warn_in_autograd", + "torch._C._nn._upsample_bicubic2d_aa", + "torch._C._nn._upsample_bilinear2d_aa", + "torch._C._nn._upsample_nearest_exact1d", + "torch._C._nn._upsample_nearest_exact2d", + "torch._C._nn._upsample_nearest_exact3d", + "torch._C._nn.adaptive_avg_pool2d", + "torch._C._nn.adaptive_avg_pool3d", + "torch._C._nn.adaptive_max_pool2d", + "torch._C._nn.adaptive_max_pool3d", + "torch._C._nn.avg_pool2d", + "torch._C._nn.avg_pool3d", + "torch._C._nn.binary_cross_entropy", + "torch._C._nn.col2im", + "torch._C._nn.conv_depthwise3d", + "torch._C._nn.cross_entropy_loss", + "torch._C._nn.elu_", + "torch._C._nn.elu", + "torch._C._nn.flatten_dense_tensors", + "torch._C._nn.fractional_max_pool2d", + "torch._C._nn.fractional_max_pool3d", + "torch._C._nn.gelu_", + "torch._C._nn.gelu", + "torch._C._nn.glu", + "torch._C._nn.hardsigmoid_", + "torch._C._nn.hardsigmoid", + "torch._C._nn.hardswish_", + "torch._C._nn.hardswish", + "torch._C._nn.hardtanh_", + "torch._C._nn.hardtanh", + "torch._C._nn.huber_loss", + "torch._C._nn.im2col", + "torch._C._nn.l1_loss", + "torch._C._nn.leaky_relu_", + "torch._C._nn.leaky_relu", + "torch._C._nn.linear", + "torch._C._nn.log_sigmoid", + "torch._C._nn.max_pool2d_with_indices", + "torch._C._nn.max_pool3d_with_indices", + "torch._C._nn.max_unpool2d", + "torch._C._nn.max_unpool3d", + "torch._C._nn.mish_", + "torch._C._nn.mish", + "torch._C._nn.mkldnn_linear", + "torch._C._nn.mkldnn_reorder_conv2d_weight", + "torch._C._nn.mkldnn_reorder_conv3d_weight", + "torch._C._nn.mse_loss", + "torch._C._nn.multi_margin_loss", + "torch._C._nn.multilabel_margin_loss", + "torch._C._nn.nll_loss_nd", + "torch._C._nn.nll_loss", + "torch._C._nn.nll_loss2d", + "torch._C._nn.one_hot", + "torch._C._nn.pad_sequence", + "torch._C._nn.pad", + "torch._C._nn.reflection_pad1d", + "torch._C._nn.reflection_pad2d", + "torch._C._nn.reflection_pad3d", + "torch._C._nn.relu6_", + "torch._C._nn.relu6", + "torch._C._nn.replication_pad1d", + "torch._C._nn.replication_pad2d", + "torch._C._nn.replication_pad3d", + "torch._C._nn.rrelu_with_noise_", + "torch._C._nn.rrelu_with_noise", + "torch._C._nn.scaled_dot_product_attention", + "torch._C._nn.silu_", + "torch._C._nn.silu", + "torch._C._nn.slow_conv_dilated2d", + "torch._C._nn.slow_conv_dilated3d", + "torch._C._nn.slow_conv_transpose2d", + "torch._C._nn.slow_conv_transpose3d", + "torch._C._nn.slow_conv3d", + "torch._C._nn.smooth_l1_loss", + "torch._C._nn.soft_margin_loss", + "torch._C._nn.softplus", + "torch._C._nn.softshrink", + "torch._C._nn.thnn_conv2d", + "torch._C._nn.unflatten_dense_tensors", + "torch._C._nn.upsample_bicubic2d", + "torch._C._nn.upsample_bilinear2d", + "torch._C._nn.upsample_linear1d", + "torch._C._nn.upsample_nearest1d", + "torch._C._nn.upsample_nearest2d", + "torch._C._nn.upsample_nearest3d", + "torch._C._nn.upsample_trilinear3d", + "torch._C._non_sym_sizes", + "torch._C._overlaps", + "torch._C._parallel_info", + "torch._C._parse_dispatch_key", + "torch._C._parse_source_def", + "torch._C._pop_torch_dispatch_stack", + 
"torch._C._pop_torch_function_stack", + "torch._C._propagate_and_assign_input_shapes", + "torch._C._propagate_shapes", + "torch._C._propagate_xla_data", + "torch._C._push_on_torch_dispatch_stack", + "torch._C._push_on_torch_function_stack", + "torch._C._quantize_ondevice_ptq_dynamic", + "torch._C._register_py_class_for_device", + "torch._C._remove_cached_tensor", + "torch._C._remove_worker_pids", + "torch._C._rename_privateuse1_backend", + "torch._C._replace_", + "torch._C._replace_overloaded_method_decl", + "torch._C._resolve_type_from_object", + "torch._C._resolve_type", + "torch._C._rocm_is_backward_pass", + "torch._C._rpc_init", + "torch._C._run_emit_module_hook", + "torch._C._save_jit_module_to_bytes", + "torch._C._save_jit_module", + "torch._C._save_mobile_module_to_bytes", + "torch._C._save_mobile_module", + "torch._C._save_parameters", + "torch._C._scatter_out", + "torch._C._scatter", + "torch._C._select_conv_backend", + "torch._C._select_batch_norm_backend", + "torch._C._set_autograd_fallback_mode", + "torch._C._set_backcompat_broadcast_warn", + "torch._C._set_backcompat_keepdim_warn", + "torch._C._set_blas_preferred_backend", + "torch._C._set_cached_tensors_enabled", + "torch._C._set_check_sparse_tensor_invariants", + "torch._C._set_conj", + "torch._C._set_cublas_allow_bf16_reduced_precision_reduction", + "torch._C._set_cublas_allow_fp16_reduced_precision_reduction", + "torch._C._set_cublas_allow_tf32", + "torch._C._set_cudnn_allow_tf32", + "torch._C._set_cudnn_benchmark", + "torch._C._set_cudnn_deterministic", + "torch._C._set_cudnn_enabled", + "torch._C._set_default_dtype", + "torch._C._set_default_mobile_cpu_allocator", + "torch._C._set_default_tensor_type", + "torch._C._set_deterministic_algorithms", + "torch._C._set_deterministic_fill_uninitialized_memory", + "torch._C._set_dispatch_mode", + "torch._C._set_float32_matmul_precision", + "torch._C._set_fwd_grad_enabled", + "torch._C._set_grad_enabled", + "torch._C._set_graph_executor_optimize", + "torch._C._set_linalg_preferred_backend", + "torch._C._set_meta_in_tls_dispatch_include", + "torch._C._set_mkldnn_enabled", + "torch._C._set_multithreading_enabled", + "torch._C._set_neg", + "torch._C._set_nnpack_enabled", + "torch._C._set_print_stack_traces_on_fatal_signal", + "torch._C._set_qengine", + "torch._C._set_sdp_use_flash", + "torch._C._set_sdp_use_math", + "torch._C._set_math_sdp_allow_fp16_bf16_reduction", + "torch._C._set_sdp_use_mem_efficient", + "torch._C._set_should_use_format_with_string_table", + "torch._C._set_storage_access_error_msg", + "torch._C._set_tensor_metadata", + "torch._C._set_tracing_state", + "torch._C._set_value_trace", + "torch._C._set_view_replay_enabled", + "torch._C._set_warnAlways", + "torch._C._set_worker_pids", + "torch._C._set_worker_signal_handlers", + "torch._C._should_allow_numbers_as_tensors", + "torch._C._show_config", + "torch._C._sparse._sparse_addmm", + "torch._C._sparse._sparse_log_softmax", + "torch._C._sparse._sparse_mm_reduce_impl", + "torch._C._sparse._sparse_mm", + "torch._C._sparse._sparse_softmax", + "torch._C._sparse._spdiags", + "torch._C._sparse.sparse_sampled_addmm", + "torch._C._special.special_airy_ai", + "torch._C._special.special_bessel_j0", + "torch._C._special.special_bessel_j1", + "torch._C._special.special_bessel_y0", + "torch._C._special.special_bessel_y1", + "torch._C._special.special_chebyshev_polynomial_t", + "torch._C._special.special_chebyshev_polynomial_u", + "torch._C._special.special_chebyshev_polynomial_v", + 
"torch._C._special.special_chebyshev_polynomial_w", + "torch._C._special.special_digamma", + "torch._C._special.special_entr", + "torch._C._special.special_erf", + "torch._C._special.special_erfc", + "torch._C._special.special_erfcx", + "torch._C._special.special_erfinv", + "torch._C._special.special_exp2", + "torch._C._special.special_expit", + "torch._C._special.special_expm1", + "torch._C._special.special_gammainc", + "torch._C._special.special_gammaincc", + "torch._C._special.special_gammaln", + "torch._C._special.special_hermite_polynomial_h", + "torch._C._special.special_hermite_polynomial_he", + "torch._C._special.special_i0", + "torch._C._special.special_i0e", + "torch._C._special.special_i1", + "torch._C._special.special_i1e", + "torch._C._special.special_laguerre_polynomial_l", + "torch._C._special.special_legendre_polynomial_p", + "torch._C._special.special_log_ndtr", + "torch._C._special.special_log_softmax", + "torch._C._special.special_log1p", + "torch._C._special.special_logit", + "torch._C._special.special_logsumexp", + "torch._C._special.special_modified_bessel_i0", + "torch._C._special.special_modified_bessel_i1", + "torch._C._special.special_modified_bessel_k0", + "torch._C._special.special_modified_bessel_k1", + "torch._C._special.special_multigammaln", + "torch._C._special.special_ndtr", + "torch._C._special.special_ndtri", + "torch._C._special.special_polygamma", + "torch._C._special.special_psi", + "torch._C._special.special_round", + "torch._C._special.special_scaled_modified_bessel_k0", + "torch._C._special.special_scaled_modified_bessel_k1", + "torch._C._special.special_shifted_chebyshev_polynomial_t", + "torch._C._special.special_shifted_chebyshev_polynomial_u", + "torch._C._special.special_shifted_chebyshev_polynomial_v", + "torch._C._special.special_shifted_chebyshev_polynomial_w", + "torch._C._special.special_sinc", + "torch._C._special.special_softmax", + "torch._C._special.special_spherical_bessel_j0", + "torch._C._special.special_xlog1py", + "torch._C._special.special_xlogy", + "torch._C._special.special_zeta", + "torch._C._stash_obj_in_tls", + "torch._C._storage_id", + "torch._C._storage_Use_Count", + "torch._C._supported_qengines", + "torch._C._te.abs", + "torch._C._te.acos", + "torch._C._te.annotate_input_shapes", + "torch._C._te.asin", + "torch._C._te.atan", + "torch._C._te.atan2", + "torch._C._te.ceil", + "torch._C._te.Compute", + "torch._C._te.Compute2", + "torch._C._te.construct_codegen", + "torch._C._te.cos", + "torch._C._te.cosh", + "torch._C._te.erf", + "torch._C._te.erfc", + "torch._C._te.exp", + "torch._C._te.expm1", + "torch._C._te.fixup_missing_shape_info", + "torch._C._te.floor", + "torch._C._te.fmod", + "torch._C._te.frac", + "torch._C._te.ifThenElse", + "torch._C._te.is_graph_compilable", + "torch._C._te.isnan", + "torch._C._te.lgamma", + "torch._C._te.log", + "torch._C._te.log10", + "torch._C._te.log1p", + "torch._C._te.log2", + "torch._C._te.lower", + "torch._C._te.make_shapes_symbolic", + "torch._C._te.pow", + "torch._C._te.Reduce", + "torch._C._te.remainder", + "torch._C._te.remove_graph_output", + "torch._C._te.remove_unused_self_argument", + "torch._C._te.replace_list_output_with_tuple", + "torch._C._te.round", + "torch._C._te.rsqrt", + "torch._C._te.sigmoid", + "torch._C._te.simplify", + "torch._C._te.sin", + "torch._C._te.sinh", + "torch._C._te.sqrt", + "torch._C._te.tan", + "torch._C._te.tanh", + "torch._C._te.trim_graph", + "torch._C._te.trunc", + "torch._C._tensor_impl_raw_handle", + 
"torch._C._test_only_add_entry_to_op_version_map", + "torch._C._test_only_populate_upgraders", + "torch._C._test_only_remove_entry_to_op_version_map", + "torch._C._test_only_remove_upgraders", + "torch._C._to_functionality_key", + "torch._C._tracer_set_force_outplace", + "torch._C._tracer_set_get_unique_name_fn", + "torch._C._tracer_warn_use_python", + "torch._C._unset_default_mobile_cpu_allocator", + "torch._C._unset_dispatch_mode", + "torch._C._valgrind_supported_platform", + "torch._C._valgrind_toggle_and_dump_stats", + "torch._C._valgrind_toggle", + "torch._C._verbose.mkl_set_verbose", + "torch._C._verbose.mkldnn_set_verbose", + "torch._C._vmapmode_decrement_nesting", + "torch._C._vmapmode_increment_nesting", + "torch._C._warn_deprecation", + "torch._C._warn", + "torch._C._will_engine_execute_node", + "torch._C._wrap_tensor_impl", + "torch._C.fork", + "torch._C.get_autocast_cpu_dtype", + "torch._C.get_autocast_dtype", + "torch._C.get_autocast_gpu_dtype", + "torch._C.get_autocast_ipu_dtype", + "torch._C.get_autocast_xla_dtype", + "torch._C.get_default_dtype", + "torch._C.get_num_interop_threads", + "torch._C.get_num_threads", + "torch._C.import_ir_module_from_buffer", + "torch._C.import_ir_module", + "torch._C.init_num_threads", + "torch._C.is_anomaly_check_nan_enabled", + "torch._C.is_anomaly_enabled", + "torch._C.is_autocast_cache_enabled", + "torch._C.is_autocast_cpu_enabled", + "torch._C.is_autocast_enabled", + "torch._C.is_autocast_ipu_enabled", + "torch._C.is_autocast_xla_enabled", + "torch._C.is_grad_enabled", + "torch._C.is_inference_mode_enabled", + "torch._C.merge_type_from_type_comment", + "torch._C.parse_ir", + "torch._C.parse_schema", + "torch._C.parse_type_comment", + "torch._C.read_vitals", + "torch._C.set_vital", + "torch._C.unify_type_list", + "torch._C.vitals_enabled", + "torch._C.wait", + "torch._cast_Byte", + "torch._cast_Char", + "torch._cast_Double", + "torch._cast_Float", + "torch._cast_Half", + "torch._cast_Int", + "torch._cast_Long", + "torch._cast_Short", + "torch._choose_qparams_per_tensor", + "torch._chunk_cat", + "torch._coalesce", + "torch._compute_linear_combination", + "torch._conj_copy", + "torch._conj_physical", + "torch._conj", + "torch._convert_indices_from_coo_to_csr", + "torch._convert_indices_from_csr_to_coo", + "torch._convert_weight_to_int4pack", + "torch._convolution_mode", + "torch._convolution", + "torch._copy_from_and_resize", + "torch._copy_from", + "torch._cslt_compress", + "torch._cslt_sparse_mm", + "torch._ctc_loss", + "torch._cudnn_ctc_loss", + "torch._cudnn_init_dropout_state", + "torch._cudnn_rnn_flatten_weight", + "torch._cudnn_rnn", + "torch._cufft_clear_plan_cache", + "torch._cufft_get_plan_cache_max_size", + "torch._cufft_get_plan_cache_size", + "torch._cufft_set_plan_cache_max_size", + "torch._cummax_helper", + "torch._cummin_helper", + "torch._debug_has_internal_overlap", + "torch._dim_arange", + "torch._dirichlet_grad", + "torch._disable_functionalization", + "torch._efficientzerotensor", + "torch._embedding_bag_forward_only", + "torch._embedding_bag", + "torch._empty_affine_quantized", + "torch._empty_per_channel_affine_quantized", + "torch._enable_functionalization", + "torch._euclidean_dist", + "torch._fake_quantize_learnable_per_channel_affine", + "torch._fake_quantize_learnable_per_tensor_affine", + "torch._fake_quantize_per_tensor_affine_cachemask_tensor_qparams", + "torch._fft_c2c", + "torch._fft_c2r", + "torch._fft_r2c", + "torch._fill_mem_eff_dropout_mask_", + "torch._foobar", + "torch._foreach_abs_", + 
"torch._foreach_abs", + "torch._foreach_acos_", + "torch._foreach_acos", + "torch._foreach_add_", + "torch._foreach_add", + "torch._foreach_addcdiv_", + "torch._foreach_addcdiv", + "torch._foreach_addcmul_", + "torch._foreach_addcmul", + "torch._foreach_asin_", + "torch._foreach_asin", + "torch._foreach_atan_", + "torch._foreach_atan", + "torch._foreach_ceil_", + "torch._foreach_ceil", + "torch._foreach_clamp_max_", + "torch._foreach_clamp_max", + "torch._foreach_clamp_min_", + "torch._foreach_clamp_min", + "torch._foreach_copy_", + "torch._foreach_cos_", + "torch._foreach_cos", + "torch._foreach_cosh_", + "torch._foreach_cosh", + "torch._foreach_div_", + "torch._foreach_div", + "torch._foreach_erf_", + "torch._foreach_erf", + "torch._foreach_erfc_", + "torch._foreach_erfc", + "torch._foreach_exp_", + "torch._foreach_exp", + "torch._foreach_expm1_", + "torch._foreach_expm1", + "torch._foreach_floor_", + "torch._foreach_floor", + "torch._foreach_frac_", + "torch._foreach_frac", + "torch._foreach_lerp_", + "torch._foreach_lerp", + "torch._foreach_lgamma_", + "torch._foreach_lgamma", + "torch._foreach_log_", + "torch._foreach_log", + "torch._foreach_log10_", + "torch._foreach_log10", + "torch._foreach_log1p_", + "torch._foreach_log1p", + "torch._foreach_log2_", + "torch._foreach_log2", + "torch._foreach_maximum_", + "torch._foreach_maximum", + "torch._foreach_minimum_", + "torch._foreach_minimum", + "torch._foreach_mul_", + "torch._foreach_mul", + "torch._foreach_neg_", + "torch._foreach_neg", + "torch._foreach_norm", + "torch._foreach_pow_", + "torch._foreach_pow", + "torch._foreach_reciprocal_", + "torch._foreach_reciprocal", + "torch._foreach_round_", + "torch._foreach_round", + "torch._foreach_sigmoid_", + "torch._foreach_sigmoid", + "torch._foreach_sign_", + "torch._foreach_sign", + "torch._foreach_sin_", + "torch._foreach_sin", + "torch._foreach_sinh_", + "torch._foreach_sinh", + "torch._foreach_sqrt_", + "torch._foreach_sqrt", + "torch._foreach_sub_", + "torch._foreach_sub", + "torch._foreach_tan_", + "torch._foreach_tan", + "torch._foreach_tanh_", + "torch._foreach_tanh", + "torch._foreach_trunc_", + "torch._foreach_trunc", + "torch._foreach_zero_", + "torch._freeze_functional_tensor", + "torch._from_functional_tensor", + "torch._functional_assert_async", + "torch._functional_sym_constrain_range_for_size", + "torch._functional_sym_constrain_range", + "torch._functionalize_are_all_mutations_hidden_from_autograd", + "torch._functionalize_commit_update", + "torch._functionalize_enable_reapply_views", + "torch._functionalize_has_data_mutation", + "torch._functionalize_has_metadata_mutation", + "torch._functionalize_is_multi_output_view", + "torch._functionalize_mark_mutation_hidden_from_autograd", + "torch._functionalize_replace", + "torch._functionalize_sync", + "torch._functionalize_was_storage_changed", + "torch._fused_adam_", + "torch._fused_adamw_", + "torch._fused_dropout", + "torch._fused_moving_avg_obs_fq_helper", + "torch._fused_sdp_choice", + "torch._fw_primal_copy", + "torch._grid_sampler_2d_cpu_fallback", + "torch._has_compatible_shallow_copy_type", + "torch._histogramdd_bin_edges", + "torch._histogramdd_from_bin_cts", + "torch._histogramdd_from_bin_tensors", + "torch._index_put_impl_", + "torch._indices_copy", + "torch._int_mm", + "torch._is_all_true", + "torch._is_any_true", + "torch._is_functional_tensor", + "torch._is_zerotensor", + "torch._linalg_check_errors", + "torch._linalg_det", + "torch._linalg_eigh", + "torch._linalg_eigvals", + "torch._linalg_slogdet", + 
"torch._linalg_solve_ex", + "torch._linalg_svd", + "torch._log_softmax_backward_data", + "torch._log_softmax", + "torch._logcumsumexp", + "torch._lstm_mps", + "torch._lu_with_info", + "torch._make_dep_token", + "torch._make_dual_copy", + "torch._make_dual", + "torch._make_per_channel_quantized_tensor", + "torch._make_per_tensor_quantized_tensor", + "torch._masked_scale", + "torch._masked_softmax", + "torch._mirror_autograd_meta_to", + "torch._mixed_dtypes_linear", + "torch._mkldnn_reshape", + "torch._mkldnn_transpose_", + "torch._mkldnn_transpose", + "torch._mps_convolution_transpose", + "torch._mps_convolution", + "torch._native_batch_norm_legit_no_training", + "torch._native_batch_norm_legit", + "torch._native_multi_head_attention", + "torch._neg_view_copy", + "torch._neg_view", + "torch._nested_from_padded_and_nested_example", + "torch._nested_tensor_from_mask_left_aligned", + "torch._nested_tensor_from_tensor_list", + "torch._nested_tensor_softmax_with_shape", + "torch._nested_view_from_buffer_copy", + "torch._nested_view_from_buffer", + "torch._nnpack_available", + "torch._nnpack_spatial_convolution", + "torch._pack_padded_sequence", + "torch._pad_packed_sequence", + "torch._pin_memory", + "torch._prelu_kernel", + "torch._propagate_xla_data", + "torch._remove_batch_dim", + "torch._reshape_alias_copy", + "torch._reshape_from_tensor", + "torch._resize_output_", + "torch._rowwise_prune", + "torch._sample_dirichlet", + "torch._saturate_weight_to_fp16", + "torch._scaled_dot_product_attention_math", + "torch._scaled_dot_product_efficient_attention", + "torch._scaled_dot_product_flash_attention", + "torch._scaled_dot_product_flash_attention_for_cpu", + "torch._scaled_dot_product_cudnn_attention", + "torch._scaled_mm", + "torch._shape_as_tensor", + "torch._sobol_engine_draw", + "torch._sobol_engine_ff_", + "torch._sobol_engine_initialize_state_", + "torch._sobol_engine_scramble_", + "torch._softmax_backward_data", + "torch._softmax", + "torch._sparse_broadcast_to_copy", + "torch._sparse_broadcast_to", + "torch._sparse_csr_prod", + "torch._sparse_csr_sum", + "torch._sparse_log_softmax_backward_data", + "torch._sparse_semi_structured_addmm", + "torch._sparse_semi_structured_linear", + "torch._sparse_semi_structured_mm", + "torch._sparse_softmax_backward_data", + "torch._sparse_sparse_matmul", + "torch._sparse_sum", + "torch._stack", + "torch._standard_gamma_grad", + "torch._standard_gamma", + "torch._test_autograd_multiple_dispatch_view_copy", + "torch._test_autograd_multiple_dispatch_view", + "torch._test_autograd_multiple_dispatch", + "torch._test_check_tensor", + "torch._test_functorch_fallback", + "torch._test_serialization_subcmul", + "torch._to_cpu", + "torch._to_functional_tensor", + "torch._to_sparse_semi_structured", + "torch._transform_bias_rescale_qkv", + "torch._transformer_encoder_layer_fwd", + "torch._trilinear", + "torch._triton_multi_head_attention", + "torch._triton_scaled_dot_attention", + "torch._unique", + "torch._unique2", + "torch._unpack_dual", + "torch._unsafe_index_put", + "torch._unsafe_index", + "torch._unsafe_masked_index_put_accumulate", + "torch._unsafe_masked_index", + "torch._use_cudnn_ctc_loss", + "torch._use_cudnn_rnn_flatten_weight", + "torch._values_copy", + "torch._weight_int4pack_mm", + "torch._weight_int8pack_mm", + "torch._weight_norm_interface", + "torch._weight_norm", + "torch.abs_", + "torch.abs", + "torch.absolute", + "torch.acos_", + "torch.acos", + "torch.acosh_", + "torch.acosh", + "torch.adaptive_avg_pool1d", + "torch.adaptive_max_pool1d", + 
"torch.add", + "torch.addbmm", + "torch.addcdiv", + "torch.addcmul", + "torch.addmm", + "torch.addmv_", + "torch.addmv", + "torch.addr", + "torch.adjoint", + "torch.affine_grid_generator", + "torch.alias_copy", + "torch.all", + "torch.allclose", + "torch.alpha_dropout_", + "torch.alpha_dropout", + "torch.amax", + "torch.amin", + "torch.aminmax", + "torch.angle", + "torch.any", + "torch.arange", + "torch.arccos_", + "torch.arccos", + "torch.arccosh_", + "torch.arccosh", + "torch.arcsin_", + "torch.arcsin", + "torch.arcsinh_", + "torch.arcsinh", + "torch.arctan_", + "torch.arctan", + "torch.arctan2", + "torch.arctanh_", + "torch.arctanh", + "torch.argmax", + "torch.argmin", + "torch.argsort", + "torch.argwhere", + "torch.as_strided_", + "torch.as_strided_copy", + "torch.as_strided_scatter", + "torch.as_strided", + "torch.as_tensor", + "torch.asarray", + "torch.asin_", + "torch.asin", + "torch.asinh_", + "torch.asinh", + "torch.atan_", + "torch.atan", + "torch.atan2", + "torch.atanh_", + "torch.atanh", + "torch.avg_pool1d", + "torch.baddbmm", + "torch.bartlett_window", + "torch.batch_norm_backward_elemt", + "torch.batch_norm_backward_reduce", + "torch.batch_norm_elemt", + "torch.batch_norm_gather_stats_with_counts", + "torch.batch_norm_gather_stats", + "torch.batch_norm_stats", + "torch.batch_norm_update_stats", + "torch.batch_norm", + "torch.bernoulli", + "torch.bilinear", + "torch.binary_cross_entropy_with_logits", + "torch.bincount", + "torch.binomial", + "torch.bitwise_and", + "torch.bitwise_left_shift", + "torch.bitwise_not", + "torch.bitwise_or", + "torch.bitwise_right_shift", + "torch.bitwise_xor", + "torch.blackman_window", + "torch.bmm", + "torch.broadcast_to", + "torch.bucketize", + "torch.can_cast", + "torch.cat", + "torch.ccol_indices_copy", + "torch.ceil_", + "torch.ceil", + "torch.celu_", + "torch.celu", + "torch.channel_shuffle", + "torch.cholesky_inverse", + "torch.cholesky_solve", + "torch.cholesky", + "torch.choose_qparams_optimized", + "torch.chunk", + "torch.clamp_", + "torch.clamp_max_", + "torch.clamp_max", + "torch.clamp_min_", + "torch.clamp_min", + "torch.clamp", + "torch.clip_", + "torch.clip", + "torch.clone", + "torch.col_indices_copy", + "torch.column_stack", + "torch.combinations", + "torch.complex", + "torch.concat", + "torch.concatenate", + "torch.conj_physical_", + "torch.conj_physical", + "torch.conj", + "torch.constant_pad_nd", + "torch.conv_tbc", + "torch.conv_transpose1d", + "torch.conv_transpose2d", + "torch.conv_transpose3d", + "torch.conv1d", + "torch.conv2d", + "torch.conv3d", + "torch.convolution", + "torch.copysign", + "torch.corrcoef", + "torch.cos_", + "torch.cos", + "torch.cosh_", + "torch.cosh", + "torch.cosine_embedding_loss", + "torch.cosine_similarity", + "torch.count_nonzero", + "torch.cov", + "torch.cross", + "torch.crow_indices_copy", + "torch.ctc_loss", + "torch.cudnn_affine_grid_generator", + "torch.cudnn_batch_norm", + "torch.cudnn_convolution_add_relu", + "torch.cudnn_convolution_relu", + "torch.cudnn_convolution_transpose", + "torch.cudnn_convolution", + "torch.cudnn_grid_sampler", + "torch.cudnn_is_acceptable", + "torch.cummax", + "torch.cummin", + "torch.cumprod", + "torch.cumsum", + "torch.cumulative_trapezoid", + "torch.deg2rad_", + "torch.deg2rad", + "torch.dequantize", + "torch.det", + "torch.detach_", + "torch.detach_copy", + "torch.detach", + "torch.diag_embed", + "torch.diag", + "torch.diagflat", + "torch.diagonal_copy", + "torch.diagonal_scatter", + "torch.diagonal", + "torch.diff", + "torch.digamma", + "torch.dist", + 
"torch.div", + "torch.divide", + "torch.dot", + "torch.dropout_", + "torch.dropout", + "torch.dsmm", + "torch.dsplit", + "torch.dstack", + "torch.embedding_bag", + "torch.embedding_renorm_", + "torch.embedding", + "torch.empty_like", + "torch.empty_permuted", + "torch.empty_quantized", + "torch.empty_strided", + "torch.empty", + "torch.eq", + "torch.equal", + "torch.erf_", + "torch.erf", + "torch.erfc_", + "torch.erfc", + "torch.erfinv", + "torch.exp_", + "torch.exp", + "torch.exp2_", + "torch.exp2", + "torch.expand_copy", + "torch.expm1_", + "torch.expm1", + "torch.eye", + "torch.fake_quantize_per_channel_affine", + "torch.fake_quantize_per_tensor_affine", + "torch.fbgemm_linear_fp16_weight_fp32_activation", + "torch.fbgemm_linear_fp16_weight", + "torch.fbgemm_linear_int8_weight_fp32_activation", + "torch.fbgemm_linear_int8_weight", + "torch.fbgemm_linear_quantize_weight", + "torch.fbgemm_pack_gemm_matrix_fp16", + "torch.fbgemm_pack_quantized_matrix", + "torch.feature_alpha_dropout_", + "torch.feature_alpha_dropout", + "torch.feature_dropout_", + "torch.feature_dropout", + "torch.fill_", + "torch.fill", + "torch.fix_", + "torch.fix", + "torch.flatten", + "torch.flip", + "torch.fliplr", + "torch.flipud", + "torch.float_power", + "torch.floor_", + "torch.floor_divide", + "torch.floor", + "torch.fmax", + "torch.fmin", + "torch.fmod", + "torch.frac_", + "torch.frac", + "torch.frexp", + "torch.frobenius_norm", + "torch.from_file", + "torch.from_numpy", + "torch.frombuffer", + "torch.full_like", + "torch.full", + "torch.fused_moving_avg_obs_fake_quant", + "torch.gather", + "torch.gcd_", + "torch.gcd", + "torch.ge", + "torch.geqrf", + "torch.ger", + "torch.get_device", + "torch.gradient", + "torch.greater_equal", + "torch.greater", + "torch.grid_sampler_2d", + "torch.grid_sampler_3d", + "torch.grid_sampler", + "torch.group_norm", + "torch.gru_cell", + "torch.gru", + "torch.gt", + "torch.hamming_window", + "torch.hann_window", + "torch.hardshrink", + "torch.heaviside", + "torch.hinge_embedding_loss", + "torch.histc", + "torch.histogram", + "torch.histogramdd", + "torch.hsmm", + "torch.hsplit", + "torch.hspmm", + "torch.hstack", + "torch.hypot", + "torch.i0_", + "torch.i0", + "torch.igamma", + "torch.igammac", + "torch.imag", + "torch.index_add", + "torch.index_copy", + "torch.index_fill", + "torch.index_put_", + "torch.index_put", + "torch.index_reduce", + "torch.index_select", + "torch.indices_copy", + "torch.inner", + "torch.instance_norm", + "torch.int_repr", + "torch.inverse", + "torch.is_complex", + "torch.is_conj", + "torch.is_distributed", + "torch.is_floating_point", + "torch.is_inference", + "torch.is_neg", + "torch.is_nonzero", + "torch.is_same_size", + "torch.is_signed", + "torch.is_vulkan_available", + "torch.isclose", + "torch.isfinite", + "torch.isin", + "torch.isinf", + "torch.isnan", + "torch.isneginf", + "torch.isposinf", + "torch.isreal", + "torch.istft", + "torch.kaiser_window", + "torch.kl_div", + "torch.kron", + "torch.kthvalue", + "torch.layer_norm", + "torch.lcm_", + "torch.lcm", + "torch.ldexp_", + "torch.ldexp", + "torch.le", + "torch.lerp", + "torch.less_equal", + "torch.less", + "torch.lgamma", + "torch.linspace", + "torch.log_", + "torch.log_softmax", + "torch.log", + "torch.log10_", + "torch.log10", + "torch.log1p_", + "torch.log1p", + "torch.log2_", + "torch.log2", + "torch.logaddexp", + "torch.logaddexp2", + "torch.logcumsumexp", + "torch.logdet", + "torch.logical_and", + "torch.logical_not", + "torch.logical_or", + "torch.logical_xor", + "torch.logit_", + 
"torch.logit", + "torch.logspace", + "torch.logsumexp", + "torch.lstm_cell", + "torch.lstm", + "torch.lt", + "torch.lu_solve", + "torch.lu_unpack", + "torch.margin_ranking_loss", + "torch.masked_fill", + "torch.masked_scatter", + "torch.masked_select", + "torch.matmul", + "torch.matrix_exp", + "torch.matrix_power", + "torch.max_pool1d_with_indices", + "torch.max_pool1d", + "torch.max_pool2d", + "torch.max_pool3d", + "torch.max", + "torch.maximum", + "torch.mean", + "torch.median", + "torch.min", + "torch.minimum", + "torch.miopen_batch_norm", + "torch.miopen_convolution_add_relu", + "torch.miopen_convolution_relu", + "torch.miopen_convolution_transpose", + "torch.miopen_convolution", + "torch.miopen_depthwise_convolution", + "torch.miopen_rnn", + "torch.mkldnn_adaptive_avg_pool2d", + "torch.mkldnn_convolution", + "torch.mkldnn_linear_backward_weights", + "torch.mkldnn_max_pool2d", + "torch.mkldnn_max_pool3d", + "torch.mkldnn_rnn_layer", + "torch.mm", + "torch.mode", + "torch.moveaxis", + "torch.movedim", + "torch.msort", + "torch.mul", + "torch.multinomial", + "torch.multiply", + "torch.mv", + "torch.mvlgamma", + "torch.nan_to_num_", + "torch.nan_to_num", + "torch.nanmean", + "torch.nanmedian", + "torch.nanquantile", + "torch.nansum", + "torch.narrow_copy", + "torch.narrow", + "torch.native_batch_norm", + "torch.native_channel_shuffle", + "torch.native_dropout", + "torch.native_group_norm", + "torch.native_layer_norm", + "torch.native_norm", + "torch.ne", + "torch.neg_", + "torch.neg", + "torch.negative_", + "torch.negative", + "torch.nextafter", + "torch.nonzero_static", + "torch.nonzero", + "torch.norm_except_dim", + "torch.normal", + "torch.not_equal", + "torch.nuclear_norm", + "torch.numel", + "torch.ones_like", + "torch.ones", + "torch.orgqr", + "torch.ormqr", + "torch.outer", + "torch.pairwise_distance", + "torch.pdist", + "torch.permute_copy", + "torch.permute", + "torch.pinverse", + "torch.pixel_shuffle", + "torch.pixel_unshuffle", + "torch.poisson_nll_loss", + "torch.poisson", + "torch.polar", + "torch.polygamma", + "torch.positive", + "torch.pow", + "torch.prelu", + "torch._print", + "torch.prod", + "torch.promote_types", + "torch.put", + "torch.q_per_channel_axis", + "torch.q_per_channel_scales", + "torch.q_per_channel_zero_points", + "torch.q_scale", + "torch.q_zero_point", + "torch.qr", + "torch.quantile", + "torch.quantize_per_channel", + "torch.quantize_per_tensor_dynamic", + "torch.quantize_per_tensor", + "torch.quantized_batch_norm", + "torch.quantized_gru_cell", + "torch.quantized_lstm_cell", + "torch.quantized_max_pool1d", + "torch.quantized_max_pool2d", + "torch.quantized_max_pool3d", + "torch.quantized_rnn_relu_cell", + "torch.quantized_rnn_tanh_cell", + "torch.rad2deg_", + "torch.rad2deg", + "torch.rand_like", + "torch.rand", + "torch.randint_like", + "torch.randint", + "torch.randn_like", + "torch.randn", + "torch.randperm", + "torch.range", + "torch.ravel", + "torch.real", + "torch.reciprocal_", + "torch.reciprocal", + "torch.relu_", + "torch.relu", + "torch.remainder", + "torch.renorm", + "torch.repeat_interleave", + "torch.reshape", + "torch.resolve_conj", + "torch.resolve_neg", + "torch.result_type", + "torch.rms_norm", + "torch.rnn_relu_cell", + "torch.rnn_relu", + "torch.rnn_tanh_cell", + "torch.rnn_tanh", + "torch.roll", + "torch.rot90", + "torch.round_", + "torch.round", + "torch.row_indices_copy", + "torch.row_stack", + "torch.rrelu_", + "torch.rrelu", + "torch.rsqrt_", + "torch.rsqrt", + "torch.rsub", + "torch.saddmm", + "torch.scalar_tensor", + 
"torch.scatter_add", + "torch.scatter_reduce", + "torch.scatter", + "torch.searchsorted", + "torch.segment_reduce", + "torch.select_copy", + "torch.select_scatter", + "torch.select", + "torch.selu_", + "torch.selu", + "torch.sgn", + "torch.sigmoid_", + "torch.sigmoid", + "torch.sign", + "torch.signal.windows.windows.sqrt", + "torch.signbit", + "torch.sin_", + "torch.sin", + "torch.sinc_", + "torch.sinc", + "torch.sinh_", + "torch.sinh", + "torch.slice_copy", + "torch.slice_scatter", + "torch.slogdet", + "torch.smm", + "torch.softmax", + "torch.sort", + "torch.split_copy", + "torch.split_with_sizes_copy", + "torch.split_with_sizes", + "torch.spmm", + "torch.sqrt_", + "torch.sqrt", + "torch.square_", + "torch.square", + "torch.squeeze_copy", + "torch.squeeze", + "torch.sspaddmm", + "torch.stack", + "torch.std_mean", + "torch.std", + "torch.sub", + "torch.subtract", + "torch.sum", + "torch.svd", + "torch.swapaxes", + "torch.swapdims", + "torch.sym_constrain_range_for_size", + "torch.sym_constrain_range", + "torch.t_copy", + "torch.t", + "torch.take_along_dim", + "torch.take", + "torch.tan_", + "torch.tan", + "torch.tanh_", + "torch.tanh", + "torch.tensor_split", + "torch.tensor", + "torch.threshold_", + "torch.threshold", + "torch.tile", + "torch.topk", + "torch.trace", + "torch.transpose_copy", + "torch.transpose", + "torch.trapezoid", + "torch.trapz", + "torch.triangular_solve", + "torch.tril_indices", + "torch.tril", + "torch.triplet_margin_loss", + "torch.triu_indices", + "torch.triu", + "torch.true_divide", + "torch.trunc_", + "torch.trunc", + "torch.unbind_copy", + "torch.unbind", + "torch.unflatten", + "torch.unfold_copy", + "torch.unsafe_chunk", + "torch.unsafe_split_with_sizes", + "torch.unsafe_split", + "torch.unsqueeze_copy", + "torch.unsqueeze", + "torch.values_copy", + "torch.vander", + "torch.var_mean", + "torch.var", + "torch.vdot", + "torch.view_as_complex_copy", + "torch.view_as_complex", + "torch.view_as_real_copy", + "torch.view_as_real", + "torch.view_copy", + "torch.vsplit", + "torch.vstack", + "torch.where", + "torch.xlogy_", + "torch.xlogy", + "torch.zero_", + "torch.zeros", + "torch.zeros_like", + "torch._fused_sgd_", + "torch.slice_inverse", + "torch._assert_scalar", + "torch._functional_assert_scalar", + ], + TorchInGraphFunctionVariable, +) + + +if sys.version_info >= (3, 9): + torch_c_binding_in_graph_functions["math.lcm"] = TorchInGraphFunctionVariable +if sys.version_info >= (3, 11): + torch_c_binding_in_graph_functions["math.exp2"] = TorchInGraphFunctionVariable + torch_c_binding_in_graph_functions["math.cbrt"] = TorchInGraphFunctionVariable + + +# In graph functions (including constant folding) that are not C bindings +torch_non_c_binding_in_graph_functions = dict.fromkeys( + [ + "torch.__future__.get_overwrite_module_params_on_conversion", + "torch.__future__.set_overwrite_module_params_on_conversion", + "torch.__getattr__", + "torch._assert", + "torch._check_index", + "torch._check_is_size", + "torch._check_not_implemented", + "torch._check_tensor_all_with", + "torch._check_tensor_all", + "torch._check_type", + "torch._check_value", + "torch._check_with", + "torch._check", + "torch._compile._disable_dynamo", + "torch._functorch.apis.chunk_vmap", + "torch._functorch.autograd_function.custom_function_call_functionalize", + "torch._functorch.autograd_function.custom_function_call_grad", + "torch._functorch.autograd_function.custom_function_call_vmap_generate_rule", + "torch._functorch.autograd_function.custom_function_call_vmap", + 
"torch._functorch.autograd_function.generate_single_level_function", + "torch._functorch.autograd_function.get_tangents_in_dims", + "torch._functorch.autograd_function.has_overriden_vmap_rule", + "torch._functorch.autograd_function.reductify_leaf", + "torch._functorch.autograd_function.reductify", + "torch._functorch.autograd_function.validate_vmap_returns_tuple_of_two_elements", + "torch._functorch.autograd_function.vmapify_autograd_function", + "torch._functorch.autograd_function.wrap_outputs_maintaining_identity", + "torch._functorch.batch_norm_replacement.batch_norm_without_running_stats", + "torch._functorch.batch_norm_replacement.replace_all_batch_norm_modules_", + "torch._functorch.deprecated.combine_state_for_ensemble", + "torch._functorch.deprecated.functionalize", + "torch._functorch.deprecated.get_warning", + "torch._functorch.deprecated.make_functional_with_buffers", + "torch._functorch.deprecated.make_functional", + "torch._functorch.deprecated.setup_docs", + "torch._functorch.deprecated.warn_deprecated", + "torch._functorch.eager_transforms._any_differentiable", + "torch._functorch.eager_transforms._autograd_grad", + "torch._functorch.eager_transforms._vjp_treespec_compare", + "torch._functorch.eager_transforms._set_tensor_requires_grad", + "torch._functorch.eager_transforms._jvp_treespec_compare", + "torch._functorch.eager_transforms._linearize_treespec_compare", + "torch._functorch.eager_transforms._is_differentiable", + "torch._functorch.eager_transforms._maybe_unwrap_functional_tensor", + "torch._functorch.eager_transforms._maybe_wrap_functional_tensor", + "torch._functorch.eager_transforms._unwrap_all_tensors_from_functional", + "torch._functorch.eager_transforms._wrap_all_tensors_to_functional", + "torch._functorch.eager_transforms.assert_flat_tuple_of_tensors", + "torch._functorch.eager_transforms.functionalize", + "torch._functorch.eager_transforms.lazy_dynamo_disable", + "torch._functorch.eager_transforms.noop", + "torch._functorch.pyfunctorch.coerce_cinterpreter", + "torch._functorch.pyfunctorch.dispatch_functorch", + "torch._functorch.pyfunctorch.nested", + "torch._functorch.pyfunctorch.retrieve_current_functorch_interpreter", + "torch._functorch.pyfunctorch.temporarily_pop_interpreter_stack", + "torch._functorch.utils.enable_single_level_autograd_function", + "torch._functorch.utils.exposed_in", + "torch._functorch.utils.unwrap_dead_wrappers", + "torch._functorch.vmap.lazy_load_decompositions", + "torch._guards.compile_context", + "torch._guards.detect_fake_mode", + "torch._guards.tracing", + "torch._higher_order_ops.map._has_potential_branch_input_alias", + "torch._higher_order_ops.map._has_potential_branch_input_mutation", + "torch._higher_order_ops.map._stack_pytree", + "torch._higher_order_ops.map._unstack_pytree", + "torch._higher_order_ops.map.create_fw_bw_graph", + "torch._higher_order_ops.map.map_autograd", + "torch._higher_order_ops.map.map_dense", + "torch._higher_order_ops.map.map_fake_tensor_mode", + "torch._higher_order_ops.map.map_functionalize", + "torch._higher_order_ops.map.map_proxy_torch_dispatch_mode", + "torch._higher_order_ops.map.map_wrapper", + "torch._higher_order_ops.map.trace_map", + "torch._higher_order_ops.out_dtype.elementwise_dtypes", + "torch._higher_order_ops.out_dtype.is_int_mm", + "torch._higher_order_ops.out_dtype.out_dtype_dense", + "torch._higher_order_ops.out_dtype.out_dtype_fake_tensor_mode", + "torch._higher_order_ops.out_dtype.out_dtype_fallback", + "torch._higher_order_ops.out_dtype.out_dtype_func", + 
"torch._higher_order_ops.out_dtype.out_dtype_proxy", + "torch._higher_order_ops.out_dtype.trace_out_dtype", + "torch._higher_order_ops.utils.autograd_not_implemented_inner", + "torch._higher_order_ops.utils.autograd_not_implemented", + "torch._linalg_utils._symeig", + "torch._linalg_utils.basis", + "torch._linalg_utils.bform", + "torch._linalg_utils.eig", + "torch._linalg_utils.get_floating_dtype", + "torch._linalg_utils.is_sparse", + "torch._linalg_utils.lstsq", + "torch._linalg_utils.matmul", + "torch._linalg_utils.matrix_rank", + "torch._linalg_utils.qform", + "torch._linalg_utils.solve", + "torch._linalg_utils.symeig", + "torch._load_global_deps", + "torch._lowrank._svd_lowrank", + "torch._lowrank.get_approximate_basis", + "torch._lowrank.pca_lowrank", + "torch._lowrank.svd_lowrank", + "torch._ops._compute_keyset", + "torch._ops._get_tensors", + "torch._ops._to_flat_tuple", + "torch._ops.add_cached_op", + "torch._ops.dl_open_guard", + "torch._ops.get_cached_ops", + "torch._ops.key_extractor", + "torch._ops.reset_cached_ops", + "torch._ops.resolve_key", + "torch._preload_cuda_deps", + "torch._register_device_module", + "torch._running_with_deploy", + "torch._utils._dummy_type", + "torch._weights_only_unpickler._get_allowed_globals", + "torch._weights_only_unpickler.load", + "torch.align_tensors", + "torch.amp.autocast_mode._enter_autocast", + "torch.amp.autocast_mode._exit_autocast", + "torch.amp.autocast_mode.autocast_decorator", + "torch.amp.autocast_mode.custom_bwd", + "torch.amp.autocast_mode.custom_fwd", + "torch.are_deterministic_algorithms_enabled", + "torch.atleast_1d", + "torch.atleast_2d", + "torch.atleast_3d", + "torch.autograd._calculate_shape", + "torch.autograd._is_checkpoint_valid", + "torch.autograd._make_grads", + "torch.autograd._register_py_tensor_class_for_device", + "torch.autograd._tensor_or_tensors_to_tuple", + "torch.autograd.forward_ad._maybe_load_decompositions", + "torch.autograd.function._iter_filter", + "torch.autograd.function._iter_jit_values", + "torch.autograd.function._iter_None_tensors", + "torch.autograd.function._iter_tensors_permissive", + "torch.autograd.function._iter_tensors", + "torch.autograd.function._jit_unwrap_structured", + "torch.autograd.function._map_tensor_data", + "torch.autograd.function._nested_map", + "torch.autograd.function._unflatten", + "torch.autograd.function.once_differentiable", + "torch.autograd.function.traceable", + "torch.autograd.functional._as_tuple_nocheck", + "torch.autograd.functional._as_tuple", + "torch.autograd.functional._autograd_grad", + "torch.autograd.functional._check_requires_grad", + "torch.autograd.functional._construct_standard_basis_for", + "torch.autograd.functional._fill_in_zeros", + "torch.autograd.functional._grad_postprocess", + "torch.autograd.functional._grad_preprocess", + "torch.autograd.functional._jacfwd", + "torch.autograd.functional._tuple_postprocess", + "torch.autograd.functional._validate_v", + "torch.autograd.functional.hessian", + "torch.autograd.functional.hvp", + "torch.autograd.functional.jacobian", + "torch.autograd.functional.jvp", + "torch.autograd.functional.vhp", + "torch.autograd.functional.vjp", + "torch.autograd.grad_mode._enter_inference_mode", + "torch.autograd.grad_mode._exit_inference_mode", + "torch.autograd.graph._get_sid", + "torch.autograd.graph._get_tid", + "torch.autograd.graph.allow_mutation_on_saved_tensors", + "torch.autograd.graph.get_gradient_edge", + "torch.autograd.graph.increment_version", + "torch.autograd.graph.register_multi_grad_hook", + 
"torch.autograd.variable", + "torch.backends.__allow_nonbracketed_mutation", + "torch.backends.cpu.get_cpu_capability", + "torch.backends.cuda.can_use_efficient_attention", + "torch.backends.cuda.can_use_flash_attention", + "torch.backends.cuda.can_use_cudnn_attention", + "torch.backends.cuda.enable_flash_sdp", + "torch.backends.cuda.enable_math_sdp", + "torch.backends.cuda.allow_fp16_bf16_reduction_math_sdp", + "torch.backends.cuda.enable_mem_efficient_sdp", + "torch.backends.cuda.flash_sdp_enabled", + "torch.backends.cuda.is_built", + "torch.backends.cuda.is_flash_attention_available", + "torch.backends.cuda.math_sdp_enabled", + "torch.backends.cuda.fp16_bf16_reduction_math_sdp_allowed", + "torch.backends.cuda.mem_efficient_sdp_enabled", + "torch.backends.cuda.cudnn_sdp_enabled", + "torch.backends.cuda.enable_cudnn_sdp", + "torch.backends.cuda.preferred_blas_library", + "torch.backends.cuda.preferred_linalg_library", + "torch.backends.cuda.sdp_kernel", + "torch.backends.cudnn._init", + "torch.backends.cudnn.flags", + "torch.backends.cudnn.is_acceptable", + "torch.backends.cudnn.is_available", + "torch.backends.cudnn.set_flags", + "torch.backends.cudnn.version", + "torch.backends.disable_global_flags", + "torch.backends.flags_frozen", + "torch.backends.mkl.is_available", + "torch.backends.mkldnn.flags", + "torch.backends.mkldnn.is_available", + "torch.backends.mkldnn.set_flags", + "torch.backends.mps._init", + "torch.backends.mps.is_available", + "torch.backends.mps.is_built", + "torch.backends.mps.is_macos13_or_newer", + "torch.backends.openmp.is_available", + "torch.backends.quantized._get_qengine_id", + "torch.backends.quantized._get_qengine_str", + "torch.block_diag", + "torch.broadcast_tensors", + "torch.cartesian_prod", + "torch.cdist", + "torch.chain_matmul", + "torch.compile", + "torch.compiled_with_cxx11_abi", + "torch._C._cpu._is_avx2_supported", + "torch._C._cpu._is_avx512_supported", + "torch._C._cpu._is_avx512_vnni_supported", + "torch._C._cpu._is_avx512_bf16_supported", + "torch._C._cpu._is_amx_tile_supported", + "torch.cpu._init_amx", + "torch.cpu.current_device", + "torch.cpu.current_stream", + "torch.cpu.device_count", + "torch.cpu.is_available", + "torch.cpu.set_device", + "torch.cpu.stream", + "torch.cpu.synchronize", + "torch.cuda._check_capability", + "torch.cuda._check_cubins", + "torch.cuda._device_count_amdsmi", + "torch.cuda._device_count_nvml", + "torch.cuda._get_amdsmi_handler", + "torch.cuda._get_amdsmi_device_index", + "torch.cuda._get_device", + "torch.cuda._get_generator", + "torch.cuda._get_nvml_device_index", + "torch.cuda._get_pynvml_handler", + "torch.cuda._get_rng_state_offset", + "torch.cuda._is_compiled", + "torch.cuda._lazy_call", + "torch.cuda._lazy_init", + "torch.cuda._memory_viz._block_extra_legacy", + "torch.cuda._memory_viz._block_extra", + "torch.cuda._memory_viz._format_size", + "torch.cuda._memory_viz._format_viz", + "torch.cuda._memory_viz._frame_filter", + "torch.cuda._memory_viz._frame_fmt", + "torch.cuda._memory_viz._frames_fmt", + "torch.cuda._memory_viz._profile_to_snapshot", + "torch.cuda._memory_viz._report_free", + "torch.cuda._memory_viz._write_blocks", + "torch.cuda._memory_viz.calc_active", + "torch.cuda._memory_viz.compare", + "torch.cuda._memory_viz.format_flamegraph", + "torch.cuda._memory_viz.memory", + "torch.cuda._memory_viz.profile_plot", + "torch.cuda._memory_viz.segment_plot", + "torch.cuda._memory_viz.segments", + "torch.cuda._memory_viz.segsum", + "torch.cuda._memory_viz.trace_plot", + "torch.cuda._memory_viz.trace", + 
"torch.cuda._nvml_based_avail", + "torch.cuda._parse_visible_devices", + "torch.cuda._raw_device_count_amdsmi", + "torch.cuda._raw_device_count_nvml", + "torch.cuda._raw_device_uuid_amdsmi", + "torch.cuda._raw_device_uuid_nvml", + "torch.cuda._register_triton_kernels", + "torch.cuda._set_rng_state_offset", + "torch.cuda._set_stream_by_id", + "torch.cuda._sleep", + "torch.cuda._transform_uuid_to_ordinals", + "torch.cuda._utils._get_device_index", + "torch.cuda.amp.autocast_mode._cast", + "torch.cuda.amp.autocast_mode.custom_bwd", + "torch.cuda.amp.autocast_mode.custom_fwd", + "torch.cuda.amp.common.amp_definitely_not_available", + "torch.amp.grad_scaler._refresh_per_optimizer_state", + "torch.cuda.can_device_access_peer", + "torch.cuda.check_error", + "torch.cuda.clock_rate", + "torch.cuda.cudart", + "torch.cuda.current_blas_handle", + "torch.cuda.current_stream", + "torch.cuda.default_stream", + "torch.cuda.device_count", + "torch.cuda.get_arch_list", + "torch.cuda.get_device_capability", + "torch.cuda.get_device_name", + "torch.cuda.get_device_properties", + "torch.cuda.get_gencode_flags", + "torch.cuda.get_sync_debug_mode", + "torch.cuda.graphs.graph_pool_handle", + "torch.cuda.graphs.is_current_stream_capturing", + "torch.cuda.graphs.make_graphed_callables", + "torch.cuda.init", + "torch.cuda.ipc_collect", + "torch.cuda.is_available", + "torch.cuda.is_bf16_supported", + "torch.cuda.is_initialized", + "torch.cuda.jiterator._create_jit_fn", + "torch.cuda.jiterator._create_multi_output_jit_fn", + "torch.cuda.memory_usage", + "torch.cuda.memory._dump_snapshot", + "torch.cuda.memory._free_mutex", + "torch.cuda.memory._get_current_allocator", + "torch.cuda.memory._host_allocator", + "torch.cuda.memory._record_memory_history_impl", + "torch.cuda.memory._record_memory_history_legacy", + "torch.cuda.memory._record_memory_history", + "torch.cuda.memory._save_memory_usage", + "torch.cuda.memory._save_segment_usage", + "torch.cuda.memory._set_allocator_settings", + "torch.cuda.memory._snapshot", + "torch.cuda.memory.caching_allocator_alloc", + "torch.cuda.memory.caching_allocator_delete", + "torch.cuda.memory.change_current_allocator", + "torch.cuda.memory.empty_cache", + "torch.cuda.memory.get_allocator_backend", + "torch.cuda.memory.list_gpu_processes", + "torch.cuda.memory.max_memory_allocated", + "torch.cuda.memory.max_memory_cached", + "torch.cuda.memory.max_memory_reserved", + "torch.cuda.memory.mem_get_info", + "torch.cuda.memory.memory_allocated", + "torch.cuda.memory.memory_cached", + "torch.cuda.memory.memory_reserved", + "torch.cuda.memory.memory_snapshot", + "torch.cuda.memory.memory_stats_as_nested_dict", + "torch.cuda.memory.memory_stats", + "torch.cuda.memory.memory_summary", + "torch.cuda.memory.reset_accumulated_memory_stats", + "torch.cuda.memory.reset_max_memory_allocated", + "torch.cuda.memory.reset_max_memory_cached", + "torch.cuda.memory.reset_peak_memory_stats", + "torch.cuda.memory.set_per_process_memory_fraction", + "torch.cuda.nccl._check_sequence_type", + "torch.cuda.nccl.all_gather", + "torch.cuda.nccl.all_reduce", + "torch.cuda.nccl.broadcast", + "torch.cuda.nccl.init_rank", + "torch.cuda.nccl.is_available", + "torch.cuda.nccl.reduce_scatter", + "torch.cuda.nccl.reduce", + "torch.cuda.nccl.unique_id", + "torch.cuda.nccl.version", + "torch.cuda.nvtx.mark", + "torch.cuda.nvtx.range_end", + "torch.cuda.nvtx.range_pop", + "torch.cuda.nvtx.range_push", + "torch.cuda.nvtx.range_start", + "torch.cuda.nvtx.range", + "torch.cuda.power_draw", + "torch.cuda.profiler.init", + 
"torch.cuda.profiler.profile", + "torch.cuda.profiler.start", + "torch.cuda.profiler.stop", + "torch.cuda.random.get_rng_state_all", + "torch.cuda.random.initial_seed", + "torch.cuda.random.manual_seed_all", + "torch.cuda.random.manual_seed", + "torch.cuda.random.seed_all", + "torch.cuda.random.seed", + "torch.cuda.random.set_rng_state_all", + "torch.cuda.set_stream", + "torch.cuda.set_sync_debug_mode", + "torch.cuda.stream", + "torch.cuda.synchronize", + "torch.cuda.temperature", + "torch.cuda.utilization", + "torch.einsum", + "torch.functional._check_list_size", + "torch.functional._consecutive_return_counts", + "torch.functional._consecutive_return_inverse_false", + "torch.functional._consecutive_return_inverse_true", + "torch.functional._consecutive_return_inverse", + "torch.functional._consecutive_return_output", + "torch.functional._lu_impl", + "torch.functional._lu_no_infos", + "torch.functional._lu_with_infos", + "torch.functional._meshgrid", + "torch.functional._return_counts", + "torch.functional._return_inverse_false", + "torch.functional._return_inverse_true", + "torch.functional._return_inverse", + "torch.functional._return_output", + "torch.functional._unique_consecutive_impl", + "torch.functional._unique_impl", + "torch.functional._unravel_index", + "torch.functional.broadcast_shapes", + "torch.functional.lu", + "torch.functional.unique", + "torch.functional.unravel_index", + "torch.futures.collect_all", + "torch.futures.wait_all", + "torch.fx.experimental.const_fold.split_const_subgraphs", + "torch.fx.experimental.proxy_tensor.make_fx", + "torch.get_deterministic_debug_mode", + "torch.get_float32_matmul_precision", + "torch.is_deterministic_algorithms_warn_only_enabled", + "torch.is_storage", + "torch.is_tensor", + "torch.is_warn_always_enabled", + "torch.masked._ops._any", + "torch.masked._ops._apply_docstring_templates", + "torch.masked._ops._canonical_dim", + "torch.masked._ops._combine_input_and_mask", + "torch.masked._ops._generate_docstring", + "torch.masked._ops._input_mask", + "torch.masked._ops._output_mask", + "torch.masked._ops._reduction_identity", + "torch.masked._ops._sparse_coo_flatten_indices", + "torch.masked._ops._sparse_coo_scatter_reduction_helper", + "torch.masked._ops._sparse_coo_where", + "torch.masked._ops._sparse_csr_segment_reduction_helper", + "torch.masked._ops._sparse_csr_where", + "torch.masked._ops._std_var", + "torch.masked._ops._where", + "torch.masked._ops.amax", + "torch.masked._ops.amin", + "torch.masked._ops.argmax", + "torch.masked._ops.argmin", + "torch.masked._ops.corresponding_real_dtype", + "torch.masked._ops.cumprod", + "torch.masked._ops.cumsum", + "torch.masked._ops.log_softmax", + "torch.masked._ops.logaddexp", + "torch.masked._ops.logsumexp", + "torch.masked._ops.mean", + "torch.masked._ops.median", + "torch.masked._ops.norm", + "torch.masked._ops.normalize", + "torch.masked._ops.prod", + "torch.masked._ops.softmax", + "torch.masked._ops.softmin", + "torch.masked._ops.std", + "torch.masked._ops.sum", + "torch.masked._ops.var", + "torch.meshgrid", + "torch.mps._get_default_mps_generator", + "torch.mps.current_allocated_memory", + "torch.mps.driver_allocated_memory", + "torch.mps.empty_cache", + "torch.mps.get_rng_state", + "torch.mps.manual_seed", + "torch.mps.profiler.profile", + "torch.mps.profiler.start", + "torch.mps.profiler.stop", + "torch.mps.seed", + "torch.mps.set_per_process_memory_fraction", + "torch.mps.set_rng_state", + "torch.mps.synchronize", + "torch.nested._internal.nested_tensor.buffer_from_jagged", + 
"torch.nested._internal.nested_tensor.get_tensor_symint", + "torch.nested._internal.nested_tensor.is_expandable_to", + "torch.nested._internal.nested_tensor.jagged_from_list", + "torch.nested._internal.nested_tensor.jagged_from_tensor_and_lengths", + "torch.nested._internal.nested_tensor.nested_view_from_values_offsets", + "torch.nested._internal.nested_tensor.nested_view_from_values_offsets_lengths", + "torch.nested.as_nested_tensor", + "torch.nested.narrow", + "torch.nested.nested_tensor", + "torch.nn._reduction.get_enum", + "torch.nn._reduction.legacy_get_enum", + "torch.nn._reduction.legacy_get_string", + "torch.nn.factory_kwargs", + "torch.nn.functional.adaptive_avg_pool2d", + "torch.nn.functional.adaptive_avg_pool3d", + "torch.nn.functional.adaptive_max_pool1d_with_indices", + "torch.nn.functional.adaptive_max_pool1d", + "torch.nn.functional.adaptive_max_pool2d_with_indices", + "torch.nn.functional.adaptive_max_pool2d", + "torch.nn.functional.adaptive_max_pool3d_with_indices", + "torch.nn.functional.adaptive_max_pool3d", + "torch.nn.functional.affine_grid", + "torch.nn.functional.alpha_dropout", + "torch.nn.functional.assert_int_or_pair", + "torch.nn.functional.batch_norm", + "torch.nn.functional.binary_cross_entropy_with_logits", + "torch.nn.functional.binary_cross_entropy", + "torch.nn.functional.celu", + "torch.nn.functional.cosine_embedding_loss", + "torch.nn.functional.cross_entropy", + "torch.nn.functional.ctc_loss", + "torch.nn.functional.dropout", + "torch.nn.functional.dropout1d", + "torch.nn.functional.dropout2d", + "torch.nn.functional.dropout3d", + "torch.nn.functional.elu", + "torch.nn.functional.embedding_bag", + "torch.nn.functional.embedding", + "torch.nn.functional.feature_alpha_dropout", + "torch.nn.functional.fold", + "torch.nn.functional.fractional_max_pool2d_with_indices", + "torch.nn.functional.fractional_max_pool2d", + "torch.nn.functional.fractional_max_pool3d_with_indices", + "torch.nn.functional.fractional_max_pool3d", + "torch.nn.functional.gaussian_nll_loss", + "torch.nn.functional.glu", + "torch.nn.functional.grid_sample", + "torch.nn.functional.group_norm", + "torch.nn.functional.gumbel_softmax", + "torch.nn.functional.hardsigmoid", + "torch.nn.functional.hardswish", + "torch.nn.functional.hardtanh", + "torch.nn.functional.hinge_embedding_loss", + "torch.nn.functional.huber_loss", + "torch.nn.functional.instance_norm", + "torch.nn.functional.interpolate", + "torch.nn.functional.kl_div", + "torch.nn.functional.l1_loss", + "torch.nn.functional.layer_norm", + "torch.nn.functional.leaky_relu", + "torch.nn.functional.local_response_norm", + "torch.nn.functional.log_softmax", + "torch.nn.functional.lp_pool1d", + "torch.nn.functional.lp_pool2d", + "torch.nn.functional.margin_ranking_loss", + "torch.nn.functional.max_pool1d_with_indices", + "torch.nn.functional.max_pool1d", + "torch.nn.functional.max_pool2d_with_indices", + "torch.nn.functional.max_pool2d", + "torch.nn.functional.max_pool3d_with_indices", + "torch.nn.functional.max_pool3d", + "torch.nn.functional.max_unpool1d", + "torch.nn.functional.max_unpool2d", + "torch.nn.functional.max_unpool3d", + "torch.nn.functional.mish", + "torch.nn.functional.mse_loss", + "torch.nn.functional.multi_head_attention_forward", + "torch.nn.functional.multi_margin_loss", + "torch.nn.functional.multilabel_margin_loss", + "torch.nn.functional.multilabel_soft_margin_loss", + "torch.nn.functional.nll_loss", + "torch.nn.functional.normalize", + "torch.nn.functional.poisson_nll_loss", + "torch.nn.functional.relu", + 
"torch.nn.functional.relu6", + "torch.nn.functional.rrelu", + "torch.nn.functional.selu", + "torch.nn.functional.sigmoid", + "torch.nn.functional.silu", + "torch.nn.functional.smooth_l1_loss", + "torch.nn.functional.soft_margin_loss", + "torch.nn.functional.softmax", + "torch.nn.functional.softmin", + "torch.nn.functional.softsign", + "torch.nn.functional.tanh", + "torch.nn.functional.tanhshrink", + "torch.nn.functional.triplet_margin_loss", + "torch.nn.functional.unfold", + "torch.nn.functional.upsample_bilinear", + "torch.nn.functional.upsample_nearest", + "torch.nn.functional.upsample", + "torch.nn.grad._pair", + "torch.nn.grad._single", + "torch.nn.grad._triple", + "torch.nn.grad.conv1d_input", + "torch.nn.grad.conv1d_weight", + "torch.nn.grad.conv2d_input", + "torch.nn.grad.conv2d_weight", + "torch.nn.grad.conv3d_input", + "torch.nn.grad.conv3d_weight", + "torch.nn.modules.activation._is_make_fx_tracing", + "torch.nn.modules.utils._list_with_default", + "torch.nn.modules.utils._ntuple", + "torch.nn.modules.utils._quadruple", + "torch.nn.modules.utils._reverse_repeat_tuple", + "torch.nn.modules.utils.consume_prefix_in_state_dict_if_present", + "torch.nn.parameter.is_lazy", + "torch.norm", + "torch.quantization.default_eval_fn", + "torch.random._seed_custom_device", + "torch.random.fork_rng", + "torch.random.initial_seed", + "torch.random.seed", + "torch.return_types.pytree_register_structseq", + "torch.set_default_device", + "torch.set_default_dtype", + "torch.set_default_tensor_type", + "torch.set_deterministic_debug_mode", + "torch.set_float32_matmul_precision", + "torch.set_warn_always", + "torch.signal.windows.windows._add_docstr", + "torch.signal.windows.windows._window_function_checks", + "torch.signal.windows.windows.bartlett", + "torch.signal.windows.windows.blackman", + "torch.signal.windows.windows.cosine", + "torch.signal.windows.windows.exponential", + "torch.signal.windows.windows.gaussian", + "torch.signal.windows.windows.general_cosine", + "torch.signal.windows.windows.general_hamming", + "torch.signal.windows.windows.hamming", + "torch.signal.windows.windows.hann", + "torch.signal.windows.windows.kaiser", + "torch.signal.windows.windows.merge_dicts", + "torch.signal.windows.windows.nuttall", + "torch.signal.windows.windows.parse_kwargs", + "torch.sparse.semi_structured.to_sparse_semi_structured", + "torch.sparse.sum", + "torch.split", + "torch.stft", + "torch.sym_float", + "torch.sym_int", + "torch.sym_ite", + "torch.sym_max", + "torch.sym_min", + "torch.sym_not", + "torch.tensordot", + "torch.typename", + "torch.unique_consecutive", + "torch.use_deterministic_algorithms", + ], + TorchInGraphFunctionVariable, +) + + +torch_name_rule_map = [ + manual_torch_name_rule_map, + torch_c_binding_in_graph_functions, + torch_non_c_binding_in_graph_functions, +] + + +""" +Generate the torch object - Dynamo tracing rule (the wrapping variable) map. 
+""" + + +@functools.lru_cache(None) +def get_torch_obj_rule_map() -> Dict[Any, Type["VariableTracker"]]: + d: Dict[Any, Type[VariableTracker]] = {} + for m in torch_name_rule_map: + for k, v in m.items(): # type: ignore[attr-defined] + if ".py#" not in k: + obj = load_object(k) + else: + obj = _module_dir(torch) + k[len("torch/") :] + if obj is not None: + if obj in d and d[obj] != v: + raise AssertionError( + f"Duplicate torch object {obj} with different rules: {v}, {d[obj]}" + ) + else: + d[obj] = v + return d + + +def _load_obj_from_str(fully_qualified_name): + module, obj_name = fully_qualified_name.rsplit(".", maxsplit=1) + return getattr(importlib.import_module(module), obj_name) + + +""" +Load string represented torch objects. +""" + + +def load_object(name): + try: + x = name.split("#") + if len(x) == 2: + obj = _load_obj_from_str(x[0]) + val = getattr(obj, x[1]) + else: + assert len(x) == 1, f"Invalid obj name {name}" + val = _load_obj_from_str(x[0]) + val = unwrap_if_wrapper(val) + except (AttributeError, ImportError): + val = None + return val + + +""" +Get all torch.Tensor methods which are allowed to be in graph functions. +""" + + +@functools.lru_cache(None) +def get_tensor_method(): + s = set() + for name in dir(torch.Tensor): + method = getattr(torch.Tensor, name) + if isinstance( + method, (types.MethodDescriptorType, types.WrapperDescriptorType) + ): + s.add(method) + return frozenset(s) + + +""" +Return if a torch object is ATen op or torch.Tensor method. +""" + + +def is_aten_op_or_tensor_method(obj): + return obj in get_tensor_method() or isinstance( + obj, + (torch._ops.OpOverloadPacket, torch._ops.OpOverload), + ) + + +class FunctionIdSet: + """ + Track a set of `id()`s of objects which are either allowed or not + allowed to go into the generated FX graph. Use to test for torch.*, + numpy.*, builtins.*, etc. + + Support user modification to permit customization of what can be + added to the graph and what will cause a graph break. 
+ """ + + function_ids: Optional[Set[int]] = None + function_names: Optional[Dict[int, str]] = None + + def __init__( + self, lazy_initializer: Callable[[], Union[Dict[int, str], Set[int]]] + ) -> None: + self.lazy_initializer = lazy_initializer + + def __call__(self) -> Set[int]: + if self.function_ids is None: + value = self.lazy_initializer() + if isinstance(value, dict): + self.function_ids = set(value.keys()) + self.function_names = value + else: + assert isinstance(value, set) + self.function_ids = value + return self.function_ids + + def get_name(self, idx: int, default: str): + self() # lazy init + assert self.function_names is not None + return self.function_names.get(idx, default) + + def add(self, idx: int): + function_ids = self() # lazy init + function_ids.add(idx) + + def remove(self, idx: int): + function_ids = self() + if idx in function_ids: + function_ids.remove(idx) + + def __contains__(self, idx: int) -> bool: + return idx in self() + + +@FunctionIdSet +def _allowed_callable_ids() -> Dict[int, str]: + rv: Dict[int, str] = {} + return rv + + +@FunctionIdSet +def _disallowed_callable_ids() -> Dict[int, str]: + rv: Dict[int, str] = {} + return rv + + +@FunctionIdSet +def _builtin_function_ids() -> Dict[int, str]: + # See also torch/_dynamo/polyfills/loader.py, which removes items in _builtin_function_ids + rv = { + id(v): f"builtins.{k}" + for k, v in builtins.__dict__.items() + if not k.startswith("_") and callable(v) + } + rv.update( + { + id(v): f"operator.{k}" + for k, v in operator.__dict__.items() + if not k.startswith("_") and callable(v) + } + ) + rv.update( + { + id(cast): "typing.cast", + id(functools.reduce): "functools.reduce", + id(copy.deepcopy): "copy.deepcopy", + } + ) + return rv + + +@FunctionIdSet +def _numpy_function_ids() -> Dict[int, str]: + rv = {} + for mod in NP_SUPPORTED_MODULES: + rv.update( + { + id(v): f"{mod.__name__}.{k}" + for k, v in mod.__dict__.items() + if callable(v) + and (getattr(v, "__module__", None) or mod.__name__) == mod.__name__ + } + ) + return rv + + +@FunctionIdSet +def _builtin_constant_ids() -> Dict[int, str]: + """ + Collects constant builtins by eliminating callable items. + """ + rv = { + id(v): f"builtins.{k}" + for k, v in builtins.__dict__.items() + if not k.startswith("_") and not callable(v) + } + return rv + + +_lazy_module_init: Dict[str, List[Callable[[], None]]] = defaultdict(list) + + +def add_module_init_func(name: str, init_func: Callable[[], None]) -> None: + """Register a module without eagerly importing it""" + # If the module is already imported, eagerly run init + assert "." 
not in name, f"Expected a root module name, but got {name}" + assert name not in _lazy_module_init + _lazy_module_init[name].append(init_func) + + +def _maybe_init_lazy_module(obj: object) -> None: + module = getattr(obj, "__module__", None) + if module is None: + return + + base_module = module.split(".")[0] + init_funcs = _lazy_module_init.pop(base_module, None) + if init_funcs is not None: + for fn in init_funcs: + fn() + + +def is_callable_allowed(obj) -> bool: + _maybe_init_lazy_module(obj) + return id(obj) in _allowed_callable_ids + + +def is_callable_disallowed(obj) -> bool: + _maybe_init_lazy_module(obj) + return id(obj) in _disallowed_callable_ids + + +def is_forbidden(obj) -> bool: + _maybe_init_lazy_module(obj) + return inspect.getattr_static(obj, "_dynamo_forbidden", False) + + +def is_builtin_callable(obj) -> bool: + # See also torch/_dynamo/polyfills/loader.py, which removes items in _builtin_function_ids + return id(obj) in _builtin_function_ids + + +def is_builtin_constant(obj) -> bool: + return id(obj) in _builtin_constant_ids + + +def is_numpy(obj) -> bool: + if np is None: + return False + return isinstance(obj, (np.ndarray, np.generic)) or id(obj) in _numpy_function_ids + + +def is_numpy_dtype(obj) -> bool: + if np is None: + return False + return isinstance(obj, np.dtype) + + +def is_numpy_type_info(obj) -> bool: + if np is None: + return False + return isinstance(obj, (np.finfo, np.iinfo)) + + +BUILTIN_SKIPLIST = ( + abc, + collections, + contextlib, + copy, + copyreg, + dataclasses, + enum, + functools, + importlib, + inspect, + linecache, + logging, + multiprocessing, + operator, + posixpath, + random, + re, + selectors, + signal, + tempfile, + threading, + tokenize, + torch, # torch/* is skipped by default unless specified in FUNC_INLINELIST or MOD_INLINELIST + traceback, + types, + typing, + unittest, + weakref, + _collections_abc, + _weakrefset, +) + +# third party libraries skiplist is defined by str, because users may not use these libraries. +# we should use lazy import & skip in the future. +THIRDPARTY_SKIPLIST = ( + "fx2trt_oss", + "hypothesis", + "networkx", + "numpy", + "omegaconf", + "onnx", + "onnxruntime", + "onnx_tf", + "pandas", + "sklearn", + "tabulate", + "tensorflow", + "tensorrt", + "torch2trt", + "tqdm", + "tree", + "tvm", + "xarray", +) + + +def _as_posix_path(path): + posix_path = Path(os.path.normpath(path)).as_posix() + # os.path.normpath and pathlib.Path remove trailing slash, so we need to add it back + if path.endswith((os.path.sep, "/")): + posix_path += "/" + return posix_path + + +def _strip_init_py(s): + # TODO: Once we require py3.9 use removesuffix instead. + suffix = "__init__.py" + if s.endswith(suffix): + s = s[: -len(suffix)] + return _as_posix_path(s) + + +def _module_dir(m: types.ModuleType): + # Protect against a module not exporting __file__ - this can happen for + # frozen modules, for example. + file = getattr(m, "__file__", None) + return file and _strip_init_py(file) + + +# These are legacy workarounds, don't add new modules to this list. +# Please use the MOD_INLINELIST instead to force inline functions under particular modules. 
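+# Editor's note (illustrative paths; assumes a standard site-packages install): +# module names in the inline lists below are converted to filename prefixes via +# _module_dir, e.g. "torch._dynamo.external_utils" -> +# "<site-packages>/torch/_dynamo/external_utils", and check_file() compares a +# frame's filename against these prefixes with str.startswith.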
+LEGACY_MOD_INLINELIST = { + "torch._dynamo.external_utils", + "torch._export.db.examples", + "torch._export.wrappers", + "torch._functorch.apis", + "torch._functorch.deprecated", + "torch._higher_order_ops.cond", + "torch._higher_order_ops.while_loop", + "torch._higher_order_ops.associative_scan", + "torch.nn.attention.flex_attention", + "torch.ao.quantization.pt2e.export_utils", + "torch.ao.quantization.pt2e.qat_utils", + "torch.ao.quantization.pt2e.representation.rewrite", + "torch.ao.quantization.pt2e.utils", + "torch.ao.quantization.quantizer.xnnpack_quantizer", + "torch.export.unflatten", + "torch.optim", +} + +if torch.distributed.is_available(): + LEGACY_MOD_INLINELIST |= { + "torch.distributed.tensor._api", + "torch.distributed.tensor.device_mesh", + "torch.distributed.device_mesh", + "torch.distributed.algorithms._checkpoint.checkpoint_wrapper", + "torch.distributed.tensor.parallel._data_parallel_utils", + "torch.distributed.tensor.parallel._utils", + "torch.distributed.tensor.parallel.style", + # we have to add replicate to LEGACY_MOD_INLINELIST to ensure + # the forward_hook won't be ignored. + "torch.distributed._composable.replicate", + } + if not torch._dynamo.config.skip_fsdp_hooks: + LEGACY_MOD_INLINELIST.add("torch.distributed._composable.fsdp") + + +# Force inline functions under these modules, even if they are in *_SKIPLIST. +# We use Python module names instead of file or directory objects to avoid circular dependencies. +# Please keep this sorted alphabetically. +MOD_INLINELIST = [ + "torch._decomp", + "torch._dynamo._trace_wrapped_higher_order_op", + "torch._dynamo.comptime", + "torch._dynamo.polyfills", + "torch._functorch.autograd_function", + "torch._functorch.eager_transforms", + "torch._functorch.functional_call", + "torch._functorch.vmap", + "torch._higher_order_ops.associative_scan", + "torch._higher_order_ops.strict_mode", + "torch._higher_order_ops.while_loop", + "torch._inductor.test_operators", + "torch._library.autograd", + "torch._library.custom_ops", + "torch._prims", + "torch._refs", + "torch._tensor", + "torch.amp.autocast_mode", + "torch.ao.nn", + "torch.autograd.function", + "torch.backends.cuda", + "torch.cuda.amp.autocast_mode", + "torch.distributions", + "torch.export._tree_utils", + "torch.fx._pytree", + "torch.fx._symbolic_trace", + "torch.fx.experimental.proxy_tensor", + "torch.fx.passes.shape_prop", + "torch.nn", + "torch.overrides", + "torch.random", + "torch.sparse", + "torch.testing", + "torch.utils._content_store", + "torch.utils._contextlib", + "torch.utils._foreach_utils", + "torch.utils._python_dispatch", + "torch.utils._pytree", + "torch.utils.hooks", +] +assert sorted(set(MOD_INLINELIST)) == MOD_INLINELIST +MOD_INLINELIST = set(MOD_INLINELIST) + + +if torch.distributed.is_available(): + MOD_INLINELIST.add("torch.distributed") + if not torch._dynamo.config.skip_fsdp_hooks: + MOD_INLINELIST.add("torch.distributed._composable.fsdp") + + +@functools.lru_cache(None) +def get_legacy_mod_inlinelist(): + inlinelist = { + _as_posix_path(_module_dir(torch) + m[len("torch.") :].replace(".", "/")) + for m in LEGACY_MOD_INLINELIST + } + return inlinelist + + +@functools.lru_cache(None) +def get_mod_inlinelist(): + inlinelist = { + _as_posix_path(_module_dir(torch) + m[len("torch.") :].replace(".", "/")) + for m in MOD_INLINELIST + } + return inlinelist + + +# skip some standard python builtin libs +SKIP_DIRS = [ + "", + _as_posix_path(_config_module.__file__), + "triton/backends", +] +SKIP_DIRS.extend(map(_as_posix_path, filter(None,
map(_module_dir, BUILTIN_SKIPLIST)))) + +SKIP_DIRS_RE = re.compile(r"match nothing^") + +is_fbcode = importlib.import_module("torch._inductor.config").is_fbcode() +# Skip fbcode paths (including torch.package paths) containing +# one of the following strings. +FBCODE_SKIP_DIRS: Set[str] = set() + +FBCODE_SKIP_DIRS_RE = re.compile(f".*({'|'.join(map(re.escape, FBCODE_SKIP_DIRS))})") + +# Remove this after fbcode is fully migrated to tracing through torchrec. +FBCODE_SKIP_TORCHREC_DIRS = { + "torchrec/distributed", + "torchrec/fb/distributed", + "caffe2/torch/fb/sparsenn/pooled_embeddings_modules.py", +} + +FBCODE_SKIP_TORCHREC_DIRS_RE = re.compile( + f".*({'|'.join(re.escape(_as_posix_path(d)) for d in FBCODE_SKIP_TORCHREC_DIRS)})" +) + +# TODO(yanboliang, anijain2305) - There are a few concerns that we should +# resolve +# 1) Audit if torchrec/distributed is even required in FBCODE_SKIP_DIRS +# 2) To inline just one file but skip others in a directory, we could use +# manual_torch_name_rule_map but this one is hard because FBCODE can add unusual +# names like torch_package. +# So, this is a stopgap solution until then. +FBCODE_INLINE_FILES_IN_SKIPPED_DIRS = { + "torchrec/distributed/types.py", +} +FBCODE_INLINE_FILES_IN_SKIPPED_DIRS_RE = re.compile( + f".*({'|'.join(re.escape(_as_posix_path(d)) for d in FBCODE_INLINE_FILES_IN_SKIPPED_DIRS)})" +) + +# torch.optim is a special case, +# we usually want to inline it, but the directory +# structure does not match the module structure +# and we want to skip the functions in optim/lr_scheduler.py +# this has precedence over all other rules in check_file +FORCE_SKIP_FILES = {f"{_module_dir(torch)}optim/lr_scheduler.py"} + + +def _recompile_re(): + global SKIP_DIRS_RE + SKIP_DIRS_RE = re.compile( + rf"^[^\s<]*({'|'.join(re.escape(_as_posix_path(d)) for d in SKIP_DIRS)})" + ) + + +def add(import_name: str): + if isinstance(import_name, types.ModuleType): + return add(import_name.__name__) + assert isinstance(import_name, str) + from importlib.util import find_spec + + module_spec = find_spec(import_name) + if not module_spec: + return + origin = module_spec.origin + if origin is None: + return + SKIP_DIRS.append(_strip_init_py(origin)) + _recompile_re() + + +@dataclasses.dataclass +class SkipResult: + skipped: bool + reason: Optional[str] + + +def check_file(filename, is_inlined_call=False): + """Should this file be skipped?""" + if filename is None: + return SkipResult(True, "filename is None") + filename = _as_posix_path(filename) + if filename in FORCE_SKIP_FILES: + return SkipResult(True, "FORCE_SKIP_FILES") + if any(filename.startswith(d) for d in get_legacy_mod_inlinelist()): + return SkipResult( + False, + "LEGACY_MOD_INLINELIST", + ) + if is_inlined_call and is_torch_inline_allowed(filename): + return SkipResult( + False, + "MOD_INLINELIST", + ) + if ( + is_fbcode + and FBCODE_SKIP_DIRS + and bool(FBCODE_SKIP_DIRS_RE.match(filename)) + and not bool(FBCODE_INLINE_FILES_IN_SKIPPED_DIRS_RE.match(filename)) + ): + return SkipResult( + True, + "FBCODE_SKIP_DIRS", + ) + + if ( + is_fbcode + and torch._dynamo.config.skip_torchrec + and FBCODE_SKIP_TORCHREC_DIRS + and bool(FBCODE_SKIP_TORCHREC_DIRS_RE.match(filename)) + and not bool(FBCODE_INLINE_FILES_IN_SKIPPED_DIRS_RE.match(filename)) + ): + return SkipResult(True, "FBCODE_SKIP_TORCHREC_DIRS") + + if bool(SKIP_DIRS_RE.match(filename)): + return SkipResult(True, "SKIP_DIRS") + else: + return SkipResult(False, "inlined by default") + + +@dataclasses.dataclass +class FunctionInfo: + py_obj:
Optional[object] + name: Optional[str] + filename: str + code: Optional[types.CodeType] + + +""" +This is the main entry point to determine whether an object (function) should be inlined or skipped. +Let's illustrate the logic with an example: + @torch.compile + def f1(x, y): + ...... + f2(x, y) + ...... + + def f2(x, y): + ...... + f3(x, y) + ...... + + def f3(x, y): + ...... + +There are mainly three call sites of check/check_verbose: +* The compile region entrance (like function f1); the corresponding code is located in eval_frame.py. +* When tracing recursively called functions (like functions f2 and f3). + * Dynamo decides inline/skip every time it encounters a new recursive function call, and the call site + is in InliningInstructionTranslator.check_inlineable of symbolic_convert.py. + * If f2 is skipped by Dynamo, when evaluating the frame of f3, Dynamo needs the inline/skip check again, + and the call site is in catch_errors_wrapper.catch_errors of convert_frame.py. +* For global variables and function arguments, Dynamo needs to decide if they are wrapped as SkipFunctionVariable in builder.py. + +`is_inlined_call` indicates whether the current function call is inlined (f2 is an inlined call if it passes the check) +or not (f3 is not an inlined call if f2 is skipped). Inside the `check_verbose` function, more rules +are checked when `is_inlined_call` is True. +The reason to have this flag is that if the upper-level function call (e.g., f2) is skipped, +we don't want to inline the lower-level function call (e.g., f3) by default. +""" + + +def check_verbose(obj, is_inlined_call=False): + if isinstance( + obj, (UserFunctionVariable, UserMethodVariable, NestedUserFunctionVariable) + ): + try: + py_obj = obj.get_function() + except NotImplementedError: + py_obj = None + fi = FunctionInfo(py_obj, obj.get_name(), obj.get_filename(), obj.get_code()) + elif isinstance(obj, types.CodeType): + fi = FunctionInfo(None, obj.co_name, obj.co_filename, obj) + elif isinstance(obj, (types.FunctionType, types.MethodType)): + fi = FunctionInfo( + obj, obj.__name__, getfile(obj), obj.__code__ # type: ignore[union-attr] # FIXME Add MethodType.__code__ to typeshed + ) + else: + fi = FunctionInfo(obj, None, getfile(obj), None) + + # Consult the central trace rules defined in torch._dynamo.trace_rules. + reasons: Set[str] = set() + rule = lookup_inner(fi.py_obj, fi.name, fi.filename, is_inlined_call, reasons) + if issubclass(rule, (UserFunctionVariable, PolyfilledFunctionVariable)): + return SkipResult( + False, + f"inlined according to trace_rules.lookup {reasons.pop()}", + ) + else: + assert rule == SkipFunctionVariable, rule + return SkipResult( + True, + f"skipped according to trace_rules.lookup {reasons.pop()}", + ) + + +def check(obj, is_inlined_call=False): + return check_verbose(obj, is_inlined_call).skipped + + +# skip common third party libs +for _name in THIRDPARTY_SKIPLIST: + add(_name) + +_recompile_re() + + +def is_torch_inline_allowed(filename): + return any(filename.startswith(d) for d in get_mod_inlinelist()) + + +@functools.lru_cache(None) +def dynamo_dir(): + import torch._dynamo + + return _module_dir(torch._dynamo) + + +def is_torch(filename): + if filename.startswith(dynamo_dir()): + return False + return filename.startswith(_module_dir(torch)) + + +""" +Main entry point for looking up the trace rule (the Dynamo variable) for a given callable object.
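+ +Editor's examples (hedged, based on the helpers defined earlier): lookup_callable(print) +returns BuiltinVariable because id(print) is in _builtin_function_ids, while a callable +registered as disallowed (e.g. via torch._dynamo.disallow_in_graph) hits +_disallowed_callable_ids and returns SkipFunctionVariable.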
+""" + + +def lookup_callable(obj): + if not hashable(obj): + return None + # Custom allow/disallow in graph takes precedence over the general lookup. + if is_callable_disallowed(obj): + return SkipFunctionVariable + if is_callable_allowed(obj): + return TorchInGraphFunctionVariable + if is_builtin_callable(obj): + return BuiltinVariable + return None + + +""" +Main entry point for looking up the trace rule (the Dynamo variable) for a given function object. +E.g, the lookup result of `torch.sin` is `TorchInGraphFunctionVariable`. +""" + + +def lookup(obj): + return lookup_inner(obj) + + +def lookup_inner( + obj, + name=None, + filename=None, + is_direct_call=True, + reasons: Union[None, Set[str]] = None, +): + # Step 1: lookup obj's tracing rule in `torch_name_rule_map`. + # The rules defined in `torch_name_rule_map` mainly includes two parts: + # - Manually defined rules for any functions. + # - The list of torch in graph functions. + try: + can_hash = hashable(obj) + except Exception: + can_hash = False + if not can_hash: + if reasons is not None: + reasons.add("obj is not hashable") + return None + if obj is not None: + if is_aten_op_or_tensor_method(obj): + return TorchInGraphFunctionVariable + rule = get_torch_obj_rule_map().get(obj, None) + if rule is not None: + if reasons is not None: + reasons.add("get_torch_obj_rule_map") + return rule + elif name is not None and filename is not None and not is_direct_call: + if name.startswith(TORCH_DYNAMO_RESUME_IN_PREFIX): + rule = get_torch_obj_rule_map().get( + filename + "#" + TORCH_DYNAMO_RESUME_IN_PREFIX, None + ) + else: + rule = get_torch_obj_rule_map().get(filename + "#" + name, None) + if rule is not None: + if reasons is not None: + reasons.add("get_torch_obj_rule_map") + return rule + + # Step 2: lookup obj's tracing rule by function name. + if is_direct_call: + if name == "patched_init": + if reasons is not None: + reasons.add("func name is patched_init") + return SkipFunctionVariable + elif name == "__torch_function__": + if reasons is not None: + reasons.add("func name is __torch_function__") + return UserFunctionVariable + + if not is_direct_call: + if name == "__getattr__": + # is_direct_call = False indicates that this is the top-level frame + # being traced (i.e., it is not inlined and not called from + # InliningInstructionTranslator). Tracing __getattr__ at the top + # level is unlikely because we inline it for + # UserDefinedObjectVariable. This scenario occurs only for + # UnspecializedNNModuleVariable, where Dynamo directly calls + # __getattr__ during trace time, generating LOAD_ATTR bytecode + # without going through the underlying __getattr__ data structures. + # When this optimized bytecode is executed, Dynamo is triggered + # again on the __getattr__ call. Therefore, we skip Dynamo tracing + # in this case. + if reasons is not None: + reasons.add( + "Tracing __getattr__ as the top level frame, unsuitable for tracing." + ) + return SkipFunctionVariable + + # Step 3: lookup obj's tracing rule by filename. 
+ if filename is None: + filename = getfile(obj) + + skip_result = check_file(filename, is_direct_call) + if reasons is not None: + reasons.add(skip_result.reason) + if skip_result.skipped: + return SkipFunctionVariable + else: + return UserFunctionVariable + + +def clear_lru_cache(): + torch._dynamo.trace_rules.get_torch_obj_rule_map.cache_clear() + torch._dynamo.trace_rules.get_tensor_method.cache_clear() + torch._dynamo.trace_rules.get_legacy_mod_inlinelist.cache_clear() + torch._dynamo.trace_rules.get_mod_inlinelist.cache_clear() + torch._dynamo.trace_rules.dynamo_dir.cache_clear() diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/utils.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7b0cbbb75d2a192abdadd3c131cefb726a8e63b5 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/utils.py @@ -0,0 +1,3181 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import atexit +import collections +import contextlib +import copy +import dataclasses +import datetime +import dis +import enum +import functools +import gc +import importlib +import inspect +import itertools +import linecache +import logging +import math +import operator +import os +import re +import sys +import threading +import time +import types +import typing +import uuid +import warnings +import weakref +from contextlib import contextmanager +from dataclasses import is_dataclass +from functools import lru_cache +from types import MethodWrapperType +from typing import ( + Any, + Callable, + cast, + ClassVar, + Counter, + DefaultDict, + Deque, + Dict, + Iterable, + Iterator, + KeysView, + List, + Optional, + overload, + Set, + Tuple, + Type, + TypeVar, + Union, + ValuesView, +) +from typing_extensions import Literal, TypeGuard + +import torch +import torch._functorch.config +import torch._inductor.config as inductor_config +import torch.fx.experimental.symbolic_shapes +import torch.utils._pytree as pytree +from torch import fx +from torch._C import ( + _get_function_stack_at, + _instruction_counter, + _len_torch_function_stack, + _pop_torch_function_stack, + _push_on_torch_function_stack, +) +from torch._dispatch.python import enable_python_dispatcher +from torch._guards import Source, TracingContext +from torch._subclasses.meta_utils import is_sparse_compressed +from torch._utils_internal import log_chromium_event_internal, log_compilation_event +from torch.fx._utils import _format_graph_code, lazy_format_graph_code +from torch.nn.modules.lazy import LazyModuleMixin +from torch.utils._triton import has_triton, has_triton_package +from torch.utils.hooks import RemovableHandle + + +try: + import numpy as np +except ModuleNotFoundError: + np = None # type: ignore[assignment] + +try: + import torch._logging + import torch._numpy as tnp + from torch._guards import detect_fake_mode # noqa: F401 + from torch._logging import LazyString + + from . import config + + # NOTE: Make sure `NP_SUPPORTED_MODULES` and `NP_TO_TNP_MODULE` are in sync. + if np: + NP_SUPPORTED_MODULES: Tuple[types.ModuleType, ...]
= ( + np, + np.fft, + np.linalg, + np.random, + ) + + NP_TO_TNP_MODULE = { + np: tnp, + np.fft: tnp.fft, + np.linalg: tnp.linalg, + np.random: tnp.random, + } + else: + NP_SUPPORTED_MODULES = () + + NP_TO_TNP_MODULE = {} + from torch._subclasses.fake_tensor import FakeTensor, is_fake, maybe_get_fake_mode +except ImportError: + pass + + +T = TypeVar("T") + +unpatched_nn_module_getattr = torch.nn.Module.__getattr__ + +counters: DefaultDict[str, Counter[str]] = collections.defaultdict(collections.Counter) +optimus_scuba_log: Dict[str, Any] = {} +troubleshooting_url = ( + "https://pytorch.org/docs/main/torch.compiler_troubleshooting.html" +) +nnmodule_doc_url = "https://pytorch.org/docs/main/torch.compiler_nn_module.html" +nnmodule_doc_url_msg = f"See {nnmodule_doc_url} for more information and limitations." +log = logging.getLogger(__name__) + +# profiling compilation time by function +compilation_time_metrics: Dict[str, List[float]] = {} + +# profiling compilation time by frame phase +frame_phase_timing: Dict[str, Dict[str, float]] = collections.defaultdict( + lambda: collections.defaultdict(float) +) + +timer_counter = itertools.count() + + +def tabulate( + rows: Union[List[Tuple[str, object]], List[List[object]]], + headers: Union[Tuple[str, ...], List[str]], +) -> str: + try: + import tabulate + + return tabulate.tabulate(rows, headers=headers) + except ImportError: + return "\n".join( + ", ".join(map(str, row)) for row in itertools.chain([headers], rows) + ) + + +curr_frame = 0 + + +# Note: Called for you by dynamo - you almost never ever want to invoke this yourself. +def increment_frame() -> None: + global curr_frame + curr_frame = curr_frame + 1 + + +# Note: Called for you by dynamo - you almost never ever want to invoke this yourself. +def reset_frame_count() -> None: + global curr_frame + frame_phase_timing.clear() + compilation_time_metrics.clear() + curr_frame = 0 + + +op_count = 0 + + +def increment_op_count(cnt: int) -> None: + global op_count + op_count += cnt + + +# Calculate total time spent so far for each phase +# For example, {'entire_frame_compile':8.574629999999999, 'backend_compile':5.26806} +def calculate_time_spent() -> Dict[str, float]: + total_wall_time = 0.0 + total_by_key = {} + for timings in frame_phase_timing.values(): + total_wall_time += timings.get( + "entire_frame_compile", timings.get("inductor_compile", 0) + ) + + for key, timing in timings.items(): + if key not in total_by_key: + total_by_key[key] = timing + else: + total_by_key[key] += timing + + if total_by_key: + total_by_key["total_wall_time"] = total_wall_time + + return total_by_key + + +# Print a report of time spent so far +# Ex: +# TIMING: +# entire_frame_compile:8.574629999999999 +# backend_compile:5.26806 +def print_time_report() -> None: + total_by_key = calculate_time_spent() + + out = "TIMING:" + for key, value in total_by_key.items(): + out = f"{out} {key}:{round(value, 5)}" + + print(out) + + +def _add_time_spent(key: str, phase_name: str, time_spent: float) -> None: + frame_phase_timing[key][phase_name] += time_spent + + +def get_cache_stats() -> Dict[str, Any]: + """Get a bunch of metadata about cache hits and misses to use in chromium events""" + cache_stats = { + "fxgraph_cache_hit": counters["inductor"]["fxgraph_cache_hit"], + "fxgraph_cache_miss": counters["inductor"]["fxgraph_cache_miss"], + "fxgraph_cache_bypass": counters["inductor"]["fxgraph_cache_bypass"], + } + return cache_stats + + +# dynamo_timed is a context manager +# By wrapping a function in dynamo_timed, we can store a 
record in compilation_time_metrics +# where the key is the function's name. +# For example: +# +# def _foo(...): +# with dynamo_timed("_foo"): +# ... +# +# Would show up as an entry in our timing dict: +# OrderedDict([('_foo', [0.083690, 0.23949, 3.1425e-05])]) +# This is extremely useful for granular debugging. +# +# Although it is tempting to use dynamo_timed as a decorator, please do not. +# In its decorator form it makes cProfile traces less useful as dynamo_timed +# suddenly becomes a bottleneck for lots of function calls (as only one parent +# pointer is recorded). +# +# For a higher-level mode, pass a phase_name into dynamo_timed. +# A phase_name records an extra entry in a separate compilation timing structure, +# one keyed on frame+name rather than function. +# The frame is incremented outside of this function, in def increment_frame() above. +# `fwd_only` is used to identify if this phase or function is only called +# during compiling fwd graphs, e.g., `entire_frame_compile` and `backend_compile`. +# The other phases (`inductor_compile` and `code_gen`) are called for both fwd and bwd graphs. + + +@contextmanager +def dynamo_timed( + key: str, + phase_name: Optional[str] = None, + fwd_only: bool = True, +): + chromium_log: ChromiumEventLogger = get_chromium_event_logger() + if key not in compilation_time_metrics: + compilation_time_metrics[key] = [] + + fail_type: Optional[str] = None + fail_reason: Optional[str] = None + time_spent = float("-inf") + start = time.time_ns() + try: + with torch.profiler.record_function(f"{key} (dynamo_timed)"): + t0 = time.time() + chromium_log.log_event_start(key, start, None) + if phase_name: + chromium_log.log_event_start(phase_name, start) + yield + time_spent = time.time() - t0 + compilation_time_metrics[key].append(time_spent) + except Exception as e: + fail_type = str(type(e)) + fail_reason = str(e) + raise + finally: + # Always log the end event even on exception + if phase_name: + chromium_log.log_event_end( + phase_name, + time.time_ns(), + {"cache_stats": get_cache_stats()}, + start, + ) + chromium_log.log_event_end( + key, time.time_ns(), {"cache_stats": get_cache_stats()}, start + ) + # Only record backward compilation metrics if phase_name is not None! + if phase_name: + frame_key = str(curr_frame) + # fwd only compilation stages: entire_frame_compile, backend_compile. + # use frame_key as time aggregation key. + if fwd_only and fail_type is None: + _add_time_spent(frame_key, phase_name, time_spent) + else: + # fwd + bwd compilation stages: inductor_compile, code_gen. + # use frame_key as time aggregation key for fwd graphs; + # use compile_id as time aggregation key for bwd graphs. + if torch._guards.TracingContext.try_get() is not None: + aot_graph_name = str( + torch._guards.TracingContext.get().aot_graph_name + ) + if ( + "forward" in aot_graph_name or "inference" in aot_graph_name + ) and fail_type is None: + _add_time_spent(frame_key, phase_name, time_spent) + elif "backward" in aot_graph_name: + compile_id = str( + torch._guards.CompileContext.current_compile_id() + ) + if fail_type is None: + _add_time_spent(compile_id, phase_name, time_spent) + + # log backward compilation metrics at the end of `inductor_compile` of bwd graph, + # one record for one bwd graph.
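+ # Editor's note (illustrative; the compile-id format is an assumption): for a + # backward graph compile_id keys frame_phase_timing, so the lookups below read + # e.g. frame_phase_timing["1/0"]["inductor_compile"].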
+ if phase_name == "inductor_compile": + if fail_type is None: + inductor_compile_time = frame_phase_timing[ + compile_id + ].get("inductor_compile", None) + code_gen_time = frame_phase_timing[compile_id].get( + "code_gen", None + ) + else: + inductor_compile_time = None + code_gen_time = None + metrics = BwdCompilationMetrics( + compile_id, + inductor_compile_time, + code_gen_time, + fail_type, + fail_reason, + ) + record_compilation_metrics(metrics) + + +@overload +def compile_times(repr: Literal["str"], aggregate: bool = False) -> str: + ... + + +@overload +def compile_times( + repr: Literal["csv"], aggregate: bool = False +) -> Tuple[List[str], List[object]]: + ... + + +def compile_times(repr="str", aggregate: bool = False): + """ + Get metrics about torchdynamo frontend/backend compilation times. + + Accumulates information from functions tagged with `dynamo_timed`. + + repr='str' returns a printable string for user interaction, and 'csv' + returns headers, rows which can be logged for output + + aggregate causes values from multiple compilations (e.g. split graphs) + to be accumulated into one value. If false, expect more than one value + per metric. + """ + + def fmt_fn(values, item_fn=lambda x: x): + if aggregate: + return item_fn(sum(values)) + return ", ".join(map(item_fn, values)) + + if repr == "str": + rows = [ + (k, fmt_fn(compilation_time_metrics[k], item_fn=lambda x: f"{x:.4f}")) + for k in compilation_time_metrics + ] + out = "TorchDynamo compilation metrics:\n" + out += tabulate(rows, headers=("Function", "Runtimes (s)")) + return out + elif repr == "csv": + values = [ + fmt_fn(v, item_fn=lambda x: f"{x:.6f}") + for v in compilation_time_metrics.values() + ] + headers = list(compilation_time_metrics.keys()) + return headers, values + return None + + +@atexit.register +def dump_compile_times() -> None: + log.info(compile_times(repr="str", aggregate=True)) + + +tensortype_to_dtype = { + torch.FloatTensor: (torch.float32, torch.float), + torch.DoubleTensor: (torch.float64, torch.double), + torch.HalfTensor: (torch.float16, torch.half), + torch.BFloat16Tensor: (torch.bfloat16,), + torch.ByteTensor: (torch.uint8,), + torch.CharTensor: (torch.int8,), + torch.LongTensor: (torch.int64, torch.long), + torch.IntTensor: (torch.int32, torch.int), + torch.ShortTensor: (torch.int16, torch.short), + torch.BoolTensor: (torch.bool,), +} + + +class DuplicateWarningChecker: + def __init__(self, maxsize: int = 4096) -> None: + self.maxsize = maxsize + self.reset() + + def reset(self): + self.set = collections.OrderedDict() + + def add(self, key: Union[str, Tuple[object, object]]) -> bool: + if key in self.set: + self.set.move_to_end(key, last=True) + if not config.verbose: + return False + else: + self.set[key] = None + while len(self.set) > self.maxsize: + self.set.popitem(last=False) + return True + + +graph_break_dup_warning_checker = DuplicateWarningChecker() + + +def setup_compile_debug(): + compile_debug = os.environ.get("TORCH_COMPILE_DEBUG", "0") == "1" + + if compile_debug: + return add_file_handler() + + return contextlib.ExitStack() + + +def reset_graph_break_dup_checker() -> None: + graph_break_dup_warning_checker.reset() + + +def add_file_handler(): + log_path = os.path.join(get_debug_dir(), "torchdynamo") + os.makedirs(log_path, exist_ok=True) + + log_file_handler = logging.FileHandler(os.path.join(log_path, "debug.log")) + logger = logging.getLogger("torch._dynamo") + logger.addHandler(log_file_handler) + + exitstack = contextlib.ExitStack() + exitstack.callback(lambda: 
logger.removeHandler(log_file_handler)) + return exitstack + + +def setup_log_file(): + exitstack = contextlib.ExitStack() + if config.log_file_name is not None: + log_file_handler = logging.FileHandler(config.log_file_name) + for logger in torch._logging._internal.get_loggers(): + logger.addHandler(log_file_handler) + # Bind eagerly: a bare lambda here would late-bind `logger` and + # remove the handler from only the loop's last logger. + exitstack.callback(functools.partial(logger.removeHandler, log_file_handler)) + return exitstack + + return exitstack + + +def gen_record_file_name(exc, code) -> str: + return f"{get_debug_dir()}/error_recordings/\ +{code.co_name}_{type(exc).__name__}_{code.co_firstlineno}.rec" + + +def write_record_to_file(filename: str, exec_record) -> None: + try: + if os.path.exists(filename): + log.warning( + "Unable to write execution record %s; file already exists.", filename + ) + else: + os.makedirs(os.path.dirname(filename), exist_ok=True) + with open(filename, "wb") as f: + exec_record.dump(f) + except Exception: + log.exception("Unable to write execution record %s", filename) + + +def count_calls(g: fx.Graph) -> int: + c = 0 + for n in g.nodes: + if "call" in n.op: + c += 1 + return c + + +def identity(x): + return x + + +def hashable(x): + try: + hash(x) + return True + except TypeError: + return False + # cannot hash writable memoryview object + except ValueError: + return False + + +def nothing(*args, **kwargs): + pass + + +class ExactWeakKeyDictionary: + """Similar to weakref.WeakKeyDictionary, but uses `is`/`id` rather than `==` to compare equality""" + + def __init__(self): + self.values = {} + self.refs = {} + + def __getitem__(self, key): + return self.values[id(key)] + + def get(self, key, default=None): + return self.values.get(id(key), default) + + def __contains__(self, key): + return id(key) in self.values + + def __setitem__(self, key, value): + idx = id(key) + if idx not in self.refs: + self.refs[idx] = weakref.ref(key, lambda ref: self._remove_id(idx)) + self.values[idx] = value + + def _remove_id(self, idx): + if idx in self.values: + del self.values[idx] + if idx in self.refs: + del self.refs[idx] + + def clear(self): + self.refs.clear() + self.values.clear() + + +@overload +def istype(obj: object, allowed_types: Type[T]) -> TypeGuard[T]: + ... + + +@overload +def istype( + obj: object, allowed_types: Tuple[Type[List[T]], Type[Tuple[T, ...]]] +) -> TypeGuard[T]: + ... + + +@overload +def istype(obj: object, allowed_types: Iterable[type]) -> bool: + ... + + +def istype(obj, allowed_types): + """isinstance() without subclasses""" + if isinstance(allowed_types, (tuple, list, set)): + return type(obj) in allowed_types + return type(obj) is allowed_types + + +if sys.version_info >= (3, 12): + # Some typing classes moved to C in 3.12, + # which no longer have the _Final mixin. + _builtin_final_typing_classes = ( + typing.ParamSpecArgs, + typing.ParamSpecKwargs, + typing.ParamSpec, + typing.TypeVar, + typing.TypeVarTuple, + typing.TypeAliasType, + ) + + +def is_typing(value): + # _Final catches most of typing classes: + # - Any + # - Callable + # - Union + # ... + # + # NB: we intentionally ignore classes that inherit from Generic, since they + # can be used as both TypingVariable as well as UserDefinedClassVariable.
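+ # Editor's examples: is_typing(typing.Optional[int]) and is_typing(typing.Callable) + # are True (both are typing._Final instances on 3.8-3.11); is_typing(int) is False.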
+ if sys.version_info >= (3, 12) and isinstance(value, _builtin_final_typing_classes): + return True + return isinstance(value, typing._Final) or value is typing.Generic # type: ignore[attr-defined] + + +def is_numpy_int_type(value): + if not np: + return False + + return istype( + value, + ( + np.int8, + np.int16, + np.int32, + np.int64, + np.uint8, + np.uint16, + np.uint32, + np.uint64, + ), + ) + + +def is_numpy_float_type(value): + if not np: + return False + + return istype( + value, + ( + np.float16, + np.float32, + np.float64, + ), + ) + + +def is_lru_cache_wrapped_function(value): + return isinstance(value, functools._lru_cache_wrapper) and is_function( + inspect.getattr_static(value, "__wrapped__") + ) + + +def is_function_or_wrapper(value): + return is_function(value) or isinstance( + value, (torch._ops.OpOverloadPacket, torch._ops.OpOverload) + ) + + +def is_function(value): + return isinstance( + value, + ( + types.FunctionType, + types.BuiltinFunctionType, + types.MethodDescriptorType, + types.WrapperDescriptorType, + ), + ) + + +def is_wrapper_or_member_descriptor(value): + return isinstance( + value, + ( + # set up by PyGetSetDef + types.GetSetDescriptorType, + # set by PyMethodDef, e.g. list.append + types.MethodDescriptorType, + # slots - list.__add__ + types.WrapperDescriptorType, + # set up by PyMemberDef + types.MemberDescriptorType, + # wrapper over C functions + types.MethodWrapperType, + ), + ) + + +def unwrap_if_wrapper(fn): + return unwrap_with_attr_name_if_wrapper(fn)[0] + + +def unwrap_with_attr_name_if_wrapper(fn): + # TODO(anijain2305) - Investigate if we can get rid of this function + # unpack @torch._dynamo.optimize()(fn) wrapped function + if is_function(fn) and inspect.getattr_static(fn, "_torchdynamo_inline", False): + fn = inspect.getattr_static(fn, "_torchdynamo_inline", fn) + attr_name = "_torchdynamo_inline" + else: + attr_name = None + return fn, attr_name + + +def is_numpy_ndarray(value): + if not np: + return False + + return istype(value, np.ndarray) + + +def istensor(obj): + """Check if obj is a tensor""" + tensor_list: Tuple[type, ...]
= ( + torch.Tensor, + torch.nn.Parameter, + *config.traceable_tensor_subclasses, + ) + tensor_list = tensor_list + (torch._subclasses.FakeTensor,) + return istype(obj, tensor_list) + + +def is_lazy_module(mod): + return isinstance(mod, LazyModuleMixin) + + +@functools.lru_cache(4096) +def print_once(*args): + print(*args) + + +def make_cell(val=None): + """Some black magic to create a cell object that usually only exists in a closure""" + x = val + + def f(): + return x + + assert f.__closure__ is not None and len(f.__closure__) == 1 + return f.__closure__[0] + + +def proxy_args_kwargs(args, kwargs): + try: + proxy_args = tuple(arg.as_proxy() for arg in args) + proxy_kwargs = {key: arg.as_proxy() for key, arg in kwargs.items()} + return proxy_args, proxy_kwargs + except NotImplementedError as e: + from .exc import unimplemented + from .variables.base import typestr + + unimplemented( + f"call_function args: {typestr(*args)} {typestr(*list(kwargs.values()))}", + from_exc=e, + ) + + +@dataclasses.dataclass +class CompilationMetrics: + compile_id: str + frame_key: str + co_name: str + co_filename: str + co_firstlineno: int + cache_size: int + accumulated_cache_size: int + guard_count: Optional[int] + shape_env_guard_count: Optional[int] + graph_op_count: Optional[int] + graph_node_count: Optional[int] + graph_input_count: Optional[int] + start_time: float + entire_frame_compile_time_s: Optional[float] + backend_compile_time_s: Optional[float] + inductor_compile_time_s: Optional[float] + code_gen_time_s: Optional[float] + fail_type: Optional[str] + fail_reason: Optional[str] + fail_user_frame_filename: Optional[str] + fail_user_frame_lineno: Optional[int] + non_compliant_ops: Set[str] + compliant_custom_ops: Set[str] + restart_reasons: Set[str] + dynamo_time_before_restart_s: float + # Sometimes, we will finish analyzing a frame but conclude we don't want + # to install any guarded code. 
True means we actually decided to install + # a compiled frame + has_guarded_code: bool + possibly_missed_reinplacing_opportunities: Optional[int] + + +@dataclasses.dataclass +class BwdCompilationMetrics: + compile_id: str + inductor_compile_time_s: Optional[float] + code_gen_time_s: Optional[float] + fail_type: Optional[str] + fail_reason: Optional[str] + + +DEFAULT_COMPILATION_METRICS_LIMIT = 64 + + +_compilation_metrics: Deque[ + Union[CompilationMetrics, BwdCompilationMetrics] +] = collections.deque(maxlen=DEFAULT_COMPILATION_METRICS_LIMIT) + + +def record_compilation_metrics( + compilation_metrics: Union[CompilationMetrics, BwdCompilationMetrics] +): + global _compilation_metrics + _compilation_metrics.append(compilation_metrics) + if isinstance(compilation_metrics, CompilationMetrics): + name = "compilation_metrics" + else: + name = "bwd_compilation_metrics" + torch._logging.trace_structured( + name, + lambda: { + k: list(v) if isinstance(v, set) else v + for k, v in dataclasses.asdict(compilation_metrics).items() + }, + ) + if config.log_compilation_metrics: + log_compilation_event(compilation_metrics) + + +def set_compilation_metrics_limit(new_size: int) -> None: + global _compilation_metrics + while len(_compilation_metrics) > new_size: + _compilation_metrics.popleft() + new_deque = collections.deque(_compilation_metrics, maxlen=new_size) + _compilation_metrics = new_deque + + +def clear_compilation_metrics() -> None: + global _compilation_metrics + _compilation_metrics.clear() + + +def get_compilation_metrics() -> List[Union[CompilationMetrics, BwdCompilationMetrics]]: + return list(_compilation_metrics) + + +class ChromiumEventLogger: + """Logs chromium events to structured logs. tlparse will concatenate these into a perfetto UI link. + + See https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview#heading=h.yr4qxyxotyw for + a specification of the Chromium Event JSON format. + """ + + def get_stack(self): + if hasattr(self.tls, "stack"): + return self.tls.stack + else: + self.tls.stack = ["__start__"] + return self.tls.stack + + def __init__(self): + self.tls = threading.local() + # Generate a unique id for this logger, which we can use in scuba to filter down + # to a single python run. + self.id_ = str(uuid.uuid4()) + + # TODO: log to init/id tlparse after I add support for it + log.info("ChromiumEventLogger initialized with id %s", self.id_) + + def log_event_start( + self, + event_name: str, + time_ns: int, + metadata: Optional[Dict[str, Any]] = None, + ) -> None: + """ + Logs the start of a single event. + :param event_name: Name of event to appear in trace + :param time_ns: Timestamp in nanoseconds + :param metadata: Any extra metadata associated with this event + """ + event = self._log_timed_event( + event_name, + time_ns, + "B", + metadata, + ) + log_chromium_event_internal(event, self.get_stack(), self.id_) + self.get_stack().append(event_name) + + def reset(self) -> None: + # We do this on every compile in case a compile crashes or restarts and we haven't + # cleared the stack. + stack = self.get_stack() + stack.clear() + stack.append("__start__") + + def log_event_end( + self, + event_name: str, + time_ns: int, + metadata: Optional[Dict[str, Any]] = None, + start_time_ns: Optional[int] = None, + ) -> None: + """ + Logs the end of a single event. This function should only be + called after log_event_start with the same event_name.
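+ + Illustrative pairing (editor's sketch; the event name is arbitrary): + + t0 = time.time_ns() + logger.log_event_start("backend_compile", t0) + ... # timed work + logger.log_event_end("backend_compile", time.time_ns(), None, t0) +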
+ :param event_name: Name of event to appear in trace + :param time_ns: Timestamp in nanoseconds + :param metadata: Any extra metadata associated with this event + """ + # These stack health checks currently never happen, + # but they're written this way to future-proof any weird event + # overlaps in the future. + stack = self.get_stack() + if event_name not in stack: + # Something went wrong, we never called start on this event, + # or it was skipped due to overlapping events below + log.warning("ChromiumEventLogger: Start event not in stack, ignoring") + return + + event = self._log_timed_event( + event_name, + time_ns, + "E", + metadata, + ) + + while event_name != stack[-1]: + # If the event isn't the most recent one to end, pop + # off the stack until it is. + # Since event_name in self.stack, this pop is always safe + log.warning( + "ChromiumEventLogger: Detected overlapping events, fixing stack" + ) + stack.pop() + + log_chromium_event_internal(event, stack, self.id_, start_time_ns) + # Finally pop the actual event off the stack + stack.pop() + + def _log_timed_event( + self, + event_name: str, + time_ns: int, + phase: str, + metadata: Optional[Dict[str, Any]] = None, + ) -> Dict[str, Any]: + """ + Logs a timed event in chromium format. See log_event_start, log_event_end, etc. + """ + event = { + "name": event_name, + "ts": time_ns / 1000, # Chromium events are in microseconds + "args": metadata, + "ph": phase, + # These categories are needed in all chromium traces + "cat": "dynamo_timed", + "tid": 0, + "pid": 0, # pid should be specified on all logs, we don't personally care about the actual process id + } + torch._logging.trace_structured( + "chromium_event", + payload_fn=lambda: event, + suppress_context=False, + expect_trace_id=False, # Not every chromium event will have a trace_id + ) + return event + + def log_instant_event( + self, + event_name: str, + time_ns: int, + metadata: Optional[Dict[str, Any]] = None, + ) -> None: + """ + Log an instant event with no associated duration. + :param event_name: Name of event to appear in trace + :param time_ns: Timestamp in nanoseconds + :param metadata: Any extra metadata associated with this event + """ + event = { + "name": event_name, + "ts": time_ns / 1000, + "args": metadata, + "ph": "i", + # These categories are needed in all chromium traces + "cat": "dynamo_timed", + "tid": 0, + "pid": 0, + "s": "p", # We use "process" level instant events so they all appear on the same row in the trace.
+ } + torch._logging.trace_structured( + "chromium_event", + payload_fn=lambda: event, + suppress_context=False, + expect_trace_id=True, + ) + # Log an instant event with the same start and end time + log_chromium_event_internal(event, self.get_stack(), self.id_) + + +CHROMIUM_EVENT_LOG: Optional[ChromiumEventLogger] = None + + +def get_chromium_event_logger() -> ChromiumEventLogger: + global CHROMIUM_EVENT_LOG + if CHROMIUM_EVENT_LOG is None: + CHROMIUM_EVENT_LOG = ChromiumEventLogger() + return CHROMIUM_EVENT_LOG + + +@dataclasses.dataclass +class CleanupHook: + """Remove a global variable when hook is called""" + + scope: Dict[str, Any] + name: str + + def __call__(self, *args): + # Make sure we're not shutting down + if CleanupManager is not None: + CleanupManager.count -= 1 + del self.scope[self.name] + + @staticmethod + def create(scope, name, val): + assert name not in scope + CleanupManager.count += 1 + scope[name] = val + return CleanupHook(scope, name) + + +class CleanupManager(ExactWeakKeyDictionary): + count = 0 + instance: ClassVar[CleanupManager] + + def _remove_id(self, idx): + for hook in self.values[idx]: + hook() + super()._remove_id(idx) + + +CleanupManager.instance = CleanupManager() + + +def clone_tensor(x): + """Clone the tensor and its gradient""" + y = x.clone().requires_grad_(x.requires_grad) + if x.is_leaf and x.grad is not None: + y.grad = x.grad.clone() + return y + + +def clone_input(x, *, dtype=None): + """copy while preserving strides""" + # TODO: this is questionable + if is_fake(x): + # this func fails on fake tensors in __torch_dispatch__ + return x + + def torch_clone(x): + y = torch.clone(x) + if x.is_leaf: + y.requires_grad_(x.requires_grad) + if x.is_leaf and x.grad is not None: + y.grad = clone_input(x.grad, dtype=dtype) + if hasattr(x, "_dynamo_dynamic_indices"): + y._dynamo_dynamic_indices = x._dynamo_dynamic_indices.copy() # type: ignore[attr-defined] + return y + + with torch.no_grad(): + if x.device.type == "xla": + # Access data_ptr() for a xla tensor will cause crash + return torch_clone(x) + + # Handle sparse storage (no stride). + if x.layout is torch.sparse_coo: + return torch.sparse_coo_tensor( + torch_clone(x._indices()), + torch_clone(x._values()), + x.shape, + is_coalesced=x.is_coalesced(), + ) + elif is_sparse_compressed(x): + if x.layout in {torch.sparse_csr, torch.sparse_bsr}: + compressed_indices = x.crow_indices() + plain_indices = x.col_indices() + else: + compressed_indices = x.ccol_indices() + plain_indices = x.row_indices() + return torch.sparse_compressed_tensor( + torch_clone(compressed_indices), + torch_clone(plain_indices), + torch_clone(x.values()), + x.shape, + layout=x.layout, + ) + + needed_size = sum( + (shape - 1) * stride for shape, stride in zip(x.size(), x.stride()) + ) + if x.is_quantized: + result = torch.empty_quantized((needed_size + 32,), x) + else: + result = torch.empty( + needed_size + 32, dtype=dtype or x.dtype, device=x.device + ) + cache_line_offset = ( + (x.data_ptr() - result.data_ptr()) % 32 + ) // x.element_size() + result.as_strided_(x.size(), x.stride(), cache_line_offset) + try: + result.copy_(x.clone()) + if x.is_leaf: + result.requires_grad_(x.requires_grad) + if x.is_leaf and x.grad is not None: + result.grad = clone_input(x.grad, dtype=dtype) + except RuntimeError: + # RuntimeError: unsupported operation: more than one element of the written-to + # tensor refers to a single memory location. Please clone() the tensor before + # performing the operation. 
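+ # Editor's example of such a tensor: torch.randn(1).expand(4) has stride (0,), + # so the as_strided_ result aliases one storage element for several output + # positions and copy_ refuses to write through it; fall back to a plain clone.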
+ return torch_clone(x) + if hasattr(x, "_dynamo_dynamic_indices"): + result._dynamo_dynamic_indices = x._dynamo_dynamic_indices.copy() # type: ignore[attr-defined] + return result + + +def clone_inputs(example_inputs): + res: Union[Dict[Any, Any], List[Any]] + if type(example_inputs) is dict: + res = dict(example_inputs) + for key, value in res.items(): + if isinstance(value, tuple): + res[key] = clone_inputs(value) + else: + assert isinstance(value, torch.Tensor), type(value) + res[key] = clone_input(value) + return res + + res = list(example_inputs) + for i in range(len(res)): + if isinstance(res[i], torch.Tensor): + res[i] = clone_input(res[i]) + return res + + +def skip_frame_if_in_functorch_mode(val: torch.Tensor): + try: + val.data_ptr() # will throw for functorch tensors + except RuntimeError as e: + from .exc import SkipFrame + + # This will be GradTrackingTensor/BatchedTensor/etc + functorch_subclass_name = re.sub(r"\(.*", "", repr(val)) + raise SkipFrame( + f"torch.compile cannot be run in context: {functorch_subclass_name}" + ) from e + + +@contextmanager +def preserve_rng_state(): + disable_functorch = torch._C._DisableFuncTorch + disable_current_modes = torch.utils._python_dispatch._disable_current_modes + with disable_current_modes(), disable_functorch(): + rng_state = torch.clone(torch.random.get_rng_state()) + skip_frame_if_in_functorch_mode(rng_state) + if torch.cuda.is_available(): + cuda_rng_state = torch.clone(torch.cuda.get_rng_state()) + try: + yield + finally: + with torch.utils._python_dispatch._disable_current_modes(): + torch.random.set_rng_state(rng_state) + if torch.cuda.is_available(): + torch.cuda.set_rng_state(cuda_rng_state) # type: ignore[possibly-undefined] + + +def is_jit_model(model0): + return isinstance( + model0, + ( + torch.jit._trace.TopLevelTracedModule, + torch.jit._script.RecursiveScriptModule, + torch.jit.ScriptFunction, + torch.jit.ScriptModule, + ), + ) + + +def torchscript(model, example_inputs, verbose=False): + if is_jit_model(model): + # already done? + return model + + try: + return torch.jit.trace(model, example_inputs) + except Exception: + try: + return torch.jit.script(model) + except Exception: + if verbose: + log.exception("jit error") + else: + log.error("Both torch.jit.trace and torch.jit.script failed") + return None + + +def getfile(obj): + try: + return inspect.getfile(obj) + except (TypeError, OSError): + return None + + +def is_namedtuple(obj): + """Test if an object is a namedtuple or a torch.return_types.* quasi-namedtuple""" + return is_namedtuple_cls(type(obj)) + + +def is_namedtuple_cls(cls): + """Test if an object is a namedtuple or a (torch.return_types|torch.autograd.forward_ad).* quasi-namedtuple""" + try: + if issubclass(cls, tuple): + bases = getattr(cls, "__bases__", []) or [None] + module = getattr(cls, "__module__", None) + return module in ("torch.return_types", "torch.autograd.forward_ad") or ( + bases[0] is tuple and hasattr(cls, "_make") and hasattr(cls, "_fields") + ) + except TypeError: + pass + return False + + +@functools.lru_cache(1) +def namedtuple_fields(cls): + """Get the fields of a namedtuple or a torch.return_types.* quasi-namedtuple""" + if cls is slice: + return ["start", "stop", "step"] + + assert issubclass(cls, tuple) + if hasattr(cls, "_fields"): + # normal namedtuples + return cls._fields + + @dataclasses.dataclass + class Marker: + index: int + + # frustrating ones e.g. 
torch.return_types.max + assert cls.__module__ == "torch.return_types" + obj = cls(map(Marker, range(cls.n_fields))) + fields: List[Optional[str]] = [None] * cls.n_fields + for name in dir(obj): + if name[0] != "_" and isinstance(getattr(obj, name), Marker): + fields[getattr(obj, name).index] = name + return fields + + +def checkpoint_params(gm): + with torch.no_grad(): + rng_state = torch.clone(torch.random.get_rng_state()) + if torch.cuda.is_available(): + cuda_rng_state = torch.clone(torch.cuda.get_rng_state()) + saved_state = [] + for param in itertools.chain(gm.parameters(), gm.buffers()): + saved_state.append((param, param._version, torch.clone(param))) + + def restore(): + with torch.no_grad(): + torch.random.set_rng_state(rng_state) + if torch.cuda.is_available(): + torch.cuda.set_rng_state(cuda_rng_state) + for param, version, original_value in saved_state: + if param._version != version: + param.copy_(original_value) + + return restore + + +def timed(model, example_inputs, times=1): + if torch.cuda.is_available(): + synchronize = torch.cuda.synchronize + else: + synchronize = nothing + + synchronize() + gc.collect() + torch.manual_seed(1337) + t0 = time.perf_counter() + for _ in range(times): + result = model(*example_inputs) + synchronize() + t1 = time.perf_counter() + return result, t1 - t0 # type: ignore[possibly-undefined] + + +def check_is_cuda(gm, example_inputs): + return all(x.is_cuda for x in itertools.chain(example_inputs, gm.parameters(True))) + + +@lru_cache(32) +def rot_n_helper(n): + assert n > 1 + vars = [f"v{i}" for i in range(n)] + rotated = reversed(vars[-1:] + vars[:-1]) + fn = eval(f"lambda {','.join(vars)}: ({','.join(rotated)})") + fn.__name__ = f"rot_{n}_helper" + return fn + + +common_constant_types: Set[type] = { + int, + float, + complex, + bool, + str, + bytes, + type(None), + Ellipsis.__class__, + types.CodeType, + torch.device, + torch.dtype, + torch.memory_format, + torch.layout, +} + +if has_triton_package(): + import triton + + common_constant_types.add(triton.language.dtype) + +""" + Difference between is_safe_constant and common_constant_types. + * common_constant_types: Constants would be wrapped by VariableBuilder.wrap_literal + as ConstantVariable. + * is_safe_constant: Constants can be loaded by LOAD_CONST bytecode. 
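+ +Editor's examples: is_safe_constant((1, "a")) is True (tuples recurse over their +elements); is_safe_constant([1]) is False (lists are mutable and not in +common_constant_types); enum members, types, and torch.Size are treated as safe.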
+""" + + +def is_safe_constant(v): + if istype(v, (tuple, frozenset)): + return all(map(is_safe_constant, v)) + return isinstance(v, (enum.Enum, type, torch.Size)) or istype( + v, + common_constant_types | {slice}, + ) + + +def specialize_symnode(arg): + from .variables import ConstantVariable, SymNodeVariable + + # Guard and specialize + if isinstance(arg, SymNodeVariable): + return ConstantVariable.create(arg.evaluate_expr()) + + return arg + + +def guard_if_dyn(arg): + from .variables import ConstantVariable + + arg = specialize_symnode(arg) + + if isinstance(arg, ConstantVariable): + return arg.as_python_constant() + + return arg + + +def check_constant_args(args, kwargs): + return all(x.is_python_constant() for x in itertools.chain(args, kwargs.values())) + + +def check_unspec_python_args(args, kwargs): + from .variables.constant import ConstantVariable + from .variables.tensor import UnspecializedPythonVariable + + unspec_count = 0 + for x in itertools.chain(args, kwargs.values()): + if isinstance(x, UnspecializedPythonVariable): + unspec_count += 1 + elif not isinstance(x, ConstantVariable): + return False + return unspec_count > 0 + + +def check_unspec_or_constant_args(args, kwargs): + # A fused version of: + # return check_constant_args(args, kwargs) or check_unspec_python_args(args, kwargs) + from .variables.tensor import UnspecializedPythonVariable + + for x in itertools.chain(args, kwargs.values()): + if not (x.is_python_constant() or isinstance(x, UnspecializedPythonVariable)): + return False + return True + + +def check_numpy_ndarray_args(args, kwargs): + from .variables.tensor import NumpyNdarrayVariable + + return any( + isinstance(x, NumpyNdarrayVariable) + for x in itertools.chain(args, kwargs.values()) + ) + + +dict_keys: Type[KeysView[Any]] = type({}.keys()) +dict_values: Type[ValuesView[Any]] = type({}.values()) +odict_values: Type[ValuesView[Any]] = type(collections.OrderedDict().values()) +tuple_iterator: Type[Iterator[Any]] = type(iter(())) +tuple_iterator_len = tuple_iterator.__length_hint__ # type: ignore[attr-defined] +object_new = object.__new__ + + +def nn_module_new(cls): + obj = object_new(cls) + torch.nn.Module.__init__(obj) + return obj + + +def product(it): + return functools.reduce(operator.mul, it, 1) + + +def tuple_iterator_getitem(it, index): + _, (obj,), start = it.__reduce__() + return obj[start + index] + + +iter_next = next + + +def to_subclass(t, cls): + return t.as_subclass(cls) + + +def dict_keys_getitem(d, n): + return next(itertools.islice(iter(d), n, n + 1)) + + +def enum_repr(value, local): + # enum class can override __str__ method. Use __class__ and name attribute + # to extract the class name and key name. + name = value.__class__.__name__ + val = value.name + scope = "L" if local else "G" + local_name = f'{scope}["{name}"].{val}' + return local_name + + +def set_example_value(node, example_value): + # NB: example_value is a bit of a misnomer, because this is always a fake + # tensor of some sort. Furthermore, these example values serve as the + # runtime state of Dynamo tracing, which means if metadata mutation + # occurs, the example_value gets directly updated (so you can't rely on + # this to accurately reflect what the state of the value was at the time + # the program was traced). 
+ node.meta["example_value"] = example_value + shape_env = TracingContext.get().fake_mode.shape_env + if symbol_to_path := torch.fx.experimental.symbolic_shapes.compute_unbacked_bindings( + shape_env, example_value + ): + node.meta["unbacked_bindings"] = symbol_to_path + + +def _get_fake_tensor(vt): + fake_tensor = vt.as_proxy().node.meta.get("example_value") + if not is_fake(fake_tensor): + from .exc import unimplemented + + unimplemented("Cannot check Tensor object identity without its fake value") + return fake_tensor + + +def iter_contains(items, search, tx, check_tensor_identity=False): + from .variables import ( + BuiltinVariable, + ConstantVariable, + TensorVariable, + VariableTracker, + ) + + if search.is_python_constant(): + found_const = any( + x.is_python_constant() + and x.as_python_constant() == search.as_python_constant() + for x in items + ) + return ConstantVariable.create(found_const) + + must_check_tensor_id = False + if check_tensor_identity and isinstance(search, TensorVariable): + must_check_tensor_id = True + # Match of Tensor means match of FakeTensor + search = _get_fake_tensor(search) + + found: Optional[VariableTracker] = None + for x in items: + if must_check_tensor_id: + if isinstance(x, TensorVariable): + if search is _get_fake_tensor(x): # Object equivalence + return ConstantVariable.create(True) + else: + check = BuiltinVariable(operator.eq).call_function(tx, [x, search], {}) + if found is None: + found = check + else: + found = BuiltinVariable(operator.or_).call_function( + tx, [check, found], {} + ) + if found is None: + found = ConstantVariable.create(False) + return found + + +def key_is_id(k): + """Returns whether it indexes dictionaries using its id""" + return isinstance(k, (torch.Tensor, torch.nn.Module, MethodWrapperType)) + + +def key_to_id(value): + return [id(k) if key_is_id(k) else k for k in value.keys()] + + +def const_repr(x, *, local) -> str: + from .trace_rules import is_builtin_callable + + if isinstance(x, (list, tuple)): + elems_repr = ",".join(const_repr(s, local=local) for s in x) + if isinstance(x, list): + return f"[{elems_repr}]" + else: + assert isinstance(x, tuple) + if len(x) == 1: + return f"({elems_repr},)" + else: + return f"({elems_repr})" + elif isinstance(x, enum.Enum): + # To workaround repr(Enum) returning invalid global reference before python 3.11 + # by calling enum_repr and removing quotes to render enum in guard code. + return enum_repr(x, local=local).replace("'", "") + elif is_builtin_callable(x): + return x.__name__ + elif isinstance(x, type): + + def fullname(o): + klass = o.__class__ + module = klass.__module__ + if module == "builtins": + return klass.__qualname__ # avoid outputs like 'builtins.str' + return module + "." + klass.__qualname__ + + return fullname(x) + else: + return f"{x!r}" + + +def dict_keys_repr(const_keys, *, local) -> str: + keys_str = ",".join(const_repr(s, local=local) for s in const_keys) + return "[" + keys_str + "]" + + +GLOBAL_KEY_PREFIX = "__dict_key" + + +from torch._subclasses import UnsupportedFakeTensorException # noqa: F401 + + +def get_safe_global_name(tx, root, obj): + # The global_mangled_class_name should be different for different + # invocations of torch.compile. Otherwise, we can run into a situation + # where multiple torch.compile invocations re-use the same global name, + # but the global's lifetime is tied to the first invocation (and + # may be deleted when the first torch.compile invocation is deleted) + # We mangle it based off of the output_graph's id. 
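+    # e.g. for root "__foo", an object whose id() is 139872, and compile id X,
+    # this produces "__foo_139872_cX" (illustrative values only).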
+ return f"{root}_{id(obj)}_c{tx.output.compile_id}" + + +def wrap_fake_exception(fn): + try: + return fn() + except UnsupportedFakeTensorException as e: + from .exc import unimplemented + + msg = f"Unsupported: {e.reason} with fake tensor propagation." + log.warning(msg) + unimplemented(msg, from_exc=e) + + +def deepcopy_to_fake_tensor(obj, fake_mode): + with torch._subclasses.fake_tensor.FakeCopyMode(fake_mode): + return wrap_fake_exception(lambda: copy.deepcopy(obj)) + + +def rmse(ref, res): + """ + Calculate root mean squared error + """ + return torch.sqrt(torch.mean(torch.square(ref - res))) + + +def same( + ref, + res, + fp64_ref=None, + cos_similarity=False, + tol=1e-4, + equal_nan=False, + exact_dtype=True, + relax_numpy_equality=False, + ignore_non_fp=False, + log_error=log.error, + use_larger_multiplier_for_smaller_tensor=False, +): + """Check correctness to see if ref and res match""" + if fp64_ref is None: + fp64_ref = ref + if isinstance( + ref, (list, tuple, collections.deque, torch.nn.ParameterList, torch.Size) + ): + assert isinstance( + res, (list, tuple, collections.deque) + ), f"type mismatch {type(ref)} {type(res)}" + if len(ref) != len(res): + log_error("Length mismatch") + return False + return len(ref) == len(res) and all( + same( + ai, + bi, + fp64_refi, + cos_similarity, + tol, + equal_nan, + exact_dtype, + relax_numpy_equality, + ignore_non_fp, + log_error=log_error, + use_larger_multiplier_for_smaller_tensor=use_larger_multiplier_for_smaller_tensor, + ) + for ai, bi, fp64_refi in zip(ref, res, fp64_ref) + ) + elif type(ref).__name__ == "QuestionAnsweringModelOutput": + # This skips checking accuracy for start_logits/end_logits. + # Tentatively, start_logits/end_logits appear to be very prone to + # inaccuracies and is somewhat subsumed by checking the loss. 
+        return same(
+            ref.loss,
+            res.loss,
+            fp64_ref.loss,
+            cos_similarity,
+            tol,
+            equal_nan,
+            exact_dtype,
+            relax_numpy_equality,
+            ignore_non_fp,
+            log_error=log_error,
+            use_larger_multiplier_for_smaller_tensor=use_larger_multiplier_for_smaller_tensor,
+        )
+    elif isinstance(ref, dict):
+        assert isinstance(res, dict)
+        assert set(ref.keys()) == set(
+            res.keys()
+        ), f"keys mismatch {set(ref.keys())} == {set(res.keys())}"
+        for k in sorted(ref.keys()):
+            if not (
+                same(
+                    ref[k],
+                    res[k],
+                    fp64_ref[k],
+                    cos_similarity=cos_similarity,
+                    tol=tol,
+                    equal_nan=equal_nan,
+                    exact_dtype=exact_dtype,
+                    relax_numpy_equality=relax_numpy_equality,
+                    ignore_non_fp=ignore_non_fp,
+                    log_error=log_error,
+                    use_larger_multiplier_for_smaller_tensor=use_larger_multiplier_for_smaller_tensor,
+                )
+            ):
+                log_error("Accuracy failed for key name %s", k)
+                return False
+        return True
+    elif isinstance(ref, set):
+        assert isinstance(res, set)
+        assert set(ref) == set(res), f"elements mismatch {set(ref)} == {set(res)}"
+        return True
+    elif isinstance(ref, (torch.Tensor, float)):
+        assert not isinstance(ref, torch._subclasses.FakeTensor)
+        assert not isinstance(res, torch._subclasses.FakeTensor)
+
+        def to_tensor(t):
+            return t if isinstance(t, torch.Tensor) else torch.tensor(t)
+
+        ref, res, fp64_ref = (to_tensor(val) for val in (ref, res, fp64_ref))
+
+        if ref.is_sparse:
+            assert res.is_sparse
+            ref = ref.to_dense()
+            res = res.to_dense()
+        assert isinstance(res, torch.Tensor), f"type mismatch {type(ref)} {type(res)}"
+        if exact_dtype:
+            if ref.dtype != res.dtype:
+                log_error("dtype mismatch %s, %s", ref.dtype, res.dtype)
+                return False
+            if ref.dtype == torch.bool:
+                if ignore_non_fp:
+                    return True
+                # triton stores bool as int8, so add this for more accurate checking
+                r = torch.allclose(
+                    ref.to(dtype=torch.uint8),
+                    res.to(dtype=torch.uint8),
+                    atol=tol,
+                    rtol=tol,
+                    equal_nan=equal_nan,
+                )
+                if not r:
+                    log_error("Accuracy failed: uint8 tensor did not match")
+                return r
+
+        if cos_similarity:
+            ref = ref.flatten().to(torch.float32)
+            res = res.flatten().to(torch.float32)
+            if torch.allclose(ref, res, atol=tol, rtol=tol, equal_nan=True):
+                # early exit that handles zero/nan better
+                # cosine_similarity(zeros(10), zeros(10), dim=0) is 0
+                return True
+            score = torch.nn.functional.cosine_similarity(ref, res, dim=0, eps=1e-6)
+            if score < 0.99:
+                log.warning("Similarity score=%s", score.cpu().detach().item())
+            return score >= 0.99
+        else:
+            if not exact_dtype:
+                ref = ref.to(res.dtype)
+
+            # First try usual allclose
+            if torch.allclose(ref, res, atol=tol, rtol=tol, equal_nan=equal_nan):
+                return True
+
+            # Check error from fp64 version
+            if fp64_ref.dtype == torch.float64:
+                # Fix a corner case where res and fp64_ref contain no NaN and match
+                # (with loose tolerance) while ref does contain NaN. In that case the
+                # RMSE check cannot match anyway, but res is 'BETTER' than ref, so we
+                # count it as a pass.
+                #
+                # This happens for Super_SloMo when loop ordering after fusion is enabled:
+                # https://gist.github.com/shunting314/11f235c70f7db0d52718d26f4a701cab
+                loose_tol = 1e-2 * 4
+                if (
+                    not fp64_ref.isnan().any()
+                    and not res.isnan().any()
+                    and ref.isnan().any()
+                    and torch.allclose(
+                        fp64_ref.to(dtype=res.dtype),
+                        res,
+                        atol=loose_tol,
+                        rtol=loose_tol,
+                        equal_nan=equal_nan,
+                    )
+                ):
+                    return True
+                ref_error = rmse(fp64_ref, ref).item()
+                # ref is unable to produce this value with stable numerics in this precision; ignore
+                if math.isnan(ref_error):
+                    log.warning(
+                        "Found nan in reference. Consider running in higher precision."
+                    )
+
+                res_error = rmse(fp64_ref, res).item()
+
+                # In the case of using AMP (Automatic Mixed Precision), certain models have
+                # failed the benchmark's correctness check. However, the end-to-end model's
+                # accuracy when comparing AMP with FP32 is within a difference of less than 0.1%.
+                # Thus, it's possible that the correctness check failures for these models are
+                # false alarms. We use a multiplier of 3 instead of 2 to avoid these false alarms.
+                multiplier = (
+                    3.0 if res.dtype in (torch.float16, torch.bfloat16) else 2.0
+                )
+
+                if use_larger_multiplier_for_smaller_tensor and (
+                    fp64_ref.numel() <= 10 and tol >= 4 * 1e-2
+                ):
+                    multiplier = 10.0
+                elif use_larger_multiplier_for_smaller_tensor and (
+                    fp64_ref.numel() <= 500 and tol >= 4 * 1e-2
+                ):
+                    multiplier = 5.0
+                elif (
+                    fp64_ref.numel() < 1000
+                    or (ref.ndim == 4 and ref.shape[-1] == ref.shape[-2] == 1)
+                    # large tol means a benchmark has been specified as REQUIRE_HIGHER_TOLERANCE
+                    or tol >= 2 * 1e-2
+                ):
+                    # In the presence of noise, noise might dominate our error
+                    # metric for smaller tensors.
+                    # Similarly, for 1x1 kernels, there seems to be high noise with amp.
+                    multiplier = 3.0
+
+                passes_test = res_error <= (multiplier * ref_error + tol / 10.0)
+                if (
+                    not passes_test
+                    and equal_nan
+                    and math.isnan(ref_error)
+                    and math.isnan(res_error)
+                    # Some unit tests for the accuracy minifier rely on
+                    # returning false in this case.
+                    and not inductor_config.cpp.inject_relu_bug_TESTING_ONLY
+                ):
+                    passes_test = True
+                if not passes_test:
+                    log_error(
+                        "RMSE (res-fp64): %.5f, (ref-fp64): %.5f and shape=%s. res.dtype: %s, multiplier: %f, tol: %f"
+                        ", use_larger_multiplier_for_smaller_tensor: %d",
+                        res_error,
+                        ref_error,
+                        res.size(),
+                        res.dtype,
+                        multiplier,
+                        tol,
+                        use_larger_multiplier_for_smaller_tensor,
+                    )
+                return passes_test
+
+            if ignore_non_fp:
+                return True
+
+            log_error("Accuracy failed: allclose not within tol=%s", tol)
+            return False
+    elif isinstance(ref, (str, int, type(None), bool, torch.device)):
+        if ignore_non_fp:
+            return True
+        r = ref == res
+        if not r:
+            log_error("Accuracy failed (%s): %s != %s", type(ref), ref, res)
+        return r
+    elif is_numpy_int_type(ref) or is_numpy_float_type(ref):
+        if relax_numpy_equality and not (
+            is_numpy_int_type(res) or is_numpy_float_type(res)
+        ):
+            ref = ref.item()
+        r = (type(ref) is type(res)) and (ref == res)
+        if not r:
+            log_error("Accuracy failed (numpy): %s != %s", ref, res)
+        return r
+    elif is_numpy_ndarray(ref):
+        return (type(ref) is type(res)) and same(
+            torch.as_tensor(ref),
+            torch.as_tensor(res),
+            fp64_ref,
+            cos_similarity=cos_similarity,
+            tol=tol,
+            equal_nan=equal_nan,
+            exact_dtype=exact_dtype,
+            relax_numpy_equality=relax_numpy_equality,
+            ignore_non_fp=ignore_non_fp,
+            log_error=log_error,
+            use_larger_multiplier_for_smaller_tensor=use_larger_multiplier_for_smaller_tensor,
+        )
+    elif type(ref).__name__ in (
+        "MaskedLMOutput",
+        "Seq2SeqLMOutput",
+        "CausalLMOutputWithCrossAttentions",
+        "LongformerMaskedLMOutput",
+        "Instances",
+        "SquashedNormal",
+        "Boxes",
+        "Normal",
+        "TanhTransform",
+        "Foo",
+        "Variable",
+    ):
+        assert type(ref) is type(res)
+        return all(
+            same(
+                getattr(ref, key),
+                getattr(res, key),
+                getattr(fp64_ref, key),
+                cos_similarity=cos_similarity,
+                tol=tol,
+                equal_nan=equal_nan,
+                exact_dtype=exact_dtype,
+                relax_numpy_equality=relax_numpy_equality,
+                ignore_non_fp=ignore_non_fp,
+                log_error=log_error,
+
use_larger_multiplier_for_smaller_tensor=use_larger_multiplier_for_smaller_tensor, + ) + for key in ref.__dict__.keys() + ) + else: + raise RuntimeError(f"unsupported type: {type(ref).__name__}") + + +def format_func_info(code): + short_filename = code.co_filename.split("/")[-1] + return f"'{code.co_name}' ({short_filename}:{code.co_firstlineno})" + + +@contextlib.contextmanager +def disable_cache_limit(): + prior = config.cache_size_limit + config.cache_size_limit = sys.maxsize + prior_acc_limit = config.accumulated_cache_size_limit + config.accumulated_cache_size_limit = sys.maxsize + + try: + yield + finally: + config.cache_size_limit = prior + config.accumulated_cache_size_limit = prior_acc_limit + + +# map from transformed code back to original user code +orig_code_map = ExactWeakKeyDictionary() + +# keep a record of code_obj -> list of guard failure reasons for logging +guard_failures: DefaultDict[Any, List[Any]] = collections.defaultdict(list) + +# Keep a record of graph break reasons for logging +graph_break_reasons: List[torch._dynamo.output_graph.GraphCompileReason] = [] + +# keep record of compiled code, if we are in "error if recompile" +# to track code that dynamo has compiled previously +seen_code_map = ExactWeakKeyDictionary() + + +# return same dir unless user changes config between calls +@functools.lru_cache(None) +def _get_debug_dir(root_dir): + dir_name = ( + "run_" + + datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f") + # use pid to avoid conflicts among ranks + + "-pid_" + + str(os.getpid()) + ) + return os.path.join(root_dir, dir_name) + + +def get_debug_dir(): + debug_root = config.debug_dir_root + return _get_debug_dir(debug_root) + + +def extract_fake_example_value(node, required=True): + if "example_value" in node.meta and is_fake(node.meta["example_value"]): + return node.meta["example_value"] + elif required: + from torch._dynamo.exc import unimplemented + + unimplemented("`FakeTensor` example value was required but not available") + else: + return None + + +def ensure_graph_fake(e, tx): + assert maybe_get_fake_mode(e) is tx.fake_mode + return e + + +def get_fake_values_from_nodes(tx, nodes, allow_non_graph_fake): + def visit(n: torch.fx.Node): + if n.op == "call_function" and "example_value" not in n.meta: + # fake tensor validity is checked inside get_fake_value using + # ensure_graph_fake + return get_fake_value(n, tx, allow_non_graph_fake) + + out = n.meta["example_value"] + if not allow_non_graph_fake and isinstance(out, torch.Tensor): + return ensure_graph_fake(out, tx) + return out + + return torch.fx.node.map_arg(nodes, visit) + + +def get_fake_value(node, tx, allow_non_graph_fake=False): + """ + Run the computation represented by `node` using fake tensors and return the result. + + allow_non_graph_fake: whether to allow the return result to be: + 1. non-fake or 2. fake that is not created by this instance of Dynamo. + If `True`, you must be prepared to deal with such return values, ideally + by further wrapping them as this graph's fakes. 
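+
+    Note: an FX node should always return the same fake value, so if the node
+    already carries a fake "example_value" in its meta, that cached value is
+    returned directly (see the check below).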
+ """ + from torch.utils._sympy.value_ranges import ValueRangeError + + from .exc import ( + TorchRuntimeError, + unimplemented, + Unsupported, + UserError, + UserErrorType, + ) + + op = node.op + + # FX Node should always return the same fake value + if "example_value" in node.meta and is_fake(node.meta["example_value"]): + return node.meta["example_value"] + + args, kwargs = get_fake_values_from_nodes( + tx, (node.args, node.kwargs), allow_non_graph_fake + ) + + nnmodule = None + if op == "call_method" and len(args) > 0 and isinstance(args[0], torch.nn.Module): + # If the first argument is nn.Module, should copy to fake mode. + args = (deepcopy_to_fake_tensor(args[0], tx.fake_mode),) + tuple(args[1:]) + + if op == "call_module": + nnmodule = tx.output.nn_modules[node.target] + + if is_lazy_module(nnmodule) and hasattr(nnmodule, "_initialize_hook"): + # In the case of a lazy module, we want to run + # the pre-hooks which initialize it. + # Afterwards, lazy module deletes its pre-hooks + # to avoid treating it as lazy on subsequent recompile. + nnmodule._infer_parameters(nnmodule, args) + + # no matter it's lazy module or not, we should copy to fake mode. + nnmodule = deepcopy_to_fake_tensor(nnmodule, tx.fake_mode) + + try: + with tx.fake_mode, enable_python_dispatcher(): + ret_val = wrap_fake_exception( + lambda: run_node(tx.output, node, args, kwargs, nnmodule) + ) + except Unsupported: + raise + except RuntimeError as e: + cause: BaseException = e + if e.__cause__ is not None: + cause = e.__cause__ + + if isinstance( + cause, torch._subclasses.fake_tensor.DataDependentOutputException + ): + unimplemented( + f"data dependent operator: {cause.func}; " + "to enable, set torch._dynamo.config.capture_scalar_outputs = True" + ) + elif isinstance( + cause, torch._subclasses.fake_tensor.DynamicOutputShapeException + ): + if not torch._dynamo.config.capture_dynamic_output_shape_ops: + unimplemented( + f"dynamic shape operator: {cause.func}; " + "to enable, set torch._dynamo.config.capture_dynamic_output_shape_ops = True" + ) + else: + unimplemented( + f"dynamic shape operator: {cause.func}; " + "Operator does not have a meta kernel that supports dynamic output shapes, " + "please report an issue to PyTorch" + ) + elif isinstance( + cause, torch._subclasses.fake_tensor.UnsupportedOperatorException + ): + op = cause.func + import_suggestion = "" + if isinstance(op, torch._ops.OpOverload): + maybe_pystub = torch._C._dispatch_pystub( + op._schema.name, op._schema.overload_name + ) + if maybe_pystub is not None: + module, ctx = maybe_pystub + import_suggestion = ( + f"It's possible that the support was implemented in " + f"module `{module}` and you may need to `import {module}`" + f"({ctx}), otherwise " + ) + unimplemented( + f"unsupported operator: {cause.func} ({import_suggestion}see " + "https://docs.google.com/document/d/1GgvOe7C8_NVOMLOCwDaYV1mXXyHMXY7ExoewHqooxrs/edit#heading=h.64r4npvq0w0" + " for how to fix)" + ) + elif isinstance( + cause, torch.fx.experimental.symbolic_shapes.GuardOnDataDependentSymNode + ): + raise UserError( # noqa: B904 + UserErrorType.CONSTRAINT_VIOLATION, + str(cause), + case_name="constrain_as_size_example", + ) + elif isinstance(cause, ValueRangeError): + raise UserError(UserErrorType.CONSTRAINT_VIOLATION, e.args[0]) from e + elif isinstance(cause, TypeError) and "argument" in str(cause): + unimplemented(f"TypeError {node.target}: {cause}") + + raise TorchRuntimeError(str(e)).with_traceback(e.__traceback__) from None + + if not allow_non_graph_fake: + _ = 
pytree.tree_map_only( + torch.Tensor, functools.partial(ensure_graph_fake, tx=tx), ret_val + ) + return ret_val + + +_current_node = threading.local() + + +def get_current_node(): + return getattr(_current_node, "value", None) + + +@contextmanager +def set_current_node(node): + old = get_current_node() + _current_node.value = node + try: + yield + finally: + _current_node.value = old + + +def run_node(tracer, node, args, kwargs, nnmodule): + """ + Runs a given node, with the given args and kwargs. + + Behavior is dictated by a node's op. + + run_node is useful for extracting real values out of nodes. + See get_real_value for more info on common usage. + + Note: The tracer arg is only used for 'get_attr' ops + Note: The nnmodule arg is only used for 'call_module' ops + + Nodes that are not call_function, call_method, call_module, or get_attr will + raise an AssertionError. + """ + op = node.op + + with set_current_node(node): + + def make_error_message(e): + return f"Failed running {op} {node.target}(*{args}, **{kwargs}):\n" + str(e) + + try: + if op == "call_function": + return node.target(*args, **kwargs) + elif op == "call_method": + return getattr(args[0], node.target)(*args[1:], **kwargs) + elif op == "call_module": + assert nnmodule is not None + return nnmodule(*args, **kwargs) + elif op == "get_attr": + return tracer.output_graph.get_submodule(node.target) + elif op == "placeholder": + assert "example_value" in node.meta + return node.meta["example_value"] + + except (NotImplementedError, UnsupportedFakeTensorException) as e: + # NB: mimic how wrap_fake_exception does it + from .exc import unimplemented + + unimplemented(make_error_message(e), from_exc=e) + except Exception as e: + raise RuntimeError(make_error_message(e)).with_traceback( + e.__traceback__ + ) from e + + raise AssertionError(op) + + +def get_real_value(node, tracer): + """ + Run the actual computation represented by `node` and return the result. + This will execute any dependent nodes in the graph as well. + """ + from .exc import TorchRuntimeError + + cache = tracer.real_value_cache + if node in cache: + return cache[node] + + op = node.op + args, kwargs = torch.fx.node.map_arg( # type: ignore[misc] + (node.args, node.kwargs), + lambda n: get_real_value(n, tracer), + ) + + if op == "placeholder" and "grapharg" in node.meta: + return node.meta["grapharg"].example + + if op == "call_module": + nn_module = tracer.output_graph.nn_modules[node.target] + if not is_lazy_module(nn_module): + nn_module = copy.deepcopy(nn_module) + else: + # In the case of a lazy module, we want to run + # the pre-hooks which initialize it + nn_module(*args, **kwargs) + else: + nn_module = None + + try: + real_value = run_node(tracer, node, args, kwargs, nn_module) + cache[node] = real_value + except RuntimeError as e: + raise TorchRuntimeError(str(e)).with_traceback(e.__traceback__) from None + return real_value + + +def assert_no_fake_params_or_buffers(gm): + from torch._subclasses.fake_tensor import FakeTensorConfig, is_fake + + def stack_or_hint(t): + if FakeTensorConfig.debug: + import traceback + + return f"FAKE TENSOR CREATION TRACEBACK: \n {traceback.format_list(t._debug_trace)}" + else: + return "Enable TORCH_FAKE_TENSOR_DEBUG=1 to get creation stack traces on fake tensors." 
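+
+    # Walk buffers first and then parameters; any fake tensor found here means
+    # fake values leaked into the real module, so fail with a creation-trace hint.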
+ + for name, buffer in gm.named_buffers(): + assert not is_fake( + buffer + ), f"Unexpected fake buffer {name} {stack_or_hint(buffer)}" + for name, param in gm.named_parameters(): + assert not is_fake( + param + ), f"Unexpected fake param {name} {stack_or_hint(param)}" + + +def fqn(obj: Any): + """ + Returns the fully qualified name of the object. + """ + return f"{obj.__module__}.{obj.__qualname__}" + + +def ifdynstaticdefault(count1, count2): + if torch._dynamo.config.assume_static_by_default: + return count1 + else: + return count2 + + +def import_submodule(mod: types.ModuleType): + """ + Ensure all the files in a given submodule are imported + """ + for filename in sorted(os.listdir(os.path.dirname(cast(str, mod.__file__)))): + if filename.endswith(".py") and filename[0] != "_": + importlib.import_module(f"{mod.__name__}.{filename[:-3]}") + + +def object_has_getattribute(value: Any): + return class_has_getattribute(type(value)) + + +def class_has_getattribute(cls: type): + try: + if isinstance( + inspect.getattr_static(cls, "__getattribute__"), + types.FunctionType, + ): + return True + except AttributeError: + pass + return False + + +def get_custom_getattr(value: Any, ignore_nn_module_getattr: bool = False): + try: + getattr_fn = inspect.getattr_static(type(value), "__getattr__") + except AttributeError: + getattr_fn = None + if ignore_nn_module_getattr and getattr_fn is torch.nn.Module.__getattr__: + # ignore this case of getattr + getattr_fn = None + return getattr_fn + + +class TensorStaticReason(enum.Enum): + PARAMETER = 2 + NOT_TENSOR = 4 + NN_MODULE_PROPERTY = 5 + + +def tensor_static_reason_to_message(reason: TensorStaticReason): + if reason == TensorStaticReason.PARAMETER: + return "mark_dynamic on parameter, parameters are always static today." + if reason == TensorStaticReason.NOT_TENSOR: + return "mark_dynamic on a non tensor, how did this happen?" + if reason == TensorStaticReason.NN_MODULE_PROPERTY: + return "tensor is static because it is nn module associated." + raise AssertionError(f"Illegal reason {reason}") + + +def tensor_always_has_static_shape( + tensor: Union[torch.Tensor, Any], + is_tensor: bool, + tensor_source: Source, +) -> Tuple[bool, Optional[TensorStaticReason]]: + """ + Given a tensor, source, and is_tensor flag, determine if a shape should be static. + + Args: + tensor - the real tensor to evaluate, parameters force a static shape. + is_tensor - internal dynamo check, essentially "is_tensor": target_cls is TensorVariable, + tensors not in a TensorVariable for whatever reason are forced static. + + Returns a tuple, where the first element is the bool of whether or not this tensor should have a static shape. + The second element is a TensorStaticReason, useful for passing to tensor_static_reason_to_message if needed. 
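+
+    The tensor_source argument is used to check provenance: shapes are forced
+    static for nn-module properties and for unspecialized param/buffer sources
+    (see the guard_source checks below).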
+ """ + from .source import is_from_unspecialized_param_buffer_source + + if ( + tensor_source.guard_source().is_specialized_nn_module() + or tensor_source.guard_source().is_unspecialized_builtin_nn_module() + ) and config.force_nn_module_property_static_shapes: + return True, TensorStaticReason.NN_MODULE_PROPERTY + + if ( + type(tensor) is torch.nn.Parameter + or is_from_unspecialized_param_buffer_source(tensor_source) + ) and config.force_parameter_static_shapes: + return True, TensorStaticReason.PARAMETER + if not is_tensor: + return True, TensorStaticReason.NOT_TENSOR + return False, None + + +def lazy_format_graph_tabular(fn_name, gm): + def inner(): + try: + from tabulate import tabulate # TODO: Check that this is installed + except ImportError: + return ( + "Tabulate module missing, please install tabulate to log the graph in tabular format, logging code instead:\n" + + str(lazy_format_graph_code(fn_name, gm)) + ) + + node_specs = [ + [n.op, n.name, n.target, n.args, n.kwargs] for n in gm.graph.nodes + ] + graph_str = tabulate( + node_specs, headers=["opcode", "name", "target", "args", "kwargs"] + ) + return _format_graph_code(fn_name, gm.forward.__code__.co_filename, graph_str) + + return LazyString(inner) + + +def format_bytecode(prefix, name, filename, line_no, code): + return f"{prefix} {name} {filename} line {line_no} \n{dis.Bytecode(code).dis()}\n" + + +forward_hook_names = ["_forward_pre_hooks", "_forward_hooks"] +backward_hook_names = ["_backward_pre_hooks", "_backward_hooks"] +state_dict_hook_names = [ + "_state_dict_pre_hooks", + "_state_dict_hooks", + "_load_state_dict_pre_hooks", + "_load_state_dict_post_hooks", +] +all_hook_names = forward_hook_names + backward_hook_names + state_dict_hook_names + + +def nn_module_has_global_hooks(): + # This is limited to backward hooks for now because NNModuleVariable + # supports fwd hooks underneath. + return len(torch.nn.modules.module._global_backward_hooks) or len( + torch.nn.modules.module._global_backward_pre_hooks + ) + + +def nn_module_get_all_hooks( + mod, + check_forward_hooks=False, + check_backward_hooks=False, + check_state_dict_hooks=False, +): + """ + Sometimes its useful to differentiate between types of hooks such as forward/backward/pre + hooks executed during module.__call__, and state_dict hooks which are executed separately. + """ + hook_dicts_to_check = [] + check_all_hooks = ( + not check_forward_hooks + and not check_backward_hooks + and not check_state_dict_hooks + ) + if check_forward_hooks or check_all_hooks: + hook_dicts_to_check.extend(forward_hook_names) + if check_backward_hooks or check_all_hooks: + hook_dicts_to_check.extend(backward_hook_names) + if check_state_dict_hooks: + hook_dicts_to_check.extend(state_dict_hook_names) + + all_hooks = [] + for hook_dict_name in hook_dicts_to_check: + hooks = getattr(mod, hook_dict_name, []) + for hook_name in hooks: + hook = hooks[hook_name] + + all_hooks.append(hook) + return all_hooks + + +def nnmodule_has_hooks( + mod, + check_forward_hooks=False, + check_backward_hooks=False, + check_state_dict_hooks=False, +): + """ + Helper function to check if a module has any hooks attached to it. 
+ """ + hooks = nn_module_get_all_hooks( + mod, + check_forward_hooks=check_forward_hooks, + check_backward_hooks=check_backward_hooks, + check_state_dict_hooks=check_state_dict_hooks, + ) + return bool(hooks) + + +def to_numpy_helper(value): + """Convert tensor and tnp.ndarray to numpy.ndarray.""" + if is_fake(value): + return value + if isinstance(value, tnp.ndarray): + return to_numpy_helper(value.tensor) + elif isinstance(value, torch.Tensor): + return value.numpy(force=True) + elif isinstance(value, (tuple, list)): + return type(value)(to_numpy_helper(obj) for obj in value) + else: + return value + + +def numpy_to_tensor(value): + """Convert tnp.ndarray to tensor, leave other types intact. If a list/tuple, loop through it to convert.""" + assert np is not None + if isinstance(value, np.ndarray): + return torch.as_tensor(value) + if isinstance(value, tnp.ndarray): + return value.tensor + elif isinstance(value, (tuple, list)): + return type(value)(numpy_to_tensor(obj) for obj in value) + else: + return value + + +class numpy_to_tensor_wrapper: + def __init__(self, f): + self.f = f + self.__name__ = "wrapped_" + self.f.__name__ + + def __repr__(self): + return f">" + + def __call__(self, *args, **kwargs): + out = self.f(*args, **kwargs) + return numpy_to_tensor(out) + + +def numpy_attr_wrapper(obj, name): + if isinstance(obj, tnp.ndarray): + out = getattr(obj, name) + return numpy_to_tensor(out) + elif isinstance(obj, torch.Tensor): + out = getattr(tnp.ndarray(obj), name) + return numpy_to_tensor(out) + + +class numpy_method_wrapper: + """Convert obj from torch.Tensor to tnp.ndarray and call method. Then convert result back to torch.Tensor.""" + + def __init__(self, method: str): + self.method = method + self.__name__ = "wrapped_" + self.method + + def __repr__(self): + return f">" + + def __call__(self, *args, **kwargs): + obj = args[0] + if isinstance(obj, torch.Tensor): + obj = tnp.ndarray(obj) + method_callable = getattr(obj, self.method) + out = method_callable(*args[1:], **kwargs) + return numpy_to_tensor(out) + + +class numpy_operator_wrapper: + """Implements dunder methods for tnp.ndarray via functions from the operator library""" + + def __init__(self, op: Callable[..., Any]): + self.op = op + self.__name__ = f"wrapped_{op.__name__}" + + def __repr__(self): + return f">" + + def __call__(self, *args, **kwargs): + assert not kwargs + + args = ( + tnp.ndarray(arg) if isinstance(arg, torch.Tensor) else arg for arg in args + ) + out = self.op(*args) + return numpy_to_tensor(out) + + +def defake(x): + if not isinstance(x, FakeTensor): + return x + size: torch._prims_common.ShapeType + stride: torch._prims_common.StrideType + if x._has_symbolic_sizes_strides: + size = [] + for s in x.size(): + if isinstance(s, torch.SymInt): + size.append(s.node.shape_env.size_hint(s.node.expr)) + else: + size.append(s) + stride = [] + for s in x.stride(): + if isinstance(s, torch.SymInt): + stride.append(s.node.shape_env.size_hint(s.node.expr)) + else: + stride.append(s) + else: + size = x.size() + stride = x.stride() + y = torch.empty_strided( + size, + stride, + dtype=x.dtype, + device=x.device, + requires_grad=x.requires_grad, + ) + y.zero_() + return y + + +def is_utils_checkpoint(obj): + # Lazy import to avoid circular dependencies + import torch.utils.checkpoint + + return obj is torch.utils.checkpoint.checkpoint + + +def build_checkpoint_variable(**options): + import torch._higher_order_ops.wrap as higher_order_ops + + from .variables.higher_order_ops import TorchHigherOrderOperatorVariable + 
+ # TODO - This is a temporary situation where we have two versions of + # checkpointing implementation. We will converge on one and remove the other. + activation_checkpoint_op: torch._ops.HigherOrderOperator = ( + higher_order_ops.tag_activation_checkpoint + ) + if torch._functorch.config.functionalize_rng_ops: + activation_checkpoint_op = higher_order_ops.wrap_activation_checkpoint + + return TorchHigherOrderOperatorVariable.make( + activation_checkpoint_op, + **options, + ) + + +def is_compile_supported(device_type): + from .eval_frame import is_dynamo_supported + + compile_supported = is_dynamo_supported() + if device_type == "cpu": + pass + elif device_type == "cuda" and compile_supported: + compile_supported = has_triton() + else: + compile_supported = False + return compile_supported + + +# The following 3.11 source code functions are adapted from +# https://github.com/python/cpython/blob/v3.11.4/Lib/traceback.py +# in order to output source code corresponding to bytecode in 3.11+. +# We need our own versions since we want to support multiline expressions. +def _fix_offset(str: str, offset: int) -> int: + """ + Convert byte offset `offset` of `str` into character offset. + Byte offset is used for 3.11+ instruction column data. + Takes things like unicode characters into consideration. + + Unchanged from CPython implementation. + """ + as_utf8 = str.encode("utf-8") + return len(as_utf8[:offset].decode("utf-8", errors="replace")) + + +@dataclasses.dataclass +class _Anchors: + # inclusive + left_end_lineno: int + left_end_offset: int + right_start_lineno: int + # exclusive + right_start_offset: int + + +def _extract_anchors_from_expr(segment: str) -> Optional[_Anchors]: + """ + Given source code `segment` corresponding to a bytecode + instruction, determine: + - for binary ops, the location of the binary op + - for indexing, the location of the brackets. + `segment` is expected to be a valid Python expression + """ + assert sys.version_info >= (3, 11) + + import ast + + try: + # Without brackets, `segment` is parsed as a statement. + # We expect an expression, so wrap `segment` in + # brackets to handle multi-line expressions. + tree = ast.parse("(\n" + segment + "\n)") + except SyntaxError: + return None + + if len(tree.body) != 1: + return None + + lines = segment.split("\n") + + # get character index given byte offset + def normalize(lineno, offset): + return _fix_offset(lines[lineno], offset) + + # Gets the next valid character index in `lines`, if + # the current location is not valid. Handles empty lines. + def next_valid_char(lineno, col): + while lineno < len(lines) and col >= len(lines[lineno]): + col = 0 + lineno += 1 + assert lineno < len(lines) and col < len(lines[lineno]) + return lineno, col + + # Get the next valid character index in `lines`. + def increment(lineno, col): + col += 1 + lineno, col = next_valid_char(lineno, col) + assert lineno < len(lines) and col < len(lines[lineno]) + return lineno, col + + # Get the next valid character at least on the next line + def nextline(lineno, col): + col = 0 + lineno += 1 + lineno, col = next_valid_char(lineno, col) + assert lineno < len(lines) and col < len(lines[lineno]) + return lineno, col + + statement = tree.body[0] + if isinstance(statement, ast.Expr): + expr = statement.value + if isinstance(expr, ast.BinOp): + # ast gives locations for BinOp subexpressions, e.g. 
+ # ( left_expr ) + ( right_expr ) + # left^^^^^ right^^^^^ + # -2 since end_lineno is 1-indexed and because we added an extra + # bracket to `segment` when calling ast.parse + cur_lineno = cast(int, expr.left.end_lineno) - 2 + cur_col = normalize(cur_lineno, expr.left.end_col_offset) + cur_lineno, cur_col = next_valid_char(cur_lineno, cur_col) + + # Heuristic to find the operator character. + # The original CPython implementation did not look for ), \, or #, + # leading to incorrect anchor location, e.g. + # (x) + (y) + # ~~^~~~~~~ + while (ch := lines[cur_lineno][cur_col]).isspace() or ch in ")\\#": + if ch in "\\#": + cur_lineno, cur_col = nextline(cur_lineno, cur_col) + else: + cur_lineno, cur_col = increment(cur_lineno, cur_col) + + # binary op is 1 or 2 characters long, on the same line + right_col = cur_col + 1 + if ( + right_col < len(lines[cur_lineno]) + and not (ch := lines[cur_lineno][right_col]).isspace() + and ch not in "\\#" + ): + right_col += 1 + # right_col can be invalid since it is exclusive + + return _Anchors(cur_lineno, cur_col, cur_lineno, right_col) + elif isinstance(expr, ast.Subscript): + # ast gives locations for value and slice subexpressions, e.g. + # ( value_expr ) [ slice_expr ] + # value^^^^^ slice^^^^^ + # subscript^^^^^^^^^^^^^^^^^^^^ + # find left bracket (first '[' after value) + left_lineno = cast(int, expr.value.end_lineno) - 2 + left_col = normalize(left_lineno, expr.value.end_col_offset) + left_lineno, left_col = next_valid_char(left_lineno, left_col) + while lines[left_lineno][left_col] != "[": + left_lineno, left_col = increment(left_lineno, left_col) + # find right bracket (final character of expression) + right_lineno = cast(int, expr.end_lineno) - 2 + right_col = normalize(right_lineno, expr.end_col_offset) + return _Anchors(left_lineno, left_col, right_lineno, right_col) + elif isinstance(expr, ast.Call): + # ( func_expr ) (args, kwargs) + # func^^^^^ + # call^^^^^^^^^^^^^^^^^^^^^^^^ + # find left bracket (first '(' after func) + left_lineno = cast(int, expr.func.end_lineno) - 2 + left_col = normalize(left_lineno, expr.func.end_col_offset) + left_lineno, left_col = next_valid_char(left_lineno, left_col) + while lines[left_lineno][left_col] != "(": + left_lineno, left_col = increment(left_lineno, left_col) + # find right bracket (final character of expression) + right_lineno = cast(int, expr.end_lineno) - 2 + right_col = normalize(right_lineno, expr.end_col_offset) + return _Anchors(left_lineno, left_col, right_lineno, right_col) + + return None + + +def get_instruction_source_311(code: types.CodeType, inst: dis.Instruction) -> str: + """ + Python 3.11+ only. Returns lines of source code (from code object `code`) + corresponding to `inst`'s location data, and underlines relevant code to `inst`. + + Example: CALL on `g`: + f(g( + ^^ + h(x))) + ^^^^^ + + We need our own implementation since `format_frame_summary` in + Python's `traceback` module doesn't handle multi-line expressions + (and their anchor extraction code is not completely correct). + """ + assert inst.positions is not None + if inst.positions.lineno is None: + return "" + # The rstrip + "\n" pattern is used throughout this function to handle + # linecache.getline errors. Error lines are treated as empty strings "", but we want + # to treat them as blank lines "\n". 
+ first_line = linecache.getline(code.co_filename, inst.positions.lineno).rstrip() + if inst.positions.end_lineno is None: + return first_line + if inst.positions.col_offset is None or inst.positions.end_col_offset is None: + return first_line + + # character index of the start of the instruction + start_offset = _fix_offset(first_line, inst.positions.col_offset) + # character index of the end of the instruction + # compute later since end may be a different line + end_offset = None + # expression corresponding to the instruction so we can get anchors + segment = "" + # underline markers to be printed - start with `~` marker and replace with `^` later + markers = [] + + # Compute segment and initial markers + if inst.positions.end_lineno == inst.positions.lineno: + end_offset = _fix_offset(first_line, inst.positions.end_col_offset) + segment = first_line[start_offset:end_offset] + markers.append(" " * start_offset + "~" * (end_offset - start_offset)) + else: + segment = first_line[start_offset:] + "\n" + markers.append(" " * start_offset + "~" * (len(first_line) - start_offset)) + last_line = linecache.getline( + code.co_filename, inst.positions.end_lineno + ).rstrip() + end_offset = _fix_offset(last_line, inst.positions.end_col_offset) + for lineno in range(inst.positions.lineno + 1, inst.positions.end_lineno): + line = linecache.getline(code.co_filename, lineno).rstrip() + segment += line + "\n" + # don't underline leading spaces + num_spaces = len(line) - len(line.lstrip()) + markers.append(" " * num_spaces + "~" * (len(line) - num_spaces)) + segment += last_line[:end_offset] + num_spaces = len(last_line) - len(last_line.lstrip()) + markers.append(" " * num_spaces + "~" * (end_offset - num_spaces)) + + anchors: Optional[_Anchors] = None + try: + anchors = _extract_anchors_from_expr(segment) + except AssertionError: + pass + + # replace `~` markers with `^` where necessary + if anchors is None: + markers = [marker.replace("~", "^") for marker in markers] + else: + # make markers mutable + mutable_markers: List[List[str]] = [list(marker) for marker in markers] + + # anchor positions do not take start_offset into account + if anchors.left_end_lineno == 0: + anchors.left_end_offset += start_offset + if anchors.right_start_lineno == 0: + anchors.right_start_offset += start_offset + + # Turn `~`` markers between anchors to `^` + for lineno in range(len(markers)): + for col in range(len(mutable_markers[lineno])): + if lineno < anchors.left_end_lineno: + continue + if lineno == anchors.left_end_lineno and col < anchors.left_end_offset: + continue + if ( + lineno == anchors.right_start_lineno + and col >= anchors.right_start_offset + ): + continue + if lineno > anchors.right_start_lineno: + continue + if mutable_markers[lineno][col] == "~": + mutable_markers[lineno][col] = "^" + + # make markers into strings again + markers = ["".join(marker) for marker in mutable_markers] + + result = "" + for i in range(len(markers)): + result += ( + linecache.getline(code.co_filename, inst.positions.lineno + i).rstrip() + + "\n" + ) + result += markers[i] + "\n" + return result + + +def get_static_address_type(t): + if isinstance(t, torch.Tensor): + return getattr(t, "_dynamo_static_input_type", None) + + return None + + +def is_rng_state_getter_or_setter(value): + getters = ( + # The following two functions are not identical, so don't remove anyone! 
+        torch._C.Generator.get_state,
+        torch.default_generator.get_state,
+        torch.get_rng_state,
+        torch.cuda.get_rng_state,
+    )
+    setters = (
+        torch._C.Generator.set_state,
+        torch.default_generator.set_state,
+        torch.set_rng_state,
+        torch.cuda.set_rng_state,
+    )
+    return value in (*setters, *getters)
+
+
+def is_tensor_base_attr_getter(value):
+    return (
+        isinstance(value, types.MethodWrapperType)
+        and value.__name__ == "__get__"
+        and value.__self__.__objclass__ is torch._C._TensorBase  # type: ignore[attr-defined]
+    )
+
+
+def is_torch_function_object(value):
+    return hasattr(value, "__torch_function__")
+
+
+def has_torch_function(vt: torch._dynamo.variables.base.VariableTracker) -> bool:
+    from torch._dynamo.variables import LazyVariableTracker, UserDefinedObjectVariable
+    from torch._dynamo.variables.torch_function import TensorWithTFOverrideVariable
+
+    if isinstance(vt, TensorWithTFOverrideVariable):
+        return True
+
+    if isinstance(vt, LazyVariableTracker):
+        LazyVariableTracker.realize(vt)
+
+    return isinstance(vt, UserDefinedObjectVariable) and hasattr(
+        vt.value, "__torch_function__"
+    )
+
+
+# see note [Tensor Fakification and Symbol Caching]
+def to_fake_tensor(t, fake_mode):
+    symbolic_context = None
+    source = None
+    if tracing_context := torch._guards.TracingContext.try_get():
+        if t in tracing_context.tensor_to_context:
+            symbolic_context = tracing_context.tensor_to_context[t]
+            source = symbolic_context.tensor_source
+
+    return fake_mode.from_tensor(
+        t, static_shapes=False, symbolic_context=symbolic_context, source=source
+    )
+
+
+# NB: this works for both classes and instances
+def is_frozen_dataclass(value):
+    return (
+        not object_has_getattribute(value)
+        and not class_has_getattribute(value)
+        and is_dataclass(value)
+        and value.__dataclass_params__.frozen
+    )
+
+
+def get_first_attr(obj, *attrs):
+    """
+    Return the first available attribute or throw an exception if none is present.
+    """
+    for attr in attrs:
+        if hasattr(obj, attr):
+            return getattr(obj, attr)
+
+    raise AssertionError(f"{obj} does not have any of the attributes: {attrs}")
+
+
+@contextlib.contextmanager
+def maybe_enable_compiled_autograd(should_enable, fullgraph=True, dynamic=True):
+    if not should_enable:
+        yield
+    else:
+
+        def compiler_fn(gm):
+            def inner_compiler(gm_, example_inputs_):
+                torch._dynamo.utils.counters["compiled_autograd"]["compiles"] += 1
+                return torch._inductor.compile(gm_, example_inputs_)
+
+            return torch.compile(
+                gm, backend=inner_compiler, fullgraph=fullgraph, dynamic=dynamic
+            )
+
+        with torch._dynamo.compiled_autograd.enable(compiler_fn) as ctx:
+            yield ctx
+
+
+def invalid_removeable_handle():
+    # need a subclass so weakref works
+    class Invalid(dict):  # type: ignore[type-arg]
+        pass
+
+    return RemovableHandle(Invalid())
+
+
+# Returns a "proxy" (new object with the same class and dict) for (non-GraphModule) nn.Module's.
+# Attribute changes to the original object/proxy will be reflected in the other.
+# This is useful for cases where we want a keep-alive reference to a module without increasing
+# its reference count.
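+# A hedged usage sketch: given mod = torch.nn.Linear(2, 2), p = nn_module_proxy(mod)
+# shares mod.__dict__, so p.training = False is also observable as mod.training == False,
+# while p holds no extra strong reference to the module object itself.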
+def nn_module_proxy(mod): + if not isinstance(mod, torch.nn.Module): + return mod + if isinstance(mod, torch.fx.GraphModule): + # Dynamo-generated GM's shouldn't contain user-created GM's + return mod + proxy = mod.__class__.__new__(mod.__class__) + proxy.__dict__ = mod.__dict__ + return proxy + + +class GmWrapper(torch.nn.Module): + def __init__(self, gm, unflatten_fn): + super().__init__() + self.gm = gm + self.unflatten_fn = unflatten_fn + + def forward(self, *args): + args: List[Any] = list(args) + return self.gm(*self.unflatten_fn(args)) + + +def flatten_graph_inputs(gm: torch.fx.GraphModule, inputs, compile_gm): + """ + Mutate inputs so that they are flat and wrap gm such that it + accepts those inputs. This is needed for graphs that take + bumpy inputs. + """ + inputs_idx_to_clear = [ + i + for i, node in enumerate(gm.graph.nodes) + if node.op == "placeholder" and node.meta.get("steal_arg", False) + ] + + if torch._dynamo.compiled_autograd.in_compiled_autograd_region: + # fast path, avoid pytree overhead + # compiled autograd inputs are always a list of tensors, maybe followed by symints + assert inputs_idx_to_clear == [0] + assert isinstance(inputs[0], list) + boxed_inputs_count = len(inputs[0]) + + def flatten_fn(args): + return args[0] + list(args[1:]) + + def unflatten_fn(flat_args): + return (flat_args[:boxed_inputs_count], *flat_args[boxed_inputs_count:]) + + compiled_fn = compile_gm(GmWrapper(gm, unflatten_fn), flatten_fn(inputs)) + else: + # slow path, don't know inputs structure + flat_inputs, spec = pytree.tree_flatten(inputs) + unflatten_fn = functools.partial(pytree.tree_unflatten, treespec=spec) + compiled_fn = compile_gm(GmWrapper(gm, unflatten_fn), flat_inputs) + # note this doesn't check the spec, assuming it is the same + flatten_fn = pytree.arg_tree_leaves + + def wrapper(*args): + flat_args = flatten_fn(args) + + # flat_args is a new list, so we need to clear references from the old list + for i in inputs_idx_to_clear: + args[i].clear() + + # this call is boxed to avoid increasing refcount until we reach aot_module_simplified forward + return compiled_fn(flat_args) + + return wrapper + + +def get_locals_to_steal(maybe_gm): + if not isinstance(maybe_gm, torch.fx.GraphModule) or not hasattr(maybe_gm, "meta"): + return [] + return maybe_gm.meta.get("locals_to_steal", []) + + +def set_locals_to_steal(gm, locals_to_steal): + gm.meta["locals_to_steal"] = locals_to_steal + + +class Lit: + def __init__(self, s): + self.s = s + + def __repr__(self): + return self.s + + +warn_once_cache: Set[str] = set() + + +def warn_once(msg, stacklevel=1): + # Dynamo causes all warnings.warn (in user code and in Dynamo code) to print all the time. + # https://github.com/pytorch/pytorch/issues/128427. + # warn_once is a workaround: if the msg has been warned on before, then we will not + # warn again. + # NB: it's totally ok to store a cache of all the strings: this is what warnings.warn does as well. 
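+    # (stacklevel is bumped by one below so the warning points at warn_once's
+    # caller rather than at warn_once itself.)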
+ if msg in warn_once_cache: + return + warn_once_cache.add(msg) + warnings.warn(msg, stacklevel=stacklevel + 1) + + +def strip_color_from_string(text): + # This regular expression matches ANSI escape codes + ansi_escape = re.compile(r"\x1B[@-_][0-?]*[ -/]*[@-~]") + return ansi_escape.sub("", text) + + +@contextlib.contextmanager +def _disable_saved_tensors_hooks_during_tracing(): + # See NOTE: [Deferring tensor pack/unpack hooks until runtime] + try: + prior = torch._C._autograd._saved_tensors_hooks_set_tracing(True) + yield + finally: + torch._C._autograd._saved_tensors_hooks_set_tracing(prior) + + +def is_parameter_freezing(): + return torch._inductor.config.freezing and not torch.is_grad_enabled() + + +def get_torch_function_mode_stack(filter_ignored=True): + from .variables.torch_function import IGNORED_MODES + + stack = [_get_function_stack_at(i) for i in range(_len_torch_function_stack())] + if filter_ignored: + stack = [mode for mode in stack if type(mode) not in IGNORED_MODES] + + return stack + + +def get_torch_function_mode_stack_at(ind): + assert ind < _len_torch_function_stack() and ind >= 0 + return torch._C._get_function_stack_at(ind) + + +def set_torch_function_mode_stack(stack): + for i in range(_len_torch_function_stack()): + _pop_torch_function_stack() + + for mode in stack: + _push_on_torch_function_stack(mode) + + +def verify_guard_fn_signature(value): + fn = value.__metadata_guard__ + sig = inspect.signature(fn) + if len(sig.parameters) != 2: + from .exc import InternalTorchDynamoError + + raise InternalTorchDynamoError( + "Tensor subclass method __metadata_guard__ must take exactly two subclass metadata arguments" + ) + if fn.__self__ != value.__class__: + from .exc import InternalTorchDynamoError + + raise InternalTorchDynamoError( + "Tensor subclass method __metadata_guard__ must be a classmethod" + ) + + +def does_not_override_dict_iter_methods(user_cls): + return ( + user_cls.items in (dict.items, collections.OrderedDict.items) + and user_cls.values in (dict.values, collections.OrderedDict.values) + and user_cls.keys in (dict.keys, collections.OrderedDict.keys) + and user_cls.__iter__ in (dict.__iter__, collections.OrderedDict.__iter__) + ) + + +# Helper function to extract relevant parts of a tensor's __dict__ to store in node meta. +# To avoid ref cycles, it's important that no tensors are present here, so leave those out. +def _extract_tensor_dict(t): + KEYS_TO_COPY = [ + "_dynamo_static_input_type", + "tag", + ] + + tensor_dict = { + key: copy.copy(t.__dict__[key]) for key in KEYS_TO_COPY if key in t.__dict__ + } + + return tensor_dict + + +# This is useful for reconstructing within the Dynamo graph the non-graph-input objects +# whose lifetime is governed by the user. +# e.g. torch.cuda.Event is a prime example. 
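+# Round-trip sketch: store_user_object_weakref(obj) records id(obj) -> weakref.ref(obj),
+# and get_user_object_from_id(id(obj)) dereferences it later, asserting the object is
+# still alive.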
+user_obj_id_to_weakref: Dict[int, weakref.ReferenceType[object]] = {} + + +def get_user_object_from_id(obj_id): + obj = user_obj_id_to_weakref[obj_id]() + assert obj is not None, "User object is no longer alive" + return obj + + +def store_user_object_weakref(obj): + obj_id = id(obj) + user_obj_id_to_weakref[obj_id] = weakref.ref(obj) + + +class CompileTimeInstructionCounter: + _counter: int = 0 + _id: int = -1 + _depth = 0 + + @classmethod + def start(cls) -> None: + cls._depth = cls._depth + 1 + if cls._depth == 1: + cls._id = _instruction_counter.start() + + @classmethod + def end(cls) -> None: + cls._depth = cls._depth - 1 + if cls._depth == 0: + cls._counter += _instruction_counter.end(cls._id) + cls._id = -1 + + @classmethod + def clear(cls) -> None: + cls._counter = 0 + + @classmethod + def value(cls) -> int: + return cls._counter + + @classmethod + @contextmanager + def record(cls): + try: + if config.record_compile_time_instruction_count: + cls.start() + yield + finally: + if config.record_compile_time_instruction_count: + cls.end() diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/__init__.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84474c90756e48e120a5af92efd764faa063b5c4 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/__init__.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/base.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfbd290bd01d0f2143945bcd6d89d6cb01c65c9e Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/base.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builder.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9b000fcf1d971b056f3f6d83bd0e9b54a65c869 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builder.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builtin.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builtin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..252747f9646169fe0ad37cd0d32f061f6d41673f Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/builtin.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/constant.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/constant.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c05c9c40baf3480b68aced737945f7e24c2c1f6 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/constant.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/ctx_manager.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/ctx_manager.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..040a667ebfb07bdc47b98c836db76a9f1f17cc77 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/ctx_manager.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/dicts.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/dicts.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a6d57efdc5e92ae607b80c4b919f81c06f8c217 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/dicts.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/distributed.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/distributed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36a4a0e1fac2e08af745e0776dc43a182d01af38 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/distributed.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/functions.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc60e18441c9e9e78293a7037e2af74e0eb9f161 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/functions.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/higher_order_ops.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/higher_order_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54fbadfcfef85f912034ceae08eedb76406ba7d3 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/higher_order_ops.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/iter.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/iter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8cc3ea0aaa51431b29aacc9a3ac7451bdd03be89 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/iter.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/lazy.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/lazy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..faf3efe9c72a79ea3a94ad252e34da2ea3267310 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/lazy.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/lists.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/lists.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..772e7fe91488fec7b54730868fae931436062dc5 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/lists.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/misc.cpython-310.pyc 
b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/misc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4172845b751bbe490837923690b426051222379 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/misc.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/nn_module.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/nn_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f62a17cf77978190dd9466e51f9ca13c03652ab9 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/nn_module.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/optimizer.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/optimizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6dcf39104ecc705211835ca4c282c4bc786a49c6 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/optimizer.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/script_object.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/script_object.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9eac5d93b83da8d6ab550a4825f23cf0cf0068f6 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/script_object.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/sdpa.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/sdpa.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..406f722f20a7a9e6db414207a2e71d1e15eb3b2b Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/sdpa.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/tensor.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44f87b08ed4a70a3e6e3915d5b2e0c4e45597e07 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/tensor.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/torch.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/torch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..575c692cc07a534d2ede03802692bde89f88b2da Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/torch.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/torch_function.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/torch_function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8fe565fa9362a536f5bea1b1f0b77dd3c8471a6 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/torch_function.cpython-310.pyc differ diff --git 
a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/user_defined.cpython-310.pyc b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/user_defined.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a62d0abd6200723fcc61012a925ffc6cba9b8663 Binary files /dev/null and b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/__pycache__/user_defined.cpython-310.pyc differ diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/builtin.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/builtin.py new file mode 100644 index 0000000000000000000000000000000000000000..b6ff05e429d12c0f9c6b153346ea338f6d80a609 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/builtin.py @@ -0,0 +1,2109 @@ +# mypy: ignore-errors + +import contextlib +import functools +import inspect +import itertools +import logging +import math +import operator +import types +from collections import defaultdict, OrderedDict +from collections.abc import KeysView +from typing import Dict, List, TYPE_CHECKING + +import torch +from torch import sym_float, sym_int +from torch.utils._python_dispatch import is_traceable_wrapper_subclass + +from .. import config, variables +from ..exc import ( + AttributeMutationError, + unimplemented, + Unsupported, + UserError, + UserErrorType, +) +from ..guards import GuardBuilder, install_guard +from ..replay_record import DummyModule +from ..source import AttrSource, GetItemSource, is_constant_source, TypeSource +from ..utils import ( + check_constant_args, + check_numpy_ndarray_args, + check_unspec_or_constant_args, + check_unspec_python_args, + does_not_override_dict_iter_methods, + extract_fake_example_value, + get_fake_value, + guard_if_dyn, + is_wrapper_or_member_descriptor, + istype, + numpy_operator_wrapper, + proxy_args_kwargs, + tensortype_to_dtype, +) +from .base import MutableLocal, VariableTracker +from .constant import ConstantVariable +from .ctx_manager import EventVariable, StreamVariable +from .dicts import ( + ConstDictVariable, + DefaultDictVariable, + DictView, + FrozensetVariable, + is_hashable, + SetVariable, +) +from .lists import ( + BaseListVariable, + ListIteratorVariable, + ListVariable, + SizeVariable, + TupleIteratorVariable, + TupleVariable, +) +from .tensor import ( + FakeItemVariable, + supported_comparison_ops, + SymNodeVariable, + TensorVariable, + UnspecializedPythonVariable, +) +from .user_defined import UserDefinedObjectVariable, UserDefinedVariable + + +if TYPE_CHECKING: + from torch._dynamo.symbolic_convert import InstructionTranslator + + +log = logging.getLogger(__name__) + + +IN_PLACE_DESUGARING_MAP = { + operator.iadd: operator.add, + operator.isub: operator.sub, + operator.imul: operator.mul, + operator.ifloordiv: operator.floordiv, + operator.itruediv: operator.truediv, + operator.imod: operator.mod, + operator.imatmul: operator.imatmul, + operator.ilshift: operator.lshift, + operator.irshift: operator.rshift, + operator.ipow: operator.pow, + operator.iand: operator.and_, + operator.ior: operator.or_, + operator.ixor: operator.xor, +} + + +class BuiltinVariable(VariableTracker): + _SENTINEL = object() + _nonvar_fields = { + "fn", + *VariableTracker._nonvar_fields, + } + + @classmethod + def create_with_source(cls, value, source): + install_guard(source.make_guard(GuardBuilder.BUILTIN_MATCH)) + return cls(value, source=source) + + @staticmethod + @functools.lru_cache(None) + def _constant_fold_functions(): + fns = { + 
abs, + all, + any, + bool, + callable, + chr, + divmod, + float, + getattr, + int, + len, + max, + min, + ord, + pow, + repr, + round, + str, + str.format, + sum, + type, + operator.abs, + operator.pos, + operator.neg, + operator.not_, + operator.truth, + operator.invert, + operator.pow, + operator.mul, + operator.matmul, + operator.floordiv, + operator.truediv, + operator.mod, + operator.add, + operator.sub, + operator.getitem, + operator.length_hint, + operator.lshift, + operator.rshift, + operator.and_, + operator.or_, + operator.xor, + operator.ipow, + operator.imul, + operator.imatmul, + operator.ifloordiv, + operator.itruediv, + operator.imod, + operator.iadd, + operator.isub, + operator.ilshift, + operator.irshift, + operator.iand, + operator.ixor, + operator.ior, + operator.index, + } + from .tensor import supported_comparison_ops + + fns.update(supported_comparison_ops.values()) + fns.update(x for x in math.__dict__.values() if isinstance(x, type(math.sqrt))) + return fns + + def can_constant_fold_through(self): + return self.fn in self._constant_fold_functions() + + @staticmethod + @functools.lru_cache(None) + def _fx_graph_functions(): + fns = { + operator.abs, + operator.pos, + operator.neg, + operator.not_, + operator.invert, + operator.pow, + operator.mul, + operator.matmul, + operator.floordiv, + operator.truediv, + operator.mod, + operator.add, + operator.lt, + operator.gt, + operator.ge, + operator.le, + operator.ne, + operator.eq, + operator.sub, + operator.getitem, + operator.length_hint, + operator.lshift, + operator.rshift, + operator.and_, + operator.or_, + operator.xor, + operator.ipow, + operator.imul, + operator.imatmul, + operator.ifloordiv, + operator.itruediv, + operator.imod, + operator.iadd, + operator.isub, + operator.ilshift, + operator.irshift, + operator.iand, + operator.ixor, + operator.ior, + } + return fns + + @staticmethod + @functools.lru_cache(None) + def _binops(): + # function -> ([forward name, reverse name, in-place name], in-place op) + fns = { + operator.add: (["__add__", "__radd__", "__iadd__"], operator.iadd), + operator.sub: (["__sub__", "__rsub__", "__isub__"], operator.isub), + operator.mul: (["__mul__", "__rmul__", "__imul__"], operator.imul), + operator.truediv: ( + ["__truediv__", "__rtruediv__", "__itruediv__"], + operator.itruediv, + ), + operator.floordiv: ( + ["__floordiv__", "__rfloordiv__", "__ifloordiv__"], + operator.ifloordiv, + ), + operator.mod: (["__mod__", "__rmod__", "__imod__"], operator.imod), + pow: (["__pow__", "__rpow__", "__ipow__"], operator.ipow), + operator.pow: (["__pow__", "__rpow__", "__ipow__"], operator.ipow), + operator.lshift: ( + ["__lshift__", "__rlshift__", "__ilshift__"], + operator.ilshift, + ), + operator.rshift: ( + ["__rshift__", "__rrshift__", "__irshift__"], + operator.irshift, + ), + # NB: The following binary operators are not supported for now, since the + # corresponding magic methods aren't defined on SymInt / SymFloat: + # operator.matmul + # divmod + # operator.and_ + # operator.or_ + # operator.xor + } + return fns + + @staticmethod + @functools.lru_cache(None) + def _binop_handlers(): + # Multiple dispatch mechanism defining custom binop behavior for certain type + # combinations. Handlers are attempted in order, and will be used if the type checks + # match.
They are expected to have the signature: + # fn(tx, arg0: VariableTracker, arg1: VariableTracker) -> VariableTracker + from .dicts import DictKeys, SetVariable + from .functions import BaseUserFunctionVariable, UserFunctionVariable + from .nn_module import NNModuleVariable + from .tensor import supported_const_comparison_ops + from .torch import BaseTorchVariable + from .user_defined import ( + UserDefinedClassVariable, + UserDefinedObjectVariable, + UserDefinedVariable, + ) + + # Override table contains: op_fn -> [list of handlers] + op_handlers = {} + for ( + op, + (magic_method_names, in_place_op), + ) in BuiltinVariable._binops().items(): + op_handlers[op] = [] + op_handlers[in_place_op] = [] + + forward_name, reverse_name, inplace_name = magic_method_names + + # User-defined args (highest precedence) + def user_defined_handler( + tx, + a, + b, + *, + forward_name=forward_name, + reverse_name=reverse_name, + ): + # Manually handle reversing logic if needed (e.g. call __radd__) + + # TODO: If we expand this to handle tensor args, we need to manually + # handle cases like this: + # + # class A(int): + # def __radd__(self, other): + # print("woof") + # torch.randn(3) + A(3) + # + # In this example, A.__radd__() is not called -> nothing is printed, because + # Tensor.__add__ only does a subtype test against int, ignoring the subclass. + # To be fully correct, we should not call A.__radd__() here, and there may be + # other cases to reason about and add exceptions for. + if isinstance(a, UserDefinedVariable): + return a.call_method(tx, forward_name, [b], {}) + else: + return b.call_method(tx, reverse_name, [a], {}) + + op_handlers[op].append( + ((UserDefinedVariable, VariableTracker), user_defined_handler) + ) + op_handlers[op].append( + ((VariableTracker, UserDefinedVariable), user_defined_handler) + ) + + def user_defined_inplace_handler( + tx: "InstructionTranslator", a, b, *, forward_name=inplace_name + ): + return a.call_method(tx, forward_name, [b], {}) + + op_handlers[in_place_op].append( + ((UserDefinedVariable, VariableTracker), user_defined_inplace_handler) + ) + op_handlers[in_place_op].append( + ((VariableTracker, UserDefinedVariable), user_defined_inplace_handler) + ) + + # Dynamic shape args + def dynamic_handler(tx: "InstructionTranslator", a, b, *, fn=op): + from .builder import wrap_fx_proxy + + return wrap_fx_proxy( + tx, + tx.output.create_proxy( + "call_function", fn, *proxy_args_kwargs([a, b], {}) + ), + ) + + op_handlers[op].append( + ((SymNodeVariable, VariableTracker), dynamic_handler) + ) + op_handlers[op].append( + ((VariableTracker, SymNodeVariable), dynamic_handler) + ) + + # NB: Prefer out-of-place op when calling in-place op to generate valid graph + op_handlers[in_place_op].append( + ((SymNodeVariable, VariableTracker), dynamic_handler) + ) + op_handlers[in_place_op].append( + ((VariableTracker, SymNodeVariable), dynamic_handler) + ) + + # Special cases - lower precedence but still prefer these over constant folding + + # List-like addition (e.g. [1, 2] + [3, 4]) + def tuple_add_handler(tx: "InstructionTranslator", a, b): + return TupleVariable([*a.items, *b.unpack_var_sequence(tx)]) + + def size_add_handler(tx: "InstructionTranslator", a, b): + return SizeVariable([*a.items, *b.unpack_var_sequence(tx)]) + + list_like_addition_handlers = [ + # NB: Prefer the tuple-specific logic over base logic because of + # some SizeVariable weirdness. Specifically, the tuple-specific logic + # drops the subclass type (e.g. SizeVariable) and returns TupleVariables. 
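+ # As a rough, stdlib-only illustration of the (type pair -> handler)
+ # dispatch these tables implement (a sketch, not the actual mechanism):
+ #
+ #     handlers = [((tuple, tuple), lambda a, b: (*a, *b))]
+ #     for (t1, t2), h in handlers:
+ #         if isinstance((1, 2), t1) and isinstance((3,), t2):
+ #             assert h((1, 2), (3,)) == (1, 2, 3)
+ #             break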
+ ( + (SizeVariable, SizeVariable), + size_add_handler, + ), + ( + (TupleVariable, TupleVariable), + tuple_add_handler, + ), + ( + (TupleVariable, ConstantVariable), + tuple_add_handler, + ), + ( + (ConstantVariable, TupleVariable), + lambda tx, a, b: TupleVariable( + [*a.unpack_var_sequence(tx), *b.items], + ), + ), + ( + ( + ListVariable, + (BaseListVariable, ConstantVariable, ListIteratorVariable), + ), + lambda tx, a, b: ListVariable( + [*a.items, *b.unpack_var_sequence(tx)], mutable_local=MutableLocal() + ), + ), + ( + (BaseListVariable, BaseListVariable), + lambda tx, a, b: type(a)([*a.items, *b.items]), + ), + ] + op_handlers[operator.add].extend(list_like_addition_handlers) + + def list_iadd_handler(tx: "InstructionTranslator", a, b): + if not a.mutable_local or not b.has_unpack_var_sequence(tx): + # Handler doesn't apply + return None + + seq = b.unpack_var_sequence(tx) + tx.output.side_effects.mutation(a) + a.items.extend(seq) + return a + + list_like_iadd_handlers = [ + ( + (ListVariable, VariableTracker), + list_iadd_handler, + ), + ( + (TupleVariable, TupleVariable), + tuple_add_handler, + ), + ( + (TupleVariable, ConstantVariable), + tuple_add_handler, + ), + ] + op_handlers[operator.iadd].extend(list_like_iadd_handlers) + + # List-like expansion (e.g. [1, 2, 3] * 3) + def expand_list_like(tx: "InstructionTranslator", lst, const): + if isinstance(lst, ConstantVariable): + lst, const = const, lst + return lst.__class__( + items=lst.items * const.as_python_constant(), + mutable_local=MutableLocal(), + ) + + list_like_expansion_handlers = [ + ((ListVariable, ConstantVariable), expand_list_like), + ((TupleVariable, ConstantVariable), expand_list_like), + ((ConstantVariable, ListVariable), expand_list_like), + ((ConstantVariable, TupleVariable), expand_list_like), + ] + op_handlers[operator.mul].extend(list_like_expansion_handlers) + + size_or_tuple = (SizeVariable, TupleVariable) + has_set_items = (SetVariable, DictKeys) + + def create_cmp_op_handlers(op): + def compare_by_value(tx: "InstructionTranslator", a, b): + return ConstantVariable(op(a.value, b.value)) + + result = [((ConstantVariable, ConstantVariable), compare_by_value)] + + if op in supported_const_comparison_ops.values(): + # Tensor is None, List is not None, etc + none_result = op(object(), None) + if op.__name__.startswith("is_"): + + def never(tx: "InstructionTranslator", a, b): + return ConstantVariable(none_result) + + obj_op_none = never + none_op_obj = never + else: + + def obj_op_none( + tx: "InstructionTranslator", a, b: ConstantVariable + ): + if b.value is None or b.value is True or b.value is False: + return ConstantVariable(none_result) + + def none_op_obj( + tx: "InstructionTranslator", a: ConstantVariable, b + ): + if a.value is None or a.value is True or a.value is False: + return ConstantVariable(none_result) + + types_that_are_never_none = ( + TensorVariable, + SymNodeVariable, + NNModuleVariable, + BaseListVariable, + UserDefinedVariable, + BaseUserFunctionVariable, + ConstDictVariable, + BaseTorchVariable, + ) + result.extend( + [ + ( + (types_that_are_never_none, ConstantVariable), + obj_op_none, + ), + ( + (ConstantVariable, types_that_are_never_none), + none_op_obj, + ), + ] + ) + + def list_compare_nocheck(tx: "InstructionTranslator", left, right): + return BaseListVariable.list_compare(tx, op, left, right) + + def list_compare_check(tx: "InstructionTranslator", left, right): + if type(left) is not type( + right + ): # Mismatch in BaseListVariable subclasses + 
unimplemented(f"{op.__name__}({left}, {right})") + return BaseListVariable.list_compare(tx, op, left, right) + + def compare_set_items(tx: "InstructionTranslator", left, right): + return ConstantVariable(op(left.set_items, right.set_items)) + + def compare_via_method(tx: "InstructionTranslator", left, right): + return left.call_method(tx, f"__{op.__name__}__", [right], {}) + + if op.__name__.startswith("is_"): + compare_user_defined = compare_by_value + else: + compare_user_defined = compare_via_method + + op_var = BuiltinVariable(op) + result.extend( + [ + ( + ( + (UserFunctionVariable, BuiltinVariable), + (UserFunctionVariable, BuiltinVariable), + ), + lambda tx, a, b: ConstantVariable(op(a.fn, b.fn)), + ), + ( + ( + NNModuleVariable, + NNModuleVariable, + ), + lambda tx, a, b: ConstantVariable( + op( + tx.output.get_submodule(a.module_key), + tx.output.get_submodule(b.module_key), + ) + ), + ), + ((size_or_tuple, size_or_tuple), list_compare_nocheck), + ( + (variables.BaseListVariable, variables.BaseListVariable), + list_compare_check, + ), + ((has_set_items, has_set_items), compare_set_items), + ( + (UserDefinedObjectVariable, UserDefinedObjectVariable), + compare_user_defined, + ), + ( + (UserDefinedClassVariable, UserDefinedClassVariable), + compare_user_defined, + ), + ( + ( + (StreamVariable, EventVariable, ConstantVariable), + (StreamVariable, EventVariable, ConstantVariable), + ), + compare_by_value, + ), + ( + (TensorVariable, VariableTracker), + op_var._comparison_with_tensor, + ), + ( + (VariableTracker, TensorVariable), + op_var._comparison_with_tensor, + ), + ( + (SymNodeVariable, VariableTracker), + op_var._comparison_with_symnode, + ), + ( + (VariableTracker, SymNodeVariable), + op_var._comparison_with_symnode, + ), + ] + ) + + if op.__name__.startswith("is_"): + + def handle_is(tx: "InstructionTranslator", left, right): + # If the two objects are of different type, we can safely return False + # and True for `is` and `is not`, respectively + if type(left) is not type(right): + return ConstantVariable.create(op.__name__ != "is_") + + result.append(((VariableTracker, VariableTracker), handle_is)) + + return result + + for op in supported_comparison_ops.values(): + assert callable(op) + assert op not in op_handlers + op_handlers[op] = create_cmp_op_handlers(op) + + return op_handlers + + @staticmethod + def _find_binop_handler(op, a_type, b_type): + handlers = BuiltinVariable._binop_handlers().get(op) + if handlers is None: + return None + + matches = [] + for (type1, type2), handler in handlers: + if issubclass(a_type, type1) and issubclass(b_type, type2): + matches.append(handler) + return matches + + def can_insert_in_graph(self): + return self.fn in self._fx_graph_functions() + + def __init__(self, fn, **kwargs) -> None: + super().__init__(**kwargs) + self.fn = fn + + def __str__(self) -> str: + if self.fn is None: + name = "None" + else: + name = self.fn.__name__ + + return f"{self.__class__.__name__}({name})" + + def as_python_constant(self): + return self.fn + + def as_proxy(self): + DTYPE = { + bool: torch.bool, + int: torch.int64, + float: torch.float64, + } + if self.fn in DTYPE: + return DTYPE[self.fn] + return super().as_proxy() + + def reconstruct(self, codegen): + name = self.fn.__name__ + assert self.fn.__module__ == "builtins" + assert name not in codegen.tx.f_globals, "shadowed global" + codegen.append_output(codegen.create_load_global(name, False, add=True)) + + def constant_args(self, *args, **kwargs): + return check_constant_args(args, kwargs) + + def 
tensor_args(self, *args): + any_tensor = False + for arg in args: + if isinstance(arg, variables.GetAttrVariable): + return False + any_tensor = any_tensor or isinstance(arg, variables.TensorVariable) + return any_tensor + + def tensor_args_type(self, arg_types): + any_tensor = False + for arg_type in arg_types: + if issubclass(arg_type, variables.GetAttrVariable): + return False + any_tensor = any_tensor or issubclass(arg_type, variables.TensorVariable) + return any_tensor + + def python_and_tensor_constant_only(self, *args, **kwargs): + tensor_args = [] + non_tensor_args = [] + for i in itertools.chain(args, kwargs.values()): + if isinstance(i, variables.TensorVariable): + tensor_args.append(i) + else: + non_tensor_args.append(i) + return all( + is_constant_source(t.source) if t.source is not None else False + for t in tensor_args + ) and self.constant_args(*non_tensor_args) + + @staticmethod + def unwrap_unspec_args_kwargs(args, kwargs): + return [x.as_python_constant() for x in args], { + k: v.as_python_constant() for k, v in kwargs.items() + } + + def has_constant_handler(self, args, kwargs): + return self.can_constant_fold_through() and check_unspec_or_constant_args( + args, kwargs + ) + + @staticmethod + def _make_handler(fn, arg_types: List[type], has_kwargs: bool): + from .builder import SourcelessBuilder + from .lazy import LazyVariableTracker + + obj = BuiltinVariable(fn) + handlers = [] + + if any(issubclass(t, LazyVariableTracker) for t in arg_types): + return lambda tx, args, kwargs: obj.call_function( + tx, [v.realize() for v in args], kwargs + ) + + if inspect.isclass(fn) and issubclass(fn, Exception): + + def create_exception_class_object( + tx: "InstructionTranslator", args, kwargs + ): + if fn is AssertionError and not all( + isinstance(x, variables.ConstantVariable) + and isinstance(x.value, str) + for x in args + ): + unimplemented("assert with non-string message") + + return variables.ExceptionVariable(fn, args, **kwargs) + + return create_exception_class_object + + if obj.can_insert_in_graph() and not ( + fn is operator.getitem + and not issubclass(arg_types[0], variables.TensorVariable) + ): + if obj.tensor_args_type(arg_types): + return obj._handle_insert_op_in_graph + elif has_kwargs: + # need runtime check for kwargs + handlers.append(obj._handle_insert_op_in_graph) + + # Handle binary ops (e.g. __add__ / __radd__, __iadd__, etc.) + # NB: Tensor args are handled above and not here + if len(arg_types) == 2 and not has_kwargs: + # Try to find a handler for the arg types; otherwise, fall through to constant handler + binop_handlers = BuiltinVariable._find_binop_handler(fn, *arg_types) + if not binop_handlers: + pass + elif len(binop_handlers) == 1: + (binop_handler,) = binop_handlers + handlers.append(lambda tx, args, _: binop_handler(tx, *args)) + else: + + def call_binop_handlers(tx: "InstructionTranslator", args, _): + for fn in binop_handlers: + rv = fn(tx, *args) + if rv: + return rv + + handlers.append(call_binop_handlers) + + self_handler = getattr(obj, f"call_{fn.__name__}", None) + if self_handler: + + def call_self_handler(tx: "InstructionTranslator", args, kwargs): + try: + result = self_handler(tx, *args, **kwargs) + if result is not None: + return result + except TypeError: + # Check if binding is bad. inspect signature bind is expensive. + # So check only when handler call fails. 
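+ # The same lazy-bind pattern in isolation (a sketch): only pay for
+ # inspect.signature(...).bind(...) after a cheap direct call has
+ # already raised TypeError:
+ #
+ #     try:
+ #         handler(*args)
+ #     except TypeError:
+ #         inspect.signature(handler).bind(*args)  # raises if args don't fit
+ #         raise  # args did fit, so it was a genuine TypeError from the body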
+ try: + inspect.signature(self_handler).bind(tx, *args, **kwargs) + except TypeError as e: + has_constant_handler = obj.has_constant_handler(args, kwargs) + if not has_constant_handler: + log.warning( + "incorrect arg count %s %s and no constant handler", + self_handler, + e, + ) + unimplemented( + f"invalid handler args {self_handler} {args} {kwargs}" + ) + else: + raise + except Unsupported as exc: + has_constant_handler = obj.has_constant_handler(args, kwargs) + if not has_constant_handler: + raise + # Actually, we will handle this just fine + exc.remove_from_stats() + + handlers.append(call_self_handler) + + if obj.can_constant_fold_through(): + builder = SourcelessBuilder.create + + if ( + all(issubclass(x, ConstantVariable) for x in arg_types) + and not has_kwargs + ): + + def constant_fold_handler(tx: "InstructionTranslator", args, kwargs): + # fast path + try: + res = fn( + *[x.as_python_constant() for x in args], + ) + except Exception as exc: + unimplemented(f"constant fold exception: {repr(exc)}") + return builder(tx, res) + + else: + + def constant_fold_handler(tx: "InstructionTranslator", args, kwargs): + # path with a runtime check + if check_unspec_or_constant_args(args, kwargs): + try: + res = fn( + *[x.as_python_constant() for x in args], + **{ + k: v.as_python_constant() for k, v in kwargs.items() + }, + ) + except Exception as exc: + unimplemented(f"constant fold exception: {repr(exc)}") + return builder(tx, res) + + handlers.append(constant_fold_handler) + + error_msg = f"builtin: {fn.__name__} {arg_types} {has_kwargs}" + if len(handlers) == 0: + return lambda *args: unimplemented(error_msg) + elif len(handlers) == 1: + (handler,) = handlers + + def builtin_dispatch(tx: "InstructionTranslator", args, kwargs): + rv = handler(tx, args, kwargs) + if rv: + return rv + unimplemented(error_msg) + + else: + + def builtin_dispatch(tx: "InstructionTranslator", args, kwargs): + for fn in handlers: + rv = fn(tx, args, kwargs) + if rv: + return rv + unimplemented(error_msg) + + return builtin_dispatch + + def _handle_insert_op_in_graph(self, tx: "InstructionTranslator", args, kwargs): + from .builder import wrap_fx_proxy, wrap_fx_proxy_cls + + if kwargs and not self.tensor_args(*args, *kwargs.values()): + return + + fn = self.fn + try: + # Constant fold for constant tensor and python constants + if self.python_and_tensor_constant_only(*args, **kwargs): + from ..bytecode_transformation import unique_id + from .functions import invoke_and_store_as_constant + + return invoke_and_store_as_constant( + tx, fn, unique_id(fn.__name__), args, kwargs + ) + + if fn in IN_PLACE_DESUGARING_MAP and isinstance( + args[0], variables.ConstantVariable + ): + # In-place operators like += usually mutate tensor + # values, but in the edge case of immutable values they + # re-bind the variable. + # + # The easiest way to keep the graph consistent in this + # scenario is to de-sugar eagerly. + fn, args = IN_PLACE_DESUGARING_MAP[fn], [args[0], args[1]] + + if fn is operator.getitem and isinstance(args[1], SymNodeVariable): + # Standard indexing will force specialization due to + # __index__.
Rewrite as a regular torch op which will + # trace fine + fn, args = torch.select, [ + args[0], + variables.ConstantVariable.create(0), + args[1], + ] + + # Interaction between ndarray and tensors: + # We prefer the tensor op whenever there are tensors involved + if check_numpy_ndarray_args(args, kwargs) and not any( + type(arg) == variables.TensorVariable for arg in args + ): + proxy = tx.output.create_proxy( + "call_function", + numpy_operator_wrapper(fn), + *proxy_args_kwargs(args, kwargs), + ) + + return wrap_fx_proxy_cls(variables.NumpyNdarrayVariable, tx, proxy) + + proxy = tx.output.create_proxy( + "call_function", + fn, + *proxy_args_kwargs(args, kwargs), + ) + if any(isinstance(arg, FakeItemVariable) for arg in args): + return wrap_fx_proxy_cls( + FakeItemVariable, + tx, + proxy, + ) + elif check_unspec_python_args(args, kwargs): + _args, _kwargs = self.unwrap_unspec_args_kwargs(args, kwargs) + raw_value = fn(*_args, **_kwargs) + + need_unwrap = any( + x.need_unwrap + for x in itertools.chain(args, kwargs.values()) + if isinstance(x, variables.UnspecializedPythonVariable) + ) + + return wrap_fx_proxy_cls( + UnspecializedPythonVariable, + tx, + proxy, + raw_value=raw_value, + need_unwrap=need_unwrap, + ) + elif all(isinstance(x, SymNodeVariable) for x in args): + return SymNodeVariable.create(tx, proxy, None) + else: + # Workaround for vision_maskrcnn due to a precision difference: + # specialize the dividend when a float is divided by a tensor + if fn is operator.truediv and isinstance( + args[0], variables.UnspecializedPythonVariable + ): + args[0] = args[0].convert_to_constant(tx) + return wrap_fx_proxy(tx, proxy) + + except NotImplementedError: + unimplemented(f"partial tensor op: {self} {args} {kwargs}") + + call_function_handler_cache = {} + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if kwargs: + kwargs = {k: v.realize() for k, v in kwargs.items()} + key = (self.fn, *(type(x) for x in args), True) + else: + key = (self.fn, *(type(x) for x in args)) + + handler = self.call_function_handler_cache.get(key) + if not handler: + self.call_function_handler_cache[key] = handler = self._make_handler( + self.fn, [type(x) for x in args], bool(kwargs) + ) + return handler(tx, args, kwargs) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if self.fn is object and name == "__setattr__": + assert len(args) == 3 + assert len(kwargs) == 0 + obj, name_var, val = args + obj = obj.realize() + if ( + isinstance(obj, UserDefinedObjectVariable) + and tx.output.side_effects.is_attribute_mutation(obj) + and name_var.is_python_constant() + ): + return obj.method_setattr_standard(tx, name_var, val) + if self.fn is object and name == "__new__": + assert len(args) == 1 + assert len(kwargs) == 0 + return tx.output.side_effects.track_object_new_from_user_defined_class( + args[0] + ) + if self.fn is dict and name == "fromkeys": + return BuiltinVariable.call_custom_dict_fromkeys(tx, dict, *args, **kwargs) + return super().call_method(tx, name, args, kwargs) + + def _call_int_float(self, tx: "InstructionTranslator", arg): + # Handle cases like int(torch.seed()) + # Also handle sym_float to sym_int cases + if isinstance(arg, (SymNodeVariable, variables.TensorVariable)): + if isinstance(arg, variables.TensorVariable): + item = arg.call_method(tx, "item", [], {}) + else: + item = arg + fn_ = sym_int if self.fn is
int else sym_float + from torch._dynamo.variables.builder import wrap_fx_proxy + + return wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + fn_, + (item.as_proxy(),), + {}, + ), + ) + + call_int = _call_int_float + call_float = _call_int_float + + def call_str(self, tx: "InstructionTranslator", arg): + # Handle `str` on a user defined function or object + if isinstance(arg, (variables.UserFunctionVariable)): + return variables.ConstantVariable.create(value=str(arg.fn)) + elif isinstance(arg, (variables.UserDefinedObjectVariable)): + # Check if object has __str__ method + if hasattr(arg.value, "__str__"): + str_method = arg.value.__str__ + elif hasattr(arg.value, "__repr__"): + # account for __repr__ functions when __str__ is absent + str_method = arg.value.__repr__ + else: + unimplemented("user defined object has no __str__ or __repr__ method") + + if type(arg.value).__str__ is object.__str__: + # Rely on the object str method + try: + return variables.ConstantVariable.create(value=str_method()) + except AttributeError: + # Graph break + return + elif is_wrapper_or_member_descriptor(str_method): + unimplemented(f"{type(arg.value)} has a C/C++ based str method") + else: + # Overrides for custom str method + # Pass method as function to call tx.inline_user_function_return + bound_method = str_method.__func__ + + try: + # Only supports certain function types + user_func_variable = variables.UserFunctionVariable(bound_method) + except AssertionError as e: + # Won't be able to inline the str method, return to avoid graph break + log.warning("Failed to create UserFunctionVariable: %s", e) + return + + # Inline the user function + return tx.inline_user_function_return(user_func_variable, [arg], {}) + + def _call_min_max(self, tx: "InstructionTranslator", *args): + if len(args) == 1 and args[0].has_force_unpack_var_sequence(tx): + items = args[0].force_unpack_var_sequence(tx) + return self._call_min_max_seq(tx, items) + elif len(args) == 2: + return self._call_min_max_binary(tx, args[0], args[1]) + elif len(args) > 2: + return self._call_min_max_seq(tx, args) + + def _call_min_max_seq(self, tx: "InstructionTranslator", items): + assert len(items) > 0 + if len(items) == 1: + return items[0] + + return functools.reduce(functools.partial(self._call_min_max_binary, tx), items) + + def _call_min_max_binary(self, tx: "InstructionTranslator", a, b): + if a is None or b is None: + # a or b could be None if we reduce and _call_min_max_binary failed + # to return something + return + if self.tensor_args(a, b): + if not isinstance(a, variables.TensorVariable): + a, b = b, a + assert isinstance(a, variables.TensorVariable) + + # the result of an item call is a scalar; convert it to a tensor + if isinstance(a, FakeItemVariable): + a = variables.TorchInGraphFunctionVariable(torch.tensor).call_function( + tx, [a], {} + ) + + # Dynamic input does not get resolved, rather, gets stored as call_function + if isinstance(a, SymNodeVariable) or isinstance(b, SymNodeVariable): + from .builder import wrap_fx_proxy_cls + + return wrap_fx_proxy_cls( + type(a), + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + self.fn, + *proxy_args_kwargs([a, b], {}), + ), + ) + + # convert min/max to torch ops + if b.is_python_constant(): + if isinstance(a, variables.NumpyNdarrayVariable): + import numpy as np + + fn = variables.NumpyVariable(np.clip) + else: + fn = variables.TorchInGraphFunctionVariable(torch.clamp) + kwargs = {"min": b} if (self.fn is max) else {"max": b} + result = fn.call_function(tx,
[a], kwargs) + else: + if isinstance(a, variables.NumpyNdarrayVariable): + import numpy as np + + fn = {max: np.maximum, min: np.minimum}[self.fn] + fn = variables.NumpyVariable(fn) + else: + fn = {max: torch.maximum, min: torch.minimum}[self.fn] + fn = variables.TorchInGraphFunctionVariable(fn) + result = fn.call_function(tx, [a, b], {}) + + # return unspec if both a, b are unspec or const + if all( + isinstance( + i, + ( + variables.UnspecializedPythonVariable, + variables.ConstantVariable, + ), + ) + for i in [a, b] + ): + if any(isinstance(val, FakeItemVariable) for val in [a, b]): + return variables.FakeItemVariable.from_tensor_variable(result) + + if b.is_python_constant(): + raw_b = b.as_python_constant() + else: + raw_b = b.raw_value + if self.fn is max: + raw_res = max(a.raw_value, raw_b) + else: + raw_res = min(a.raw_value, raw_b) + + need_unwrap = any( + x.need_unwrap + for x in [a, b] + if isinstance(x, variables.UnspecializedPythonVariable) + ) + return variables.UnspecializedPythonVariable.from_tensor_variable( + result, raw_res, need_unwrap + ) + # otherwise return tensor + else: + return result + elif isinstance(a, SymNodeVariable) or isinstance(b, SymNodeVariable): + fn = torch.sym_max if self.fn is max else torch.sym_min + proxy = tx.output.create_proxy( + "call_function", fn, *proxy_args_kwargs([a, b], {}) + ) + return SymNodeVariable.create(tx, proxy, None) + + call_min = _call_min_max + call_max = _call_min_max + + def call_abs(self, tx: "InstructionTranslator", arg: "VariableTracker"): + # Call arg.__abs__() + abs_method = BuiltinVariable(getattr).call_function( + tx, [arg, ConstantVariable.create("__abs__")], {} + ) + return abs_method.call_function(tx, [], {}) + + def call_pos(self, tx: "InstructionTranslator", arg: "VariableTracker"): + # Call arg.__pos__() + pos_method = BuiltinVariable(getattr).call_function( + tx, [arg, ConstantVariable.create("__pos__")], {} + ) + return pos_method.call_function(tx, [], {}) + + def call_index(self, tx: "InstructionTranslator", arg: "VariableTracker"): + if isinstance(arg, variables.TensorVariable): + unimplemented("unsupported index(tensor)") + + arg = guard_if_dyn(arg) + constant_value = operator.index(arg) + return variables.ConstantVariable.create(constant_value) + + def call_round(self, tx: "InstructionTranslator", arg, *args, **kwargs): + # Call arg.__round__() + round_method = BuiltinVariable(getattr).call_function( + tx, [arg, ConstantVariable.create("__round__")], {} + ) + return round_method.call_function(tx, args, kwargs) + + def call_range(self, tx: "InstructionTranslator", *args): + if check_unspec_or_constant_args(args, {}): + return variables.RangeVariable(args) + elif self._dynamic_args(*args): + args = [ + variables.ConstantVariable.create(guard_if_dyn(arg)) for arg in args + ] + return variables.RangeVariable(args) + # None no-ops this handler and lets the driving function proceed + return None + + def _dynamic_args(self, *args, **kwargs): + return any(isinstance(x, SymNodeVariable) for x in args) or any( + isinstance(x, SymNodeVariable) for x in kwargs.values() + ) + + def call_slice(self, tx: "InstructionTranslator", *args): + return variables.SliceVariable(args) + + def _dyn_proxy(self, tx: "InstructionTranslator", *args, **kwargs): + from .builder import wrap_fx_proxy + + return wrap_fx_proxy( + tx, + tx.output.create_proxy( + "call_function", self.fn, *proxy_args_kwargs(args, kwargs) + ), + ) + + # NOTE must handle IteratorVariable separately! 
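+ # (Iterators are single-use, so unpacking one here would silently drain it.
+ # Eager analogue of the hazard, as a sketch:
+ #     it = iter([1, 2]); tuple(it); assert list(it) == []  # already consumed
+ # The IteratorVariable paths below force-materialize the sequence instead.)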
+ def _call_iter_tuple_list( + self, tx: "InstructionTranslator", obj=None, *args, **kwargs + ): + assert not isinstance(obj, variables.IteratorVariable) + + if self._dynamic_args(*args, **kwargs): + return self._dyn_proxy(tx, *args, **kwargs) + + cls = variables.BaseListVariable.cls_for(self.fn) + if obj is None: + return cls( + [], + mutable_local=MutableLocal(), + ) + elif obj.has_unpack_var_sequence(tx): + if obj.source and not is_constant_source(obj.source): + if isinstance(obj, TupleIteratorVariable): + install_guard( + obj.source.make_guard(GuardBuilder.TUPLE_ITERATOR_LEN) + ) + else: + if ( + getattr(obj, "source", False) + and isinstance(obj, ConstDictVariable) + and not istype(obj, SetVariable) + ): + tx.output.guard_on_key_order.add(obj.source.name()) + + install_guard(obj.source.make_guard(GuardBuilder.SEQUENCE_LENGTH)) + + return cls( + list(obj.unpack_var_sequence(tx)), + mutable_local=MutableLocal(), + ) + + def _call_tuple_list(self, tx, obj=None, *args, **kwargs): + if isinstance(obj, variables.IteratorVariable): + cls = variables.BaseListVariable.cls_for(self.fn) + return cls( + list(obj.force_unpack_var_sequence(tx)), + mutable_local=MutableLocal(), + ) + else: + return self._call_iter_tuple_list(tx, obj, *args, **kwargs) + + def call_iter(self, tx: "InstructionTranslator", obj, *args, **kwargs): + if isinstance(obj, variables.IteratorVariable): + ret = obj + else: + # Handle the case where we are iterating over a tuple, list or iterator + ret = self._call_iter_tuple_list(tx, obj, *args, **kwargs) + + if ret is None: + # If the object doesn't implement an __iter__ method, calling iter() on it would be an error in eager mode anyway. + # If the object implements an __iter__ method, inlining effectively forwards the call to another iter call + # (e.g. when __iter__ just returns iter(self.list)) or returns a user-defined iterator.
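+ # e.g. (sketch) a wrapper whose __iter__ merely delegates:
+ #     class Box:
+ #         def __init__(self): self.list = [1, 2]
+ #         def __iter__(self): return iter(self.list)  # inlines to a plain iter()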
+ return obj.call_method(tx, "__iter__", args, kwargs) + return ret + + call_tuple = _call_tuple_list + call_list = _call_tuple_list + + def call_callable(self, tx: "InstructionTranslator", arg): + from .functions import BaseUserFunctionVariable + from .nn_module import NNModuleVariable + + if isinstance( + arg, + ( + variables.UserDefinedClassVariable, + BaseUserFunctionVariable, + NNModuleVariable, + ), + ): + return variables.ConstantVariable.create(True) + elif isinstance(arg, UserDefinedVariable): + return variables.ConstantVariable.create(callable(arg.value)) + elif isinstance( + arg, + ( + ConstantVariable, + SymNodeVariable, + TensorVariable, + ListVariable, + TupleVariable, + ListIteratorVariable, + ), + ): + return variables.ConstantVariable.create(False) + + def call_cast(self, _, *args, **kwargs): + if len(args) == 2: + return args[1] + + unimplemented(f"unsupported args to builtin cast(): {args} {kwargs}") + + def call_dict(self, tx: "InstructionTranslator", *args, **kwargs): + return BuiltinVariable.call_custom_dict(tx, dict, *args, **kwargs) + + @staticmethod + def call_custom_dict(tx: "InstructionTranslator", user_cls, *args, **kwargs): + from .builder import SourcelessBuilder + + if not kwargs: + if not args: + args = ({},) + assert len(args) == 1 + arg = args[0] + if isinstance(arg, dict): + return ConstDictVariable(arg, user_cls, mutable_local=MutableLocal()) + elif isinstance(arg, variables.ConstDictVariable): + return arg.clone(user_cls=user_cls, mutable_local=MutableLocal()) + elif isinstance( + arg, + ( + ListVariable, + TupleVariable, + ListIteratorVariable, + variables.IteratorVariable, + ), + ): + items = dict( + x.force_unpack_var_sequence(tx) + for x in arg.force_unpack_var_sequence(tx) + ) + return ConstDictVariable(items, user_cls, mutable_local=MutableLocal()) + elif isinstance(arg, variables.MutableMappingVariable): + # This is applicable for user defined objects which seem like dict, but are not really dicts. For + # example, TensorDict derives from MutableMapping. For such cases, we can directly inline the .items + # method and create a new dict. 
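+ # A rough eager analogue of this fallback (sketch): for a mapping type
+ # that keeps dict's own iteration methods, dict(obj.items()) suffices:
+ #     class MyMap(dict): pass  # stand-in for a TensorDict-like type
+ #     assert dict(MyMap(a=1).items()) == {"a": 1}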
+ if does_not_override_dict_iter_methods(type(arg.value)): + # These are implemented in C, so we will have to manually construct the items + + if tx.output.side_effects.has_pending_mutation(arg): + unimplemented( + f"{user_cls.__name__}.items(): {args} {kwargs} - object is mutated" + ) + + new_dict = dict(arg.value.items()) + return SourcelessBuilder.create(tx, new_dict) + else: + func_var = arg.var_getattr(tx, "items") + if not isinstance(func_var, variables.UserFunctionVariable): + unimplemented(f"{user_cls.__name__}.items(): {args} {kwargs}") + out = tx.inline_user_function_return(func_var, args, kwargs) + if isinstance(out, ConstDictVariable): + return out + return BuiltinVariable(user_cls).call_custom_dict(tx, user_cls, out) + elif not args and kwargs: + items = {ConstantVariable.create(k): v for k, v in kwargs.items()} + return variables.ConstDictVariable( + items, user_cls=user_cls, mutable_local=MutableLocal() + ) + unimplemented(f"{user_cls.__name__}(): {args} {kwargs}") + + @staticmethod + def call_custom_dict_fromkeys( + tx: "InstructionTranslator", user_cls, *args, **kwargs + ): + assert user_cls in {dict, OrderedDict, defaultdict} + if kwargs: + # Only `OrderedDict.fromkeys` accepts `value` passed by keyword + assert user_cls is OrderedDict + assert len(args) == 1 and len(kwargs) == 1 and "value" in kwargs + args = (*args, kwargs.pop("value")) + if len(args) == 0: + raise UserError(TypeError, "fromkeys expected at least 1 argument, got 0") + if len(args) == 1: + args = (*args, ConstantVariable.create(None)) + assert len(args) == 2 + arg, value = args + DictVariableType = ( + ConstDictVariable if user_cls is not defaultdict else DefaultDictVariable + ) + + if isinstance(arg, dict): + arg = [ConstantVariable.create(k) for k in arg.keys()] + return DictVariableType( + dict.fromkeys(arg, value), user_cls, mutable_local=MutableLocal() + ) + elif arg.has_force_unpack_var_sequence(tx): + keys = arg.force_unpack_var_sequence(tx) + if all(is_hashable(v) for v in keys): + return DictVariableType( + dict.fromkeys(keys, value), user_cls, mutable_local=MutableLocal() + ) + unimplemented(f"{user_cls.__name__}.fromkeys(): {args} {kwargs}") + + def call_set(self, tx: "InstructionTranslator", *args, **kwargs): + # Can we merge this implementation and call_dict's one?
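+ # (The branches below mirror CPython's own set() constructor: set(),
+ # set(existing_set) as a shallow copy, set(iterable), and set(d.keys());
+ # e.g., as a sketch, the KeysView case handled last corresponds to:
+ #     assert set({"a": 1}.keys()) == {"a"}
+ # )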
+ assert not kwargs + if not args: + return SetVariable([], mutable_local=MutableLocal()) + assert len(args) == 1 + arg = args[0] + if isinstance(arg, variables.SetVariable): + return arg.clone(mutable_local=MutableLocal()) + elif arg.has_force_unpack_var_sequence(tx): + items = arg.force_unpack_var_sequence(tx) + return SetVariable(items, mutable_local=MutableLocal()) + elif isinstance(arg, variables.UserDefinedObjectVariable) and isinstance( + arg.value, KeysView + ): + iter_fn = arg.var_getattr(tx, "__iter__") + if isinstance(iter_fn, variables.UserMethodVariable): + out = tx.inline_user_function_return(iter_fn, args, kwargs) + if isinstance(out, SetVariable): + return out + return BuiltinVariable(set).call_set(tx, out) + else: + unimplemented(f"set(): {args} {kwargs}") + else: + unimplemented(f"set(): {args} {kwargs}") + + def call_frozenset(self, tx: "InstructionTranslator", *args, **kwargs): + assert not kwargs + if not args: + return FrozensetVariable([]) + assert len(args) == 1 + arg = args[0] + if isinstance(arg, variables.FrozensetVariable): + return FrozensetVariable([x.vt for x in arg.set_items]) + elif arg.has_unpack_var_sequence(tx): + items = arg.unpack_var_sequence(tx) + return FrozensetVariable(items) + else: + unimplemented(f"frozenset(): {args} {kwargs}") + + def call_zip(self, tx: "InstructionTranslator", *args, **kwargs): + if kwargs: + assert len(kwargs) == 1 and "strict" in kwargs + strict = kwargs.pop("strict", False) + args = [ + arg.unpack_var_sequence(tx) if arg.has_unpack_var_sequence(tx) else arg + for arg in args + ] + return variables.ZipVariable(args, strict=strict, mutable_local=MutableLocal()) + + def call_len(self, tx: "InstructionTranslator", *args, **kwargs): + return args[0].call_method(tx, "__len__", args[1:], kwargs) + + def call_getitem(self, tx: "InstructionTranslator", *args, **kwargs): + return args[0].call_method(tx, "__getitem__", args[1:], kwargs) + + def call_isinstance(self, tx: "InstructionTranslator", arg, isinstance_type): + try: + arg_type = arg.python_type() + except NotImplementedError: + unimplemented( + f"isinstance({arg}, {isinstance_type}): can't determine type of {arg}" + ) + + isinstance_type = isinstance_type.as_python_constant() + + if isinstance(arg, variables.TensorVariable) and arg.dtype is not None: + + def _tensor_isinstance(tensor_var, tensor_type): + def check_type(ty): + if ty not in tensortype_to_dtype: + example_val = arg.as_proxy().node.meta["example_value"] + if ( + is_traceable_wrapper_subclass(example_val) + and ty is torch.nn.parameter.Parameter + ): + # N.B: we are calling isinstance directly on the example value. + # torch.nn.Parameter has a meta-class that overrides __isinstance__, + # the isinstance check here allows us to invoke that logic. + return isinstance(example_val, ty) + else: + return issubclass(arg.python_type(), ty) + + dtypes = tensortype_to_dtype[ty] + return arg.dtype in dtypes + + if type(tensor_type) is tuple: + return any(check_type(ty) for ty in tensor_type) + else: + return check_type(tensor_type) + + return variables.ConstantVariable.create( + _tensor_isinstance(arg, isinstance_type) + ) + # UserDefinedObject with C extensions can have torch.Tensor attributes, + # so break graph. 
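+ # (types.MemberDescriptorType is what such a C-level slot attribute looks
+ # like from Python; as a sketch:
+ #     import types
+ #     class S:
+ #         __slots__ = ("x",)
+ #     assert isinstance(S.x, types.MemberDescriptorType)
+ # )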
+ if isinstance(arg, variables.UserDefinedObjectVariable) and isinstance( + arg.value, types.MemberDescriptorType + ): + unimplemented( + f"isinstance called on UserDefinedClass {arg} {isinstance_type}" + ) + # handle __instancecheck__ defined in user class + if ( + isinstance(arg, variables.UserDefinedObjectVariable) + and "__instancecheck__" in isinstance_type.__class__.__dict__ + ): + return variables.ConstantVariable.create( + isinstance_type.__class__.__instancecheck__(isinstance_type, arg.value) + ) + + try: + val = issubclass(arg_type, isinstance_type) + except TypeError: + val = arg_type is isinstance_type + return variables.ConstantVariable.create(val) + + def call_issubclass(self, tx: "InstructionTranslator", left_ty, right_ty): + """Checks if first arg is subclass of right arg""" + try: + left_ty_py = left_ty.as_python_constant() + right_ty_py = right_ty.as_python_constant() + except NotImplementedError: + unimplemented( + f"call_issubclass args not constant left_ty: {left_ty}, right_ty: {right_ty}" + ) + + return variables.ConstantVariable(issubclass(left_ty_py, right_ty_py)) + + def call_super(self, tx: "InstructionTranslator", a, b): + return variables.SuperVariable(a, b) + + def call_next(self, tx: "InstructionTranslator", arg: VariableTracker): + try: + return arg.next_variable(tx) + except Unsupported as ex: + if isinstance(arg, variables.BaseListVariable): + ex.remove_from_stats() + return arg.items[0] + raise + + def call_hasattr(self, tx: "InstructionTranslator", obj, attr): + if attr.is_python_constant(): + name = attr.as_python_constant() + if isinstance(obj, variables.BuiltinVariable): + return variables.ConstantVariable(hasattr(obj.fn, name)) + return obj.call_hasattr(tx, name) + + def call_map(self, tx: "InstructionTranslator", fn, *seqs): + seqs = [ + seq.unpack_var_sequence(tx) if seq.has_unpack_var_sequence(tx) else seq + for seq in seqs + ] + return variables.MapVariable(fn, seqs, mutable_local=MutableLocal()) + + def call_filter(self, tx: "InstructionTranslator", fn, seq): + if seq.has_unpack_var_sequence(tx): + seq_unpacked = seq.unpack_var_sequence(tx) + try: + items = list( + filter( + lambda x: fn.call_function(tx, [x], {}).as_python_constant(), + seq_unpacked, + ) + ) + return variables.TupleVariable(items) + except NotImplementedError: + return + + def call_sum(self, tx: "InstructionTranslator", seq, start=_SENTINEL): + # Special case for sum on tuple of floats and ints + if isinstance(seq, (variables.ListVariable, variables.TupleVariable)) and all( + isinstance(x, variables.ConstantVariable) + and isinstance(x.value, (int, float)) + for x in seq.items + ): + if start is self._SENTINEL: + return variables.ConstantVariable.create( + sum(x.value for x in seq.items), + ) + if isinstance(start, variables.ConstantVariable) and isinstance( + start.value, (int, float) + ): + return variables.ConstantVariable.create( + sum((x.value for x in seq.items), start=start.value), + ) + if seq.has_force_unpack_var_sequence(tx): + if start is self._SENTINEL: + start = variables.ConstantVariable.create(0) + items = seq.force_unpack_var_sequence(tx) + return BuiltinVariable(functools.reduce).call_function( + tx, + [ + BuiltinVariable(operator.add), + variables.TupleVariable(items), + start, + ], + {}, + ) + + def call_reduce( + self, tx: "InstructionTranslator", function, iterable, initial=_SENTINEL + ): + if iterable.has_force_unpack_var_sequence(tx): + items = iterable.force_unpack_var_sequence(tx) + if initial is self._SENTINEL: + value, items = items[0], items[1:] + 
else: + value = initial + for element in items: + value = function.call_function(tx, [value, element], {}) + return value + + def call_getattr( + self, + tx: "InstructionTranslator", + obj: VariableTracker, + name_var: VariableTracker, + default=None, + ): + from .. import trace_rules + from . import ( + ConstantVariable, + GetAttrVariable, + TorchInGraphFunctionVariable, + UserFunctionVariable, + ) + from .builder import SourcelessBuilder, VariableBuilder + + name = name_var.as_python_constant() + + if not name_var.is_python_constant(): + unimplemented("non-const getattr() name") + + if tx.output.side_effects.is_attribute_mutation(obj): + if isinstance(obj, variables.UnspecializedNNModuleVariable): + if ( + name + in ( + "named_parameters", + "parameters", + "named_buffers", + "buffers", + "named_modules", + "modules", + ) + and obj.is_state_mutated + and tx.output.side_effects.has_pending_mutation(obj) + ): + unimplemented( + f"pending mutation on nn module, so graph breaking at {name!r} call" + ) + + if tx.output.side_effects.has_pending_mutation_of_attr(obj, name): + return tx.output.side_effects.load_attr(obj, name) + + if default is not None: + hasattr_var = self.call_hasattr(tx, obj, name_var) + assert hasattr_var.as_python_constant() in (True, False) + if not hasattr_var.as_python_constant(): + return default + + options = {} + if obj.source: + source = AttrSource(obj.source, name) + options["source"] = source + else: + source = None + + if name in {"__bases__", "__base__", "__flags__"}: + try: + value = obj.as_python_constant() + if isinstance(value, type): + if name == "__bases__": + bases = value.__bases__ + if source is not None: + tuple_args = [ + VariableBuilder(tx, GetItemSource(source, i))(b) + for i, b in enumerate(bases) + ] + else: + tuple_args = [ + SourcelessBuilder.create(tx, b) for b in bases + ] + return variables.TupleVariable(tuple_args, **options) + if name == "__base__": + base = value.__base__ + if source is not None: + return VariableBuilder(tx, source)(base) + return SourcelessBuilder.create(tx, base) + if name == "__flags__": + return ConstantVariable.create(value.__flags__) + except NotImplementedError: + pass + + if isinstance(obj, variables.NNModuleVariable): + return obj.var_getattr(tx, name) + elif isinstance( + obj, + ( + variables.TensorVariable, + variables.NamedTupleVariable, + variables.ConstantVariable, + variables.DistributedVariable, + variables.UserDefinedClassVariable, + variables.UserDefinedObjectVariable, + ), + ): + try: + return obj.var_getattr(tx, name) + except NotImplementedError: + return GetAttrVariable(obj, name, **options) + elif isinstance(obj, TorchInGraphFunctionVariable): + # Get OpOverload from an OpOverloadPacket, e.g., torch.ops.aten.add.default. + member = getattr(obj.value, name) + if isinstance( + member, (torch._ops.OpOverloadPacket, torch._ops.OpOverload) + ) and trace_rules.is_aten_op_or_tensor_method(member): + return TorchInGraphFunctionVariable(member, **options) + elif isinstance(obj, DummyModule): + # TODO(mlazos) - Do we need this? 
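+ # (DummyModule comes from the replay-record machinery; the lookup below
+ # is roughly plain attribute access with a __dict__ fast path - a sketch,
+ # ignoring the is_torch special case:
+ #     member = obj.value.__dict__[name] if name in obj.value.__dict__ \
+ #         else getattr(obj.value, name)
+ # )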
+ if obj.is_torch or name not in obj.value.__dict__: + member = getattr(obj.value, name) + else: + member = obj.value.__dict__[name] + + if config.replay_record_enabled: + tx.exec_recorder.record_module_access(obj.value, name, member) + + if source is not None: + return VariableBuilder(tx, source)(member) + else: + return SourcelessBuilder.create(tx, member) + elif istype(obj, UserFunctionVariable) and name in ("__name__", "__module__"): + return ConstantVariable.create(getattr(obj.fn, name)) + else: + try: + return obj.var_getattr(tx, name) + except NotImplementedError: + return GetAttrVariable(obj, name, **options) + + def call_setattr( + self, + tx: "InstructionTranslator", + obj: VariableTracker, + name_var: VariableTracker, + val: VariableTracker, + ): + if isinstance( + obj, + ( + variables.CustomizedDictVariable, + variables.PlacementVariable, + variables.UserDefinedObjectVariable, + ), + ): + return obj.call_method(tx, "__setattr__", [name_var, val], {}) + elif ( + tx.output.side_effects.is_attribute_mutation(obj) + and name_var.is_python_constant() + ): + name = name_var.as_python_constant() + if isinstance(obj, variables.TensorVariable): + from .builder import wrap_fx_proxy + + if name == "requires_grad": + # TODO(voz): Make it work properly + unimplemented( + "mutating requires_grad can introduce a new leaf from non-leaf or vice versa in " + "the middle of the graph, which aot_autograd does not currently know how to handle. " + ) + if name == "data": + # Remove the old reference in tracked fakes - if we don't do this + # new .data value size and shape differences will cause + # tracked fakes to produce incorrect guards. This is sound because the TensorVariable + # coming out of set_() below will be a new one, and get + # installed in tracked fakes. + to_remove = [] + for tf in tx.output.tracked_fakes: + if tf.source == obj.source: + to_remove.append(tf) + for tf in to_remove: + tx.output.tracked_fakes.remove(tf) + + # Step 1 - disable grads + with dynamo_disable_grad(tx), torch.no_grad(): + # Step 2 - call `set_` + out = wrap_fx_proxy( + tx, + tx.output.create_proxy( + "call_function", + torch.Tensor.set_, + *proxy_args_kwargs([obj, val], {}), + ), + ) + + # Step 3 - drop the version counter - this is a step required to get + # .data setting to play correctly with the autograd engine. 
+                    # Essentially, dynamo is trying to faithfully preserve the (absurd)
+                    # behavior of .data= from eager mode
+                    def _lower_version_count_by_1(x):
+                        version = x._version
+                        if version > 0:
+                            version = version - 1
+                        torch._C._autograd._unsafe_set_version_counter(x, version)
+                        return x
+
+                    tx.output.create_proxy(
+                        "call_function",
+                        _lower_version_count_by_1,
+                        (out.as_proxy(),),
+                        {},
+                    )
+                    _lower_version_count_by_1(obj.as_proxy().node.meta["example_value"])
+                    # This handles options prop, guards and ends with a clone
+                    # Step 4 - replace all references to the current object with the new one
+                    return out
+
+            tx.output.side_effects.store_attr(obj, name, val)
+            if name == "_grad":
+                tx.output.side_effects.store_attr(obj, "grad", val)
+
+            return val
+        elif isinstance(obj, variables.UserDefinedObjectVariable):
+            unimplemented(
+                f"setattr(UserDefinedObjectVariable) {type(obj.value).__setattr__}"
+            )
+        elif isinstance(obj, variables.NNModuleVariable):
+            if not tx.output.is_root_tracer():
+                raise AttributeMutationError(
+                    "Can't inplace modify module params/buffers inside HigherOrderOp"
+                )
+            if name_var.is_python_constant() and isinstance(
+                val, variables.TensorVariable
+            ):
+                assigning_fake_val = get_fake_value(val.as_proxy().node, tx)
+
+                try:
+                    getattr_var = obj.var_getattr(tx, name_var.as_python_constant())
+                except AttributeError:
+                    getattr_var = None
+
+                if isinstance(getattr_var, variables.TensorVariable):
+                    # get_fake_val will get the same fake tensor
+                    existing_fake_attr = get_fake_value(getattr_var.as_proxy().node, tx)
+
+                    # same tensor identity, setattr is a no-op
+                    mod_setattr = inspect.getattr_static(obj.module_type, "__setattr__")
+                    if (
+                        existing_fake_attr is assigning_fake_val
+                        and mod_setattr is torch.nn.Module.__setattr__
+                    ):
+                        return getattr_var
+
+            obj.convert_to_unspecialized(tx)
+        # FIXME (tmanlaibaatar) this is an utter hack to unblock HuggingFace export.
+        # Export generally doesn't want to allow mutations on objects directly,
+        # but we don't have a good way to do that right now, so we make it
+        # undefined behaviour and just set attributes directly on the
+        # PretrainedConfig object.
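+        # Illustrative example (the attribute name is hypothetical, not taken
+        # from this file): under export, `config.output_attentions = True` on
+        # a HuggingFace PretrainedConfig is applied eagerly to the real config
+        # object by the branch below.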
+ elif isinstance(obj, variables.dicts.HFPretrainedConfigVariable) and tx.export: + if name_var.is_python_constant() and isinstance( + val, variables.ConstantVariable + ): + setattr( + obj.obj, name_var.as_python_constant(), val.as_python_constant() + ) + return ConstantVariable(None) + + def call_delattr( + self, + tx: "InstructionTranslator", + obj: VariableTracker, + name_var: VariableTracker, + ): + return self.call_setattr(tx, obj, name_var, variables.DeletedVariable()) + + def call_type(self, tx: "InstructionTranslator", obj: VariableTracker): + from .builder import SourcelessBuilder, VariableBuilder + + try: + py_type = obj.python_type() + except NotImplementedError as error: + raise UserError( + UserErrorType.INVALID_INPUT, + str(error), + case_name="unknown_python_type", + ) from None + + if obj.source is None: + return SourcelessBuilder.create(tx, py_type) + else: + return VariableBuilder(tx, TypeSource(obj.source))(py_type) + + def call_reversed(self, tx: "InstructionTranslator", obj: VariableTracker): + if obj.has_unpack_var_sequence(tx): + items = list(reversed(obj.unpack_var_sequence(tx))) + return variables.TupleVariable(items) + + def call_sorted(self, tx: "InstructionTranslator", obj: VariableTracker, **kwargs): + if obj.has_force_unpack_var_sequence(tx) and not isinstance( + obj, variables.TensorVariable + ): + unpacked = obj.force_unpack_var_sequence(tx) + if not all(x.is_python_constant() for x in unpacked): + return + function = kwargs.pop("key", None) + reverse = kwargs.pop( + "reverse", ConstantVariable.create(False) + ).as_python_constant() + assert len(kwargs) == 0 + if function: + items = sorted( + unpacked, + key=lambda x: function.call_function( + tx, [x], {} + ).as_python_constant(), + reverse=reverse, + ) + else: + items = sorted( + unpacked, + key=lambda x: x.as_python_constant(), + reverse=reverse, + ) + return variables.ListVariable(items) + + # neg is a constant fold function, so we only get here if constant fold is not valid + def call_neg(self, tx: "InstructionTranslator", a): + if isinstance(a, SymNodeVariable): + return SymNodeVariable.create( + tx, + (operator.neg)(a.as_proxy()), + sym_num=None, + ) + # None no-ops this handler and lets the driving function proceed + return None + + def call_format(self, tx: "InstructionTranslator", _format_string, *args, **kwargs): + format_string = _format_string.as_python_constant() + return variables.StringFormatVariable.create(format_string, args, kwargs) + + def call_id(self, tx: "InstructionTranslator", *args): + if len(args) > 0 and isinstance(args[0], variables.NNModuleVariable): + nn_mod_variable = args[0] + mod = tx.output.get_submodule(nn_mod_variable.module_key) + return variables.ConstantVariable.create(id(mod)) + elif len(args) == 1 and isinstance( + args[0], variables.UserDefinedObjectVariable + ): + install_guard(args[0].source.make_guard(GuardBuilder.ID_MATCH)) + constant_result = id(args[0].value) + return variables.ConstantVariable.create(constant_result) + elif len(args) == 1 and isinstance(args[0], TensorVariable): + tensor_variable = args[0] + return tensor_variable.call_id(tx) + else: + unimplemented(f"call_id with args {args}") + + def call_deepcopy(self, tx: "InstructionTranslator", x): + unimplemented(f"copy.deepcopy {repr(x)}") + + def _comparison_with_tensor(self, tx: "InstructionTranslator", left, right): + from .builder import wrap_fx_proxy_cls + from .tensor import supported_tensor_comparison_op_values + + op = self.fn + + if op in [operator.is_, operator.is_not]: + is_result = ( + 
isinstance(left, TensorVariable) + and isinstance(right, TensorVariable) + and id(extract_fake_example_value(left.as_proxy().node)) + == id(extract_fake_example_value(right.as_proxy().node)) + ) + if op is operator.is_: + return ConstantVariable.create(is_result) + else: + return ConstantVariable.create(not is_result) + + if op not in supported_tensor_comparison_op_values: + unimplemented(f"{op.__name__}({left}, {right})") + if ( + isinstance(left, TensorVariable) + and isinstance(right, TensorVariable) + and (left.size and right.size) is not None + and left.size != right.size + ): + try: + torch.broadcast_shapes(left.size, right.size) + except RuntimeError: + # not broadcastable, can't be compared + unimplemented(f"{op.__name__}({left}, {right})") + tensor_cls = left if isinstance(left, TensorVariable) else right + proxy = tx.output.create_proxy( + "call_function", op, (left.as_proxy(), right.as_proxy()), {} + ) + return wrap_fx_proxy_cls( + type(tensor_cls), # handle Ndarrays and Tensors + tx, + proxy, + ) + + def _comparison_with_symnode(self, tx: "InstructionTranslator", left, right): + from .tensor import supported_tensor_comparison_op_values + + op = self.fn + + if op not in supported_tensor_comparison_op_values: + unimplemented(f"{op.__name__}({left}, {right})") + + proxy = tx.output.create_proxy( + "call_function", op, (left.as_proxy(), right.as_proxy()), {} + ) + return SymNodeVariable.create( + tx, + proxy, + sym_num=None, + ) + + def call_and_(self, tx: "InstructionTranslator", a, b): + # Rely on constant_handler + if isinstance(a, ConstantVariable) and isinstance(b, ConstantVariable): + return None + if isinstance(a, (SymNodeVariable, ConstantVariable)) and isinstance( + b, (SymNodeVariable, ConstantVariable) + ): + return SymNodeVariable.create( + tx, + tx.output.create_proxy( + "call_function", operator.and_, *proxy_args_kwargs([a, b], {}) + ), + sym_num=None, + ) + if hasattr(a, "set_items") and hasattr(b, "set_items"): + return SetVariable(list(a.set_items & b.set_items)) + # None no-ops this handler and lets the driving function proceed + + def call_or_(self, tx: "InstructionTranslator", a, b): + # Rely on constant_handler + if isinstance(a, ConstantVariable) and isinstance(b, ConstantVariable): + return None + if isinstance(a, (SymNodeVariable, ConstantVariable)) and isinstance( + b, (SymNodeVariable, ConstantVariable) + ): + return SymNodeVariable.create( + tx, + tx.output.create_proxy( + "call_function", operator.or_, *proxy_args_kwargs([a, b], {}) + ), + sym_num=None, + ) + if hasattr(a, "set_items") and hasattr(b, "set_items"): + return SetVariable(list(a.set_items | b.set_items)) + # None no-ops this handler and lets the driving function proceed + return None + + def call_not_(self, tx: "InstructionTranslator", a): + if isinstance(a, SymNodeVariable): + return SymNodeVariable.create( + tx, + tx.output.create_proxy( + "call_function", operator.not_, *proxy_args_kwargs([a], {}) + ), + sym_num=None, + ) + + # Unwrap the underlying ConstDictVariable + if isinstance(a, DictView): + a = a.dv_dict + if isinstance(a, (ListVariable, ConstDictVariable)): + return ConstantVariable.create(len(a.items) == 0) + + return None + + def call_contains( + self, tx: "InstructionTranslator", a: VariableTracker, b: VariableTracker + ): + return a.call_method(tx, "__contains__", [b], {}) + + +@contextlib.contextmanager +def dynamo_disable_grad(tx): + from . 
import GradModeVariable + + org_value = torch.is_grad_enabled() + gmv = GradModeVariable.create(tx, False) + try: + gmv.enter(tx) + yield + finally: + gmv.exit(tx) diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/ctx_manager.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/ctx_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..301b7f3e819345715b95897f640a2f2396848ffd --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/ctx_manager.py @@ -0,0 +1,1145 @@ +# mypy: ignore-errors +import dataclasses +import inspect +import sys +import warnings +from typing import Callable, Dict, List, Optional, TYPE_CHECKING, Union + +import torch._C +from torch._guards import Guard + +from .. import variables +from ..bytecode_transformation import ( + create_call_function, + create_instruction, + create_setup_with, +) +from ..device_interface import get_interface_for_device +from ..exc import unimplemented, Unsupported +from ..guards import GuardBuilder, install_guard +from ..source import AttrSource, GlobalStateSource +from .base import VariableTracker +from .functions import ( + NestedUserFunctionVariable, + UserFunctionVariable, + UserMethodVariable, + WrappedUserFunctionVariable, + WrappedUserMethodVariable, +) +from .user_defined import UserDefinedObjectVariable + + +if TYPE_CHECKING: + from torch._dynamo.symbolic_convert import InstructionTranslator + + +@dataclasses.dataclass +class ContextMangerState: + """ + Mutating `self` in VariableTracker is not allowed because we copy + them. This is a mutable container pointed to by context managers + that won't get copied, so it is safe to mutate. + """ + + cleanup_fn: Optional[Callable] = None + proxy: Optional[torch.fx.Proxy] = None + + def cleanup(self): + if self.cleanup_fn is not None: + self.cleanup_fn() + self.cleanup_fn = None + + def cleanup_assert(self): + assert self.cleanup_fn, "multiple exits?" 
+        self.cleanup()
+
+
+class ContextWrappingVariable(VariableTracker):
+    _nonvar_fields = {
+        "cm_obj",
+        "target_values",
+        "initial_values",
+        "state",
+        *VariableTracker._nonvar_fields,
+    }
+
+    def __init__(
+        self, target_values, initial_values=None, *, state=None, **kwargs
+    ) -> None:
+        super().__init__(**kwargs)
+        self.target_values = target_values
+        self.initial_values = initial_values
+        self.state = ContextMangerState() if state is None else state
+
+    def enter(self, tx):
+        self._call_func(tx, self.target_values)
+        self.set_cleanup_hook(tx)
+        return variables.ConstantVariable.create(None)
+
+    def set_cleanup_hook(self, tx: "InstructionTranslator", fn=None):
+        if fn is None:
+
+            def fn():
+                self._call_func(tx, self.initial_values)
+
+        self.state.cleanup_fn = fn
+        tx.output.add_cleanup_hook(self.state.cleanup)
+
+    def exit(self, tx: "InstructionTranslator", *args):
+        self.state.cleanup_assert()
+        return variables.ConstantVariable.create(None)
+
+    def reconstruct_type(self, codegen):
+        codegen(
+            AttrSource(codegen.tx.import_source(self.module_name()), self.fn_name())
+        )
+
+    def reconstruct(self, codegen):
+        codegen.add_push_null(lambda: self.reconstruct_type(codegen))
+        target_values = self.target_values
+        if not target_values:
+            target_values = ()
+        codegen.extend_output([codegen.create_load_const(val) for val in target_values])
+        codegen.extend_output(create_call_function(len(target_values), False))
+
+    def module_name(self):
+        raise NotImplementedError("module_name called on base")
+
+    def fn_name(self):
+        raise NotImplementedError("fn_name called on base")
+
+    def call_function(
+        self,
+        tx: "InstructionTranslator",
+        args: "List[VariableTracker]",
+        kwargs: "Dict[str, VariableTracker]",
+    ) -> "VariableTracker":
+        assert len(args) == 1
+        if isinstance(args[0], NestedUserFunctionVariable):
+            args[0] = UserFunctionVariable(args[0].get_function())
+        assert isinstance(args[0], (UserMethodVariable, UserFunctionVariable))
+
+        if isinstance(args[0], UserMethodVariable):
+            return WrappedUserMethodVariable(args[0], self)
+
+        if isinstance(args[0], UserFunctionVariable):
+            return WrappedUserFunctionVariable(args[0], self)
+
+
+class GenericContextWrappingVariable(UserDefinedObjectVariable):
+    # Some methods in ContextWrappingVariable assume the arguments are
+    # Python constants, which might not always be the case here.
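+    # For illustration: a user-defined context manager such as
+    #
+    #     class MyCtx:                        # hypothetical example class
+    #         def __enter__(self): ...
+    #         def __exit__(self, *exc): ...
+    #
+    # is modeled by inlining its __enter__/__exit__ through the enter()/exit()
+    # methods below, rather than by replaying constant target_values the way
+    # ContextWrappingVariable subclasses do.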
+    def __init__(self, cm_obj, **kwargs) -> None:
+        assert cm_obj is not None
+        super().__init__(
+            value=cm_obj,
+            value_type=cm_obj.__class__,
+            **kwargs,
+        )
+        self.cm_obj = cm_obj
+
+    def module_name(self):
+        return self.cm_obj.__module__
+
+    def fn_name(self):
+        return type(self.cm_obj).__name__
+
+    def enter(self, tx):
+        source = None if self.source is None else AttrSource(self.source, "__enter__")
+        try:
+            return variables.UserMethodVariable(
+                self.cm_obj.__enter__.__func__,
+                self,
+                source=source,
+            ).call_function(tx, [], {})
+        except Unsupported as e:
+            unimplemented(
+                f"Unsupported context manager {self.cm_obj}'s __enter__ function",
+                from_exc=e,
+            )
+
+    def exit(self, tx: "InstructionTranslator", *args):
+        source = None if self.source is None else AttrSource(self.source, "__exit__")
+        try:
+            x = variables.UserMethodVariable(
+                self.cm_obj.__exit__.__func__,
+                self,
+                source=source,
+            ).call_function(
+                tx,
+                [
+                    variables.ConstantVariable.create(None),
+                    variables.ConstantVariable.create(None),
+                    variables.ConstantVariable.create(None),
+                ],
+                {},
+            )
+        except Unsupported as e:
+            unimplemented(
+                f"Unsupported context manager {self.cm_obj}'s __exit__ function",
+                from_exc=e,
+            )
+
+        tx.generic_context_manager_depth -= 1
+        return x
+
+
+class GradInplaceRequiresGradCtxManagerVariable(ContextWrappingVariable):
+    """represents the functorch inplace-requires-grad-allowed flag
+    (torch._C._functorch.set_inplace_requires_grad_allowed)"""
+
+    @staticmethod
+    def create(tx: "InstructionTranslator", target_values, **kwargs):
+        return GradInplaceRequiresGradCtxManagerVariable(
+            target_values=target_values,
+            initial_values=None,
+            **kwargs,
+        )
+
+    def enter(self, tx):
+        [enabled] = self.target_values
+        self.prev_state = torch._C._functorch.get_inplace_requires_grad_allowed()
+        torch._C._functorch.set_inplace_requires_grad_allowed(enabled)
+        self.set_cleanup_hook(
+            tx,
+            lambda: torch._C._functorch.set_inplace_requires_grad_allowed(
+                self.prev_state
+            ),
+        )
+        self.state.proxy = tx.output.create_node(
+            "call_function",
+            torch._C._functorch.set_inplace_requires_grad_allowed,
+            (enabled,),
+            {},
+        )
+        return variables.ConstantVariable.create(None)
+
+    def exit(self, tx: "InstructionTranslator", *args):
+        self.state.cleanup()
+        tx.output.create_node(
+            "call_function",
+            torch._C._functorch.set_inplace_requires_grad_allowed,
+            (self.prev_state,),
+            {},
+        )
+        return variables.ConstantVariable.create(None)
+
+
+class JvpIncrementNestingCtxManagerVariable(ContextWrappingVariable):
+    """represents torch.func.jvp increment/decrement nesting"""
+
+    # A guard is needed as the jvp level is baked into the torch FX graph.
+    # This is fine if jvp is only called from within the function
+    # being compiled. But the FX graph may be invalid in the case of a jvp
+    # call from eager that calls the compiled function, as the jvp levels
+    # may be different.
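+    # Hypothetical scenario the guard protects against:
+    #
+    #     compiled = torch.compile(f)                  # traced at some jvp level
+    #     torch.func.jvp(compiled, primals, tangents)  # runs at a deeper level
+    #
+    # The FUNCTORCH_STACK_MATCH guard below fails in that case, forcing a
+    # recompile instead of reusing a graph with a stale level baked in.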
+ _guards_singleton = Guard(GlobalStateSource(), GuardBuilder.FUNCTORCH_STACK_MATCH) + + @staticmethod + def create(tx: "InstructionTranslator", **kwargs): + var = JvpIncrementNestingCtxManagerVariable( + target_values=None, + initial_values=None, + **kwargs, + ) + return var + + def enter(self, tx): + install_guard(self._guards_singleton) + jvp_level = torch._functorch.eager_transforms.enter_jvp_nesting() + self.set_cleanup_hook( + tx, lambda: torch._functorch.eager_transforms.exit_jvp_nesting() + ) + self.state.proxy = tx.output.create_node( + "call_function", + torch._C._functorch._jvp_increment_nesting, + (), + {}, + ) + return variables.ConstantVariable.create(jvp_level) + + def exit(self, tx: "InstructionTranslator", *args): + self.state.cleanup() + tx.output.create_node( + "call_function", torch._C._functorch._jvp_decrement_nesting, (), {} + ) + return variables.ConstantVariable.create(None) + + +class SetFwdGradEnabledContextManager(ContextWrappingVariable): + """represents torch.autograd.forward_ad._set_fwd_grad_enabled() to enable/disable fwd grad""" + + @staticmethod + def create(tx: "InstructionTranslator", target_values, **kwargs): + return SetFwdGradEnabledContextManager( + target_values=target_values, + initial_values=None, + **kwargs, + ) + + def enter(self, tx): + [mode] = self.target_values + self.prev_state = torch._C._is_fwd_grad_enabled() + torch._C._set_fwd_grad_enabled(mode) + self.set_cleanup_hook( + tx, + lambda: torch._C._set_fwd_grad_enabled(self.prev_state), + ) + self.state.proxy = tx.output.create_node( + "call_function", + torch._C._set_fwd_grad_enabled, + (mode,), + {}, + ) + return variables.ConstantVariable.create(None) + + def exit(self, tx: "InstructionTranslator", *args): + self.state.cleanup() + tx.output.create_node( + "call_function", + torch._C._set_fwd_grad_enabled, + (self.prev_state,), + {}, + ) + return variables.ConstantVariable.create(None) + + +class DualLevelContextManager(ContextWrappingVariable): + """Represents torch.autograd.forward_ad.dual_level ctx manager""" + + _guards_singleton = Guard(GlobalStateSource(), GuardBuilder.DUAL_LEVEL) + + @staticmethod + def create(tx: "InstructionTranslator", **kwargs): + return DualLevelContextManager( + target_values=None, + initial_values=None, + **kwargs, + ) + + def enter(self, tx): + install_guard(self._guards_singleton) + self.new_level = torch.autograd.forward_ad.enter_dual_level() + self.set_cleanup_hook( + tx, lambda: torch.autograd.forward_ad.exit_dual_level(level=self.new_level) + ) + self.state.proxy = tx.output.create_node( + "call_function", + torch._C._enter_dual_level, + (), + {}, + ) + return variables.ConstantVariable.create(self.new_level) + + def exit(self, tx: "InstructionTranslator", *args): + self.state.cleanup() + tx.output.create_node( + "call_function", + torch._C._exit_dual_level, + (self.new_level,), + {}, + ) + return variables.ConstantVariable.create(None) + + +class GradIncrementNestingCtxManagerVariable(ContextWrappingVariable): + """represents torch.func.grad increment/decrement nesting""" + + # A guard is needed as the grad level is baked into the torch FX graph + # This is fine if grad is only called from within the function + # being compiled. But the FX graph may be invalid in the case of a grad + # call from eager that calls the compiled function, as the grad levels + # may be different. 
+ _guards_singleton = Guard(GlobalStateSource(), GuardBuilder.FUNCTORCH_STACK_MATCH) + + @staticmethod + def create(tx: "InstructionTranslator", **kwargs): + var = GradIncrementNestingCtxManagerVariable( + target_values=None, + initial_values=None, + **kwargs, + ) + return var + + def enter(self, tx): + install_guard(self._guards_singleton) + grad_level = torch._C._functorch._grad_increment_nesting() + self.set_cleanup_hook(tx, lambda: torch._C._functorch._grad_decrement_nesting()) + self.state.proxy = tx.output.create_node( + "call_function", + torch._C._functorch._grad_increment_nesting, + (), + {}, + ) + return variables.ConstantVariable.create(grad_level) + + def exit(self, tx: "InstructionTranslator", *args): + self.state.cleanup() + tx.output.create_node( + "call_function", torch._C._functorch._grad_decrement_nesting, (), {} + ) + return variables.ConstantVariable.create(None) + + +class CatchWarningsCtxManagerVariable(ContextWrappingVariable): + """Delay a call to warnings.catch_warnings""" + + @staticmethod + def create(tx: "InstructionTranslator", catch_warnings_args): + return CatchWarningsCtxManagerVariable( + catch_warnings_args=catch_warnings_args, + target_values=None, + initial_values=None, + ) + + def __init__(self, catch_warnings_args, **kwargs) -> None: + assert isinstance(catch_warnings_args, dict), catch_warnings_args + super().__init__(**kwargs) + self.catch_warnings_args = catch_warnings_args + + def enter(self, tx): + kwargs = { + k: v.as_python_constant() for k, v in self.catch_warnings_args.items() + } + ctx_val = warnings.catch_warnings(**kwargs) + self.set_cleanup_hook(tx, lambda: ctx_val.__exit__(None, None, None)) + return variables.ConstantVariable.create(ctx_val.__enter__()) + + def reconstruct(self, cg): + cg.add_push_null(lambda: cg.load_import_from("warnings", "catch_warnings")) + cg.foreach(self.catch_warnings_args.values()) + keys = tuple(self.catch_warnings_args.keys()) + cg.extend_output(cg.create_call_function_kw(len(keys), keys, False)) + + +class VmapIncrementNestingCtxManagerVariable(ContextWrappingVariable): + """represents torch VMap increment/decrement nesting""" + + # A guard is needed as the vmap level is baked into the torch FX graph + # generated. This is fine if vmap is only called from within the function + # being compiled. But the FX graph may be invalid in the case of a vmap + # call from eager that calls the compiled function, as the vmap levels + # may be different. 
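+    # e.g. (hypothetical) `torch.vmap(torch.compile(f))(xs)` called from eager
+    # would run the graph at a vmap level not seen at trace time; the
+    # FUNCTORCH_STACK_MATCH guard below triggers a recompile rather than
+    # reusing the stale graph.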
+ _guards_singleton = Guard(GlobalStateSource(), GuardBuilder.FUNCTORCH_STACK_MATCH) + + @staticmethod + def create(tx: "InstructionTranslator", target_values, **kwargs): + var = VmapIncrementNestingCtxManagerVariable( + target_values=target_values, + initial_values=None, + **kwargs, + ) + return var + + def enter(self, tx): + install_guard(self._guards_singleton) + batch_size, randomness = self.target_values + vmap_level = torch._C._functorch._vmap_increment_nesting(batch_size, randomness) + self.set_cleanup_hook(tx, lambda: torch._C._functorch._vmap_decrement_nesting()) + self.state.proxy = tx.output.create_node( + "call_function", + torch._C._functorch._vmap_increment_nesting, + (batch_size, randomness), + {}, + ) + return variables.ConstantVariable.create(vmap_level) + + def exit(self, tx: "InstructionTranslator", *args): + self.state.cleanup() + tx.output.create_node( + "call_function", torch._C._functorch._vmap_decrement_nesting, (), {} + ) + return variables.ConstantVariable.create(None) + + +class GradModeVariable(ContextWrappingVariable): + """represents torch.{no_grad,enable_grad,set_grad_mode}()""" + + _guards_singleton = Guard(GlobalStateSource(), GuardBuilder.GRAD_MODE) + + @staticmethod + def create(tx: "InstructionTranslator", target_value, initialized=False, **kwargs): + var = GradModeVariable( + target_values=[target_value], + initial_values=[torch.is_grad_enabled()], + **kwargs, + ) + if initialized: + var._call_func(tx, var.target_values) + return var + + def __init__( + self, target_values, initial_values=None, initialized=True, **kwargs + ) -> None: + super().__init__( + target_values=target_values, initial_values=initial_values, **kwargs + ) + install_guard(self._guards_singleton) + + def enter(self, tx): + self._call_func(tx, self.target_values) + return variables.ConstantVariable.create(None) + + def exit(self, tx: "InstructionTranslator", *args): + self._call_func(tx, self.initial_values) + return variables.ConstantVariable.create(None) + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ): + self._call_func(tx, self.initial_values) # undo eager initialization + return super().call_function(tx, args, kwargs) + + def _call_func(self, tx: "InstructionTranslator", values): + assert len(values) == 1 + value = values[0] + # Coalesce grad mode mutations + if torch.is_grad_enabled() != value: + tx.output.create_node( + "call_function", torch._C._set_grad_enabled, (value,), {} + ) + torch._C._set_grad_enabled(value) + + def module_name(self): + return "torch" + + def fn_name(self): + return "set_grad_enabled" + + +class InferenceModeVariable(ContextWrappingVariable): + @staticmethod + def create(tx: "InstructionTranslator", target_value, **kwargs): + var = InferenceModeVariable( + [target_value], initial_values=torch.is_inference_mode_enabled(), **kwargs + ) + return var + + def __init__( + self, + target_values, + initial_values=None, + **kwargs, + ) -> None: + if initial_values is None: + # This must be called here since function defaults are evaluated at import time + initial_values = torch.is_inference_mode_enabled() + super().__init__( + target_values=target_values, initial_values=initial_values, **kwargs + ) + self.target_values = target_values + + def exit(self, tx: "InstructionTranslator", *args): + self.state.cleanup_assert() + tx.output.create_node( + "call_function", + torch.autograd.grad_mode._exit_inference_mode, + (self.state.proxy,), + {}, + ) + + def enter(self, tx): + ctx 
= torch.autograd.grad_mode._enter_inference_mode(*self.target_values) + self.set_cleanup_hook( + tx, lambda: torch.autograd.grad_mode._exit_inference_mode(ctx) + ) + self.state.proxy = tx.output.create_node( + "call_function", + torch.autograd.grad_mode._enter_inference_mode, + (*self.target_values,), + {}, + ) + + def module_name(self): + return "torch" + + def fn_name(self): + return "inference_mode" + + +class CUDADeviceVariable(ContextWrappingVariable): + """represents torch.cuda.device""" + + @staticmethod + def create(tx: "InstructionTranslator", device, **kwargs): + var = CUDADeviceVariable( + target_values=[torch.cuda._get_device_index(device, optional=True)], + initial_values=None, + **kwargs, + ) + return var + + def __init__( + self, + target_values, + initial_values=None, + **kwargs, + ) -> None: + super().__init__( + target_values=target_values, initial_values=initial_values, **kwargs + ) + self.target_values = target_values + + def exit(self, tx: "InstructionTranslator", *args): + self.state.cleanup_assert() + tx.output.create_node( + "call_function", + torch.cuda._maybe_exchange_device, + (self.state.proxy,), + {}, + ) + return variables.ConstantVariable.create(False) + + def enter(self, tx): + prev_idx = torch.cuda._exchange_device(*self.target_values) + self.set_cleanup_hook(tx, lambda: torch.cuda._maybe_exchange_device(prev_idx)) + self.state.proxy = tx.output.create_node( + "call_function", + torch.cuda._exchange_device, + (*self.target_values,), + {}, + ) + + def module_name(self): + return "torch.cuda" + + def fn_name(self): + return "device" + + +class TorchFunctionDisableVariable(ContextWrappingVariable): + """represents whether torch function overrides are enabled or not""" + + _guards_singleton = Guard(GlobalStateSource(), GuardBuilder.TORCH_FUNCTION_STATE) + + @staticmethod + def create(tx: "InstructionTranslator", **kwargs): + var = TorchFunctionDisableVariable( + target_values=[False], + initial_values=[tx.output.torch_function_enabled], + **kwargs, + ) + # mlazos: I think this is here to make sure we don't reinvoke on clone() + var._call_func(tx, [False]) + var.set_cleanup_hook(tx) + return var + + def __init__(self, target_values, initial_values=None, **kwargs) -> None: + super().__init__( + target_values=target_values, initial_values=initial_values, **kwargs + ) + install_guard(self._guards_singleton) + + def enter(self, tx): + return variables.ConstantVariable.create(None) + + def _call_func(self, tx: "InstructionTranslator", values): + assert len(values) == 1 + tx.output.set_torch_function_state(values[0]) + + +class DeterministicAlgorithmsVariable(ContextWrappingVariable): + """represents torch.{are_deterministic_algorithms_enabled,use_deterministic_algorithms}()""" + + _guards_singleton = Guard( + GlobalStateSource(), GuardBuilder.DETERMINISTIC_ALGORITHMS + ) + + @staticmethod + def create(tx: "InstructionTranslator", target_value, **kwargs): + var = DeterministicAlgorithmsVariable( + target_values=[target_value], + initial_values=[torch.are_deterministic_algorithms_enabled()], + **kwargs, + ) + var._call_func(tx, [target_value]) + var.set_cleanup_hook(tx) + return var + + def __init__(self, target_values, initial_values=None, **kwargs) -> None: + super().__init__( + target_values=target_values, initial_values=initial_values, **kwargs + ) + install_guard(self._guards_singleton) + + def enter(self, tx): + return variables.ConstantVariable.create(None) + + def _call_func(self, tx: "InstructionTranslator", values): + assert len(values) == 1 + value = 
values[0] + tx.output.create_node( + "call_function", torch._C._set_deterministic_algorithms, (value,), {} + ), + torch._C._set_deterministic_algorithms(value) + + def module_name(self): + return "torch" + + def fn_name(self): + return "use_deterministic_algorithms" + + +class DisabledSavedTensorsHooksVariable(ContextWrappingVariable): + """represents torch.autograd.graph.disable_saved_tensors_hook.""" + + @staticmethod + def create(tx: "InstructionTranslator", target_value, **kwargs): + var = DisabledSavedTensorsHooksVariable( + target_values=[target_value], + initial_values=[ + torch._C._autograd._saved_tensors_hooks_get_disabled_error_message() + ], + **kwargs, + ) + var._call_func(tx, [target_value]) + var.set_cleanup_hook(tx) + return var + + def __init__(self, target_values, initial_values=None, **kwargs) -> None: + super().__init__( + target_values=target_values, initial_values=initial_values, **kwargs + ) + + def enter(self, tx): + return variables.ConstantVariable.create(None) + + def _call_func(self, tx: "InstructionTranslator", values): + assert len(values) == 1 + value = values[0] + if value is not None: + # Disable `saved_tensors_hooks` with message (`value`) + # OR + # we are exiting this context and restoring the previous message. + tx.output.create_node( + "call_function", + torch._C._autograd._saved_tensors_hooks_disable, + (value,), + {}, + ) + torch._C._autograd._saved_tensors_hooks_disable(value) + else: + # We are exiting this context and if prev_message was None, we re-enable `saved_tensors_hooks`. + tx.output.create_node( + "call_function", torch._C._autograd._saved_tensors_hooks_enable, (), {} + ) + torch._C._autograd._saved_tensors_hooks_enable() + + def module_name(self): + return "torch.autograd.graph" + + def fn_name(self): + return "disable_saved_tensors_hooks" + + +class AutocastModeVariable(ContextWrappingVariable): + @staticmethod + def create(func, args, kwargs): + assert func in [ + torch.amp.autocast_mode.autocast, + torch.cuda.amp.autocast, + torch.cpu.amp.autocast, + ] + # device_type : str, + # dtype : Optional[_dtype] = None, + # enabled : bool = True, + # cache_enabled : Optional[bool] = None):cache_enabled + bound_args = inspect.signature(func).bind(*args, **kwargs) + bound_args.apply_defaults() + target_values = [] + kwargs.clear() + + for key in ["device_type", "dtype", "enabled", "cache_enabled"]: + if key == "device_type" and func in [ + torch.cuda.amp.autocast, + torch.cpu.amp.autocast, + ]: + arg = "cuda" if func is torch.cuda.amp.autocast else "cpu" + else: + arg = bound_args.arguments[key] + if isinstance(arg, VariableTracker): + target_values.append(arg.as_python_constant()) + else: + target_values.append(arg) + + var = AutocastModeVariable(target_values, initial_values=None, **kwargs) + return var + + def __init__(self, target_values, initial_values=None, **kwargs) -> None: + super().__init__( + target_values=target_values, initial_values=initial_values, **kwargs + ) + self.target_values = target_values + + def exit(self, tx: "InstructionTranslator", *args): + self.state.cleanup_assert() + tx.output.create_node( + "call_function", torch.amp._exit_autocast, (self.state.proxy,), {} + ) + + def enter(self, tx): + ctx = torch.amp._enter_autocast(*self.target_values) + self.set_cleanup_hook(tx, lambda: torch.amp._exit_autocast(ctx)) + self.state.proxy = tx.output.create_node( + "call_function", torch.amp._enter_autocast, (*self.target_values,), {} + ) + + def module_name(self): + return "torch.amp.autocast_mode" + + def fn_name(self): + return 
"autocast" + + +class NullContextVariable(ContextWrappingVariable): + """ + This class represents Python contextlib.nullcontext. + It's used as a placeholder for other context managers that Dynamo doesn't + support yet, e.g, torch.autograd.profiler.record_function. + """ + + def __init__(self, target_values=None, **kwargs) -> None: + super().__init__(target_values=target_values, **kwargs) + + def enter(self, tx): + return variables.ConstantVariable.create(None) + + def exit(self, tx: "InstructionTranslator", *args): + return variables.ConstantVariable.create(None) + + def module_name(self): + return "contextlib" + + def fn_name(self): + return "nullcontext" + + +class StreamContextVariable(ContextWrappingVariable): + @staticmethod + def create(tx: "InstructionTranslator", target_value, **kwargs): + from .builder import wrap_fx_proxy_cls + + current_stream_method = get_interface_for_device( + target_value.device + ).current_stream + current_stream = wrap_fx_proxy_cls( + StreamVariable, + tx, + tx.output.create_proxy( + "call_function", + current_stream_method, + (None,), + {}, + ), + ) + return StreamContextVariable( + target_values=[target_value], + initial_values=[current_stream], + device=target_value.device, + **kwargs, + ) + + def __init__(self, target_values, device, initial_values=None, **kwargs) -> None: + super().__init__( + target_values=target_values, initial_values=initial_values, **kwargs + ) + self.device = device + self.set_stream = get_interface_for_device(self.device).set_stream + self.set_stream_id = get_interface_for_device(self.device)._set_stream_by_id + + def enter(self, tx): + # stream generated inside the traced function + if self.target_values[0].as_proxy() is not None: + tx.output.create_proxy( + "call_function", + self.set_stream, + (self.target_values[0].as_proxy(),), + {}, + ) + # stream passed from outside the traced function + else: + stream = self.target_values[0].value + tx.output.create_proxy( + "call_function", + self.set_stream_id, + (stream.stream_id, stream.device_index, stream.device_type), + {}, + ) + self.set_stream(self.target_values[0].value) + self.set_cleanup_hook(tx, lambda: self.set_stream(self.initial_values[0].value)) + + def exit(self, tx: "InstructionTranslator", *args): + tx.output.create_proxy( + "call_function", + self.set_stream, + (self.initial_values[0].as_proxy(),), + {}, + ) + self.state.cleanup_assert() + + +class PreserveVersionContextVariable(ContextWrappingVariable): + """ + Wraps torch.autograd._unsafe_preserve_version_counter + """ + + @staticmethod + def constructor(tx): + return variables.LambdaVariable( + lambda tensor: PreserveVersionContextVariable( + tensor, + tensor.var_getattr(tx, "_version"), + ) + ) + + def __init__(self, tensor, prev_version, **kwargs) -> None: + kwargs.setdefault("target_values", None) + super().__init__(**kwargs) + self.tensor = tensor + self.prev_version = prev_version + + def enter(self, tx): + pass + + def exit(self, tx: "InstructionTranslator", *args): + from ..tensor_version_op import _unsafe_set_version_counter + + return variables.TorchInGraphFunctionVariable( + _unsafe_set_version_counter + ).call_function(tx, [self.tensor, self.prev_version], {}) + + def reconstruct(self, codegen): + unimplemented( + "torch.autograd._unsafe_preserve_version_counter with graph break" + ) + + +class FSDPParamGroupUseTrainingStateVariable(ContextWrappingVariable): + _guards_singleton = Guard(GlobalStateSource(), GuardBuilder.FSDP_TRAINING_STATE) + + @staticmethod + def create(tx: "InstructionTranslator", 
param_group_var, target_value, **kwargs): + var = FSDPParamGroupUseTrainingStateVariable( + param_group_var=param_group_var, + target_values=[target_value], + initial_values=[param_group_var.value._training_state], + **kwargs, + ) + return var + + def __init__( + self, param_group_var, target_values, initial_values=None, **kwargs + ) -> None: + super().__init__( + target_values=target_values, initial_values=initial_values, **kwargs + ) + self.param_group_var = param_group_var + install_guard(self._guards_singleton) + + def enter(self, tx): + self._call_func(tx, self.target_values) + return variables.ConstantVariable.create(None) + + def exit(self, tx: "InstructionTranslator", *args): + self._call_func(tx, self.initial_values) + return variables.ConstantVariable.create(None) + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ): + self._call_func(tx, self.initial_values) # undo eager initialization + return super().call_function(tx, args, kwargs) + + def _call_func(self, tx: "InstructionTranslator", values): + assert len(values) == 1 + value = values[0] + if self.param_group_var.value._training_state != value: + self.param_group_var.call_method( + tx, + "__setattr__", + ( + variables.ConstantVariable.create("_training_state"), + variables.EnumVariable(value), + ), + {}, + ) + self.param_group_var.value._training_state = value + + def module_name(self): + return "torch.distributed._composable.fsdp._fsdp_param_group.FSDPParamGroup" + + def fn_name(self): + return "use_training_state" + + +class StreamVariable(VariableTracker): + def __init__(self, proxy, value, device, **kwargs) -> None: + if proxy is not None and "example_value" in proxy.node.meta: + assert proxy.node.meta["example_value"] == value + assert ( + value.device.type == device.type + ), "stream value is not equal to the passed device" + super().__init__(**kwargs) + self.proxy = proxy + self.value = value + self.device = device + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + assert hasattr(self.value, name), f"no stream method found named {name}" + assert name in [ + "wait_stream", + "synchronize", + "query", + "record_event", + "wait_event", + ], f" unsupported stream method {name}" + + from ..utils import proxy_args_kwargs + from .builder import wrap_fx_proxy_cls + + if name in ("wait_stream", "synchronize", "wait_event"): + tx.output.create_proxy( + "call_method", name, *proxy_args_kwargs([self] + args, kwargs) + ) + return variables.ConstantVariable(None) + elif name == "query": + return wrap_fx_proxy_cls( + target_cls=variables.ConstantVariable, + tx=tx, + proxy=tx.output.create_proxy( + "call_method", name, *proxy_args_kwargs([self] + args, kwargs) + ), + ) + elif name == "record_event": + return wrap_fx_proxy_cls( + target_cls=EventVariable, + tx=tx, + proxy=tx.output.create_proxy( + "call_method", name, *proxy_args_kwargs([self] + args, kwargs) + ), + ) + else: + unimplemented(self.device + " stream method " + name + " unsupported") + + def as_proxy(self): + return self.proxy + + def reconstruct(self, codegen): + # If we got here, this stream is fully subsumed by the graph - this means it is + # not an input or global + assert not self.source + # Since we just proved that - for other such structures, like lists and dicts, reconstruction + # is fine and sound according to dynamo principles of treating collectives. 
However, + # streams are special in that we want to preserve the identity of the stream as the same as in the graph + # Normally, we would do this via codegen for the proxy mapping to an output - we cannot do this yet, as we do not + # yet have a plan for how we want to handle the case where the stream is used as an input or an output. Pending + # design, to unblock current work, we lift the stream into a global and then codegen bytecode to load it from there. + prefix = f"_stream_{self.device}" + name = codegen.tx.output.install_global_by_id(prefix, self.value) + codegen.append_output(codegen.create_load_global(name, add=True)) + + +class EventVariable(VariableTracker): + def __init__(self, proxy, value, **kwargs) -> None: + if proxy is not None and "example_value" in proxy.node.meta: + assert proxy.node.meta["example_value"] == value + super().__init__(**kwargs) + self.proxy = proxy + self.value = value + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + from ..utils import proxy_args_kwargs + from .builder import wrap_fx_proxy_cls + + if name in ("wait", "record", "synchronize"): + tx.output.create_proxy( + "call_method", name, *proxy_args_kwargs([self] + args, kwargs) + ) + return variables.ConstantVariable(None) + elif name == "query": + return wrap_fx_proxy_cls( + target_cls=variables.ConstantVariable, + tx=tx, + proxy=tx.output.create_proxy( + "call_method", name, *proxy_args_kwargs([self] + args, kwargs) + ), + ) + else: + unimplemented(f"event method {name} unsupported") + + def as_proxy(self): + return self.proxy + + def reconstruct(self, codegen): + # If we got here, this event is fully subsumed by the graph - this means it is + # not an input or global + assert not self.source + # Similar to stream handling, we lift the event into a global and then codegen bytecode to load it from there. + prefix = "_event" + name = codegen.tx.output.install_global_by_id(prefix, self.value) + codegen.append_output(codegen.create_load_global(name, add=True)) + + +class WithExitFunctionVariable(VariableTracker): + _nonvar_fields = { + "target", + *VariableTracker._nonvar_fields, + } + + def __init__( + self, + ctx: Union[ContextWrappingVariable, GenericContextWrappingVariable], + target, + **kwargs, + ) -> None: + super().__init__(**kwargs) + assert isinstance( + ctx, (ContextWrappingVariable, GenericContextWrappingVariable) + ) + self.ctx = ctx + self.target = target + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + assert not kwargs + return self.ctx.exit(tx, *args) + + def reconstruct(self, codegen): + # Note here we reconstruct the context manager rather than the + # exit function. The handler generated by BlockStackEntry + # will re-enter the context in the resume function. 
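+        # Illustrative shape of the code being resumed:
+        #
+        #     with ctx:
+        #         ...        # <- graph break here
+        #
+        # so the generated bytecode rebuilds ctx and re-runs SETUP_WITH on it
+        # before continuing with the rest of the block.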
+        self.ctx.reconstruct_type(codegen)
+        if codegen.tx.output.partial_convert:
+            if sys.version_info >= (3, 11):
+                codegen.append_output(create_instruction("PUSH_NULL"))
+                if sys.version_info < (3, 13):
+                    codegen.append_output(create_instruction("SWAP", arg=2))
+            codegen.extend_output(
+                [codegen.create_load_const(val) for val in self.ctx.target_values]
+            )
+            codegen.extend_output(
+                create_call_function(len(self.ctx.target_values), False)
+            )
+            codegen.append_output(create_setup_with(self.target))
+            codegen.append_output(create_instruction("POP_TOP"))
diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/dicts.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/dicts.py
new file mode 100644
index 0000000000000000000000000000000000000000..e3ca0b43c0a6a75d7a4cc6901c24de189a246a5f
--- /dev/null
+++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/dicts.py
@@ -0,0 +1,1046 @@
+# mypy: ignore-errors
+
+import collections
+import dataclasses
+import functools
+import inspect
+import sys
+from typing import Dict, List, Optional, TYPE_CHECKING
+
+from torch._subclasses.fake_tensor import is_fake
+
+from .. import polyfills, variables
+from ..bytecode_transformation import create_call_function, create_instruction
+from ..eval_frame import skip_code
+from ..exc import raise_observed_exception, unimplemented
+from ..guards import GuardBuilder, install_guard
+from ..source import AttrSource, GetItemSource
+from ..utils import dict_keys, dict_values, istype, specialize_symnode
+from .base import MutableLocal, VariableTracker
+from .constant import ConstantVariable
+
+
+if TYPE_CHECKING:
+    from torch._dynamo.symbolic_convert import InstructionTranslator
+
+
+# [Adding a new supported class within the keys of ConstDictVariable]
+# - Add its tracker type to is_hashable
+# - (perhaps) Define how it is compared in _HashableTracker._eq_impl
+
+
+def is_hashable(x):
+    if isinstance(x, variables.TensorVariable):
+        # Tensors are hashable if they have an example_value (a fake tensor)
+        # Most VT's should have one.
+        # It'd be nice if at some point we could assert that they all have one
+        return x.as_proxy().node.meta.get("example_value") is not None
+    elif isinstance(x, variables.TupleVariable):
+        return all(is_hashable(e) for e in x.items)
+    else:
+        return isinstance(
+            x,
+            (
+                variables.BuiltinVariable,
+                variables.SymNodeVariable,
+                variables.ConstantVariable,
+                variables.EnumVariable,
+                variables.user_defined.UserDefinedClassVariable,
+                variables.UserFunctionVariable,
+                variables.SkipFunctionVariable,
+                variables.misc.NumpyVariable,
+                variables.NNModuleVariable,
+                variables.UnspecializedNNModuleVariable,
+                variables.MethodWrapperVariable,
+                variables.TorchInGraphFunctionVariable,
+                variables.TypingVariable,
+                variables.FunctoolsPartialVariable,
+            ),
+        )
+
+
+class ConstDictVariable(VariableTracker):
+    _nonvar_fields = {
+        "user_cls",
+        *VariableTracker._nonvar_fields,
+    }
+
+    class _HashableTracker:
+        """
+        Auxiliary opaque internal class that wraps a VariableTracker and makes it hashable.
+        This should not be seen or touched by anything outside of ConstDictVariable and its children.
+        Note that it's also fine to put VTs into dictionaries and sets, but doing so does not take into account aliasing.
+        """
+
+        def __init__(self, vt) -> None:
+            # We specialize SymNodes
+            vt = specialize_symnode(vt)
+            # TODO Temporarily removed to figure out which keys we are breaking on
+            # and add proper support for them
+            if not is_hashable(vt):
+                unimplemented(f"Dict key of type {type(vt)}. Key: {vt}")
+            self.vt = vt
+
+        @property
+        def underlying_value(self):
+            if isinstance(self.vt, variables.TensorVariable):
+                x = self.vt.as_proxy().node.meta["example_value"]
+            elif isinstance(self.vt, variables.TupleVariable):
+                Hashable = ConstDictVariable._HashableTracker
+                x = tuple(Hashable(e).underlying_value for e in self.vt.items)
+            elif isinstance(self.vt, variables.NNModuleVariable):
+                return self.vt.module
+            elif isinstance(self.vt, variables.UnspecializedNNModuleVariable):
+                return self.vt.value
+            elif isinstance(self.vt, variables.UserFunctionVariable):
+                return self.vt.get_function()
+            else:
+                x = self.vt.as_python_constant()
+            return x
+
+        def __hash__(self):
+            return hash(self.underlying_value)
+
+        @staticmethod
+        def _eq_impl(a, b):
+            # TODO: Put this in utils and share it between variables/builtin.py and here
+            if type(a) != type(b):
+                return False
+            elif isinstance(a, tuple):
+                Hashable = ConstDictVariable._HashableTracker
+                return len(a) == len(b) and all(
+                    Hashable._eq_impl(u, v) for u, v in zip(a, b)
+                )
+            elif is_fake(a):
+                return a is b
+            else:
+                return a == b
+
+        def __eq__(self, other: "ConstDictVariable._HashableTracker") -> bool:
+            Hashable = ConstDictVariable._HashableTracker
+            assert isinstance(other, Hashable) or ConstantVariable.is_literal(
+                other
+            ), type(other)
+            if isinstance(other, Hashable):
+                return Hashable._eq_impl(self.underlying_value, other.underlying_value)
+
+            # constant
+            return Hashable._eq_impl(self.underlying_value, other)
+
+    def __init__(
+        self, items: Dict[VariableTracker, VariableTracker], user_cls=dict, **kwargs
+    ) -> None:
+        super().__init__(**kwargs)
+
+        Hashable = ConstDictVariable._HashableTracker
+
+        # Keys will just be HashableTrackers when cloning, in any other case they'll be VariableTrackers
+        assert all(
+            isinstance(x, (VariableTracker, Hashable))
+            and isinstance(v, VariableTracker)
+            for x, v in items.items()
+        )
+
+        def make_hashable(key):
+            return key if isinstance(key, Hashable) else Hashable(key)
+
+        self.items = {make_hashable(x): v for x, v in
items.items()} + self.user_cls = user_cls + + def as_proxy(self): + return {k.vt.as_proxy(): v.as_proxy() for k, v in self.items.items()} + + def debug_repr(self): + return ( + "{" + + ", ".join( + f"{k.vt.debug_repr()}: {v.debug_repr()}" for k, v in self.items.items() + ) + + "}" + ) + + def as_python_constant(self): + return { + k.vt.as_python_constant(): v.as_python_constant() + for k, v in self.items.items() + } + + def keys_as_python_constant(self): + return {k.vt.as_python_constant(): v for k, v in self.items.items()} + + def python_type(self): + return self.user_cls + + def __contains__(self, vt) -> bool: + assert isinstance(vt, VariableTracker) + Hashable = ConstDictVariable._HashableTracker + return ( + is_hashable(vt) + and Hashable(vt) in self.items + and not isinstance(self.items[Hashable(vt)], variables.DeletedVariable) + ) + + def len(self): + return len( + [ + x + for x in self.items.values() + if not isinstance(x, variables.DeletedVariable) + ] + ) + + def reconstruct(self, codegen): + # instructions to load collections.OrderedDict if necessary + if self.user_cls is collections.OrderedDict: + codegen.add_push_null( + lambda: codegen.extend_output( + [ + codegen.create_load_python_module(collections), + codegen.create_load_attr("OrderedDict"), + ] + ) + ) + # instructions to build the dict keys and values + for key, value in self.items.items(): + codegen(key.vt) + codegen(value) + # BUILD_MAP and calling collections.OrderedDict if necessary + if self.user_cls is collections.OrderedDict: + codegen.extend_output( + [ + create_instruction("BUILD_MAP", arg=len(self.items)), + *create_call_function(1, False), + ] + ) + # BUILD_MAP only if user_cls is dict + else: + codegen.append_output(create_instruction("BUILD_MAP", arg=len(self.items))) + + def getitem_const_raise_exception_if_absent( + self, tx: "InstructionTranslator", arg: VariableTracker + ): + key = ConstDictVariable._HashableTracker(arg) + if key not in self.items: + raise_observed_exception(KeyError, tx, self) + return self.items[key] + + def getitem_const(self, tx: "InstructionTranslator", arg: VariableTracker): + key = ConstDictVariable._HashableTracker(arg) + if key not in self.items: + unimplemented(f"dict KeyError: {arg.value}") + return self.items[key] + + def maybe_getitem_const(self, arg: VariableTracker): + key = ConstDictVariable._HashableTracker(arg) + if key not in self.items: + return None + return self.items[key] + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + from . 
import ( + BuiltinVariable, + ConstantVariable, + ListIteratorVariable, + ListVariable, + TupleVariable, + UserDefinedObjectVariable, + ) + + Hashable = ConstDictVariable._HashableTracker + + arg_hashable = args and is_hashable(args[0]) + + if name == "__getitem__": + assert len(args) == 1 + return self.getitem_const_raise_exception_if_absent(tx, args[0]) + elif name == "items": + assert not (args or kwargs) + if self.source: + tx.output.guard_on_key_order.add(self.source.name()) + return TupleVariable( + [TupleVariable([k.vt, v]) for k, v in self.items.items()] + ) + elif name == "keys": + if self.source: + tx.output.guard_on_key_order.add(self.source.name()) + assert not (args or kwargs) + return DictKeys(self) + elif name == "values": + if self.source: + tx.output.guard_on_key_order.add(self.source.name()) + assert not (args or kwargs) + return DictValues(self) + elif name == "copy": + assert not (args or kwargs) + return self.clone(items=self.items.copy(), mutable_local=MutableLocal()) + elif name == "__len__": + assert not (args or kwargs) + return ConstantVariable.create(len(self.items)) + elif name == "__setitem__" and arg_hashable and self.mutable_local: + assert not kwargs and len(args) == 2 + tx.output.side_effects.mutation(self) + self.items[Hashable(args[0])] = args[1] + return ConstantVariable.create(None) + elif name == "__delitem__" and arg_hashable and self.mutable_local: + tx.output.side_effects.mutation(self) + self.items.__delitem__(Hashable(args[0])) + return ConstantVariable.create(None) + elif name in ("pop", "get") and len(args) in (1, 2) and args[0] not in self: + # missing item, return the default value + if len(args) == 1: + return ConstantVariable(None) + else: + return args[1] + elif name == "pop" and arg_hashable and self.mutable_local: + tx.output.side_effects.mutation(self) + return self.items.pop(Hashable(args[0])) + elif name == "clear": + tx.output.side_effects.mutation(self) + self.items.clear() + return ConstantVariable.create(None) + elif ( + name == "update" + and len(args) == 1 + and isinstance( + args[0], + ( + ConstDictVariable, + ListVariable, + TupleVariable, + ListIteratorVariable, + variables.IteratorVariable, + UserDefinedObjectVariable, + ), + ) + and self.mutable_local + ): + tx.output.side_effects.mutation(self) + if isinstance(args[0], ConstDictVariable): + dict_vt = args[0] + else: + dict_vt = BuiltinVariable.call_custom_dict(tx, dict, args[0]) + self.items.update(dict_vt.items) + # Wrap strings + kwargs = { + Hashable(ConstantVariable.create(k)): v for k, v in kwargs.items() + } + self.items.update(kwargs) + return ConstantVariable.create(None) + elif name in ("get", "__getattr__") and args[0] in self: + return self.getitem_const(tx, args[0]) + elif name == "__contains__" and len(args) == 1: + return ConstantVariable.create(args[0] in self) + elif name == "setdefault" and arg_hashable and self.mutable_local: + assert not kwargs + assert len(args) <= 2 + value = self.maybe_getitem_const(args[0]) + if value is not None: + return value + else: + if len(args) == 1: + x = ConstantVariable.create(None) + else: + x = args[1] + tx.output.side_effects.mutation(self) + self.items[Hashable(args[0])] = x + return x + else: + return super().call_method(tx, name, args, kwargs) + + def unpack_var_sequence(self, tx): + return [x.vt for x in self.items.keys()] + + def call_hasattr(self, tx, name): + # dict not allow setting arbitrary attributes. To check for hasattr, we can just check the __dict__ of the dict. 
+        # OrderedDict, though, requires side-effects tracking because it supports arbitrary setattr.
+        if self.user_cls is dict:
+            if name in self.user_cls.__dict__:
+                return ConstantVariable.create(True)
+            return ConstantVariable.create(False)
+        unimplemented(f"hasattr on {self.user_cls} is not supported")
+
+
+class DefaultDictVariable(ConstDictVariable):
+    def __init__(self, items, user_cls, default_factory=None, **kwargs) -> None:
+        super().__init__(items, user_cls, **kwargs)
+        assert user_cls is collections.defaultdict
+        self.default_factory = default_factory
+
+    def is_python_constant(self):
+        # Return false for unsupported defaults. This ensures that a bad handler
+        # path is not taken in BuiltinVariable for getitem.
+        if self.default_factory not in [list, tuple, dict] and not self.items:
+            return False
+        return super().is_python_constant()
+
+    def debug_repr(self):
+        return (
+            f"defaultdict({self.default_factory.debug_repr()}, {super().debug_repr()})"
+        )
+
+    @staticmethod
+    def is_supported_arg(arg):
+        if isinstance(arg, variables.BuiltinVariable):
+            return arg.fn in (list, tuple, dict, set)
+        else:
+            return isinstance(arg, variables.functions.BaseUserFunctionVariable)
+
+    def call_method(
+        self,
+        tx,
+        name,
+        args: "List[VariableTracker]",
+        kwargs: "Dict[str, VariableTracker]",
+    ) -> "VariableTracker":
+        if name == "__getitem__":
+            assert len(args) == 1
+
+            if args[0] in self:
+                return self.getitem_const(tx, args[0])
+            else:
+                if self.default_factory is None:
+                    raise KeyError(f"{args[0]}")
+                else:
+                    default_var = self.default_factory.call_function(tx, [], {})
+                    super().call_method(
+                        tx, "__setitem__", (args[0], default_var), kwargs
+                    )
+                    return default_var
+        else:
+            return super().call_method(tx, name, args, kwargs)
+
+
+# TODO: Implementing this via inheritance rather than composition is a
+# footgun, because self method calls in dict will route back to the set
+# implementation, which is almost assuredly wrong
+class SetVariable(ConstDictVariable):
+    """We model sets as dictionaries with None values"""
+
+    def __init__(
+        self,
+        items: List[VariableTracker],
+        **kwargs,
+    ) -> None:
+        items = dict.fromkeys(items, SetVariable._default_value())
+        super().__init__(items, **kwargs)
+
+    def debug_repr(self):
+        if not self.items:
+            return "set()"
+        else:
+            return "{" + ",".join(k.vt.debug_repr() for k in self.items.keys()) + "}"
+
+    @property
+    def set_items(self):
+        return set(self.items.keys())
+
+    @staticmethod
+    def _default_value():
+        # Variable used to fill in the keys of the dictionary
+        return ConstantVariable.create(None)
+
+    def as_proxy(self):
+        return {k.vt.as_proxy() for k in self.set_items}
+
+    def python_type(self):
+        return set
+
+    def as_python_constant(self):
+        return {k.vt.as_python_constant() for k in self.set_items}
+
+    def reconstruct(self, codegen):
+        codegen.foreach([x.vt for x in self.set_items])
+        codegen.append_output(create_instruction("BUILD_SET", arg=len(self.set_items)))
+
+    def call_method(
+        self,
+        tx,
+        name,
+        args: List[VariableTracker],
+        kwargs: Dict[str, VariableTracker],
+    ) -> "VariableTracker":
+        from . import ListVariable, TupleVariable
import ListVariable, TupleVariable + + # We forward the calls to the dictionary model + if name == "add": + assert not kwargs + assert len(args) == 1 + name = "__setitem__" + args = (args[0], SetVariable._default_value()) + elif name == "pop": + assert not kwargs + assert not args + # Choose an item at random and pop it via the Dict.pop method + result = self.set_items.pop().vt + super().call_method(tx, name, (result,), kwargs) + return result + elif name == "isdisjoint": + assert not kwargs + assert len(args) == 1 + return variables.UserFunctionVariable( + polyfills.set_isdisjoint + ).call_function(tx, [self, args[0]], {}) + elif name == "intersection": + assert not kwargs + assert len(args) == 1 + return variables.UserFunctionVariable( + polyfills.set_intersection + ).call_function(tx, [self, args[0]], {}) + elif name == "union": + assert not kwargs + assert len(args) == 1 + return variables.UserFunctionVariable(polyfills.set_union).call_function( + tx, [self, args[0]], {} + ) + elif name == "difference": + assert not kwargs + assert len(args) == 1 + return variables.UserFunctionVariable( + polyfills.set_difference + ).call_function(tx, [self, args[0]], {}) + elif ( + name == "update" + and len(args) == 1 + and isinstance( + args[0], + ( + SetVariable, + ListVariable, + TupleVariable, + ), + ) + and self.mutable_local + ): + if isinstance(args[0], (ListVariable, TupleVariable)): + arg = SetVariable(args[0].unpack_var_sequence(tx)) + else: + arg = args[0] + return super().call_method(tx, "update", (arg,), kwargs) + elif name == "remove": + assert not kwargs + assert len(args) == 1 + if args[0] not in self: + unimplemented("key does not exist") + return super().call_method(tx, "pop", args, kwargs) + elif name == "discard": + assert not kwargs + assert len(args) == 1 + if args[0] in self: + return super().call_method(tx, "pop", args, kwargs) + else: + return ConstantVariable.create(value=None) + return super().call_method(tx, name, args, kwargs) + + def getitem_const(self, tx: "InstructionTranslator", arg: VariableTracker): + raise RuntimeError("Illegal to getitem on a set") + + +class FrozensetVariable(SetVariable): + def __init__( + self, + items: List[VariableTracker], + **kwargs, + ) -> None: + super().__init__(items, **kwargs) + + def debug_repr(self): + if not self.items: + return "frozenset()" + else: + return "{" + ",".join(k.vt.debug_repr() for k in self.items.keys()) + "}" + + @property + def set_items(self): + return self.items.keys() + + def python_type(self): + return frozenset + + def as_python_constant(self): + return {k.vt.as_python_constant() for k in self.set_items} + + def reconstruct(self, codegen): + codegen.foreach([x.vt for x in self.set_items]) + codegen.add_push_null( + lambda: codegen.extend_output( + [ + codegen.create_load_global("frozenset"), + ] + ) + ) + codegen.extend_output(create_call_function(0, False)) + + def call_method( + self, + tx, + name, + args: List[VariableTracker], + kwargs: Dict[str, VariableTracker], + ) -> "VariableTracker": + if name in ["add", "pop", "update", "remove", "discard", "clear"]: + raise RuntimeError(f"Illegal call_method {name} on a frozenset") + return super().call_method(tx, name, args, kwargs) + + +class DictView(VariableTracker): + """ + Models _PyDictViewObject + + This is an "abstract" class.
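`SetVariable` reuses the dict machinery by storing set elements as keys. A minimal sketch of that representation trick in plain Python:

```python
# A set modeled as a dict whose keys are the elements and whose values
# are all None -- the same trick SetVariable applies via dict.fromkeys.
items = dict.fromkeys([3, 1, 2], None)

items[4] = None         # add(4)     -> routed to __setitem__ above
items.pop(1, None)      # discard(1) -> routed to pop above
print(sorted(items))    # [2, 3, 4]

# Unlike a real set, the backing dict preserves insertion order, which is
# what makes deterministic reconstruction (BUILD_SET) possible.
print(list(items))      # [3, 2, 4]
```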
Subclasses will override kv and the items method + """ + + kv: Optional[str] = None + + def __init__(self, dv_dict: ConstDictVariable, **kwargs) -> None: + super().__init__(**kwargs) + assert self.kv in ("keys", "values") + assert isinstance(dv_dict, ConstDictVariable) + self.dv_dict = dv_dict + + @property + def view_items(self): + return getattr(self.dv_dict.items, self.kv)() + + @property + def view_items_vt(self): + # Returns an iterable of the unpacked items + # Implement in the subclasses + raise NotImplementedError + + def unpack_var_sequence(self, tx): + def unwrap(x): + return x.vt if self.kv == "keys" else x + + return [unwrap(x) for x in self.view_items] + + def reconstruct(self, codegen): + codegen(self.dv_dict) + codegen.load_method(self.kv) + codegen.call_method(0) + + def call_method( + self, + tx, + name, + args: List["VariableTracker"], + kwargs: Dict[str, "VariableTracker"], + ) -> "VariableTracker": + if name == "__len__": + return self.dv_dict.call_method(tx, name, args, kwargs) + return super().call_method(tx, name, args, kwargs) + + +class DictKeys(DictView): + kv = "keys" + + @property + def set_items(self): + return set(self.view_items) + + @property + def view_items_vt(self): + # Returns an iterable of the unpacked items + return [x.vt for x in self.view_items] + + def python_type(self): + return dict_keys + + def call_method( + self, + tx, + name, + args: List["VariableTracker"], + kwargs: Dict[str, "VariableTracker"], + ) -> "VariableTracker": + if name == "__contains__": + return self.dv_dict.call_method(tx, name, args, kwargs) + return super().call_method(tx, name, args, kwargs) + + +class DictValues(DictView): + # DictValues is an iterable but cannot be compared. + kv = "values" + + @property + def view_items_vt(self): + return list(self.view_items) + + def python_type(self): + return dict_values + + +def _is_matching_transformers_cls(cls) -> bool: + mod = sys.modules.get("transformers.file_utils") + if mod is None: + mod = sys.modules.get("transformers.utils.generic") + return mod is not None and issubclass(cls, mod.ModelOutput) + + +def _is_matching_diffusers_cls(cls) -> bool: + mod = sys.modules.get("diffusers.utils") + return mod is not None and issubclass(cls, mod.BaseOutput) + + +def _call_hasattr_customobj( + self, tx: "InstructionTranslator", name: str +) -> "VariableTracker": + """Shared method between DataClassVariable and CustomizedDictVariable where items are attrs""" + if tx.output.side_effects.is_attribute_mutation(self): + try: + result = tx.output.side_effects.load_attr(self, name, deleted_ok=True) + return variables.ConstantVariable.create( + not isinstance(result, variables.DeletedVariable) + ) + except KeyError: + pass + if name in self.items or hasattr(self.user_cls, name): + return ConstantVariable(True) + elif istype(self.mutable_local, MutableLocal) and self.source is None: + # Something created locally can't have any extra fields on it + return ConstantVariable(False) + elif self.source: + # Maybe add a guard + try: + example = tx.output.root_tx.get_example_value(self.source) + install_guard( + AttrSource(self.source, name).make_guard(GuardBuilder.HASATTR) + ) + return ConstantVariable(hasattr(example, name)) + except KeyError: + pass + unimplemented( + f"hasattr({self.__class__.__name__}, {name}) {self.mutable_local} {self.source}" + ) + + +class CustomizedDictVariable(ConstDictVariable): + @staticmethod + def is_matching_cls_hf(cls): + return _is_matching_transformers_cls(cls) or _is_matching_diffusers_cls(cls) + + @staticmethod + def 
is_matching_cls(cls): + # True if using default OrderedDict.__init__ and did not implement __post_init__ + if ( + issubclass(cls, collections.OrderedDict) + and cls is not collections.OrderedDict + and cls.__init__ is collections.OrderedDict.__init__ + and not hasattr(cls, "__post_init__") + ): + return True + # hack for HF usecase: + # assume dataclass annotation for ModelOutput subclass + # assume self.create is AA to ModelOutput.__post_init__ + return CustomizedDictVariable.is_matching_cls_hf(cls) + + @classmethod + def is_matching_object(cls, obj): + return cls.is_matching_cls(type(obj)) + + # called from user_defined.py + # when is_matching_cls(cls) is true + @classmethod + def create(cls, user_cls, args, kwargs, options): + # avoid tracing when returning ModelOutput from forward func + for attr_name in ("__init__", "__post_init__", "__setattr__", "__setitem__"): + if hasattr(user_cls, attr_name): + fn = getattr(user_cls, attr_name) + assert callable(fn), f"expect callable attr {attr_name}" + if hasattr(fn, "__code__"): + skip_code(fn.__code__) + + if dataclasses.is_dataclass(user_cls): + # @dataclass CustomDict(a=1, b=2) + bound = inspect.signature(user_cls).bind(*args, **kwargs) + bound.apply_defaults() + + def make_var(x): + if isinstance(x, VariableTracker): + return x + elif ConstantVariable.is_literal(x): + return ConstantVariable.create(x) + else: + unimplemented( + "expect VariableTracker or ConstantVariable.is_literal" + ) + + bound_args = {} + if cls.is_matching_cls_hf(user_cls): + # Skip none + for k, v in bound.arguments.items(): + if isinstance(v, ConstantVariable) and v.value is None or v is None: + continue + bound_args[k] = v + else: + bound_args = bound.arguments + + items = { + ConstantVariable.create(k): make_var(v) for k, v in bound_args.items() + } + elif not args: + # CustomDict(a=1, b=2) in the general (non-dataclass) case. + items = {ConstantVariable.create(k): v for k, v in kwargs.items()} + elif len(args) == 1 and isinstance(args[0], ConstDictVariable) and not kwargs: + # CustomDict({'a': 1, 'b': 2}) + items = args[0].items + else: + unimplemented("custom dict init with args/kwargs unimplemented") + + return cls(items, user_cls, **options) + + # called from builder.py + @classmethod + def wrap(cls, builder, obj): + user_cls = type(obj) + + if not cls.is_matching_cls_hf(user_cls): + unimplemented("custom non-hf dict subclass wrap unimplemented") + + items = builder.__class__(tx=builder.tx, source=builder.source)( + collections.OrderedDict(obj) + ).items + + keys = [f.name for f in dataclasses.fields(user_cls)] + for key in keys: + # __init__ function of a dataclass might not have yet defined the key + if hasattr(obj, key): + val = getattr(obj, key) + var = builder.__class__( + tx=builder.tx, source=AttrSource(builder.source, key) + )(val) + if val is not None: + key = ConstantVariable.create(key) + items[key] = var + return cls(items, user_cls) + + def __init__(self, items, user_cls, **options) -> None: + super().__init__(items, user_cls, **options) + assert self.is_matching_cls(user_cls) + + def as_proxy(self): + raise NotImplementedError + + # 'RETURN_VALUE triggered compile' + # called from torch/_dynamo/codegen.py + def reconstruct(self, codegen): + is_hf_model_output = self.is_matching_cls_hf(self.user_cls) + + def gen_fn1(): + # If the user class is a ModelOutput, then wrap the instance creation in + # torch._dynamo.disable(). Even though we mark the __post_init__ as skip + # in `create` function, this is not enough. 
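The dataclass branch of `create` above relies on `inspect.signature(...).bind` to normalize positional and keyword constructor arguments into a field mapping. A minimal sketch of that step, with a hypothetical `DemoOutput` standing in for a real HF `ModelOutput` subclass:

```python
import dataclasses
import inspect
from typing import Optional

@dataclasses.dataclass
class DemoOutput:  # hypothetical stand-in for a ModelOutput subclass
    loss: Optional[float] = None
    logits: Optional[list] = None

# Normalize (args, kwargs) into a name -> value mapping, defaults included,
# the same way `create` builds `bound.arguments`.
bound = inspect.signature(DemoOutput).bind(0.5)
bound.apply_defaults()
print(dict(bound.arguments))   # {'loss': 0.5, 'logits': None}
```

On the HF path, the code above then drops `None`-valued entries before building `items`, mirroring how `ModelOutput.__post_init__` skips unset fields.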
TorchDynamo can still get + # triggered on the child functions of __post_init__. This upsets export. + # Since we know that ModelOutput __post_init__ is not worth optimizing, + # we just wrap the instance creation in torch._dynamo.disable(), + # regardless of whether it's export or not. + if is_hf_model_output: + # load torch._dynamo.disable + def gen_fn2(): + codegen.append_output(codegen.create_load_global("torch", add=True)) + codegen.append_output(codegen.create_load_attr("_dynamo")) + codegen.append_output(codegen.create_load_attr("disable")) + + codegen.add_push_null(gen_fn2) + + codegen.extend_output([codegen._create_load_const(self.user_cls)]) + + if is_hf_model_output: + # Wrap user_cls with disable + codegen.extend_output(create_call_function(1, False)) + + codegen.add_push_null(gen_fn1) + + # All the keys are just wrapped strings + d = self.keys_as_python_constant() + codegen.foreach(d.values()) + keys = tuple(d.keys()) + codegen.extend_output(codegen.create_call_function_kw(len(keys), keys, False)) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + fn = getattr(self.user_cls, name) + source = None if self.source is None else AttrSource(self.source, name) + + if hasattr(fn, "__objclass__") and fn.__objclass__ in ( + dict, + collections.OrderedDict, + ): + # for python dict methods that are not overridden + return super().call_method(tx, name, args, kwargs) + elif name in ( + "__getitem__", + "to_tuple", + "__setitem__", + "__setattr__", + "__post_init__", + ): + # for user-overridden methods + return tx.inline_user_function_return( + variables.UserFunctionVariable(fn, source=source), + [self] + list(args), + kwargs, + ) + elif fn is getattr(collections.OrderedDict, name, None): + return super().call_method(tx, name, args, kwargs) + + unimplemented(f"custom dict: call_method unimplemented name={name}") + + def var_getattr(self, tx: "InstructionTranslator", name: str) -> "VariableTracker": + name_vt = ConstantVariable.create(name) + if name_vt in self: + return self.call_method(tx, "__getitem__", [name_vt], {}) + if dataclasses.is_dataclass(self.user_cls): + defaults = {f.name: f.default for f in dataclasses.fields(self.user_cls)} + if name in defaults: + assert variables.ConstantVariable.is_literal(defaults[name]) + return variables.ConstantVariable.create(defaults[name]) + return super().var_getattr(tx, name) + + call_hasattr = _call_hasattr_customobj + + +@functools.lru_cache(None) +def _install_PretrainedConfig_patch(): + import transformers + + # We need to monkeypatch transformers here, sadly.
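`call_method` above distinguishes inherited C-level dict methods from user overrides by inspecting `__objclass__`. A standalone demonstration of that check (`MyDict` is made up for illustration):

```python
class MyDict(dict):
    def to_tuple(self):          # user-defined override
        return tuple(self.values())

inherited = MyDict.get           # C method descriptor inherited from dict
override = MyDict.to_tuple       # plain Python function

# Inherited C methods carry __objclass__; user overrides do not.
print(getattr(inherited, "__objclass__", None))  # <class 'dict'>
print(getattr(override, "__objclass__", None))   # None
```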
+ # TODO(voz): Upstream to transformers lib + + def _dynamo_overriden_transformers_eq(self, other): + if not hasattr(other, "__dict__"): + return False + return self.__dict__ == other.__dict__ + + transformers.configuration_utils.PretrainedConfig.__eq__ = ( + _dynamo_overriden_transformers_eq + ) + + +class HFPretrainedConfigVariable(VariableTracker): + """ + Hack for HuggingFace PretrainedConfig + """ + + @staticmethod + def is_matching_cls(cls): + mod = sys.modules.get("transformers.configuration_utils") + is_match = mod is not None and issubclass(cls, mod.PretrainedConfig) + + # Lazily install monkeypatch the first time we see it in dynamo + if is_match: + _install_PretrainedConfig_patch() + return is_match + + @classmethod + def is_matching_object(cls, obj): + return cls.is_matching_cls(type(obj)) + + def __init__(self, obj, **kwargs) -> None: + super().__init__(**kwargs) + self.obj = obj + assert self.is_matching_cls(type(obj)) + + def var_getattr(self, tx: "InstructionTranslator", name: str) -> "VariableTracker": + from .builder import VariableBuilder + + try: + attr_value = getattr(self.obj, name) + attr_source = AttrSource(self.source, name) + return VariableBuilder(tx, attr_source)(attr_value) + + except AttributeError: + unimplemented(f"getattr({self.value}, {name})") + + def call_hasattr(self, tx: "InstructionTranslator", name: str) -> "VariableTracker": + return variables.ConstantVariable.create(hasattr(self.obj, name)) + + +class PythonSysModulesVariable(VariableTracker): + """Special case for sys.modules. + + Without this we will guard on the exact set of modules imported in the + lifetime of the python program. + """ + + def python_type(self): + return dict + + def reconstruct(self, codegen): + codegen.add_push_null( + lambda: codegen.extend_output( + [ + codegen.create_load_python_module(sys), + codegen.create_load_attr("modules"), + ] + ) + ) + + def call_method( + self, + tx: "InstructionTranslator", + name, + args: List[VariableTracker], + kwargs: Dict[str, VariableTracker], + ): + if name == "__getitem__": + return self.call_getitem(tx, *args, **kwargs) + elif name == "get": + return self.call_get(tx, *args, **kwargs) + elif name == "__contains__": + return self.call_contains(tx, *args, **kwargs) + unimplemented(f"sys.modules.{name}(*{args}, **{kwargs})") + + def _contains_helper(self, tx: "InstructionTranslator", key: VariableTracker): + k = key.as_python_constant() + has_key = k in sys.modules + install_guard( + self.make_guard( + functools.partial(GuardBuilder.DICT_CONTAINS, key=k, invert=not has_key) + ) + ) + return k, has_key + + def call_contains(self, tx: "InstructionTranslator", key: VariableTracker): + k, has_key = self._contains_helper(tx, key) + return ConstantVariable.create(value=has_key) + + def call_get( + self, + tx: "InstructionTranslator", + key: VariableTracker, + default: Optional[VariableTracker] = None, + ): + from .builder import VariableBuilder + + k, has_key = self._contains_helper(tx, key) + + if has_key: + return VariableBuilder( + tx, + GetItemSource(self.source, k), + )(sys.modules[k]) + + if default is not None: + return default + + return ConstantVariable.create(value=None) + + def call_getitem(self, tx: "InstructionTranslator", key: VariableTracker): + from .builder import VariableBuilder + + k, has_key = self._contains_helper(tx, key) + return VariableBuilder( + tx, + GetItemSource(self.source, k), + )(sys.modules[k]) diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/functions.py 
b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/functions.py new file mode 100644 index 0000000000000000000000000000000000000000..90c2d0665aebc2f5e3cde22e89207c61e0aa6244 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/functions.py @@ -0,0 +1,1133 @@ +# mypy: ignore-errors + +import collections +import functools +import inspect +import itertools +import types +from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, TypeVar, Union + +import torch + +from .. import polyfills, variables +from ..bytecode_transformation import create_call_function, create_rot_n +from ..exc import unimplemented, Unsupported +from ..guards import GuardBuilder, install_guard +from ..source import AttrSource, ConstantSource, DefaultsSource, GetItemSource +from ..utils import ( + check_constant_args, + check_unspec_or_constant_args, + identity, + is_function, + is_wrapper_or_member_descriptor, + istype, + make_cell, +) +from .base import MutableLocal, typestr, VariableTracker +from .constant import ConstantVariable + + +try: + from torch.distributed._composable.fsdp import _fsdp_param_group +except ModuleNotFoundError: + _fsdp_param_group = None + + +if TYPE_CHECKING: + from torch._dynamo.symbolic_convert import InstructionTranslator + from torch._guards import Source + + +_F = TypeVar("_F", bound=Callable) + + +def wrap_bound_arg(tx: "InstructionTranslator", val, source=None): + # Source propagation is best effort since not every object we encounter has a source to begin with. + if isinstance(val, VariableTracker): + return val + elif not source: + from torch._dynamo.variables.builder import SourcelessBuilder + + return SourcelessBuilder.create(tx, val) + else: + # Create a lazy variable to avoid guarding on __defaults__ unless really + # needed. 
+ return variables.LazyVariableTracker.create(val, source) + + +def wrap_args_kwargs(tx: "InstructionTranslator", result): + for k, v in list(result.items()): + if isinstance(v, (tuple, dict)): + # args/kwargs + result[k] = wrap_bound_arg(tx, v) + + +def init_cellvars(parent, result, code): + closure_cells = {} + side_effects = parent.output.side_effects + + # for name in itertools.chain(code.co_cellvars, code.co_freevars): + for name in code.co_cellvars: + closure_cells[name] = side_effects.track_cell_new() + if name in result: + side_effects.store_cell(closure_cells[name], result.pop(name)) + + return closure_cells + + +def _create_nested_fn( + code, f_globals, name, defaults, closure, kwdefaults, annotations +): + from types import FunctionType + + func = FunctionType(code, f_globals, name, defaults, closure) + func.__kwdefaults__ = kwdefaults + + if isinstance(annotations, tuple): + from itertools import pairwise + + annotations = dict(pairwise(annotations)) + + # TypeError: __annotations__ must be set to a dict object + assert annotations is None or isinstance(annotations, dict) + func.__annotations__ = annotations + + return func + + +class BaseUserFunctionVariable(VariableTracker): + def get_filename(self): + return self.get_code().co_filename + + def get_name(self): + return self.get_code().co_name + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + return tx.inline_user_function_return(self, [*self.self_args(), *args], kwargs) + + def call_hasattr(self, tx: "InstructionTranslator", name: str) -> VariableTracker: + result = False + + try: + result = hasattr(self.get_function(), name) + except NotImplementedError: + if name == "__name__" and isinstance(self, NestedUserFunctionVariable): + result = True + return variables.ConstantVariable.create(result) + + def inspect_parameter_names(self): + return list(inspect.signature(self.get_function()).parameters) + + def closure_vars(self, tx): + return {} + + +class UserFunctionVariable(BaseUserFunctionVariable): + """Some unsupported user-defined global function""" + + _nonvar_fields = { + "fn", + "is_constant", + *BaseUserFunctionVariable._nonvar_fields, + } + + @classmethod + def create_with_source(cls, value, source): + install_guard(source.make_guard(GuardBuilder.CLOSURE_MATCH)) + return cls(value, source=source) + + def __init__(self, fn, is_constant=False, **kwargs) -> None: + super().__init__(**kwargs) + if getattr(fn, "_dynamo_marked_constant", False): + # This method should be treated as a constant for the purposes of compilation + self.is_constant = True + else: + self.is_constant = False + + assert isinstance( + fn, (types.FunctionType, torch.jit.ScriptFunction) + ), f"expected FunctionType found {typestr(fn)} {fn}" + # unpack @torch._dynamo.optimize()(fn) wrapped function + fn = inspect.getattr_static(fn, "_torchdynamo_inline", fn) + self.fn: types.FunctionType = fn + + def as_python_constant(self): + if istype(self, UserFunctionVariable): + return self.fn + # subclasses (such as methods) usually aren't a constant + return super().as_python_constant() + + def self_args(self): + return [] + + def get_function(self): + return self.fn + + def get_code(self): + return self.fn.__code__ + + def python_type(self): + return types.FunctionType + + def has_self(self): + return getattr(self.fn, "__self__", None) is not None + + def get_globals(self): + return self.fn.__globals__ + + def bind_args(self, parent, args, kwargs): + 
assert not self.is_constant + tx = parent.output.root_tx + wrap = functools.partial(wrap_bound_arg, tx=tx) + + fn: types.FunctionType = self.fn + defaults = fn.__defaults__ or [] + defaults_sources = [ + None if self.source is None else DefaultsSource(self.source, idx) + for idx, _ in enumerate(defaults) + ] + fake_func = types.FunctionType( + fn.__code__, + fn.__globals__, + fn.__name__, + tuple( + [ + wrap(val=arg, source=source) + for arg, source in zip(defaults, defaults_sources) + ] + ), + fn.__closure__, + ) + if fn.__kwdefaults__: + kwdefaults_sources = { + k: None + if self.source is None + else DefaultsSource(self.source, k, is_kw=True) + for k in fn.__kwdefaults__ + } + fake_func.__kwdefaults__ = { + k: wrap(val=v, source=kwdefaults_sources[k]) + for k, v in fn.__kwdefaults__.items() + } + + bound = inspect.signature(fake_func).bind(*args, **kwargs) + bound.apply_defaults() + result = dict(bound.arguments.items()) + + wrap_args_kwargs(tx, result) + closure_cells = init_cellvars(parent, result, fn.__code__) + closure = self.fn.__closure__ or () + assert len(closure) == len(self.fn.__code__.co_freevars) + for idx, name, cell in zip( + itertools.count(), self.fn.__code__.co_freevars, closure + ): + if name == "__class__": + source = AttrSource(self.source, "__class__") if self.source else None + result[name] = variables.UserDefinedClassVariable( + cell.cell_contents, + source=source, + ) + else: + var = tx.match_nested_cell(name, cell) + if var is not None: + # optimization for cleaner codegen + result[name] = var + elif self.source: + from .builder import VariableBuilder + + side_effects = parent.output.side_effects + if cell in side_effects: + out = side_effects[cell] + else: + closure_cell = GetItemSource( + AttrSource(self.source, "__closure__"), idx + ) + closure_cell_contents = AttrSource( + closure_cell, "cell_contents" + ) + try: + contents_var = VariableBuilder( + parent, closure_cell_contents + )(cell.cell_contents) + except ValueError: + # Cell has not yet been assigned + contents_var = variables.DeletedVariable() + + if ( + closure_cell_contents.name() + not in tx.mutated_closure_cell_contents + ): + # Optimistically don't allocate the cell, to + # reduce the number of side effects. This is + # important for cond, as without it, any accesses + # to closures create side effects and cond doesn't + # support side effects. 
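The closure handling in `bind_args` above leans on a CPython invariant: `__closure__` holds exactly one cell per name in `co_freevars`, in matching order, which is what the `assert len(closure) == len(...co_freevars)` checks. A small demonstration:

```python
def make_closure(start):
    offset = 10
    def inner():
        return start + offset
    return inner

fn = make_closure(5)
# One cell per free variable, in the same order as co_freevars.
assert len(fn.__closure__) == len(fn.__code__.co_freevars)
print(dict(zip(fn.__code__.co_freevars,
               (c.cell_contents for c in fn.__closure__))))
# {'offset': 10, 'start': 5}
```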
If we're wrong and this + # closure cell gets written to, we will restart + # the analysis with this cell's name in the + # mutated list here + result[name] = contents_var + continue + + # cells are written to with "cell_contents", + # so the source should just be the closure_cell, not its contents + out = side_effects.track_cell_existing(closure_cell, cell) + side_effects.store_cell( + out, + contents_var, + ) + + result[name] = out + + else: + from .builder import SourcelessBuilder + + result[name] = SourcelessBuilder.create(tx, cell.cell_contents) + + return result, closure_cells + + def export_freevars(self, parent, child): + pass + + def var_getattr(self, tx: "InstructionTranslator", name: str): + source = AttrSource(self.source, name) if self.source else None + try: + subobj = inspect.getattr_static(self.fn, name) + except AttributeError: + options = {"source": source} + return variables.GetAttrVariable(self, name, **options) + if source: + return variables.LazyVariableTracker.create(subobj, source) + from .builder import SourcelessBuilder + + return SourcelessBuilder.create(tx, subobj) + + def call_hasattr(self, tx: "InstructionTranslator", name: str) -> VariableTracker: + result = hasattr(self.fn, name) + return variables.ConstantVariable.create(result) + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if self.is_constant: + return invoke_and_store_as_constant( + tx, self.fn, self.get_name(), args, kwargs + ) + + return super().call_function(tx, args, kwargs) + + +class UserMethodVariable(UserFunctionVariable): + """Some unsupported user-defined method""" + + def __init__(self, fn, obj, **kwargs) -> None: + super().__init__(fn=fn, **kwargs) + self.obj = obj + + def __str__(self) -> str: + return f"{self.__class__.__name__}({self.fn}, {self.obj})" + + def self_args(self): + return [self.obj] + + def python_type(self): + return types.MethodType + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + # For nn.Module methods, redirecting to NNModuleVariable.call_method for optimized solution + # rather than simple inlining. E.g, putting `call_method` op in FX graph for `forward` method + # since we ensure `forward` of allowed modules can be traced by AOT safely. + # Note this is not only for allowed modules, as user customized modules can extend from + # allowed modules but using parent's `forward` method, which is also covered by this branch. + + # If we are tracing the higher order op, we want Dynamo to step inside + # the module call so that Dynamo can see the underlying parameters and + # buffers and raise them as inputs to the graph. The is_root_tracer + # check bypasses the if condition for non-root tracers and directly + # calls the super().call_function at the end, which is basically + # equivalent of inlining the method. 
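`UserMethodVariable.self_args()` above reduces a bound method to "the underlying function plus a leading self argument". The plain-Python equivalence it relies on:

```python
import types

class Counter:
    def __init__(self):
        self.n = 0
    def bump(self, k):
        self.n += k
        return self.n

c = Counter()
# Calling the unbound function with self prepended is the same as calling
# the bound method -- the normalization self_args() performs.
print(Counter.bump(c, 2))                    # 2
print(types.MethodType(Counter.bump, c)(3))  # 5
```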
+ if tx.output.is_root_tracer() and isinstance( + self.obj, variables.NNModuleVariable + ): + module_attr = getattr(self.fn, "__module__", "") + # inline torch.nn.utils.parametrize + if ( + module_attr is not None + and module_attr.startswith("torch.nn.") + and module_attr != "torch.nn.utils.parametrize" + or self.is_constant + ): + return self.obj.call_method( + tx, self.fn.__name__, args, kwargs, constant=self.is_constant + ) + elif ( + _fsdp_param_group is not None + and self.fn is _fsdp_param_group.FSDPParamGroup.use_training_state + ): + return variables.TorchCtxManagerClassVariable(self.fn).call_function( + tx, (self.obj, *args), kwargs + ) + if self.is_constant: + fn = getattr(self.obj.value, self.fn.__name__) + return invoke_and_store_as_constant(tx, fn, self.get_name(), args, kwargs) + return super().call_function(tx, args, kwargs) + + def inspect_parameter_names(self): + return super().inspect_parameter_names()[1:] + + +class WrappedUserMethodVariable(UserMethodVariable): + def __init__(self, wrapped, context, **kwargs) -> None: + kwargs.pop("fn", None) + kwargs.pop("obj", None) + super().__init__(wrapped.fn, wrapped.obj, **kwargs) + self.wrapped = wrapped + self.context = context + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + self.context.enter(tx) + result = super().call_function(tx, args, kwargs) + self.context.exit(tx) + return result + + +class WrappedUserFunctionVariable(UserFunctionVariable): + def __init__(self, wrapped, context, **kwargs) -> None: + kwargs.pop("fn", None) + kwargs.pop("obj", None) + super().__init__(wrapped.fn, **kwargs) + self.wrapped = wrapped + self.context = context + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + self.context.enter(tx) + result = super().call_function(tx, args, kwargs) + self.context.exit(tx) + return result + + +def invoke_and_store_as_constant(tx: "InstructionTranslator", fn, name, args, kwargs): + def convert(x): + if isinstance(x, variables.TensorVariable): + return x.get_real_value() + return x.as_python_constant() + + args = [convert(x) for x in args] + kwargs = {k: convert(v) for k, v in kwargs.items()} + res = fn(*args, **kwargs) + return tx.output.register_attr_or_module( + res, + name, + source=ConstantSource(name), + ) + + +class NestedUserFunctionVariable(BaseUserFunctionVariable): + _nonvar_fields = { + "closure_scope", + "f_globals", + *BaseUserFunctionVariable._nonvar_fields, + } + + def __init__( + self, + fn_name, + code, + f_globals, + defaults, + kwdefaults, + annotations, + closure, + closure_scope, + wrapped_reconstructible=None, + **kwargs, + ) -> None: + super().__init__(**kwargs) + assert isinstance(fn_name.as_python_constant(), str) + assert isinstance(code.as_python_constant(), types.CodeType) + assert isinstance(f_globals, dict) + self.fn_name = fn_name + self.code = code + self.f_globals = f_globals + self.defaults = defaults + self.kwdefaults = kwdefaults + self.annotations = annotations + self.closure = closure + if closure is None: + closure_scope = None + self.closure_scope = closure_scope + # Either a source or a VT with .can_reconstruct() == True + self.wrapped_reconstructible: Optional[ + Union[Source, VariableTracker] + ] = wrapped_reconstructible + + def self_args(self): + return [] + + def get_code(self): + return self.code.as_python_constant() + + def 
get_function(self): + if self.closure: + raise NotImplementedError + func = types.FunctionType( + self.code.as_python_constant(), + self.f_globals, + self.fn_name.as_python_constant(), + ) + if self.defaults: + func.__defaults__ = self.defaults.as_python_constant() + if self.kwdefaults: + func.__kwdefaults__ = self.kwdefaults.as_python_constant() + if self.annotations: + annotations = self.annotations.as_python_constant() + if isinstance(annotations, tuple): + from itertools import pairwise + + annotations = dict(pairwise(annotations)) + + # TypeError: __annotations__ must be set to a dict object + assert isinstance(annotations, dict) + func.__annotations__ = annotations + return func + + def has_closure(self): + return self.closure is not None + + def has_self(self): + return False + + def get_globals(self): + return self.f_globals + + def bind_args(self, parent, args, kwargs): + from .misc import InlinedClosureVariable + + code = self.get_code() + func = types.FunctionType( + code, + self.f_globals, + self.fn_name.as_python_constant(), + tuple(self.defaults.items) if self.defaults else None, + tuple(make_cell(None) for _ in range(len(self.get_code().co_freevars))), + ) + if self.kwdefaults: + func.__kwdefaults__ = self.kwdefaults.keys_as_python_constant() + bound = inspect.signature(func).bind(*args, **kwargs) + bound.apply_defaults() + result = dict(bound.arguments.items()) + wrap_args_kwargs(parent.output.root_tx, result) + closure_cells = init_cellvars(parent, result, code) + + for idx, name in enumerate(code.co_freevars): + cell = self.closure.items[idx] + assert name not in result + if isinstance(cell, InlinedClosureVariable): + # InlinedClosureVariable's are created from LOAD_CLOSURE's from + # InliningInstructionTranslators when the variable name is not found in closure_cells. + # They should remain outside of closure_cells, so that our callee (the + # InliningInstructionTranslator that traces `func`) handles + # the cell correctly - that is, the cell's contents are treated as if they + # are local variables, like in UserFunctionVariable's bind_args for freevars. 
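`get_function` above materializes a real `types.FunctionType` from the stored pieces (code object, globals, name, defaults). The same construction in isolation, starting from a freshly compiled code object:

```python
import types

src = compile("def g(x, y=1):\n    return x + y", "<demo>", "exec")
code = next(c for c in src.co_consts if isinstance(c, types.CodeType))

# Build the function from (code, globals, name, defaults), overriding the
# compiled-in default y=1 with y=10, much as get_function substitutes the
# stored defaults.
g = types.FunctionType(code, globals(), "g", (10,))
print(g(5))      # 15
print(g(5, 2))   # 7
```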
+ cand = parent + while cand and name not in cand.symbolic_locals: + cand = cand.parent + if cand is None: + raise RuntimeError( + f"Couldn't find {name} in the symbolic_locals of the inline interpreter stack" + ) + result[name] = cand.symbolic_locals[name] + else: + closure_cells[name] = self.closure.items[idx] + + return result, closure_cells + + def export_freevars(self, parent, child): + code = self.get_code() + for var in code.co_freevars: + if var in child.symbolic_locals: + parent.symbolic_locals[var] = child.symbolic_locals[var] + + def reconstruct(self, codegen): + codegen.add_push_null( + lambda: codegen.load_import_from(__name__, "_create_nested_fn") + ) + codegen(self.code) + codegen.extend_output([codegen._create_load_const(self.f_globals)]) + codegen(ConstantVariable.create(self.code.value.co_name)) + + if self.defaults: + codegen(self.defaults) + else: + codegen.extend_output([codegen.create_load_const(None)]) + + if self.closure: + codegen(self.closure) + else: + codegen.extend_output([codegen.create_load_const(None)]) + + if self.kwdefaults: + codegen(self.kwdefaults) + else: + codegen.extend_output([codegen.create_load_const(None)]) + + if self.annotations: + try: + annotations = self.annotations.as_python_constant() + codegen.extend_output([codegen._create_load_const(annotations)]) + except NotImplementedError: + codegen(self.annotations) + else: + codegen.extend_output([codegen.create_load_const(None)]) + + codegen.extend_output(create_call_function(7, False)) + + if self.wrapped_reconstructible: + codegen.add_push_null( + lambda: codegen.load_import_from("functools", "wraps") + ) + codegen(self.wrapped_reconstructible) + codegen.extend_output(create_call_function(1, False)) + codegen.extend_output(create_rot_n(2)) + codegen.extend_output(create_call_function(1, True)) + + +class SkipFunctionVariable(VariableTracker): + _nonvar_fields = { + "value", + "reason", + *VariableTracker._nonvar_fields, + } + + def __init__(self, value, reason=None, **kwargs) -> None: + super().__init__(**kwargs) + self.value = value + self.reason = reason + + def as_python_constant(self): + return self.value + + @classmethod + def create_with_source(cls, value, source): + if not is_wrapper_or_member_descriptor(value): + # These descriptors are not guaranteed to return the same object on + # attribute lookup. They are unlikely to be changed, so we can skip + # guarding them. 
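The `cand = parent` walk above climbs the stack of inlining translators until some frame's `symbolic_locals` contains the name. A toy model of that resolution loop (the `Scope` class is hypothetical, standing in for an InliningInstructionTranslator):

```python
class Scope:  # hypothetical stand-in for an inlining translator frame
    def __init__(self, symbolic_locals, parent=None):
        self.symbolic_locals = symbolic_locals
        self.parent = parent

def resolve(scope, name):
    cand = scope
    while cand is not None and name not in cand.symbolic_locals:
        cand = cand.parent
    if cand is None:
        raise RuntimeError(f"couldn't find {name} in the interpreter stack")
    return cand.symbolic_locals[name]

inner = Scope({"x": 1}, parent=Scope({"x": 0, "y": 2}))
print(resolve(inner, "y"))   # 2 -- found one frame up
```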
+ install_guard(source.make_guard(GuardBuilder.FUNCTION_MATCH)) + return cls(value, source=source) + + @staticmethod + @functools.lru_cache(None) + def fold_through_function_to_wrapper(): + return { + collections.namedtuple: variables.UserDefinedClassVariable, + } + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if inspect.getattr_static(self.value, "_torchdynamo_disable", False): + unimplemented(f"call torch._dynamo.disable() wrapped function {self.value}") + # Fold through the functions(e.g, collections.namedtuple) + # that inputs & outputs are all python constants + elif ( + self.value in self.fold_through_function_to_wrapper().keys() + and check_constant_args(args, kwargs) + ): + value = self.value( + *[x.as_python_constant() for x in args], + **{k: v.as_python_constant() for k, v in kwargs.items()}, + ) + return self.fold_through_function_to_wrapper().get(self.value)( + value, mutable_local=MutableLocal() + ) + elif ( + self.value is functools.wraps + and not kwargs + and len(args) == 1 + and ( + args[0].source is not None or args[0].can_reconstruct(tx.output.root_tx) + ) + ): + + def wraps(fn): + if isinstance(fn, variables.NestedUserFunctionVariable): + if args[0].source: + reconstructible = args[0].source + else: + reconstructible = args[0] + return fn.clone(wrapped_reconstructible=reconstructible) + unimplemented(f"functools.wraps({fn})") + + return variables.LambdaVariable(wraps) + else: + try: + path = inspect.getfile(self.value) + msg = f"'skip function {self.value.__qualname__} in file {path}'" + except TypeError: + known_python_builtin_modules = {"_abc", "_warnings"} + if self.value.__module__ in known_python_builtin_modules: + msg = ( + f"Graph break due to unsupported Python builtin {self.value.__module__}.{self.value.__qualname__}. " + f"Please file an issue on GitHub " + f"so the PyTorch team can add support for it. " + ) + elif ( + self.value.__module__ is not None + and self.value.__module__.startswith("optree") + ): + msg = ( + f"Graph break for an optree C/C++ function {self.value.__module__}.{self.value.__qualname__}." + f" Consider using torch.utils._pytree - " + f"https://github.com/pytorch/pytorch/blob/main/torch/utils/_pytree.py" + ) + # also warn on it because most users won't see the graph break message + torch._dynamo.utils.warn_once(msg) + else: + msg = ( + f"Graph break due to unsupported builtin {self.value.__module__}.{self.value.__qualname__}. " + f"This function is either a Python builtin (e.g. _warnings.warn) " + f"or a third-party C/C++ Python extension (perhaps created with pybind). " + f"If it is a Python builtin, please file an issue on GitHub " + f"so the PyTorch team can add support for it and see the next case for a workaround. " + f"If it is a third-party C/C++ Python extension, please " + f"either wrap it into a PyTorch-understood custom operator " + f"(see https://pytorch.org/tutorials/advanced/custom_ops_landing_page.html " + f"for more details) or, if it is traceable, use " + f"torch.compiler.allow_in_graph." + ) + # also warn on it because most users won't see the graph break message + torch._dynamo.utils.warn_once(msg) + msg += f"', {self.reason}'" if self.reason else "" + unimplemented(msg) + + +class WrapperUserFunctionVariable(VariableTracker): + """ + Used to represent a wrapper object that contains the actual callable as an + attribute. 
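`SkipFunctionVariable.call_function` above constant-folds calls such as `collections.namedtuple` whenever every argument is a compile-time constant. The eager evaluation that the fold-through branch performs amounts to:

```python
import collections

# What the fold-through branch effectively evaluates at trace time and
# then wraps as a UserDefinedClassVariable:
Point = collections.namedtuple("Point", ["x", "y"])
p = Point(1, 2)
print(p.x + p.y)   # 3
```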
For example, torch.jit.script/trace have the original function at + their _torchdynamo_inline attribute. Similarly, functions with + __script_if_tracing_wrapper have the original attr at "__original_fn". + """ + + def __init__(self, wrapper_obj, attr_to_trace, **kwargs) -> None: + super().__init__(**kwargs) + self.wrapper_obj = wrapper_obj + self.attr_to_trace = attr_to_trace + + def var_getattr(self, tx: "InstructionTranslator", name): + if name == self.attr_to_trace: + val = getattr(self.wrapper_obj, self.attr_to_trace) + if self.source: + from .builder import VariableBuilder + + return VariableBuilder(tx, AttrSource(self.source, name))(val) + else: + from .builder import SourcelessBuilder + + return SourcelessBuilder.create(tx, val) + + return super().var_getattr(tx, name) + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + return variables.UserFunctionVariable( + polyfills.getattr_and_trace + ).call_function( + tx, [self, variables.ConstantVariable(self.attr_to_trace), *args], kwargs + ) + + +def _traceable_collective_remaps(): + # We can't rely on importing from distributed, since it's not always built + if torch.distributed.is_available(): + from torch.distributed._functional_collectives import ( + traceable_collective_remaps, + ) + + return traceable_collective_remaps + return {} + + +def _traceable_collectives_source(tx: "InstructionTranslator", fn): + assert torch.distributed.is_available(), "Illegal invocation." + assert fn in _traceable_collective_remaps().values() + + inner_name = fn.__name__ + path_source = tx.import_source("torch.distributed._functional_collectives") + return AttrSource(path_source, inner_name) + + +class CollectiveFunctionRewriteVariable(UserFunctionVariable): + """ + Some of the torch.distributed.* collective APIs are possible to rewrite to 'traceable' collectives. + + This class provides both a way to check if a function is remappable, and perform the remapping. + + In the case that a function is 'remappable' but only for some combinations of call-time arguments, + we check the args at `call_function` time and fall back to graph-breaking if needed. This is no worse + than status-quo as we currently graph-break on all distributed.* collectives. + """ + + def __init__(self, fn, *, replacement_var, **kwargs) -> None: + super().__init__(fn, **kwargs) + assert isinstance(replacement_var, UserFunctionVariable) + self.replacement_var = replacement_var + + @staticmethod + def create(tx: "InstructionTranslator", old_fn, source, **options): + new_fn, new_source = CollectiveFunctionRewriteVariable.rewrite(tx, old_fn) + return CollectiveFunctionRewriteVariable( + old_fn, + replacement_var=UserFunctionVariable(new_fn, source=new_source, **options), + source=source, + **options, + ) + + @staticmethod + def can_rewrite(variable): + return ( + inspect.isfunction(variable) and variable in _traceable_collective_remaps() + ) + + @staticmethod + def rewrite(tx: "InstructionTranslator", fn): + new_fn = _traceable_collective_remaps()[fn] + return new_fn, _traceable_collectives_source(tx, new_fn) + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + # call_function must check any unsupported arguments and graph-break. 
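The first thing the rewrite's `call_function` does (below) is merge positional arguments into kwargs via `inspect.signature(...).bind`, so both calling styles are handled uniformly. In isolation, with a stand-in signature rather than the real `torch.distributed` API:

```python
import inspect

def all_reduce(tensor, op="sum", group=None, async_op=False):  # stand-in
    pass

# Normalize positionals and keywords into a single kwargs dict; unbound
# parameters (group) are simply absent because defaults aren't applied.
sig = inspect.signature(all_reduce)
kwargs = dict(sig.bind("T", "avg", async_op=True).arguments)
print(kwargs)   # {'tensor': 'T', 'op': 'avg', 'async_op': True}
```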
+ # It's safe to assume args/kwargs from orig_fn map 1:1 to args/kwargs of remapped_fn, + # since that's the contract for putting a mapping in `traceable_collective_remaps` + import torch.distributed as dist + from torch.distributed._functional_collectives import REDUCE_OP_TO_STR + + # Merge args into kwargs so positional and keyword args + # can be processed the same way. + signature = inspect.signature(self.fn) + kwargs = dict(signature.bind(*args, **kwargs).arguments) + args = () + + if "async_op" in kwargs and kwargs["async_op"].as_python_constant(): + unimplemented( + f"CollectiveFunctionRewriteVariable can't support async_op=True for {self.fn}" + ) + + if self.fn in ( + dist.all_reduce, + dist.reduce_scatter_tensor, + dist._reduce_scatter_base, + ): + reduce_op_var = kwargs.get("op") + reduce_op = ( + reduce_op_var.value + if reduce_op_var is not None + else signature.parameters["op"].default + ) + if reduce_op not in REDUCE_OP_TO_STR: + raise ValueError(f"Unsupported all_reduce op: {reduce_op}") + kwargs["op"] = variables.ConstantVariable.create( + REDUCE_OP_TO_STR[reduce_op] + ) + return self.replacement_var.call_function(tx, args, kwargs) + + +class FunctoolsPartialVariable(VariableTracker): + def __init__(self, func: VariableTracker, args, keywords, **kwargs) -> None: + super().__init__(**kwargs) + self.func = func + assert isinstance(args, list) + self.args = args + assert isinstance(keywords, dict) + self.keywords = keywords + + def reconstruct(self, codegen): + codegen.add_push_null(lambda: codegen.load_import_from("functools", "partial")) + codegen(self.func) + if self.args: + codegen.foreach(self.args) + if not self.keywords: + codegen.extend_output(create_call_function(len(self.args) + 1, False)) + return + + codegen.foreach(self.keywords.values()) + keys = tuple(self.keywords.keys()) + codegen.extend_output( + codegen.create_call_function_kw(len(keys) + len(self.args) + 1, keys, False) + ) + + def get_function(self): + return self.as_python_constant() + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + merged_args = self.args + args + merged_kwargs = {**self.keywords, **kwargs} + return self.func.call_function(tx, merged_args, merged_kwargs) + + def call_hasattr(self, tx: "InstructionTranslator", name: str) -> VariableTracker: + # functools.partial uses slots, so attributes are constant + return variables.ConstantVariable.create( + hasattr(functools.partial(identity), name) + ) + + def as_python_constant(self): + return functools.partial( + self.func.as_python_constant(), + *[arg.as_python_constant() for arg in self.args], + **{k: v.as_python_constant() for k, v in self.keywords.items()}, + ) + + def guard_as_python_constant(self): + """Similar to as_python_constant(), but add ID_MATCH guards to try to force things to become constants""" + return functools.partial( + self.func.guard_as_python_constant(), + *[v.guard_as_python_constant() for v in self.args], + **{k: v.guard_as_python_constant() for k, v in self.keywords.items()}, + ) + + +class PolyfilledFunctionVariable(VariableTracker): + _nonvar_fields = { + "fn", + "wrapped_fn", + "traceable_fn", + *VariableTracker._nonvar_fields, + } + + @classmethod + @functools.lru_cache(None) + def _get_polyfill_handlers(cls) -> Dict[Callable[..., Any], types.FunctionType]: + return {} + + @classmethod + def create_with_source(cls, value, source): + install_guard(source.make_guard(GuardBuilder.FUNCTION_MATCH)) + + return 
cls(value, source=source) + + def __init__(self, fn: _F, **kwargs) -> None: + super().__init__(**kwargs) + self.fn: _F = fn + + handler = self._get_polyfill_handlers().get(fn, fn) + assert callable(handler), f"Polyfill handler {handler} is not callable for {fn}" + for candidate_attr in ( + "__torch_dynamo_polyfill__", # registered polyfill + "__python_implementation__", # self handler from third-party libraries + ): + candidate = getattr(handler, candidate_attr, None) + if candidate: + assert callable(candidate) + traceable_fn = candidate + break + else: + raise RuntimeError( + f"Polyfill handler {handler} does not have a traceable function" + ) + + self.wrapped_fn: _F = handler + self.traceable_fn: _F = traceable_fn + + @property + def polyfill_fn(self) -> _F: + return self.traceable_fn + + def can_constant_fold_through(self): + return getattr( + self.wrapped_fn, "__torch_dynamo_can_constant_fold_through__", False + ) + + def get_function(self): + return self.as_python_constant() + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + from torch._dynamo.variables.builder import SourcelessBuilder + + if self.can_constant_fold_through() and check_unspec_or_constant_args( + args, kwargs + ): + result = ( + self.fn( # use the original function which is faster than the polyfill + *[x.as_python_constant() for x in args], + **{k: v.as_python_constant() for k, v in kwargs.items()}, + ) + ) + return SourcelessBuilder.create(tx, result) + + traceable_function_variable = SourcelessBuilder.create(tx, self.traceable_fn) + return traceable_function_variable.call_function(tx, args, kwargs) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if name == "__call__": + return self.call_function(tx, args, kwargs) + + method = getattr(self.fn, name, None) + assert method is not None, f"Member {name} not found in {self.fn}" + assert is_function(method), f"Member {name} is not callable in {self.fn}" + options = {} + if self.source: + options["source"] = AttrSource(self.source, name) + polyfilled_method_variable = PolyfilledFunctionVariable(method, **options) + return polyfilled_method_variable.call_function(tx, args, kwargs) + + def as_python_constant(self): + return self.fn + + +from torch._higher_order_ops.triton_kernel_wrap import TritonHOPifier + + +class DynamoTritonHOPifier(TritonHOPifier): + def raise_unsupported(self, msg): + raise Unsupported(msg) + + def is_callable(self, maybe_callable): + return isinstance( + maybe_callable, (NestedUserFunctionVariable, UserFunctionVariable) + ) + + def get_value(self, val): + return val.value + + def check_grid(self, grid): + from .lists import BaseListVariable + + if isinstance(grid, BaseListVariable): + return grid.as_proxy() + else: + unimplemented(f"grid for the triton kernel is {type(grid)}") + + def call_grid(self, grid, meta, tx): + meta = {variables.ConstantVariable.create(k): v for k, v in meta.items()} + grid = grid.call_function(tx, [meta], {}) + return grid + + def call_HOP(self, variable, grids, combined_args_raw, tx): + from .constant import ConstantVariable + from .dicts import ConstDictVariable + + combined_args = { + variables.ConstantVariable.create(k): v + for k, v in combined_args_raw.items() + } + + from torch._higher_order_ops.triton_kernel_wrap import ( + kernel_side_table, + triton_kernel_wrapper_mutation, + ) + + # Combine args and kwargs and 
pass as a dict so that if user defined triton + # kernel uses variables as 'grid' or 'kernel', it does not conflict with + # parameters of the wrapper function + constant_args = { + k: v.as_python_constant() + for k, v in combined_args_raw.items() + if isinstance(v, ConstantVariable) + } + non_constant_args = { + k: v + for k, v in combined_args.items() + if not isinstance(v, ConstantVariable) + } + + constant_args_idx = kernel_side_table.add_constant_args(constant_args) + meta = ConstDictVariable(non_constant_args, dict) + tx.output.create_proxy( + "call_function", + triton_kernel_wrapper_mutation, + (), + { + "kernel_idx": variable.kernel_idx, + "constant_args_idx": constant_args_idx, + "grid": grids, + "kwargs": meta.as_proxy(), + }, + ) + + return variables.ConstantVariable( + None, + ) + + +dynamo_triton_hopifier_singleton = DynamoTritonHOPifier() + + +class TritonKernelVariable(VariableTracker): + def __init__(self, kernel, kernel_idx, grid, **kwargs) -> None: + super().__init__(**kwargs) + dynamo_triton_hopifier_singleton.init_variable(self, kernel, kernel_idx, grid) + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + return dynamo_triton_hopifier_singleton.call_triton_kernel( + self, args, kwargs, tx + ) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if name == "__getitem__": + return dynamo_triton_hopifier_singleton.call_getitem(self, args) + elif name == "run": + return dynamo_triton_hopifier_singleton.call_run(self, args, kwargs, tx) + + # Bail out to parent's implementation + return super().call_method(tx, name, args, kwargs) diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/iter.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/iter.py new file mode 100644 index 0000000000000000000000000000000000000000..1f8dac8811f53a451a651cbc189003f7084500bb --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/iter.py @@ -0,0 +1,475 @@ +# mypy: ignore-errors + +import itertools +import operator +import sys +from typing import Dict, List, Optional, TYPE_CHECKING, Union + +from .. 
import polyfills, variables +from ..bytecode_transformation import create_call_function, create_instruction +from ..exc import ( + handle_observed_exception, + ObservedUserStopIteration, + raise_observed_exception, + unimplemented, + UserError, +) +from .base import MutableLocal, VariableTracker +from .constant import ConstantVariable + + +if TYPE_CHECKING: + from torch._dynamo.symbolic_convert import InstructionTranslator + + +MAX_ITERATOR_LIMIT = 100 * 1024 # 100k + + +class ItertoolsVariable(VariableTracker): + def __init__(self, value, **kwargs) -> None: + super().__init__(**kwargs) + self.value = value + + def __repr__(self) -> str: + return f"ItertoolsVariable({self.value})" + + def as_python_constant(self): + return self.value + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if ( + self.value is itertools.product + and not kwargs + and all(arg.has_unpack_var_sequence(tx) for arg in args) + ): + seqs = [arg.unpack_var_sequence(tx) for arg in args] + items = [] + for item in itertools.product(*seqs): + items.append(variables.TupleVariable(list(item))) + return variables.ListIteratorVariable(items, mutable_local=MutableLocal()) + elif self.value is itertools.accumulate: + from .builtin import BuiltinVariable + + if any(key not in ["initial", "func"] for key in kwargs.keys()): + unimplemented( + "Unsupported kwargs for itertools.accumulate: " + f"{','.join(set(kwargs.keys()) - {'initial', 'func'})}" + ) + + acc = kwargs.get("initial") + + if len(args) in [1, 2] and args[0].has_unpack_var_sequence(tx): + seq = args[0].unpack_var_sequence(tx) + + if "func" in kwargs and len(args) == 1: + func = kwargs["func"].call_function + elif len(args) == 2: + func = args[1].call_function + elif len(args) == 1: + # Default to operator.add + func = BuiltinVariable(operator.add).call_function + else: + unimplemented( + "itertools.accumulate can only accept one of: `func` kwarg, pos 2 arg" + ) + else: + unimplemented("Unsupported arguments for itertools.accumulate") + + items = [] + if acc is not None: + items.append(acc) + for item in seq: + if acc is None: + acc = item + else: + try: + acc = func(tx, [acc, item], {}) + except Exception as e: + unimplemented( + f"Unexpected failure in invoking function during accumulate. 
Failed running func {func}({item}{acc})", + from_exc=e, + ) + items.append(acc) + + return variables.ListIteratorVariable(items, mutable_local=MutableLocal()) + elif ( + self.value is itertools.combinations + and not kwargs + and len(args) == 2 + and args[0].has_unpack_var_sequence(tx) + and args[1].is_python_constant() + ): + iterable = args[0].unpack_var_sequence(tx) + r = args[1].as_python_constant() + + items = [] + for item in itertools.combinations(iterable, r): + items.append(variables.TupleVariable(list(item))) + return variables.ListIteratorVariable(items, mutable_local=MutableLocal()) + elif self.value is itertools.groupby: + if any(kw != "key" for kw in kwargs.keys()): + unimplemented( + "Unsupported kwargs for itertools.groupby: " + f"{','.join(set(kwargs.keys()) - {'key'})}" + ) + + def retrieve_const_key(key): + if isinstance(key, variables.SymNodeVariable): + return key.evaluate_expr() + elif isinstance(key, variables.ConstantVariable): + return key.as_python_constant() + else: + unimplemented( + "Unsupported key type for itertools.groupby: " + str(type(key)) + ) + + if len(args) == 1 and args[0].has_unpack_var_sequence(tx): + seq = args[0].unpack_var_sequence(tx) + keyfunc = ( + ( + lambda x: ( + retrieve_const_key( + kwargs.get("key").call_function(tx, [x], {}) + ) + ) + ) + if "key" in kwargs + else None + ) + else: + unimplemented("Unsupported arguments for itertools.groupby") + + result = [] + try: + for k, v in itertools.groupby(seq, key=keyfunc): + result.append( + variables.TupleVariable( + [ + variables.ConstantVariable.create(k) + if variables.ConstantVariable.is_literal(k) + else k, + variables.ListIteratorVariable( + list(v), mutable_local=MutableLocal() + ), + ], + mutable_local=MutableLocal(), + ) + ) + except Exception as e: + unimplemented( + "Unexpected failure when calling itertools.groupby", + from_exc=e, + ) + return variables.ListIteratorVariable(result, mutable_local=MutableLocal()) + elif self.value is itertools.repeat: + if len(args) < 2: + return variables.RepeatIteratorVariable( + *args, mutable_local=MutableLocal() + ) + + from .builder import SourcelessBuilder + + return tx.inline_user_function_return( + SourcelessBuilder.create(tx, polyfills.repeat), args, kwargs + ) + elif self.value is itertools.count: + return variables.CountIteratorVariable(*args, mutable_local=MutableLocal()) + elif self.value is itertools.cycle: + return variables.CycleIteratorVariable(*args, mutable_local=MutableLocal()) + elif self.value is itertools.dropwhile: + return variables.UserFunctionVariable(polyfills.dropwhile).call_function( + tx, args, kwargs + ) + elif self.value is itertools.zip_longest: + return variables.UserFunctionVariable(polyfills.zip_longest).call_function( + tx, args, kwargs + ) + else: + return super().call_function(tx, args, kwargs) + + +class IteratorVariable(VariableTracker): + def __init__(self, **kwargs) -> None: + super().__init__(**kwargs) + + def next_variable(self, tx): + unimplemented("abstract method, must implement") + + # NOTE: only call when unpacking this iterator safely done eagerly! + # Normally, iterators are accessed lazily. 
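To make the NOTE concrete before its examples continue below: eager unpacking is only safe when the whole iterator would be consumed anyway, because forcing a lazy pipeline changes how much work actually runs:

```python
import itertools

calls = []
def f(x):
    calls.append(x)
    return x

# Lazy: islice stops the map pipeline after 5 items...
list(itertools.islice(map(f, range(100)), 5))
print(len(calls))   # 5

# ...whereas eagerly unpacking map(f, ...) first would have called f 100
# times, which is why force_unpack_var_sequence must be used with care.
```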
+ # Example of safe eager unpacking: list(map(f, seq)) + # Example of unsafe eager unpacking: list(islice(map(f, seq), 5)) + def force_unpack_var_sequence(self, tx) -> List[VariableTracker]: + result = [] + while True: + try: + result.append(self.next_variable(tx)) + except ObservedUserStopIteration: + handle_observed_exception(tx) + break + return result + + # don't call force_unpack_var_sequence since it can mutate + # IteratorVariable state! + def has_force_unpack_var_sequence(self, tx) -> bool: + return True + + +class RepeatIteratorVariable(IteratorVariable): + def __init__(self, item: VariableTracker, **kwargs) -> None: + super().__init__(**kwargs) + self.item = item + + # Repeat needs no mutation, clone self + def next_variable(self, tx): + return self.item + + def reconstruct(self, codegen): + codegen.add_push_null( + lambda: codegen.extend_output( + [ + codegen.create_load_python_module(itertools), + codegen.create_load_attr("repeat"), + ] + ) + ) + codegen(self.item) + codegen.extend_output(create_call_function(1, False)) + + +class CountIteratorVariable(IteratorVariable): + def __init__(self, item: int = 0, step: int = 1, **kwargs) -> None: + super().__init__(**kwargs) + if not isinstance(item, VariableTracker): + item = ConstantVariable.create(item) + if not isinstance(step, VariableTracker): + step = ConstantVariable.create(step) + self.item = item + self.step = step + + def next_variable(self, tx): + assert self.mutable_local + old_item = self.item + tx.output.side_effects.mutation(self) + self.item = self.item.call_method(tx, "__add__", [self.step], {}) + return old_item + + def reconstruct(self, codegen): + codegen.add_push_null( + lambda: codegen.extend_output( + [ + codegen.create_load_python_module(itertools), + codegen.create_load_attr("count"), + ] + ) + ) + codegen(self.item) + codegen(self.step) + codegen.extend_output(create_call_function(2, False)) + + +class CycleIteratorVariable(IteratorVariable): + def __init__( + self, + iterator: IteratorVariable, + saved: List[VariableTracker] = None, + saved_index: int = 0, + item: Optional[VariableTracker] = None, + **kwargs, + ) -> None: + if saved is None: + saved = [] + super().__init__(**kwargs) + self.iterator = iterator + self.saved = saved + self.saved_index = saved_index + self.item = item + + def next_variable(self, tx): + assert self.mutable_local + + if self.iterator is not None: + try: + new_item = self.iterator.next_variable(tx) + if len(self.saved) > MAX_ITERATOR_LIMIT: + unimplemented( + "input iterator to itertools.cycle has too many items" + ) + tx.output.side_effects.mutation(self) + self.saved.append(new_item) + self.item = new_item + if self.item is None: + return self.next_variable(tx) + return self.item + except ObservedUserStopIteration: + handle_observed_exception(tx) + self.iterator = None + return self.next_variable(tx) + elif len(self.saved) > 0: + tx.output.side_effects.mutation(self) + self.saved_index = (self.saved_index + 1) % len(self.saved) + return self.item + else: + raise_observed_exception(StopIteration, tx, self) + + +class ZipVariable(IteratorVariable): + """ + Represents zip(*iterables) + """ + + _nonvar_fields = { + "index", + "strict", + *IteratorVariable._nonvar_fields, + } + + def __init__( + self, + iterables: List[Union[List[VariableTracker], VariableTracker]], + strict: bool = False, + **kwargs, + ) -> None: + super().__init__(**kwargs) + assert isinstance(iterables, list) + # can be list[Variable] or VariableTracker (with next_variable implemented) + self.iterables = iterables 
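The itertools branches above replay the calls eagerly at trace time, so their correctness rests on plain CPython semantics. A quick eager reference, runnable on its own (the asserts pin down the behaviors being reproduced; this is an illustrative sketch, not part of the module above):

import itertools
import operator

# accumulate: the default binary op is operator.add; `initial` prepends a seed.
assert list(itertools.accumulate([1, 2, 3])) == [1, 3, 6]
assert list(itertools.accumulate([1, 2, 3], operator.mul)) == [1, 2, 6]
assert list(itertools.accumulate([1, 2, 3], initial=10)) == [10, 11, 13, 16]

# groupby: groups consecutive runs only, keyed by `key` when one is given.
assert [(k, list(v)) for k, v in itertools.groupby("aab")] == [
    ("a", ["a", "a"]),
    ("b", ["b"]),
]

# cycle: replays its source forever, which is why CycleIteratorVariable caps
# the saved buffer at MAX_ITERATOR_LIMIT instead of buffering unboundedly.
assert list(itertools.islice(itertools.cycle([1, 2]), 5)) == [1, 2, 1, 2, 1]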
+ self.index = 0 + self.strict = strict + + def python_type(self): + return zip + + def has_unpack_var_sequence(self, tx) -> bool: + return all( + isinstance(it, list) or it.has_unpack_var_sequence(tx) + for it in self.iterables + ) + + def unpack_var_sequence(self, tx) -> List["VariableTracker"]: + assert self.has_unpack_var_sequence(tx) + iterables = [] + for it in self.iterables: + if isinstance(it, list): + iterables.append(it[self.index :]) + else: + iterables.append(it.unpack_var_sequence(tx)) + kwargs = {"strict": self.strict} if self.strict else {} + zipped = zip(*iterables, **kwargs) + return [variables.TupleVariable(list(var)) for var in zipped] + + def next_variable(self, tx): + assert self.mutable_local + old_index = self.index + args = [] + + def get_item(it): + if isinstance(it, list): + if old_index >= len(it): + raise_observed_exception(StopIteration, tx, self) + return it[old_index] + else: + return it.next_variable(tx) + + try: + for idx, it in enumerate(self.iterables): + args.append(get_item(it)) + except ObservedUserStopIteration: + if self.strict: + if idx == 0: + # all other iterables should be exhausted + for it in self.iterables: + try: + get_item(it) + except ObservedUserStopIteration: + handle_observed_exception(tx) + continue + # no ObservedUserStopIteration - fall through to UserError + break + else: + # all iterables exhausted, raise original error + raise + handle_observed_exception(tx) + raise UserError( + ValueError, + "zip() has one argument of len differing from others", + ) from None + raise + + tx.output.side_effects.mutation(self) + self.index += 1 + return variables.TupleVariable(args) + + def reconstruct_items(self, codegen): + for it in self.iterables: + if isinstance(it, list): + remaining_items = it[self.index :] + codegen.foreach(remaining_items) + codegen.append_output( + create_instruction("BUILD_TUPLE", arg=len(remaining_items)) + ) + else: + codegen(it) + + def reconstruct(self, codegen): + codegen.add_push_null( + lambda: codegen.load_import_from("builtins", "zip"), call_function_ex=True + ) + self.reconstruct_items(codegen) + codegen.append_output( + create_instruction("BUILD_TUPLE", arg=len(self.iterables)) + ) + if sys.version_info >= (3, 10): + codegen.extend_output( + [ + codegen.create_load_const("strict"), + codegen.create_load_const(self.strict), + create_instruction("BUILD_MAP", arg=1), + create_instruction("CALL_FUNCTION_EX", arg=1), + ] + ) + else: + codegen.append_output(create_instruction("CALL_FUNCTION_EX", arg=0)) + + +class MapVariable(ZipVariable): + """ + Represents map(fn, *iterables) + """ + + def __init__( + self, + fn: VariableTracker, + iterables: List[Union[List[VariableTracker], VariableTracker]], + **kwargs, + ) -> None: + super().__init__(iterables, **kwargs) + self.fn = fn + + def python_type(self): + return map + + def has_unpack_var_sequence(self, tx) -> bool: + return False + + def next_variable(self, tx): + args = super().next_variable(tx) + return self.fn.call_function(tx, args.items, {}) + + def reconstruct(self, codegen): + codegen.add_push_null( + lambda: codegen.load_import_from("builtins", "map"), call_function_ex=True + ) + codegen(self.fn) + self.reconstruct_items(codegen) + codegen.extend_output( + [ + create_instruction("BUILD_TUPLE", arg=len(self.iterables) + 1), + create_instruction("CALL_FUNCTION_EX", arg=0), + ] + ) diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/lists.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/lists.py new file mode 100644 index 
0000000000000000000000000000000000000000..30916e0b6996987552fb514f345476a0bfd5a088 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/lists.py @@ -0,0 +1,1010 @@ +# mypy: ignore-errors + +import collections +import functools +import inspect +import operator +import types +from typing import Dict, List, Optional, TYPE_CHECKING + +import torch +import torch.fx +from torch._guards import Source + +from .. import polyfills, variables +from ..bytecode_transformation import create_call_function, create_instruction +from ..exc import raise_observed_exception, unimplemented +from ..source import AttrSource +from ..utils import ( + get_fake_value, + guard_if_dyn, + is_namedtuple, + istype, + iter_contains, + Lit, + namedtuple_fields, + odict_values, + set_example_value, +) +from .base import MutableLocal, VariableTracker +from .constant import ConstantVariable +from .functions import UserFunctionVariable, UserMethodVariable +from .iter import IteratorVariable + + +if TYPE_CHECKING: + from torch._dynamo.symbolic_convert import InstructionTranslator + + +class BaseListVariable(VariableTracker): + @staticmethod + def cls_for_instance(obj): + if is_namedtuple(obj): + return functools.partial(NamedTupleVariable, tuple_cls=type(obj)) + return BaseListVariable.cls_for(type(obj)) + + @staticmethod + def cls_for(obj): + return { + iter: ListIteratorVariable, + list: ListVariable, + slice: SliceVariable, + torch.Size: SizeVariable, + tuple: TupleVariable, + odict_values: ListVariable, + torch.nn.ParameterList: ListVariable, + torch.nn.ModuleList: ListVariable, + collections.deque: DequeVariable, + }[obj] + + def __init__( + self, + items: List[VariableTracker], + **kwargs, + ) -> None: + super().__init__(**kwargs) + assert isinstance(items, list) + assert all(isinstance(x, VariableTracker) for x in items) + self.items: List[VariableTracker] = items + + def _as_proxy(self): + return [x.as_proxy() for x in self.items] + + def modified(self, items, **kwargs): + return type(self)(items, **kwargs) + + @property + def value(self): + return self.as_python_constant() + + def debug_repr_helper(self, prefix, suffix): + return prefix + ", ".join(i.debug_repr() for i in self.items) + suffix + + def as_python_constant(self): + return self.python_type()([x.as_python_constant() for x in self.items]) + + def as_proxy(self): + assert self.python_type() is not SizeVariable + return self.python_type()(self._as_proxy()) + + def getitem_const(self, tx: "InstructionTranslator", arg: VariableTracker): + from .tensor import SymNodeVariable + + if isinstance(arg, SymNodeVariable): + index = arg.sym_num + else: + index = arg.as_python_constant() + + if isinstance(index, slice): + # Set source to None because slicing a list gives a new local + return self.clone( + items=self.items[index], + source=None, + mutable_local=MutableLocal() if self.mutable_local else None, + ) + else: + assert isinstance(index, (int, torch.SymInt)) + return self.items[index] + + def unpack_var_sequence(self, tx): + return list(self.items) + + def call_method( + self, + tx, + name, + args: List["VariableTracker"], + kwargs: Dict[str, "VariableTracker"], + ) -> "VariableTracker": + if name == "__getitem__": + from .tensor import TensorVariable + + assert not kwargs and len(args) == 1 + if isinstance(args[0], TensorVariable): + value = get_fake_value(args[0].as_proxy().node, tx) + if value.constant is not None and value.constant.numel() == 1: + value = variables.ConstantVariable.create(value.constant.item()) + else: + 
unimplemented("__getitem__ with non-constant tensor") + else: + value = args[0] + return self.getitem_const(tx, value) + elif name == "__contains__": + assert len(args) == 1 + assert not kwargs + return iter_contains(self.unpack_var_sequence(tx), args[0], tx) + elif name == "index": + from .builder import SourcelessBuilder + + return tx.inline_user_function_return( + SourcelessBuilder.create(tx, polyfills.index), + [self] + list(args), + kwargs, + ) + + return super().call_method(tx, name, args, kwargs) + + @staticmethod + def list_compare(tx: "InstructionTranslator", op, left, right): + return variables.UserFunctionVariable(polyfills.list_cmp).call_function( + tx, [variables.BuiltinVariable(op), left, right], {} + ) + + +class RangeVariable(BaseListVariable): + def __init__(self, items, **kwargs) -> None: + items_to_map = items + start = variables.ConstantVariable.create(0) + stop = None + step = variables.ConstantVariable.create(1) + + if len(items_to_map) == 1: + (stop,) = items_to_map + elif len(items_to_map) == 2: + start, stop = items_to_map + elif len(items_to_map) == 3: + start, stop, step = items_to_map + else: + raise AssertionError + + assert stop is not None + super().__init__([start, stop, step], **kwargs) + + def debug_repr(self): + return self.debug_repr_helper("range(", ")") + + def python_type(self): + return range + + def start(self): + return self.items[0].as_python_constant() + + def stop(self): + return self.items[1].as_python_constant() + + def step(self): + return self.items[2].as_python_constant() + + def range_length(self): + lo = self.start() + hi = self.stop() + step = self.step() + + assert step != 0 + if step > 0 and lo < hi: + return 1 + (hi - 1 - lo) // step + elif step < 0 and lo > hi: + return 1 + (lo - 1 - hi) // (0 - step) + else: + return 0 + + def _get_slice_indices(self, length, slice): + step_is_negative = 0 + + if slice.step is None: + step = 1 + step_is_negative = False + else: + step = slice.step + step_is_negative = slice.step < 0 + + # Find lower and upper bounds for start and stop. + if step_is_negative: + lower = -1 + upper = length + lower + else: + lower = 0 + upper = length + + # Compute start + if slice.start is None: + start = upper if step_is_negative else lower + else: + start = slice.start + + if start < 0: + start += length + if start < lower: + start = lower + else: + if start > upper: + start = upper + + # Compute stop. 
+ if slice.stop is None: + stop = lower if step_is_negative else upper + + else: + stop = slice.stop + + if stop < 0: + stop += length + if stop < lower: + stop = lower + else: + if stop > upper: + stop = upper + + return [start, stop, step] + + def apply_index(self, index): + length = self.range_length() + if index < 0: + index = length + index + + if index < 0 or index >= length: + raise IndexError(f"index {index} is out of range") + + return variables.ConstantVariable.create(self.start() + (index * self.step())) + + def apply_slice(self, slice): + (slice_start, slice_stop, slice_step) = self._get_slice_indices( + self.range_length(), slice + ) + + def compute_item(index): + return self.start() + (index * self.step()) + + sub_step = self.step() * slice_step + sub_start = compute_item(slice_start) + sub_stop = compute_item(slice_stop) + + result = RangeVariable( + [ + variables.ConstantVariable.create(x) + for x in [sub_start, sub_stop, sub_step] + ], + mutable_local=MutableLocal() if self.mutable_local else None, + ) + return result + + def as_python_constant(self): + return range(*[x.as_python_constant() for x in self.items]) + + def getitem_const(self, tx: "InstructionTranslator", arg: VariableTracker): + # implementations mimics https://github.com/python/cpython/blob/main/Objects/rangeobject.c + index = arg.as_python_constant() + + if isinstance(index, slice): + return self.apply_slice(index) + else: + return self.apply_index(index) + + def as_proxy(self): + return self.python_type()(*self._as_proxy()) + + def unpack_var_sequence(self, tx=None): + return [variables.ConstantVariable.create(x) for x in self.as_python_constant()] + + def reconstruct(self, codegen): + assert "range" not in codegen.tx.f_globals + codegen.add_push_null( + lambda: codegen.append_output(codegen.create_load_python_module(range)) + ) + codegen.foreach(self.items) + codegen.extend_output(create_call_function(3, False)) + + def var_getattr(self, tx: "InstructionTranslator", name): + fields = ["start", "stop", "step"] + if name not in fields: + unimplemented(f"range.{name}") + return self.items[fields.index(name)] + + +class CommonListMethodsVariable(BaseListVariable): + """ + Implement methods common to List and other List-like things + """ + + def call_method( + self, + tx, + name, + args: List["VariableTracker"], + kwargs: Dict[str, "VariableTracker"], + ) -> "VariableTracker": + from .tensor import SymNodeVariable + + if name == "append" and self.mutable_local: + assert not kwargs + (arg,) = args + tx.output.side_effects.mutation(self) + self.items.append(arg) + return ConstantVariable.create(None) + elif ( + name == "extend" + and self.mutable_local + and args + and args[0].has_force_unpack_var_sequence(tx) + ): + assert not kwargs + (arg,) = args + seq = arg.force_unpack_var_sequence(tx) + tx.output.side_effects.mutation(self) + self.items.extend(seq) + return ConstantVariable.create(None) + elif name == "insert" and self.mutable_local: + assert not kwargs + idx, value = args + if isinstance(idx, SymNodeVariable): + const_idx = idx.evaluate_expr() + else: + const_idx = idx.as_python_constant() + tx.output.side_effects.mutation(self) + self.items.insert(const_idx, value) + return ConstantVariable.create(None) + elif name == "pop" and self.mutable_local: + assert not kwargs + tx.output.side_effects.mutation(self) + return self.items.pop(*[a.as_python_constant() for a in args]) + elif name == "clear" and self.mutable_local: + assert not kwargs and not args + tx.output.side_effects.mutation(self) + 
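RangeVariable mirrors CPython's rangeobject.c for slicing, so a cheap property test is that slicing a range must agree with slicing its materialized list. A minimal check of the _get_slice_indices/apply_slice arithmetic above, assuming standard CPython semantics:

for r in (range(10), range(2, 20, 3), range(20, 2, -3)):
    for s in (slice(None), slice(1, None, 2), slice(-1, None, -1), slice(8, 2, -2)):
        # range.__getitem__ with a slice and list slicing must produce the
        # same elements for every start/stop/step combination.
        assert list(r[s]) == list(r)[s]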
self.items.clear() + return ConstantVariable.create(None) + elif ( + name == "__setitem__" + and self.mutable_local + and args + and args[0].is_python_constant() + ): + assert not kwargs + key, value = args + tx.output.side_effects.mutation(self) + if isinstance(key, SliceVariable): + self.items[key.as_python_constant()] = list(value.items) + else: + self.items[key.as_python_constant()] = value + return ConstantVariable.create(None) + elif name == "copy": + # List copy() doesn't have args and kwargs + assert not kwargs + assert not args + items = list(self.items) + return self.modified(items, mutable_local=MutableLocal()) + elif name == "reverse" and self.mutable_local: + assert not kwargs + assert not args + self.items.reverse() + tx.output.side_effects.mutation(self) + return ConstantVariable.create(None) + else: + return super().call_method(tx, name, args, kwargs) + + +class ListVariable(CommonListMethodsVariable): + def python_type(self): + return list + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(length={len(self.items)})" + + def debug_repr(self): + return self.debug_repr_helper("[", "]") + + def reconstruct(self, codegen): + codegen.foreach(self.items) + codegen.append_output(create_instruction("BUILD_LIST", arg=len(self.items))) + + def call_method( + self, + tx, + name, + args: List["VariableTracker"], + kwargs: Dict[str, "VariableTracker"], + ) -> "VariableTracker": + if ( + name == "__setitem__" + and self.mutable_local + and args + and args[0].is_python_constant() + ): + assert not kwargs + key, value = args + tx.output.side_effects.mutation(self) + if isinstance(key, SliceVariable): + if not value.has_force_unpack_var_sequence(tx): + unimplemented( + f"Missing dynamo support for expanding {value} into a list for slice assignment." 
+ ) + self.items[key.as_python_constant()] = value.force_unpack_var_sequence( + tx + ) + else: + self.items[key.as_python_constant()] = value + return ConstantVariable.create(None) + else: + return super().call_method(tx, name, args, kwargs) + + def var_getattr(self, tx, name): + if name == "__class__": + source = AttrSource(self.source, name) if self.source else None + class_type = self.python_type() + if class_type is list: + return variables.BuiltinVariable(class_type, source=source) + else: + return variables.UserDefinedClassVariable(class_type, source=source) + return super().var_getattr(tx, name) + + def call_hasattr(self, tx: "InstructionTranslator", name: str) -> "VariableTracker": + if self.python_type() is not list: + return super().call_hasattr(tx, name) + return variables.ConstantVariable.create(hasattr([], name)) + + +class DequeVariable(CommonListMethodsVariable): + def python_type(self): + return collections.deque + + def debug_repr(self): + return self.debug_repr_helper("deque([", "])") + + def reconstruct(self, codegen): + assert "deque" not in codegen.tx.f_globals + codegen.add_push_null( + lambda: codegen.append_output( + codegen.create_load_python_module(collections.deque) + ) + ) + codegen.foreach(self.items) + codegen.extend_output( + [ + create_instruction("BUILD_LIST", arg=len(self.items)), + *create_call_function(1, False), + ] + ) + + def call_method( + self, + tx, + name, + args: List["VariableTracker"], + kwargs: Dict[str, "VariableTracker"], + ) -> "VariableTracker": + if ( + name == "__setitem__" + and self.mutable_local + and args + and args[0].is_python_constant() + ): + assert not kwargs + key, value = args + assert key.is_python_constant() and isinstance( + key.as_python_constant(), int + ) + tx.output.side_effects.mutation(self) + self.items[key.as_python_constant()] = value + return ConstantVariable.create(None) + elif ( + name == "extendleft" + and self.mutable_local + and args[0].has_force_unpack_var_sequence(tx) + ): + assert not kwargs + + (arg,) = args + prefix = arg.force_unpack_var_sequence(tx) + prefix.reverse() + tx.output.side_effects.mutation(self) + self.items = prefix + list(self.items) + return ConstantVariable.create(None) + elif name == "popleft" and self.mutable_local: + assert not args + assert not kwargs + item = self.items[0] + tx.output.side_effects.mutation(self) + self.items = self.items[1:] + return item + elif name == "appendleft" and self.mutable_local: + assert not kwargs + tx.output.side_effects.mutation(self) + self.items = [args[0]] + list(self.items) + return ConstantVariable.create(None) + else: + return super().call_method(tx, name, args, kwargs) + + +class TupleVariable(BaseListVariable): + def python_type(self): + return tuple + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(length={len(self.items)})" + + def debug_repr(self): + return self.debug_repr_helper("(", ")") + + def reconstruct(self, codegen): + codegen.foreach(self.items) + codegen.append_output(create_instruction("BUILD_TUPLE", arg=len(self.items))) + + def call_method( + self, + tx, + name, + args: List["VariableTracker"], + kwargs: Dict[str, "VariableTracker"], + ) -> "VariableTracker": + return super().call_method(tx, name, args, kwargs) + + def var_getattr(self, tx, name): + if name == "__class__": + source = AttrSource(self.source, name) if self.source else None + class_type = self.python_type() + if class_type is tuple: + return variables.BuiltinVariable(class_type, source=source) + else: + return 
variables.UserDefinedClassVariable(class_type, source=source) + return super().var_getattr(tx, name) + + def call_hasattr(self, tx: "InstructionTranslator", name: str) -> "VariableTracker": + if self.python_type() is not tuple: + return super().call_hasattr(tx, name) + return variables.ConstantVariable.create(hasattr((), name)) + + +class SizeVariable(TupleVariable): + """torch.Size(...)""" + + _nonvar_fields = { + "proxy", + *TupleVariable._nonvar_fields, + } + + def __init__( + self, + items: List[VariableTracker], + proxy: Optional[torch.fx.Proxy] = None, + **kwargs, + ) -> None: + self.proxy = proxy + super().__init__(items, **kwargs) + + def debug_repr(self): + return self.debug_repr_helper("torch.Size([", "])") + + def python_type(self): + return torch.Size + + def as_proxy(self): + if self.proxy is not None: + return self.proxy + + # torch.Size needs special handling. Normally, we pun a list-like + # container to directly contain Proxy/Node objects from FX, and FX + # knows to look inside containers (via map_aggregate). But torch.Size + # is weird; although it subclasses from tuple, it doesn't allow + # members which aren't int-like (rejecting Proxy and Node). This + # means we can't use the normal representation trick + # torch.Size([proxy0, proxy1]). I looked into seeing if I could + # relax torch.Size in PyTorch proper, but if torch.Size constructor + # sees a type that it doesn't recognize, it will try to call + # __index__() on it, so there is no BC way to actually change this + # behavior (though it occurs to me that I could have just added a + # YOLO no checking alternate constructor.) + # + # To work around this problem, I represent a torch.Size proxy as + # a straight up proxy, that would have been constructed by taking + # the constituent proxies as arguments. This trick can be generally + # used for any construct that we need a proxy for but we can't + # directly represent as an aggregate; I don't see very many examples + # of this in torchdynamo though! + + # Look for a proxy. 
If there are none, do the legacy behavior + tracer = None + proxies = self._as_proxy() + for proxy in proxies: + if isinstance(proxy, torch.fx.Proxy): + tracer = proxy.tracer + break + + if tracer is None: + return torch.Size(proxies) + + proxy = tracer.create_proxy("call_function", torch.Size, (proxies,), {}) + set_example_value( + proxy.node, + torch.Size( + [ + p.node.meta["example_value"] if not isinstance(p, int) else p + for p in proxies + ] + ), + ) + return proxy + + def reconstruct(self, codegen): + codegen.add_push_null(lambda: codegen.load_import_from("torch", "Size")) + codegen.foreach(self.items) + build_torch_size = [ + create_instruction("BUILD_TUPLE", arg=len(self.items)), + ] + create_call_function(1, False) + codegen.extend_output(build_torch_size) + + def unpack_var_sequence(self, tx): + return list(self.items) + + def numel(self, tx): + from .builtin import BuiltinVariable + from .tensor import SymNodeVariable + + const_result = 1 + sym_sizes = [] + + for v in self.items: + if isinstance(v, ConstantVariable): + const_result *= v.value + else: + assert isinstance(v, SymNodeVariable), type(v) + # Delay proxy calls until we know it will be necessary + sym_sizes.append(v) + + result = ConstantVariable.create(const_result) + if sym_sizes and const_result == 1: + # Skip multiplying by 1 + result, *sym_sizes = sym_sizes + + if not sym_sizes or const_result == 0: + return result + + mul = BuiltinVariable(operator.mul) + for v in sym_sizes: + result = mul.call_function(tx, [result, v], {}) + return result + + def call_method( + self, + tx, + name, + args: List["VariableTracker"], + kwargs: Dict[str, "VariableTracker"], + ) -> "VariableTracker": + if name == "__getitem__": + assert not kwargs and len(args) == 1 + out = self.get_item_dyn(tx, args[0]) + return out + elif name == "numel": + assert not args and not kwargs + return self.numel(tx) + + return super().call_method(tx, name, args, kwargs) + + def get_item_dyn(self, tx: "InstructionTranslator", arg: VariableTracker): + from .tensor import SymNodeVariable + + if isinstance(arg, SymNodeVariable): + index = arg.sym_num + else: + index = arg.as_python_constant() + if isinstance(index, slice): + return SizeVariable(self.items[index]) + else: + assert isinstance(index, (int, torch.SymInt)) + return self.items[index] + + def call_hasattr(self, tx: "InstructionTranslator", name: str) -> "VariableTracker": + return variables.ConstantVariable.create(hasattr(torch.Size, name)) + + +class NamedTupleVariable(TupleVariable): + _nonvar_fields = { + "tuple_cls", + *TupleVariable._nonvar_fields, + } + + def __init__(self, items, tuple_cls, **kwargs) -> None: + super().__init__(items, **kwargs) + self.tuple_cls = tuple_cls + + def debug_repr(self): + return repr(self.tuple_cls(*(Lit(x.debug_repr()) for x in self.items))) + + def python_type(self): + return self.tuple_cls + + def as_python_constant(self): + return self.python_type()(*[x.as_python_constant() for x in self.items]) + + def as_proxy(self): + assert self.python_type() is not SizeVariable + return self.python_type()(*self._as_proxy()) + + def reconstruct(self, codegen): + create_fn = getattr(self.tuple_cls, "_make", self.tuple_cls) + codegen.add_push_null( + lambda: codegen.append_output(codegen._create_load_const(create_fn)) + ) + codegen.foreach(self.items) + codegen.extend_output( + [ + create_instruction("BUILD_TUPLE", arg=len(self.items)), + ] + + create_call_function(1, False) + ) + + def var_getattr(self, tx: "InstructionTranslator", name): + def check_and_create_method(): + 
method = inspect.getattr_static(self.tuple_cls, name, None) + if isinstance(method, classmethod): + # We need the unbounded cls method to avoid the inline __self__ + return UserMethodVariable( + method.__func__, + variables.UserDefinedClassVariable(self.tuple_cls), + ) + elif isinstance(method, staticmethod): + return UserFunctionVariable(method.__func__) + elif inspect.isfunction(method): + return UserMethodVariable(method, self) + else: + return None + + fields = namedtuple_fields(self.tuple_cls) + if name not in fields: + method = check_and_create_method() + if not method: + return super().var_getattr(tx, name) + return method + return self.items[fields.index(name)] + + def call_hasattr(self, tx: "InstructionTranslator", name: str) -> "VariableTracker": + return variables.ConstantVariable.create(hasattr(self.tuple_cls, name)) + + +class SliceVariable(BaseListVariable): + def __init__(self, items, **kwargs) -> None: + items_to_map = items + start, stop, step = [variables.ConstantVariable.create(None)] * 3 + + if len(items_to_map) == 1: + (stop,) = items_to_map + elif len(items_to_map) == 2: + start, stop = items_to_map + elif len(items_to_map) == 3: + start, stop, step = items_to_map + else: + raise AssertionError + + if isinstance(start, variables.TensorVariable) or isinstance( + stop, variables.TensorVariable + ): + unimplemented("Dynamic slicing on data-dependent value is not supported") + + super().__init__([start, stop, step], **kwargs) + + def debug_repr(self): + return self.debug_repr_helper("slice(", ")") + + def as_proxy(self): + return slice(*self._as_proxy()) + + def python_type(self): + return slice + + def as_python_constant(self): + return slice(*[guard_if_dyn(x) for x in self.items]) + + def reconstruct(self, codegen): + codegen.foreach(self.items) + codegen.append_output(create_instruction("BUILD_SLICE", arg=len(self.items))) + + def var_getattr(self, tx: "InstructionTranslator", name): + fields = ["start", "stop", "step"] + if name not in fields: + unimplemented(f"slice.{name}") + return self.items[fields.index(name)] + + +class ListIteratorVariable(IteratorVariable): + _nonvar_fields = { + "index", + *IteratorVariable._nonvar_fields, + } + + def __init__(self, items, index: int = 0, **kwargs) -> None: + super().__init__(**kwargs) + assert isinstance(items, list) + # Removing this check as it slows things down too much + # https://github.com/pytorch/pytorch/pull/87533#issuecomment-1287574492 + + # assert all(isinstance(x, VariableTracker) for x in items) + self.items = items + self.index = index + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(length={len(self.items)}, index={repr(self.index)})" + + def next_variable(self, tx): + assert self.mutable_local + old_index = self.index + if old_index >= len(self.items): + raise_observed_exception(StopIteration, tx, self) + + tx.output.side_effects.mutation(self) + self.index += 1 + return self.items[old_index] + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ): + if name == "__contains__": + assert len(args) == 1 + assert not kwargs + return iter_contains(self.items[self.index :], args[0], tx) + + return super().call_method(tx, name, args, kwargs) + + def python_type(self): + return type(iter([])) + + def as_python_constant(self): + if self.index > 0: + raise NotImplementedError + return iter([x.as_python_constant() for x in self.items]) + + def unpack_var_sequence(self, tx): + return list(self.items[self.index :]) + + def 
force_unpack_var_sequence(self, tx) -> List[VariableTracker]: + return self.unpack_var_sequence(tx) + + def reconstruct(self, codegen): + remaining_items = self.items[self.index :] + codegen.foreach(remaining_items) + codegen.extend_output( + [ + create_instruction("BUILD_TUPLE", arg=len(remaining_items)), + create_instruction("GET_ITER"), + ] + ) + + +class TupleIteratorVariable(ListIteratorVariable): + pass + + +class RestrictedListSubclassVariable(ListVariable): + """ + This is a special case of UserDefinedObjectVariable where: + 1) The user subclasses list + 2) None of the list methods are overriden, merely some new methods are added + + In these cases, we can prevent graph breaks by not using the general + UserDefinedObjectVariable machinery and instead treating it like + a ListVariable. + """ + + _nonvar_fields = {"user_cls", "user_cls_source", *ListVariable._nonvar_fields} + _allowed_names = { + "__call__", + "__module__", + "__dict__", + "__doc__", + "__name__", + "__qualname__", + } + _disallowed_names = { + "__getattribute__", + "__getattr__", + "__setattr__", + } + + @classmethod + def _is_non_conflicting_subclass( + cls, + user_cls: type, + python_cls: type, + ): + """Ensures user_cls inherits from python_cls (e.g. list) and does not override any methods on python_cls""" + if ( + not istype(user_cls, type) + or user_cls.__bases__ != (python_cls,) + or user_cls.__mro__ != (user_cls, python_cls, object) + ): + return False # not subclass + return not any( + hasattr(python_cls, name) or name in cls._disallowed_names + for name in set(user_cls.__dict__.keys()) - cls._allowed_names + ) + + @classmethod + def is_matching_cls(cls, user_cls: type): + return cls._is_non_conflicting_subclass(user_cls, list) + + def __init__( + self, items, *, user_cls: type, user_cls_source: Source, **kwargs + ) -> None: + super().__init__(items=items, **kwargs) + self.user_cls = user_cls + self.user_cls_source = user_cls_source + assert istype(user_cls, type) + assert isinstance(user_cls_source, Source) + + def debug_repr(self): + # The constructor is safe as no methods, including __init__, are + # allowed to be overridden + # NB: This is guaranteed to print like a list, as __repr__ cannot be + # overridden, this is... well, it's OK I guess (consistent with + # eager), but it could be misleading. You will have to query type + # instead for details. 
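The subclass test that RestrictedListSubclassVariable relies on can be sketched in a few lines. The version below is simplified (it only inspects non-dunder names, and the class names are illustrative) but captures the rule: inherit directly from list, and add new methods without overriding any of list's own.

def is_non_conflicting_list_subclass(user_cls):
    # Direct subclass of list with the trivial MRO ...
    if user_cls.__bases__ != (list,) or user_cls.__mro__ != (user_cls, list, object):
        return False
    # ... whose added (non-dunder) names never shadow a list method.
    added = {n for n in vars(user_cls) if not (n.startswith("__") and n.endswith("__"))}
    return not any(hasattr(list, n) for n in added)

class Totals(list):
    def total(self):  # new method: fine
        return sum(self)

class Doubler(list):
    def append(self, x):  # overrides list.append: disqualified
        super().append(2 * x)

assert is_non_conflicting_list_subclass(Totals)
assert not is_non_conflicting_list_subclass(Doubler)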
+ return repr(self.user_cls([Lit(x.debug_repr()) for x in self.items])) + + def python_type(self): + return self.user_cls + + def as_proxy(self): + return [x.as_proxy() for x in self.items] + + def as_python_constant(self): + raise NotImplementedError + + def is_python_constant(self): + return False + + @property + def value(self): + raise AttributeError("value") + + def modified(self, items, **kwargs): + return type(self)( + items, + user_cls=self.user_cls, + user_cls_source=self.user_cls_source, + **kwargs, + ) + + def reconstruct(self, codegen): + codegen.add_push_null(lambda: codegen(self.user_cls_source)) + super().reconstruct(codegen) + codegen.extend_output(create_call_function(1, False)) + + def call_method( + self, + tx, + name, + args: List["VariableTracker"], + kwargs: Dict[str, "VariableTracker"], + ) -> "VariableTracker": + if name in self.user_cls.__dict__: + method = self.user_cls.__dict__[name] + if isinstance(method, types.FunctionType): + # inline the method + source = AttrSource(self.user_cls_source, name) + return UserMethodVariable(method, self, source=source).call_function( + tx, args, kwargs + ) + unimplemented( + f"RestrictedListSubclassVariable method {self.user_cls.__name__}.{name}" + ) + return super().call_method(tx, name, args, kwargs) + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + return self.call_method(tx, "__call__", args, kwargs) diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/misc.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..2b62c0c1acc1162c76dba5b7d567a020ab808a5b --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/misc.py @@ -0,0 +1,1723 @@ +# mypy: ignore-errors +import collections +import dataclasses +import functools +import inspect +import itertools +import random +import re +import sys +import types +from typing import Dict, List, Optional, TYPE_CHECKING + +import torch._C +import torch._numpy as tnp +import torch.utils._pytree as pytree + +from .. import config, variables +from ..bytecode_transformation import create_call_function, create_instruction +from ..create_parameter_op import do_not_convert_to_tracable_parameter +from ..exc import unimplemented +from ..guards import GuardBuilder, install_guard +from ..mutation_guard import unpatched_nn_module_init +from ..source import ( + AttrSource, + DefaultsSource, + GetItemSource, + ODictGetItemSource, + TypeSource, +) +from ..utils import ( + check_unspec_or_constant_args, + identity, + is_tensor_base_attr_getter, + proxy_args_kwargs, + set_example_value, +) +from .base import VariableTracker +from .functions import ( + NestedUserFunctionVariable, + UserFunctionVariable, + UserMethodVariable, + wrap_bound_arg, +) +from .user_defined import call_random_fn, is_standard_setattr, UserDefinedObjectVariable + + +if TYPE_CHECKING: + from torch._dynamo.symbolic_convert import InstructionTranslator + + +class NO_SUCH_SUBOBJ: + pass + + +class SuperVariable(VariableTracker): + _nonvar_fields = { + "specialized", + *VariableTracker._nonvar_fields, + } + + def __init__(self, typevar, objvar=None, specialized=False, **kwargs) -> None: + super().__init__(**kwargs) + # typevar is the fist argument to super(). 
In the case where no argument + # is provided to super(), it is the __class__ object where + # the super() function is being called + self.typevar = typevar + # objvar here must be an instance or subtype of typevar. + # In the case where super() is called without arguments, it is the first argument + # to the current function where super() is called from (self for regular method, + # cls for a classmethod) + self.objvar = objvar + self.specialized = specialized # directly get attr from self.typevar if true + + def reconstruct(self, codegen): + codegen.add_push_null(lambda: codegen(variables.BuiltinVariable(super))) + codegen(self.typevar) + if self.objvar is not None: + codegen(self.objvar) + codegen.extend_output(create_call_function(2, False)) + else: + codegen.extend_output(create_call_function(1, False)) + + def _resolved_getattr_and_source(self, tx: "InstructionTranslator", name): + assert self.objvar, "1-arg super not implemented" + if self.specialized: + return getattr(self.typevar.as_python_constant(), name) + search_type = self.typevar.as_python_constant() + + # The rest of this function does two things: + # - Walk the mro to find where the attribute comes from to be + # able to provide accurate source + # - Call the getattr to get the object + + # Find the class object, where the function lives. + # When objvar is "self", use type(self), when objvar is "cls", use it as-is + type_to_use = self.objvar.python_type() + type_to_use_source = ( + TypeSource(self.objvar.source) if self.objvar.source else None + ) + if issubclass(type_to_use, type): + type_to_use = self.objvar.value + type_to_use_source = self.objvar.source + + source = None + resolved_class = None + resolved_attr = None + search_mro = type_to_use.__mro__ + + try: + start_index = search_mro.index(search_type) + 1 + except ValueError: + # Corner case where the typevar is not in the mro of the objvar + # https://github.com/python/cpython/blob/3.11/Objects/typeobject.c#L8843-L8844 + return getattr(super(search_type, type_to_use), name), None + # Implemented based on https://github.com/python/cpython/blob/3.11/Objects/typeobject.c#L8812 + # super has its getattro implementation. The key point is that instead of calling getattr, it checks the + # attribute in the class __dict__ + for index in range(start_index, len(search_mro)): + # Dont call getattr, just check the __dict__ of the class + if resolved_getattr := search_mro[index].__dict__.get(name, NO_SUCH_SUBOBJ): + if resolved_getattr is not NO_SUCH_SUBOBJ: + # Equivalent of something like type(L['self']).__mro__[1].attr_name + if type_to_use_source: + source = AttrSource( + GetItemSource( + AttrSource(type_to_use_source, "__mro__"), index + ), + name, + ) + return resolved_getattr, source + + unimplemented("Unable to resolve super getattr") + + def var_getattr(self, tx: "InstructionTranslator", name: str) -> "VariableTracker": + # Check if getattr is a constant. If not, delay the actual work by + # wrapping the result in GetAttrVariable. Mostly super is called with a + # method, so most of the work is delayed to call_function. + # + # We could have just implemented a const_getattr. However, super is + # special when it comes to finding sources. Compared to other VTs, super + # requires the attr name to walk the mro and find the actual source (and + # not just AttrSource). 
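Stripped of source tracking, the lookup above is the textbook super() algorithm: start just past search_type in the object's MRO and return the first class __dict__ that holds the name. A standalone sketch (super_lookup is an illustrative name), assuming ordinary CPython attribute semantics:

def super_lookup(search_type, obj, name):
    mro = type(obj).__mro__
    start = mro.index(search_type) + 1  # skip search_type itself
    for klass in mro[start:]:
        if name in klass.__dict__:  # check __dict__ directly, never getattr
            return klass.__dict__[name]
    raise AttributeError(name)

class A:
    def f(self):
        return "A.f"

class B(A):
    def f(self):
        return "B.f"

# super(B, B()).f resolves to A's implementation, matching super_lookup.
assert super_lookup(B, B(), "f") is A.__dict__["f"]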
+ value, source = self._resolved_getattr_and_source(self, name) + if not variables.ConstantVariable.is_literal(value): + return GetAttrVariable(self, name) + if source: + install_guard(source.make_guard(GuardBuilder.CONSTANT_MATCH)) + return variables.ConstantVariable.create(value, source=source) + return variables.ConstantVariable.create(value) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + inner_fn, source = self._resolved_getattr_and_source(self, name) + if inner_fn is object.__init__: + return LambdaVariable(identity) + elif inner_fn is torch.nn.Module.__init__: + objvar = self.objvar + from ..side_effects import AttributeMutationNew + + if ( + isinstance(objvar, variables.UserDefinedObjectVariable) + and isinstance(objvar.mutable_local, AttributeMutationNew) + and not (args or kwargs) + ): + with do_not_convert_to_tracable_parameter(): + return variables.UserFunctionVariable( + unpatched_nn_module_init, source=source + ).call_function(tx, [self.objvar] + args, kwargs) + else: + unimplemented("super() nn.Module.__init__") + elif self.objvar.source and inner_fn is object.__new__: + return tx.output.side_effects.track_object_new_from_user_defined_class( + self.objvar + ) + elif isinstance(inner_fn, staticmethod) and isinstance( + inner_fn.__func__, types.FunctionType + ): + return variables.UserFunctionVariable( + inner_fn.__func__, source=source + ).call_function(tx, args, kwargs) + elif isinstance(inner_fn, classmethod) and isinstance( + inner_fn.__func__, types.FunctionType + ): + return variables.UserMethodVariable( + inner_fn.__func__, self.objvar, source=source + ).call_function(tx, args, kwargs) + elif isinstance(inner_fn, types.FunctionType): + return variables.UserFunctionVariable( + inner_fn, source=source + ).call_function(tx, [self.objvar] + args, kwargs) + elif isinstance(inner_fn, types.MethodType): + return variables.UserMethodVariable( + inner_fn.__func__, self.objvar, source=source + ).call_function(tx, args, kwargs) + elif ( + inner_fn is collections.OrderedDict.__getitem__ + and isinstance(self.objvar, variables.UserDefinedObjectVariable) + and self.objvar.source + and len(args) == 1 + and len(kwargs) == 0 + and args[0].is_python_constant() + ): + from .builder import VariableBuilder + + key = args[0].as_python_constant() + return VariableBuilder(tx, ODictGetItemSource(self.objvar.source, key))( + collections.OrderedDict.__getitem__(self.objvar.value, key) + ) + elif inner_fn in ( + collections.OrderedDict.__setitem__, + object.__setattr__, + ) and isinstance(self.objvar, variables.CustomizedDictVariable): + assert not kwargs and len(args) == 2 + return super(variables.CustomizedDictVariable, self.objvar).call_method( + tx, "__setitem__", args, kwargs + ) + elif inner_fn is collections.OrderedDict.__getitem__ and isinstance( + self.objvar, variables.CustomizedDictVariable + ): + return super(variables.CustomizedDictVariable, self.objvar).call_method( + tx, "__getitem__", args, kwargs + ) + elif is_standard_setattr(inner_fn) and isinstance( + self.objvar, UserDefinedObjectVariable + ): + return self.objvar.method_setattr_standard(tx, *args, **kwargs) + elif inner_fn is object.__delattr__: + attr = args[0] + try: + attr = attr.as_python_constant() + except NotImplementedError: + unimplemented(f"non-const delattr attr: {attr}") + if not tx.output.side_effects.is_attribute_mutation(self.objvar): + unimplemented(f"delattr({self.objvar}, {attr}, ...)") + + 
tx.output.side_effects.store_attr( + self.objvar, attr, variables.DeletedVariable() + ) + return variables.ConstantVariable(None) + + unimplemented(f"non-function or method super: {inner_fn}") + + +class ExceptionVariable(VariableTracker): + def __init__(self, exc_type, args, **kwargs) -> None: + super().__init__(**kwargs) + self.exc_type = exc_type + self.args = args + + def reconstruct(self, codegen): + codegen.add_push_null( + lambda: codegen.load_import_from("builtins", self.exc_type.__name__) + ) + codegen.foreach(self.args) + codegen.call_function(len(self.args), False) + + +class UnknownVariable(VariableTracker): + """ + It could be anything! + """ + + +class DelayGraphBreakVariable(UnknownVariable): + """ + Used to insert a dummy variable in the stack to do the graph break at CALL_FUNCTION. + """ + + +class ComptimeVariable(VariableTracker): + """ + This variable is special, it lets you execute arbitrary code at + Dynamo compile time + """ + + def reconstruct(self, codegen): + raise NotImplementedError("comptime is special form") + + def var_getattr(self, tx: "InstructionTranslator", name: str) -> "VariableTracker": + from ..comptime import comptime + + # To support the comptime.print_graph convenience accessors + from .functions import UserFunctionVariable + + return UserFunctionVariable( + getattr(comptime, name), source=AttrSource(self.source, name) + ) + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + from ..comptime import ComptimeContext + + # TODO: support an expression form as well + + assert not kwargs + # Second argument is runtime lambda, ignored + assert len(args) <= 2 + fn = args[0] + if isinstance(fn, UserFunctionVariable): + fn.get_function()(ComptimeContext(tx)) + elif isinstance(fn, NestedUserFunctionVariable): + # We have to manually bind the freevars ourselves + code = fn.get_code() + assert not fn.closure, ( + "comptime function must not have free variables, " + f"but these variables were free: {code.co_freevars}" + ) + func = types.FunctionType( + code, + fn.f_globals, + fn.fn_name.as_python_constant(), + tuple(fn.defaults.items) if fn.defaults else None, + # We could automatically promote free variables into + # ComptimeVar but this is confusing if you access + # a free variable that we actually DO have the runtime + # value for + # tuple(make_cell(ComptimeVar(i)) for i in fn.closure.items) + (), + ) + func(ComptimeContext(tx)) + else: + raise RuntimeError(f"unsupported argument to comptime: {type(fn)}") + + return variables.ConstantVariable.create(None) + + +class ClosureVariable(UnknownVariable): + _nonvar_fields = { + "name", + *UnknownVariable._nonvar_fields, + } + + def __init__(self, name, **kwargs) -> None: + super().__init__(**kwargs) + self.name = name + + def reconstruct(self, codegen): + codegen.append_output(codegen.create_load_closure(self.name)) + + +# closure variable created by an inlined function +class InlinedClosureVariable(UnknownVariable): + _nonvar_fields = { + "name", + *UnknownVariable._nonvar_fields, + } + + def __init__(self, name, **kwargs) -> None: + super().__init__(**kwargs) + self.name = name + + def reconstruct(self, codegen): + codegen.append_output(codegen.create_load_closure(self.name)) + + +class NewCellVariable(VariableTracker): + def __init__(self, **kwargs) -> None: + super().__init__(**kwargs) + + +class NewGlobalVariable(VariableTracker): + def __init__(self, **kwargs) -> None: + super().__init__(**kwargs) + 
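For ComptimeVariable, the user-facing entry point is the comptime object this file imports from torch._dynamo.comptime. A hedged usage sketch (the eager backend and the f/x names are just plausible defaults) showing that the callback runs while Dynamo traces, not when the compiled function executes:

import torch
from torch._dynamo.comptime import comptime

@torch.compile(backend="eager")
def f(x):
    # Executes at compile time; prints the FX graph captured so far.
    comptime.print_graph()
    return x + 1

f(torch.ones(3))  # graph is printed during the first (compiling) call only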
+ +class InspectSignatureVariable(VariableTracker): + """represents inspect.signature(...)""" + + _nonvar_fields = { + "signature", + "parameters", + *VariableTracker._nonvar_fields, + } + + @staticmethod + def create(callable, **kwargs): + if kwargs: + unimplemented(f"inspect.signature with {kwargs}") + return InspectSignatureVariable( + callable, mutable_local=variables.base.MutableLocal() + ) + + def __init__(self, inspected: VariableTracker, **kwargs) -> None: + super().__init__(**kwargs) + self.inspected = inspected + + if isinstance(self.inspected, UserMethodVariable): + self.fn = self.inspected.get_function() + self.signature = inspect.signature(self.fn) + self.parameters = list(self.signature.parameters.items())[1:] + elif isinstance(self.inspected, UserFunctionVariable): + self.fn = self.inspected.get_function() + self.signature = inspect.signature(self.fn) + self.parameters = list(self.signature.parameters.items()) + else: + self.fn = self.inspected.as_python_constant() + self.signature = inspect.signature(self.fn) + self.parameters = list(self.signature.parameters.items()) + + def var_getattr(self, tx: "InstructionTranslator", name: str) -> "VariableTracker": + if name == "parameters": + return variables.ConstDictVariable( + { + variables.ConstantVariable.create( + param[0] + ): InspectParameterVariable(param[1]) + for param in self.parameters + }, + user_cls=dict, + ) + return super().var_getattr(tx, name) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if name == "bind": + if not hasattr(self.fn, "__kwdefaults__"): + unimplemented( + f"inspect.signature.bind with {self.fn} without __kwdefaults__" + ) + obj = self.signature.bind(*args, **kwargs) + + # wrap function defaults in VTs + defaults = {} + if self.fn.__kwdefaults__: + wrap = functools.partial(wrap_bound_arg, tx=tx) + kwdefaults_sources = { + k: None + if self.source is None + else DefaultsSource(self.source, k, is_kw=True) + for k in self.fn.__kwdefaults__ + } + defaults = { + k: wrap(val=v, source=kwdefaults_sources[k]) + for k, v in self.fn.__kwdefaults__.items() + } + + return InspectBoundArgumentsVariable( + obj, + defaults, + self, + ) + return super().call_method(tx, name, args, kwargs) + + def reconstruct(self, codegen): + codegen.add_push_null( + lambda: codegen.extend_output( + [ + codegen.create_load_python_module(inspect), + codegen.create_load_attr("signature"), + ] + ) + ) + codegen(self.inspected) + codegen.extend_output(create_call_function(1, False)) + + +class InspectParameterVariable(VariableTracker): + """represents inspect.Parameter(...)""" + + def __init__(self, value, **kwargs) -> None: + super().__init__(**kwargs) + self.value = value + + def var_getattr(self, tx: "InstructionTranslator", name: str) -> "VariableTracker": + from .builder import SourcelessBuilder, VariableBuilder + + try: + attr_value = getattr(self.value, name) + if self.source: + attr_source = AttrSource(self.source, name) + return VariableBuilder(tx, attr_source)(attr_value) + else: + return SourcelessBuilder.create(tx, attr_value) + except AttributeError: + unimplemented(f"getattr({self.value}, {name})") + + +class InspectBoundArgumentsVariable(VariableTracker): + """represents inspect.signature(...).bind(...)""" + + _nonvar_fields = { + "bound_arguments", + "packed_vars", + *VariableTracker._nonvar_fields, + } + + # NOTE: we keep track of changes to arguments via bound_arguments_var, + # but we still keep a copy of the 
inspect.BoundArguments object in order + # to get the correct args/kwargs. + def __init__( + self, + bound_arguments: inspect.BoundArguments, + defaults: Dict[str, VariableTracker], + signature: InspectSignatureVariable, + **kwargs, + ): + super().__init__(**kwargs) + self.bound_arguments = bound_arguments + self.defaults = defaults + # used to convert from VT to tuple/dict when updating bound_arguments + self.packed_vars = set() + + arguments_dict = {} + for key, val in bound_arguments.arguments.items(): + key_var = variables.ConstantVariable(key) + # convert val to VT + if isinstance(val, tuple): + arguments_dict[key_var] = variables.TupleVariable(list(val)) + self.packed_vars.add(key) + elif isinstance(val, dict): + self.packed_vars.add(key) + arguments_dict[key_var] = variables.ConstDictVariable( + {variables.ConstantVariable(k): v for k, v in val.items()} + ) + elif isinstance(val, VariableTracker): + arguments_dict[key_var] = val + else: + unimplemented( + "inspect.signature(...).bind(...).arguments contains non-variable/tuple/dict" + ) + + self.bound_arguments_var = variables.ConstDictVariable( + arguments_dict, + type(bound_arguments.arguments), + mutable_local=variables.base.MutableLocal(), + ) + self.signature = signature + + def _update_bound_arguments(self): + for key, val in self.bound_arguments_var.items.items(): + true_val = val + if key.underlying_value in self.packed_vars: + if isinstance(val, variables.TupleVariable): + true_val = tuple(val.items) + elif isinstance(val, variables.ConstDictVariable): + true_val = {k.underlying_value: v for k, v in val.items.items()} + else: + unimplemented( + "inspect.signature(...).bind(...) cannot update bound arguments" + ) + self.bound_arguments.arguments[key.underlying_value] = true_val + + def var_getattr(self, tx: "InstructionTranslator", name: str) -> "VariableTracker": + if name == "arguments": + return self.bound_arguments_var + elif name == "args": + self._update_bound_arguments() + return variables.TupleVariable(list(self.bound_arguments.args)) + elif name == "kwargs": + self._update_bound_arguments() + kw = { + variables.ConstantVariable(key): val + for key, val in self.bound_arguments.kwargs.items() + } + return variables.ConstDictVariable(kw) + elif name == "signature": + return self.signature + return super().var_getattr(tx, name) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if name == "apply_defaults": + # mimic calling apply_defaults + for key, val in self.defaults.items(): + key_var = variables.ConstantVariable(key) + if key_var not in self.bound_arguments_var: + self.bound_arguments_var.call_method( + tx, "__setitem__", [key_var, val], {} + ) + + # actually apply the changes + self._update_bound_arguments() + + return variables.ConstantVariable(None) + return super().call_method(tx, name, args, kwargs) + + def reconstruct(self, codegen): + # reconstruct inspect.signature(...).bind(*bound_arguments.args, **bound_arguments.kwargs) + # NOTE the reconstructed inspect.signature(...) object might not be the same object + # as the Signature object that originally created the BoundArguments object. 
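The eager behavior this wrapper mirrors is plain inspect.Signature.bind plus apply_defaults. A small reference (g is an illustrative function), assuming standard-library semantics:

import inspect

def g(a, b=2, *args, c=3, **kw):
    return a, b, args, c, kw

bound = inspect.signature(g).bind(1, c=4)
bound.apply_defaults()  # fills b=2, args=(), kw={}
assert bound.args == (1, 2)
assert bound.kwargs == {"c": 4}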
+ self._update_bound_arguments() + + def gen_fn(): + codegen(self.signature) + codegen.append_output(codegen.create_load_attr("bind")) + + codegen.add_push_null(gen_fn, call_function_ex=True) + + codegen.foreach(self.bound_arguments.args) + codegen.append_output( + create_instruction("BUILD_TUPLE", arg=len(self.bound_arguments.args)) + ) + + for key, val in self.bound_arguments.kwargs.items(): + codegen.append_output(codegen.create_load_const(key)) + codegen(val) + codegen.extend_output( + [ + create_instruction("BUILD_MAP", arg=len(self.bound_arguments.kwargs)), + create_instruction("CALL_FUNCTION_EX", arg=1), + ] + ) + + +def produce_trampoline_autograd_apply(fn_cls): + def trampoline_autograd_apply(*args, **kwargs): + return fn_cls.apply(*args, **kwargs) + + trampoline_autograd_apply._origin = produce_trampoline_autograd_apply + return trampoline_autograd_apply + + +class AutogradFunctionVariable(VariableTracker): + """represents a torch.autograd.Function subclass""" + + _nonvar_fields = { + "fn_cls", + *VariableTracker._nonvar_fields, + } + + def __init__(self, fn_cls, **kwargs) -> None: + super().__init__(**kwargs) + self.fn_cls = fn_cls + + def call_apply(self, tx: "InstructionTranslator", args, kwargs): + requires_grad = False + + def visit(node): + nonlocal requires_grad + if isinstance(node, variables.TensorVariable): + if node.requires_grad is not False: + requires_grad = True + if isinstance(node, variables.NNModuleVariable): + if node.is_training(tx): + requires_grad = True + + VariableTracker.visit(visit, (args, kwargs)) + + if ( + requires_grad + and torch.is_grad_enabled() + and config.capture_autograd_function + ): + from torch._functorch.autograd_function import ( + autograd_function_forward_rewritten, + ) + from torch.autograd.function import _is_setup_context_defined + + forward_fn = self.fn_cls.forward + + is_setup_ctx_defined = _is_setup_context_defined(self.fn_cls.setup_context) + if is_setup_ctx_defined: + # If setup_context is defined, we generate a new forward function which includes + # the original forward and setup_context function, and trace the new forward function. + forward_fn = autograd_function_forward_rewritten( + self.fn_cls.forward, self.fn_cls.setup_context + ) + + vjp_fn = self.fn_cls.vjp # type: ignore[attr-defined] + if vjp_fn is not torch.autograd.Function.vjp: + unimplemented("NYI - User defind vjp") + + jvp_fn = self.fn_cls.jvp # type: ignore[attr-defined] + if jvp_fn is not torch.autograd.Function.jvp: + unimplemented("NYI - User defind jvp") + + from .higher_order_ops import AutogradFunctionApplyVariable + + source = self.source + if source is None: + source = AttrSource( + tx.import_source(self.fn_cls.__module__), self.fn_cls.__name__ + ) + + val = AutogradFunctionApplyVariable( + forward_fn, + self.fn_cls.backward, + source, + source=AttrSource(source, member="apply"), + ).call_function(tx, args, kwargs) + # Inside of AutogradFunctionApplyVariable.call_function, we use sourceless variable wrapping + # the forward function, as we don't want to generate guards for new_forward.__closure__ + # if forward is rewritten by autograd_function_forward_rewritten. + # But we still need to generate correct guards for the original forward and setup_context + # functions, so we have to add guards manually. 
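The rewritten-forward path above targets the modern autograd.Function layout, where forward takes no ctx and setup_context saves state instead. A minimal function of that shape (Square is an illustrative name), assuming standard torch.autograd semantics:

import torch

class Square(torch.autograd.Function):
    @staticmethod
    def forward(x):  # no ctx argument: paired with setup_context below
        return x * x

    @staticmethod
    def setup_context(ctx, inputs, output):
        (x,) = inputs
        ctx.save_for_backward(x)

    @staticmethod
    def backward(ctx, grad_out):
        (x,) = ctx.saved_tensors
        return 2 * x * grad_out

x = torch.randn(3, requires_grad=True)
Square.apply(x).sum().backward()
assert torch.allclose(x.grad, 2 * x)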
+ if self.source: + fwd_src = AttrSource(self.source, "forward") + install_guard(fwd_src.make_guard(GuardBuilder.FUNCTION_MATCH)) + if is_setup_ctx_defined: + setup_ctx_src = AttrSource(self.source, "setup_context") + install_guard(setup_ctx_src.make_guard(GuardBuilder.FUNCTION_MATCH)) + + return val + + if self.source: + source = AttrSource(self.source, "forward") + else: + source = None + + fn = self.fn_cls.forward + ctx = AutogradFunctionContextVariable.create(tx, args, kwargs) + args = [ctx, *args] + if isinstance(fn, types.FunctionType): + return variables.UserFunctionVariable(fn, source=source).call_function( + tx, args, kwargs + ) + elif isinstance(fn, types.MethodType): + return variables.UserMethodVariable( + fn.__func__, + variables.UserDefinedClassVariable(self.fn_cls), + source=source, + ).call_function(tx, args, kwargs) + else: + unimplemented( + f"non-function or method in subclass of torch.autograd.Function: {fn}" + ) + + def call_backward(self, tx: "InstructionTranslator", args, kwargs): + fn = self.fn_cls.backward + self.source = AttrSource(self.source, "backward") + assert type(args[0].value) is torch._dynamo.external_utils.FakeBackwardCFunction + assert isinstance(fn, types.FunctionType) + + return variables.UserFunctionVariable(fn, source=self.source).call_function( + tx, args, kwargs + ) + + def call_function(self, tx: "InstructionTranslator", args, kwargs): + return AutogradFunctionVariable(self.fn_cls) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ): + from ..trace_rules import is_callable_allowed + from .builder import wrap_fx_proxy + + if name == "apply": + if is_callable_allowed(self.fn_cls): + trampoline_autograd_apply = produce_trampoline_autograd_apply( + self.fn_cls + ) + return wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + trampoline_autograd_apply, + *proxy_args_kwargs(args, kwargs), + ), + ) + else: + return self.call_apply(tx, args, kwargs) + + elif name == "backward": + return self.call_backward(tx, args, kwargs) + else: + from .. 
import trace_rules + + source = AttrSource(self.source, name) if self.source is not None else None + try: + obj = inspect.getattr_static(self.fn_cls, name) + except AttributeError: + obj = None + + if isinstance(obj, staticmethod): + func = obj.__get__(self.fn_cls) + if source is not None: + return ( + trace_rules.lookup(func) + .create_with_source(func, source=source) + .call_function(tx, args, kwargs) + ) + else: + return trace_rules.lookup(func)(func).call_function( + tx, args, kwargs + ) + elif isinstance(obj, classmethod): + return variables.UserMethodVariable( + obj.__func__, self, source=source + ).call_function(tx, args, kwargs) + else: + unimplemented(f"Unsupported method: {name}") + + +@dataclasses.dataclass +class SavedTensorBox: + tensors: List[VariableTracker] = dataclasses.field(default_factory=list) + + +class AutogradFunctionContextVariable(UserDefinedObjectVariable): + """ + Tracks an autograd.Function() context using mutation tracking in side_effects.py + """ + + _nonvar_fields = { + "proxy", + "inference", + "saved_tensors", + *UserDefinedObjectVariable._nonvar_fields, + } + + def __init__( + self, + value, + value_type=None, + inference=False, + proxy=None, + saved_tensors=None, + needs_input_grad=None, + non_differentiable=None, + **kwargs, + ) -> None: + super().__init__(value=value, value_type=value_type, **kwargs) + self.inference = inference + self.proxy = proxy + self.saved_tensors = saved_tensors + self.needs_input_grad = needs_input_grad + self.non_differentiable = non_differentiable + + @staticmethod + def create(tx: "InstructionTranslator", args=None, kwargs=None): + needs_input_grad = None + if args and not kwargs: + needs_input_grad = tuple( + isinstance(x, variables.TensorVariable) and x.requires_grad + for x in args + ) + proxy = tx.output.create_proxy( + "call_function", torch.autograd.function.FunctionCtx, (), {} + ) + out = tx.output.side_effects.track_object_new( + None, + torch.autograd.function.FunctionCtx, + functools.partial( + AutogradFunctionContextVariable, + inference=True, + proxy=proxy, + saved_tensors=SavedTensorBox(), + needs_input_grad=needs_input_grad, + ), + {}, + ) + set_example_value(proxy.node, out.value) + + return out + + def as_proxy(self): + if self.proxy is None: + unimplemented("proxy not set") + return self.proxy + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if name == "__setattr__": + return super().call_method(tx, name, args, kwargs) + elif name == "mark_non_differentiable": + assert len(kwargs) == 0 + self.non_differentiable = proxy_args_kwargs(args, {})[0] + return variables.ConstantVariable.create(None) + + if name != "save_for_backward": + unimplemented(f"autograd.Function context method: {name}") + if self.saved_tensors is None: + unimplemented( + "save_for_backward only supported on a newly constructed FunctionCtx" + ) + + if not self.inference: + assert self.source and not kwargs + tx.output.side_effects.track_save_for_backward(self, args) + + # In eager mode, multiple calls to .save_for_backward() will overwrite previous calls. 
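+        # Editor's note: an illustrative sketch, not part of the upstream
+        # file. The eager semantics being mirrored below:
+        #
+        #   ctx.save_for_backward(a)
+        #   ctx.save_for_backward(b, c)
+        #   # later, in backward(): ctx.saved_tensors == (b, c)
+        #
+        # Only the tensors from the most recent call survive, so the tracked
+        # list is cleared before the new tensors are appended.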
+ if len(self.saved_tensors.tensors) > 0: + self.saved_tensors.tensors = [] + for arg in args: + self.saved_tensors.tensors.append(arg) + return variables.ConstantVariable.create(None) + + def var_getattr(self, tx: "InstructionTranslator", name): + if name in ["save_for_backward", "mark_non_differentiable"]: + return LambdaVariable( + lambda *args, **kwargs: self.call_method(tx, name, args, kwargs) + ) + if name == "saved_tensors" and self.saved_tensors is not None: + return variables.TupleVariable(list(self.saved_tensors.tensors)) + if name == "needs_input_grad": + if self.needs_input_grad is not None: + return variables.ConstantVariable.create(self.needs_input_grad) + if self.source: + from .builder import VariableBuilder + + return VariableBuilder(tx, AttrSource(self.source, "needs_input_grad"))( + self.value.needs_input_grad + ) + return super().var_getattr(tx, name) + + +class AutogradEngineVariable(UserDefinedObjectVariable): + """ + Represents a torch._C._ImperativeEngine instance. + """ + + def __init__( + self, + value, + value_type=None, + **kwargs, + ) -> None: + super().__init__(value=value, value_type=value_type, **kwargs) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if name == "queue_callback": + if torch._dynamo.compiled_autograd.compiled_autograd_enabled: + assert ( + tx.one_graph + ), "queue_callback() is only supported when Compiled Autograd is enabled with fullgraph=True" + return variables.UserFunctionVariable( + torch._dynamo.external_utils.FakeCompiledAutogradEngine.queue_callback, + source=self.source, + ).call_function( + tx, + (tx.output.side_effects.get_ca_final_callbacks_var(), *args), + kwargs, + ) + else: + unimplemented( + "queue_callback() is only supported when Compiled Autograd is enabled with fullgraph=True" + ) + else: + unimplemented(f"torch._C._ImperativeEngine method: {name}") + + +class LambdaVariable(VariableTracker): + def __init__(self, fn, **kwargs) -> None: + super().__init__(**kwargs) + self.fn = fn + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + return self.fn(*args, **kwargs) + + +class GetAttrVariable(VariableTracker): + _nonvar_fields = { + "name", + *VariableTracker._nonvar_fields, + } + + def __init__(self, obj, name, **kwargs) -> None: + super().__init__(**kwargs) + assert isinstance(obj, VariableTracker) + assert isinstance(name, str) + self.obj = obj + self.name = name + + def __str__(self) -> str: + return f"{self.__class__.__name__}({self.obj}, {self.name})" + + @staticmethod + def create_getattr_proxy(base_proxy: torch.fx.Proxy, attr): + return getattr(base_proxy, attr) + + def as_proxy(self): + return GetAttrVariable.create_getattr_proxy(self.obj.as_proxy(), self.name) + + def const_getattr(self, tx: "InstructionTranslator", name): + if not isinstance(self.obj, variables.NNModuleVariable): + raise NotImplementedError + step1 = tx.output.get_submodule(self.obj.module_key) + if self.name not in step1.__dict__: + raise NotImplementedError + step2 = inspect.getattr_static(step1, self.name) + if name not in step2.__dict__: + raise NotImplementedError + return inspect.getattr_static(step2, name) + + def reconstruct(self, codegen): + codegen(self.obj) + codegen.extend_output(codegen.create_load_attrs(self.name)) + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, 
VariableTracker]", + ) -> "VariableTracker": + return self.obj.call_method(tx, self.name, args, kwargs) + + def call_method( + self, + tx, + name, + args: List[VariableTracker], + kwargs: Dict[str, VariableTracker], + ) -> VariableTracker: + if ( + name in ("__getitem__", "get") + and self.name == "__dict__" + and not kwargs + and args[0].is_python_constant() + and isinstance( + self.obj, + ( + variables.UserDefinedObjectVariable, + variables.NNModuleVariable, + variables.UserDefinedClassVariable, + ), + ) + ): + obj = self.obj + key = args[0].as_python_constant() + if obj.has_key_in_generic_dict(tx, key): + # redirect to var_getattr on the original obj + return obj.var_getattr(tx, key) + + # Return the default value for get + if name == "get": + if len(args) == 2: + return args[1] + else: + return variables.ConstantVariable(None) + + elif ( + name == "__contains__" + and self.name == "__dict__" + and len(args) == 1 + and args[0].is_python_constant() + and not kwargs + and isinstance( + self.obj, + ( + variables.UserDefinedObjectVariable, + variables.NNModuleVariable, + variables.UserDefinedClassVariable, + ), + ) + ): + obj = self.obj + key = args[0].as_python_constant() + if obj.has_key_in_generic_dict(tx, key): + return variables.ConstantVariable(True) + else: + return variables.ConstantVariable(False) + + return super().call_method(tx, name, args, kwargs) + + +class MethodWrapperVariable(VariableTracker): + def __init__(self, method_wrapper, **kwargs) -> None: + super().__init__(**kwargs) + self.method_wrapper = method_wrapper + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if is_tensor_base_attr_getter(self.method_wrapper) and isinstance( + args[0], variables.TensorVariable + ): + assert len(args) == 1 and len(kwargs) == 0 + + return args[0].var_getattr(tx, self.method_wrapper.__self__.__name__) + + super().call_function(tx, args, kwargs) + + def is_python_constant(self): + return True + + def as_python_constant(self): + return self.method_wrapper + + +class GetSetDescriptorVariable(VariableTracker): + def __init__(self, desc, **kwargs) -> None: + super().__init__(**kwargs) + self.desc = desc + + def var_getattr(self, tx: "InstructionTranslator", name): + if name == "__get__" and self.source: + from .builder import VariableBuilder + + return VariableBuilder(tx, AttrSource(self.source, "__get__"))( + self.desc.__get__ + ) + else: + return super().var_getattr(tx, name) + + def is_python_constant(self): + return True + + def as_python_constant(self): + return self.desc + + +class PythonModuleVariable(VariableTracker): + _nonvar_fields = { + "value", + "is_torch", + *VariableTracker._nonvar_fields, + } + + def __init__(self, value: types.ModuleType, **kwargs) -> None: + super().__init__(**kwargs) + self.value = value + self.is_torch = self.value is torch or self.value.__name__.startswith("torch.") + + def python_type(self): + return types.ModuleType + + def as_python_constant(self): + return self.value + + def __repr__(self) -> str: + return f"PythonModuleVariable({self.value})" + + def call_hasattr(self, tx: "InstructionTranslator", name): + result = hasattr(self.value, name) + return variables.ConstantVariable.create(result) + + def var_getattr(self, tx: "InstructionTranslator", name): + if tx.output.side_effects.has_pending_mutation_of_attr(self, name): + return tx.output.side_effects.load_attr(self, name) + + from .builder import SourcelessBuilder, VariableBuilder + + if 
self.is_torch or name not in self.value.__dict__:
+            attr_value = getattr(self.value, name)
+        else:
+            attr_value = self.value.__dict__[name]
+
+        if self.source:
+            new_source = AttrSource(self.source, name)
+            return VariableBuilder(tx, new_source)(attr_value)
+        else:
+            return SourcelessBuilder.create(tx, attr_value)
+
+
+class TypingVariable(VariableTracker):
+    def __init__(self, value, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self.value = value
+
+    def call_method(
+        self,
+        tx,
+        name,
+        args: "List[VariableTracker]",
+        kwargs: "Dict[str, VariableTracker]",
+    ) -> "VariableTracker":
+        if name == "__getitem__" and len(args) == 1:
+            return variables.ConstantVariable.create(
+                self.value[args[0].as_python_constant()],
+            )
+        unimplemented("typing")
+
+    def as_python_constant(self):
+        return self.value
+
+
+@functools.lru_cache(maxsize=1)
+def get_np_to_tnp_map():
+    from ..utils import NP_TO_TNP_MODULE
+
+    np_fn_to_tnp_fn = {}
+
+    for np_mod, tnp_mod in NP_TO_TNP_MODULE.items():
+        for fn_name, tnp_fn in tnp_mod.__dict__.items():
+            if callable(tnp_fn):
+                # some internal details leak from tnp
+                # which are not part of the numpy API.
+                if np_fn := getattr(np_mod, fn_name, None):
+                    np_fn_to_tnp_fn[np_fn] = tnp_fn
+
+    return np_fn_to_tnp_fn
+
+
+class NumpyVariable(VariableTracker):
+    """
+    Wrapper around `numpy.*`. Currently able to trace a small subset of numpy functions as well as numpy dtypes.
+    """
+
+    constant_fold_functions = (tnp.issubdtype,)
+
+    def __init__(self, value, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self.value = value
+
+    @classmethod
+    def can_constant_fold_through(cls, fn):
+        mod = fn.__module__.split(".")
+        assert len(mod) >= 2 and mod[:2] == ["torch", "_numpy"]
+        return fn in cls.constant_fold_functions
+
+    @classmethod
+    def get_constant_collection_for_func(cls, fn):
+        mod = fn.__module__.split(".")
+        assert len(mod) >= 2 and mod[:2] == ["torch", "_numpy"]
+        return np_constant_collections_map.get(fn, None)
+
+    def call_function(
+        self,
+        tx: "InstructionTranslator",
+        args: "List[VariableTracker]",
+        kwargs: "Dict[str, VariableTracker]",
+    ) -> "VariableTracker":
+        if not config.trace_numpy:
+            unimplemented(f"numpy.{self.value}()")
+
+        from ..utils import numpy_to_tensor_wrapper
+        from .tensor import NumpyNdarrayVariable
+
+        func = get_np_to_tnp_map().get(self.value)
+        if func is None:
+            unimplemented(
+                f"Can't find numpy function {self.value} in torch._numpy. "
+                "Please file an issue to request support for this function."
+            )
+
+        # We are dealing with a function that produces a const collection type (np.dtype, np.iinfo/np.finfo)
+        if (
+            collection_variable_typ := self.get_constant_collection_for_func(func)
+        ) is not None:
+            try:
+                return collection_variable_typ(
+                    self.value(
+                        *[x.as_python_constant() for x in args],
+                        **{k: v.as_python_constant() for k, v in kwargs.items()},
+                    )
+                )
+            except NotImplementedError:
+                unimplemented(
+                    f"{self.value.__name__} with non-const args: {args} {kwargs}"
+                )
+        else:
+            if (
+                func.__module__ == "torch._numpy.random"
+                and config.use_numpy_random_stream
+            ):
+                msg = f"delegate '{func.__qualname__}' to NumPy itself via "
+                msg += f"config.use_numpy_random_stream={config.use_numpy_random_stream}"
+                unimplemented(msg)
+
+            args, kwargs = NumpyNdarrayVariable.patch_args(func.__name__, args, kwargs)
+
+            if self.can_constant_fold_through(func) and (
+                check_unspec_or_constant_args(args, kwargs)
+            ):
+                # constant fold
+                return variables.ConstantVariable.create(
+                    self.as_python_constant()(
+                        *[x.as_python_constant() for x in args],
+                        **{k: v.as_python_constant() for k, v in kwargs.items()},
+                    ),
+                )
+
+            # TODO Add all the functions that go from constants to constants to can_constant_fold_through
+            proxy = tx.output.create_proxy(
+                "call_function",
+                numpy_to_tensor_wrapper(func),
+                *proxy_args_kwargs(args, kwargs),
+            )
+            return NumpyNdarrayVariable.create(tx, proxy)
+
+    def call_method(
+        self,
+        tx,
+        name,
+        args: "List[VariableTracker]",
+        kwargs: "Dict[str, VariableTracker]",
+    ) -> "VariableTracker":
+        unimplemented("numpy")
+
+    def as_python_constant(self):
+        return self.value
+
+    def as_proxy(self):
+        if config.trace_numpy and isinstance(self.value, type):
+            # This handles numpy dtype attributes such as np.float32
+            # We return a string as we don't want to serialize non-PyTorch objects in the output FX graph
+            # In torch/_numpy we normalize strings to their dtypes when the input is a dtype, as NumPy does
+            return self.value.__name__
+
+        return super().as_proxy()
+
+
+# Used to keep track of NULLs pushed on the stack for Python 3.11 function calls
+class NullVariable(VariableTracker):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+
+    def __str__(self) -> str:
+        return "NullVariable"
+
+    def reconstruct(self, codegen):
+        if sys.version_info < (3, 11):
+            unimplemented("cannot reconstruct NullVariable in < Python 3.11")
+        codegen.append_output(create_instruction("PUSH_NULL"))
+
+
+class DeletedVariable(VariableTracker):
+    """Marker used to implement delattr()"""
+
+
+class StringFormatVariable(VariableTracker):
+    """
+    Represents a call to str.format(); we delay calling format until after the graph.
+ """ + + _nonvar_fields = {"format_string", *VariableTracker._nonvar_fields} + + @classmethod + def create(cls, format_string, sym_args, sym_kwargs): + if all( + x.is_python_constant() + for x in itertools.chain(sym_args, sym_kwargs.values()) + ): + return variables.ConstantVariable.create( + format_string.format( + *[v.as_python_constant() for v in sym_args], + **{k: v.as_python_constant() for k, v in sym_kwargs.items()}, + ) + ) + return cls(format_string, list(sym_args), dict(sym_kwargs)) + + def __init__(self, format_string, sym_args, sym_kwargs, **kwargs) -> None: + super().__init__(**kwargs) + assert isinstance(format_string, str) + self.format_string = format_string + self.sym_args = sym_args + self.sym_kwargs = sym_kwargs + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.format_string!r}, {self.sym_args!r}, {self.sym_kwargs!r})" + + def reconstruct(self, codegen): + codegen.add_push_null( + lambda: codegen.extend_output( + [ + codegen.create_load_const(self.format_string), + codegen.create_load_attr("format"), + ] + ), + call_function_ex=True, + ) + codegen(variables.TupleVariable(self.sym_args)) + kwargs = { + variables.ConstantVariable.create(k): v for k, v in self.sym_kwargs.items() + } + codegen(variables.ConstDictVariable(kwargs)) + codegen.append_output(create_instruction("CALL_FUNCTION_EX", arg=1)) + + +class DebuggingVariable(VariableTracker): + """ + Represents a call to a debugging function like print(), or something + registered to config.reorderable_logging_functions. + """ + + def __init__(self, value, **kwargs) -> None: + super().__init__(**kwargs) + self.value = value + + @staticmethod + def is_reorderable_logging_function(obj): + return ( + callable(obj) + and isinstance(obj, (types.FunctionType, types.BuiltinFunctionType)) + and obj in torch._dynamo.config.reorderable_logging_functions + ) + + def call_function(self, tx: "InstructionTranslator", args, kwargs): + if tx.export: + # For export cases, we can just make debugging functions no-ops + return + + if not self.can_reorder_logs(self.value, args, kwargs): + unimplemented( + f"Reordering debugging function {self.value} " + f"with inputs {args} {kwargs} is not yet implemented." + ) + + tx.debug_locals.append((self, list(args))) + + def reconstruct(self, codegen): + return self.source.reconstruct(codegen) + + @staticmethod + def can_reorder_logs(fn, args, kwargs) -> True: + """ + Run some additional checks for what sort of function calls can we + actually reorder. 
+ """ + + allowed_input_types = ( + variables.TensorVariable, + variables.ConstantVariable, + StringFormatVariable, + ) + + flat_args = pytree.tree_leaves([args, kwargs]) + for arg in flat_args: + if not isinstance(arg, allowed_input_types): + return False + + return True + + +class LoggingLoggerVariable(VariableTracker): + """ + Represents a call to any of logging.Logger methods + """ + + def __init__(self, value, **kwargs) -> None: + super().__init__(**kwargs) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if tx.export: + # For export cases, we can just make debugging functions no-ops + return + unimplemented("Logger not supported for non-export cases") + + +class ConstantLikeVariable(VariableTracker): + """self.value is a compile-time constant, but not a literal""" + + _error_prefix = "ConstantLikeVariable" + try: + from numpy import ( + dtype as np_dtype, + floating as np_floating, + generic as np_generic, + ) + except ImportError: + np_floating = type("invalid_type", (), {}) + np_dtype = type("invalid_type", (), {}) + + def __init__(self, value, **kwargs) -> None: + super().__init__(**kwargs) + self.value = value + + def as_python_constant(self): + return self.value + + def call_method( + self, + tx, + name, + args: List[VariableTracker], + kwargs: Dict[str, VariableTracker], + ) -> VariableTracker: + try: + # we only support constant propagation for methods + cargs = [x.as_python_constant() for x in args] + ckwargs = {k: v.as_python_constant() for k, v in kwargs.items()} + except NotImplementedError: + unimplemented(f"{self._error_prefix}.{name}(*{args}, **{kwargs})") + + result = getattr(self.value, name)(*cargs, **ckwargs) + + if variables.ConstantVariable.is_literal(result): + return variables.ConstantVariable.create(result) + if isinstance(result, re.Match): + return ConstantRegexMatchVariable(result) + + unimplemented(f"{self._error_prefix}.{name}() -> {result}") + + def var_getattr(self, tx: "InstructionTranslator", name: str) -> VariableTracker: + result = getattr(self.value, name) + if isinstance(result, self.np_floating): + result = float(result) + if isinstance(result, self.np_dtype): + return NumpyDTypeVariable(result) + if isinstance(result, type) and issubclass(result, self.np_generic): + # things like x.dtype.type + return NumpyVariable(result) + if variables.ConstantVariable.is_literal(result): + return variables.ConstantVariable.create(result) + return GetAttrVariable(self, name) + + +class RegexPatternVariable(ConstantLikeVariable): + _error_prefix = "re.Pattern" + + +class ConstantRegexMatchVariable(ConstantLikeVariable): + _error_prefix = "re.Match" + + +class TorchVersionVariable(ConstantLikeVariable): + _error_prefix = "torch.__version__" + + def __init__(self, **kwargs) -> None: + kwargs.setdefault("value", torch.__version__) + assert kwargs["value"] is torch.__version__ + super().__init__(**kwargs) + + +class NumpyTypeInfoVariable(ConstantLikeVariable): + _error_prefix = "np.iinfo/np.finfo" + + +class NumpyDTypeVariable(ConstantLikeVariable): + _error_prefix = "np.dtype[...]" + + def as_proxy(self): + """Similar to how numpy dtype descriptors (e.g. np.float32 ) are handled by NumpyVariable: + + np.dtype() objects are serialized as strings, torch._numpy wrappers will normalize to the torch dtype. + This also handles unsupported things nicely (i.e. structured arrays and object arrays). 
+ """ + return self.value.type.__name__ + + +np_constant_collections_map = { + tnp.finfo: NumpyTypeInfoVariable, + tnp.iinfo: NumpyTypeInfoVariable, + tnp.dtype: NumpyDTypeVariable, +} + + +class RandomClassVariable(VariableTracker): + """random.Random""" + + def __init__(self, **kwargs) -> None: + super().__init__(**kwargs) + + def call_function(self, tx: "InstructionTranslator", args, kwargs): + if len(args) > 1: + unimplemented("random.Random() with > 1 arg") + elif kwargs: + unimplemented("random.Random() with kwargs") + seed = variables.ConstantVariable.create(None) if len(args) == 0 else args[0] + return RandomVariable(seed=seed, mutable_local=variables.base.MutableLocal()) + + +class RandomVariable(VariableTracker): + """random.Random() + + Implemented by wrapping a VariableTracker around a random.Random object. + The supported methods for the random.Random object cannot be overriden. + Assumes that random objects behave the same given a set seed or state. + """ + + _nonvar_fields = { + "random", + *VariableTracker._nonvar_fields, + } + + _supported_fn_names = { + "random", + "randint", + "randrange", + "uniform", + } + + def __init__( + self, + rand: Optional[random.Random] = None, + seed: Optional[VariableTracker] = None, + **kwargs, + ) -> None: + super().__init__(**kwargs) + if rand is not None: + assert self.is_supported_random_obj(rand) + self.random = random.Random() + self.random.setstate(rand.getstate()) + else: + seed = seed.as_python_constant() if seed is not None else None + self.random = random.Random(seed) + + def python_type(self): + return random.Random + + def as_python_constant(self): + return self.random + + @staticmethod + def is_supported_random_obj(val): + if type(val) is not random.Random: + return False + for name in itertools.chain( + RandomVariable._supported_fn_names, ("seed", "getstate", "setstate") + ): + if not hasattr(val, name): + return False + meth = getattr(val, name) + if inspect.isbuiltin(meth): + # e.g. 
random.Random.random + if meth != getattr(random.Random, name).__get__(val): + return False + else: + if getattr(meth, "__func__", None) is not getattr(random.Random, name): + return False + return True + + @staticmethod + def check_state(state): + assert type(state) is tuple + assert type(state[0]) is int + assert type(state[1]) is tuple + assert all(type(x) is int for x in state[1]) + assert state[2] is None or type(state[2]) is float + + @staticmethod + def wrap_state(state): + RandomVariable.check_state(state) + return variables.TupleVariable( + [ + variables.ConstantVariable.create(state[0]), + variables.TupleVariable( + [variables.ConstantVariable.create(x) for x in state[1]] + ), + variables.ConstantVariable.create(state[2]), + ] + ) + + @staticmethod + def unwrap_state(state): + state_obj = state.as_python_constant() + RandomVariable.check_state(state_obj) + return state_obj + + def call_method( + self, + tx, + name, + args: List[VariableTracker], + kwargs: Dict[str, VariableTracker], + ) -> VariableTracker: + if name == "seed": + tx.output.side_effects.mutation(self) + self.random.seed( + *[x.as_python_constant() for x in args], + **{key: val.as_python_constant() for key, val in kwargs.items()}, + ) + return variables.ConstantVariable.create(None) + elif name == "getstate": + return self.wrap_state(self.random.getstate()) + elif name == "setstate": + tx.output.side_effects.mutation(self) + self.random.setstate(self.unwrap_state(args[0])) + return variables.ConstantVariable.create(None) + elif name in self._supported_fn_names: + tx.output.side_effects.mutation(self) + state = self.random.getstate() + + def call_random_meth(*args, **kwargs): + r = random.Random() + r.setstate(state) + return getattr(r, name)(*args, **kwargs) + + # self.random state not actually updated by call_random_meth, so update here + # by calling the method + getattr(self.random, name)( + *[x.as_python_constant() for x in args], + **{k: v.as_python_constant() for k, v in kwargs.items()}, + ) + + return call_random_fn(tx, call_random_meth, args, kwargs) + return super().call_method(tx, name, args, kwargs) + + def reconstruct(self, codegen): + codegen.add_push_null( + lambda: codegen.extend_output( + [ + codegen.create_load_python_module(random), + codegen.create_load_attr("Random"), + ] + ) + ) + codegen.call_function(0, False) + # NOTE using add_push_null may result in NULL being duplicated + # so defer the push_null to call_function + codegen.dup_top() + codegen.load_attr("setstate") + codegen(self.wrap_state(self.random.getstate())) + codegen.call_function(1, True) + codegen.pop_top() diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/nn_module.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/nn_module.py new file mode 100644 index 0000000000000000000000000000000000000000..9c4fb05df95e34928e4c598c8546d2ab1ebc60f1 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/nn_module.py @@ -0,0 +1,1180 @@ +# mypy: ignore-errors + +import functools +import inspect +import itertools +import types +from contextlib import contextmanager, nullcontext +from typing import Dict, List, TYPE_CHECKING + +import torch.nn + +from .. 
import trace_rules, variables
+from ..exc import (
+    raise_observed_exception,
+    unimplemented,
+    UnspecializeRestartAnalysis,
+    Unsupported,
+)
+from ..guards import GuardBuilder, install_guard
+from ..mutation_guard import GenerationTracker
+from ..source import (
+    AttrSource,
+    ConstDictKeySource,
+    FSDPNNModuleSource,
+    GetItemSource,
+    NNModuleSource,
+    UnspecializedBuiltinNNModuleSource,
+    UnspecializedNNModuleSource,
+)
+from ..utils import (
+    get_custom_getattr,
+    get_fake_value,
+    is_lazy_module,
+    is_namedtuple,
+    is_safe_constant,
+    istensor,
+    istype,
+    nnmodule_has_hooks,
+    object_has_getattribute,
+    proxy_args_kwargs,
+    set_example_value,
+)
+from .base import MutableLocal, typestr, VariableTracker
+from .functions import invoke_and_store_as_constant
+from .lazy import LazyVariableTracker
+from .lists import SliceVariable
+from .user_defined import UserDefinedObjectVariable
+
+
+if TYPE_CHECKING:
+    from torch._dynamo.symbolic_convert import InstructionTranslator
+
+
+def initialize_lazy_module(tx: "InstructionTranslator", mod, args, kwargs):
+    """
+    Fairly coupled helper used by NNModuleVariable and UnspecializedNNModuleVariable.
+
+    Used to cause a lazy module to be initialized (and delete its init hook) before tracing. Especially
+    useful now that 'allowed' modules graph-break on hooks; calling this first ensures there is no hook
+    by the time we trace __call__ and thus no graph-break for lazy allowed modules.
+    """
+    if hasattr(mod, "_initialize_hook"):
+
+        def convert_to_fake(x):
+            if is_namedtuple(x):
+                return type(x)(*(convert_to_fake(elem) for elem in x))
+            elif isinstance(x, dict):
+                return {k: convert_to_fake(v) for k, v in x.items()}
+            elif isinstance(x, (list, tuple, set)):
+                return type(x)(convert_to_fake(elem) for elem in x)
+            elif isinstance(x, torch.fx.Proxy):
+                return get_fake_value(x.node, tx)
+            else:
+                return x
+
+        proxy_args, proxy_kwargs = proxy_args_kwargs(args, kwargs)
+        fake_args = [convert_to_fake(arg) for arg in proxy_args]
+        fake_kwargs = {k: convert_to_fake(v) for k, v in proxy_kwargs.items()}
+        mod._infer_parameters(mod, fake_args, fake_kwargs)
+
+
+@contextmanager
+def record_nn_module_stack(module_key: str, source, tx, mod: torch.nn.Module):
+    fully_qualified_name = source.name()
+    try:
+        tx.nn_module_stack[module_key] = (fully_qualified_name, mod.__class__)
+        yield
+    finally:
+        del tx.nn_module_stack[module_key]
+
+
+def guard_to_detect_forward_monkeypatching(source, mod):
+    # Users sometimes patch the forward method of an nn module instance to
+    # perform optimizations like quantization. Though this is not good
+    # software practice, Python allows it, and Dynamo needs to detect such
+    # patching.
+    #
+    # One way to do this is to add an ID_MATCH guard on every function
+    # getting inlined (https://github.com/pytorch/pytorch/pull/124975). But
+    # this increased guard overhead by around 20%.
+    #
+    # To keep the guard overhead down, we just guard on the `forward` being
+    # not present in the mod __dict__.
The common case of patching forward + # method adds `forward` in the instance __dict__, whereas the unpatched + # `forward` sits in the type(mod).__dict__ + if source: + if "forward" in mod.__dict__ and callable(mod.__dict__["forward"]): + # Monkeypatched forward method, add an ID_MATCH guard on forward function + fwd = mod.__dict__["forward"] + forward_source = AttrSource(source, "forward") + if type(fwd) is types.MethodType: + forward_source = AttrSource(forward_source, "__func__") + install_guard(forward_source.make_guard(GuardBuilder.CLOSURE_MATCH)) + else: + # Common case - check that the forward key is absent in mod __dict__ + install_guard( + source.make_guard( + functools.partial( + GuardBuilder.NOT_PRESENT_IN_GENERIC_DICT, attr="forward" + ) + ) + ) + + +class NNModuleVariable(VariableTracker): + _nonvar_fields = { + "module_type", + "module_key", + "module", + "nn_module_stack_source", + *VariableTracker._nonvar_fields, + } + + def __init__( + self, module_type: type, module_key: str, module: torch.nn.Module, **kwargs + ) -> None: + super().__init__(**kwargs) + self.module_type = module_type + self.module_key = module_key + self.module = module + assert self.source + self.nn_module_stack_source = self.source + + def get_nn_module_stack_source(self): + return self.nn_module_stack_source or self.source + + def set_nn_module_stack_source(self, source): + self.nn_module_stack_source = source + + def python_type(self): + return self.module_type + + def _wrap_submodule( + self, tx: "InstructionTranslator", source, submod, *key_extra, **options + ): + return + + def unpack_var_sequence(self, tx): + # implement list/iter/tuple/etc calls + base = tx.output.get_submodule(self.module_key) + if isinstance(base, torch.nn.ModuleDict): + result = [] + for name, submod in base.items(): + name_var = variables.ConstantVariable.create(name) + tx.output.register_attr_or_module( + submod, + self.module_key, + name, + source=NNModuleSource(GetItemSource(self.source, name)), + ) + result.append(name_var) + return result + + assert isinstance( + base, (torch.nn.ModuleList, torch.nn.ParameterList, torch.nn.Sequential) + ), typestr(base) + assert self.source + result = [] + for idx, submod in enumerate(base): + result.append( + tx.output.register_attr_or_module( + submod, + self.module_key, + idx, + source=NNModuleSource(GetItemSource(self.source, idx)), + ) + ) + return result + + def call_hasattr(self, tx: "InstructionTranslator", name: str) -> "VariableTracker": + mod = tx.output.get_submodule(self.module_key) + result = hasattr(mod, name) + install_guard( + NNModuleSource(AttrSource(self.source, name)).make_guard( + GuardBuilder.HASATTR + ) + ) + return variables.ConstantVariable.create(result) + + def is_training(self, tx): + mod = tx.output.get_submodule(self.module_key) + return getattr(mod, "training", False) + + def convert_to_unspecialized(self, tx): + """Restart analysis treating this module as an UnspecializedNNModuleVariable""" + mod = tx.output.get_submodule(self.module_key) + GenerationTracker.tag(mod) + + # Mark the class dynamic unless its module initialization + if tx.f_code.co_name != "__init__": + GenerationTracker.mark_class_dynamic(type(mod)) + raise UnspecializeRestartAnalysis + + def has_key_in_generic_dict(self, tx: "InstructionTranslator", key): + base = tx.output.get_submodule(self.module_key) + + if object_has_getattribute(base): + unimplemented("NNModuleVariable with custom __getattribute__") + + if tx.output.side_effects.has_pending_mutation_of_attr(self, key): + mutated_attr 
= tx.output.side_effects.load_attr(self, key, deleted_ok=True) + return not isinstance(mutated_attr, variables.DeletedVariable) + + base_dict = object.__getattribute__(base, "__dict__") + return key in base_dict + + def _custom_getattr_fallback(self, base, tx, name, options): + """Check for a __getattr__ and handle it specially if it is implemented""" + if object_has_getattribute(base): + unimplemented("torch.nn.Module with a custom __getattribute__ defined") + + getattr_fn = get_custom_getattr(base, ignore_nn_module_getattr=True) + if getattr_fn is None: + return None + + if not isinstance(getattr_fn, types.FunctionType): + unimplemented("torch.nn.Module with a non-function custom __getattr__") + + return variables.UserMethodVariable(getattr_fn, self, **options).call_function( + tx, [variables.ConstantVariable.create(name)], {} + ) + + def var_getattr(self, tx: "InstructionTranslator", name): + from .builder import VariableBuilder + + if self.source: + source = AttrSource(self.source, name) + else: + source = None + + base = tx.output.get_submodule(self.module_key) + base_dict = object.__getattribute__(base, "__dict__") + object_member = True + all_class_attribute_names = set() + for x in inspect.getmro(base.__class__): + all_class_attribute_names.update(x.__dict__.keys()) + + if not self.source: + unimplemented("GETATTR with no source") + + if name == "__dict__": + return variables.GetAttrVariable(self, name, source=source) + + if name in base_dict: + subobj = base_dict[name] + elif ( + "_modules" in base_dict + and name in base_dict["_modules"] + and name not in all_class_attribute_names + ): + subobj = base_dict["_modules"][name] + elif "_parameters" in base_dict and name in base_dict["_parameters"]: + subobj = base_dict["_parameters"][name] + elif "_buffers" in base_dict and name in base_dict["_buffers"]: + subobj = base_dict["_buffers"][name] + else: + try: + subobj = inspect.getattr_static(base, name) + object_member = False + except AttributeError: + # see if we can fallback to __getattr__, which is not checked by getattr_static + result = self._custom_getattr_fallback( + base=base, tx=tx, name=name, options={"source": source} + ) + if result is not None: + return result + # if we can't find a __getattr__, just raise the AttributeError + raise + + if name == "forward": + guard_to_detect_forward_monkeypatching(self.source, base) + + if name == "__class__" and not object_member: + return variables.UserDefinedClassVariable(base.__class__, source=source) + + if object_member: + out = VariableBuilder(tx, NNModuleSource(source))(subobj) + + if isinstance(out, (NNModuleVariable, UnspecializedNNModuleVariable)): + # nn_module_stack source is BC surface area. Ensure that + # mod._modules["linear"] is reflected as mod.linear for + # nn_module_stack. 
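+                # Editor's note: an illustrative sketch, not part of the
+                # upstream file. The BC expectation is that both spellings
+                # reach the same child in eager mode,
+                #
+                #   mod._modules["linear"] is mod.linear   # True
+                #
+                # but nn_module_stack entries should read like "mod.linear",
+                # so the stack source is rewritten to the attribute form.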
+ out.set_nn_module_stack_source( + AttrSource(self.get_nn_module_stack_source(), name) + ) + return out + + else: + if istype(subobj, property): + if self.source: + # Read the class attribute to reach the property + source = AttrSource(AttrSource(self.source, "__class__"), name) + # Get the getter function + source = AttrSource(source, "fget") + return variables.UserFunctionVariable( + subobj.fget, + source=source, + ).call_function(tx, [(self)], {}) + elif istype(subobj, classmethod): + return variables.UserMethodVariable( + subobj.__func__, + variables.UserDefinedObjectVariable(type(base)), + source=source, + ) + elif istype(subobj, staticmethod): + return variables.UserFunctionVariable( + subobj.__get__(base), source=source + ) + elif istype(subobj, types.FunctionType): + return variables.UserMethodVariable(subobj, self, source=source) + elif is_safe_constant(subobj) or istensor(subobj): + # Support possibly common cases of class members + return VariableBuilder(tx, NNModuleSource(source))(subobj) + else: + unimplemented( + f"class property {name} - {typestr(base)} {typestr(subobj)}" + ) + + return variables.GetAttrVariable(self, name, source=source) + + def call_function( + self, + tx, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + mod = tx.output.get_submodule(self.module_key) + + with record_nn_module_stack( + self.module_key, self.get_nn_module_stack_source(), tx, mod + ): + is_lazy = is_lazy_module(mod) + if ( + isinstance(mod, torch.nn.Sequential) + and mod.__class__.forward is torch.nn.Sequential.forward + ): + if nnmodule_has_hooks(mod): + # We do not want to unroll sequential if it has hooks, since evaporating it + # will cause hooks to not fire! + # This terminates and restart the tracing process + self.convert_to_unspecialized(tx) + + # Unroll sequential + assert ( + not is_lazy + ), "Expected lazy sequential isn't a valid combination?" + assert not kwargs + (arg,) = args + # TODO: Use named_children when it supports remove_duplicate=False. + for child_name, submod in mod._modules.items(): + tx.call_function( + tx.output.register_attr_or_module( + submod, + self.module_key, + child_name, + source=NNModuleSource(AttrSource(self.source, child_name)), + ), + [arg], + {}, + ) + arg = tx.pop() + return arg + + if is_lazy: + # The module type will change after it is called + if mod.cls_to_become is not None: + self.module_type = mod.cls_to_become + + # The pre-hook runs to initialize the module shapes, then deletes itself. After this, + # the module is more or less not lazy and can be treated as a normal module regardless of + # is_allowed or other variations. + initialize_lazy_module(tx, mod, args, kwargs) + + # If we are tracing the higher order op, we want Dynamo to step + # inside the module call so that Dynamo can see the underlying + # parameters and buffers and raise them as inputs to the graph. + # + # NB: torch.nn.utils.parametrize changes the class type of a + # parametrized module such that its __module__ points to + # "torch.nn.utils.parametrize". + if ( + tx.output.is_root_tracer() + and mod.__module__.startswith(("torch.nn.", "torch.ao.")) + and mod.__module__ != "torch.nn.utils.parametrize" + ): + if nnmodule_has_hooks( + mod, check_forward_hooks=True, check_backward_hooks=True + ): + # End of fn, this bubbles up and restarts tracing. 
+ self.convert_to_unspecialized(tx) + + from .builder import wrap_fx_proxy + + return wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_module", + self.module_key, + *proxy_args_kwargs(args, kwargs), + ), + ) + else: + assert self.source, ( + "Must provide a valid source in order to inline, " + "since inlined function may have default args which must be guarded." + ) + if isinstance(mod, torch.fx.GraphModule): + # TODO: do we want to support __call__ for GM's? + # If so at least some changes are needed, we don't allow inlining + # the call_wrapped currently, and maybe other issues too + fn = mod.forward + fn_source = AttrSource(self.source, "forward") + else: + fn = mod._call_impl + fn_source = AttrSource(self.source, "_call_impl") + if istype(fn, types.MethodType): + fn = fn.__func__ + fn_source = AttrSource(fn_source, "__func__") + args = [self] + args + else: + assert istype(fn, types.FunctionType) + return tx.inline_user_function_return( + variables.UserFunctionVariable(fn, source=fn_source), + args, + kwargs, + ) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + constant=False, + ) -> "VariableTracker": + from . import ConstantVariable, ListIteratorVariable, TupleVariable + + key = self.module_key + module = tx.output.get_submodule(key) + + def generic_call_method_helper(name): + # Helper function to put a `call_method` node in FX graph, + # with nn.Module as the first arg. + mod_proxy = tx.output.create_proxy( + "get_attr", + self.module_key, + (), + {}, + ) + set_example_value(mod_proxy.node, module) + + proxy_args, proxy_kwargs = proxy_args_kwargs(args, kwargs) + + from .builder import wrap_fx_proxy + + return wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_method", + name, + args=(mod_proxy, *proxy_args), + kwargs=proxy_kwargs, + ), + ) + + if name in ["_call_impl", "_wrapped_call_impl"]: + # Example: `self.layer.__call__(x)` + # This is used for explicit calling `__call__` in a forward function. + # Dynamo inlines `__call__`, includes hooks. + return self.call_function(tx, args, kwargs) + elif name == "forward": + # Example: `self.layer.forward(x)` + # This is used for explicit calling `forward` in a forward function. + # Dynamo puts `call_method` node in FX, doesn't trigger hooks. 
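+            # Editor's note: an illustrative sketch, not part of the upstream
+            # file. This mirrors the eager distinction
+            #
+            #   mod(x)          # __call__: runs forward *and* any hooks
+            #   mod.forward(x)  # bypasses hooks entirely
+            #
+            # so an explicit .forward() call is traced as a plain call_method
+            # node with no hook handling.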
+ with record_nn_module_stack( + self.module_key, self.get_nn_module_stack_source(), tx, module + ): + return generic_call_method_helper(name) + + if name == "_check_input_dim" and trace_rules.is_torch_inline_allowed( + inspect.getfile(module.__class__._check_input_dim) + ): + return ConstantVariable.create(True) + + if name == "_get_item_by_idx": + assert args[1].is_python_constant() + assert isinstance(args[0], TupleVariable) + mod_var = args[0].items[args[1].value] + if isinstance(mod_var, UnspecializedNNModuleVariable): + return mod_var + key = mod_var.module_key + submod = tx.output.get_submodule(key) + return tx.output.register_attr_or_module( + submod, + key, + key, + source=NNModuleSource(GetItemSource(self.source, key)), + ) + + if constant: + fn = getattr(module, name) + name = f"{module.__class__.__name__}_{name}_result" + return invoke_and_store_as_constant(tx, fn, name, args, kwargs) + + def assert_all_args_kwargs_const(): + if not all( + x.is_python_constant() for x in itertools.chain(args, kwargs.values()) + ): + unimplemented(f"non-const NNModule method {name}") + + def get_kwargs(*names): + assert_all_args_kwargs_const() + fn = getattr(module, name) + bound_args = inspect.signature(fn).bind( + *([x.as_python_constant() for x in args]), + **{k: v.as_python_constant() for k, v in kwargs.items()}, + ) + bound_args.apply_defaults() + bound_args = bound_args.arguments + return {k: bound_args[k] for k in names} + + def wrap_values(items): + result = [] + for name, submod in items: + result.append( + tx.output.register_attr_or_module( + submod, + key, + name, + source=NNModuleSource(gen_source(self.source, name)), + ) + ) + return ListIteratorVariable(result, mutable_local=MutableLocal()) + + def named_embed(name, obj): + return TupleVariable( + [ + ConstantVariable.create(name), + tx.output.register_attr_or_module( + obj, + key, + name, + source=NNModuleSource(gen_source(self.source, name)), + ), + ] + ) + + def gen_source(source, name): + name_split = name.split(".") + if name_split[0] == "": + return source + while len(name_split) > 0: + x = name_split.pop(0) + source = AttrSource(source, x) + return source + + if name == "named_children": + tx.output.guard_on_key_order.add(AttrSource(self.source, "_modules").name()) + assert not (args or kwargs) + result = [] + for name, submod in module.named_children(): + result.append(named_embed(name, submod)) + return ListIteratorVariable(result, mutable_local=MutableLocal()) + elif name == "named_parameters": + tx.output.guard_on_key_order.add( + AttrSource(self.source, "_parameters").name() + ) + result = [] + for name, param in module.named_parameters( + **get_kwargs("prefix", "recurse") + ): + result.append(named_embed(name, param)) + return ListIteratorVariable(result, mutable_local=MutableLocal()) + elif name == "named_buffers": + tx.output.guard_on_key_order.add(AttrSource(self.source, "_buffers").name()) + result = [] + for name, buffer in module.named_buffers( + **get_kwargs("prefix", "recurse", "remove_duplicate") + ): + result.append(named_embed(name, buffer)) + return ListIteratorVariable(result, mutable_local=MutableLocal()) + elif name == "named_modules": + tx.output.guard_on_key_order.add(AttrSource(self.source, "_modules").name()) + result = [] + for name, submod in module.named_modules( + **get_kwargs("memo", "prefix", "remove_duplicate") + ): + result.append(named_embed(name, submod)) + return ListIteratorVariable(result, mutable_local=MutableLocal()) + elif name == "children": + 
tx.output.guard_on_key_order.add(AttrSource(self.source, "_modules").name()) + assert not (args or kwargs) + return wrap_values(module.named_children()) + elif name == "modules": + tx.output.guard_on_key_order.add(AttrSource(self.source, "_modules").name()) + return wrap_values(module.named_modules()) + elif name == "parameters": + tx.output.guard_on_key_order.add( + AttrSource(self.source, "_parameters").name() + ) + return wrap_values(module.named_parameters(**get_kwargs("recurse"))) + elif name == "buffers": + tx.output.guard_on_key_order.add(AttrSource(self.source, "_buffers").name()) + return wrap_values(module.named_buffers(**get_kwargs("recurse"))) + elif name == "keys": + assert not (args or kwargs) + result = [] + for name in module.keys(): + result.append(ConstantVariable.create(name)) + return ListIteratorVariable(result, mutable_local=MutableLocal()) + elif name == "values": + assert not (args or kwargs) + return wrap_values(module.items()) + elif name == "items": + assert not (args or kwargs) + result = [] + for name, submod in module.items(): + result.append(named_embed(name, submod)) + return ListIteratorVariable(result, mutable_local=MutableLocal()) + elif name == "__len__": + assert not (args or kwargs) + return ConstantVariable.create(len(module)) + elif ( + name == "__contains__" + and isinstance(module, (torch.nn.ModuleDict, torch.nn.ParameterDict)) + and args + and args[0].is_python_constant() + ): + return ConstantVariable.create( + args[0].as_python_constant() in module._modules + ) + elif name == "__getitem__": + assert not kwargs and len(args) == 1 + builtin_supported = ( + torch.nn.ModuleDict.__getitem__, + torch.nn.ModuleList.__getitem__, + torch.nn.ParameterDict.__getitem__, + torch.nn.ParameterList.__getitem__, + torch.nn.Sequential.__getitem__, + ) + + if type(module).__getitem__ not in builtin_supported: + assert isinstance(args[0], variables.ConstantVariable), typestr(args[0]) + key = args[0].as_python_constant() + assert isinstance(key, (str, int)) + fn = getattr(module, name).__func__ + + assert isinstance(fn, types.FunctionType) + + src = AttrSource(AttrSource(self.source, name), "__func__") + return tx.inline_user_function_return( + variables.UserFunctionVariable(fn, source=src), + [self] + list(args), + kwargs, + ) + + assert self.source + + if isinstance(args[0], SliceVariable): + # TODO(anijain2305,export-team) - Remove this if condition when inlining of inbuilt nn modules is + # enabled for export. + if tx.output.export: + # Build a TupleVariable of NNModules + result = [] + + # Turn the slice into the list of integers + keys = list(range(len(module)))[args[0].as_python_constant()] + for idx, submod in enumerate(module[args[0].as_python_constant()]): + key = keys[idx] + src = NNModuleSource(GetItemSource(self.source, key)) + result.append( + tx.output.register_attr_or_module( + submod, + key, + source=src, + ) + ) + + new_module = module[args[0].as_python_constant()] + new_module_variable = tx.output.register_attr_or_module( + new_module, + f"{self}.__getitem__(slice)", + source=NNModuleSource( + GetItemSource(self.source, args[0].as_python_constant()) + ), + ) + return new_module_variable + else: + # slice on nn module results in a creation of new module instance, so we need to make it sourceless. + # Convert to unspecialized so that UnspecializedNNModule variable can take care of it. 
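+                    # Editor's note: an illustrative sketch, not part of the
+                    # upstream file. In eager, slicing returns a *new*
+                    # container instance:
+                    #
+                    #   ml = torch.nn.ModuleList([torch.nn.ReLU(), torch.nn.GELU()])
+                    #   sub = ml[0:1]
+                    #   assert isinstance(sub, torch.nn.ModuleList)
+                    #   assert sub is not ml
+                    #
+                    # A brand-new module has no stable source to guard on,
+                    # hence the fall back to the unspecialized path.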
+                    self.convert_to_unspecialized(tx)
+
+            from .tensor import SymNodeVariable
+
+            if isinstance(args[0], SymNodeVariable):
+                key = args[0].evaluate_expr(tx.output)
+            elif args[0].is_python_constant():
+                key = args[0].as_python_constant()
+            else:
+                unimplemented(f"getitem on NNModuleVariable with key {args[0]}")
+
+            submod = module[key]
+            return tx.output.register_attr_or_module(
+                submod,
+                self.module_key,
+                key,
+                source=NNModuleSource(GetItemSource(self.source, key)),
+            )
+        elif (
+            name == "_get_abs_string_index"
+            or (
+                isinstance(module, torch.nn.modules.conv._ConvNd)
+                and name == "_conv_forward"
+            )
+            or (
+                isinstance(module, torch.nn.modules.conv._ConvTransposeNd)
+                and name == "_output_padding"
+            )
+        ):
+            # Inline the function
+            fn = getattr(module, name).__func__
+            fn_source = AttrSource(AttrSource(self.source, name), "__func__")
+            return tx.inline_user_function_return(
+                variables.UserFunctionVariable(fn, source=fn_source),
+                [self] + args,
+                kwargs,
+            )
+        # A loose heuristic, but seems to be generally good before we drop into the
+        # manual handling of inputs
+        elif (
+            name in module.__class__.__dict__
+            and callable(module.__class__.__dict__[name])
+            and all(
+                isinstance(x, variables.TensorVariable)
+                for x in itertools.chain(args, kwargs.values())
+            )
+        ):
+            return generic_call_method_helper(name)
+        else:
+            return super().call_method(tx, name, args, kwargs)
+
+
+class UnspecializedNNModuleVariable(UserDefinedObjectVariable):
+    _nonvar_fields = {
+        "value_type",
+        "is_state_mutated",
+        "nn_module_stack_source",
+        *UserDefinedObjectVariable._nonvar_fields,
+    }
+
+    """
+    The above class will specialize on the id() of a module and place
+    parameters on the torch.fx.GraphModule, giving one graph per module
+    instance. This version treats nn.Modules() like other user defined
+    objects and will pass parameters into the FX graph as inputs, giving
+    one graph per module class.
+    """
+
+    def __init__(self, value, **kwargs) -> None:
+        if type(value) is torch.jit._script.RecursiveScriptModule:
+            raise Unsupported(
+                "ScriptModules aren't supported in UnspecializedNNModuleVariable"
+                " because their .forward function isn't a static member of their type"
+            )
+        if "value_type" in kwargs:
+            lazy_value_to_become = getattr(kwargs["value_type"], "cls_to_become", None)
+            if type(value) is lazy_value_to_become:
+                # We may have cloned a variabletracker for a LazyModule earlier (e.g. tracking side-effects)
+                # and then later we called and mutated the LazyModule into a MaterializedModule.
+                # We do not do the mutation upon first seeing a LazyModule since we preserve eager semantics to only
+                # mutate upon first call, but this requires we update multiple copies of the VariableTracker post-mutation.
+                kwargs["value_type"] = type(value)
+
+        super().__init__(value=value, **kwargs)
+        self.is_state_mutated = False
+        # nn_module_stack_source is used to ensure BC for nn_module_stack.
+        # Downstream users prefer mod.linear instead of mod._modules['linear']
+        # as the module stack. When Dynamo inlines the __getattr__ method, we
+        # cannot use self.source for nn_module_stack because it will be similar
+        # to mod._modules['linear']. In these cases, we set the
+        # nn_module_stack_source appropriately to resemble mod.linear.
+ self.nn_module_stack_source = self.source + + def _wrap_source(self, attr_source): + if not isinstance(attr_source, UnspecializedNNModuleSource): + return UnspecializedNNModuleSource(attr_source) + return attr_source + + def get_nn_module_stack_source(self): + return self.nn_module_stack_source or self.source + + def set_nn_module_stack_source(self, source): + self.nn_module_stack_source = source + + @staticmethod + @functools.lru_cache(None) + def _nn_module_method_ids(): + # Allow __setattr__ to fall through to base class handler + supported = {torch.nn.Module.__setattr__, torch.nn.Module.__init__} + return { + id(x.__code__) + for x in torch.nn.Module.__dict__.values() + if hasattr(x, "__code__") and x not in supported + } + + def unpack_var_sequence(self, tx): + try: + fn = inspect.getattr_static(self.value_type, "__iter__") + except AttributeError as e: + raise NotImplementedError from e + + if fn in ( + torch.nn.ModuleList.__iter__, + torch.nn.ParameterList.__iter__, + torch.nn.Sequential.__iter__, + ): + # The program can mutate the nn module object but the saved `value` + # will not reflect the mutations. So, trace through the `__iter__` + # function to reflect any tracked mutations. + return tx.inline_user_function_return( + variables.UserFunctionVariable(fn), + [ + self, + ], + {}, + ).unpack_var_sequence(tx) + + return super().unpack_var_sequence(tx) + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + mod = self.value + # see comment on lazy module handling in NNModuleVariable.call_function for context + if is_lazy_module(mod): + if mod.cls_to_become is not None: + self.value_type = mod.cls_to_become + initialize_lazy_module(tx, mod, args, kwargs) + name = "_call_impl" + fn = getattr(self.value_type, name) + + # Check if we can short circuit nn.Module._call_impl to the forward + # method. NB - This is done to reduce the compile time of Dynamo. 
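+        # Editor's note: an illustrative sketch, not part of the upstream
+        # file. The short circuit below is valid exactly when __call__ would
+        # do nothing but invoke forward, i.e. in eager terms:
+        #
+        #   no_hooks = (
+        #       not mod._forward_hooks
+        #       and not mod._forward_pre_hooks
+        #       and not mod._backward_hooks
+        #       and not mod._backward_pre_hooks
+        #   )
+        #
+        # plus the same check on the global hook dicts in
+        # torch.nn.modules.module, which is what the chain of var_getattr
+        # calls verifies.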
+ if fn is torch.nn.Module._call_impl and "forward" not in mod.__dict__: + forward_method = inspect.getattr_static(mod, "forward") + if isinstance(forward_method, types.FunctionType): + globals_vt = tx.nn_modules_globals_vt + if not ( + self.var_getattr(tx, "_backward_hooks").realize().len() + or self.var_getattr(tx, "_backward_pre_hooks").realize().len() + or self.var_getattr(tx, "_forward_hooks").realize().len() + or self.var_getattr(tx, "_forward_pre_hooks").realize().len() + or globals_vt.var_getattr(tx, "_global_backward_pre_hooks").len() + or globals_vt.var_getattr(tx, "_global_backward_hooks").len() + or globals_vt.var_getattr(tx, "_global_forward_hooks").len() + or globals_vt.var_getattr(tx, "_global_forward_pre_hooks").len() + ): + name = "forward" + fn = self.value_type.forward + + if self.source: + source = AttrSource(AttrSource(self.source, "__class__"), name) + else: + source = None + + guard_to_detect_forward_monkeypatching(self.source, mod) + + ctx = ( + record_nn_module_stack( + str(id(mod)), self.get_nn_module_stack_source(), tx, mod + ) + if self.source + else nullcontext() + ) + with ctx: + return variables.UserFunctionVariable(fn, source=source).call_function( + tx, [self] + list(args), kwargs + ) + + def trace_supported_methods( + self, tx: "InstructionTranslator", method, name, args, kwargs + ): + def get_kwargs(*names): + fn = getattr(self.value, name) + bound_args = inspect.signature(fn).bind( + *([x.as_python_constant() for x in args]), + **{k: v.as_python_constant() for k, v in kwargs.items()}, + ) + bound_args.apply_defaults() + bound_args = bound_args.arguments + return {k: bound_args[k] for k in names} + + def get_current_parameters(module_var): + params_dict = module_var.var_getattr(tx, "_parameters").realize().items + assert isinstance(params_dict, dict) + params_list = list(params_dict.values()) + params_list = [param.realize() for param in params_list] + # Account for mod.param = None + params_list = [ + param + for param in params_list + if isinstance(param, variables.TensorVariable) + ] + return params_list + + def collect_parameters(module_var, recurse): + params_list = [] + assert isinstance(module_var, UnspecializedNNModuleVariable) + params_list = get_current_parameters(module_var) + modules_dict = module_var.var_getattr(tx, "_modules").realize() + if recurse: + for submodule_var in modules_dict.items.values(): + assert isinstance(submodule_var, UnspecializedNNModuleVariable) + params_list.extend(collect_parameters(submodule_var, recurse)) + return params_list + + if method is torch.nn.Module.parameters: + if self.source: + tx.output.guard_on_key_order.add( + AttrSource(self.source, "_parameters").name() + ) + recurse = get_kwargs("recurse")["recurse"] + params_list = collect_parameters(self, recurse=recurse) + + # Account for duplicated params + deduplicated_params = list(dict.fromkeys(params_list).keys()) + + return variables.ListIteratorVariable( + deduplicated_params, mutable_local=MutableLocal() + ) + else: + raise AssertionError( + "Discrepancy between is_supported_nn_module_method and trace_supported_methods" + ) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if name in ["_call_impl", "_wrapped_call_impl"]: + fn = getattr(self.value_type, name) + if self.source: + source = AttrSource(AttrSource(self.source, "__class__"), name) + else: + source = None + + return variables.UserFunctionVariable(fn, source=source).call_function( + tx, [self] + 
list(args), kwargs
+            )
+
+        if name not in getattr(self.value, "__dict__", {}):
+            try:
+                method = inspect.getattr_static(type(self.value), name)
+            except AttributeError:
+                method = None
+
+            if self.is_supported_nn_module_method(method):
+                return self.trace_supported_methods(tx, method, name, args, kwargs)
+
+            if isinstance(method, staticmethod):
+                source = AttrSource(
+                    AttrSource(AttrSource(self.source, "__class__"), name), "__func__"
+                )
+                return tx.inline_user_function_return(
+                    variables.UserFunctionVariable(method.__func__, source=source),
+                    args,
+                    kwargs,
+                )
+
+            if (
+                hasattr(method, "__code__")
+                and id(method.__code__) in self._nn_module_method_ids()
+            ):
+                unimplemented(f"UnspecializedNNModuleVariable missing {name}")
+
+            # "_parameters" in self.value.__dict__ checks that the module is initialized
+            if name == "__setattr__" and "_parameters" in self.value.__dict__:
+                # Record if mutations happen on parameters/buffers/modules. The
+                # mutations on these are not tracked by the base class
+                # UserDefinedObject vt. This will be used later to graph break
+                # on seeing a parameters() (and family) call.
+                # TODO(anijain2305) - This might not be needed if we let Dynamo
+                # inline both getattr and setattr. In that case, it should see
+                # the lowest level dicts - _parameters and family - and
+                # automatically track mutations on those. Investigate if that
+                # can be done.
+                attr_name = args[0].as_python_constant()
+                value = args[1]
+
+                # This is reverse engineered by looking at the nn module
+                # __setattr__ logic.
+                if (
+                    isinstance(value, variables.TensorVariable)
+                    and value.python_type() is torch.nn.Parameter
+                ) or attr_name in self.value.__dict__["_parameters"]:
+                    # Handle parameters
+                    self.is_state_mutated = True
+                elif attr_name in self.value.__dict__["_buffers"]:
+                    # Handle buffers
+                    self.is_state_mutated = True
+                elif (
+                    isinstance(
+                        value,
+                        (
+                            variables.NNModuleVariable,
+                            variables.UnspecializedNNModuleVariable,
+                        ),
+                    )
+                    or attr_name in self.value.__dict__["_modules"]
+                ):
+                    # Handle submodules
+                    self.is_state_mutated = True
+
+            if method is torch.nn.Module.__setattr__ and isinstance(
+                args[1], variables.DeletedVariable
+            ):
+                # Trace through __delattr__ to track mutations on module
+                # members like `_modules`.
+                return tx.inline_user_function_return(
+                    variables.UserFunctionVariable(torch.nn.Module.__delattr__),
+                    [self, args[0]],
+                    kwargs,
+                )
+
+        return super().call_method(tx, name, args, kwargs)
+
+    def getattr_helper(self, tx: "InstructionTranslator", field, name_vt):
+        dict_vt = self.var_getattr(tx, field)
+        if isinstance(dict_vt, variables.ConstDictVariable):
+            return dict_vt.maybe_getitem_const(name_vt)
+        return None
+
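+    # [Note: guarding nn module hook dicts]
+    # An illustrative sketch of the problem var_getattr below works around,
+    # assuming a default module with no hooks installed:
+    #
+    #     mod = torch.nn.ReLU()
+    #     mod._forward_hooks  # an empty OrderedDict in eager
+    #
+    # A plain dict guard would also guard on the keys, and hook keys are
+    # fresh RemovableHandle ids per installation, so installing the "same"
+    # hook on another module instance would force a recompile. Instead,
+    # empty hook dicts get a cheap emptiness guard, and non-empty ones are
+    # guarded on sequence length and key order rather than on key values.
+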
+    def var_getattr(self, tx: "InstructionTranslator", name):
+        # Allow skipping of empty hook dict guards on inbuilt nn modules
+        if name in (
+            "_backward_hooks",
+            "_backward_pre_hooks",
+            "_forward_hooks",
+            "_forward_pre_hooks",
+        ):
+            # For empty hooks, make an EMPTY_NN_MODULE_HOOKS_DICT. This allows
+            # us to control the installation of empty-hooks guards via
+            # skip_nnmodule_hook_guards
+            if not tx.output.side_effects.has_pending_mutation_of_attr(
+                self, name
+            ) and self.value.__module__.startswith(("torch.nn.", "torch.ao.")):
+                hooks_dict = getattr(self.value, name)
+                if isinstance(hooks_dict, dict) and len(hooks_dict) == 0:
+                    if self.source:
+                        hooks_source = AttrSource(self.source, name)
+                        install_guard(
+                            hooks_source.make_guard(
+                                GuardBuilder.EMPTY_NN_MODULE_HOOKS_DICT
+                            )
+                        )
+                    return variables.ConstDictVariable({})
+
+        # For non-empty hook dicts, one way is to just fall back to VariableBuilder and create a ConstDictVariable.
+        # However, ConstDictVariable guards on keys. This can cause recompiles when the same hook is installed for
+        # different nn module instances, because the key keeps changing (look more into RemovableHandle to understand
+        # why the key changes - also related https://github.com/pytorch/pytorch/issues/125836). Here, we carefully
+        # craft a ConstDictVariable to avoid any guard on the keys.
+        if (
+            self.source
+            and name
+            in (
+                "_forward_pre_hooks",
+                "_forward_hooks",
+            )
+            and not tx.output.side_effects.has_pending_mutation_of_attr(self, name)
+        ):
+            hooks_dict = getattr(self.value, name)
+            hooks_dict_source = AttrSource(self.source, name)
+            install_guard(hooks_dict_source.make_guard(GuardBuilder.SEQUENCE_LENGTH))
+            tx.output.guard_on_key_order.add(hooks_dict_source.name())
+
+            def build_key_value(i, k, v):
+                # Make the key sourceless to avoid any guard on it
+                key = variables.ConstantVariable.create(k)
+
+                # Instead of using dict[key] to access the value, use dict[dict.keys()[index]] to access the
+                # value. This removes the reliance on the actual key value.
+                source_key = ConstDictKeySource(hooks_dict_source, i)
+                source_value = GetItemSource(hooks_dict_source, source_key)
+                value = LazyVariableTracker.create(v, source_value)
+                return key, value
+
+            result = dict(
+                build_key_value(i, k, v) for i, (k, v) in enumerate(hooks_dict.items())
+            )
+
+            return variables.ConstDictVariable(
+                result, type(hooks_dict), source=hooks_dict_source
+            )
+        return super().var_getattr(tx, name)
+
+    def manually_trace_nn_module_getattr(self, tx: "InstructionTranslator", name):
+        """
+        Dynamo tracing of nn.Module __getattr__ can be expensive if the model
+        has a deep submodule hierarchy. Since the __getattr__ logic is stable,
+        we can look directly into the underlying data structures. This saves a
+        lot of compilation time.
+        """
+        name_vt = variables.ConstantVariable(name)
+        out = self.getattr_helper(tx, "_parameters", name_vt)
+        if out is None:
+            out = self.getattr_helper(tx, "_modules", name_vt)
+        if out is None:
+            out = self.getattr_helper(tx, "_buffers", name_vt)
+        if out is None:
+            raise_observed_exception(AttributeError, tx, self)
+        return out
+
+
+class UnspecializedBuiltinNNModuleVariable(UnspecializedNNModuleVariable):
+    """
+    Differentiates between builtin nn modules (e.g. torch.nn.Linear) and user defined nn modules.
+    """
+
+    def _wrap_source(self, attr_source):
+        if not isinstance(attr_source, UnspecializedBuiltinNNModuleSource):
+            return UnspecializedBuiltinNNModuleSource(attr_source)
+        return attr_source
+
+
+class FSDPManagedNNModuleVariable(UnspecializedNNModuleVariable):
+    """
+    Tracing behavior: trace into submodules and treat them as Unspecialized, do not
+    register parameters to the top-level, treat them as function inputs.
+ + Guards behavior: if 'skip_fsdp_guards', many guards that would be installed + by a vanilla UnspecializedNNModuleVariable are simply dropped, on the basis + that a user wrapping their model in FSDP(model) is already opting into a + requirement to not modify internal model state, which would already break FSDP without + compilation. + """ + + def __init__(self, value, **kwargs) -> None: + source = kwargs.get("source", None) + assert ( + source is not None + ), "FSDPManagedNNModule depends on having an accurate source to control guarding." + + super().__init__(value=value, **kwargs) + self.source = source + + def _wrap_source(self, attr_source): + if not isinstance( + attr_source, (FSDPNNModuleSource, UnspecializedNNModuleSource) + ): + if torch._dynamo.config.skip_fsdp_guards: + return FSDPNNModuleSource(attr_source) + else: + return UnspecializedNNModuleSource(attr_source) + return attr_source diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/optimizer.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..f238027aca169022bf8d4ee1e3fb82b5db0cb70f --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/optimizer.py @@ -0,0 +1,355 @@ +# mypy: ignore-errors + +import weakref +from typing import Dict, List, TYPE_CHECKING + +import torch +from torch.utils._pytree import tree_map_only + +from ..guards import GuardBuilder, install_guard +from ..source import ( + AttrSource, + ConstDictKeySource, + GetItemSource, + GlobalWeakRefSource, + GradSource, +) +from ..utils import GLOBAL_KEY_PREFIX +from .constant import ConstantVariable +from .dicts import ConstDictVariable +from .lists import ListVariable +from .misc import GetAttrVariable +from .user_defined import UserDefinedObjectVariable + + +if TYPE_CHECKING: + from torch._dynamo.symbolic_convert import InstructionTranslator + + from .base import VariableTracker + + +class ArgMappingException(Exception): + pass + + +class GuardInstallException(Exception): + pass + + +class OptimizerVariable(UserDefinedObjectVariable): + _nonvar_fields = { + "grad_to_source", + "tensor_to_source", + "static_tensor_names", + *UserDefinedObjectVariable._nonvar_fields, + } + + def __init__( + self, + value, + grad_to_source=None, + static_tensor_names=None, + tensor_to_source=None, + **kwargs, + ) -> None: + super().__init__(value, **kwargs) + self.grad_to_source = grad_to_source or {} + self.tensor_to_source = tensor_to_source or {} + self.static_tensor_names = static_tensor_names or set() + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + """This is an optimization to avoid tracing the very slow initialization of the optimizer""" + if name == "_init_group": + try: + self.graph_break_if_pending_mutation(tx) + self.move_step_if_cpu() + py_args, py_kwargs = self.get_python_args(*args, **kwargs) + ret_val = self.value._init_group(*py_args, **py_kwargs) + self.map_sources_and_install_guards(tx) + self.update_list_args(tx, args, kwargs, py_args, py_kwargs) + # stash a weak_ptr to optimizer to invalidate code + # if the optimizer object dies + mangled_name = f"__optimizer_{id(self.value)}" + tx.store_global_weakref_by_id(mangled_name, self.value) + self.create_finalizer(tx) + + # This is currently safe only because the only actual `ret_val`s returned + # by the `_init_group` of existing optimizers are properties that are invariant + # to the 
input tensors (e.g. dtype, layout). Changing these would trigger a
+                # recompilation and hence never result in the wrong specialization of `ret_val`.
+                return ConstantVariable.create(ret_val)
+            except (ArgMappingException, GuardInstallException) as _:
+                # trace normally if we can't map args or install guards correctly
+                pass
+
+        return super().call_method(tx, name, args, kwargs)
+
+    def var_getattr(self, tx: "InstructionTranslator", name):
+        # Note: this allows us to intercept the call in call_method.
+        # In the typical case, we return a UserMethodVariable,
+        # which will directly inline.
+        if name in ("_init_group", "step"):
+            return GetAttrVariable(self, name, source=AttrSource(self.source, name))
+
+        if name == "param_groups":
+            from ..decorators import mark_static_address
+
+            for group in self.value.param_groups:
+                for p in group["params"]:
+                    mark_static_address(p)
+
+            self._set_capturable(tx)
+
+        return super().var_getattr(tx, name)
+
+    def graph_break_if_pending_mutation(self, tx):
+        # If there are pending mutations on a parameter (due to using a closure),
+        # then we need to graph break to allow the python version of the parameter
+        # to update, so that running _init_group will initialize the states with
+        # the correct values
+        for g in self.value.param_groups:
+            for p in g["params"]:
+                side_effects = tx.output.side_effects
+                variable = side_effects.id_to_variable.get(id(p), None)
+                if variable and side_effects.has_pending_mutation(variable):
+                    from ..exc import Unsupported
+
+                    raise Unsupported("Pending mutation on parameter")
+
+    def _set_capturable(self, tx):
+        from . import LazyVariableTracker
+        from .builder import VariableBuilder
+
+        # We only set capturable if the params are on cuda
+        # and the state is not initialized
+        def safe_to_set_capturable(group):
+            all_uninitialized = True
+            all_gpu = True
+
+            for p in group.get("params", []):
+                all_gpu &= p.is_cuda or p.is_xpu
+                all_uninitialized &= p not in self.value.state
+
+            return "capturable" in group and all_uninitialized and all_gpu
+
+        # Set the flag on the underlying param groups first, so that we don't
+        # need to realize the whole state in the variable tracker; guarding
+        # the state is handled specially.
+        for ind, group in enumerate(self.value.param_groups):
+            if safe_to_set_capturable(group):
+                group["capturable"] = True
+
+        param_groups_vt = LazyVariableTracker.realize_all(
+            VariableBuilder(tx, AttrSource(self.source, "param_groups"))(
+                self.value.param_groups
+            )
+        )
+        for ind, param_group_vt in enumerate(param_groups_vt.items):
+            key = ConstDictVariable._HashableTracker(
+                ConstantVariable.create("capturable")
+            )
+            param_group_vt.items[key] = ConstantVariable.create(True)
+
+    def get_python_args(self, *args, **kwargs):
+        """Get python values equivalent to the variable tracker args"""
+
+        def map_arg(arg):
+            if isinstance(arg, ConstantVariable):
+                return arg.as_python_constant()
+            elif isinstance(arg, ListVariable) and not arg.items:
+                return []
+            elif (
+                isinstance(arg, ConstDictVariable)
+                and isinstance(arg.source, GetItemSource)
+                and isinstance(arg.source.base, AttrSource)
+                and arg.source.base.member == "param_groups"
+            ):
+                return self.value.param_groups[arg.source.index]
+
+            raise ArgMappingException
+
+        new_args = [map_arg(arg) for arg in args]
+        new_kwargs = {k: map_arg(v) for k, v in kwargs.items()}
+
+        return new_args, new_kwargs
+
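+    # Illustrative example of the mapping done by `get_python_args` above
+    # (hypothetical Adam-style `_init_group` call): a ConstDictVariable
+    # sourced from `opt.param_groups[0]` maps back to the real
+    # `self.value.param_groups[0]` dict, ConstantVariables are unwrapped via
+    # as_python_constant(), and empty ListVariables become fresh `[]` lists
+    # for `_init_group` to fill in place. Anything else raises
+    # ArgMappingException, which makes call_method fall back to normal
+    # tracing.
+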
+    # If users load an old state dictionary, it's possible that `step` could
+    # be on the CPU. If this is the case, move it to the GPU corresponding to
+    # the parameter. In most cases this is a no-op because the state is empty.
+    def move_step_if_cpu(self):
+        for p, state in self.value.state.items():
+            if "step" in state and state["step"].is_cpu:
+                state["step"] = state["step"].to(p.device)
+
+    def map_sources_and_install_guards(self, tx):
+        from ..decorators import mark_static_address
+        from .builder import VariableBuilder
+        from .lazy import LazyVariableTracker
+
+        self.grad_to_source = {}
+        self.tensor_to_source = {}
+
+        # Tracing the _init_group is expensive. But we still have to insert the
+        # necessary guards for _init_group. So, we manually handle insertion of
+        # guards. We also want to mark all the tensors inside the state dict to
+        # be static addresses.
+
+        # Mark all the tensors in the state dict to be static addresses. This
+        # has to be done first because the variable builder relies on the
+        # static address annotation.
+        def mark_static(x):
+            mark_static_address(x)
+
+        tree_map_only(torch.Tensor, mark_static, self.value.state)
+
+        # Recursively realize the variable trackers for optim.state and
+        # optim.param_groups, which recursively install the necessary guards.
+        param_groups_vt = LazyVariableTracker.realize_all(
+            VariableBuilder(tx, AttrSource(self.source, "param_groups"))(
+                self.value.param_groups
+            )
+        )
+
+        state_vt = VariableBuilder(tx, AttrSource(self.source, "state"))(
+            self.value.state
+        )
+
+        # We need to realize the top level state dict to populate
+        # the guard locals
+        state_vt.realize()
+
+        # Populate self.grad_to_source and self.tensor_to_source so that we can
+        # manually update_list_args
+        for g_ind, (group, group_vt) in enumerate(
+            zip(self.value.param_groups, param_groups_vt.items)
+        ):
+            # We assume here that all params within a param group
+            # are initialized similarly
+            if len(group["params"]) > 0:
+                for param in group["params"]:
+                    if param.grad is not None:
+                        key_index = None
+                        for i, k in enumerate(self.value.state.keys()):
+                            if k is param:
+                                key_index = i
+                                break
+                        # NB: compare against None so a key index of 0 is not
+                        # skipped
+                        if key_index is not None:
+                            state_source = AttrSource(self.source, "state")
+                            LazyVariableTracker.realize_all(
+                                VariableBuilder(
+                                    tx,
+                                    GetItemSource(
+                                        state_source,
+                                        ConstDictKeySource(state_source, key_index),
+                                    ),
+                                )(self.value.state[param])
+                            )
+                            break
+
+            group_source = group_vt.source
+            params_vt = group_vt.getitem_const(tx, ConstantVariable.create("params"))
+            for p_ind, (p, p_vt) in enumerate(
+                zip(group["params"], params_vt.unpack_var_sequence(tx))
+            ):
+                param_source = p_vt.source
+                self.tensor_to_source[p] = param_source
+                grad_source = GradSource(
+                    param_source,
+                    "grad",
+                )
+
+                if p.grad is not None:
+                    self.grad_to_source[p.grad] = grad_source
+                else:
+                    install_guard(grad_source.make_guard(GuardBuilder.CONSTANT_MATCH))
+
+        # We have to iterate over the state dict again to collect the
+        # tensor_to_source dict. This is used for the finalizer.
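+        # (Illustrative, assuming an Adam-like optimizer: after this pass,
+        # state tensors such as each param's `exp_avg` end up keyed by
+        # sources of the form state[<key i>]['exp_avg'], so wrap_tensor can
+        # rebuild them without falling back to global weakref sources.)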
+ state_source = AttrSource(self.source, "state") + for idx, (p, value) in enumerate(self.value.state.items()): + p_state_source = GetItemSource( + state_source, ConstDictKeySource(state_source, idx) + ) + for k, v in value.items(): + if ( + isinstance(v, torch.Tensor) + and v not in self.grad_to_source + and v not in self.tensor_to_source + ): + self.tensor_to_source[v] = GetItemSource(p_state_source, k) + + def wrap_tensor(self, tx: "InstructionTranslator", tensor_value): + """Wrap state tensor in a TensorVariable""" + from ..decorators import mark_static_address + from .builder import VariableBuilder + + # If we have a source for a tensor already use it, + # if we have not seen a tensor before, stash and use a + # global weak ref source, since it must be an optimizer tensor + # that we have missed + + if tensor_value in self.tensor_to_source: + # mark these tensors as static for cudagraphs + mark_static_address(tensor_value) + builder = VariableBuilder(tx, self.tensor_to_source[tensor_value]) + self.static_tensor_names.add(tx.output.module_key_name(builder.name)) + elif tensor_value in self.grad_to_source: + builder = VariableBuilder(tx, self.grad_to_source[tensor_value]) + else: + # mark these tensors as static for cudagraphs + mark_static_address(tensor_value) + + global_name = tx.store_global_weakref_by_id(GLOBAL_KEY_PREFIX, tensor_value) + builder = VariableBuilder(tx, GlobalWeakRefSource(global_name)) + self.static_tensor_names.add(tx.output.module_key_name(builder.name)) + + result = builder(tensor_value) + return result + + def update_list_args( + self, tx: "InstructionTranslator", args, kwargs, py_args, py_kwargs + ): + """Update the args and kwargs to the traced optimizer call""" + for arg, py_arg in zip(args, py_args): + if isinstance(arg, ListVariable): + assert isinstance( + py_arg, list + ), "py_arg should be a list in optimizer variable" + for i, val in enumerate(py_arg): + tx.output.side_effects.mutation(arg) + if isinstance(val, torch.Tensor): + arg.items.append(self.wrap_tensor(tx, val)) + else: + from .builder import SourcelessBuilder, VariableBuilder + + if arg.source: + arg.items.append( + VariableBuilder(tx, GetItemSource(arg.source, i))(val) + ) + else: + arg.items.append(SourcelessBuilder.create(tx, val)) + + def create_finalizer(self, tx): + names_to_delete = self.static_tensor_names + value = self.value + tc = tx.output.tracing_context + + def init_finalizer(gm): + def clear_static_tensor_refs(): + for name in names_to_delete: + gm._buffers.pop(name, None) + gm._parameters.pop(name, None) + if tc.params_flat: + tc.params_flat.clear() + + weakref.finalize(value, clear_static_tensor_refs) + + tx.output.add_graph_finalizer(init_finalizer) diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/sdpa.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/sdpa.py new file mode 100644 index 0000000000000000000000000000000000000000..611450ae6cf9ad75d6b40153c6577594e47bfd5f --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/sdpa.py @@ -0,0 +1,97 @@ +# mypy: ignore-errors + +from inspect import getattr_static +from typing import TYPE_CHECKING + +from ..bytecode_transformation import create_call_function +from ..exc import Unsupported +from .base import VariableTracker + + +if TYPE_CHECKING: + from torch._dynamo.symbolic_convert import InstructionTranslator + + +class SDPAParamsVariable(VariableTracker): + """Represents the c++ params struct for scaled dot product attention. 
+ This is a read-only container.""" + + @staticmethod + def create(tx: "InstructionTranslator", value, source): + from torch.backends.cuda import SDPAParams + + from ..source import AttrSource + from .builder import VariableBuilder + from .torch import TorchInGraphFunctionVariable + + query_var = VariableBuilder(tx, AttrSource(source, "query"))(value.query) + key_var = VariableBuilder(tx, AttrSource(source, "key"))(value.key) + value_var = VariableBuilder(tx, AttrSource(source, "value"))(value.value) + attn_mask_var = VariableBuilder(tx, AttrSource(source, "attn_mask"))( + value.attn_mask + ) + dropout_var = VariableBuilder(tx, AttrSource(source, "dropout"))(value.dropout) + is_causal_var = VariableBuilder(tx, AttrSource(source, "is_causal"))( + value.is_causal + ) + enable_gqa_var = VariableBuilder(tx, AttrSource(source, "enable_gqa"))( + value.enable_gqa + ) + param_vars = [ + query_var, + key_var, + value_var, + attn_mask_var, + dropout_var, + is_causal_var, + enable_gqa_var, + ] + return TorchInGraphFunctionVariable(SDPAParams).call_function( + tx, param_vars, {} + ) + + def __init__(self, proxy, param_vars, **kwargs) -> None: + self.proxy = proxy + self.param_vars = param_vars + super().__init__(**kwargs) + + def reconstruct(self, codegen): + assert self.source is None + assert self.param_vars is not None + codegen.add_push_null( + lambda: codegen.load_import_from("torch._C", "_SDPAParams") + ) + codegen.foreach(self.param_vars) + codegen.extend_output(create_call_function(len(self.param_vars), False)) + + def as_proxy(self): + return self.proxy + + def var_getattr(self, tx: "InstructionTranslator", name: str) -> VariableTracker: + import torch._C + + from ..source import AttrSource + from .builder import wrap_fx_proxy + from .misc import GetAttrVariable + + try: + getattr_static(torch._C._SDPAParams, name) + except AttributeError: + # Using raise from is too verbose here + raise Unsupported( + f"Unsupported torch._C._SDPAParams attribute {name}" + ) from None + + proxy = GetAttrVariable.create_getattr_proxy(self.as_proxy(), name) + if self.source is not None: + return wrap_fx_proxy( + tx=tx, proxy=proxy, source=AttrSource(self.source, name) + ) + else: + return wrap_fx_proxy(tx=tx, proxy=proxy) + + @staticmethod + def is_sdpa_params(value): + from torch.backends.cuda import SDPAParams + + return value is SDPAParams diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/tensor.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..088313bdbf551dfb4d9054788a3a1a4fe2947df1 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/tensor.py @@ -0,0 +1,1391 @@ +# mypy: ignore-errors + +import functools +import inspect +import logging +import operator +import textwrap +import traceback +import types +import unittest +from typing import Dict, List, TYPE_CHECKING + +import sympy + +import torch._numpy as tnp +import torch.fx +import torch.random +from torch._dynamo import compiled_autograd +from torch._subclasses.meta_utils import is_sparse_any +from torch.fx.experimental.symbolic_shapes import ( + guard_scalar, + GuardOnDataDependentSymNode, + has_free_symbols, + is_symbolic, + SymTypes, +) +from torch.utils._python_dispatch import is_traceable_wrapper_subclass + +from .. 
import config, variables
+from .._trace_wrapped_higher_order_op import trace_wrapped
+from ..exc import unimplemented, UserError, UserErrorType
+from ..external_utils import call_hook_from_backward_state
+from ..guards import GuardBuilder, install_guard
+from ..source import AttrSource
+from ..utils import (
+    fqn,
+    get_custom_getattr,
+    get_fake_value,
+    get_real_value,
+    guard_if_dyn,
+    object_has_getattribute,
+    product,
+    proxy_args_kwargs,
+    set_example_value,
+    tensortype_to_dtype,
+)
+from .base import VariableTracker
+from .constant import ConstantVariable
+from .lists import SizeVariable
+
+
+try:
+    import numpy as np
+except ModuleNotFoundError:
+    np = None
+
+
+if TYPE_CHECKING:
+    from torch._dynamo.symbolic_convert import InstructionTranslator
+
+
+log = logging.getLogger(__name__)
+
+# Ops that allow tensor <op> tensor
+supported_tensor_comparison_ops = {
+    ">": operator.gt,
+    "<": operator.lt,
+    ">=": operator.ge,
+    "<=": operator.le,
+    "==": operator.eq,
+    "!=": operator.ne,
+}
+# Ops that allow tensor <op> None
+supported_const_comparison_ops = {
+    "is": operator.is_,
+    "is not": operator.is_not,
+    "==": operator.eq,
+    "!=": operator.ne,
+}
+supported_comparison_ops = {
+    **supported_tensor_comparison_ops,
+    **supported_const_comparison_ops,
+}
+supported_tensor_comparison_op_values = dict.fromkeys(
+    supported_tensor_comparison_ops.values()
+)
+supported_const_comparison_op_values = dict.fromkeys(
+    supported_const_comparison_ops.values()
+)
+
+
+class TensorVariable(VariableTracker):
+    """A torch.Tensor input or an intermediate value in the FX graph"""
+
+    _nonvar_fields = {
+        "proxy",
+        "dtype",
+        "device",
+        "layout",
+        "ndim",
+        "size",
+        "stride",
+        "requires_grad",
+        "is_quantized",
+        "is_contiguous",
+        "is_sparse",
+        "class_type",
+        "specialized_value",
+        "_is_name_set",
+        *VariableTracker._nonvar_fields,
+    }
+
+    def get_real_value(self):
+        """
+        Get the actual value represented by this variable if computation is run
+        using the user-provided inputs.
+        NOTE: this runs actual tensor computation and may be
+        slow and memory-intensive.
+ """ + return get_real_value(self.proxy.node, self.proxy.tracer) + + def __init__( + self, + proxy: torch.fx.Proxy, + *, + dtype, + device, + layout, + ndim, + requires_grad, + is_quantized, + is_sparse, + class_type, + has_grad_fn, + size=None, + stride=None, + is_contiguous=None, + _is_name_set=None, + **kwargs, + ) -> None: + super().__init__(**kwargs) + self.proxy = proxy + self.dtype = dtype + self.device = device + self.layout = layout + self.ndim = ndim + self.size = size + self.stride = stride + self.requires_grad = requires_grad + self.is_quantized = is_quantized + self.is_contiguous = is_contiguous + self.is_sparse = is_sparse + self.class_type = class_type + self.has_grad_fn = has_grad_fn + if _is_name_set is None: + # no need to rename inputs + _is_name_set = self.proxy.node.op == "placeholder" + self._is_name_set: bool = _is_name_set + + def debug_repr(self): + # TODO: strip off fake tensor from repr here + return repr(self.proxy.node.meta["example_value"]) + + def as_proxy(self): + return self.proxy + + def python_type(self): + return self.class_type + + @staticmethod + def specialize(value: torch.Tensor): + props = { + "dtype": value.dtype, + "device": value.device, + "layout": value.layout, + "ndim": int(value.ndim), + "requires_grad": value.requires_grad, + "is_quantized": value.is_quantized, + "is_sparse": value.is_sparse, + "class_type": type(value), + } + try: + props["has_grad_fn"] = value.grad_fn is not None + except Exception: + # Workaround for issues with create_parameter_op in Dynamo. Reading + # grad_fn should never cause an issue. + props["has_grad_fn"] = False + + if is_sparse_any(value) and not has_free_symbols(value): + props["size"] = tuple( + [int(s) if is_symbolic(s) else s for s in value.size()] + ) + elif not has_free_symbols(value): + # this is a fully static shape, and the keys on props here inform specialization. + # We have to cast to int here, because these might get accessed as ConstantVariable, which has + # a strict no-symint policy. If we got here due to not having free symbols, this is a known constant + # already. We could remove the discrepancy here, by having ConstantVariable be more permissive for + # constant backed SymInts, but that assert being strict has led to some good signal in hunting bugs, and + # I'd like to keep it around for now. 
+            props["size"] = tuple(
+                # The non is_symbolic case applies to the jagged layout
+                # NestedTensor case, as singleton ints are not symbolic
+                [int(s) if is_symbolic(s) else s for s in value.size()]
+            )
+            props["stride"] = tuple(value.stride())
+            if torch._C._functorch.is_batchedtensor(value):
+                # Batched tensors do not support contiguity patterns, so
+                # we refrain from computing the `is_contiguous` property
+                props["is_contiguous"] = None
+            else:
+                props["is_contiguous"] = tuple(
+                    [
+                        x
+                        for x in torch._prims_common._memory_formats
+                        if value.is_contiguous(memory_format=x)
+                    ]
+                )
+        return props
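+
+    # Illustrative example for `specialize` above (hypothetical tensor):
+    #
+    #     t = torch.randn(2, 3)
+    #     props = TensorVariable.specialize(t)
+    #     # props["dtype"] == torch.float32, props["ndim"] == 2,
+    #     # props["size"] == (2, 3), props["stride"] == (3, 1)
+    #
+    # These keys seed the TensorVariable fields and inform specialization.
+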
+    def dynamic_getattr(self, tx: "InstructionTranslator", name):
+        fake_val = self.proxy.node.meta["example_value"]
+        # For getattrs on tensors without sources,
+        # we can do better than the default (creating a GetAttrVariable)
+        # if:
+        # (1) the tensor is a traceable tensor subclass
+        # (2) we are getattr'ing an inner tensor from that subclass
+        if not self.source and is_traceable_wrapper_subclass(fake_val):
+            fake_val = self.proxy.node.meta["example_value"]
+            attrs, ctx = fake_val.__tensor_flatten__()
+            proxy = getattr(self.as_proxy(), name)
+            example_value = getattr(fake_val, name)
+            if name in attrs:
+                # attrs returned from tensor_flatten are always tensors
+                assert isinstance(example_value, torch.Tensor)
+                from .builder import wrap_fx_proxy
+
+                return wrap_fx_proxy(tx=tx, proxy=proxy, example_value=example_value)
+            # Any other attributes on the subclass (that are not methods)
+            # are assumed to be constant metadata.
+            elif not callable(example_value):
+                from .builder import SourcelessBuilder
+
+                return SourcelessBuilder.create(tx, example_value)
+
+        if not (self.source and self.source.subguards_allowed()):
+            raise NotImplementedError
+
+        # For a local source, we associate the real value. We use this real value
+        # for implementing getattr fallthrough on the variable tracker base class.
+
+        # Note - this scope construction is mirrored in guards
+        # A subsequent PR will introduce a util.
+        scope = {"L": tx.output.local_scope, "G": tx.output.global_scope}
+        try:
+            # We raise in case we get a TypeError bug w/ SuperSource.
+            # SuperSource has bugs in it atm, and can produce code like
+            # eval("super(L['mod'].model.model.encoder.embed_positions.forward__class__,
+            #       L['mod'].model.model.encoder.embed_positions)", scope)
+            # which is incorrect, and violates the invariant that all sources should be eval()-able against the scope.
+            _input_associated_real_value = eval(self.source.name(), scope)
+        except Exception as exc:
+            raise NotImplementedError from exc
+
+        if _input_associated_real_value is None:
+            raise NotImplementedError
+
+        if object_has_getattribute(_input_associated_real_value):
+            raise NotImplementedError
+
+        if get_custom_getattr(_input_associated_real_value):
+            raise NotImplementedError
+
+        real_value = getattr(_input_associated_real_value, name)
+        if callable(real_value):
+            # Callables have more nuanced handling, and we should let the existing
+            # system delegate here. Raising was past behavior, and so it should
+            # always be sound to fall back on it.
+            raise NotImplementedError
+
+        from ..guards import GuardBuilder
+        from .builder import VariableBuilder
+
+        attr_source = AttrSource(self.source, name)
+        install_guard(attr_source.make_guard(GuardBuilder.HASATTR))
+        return VariableBuilder(tx, attr_source)(real_value)
+
+    def method_attr_ndim(self, tx):
+        if self.ndim is not None:
+            return ConstantVariable.create(self.ndim)
+        else:
+            return self.call_method(tx, "dim", [], {})
+
+    def method_attr_dtype(self, tx):
+        if self.dtype is not None:
+            return ConstantVariable.create(self.dtype)
+
+    def method_attr_device(self, tx):
+        if self.device is not None:
+            return ConstantVariable.create(self.device)
+
+    def method_attr_layout(self, tx):
+        if self.layout is not None:
+            return ConstantVariable.create(self.layout)
+
+    def method_attr_is_cuda(self, tx):
+        if self.device is not None:
+            return ConstantVariable.create(self.device.type == "cuda")
+
+    def method_attr_shape(self, tx):
+        if self.size is not None:
+            sizes = [variables.ConstantVariable.create(x) for x in self.size]
+            return SizeVariable(sizes)
+        else:
+            return self.call_method(tx, "size", [], {})
+
+    def method_attr_requires_grad(self, tx):
+        if self.requires_grad is not None:
+            return ConstantVariable.create(self.requires_grad)
+
+    def method_attr_is_quantized(self, tx):
+        if self.is_quantized is not None:
+            return ConstantVariable.create(self.is_quantized)
+
+    def method_attr_is_sparse(self, tx):
+        if self.is_sparse is not None:
+            return ConstantVariable.create(self.is_sparse)
+
+    def method_attr_data(self, tx):
+        return variables.TorchInGraphFunctionVariable(
+            torch._C._autograd._get_data_attr
+        ).call_function(tx, [self], {})
+
+    def method_attr_grad_fn(self, tx):
+        if self.has_grad_fn:
+            unimplemented("TensorVariable has a grad_fn")
+        else:
+            return variables.ConstantVariable(None)
+
+    def method_attr__version(self, tx):
+        from ..tensor_version_op import _tensor_version
+
+        return variables.TorchInGraphFunctionVariable(_tensor_version).call_function(
+            tx, [self], {}
+        )
+
+    def call_hasattr(self, tx: "InstructionTranslator", name):
+        from . import GetAttrVariable
+        from .builtin import BuiltinVariable
+
+        try:
+            var = BuiltinVariable(getattr).call_function(
+                tx, [self, ConstantVariable(name)], {}
+            )
+            # In the event that TensorVariable returns NotImplemented,
+            # BuiltinVariable.call_getattr returns GetAttrVariable
+            ret_val = not isinstance(var, GetAttrVariable)
+        except AttributeError:
+            ret_val = False
+
+        if self.source:
+            install_guard(
+                AttrSource(self.source, name).make_guard(GuardBuilder.HASATTR)
+            )
+
+        return ConstantVariable(ret_val)
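+
+    # Note on the `method_attr_<name>` convention used by var_getattr below:
+    # attribute reads dispatch to a matching handler when one exists. For
+    # example (illustrative), reading `.ndim` resolves through
+    # `method_attr_ndim`, which returns a ConstantVariable when `self.ndim`
+    # was recorded at wrap time and otherwise traces a `dim()` call.
+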
+    def var_getattr(self, tx: "InstructionTranslator", name):
+        from . import UserDefinedClassVariable
+
+        if self.is_strict_mode(tx) and name in self._strict_mode_banned_ops():
+            unimplemented(f"Illegal getattr invocation {name} in strict mode")
+
+        if name == "__class__":
+            return UserDefinedClassVariable(self.python_type())
+
+        handler = getattr(self, f"method_attr_{name}", None)
+        result = handler(tx) if handler is not None else None
+
+        # Add a guard for type matching; these guards are checked before tensor
+        # guards. In some cases, a <tensor>.<attr> guard can be evaluated first,
+        # and break if <attr> is later changed to another type.
+        if (
+            result is not None
+            and self.source
+            and self.source.subguards_allowed()
+            and not (
+                name not in ("grad", "requires_grad") and result.is_python_constant()
+            )
+        ):
+            install_guard(self.make_guard(GuardBuilder.TYPE_MATCH))
+            result.source = AttrSource(self.source, name)
+
+        # It's hard to get an inplace view (metadata mutation) on a graph input
+        # to work properly across dynamo/aot/inductor, so just fall back.
+        if self.source is not None and hasattr(torch.ops.aten, name):
+            fn = getattr(torch.ops.aten, name)
+            if (
+                hasattr(fn, "overloads")
+                and hasattr(fn, fn.overloads()[0])
+                and torch.Tag.inplace_view in getattr(fn, fn.overloads()[0]).tags
+            ):
+                # Delay the graph break to the actual call of unsqueeze_/resize_/resize_as_ etc.
+                return variables.misc.DelayGraphBreakVariable(
+                    source=AttrSource(self.source, name)
+                )
+
+        # For attributes (not methods) that were not caught in the special handling
+        # above (e.g. tensor.real), we handle these generically, assuming that the
+        # output type is a tensor.
+        if result is None and name != "grad":
+
+            def try_generic_attr_handling():
+                from .builder import wrap_fx_proxy
+                from .misc import GetAttrVariable
+
+                try:
+                    static_attr = inspect.getattr_static(torch.Tensor, name)
+                except AttributeError:
+                    return None
+
+                # Make sure this is an attribute, not a method.
+                # type(torch.Tensor.H) should be "getset_descriptor".
+                # This is because of the CPython implementation; see
+                # THPVariableType: these attributes are implemented under
+                # tp_getset, so they appear as `getset_descriptor`s (compared
+                # to, say, methods, which appear as `method_descriptor`s).
+                if type(static_attr) != types.GetSetDescriptorType:
+                    return None
+
+                proxy = GetAttrVariable.create_getattr_proxy(self.as_proxy(), name)
+                if self.source is not None:
+                    return wrap_fx_proxy(
+                        tx=tx, proxy=proxy, source=AttrSource(self.source, name)
+                    )
+                else:
+                    return wrap_fx_proxy(tx=tx, proxy=proxy)
+
+            result = try_generic_attr_handling()
+
+        if result is None:
+            result = self.dynamic_getattr(tx, name)
+
+        if result is None:
+            raise NotImplementedError
+        return result
+
+    def call_id(self, tx):
+        if not self.source:
+            unimplemented("call_id not supported for sourceless TensorVariable")
+
+        # For a local source, we associate the real value.
+        scope = {"L": tx.output.local_scope, "G": tx.output.global_scope}
+        try:
+            _input_associated_real_value = eval(self.source.name(), scope)
+        except Exception as exc:
+            unimplemented(f"error getting associated real value: {exc}")
+
+        if _input_associated_real_value is None:
+            unimplemented("call_id without associated real value")
+
+        install_guard(self.source.make_guard(GuardBuilder.ID_MATCH))
+        id_value = id(_input_associated_real_value)
+        return ConstantVariable.create(id_value)
+
+    def has_unpack_var_sequence(self, tx):
+        return self.ndim > 0
+
+    def unpack_var_sequence(self, tx: "InstructionTranslator", idxes=None):
+        from .builder import wrap_fx_proxy_cls
+
+        if self.size:
+            size_len = len(self.size)
+        else:
+            size_var = self.call_method(tx, "size", [], {})
+            assert isinstance(size_var, SizeVariable)
+            size_len = len(size_var.items)
+        # Ensure we don't unpack a scalar tensor.
+        assert size_len != 0, "Can't unpack scalar tensors."
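+        # Illustrative example: unpacking `a, b = t` for a tensor of shape
+        # (2, 3) reaches this point with size_len == 2 and returns two
+        # TensorVariables built from the proxies t[0] and t[1] below.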
+ + if self.size: + length = self.size[0] + else: + dyn_length = self.call_method(tx, "size", [ConstantVariable.create(0)], {}) + # SymNodeVariable for symbolic sizes, ConstantVariable for constants OR values produced through + # symbolic_shapes, but that end up as int/sympy.Integer + assert isinstance(dyn_length, (SymNodeVariable, ConstantVariable)) + if isinstance(dyn_length, SymNodeVariable): + length = dyn_length.evaluate_expr(tx.output) + else: + length = dyn_length.value + + if idxes is None: + idxes = range(length) + else: + assert ( + len(idxes) == length + ), f"Can't unpack a tensor of {length} rows into a tuple of {len(idxes)} elements." + return [ + wrap_fx_proxy_cls(target_cls=type(self), tx=tx, proxy=self.as_proxy()[i]) + for i in idxes + ] + + def _strict_mode_banned_ops(self): + return torch._dynamo.config._autograd_backward_strict_mode_banned_ops + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + if self.is_strict_mode(tx) and name in self._strict_mode_banned_ops(): + unimplemented(f"Illegal method invocation {name} in strict mode") + + """ + Dispatch to a method-specific handler defined below. If the + handler returns None (or doesn't exist) we put the method call + in the graph. + """ + try: + handler_method = getattr(self, f"method_{name}") + except AttributeError: + pass + else: + try: + result = handler_method(*args, **kwargs) + if result: + return result + except TypeError as e: + unimplemented(f"unhandled args for {name}: {e}") + + from .builder import wrap_fx_proxy + + return wrap_fx_proxy( + tx, + tx.output.create_proxy( + "call_method", + name, + *proxy_args_kwargs([self, *args], kwargs), + ), + ) + + def method_size(self, *args, **kwargs): + return self._method_size_stride("size", *args, **kwargs) + + def method_stride(self, *args, **kwargs): + return self._method_size_stride("stride", *args, **kwargs) + + def _method_size_stride(self, name, dim=None): + dim = guard_if_dyn(dim) + + def make_const_size_variable(x, **options): + return SizeVariable( + [ConstantVariable.create(y, **options) for y in x], **options + ) + + RetVariable = ( + make_const_size_variable if name == "size" else ConstantVariable.create + ) + + # Technically, this should not be necessary, but I'm including it + # for enhanced BC, in case example_value is sometimes not set + # (it really should always be set though!) + if (r := getattr(self, name)) is not None: + if dim is None: + return RetVariable(r) + else: + return ConstantVariable.create(r[dim]) + + # It might still be constant! Consult the fake tensor and see + if (fake := self.proxy.node.meta.get("example_value")) is not None: + if dim is None: + fake_r = getattr(fake, name)() + if not has_free_symbols(fake_r): + # int conversion for safety, in case a SymInt refined + # to constant + return RetVariable(tuple(int(r) for r in fake_r)) + else: + fake_r = getattr(fake, name)(dim) + if not has_free_symbols(fake_r): + return ConstantVariable.create(int(fake_r)) + + def method_numel(self): + if self.size is not None: + return ConstantVariable.create(product(self.size)) + + # It might still be constant! 
Consult the fake tensor and see + if (fake := self.proxy.node.meta.get("example_value")) is not None: + fake_r = fake.numel() + if not has_free_symbols(fake_r): + return ConstantVariable.create(int(fake_r)) + + method_nelement = method_numel + + def method_dim(self): + if self.ndim is not None: + return ConstantVariable.create(self.ndim) + + method_ndimension = method_dim + + def method_is_floating_point(self): + if self.dtype is not None: + return ConstantVariable.create(self.dtype.is_floating_point) + + def method_is_complex(self): + if self.dtype is not None: + return ConstantVariable.create(self.dtype.is_complex) + + def method_is_contiguous(self, memory_format=None): + memory_format = ( + memory_format.as_python_constant() + if memory_format is not None + else torch.contiguous_format + ) + if self.is_contiguous is not None: + return ConstantVariable.create(memory_format in self.is_contiguous) + elif (fake := self.proxy.node.meta.get("example_value")) is not None: + return ConstantVariable.create( + fake.is_contiguous(memory_format=memory_format) + ) + + def method_type(self, dtype=None, non_blocking=False, **kwargs): + if ( + dtype is None + and self.dtype is not None + and isinstance(self.device, torch.device) + ): + tensortype = next( + k for k, v in tensortype_to_dtype.items() if self.dtype in v + ) + if self.device.type == "cuda": + return ConstantVariable.create(f"torch.cuda.{tensortype.__name__}") + else: + return ConstantVariable.create(f"torch.{tensortype.__name__}") + elif ( + dtype is not None + and fqn(type(dtype.as_python_constant())) == "torch.tensortype" + ): + # torch.FloatTensor, etc. are all of type "torch.tensortype". + # torch.fx's tracer fails on these types, because it doesn't support arguments of torch.tensortype type. + # So, we pass it in as a string (which is also supported, see above implementation for .type() with 0 args) + tensor_type = dtype.as_python_constant() + tensor_type_const = ConstantVariable.create(fqn(tensor_type)) + + from ..symbolic_convert import InstructionTranslator + from .builder import wrap_fx_proxy + + tx = InstructionTranslator.current_tx() + + if non_blocking: + kwargs = {"non_blocking": non_blocking, **kwargs} + + return wrap_fx_proxy( + tx, + tx.output.create_proxy( + "call_method", + "type", + *proxy_args_kwargs([self, tensor_type_const], kwargs), + ), + ) + + def method_as_subclass(self, cls): + if isinstance(cls, TensorSubclassVariable) and cls.source: + from ..symbolic_convert import InstructionTranslator + from .builder import VariableBuilder + from .torch_function import TensorWithTFOverrideVariable + + tx = InstructionTranslator.current_tx() + + # [Note: __torch_function__] coerce this tensor variable into a TensorWithTFOverrideVariable + # in eager, this is just a type change. This isn't sound if a __torch_function__ tensor subclass + # defines a constructor, but if only a __torch_function__ impl is defined, this is okay to call. + # It is up to the user whether this is correct behavior or not. 
+ py_cls = cls.as_python_constant() + torch_fn = VariableBuilder( + tx, + AttrSource(AttrSource(cls.source, "__torch_function__"), "__func__"), + )(py_cls.__torch_function__.__func__) + + return TensorWithTFOverrideVariable.from_tensor_var( + tx, self, py_cls, torch_fn + ) + + def method_get_device(self): + if isinstance(self.device, torch.device): + index = self.device.index if self.device.type != "cpu" else -1 + return ConstantVariable.create(index) + + def method_element_size(self): + return ConstantVariable.create(self.dtype.itemsize) + + def method_numpy(self, *, force=False): + if not config.trace_numpy: + unimplemented("Tensor.numpy(). config.trace_numpy is False") + if not np: + unimplemented("Tensor.numpy(). NumPy is not available") + if self.layout != torch.strided: + raise TypeError( + f"can't convert {self.layout} layout tensor to numpy. Use Tensor.dense() first" + ) + from ..symbolic_convert import InstructionTranslator + + tx = InstructionTranslator.current_tx() + + # We don't check that the tensor is on CPU when force is False, as this + # allows us to execute NumPy code on CUDA. Same for requires_grad=True + if force and force.as_python_constant(): + # If the user set force=True we try to preserve the semantics (no gradients, move to CPU...) + t = self.call_method(tx, "detach", [], {}) + proxy = tx.output.create_proxy("call_method", "cpu", (t.as_proxy(),), {}) + else: + # Hacky way to create a view of self that will be marked as NumpyNdarrayVariable + proxy = tx.output.create_proxy( + "call_method", "view_as", *proxy_args_kwargs([self, self], {}) + ) + return NumpyNdarrayVariable.create(tx, proxy) + + def method_tolist(self): + from ..symbolic_convert import InstructionTranslator + from .builder import SourcelessBuilder + + tx = InstructionTranslator.current_tx() + + def tolist(tensor, sub_proxy): + def wrap(i, sub_proxy): + # Sigh, we forgot to gate this, so this data dependent is on + # by default and is load bearing in CI + with unittest.mock.patch.object( + tx.fake_mode, "allow_scalar_outputs", True + ): + return SymNodeVariable.create( + tx, + sub_proxy.item(), + ) + + if tensor.dtype not in [ + torch.int8, + torch.int16, + torch.int32, + torch.int64, + ]: + unimplemented("Input tensor for tolist must be an integer tensor") + + if tensor.dim() == 0: + return wrap(tensor, sub_proxy) + + if tensor.dim() == 1: + return [wrap(val, sub_proxy[i]) for i, val in enumerate(tensor)] + + return [ + tolist(sub_tensor, sub_proxy=sub_proxy[i]) + for i, sub_tensor in enumerate(tensor) + ] + + tensor = self.as_proxy().node.meta["example_value"] + out = tolist(tensor, self.as_proxy()) + return SourcelessBuilder.create(tx, out) + + def method_backward(self, *args, **kwargs): + unimplemented("Tensor.backward") + + def method_data_ptr(self, *args, **kwargs): + unimplemented("Tensor.data_ptr") + + def method_item(self, *args, **kwargs): + if not config.capture_scalar_outputs: + self._warn_capture_scalar_outputs() + unimplemented("Tensor.item") + + @staticmethod + @functools.lru_cache(None) + def _warn_capture_scalar_outputs(): + user_stack = torch._guards.TracingContext.extract_stack() + user_stack_formatted = "".join(traceback.format_list(user_stack)) + log.warning( + textwrap.dedent( + """\ + Graph break from `Tensor.item()`, consider setting: + torch._dynamo.config.capture_scalar_outputs = True + or: + env TORCHDYNAMO_CAPTURE_SCALAR_OUTPUTS=1 + to include these operations in the captured graph. 
+ + Graph break: from user code at: + %s + """ + ), + user_stack_formatted, + ) + + def method___len__(self): + from ..symbolic_convert import InstructionTranslator + + tx = InstructionTranslator.current_tx() + return self.call_method(tx, "size", [ConstantVariable.create(0)], {}) + + def method_addcmul_(self, tensor1, tensor2, *, value=None): + from ..symbolic_convert import InstructionTranslator + + tx = InstructionTranslator.current_tx() + if value is not None: + from .. import polyfills + from .builder import SourcelessBuilder + + return tx.inline_user_function_return( + SourcelessBuilder.create(tx, polyfills.addcmul_inplace), + [self, tensor1, tensor2, value], + {}, + ) + + def method___setitem__(self, key, value): + def has_bool_key(v): + if isinstance(v, TensorVariable): + return v.dtype in (torch.bool, torch.int8) + elif isinstance(v, variables.TupleVariable): + return any(has_bool_key(item) for item in v.items) + else: + return False + + if ( + has_bool_key(key) + and isinstance(value, TensorVariable) + and value.requires_grad + and torch.is_grad_enabled() + ): + unimplemented( + "boolean masking setitem backwards, see https://github.com/pytorch/pytorch/issues/114123" + ) + from ..symbolic_convert import InstructionTranslator + + tx = InstructionTranslator.current_tx() + tx.output.create_proxy( + "call_function", + operator.setitem, + *proxy_args_kwargs([self, key, value], {}), + ) + return ConstantVariable.create(None) + + def method_resize_(self, *args, **kwargs): + unimplemented("Tensor.resize_") + + def method_resize_as_(self, *args, **kwargs): + unimplemented("Tensor.resize_as_") + + def method_sparse_resize_(self, *args, **kwargs): + unimplemented("Tensor.sparse_resize_") + + def method_sparse_resize_and_clear_(self, *args, **kwargs): + unimplemented("Tensor.sparse_resize_and_clear_") + + def method_set_(self, *args, **kwargs): + if len(args) > 1: + # torch.Tensor.set_() has several overloads. + # aten::set_.source_Tensor(Tensor) gets special handling + # in AOTAutograd and functionalization, because it is the most common + # overload and is used by FSDP. + # graph-breaking on aten::set_source_Tensor_storage_offset for now, + # unless we find that we need to make it work. + unimplemented("Tensor.set_.source_Tensor_storage_offset") + + def method_add_(self, other, *, alpha=None): + if alpha is not None: + from ..symbolic_convert import InstructionTranslator + + tx = InstructionTranslator.current_tx() + result = variables.TorchInGraphFunctionVariable(torch.mul).call_function( + tx, [other, alpha], {} + ) + return self.call_method(tx, "add_", [result], {}) + + def method_addcdiv_(self, tensor1, tensor2, *, value=None): + from ..symbolic_convert import InstructionTranslator + + tx = InstructionTranslator.current_tx() + if value is not None: + result = variables.TorchInGraphFunctionVariable(torch.div).call_function( + tx, [tensor1, tensor2], {} + ) + result = variables.TorchInGraphFunctionVariable(torch.mul).call_function( + tx, [result, value], {} + ) + return self.call_method(tx, "add_", [result], {}) + + def method___contains__(self, arg): + from ..symbolic_convert import InstructionTranslator + + tx = InstructionTranslator.current_tx() + + # Rewrite __contains__ here so that downstream passes can trace through + # without dealing with unbacked symbool. 
Roughly the code we translate is: + # def __contains__(self, x): + # return (x == self).any().item() + result = variables.TorchInGraphFunctionVariable(torch.eq).call_function( + tx, [self, arg], {} + ) + result = variables.TorchInGraphFunctionVariable(torch.any).call_function( + tx, [result], {} + ) + return result.call_method(tx, "item", [], {}) + + def method_redistribute(self, *args, **kwargs): + from ..symbolic_convert import InstructionTranslator + + tx = InstructionTranslator.current_tx() + # rewrite non-primitive args/kwargs to be included in the on-the-fly prim function + # and rewrite args to have only proxyable args, then insert call_function + args_as_value = [x.as_python_constant() for x in args] + kwargs_as_value = {k: v.as_python_constant() for k, v in kwargs.items()} + + def redistribute_fn_with_prim_types(x): + return x.redistribute(*args_as_value, **kwargs_as_value) + + # attach the same function name for better debugging + redistribute_fn_with_prim_types.__name__ = "prim_redistribute" + + from .builder import wrap_fx_proxy + + return wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + redistribute_fn_with_prim_types, + *proxy_args_kwargs([self], {}), + ), + ) + + def method_to_local(self, *args, **kwargs): + from ..symbolic_convert import InstructionTranslator + + tx = InstructionTranslator.current_tx() + # rewrite non-primitive args/kwargs to be included in the on-the-fly prim function + # and rewrite args to have only proxyable args, then insert call_function + args_as_value = [x.as_python_constant() for x in args] + kwargs_as_value = {k: v.as_python_constant() for k, v in kwargs.items()} + + def to_local_fn_with_prim_types(x): + return x.to_local(*args_as_value, **kwargs_as_value) + + # attach the same function name for better debugging + to_local_fn_with_prim_types.__name__ = "prim_to_local" + + from .builder import wrap_fx_proxy + + return wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + to_local_fn_with_prim_types, + *proxy_args_kwargs([self], {}), + ), + ) + + def method_register_hook(self, *args, **kwargs): + return self._method_register_hook("register_hook", *args, **kwargs) + + def method_register_post_accumulate_grad_hook(self, *args, **kwargs): + return self._method_register_hook( + "register_post_accumulate_grad_hook", *args, **kwargs + ) + + def _method_register_hook(self, name: str, hook: VariableTracker): + # Note - do not arbitrarily add hooks here - make sure they match the same contract + # see [On tensor.register_hook] + from ..symbolic_convert import InstructionTranslator + + tx = InstructionTranslator.current_tx() + + if not self.source: + if not compiled_autograd.compiled_autograd_enabled: + # TODO(voz): + # We can relax this by speculating the callable and ensuring that it doesn't modify arbitrary + # python state. + # We *Must* be in compiled_autograd here because backward hooks can contain anything, and it is unsafe to run + # them in a compiled bwd without re-entering dynamo as compiled_autograd does. + # + # Discussion point 1 - Should we bypass this if nopython/fullgraph = True? + # No. Because this was going to be a graph break anyway - this check does not + # introduce new graph breaks where there were none. + # + # Discussion point 2 - Should we defer this check to backwards? + # No. Because compiled autograd is not yet ready for prime time. 
As such, if we defer, a user + # would have no recourse - their forward traces just fine, but will fail at backwards unless + # compiled_autograd is enabled. If compiled_autograd fails (there are a lot of failures today) + # then they have nothing they can do except disable compile. + unimplemented( + "Compilation of intermediate hooks requires compiled autograd" + ) + + hook_name, bw_state_proxy = tx.output.add_backward_state_hook(hook) + + def _register_hook_trampoline(tensor, bw_state): + register_hook = getattr(tensor, name) + register_hook( + functools.partial( + trace_wrapped, + fn=call_hook_from_backward_state, + bw_state=bw_state, + hook_name=hook_name, + ) + ) + # TODO(jansel): returning None here is wrong, it should be + # RemovableHandle, but we need some extra work to support + # this properly. + return None + + from .builder import wrap_fx_proxy + + return wrap_fx_proxy( + tx, + tx.output.create_proxy( + "call_function", + _register_hook_trampoline, + (self.as_proxy(), bw_state_proxy), + {}, + ), + ) + + handle_variable = variables.RemovableHandleVariable( + mutable_local=variables.base.MutableLocal(), + ) + tx.output.side_effects.register_hook(self, hook, handle_variable, name) + return handle_variable + + def method_requires_grad_(self, requires_grad=True): + if requires_grad is not True: + requires_grad = requires_grad.as_python_constant() + + if self.as_proxy().node.meta["example_value"].requires_grad != requires_grad: + unimplemented("Tensor.requires_grad_") + else: + return self + + def method_new(self, *args, **kwargs): + # Convert x.new(torch.Size) into x.new_empty(torch.Size), + # as Tensor.new acts differently with a Size input versus a tuple input. + if (len(args) == 1 and isinstance(args[0], SizeVariable)) or ( + len(args) >= 1 + and all( + isinstance(a, ConstantVariable) and a.python_type() == int for a in args + ) + ): + from ..symbolic_convert import InstructionTranslator + + return self.call_method( + InstructionTranslator.current_tx(), "new_empty", args, kwargs + ) + + def method_untyped_storage(self): + return UntypedStorageVariable( + self, self.as_proxy().node.meta["example_value"].untyped_storage() + ) + + def set_name_hint(self, name: str): + if not self._is_name_set: + self.proxy.node._rename(name) + self._is_name_set = True + + +class SymNodeVariable(VariableTracker): + """ + Represents a symbolic scalar, either int, float or bool. This is most commonly used to + handle symbolic size computation, e.g., tensor.size(0), but it is also used to + handle logic like float_tensor.item() or unspecialized float inputs. + """ + + _nonvar_fields = { + "proxy", + "sym_num", + *VariableTracker._nonvar_fields, + } + + def debug_repr(self): + return repr(self.sym_num) + + @classmethod + def create(cls, tx, proxy, sym_num=None, **options): + if sym_num is None: + sym_num = get_fake_value(proxy.node, tx) + if "example_value" in proxy.node.meta: + assert proxy.node.meta["example_value"] == sym_num + set_example_value(proxy.node, sym_num) + + if isinstance(sym_num, (sympy.Integer, int, bool)): + sym_num = int(sym_num) if isinstance(sym_num, sympy.Integer) else sym_num + return ConstantVariable.create(sym_num) + + return SymNodeVariable(proxy, sym_num, **options) + + def __init__(self, proxy, sym_num, **kwargs) -> None: + super().__init__(**kwargs) + self.proxy = proxy + # TODO: Should we allow non SymTypes here? 
Today it is allowed + self.sym_num = sym_num + self._tensor_var = None + + def python_type(self): + if isinstance(self.sym_num, SymTypes): + return self.sym_num.node.pytype + else: + return type(self.sym_num) + + def as_proxy(self): + return self.proxy + + def as_tensor(self, tx): + if self._tensor_var is None: + from .builder import SourcelessBuilder + + self._tensor_var = SourcelessBuilder.create( + tx, torch.scalar_tensor + ).call_function(tx, [self], {}) + return self._tensor_var + + def evaluate_expr(self, output_graph=None): + try: + return guard_scalar(self.sym_num) + except GuardOnDataDependentSymNode as e: + raise UserError( # noqa: B904 + UserErrorType.ANTI_PATTERN, + f"Consider annotating your code using torch._check*(). {str(e)}", + case_name="constrain_as_size_example", + ) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + from .builder import wrap_fx_proxy + + return wrap_fx_proxy( + tx, + tx.output.create_proxy( + "call_method", + name, + *proxy_args_kwargs([self, *args], kwargs), + ), + ) + + +class NumpyNdarrayVariable(TensorVariable): + """ + Represents a np.ndarray, but backed by torch Tensor via torch._numpy.ndarray. + Use this for Tensor.numpy() call. + """ + + @staticmethod + def create(tx: "InstructionTranslator", proxy, **options): + from .builder import wrap_fx_proxy_cls + + return wrap_fx_proxy_cls( + target_cls=NumpyNdarrayVariable, + tx=tx, + proxy=proxy, + **options, + ) + + def var_getattr(self, tx: "InstructionTranslator", name): + # NB: This INTENTIONALLY does not call super(), because there is + # no intrinsic reason ndarray properties are related to Tensor + # properties. The inheritance here is for implementation sharing. + + from ..utils import numpy_attr_wrapper + from .builder import wrap_fx_proxy + + result = None + + example_value = self.as_proxy().node.meta["example_value"] + example_ndarray = tnp.ndarray(example_value) + + def insert_into_graph(): + return wrap_fx_proxy( + tx, + tx.output.create_proxy( + "call_function", numpy_attr_wrapper, (self.as_proxy(), name), {} + ), + ) + + if name in ["T", "real", "imag"]: + proxy = tx.output.create_proxy( + "call_function", + numpy_attr_wrapper, + (self.as_proxy(), name), + {}, + ) + result = NumpyNdarrayVariable.create(tx, proxy) + + # These are awkward to implement. The standard playbook for torch._numpy + # interop is to trace a call into the torch._numpy wrapper which works for + # Tensor operations. However, we don't want to do this for calls + # that don't return Tensors, because in those cases we may not want + # to trace the attribute access into the graph at all (it is sort + # of harmless to do so, because AOTAutograd will eliminate them, + # but it's best not to trace them in to begin with.) But in any + # case, tracing these into the graph is like trying to fit a square + # peg into a round hole; best not to do it. So instead we + # painstakingly implement these by hand + # + # NB: only ALWAYS specialized attributes can go here; notably, + # size/shape not allowed! 
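+        # Illustrative example (not from the original source): for a static
+        # float64 array of shape (3, 4), `ndim` constant-folds to 2 and
+        # `itemsize` to 8, while under dynamic shapes `shape`/`size` may carry
+        # a SymInt and must go through insert_into_graph() below instead.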
+ elif name in ("ndim", "itemsize"): + return ConstantVariable.create(getattr(example_ndarray, name)) + elif name in ("shape", "stride"): + if not has_free_symbols(r := getattr(example_ndarray, name)): + return ConstantVariable.create(tuple(int(r) for r in r)) + return insert_into_graph() + elif name == "size": + if not has_free_symbols(r := example_ndarray.size): + return ConstantVariable.create(int(r)) + return insert_into_graph() + elif name in ["base", "flags", "dtype"]: + unimplemented(f"TODO: add support for ndarray.{name}") + elif name in ["__version__"]: + unimplemented("delegate np.__version__ to NumPy") + if result is None: + raise NotImplementedError + return result + + @staticmethod + def patch_args(name, args, kwargs): + if name == "clip": + kwargs_rename = {"a_min": "min", "a_max": "max"} + kwargs = {kwargs_rename.get(k, k): v for k, v in kwargs.items()} + return args, kwargs + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + from ..utils import numpy_method_wrapper + + args, kwargs = self.patch_args(name, args, kwargs) + + if name in ["__len__", "size", "tolist"]: + # delegate back to TensorVariable + return super().call_method(tx, name, args, kwargs) + if name in ("tostring", "tobytes"): + unimplemented(f"{name} is not modelled in torch._numpy") + proxy = tx.output.create_proxy( + "call_function", + numpy_method_wrapper(name), + *proxy_args_kwargs([self] + list(args), kwargs), + ) + return NumpyNdarrayVariable.create(tx, proxy) + + def python_type(self): + return np.ndarray + + +class UnspecializedPythonVariable(TensorVariable): + """ + This is a 1-element tensor represents unspecialized python float/int. + """ + + _nonvar_fields = { + "raw_value", + "need_unwrap", + *TensorVariable._nonvar_fields, + } + + def __init__( + self, proxy: torch.fx.Proxy, *, raw_value=None, need_unwrap=True, **kwargs + ) -> None: + super().__init__(proxy, **kwargs) + self.raw_value = raw_value + self.need_unwrap = need_unwrap + + @classmethod + def from_tensor_variable(cls, tensor_variable, raw_value, need_unwrap=True): + # Convert a `TensorVariable` instance into an `UnspecializedPythonVariable` instance. + return UnspecializedPythonVariable( + **dict(tensor_variable.__dict__), + raw_value=raw_value, + need_unwrap=need_unwrap, + ) + + +class FakeItemVariable(TensorVariable): + """An unspecialized python variable which prevents access to the underlying raw value. 
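+    Unlike UnspecializedPythonVariable, no raw_value is retained here, so the
+    (meaningless) scalar of a fake tensor can never leak into the trace.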
+ This is needed if item is called on a FakeTensor.""" + + _nonvar_fields = { + "need_unwrap", + *TensorVariable._nonvar_fields, + } + + def __init__(self, proxy: torch.fx.Proxy, **kwargs) -> None: + need_unwrap = kwargs.pop("need_unwrap", False) + super().__init__(proxy, **kwargs) + self.need_unwrap = need_unwrap + + @classmethod + def from_tensor_variable(cls, tensor_variable): + return FakeItemVariable(**dict(tensor_variable.__dict__)) + + +class TensorSubclassVariable(VariableTracker): + def __init__(self, value, *args, **kwargs) -> None: + self.value = value + super().__init__(*args, **kwargs) + + def call_function( + self, + tx: "InstructionTranslator", + args: List[VariableTracker], + kwargs: Dict[str, VariableTracker], + ) -> VariableTracker: + if len(args) == 1 and isinstance(args[0], TensorVariable): + from .builder import VariableBuilder + from .torch_function import TensorWithTFOverrideVariable + + torch_fn = VariableBuilder( + tx, AttrSource(self.source, "__torch_function__") + )(self.value.__torch_function__) + + return TensorWithTFOverrideVariable.from_tensor_var( + tx, args[0], self.value, torch_fn + ) + + return super().call_function(tx, args, kwargs) + + def as_python_constant(self): + return self.value + + +class UntypedStorageVariable(VariableTracker): + _nonvar_fields = { + "example_value", + *VariableTracker._nonvar_fields, + } + + def __init__( + self, + from_tensor: TensorVariable, + example_value: torch.UntypedStorage, + **kwargs, + ) -> None: + super().__init__(**kwargs), + self.from_tensor = from_tensor + # Example_value will always have device="meta" + self.example_value = example_value + + def call_method( + self, + tx, + name, + args: List[VariableTracker], + kwargs: Dict[str, VariableTracker], + ) -> VariableTracker: + if name == "size": + assert not args + assert not kwargs + result = self.example_value.size() + if not has_free_symbols(result): + # avoid creating a node in the graph + return ConstantVariable.create(int(result)) + else: + from ..external_utils import untyped_storage_size + from .builder import wrap_fx_proxy + + return wrap_fx_proxy( + tx, + tx.output.create_proxy( + "call_function", + untyped_storage_size, + (self.from_tensor.as_proxy(),), + {}, + ), + ) + if name == "resize_" and len(args) == 1: + assert not kwargs + tx.output.create_proxy( + "call_function", + torch.ops.inductor.resize_storage_bytes_, + (self.from_tensor.as_proxy(), args[0].as_proxy()), + {}, + ) + return self + + return super().call_method(tx, name, args, kwargs) + + def reconstruct(self, codegen): + codegen(self.from_tensor) + codegen.load_method("untyped_storage") + codegen.call_method(0) diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/torch.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/torch.py new file mode 100644 index 0000000000000000000000000000000000000000..3dfda1cfec83da9c1de76114857fa63f786709cb --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/torch.py @@ -0,0 +1,1120 @@ +# mypy: allow-untyped-decorators +# mypy: allow-untyped-defs +import functools +import inspect +import logging +import math +import re +from typing import Dict, List, TYPE_CHECKING + +import torch._C +import torch._refs +import torch.fx +import torch.nn +import torch.onnx.operators +from torch._guards import TracingContext +from torch._logging import warning_once +from torch._streambase import _StreamBase +from torch.utils._python_dispatch import is_traceable_wrapper_subclass_type + +from .. 
import config, polyfills, variables +from ..codegen import PyCodegen +from ..create_parameter_op import ( + can_convert_to_tracable_parameter, + new_parameter_placeholder, + tracable_create_parameter, +) +from ..device_interface import get_registered_device_interfaces +from ..exc import unimplemented +from ..guards import GuardBuilder, install_guard +from ..source import SyntheticLocalSource +from ..utils import ( + check_unspec_or_constant_args, + guard_if_dyn, + has_torch_function, + hashable, + product, + proxy_args_kwargs, + unwrap_if_wrapper, +) +from .base import VariableTracker +from .ctx_manager import ( + AutocastModeVariable, + NullContextVariable, + TorchFunctionDisableVariable, +) +from .distributed import DistributedVariable, ProcessGroupVariable +from .lists import ListVariable, TupleVariable +from .torch_function import ( + can_dispatch_torch_function, + dispatch_torch_function, + TorchFunctionModeStackVariable, +) + + +try: + import numpy as np +except ModuleNotFoundError: + np = None # type: ignore[assignment] + +try: + from torch.distributed._composable.fsdp import _fsdp_param_group +except ModuleNotFoundError: + _fsdp_param_group = None # type: ignore[assignment] + + +if TYPE_CHECKING: + from torch._dynamo.symbolic_convert import InstructionTranslator + + +log = logging.getLogger(__name__) + +supported_ctx_manager_classes = dict.fromkeys( + [ + torch.profiler.profiler.profile, + torch.autograd.forward_ad._set_fwd_grad_enabled, + torch.autograd.forward_ad.dual_level, + torch.autograd.profiler.profile, + torch.autograd.profiler.record_function, + torch._C.DisableTorchFunctionSubclass, + torch._functorch.vmap.vmap_increment_nesting, + torch._functorch.eager_transforms.grad_increment_nesting, + torch._functorch.eager_transforms.jvp_increment_nesting, + torch._functorch.eager_transforms.enable_inplace_requires_grad, + torch.amp.autocast_mode.autocast, + torch.autograd.grad_mode.enable_grad, + torch.autograd.grad_mode.inference_mode, + torch.autograd.grad_mode.no_grad, + torch.autograd.grad_mode.set_grad_enabled, + torch.autograd.graph.disable_saved_tensors_hooks, + torch.cpu.amp.autocast_mode.autocast, + torch.cuda.amp.autocast_mode.autocast, + ] +) + + +REWRITE_OPS_TO_TENSOR_SIZE_METHOD = dict.fromkeys( + [ + torch.onnx.operators.shape_as_tensor, + torch._shape_as_tensor, + ] +) + +constant_fold_functions = [ + torch._assert, + torch._utils._get_device_index, + torch._C._get_cublas_allow_tf32, + torch._C._is_any_autocast_enabled, + torch.cuda.get_device_properties, + torch.cuda.is_available, + torch.distributed.is_available, + torch.get_autocast_dtype, + torch.get_autocast_gpu_dtype, + torch.get_default_dtype, + torch.is_autocast_cache_enabled, + torch.is_autocast_cpu_enabled, + torch.is_autocast_enabled, + torch.is_complex, + torch.is_floating_point, + torch.nn.functional._Reduction.get_enum, # type: ignore[attr-defined] + torch.promote_types, + torch._C._get_privateuse1_backend_name, + torch.autograd._is_checkpoint_valid, +] +if torch.distributed.is_available(): + constant_fold_functions.extend( + [ + torch.distributed.is_initialized, + torch.distributed.get_rank, + torch.distributed.get_world_size, + ] + ) +# Convert to dict for O(1) access times +constant_fold_functions = dict.fromkeys(constant_fold_functions) + + +tracing_state_functions = { + torch.jit.is_scripting: False, + torch.jit.is_tracing: False, + torch._C._get_tracing_state: None, + torch.fx._symbolic_trace.is_fx_tracing: False, + torch.onnx.is_in_onnx_export: False, + 
torch._dynamo.external_utils.is_compiling: True, + torch._utils.is_compiling: True, + torch.compiler.is_compiling: True, + torch.compiler.is_dynamo_compiling: True, + torch.nn.modules.activation._is_make_fx_tracing: False, +} + +bin_ops = dict.fromkeys(["add", "sub", "mul", "div", "sqrt"]) + + +class BaseTorchVariable(VariableTracker): + """common base for all torch.* functions, classes, modules and other things""" + + @classmethod + def create_with_source(cls, value, source): + install_guard(source.make_guard(GuardBuilder.FUNCTION_MATCH)) + return cls(value, source=source) + + def __init__(self, value, **kwargs) -> None: + super().__init__(**kwargs) + self.value = value + + def reconstruct(self, codegen): + try: + name = f"{self.value.__module__}.{self.value.__name__}" + except Exception: + name = f"torch_obj_{id(self.value)}" + unique_var_name = "__" + re.sub(r"[^a-zA-Z0-9_]+", "_", name) + codegen.extend_output( + codegen.setup_globally_cached(unique_var_name, self.value) + ) + + def as_proxy(self): + return self.value + + def as_python_constant(self): + return self.value + + def call_hasattr(self, tx: "InstructionTranslator", name): + result = hasattr(self.value, name) + return variables.ConstantVariable.create(result) + + def can_constant_fold_through(self): + if self.value in constant_fold_functions: + return True + return getattr(self.value, "__module__", None) == "math" + + +class TorchCtxManagerClassVariable(BaseTorchVariable): + """Points to a context manager class in torch.* that dynamo has implementations""" + + def __repr__(self) -> str: + return f"TorchCtxManagerClassVariable({self.value})" + + @staticmethod + def is_matching_cls(value): + # Unwrap if it's a functools.lru_cache wrapper + value = unwrap_if_wrapper(value) + # We can't do isinstance(value, type) check because some ctx managers + # are implemented as a function decorated by contextlib.contextmanager, + # E.g., torch._functorch.vmap.vmap_increment_nesting. + return ( + # Context manager type or function with @contextmanager is callable + callable(value) + and ( + hashable(value) # accesses value.__hash__() + and value in supported_ctx_manager_classes + ) + ) + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + from . 
import ( + DisabledSavedTensorsHooksVariable, + DualLevelContextManager, + FSDPParamGroupUseTrainingStateVariable, + GradIncrementNestingCtxManagerVariable, + GradInplaceRequiresGradCtxManagerVariable, + GradModeVariable, + InferenceModeVariable, + JvpIncrementNestingCtxManagerVariable, + SetFwdGradEnabledContextManager, + StreamVariable, + VmapIncrementNestingCtxManagerVariable, + ) + + if self.value is torch.no_grad: + if len(args) == 1 and isinstance( + args[0], variables.functions.BaseUserFunctionVariable + ): + ctx = GradModeVariable.create(tx, False) + return ctx.call_function(tx, args, kwargs) + else: + return GradModeVariable.create(tx, False) + elif self.value is torch.enable_grad: + if len(args) == 1 and isinstance( + args[0], variables.functions.BaseUserFunctionVariable + ): + ctx = GradModeVariable.create(tx, True) + return ctx.call_function(tx, args, kwargs) + return GradModeVariable.create(tx, True) + elif self.value is torch.set_grad_enabled and len(args) == 1: + return GradModeVariable.create( + tx, args[0].as_python_constant(), initialized=True + ) + elif self.value is torch.inference_mode: + assert len(args) <= 1 and len(kwargs) == 0 + inf_mode = args[0].as_python_constant() if len(args) == 1 else True + return InferenceModeVariable.create(tx, inf_mode) + elif inspect.isclass(self.value) and issubclass(self.value, _StreamBase): + from torch._dynamo.variables.builder import wrap_fx_proxy_cls + + return wrap_fx_proxy_cls( + StreamVariable, + tx, + tx.output.create_proxy( + "call_function", + self.value, + (), + {}, + ), + ) + elif self.value in ( + torch.amp.autocast_mode.autocast, + torch.cuda.amp.autocast, + torch.cpu.amp.autocast, + ): + return AutocastModeVariable.create(self.value, args, kwargs) + elif self.value in ( + torch.profiler.profile, + torch.profiler.record_function, + torch.autograd.profiler.profile, + torch.autograd.profiler.record_function, + ): + warning_once(log, "Profiler function %s will be ignored", self.value) + return NullContextVariable() + elif self.value is torch._C.DisableTorchFunctionSubclass: + assert not (args or kwargs) + return TorchFunctionDisableVariable.create(tx) + elif self.value is torch._functorch.vmap.vmap_increment_nesting: + assert len(args) == 2 + return VmapIncrementNestingCtxManagerVariable.create( + tx, + [guard_if_dyn(x) for x in args], + ) + elif self.value is torch._functorch.eager_transforms.jvp_increment_nesting: + assert len(args) == 0 + return JvpIncrementNestingCtxManagerVariable.create(tx) + elif self.value is torch.autograd.forward_ad._set_fwd_grad_enabled: + assert len(args) == 1 + return SetFwdGradEnabledContextManager.create( + tx, + [guard_if_dyn(x) for x in args], + ) + elif self.value is torch.autograd.forward_ad.dual_level: + assert len(args) == 0 + return DualLevelContextManager.create(tx) + elif self.value is torch._functorch.eager_transforms.grad_increment_nesting: + assert len(args) == 0 + return GradIncrementNestingCtxManagerVariable.create(tx) + elif ( + self.value is torch._functorch.eager_transforms.enable_inplace_requires_grad + ): + assert len(args) == 1 + return GradInplaceRequiresGradCtxManagerVariable.create( + tx, + [guard_if_dyn(x) for x in args], + ) + elif self.value is torch.autograd.graph.disable_saved_tensors_hooks: + assert len(args) == 1 + return DisabledSavedTensorsHooksVariable.create( + tx, args[0].as_python_constant() + ) + elif ( + _fsdp_param_group is not None + and self.value is _fsdp_param_group.FSDPParamGroup.use_training_state + ): + assert len(args) == 2 + return 
FSDPParamGroupUseTrainingStateVariable.create( + tx, args[0], args[1].as_python_constant() + ) + + return super().call_function(tx, args, kwargs) + + +class TorchInGraphFunctionVariable(BaseTorchVariable): + """Points to a torch function/method that should be put in FX graph""" + + def __repr__(self) -> str: + return f"TorchInGraphFunctionVariable({self.value})" + + def get_function(self): + return self.value + + @staticmethod + @functools.lru_cache(None) + def _get_handlers(): + """Build a dict from function -> method to handle it so that we are O(1) + in terms of the number of function with special handling.""" + handlers = {} + + def register(*fns): + def _register(handler): + for fn in fns: + assert fn not in handlers, fn + handlers[fn] = handler + return handler + + assert callable(fns[0]) + return _register + + from torch.backends.cuda import SDPAParams + + from . import ( + ConstantVariable, + DeterministicAlgorithmsVariable, + GradModeVariable, + StreamContextVariable, + SymNodeVariable, + TensorVariable, + UserDefinedObjectVariable, + ) + from .builder import SourcelessBuilder, wrap_fx_proxy, wrap_fx_proxy_cls + + @register(*tracing_state_functions) + def handle_tracing_state_functions( + self, tx: "InstructionTranslator", *args, **kwargs + ): + assert not args and not kwargs + # See: https://github.com/pytorch/pytorch/issues/110765 + if self.value in ( + torch._utils.is_compiling, + torch._dynamo.external_utils.is_compiling, + torch.compiler.is_compiling, + torch.compiler.is_dynamo_compiling, + ): + tx.mark_inconsistent_side_effects() + return ConstantVariable.create(tracing_state_functions[self.value]) + + @register(torch.overrides.get_default_nowrap_functions.__wrapped__) + def handle_get_default_nowrap_functions( + self, tx: "InstructionTranslator", *args, **kwargs + ): + # [Note: __torch_function__] we return empty here because we restrict + # the set of functions that we trace __torch_function__ on to + # functions outside of the actual set. 
Implementing this properly will require implementing + # some variable types to track and compare tensor getset descriptors + return SourcelessBuilder.create( + tx, torch.overrides.get_default_nowrap_functions() + ) + + @register(torch.ops.inductor.accumulate_grad_.default) + def handle_accumulate_grad_(self, tx: "InstructionTranslator", *args, **kwargs): + return tx.inline_user_function_return( + SourcelessBuilder.create(tx, polyfills.accumulate_grad), args, kwargs + ) + + @register(math.radians) + def handle_radians(self, tx: "InstructionTranslator", *args, **kwargs): + if not check_unspec_or_constant_args(args, kwargs): + # Use polyfill to convert math.radians(x) into math.pi * x / 180.0 + return tx.inline_user_function_return( + SourcelessBuilder.create(tx, polyfills.radians), args, kwargs + ) + + @register(torch.is_tensor, torch.overrides.is_tensor_like) + def handle_is_tensor(self, tx: "InstructionTranslator", arg): + if isinstance(arg, TensorVariable) or ( + self.value is torch.overrides.is_tensor_like + and isinstance(arg, UserDefinedObjectVariable) + and hasattr(arg.value, "__torch_function__") + ): + return ConstantVariable.create(True) + else: + return ConstantVariable.create(False) + + @register( + torch.is_floating_point, + torch.is_complex, + ) + def handle_is_floating_point(self, tx: "InstructionTranslator", input): + input_arg = input + if isinstance(input_arg, TensorVariable) and input_arg.dtype is not None: + if self.value is torch.is_floating_point: + return ConstantVariable.create(input_arg.dtype.is_floating_point) + elif self.value is torch.is_complex: + return ConstantVariable.create(input_arg.dtype.is_complex) + else: + raise AssertionError(f"calling {self.value}") + + @register(torch.numel) + def handle_numel(self, tx: "InstructionTranslator", input): + if isinstance(input, TensorVariable) and input.size is not None: + return ConstantVariable.create(product(input.size)) + elif isinstance(input, TensorVariable): + # Workaround dynamic shapes issue + return input.call_method(tx, "numel", [], {}) + + @register(*REWRITE_OPS_TO_TENSOR_SIZE_METHOD) + def handle_tensor_size_rewrites(self, tx: "InstructionTranslator", input): + assert isinstance(input, TensorVariable) + return input.call_method(tx, "size", [], {}) + + @register( + torch.nn.modules.utils._single, + torch.nn.modules.utils._pair, + torch.nn.modules.utils._triple, + torch.nn.modules.utils._quadruple, + torch.nn.modules.utils._ntuple, + ) + def handle_ntuple(self, tx: "InstructionTranslator", *args, **kwargs): + return self._call_ntuple(tx, args, kwargs) + + @register(torch.is_grad_enabled) + def handle_is_grad_enabled(self, tx): + install_guard(GradModeVariable._guards_singleton) + return ConstantVariable.create(torch.is_grad_enabled()) + + @register(torch.use_deterministic_algorithms) + def handle_use_deterministic_algorithms( + self, tx: "InstructionTranslator", mode, warn_only=False + ): + if warn_only and warn_only.as_python_constant(): + unimplemented("torch.use_deterministic_algorithms(warn_only=True)") + return DeterministicAlgorithmsVariable.create(tx, mode.as_python_constant()) + + @register(torch.are_deterministic_algorithms_enabled) + def handle_are_deterministic_algorithms_enabled(self, tx): + install_guard(DeterministicAlgorithmsVariable._guards_singleton) + return ConstantVariable.create(torch.are_deterministic_algorithms_enabled()) + + @register(torch._C._is_torch_function_enabled) + def handle_is_torch_function_enabled(self, tx): + install_guard(TorchFunctionDisableVariable._guards_singleton) + 
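+            # The guard installed above forces a recompile if the torch
+            # function enabled state later changes, so it is safe to bake the
+            # current state into the graph as a constant here.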
return ConstantVariable.create(tx.output.torch_function_enabled) + + @register( + torch.overrides.has_torch_function, + torch.overrides.has_torch_function_variadic, + torch.overrides.has_torch_function_unary, + ) + def handle_has_torch_function(self, tx: "InstructionTranslator", *args): + elems = ( + args[0].unpack_var_sequence(tx) + if len(args) == 1 and isinstance(args[0], TupleVariable) + else args + ) + return ConstantVariable.create( + any(has_torch_function(x) for x in elems), + ) + + @register( + *dict.fromkeys( # remove duplicates + device_interface.stream + for _, device_interface in get_registered_device_interfaces() + ) + ) + def handle_device_interface_stream(self, tx: "InstructionTranslator", stream): + return StreamContextVariable.create(tx, stream) + + @register(torch.from_numpy) + def handle_from_numpy(self, tx: "InstructionTranslator", *args): + if not config.trace_numpy: + unimplemented("torch.from_numpy. config.trace_numpy is False") + if not np: + unimplemented("torch.from_numpy. NumPy is not available") + return wrap_fx_proxy_cls( + target_cls=TensorVariable, + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + torch.as_tensor, + *proxy_args_kwargs(args, {}), + ), + example_value=None, + ) + + @register(torch.jit.annotate) + def handle_jit_annotate(self, tx: "InstructionTranslator", the_type, the_value): + return the_value + + @register(torch.backends.cudnn.is_acceptable) + def handle_cudnn_is_acceptable( + self, tx: "InstructionTranslator", tensor, *extra + ): + # is_acceptable(tensor) returns true if + # (a) tensor dtype/device are supported by cudnn + # (b) cudnn is available + # (c) some initialization has completed + # technically, it depends on some global state from (c) (torch.backends.cudnn.__cudnn_version) + assert not extra, "Expect 1 input to cudnn.is_acceptable" + assert isinstance( + tensor, TensorVariable + ), "Expect input to cudnn.is_acceptable to be a tensor" + tensor_inp = torch.tensor(0, dtype=tensor.dtype, device=tensor.device) + return ConstantVariable.create( + torch.backends.cudnn.is_acceptable(tensor_inp) + ) + + @register(torch.utils.hooks.BackwardHook) + def handle_backward_hook(self, tx: "InstructionTranslator", *args, **kwargs): + return variables.BackwardHookVariable.create(tx, *args, **kwargs) + + @register(torch.nn.Parameter) + def handle_parameter(self, tx: "InstructionTranslator", *args, **kwargs): + return self.call_nn_parameter(tx, *args, **kwargs) + + @register(torch.ops.aten.sym_size, torch.ops.aten.sym_size.int) + def handle_sym_size(self_, tx, self, dim=None): + # we see this when retracing already traced code + if dim is not None: + return self.call_method(tx, "size", [dim], {}) + + @register(torch.ops.aten.sym_stride, torch.ops.aten.sym_stride.int) + def handle_sym_stride(self_, tx, self, dim=None): + if dim is not None: + return self.call_method(tx, "stride", [dim], {}) + + @register(torch.addcdiv) + def handle_addcdiv(self, tx: "InstructionTranslator", *args, **kwargs): + if len(args) == 3 and "value" in kwargs and len(kwargs) == 1: + # decompose addcdiv into constituent ops, prevents a graph break due to converting + # value to a scalar + result = TorchInGraphFunctionVariable(torch.div).call_function( + tx, [*args[1:]], {} + ) + result = TorchInGraphFunctionVariable(torch.mul).call_function( + tx, [result, kwargs["value"]], {} + ) + return TorchInGraphFunctionVariable(torch.add).call_function( + tx, [args[0], result], {} + ) + + @register(torch._foreach_lerp_) + def handle_inplace_foreach_lerp_scalar( + self, tx: 
"InstructionTranslator", *args, **kwargs + ): + if len(args) == 3 and not isinstance(args[2], ListVariable) and not kwargs: + return tx.inline_user_function_return( + SourcelessBuilder.create(tx, polyfills.foreach_lerp_inplace), + args, + kwargs, + ) + + @register(torch._foreach_pow) + def handle_foreach_pow_scalar( + self, tx: "InstructionTranslator", *args, **kwargs + ): + # In eager it's more performant to call item() from within the C op implementation + # in compile, it's more performant to not graph break. + if len(args) == 2 and isinstance(args[0], TensorVariable) and not kwargs: + return tx.inline_user_function_return( + SourcelessBuilder.create(tx, polyfills.foreach_pow_scalar), + args, + kwargs, + ) + + @register(torch._assert) + def handle_assert(self, tx: "InstructionTranslator", condition, message): + if (condition.is_python_constant() and condition.as_python_constant()) or ( + isinstance(condition, variables.SymNodeVariable) + and condition.evaluate_expr() + ): + return ConstantVariable(None) + + @register(SDPAParams) + def handle_sdpa_params(self, tx: "InstructionTranslator", *args, **kwargs): + return wrap_fx_proxy( + tx, + proxy=tx.output.create_proxy( + "call_function", + torch._C._SDPAParams, + *proxy_args_kwargs(args, kwargs), + ), + param_vars=args, + ) + + if DistributedVariable.is_available(): + from torch.distributed.distributed_c10d import ( + _get_group_size_by_name, + _get_group_tag, + _rank_not_in_group, + _resolve_group_name_by_ranks_and_tag, + get_process_group_ranks, + ) + from torch.distributed.tensor import DTensor + + @register( + _get_group_size_by_name, + _get_group_tag, + _rank_not_in_group, + get_process_group_ranks, + _resolve_group_name_by_ranks_and_tag, + ) + def handle_constant_processgroup_functions( + self, tx: "InstructionTranslator", *args + ): + # because the input is a "ProcessGroupVariable", we'll be guarding on its + # ID_MATCH based on how it was constructed. + + # We desugar it at trace-time into ranks by directly calling util + # bake the result into the trace + if len(args) == 1: + # group or group name + assert isinstance(args[0], (ProcessGroupVariable, ConstantVariable)) + elif len(args) == 2: + # ranks + tag + assert isinstance(args[0], ListVariable) and isinstance( + args[1], ConstantVariable + ) + else: + raise AssertionError( + f"Invalid group value ({args}) for constant pg " + f"function {self.value}" + ) + args_as_value = [arg.as_python_constant() for arg in args] + invocation_result = self.value(*args_as_value) + + # Note - while we *could* cook up sources around invocations, like a FunctionSource + # the space of invoking functions in the middle of the guard chain is very iffy. As such, + # guard propagation via options is the best we can do. 
+ return SourcelessBuilder.create(tx, invocation_result) + + @register(DTensor.from_local) + def handle_from_local(self, tx: "InstructionTranslator", *args, **kwargs): + # rewrite non-primitive args/kwargs to be included in the on-the-fly prim function + # and rewrite args to have only proxyable args, then insert call_function + args_as_value = [x.as_python_constant() for x in args[1:]] + kwargs_as_value = { + k: v.as_python_constant() + for k, v in kwargs.items() + if k not in ["shape", "stride"] + } + kwargs_to_be_proxied = { + k: kwargs[k] for k in ["shape", "stride"] if k in kwargs + } + + def fn_with_prim_types(x, shape=None, stride=None): + return self.value( + x, *args_as_value, **kwargs_as_value, shape=shape, stride=stride + ) + + # attach the same function name for better debugging + fn_with_prim_types.__name__ = "prim " + self.value.__name__ + + return wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + fn_with_prim_types, + *proxy_args_kwargs( + [args[0]], + kwargs_to_be_proxied, + ), + ), + ) + + @register(torch.nested.nested_tensor) + def handle_nested_tensor( + self, + tx: "InstructionTranslator", + tensor_list=None, + *args, + layout=None, + **kwargs, + ): + from .lists import BaseListVariable + + if layout and layout.as_python_constant() == torch.strided: + unimplemented("torch.compile does not support strided NestedTensor") + if not isinstance(tensor_list, BaseListVariable): + unimplemented("nested_tensor with non-list input") + + @register(torch.nn.functional.one_hot) + def handle_one_hot(self, tx: "InstructionTranslator", *args, **kwargs): + if len(args) + len(kwargs) == 1 or ( + len(args) == 2 + and args[1].is_python_constant() + and args[1].as_python_constant() == -1 + ): + unimplemented( + "torch.nn.functional.one_hot with data-dependent output shape" + ) + + @register(torch.fx.experimental.symbolic_shapes.guard_size_oblivious) + def handle_guard_size_oblivious(self, tx: "InstructionTranslator", expr): + if isinstance(expr, SymNodeVariable): + # TODO: this probably should be folded somewhere else but I'm not sure where + # TODO: some of the other symbolic_shapes special tools can also get this treatment too + return variables.ConstantVariable.create( + torch.fx.experimental.symbolic_shapes.guard_size_oblivious( + expr.sym_num + ) + ) + elif isinstance(expr, ConstantVariable): + return expr + + @register(torch._C._autograd._unsafe_set_version_counter) + def handle_unsafe_set_version_counter( + self, tx: "InstructionTranslator", *args, **kwargs + ): + from ..tensor_version_op import _unsafe_set_version_counter + + return TorchInGraphFunctionVariable( + _unsafe_set_version_counter + ).call_function(tx, [*args], kwargs) + + @register(torch.tensor) + def handle_torch_tensor(self, tx: "InstructionTranslator", *args, **kwargs): + def check_any_unspec(x): + # NB: This includes UnspecializedPythonVariable + if isinstance(x, (TensorVariable, SymNodeVariable)): + return True + elif isinstance(x, (ListVariable, TupleVariable)): + return any(check_any_unspec(y) for y in x.items) + # TODO: there maybe other recursive structures you need to + # check + else: + return False + + data_arg = None + if args: + data_arg = args[0] + elif "data" in kwargs: + data_arg = kwargs["data"] + + # NB: OK to pass torch.tensor(tensor), this will trace fine + if not isinstance(data_arg, TensorVariable) and check_any_unspec(data_arg): + # This is slower and less canonical, so only use it if we + # have to + return TorchInGraphFunctionVariable(torch._refs.tensor).call_function( 
+ tx, [*args], kwargs + ) + + @register(torch._C._pop_torch_function_stack) + def handle_pop_torch_function( + self, tx: "InstructionTranslator", *args, **kwargs + ): + assert not args and not kwargs + if not tx.symbolic_torch_function_mode_stack: + raise unimplemented("Popping from an empty torch function mode stack") + TorchFunctionModeStackVariable.register_mutation(tx) + return tx.symbolic_torch_function_mode_stack.pop() + + @register(torch._C._push_on_torch_function_stack) + def handle_push_torch_function( + self, tx: "InstructionTranslator", *args, **kwargs + ): + assert len(args) == 1 and not kwargs + TorchFunctionModeStackVariable.register_mutation(tx) + tx.symbolic_torch_function_mode_stack.append(args[0]) + return ConstantVariable.create(None) + + @register(torch._C._len_torch_function_stack) + def handle_len_torch_function( + self, tx: "InstructionTranslator", *args, **kwargs + ): + assert not args and not kwargs + return ConstantVariable.create(len(tx.symbolic_torch_function_mode_stack)) + + @register(torch.set_default_device) + def handle_set_default_device( + self, tx: "InstructionTranslator", *args, **kwargs + ): + # Today this is inserted in the graph, once TF mode + # handling is complete, we can trace the device context + # like any other TF mode and remove this special handling + # Insert the TF mode representing the device context at + # the bottom of the stack to match the eager semantics + # Running the graph will ensure that the DeviceContext mode is + # at the correct position in the stack + TorchFunctionModeStackVariable.register_mutation(tx) + if args[0].is_python_constant() and args[0].as_python_constant() is None: + TorchFunctionModeStackVariable.clear_default_device(tx) + else: + TorchFunctionModeStackVariable.register_device_context_insertion(tx) + + return None + + return handlers + + def call_function( + self, + tx: "InstructionTranslator", + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + from . import ConstantVariable, SymNodeVariable, TensorVariable + from .builder import wrap_fx_proxy + + if self.can_constant_fold_through() and check_unspec_or_constant_args( + args, kwargs + ): + # constant fold + return ConstantVariable.create( + self.as_python_constant()( + *[x.as_python_constant() for x in args], + **{k: v.as_python_constant() for k, v in kwargs.items()}, + ), + ) + + special_handler = self._get_handlers().get(self.value) + if special_handler: + result = special_handler(self, tx, *args, **kwargs) + if result: + return result + + if can_dispatch_torch_function(tx, args, kwargs): + return dispatch_torch_function(tx, self, args, kwargs) + else: + any_symints_or_symfloats = any(isinstance(x, SymNodeVariable) for x in args) + + all_ints_or_floats = all( + isinstance(x, (variables.ConstantVariable, variables.SymNodeVariable)) + for x in args + ) + if ( + getattr(self.value, "__module__", "") == "torch" + and self.value.__name__ in bin_ops + and any_symints_or_symfloats + and all_ints_or_floats + ): + msg = f"""\ +Calling {str(self.value)} on only torch.SymInt arguments is not yet supported. +To support this behavior, we need to allow const-propping tensors that store symint data. +For now, dynamo will explicitly graph break when it encounters user code with this behavior. +""" + log.warning(msg) + unimplemented(msg) + + # TODO(voz): Replace w/ dynamic shape rewrite table. + # Ideally, we would be able to do this at ctor time, but alas we need a combination + # of value + args to determine this. 
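+            # For instance (assuming this torch build exposes the _sym_* ops):
+            # math.sqrt applied to a SymFloat argument is redirected below to
+            # torch._sym_sqrt, so the result stays symbolic instead of graph
+            # breaking.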
+ fn_ = self.value + if any_symints_or_symfloats: + torch_sym_op = f"_sym_{self.value.__name__}" + if getattr(self.value, "__module__", None) == "math" and hasattr( + torch, torch_sym_op + ): + fn_ = getattr(torch, torch_sym_op) + + fake_out_shape = None + if "out" in kwargs and isinstance(kwargs["out"], variables.TensorVariable): + # Calling fake tensor propagation can mutate the out= tensor in + # tx.output.tracked_fakes. tracked_fakes are used to apply + # symbolic_shape guards. Mutating them destroys the information + # prior to tracing, which is essential for creating right + # guards. So save the shape now, and check later if it has + # changed. If it has, graph break. + fake_out_shape = kwargs["out"].proxy.node.meta["example_value"].shape + + tensor_variable = wrap_fx_proxy( + tx=tx, + proxy=tx.output.create_proxy( + "call_function", + fn_, + *proxy_args_kwargs(args, kwargs), + ), + ) + + if ( + isinstance(tensor_variable, TensorVariable) + and "requires_grad" in kwargs + and kwargs["requires_grad"].as_python_constant() + ): + unimplemented( + """factory functions that return tensors that require grad are not supported. +Either create the tensor outside the compiled region, or do not set the tensor to require_grad""" + ) + + if "out" in kwargs and not ( + isinstance(kwargs["out"], variables.ConstantVariable) + and kwargs["out"].as_python_constant() is None + ): + # out variants of torch operators like torch.sort and + # torch.sigmoid mutate the tensors in the out field. Track such + # tensors and rewrite the symbolic locals. + if isinstance(tensor_variable, TupleVariable): + assert isinstance(kwargs["out"], (TupleVariable, ListVariable)) + output_tensor_names = [ + tx.find_symbolic_locals_name(x) for x in kwargs["out"].items + ] + for idx, name in enumerate(output_tensor_names): + if name in tx.symbolic_locals: + tx.symbolic_locals[name] = tensor_variable.items[idx] + for out_tensor, result_tensor in zip( + kwargs["out"].items, tensor_variable.items + ): + if ( + out_tensor.source + and out_tensor in tx.output.graphargs + and isinstance(out_tensor, variables.TensorVariable) + and isinstance(result_tensor, variables.TensorVariable) + and out_tensor.size != result_tensor.size + ): + # It's hard to get out variants with resizing on graph inputs work + # properly across dynamo/aot/inductor, just fall back. + unimplemented("out variants with resizing on graph inputs") + elif isinstance(tensor_variable, TensorVariable): + assert isinstance(kwargs["out"], TensorVariable) + assert "example_value" in kwargs["out"].proxy.node.meta + fake_tensor = tensor_variable.proxy.node.meta["example_value"] + fake_out = kwargs["out"].proxy.node.meta["example_value"] + if ( + kwargs["out"].source + and kwargs["out"] in tx.output.graphargs + and fake_out_shape != fake_tensor.shape + ): + # It's hard to get out variants with resizing on graph inputs work + # properly across dynamo/aot/inductor, just fall back. 
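+                            # (illustrative: e.g. torch.sort(x, out=(values, indices))
+                            # where the preallocated outputs are graph inputs
+                            # whose sizes would need to change)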
+ unimplemented("out variants with resizing on graph inputs") + if not torch._prims_common.is_contiguous(fake_out): + # It's difficult to handle strides correctly in functionalization + # when calling an out= op with a non-contiguous out argument + unimplemented( + "out= op was called where output tensor was non-contiguous" + ) + name = tx.find_symbolic_locals_name(kwargs["out"]) + if name in tx.symbolic_locals: + tx.symbolic_locals[name] = tensor_variable + elif ( + isinstance(tensor_variable, ConstantVariable) + and tensor_variable.value is None + ): + # Handle out-variant custom ops that return None. + if isinstance(kwargs["out"], TensorVariable): + assert "example_value" in kwargs["out"].proxy.node.meta + fake_out = kwargs["out"].proxy.node.meta["example_value"] + if not torch._prims_common.is_contiguous(fake_out): + # It's difficult to handle strides correctly in functionalization + # when calling an out= op with a non-contiguous out argument + unimplemented( + "out= op was called where output tensor was non-contiguous" + ) + elif isinstance(kwargs["out"], ListVariable): + for idx, x in enumerate(kwargs["out"].items): + assert "example_value" in x.proxy.node.meta # type: ignore[attr-defined] + fake_out = x.proxy.node.meta["example_value"] # type: ignore[attr-defined] + if not torch._prims_common.is_contiguous(fake_out): + # It's difficult to handle strides correctly in functionalization + # when calling an out= op with a non-contiguous out argument + unimplemented( + "out= op was called where some of the output tensors were non-contiguous" + ) + else: + unimplemented(f"out variant of {type(kwargs['out'])}") + + return tensor_variable + + def _call_ntuple(self, tx: "InstructionTranslator", args, kwargs): + """inline behavior of torch.nn.modules.utils._ntuple""" + if self.value is torch.nn.modules.utils._ntuple: + count = args[0].as_python_constant() + else: + count = self.value.__closure__[0].cell_contents + assert isinstance(count, int) + assert not kwargs + + def handle_ntuple(value): + if value.has_unpack_var_sequence(tx): + return variables.TupleVariable( + list(value.unpack_var_sequence(tx)), + ) + elif value.is_python_constant(): + # constant prop through it + return variables.ConstantVariable.create( + torch.nn.modules.utils._ntuple(count)(value.as_python_constant()), + ) + else: + unimplemented(f"torch.nn.modules.utils._ntuple({value})") + + if self.value is torch.nn.modules.utils._ntuple: + return variables.LambdaVariable(handle_ntuple) + else: + return handle_ntuple(args[0]) + + @classmethod + def call_nn_parameter(cls, tx, data=None, requires_grad=True): + """A call to torch.nn.Parameter() gets lifted to before the graph""" + if tx.export: + unimplemented("nn parameter construction not supported with export") + + if isinstance(requires_grad, variables.VariableTracker): + try: + requires_grad = requires_grad.as_python_constant() + except NotImplementedError: + unimplemented("Parameter(requires_grad=...) 
not constant") + + if not isinstance(data, variables.TensorVariable): + unimplemented(f"Parameter(data={data}) not implemented") + + # this results in cleaner graphs, but only works for inputs + if data.source: + return cls._nn_param_via_prefix_insert(tx, data, requires_grad) + + if is_traceable_wrapper_subclass_type(data.class_type): + unimplemented("Parameter constructor with tensor subclass NYI") + + if not can_convert_to_tracable_parameter(): + unimplemented("Workaround for issues with nn_parameter construction") + + try: + shape = tuple(data.var_getattr(tx, "shape").as_python_constant()) + dtype = data.var_getattr(tx, "dtype").as_python_constant() + device = data.var_getattr(tx, "device").as_python_constant() + except NotImplementedError as e: + unimplemented(f"Parameter not python_constant: {e}") + + placeholder = tx.output.synthetic_graph_input( + new_parameter_placeholder, [shape, dtype, device, requires_grad] + ) + if data.requires_grad: + data = data.call_method(tx, "detach", [], {}) + + from .builder import wrap_fx_proxy + + result = wrap_fx_proxy( + tx, + tx.output.create_proxy( + "call_function", + tracable_create_parameter, + (data.as_proxy(), placeholder.as_proxy()), + {}, + ), + ) + assert isinstance(result, variables.TensorVariable) + result.class_type = torch.nn.Parameter + + # TODO(jansel/bdhirsh) - There is some issue with + # tracable_create_paramter. It does not seem to use the right + # grad_enabled. Since this is parameter, we can just override the + # has_grad_fn field to False to workaround the issue. + result.has_grad_fn = False + + # In reconstruct() should use the original parameter. The one returned by the graph will be an alias. + result.source = placeholder.source + + # TODO(jansel): if the new param falls out of scope, currently it won't get freed until + # the end of the graph. We should fix this. + return result + + @staticmethod + def _nn_param_via_prefix_insert(tx: "InstructionTranslator", data, requires_grad): + # Alternate version if we have a .source + from .builder import VariableBuilder + + varname = tx.output.new_var() + + # construct the nn.Parmeter before the graph save it to varname + cg = PyCodegen(tx) + cg.add_push_null(lambda: cg.load_import_from("torch.nn", "Parameter")) + cg(data.source) + cg(variables.ConstantVariable(requires_grad)) + cg.call_function(2, False) + cg.store(varname) + tx.output.pregraph_bytecode.extend(cg.get_instructions()) + + data_node = data.as_proxy().node + if data_node.op not in ("placeholder", "get_attr"): + unimplemented( + "Unexpected type of data placeholder op for parameter construction" + ) + + # add the newly constructed nn.Parameter as a graph input + source = SyntheticLocalSource(varname) + example_value = torch.nn.Parameter( + tx.output.example_value_from_input_node(data.as_proxy().node) + ) + result = VariableBuilder(tx, source)(example_value) + # No need to guard on this since we already guarded on `data`. 
+        # These guards would fail since varname doesn't exist until after the
+        # function starts
+        TracingContext.get().guards_context.dynamo_guards.remove_guards_with_source(
+            source
+        )
+        return result
diff --git a/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/torch_function.py b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/torch_function.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b3188507fe4b51faa22946066b35ed89e2d54a3
--- /dev/null
+++ b/pllava/lib/python3.10/site-packages/torch/_dynamo/variables/torch_function.py
@@ -0,0 +1,389 @@
+# mypy: ignore-errors
+
+import inspect
+from typing import Dict, List, TYPE_CHECKING
+
+import torch.utils._pytree as pytree
+from torch._guards import Source
+from torch.overrides import _get_overloaded_args, get_default_nowrap_functions
+from torch.utils._device import DeviceContext
+
+from ..exc import unimplemented
+from ..guards import GuardBuilder, install_guard
+from ..source import AttrSource, GlobalSource, TorchFunctionModeStackSource, TypeSource
+from ..utils import get_safe_global_name, has_torch_function, is_tensor_base_attr_getter
+from .base import VariableTracker
+from .constant import ConstantVariable
+from .ctx_manager import ContextWrappingVariable
+from .lists import TupleVariable
+from .tensor import TensorSubclassVariable, TensorVariable
+from .user_defined import UserDefinedObjectVariable
+
+
+if TYPE_CHECKING:
+    from torch._dynamo.symbolic_convert import InstructionTranslator
+
+
+# [Note: __torch_function__] This feature is a prototype and has some rough edges (contact mlazos with issues):
+# At a high level, a torch function tensor subclass is represented as a TensorWithTFOverrideVariable, which dispatches
+# __torch_function__ on attribute accesses, method calls, and torch API calls.
+# The following is not supported:
+# - triggering __torch_function__ on tensor subclass non-tensor custom attributes
+# - graph breaking on mutating guardable tensor properties within a __torch_function__ context; this can cause
+#   excessive recompiles in certain degenerate cases
+# - matching the exact eager behavior of *ignoring* __torch_function__ objects in non-tensor argument positions of torch API calls
+
+# The following is supported:
+# - static method impls of __torch_function__ on custom objects; this will trigger on torch API calls with the object as
+#   any argument
+# - triggering __torch_function__ on torch API calls with tensor subclass arguments
+# - __torch_function__ calls on base tensor attribute access and method calls for tensor subclass instances
+# - matching the dispatch ordering behavior of eager __torch_function__ with subclass/object arguments in any argument position
+
+# See https://docs.google.com/document/d/1WBxBSvW3NXhRp9ncmtokJloMLCtF4AYNhJaffvHe8Kw/edit#heading=h.vacn73lozd9w
+# for more information on the design.
+
+# To enable subclass behavior, add your tensor subclass type to traceable_tensor_subclasses in dynamo/config.py
+
+
+banned_attrs = [
+    fn.__self__.__name__
+    for fn in get_default_nowrap_functions()
+    if is_tensor_base_attr_getter(fn)
+]
+
+# Today set_default_device is placed in the graph and guarded on separately
+# so we should not trace through it.
In the future we can trace it once +# mode tracing is implemented and not put in the graph, but this is more +# of a BE project and can be evaluated later +IGNORED_MODES = {DeviceContext} + + +class TorchFunctionModeStackVariable(VariableTracker): + """Fake VT to use as a dummy object, indicating the presence of torch function mode stack mutation""" + + # singleton value representing the global torch function mode stack + # singleton (it exists in C++) + stack_value_singleton = object() + + # offset is used to track if we have inserted/removed a + # device context which is always placed at the bottom of the stack + # if a device context is inserted, the graph will run this mutation + # so when we want to reconstruct any other modes on the stack + # their indices should be shifted right by 1 (+1) + # Conversely, if there was a device context on the stack, and the graph + # mutates the stack to remove that context (set default device to None) + # each of the indices of other modes should be shifted left by 1 (-1) + offset = 0 + + def __init__(self, source, symbolic_stack): + self.source = source + self.symbolic_stack = symbolic_stack + + @classmethod + def reset(cls): + cls.offset = 0 + + @classmethod + def register_mutation(cls, tx: "InstructionTranslator"): + if cls.stack_value_singleton not in tx.output.side_effects: + var = cls( + source=Source(), symbolic_stack=tx.symbolic_torch_function_mode_stack + ) + tx.output.side_effects.track_mutable(cls.stack_value_singleton, var) + tx.output.side_effects.mutation(var) + + @classmethod + def register_device_context_insertion(cls, tx: "InstructionTranslator"): + stack = tx.symbolic_torch_function_mode_stack + if stack and cls.is_device_context(stack[0]): + return + else: + cls.offset += 1 + tx.symbolic_torch_function_mode_stack.insert( + 0, + TorchFunctionModeVariable( + None, source=TorchFunctionModeStackSource(-cls.offset) + ), + ) + + @classmethod + def clear_default_device(cls, tx: "InstructionTranslator"): + stack = tx.symbolic_torch_function_mode_stack + if stack and cls.is_device_context(stack[0]): + stack.popleft() + cls.offset -= 1 + + @staticmethod + def is_device_context(var): + return isinstance(var.value, DeviceContext) or var.value is None + + @classmethod + def get_mode_index(cls, ind): + return ind + cls.offset + + +class TorchFunctionModeVariable(ContextWrappingVariable): + def __init__(self, value, **kwargs): + super().__init__(value, **kwargs) + self.value = value + + @staticmethod + def get_global_mangled_name(tx, val): + return get_safe_global_name( + tx, f"__torch_function_mode_{val.__class__.__name__}", val + ) + + def reconstruct(self, codegen): + # We don't support locally created torch function modes yet + assert self.source + self.source.reconstruct(codegen) + + def _call_func(self, tx, values): + unimplemented("torch function mode context manager is not supported yet") + + +def _get_all_args(args, kwargs): + return _flatten_vts(pytree.arg_tree_leaves(*args, **kwargs)) + + +def _flatten_vts(vts): + from collections import deque + + from .dicts import ConstDictVariable + from .lazy import LazyVariableTracker + from .lists import ListVariable + + vts = deque(vts) + output = [] + + while vts: + vt = vts.pop() + LazyVariableTracker.realize_all(vt) + if isinstance(vt, ListVariable): + vts.extend(vt.items) + elif isinstance(vt, ConstDictVariable): + vts.extend(vt.items.values()) + else: + output.append(vt) + + return output + + +def _get_subclass_type(var): + assert isinstance(var, (TensorWithTFOverrideVariable, 
UserDefinedObjectVariable)) + return var.python_type() + + +def _get_subclass_type_var(tx: "InstructionTranslator", var): + assert isinstance(var, (TensorWithTFOverrideVariable, UserDefinedObjectVariable)) + if isinstance(var, TensorWithTFOverrideVariable): + return var.class_type_var(tx) + elif isinstance(var, UserDefinedObjectVariable): + from .builder import SourcelessBuilder, VariableBuilder + + if var.source: + return VariableBuilder(tx, TypeSource(var.source))(var.python_type()) + else: + return SourcelessBuilder.create(tx, var.python_type()) + + +def _is_attr_overidden(tx: "InstructionTranslator", var, name): + import torch + + overridden = False + try: + attr_val = inspect.getattr_static(var.python_type(), name) + overridden |= attr_val != getattr(torch.Tensor, name) + except AttributeError: + pass + + return overridden + + +def call_torch_function( + tx, torch_function_type, torch_function_var, fn, types, args, kwargs +): + from .builder import SourcelessBuilder + + # signature: + # def __torch_function__(cls, func, types, args=(), kwargs=None): + tf_args = ( + torch_function_type, + fn, + types, + SourcelessBuilder.create(tx, tuple(args)), + SourcelessBuilder.create(tx, kwargs), + ) + return tx.inline_user_function_return(torch_function_var, tf_args, {}) + + +def build_torch_function_fn(tx: "InstructionTranslator", value, source): + from .builder import SourcelessBuilder, VariableBuilder + + if source: + return VariableBuilder( + tx, + AttrSource(AttrSource(source, "__torch_function__"), "__func__"), + )(value.__torch_function__.__func__) + else: + return SourcelessBuilder.create(tx, value.__torch_function__.__func__) + + +def can_dispatch_torch_function(tx: "InstructionTranslator", args, kwargs): + return tx.output.torch_function_enabled and any( + has_torch_function(arg) for arg in _get_all_args(args, kwargs) + ) + + +def dispatch_torch_function(tx: "InstructionTranslator", fn, args, kwargs): + """Gathers all args that are TensorWithTFOverrideVariable and dispatches based on the ordering in _get_overloaded_args""" + + all_args = _get_all_args(args, kwargs) + overloaded_args = _get_overloaded_args( + [arg for arg in all_args if has_torch_function(arg)], + _get_subclass_type, + ) + + for arg in overloaded_args: + res = arg.call_torch_function( + tx, + fn, + TupleVariable([_get_subclass_type_var(tx, arg) for arg in overloaded_args]), + args, + kwargs, + ) + + if not (isinstance(res, ConstantVariable) and res.value is NotImplemented): + return res + + unimplemented( + f"All __torch_function__ overrides for call {fn} with args {args} and kwargs {kwargs} returned NotImplemented" + ) + + +class TensorWithTFOverrideVariable(TensorVariable): + """ + Represents a tensor subclass instance with a __torch_function__ override. 
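+    Attribute reads, method calls and torch API calls on the wrapped value are
+    routed through the stored torch_function_fn (see var_getattr / call_method
+    / call_torch_function below) before falling back to plain tensor behavior.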
+ """ + + def __init__(self, *args, **kwargs) -> None: + self.torch_function_fn = kwargs.pop("torch_function_fn") + super().__init__(*args, **kwargs) + + @classmethod + def from_tensor_var(cls, tx, tensor_var, class_type, torch_function_fn): + import torch + + kwargs = dict(tensor_var.__dict__) + assert ( + kwargs.pop("class_type") is torch.Tensor + ), "invalid class type in TensorWithTFOverrideVariable.from_tensor_var" + var = cls(torch_function_fn=torch_function_fn, class_type=class_type, **kwargs) + var.install_global(tx) + return var + + def install_global(self, tx): + # stash the subclass type to rewrap an output tensor if needed + # this is needed because the actual type needs to be available + # each time the compiled artifact is run and outputs a wrapped tensor. + if self.global_mangled_class_name(tx) not in tx.output.global_scope: + # Safe because global_mangled_class_name figures it out + tx.output.install_global_unsafe( + self.global_mangled_class_name(tx), self.class_type + ) + + def python_type(self): + return self.class_type + + def class_type_var(self, tx): + return TensorSubclassVariable( + self.class_type, source=GlobalSource(self.global_mangled_class_name(tx)) + ) + + def global_mangled_class_name(self, tx): + return get_safe_global_name( + tx, f"__subclass_{self.class_type.__name__}", self.class_type + ) + + def var_getattr(self, tx: "InstructionTranslator", name): + # [Note: __torch_function__] We currently only support attributes that are defined on + # base tensors, custom attribute accesses will graph break. + import torch + + from .builder import SourcelessBuilder + + if name in banned_attrs: + unimplemented( + f"Accessing {name} on a tensor subclass with a __torch_function__ override is not supported" + ) + + if _is_attr_overidden(tx, self, name): + unimplemented( + f"Accessing overridden method/attribute {name} on a tensor" + " subclass with a __torch_function__ override is not supported" + ) + + if tx.output.torch_function_enabled and hasattr(torch.Tensor, name): + if self.source: + install_guard( + AttrSource(AttrSource(self.source, "__class__"), name).make_guard( + GuardBuilder.FUNCTION_MATCH + ) + ) + get_fn = SourcelessBuilder.create(tx, getattr(torch.Tensor, name).__get__) + + return self.call_torch_function( + tx, + get_fn, + TupleVariable([self.class_type_var(tx)]), + [self], + {}, + ) + else: + return super().var_getattr(tx, name) + + def call_torch_function(self, tx: "InstructionTranslator", fn, types, args, kwargs): + return call_torch_function( + tx, + self.class_type_var(tx), + self.torch_function_fn, + fn, + types, + args, + kwargs, + ) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + # This code block implements inlining the __torch_function__ override + # of `call_method`. 
+        if tx.output.torch_function_enabled:
+            import torch
+
+            from .builder import SourcelessBuilder, VariableBuilder
+
+            if _is_attr_overidden(tx, self, name):
+                unimplemented(
+                    f"Calling overridden method {name} on a tensor"
+                    " subclass with a __torch_function__ override is not supported"
+                )
+
+            # [Note: __torch_function__] Currently we only support methods that are defined on tensor;
+            # we graph break in other cases, since supporting them would need a bigger overhaul of
+            # extracting methods and comparing them for equality.
+            # We've established with the above check that the method is not overridden, so we guard
+            # that the method is the same as the impl defined on tensor and retrieve it.
+            if self.source:
+                func_var = VariableBuilder(
+                    tx, AttrSource(AttrSource(self.source, "__class__"), name)
+                )(inspect.getattr_static(self.python_type(), name))
+            else:
+                func_var = SourcelessBuilder.create(tx, getattr(torch.Tensor, name))
+            return dispatch_torch_function(tx, func_var, [self] + args, kwargs)
+        else:
+            return super().call_method(tx, name, args, kwargs)
diff --git a/pllava/lib/python3.10/site-packages/torch/_subclasses/__init__.py b/pllava/lib/python3.10/site-packages/torch/_subclasses/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..cdc42f39cbddaf5bdc919cef88d5f049fdba2634
--- /dev/null
+++ b/pllava/lib/python3.10/site-packages/torch/_subclasses/__init__.py
@@ -0,0 +1,17 @@
+import torch
+from torch._subclasses.fake_tensor import (
+    DynamicOutputShapeException,
+    FakeTensor,
+    FakeTensorMode,
+    UnsupportedFakeTensorException,
+)
+from torch._subclasses.fake_utils import CrossRefFakeMode
+
+
+__all__ = [
+    "FakeTensor",
+    "FakeTensorMode",
+    "UnsupportedFakeTensorException",
+    "DynamicOutputShapeException",
+    "CrossRefFakeMode",
+]
diff --git a/pllava/lib/python3.10/site-packages/transformers/kernels/deformable_detr/cuda/ms_deform_attn_cuda.cuh b/pllava/lib/python3.10/site-packages/transformers/kernels/deformable_detr/cuda/ms_deform_attn_cuda.cuh
new file mode 100644
index 0000000000000000000000000000000000000000..34f8ae9cb77bbaa8cb4dd25e0cb86632db9ad05d
--- /dev/null
+++ b/pllava/lib/python3.10/site-packages/transformers/kernels/deformable_detr/cuda/ms_deform_attn_cuda.cuh
@@ -0,0 +1,1467 @@
+/*!
+**************************************************************************************************
+* Deformable DETR
+* Copyright (c) 2020 SenseTime. All Rights Reserved.
+* Licensed under the Apache License, Version 2.0 [see LICENSE for details] +************************************************************************************************** +* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#include + +#include +#include + +#include +#include +#include + +#include +#include + +#include + +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ + i < (n); \ + i += blockDim.x * gridDim.x) + + +at::Tensor ms_deform_attn_cuda_forward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const int im2col_step) +{ + AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous"); + AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous"); + AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous"); + AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous"); + AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous"); + + AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor"); + AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor"); + AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor"); + AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor"); + AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor"); + + const int batch = value.size(0); + const int spatial_size = value.size(1); + const int num_heads = value.size(2); + const int channels = value.size(3); + + const int num_levels = spatial_shapes.size(0); + + const int num_query = sampling_loc.size(1); + const int num_point = sampling_loc.size(4); + + const int im2col_step_ = std::min(batch, im2col_step); + + AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); + + auto output = at::zeros({batch, num_query, num_heads, channels}, value.options()); + + const int batch_n = im2col_step_; + auto output_n = output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels}); + auto per_value_size = spatial_size * num_heads * channels; + auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2; + auto per_attn_weight_size = num_query * num_heads * num_levels * num_point; + for (int n = 0; n < batch/im2col_step_; ++n) + { + auto columns = output_n.select(0, n); + AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_forward_cuda", ([&] { + ms_deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(), + value.data() + n * im2col_step_ * per_value_size, + spatial_shapes.data(), + level_start_index.data(), + sampling_loc.data() + n * im2col_step_ * per_sample_loc_size, + attn_weight.data() + n * im2col_step_ * per_attn_weight_size, + batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point, + columns.data()); + + })); + } + + output = output.view({batch, num_query, num_heads*channels}); + + return output; +} + + +std::vector ms_deform_attn_cuda_backward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const at::Tensor &grad_output, + const int 
im2col_step) +{ + + AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous"); + AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous"); + AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous"); + AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous"); + AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous"); + AT_ASSERTM(grad_output.is_contiguous(), "grad_output tensor has to be contiguous"); + + AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor"); + AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor"); + AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor"); + AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor"); + AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor"); + AT_ASSERTM(grad_output.type().is_cuda(), "grad_output must be a CUDA tensor"); + + const int batch = value.size(0); + const int spatial_size = value.size(1); + const int num_heads = value.size(2); + const int channels = value.size(3); + + const int num_levels = spatial_shapes.size(0); + + const int num_query = sampling_loc.size(1); + const int num_point = sampling_loc.size(4); + + const int im2col_step_ = std::min(batch, im2col_step); + + AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); + + auto grad_value = at::zeros_like(value); + auto grad_sampling_loc = at::zeros_like(sampling_loc); + auto grad_attn_weight = at::zeros_like(attn_weight); + + const int batch_n = im2col_step_; + auto per_value_size = spatial_size * num_heads * channels; + auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2; + auto per_attn_weight_size = num_query * num_heads * num_levels * num_point; + auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels}); + + for (int n = 0; n < batch/im2col_step_; ++n) + { + auto grad_output_g = grad_output_n.select(0, n); + AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_backward_cuda", ([&] { + ms_deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(), + grad_output_g.data(), + value.data() + n * im2col_step_ * per_value_size, + spatial_shapes.data(), + level_start_index.data(), + sampling_loc.data() + n * im2col_step_ * per_sample_loc_size, + attn_weight.data() + n * im2col_step_ * per_attn_weight_size, + batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point, + grad_value.data() + n * im2col_step_ * per_value_size, + grad_sampling_loc.data() + n * im2col_step_ * per_sample_loc_size, + grad_attn_weight.data() + n * im2col_step_ * per_attn_weight_size); + + })); + } + + return { + grad_value, grad_sampling_loc, grad_attn_weight + }; +} + +const int CUDA_NUM_THREADS = 1024; +inline int GET_BLOCKS(const int N, const int num_threads) +{ + return (N + num_threads - 1) / num_threads; +} + + +template +__device__ scalar_t ms_deform_attn_im2col_bilinear(const scalar_t* &bottom_data, + const int &height, const int &width, const int &nheads, const int &channels, + const scalar_t &h, const scalar_t &w, const int &m, const int &c) +{ + const int h_low = floor(h); + const int w_low = floor(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const scalar_t lh = h - h_low; + const scalar_t lw = w - w_low; + const scalar_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * 
channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * channels + c; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + } + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + } + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + } + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + } + + const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + + +template +__device__ void ms_deform_attn_col2im_bilinear(const scalar_t* &bottom_data, + const int &height, const int &width, const int &nheads, const int &channels, + const scalar_t &h, const scalar_t &w, const int &m, const int &c, + const scalar_t &top_grad, + const scalar_t &attn_weight, + scalar_t* &grad_value, + scalar_t* grad_sampling_loc, + scalar_t* grad_attn_weight) +{ + const int h_low = floor(h); + const int w_low = floor(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const scalar_t lh = h - h_low; + const scalar_t lw = w - w_low; + const scalar_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * channels + c; + + const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + const scalar_t top_grad_value = top_grad * attn_weight; + scalar_t grad_h_weight = 0, grad_w_weight = 0; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + grad_h_weight -= hw * v1; + grad_w_weight -= hh * v1; + atomicAdd(grad_value+ptr1, w1*top_grad_value); + } + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + grad_h_weight -= lw * v2; + grad_w_weight += hh * v2; + atomicAdd(grad_value+ptr2, w2*top_grad_value); + } + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + grad_h_weight += hw * v3; + grad_w_weight -= lh * v3; + atomicAdd(grad_value+ptr3, w3*top_grad_value); + } + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + grad_h_weight += lw * v4; + grad_w_weight += lh * v4; + atomicAdd(grad_value+ptr4, w4*top_grad_value); + } + + const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + *grad_attn_weight = top_grad * val; + *grad_sampling_loc = width * grad_w_weight * top_grad_value; + *(grad_sampling_loc + 1) = height * 
grad_h_weight * top_grad_value; +} + + +template +__device__ void ms_deform_attn_col2im_bilinear_gm(const scalar_t* &bottom_data, + const int &height, const int &width, const int &nheads, const int &channels, + const scalar_t &h, const scalar_t &w, const int &m, const int &c, + const scalar_t &top_grad, + const scalar_t &attn_weight, + scalar_t* &grad_value, + scalar_t* grad_sampling_loc, + scalar_t* grad_attn_weight) +{ + const int h_low = floor(h); + const int w_low = floor(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const scalar_t lh = h - h_low; + const scalar_t lw = w - w_low; + const scalar_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * channels + c; + + const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + const scalar_t top_grad_value = top_grad * attn_weight; + scalar_t grad_h_weight = 0, grad_w_weight = 0; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + grad_h_weight -= hw * v1; + grad_w_weight -= hh * v1; + atomicAdd(grad_value+ptr1, w1*top_grad_value); + } + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + grad_h_weight -= lw * v2; + grad_w_weight += hh * v2; + atomicAdd(grad_value+ptr2, w2*top_grad_value); + } + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + grad_h_weight += hw * v3; + grad_w_weight -= lh * v3; + atomicAdd(grad_value+ptr3, w3*top_grad_value); + } + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + grad_h_weight += lw * v4; + grad_w_weight += lh * v4; + atomicAdd(grad_value+ptr4, w4*top_grad_value); + } + + const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + atomicAdd(grad_attn_weight, top_grad * val); + atomicAdd(grad_sampling_loc, width * grad_w_weight * top_grad_value); + atomicAdd(grad_sampling_loc + 1, height * grad_h_weight * top_grad_value); +} + + +template +__global__ void ms_deformable_im2col_gpu_kernel(const int n, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *data_col) +{ + CUDA_KERNEL_LOOP(index, n) + { + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + scalar_t *data_col_ptr = data_col + index; + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + 
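+    // One thread per output scalar: the flat `index` was decomposed above into
+    // (b_col, q_col, m_col, c_col) = (batch, query, head, channel). The loops
+    // below accumulate that scalar over every level and sampling point, with
+    // out-of-range samples contributing zero.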
scalar_t col = 0; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const scalar_t *data_value_ptr = data_value + (data_value_ptr_init_offset + level_start_id * qid_stride); + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + col += ms_deform_attn_im2col_bilinear(data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col) * weight; + } + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + } + } + *data_col_ptr = col; + } +} + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2]; + __shared__ scalar_t cache_grad_attn_weight[blockSize]; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 
&& w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + if (tid == 0) + { + scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0]; + int sid=2; + for (unsigned int tid = 1; tid < blockSize; ++tid) + { + _grad_w += cache_grad_sampling_loc[sid]; + _grad_h += cache_grad_sampling_loc[sid + 1]; + _grad_a += cache_grad_attn_weight[tid]; + sid += 2; + } + + + *grad_sampling_loc = _grad_w; + *(grad_sampling_loc + 1) = _grad_h; + *grad_attn_weight = _grad_a; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2]; + __shared__ scalar_t cache_grad_attn_weight[blockSize]; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + 
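+          // In-bounds sample: the bilinear helper scatters grad_value with
+          // atomics and stages this thread's location/weight partials in the
+          // shared-memory caches zeroed just above; the power-of-two tree
+          // reduction after __syncthreads() combines them across the block.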
ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s=blockSize/2; s>0; s>>=1) + { + if (tid < s) { + const unsigned int xid1 = tid << 1; + const unsigned int xid2 = (tid + s) << 1; + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1]; + } + __syncthreads(); + } + + if (tid == 0) + { + *grad_sampling_loc = cache_grad_sampling_loc[0]; + *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1]; + *grad_attn_weight = cache_grad_attn_weight[0]; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v1(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + extern __shared__ int _s[]; + scalar_t* cache_grad_sampling_loc = (scalar_t*)_s; + scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && 
h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + if (tid == 0) + { + scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0]; + int sid=2; + for (unsigned int tid = 1; tid < blockDim.x; ++tid) + { + _grad_w += cache_grad_sampling_loc[sid]; + _grad_h += cache_grad_sampling_loc[sid + 1]; + _grad_a += cache_grad_attn_weight[tid]; + sid += 2; + } + + + *grad_sampling_loc = _grad_w; + *(grad_sampling_loc + 1) = _grad_h; + *grad_attn_weight = _grad_a; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + extern __shared__ int _s[]; + scalar_t* cache_grad_sampling_loc = (scalar_t*)_s; + scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + 
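+          // Same staging pattern as the blockSize-templated kernels, but the
+          // caches live in dynamically sized shared memory (extern __shared__),
+          // and the reduction below folds in an extra `spre` remainder term so
+          // it also handles non-power-of-two block widths.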
ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1) + { + if (tid < s) { + const unsigned int xid1 = tid << 1; + const unsigned int xid2 = (tid + s) << 1; + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1]; + if (tid + (s << 1) < spre) + { + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)]; + } + } + __syncthreads(); + } + + if (tid == 0) + { + *grad_sampling_loc = cache_grad_sampling_loc[0]; + *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1]; + *grad_attn_weight = cache_grad_attn_weight[0]; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + extern __shared__ int _s[]; + scalar_t* cache_grad_sampling_loc = (scalar_t*)_s; + scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = 
data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1) + { + if (tid < s) { + const unsigned int xid1 = tid << 1; + const unsigned int xid2 = (tid + s) << 1; + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1]; + if (tid + (s << 1) < spre) + { + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)]; + } + } + __syncthreads(); + } + + if (tid == 0) + { + atomicAdd(grad_sampling_loc, cache_grad_sampling_loc[0]); + atomicAdd(grad_sampling_loc + 1, cache_grad_sampling_loc[1]); + atomicAdd(grad_attn_weight, cache_grad_attn_weight[0]); + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + + +template +__global__ void ms_deformable_col2im_gpu_kernel_gm(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; 
++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear_gm( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + grad_sampling_loc, grad_attn_weight); + } + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + + +template +void ms_deformable_im2col_cuda(cudaStream_t stream, + const scalar_t* data_value, + const int64_t* data_spatial_shapes, + const int64_t* data_level_start_index, + const scalar_t* data_sampling_loc, + const scalar_t* data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t* data_col) +{ + const int num_kernels = batch_size * num_query * num_heads * channels; + const int num_actual_kernels = batch_size * num_query * num_heads * channels; + const int num_threads = CUDA_NUM_THREADS; + ms_deformable_im2col_gpu_kernel + <<>>( + num_kernels, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, + batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, data_col); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in ms_deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); + } + +} + +template +void ms_deformable_col2im_cuda(cudaStream_t stream, + const scalar_t* grad_col, + const scalar_t* data_value, + const int64_t * data_spatial_shapes, + const int64_t * data_level_start_index, + const scalar_t * data_sampling_loc, + const scalar_t * data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t* grad_value, + scalar_t* grad_sampling_loc, + scalar_t* grad_attn_weight) +{ + const int num_threads = (channels > CUDA_NUM_THREADS)?CUDA_NUM_THREADS:channels; + const int num_kernels = batch_size * num_query * num_heads * channels; + const int num_actual_kernels = batch_size * num_query * num_heads * channels; + if (channels > 1024) + { + if ((channels & 1023) == 0) + { + ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + } + else + { + ms_deformable_col2im_gpu_kernel_gm + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + } + } + else{ + switch(channels) + { + case 1: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + 
spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 2: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 4: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 8: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 16: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 32: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 64: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 128: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 256: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 512: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 1024: + 
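+        // Like the smaller power-of-two cases above, 1024 channels get a
+        // kernel whose blockSize template parameter equals the channel count,
+        // so the shared-memory reduction bound is a compile-time constant;
+        // non-power-of-two widths fall through to `default:` below.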
ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + default: + if (channels < 64) + { + ms_deformable_col2im_gpu_kernel_shm_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + } + else + { + ms_deformable_col2im_gpu_kernel_shm_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + } + } + } + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in ms_deformable_col2im_cuda: %s\n", cudaGetErrorString(err)); + } + +} diff --git a/pllava/lib/python3.10/site-packages/transformers/kernels/deformable_detr/cuda/ms_deform_attn_cuda.h b/pllava/lib/python3.10/site-packages/transformers/kernels/deformable_detr/cuda/ms_deform_attn_cuda.h new file mode 100644 index 0000000000000000000000000000000000000000..fbcf4543e66bb1162f42ce2ae57e1bac92243cb4 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/transformers/kernels/deformable_detr/cuda/ms_deform_attn_cuda.h @@ -0,0 +1,29 @@ +/*! +************************************************************************************************** +* Deformable DETR +* Copyright (c) 2020 SenseTime. All Rights Reserved. +* Licensed under the Apache License, Version 2.0 [see LICENSE for details] +************************************************************************************************** +* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#pragma once +#include + +at::Tensor ms_deform_attn_cuda_forward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const int im2col_step); + +std::vector ms_deform_attn_cuda_backward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const at::Tensor &grad_output, + const int im2col_step); diff --git a/pllava/lib/python3.10/site-packages/transformers/kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh b/pllava/lib/python3.10/site-packages/transformers/kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh new file mode 100644 index 0000000000000000000000000000000000000000..c0db0c88c9db2c09d7f601937ea0f6ac480913bf --- /dev/null +++ b/pllava/lib/python3.10/site-packages/transformers/kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh @@ -0,0 +1,1327 @@ +/*! +************************************************************************** +* Deformable DETR +* Copyright (c) 2020 SenseTime. All Rights Reserved. 
+* Licensed under the Apache License, Version 2.0 [see LICENSE for details] +************************************************************************** +* Modified from DCN (https://github.com/msracver/Deformable-ConvNets) +* Copyright (c) 2018 Microsoft +************************************************************************** +*/ + +#include +#include +#include + +#include +#include + +#include + +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ + i < (n); \ + i += blockDim.x * gridDim.x) + +const int CUDA_NUM_THREADS = 1024; +inline int GET_BLOCKS(const int N, const int num_threads) +{ + return (N + num_threads - 1) / num_threads; +} + + +template +__device__ scalar_t ms_deform_attn_im2col_bilinear(const scalar_t* &bottom_data, + const int &height, const int &width, const int &nheads, const int &channels, + const scalar_t &h, const scalar_t &w, const int &m, const int &c) +{ + const int h_low = floor(h); + const int w_low = floor(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const scalar_t lh = h - h_low; + const scalar_t lw = w - w_low; + const scalar_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * channels + c; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + } + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + } + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + } + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + } + + const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + + +template +__device__ void ms_deform_attn_col2im_bilinear(const scalar_t* &bottom_data, + const int &height, const int &width, const int &nheads, const int &channels, + const scalar_t &h, const scalar_t &w, const int &m, const int &c, + const scalar_t &top_grad, + const scalar_t &attn_weight, + scalar_t* &grad_value, + scalar_t* grad_sampling_loc, + scalar_t* grad_attn_weight) +{ + const int h_low = floor(h); + const int w_low = floor(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const scalar_t lh = h - h_low; + const scalar_t lw = w - w_low; + const scalar_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * channels + c; + + const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + const scalar_t top_grad_value = top_grad * attn_weight; + scalar_t grad_h_weight = 0, grad_w_weight = 0; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + { + const int ptr1 = 
h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + grad_h_weight -= hw * v1; + grad_w_weight -= hh * v1; + atomicAdd(grad_value+ptr1, w1*top_grad_value); + } + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + grad_h_weight -= lw * v2; + grad_w_weight += hh * v2; + atomicAdd(grad_value+ptr2, w2*top_grad_value); + } + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + grad_h_weight += hw * v3; + grad_w_weight -= lh * v3; + atomicAdd(grad_value+ptr3, w3*top_grad_value); + } + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + grad_h_weight += lw * v4; + grad_w_weight += lh * v4; + atomicAdd(grad_value+ptr4, w4*top_grad_value); + } + + const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + *grad_attn_weight = top_grad * val; + *grad_sampling_loc = width * grad_w_weight * top_grad_value; + *(grad_sampling_loc + 1) = height * grad_h_weight * top_grad_value; +} + + +template +__device__ void ms_deform_attn_col2im_bilinear_gm(const scalar_t* &bottom_data, + const int &height, const int &width, const int &nheads, const int &channels, + const scalar_t &h, const scalar_t &w, const int &m, const int &c, + const scalar_t &top_grad, + const scalar_t &attn_weight, + scalar_t* &grad_value, + scalar_t* grad_sampling_loc, + scalar_t* grad_attn_weight) +{ + const int h_low = floor(h); + const int w_low = floor(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const scalar_t lh = h - h_low; + const scalar_t lw = w - w_low; + const scalar_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * channels + c; + + const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + const scalar_t top_grad_value = top_grad * attn_weight; + scalar_t grad_h_weight = 0, grad_w_weight = 0; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + grad_h_weight -= hw * v1; + grad_w_weight -= hh * v1; + atomicAdd(grad_value+ptr1, w1*top_grad_value); + } + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + grad_h_weight -= lw * v2; + grad_w_weight += hh * v2; + atomicAdd(grad_value+ptr2, w2*top_grad_value); + } + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + grad_h_weight += hw * v3; + grad_w_weight -= lh * v3; + atomicAdd(grad_value+ptr3, w3*top_grad_value); + } + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + grad_h_weight += lw * v4; + grad_w_weight += lh * v4; + atomicAdd(grad_value+ptr4, w4*top_grad_value); + } + + const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + 
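+  // "_gm" variant: no shared-memory staging. All channel threads of a given
+  // query/head share the same gradient slots, so the location/weight
+  // gradients are accumulated directly in global memory with atomics.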
atomicAdd(grad_attn_weight, top_grad * val); + atomicAdd(grad_sampling_loc, width * grad_w_weight * top_grad_value); + atomicAdd(grad_sampling_loc + 1, height * grad_h_weight * top_grad_value); +} + + +template +__global__ void ms_deformable_im2col_gpu_kernel(const int n, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *data_col) +{ + CUDA_KERNEL_LOOP(index, n) + { + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + scalar_t *data_col_ptr = data_col + index; + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + scalar_t col = 0; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const scalar_t *data_value_ptr = data_value + (data_value_ptr_init_offset + level_start_id * qid_stride); + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + col += ms_deform_attn_im2col_bilinear(data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col) * weight; + } + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + } + } + *data_col_ptr = col; + } +} + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2]; + __shared__ scalar_t cache_grad_attn_weight[blockSize]; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + 
grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + if (tid == 0) + { + scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0]; + int sid=2; + for (unsigned int tid = 1; tid < blockSize; ++tid) + { + _grad_w += cache_grad_sampling_loc[sid]; + _grad_h += cache_grad_sampling_loc[sid + 1]; + _grad_a += cache_grad_attn_weight[tid]; + sid += 2; + } + + + *grad_sampling_loc = _grad_w; + *(grad_sampling_loc + 1) = _grad_h; + *grad_attn_weight = _grad_a; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2]; + __shared__ scalar_t cache_grad_attn_weight[blockSize]; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int 
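+    // v2 below replaces v1's single-thread scan of the shared caches with a binary
+    // tree reduction (halving the active threads each step), which relies on the
+    // blockSize template parameter being a power of two.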
grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s=blockSize/2; s>0; s>>=1) + { + if (tid < s) { + const unsigned int xid1 = tid << 1; + const unsigned int xid2 = (tid + s) << 1; + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1]; + } + __syncthreads(); + } + + if (tid == 0) + { + *grad_sampling_loc = cache_grad_sampling_loc[0]; + *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1]; + *grad_attn_weight = cache_grad_attn_weight[0]; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v1(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + extern __shared__ int _s[]; + scalar_t* cache_grad_sampling_loc = (scalar_t*)_s; + scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const 
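+    // shm_reduce_v1 mirrors the blocksize-aware v1 kernel but sizes its caches from
+    // blockDim.x through dynamically allocated (extern) shared memory, covering
+    // channel counts that have no specialized template instantiation.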
int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + if (tid == 0) + { + scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0]; + int sid=2; + for (unsigned int tid = 1; tid < blockDim.x; ++tid) + { + _grad_w += cache_grad_sampling_loc[sid]; + _grad_h += cache_grad_sampling_loc[sid + 1]; + _grad_a += cache_grad_attn_weight[tid]; + sid += 2; + } + + + *grad_sampling_loc = _grad_w; + *(grad_sampling_loc + 1) = _grad_h; + *grad_attn_weight = _grad_a; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + extern __shared__ int _s[]; + scalar_t* cache_grad_sampling_loc = (scalar_t*)_s; + scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + 
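+    // shm_reduce_v2 also uses extern shared memory; its tree reduction folds in an
+    // extra (tid + 2s < spre) term so non-power-of-two block sizes sum correctly.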
const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1) + { + if (tid < s) { + const unsigned int xid1 = tid << 1; + const unsigned int xid2 = (tid + s) << 1; + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1]; + if (tid + (s << 1) < spre) + { + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)]; + } + } + __syncthreads(); + } + + if (tid == 0) + { + *grad_sampling_loc = cache_grad_sampling_loc[0]; + *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1]; + *grad_attn_weight = cache_grad_attn_weight[0]; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + extern __shared__ int _s[]; + scalar_t* cache_grad_sampling_loc = (scalar_t*)_s; + scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const 
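+    // _multi_blocks: when channels exceed one block, several blocks share a single
+    // (query, head, level, point) sample, so each block's reduction result is merged
+    // into the output with atomicAdd instead of a plain store.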
scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1) + { + if (tid < s) { + const unsigned int xid1 = tid << 1; + const unsigned int xid2 = (tid + s) << 1; + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1]; + if (tid + (s << 1) < spre) + { + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)]; + } + } + __syncthreads(); + } + + if (tid == 0) + { + atomicAdd(grad_sampling_loc, cache_grad_sampling_loc[0]); + atomicAdd(grad_sampling_loc + 1, cache_grad_sampling_loc[1]); + atomicAdd(grad_attn_weight, cache_grad_attn_weight[0]); + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + + +template +__global__ void ms_deformable_col2im_gpu_kernel_gm(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; 
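+    // Decode the flat index into (batch, query, head, channel): channels vary
+    // fastest, then heads, then queries; what remains is the batch index.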
+ const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear_gm( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + grad_sampling_loc, grad_attn_weight); + } + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + + +template +void ms_deformable_im2col_cuda(cudaStream_t stream, + const scalar_t* data_value, + const int64_t* data_spatial_shapes, + const int64_t* data_level_start_index, + const scalar_t* data_sampling_loc, + const scalar_t* data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t* data_col) +{ + const int num_kernels = batch_size * num_query * num_heads * channels; + const int num_actual_kernels = batch_size * num_query * num_heads * channels; + const int num_threads = CUDA_NUM_THREADS; + ms_deformable_im2col_gpu_kernel + <<>>( + num_kernels, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, + batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, data_col); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in ms_deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); + } + +} + +template +void ms_deformable_col2im_cuda(cudaStream_t stream, + const scalar_t* grad_col, + const scalar_t* data_value, + const int64_t * data_spatial_shapes, + const int64_t * data_level_start_index, + const scalar_t * data_sampling_loc, + const scalar_t * data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t* grad_value, + scalar_t* grad_sampling_loc, + scalar_t* grad_attn_weight) +{ + const int num_threads = (channels > CUDA_NUM_THREADS)?CUDA_NUM_THREADS:channels; + 
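+  // One thread per output gradient scalar; the block size is clamped to `channels`
+  // so a block's threads line up with the channels of a single (query, head) sample
+  // for the shared-memory reductions dispatched below.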
const int num_kernels = batch_size * num_query * num_heads * channels; + const int num_actual_kernels = batch_size * num_query * num_heads * channels; + if (channels > 1024) + { + if ((channels & 1023) == 0) + { + ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + } + else + { + ms_deformable_col2im_gpu_kernel_gm + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + } + } + else{ + switch(channels) + { + case 1: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 2: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 4: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 8: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 16: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 32: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 64: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + 
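+      // From 64 channels upward the tree-reduction variant (v2) is dispatched;
+      // v1's thread-0 linear scan of the caches grows with the block size.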
break; + case 128: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 256: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 512: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 1024: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + default: + if (channels < 64) + { + ms_deformable_col2im_gpu_kernel_shm_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + } + else + { + ms_deformable_col2im_gpu_kernel_shm_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + } + } + } + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in ms_deformable_col2im_cuda: %s\n", cudaGetErrorString(err)); + } + +} diff --git a/pllava/lib/python3.10/site-packages/transformers/kernels/mra/cuda_launch.cu b/pllava/lib/python3.10/site-packages/transformers/kernels/mra/cuda_launch.cu new file mode 100644 index 0000000000000000000000000000000000000000..ba2a0cacfe614e75e06d2dde80dc77a6e8a4ec1a --- /dev/null +++ b/pllava/lib/python3.10/site-packages/transformers/kernels/mra/cuda_launch.cu @@ -0,0 +1,154 @@ +#include +#include +#include "cuda_launch.h" +#include "cuda_kernel.h" +#include + +////////////////////////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////////////////////// + +std::vector index_max_kernel( + at::Tensor index_vals, // [batch_size, 32, num_block] + at::Tensor indices, // [batch_size, num_block], + int A_num_block, + int B_num_block +) { + int batch_size = indices.size(0); + int num_block = indices.size(1); + + at::Tensor max_vals = at::zeros({batch_size, A_num_block * 32}, index_vals.options()); + at::Tensor max_vals_scatter = at::zeros({batch_size, 32, num_block}, index_vals.options()); + + dim3 threads(256); + dim3 
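+  // One block per batch element, 256 threads; `shared_mem` below reserves
+  // A_num_block * 32 floats of running maxima for the max/scatter pass.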
blocks(batch_size); + int shared_mem = A_num_block * 32 * sizeof(float); + + index_max_cuda_kernel<<>>( + index_vals.data_ptr(), + indices.data_ptr(), + max_vals.data_ptr(), + max_vals_scatter.data_ptr(), + batch_size, + A_num_block, + B_num_block, + num_block + ); + + return {max_vals, max_vals_scatter}; +} + +at::Tensor mm_to_sparse_kernel( + at::Tensor dense_A, // [batch_size, A_num_block, dim, 32] + at::Tensor dense_B, // [batch_size, B_num_block, dim, 32] + at::Tensor indices // [batch_size, num_block] +) { + int batch_size = dense_A.size(0); + int A_num_block = dense_A.size(1); + int B_num_block = dense_B.size(1); + int dim = dense_A.size(2); + int num_block = indices.size(1); + + at::Tensor sparse_C = at::zeros({batch_size, num_block, 32, 32}, dense_A.options()); + + dim3 threads(64, 4); + dim3 blocks(num_block / 4, batch_size); + + mm_to_sparse_cuda_kernel<<>>( + dense_A.data_ptr(), + dense_B.data_ptr(), + indices.data_ptr(), + sparse_C.data_ptr(), + batch_size, + A_num_block, + B_num_block, + dim, + num_block + ); + + return sparse_C; +} + +at::Tensor sparse_dense_mm_kernel( + at::Tensor sparse_A, // [batch_size, num_block, 32, 32] + at::Tensor indices, // [batch_size, num_block] + at::Tensor dense_B, // [batch_size, B_num_block, dim, 32] + int A_num_block +) { + int batch_size = sparse_A.size(0); + int num_block = sparse_A.size(1); + int B_num_block = dense_B.size(1); + int dim = dense_B.size(2); + + at::Tensor dense_C = at::zeros({batch_size, A_num_block, dim, 32}, dense_B.options()); + + dim3 threads(128, 2); + dim3 blocks(num_block / 2, batch_size); + + sparse_dense_mm_cuda_kernel<<>>( + sparse_A.data_ptr(), + indices.data_ptr(), + dense_B.data_ptr(), + dense_C.data_ptr(), + batch_size, + A_num_block, + B_num_block, + dim, + num_block + ); + + return dense_C; +} + +at::Tensor reduce_sum_kernel( + at::Tensor sparse_A, // [batch_size, num_block, 32, 32] + at::Tensor indices, // [batch_size, num_block] + int A_num_block, + int B_num_block +) { + int batch_size = sparse_A.size(0); + int num_block = sparse_A.size(1); + + at::Tensor dense_C = at::zeros({batch_size, A_num_block, 32}, sparse_A.options()); + + dim3 threads(32, 4); + dim3 blocks(num_block / 4, batch_size); + + reduce_sum_cuda_kernel<<>>( + sparse_A.data_ptr(), + indices.data_ptr(), + dense_C.data_ptr(), + batch_size, + A_num_block, + B_num_block, + num_block + ); + + return dense_C; +} + +at::Tensor scatter_kernel( + at::Tensor dense_A, // [batch_size, A_num_block, 32] + at::Tensor indices, // [batch_size, num_block] + int B_num_block +) { + int batch_size = dense_A.size(0); + int A_num_block = dense_A.size(1); + int num_block = indices.size(1); + + at::Tensor sparse_C = at::zeros({batch_size, num_block, 32, 32}, dense_A.options()); + + dim3 threads(32, 4); + dim3 blocks(num_block / 4, batch_size); + + scatter_cuda_kernel<<>>( + dense_A.data_ptr(), + indices.data_ptr(), + sparse_C.data_ptr(), + batch_size, + A_num_block, + B_num_block, + num_block + ); + + return sparse_C; +} diff --git a/pllava/lib/python3.10/site-packages/transformers/kernels/rwkv/wkv_cuda.cu b/pllava/lib/python3.10/site-packages/transformers/kernels/rwkv/wkv_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..571d5a8a8307e95aac689eb3c9333d1ad350c7de --- /dev/null +++ b/pllava/lib/python3.10/site-packages/transformers/kernels/rwkv/wkv_cuda.cu @@ -0,0 +1,187 @@ +#include +#include + +#define MIN_VALUE (-1e38) + +template +__global__ void kernel_forward( + const int B, const int T, const int C, const F *__restrict__ const _w, 
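+    // _w: per-channel time decay; _u: per-channel bonus for the current token;
+    // _k/_v: keys and values laid out [B, T, C]; _y: output in the same layout.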
const F *__restrict__ const _u, + const F *__restrict__ const _k, const F *__restrict__ const _v, F *__restrict__ const _y +) { + const int idx = blockIdx.x * blockDim.x + threadIdx.x; + const int _b = idx / C; + const int _c = idx % C; + const int _offset = _b * T * C + _c; + + F u = _u[_c]; + F w = _w[_c]; + const F *__restrict__ const k = _k + _offset; + const F *__restrict__ const v = _v + _offset; + F *__restrict__ const y = _y + _offset; + + // aa and bb are running sums divided by exp(pp) (to avoid overflow) + F aa = 0, bb = 0, pp = MIN_VALUE; + for (int i = 0; i < T; i++) { + const int ii = i * C; + const F kk = k[ii]; + const F vv = v[ii]; + + F ww = u + kk; + F p = max(pp, ww); + F e1 = exp(pp - p); + F e2 = exp(ww - p); + y[ii] = (e1 * aa + e2 * vv) / (e1 * bb + e2); + + ww = w + pp; + p = max(ww, kk); + e1 = exp(ww - p); + e2 = exp(kk - p); + aa = e1 * aa + e2 * vv; + bb = e1 * bb + e2; + pp = p; + } +} + +template +__global__ void kernel_forward_with_state( + const int B, const int T, const int C, const F *__restrict__ const _w, const F *__restrict__ const _u, + const F *__restrict__ const _k, const F *__restrict__ const _v, F *__restrict__ const _y, F *__restrict__ const _s +) { + const int idx = blockIdx.x * blockDim.x + threadIdx.x; + const int _b = idx / C; + const int _c = idx % C; + const int _offset_s = _b * C * 3 + _c * 3; + const int _offset = _b * T * C + _c; + + F u = _u[_c]; + F w = _w[_c]; + const F *__restrict__ const k = _k + _offset; + const F *__restrict__ const v = _v + _offset; + F *__restrict__ const y = _y + _offset; + F *__restrict__ const s = _s + _offset_s; + + // aa and bb are running sums divided by exp(pp) (to avoid overflow) + F aa = s[0], bb = s[1], pp = s[2]; + for (int i = 0; i < T; i++) { + const int ii = i * C; + const F kk = k[ii]; + const F vv = v[ii]; + + F ww = u + kk; + F p = max(pp, ww); + F e1 = exp(pp - p); + F e2 = exp(ww - p); + y[ii] = (e1 * aa + e2 * vv) / (e1 * bb + e2); + + ww = w + pp; + p = max(ww, kk); + e1 = exp(ww - p); + e2 = exp(kk - p); + aa = e1 * aa + e2 * vv; + bb = e1 * bb + e2; + pp = p; + } + s[0] = aa; + s[1] = bb; + s[2] = pp; +} + +template +__global__ void kernel_backward( + const int B, const int T, const int C, const F *__restrict__ const _w, const F *__restrict__ const _u, + const F *__restrict__ const _k, const F *__restrict__ const _v, const F *__restrict__ const _y, + const F *__restrict__ const _gy, F *__restrict__ const _gw, F *__restrict__ const _gu, F *__restrict__ const _gk, + F *__restrict__ const _gv +) { + const int idx = blockIdx.x * blockDim.x + threadIdx.x; + const int _b = idx / C; + const int _c = idx % C; + const int _offset = _b * T * C + _c; + + F u = _u[_c]; + F w = _w[_c]; + const F *__restrict__ const k = _k + _offset; + const F *__restrict__ const v = _v + _offset; + const F *__restrict__ const y = _y + _offset; + const F *__restrict__ const gy = _gy + _offset; + F *__restrict__ const gk = _gk + _offset; + F *__restrict__ const gv = _gv + _offset; + + F q[Tmax], r[Tmax]; + + F gw = 0, gu = 0, aa = 0, bb = 0, ga = 0, gb = 0, pp = MIN_VALUE; + for (int i = 0; i < T; i++) { + const int ii = i * C; + const F kk = k[ii]; + const F vv = v[ii]; + const F yy = y[ii]; + + F ww = u + kk; + F p = max(pp, ww); + F e1 = exp(pp - p); + F e2 = exp(ww - p); + const F qq = gy[ii] / (e1 * bb + e2); + gw += (ga - gb * yy) * e1 * qq; + gu += (vv - yy) * e2 * qq; + q[i] = qq; + r[i] = ww - p; + + ww = w + pp; + p = max(ww, kk); + e1 = exp(ww - p); + e2 = exp(kk - p); + ga = e1 * (aa + ga); + gb = e1 * (bb + 
gb);
+        aa = e1 * aa + e2 * vv;
+        bb = e1 * bb + e2;
+        pp = p;
+    }
+    const int _offsetBC = _b * C + _c;
+    _gw[_offsetBC] = gw * _w[_c]; // multiply by w because of w -> -exp(w) in python forward()
+    _gu[_offsetBC] = gu;
+
+    aa = 0, bb = 0, pp = MIN_VALUE;
+    for (int i = T - 1; i >= 0; i--) {
+        const int ii = i * C;
+        const F kk = k[ii];
+        const F vv = v[ii];
+        const F yy = y[ii];
+        const F qq = q[i];
+        const F rr = r[i];
+
+        F e1 = qq * exp(rr);
+        F e2 = exp(kk + pp);
+        gk[ii] = e1 * (vv - yy) + e2 * (aa * vv + bb);
+        gv[ii] = e1 + e2 * aa;
+
+        const F ww = w + pp;
+        const F www = rr - u - kk;
+        const F p = max(ww, www);
+        e1 = exp(ww - p);
+        e2 = qq * exp(www - p);
+        aa = e1 * aa + e2;
+        bb = e1 * bb - e2 * yy;
+        pp = p;
+    }
+}
+
+void cuda_forward(int B, int T, int C, float *w, float *u, float *k, float *v, float *y) {
+    dim3 threadsPerBlock( min(C, 32) ); // requires --maxrregcount 60 for optimal performance
+    assert(B * C % threadsPerBlock.x == 0);
+    dim3 numBlocks(B * C / threadsPerBlock.x);
+    kernel_forward<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y);
+}
+
+void cuda_forward_with_state(int B, int T, int C, float *w, float *u, float *k, float *v, float *y, float *s) {
+    dim3 threadsPerBlock( min(C, 32) ); // requires --maxrregcount 60 for optimal performance
+    assert(B * C % threadsPerBlock.x == 0);
+    dim3 numBlocks(B * C / threadsPerBlock.x);
+    kernel_forward_with_state<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y, s);
+}
+
+void cuda_backward(int B, int T, int C, float *w, float *u, float *k, float *v, float *y, float *gy, float *gw, float *gu, float *gk, float *gv) {
+    dim3 threadsPerBlock( min(C, 32) ); // requires --maxrregcount 60 for optimal performance
+    assert(B * C % threadsPerBlock.x == 0);
+    dim3 numBlocks(B * C / threadsPerBlock.x);
+    kernel_backward<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y, gy, gw, gu, gk, gv);
+}
diff --git a/pllava/lib/python3.10/site-packages/transformers/kernels/rwkv/wkv_cuda_bf16.cu b/pllava/lib/python3.10/site-packages/transformers/kernels/rwkv/wkv_cuda_bf16.cu
new file mode 100644
index 0000000000000000000000000000000000000000..042cb4aba1db98be5916aea1de86a7fed0b6510d
--- /dev/null
+++ b/pllava/lib/python3.10/site-packages/transformers/kernels/rwkv/wkv_cuda_bf16.cu
@@ -0,0 +1,186 @@
+#include <stdio.h>
+#include <assert.h>
+#include "ATen/ATen.h"
+#define MIN_VALUE (-1e38)
+typedef at::BFloat16 bf16;
+
+__global__ void kernel_forward_bf16(
+    const int B, const int T, const int C, const float *__restrict__ const _w, const bf16 *__restrict__ const _u,
+    const bf16 *__restrict__ const _k, const bf16 *__restrict__ const _v, bf16 *__restrict__ const _y
+) {
+    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
+    const int _b = idx / C;
+    const int _c = idx % C;
+    const int _offset = _b * T * C + _c;
+
+    float u = float(_u[_c]);
+    float w = _w[_c];
+    const bf16 *__restrict__ const k = _k + _offset;
+    const bf16 *__restrict__ const v = _v + _offset;
+    bf16 *__restrict__ const y = _y + _offset;
+
+    // aa and bb are running sums divided by exp(pp) (to avoid overflow)
+    float aa = 0, bb = 0, pp = MIN_VALUE;
+    for (int i = 0; i < T; i++) {
+        const int ii = i * C;
+        const float kk = float(k[ii]);
+        const float vv = float(v[ii]);
+
+        float ww = u + kk;
+        float p = max(pp, ww);
+        float e1 = exp(pp - p);
+        float e2 = exp(ww - p);
+        y[ii] = bf16((e1 * aa + e2 * vv) / (e1 * bb + e2));
+
+        ww = w + pp;
+        p = max(ww, kk);
+        e1 = exp(ww - p);
+        e2 = exp(kk - p);
+        aa = e1 * aa + e2 * vv;
+        bb = e1 * bb + e2;
+        pp = p;
+    }
+}
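+
+// The *_with_state variants persist the running (aa, bb, pp) triple in `_s`
+// (shape [B, C, 3], kept in float) so a long sequence can be processed chunk by
+// chunk across successive launches.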
+
+__global__ void kernel_forward_with_state_bf16(
+    const int B, const int T, const int C, const float *__restrict__ const _w, const bf16 *__restrict__ const _u,
+    const bf16 *__restrict__ const _k, const bf16 *__restrict__ const _v, bf16 *__restrict__ const _y,
+    float *__restrict__ const _s
+) {
+    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
+    const int _b = idx / C;
+    const int _c = idx % C;
+    const int _offset_s = _b * C * 3 + _c * 3;
+    const int _offset = _b * T * C + _c;
+
+    float u = float(_u[_c]);
+    float w = _w[_c];
+    const bf16 *__restrict__ const k = _k + _offset;
+    const bf16 *__restrict__ const v = _v + _offset;
+    bf16 *__restrict__ const y = _y + _offset;
+    float *__restrict__ const s = _s + _offset_s;
+
+    // aa and bb are running sums divided by exp(pp) (to avoid overflow)
+    float aa = s[0], bb = s[1], pp = s[2];
+    for (int i = 0; i < T; i++) {
+        const int ii = i * C;
+        const float kk = float(k[ii]);
+        const float vv = float(v[ii]);
+
+        float ww = u + kk;
+        float p = max(pp, ww);
+        float e1 = exp(pp - p);
+        float e2 = exp(ww - p);
+        y[ii] = bf16((e1 * aa + e2 * vv) / (e1 * bb + e2));
+
+        ww = w + pp;
+        p = max(ww, kk);
+        e1 = exp(ww - p);
+        e2 = exp(kk - p);
+        aa = e1 * aa + e2 * vv;
+        bb = e1 * bb + e2;
+        pp = p;
+    }
+    s[0] = aa;
+    s[1] = bb;
+    s[2] = pp;
+}
+
+__global__ void kernel_backward_bf16(
+    const int B, const int T, const int C, const float *__restrict__ const _w, const bf16 *__restrict__ const _u,
+    const bf16 *__restrict__ const _k, const bf16 *__restrict__ const _v, const bf16 *__restrict__ const _y,
+    const bf16 *__restrict__ const _gy, bf16 *__restrict__ const _gw, bf16 *__restrict__ const _gu,
+    bf16 *__restrict__ const _gk, bf16 *__restrict__ const _gv
+) {
+    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
+    const int _b = idx / C;
+    const int _c = idx % C;
+    const int _offset = _b * T * C + _c;
+
+    float u = float(_u[_c]);
+    float w = _w[_c];
+    const bf16 *__restrict__ const k = _k + _offset;
+    const bf16 *__restrict__ const v = _v + _offset;
+    const bf16 *__restrict__ const y = _y + _offset;
+    const bf16 *__restrict__ const gy = _gy + _offset;
+    bf16 *__restrict__ const gk = _gk + _offset;
+    bf16 *__restrict__ const gv = _gv + _offset;
+
+    float q[Tmax], r[Tmax];
+
+    float gw = 0, gu = 0, aa = 0, bb = 0, ga = 0, gb = 0, pp = MIN_VALUE;
+    for (int i = 0; i < T; i++) {
+        const int ii = i * C;
+        const float kk = float(k[ii]);
+        const float vv = float(v[ii]);
+        const float yy = float(y[ii]);
+
+        float ww = u + kk;
+        float p = max(pp, ww);
+        float e1 = exp(pp - p);
+        float e2 = exp(ww - p);
+        const float qq = float(gy[ii]) / (e1 * bb + e2);
+        gw += (ga - gb * yy) * e1 * qq;
+        gu += (vv - yy) * e2 * qq;
+        q[i] = qq;
+        r[i] = ww - p;
+
+        ww = w + pp;
+        p = max(ww, kk);
+        e1 = exp(ww - p);
+        e2 = exp(kk - p);
+        ga = e1 * (aa + ga);
+        gb = e1 * (bb + gb);
+        aa = e1 * aa + e2 * vv;
+        bb = e1 * bb + e2;
+        pp = p;
+    }
+    const int _offsetBC = _b * C + _c;
+    _gw[_offsetBC] = bf16(gw * _w[_c]); // multiply by w because of w -> -exp(w) in python forward()
+    _gu[_offsetBC] = bf16(gu);
+
+    aa = 0, bb = 0, pp = MIN_VALUE;
+    for (int i = T - 1; i >= 0; i--) {
+        const int ii = i * C;
+        const float kk = float(k[ii]);
+        const float vv = float(v[ii]);
+        const float yy = float(y[ii]);
+        const float qq = q[i];
+        const float rr = r[i];
+
+        float e1 = qq * exp(rr);
+        float e2 = exp(kk + pp);
+        gk[ii] = bf16(e1 * (vv - yy) + e2 * (aa * vv + bb));
+        gv[ii] = bf16(e1 + e2 * aa);
+
+        const float ww = w + pp;
+        const float www = rr - u - kk;
+        const float p = max(ww, www);
+        e1 = exp(ww - p);
+        e2 = qq * exp(www - p);
+        aa = e1 * aa + e2;
+        bb = e1 * bb - e2 * yy;
+        pp = p;
+    }
+}
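+
+// Note: q[] and r[] in the backward kernel are per-thread scratch of length Tmax,
+// a macro the extension build defines (e.g. -DTmax=<context length>); it bounds
+// the sequence length a single launch can handle.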
+
+void cuda_forward_bf16(int B, int T, int C, float *w, bf16 *u, bf16 *k, bf16 *v, bf16 *y) {
+    dim3 threadsPerBlock( min(C, 32) ); // requires --maxrregcount 60 for optimal performance
+    assert(B * C % threadsPerBlock.x == 0);
+    dim3 numBlocks(B * C / threadsPerBlock.x);
+    kernel_forward_bf16<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y);
+}
+
+void cuda_forward_with_state_bf16(int B, int T, int C, float *w, bf16 *u, bf16 *k, bf16 *v, bf16 *y, float *s) {
+    dim3 threadsPerBlock( min(C, 32) ); // requires --maxrregcount 60 for optimal performance
+    assert(B * C % threadsPerBlock.x == 0);
+    dim3 numBlocks(B * C / threadsPerBlock.x);
+    kernel_forward_with_state_bf16<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y, s);
+}
+
+void cuda_backward_bf16(int B, int T, int C, float *w, bf16 *u, bf16 *k, bf16 *v, bf16 *y, bf16 *gy, bf16 *gw, bf16 *gu, bf16 *gk, bf16 *gv) {
+    dim3 threadsPerBlock( min(C, 32) ); // requires --maxrregcount 60 for optimal performance
+    assert(B * C % threadsPerBlock.x == 0);
+    dim3 numBlocks(B * C / threadsPerBlock.x);
+    kernel_backward_bf16<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y, gy, gw, gu, gk, gv);
+}
diff --git a/pllava/lib/python3.10/site-packages/transformers/kernels/rwkv/wkv_op.cpp b/pllava/lib/python3.10/site-packages/transformers/kernels/rwkv/wkv_op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..55e7280665927b523a88021d5111daf28a63c905
--- /dev/null
+++ b/pllava/lib/python3.10/site-packages/transformers/kernels/rwkv/wkv_op.cpp
@@ -0,0 +1,66 @@
+#include <torch/extension.h>
+#include "ATen/ATen.h"
+typedef at::BFloat16 bf16;
+
+void cuda_forward(int B, int T, int C, float *w, float *u, float *k, float *v, float *y);
+void cuda_forward_bf16(int B, int T, int C, float *w, bf16 *u, bf16 *k, bf16 *v, bf16 *y);
+void cuda_forward_with_state(int B, int T, int C, float *w, float *u, float *k, float *v, float *y, float *s);
+void cuda_forward_with_state_bf16(int B, int T, int C, float *w, bf16 *u, bf16 *k, bf16 *v, bf16 *y, float *s);
+void cuda_backward(int B, int T, int C, float *w, float *u, float *k, float *v, float *y, float *gy, float *gw, float *gu, float *gk, float *gv);
+void cuda_backward_bf16(int B, int T, int C, float *w, bf16 *u, bf16 *k, bf16 *v, bf16 *y, bf16 *gy, bf16 *gw, bf16 *gu, bf16 *gk, bf16 *gv);
+
+void forward(torch::Tensor &w, torch::Tensor &u, torch::Tensor &k, torch::Tensor &v, torch::Tensor &y) {
+    const int B = k.size(0);
+    const int T = k.size(1);
+    const int C = k.size(2);
+    cuda_forward(B, T, C, w.data_ptr<float>(), u.data_ptr<float>(), k.data_ptr<float>(), v.data_ptr<float>(), y.data_ptr<float>());
+}
+void forward_bf16(torch::Tensor &w, torch::Tensor &u, torch::Tensor &k, torch::Tensor &v, torch::Tensor &y) {
+    const int B = k.size(0);
+    const int T = k.size(1);
+    const int C = k.size(2);
+    cuda_forward_bf16(B, T, C, w.data_ptr<float>(), u.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), y.data_ptr<bf16>());
+}
+void forward_with_state(torch::Tensor &w, torch::Tensor &u, torch::Tensor &k, torch::Tensor &v, torch::Tensor &y, torch::Tensor &s) {
+    const int B = k.size(0);
+    const int T = k.size(1);
+    const int C = k.size(2);
+    cuda_forward_with_state(B, T, C, w.data_ptr<float>(), u.data_ptr<float>(), k.data_ptr<float>(), v.data_ptr<float>(), y.data_ptr<float>(), s.data_ptr<float>());
+}
+void forward_with_state_bf16(torch::Tensor &w, torch::Tensor &u, torch::Tensor &k, torch::Tensor &v, torch::Tensor &y, torch::Tensor &s) {
+    const int B = k.size(0);
+    const int T = k.size(1);
+    const int C = k.size(2);
+    cuda_forward_with_state_bf16(B, T, C, w.data_ptr<float>(), u.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), y.data_ptr<bf16>(), s.data_ptr<float>());
+}
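+
+// All bindings infer (B, T, C) from `k`, so k/v/y (and gy on the backward path)
+// must share the [B, T, C] layout, while w and u are per-channel [C] tensors.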
+void backward(torch::Tensor &w, torch::Tensor &u, torch::Tensor &k, torch::Tensor &v, torch::Tensor &y, torch::Tensor &gy, torch::Tensor &gw, torch::Tensor &gu, torch::Tensor &gk, torch::Tensor &gv) {
+    const int B = k.size(0);
+    const int T = k.size(1);
+    const int C = k.size(2);
+    cuda_backward(B, T, C, w.data_ptr<float>(), u.data_ptr<float>(), k.data_ptr<float>(), v.data_ptr<float>(), y.data_ptr<float>(), gy.data_ptr<float>(), gw.data_ptr<float>(), gu.data_ptr<float>(), gk.data_ptr<float>(), gv.data_ptr<float>());
+}
+void backward_bf16(torch::Tensor &w, torch::Tensor &u, torch::Tensor &k, torch::Tensor &v, torch::Tensor &y, torch::Tensor &gy, torch::Tensor &gw, torch::Tensor &gu, torch::Tensor &gk, torch::Tensor &gv) {
+    const int B = k.size(0);
+    const int T = k.size(1);
+    const int C = k.size(2);
+    cuda_backward_bf16(B, T, C, w.data_ptr<float>(), u.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), y.data_ptr<bf16>(),
+                       gy.data_ptr<bf16>(), gw.data_ptr<bf16>(), gu.data_ptr<bf16>(), gk.data_ptr<bf16>(), gv.data_ptr<bf16>());
+}
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+    m.def("forward", &forward, "wkv forward");
+    m.def("forward_bf16", &forward_bf16, "wkv forward bf16");
+    m.def("forward_with_state", &forward_with_state, "wkv forward with state");
+    m.def("forward_with_state_bf16", &forward_with_state_bf16, "wkv forward with state bf16");
+    m.def("backward", &backward, "wkv backward");
+    m.def("backward_bf16", &backward_bf16, "wkv backward bf16");
+}
+
+TORCH_LIBRARY(wkv, m) {
+    m.def("forward", forward);
+    m.def("forward_bf16", forward_bf16);
+    m.def("forward_with_state", forward_with_state);
+    m.def("forward_with_state_bf16", forward_with_state_bf16);
+    m.def("backward", backward);
+    m.def("backward_bf16", backward_bf16);
+}
diff --git a/pllava/lib/python3.10/site-packages/transformers/kernels/yoso/common.h b/pllava/lib/python3.10/site-packages/transformers/kernels/yoso/common.h
new file mode 100644
index 0000000000000000000000000000000000000000..e5085c88dd3ea9a12eec264a8c48946bf2b80b23
--- /dev/null
+++ b/pllava/lib/python3.10/site-packages/transformers/kernels/yoso/common.h
@@ -0,0 +1,10 @@
+
+#define min(a, b) ((a)<(b)?(a):(b))
+#define max(a, b) ((a)>(b)?(a):(b))
+#define ceil_divide(a, b) ((a)/(b)+((a)%(b)!=0))
+#define select(cond, a, b) ((cond)?(a):(b))
+#define PI 3.141592
+#define EPSILON 1e-8
+#define MAX_VAL 1e12
+#define MIN_VAL -1e12
+#define EMPTY_VALUE -1
diff --git a/pllava/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.h b/pllava/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd48de0ed159f49ee3afe93b12aaae719fe87688
--- /dev/null
+++ b/pllava/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.h
@@ -0,0 +1,71 @@
+#include <torch/extension.h>
+#include <ATen/ATen.h>
+#include <vector>
+
+std::vector<at::Tensor> fast_hash_ver1_kernel(
+  at::Tensor query_mask,
+  at::Tensor query_vector,
+  at::Tensor key_mask,
+  at::Tensor key_vector,
+  int num_hash_f,
+  int hash_code_len,
+  bool use_cuda
+);
+
+at::Tensor lsh_cumulation_ver1_kernel(
+  at::Tensor query_mask,
+  at::Tensor query_hash_code,
+  at::Tensor key_mask,
+  at::Tensor key_hash_code,
+  at::Tensor value,
+  int hashtable_capacity,
+  bool use_cuda
+);
+
+at::Tensor lsh_weighted_cumulation_ver1_kernel(
+  at::Tensor query_mask,
+  at::Tensor query_hash_code,
+  at::Tensor query_weight,
+  at::Tensor key_mask,
+  at::Tensor key_hash_code,
+  at::Tensor key_weight,
+  at::Tensor value,
+  int hashtable_capacity,
+  bool use_cuda
+);
+
+at::Tensor lsh_weighted_cumulation_ver2_kernel(
+  at::Tensor query_mask,
+  at::Tensor query_hash_code,
+  at::Tensor query_weight,
+  at::Tensor key_mask,
+ at::Tensor key_hash_code, + at::Tensor key_weight, + at::Tensor value, + int hashtable_capacity, + bool use_cuda +); + +at::Tensor lsh_weighted_cumulation_ver3_kernel( + at::Tensor query_mask, + at::Tensor query_hash_code, + at::Tensor query_weight, + at::Tensor key_mask, + at::Tensor key_hash_code, + at::Tensor key_weight, + at::Tensor value, + int hashtable_capacity, + bool use_cuda +); + +at::Tensor lsh_weighted_cumulation_ver4_kernel( + at::Tensor query_mask, + at::Tensor query_hash_code, + at::Tensor query_weight, + at::Tensor key_mask, + at::Tensor key_hash_code, + at::Tensor key_weight, + at::Tensor value, + int hashtable_capacity, + bool use_cuda +); diff --git a/pllava/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_cuda.h b/pllava/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_cuda.h new file mode 100644 index 0000000000000000000000000000000000000000..b2adc0f735358d0fcb6a056e7d19ba745977e129 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_cuda.h @@ -0,0 +1,157 @@ +__global__ void fast_hash_ver1_cuda_kernel( + int *mask, // [batch_size, num_vector] + float *vector, // [batch_size, num_vector, vector_dim] + int *Dmat, // [3, num_part, vector_dim] + int *hash_code, // [batch_size, num_vector, num_hash_f] + int batch_size, + int num_vector, + int vector_dim, + int num_part, + int num_hash_f, + int hash_code_len +); + +__global__ void lsh_cumulation_ver1_step1_cuda_kernel( + int *key_mask, // [batch_size, num_key] + int *key_hash_code, // [batch_size, num_key, num_hash_f] + float *value, // [batch_size, num_key, value_dim] + float *hashtable_value, // [batch_size, num_hash_f, hashtable_capacity, value_dim] + int batch_size, + int num_hash_f, + int hashtable_capacity, + int num_key, + int value_dim, + int offset_warp +); + +__global__ void lsh_cumulation_ver1_step2_cuda_kernel( + int *query_mask, // [batch_size, num_query] + int *query_hash_code, // [batch_size, num_query, num_hash_f] + float *hashtable_value, // [batch_size, num_hash_f, hashtable_capacity, value_dim] + float *cumulation_value, // [batch_size, num_query, value_dim] + int batch_size, + int num_hash_f, + int hashtable_capacity, + int num_query, + int value_dim, + int offset_warp +); + +__global__ void lsh_weighted_cumulation_ver1_step1_cuda_kernel( + int *key_mask, // [batch_size, num_key] + int *key_hash_code, // [batch_size, num_key, num_hash_f] + float *key_weight, // [batch_size, num_key, weight_dim] + float *value, // [batch_size, num_key, value_dim] + float *hashtable_value, // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE] + int batch_size, + int num_hash_f, + int hashtable_capacity, + int num_key, + int value_dim, + int weight_dim, + int offset_warp, + int weight_idx +); + +__global__ void lsh_weighted_cumulation_ver1_step2_cuda_kernel( + int *query_mask, // [batch_size, num_query] + int *query_hash_code, // [batch_size, num_query, num_hash_f] + float *query_weight, // [batch_size, num_query, weight_dim] + float *hashtable_value, // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE] + float *cumulation_value, // [batch_size, num_query, value_dim] + int batch_size, + int num_hash_f, + int hashtable_capacity, + int num_query, + int value_dim, + int weight_dim, + int offset_warp, + int weight_idx +); + +__global__ void count_sort_step1_cuda_kernel( + int *key_mask, // [batch_size, num_key] + int *key_hash_code, // [batch_size, num_key, num_hash_f] + int *count_sort_table, // [batch_size, 
num_hash_f, hashtable_capacity] + int batch_size, + int num_hash_f, + int hashtable_capacity, + int num_key +); + +__global__ void count_sort_step2_cuda_kernel( + int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity] + int batch_size, + int num_hash_f, + int hashtable_capacity +); + +__global__ void count_sort_step3_cuda_kernel( + int *key_mask, // [batch_size, num_key] + int *key_hash_code, // [batch_size, num_key, num_hash_f] + int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity] + int *key_sorted_idxes, // [batch_size, num_hash_f, num_key] + int batch_size, + int num_hash_f, + int hashtable_capacity, + int num_key +); + +__global__ void extract_query_info_cuda_kernel( + int *query_mask, // [batch_size, num_query] + int *query_hash_code, // [batch_size, num_query, num_hash_f] + int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity] + int *query_info, // [batch_size, num_query, 2, num_hash_f] + int batch_size, + int num_hash_f, + int hashtable_capacity, + int num_query +); + +__global__ void lsh_weighted_cumulation_ver2_step2_cuda_kernel( + int *query_mask, // [batch_size, num_query] + int *query_info, // [batch_size, num_query, 2, num_hash_f] + int *key_sorted_idxes, // [batch_size, num_hash_f, num_key] + float *query_weight, // [batch_size, num_query, weight_dim] + float *key_weight, // [batch_size, num_key, weight_dim] + float *value, // [batch_size, num_key, value_dim] + float *cumulation_value, // [batch_size, num_query, value_dim] + int batch_size, + int num_hash_f, + int num_query, + int num_key, + int value_dim, + int weight_dim +); + +__global__ void lsh_weighted_cumulation_ver3_step2_cuda_kernel( + int *query_sorted_idxes, // [batch_size, num_hash_f, num_query] + int *key_mask, // [batch_size, num_key] + int *key_info, // [batch_size, num_key, 2, num_hash_f] + float *query_weight, // [batch_size, num_query, weight_dim] + float *key_weight, // [batch_size, num_key, weight_dim] + float *value, // [batch_size, num_key, value_dim] + float *cumulation_value, // [batch_size, num_query, value_dim] + int batch_size, + int num_hash_f, + int num_query, + int num_key, + int value_dim, + int weight_dim +); + +__global__ void lsh_weighted_cumulation_ver4_step2_cuda_kernel( + int *query_sorted_idxes, // [batch_size, num_hash_f, num_query] + int *key_mask, // [batch_size, num_key] + int *key_info, // [batch_size, num_key, 2, num_hash_f] + float *query_weight, // [batch_size, num_query, weight_dim] + float *key_weight, // [batch_size, num_key, weight_dim] + float *value, // [batch_size, num_key, value_dim] + float *cumulation_value, // [batch_size, num_query, value_dim] + int batch_size, + int num_hash_f, + int num_query, + int num_key, + int value_dim, + int weight_dim +); diff --git a/pllava/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_torch.cpp b/pllava/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_torch.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e150a2be604b28f600ab345a8cc9e97819cca416 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_torch.cpp @@ -0,0 +1,128 @@ +#include +#include +#include "fast_lsh_cumulation.h" +#include "common_cuda.h" +#include + +std::vector fast_hash( + at::Tensor query_mask, + at::Tensor query_vector, + at::Tensor key_mask, + at::Tensor key_vector, + int num_hash_f, + int hash_code_len, + bool use_cuda, + int version +) { + return fast_hash_ver1_kernel( + query_mask, + 
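+    // `version` is accepted for signature symmetry with the other entry points,
+    // but only ver1 of fast_hash exists, so the argument is ignored here.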
query_vector, + key_mask, + key_vector, + num_hash_f, + hash_code_len, + use_cuda + ); +} + +at::Tensor lsh_cumulation( + at::Tensor query_mask, // [batch_size, num_query] + at::Tensor query_hash_code, // [batch_size, num_query, num_hash_f] + at::Tensor key_mask, // [batch_size, num_key] + at::Tensor key_hash_code, // [batch_size, num_key, num_hash_f] + at::Tensor value, // [batch_size, num_key, value_dim] + int hashtable_capacity, + bool use_cuda, + int version +) { + return lsh_cumulation_ver1_kernel( + query_mask, + query_hash_code, + key_mask, + key_hash_code, + value, + hashtable_capacity, + use_cuda + ); +} + +at::Tensor lsh_weighted_cumulation( + at::Tensor query_mask, // [batch_size, num_query] + at::Tensor query_hash_code, // [batch_size, num_query, num_hash_f] + at::Tensor query_weight, // [batch_size, num_query, weight_dim] + at::Tensor key_mask, // [batch_size, num_key] + at::Tensor key_hash_code, // [batch_size, num_key, num_hash_f] + at::Tensor key_weight, // [batch_size, num_key, weight_dim] + at::Tensor value, // [batch_size, num_key, value_dim] + int hashtable_capacity, + bool use_cuda, + int version +) { + if (version == 1) { + return lsh_weighted_cumulation_ver1_kernel( + query_mask, + query_hash_code, + query_weight, + key_mask, + key_hash_code, + key_weight, + value, + hashtable_capacity, + use_cuda + ); + } else if (version == 2) { + return lsh_weighted_cumulation_ver2_kernel( + query_mask, + query_hash_code, + query_weight, + key_mask, + key_hash_code, + key_weight, + value, + hashtable_capacity, + use_cuda + ); + } else if (version == 3) { + return lsh_weighted_cumulation_ver3_kernel( + query_mask, + query_hash_code, + query_weight, + key_mask, + key_hash_code, + key_weight, + value, + hashtable_capacity, + use_cuda + ); + } else if (version == 4) { + return lsh_weighted_cumulation_ver4_kernel( + query_mask, + query_hash_code, + query_weight, + key_mask, + key_hash_code, + key_weight, + value, + hashtable_capacity, + use_cuda + ); + } else { + return lsh_weighted_cumulation_ver3_kernel( + query_mask, + query_hash_code, + query_weight, + key_mask, + key_hash_code, + key_weight, + value, + hashtable_capacity, + use_cuda + ); + } +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("fast_hash", &fast_hash, "Fast Hash (CUDA)"); + m.def("lsh_cumulation", &lsh_cumulation, "LSH Cumulation (CUDA)"); + m.def("lsh_weighted_cumulation", &lsh_weighted_cumulation, "LSH Weighted Cumulation (CUDA)"); +}
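+// Usage sketch (illustrative only, not part of the vendored sources): once built
+// with torch.utils.cpp_extension, the module is callable from Python, e.g.
+//   import fast_lsh_cumulation as lsh  # the extension name depends on the build
+//   q_code, k_code = lsh.fast_hash(q_mask, q_vec, k_mask, k_vec, num_hash_f, code_len, True, 1)
+//   out = lsh.lsh_cumulation(q_mask, q_code, k_mask, k_code, value, 2 ** code_len, True, 1)
+// Sizing the hashtable as 2 ** code_len is an assumption about the caller, not
+// something these kernels enforce.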