language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | dagster-io__dagster | python_modules/libraries/dagster-deltalake/dagster_deltalake/config.py | {
"start": 533,
"end": 2052
} | class ____(Config):
"""Storage configuration for Microsoft Azure Blob or ADLS Gen 2 object store."""
provider: Literal["azure"] = "azure"
account_name: str
"""Storage account name"""
client_id: Optional[str] = None
"""Client ID for ID / secret based authentication."""
client_secret: Optional[str] = None
"""Client secret for ID / secret based authentication."""
tenant_id: Optional[str] = None
"""Tenant ID for ID / secret based authentication."""
federated_token_file: Optional[str] = None
"""File containing federated credential token"""
account_key: Optional[str] = None
"""Storage account master key"""
sas_key: Optional[str] = None
"""Shared access signature"""
token: Optional[str] = None
"""Hard-coded bearer token"""
use_azure_cli: Optional[bool] = None
"""Use azure cli for acquiring access token"""
use_fabric_endpoint: Optional[bool] = None
"""Use object store with url scheme account.dfs.fabric.microsoft.com"""
msi_resource_id: Optional[str] = None
"""Msi resource id for use with managed identity authentication."""
msi_endpoint: Optional[str] = None
"""Endpoint to request a imds managed identity token."""
container_name: Optional[str] = None
"""Storage container name"""
def str_dict(self) -> dict[str, str]:
"""Storage options as str dict."""
return _to_str_dict(self.dict())
# TODO add documentation and config to handle atomic writes with S3
| AzureConfig |
python | spack__spack | lib/spack/spack/vendor/jsonschema/validators.py | {
"start": 758,
"end": 3383
} | class ____(Exception):
"""
Raised when a Validators with non-default type checker is misused.
Asking one for DEFAULT_TYPES doesn't make sense, since type checkers
exist for the unrepresentable cases where DEFAULT_TYPES can't
represent the type relationship.
"""
def __str__(self):
return "DEFAULT_TYPES cannot be used on Validators using TypeCheckers"
validators = {}
meta_schemas = _utils.URIDict()
def _generate_legacy_type_checks(types=()):
"""
Generate newer-style type checks out of JSON-type-name-to-type mappings.
Arguments:
types (dict):
A mapping of type names to their Python types
Returns:
A dictionary of definitions to pass to `TypeChecker`
"""
types = dict(types)
def gen_type_check(pytypes):
pytypes = _utils.flatten(pytypes)
def type_check(checker, instance):
if isinstance(instance, bool):
if bool not in pytypes:
return False
return isinstance(instance, pytypes)
return type_check
definitions = {}
for typename, pytypes in iteritems(types):
definitions[typename] = gen_type_check(pytypes)
return definitions
_DEPRECATED_DEFAULT_TYPES = {
u"array": list,
u"boolean": bool,
u"integer": int_types,
u"null": type(None),
u"number": numbers.Number,
u"object": dict,
u"string": str_types,
}
_TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES = _types.TypeChecker(
type_checkers=_generate_legacy_type_checks(_DEPRECATED_DEFAULT_TYPES),
)
def validates(version):
"""
Register the decorated validator for a ``version`` of the specification.
Registered validators and their meta schemas will be considered when
parsing ``$schema`` properties' URIs.
Arguments:
version (str):
An identifier to use as the version's name
Returns:
collections.Callable:
a class decorator to decorate the validator with the version
"""
def _validates(cls):
validators[version] = cls
meta_schema_id = cls.ID_OF(cls.META_SCHEMA)
if meta_schema_id:
meta_schemas[meta_schema_id] = cls
return cls
return _validates
def _DEFAULT_TYPES(self):
if self._CREATED_WITH_DEFAULT_TYPES is None:
raise _DontDoThat()
warn(
(
"The DEFAULT_TYPES attribute is deprecated. "
"See the type checker attached to this validator instead."
),
DeprecationWarning,
stacklevel=2,
)
return self._DEFAULT_TYPES
| _DontDoThat |
python | kamyu104__LeetCode-Solutions | Python/maximum-xor-after-operations.py | {
"start": 48,
"end": 224
} | class ____(object):
def maximumXOR(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return reduce(lambda x, y: x|y, nums)
| Solution |
python | geekcomputers__Python | insta_monitering/insta_api.py | {
"start": 390,
"end": 1522
} | class ____(tornado.web.RequestHandler):
executor = ThreadPoolExecutor(max_workers=MAX_WORKERS)
@run_on_executor
def background_task(self, user, tags, type, productId):
try:
instasubprocess(user=user, tags=tags, type=type, productId=productId)
except:
print("error::background_task>>", sys.exc_info()[1])
@coroutine
def get(self):
try:
q = self.get_argument("q")
user = self.get_argument("userId")
type = self.get_argument("type")
productId = self.get_argument("productId")
except:
self.send_error(400)
if " " in q:
q = q.replace(" ", "")
self.background_task(user=user, tags=q, type=type, productId=productId)
temp = {}
temp["query"] = q
temp["userId"] = user
temp["status"] = True
temp["productId"] = productId
print(
"{0}, {1}, {2}, {3}".format(
temp["userId"], temp["productId"], temp["query"], temp["status"]
)
)
self.write(ujson.dumps(temp))
| StartHandlerinsta |
python | python__mypy | mypy/suggestions.py | {
"start": 34297,
"end": 39110
} | class ____(TypeTranslator):
def visit_any(self, t: AnyType) -> Type:
if not t.missing_import_name:
return t.copy_modified(type_of_any=TypeOfAny.suggestion_engine)
else:
return t
def visit_type_alias_type(self, t: TypeAliasType) -> Type:
return t.copy_modified(args=[a.accept(self) for a in t.args])
def generate_type_combinations(types: list[Type]) -> list[Type]:
"""Generate possible combinations of a list of types.
mypy essentially supports two different ways to do this: joining the types
and unioning the types. We try both.
"""
joined_type = join_type_list(types)
union_type = make_simplified_union(types)
if joined_type == union_type:
return [joined_type]
else:
return [joined_type, union_type]
def count_errors(msgs: list[str]) -> int:
return len([x for x in msgs if " error: " in x])
def refine_type(ti: Type, si: Type) -> Type:
"""Refine `ti` by replacing Anys in it with information taken from `si`
This basically works by, when the types have the same structure,
traversing both of them in parallel and replacing Any on the left
with whatever the type on the right is. If the types don't have the
same structure (or aren't supported), the left type is chosen.
For example:
refine(Any, T) = T, for all T
refine(float, int) = float
refine(List[Any], List[int]) = List[int]
refine(Dict[int, Any], Dict[Any, int]) = Dict[int, int]
refine(Tuple[int, Any], Tuple[Any, int]) = Tuple[int, int]
refine(Callable[[Any], Any], Callable[[int], int]) = Callable[[int], int]
refine(Callable[..., int], Callable[[int, float], Any]) = Callable[[int, float], int]
refine(Optional[Any], int) = Optional[int]
refine(Optional[Any], Optional[int]) = Optional[int]
refine(Optional[Any], Union[int, str]) = Optional[Union[int, str]]
refine(Optional[List[Any]], List[int]) = List[int]
"""
t = get_proper_type(ti)
s = get_proper_type(si)
if isinstance(t, AnyType):
# If s is also an Any, we return if it is a missing_import Any
return t if isinstance(s, AnyType) and t.missing_import_name else s
if isinstance(t, Instance) and isinstance(s, Instance) and t.type == s.type:
return t.copy_modified(args=[refine_type(ta, sa) for ta, sa in zip(t.args, s.args)])
if (
isinstance(t, TupleType)
and isinstance(s, TupleType)
and t.partial_fallback == s.partial_fallback
and len(t.items) == len(s.items)
):
return t.copy_modified(items=[refine_type(ta, sa) for ta, sa in zip(t.items, s.items)])
if isinstance(t, CallableType) and isinstance(s, CallableType):
return refine_callable(t, s)
if isinstance(t, UnionType):
return refine_union(t, s)
# TODO: Refining of builtins.tuple, Type?
return t
def refine_union(t: UnionType, s: ProperType) -> Type:
"""Refine a union type based on another type.
This is done by refining every component of the union against the
right hand side type (or every component of its union if it is
one). If an element of the union is successfully refined, we drop it
from the union in favor of the refined versions.
"""
# Don't try to do any union refining if the types are already the
# same. This prevents things like refining Optional[Any] against
# itself and producing None.
if t == s:
return t
rhs_items = s.items if isinstance(s, UnionType) else [s]
new_items = []
for lhs in t.items:
refined = False
for rhs in rhs_items:
new = refine_type(lhs, rhs)
if new != lhs:
new_items.append(new)
refined = True
if not refined:
new_items.append(lhs)
# Turn strict optional on when simplifying the union since we
# don't want to drop Nones.
with state.strict_optional_set(True):
return make_simplified_union(new_items)
def refine_callable(t: CallableType, s: CallableType) -> CallableType:
"""Refine a callable based on another.
See comments for refine_type.
"""
if t.fallback != s.fallback:
return t
if t.is_ellipsis_args and not is_tricky_callable(s):
return s.copy_modified(ret_type=refine_type(t.ret_type, s.ret_type))
if is_tricky_callable(t) or t.arg_kinds != s.arg_kinds:
return t
return t.copy_modified(
arg_types=[refine_type(ta, sa) for ta, sa in zip(t.arg_types, s.arg_types)],
ret_type=refine_type(t.ret_type, s.ret_type),
)
T = TypeVar("T")
def dedup(old: list[T]) -> list[T]:
new: list[T] = []
for x in old:
if x not in new:
new.append(x)
return new
| MakeSuggestionAny |
python | pytorch__pytorch | torch/_dynamo/output_graph.py | {
"start": 18064,
"end": 117138
} | class ____(OutputGraphCommon):
"""
Wrapper class to hold outputs of InstructionTranslator. Mainly the
generated fx.Graph.
OutputGraph is 1:1 with a frame being processed. Each frame is associated
with some root InstructionTranslator. When user code calls a function,
we construct a InliningInstructionTranslator that continues to write into
the root InstructionTranslator's OutputGraph.
"""
side_effects: SideEffects
def __init__(
self,
code_options: dict[str, Any],
compiler_fn: Optional[CompilerFn],
root_tx: "InstructionTranslatorBase",
export: bool,
export_constraints: Sequence[_ConstraintTarget],
frame_state: Any,
local_scope: Scope,
global_scope: Scope,
f_code: CodeType,
torch_function_mode_stack: list[torch.overrides.TorchFunctionMode],
package: Optional["CompilePackage"],
one_graph: bool = False,
) -> None:
OutputGraphGuardsState.__init__(
self,
local_scope,
global_scope,
torch_function_mode_stack,
guard_on_key_order=set(),
input_source_to_sizes_strides={},
dual_level=torch.autograd.forward_ad._current_level,
functorch_layers=torch._functorch.pyfunctorch.retrieve_all_functorch_interpreters(),
current_device=torch.utils._device.CURRENT_DEVICE,
# initial_global_state is only None during NopTest.
global_state_guard=torch._dynamo.convert_frame.initial_global_state
or torch._C._dynamo.guards.GlobalStateGuard(),
# These are set by @property instead, just initialize them as blank
_guards=torch._guards.GuardsSet(),
_aotautograd_guards=[],
)
self.tracers = [SubgraphTracer(self, is_export=export)]
# Map from graph input's `Source` to its `VariableTracker` to
# de-duplicate graph inputs by source and reuse the tracker
self.input_source_to_var: dict[Source, VariableTracker] = {}
self.export = export
self.export_constraints = export_constraints # type: ignore[assignment]
self.frame_state = frame_state
self.cleanup_hooks: list[Callable[[], Any]] = []
# compile_id is an id number for the current torch.compile
self.compile_id: int = next(_compile_id_counter)
# Set of globals installed via install_global* APIs
self.installed_globals: set[str] = set()
# TODO: maybe should just pass the entire f_code in here? Not
# sure...
self.co_fields = {
"co_name": f_code.co_name,
"co_filename": f_code.co_filename,
"co_firstlineno": f_code.co_firstlineno,
}
self.region_tracker = GraphRegionTracker()
# tracked_fakes says where any tensor that was wrapped to fake came
# from. It is similar to GraphArg, in that all GraphArgs will get
# will get added to TrackedFakes, but TrackedFakes also contains
# GraphArgs that got pruned, and things like Tensor attributes which
# aren't explicit graph inputs. Used by shape guard
self.tracked_fakes: list[TrackedFake] = []
shape_env = ShapeEnv(
# Reference Cycle!
# Share a reference to the list of TrackedFake.
#
# ShapeEnv needs this in order to be able to reproduce the call
# to produce_guards at an arbitrary time point. That is because
# TrackedFake instances may have its metadata changed throughout
# the program execution.
tracked_fakes=self.tracked_fakes,
# We want to allow capture scalar outputs and allow_dynamic_output_shape_ops when fullgraph=True
allow_scalar_outputs=one_graph or config.capture_scalar_outputs,
allow_dynamic_output_shape_ops=one_graph
or config.capture_dynamic_output_shape_ops,
prefer_deferred_runtime_asserts_over_guards=config.prefer_deferred_runtime_asserts_over_guards,
co_fields=self.co_fields,
)
# In export mode, we force the shape_env to strictly disallow any constraining
# of the user marked dynamic dims
import torch._functorch.config as _config
with _config.patch(fake_tensor_allow_unsafe_data_ptr_access=False):
fake_mode = torch._subclasses.FakeTensorMode(
shape_env=shape_env,
# TODO (tmanlaibaatar) Remove this once we always lift params and buffers
allow_non_fake_inputs=bool(self.export),
export=self.export,
)
self.tracing_context: TracingContext = TracingContext(fake_mode)
self.tracing_context.traced_code.append(f_code)
self.traced_code = self.tracing_context.traced_code
self.dynamo_compile_id: Optional[CompileId] = (
CompileContext.current_compile_id()
)
self.init_ambient_guards()
# Map each tensor id to a list of sources. This is necessary because
# tensor ids cannot be recovered from tracked fakes (in general).
# We use this map to interpret (i.e., check for violations of) constraints,
# specifically equality constraints, which have shared tensor ids in them.
# This map should also be generally useful, e.g., for (de)serialization.
self.tracked_fakes_id_to_source: dict[int, list[Source]] = (
collections.defaultdict(list)
)
# Stores the full fqn of a param or buffer to the relevant source.
self.param_name_to_source: Optional[dict[str, Source]] = {}
self.side_effects = SideEffects(self)
# Cached variable trackers. This makes symbolic analysis of LOAD_GLOBAL
# and LOAD_ATTR for same python objects free.
self.variable_tracker_cache = VariableTrackerCache()
self.unique_var_id = itertools.count()
self.code_options: dict[str, Any] = dict(code_options)
self.output_instructions: list[Instruction] = []
# used to track nodes that are added between calls of copy_graphstate
# and restore_graphstate
self.timestamp = 0
# A list of register_finalizer_fns to apply to the output graph module
self.register_finalizer_fns: list[Callable[[fx.GraphModule], None]] = []
# Not checkpointed
self.compiler_fn: Optional[CompilerFn] = compiler_fn
self.root_tx = root_tx
self.package = package
# Given a source, what are the user stacks of all locations that
# accessed it?
#
# For efficiency, we only populate this:
# - During export, and
# - If the source could potentially lead to a spurious export input
#
# Feel free to populate this more frequently if other use-cases arise,
# but be aware that we have to generate full stacks for each
# recording!
self.source_to_user_stacks: dict[Source, list[traceback.StackSummary]] = {}
self._current_tx: list[InstructionTranslatorBase] = []
self.cleanups: list[CleanupHook] = []
self.should_exit = False
self.unspec_variable_map: dict[str, UnspecializedPythonVariable] = {}
# This returns false if TF Overall (both mode and subclass) is disabled OR that TF Mode stack is empty
self.torch_function_mode_enabled = torch._C._is_torch_function_mode_enabled()
# Tracks if the output graph has a user defined allowed function in the
# graph. This is used later to determine if we should fallback to eager
# for certain exceptions. THe idea is that if the user has applied
# allow_in_graph, they would like to see the error instead of falling
# back for backend errors.
self.has_user_defined_allowed_in_graph = False
# Tracks a list of called ops that were not tagged with "pt2_compliant_tag".
# This information is useful for logging.
self.non_compliant_ops: set[torch._ops.OpOverload] = set({})
# Tracks a list of called custom ops that were tagged with "pt2_compliant_tag".
# This information is useful for logging.
self.compliant_custom_ops: set[torch._ops.OpOverload] = set({})
# We save the global torch state here to be restored in case of graph
# breaks. The relevant issue is seen here
# https://github.com/pytorch/pytorch/pull/100570#issuecomment-1543427086
# where inlining of a function changes the global state (because of the
# presence of torch.no_grad) and there is a graph break.
self.save_global_state()
# Tracks the original FQNs of the constant tensors from the original graph,
# i.e. buffers and parameters.
self.dynamo_flat_name_to_original_fqn: dict[str, str] = {}
# All calls to random() are replaced with a single call to __gen_rand_values
# functions that returns a tuple of random values for each original call.
# random_calls tracks calls to random() and random_values_var stores the name of
# the variable that stores __gen_rand_values results.
self.random_calls: list[
tuple[Callable[..., object], tuple[object, ...], dict[str, object]]
] = []
self.random_values_var: Any = None
# Bytecode to insert right before we call the graph
self.pregraph_bytecode: list[Instruction] = []
# Use to pass values to backward hooks when using compiled autograd
self.backward_state: dict[str, VariableTracker] = {}
self.backward_state_proxy: Optional[torch.fx.Proxy] = None
self.backward_state_var: Optional[str] = None
# pyrefly: ignore [bad-override]
self.name_of_builtins_dict_key_in_fglobals: str = (
self.install_builtins_dict_in_fglobals()
)
self.compiler_trace_stack = contextlib.ExitStack()
# These are the ambient, currently-global saved_tensor_hooks stashed in autograd,
# that are set for the entire duration of the compiled region.
# This is an invariant today because we graph break on the saved_tensor_hook
# context manager inside a compiled region
self.saved_tensors_hooks_subgraph_names: Optional[list[str]] = (
self.maybe_install_saved_tensors_hooks_subgraphs()
)
# mangled alias -> module fqn name
self.import_sources: dict[str, str] = {}
self.export_metadata = ExportMetaData()
# Set of inlined unspecialized modules names to generate the
# dynamo_flat_name_to_original_fqn mapping.
self.used_inlined_inbuilt_modules_names: OrderedSet[str] = OrderedSet()
def mark_bytecode_tracing_start(self) -> None:
self.compiler_trace_stack.enter_context(
dynamo_timed(
"bytecode_tracing",
log_pt2_compile_event=True,
)
)
def mark_bytecode_tracing_stop(self) -> None:
self.compiler_trace_stack.close()
def install_builtins_dict_in_fglobals(self) -> str:
f_builtins = get_builtins_dict(self.global_scope)
return self.install_global("__builtins_dict__", f_builtins)
def add_backward_state_hook(
self, hook: VariableTracker, prefix: str = "hook"
) -> tuple[str, torch.fx.Proxy]:
name = f"{prefix}{len(self.backward_state)}"
assert name not in self.backward_state
self.backward_state[name] = hook
return name, self.get_backward_state_proxy()
def get_backward_state_proxy(self) -> torch.fx.Proxy:
if self.backward_state_proxy is None:
if self.export:
unimplemented(
gb_type="backward_state does not support export",
context="",
explanation="Compiled autograd doesn't work with `torch.export`.",
hints=[],
)
example_value = BackwardState()
self.backward_state_proxy = self.root_tracer.create_graph_input(
"dynamo_backward_state",
type(example_value),
example_value,
source=BackwardStateSource(),
)
self.backward_state_proxy.node.meta["grapharg"] = BackwardStateGraphArg()
self.backward_state_var = self.new_var()
return self.backward_state_proxy
# This gets its own helper function so guards DEBUG logs are more informative
def init_ambient_guards(self) -> None:
# Register a SHAPE_ENV guard to make sure we setup shape guards
# that show up in ShapeEnv
self.guards.add(ShapeEnvSource().make_guard(GuardBuilder.SHAPE_ENV))
self.guards.add(
GlobalStateSource().make_guard(GuardBuilder.DETERMINISTIC_ALGORITHMS)
)
self.guards.add(GlobalStateSource().make_guard(GuardBuilder.GRAD_MODE))
self.guards.add(GlobalStateSource().make_guard(GuardBuilder.DEFAULT_DEVICE))
self.guards.add(GlobalStateSource().make_guard(GuardBuilder.GLOBAL_STATE))
self.guards.add(
GlobalStateSource().make_guard(GuardBuilder.TORCH_FUNCTION_STATE)
)
ci = torch._C._functorch.peek_interpreter_stack()
if ci is not None:
self.guards.add(
GlobalStateSource().make_guard(GuardBuilder.FUNCTORCH_STACK_MATCH)
)
if not torch._dynamo.compiled_autograd.in_compiled_autograd_region:
self.guards.add(
GlobalStateSource().make_guard(
GuardBuilder.AUTOGRAD_SAVED_TENSORS_HOOKS
)
)
def maybe_install_saved_tensors_hooks_subgraphs(self) -> Optional[list[str]]:
if torch._dynamo.compiled_autograd.in_compiled_autograd_region:
return None
get_hooks = torch._functorch._aot_autograd.utils.top_saved_tensors_hooks
are_inline_hooks = (
torch._functorch._aot_autograd.utils.saved_tensors_hooks_are_inlineable
)
hooks = get_hooks()
if not are_inline_hooks(hooks):
return None
# If GraphModule provided by user contains fx.wrap,
# We can only rely on user provided cache hash in this case.
# If user did not provide cache hash - then we always bypass cache.
pack_gm, unpack_gm = hooks
pack_subgraph_name = self.install_subgraph(
"saved_tensors_hooks_pack",
torch.fx.GraphModule(self.nn_modules, pack_gm.graph),
)
unpack_subgraph_name = self.install_subgraph(
"saved_tensors_hooks_unpack",
torch.fx.GraphModule(self.nn_modules, unpack_gm.graph),
)
assert pack_subgraph_name == "saved_tensors_hooks_pack_0"
assert unpack_subgraph_name == "saved_tensors_hooks_unpack_0"
return [pack_subgraph_name, unpack_subgraph_name]
def synthetic_graph_input(
self, fn: Callable[..., Any], args: tuple[Any, ...]
) -> VariableTracker:
"""
call fn(*args) before the graph runs and turn the result into a fake input.
"""
example_value = fn(*args)
varname = self.new_var()
cg = PyCodegen(self.root_tx)
cg.add_push_null(
lambda: cg.load_import_from(
fn.__module__,
fn.__name__,
)
)
cg.foreach(map(variables.ConstantVariable.create, args))
cg.call_function(len(args), False)
cg.store(varname)
self.pregraph_bytecode.extend(cg.get_instructions())
source = SyntheticLocalSource(varname)
result = VariableTracker.build(self.root_tx, example_value, source)
# Realize the VT because we will delete the guards on it in the next line.
result = result.realize()
TracingContext.get().guards_context.dynamo_guards.remove_guards_with_source(
source
)
return result
def add_cleanup_hook(self, fn: Callable[[], Any]) -> None:
self.cleanup_hooks.append(fn)
def call_cleanup_hooks(self) -> None:
for hook in reversed(self.cleanup_hooks):
hook()
self.cleanup_hooks.clear()
@property
def root_tracer(self) -> "SubgraphTracer":
return self.tracers[0]
@property
def current_tracer(self) -> "SubgraphTracer":
return self.tracers[-1]
def is_root_tracer(self) -> bool:
# Helper to tell if we are inside the higher order operator tracing.
return len(self.tracers) == 1
@property
def graph(self) -> torch.fx.Graph:
return self.current_tracer.graph
# TODO(rzou): can delete after we refactor speculate_subgraph to use nested GraphTracer.
@graph.setter
def graph(self, value: torch.fx.Graph) -> None:
self.current_tracer.graph = value
@property
def input_name_to_proxy(self) -> dict[str, fx.Proxy]:
return self.current_tracer.input_name_to_proxy
@property
def real_value_cache(self) -> dict[fx.Node, torch.Tensor]:
return self.current_tracer.real_value_cache
@property
def bound_symbols(self) -> dict[sympy.Symbol, Union[torch.fx.Proxy, "LazyProxy"]]:
return self.current_tracer.bound_symbols
# If you are here, and you're looking for create_graph_input,
# to avoid ambiguity, please call one of the following:
# - self.current_tracer.create_graph_input
# - self.root_tracer.create_graph_input
# See NOTE [HigherOrderOperator tracing design] for more context.
def create_proxy(self, *args: Any, **kwargs: Any) -> torch.fx.Proxy:
return self.current_tracer.create_proxy(*args, **kwargs)
def create_node(self, *args: Any, **kwargs: Any) -> torch.fx.Node:
return self.current_tracer.create_node(*args, **kwargs)
def remove_node(self, *args: Any, **kwargs: Any) -> None:
return self.current_tracer.remove_node(*args, **kwargs)
@contextlib.contextmanager
def subtracer(
self, source_target: Optional[Target], prior_tracer: "SubgraphTracer"
) -> Generator[fx.Tracer, None, None]:
new_scope_ctx = enter_new_scope()
try:
if prior_tracer:
# Lineage MUST stay preserved
assert prior_tracer.parent is self.current_tracer
new_scope_ctx.__enter__()
tracer = (
prior_tracer
if prior_tracer
else SubgraphTracer(
self,
parent=self.current_tracer,
source_target=source_target,
is_export=self.current_tracer.is_export,
)
)
self.tracers.append(tracer)
yield tracer
finally:
new_scope_ctx.__exit__(None, None, None)
self.tracers.pop()
@property
def output(self) -> "OutputGraph":
return self
@property
def fake_mode(self) -> torch._subclasses.FakeTensorMode:
assert self.tracing_context.fake_mode is not None
return self.tracing_context.fake_mode
@property
def shape_env(self) -> ShapeEnv:
assert self.tracing_context.fake_mode is not None
assert self.tracing_context.fake_mode.shape_env is not None
return self.tracing_context.fake_mode.shape_env
@property
def guards(self) -> torch._guards.GuardsSet:
return self.tracing_context.guards_context.dynamo_guards
@property
def nn_modules(self) -> dict[str, Any]:
return self.tracing_context.module_context.nn_modules
@property
def aotautograd_guards(self) -> list[torch._guards.GuardEnvExpr]:
return self.tracing_context.guards_context.aotautograd_guards
def save_global_state(
self, out: Optional[dict[str, tuple[Callable[..., Any], bool]]] = None
) -> None:
"""
Saves to out if it is provided. Else saves to the tracing context's global_state.
"""
global_state = cast(
dict[str, tuple[Callable[..., Any], bool]],
(
out
if out is not None
else self.tracing_context.global_context.global_state
),
)
global_state["grad_enabled"] = (torch.set_grad_enabled, torch.is_grad_enabled())
global_state["autocast_enabled"] = (
functools.partial(torch.set_autocast_enabled, "cuda"),
torch.is_autocast_enabled("cuda"),
)
global_state["autocast_cpu_enabled"] = (
functools.partial(torch.set_autocast_enabled, "cpu"),
torch.is_autocast_enabled("cpu"),
)
global_state["autocast_gpu_dtype"] = ( # type:ignore[assignment]
functools.partial(torch.set_autocast_dtype, "cuda"),
torch.get_autocast_dtype("cuda"),
)
global_state["autocast_cpu_dtype"] = ( # type:ignore[assignment]
functools.partial(torch.set_autocast_dtype, "cpu"),
torch.get_autocast_dtype("cpu"),
)
global_state["autocast_cache_enabled"] = (
torch.set_autocast_cache_enabled,
torch.is_autocast_cache_enabled(),
)
def push_tx(self, tx: "InstructionTranslatorBase") -> None:
self._current_tx.append(tx)
def pop_tx(self) -> "InstructionTranslatorBase":
return self._current_tx.pop()
@property
def current_tx(self) -> "InstructionTranslatorBase":
return self.root_tx if not self._current_tx else self._current_tx[-1]
def count_calls(self) -> int:
return count_calls(self.graph)
def is_empty_graph(self) -> bool:
return len(list(self.graph.nodes)) == 0
def has_outputs(self) -> bool:
return len([x for x in self.graph.nodes if x.op == "output"]) > 0
def get_submodule(self, keys: str) -> Union[torch.nn.Module, Any]:
assert keys
obj: Union[torch.nn.Module, dict[str, torch.nn.Module]] = self.nn_modules
for k in keys.split("."):
if isinstance(obj, dict):
obj = obj[k]
else:
obj = getattr(obj, k)
return obj
def new_var(self, name: str = "tmp") -> str:
existing = set(self.code_options["co_varnames"])
# In common case, this will be O(1)
while True:
var = f"{name}_{next(self.unique_var_id)}"
if var not in existing:
self.code_options["co_varnames"] += (var,)
return var
def update_co_names(self, name: str) -> None:
"""Ensure self.code_options.co_names contains name"""
if name not in self.code_options["co_names"]:
self.code_options["co_names"] += (name,)
@staticmethod
def module_key_name(*names: Any) -> str:
# create a new unique name
name = "_".join(map(str, names))
# Strip _buffers[..]/_parameters[..]/_modules[..] names
name = re.sub(
r"\._(?:modules|parameters|buffers)\[(['\"])([^'\"\]]+)\1\]", r".\2", name
)
# Replace getattr(a, b) with a.b
name = re.sub(
r"getattr\(\s*([^,]+?)\s*,\s*(['\"])([^'\"]+)\2\s*\)", r"\1.\3", name
)
# Strip the guard lookup L/G access
name = re.sub(r"^[GL]\['?(.*?)'?\]$", r"\1", name)
# e.g. replace abc.xyz[123].qkv with abc.xyz_123.qkv
name = re.sub(r"\[(\d+)\]", r"_\g<1>", name)
# e.g. replace abc.xyz_123.qkv with abc_xyz_123_qkv
name = re.sub(r"[^a-zA-Z0-9]", "_", name)
if not name or not name[0].isalpha():
name = "sub" + name
return name
def register_static_attr_and_return_proxy(
self, attr_prefix: str, attr_value: Any
) -> fx.Proxy:
# Check if the module already exists, if it does, return the already
# added proxy. This is important for executorch tests.
if isinstance(attr_value, torch.nn.Module):
for name, mod in self.nn_modules.items():
if mod is attr_value:
proxy = self.create_proxy("get_attr", name, (), {})
return proxy
attr_name = get_unique_name_wrt(attr_prefix, self.nn_modules)
# TODO `nn_modules` has been historically overloaded to store a lot more
# than just nn module objects, fix that.
self.nn_modules[attr_name] = attr_value
proxy = self.create_proxy("get_attr", attr_name, (), {})
set_example_value(proxy.node, attr_value)
return proxy
def register_attr_or_module(
self,
target: Union[torch.nn.Module, torch.Tensor, Any],
*names: Any,
**options: Any,
) -> VariableTracker:
if is_dynamic_nn_module(target, self.export):
# Instead of returning UnspecializedNNModuleVariable, call
# VariableTracker.build so that it is tracked for mutation.
return VariableTracker.build(self.current_tx, target, **options)
options = dict(options)
assert "source" in options
source = options["source"]
assert not isinstance(source, ParamBufferSource)
if isinstance(target, torch.Tensor):
tracer = self.current_tracer
if not self.is_root_tracer():
# For higher order ops, we don't want to insert the get_attr in
# innermost graph. Instead, we want to raise the params/buffers
# as inputs to the higher-order graph, and register them as
# get_attrs in the root tracer.
# Note that Dynamo will still call lift_tracked_freevar_to_input
# when these inputs are encountered for the inner graph. The
# only difference is what happens at the root tracer for
# nn.Parameters vs free inputs. The free inputs are registered
# as placeholders in the root graph, whereas the nn.Parameters
# are registered as get_attr nodes in the root graph.
tracer = self.root_tracer
def wrap_name(module_key: str) -> VariableTracker:
assert self.param_name_to_source is not None
self.param_name_to_source[module_key] = source
# Check if the attr has already been registered. This can happen
# when two different sources point to the same tensor.
assert self.root_tx is not None
if target in self.root_tx.output.side_effects:
return self.root_tx.output.side_effects[target]
if get_static_address_type(target) == "guarded" and not isinstance(
source, NumpyTensorSource
):
install_guard(source.make_guard(GuardBuilder.ID_MATCH))
elif not is_constant_source(source):
install_guard(source.make_guard(GuardBuilder.TENSOR_MATCH))
vt = wrap_fx_proxy(
self.root_tx,
tracer.create_proxy("get_attr", module_key, (), {}),
example_value=target,
**options,
)
# Track the object so to avoid duplicate registration in case of
# different sources pointing to the same tensor object.
vt = self.root_tx.output.side_effects.track_object_existing(target, vt)
assert "tensor_dict" not in vt.as_proxy().node.meta
# pyrefly: ignore [bad-argument-type]
vt.as_proxy().node.meta["tensor_dict"] = _extract_tensor_dict(target)
return vt
elif isinstance(target, torch.nn.Module):
assert isinstance(target, torch.nn.Module)
if source:
install_guard(source.make_guard(GuardBuilder.NN_MODULE))
def wrap_name(module_key: str) -> VariableTracker:
# pyrefly: ignore [bad-argument-type]
return NNModuleVariable(type(target), module_key, target, **options)
else:
# This is Dynamo created graph module, e.g., graph module coming
# from higher order ops. NNModuleVariable tracker can't be
# sourceless, so let's return a unspecializedNNModule variable
# tracker.
def wrap_name(module_key: str) -> VariableTracker:
return variables.UnspecializedNNModuleVariable(target, **options)
elif isinstance(target, (torch.SymInt, torch.SymFloat)):
# HACKY CODE REGION BEGIN
# WE ARE PIGGYBACKING ON EXISTING INFRA TO REGISTER ATTRS
# This ultimately gets written to self.nn_modules, which is unfortunate
# Attrs that are tenors and symints and such need to be migrated to have their
# own storage
# alas, this is like this for now
def wrap_name(module_key: str) -> VariableTracker:
return SymNodeVariable.create(
self,
self.create_proxy("get_attr", module_key, (), {}),
sym_num=target,
**options,
)
# HACKY CODE REGION END
else:
def wrap_name(module_key: str) -> VariableTracker:
self.output.update_co_names(module_key)
self.global_scope[module_key] = target
return VariableTracker.build(
self, # type: ignore[arg-type]
target,
ConstantSource(source_name=module_key),
)
for k, v in self.nn_modules.items():
if v is target:
# it already exists
return wrap_name(k)
name = OutputGraph.module_key_name(*names)
name = get_unique_name_wrt(name, self.nn_modules, self.global_scope)
self.nn_modules[name] = target
if isinstance(target, torch.nn.Module):
def register_leaf_name(leaf_name: str) -> None:
assert self.param_name_to_source is not None
new_source = ParamBufferSource(source, leaf_name)
new_name = f"{name}.{leaf_name}"
self.param_name_to_source[new_name] = new_source
if isinstance(source, LocalSource):
self.dynamo_flat_name_to_original_fqn[
OutputGraph.module_key_name(new_source.name())
] = leaf_name
# annoying, but there are cases when we do not have parameters
# see test_nn_moduledict_contains
if hasattr(target, "_parameters"):
for leaf_name, _ in target.named_parameters():
register_leaf_name(leaf_name)
if hasattr(target, "_buffers"):
for leaf_name, _ in target.named_buffers():
register_leaf_name(leaf_name)
return wrap_name(name)
    def handle_aliases_for_stolen_lists(
        self, tx: "InstructionTranslatorBase"
    ) -> tuple[list[Instruction], dict[Source, Source]]:
        """
        Emit alias bytecode for items of "stolen" input lists that are still
        referenced after the compiled-function call.

        Returns:
            - alias_insts: instructions performing `alias = stolen_list[idx]`
              for each needed item (empty when nothing is stolen)
            - overridden_sources: maps each item's original GetItemSource to
              the alias's LocalSource so later codegen loads the alias instead
              of the (cleared) original list
        """
        # If list inputs are stolen, but still needed after the function call, create aliases to keep them alive
        maybe_gm = self.local_scope.get("self")
        stolen_list_names = get_locals_to_steal(maybe_gm)
        if not stolen_list_names:
            return [], {}
        alias_insts = []
        needs_alias: dict[str, list[VariableTracker]] = {}
        # Scan everything that can still reference stolen list items after the
        # call: the stack, symbolic locals, and attribute-mutation targets.
        queue = [
            *tx.stack,
            *tx.symbolic_locals.values(),
            *self.side_effects.store_attr_mutations.keys(),
        ]
        while queue:
            x = queue.pop()
            if isinstance(x, BaseListVariable):
                assert isinstance(x.items, list)
                queue += x.items
                continue
            # Only items read directly out of a stolen input list (a
            # GetItemSource whose base is a LocalSource) need an alias.
            if not (
                (
                    x not in self.side_effects.store_attr_mutations
                    or isinstance(x.mutation_type, AttributeMutationExisting)
                )
                and isinstance(x.source, GetItemSource)
                and isinstance(x.source.base, LocalSource)
                and x.source.base.local_name in stolen_list_names
            ):
                continue
            stolen_name = x.source.base.local_name
            if stolen_name not in needs_alias:
                needs_alias[stolen_name] = []
            needs_alias[stolen_name].append(x)
        visited = {}  # list index -> alias local name (one alias per index)
        overridden_sources: dict[Source, Source] = {}
        for arg in self.graphargs:
            if not (
                isinstance(arg._example, list)
                and isinstance(arg.source, LocalSource)
                and arg.source.local_name in needs_alias
            ):
                continue
            # arg is a list that will be cleared by the compiled function
            list_name = arg.source.local_name
            assert list_name in self.code_options["co_varnames"]
            for x in needs_alias[list_name]:
                # Skip if already handled.
                if x.source in overridden_sources:
                    continue
                # A small codegen optimization because we might have different
                # VariableTrackers that share the same source.
                assert x.source is not None
                list_idx = x.source.index  # type: ignore[attr-defined]
                if list_idx not in visited:
                    alias_name = self.new_var(
                        f"{list_name}_ref"
                    )  # self.new_var already adds unique id suffix
                    visited[list_idx] = alias_name
                    # bytecode of `alias_name = list_name[list_idx]`
                    alias_insts.extend(
                        [
                            create_instruction("LOAD_FAST", argval=list_name),
                            create_load_const(list_idx),
                            create_binary_subscr(),
                            create_instruction("STORE_FAST", argval=alias_name),
                        ]
                    )
                # operate on alias, handled by suffix codegen
                assert x.source is not None
                old_source = x.source
                overridden_sources[old_source] = LocalSource(visited[list_idx])
        # NOTE: we need `overridden_sources` because (1) we want to codegen for
        # these list items to use the new local source, but (2) we want to avoid
        # updating `source` in place because that might break invariants in
        # other parts of Dynamo like guards.
        return alias_insts, overridden_sources
    def _get_stack_values_to_restore(
        self, tx: "InstructionTranslatorBase", stack_pops: int
    ) -> tuple[list[VariableTracker], StackLocalsMetadata]:
        """
        Gets the stack + locals values belonging to tx that need to be restored.
        Also prunes dead tx locals and realizes all VTs in the tx's stack.
        NullVariables in stack/locals will NOT be restored, unless they are the top `stack_pops`
        elements of the stack - it is expected that the next instruction to run will pop the top
        `stack_pops` elements of the stack, so we should codegen NULLs.
        Returns:
            - stack_values: stack and locals values that need to be restored
            - meta: locations of NULLs and ContextWrappingVariables in the stack/locals
            (ignores the top `stack_pops` values on the stack)
        """
        tx.prune_dead_locals()
        stack_values = []
        meta = StackLocalsMetadata()
        # realize any unrealized tensor VTs in case they
        # need to be added to self.nn_modules as attributes
        for i, value in enumerate(tx.stack):
            variables.LazyVariableTracker.realize_all(value)
            # ignore top `stack_pops` values on the stack
            if len(tx.stack) - i <= stack_pops:
                stack_values.append(value)
                continue
            if isinstance(value, NullVariable):
                # NULL is dropped from stack_values; only its index is recorded.
                meta.stack_null_idxes.append(i)
            else:
                stack_values.append(value)
            if isinstance(value, ContextWrappingVariable):
                target_values = (
                    () if value.target_values is None else tuple(value.target_values)
                )
                # NOTE: track index in stack after NULLs have been removed
                meta.stack_ctx_args.append((len(stack_values) - 1, target_values))
                meta.stack_ctx_idxes_orig.append(i)
        meta.num_stack = len(stack_values)
        cell_and_freevars = set(tx.cellvars() + tx.freevars())
        # NB: Typically (i.e., for graph compile from RETURN_VALUE),
        # symbolic_locals will be empty at this point, as prune_dead_locals
        # will clear out all of symbolic_locals because RETURN_VALUE is the
        # last instruction and no more locals are used. The fanciness here
        # is only needed for partial graphs.
        # NOTE: All cell and free variables are represented as CellVariable,
        # so checks for NULLs and context managers in the case of codegen'ing resume
        # functions will not be performed on them. This is expected behavior.
        for k, v in tx.symbolic_locals.items():
            # Note! this explicitly uses .local_name for matching
            # Failure to do so will cause spurious registrations in val_to_names.
            # This will in turn result in spurious variables showing up in the graph.
            # This was very tricky to debug. For an example, dump the graph at call_user_compiler
            # while running test_subgraphs.py
            # Do not include top-frame unmodified locals here - otherwise, the compiled graph may
            # erroneously include them as part of the return. We manually codegen them afterward.
            if (
                isinstance(v.source, LocalSource)
                and v.source.local_name == k
                and tx is self.root_tx
            ):
                continue
            # Do not load cell/free vars
            if k in cell_and_freevars:
                continue
            # Do not load variable if it is NULL.
            if sys.version_info >= (3, 12):
                # NOTE: do not use isinstance, since it realizes lazy VT's
                # Continuation function will load the NULL for v.
                if type.__instancecheck__(NullVariable, v):
                    meta.locals_null_keys.append(k)
                    continue
            else:
                # A variable should never be NULL in < 3.12
                assert not type.__instancecheck__(NullVariable, v)
            # locals_names records each restored local's position (insertion order).
            meta.locals_names[k] = len(meta.locals_names)
            if isinstance(v, ContextWrappingVariable):
                target_values = (
                    () if v.target_values is None else tuple(v.target_values)
                )
                meta.locals_ctx_args.append((k, target_values))
            stack_values.append(v)
        return stack_values, meta
    def compile_subgraph(
        self,
        tx: "InstructionTranslatorBase",
        reason: GraphCompileReason,
        partial_convert: bool = False,
        stack_pops: int = 0,
    ) -> list[StackLocalsMetadata]:
        """
        Compiles the current subgraph, with inputs w.r.t. self.root_tx, and codegens:
        - Call the compiled subgraph
        - Apply side effects
        - Codegen stack and locals
        - Store the locals
        Python does not allow NULL to be an arg to a function, so we do not codegen NULLs on the stack,
        unless the value is one of the top `stack_pops` values on the stack (these values are expected to be
        popped immediately after this generated code. The prologue of the resume function is expected to restore
        any dropped NULLs.
        Returns stack indices and locals keys where we dropped NULLs, and where we found inactive context manager objects.
        """
        assert self.root_tx is not None
        if not config.nested_graph_breaks:
            # expect to only compile 1 frame
            assert self.root_tx is tx
        # bytecode tracing has finished. Pop the context manager for dynamo_timed
        self.mark_bytecode_tracing_stop()
        self.partial_convert = partial_convert
        self.compile_subgraph_reason = reason
        self.should_exit = True
        log.debug("COMPILING GRAPH due to %s", reason)
        # prefix instructions (Python 3.11+)
        prefix_insts: list[Instruction] = []
        if sys.version_info >= (3, 11):
            for inst in self.root_tx.prefix_insts:
                if inst.opname == "COPY_FREE_VARS":
                    # Rebuild COPY_FREE_VARS with the root frame's freevar count
                    # rather than copying the original instruction verbatim.
                    prefix_insts.append(
                        create_instruction(
                            "COPY_FREE_VARS",
                            arg=len(self.root_tx.code_options["co_freevars"]),
                        )
                    )
                else:
                    prefix_insts.append(copy.copy(inst))
        # stack values and restore vars for each frame are pushed in reverse order
        # i.e. last element corresponds to root frame (1),
        # first element corresponds to current frame (N)
        all_stack_values = []
        all_stack_locals_metas = []
        cur_tx: Optional[InstructionTranslatorBase] = tx
        while cur_tx is not None:
            # this should have been checked by the caller
            assert all(block.can_restore() for block in cur_tx.block_stack)
            # Only the innermost frame honors `stack_pops`; outer frames keep all values.
            stack_values, meta = self._get_stack_values_to_restore(
                cur_tx, stack_pops if cur_tx is tx else 0
            )
            all_stack_values.append(stack_values)
            all_stack_locals_metas.append(meta)
            # Exit from all context manager variables to make sure global state is restored
            for block in reversed(cur_tx.block_stack):
                block.exit(cur_tx, is_graph_break=reason.graph_break)
            cur_tx = cur_tx.parent
        # "Garbage collect the heap".
        self.side_effects.prune_dead_object_new(tx)
        self.add_output_instructions(prefix_insts)
        assert not (self.pregraph_bytecode and self.export), (
            "export does not support pregraph_bytecode"
        )
        self.add_output_instructions(self.pregraph_bytecode)
        alias_insts, overridden_sources = self.handle_aliases_for_stolen_lists(
            self.root_tx
        )
        self.add_output_instructions(alias_insts)
        self.cleanup_graph()
        # Use nn.Module "proxies" in the constructed GraphModule so that
        # the resulting GM does not hold additional strong references to the original modules.
        # This prevents a strong ref cycle where Dynamo created code holds on to references
        # to modules that also have Dynamo code cache invalidation checks.
        # When cache invalidation runs, the generated GM will be invalidated, which also deletes
        # the proxies.
        nn_modules_proxies = {
            name: nn_module_proxy(mod) for name, mod in self.nn_modules.items()
        }
        root = FakeRootModule(nn_modules_proxies)
        from .decorators import disable
        # to handle random calls
        if len(self.random_calls) > 0:
            random_calls_instructions = []
            self.random_values_var = self.new_var("random_values")
            rand_fn = disable(
                _get_gen_rand_values_fn(self.random_calls),
                reason="do not trace into Dynamo rng recovery function",
            )
            rand_fn_name = self.install_global("__gen_rand_values", rand_fn)
            codegen = PyCodegen(
                self.root_tx, root, overridden_sources=overridden_sources
            )
            random_calls_instructions.extend(
                codegen.load_function_name(rand_fn_name, True)
            )
            random_calls_instructions.extend(create_call_function(0, False))
            random_calls_instructions.append(
                codegen.create_store(self.random_values_var),
            )
            self.add_output_instructions(random_calls_instructions)
        # Codegen stack convention before the unsupported instruction
        # NOTE: in these comment blocks, "locals" EXCLUDE free and cell vars.
        # NOTE: stack/locals/cells must be codegen'd BEFORE the unsupported instruction, since the latter
        # can arbitrarily mutate the former.
        # [frame N cells, .., frame 1 cells],
        # [
        #     frame N locals,
        #     frame N-1 stack + locals,
        #     ...,
        #     frame 1 stack + locals,
        # ], frame N stack
        # see symbolic_convert.py for
        # codegen stack convention after the unsupported instruction
        # NOTE: cells will be loaded into continuation functions directly by symbolic_convert
        # this determines the order that values are codegen'd to the stack
        stack_values_flat = [val for vals in all_stack_values for val in vals]
        stored_graph_output_var = False
        graph_output_var = None
        # call compiled fx graph and codegen all values - stack and locals
        if (
            self.root_tx is tx  # single frame
            and stack_values_flat
            and all(
                not isinstance(
                    v,
                    (
                        UnspecializedPythonVariable,
                        NumpyNdarrayVariable,
                        TensorWithTFOverrideVariable,
                    ),
                )
                and not (isinstance(v, SymNodeVariable) and v.python_type() is float)
                for v in stack_values_flat
            )
            and all(isinstance(x, TensorVariable) for x in stack_values_flat)
            and len(set(stack_values_flat)) == len(stack_values_flat)
            and self.side_effects.is_empty()
            and not tx.debug_locals
            and not self.backward_state
            and not all_stack_locals_metas[-1].stack_null_idxes
            and not all_stack_locals_metas[-1].locals_null_keys
        ):
            # optimization to generate better code in a common case
            # codegen cells
            # no side effects, so no new cells created - no need to call side_effects.codegen_save_tempvars
            cell_cg = PyCodegen(self.root_tx)
            self.codegen_cells(tx, cell_cg)
            self.add_output_instructions(
                [
                    # load in reverse since UNPACK_SEQUENCE will reverse
                    *self.compile_and_call_fx_graph(
                        tx, list(reversed(stack_values_flat)), root
                    ),
                    *cell_cg.get_instructions(),
                    *create_swap(2),
                    create_instruction("UNPACK_SEQUENCE", arg=len(stack_values_flat)),
                ]
            )
            # function output will be moved to the correct places below
        else:
            graph_output_var = self.new_var("graph_out")
            # load stack values in a flat manner - we will codegen bytecode to place them correctly
            # according to our convention above
            pass1 = PyCodegen(
                self.root_tx,
                root,
                graph_output_var,
                overridden_sources=overridden_sources,
            )
            self.codegen_suffix(tx, stack_values_flat, pass1)
            # Use `pass1.uses` to selectively cache multi-user variables into a
            # temporary local source. This (a). speeds up loading VTs with long
            # chained source, and (b). avoids redundantly saving single-user VT
            # into a temporary local.
            tempvars = {}  # type: ignore[var-annotated]
            for val, count in pass1.uses.items():
                # If it's already a local source, no need to cache it
                if count > 1 and not istype(val, (SyntheticLocalSource, LocalSource)):
                    tempvars[val] = None
            # pass2 re-runs the same codegen, this time with tempvar caching.
            pass2 = PyCodegen(
                self.root_tx,
                root,
                graph_output_var,
                tempvars=tempvars,
                overridden_sources=overridden_sources,
            )
            self.codegen_suffix(tx, stack_values_flat, pass2)
            if (
                torch._dynamo.config.log_graph_in_out_metadata
                and stack_values_flat
                and len(stack_values_flat) == 1
            ):
                vt = stack_values_flat[0]
                if (
                    isinstance(vt, torch._dynamo.variables.NamedTupleVariable)
                    and vt.tuple_cls
                    is torch._dynamo.functional_export.ExportTracerOutput
                ):
                    flat_returns = vt.items[0]
                    out_spec = vt.items[1]
                    assert isinstance(
                        flat_returns, torch._dynamo.variables.ListVariable
                    )
                    # Classify each flattened return as a graph output, a
                    # passed-through input, or a constant for export metadata.
                    vt_to_graph_out_idx: dict[VariableTracker, int] = {}
                    for value in pass2.graph_outputs.values():
                        assert isinstance(value, torch._dynamo.codegen.GraphOutputEntry)
                        variable: VariableTracker = value.variable
                        vt_to_graph_out_idx[variable] = value.index
                    for idx, vt in enumerate(flat_returns.items):
                        if vt in vt_to_graph_out_idx:
                            self.export_metadata.output_return_type[idx] = (
                                "graph_out",
                                vt_to_graph_out_idx[vt],
                            )
                        elif (
                            vt.source is not None
                            and (source := getattr(vt.source, "base", None))  # type: ignore[assignment]
                            and source.is_input
                        ):
                            self.export_metadata.output_return_type[idx] = (
                                "input",
                                vt.source,
                            )
                        elif isinstance(vt, torch._dynamo.variables.ConstantVariable):
                            self.export_metadata.output_return_type[idx] = (
                                "constant",
                                vt.as_python_constant(),
                            )
                        else:
                            assert f"Encountered unrecognized type {vt} at output {idx}"  # noqa: PLW0129
                    self.export_metadata.out_spec = out_spec.as_python_constant()
            output = []
            if count_calls(self.graph) != 0 or len(pass2.graph_outputs) != 0:
                output.extend(
                    self.compile_and_call_fx_graph(tx, pass2.graph_output_vars(), root)
                )
                if len(pass2.graph_outputs) != 0:
                    output.append(pass2.create_store(graph_output_var))
                    stored_graph_output_var = True
                else:
                    output.append(create_instruction("POP_TOP"))
            else:
                # NB: Important to run compiler collective even when there is
                # a graph break
                self.run_compiler_collective()
            self.add_output_instructions(output + pass2.get_instructions())
        # store all stack and locals for each frame
        # current state of the stack:
        # all cells,
        # *(frame N stack), *(frame N locals),
        # ...,
        # *(frame 1 stack), *(frame 1 locals)
        self.add_output_instructions(
            [
                create_instruction(
                    "BUILD_LIST",
                    arg=len(stack_values_flat) - all_stack_locals_metas[0].num_stack,
                ),
            ]
        )
        # current state of the stack:
        # all cells,
        # *(frame N stack), [
        #     *(frame N locals),
        #     *(frame N-1 stack), *(frame N-1 locals),
        #     ...
        #     *(frame 1 stack), *(frame 1 locals),
        # ]
        # iterate current frame (N) to root frame (1)
        # sliding window over frame stack/locals
        start_idx = 0
        end_idx = 0
        for i, meta in enumerate(all_stack_locals_metas):
            # do not pack frame N's stack into the value list
            n_vals = len(meta.locals_names)
            if i != 0:
                n_vals += meta.num_stack
            if n_vals == 0:
                self.add_output_instructions(
                    [
                        create_instruction("BUILD_LIST", arg=0),
                        *create_swap(2),
                    ]
                )
                # [], stack_values_flat
            else:
                end_idx += n_vals
                self.add_output_instructions(
                    [
                        create_dup_top(),
                        *create_binary_slice(start_idx, end_idx),
                        *create_swap(2),
                    ]
                )
                start_idx += n_vals
                # stack_values_flat[x:y], stack_values_flat
            # add root frame's unmodified locals here
            if i == len(all_stack_locals_metas) - 1:
                root_cg = PyCodegen(self.root_tx)
                unmodified_locals_names: dict[str, int] = {}
                for k, v in self.root_tx.symbolic_locals.items():
                    if isinstance(v.source, LocalSource) and v.source.local_name == k:
                        root_cg.append_output(root_cg.create_load(k))
                        unmodified_locals_names[k] = len(meta.locals_names) + len(
                            unmodified_locals_names
                        )
                self.add_output_instructions(
                    root_cg.get_instructions()
                    + [
                        create_instruction(
                            "BUILD_LIST", arg=len(unmodified_locals_names)
                        ),
                        # arg=2 because we already swapped the locals list back
                        create_instruction("LIST_EXTEND", arg=2),
                    ]
                )
                meta.locals_names.update(unmodified_locals_names)
            # *(frame N stack), metas[0] stack + locals, ..., metas[i] stack + locals, stack_values_flat
        # current state of the stack:
        # all cells,
        # *(frame N stack),
        # frame N locals,
        # frame N-1 stack, frame N-1 locals,
        # ...
        # frame 1 stack, frame 1 locals,
        # stack_values_flat
        #
        self.add_output_instructions(
            [
                create_instruction("POP_TOP"),
                create_instruction("BUILD_LIST", arg=len(all_stack_locals_metas)),
                *create_rot_n(all_stack_locals_metas[0].num_stack + 1),
            ]
        )
        # final state of the stack before running the unsupported bytecode:
        # all cells,
        # [
        #     [frame N locals],
        #     [frame N-1 stack + locals],
        #     ...,
        #     [frame 1 stack + locals],
        # ], *(frame N stack)
        if graph_output_var and stored_graph_output_var:
            self.add_output_instructions(
                [create_instruction("DELETE_FAST", argval=graph_output_var)]
            )
        if torch._dynamo.config.side_effect_replay_policy in ["warn", "error"]:
            from torch.export._trace import _ExportModuleSpecTrackerDict
            potential_side_effects = []
            for var in self.side_effects._get_modified_vars():
                if hasattr(var, "mutation_type"):
                    mut_type = var.mutation_type
                    # Make sure to skip codegen specific mutations
                    if isinstance(
                        mut_type, (AttributeMutationExisting, ValueMutationExisting)
                    ):
                        if isinstance(var, UserDefinedDictVariable) and isinstance(
                            var.value, _ExportModuleSpecTrackerDict
                        ):
                            for k, v in var.items.items():
                                specs = {}
                                for k_spec, val in v.items.items():
                                    specs[k_spec.vt.as_python_constant()] = (
                                        val.as_python_constant()
                                    )
                                assert ["in_spec", "out_spec"] == list(specs.keys())
                                self.export_metadata.module_call_spec[
                                    k.vt.as_python_constant()
                                ] = specs
                        # export uses tracepoint pass to dump submodule inp/out spec
                        # into global state, so we filter it here
                        if not (
                            isinstance(var, UserDefinedDictVariable)
                            and isinstance(var.value, _ExportModuleSpecTrackerDict)
                        ):
                            potential_side_effects.append(var)
            side_effect_refs = [
                _get_source_debug_name(var.source) for var in potential_side_effects
            ]
            if side_effect_refs:
                if torch._dynamo.config.side_effect_replay_policy == "warn":
                    warnings.warn(
                        f"While compiling, we found certain side effects happened in the model.forward. "
                        f"Here are the list of potential sources you can double check: {side_effect_refs}"
                    )
                else:
                    raise RuntimeError(
                        f"While compiling, we found certain side effects happened in the model.forward. "
                        f"Here are the list of potential sources you can double check: {side_effect_refs}"
                    )
        return all_stack_locals_metas
def codegen_cells(self, tx: "InstructionTranslatorBase", cg: PyCodegen) -> None:
# no need to codegen if reason.graph_break is False (since we won't resume)
if self.compile_subgraph_reason.graph_break:
tx_cnt = 0
cur_tx: Optional[InstructionTranslatorBase] = tx
while cur_tx is not None:
# NOTE: we generate cells in the same order as resume_execution.py: sorted freevars + cellvars
# Emitting `LOAD_FAST/LOAD_CLOSURE` with names in `co_freevars`
# requires that in the generated bytecode, these cells would keep
# their original local names, which we ensure via
# `CellVariable.local_name`.
freevars = tuple(sorted(cur_tx.cell_and_freevars()))
for cell in freevars:
if cur_tx is self.root_tx: # root frame
cg.append_output(cg.create_load_closure(cell))
else: # nested frame
assert cur_tx.post_prune_cell_and_freevars
cg(cur_tx.post_prune_cell_and_freevars[cell])
cg.append_output(create_build_tuple(len(freevars)))
cur_tx = cur_tx.parent
tx_cnt += 1
cg.append_output(create_instruction("BUILD_LIST", arg=tx_cnt))
else:
cg.append_output(create_instruction("BUILD_LIST", arg=0))
    def codegen_suffix(
        self,
        tx: "InstructionTranslatorBase",
        stack_values: list[VariableTracker],
        cg: PyCodegen,
    ) -> None:
        """
        Codegen everything that must run after the compiled graph call:
        save tempvars, store backward state, replay side-effect hooks,
        emit debug-local logging calls, codegen cells, restore the stack,
        and finally replay attribute/value mutations. The ordering of these
        steps is significant - see the inline notes below.
        """
        # NOTE: `codegen_save_tempvars` must run first to update `source` fields
        # for variables with `AttributeMutationNew`, as they don't implement
        # `reconstruct` themselves.
        self.side_effects.codegen_save_tempvars(cg)
        if self.backward_state:
            # backward state is incompatible with export; store each value as
            # an attribute on the backward state object.
            assert not self.export
            for name, val in self.backward_state.items():
                cg(val)
                assert self.backward_state_var is not None
                cg.append_output(cg.create_load(self.backward_state_var))
                cg.store_attr(name)
        if config.replay_side_effects:
            self.side_effects.codegen_hooks(cg)
        # TODO get debug_locals working for nested graph breaks
        # Return variables used for logging at the end
        for debug_var, args in tx.debug_locals:
            cg.add_push_null(lambda: cg(debug_var))
            for arg in args:
                cg(arg)
            cg.extend_output(create_call_function(len(args), False))
            cg.extend_output([create_instruction("POP_TOP")])
        # codegen cells before we apply side effects
        self.codegen_cells(tx, cg)
        cg.restore_stack(stack_values, value_from_source=not tx.export)
        if config.replay_side_effects:
            self.side_effects.codegen_update_mutated(cg)
def cleanup_graph(self) -> None:
"""
Remove "creation_timestamp" from node meta
Remove this pattern from the graph:
torch._C._set_grad_enabled(False)
torch._C._set_grad_enabled(True)
"""
assert self.should_exit
nodes = list(self.graph.nodes)
for node in nodes:
node.meta.pop("creation_timestamp", None)
grad_enabled = torch.is_grad_enabled()
for node1, node2 in itertools.pairwise(nodes):
if (
node1.target is torch._C._set_grad_enabled
and tuple(node1.args) == (not grad_enabled,)
and not node1._erased
):
grad_enabled = node1.args[0]
if (
node2.target is torch._C._set_grad_enabled
and tuple(node2.args) == (not grad_enabled,)
and not node2._erased
):
grad_enabled = node2.args[0]
self.graph.erase_node(node1)
self.graph.erase_node(node2)
def bypass_package(self, reason: str = "", **kwargs: Any) -> None:
"""
Do not save this output graph to the CompilePackage
"""
if not self.package:
return
if torch._dynamo.config.strict_precompile:
raise torch._dynamo.exc.PackageError(
"Detected a package bypass: %s", reason
)
log.warning("Detected a package bypass: %s", reason)
torch._logging.trace_structured(
"artifact",
metadata_fn=lambda: {
"name": "precompile_cache_bypass",
"encoding": "json",
},
payload_fn=lambda: {
# precede with underscore so it always appear first in JSON in tlparse
"_reason": reason,
**kwargs,
},
)
self.package.bypass_current_entry()
self.package = None
def get_graph_sizes_structured(self) -> dict[str, list[Union[int, str]]]:
ret: dict[str, list[Union[int, str]]] = {}
for node in self.graph.nodes:
example_value = node.meta.get("example_value", None)
if isinstance(example_value, torch._subclasses.FakeTensor):
size = example_value.size()
ret[node.name] = [s if isinstance(s, int) else repr(s) for s in size]
return ret
def get_graph_sizes(self, name: str) -> str:
graph_sizes_str = "TRACED GRAPH TENSOR SIZES\n"
graph_sizes_str += f"===== {name} =====\n"
for node in self.graph.nodes:
example_value = node.meta.get("example_value", None)
if isinstance(example_value, torch._subclasses.FakeTensor):
size = example_value.size()
graph_sizes_str += f"{node.name}: {tuple(size)}\n"
concrete_size = []
has_symint = False
for sz in size:
if isinstance(sz, int):
concrete_size.append(sz)
elif isinstance(sz, torch.SymInt):
has_symint = True
concrete_size.append(sz.node.hint)
else:
break
else:
if has_symint:
graph_sizes_str += (
f"{node.name} (concrete): {tuple(concrete_size)}\n"
)
return graph_sizes_str
    @contextlib.contextmanager
    def restore_global_state(self) -> Any:
        """
        Momentarily restores the global state to what it was prior to tracing
        the current output, then puts the present state back on exit.
        """
        # Snapshot of the state from before tracing started.
        prior_global_state = self.tracing_context.global_context.copy_graphstate()
        # Snapshot of the state as of right now, to restore afterwards.
        current_global_state: dict[str, tuple[Any, bool]] = {}
        self.save_global_state(out=current_global_state)
        try:
            # Set to state prior to tracing the graph
            self.tracing_context.global_context.restore_graphstate(prior_global_state)
            yield
        finally:
            # Reset to state at the current time (e.g. before calling the user compiler)
            self.tracing_context.global_context.restore_graphstate(
                GlobalContextCheckpointState(current_global_state)
            )
    def run_compiler_collective(self) -> None:
        """
        When distributed compile state is present and not yet gathered,
        all-gather each rank's local state over the compile process group,
        then raise CompileCollectiveRestartAnalysis to restart tracing with
        the gathered information. No-op otherwise.
        """
        tx = self.root_tx
        assert tx is not None
        if (ds := tx.distributed_state) is not None and ds.all_states is None:
            compile_pg = ds.compile_pg
            log.info("compiler_collective %s", ds.local_state)
            torch._logging.trace_structured(
                "artifact",
                metadata_fn=lambda: {
                    "name": "compiler_collective",
                    "encoding": "string",
                },
                payload_fn=lambda: ds.local_state.render(),
            )
            device_types = compile_pg._device_types
            assert len(device_types) == 1, (
                "Expect only one device type but got {}".format("+".join(device_types))
            )
            # Pin the collective to this rank's accelerator device.
            with (
                get_interface_for_device(device_types.pop()).device(  # type: ignore[attr-defined]
                    compile_pg.rank() % torch.accelerator.device_count()
                ),
                dynamo_timed("compiler_collective", log_pt2_compile_event=True),
            ):
                all_states: list[Any] = [None] * compile_pg.size()
                dist.all_gather_object(all_states, ds.local_state, group=compile_pg)
                ds.all_states = all_states
            # Clear speculation log, because our tracing may diverge due to
            # this information from the compiler collective
            tx.speculation_log.clear()
            raise exc.CompileCollectiveRestartAnalysis
def compile_and_call_fx_graph(
self,
tx: "InstructionTranslatorBase",
rv: list[VariableTracker],
root: FakeRootModule,
) -> list[Instruction]:
"""
Generate code from self.graph and return the Instruction()s to
call that generated code.
Code is generated w.r.t. self.root_tx.
tx is only used for preserving GraphModule metadata
"""
with torch._guards.TracingContext.clear_frame():
from .decorators import disable
assert self.should_exit
self.run_compiler_collective()
if count_calls(self.graph) == 0 and len(rv) == 0:
return []
name = unique_id("__compiled_fn", with_uuid=True)
assert isinstance(rv, list)
assert isinstance(root, FakeRootModule)
output_node = self.create_node(
"output",
"output",
(self.current_tracer.create_arg(tuple(x.as_proxy() for x in rv)),),
{},
)
sub_gms = self.dedup_pass()
root.add_nn_modules(sub_gms) # type: ignore[arg-type]
self.current_tracer._maybe_preserve_original_meta(tx, output_node)
if not config.do_not_emit_runtime_asserts:
# There is a rare scenario where codegen_suffix adds a new entry
# to self.nn_modules while `root` knows only about the
# nn_modules at the time of its creation. This causes failures
# while creating the graph module because self.graph and root
# are out of sync. This only happens for `get_attr` nodes, so
# here we clean up the get_attr nodes that are unused.
for attr in dir(root):
subgraph = getattr(root, attr)
if isinstance(subgraph, fx.GraphModule):
insert_deferred_runtime_asserts(
subgraph,
self.shape_env,
name,
export=self.export,
)
self.remove_unused_get_attr_nodes()
insert_deferred_runtime_asserts(
fx.GraphModule(root, self.graph),
self.shape_env,
name,
export=self.export,
)
# NB: deferred runtime asserts can keep graphargs live, so make sure
# those are inserted before pruning
self.remove_unused_graphargs()
ncalls = count_calls(self.graph)
counters["stats"]["calls_captured"] += ncalls
self.remove_tensorify_specialized_graphargs()
# free a bit of memory
self.real_value_cache.clear()
gm = _make_graph_module(root, self.graph)
# Saved tensors hooks are not used by the graph.
# GraphModule by default only copies used in the graph submodules.
# Copying them into the result graph manually.
if self.saved_tensors_hooks_subgraph_names:
for subgraph_name in self.saved_tensors_hooks_subgraph_names:
setattr(gm, subgraph_name, getattr(root, subgraph_name))
for register_finalizer in self.register_finalizer_fns:
register_finalizer(gm)
if next(gm.parameters(), None) is not None:
# If dynamo produces a graph with parameters, skip package stuff
# Bypass output graph
self.bypass_package(
"Graph contains named parameters: either inline_inbuilt_nn_modules=False or there are static addresses.",
inline_builtin_nn_modules=torch._dynamo.config.inline_inbuilt_nn_modules,
gm=gm.print_readable(
print_output=False, include_stride=True, include_device=True
),
)
if self.package is not None:
gm._backend_id = name
gm.compile_subgraph_reason = self.compile_subgraph_reason
gm.meta["dynamo_flat_name_to_original_fqn"] = (
self.dynamo_flat_name_to_original_fqn.copy()
)
gm.meta["dynamo_compile_id"] = self.dynamo_compile_id
gm.meta["backend_id"] = name
graph_code_log.debug(
"%s",
lazy_format_graph_code(
name, gm, include_stride=True, include_device=True, colored=True
),
)
torch._logging.trace_structured(
"dynamo_output_graph",
lambda: {"sizes": self.get_graph_sizes_structured()},
payload_fn=lambda: gm.print_readable(
print_output=False, include_stride=True, include_device=True
),
)
self.call_cleanup_hooks()
old_fake_mode = self.tracing_context.fake_mode
assert old_fake_mode is not None
if not self.export:
import torch._functorch.config as _config
with _config.patch(fake_tensor_allow_unsafe_data_ptr_access=False):
# TODO(voz): The way export uses gm, and fake tensors, is not supported with us resetting
# Why create a new FakeTensorMode?
#
# The reason this needs to be done is because when we do Dynamo tracing, fake
# tensors can have their metadata mutated. Thus, the fake tensor we allocated
# for any given tensor may no longer be valid for the beginning trace of the
# graph. Nor is it convenient to "clone" the input tensors before mutating them,
# since you have to preserve aliasing. So we just reconstruct the FakeTensorMode
# from scratch when we go to AOTAutograd. But the ShapeEnv must be preserved as
# Dynamo made decisions about what is dynamic or not / guards from the user code
# that is not in graph.
backend_fake_mode = torch._subclasses.FakeTensorMode(
shape_env=old_fake_mode.shape_env,
)
# TODO(voz): Ostensibly, this should be scoped and
# restore back to old_fake_mode, but doing so currently violates
# a lot of fake_tensor ownership assumptions and runs afoul of detect_fake_mode
self.tracing_context.fake_mode = backend_fake_mode
with self.restore_global_state():
compiled_fn = self.call_user_compiler(gm, self.example_inputs())
from torch.fx._lazy_graph_module import _LazyGraphModule
if isinstance(compiled_fn, _LazyGraphModule) or (
isinstance(getattr(compiled_fn, "__self__", None), _LazyGraphModule)
and compiled_fn.__name__ == "_lazy_forward" # type: ignore[attr-defined]
):
# Since dynamo will run the forward method for the GraphModule shortly
# anyways, it does not hurt to do the real recompilation here if
# this is a _LazyGraphModule. This makes it easier for dynamo to
# optimize a _LazyGraphModule.
lazy_gm = (
compiled_fn
if isinstance(compiled_fn, _LazyGraphModule)
else compiled_fn.__self__ # type: ignore[attr-defined]
)
_LazyGraphModule.force_recompile(lazy_gm)
if not isinstance(compiled_fn, _LazyGraphModule):
# replace compiled_fn with the real forward method
compiled_fn = lazy_gm.forward
if self.package is not None:
self.package.add_backend_id(name, compiled_fn)
compiled_fn = disable(
compiled_fn, reason="do not trace Dynamo-compiled graph"
)
counters["stats"]["unique_graphs"] += 1
assert old_fake_mode.shape_env is not None
if specializations := old_fake_mode.shape_env.specializations:
specialization_guards = []
specialization_cache: dict[Specialization, Callable[[Any], Any]] = {}
sources = [a.source for a in self.graphargs]
for specialization in specializations:
source_index = sources.index(specialization.source)
check_fn_source = inspect.getsource(specialization.check_fn).strip()
# Required because the LABDA_GUARD API requires a root guard manager
unused_root_guard_manager = RootGuardManager()
check_fn = guards.LAMBDA_GUARD( # type: ignore[attr-defined]
unused_root_guard_manager,
specialization.check_fn,
[check_fn_source],
)
log.debug(
"Compiling backend specialized graph with specialization=%s",
check_fn_source,
)
specialization_guards.append(
(
functools.partial(
lambda idx, args, check_fn=check_fn: check_fn(
args[idx]
),
source_index,
),
specialization,
)
)
@torch._dynamo.disable(reason="do not trace Dynamo-compiled graph") # type: ignore[misc]
def specialized_dispatch(*args: Any, **kwargs: Any) -> Any:
for check_fn, specialization in specialization_guards:
if check_fn(args):
if specialization in specialization_cache:
return specialization_cache[specialization](
*args, **kwargs
)
with self.shape_env.patch_source_specialization(
specialization.source, specialization.check_fn
):
# Modify gm so AOTAutogradCache key changes per specialization
gm.meta["specialization"] = specialization
example_inputs: list[Tensor] = list(args)
with tracing(self.tracing_context):
specialization_cache[specialization] = (
self.call_user_compiler(gm, example_inputs)
)
return specialization_cache[specialization](*args, **kwargs)
return compiled_fn(*args, **kwargs)
# This is safe because we pre-process name to be unique
self.install_global_unsafe(name, specialized_dispatch)
else:
# This is safe because we pre-process name to be unique
self.install_global_unsafe(name, compiled_fn)
assert self.root_tx is not None
cg = PyCodegen(self.root_tx)
if has_user_objects():
# NB: This is where we store possible user objects before running the graph
# index_to_user_object_weakref is the function used in the graph to translate
# the dynamo-generated index into the actual object passed to the compiled function.
# We generate bytecode to store all user objects at the proper index in the below
# call.
cg.add_push_null(
lambda: cg.load_import_from(
torch._dynamo.graph_bytecode_inputs.__name__,
"store_user_object_weakrefs",
)
)
tmp_vars = []
for constructor in index_to_bytecode_constructor.values():
constructor(cg)
var_name = (
self.new_var()
) # keep alive any user objects for the rest of the frame
# TODO: we could omit this for objects we create but shouldn't be too much overhead for now
cg.store(var_name)
tmp_vars.append(var_name)
for var_name in tmp_vars:
cg.append_output(cg.create_load(var_name))
cg.call_function(len(index_to_bytecode_constructor), False)
cg.pop_top()
for idx, arg in enumerate(self.graphargs):
self.export_metadata.graph_input_idx_to_local_source[idx] = arg.source
cg.make_call_generated_code(name)
return cg.get_instructions()
@property
def placeholders(self) -> list[fx.Node]:
return self.graph.find_nodes(op="placeholder")
@property
def graphargs(self) -> list[GraphArg]:
return [node.meta["grapharg"] for node in self.placeholders]
def call_user_compiler(
self, gm: fx.GraphModule, example_inputs: list[Tensor]
) -> CompiledFn:
with dynamo_timed(
"OutputGraph.call_user_compiler",
phase_name="backend_compile",
log_pt2_compile_event=True,
log_waitcounter=True,
waitcounter_name_override="compile_aot_autograd",
dynamo_compile_column_us="aot_autograd_cumulative_compile_time_us",
):
return self._call_user_compiler(gm, example_inputs)
def _call_user_compiler(
self, gm: fx.GraphModule, example_inputs: list[Tensor]
) -> CompiledFn:
assert self.compiler_fn is not None
tot = 0
placeholders = []
for node in gm.graph.nodes:
if node.op in ("call_function", "call_method", "call_module"):
tot += 1
if node.op == "placeholder":
placeholders.append(node)
increment_op_count(tot)
for pl in placeholders:
if not hasattr(pl, "_dynamo_source"):
arg = pl.meta["grapharg"]
# TODO: Why isn't this stored in meta :think:
# NOTE: can't move these into meta: https://github.com/pytorch/pytorch/issues/141640
pl._dynamo_source = arg.source
# NOTE: can't move these into meta: https://github.com/pytorch/pytorch/issues/141640
gm._param_name_to_source = self.param_name_to_source # type: ignore[assignment]
gm._source_to_user_stacks = self.source_to_user_stacks # type: ignore[assignment]
name = (
self.compiler_fn.__name__
if hasattr(self.compiler_fn, "__name__")
else "<unknown compiler_fn>"
)
try:
_step_logger()(logging.INFO, f"calling compiler function {name}")
compiler_fn = self.compiler_fn
if config.verify_correctness:
compiler_fn = WrapperBackend(compiler_fn)
compiled_fn = compiler_fn(gm, example_inputs)
_step_logger()(logging.INFO, f"done compiler function {name}")
assert callable(compiled_fn), "compiler_fn did not return callable"
except (TensorifyScalarRestartAnalysis, ShortenTraceback):
raise
except exceptions_allowed_to_be_fallback as e:
if self.has_user_defined_allowed_in_graph:
raise BackendCompilerFailed(
self.compiler_fn, e, inspect.currentframe()
).with_traceback(e.__traceback__) from None
unimplemented_with_warning(
e,
self.root_tx.f_code,
gb_type="Backend compiler exception",
context=f"Backend: {name}\nException:{str(e)}\nTraceback:\n{self.root_tx.format_frame_summary()}",
explanation=f"Backend compiler `{name}` failed with {str(e)}. Adding a graph break.",
hints=[
"Report an issue to the backend compiler repo.",
],
)
except SkipFrame as e:
# The backend compiler has requested that we skip the frame, instead of
# aborting execution.
raise e
except Exception as e:
raise BackendCompilerFailed(
self.compiler_fn, e, inspect.currentframe()
).with_traceback(e.__traceback__) from None
signpost_event(
"dynamo",
"OutputGraph.call_user_compiler",
{
**self.co_fields,
"op_count": tot,
"node_count": len(gm.graph.nodes),
"input_count": len(placeholders),
},
)
# pyrefly: ignore [unbound-name]
return compiled_fn
def dedup_pass(self) -> dict[str, torch.fx.GraphModule]:
if torch._dynamo.config.use_graph_deduplication:
return apply_graph_deduplication(self)
else:
return {}
def install_subgraph(self, name: str, sub_gm: torch.fx.GraphModule) -> str:
next_name = get_unique_name_wrt(name, self.nn_modules, requires_suffix=True)
sub_gm.__name__ = next_name # type: ignore[assignment]
sub_gm.torchdynamo_force_dynamic = False # type: ignore[assignment]
# This graph module is not present in the user space, so it can't be
# accessed by a source. Set source=None.
self.register_attr_or_module(sub_gm, next_name, source=None)
return next_name
def example_inputs(self) -> list[torch.Tensor]:
result = [arg.example for arg in self.graphargs]
return result
def remove_unused_get_attr_nodes(self) -> None:
for node in sorted(self.graph.find_nodes(op="get_attr"), reverse=True):
if len(list(node.users)) == 0:
self.remove_node(node)
def remove_unused_graphargs(self) -> None:
# NB: It's OK to drop GraphArg for symbols that ended up being
# specialized iff they are not used in runtime assertions. You don't
# even have to make a guard for it, because ShapeEnv produce_guards
# operates on tracked_fakes, which never gets pruned.
# That being said, you'll get marginally better generated
# guard code if you promote the guard into a Dynamo guard (since that
# allows for the guard to be done using C++ guards.) If we get
# ShapeEnv guards to go into C++ guards, this will stop being a thing
# though!
assert self.should_exit
# Miniature DCE pass, but only for obviously trivial operations
def is_static_true(b_node: fx.node.Argument) -> bool:
if b_node is True:
return True
if not isinstance(b_node, fx.Node):
return False
b = b_node.meta.get("example_value")
if b is None:
return False
if b is True:
return True
if (
isinstance(b, torch.SymBool)
and (r := b.node.maybe_as_bool()) is not None
):
return r
# TODO: We can also technically remove all cases when the input
# doesn't have unbacked inputs, since it's all in the ShapeEnv
return False
def is_symnode_arg(a: fx.node.Argument) -> bool:
from torch.fx.experimental.sym_node import SymTypes
if isinstance(a, (int, float, bool)):
return True
if isinstance(a, fx.Node):
return isinstance(a.meta.get("example_value"), SymTypes)
return False
# NB: We assume that you cannot do mutations on int/float/bool,
# because they are immutable types, and therefore is always safe to
# DCE.
def is_symnode_compute_node(node: fx.Node) -> bool:
from torch.fx.experimental.sym_node import SymTypes
if node.op != "call_function":
return False
# TODO: I don't think it's possible to have a bare int/float here?
if not isinstance(node.meta.get("example_value"), SymTypes):
return False
# TODO: This will bail here if you ever end up with a more complicated
# computation function, like sum(list_of_ints), even though it
# should be DCE'able
if not all(is_symnode_arg(a) for a in node.args):
return False
if not all(is_symnode_arg(a) for a in node.kwargs.values()):
return False
return True
from torch.fx.experimental.symbolic_shapes import is_accessor_node
for node in reversed(list(self.graph.nodes)):
if len(list(node.users)) == 0:
if (
node.op == "get_attr"
or (node.op == "call_function" and node.target is operator.getitem)
or (
node.op == "call_function"
and node.target is torch._check
and is_static_true(node.args[0])
)
or is_symnode_compute_node(node)
or is_accessor_node(node)
):
self.remove_node(node)
def placeholder_binds_symbol(node: fx.Node) -> Optional[sympy.Symbol]:
arg = node.meta["grapharg"]
example = arg.example
if isinstance(example, torch.SymInt) and isinstance(
example.node.expr, sympy.Symbol
):
return example.node.expr
return None
def remove_unused(node: fx.Node) -> None:
log.debug("REMOVE UNUSED GRAPHARG %s", node.meta["grapharg"].source.name())
# I'm not really sure why you need to delete these from the
# node since the node is going to get removed
del node.meta["grapharg"]
self.remove_node(node)
self.real_value_cache.pop(node, None)
used_symbols: set[sympy.Symbol] = set()
def update_used_symbols(
used_symbols: set[sympy.Symbol], fake: Union[torch.SymInt, torch.Tensor]
) -> None:
used_symbols |= free_symbols(fake)
recheck_placeholders = []
for node in self.placeholders:
binds_symbol = placeholder_binds_symbol(node) is not None
# Don't delete symbol bindings yet
if binds_symbol:
if not node.users:
recheck_placeholders.append(node)
else:
if not node.users and not isinstance(
node.meta["grapharg"], BackwardStateGraphArg
):
remove_unused(node)
else:
# Register the free symbols as uses
arg = node.meta["grapharg"]
if isinstance(arg, BackwardStateGraphArg):
continue
if isinstance(node.meta["grapharg"].example, torch.ScriptObject):
real_script_obj = node.meta["grapharg"].example
fake_script_obj = node.meta["grapharg"].example_strong_ref
if not torch._library.fake_class_registry.tracing_with_real(
real_script_obj
):
flat_dict = dict(real_script_obj.__obj_flatten__()) # type: ignore[attr-defined]
for attr in flat_dict:
fake_attr_val = getattr(
fake_script_obj.wrapped_obj, attr
)
pytree.tree_map_only(
(torch.SymInt, torch.Tensor),
lambda t: update_used_symbols(used_symbols, t),
fake_attr_val,
)
continue
if is_opaque_type(type(node.meta["grapharg"].example)):
continue
fake = (
arg.fake_tensor if arg.fake_tensor is not None else arg.example
)
update_used_symbols(used_symbols, fake)
# After removing unused graphargs, prune unused binds_symbol
for node in recheck_placeholders:
symbol = placeholder_binds_symbol(node)
if symbol is not None:
if symbol not in used_symbols:
remove_unused(node)
else:
# Make sure we delete later occurrences of the same symbol
used_symbols.remove(symbol)
def remove_tensorify_specialized_graphargs(self) -> None:
# This is a pretty interesting function. Basically we have this problem
# where our compiler tends to choke when we have unused inputs. The way
# we support dynamic float arguments is by doing a joint fx pass and
# tensorifying away as many symfloats as we can. For the remaining symfloats
# we have no choice but to specialize... HOWEVER at that point in time
# we can no longer remove graph inputs. So our sledgehammer solution is to
# save the state of what inputs we should have specialized in dynamo and
# restart analysis. This function incorporates this "view from the future"
# state and specializes inputs that we know we won't be able to tensorify
# away in the joint pass. In principle we shouldn't choke on unused inputs
# and so this shouldn't be necessary. In practice CUDA graphs choke on
# unused inputs so we need this for now.
# Import here to prevent circular import
from torch._dynamo.symbolic_convert import TensorifyState
for node in self.graph.nodes:
example_value = node.meta.get("example_value")
if (
isinstance(example_value, FakeTensor)
and example_value.item_memo is not None
and hasattr(example_value.item_memo.node._expr, "name")
and all(u.target == "item" for u in node.users)
and TensorifyState.should_specialize(
# We use _expr instead of expr b/c we want the symbol not the replacement
example_value.item_memo.node._expr.name
)
):
for u in list(node.users):
u.replace_all_uses_with(guard_scalar(example_value.item_memo))
self.remove_node(u)
self.remove_node(node)
def add_output_instructions(self, prefix: list[Instruction]) -> None:
"""
We call this on the creation of a new compiled subgraph that is inserted
before user code.
"""
self.output_instructions.extend(prefix)
self.should_exit = True
def install_global_unsafe(self, name: str, value: Any) -> None:
"""
WARNING: prefer the safer `install_global_by_id/install_global`.
torch.compile instances should be independent of each other;
one footgun is to have one instance depend on the existence of
a global installed by another instance. This can happen if we mangle
a global the same way across both instances.
"""
assert name not in self.installed_globals
self.installed_globals.add(name)
self.cleanups.append(CleanupHook.create(self.global_scope, name, value))
def install_global_by_id(self, prefix: str, value: Any) -> str:
"""
Installs a global if it hasn't been installed already.
This is determined by (prefix, id(value)) pair.
Returns the name of the newly installed global.
"""
# NB: need self.compile_id to distinguish this global
# from another global created in a different torch.compile instance
name = f"{prefix}_{id(value)}_c{self.compile_id}"
if name in self.installed_globals:
return name
self.install_global_unsafe(name, value)
return name
def install_global(self, prefix: str, value: Any) -> str:
"""
Installs a global, generating a unique name for it.
Returns the name of the newly installed global.
"""
# NB: unique_id is unique, even across torch.compile instances
name = unique_id(prefix)
self.install_global_unsafe(name, value)
return name
def cleanup(self) -> None:
# There is a reference cycle between tracer and OutputGraph, causing
# some of the tensor objects to be held alive for longer than necessary.
self.root_tx = None # type: ignore[assignment]
self.nn_modules.clear()
self.used_inlined_inbuilt_modules_names.clear()
self.param_name_to_source = None
for node in self.graph.nodes:
if "grapharg" in node.meta:
del node.meta["grapharg"]
self.real_value_cache.clear()
self.input_name_to_proxy.clear()
self.side_effects.clear()
self.variable_tracker_cache.clear()
self.register_finalizer_fns.clear()
self.dynamo_flat_name_to_original_fqn.clear()
self.tracing_context.clear()
self.input_source_to_var.clear()
self.unspec_variable_map.clear()
self.backward_state.clear()
def add_graph_finalizer(
self, register_finalizer: Callable[[fx.GraphModule], None]
) -> None:
self.register_finalizer_fns.append(register_finalizer)
def example_value_from_input_node(self, node: torch.fx.Node) -> Any:
"""Extract the non-fake example tensor"""
if node.op == "placeholder":
return node.meta["grapharg"].example
assert node.op == "get_attr"
return self.nn_modules[node.target] # type: ignore[index]
def add_fqn_info_for_inlined_modules(
self, inlined_module: torch.nn.Module, source: Source
) -> None:
name = OutputGraph.module_key_name(source.name())
name = get_unique_name_wrt(
name, self.used_inlined_inbuilt_modules_names, self.global_scope
)
self.used_inlined_inbuilt_modules_names.add(name)
def register_leaf_name(leaf_name: str) -> None:
assert self.param_name_to_source is not None
new_source = ParamBufferSource(source, leaf_name)
new_name = f"{name}.{leaf_name}"
self.param_name_to_source[new_name] = new_source
if isinstance(source, LocalSource):
self.dynamo_flat_name_to_original_fqn[
OutputGraph.module_key_name(new_source.name())
] = leaf_name
# annoying, but there are cases when we do not have parameters
# see test_nn_moduledict_contains
if hasattr(inlined_module, "_parameters"):
if (
callable(inlined_module.named_parameters)
and inlined_module.named_parameters.__func__ # type: ignore[attr-defined]
is og_module_named_parameters_fn_ptr
):
for leaf_name, _ in inlined_module.named_parameters():
register_leaf_name(leaf_name)
if hasattr(inlined_module, "_buffers"):
if (
callable(inlined_module.named_buffers)
and inlined_module.named_buffers.__func__ # type: ignore[attr-defined]
is og_module_named_buffers_fn_ptr
):
for leaf_name, _ in inlined_module.named_buffers():
register_leaf_name(leaf_name)
| OutputGraph |
python | dask__dask | dask/dataframe/dask_expr/_reductions.py | {
"start": 41116,
"end": 41502
} | class ____(NLargest):
_parameters = ["frame", "n", "_columns", "ascending", "split_every"]
_defaults = {"n": 5, "_columns": None, "ascending": None, "split_every": None}
reduction_chunk = staticmethod(_nfirst)
reduction_aggregate = staticmethod(_nfirst)
@property
def chunk_kwargs(self):
return {"ascending": self.ascending, **super().chunk_kwargs}
| NFirst |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 281829,
"end": 282156
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("DeploymentStatus", graphql_name="node")
| DeploymentStatusEdge |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclass5.py | {
"start": 839,
"end": 1122
} | class ____:
x: int
def __eq__(self, x: "E") -> float:
return 1.23
def __lt__(self, x: "E") -> str:
return ""
foo1 = E(3) == E(3)
reveal_type(foo1, expected_text="float")
foo2 = E(3) < E(3)
reveal_type(foo2, expected_text="str")
@dataclass(order=True)
| E |
python | ZoranPandovski__al-go-rithms | data_structures/binarySearch_tree/Python/binary_search_tree.py | {
"start": 1124,
"end": 3705
} | class ____():
def __init__(self, key, value=None):
self.key = key
self.value = value
self.left = None
self.right = None
self.parent = None
# creating a function to visualize a tree easily
def display_keys(node, space='\t', level=0):
# print(node.key if node else None, level)
# If the node is empty
if node is None:
print(space*level + '∅')
return
# If the node is a leaf
if node.left is None and node.right is None:
print(space*level + str(node.key))
return
# If the node has children
display_keys(node.right, space, level+1)
print(space*level + str(node.key))
display_keys(node.left,space, level+1)
# creating a function to find out the size of the tree
def tree_size(node):
if node is None:
return 0
return 1 + tree_size(node.left) + tree_size(node.right)
# creating a function to insert a value in a BST
def insert(node, key, value):
if node is None:
node = BSTNode(key, value)
elif key < node.key:
node.left = insert(node.left, key, value)
node.left.parent = node
elif key > node.key:
node.right = insert(node.right, key, value)
node.right.parent = node
return node
# creating a function to find a node given the key(username)
def find(node, key):
if node is None:
return None
if key == node.key:
return node
if key < node.key:
return find(node.left, key)
if key > node.key:
return find(node.right, key)
# creating a function to update a node value
def update(node, key, value):
target = find(node, key)
if target is not None:
target.value = value
# creating a function to list all the nodes in the tree
def list_all(node):
if node is None:
return []
return list_all(node.left) + [(node.key, node.value)] + list_all(node.right)
# creating a function to make a balanced BST (can replace insert operation)
def make_balanced_bst(data, lo=0, hi=None, parent=None):
if hi is None:
hi = len(data) - 1
if lo > hi:
return None
mid = (lo + hi) // 2
key, value = data[mid]
root = BSTNode(key, value)
root.parent = parent
root.left = make_balanced_bst(data, lo, mid-1, root)
root.right = make_balanced_bst(data, mid+1, hi, root)
return root
# creating a function to balance an unbalanced tree
def balance_bst(node):
return make_balanced_bst(list_all(node))
# summarizing all helper functions into a tree map
| BSTNode |
python | paramiko__paramiko | paramiko/kex_gex.py | {
"start": 10219,
"end": 10320
} | class ____(KexGex):
name = "diffie-hellman-group-exchange-sha256"
hash_algo = sha256
| KexGexSHA256 |
python | django__django | tests/backends/mysql/test_introspection.py | {
"start": 1399,
"end": 2727
} | class ____(TestCase):
databases = {"default", "other"}
def test_get_storage_engine(self):
table_name = "test_storage_engine"
create_sql = "CREATE TABLE %s (id INTEGER) ENGINE = %%s" % table_name
drop_sql = "DROP TABLE %s" % table_name
default_connection = connections["default"]
other_connection = connections["other"]
try:
with default_connection.cursor() as cursor:
cursor.execute(create_sql % "InnoDB")
self.assertEqual(
default_connection.introspection.get_storage_engine(
cursor, table_name
),
"InnoDB",
)
with other_connection.cursor() as cursor:
cursor.execute(create_sql % "MyISAM")
self.assertEqual(
other_connection.introspection.get_storage_engine(
cursor, table_name
),
"MyISAM",
)
finally:
with default_connection.cursor() as cursor:
cursor.execute(drop_sql)
with other_connection.cursor() as cursor:
cursor.execute(drop_sql)
@skipUnless(connection.vendor == "mysql", "MySQL specific SQL")
| StorageEngineTests |
python | tensorflow__tensorflow | tensorflow/python/keras/saving/utils_v1/export_output.py | {
"start": 7136,
"end": 8127
} | class ____(ExportOutput):
"""Represents the output of a generic prediction head.
A generic prediction need not be either a classification or a regression.
Named outputs must be provided as a dict from string to `Tensor`,
"""
_SINGLE_OUTPUT_DEFAULT_NAME = 'output'
def __init__(self, outputs):
"""Constructor for PredictOutput.
Args:
outputs: A `Tensor` or a dict of string to `Tensor` representing the
predictions.
Raises:
ValueError: if the outputs is not dict, or any of its keys are not
strings, or any of its values are not `Tensor`s.
"""
self._outputs = self._wrap_and_check_outputs(
outputs, self._SINGLE_OUTPUT_DEFAULT_NAME, error_label='Prediction')
@property
def outputs(self):
return self._outputs
def as_signature_def(self, receiver_tensors):
return signature_def_utils.predict_signature_def(receiver_tensors,
self.outputs)
| PredictOutput |
python | spyder-ide__spyder | external-deps/qtconsole/qtconsole/manager.py | {
"start": 321,
"end": 747
} | class ____(KernelRestarter, QtKernelRestarterMixin):
def start(self):
if self._timer is None:
self._timer = QtCore.QTimer()
self._timer.timeout.connect(self.poll)
self._timer.start(round(self.time_to_dead * 1000))
def stop(self):
self._timer.stop()
def poll(self):
super().poll()
def reset_count(self):
self._restart_count = 0
| QtKernelRestarter |
python | kamyu104__LeetCode-Solutions | Python/apple-redistribution-into-boxes.py | {
"start": 48,
"end": 434
} | class ____(object):
def minimumBoxes(self, apple, capacity):
"""
:type apple: List[int]
:type capacity: List[int]
:rtype: int
"""
capacity.sort(reverse=True)
total = sum(apple)
for i in xrange(len(capacity)):
total -= capacity[i]
if total <= 0:
return i+1
return -1
| Solution |
python | explosion__spaCy | spacy/lang/am/__init__.py | {
"start": 734,
"end": 830
} | class ____(Language):
lang = "am"
Defaults = AmharicDefaults
__all__ = ["Amharic"]
| Amharic |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/dagster_run.py | {
"start": 4812,
"end": 8635
} | class ____(NamedTupleSerializer["DagsterRun"]):
# serdes log
# * removed reexecution_config - serdes logic expected to strip unknown keys so no need to preserve
# * added pipeline_snapshot_id
# * renamed previous_run_id -> parent_run_id, added root_run_id
# * added execution_plan_snapshot_id
# * removed selector
# * added solid_subset
# * renamed solid_subset -> solid_selection, added solids_to_execute
# * renamed environment_dict -> run_config
# * added asset_selection
# * added has_repository_load_data
def before_unpack(self, context, unpacked_dict: dict[str, Any]) -> dict[str, Any]:
# back compat for environment dict => run_config
if "environment_dict" in unpacked_dict:
check.invariant(
unpacked_dict.get("run_config") is None,
"Cannot set both run_config and environment_dict. Use run_config parameter.",
)
unpacked_dict["run_config"] = unpacked_dict["environment_dict"]
del unpacked_dict["environment_dict"]
# back compat for previous_run_id => parent_run_id, root_run_id
if "previous_run_id" in unpacked_dict and not (
"parent_run_id" in unpacked_dict and "root_run_id" in unpacked_dict
):
unpacked_dict["parent_run_id"] = unpacked_dict["previous_run_id"]
unpacked_dict["root_run_id"] = unpacked_dict["previous_run_id"]
del unpacked_dict["previous_run_id"]
# back compat for selector => pipeline_name, solids_to_execute
if "selector" in unpacked_dict:
selector = unpacked_dict["selector"]
if not isinstance(selector, ExecutionSelector):
check.failed(f"unexpected entry for 'select', {selector}")
selector_name = selector.name
selector_subset = selector.solid_subset
job_name = unpacked_dict.get("pipeline_name")
check.invariant(
job_name is None or selector_name == job_name,
f"Conflicting pipeline name {job_name} in arguments to PipelineRun: "
f"selector was passed with pipeline {selector_name}",
)
if job_name is None:
unpacked_dict["pipeline_name"] = selector_name
solids_to_execute = unpacked_dict.get("solids_to_execute")
check.invariant(
solids_to_execute is None
or (selector_subset and set(selector_subset) == solids_to_execute),
f"Conflicting solids_to_execute {solids_to_execute} in arguments to"
f" PipelineRun: selector was passed with subset {selector_subset}",
)
# for old runs that only have selector but no solids_to_execute
if solids_to_execute is None:
solids_to_execute = frozenset(selector_subset) if selector_subset else None
# back compat for solid_subset => solids_to_execute
if "solid_subset" in unpacked_dict:
unpacked_dict["solids_to_execute"] = unpacked_dict["solid_subset"]
del unpacked_dict["solid_subset"]
return unpacked_dict
@whitelist_for_serdes(
serializer=DagsterRunSerializer,
# DagsterRun is serialized as PipelineRun so that it can be read by older (pre 0.13.x) version
# of Dagster, but is read back in as a DagsterRun.
storage_name="PipelineRun",
old_fields={"mode": None},
storage_field_names={
"job_name": "pipeline_name",
"job_snapshot_id": "pipeline_snapshot_id",
"remote_job_origin": "external_pipeline_origin",
"job_code_origin": "pipeline_code_origin",
"op_selection": "solid_selection",
"resolved_op_selection": "solids_to_execute",
},
)
@public
@record_custom
| DagsterRunSerializer |
python | jina-ai__jina | tests/unit/orchestrate/flow/flow-orchestrate/test_flow_complex_topology.py | {
"start": 1386,
"end": 1768
} | class ____(Executor):
@requests
def bar(self, docs, **kwargs):
for doc in docs:
doc.text += 'bar'
def test_flow_to_flow():
with Flow().add(uses=FooExec) as external_flow:
with Flow().add(external=True, port=external_flow.port).add(uses=BarExec) as f:
docs = f.search(Document())
assert docs.texts == ['foobar']
| BarExec |
python | django__django | tests/generic_views/test_base.py | {
"start": 10966,
"end": 15540
} | class ____(SimpleTestCase):
rf = RequestFactory()
def _assert_about(self, response):
response.render()
self.assertContains(response, "<h1>About</h1>")
def test_get(self):
"""
Test a view that simply renders a template on GET
"""
self._assert_about(AboutTemplateView.as_view()(self.rf.get("/about/")))
def test_head(self):
"""
Test a TemplateView responds correctly to HEAD
"""
response = AboutTemplateView.as_view()(self.rf.head("/about/"))
self.assertEqual(response.status_code, 200)
def test_get_template_attribute(self):
"""
Test a view that renders a template on GET with the template name as
an attribute on the class.
"""
self._assert_about(AboutTemplateAttributeView.as_view()(self.rf.get("/about/")))
def test_get_generic_template(self):
"""
Test a completely generic view that renders a template on GET
with the template name as an argument at instantiation.
"""
self._assert_about(
TemplateView.as_view(template_name="generic_views/about.html")(
self.rf.get("/about/")
)
)
def test_template_name_required(self):
"""
A template view must provide a template name.
"""
msg = (
"TemplateResponseMixin requires either a definition of "
"'template_name' or an implementation of 'get_template_names()'"
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.client.get("/template/no_template/")
@require_jinja2
def test_template_engine(self):
"""
A template view may provide a template engine.
"""
request = self.rf.get("/using/")
view = TemplateView.as_view(template_name="generic_views/using.html")
self.assertEqual(view(request).render().content, b"DTL\n")
view = TemplateView.as_view(
template_name="generic_views/using.html", template_engine="django"
)
self.assertEqual(view(request).render().content, b"DTL\n")
view = TemplateView.as_view(
template_name="generic_views/using.html", template_engine="jinja2"
)
self.assertEqual(view(request).render().content, b"Jinja2\n")
def test_template_params(self):
"""
A generic template view passes kwargs as context.
"""
response = self.client.get("/template/simple/bar/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["foo"], "bar")
self.assertIsInstance(response.context["view"], View)
def test_extra_template_params(self):
"""
A template view can be customized to return extra context.
"""
response = self.client.get("/template/custom/bar/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["foo"], "bar")
self.assertEqual(response.context["key"], "value")
self.assertIsInstance(response.context["view"], View)
def test_cached_views(self):
"""
A template view can be cached
"""
response = self.client.get("/template/cached/bar/")
self.assertEqual(response.status_code, 200)
time.sleep(1.0)
response2 = self.client.get("/template/cached/bar/")
self.assertEqual(response2.status_code, 200)
self.assertEqual(response.content, response2.content)
time.sleep(2.0)
# Let the cache expire and test again
response2 = self.client.get("/template/cached/bar/")
self.assertEqual(response2.status_code, 200)
self.assertNotEqual(response.content, response2.content)
def test_content_type(self):
response = self.client.get("/template/content_type/")
self.assertEqual(response.headers["Content-Type"], "text/plain")
def test_resolve_view(self):
match = resolve("/template/content_type/")
self.assertIs(match.func.view_class, TemplateView)
self.assertEqual(match.func.view_initkwargs["content_type"], "text/plain")
def test_resolve_login_required_view(self):
match = resolve("/template/login_required/")
self.assertIs(match.func.view_class, TemplateView)
def test_extra_context(self):
response = self.client.get("/template/extra_context/")
self.assertEqual(response.context["title"], "Title")
@override_settings(ROOT_URLCONF="generic_views.urls")
| TemplateViewTest |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/tests/llama_index/test_vectorstore.py | {
"start": 3326,
"end": 13278
} | class ____:
"""Integration tests for the AzurePGVectorStore implementation.
Covers table creation, initialization via parameters, CRUD operations,
and similarity queries against seeded data in the test database.
"""
def test_table_creation_success(
self, vectorstore: AzurePGVectorStore, table: Table
):
"""Verify the database table is created with the expected columns."""
with (
vectorstore.connection_pool.connection() as conn,
conn.cursor(row_factory=dict_row) as cursor,
):
cursor.execute(
_GET_TABLE_COLUMNS_AND_TYPES,
{
"schema_name": table.schema_name,
"table_name": table.table_name,
},
)
resultset = cursor.fetchall()
verify_table_created(table, resultset)
def test_vectorstore_initialization_from_params(
self,
connection_pool: ConnectionPool,
schema: str,
):
"""Create a store using class factory `from_params` and assert type."""
table_name = "vs_init_from_params"
embedding_dimension = 3
diskann = DiskANN(
op_class="vector_cosine_ops",
max_neighbors=32,
l_value_ib=100,
l_value_is=100,
)
vectorstore = AzurePGVectorStore.from_params(
connection_pool=connection_pool,
schema_name=schema,
table_name=table_name,
embed_dim=embedding_dimension,
embedding_index=diskann,
)
assert isinstance(vectorstore, AzurePGVectorStore)
def test_get_nodes(
self,
vectorstore: AzurePGVectorStore,
):
"""Retrieve all nodes and assert expected seeded node count."""
in_nodes = vectorstore.get_nodes()
assert len(in_nodes) == 4, "Retrieved node count does not match expected"
@pytest.mark.parametrize(
["node_tuple", "expected"],
[
("node-success", nullcontext(AzurePGVectorStore)),
("node-not-found", pytest.raises(IndexError)),
],
indirect=["node_tuple"],
ids=[
"success",
"not-found",
],
)
def test_get_nodes_with_ids(
self,
vectorstore: AzurePGVectorStore,
node_tuple: tuple[TextNode, str | None],
expected: nullcontext[AzurePGVectorStore] | pytest.RaisesExc,
):
"""Retrieve nodes by ID and validate returned node matches expected."""
node, expected_node_id = node_tuple
in_nodes = vectorstore.get_nodes([node.node_id])
with expected:
assert expected_node_id == in_nodes[0].node_id, (
"Retrieved node ID does not match expected"
)
@pytest.mark.parametrize(
["node_tuple", "expected"],
[
("node-success", nullcontext(AzurePGVectorStore)),
# ("node-failure", pytest.raises(AssertionError)),
],
indirect=["node_tuple"],
ids=[
"success",
# "failure",
],
)
def test_add(
self,
vectorstore: AzurePGVectorStore,
node_tuple: tuple[TextNode, str | None],
expected: nullcontext[AzurePGVectorStore] | pytest.RaisesExc,
):
"""Add a node to the store and assert the returned ID matches."""
node, expected_node_id = node_tuple
with expected:
assert node.node_id is not None, "Node ID must be provided for this test"
returned_ids = vectorstore.add([node])
assert returned_ids[0] == expected_node_id, "Inserted text IDs do not match"
@pytest.mark.parametrize(
["doc_id"],
[
("1",),
("10",),
],
ids=["existing", "non-existing"],
)
def test_delete(
self,
vectorstore: AzurePGVectorStore,
doc_id: str,
):
"""Delete a node by reference doc id and assert it was removed."""
vectorstore.delete(doc_id)
with (
vectorstore.connection_pool.connection() as conn,
conn.cursor(row_factory=dict_row) as cursor,
):
cursor.execute(
sql.SQL(
"""
select {metadata} ->> 'doc_id' as doc_id
from {table_name}
"""
).format(
metadata=sql.Identifier(vectorstore.metadata_columns),
table_name=sql.Identifier(
vectorstore.schema_name, vectorstore.table_name
),
)
)
resultset = cursor.fetchall()
remaining_set = set(str(r["doc_id"]) for r in resultset)
assert doc_id not in remaining_set, (
"Deleted document IDs should not exist in the remaining set"
)
@pytest.mark.parametrize(
["node_tuple"],
[
("node-success",),
("node-not-found",),
],
indirect=["node_tuple"],
ids=[
"success",
"not-found",
],
)
def test_delete_nodes(
self,
vectorstore: AzurePGVectorStore,
node_tuple: tuple[TextNode, str | None],
):
"""Delete a list of node IDs and assert they are removed from the table."""
node, expected_node_id = node_tuple
vectorstore.delete_nodes([node.node_id])
with (
vectorstore.connection_pool.connection() as conn,
conn.cursor(row_factory=dict_row) as cursor,
):
cursor.execute(
sql.SQL(
"""
select {id_column} as node_id
from {table_name}
"""
).format(
id_column=sql.Identifier(vectorstore.id_column),
table_name=sql.Identifier(
vectorstore.schema_name, vectorstore.table_name
),
)
)
resultset = cursor.fetchall()
remaining_set = set(str(r["node_id"]) for r in resultset)
assert expected_node_id not in remaining_set, (
"Deleted document IDs should not exist in the remaining set"
)
def test_clear(
self,
vectorstore: AzurePGVectorStore,
):
"""Clear all nodes from the underlying table and verify none remain."""
vectorstore.clear()
with (
vectorstore.connection_pool.connection() as conn,
conn.cursor(row_factory=dict_row) as cursor,
):
cursor.execute(
sql.SQL(
"""
select {id_column} as node_id
from {table_name}
"""
).format(
id_column=sql.Identifier(vectorstore.id_column),
table_name=sql.Identifier(
vectorstore.schema_name, vectorstore.table_name
),
)
)
resultset = cursor.fetchall()
remaining_set = set(str(r["node_id"]) for r in resultset)
assert not remaining_set, "All document IDs should have been deleted"
@pytest.mark.parametrize(
["query", "embedding", "k", "filters", "mode"],
[
("query about cats", [0.99] * 1536, 2, None, None),
("query about cats", [0.99] * 1536, 2, None, "hybrid"),
("query about animals", [0.5] * 1536, 3, None, None),
("query about cats", [0.99] * 1536, 2, "filter1", None),
("query about cats", [0.99] * 1536, 2, "filter2", None),
],
indirect=["filters"],
ids=[
"search-cats",
"search-cats-hybrid",
"search-animals",
"search-cats-filtered",
"search-cats-multifiltered",
],
)
def test_query(
self,
vectorstore: AzurePGVectorStore,
query: str,
embedding: list[float],
k: int,
filters: MetadataFilters | None,
mode: str | None,
):
"""Run a similarity query and assert returned documents match expectations.
Tests multiple query types (cats/animals) and optional metadata
filters to ensure the vector search returns relevant documents and
that filtering works as intended.
"""
vsquery = VectorStoreQuery(
query_str=query,
query_embedding=embedding,
similarity_top_k=k,
filters=filters,
mode=(
VectorStoreQueryMode.HYBRID
if mode == "hybrid"
else VectorStoreQueryMode.DEFAULT
),
)
results = vectorstore.query(query=vsquery)
results = results.nodes
contents = [row.get_content() for row in results]
if ("cats" in query) or ("animals" in query):
assert len(results) == k, f"Expected {k} results"
assert any("cats" in c for c in contents) or any(
"tigers" in c for c in contents
), (
f"Expected 'cats' or 'tigers' in retrieved documents' contents for query: {query}"
)
if "cats" in query:
assert all("dogs" not in c for c in contents), (
f"Expected 'dogs' not to be in retrieved documents' contents for query: {query}"
)
elif "animals" in query:
assert any("dogs" in c for c in contents), (
f"Expected 'dogs' to be in retrieved documents' contents for query: {query}"
)
assert all("plants" not in c for c in contents), (
f"Expected 'plants' not to be in retrieved documents' contents for query: {query}"
)
| TestAzurePGVectorStore |
python | ray-project__ray | python/ray/util/client/server/proxier.py | {
"start": 25728,
"end": 32810
} | class ____(ray_client_pb2_grpc.RayletDataStreamerServicer):
def __init__(self, proxy_manager: ProxyManager):
self.num_clients = 0
# dictionary mapping client_id's to the last time they connected
self.clients_last_seen: Dict[str, float] = {}
self.reconnect_grace_periods: Dict[str, float] = {}
self.clients_lock = Lock()
self.proxy_manager = proxy_manager
self.stopped = Event()
def modify_connection_info_resp(
self, init_resp: ray_client_pb2.DataResponse
) -> ray_client_pb2.DataResponse:
"""
Modify the `num_clients` returned the ConnectionInfoResponse because
individual SpecificServers only have **one** client.
"""
init_type = init_resp.WhichOneof("type")
if init_type != "connection_info":
return init_resp
modified_resp = ray_client_pb2.DataResponse()
modified_resp.CopyFrom(init_resp)
with self.clients_lock:
modified_resp.connection_info.num_clients = self.num_clients
return modified_resp
def Datapath(self, request_iterator, context):
request_iterator = RequestIteratorProxy(request_iterator)
cleanup_requested = False
start_time = time.time()
client_id = _get_client_id_from_context(context)
if client_id == "":
return
reconnecting = _get_reconnecting_from_context(context)
if reconnecting:
with self.clients_lock:
if client_id not in self.clients_last_seen:
# Client took too long to reconnect, session has already
# been cleaned up
context.set_code(grpc.StatusCode.NOT_FOUND)
context.set_details(
"Attempted to reconnect a session that has already "
"been cleaned up"
)
return
self.clients_last_seen[client_id] = start_time
server = self.proxy_manager._get_server_for_client(client_id)
channel = self.proxy_manager.get_channel(client_id)
# iterator doesn't need modification on reconnect
new_iter = request_iterator
else:
# Create Placeholder *before* reading the first request.
server = self.proxy_manager.create_specific_server(client_id)
with self.clients_lock:
self.clients_last_seen[client_id] = start_time
self.num_clients += 1
try:
if not reconnecting:
logger.info(f"New data connection from client {client_id}: ")
init_req = next(request_iterator)
with self.clients_lock:
self.reconnect_grace_periods[
client_id
] = init_req.init.reconnect_grace_period
try:
modified_init_req, job_config = prepare_runtime_init_req(init_req)
if not self.proxy_manager.start_specific_server(
client_id, job_config
):
logger.error(
f"Server startup failed for client: {client_id}, "
f"using JobConfig: {job_config}!"
)
raise RuntimeError(
"Starting Ray client server failed. See "
f"ray_client_server_{server.port}.err for "
"detailed logs."
)
channel = self.proxy_manager.get_channel(client_id)
if channel is None:
logger.error(f"Channel not found for {client_id}")
raise RuntimeError(
"Proxy failed to Connect to backend! Check "
"`ray_client_server.err` and "
f"`ray_client_server_{server.port}.err` on the "
"head node of the cluster for the relevant logs. "
"By default these are located at "
"/tmp/ray/session_latest/logs."
)
except Exception:
init_resp = ray_client_pb2.DataResponse(
init=ray_client_pb2.InitResponse(
ok=False, msg=traceback.format_exc()
)
)
init_resp.req_id = init_req.req_id
yield init_resp
return None
new_iter = chain([modified_init_req], request_iterator)
stub = ray_client_pb2_grpc.RayletDataStreamerStub(channel)
metadata = [("client_id", client_id), ("reconnecting", str(reconnecting))]
resp_stream = stub.Datapath(new_iter, metadata=metadata)
for resp in resp_stream:
resp_type = resp.WhichOneof("type")
if resp_type == "connection_cleanup":
# Specific server is skipping cleanup, proxier should too
cleanup_requested = True
yield self.modify_connection_info_resp(resp)
except Exception as e:
logger.exception("Proxying Datapath failed!")
# Propogate error through context
recoverable = _propagate_error_in_context(e, context)
if not recoverable:
# Client shouldn't attempt to recover, clean up connection
cleanup_requested = True
finally:
cleanup_delay = self.reconnect_grace_periods.get(client_id)
if not cleanup_requested and cleanup_delay is not None:
# Delay cleanup, since client may attempt a reconnect
# Wait on stopped event in case the server closes and we
# can clean up earlier
self.stopped.wait(timeout=cleanup_delay)
with self.clients_lock:
if client_id not in self.clients_last_seen:
logger.info(f"{client_id} not found. Skipping clean up.")
# Connection has already been cleaned up
return
last_seen = self.clients_last_seen[client_id]
logger.info(
f"{client_id} last started stream at {last_seen}. Current "
f"stream started at {start_time}."
)
if last_seen > start_time:
logger.info("Client reconnected. Skipping cleanup.")
# Client has reconnected, don't clean up
return
logger.debug(f"Client detached: {client_id}")
self.num_clients -= 1
del self.clients_last_seen[client_id]
if client_id in self.reconnect_grace_periods:
del self.reconnect_grace_periods[client_id]
server.set_result(None)
| DataServicerProxy |
python | django-haystack__django-haystack | haystack/query.py | {
"start": 350,
"end": 21870
} | class ____:
"""
Provides a way to specify search parameters and lazily load results.
Supports chaining (a la QuerySet) to narrow the search.
"""
def __init__(self, using=None, query=None):
# ``_using`` should only ever be a value other than ``None`` if it's
# been forced with the ``.using`` method.
self._using = using
self.query = None
self._determine_backend()
# If ``query`` is present, it should override even what the routers
# think.
if query is not None:
self.query = query
self._result_cache = []
self._result_count = None
self._cache_full = False
self._load_all = False
self._ignored_result_count = 0
self.log = logging.getLogger("haystack")
def _determine_backend(self):
# A backend has been manually selected. Use it instead.
if self._using is not None:
self.query = connections[self._using].get_query()
return
# No backend, so rely on the routers to figure out what's right.
hints = {}
if self.query:
hints["models"] = self.query.models
backend_alias = connection_router.for_read(**hints)
# The ``SearchQuery`` might swap itself out for a different variant
# here.
if self.query:
self.query = self.query.using(backend_alias)
else:
self.query = connections[backend_alias].get_query()
def __getstate__(self):
"""
For pickling.
"""
len(self)
obj_dict = self.__dict__.copy()
obj_dict["_iter"] = None
obj_dict["log"] = None
return obj_dict
def __setstate__(self, data_dict):
"""
For unpickling.
"""
self.__dict__ = data_dict
self.log = logging.getLogger("haystack")
def __repr__(self):
return "<SearchQuerySet: query=%r, using=%r>" % (self.query, self._using)
def __len__(self):
if self._result_count is None:
self._result_count = self.query.get_count()
# Some backends give weird, false-y values here. Convert to zero.
if not self._result_count:
self._result_count = 0
# This needs to return the actual number of hits, not what's in the cache.
return self._result_count - self._ignored_result_count
def __iter__(self):
if self._cache_is_full():
# We've got a fully populated cache. Let Python do the hard work.
return iter(self._result_cache)
return self._manual_iter()
def __and__(self, other):
if isinstance(other, EmptySearchQuerySet):
return other._clone()
combined = self._clone()
combined.query.combine(other.query, SQ.AND)
return combined
def __or__(self, other):
combined = self._clone()
if isinstance(other, EmptySearchQuerySet):
return combined
combined.query.combine(other.query, SQ.OR)
return combined
def _cache_is_full(self):
if not self.query.has_run():
return False
if len(self) <= 0:
return True
try:
self._result_cache.index(None)
return False
except ValueError:
# No ``None``s found in the results. Check the length of the cache.
return len(self._result_cache) > 0
def _manual_iter(self):
# If we're here, our cache isn't fully populated.
# For efficiency, fill the cache as we go if we run out of results.
# Also, this can't be part of the __iter__ method due to Python's rules
# about generator functions.
current_position = 0
current_cache_max = 0
while True:
if len(self._result_cache) > 0:
try:
current_cache_max = self._result_cache.index(None)
except ValueError:
current_cache_max = len(self._result_cache)
while current_position < current_cache_max:
yield self._result_cache[current_position]
current_position += 1
if self._cache_is_full():
return
# We've run out of results and haven't hit our limit.
# Fill more of the cache.
if not self._fill_cache(
current_position, current_position + ITERATOR_LOAD_PER_QUERY
):
return
def post_process_results(self, results):
to_cache = []
# Check if we wish to load all objects.
if self._load_all:
models_pks = {}
loaded_objects = {}
# Remember the search position for each result so we don't have to resort later.
for result in results:
models_pks.setdefault(result.model, []).append(result.pk)
# Load the objects for each model in turn.
for model in models_pks:
loaded_objects[model] = self._load_model_objects(
model, models_pks[model]
)
for result in results:
if self._load_all:
model_objects = loaded_objects.get(result.model, {})
# Try to coerce a primary key object that matches the models pk
# We have to deal with semi-arbitrary keys being cast from strings (UUID, int, etc)
if model_objects:
result_klass = type(next(iter(model_objects)))
result.pk = result_klass(result.pk)
try:
result._object = model_objects[result.pk]
except KeyError:
# The object was either deleted since we indexed or should
# be ignored for other reasons such as an overriden 'load_all_queryset';
# fail silently.
self._ignored_result_count += 1
# avoid an unfilled None at the end of the result cache
self._result_cache.pop()
continue
else:
# No objects were returned -- possible due to SQS nesting such as
# XYZ.objects.filter(id__gt=10) where the amount ignored are
# exactly equal to the ITERATOR_LOAD_PER_QUERY
del self._result_cache[:1]
self._ignored_result_count += 1
continue
to_cache.append(result)
return to_cache
def _load_model_objects(self, model, pks):
try:
ui = connections[self.query._using].get_unified_index()
index = ui.get_index(model)
objects = index.read_queryset(using=self.query._using)
return objects.in_bulk(pks)
except NotHandled:
self.log.warning("Model '%s' not handled by the routers.", model)
# Revert to old behaviour
return model._default_manager.in_bulk(pks)
def _fill_cache(self, start, end, **kwargs):
# Tell the query where to start from and how many we'd like.
self.query._reset()
if start is None:
start = 0
query_start = start
query_start += self._ignored_result_count
query_end = end
if query_end is not None:
query_end += self._ignored_result_count
self.query.set_limits(query_start, query_end)
results = self.query.get_results(**kwargs)
if results is None or len(results) == 0:
# trim missing stuff from the result cache
self._result_cache = self._result_cache[:start]
return False
# Setup the full cache now that we know how many results there are.
# We need the ``None``s as placeholders to know what parts of the
# cache we have/haven't filled.
# Using ``None`` like this takes up very little memory. In testing,
# an array of 100,000 ``None``s consumed less than .5 Mb, which ought
# to be an acceptable loss for consistent and more efficient caching.
if len(self._result_cache) == 0:
self._result_cache = [None] * self.query.get_count()
fill_start, fill_end = start, end
if fill_end is None:
fill_end = self.query.get_count()
cache_start = fill_start
while True:
to_cache = self.post_process_results(results)
# Assign by slice.
self._result_cache[cache_start : cache_start + len(to_cache)] = to_cache
if None in self._result_cache[start:end]:
fill_start = fill_end
fill_end += ITERATOR_LOAD_PER_QUERY
cache_start += len(to_cache)
# Tell the query where to start from and how many we'd like.
self.query._reset()
self.query.set_limits(fill_start, fill_end)
results = self.query.get_results()
if results is None or len(results) == 0:
# No more results. Trim missing stuff from the result cache
self._result_cache = self._result_cache[:cache_start]
break
else:
break
return True
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice, int)):
raise TypeError
assert (not isinstance(k, slice) and (k >= 0)) or (
isinstance(k, slice)
and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0)
), "Negative indexing is not supported."
# Remember if it's a slice or not. We're going to treat everything as
# a slice to simply the logic and will `.pop()` at the end as needed.
if isinstance(k, slice):
is_slice = True
start = k.start
if k.stop is not None:
bound = int(k.stop)
else:
bound = None
else:
is_slice = False
start = k
bound = k + 1
# We need check to see if we need to populate more of the cache.
if len(self._result_cache) <= 0 or (
None in self._result_cache[start:bound] and not self._cache_is_full()
):
try:
self._fill_cache(start, bound)
except StopIteration:
# There's nothing left, even though the bound is higher.
pass
# Cache should be full enough for our needs.
if is_slice:
return self._result_cache[start:bound]
return self._result_cache[start]
# Methods that return a SearchQuerySet.
def all(self): # noqa A003
"""Returns all results for the query."""
return self._clone()
def none(self):
"""Returns an empty result list for the query."""
return self._clone(klass=EmptySearchQuerySet)
def filter(self, *args, **kwargs): # noqa A003
"""Narrows the search based on certain attributes and the default operator."""
if DEFAULT_OPERATOR == "OR":
return self.filter_or(*args, **kwargs)
return self.filter_and(*args, **kwargs)
def exclude(self, *args, **kwargs):
"""Narrows the search by ensuring certain attributes are not included."""
clone = self._clone()
clone.query.add_filter(~SQ(*args, **kwargs))
return clone
def filter_and(self, *args, **kwargs):
"""Narrows the search by looking for (and including) certain attributes."""
clone = self._clone()
clone.query.add_filter(SQ(*args, **kwargs))
return clone
def filter_or(self, *args, **kwargs):
"""Narrows the search by ensuring certain attributes are not included."""
clone = self._clone()
clone.query.add_filter(SQ(*args, **kwargs), use_or=True)
return clone
def order_by(self, *args):
"""Alters the order in which the results should appear."""
clone = self._clone()
for field in args:
clone.query.add_order_by(field)
return clone
def highlight(self, **kwargs):
"""Adds highlighting to the results."""
clone = self._clone()
clone.query.add_highlight(**kwargs)
return clone
def models(self, *models):
"""Accepts an arbitrary number of Model classes to include in the search."""
clone = self._clone()
for model in models:
if (
model
not in connections[self.query._using]
.get_unified_index()
.get_indexed_models()
):
warnings.warn("The model %r is not registered for search." % (model,))
clone.query.add_model(model)
return clone
def result_class(self, klass):
"""
Allows specifying a different class to use for results.
Overrides any previous usages. If ``None`` is provided, Haystack will
revert back to the default ``SearchResult`` object.
"""
clone = self._clone()
clone.query.set_result_class(klass)
return clone
def boost(self, term, boost):
"""Boosts a certain aspect of the query."""
clone = self._clone()
clone.query.add_boost(term, boost)
return clone
def facet(self, field, **options):
"""Adds faceting to a query for the provided field."""
clone = self._clone()
clone.query.add_field_facet(field, **options)
return clone
def within(self, field, point_1, point_2):
"""Spatial: Adds a bounding box search to the query."""
clone = self._clone()
clone.query.add_within(field, point_1, point_2)
return clone
def dwithin(self, field, point, distance):
"""Spatial: Adds a distance-based search to the query."""
clone = self._clone()
clone.query.add_dwithin(field, point, distance)
return clone
def stats(self, field):
"""Adds stats to a query for the provided field."""
return self.stats_facet(field, facet_fields=None)
def stats_facet(self, field, facet_fields=None):
"""Adds stats facet for the given field and facet_fields represents
the faceted fields."""
clone = self._clone()
stats_facets = []
try:
stats_facets.append(sum(facet_fields, []))
except TypeError:
if facet_fields:
stats_facets.append(facet_fields)
clone.query.add_stats_query(field, stats_facets)
return clone
def distance(self, field, point):
"""
Spatial: Denotes results must have distance measurements from the
provided point.
"""
clone = self._clone()
clone.query.add_distance(field, point)
return clone
def date_facet(self, field, start_date, end_date, gap_by, gap_amount=1):
"""Adds faceting to a query for the provided field by date."""
clone = self._clone()
clone.query.add_date_facet(
field, start_date, end_date, gap_by, gap_amount=gap_amount
)
return clone
def query_facet(self, field, query):
"""Adds faceting to a query for the provided field with a custom query."""
clone = self._clone()
clone.query.add_query_facet(field, query)
return clone
def narrow(self, query):
"""Pushes existing facet choices into the search."""
if isinstance(query, SQ):
# produce query string using empty query of the same class
empty_query = self.query._clone()
empty_query._reset()
query = query.as_query_string(empty_query.build_query_fragment)
clone = self._clone()
clone.query.add_narrow_query(query)
return clone
def raw_search(self, query_string, **kwargs):
"""Passes a raw query directly to the backend."""
return self.filter(content=Raw(query_string, **kwargs))
def load_all(self):
"""Efficiently populates the objects in the search results."""
clone = self._clone()
clone._load_all = True
return clone
def auto_query(self, query_string, fieldname="content"):
"""
Performs a best guess constructing the search query.
This method is somewhat naive but works well enough for the simple,
common cases.
"""
kwargs = {fieldname: AutoQuery(query_string)}
return self.filter(**kwargs)
def autocomplete(self, **kwargs):
"""
A shortcut method to perform an autocomplete search.
Must be run against fields that are either ``NgramField`` or
``EdgeNgramField``.
"""
clone = self._clone()
query_bits = []
for field_name, query in kwargs.items():
for word in query.split(" "):
bit = clone.query.clean(word.strip())
if bit:
kwargs = {field_name: bit}
query_bits.append(SQ(**kwargs))
return clone.filter(reduce(operator.__and__, query_bits))
def using(self, connection_name):
"""
Allows switching which connection the ``SearchQuerySet`` uses to
search in.
"""
clone = self._clone()
clone.query = self.query.using(connection_name)
clone._using = connection_name
return clone
# Methods that do not return a SearchQuerySet.
def count(self):
"""Returns the total number of matching results."""
return len(self)
def best_match(self):
"""Returns the best/top search result that matches the query."""
return self[0]
def latest(self, date_field):
"""Returns the most recent search result that matches the query."""
clone = self._clone()
clone.query.clear_order_by()
clone.query.add_order_by("-%s" % date_field)
return clone.best_match()
def more_like_this(self, model_instance):
"""Finds similar results to the object passed in."""
clone = self._clone()
clone.query.more_like_this(model_instance)
return clone
def facet_counts(self):
"""
Returns the facet counts found by the query.
This will cause the query to execute and should generally be used when
presenting the data.
"""
if self.query.has_run():
return self.query.get_facet_counts()
else:
clone = self._clone()
return clone.query.get_facet_counts()
def stats_results(self):
"""
Returns the stats results found by the query.
"""
if self.query.has_run():
return self.query.get_stats()
else:
clone = self._clone()
return clone.query.get_stats()
def set_spelling_query(self, spelling_query):
"""Set the exact text to be used to generate spelling suggestions
When making complicated queries, such as the alt parser mechanism
used by Solr dismax/edismax, this provides a convenient way to set
the a simple text string which will be used to generate spelling
suggestions without including unnecessary syntax.
"""
clone = self._clone()
clone.query.set_spelling_query(spelling_query)
return clone
def spelling_suggestion(self, preferred_query=None):
"""
Returns the spelling suggestion found by the query.
To work, you must set ``INCLUDE_SPELLING`` within your connection's
settings dictionary to ``True``. Otherwise, ``None`` will be returned.
This will cause the query to execute and should generally be used when
presenting the data.
"""
if self.query.has_run():
return self.query.get_spelling_suggestion(preferred_query)
else:
clone = self._clone()
return clone.query.get_spelling_suggestion(preferred_query)
def values(self, *fields):
"""
Returns a list of dictionaries, each containing the key/value pairs for
the result, exactly like Django's ``ValuesQuerySet``.
"""
qs = self._clone(klass=ValuesSearchQuerySet)
qs._fields.extend(fields)
return qs
def values_list(self, *fields, **kwargs):
"""
Returns a list of field values as tuples, exactly like Django's
``QuerySet.values``.
Optionally accepts a ``flat=True`` kwarg, which in the case of a
single field being provided, will return a flat list of that field
rather than a list of tuples.
"""
flat = kwargs.pop("flat", False)
if flat and len(fields) > 1:
raise TypeError(
"'flat' is not valid when values_list is called with more than one field."
)
qs = self._clone(klass=ValuesListSearchQuerySet)
qs._fields.extend(fields)
qs._flat = flat
return qs
# Utility methods.
def _clone(self, klass=None):
if klass is None:
klass = self.__class__
query = self.query._clone()
clone = klass(query=query)
clone._load_all = self._load_all
return clone
| SearchQuerySet |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-that-sum-of-the-prices-is-less-than-or-equal-to-k.py | {
"start": 1487,
"end": 2360
} | class ____(object):
def findMaximumNumber(self, k, x):
"""
:type k: int
:type x: int
:rtype: int
"""
def floor_log2(x):
return x.bit_length()-1
result = prefix_cnt = 0
while k >= prefix_cnt:
# l = result.bit_length()
# assert(prefix_cnt == sum(c == '1' and (l-i)%x == 0 for i, c in enumerate(bin(result)[2:])))
cnt, i = prefix_cnt, 0
while (cnt<<x)+(1<<(i+x-1)) <= k:
cnt = (cnt<<x)+(1<<(i+x-1))
i += x
c = min(floor_log2(k//cnt) if cnt else float("inf"), x-1)
cnt <<= c
i += c
k -= cnt
result += 1<<i
prefix_cnt += int((i+1)%x == 0)
return result-1
# Time: O(max(logk, x)^2)
# Space: O(1)
# bit manipulation, combinatorics
| Solution2 |
python | huggingface__transformers | src/transformers/models/fnet/tokenization_fnet.py | {
"start": 777,
"end": 3241
} | class ____(AlbertTokenizer):
"""
Construct an FNet tokenizer. Based on [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models).
This tokenizer inherits from [`AlbertTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
keep_accents (`bool`, *optional*, defaults to `False`):
Whether or not to keep accents when tokenizing.
bos_token (`str`, *optional*, defaults to `"[CLS]"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
eos_token (`str`, *optional*, defaults to `"[SEP]"`):
The end of sequence token.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
"""
model_input_names = ["input_ids", "token_type_ids"]
# FNetTokenizerFast is an alias for FNetTokenizer (since AlbertTokenizer is already a fast tokenizer)
FNetTokenizerFast = FNetTokenizer
__all__ = ["FNetTokenizer", "FNetTokenizerFast"]
| FNetTokenizer |
python | ray-project__ray | python/ray/train/v2/xgboost/xgboost_trainer.py | {
"start": 461,
"end": 6853
} | class ____(DataParallelTrainer):
"""A Trainer for distributed data-parallel XGBoost training.
Example
-------
.. testcode::
import xgboost
import ray.data
import ray.train
from ray.train.xgboost import RayTrainReportCallback
from ray.train.xgboost import XGBoostTrainer
def train_fn_per_worker(config: dict):
# (Optional) Add logic to resume training state from a checkpoint.
# ray.train.get_checkpoint()
# 1. Get the dataset shard for the worker and convert to a `xgboost.DMatrix`
train_ds_iter, eval_ds_iter = (
ray.train.get_dataset_shard("train"),
ray.train.get_dataset_shard("validation"),
)
train_ds, eval_ds = train_ds_iter.materialize(), eval_ds_iter.materialize()
train_df, eval_df = train_ds.to_pandas(), eval_ds.to_pandas()
train_X, train_y = train_df.drop("y", axis=1), train_df["y"]
eval_X, eval_y = eval_df.drop("y", axis=1), eval_df["y"]
dtrain = xgboost.DMatrix(train_X, label=train_y)
deval = xgboost.DMatrix(eval_X, label=eval_y)
params = {
"tree_method": "approx",
"objective": "reg:squarederror",
"eta": 1e-4,
"subsample": 0.5,
"max_depth": 2,
}
# 2. Do distributed data-parallel training.
# Ray Train sets up the necessary coordinator processes and
# environment variables for your workers to communicate with each other.
bst = xgboost.train(
params,
dtrain=dtrain,
evals=[(deval, "validation")],
num_boost_round=1,
callbacks=[RayTrainReportCallback()],
)
train_ds = ray.data.from_items([{"x": x, "y": x + 1} for x in range(32)])
eval_ds = ray.data.from_items([{"x": x, "y": x + 1} for x in range(16)])
trainer = XGBoostTrainer(
train_fn_per_worker,
datasets={"train": train_ds, "validation": eval_ds},
scaling_config=ray.train.ScalingConfig(num_workers=2),
)
result = trainer.fit()
booster = RayTrainReportCallback.get_model(result.checkpoint)
Args:
train_loop_per_worker: The training function to execute on each worker.
This function can either take in zero arguments or a single ``Dict``
argument which is set by defining ``train_loop_config``.
Within this function you can use any of the
:ref:`Ray Train Loop utilities <train-loop-api>`.
train_loop_config: A configuration ``Dict`` to pass in as an argument to
``train_loop_per_worker``.
This is typically used for specifying hyperparameters.
xgboost_config: The configuration for setting up the distributed xgboost
backend. Defaults to using the "rabit" backend.
See :class:`~ray.train.xgboost.XGBoostConfig` for more info.
scaling_config: The configuration for how to scale data parallel training.
``num_workers`` determines how many Python processes are used for training,
and ``use_gpu`` determines whether or not each process should use GPUs.
See :class:`~ray.train.ScalingConfig` for more info.
run_config: The configuration for the execution of the training run.
See :class:`~ray.train.RunConfig` for more info.
datasets: The Ray Datasets to ingest for training.
Datasets are keyed by name (``{name: dataset}``).
Each dataset can be accessed from within the ``train_loop_per_worker``
by calling ``ray.train.get_dataset_shard(name)``.
Sharding and additional configuration can be done by
passing in a ``dataset_config``.
dataset_config: The configuration for ingesting the input ``datasets``.
By default, all the Ray Dataset are split equally across workers.
See :class:`~ray.train.DataConfig` for more details.
resume_from_checkpoint: [Deprecated]
metadata: [Deprecated]
"""
def __init__(
self,
train_loop_per_worker: Union[Callable[[], None], Callable[[Dict], None]],
*,
train_loop_config: Optional[Dict] = None,
xgboost_config: Optional["XGBoostConfig"] = None,
scaling_config: Optional[ScalingConfig] = None,
run_config: Optional[RunConfig] = None,
datasets: Optional[Dict[str, GenDataset]] = None,
dataset_config: Optional[ray.train.DataConfig] = None,
# TODO: [Deprecated]
metadata: Optional[Dict[str, Any]] = None,
resume_from_checkpoint: Optional[Checkpoint] = None,
# TODO(justinvyu): [Deprecated] Legacy XGBoostTrainer API
label_column: Optional[str] = None,
params: Optional[Dict[str, Any]] = None,
num_boost_round: Optional[int] = None,
):
if (
label_column is not None
or params is not None
or num_boost_round is not None
):
raise DeprecationWarning(
"The legacy XGBoostTrainer API is deprecated. "
"Please switch to passing in a custom `train_loop_per_worker` "
"function instead. "
"See this issue for more context: "
"https://github.com/ray-project/ray/issues/50042"
)
from ray.train.xgboost import XGBoostConfig
super(XGBoostTrainer, self).__init__(
train_loop_per_worker=train_loop_per_worker,
train_loop_config=train_loop_config,
backend_config=xgboost_config or XGBoostConfig(),
scaling_config=scaling_config,
dataset_config=dataset_config,
run_config=run_config,
datasets=datasets,
resume_from_checkpoint=resume_from_checkpoint,
metadata=metadata,
)
@classmethod
@Deprecated
def get_model(cls, checkpoint: Checkpoint):
"""[Deprecated] Retrieve the XGBoost model stored in this checkpoint."""
raise DeprecationWarning(
"`XGBoostTrainer.get_model` is deprecated. "
"Use `RayTrainReportCallback.get_model` instead."
)
| XGBoostTrainer |
python | pypa__warehouse | tests/unit/oidc/models/test_core.py | {
"start": 1037,
"end": 6328
} | class ____:
def test_lookup_by_claims_raises(self):
with pytest.raises(NotImplementedError):
_core.OIDCPublisher.lookup_by_claims(pretend.stub(), pretend.stub())
def test_oidc_publisher_not_default_verifiable(self):
publisher = _core.OIDCPublisher(projects=[])
with pytest.raises(errors.InvalidPublisherError) as e:
publisher.check_claims_existence(signed_claims={})
assert str(e.value) == "No required verifiable claims"
def test_attestation_identity(self):
publisher = _core.OIDCPublisher(projects=[])
assert not publisher.attestation_identity
def test_admin_details_default(self):
publisher = _core.OIDCPublisher(projects=[])
assert publisher.admin_details == []
@pytest.mark.parametrize(
("url", "publisher_url", "expected"),
[
( # GitHub trivial case
"https://github.com/owner/project",
"https://github.com/owner/project",
True,
),
( # ActiveState trivial case
"https://platform.activestate.com/owner/project",
"https://platform.activestate.com/owner/project",
True,
),
( # GitLab trivial case
"https://gitlab.com/owner/project",
"https://gitlab.com/owner/project",
True,
),
(
# Google trivial case (no publisher URL)
"https://example.com/owner/project",
None,
False,
),
( # URL is a sub-path of the TP URL
"https://github.com/owner/project/issues",
"https://github.com/owner/project",
True,
),
( # Normalization
"https://GiThUB.com/owner/project/",
"https://github.com/owner/project",
True,
),
( # TP URL is a prefix, but not a parent of the URL
"https://github.com/owner/project22",
"https://github.com/owner/project",
False,
),
( # URL is a parent of the TP URL
"https://github.com/owner",
"https://github.com/owner/project",
False,
),
( # Scheme component does not match
"http://github.com/owner/project",
"https://github.com/owner/project",
False,
),
( # Host component does not match
"https://gitlab.com/owner/project",
"https://github.com/owner/project",
False,
),
( # Host component matches, but contains user and port info
"https://user@github.com:443/owner/project",
"https://github.com/owner/project",
False,
),
( # URL path component is empty
"https://github.com",
"https://github.com/owner/project",
False,
),
( # TP URL path component is empty
# (currently no TPs have an empty path, so even if the given URL is a
# sub-path of the TP URL, we fail the verification)
"https://github.com/owner/project",
"https://github.com",
False,
),
( # Both path components are empty
# (currently no TPs have an empty path, so even if the given URL is the
# same as the TP URL, we fail the verification)
"https://github.com",
"https://github.com",
False,
),
( # Default verification is case-sensitive
"https://publisher.com/owner/project",
"https://publisher.com/owner/PrOjeCt",
False,
),
],
)
def test_verify_url(self, monkeypatch, url, publisher_url, expected):
class TestPublisher(_core.OIDCPublisher):
__abstract__ = True
@property
def publisher_base_url(self):
return publisher_url
publisher = TestPublisher()
assert publisher.verify_url(url) == expected
def test_check_existing_jti():
publisher = pretend.stub(
jwt_identifier_exists=pretend.call_recorder(lambda s: False),
)
assert _core.check_existing_jti(
pretend.stub(),
"6e67b1cb-2b8d-4be5-91cb-757edb2ec970",
pretend.stub(),
publisher_service=publisher,
)
def test_check_existing_jti_fails(metrics):
publisher = pretend.stub(
jwt_identifier_exists=pretend.call_recorder(lambda s: True),
metrics=metrics,
publisher="fakepublisher",
)
with pytest.raises(errors.ReusedTokenError):
assert _core.check_existing_jti(
pretend.stub(),
"6e67b1cb-2b8d-4be5-91cb-757edb2ec970",
pretend.stub(),
publisher_service=publisher,
)
assert (
pretend.call("warehouse.oidc.reused_token", tags=["publisher:fakepublisher"])
in metrics.increment.calls
)
| TestOIDCPublisher |
python | cython__cython | tests/run/methodmangling_unknown_names.py | {
"start": 58,
"end": 700
} | class ____(object):
def run(self):
"""
>>> Test().run()
NameError1
NameError2
found mangled
"""
try:
print(__something)
except NameError:
print("NameError1") # correct - shouldn't exist
globals()['__something'] = 'found unmangled'
try:
print(__something)
except NameError:
print("NameError2") # correct - shouldn't exist
globals()['_Test__something'] = 'found mangled'
try:
print(__something) # should print this
except NameError:
print("NameError3")
| Test |
python | spack__spack | lib/spack/spack/cmd/common/arguments.py | {
"start": 3733,
"end": 4442
} | class ____(argparse.Action):
"""Sets the value for maximum number of concurrent package builds
The value is set in the command line configuration scope so that
it can be retrieved using the spack.config API.
"""
def __call__(self, parser, namespace, concurrent_packages, option_string):
if concurrent_packages < 1:
msg = 'invalid value for argument "{0}" ' '[expected a positive integer, got "{1}"]'
raise ValueError(msg.format(option_string, concurrent_packages))
spack.config.set("config:concurrent_packages", concurrent_packages, scope="command_line")
setattr(namespace, "concurrent_packages", concurrent_packages)
| SetConcurrentPackages |
python | huggingface__transformers | tests/pipelines/test_pipelines_common.py | {
"start": 2446,
"end": 9150
} | class ____(unittest.TestCase):
@require_torch
def test_pipeline_iteration(self):
from torch.utils.data import Dataset
class MyDataset(Dataset):
data = [
"This is a test",
"This restaurant is great",
"This restaurant is awful",
]
def __len__(self):
return 3
def __getitem__(self, i):
return self.data[i]
text_classifier = pipeline(task="text-classification", model="hf-internal-testing/tiny-random-distilbert")
dataset = MyDataset()
for output in text_classifier(dataset):
self.assertEqual(output, {"label": ANY(str), "score": ANY(float)})
@require_torch
def test_check_task_auto_inference(self):
pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert")
self.assertIsInstance(pipe, TextClassificationPipeline)
@require_torch
def test_pipeline_batch_size_global(self):
pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert")
self.assertEqual(pipe._batch_size, None)
self.assertEqual(pipe._num_workers, None)
pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert", batch_size=2, num_workers=1)
self.assertEqual(pipe._batch_size, 2)
self.assertEqual(pipe._num_workers, 1)
@require_torch
def test_pipeline_pathlike(self):
pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert")
with tempfile.TemporaryDirectory() as d:
pipe.save_pretrained(d)
path = Path(d)
newpipe = pipeline(task="text-classification", model=path)
self.assertIsInstance(newpipe, TextClassificationPipeline)
@require_torch
def test_pipeline_override(self):
class MyPipeline(TextClassificationPipeline):
pass
text_classifier = pipeline(model="hf-internal-testing/tiny-random-distilbert", pipeline_class=MyPipeline)
self.assertIsInstance(text_classifier, MyPipeline)
def test_check_task(self):
task = get_task("openai-community/gpt2")
self.assertEqual(task, "text-generation")
with self.assertRaises(RuntimeError):
# Wrong framework
get_task("espnet/siddhana_slurp_entity_asr_train_asr_conformer_raw_en_word_valid.acc.ave_10best")
@require_torch
def test_iterator_data(self):
def data(n: int):
for _ in range(n):
yield "This is a test"
pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert")
results = []
for out in pipe(data(10)):
self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504})
results.append(out)
self.assertEqual(len(results), 10)
# When using multiple workers on streamable data it should still work
# This will force using `num_workers=1` with a warning for now.
results = []
for out in pipe(data(10), num_workers=2):
self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504})
results.append(out)
self.assertEqual(len(results), 10)
@require_torch
def test_unbatch_attentions_hidden_states(self):
model = DistilBertForSequenceClassification.from_pretrained(
"hf-internal-testing/tiny-random-distilbert", output_hidden_states=True, output_attentions=True
)
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-distilbert")
text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
# Used to throw an error because `hidden_states` are a tuple of tensors
# instead of the expected tensor.
outputs = text_classifier(["This is great !"] * 20, batch_size=32)
self.assertEqual(len(outputs), 20)
@require_torch
def test_dtype_property(self):
import torch
model_id = "hf-internal-testing/tiny-random-distilbert"
# If dtype is specified in the pipeline constructor, the property should return that type
pipe = pipeline(model=model_id, dtype=torch.float16)
self.assertEqual(pipe.dtype, torch.float16)
# If the underlying model changes dtype, the property should return the new type
pipe.model.to(torch.bfloat16)
self.assertEqual(pipe.dtype, torch.bfloat16)
# If dtype is NOT specified in the pipeline constructor, the property should just return
# the dtype of the underlying model (default)
pipe = pipeline(model=model_id)
self.assertEqual(pipe.dtype, torch.float32)
# If underlying model doesn't have dtype property, simply return None
pipe.model = None
self.assertIsNone(pipe.dtype)
@require_torch
def test_auto_model_pipeline_registration_from_local_dir(self):
with tempfile.TemporaryDirectory() as tmp_dir:
snapshot_download("hf-internal-testing/tiny-random-custom-architecture", local_dir=tmp_dir)
pipe = pipeline("text-generation", tmp_dir, trust_remote_code=True)
self.assertIsInstance(pipe, TextGenerationPipeline) # Assert successful load
@require_torch
def test_pipeline_with_task_parameters_no_side_effects(self):
"""
Regression test: certain pipeline flags, like `task`, modified the model configuration, causing unexpected
side-effects
"""
# This checkpoint has task-specific parameters that will modify the behavior of the pipeline
model = T5ForConditionalGeneration.from_pretrained("t5-small")
self.assertTrue(model.config.num_beams == 1)
# The task-specific parameters used to cause side-effects on `model.config` -- not anymore
pipe = pipeline(model=model, tokenizer=AutoTokenizer.from_pretrained("t5-small"), task="translation_en_to_de")
self.assertTrue(model.config.num_beams == 1)
self.assertTrue(model.generation_config.num_beams == 1)
# Under the hood: we now store a generation config in the pipeline. This generation config stores the
# task-specific parameters.
self.assertTrue(pipe.generation_config.num_beams == 4)
# We can confirm that the task-specific parameters have an effect. (In this case, the default is `num_beams=1`,
# which would crash when `num_return_sequences=4` is passed.)
pipe("Hugging Face doesn't sell hugs.", num_return_sequences=4)
with self.assertRaises(ValueError):
pipe("Hugging Face doesn't sell hugs.", num_return_sequences=4, num_beams=1)
@is_pipeline_test
@require_torch
| CommonPipelineTest |
python | gevent__gevent | src/gevent/tests/test__greenlet.py | {
"start": 1731,
"end": 1781
} | class ____(ExpectedError):
pass
| ExpectedJoinError |
python | getsentry__sentry | src/sentry/rules/conditions/tagged_event.py | {
"start": 1198,
"end": 4003
} | class ____(EventCondition):
id = "sentry.rules.conditions.tagged_event.TaggedEventCondition"
label = "The event's tags match {key} {match} {value}"
form_fields = {
"key": {"type": "string", "placeholder": "key"},
"match": {"type": "choice", "choices": list(MATCH_CHOICES.items())},
"value": {"type": "string", "placeholder": "value"},
}
def _passes(self, raw_tags: Sequence[tuple[str, Any]]) -> bool:
option_key = self.get_option("key")
option_match = self.get_option("match")
option_value = self.get_option("value")
if not (option_key and option_match):
return False
option_key = option_key.lower()
tag_keys = (
k
for gen in (
(k.lower() for k, v in raw_tags),
(tagstore.backend.get_standardized_key(k) for k, v in raw_tags),
)
for k in gen
)
# NOTE: IS_SET condition differs btw tagged_event and event_attribute so not handled by match_values
if option_match == MatchType.IS_SET:
return option_key in tag_keys
elif option_match == MatchType.NOT_SET:
return option_key not in tag_keys
if not option_value:
return False
option_value = option_value.lower()
# This represents the fetched tag values given the provided key
# so eg. if the key is 'environment' and the tag_value is 'production'
tag_values = (
v.lower()
for k, v in raw_tags
if k.lower() == option_key or tagstore.backend.get_standardized_key(k) == option_key
)
return match_values(
group_values=tag_values, match_value=option_value, match_type=option_match
)
def passes(self, event: GroupEvent, state: EventState, **kwargs: Any) -> bool:
return self._passes(event.tags)
def passes_activity(
self, condition_activity: ConditionActivity, event_map: dict[str, Any]
) -> bool:
try:
tags = event_map[condition_activity.data["event_id"]]["tags"]
return self._passes(tags.items())
except (TypeError, KeyError):
return False
def render_label(self) -> str:
data = {
"key": self.data["key"],
"value": self.data["value"],
"match": MATCH_CHOICES[self.data["match"]],
}
return self.label.format(**data)
def get_event_columns(self) -> dict[Dataset, Sequence[str]]:
columns: dict[Dataset, Sequence[str]] = get_dataset_columns(
[Columns.TAGS_KEY, Columns.TAGS_VALUE]
)
return columns
def get_form_instance(self) -> TaggedEventForm:
return TaggedEventForm(self.data)
| TaggedEventCondition |
python | aimacode__aima-python | csp.py | {
"start": 34977,
"end": 37146
} | class ____:
"""
A Constraint consists of:
scope : a tuple of variables
condition: a function that can applied to a tuple of values
for the variables.
"""
def __init__(self, scope, condition):
self.scope = scope
self.condition = condition
def __repr__(self):
return self.condition.__name__ + str(self.scope)
def holds(self, assignment):
"""Returns the value of Constraint con evaluated in assignment.
precondition: all variables are assigned in assignment
"""
return self.condition(*tuple(assignment[v] for v in self.scope))
def all_diff_constraint(*values):
"""Returns True if all values are different, False otherwise"""
return len(values) is len(set(values))
def is_word_constraint(words):
"""Returns True if the letters concatenated form a word in words, False otherwise"""
def isw(*letters):
return "".join(letters) in words
return isw
def meet_at_constraint(p1, p2):
"""Returns a function that is True when the words meet at the positions (p1, p2), False otherwise"""
def meets(w1, w2):
return w1[p1] == w2[p2]
meets.__name__ = "meet_at(" + str(p1) + ',' + str(p2) + ')'
return meets
def adjacent_constraint(x, y):
"""Returns True if x and y are adjacent numbers, False otherwise"""
return abs(x - y) == 1
def sum_constraint(n):
"""Returns a function that is True when the the sum of all values is n, False otherwise"""
def sumv(*values):
return sum(values) is n
sumv.__name__ = str(n) + "==sum"
return sumv
def is_constraint(val):
"""Returns a function that is True when x is equal to val, False otherwise"""
def isv(x):
return val == x
isv.__name__ = str(val) + "=="
return isv
def ne_constraint(val):
"""Returns a function that is True when x is not equal to val, False otherwise"""
def nev(x):
return val != x
nev.__name__ = str(val) + "!="
return nev
def no_heuristic(to_do):
return to_do
def sat_up(to_do):
return SortedSet(to_do, key=lambda t: 1 / len([var for var in t[1].scope]))
| Constraint |
python | doocs__leetcode | solution/2200-2299/2239.Find Closest Number to Zero/Solution.py | {
"start": 0,
"end": 227
} | class ____:
def findClosestNumber(self, nums: List[int]) -> int:
ans, d = 0, inf
for x in nums:
if (y := abs(x)) < d or (y == d and x > ans):
ans, d = x, y
return ans
| Solution |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-legacy-office/llama_index/readers/legacy_office/reader.py | {
"start": 340,
"end": 8027
} | class ____(BaseReader):
"""
Legacy Office Reader for parsing old Office documents (.doc, etc.) using Apache Tika.
This reader uses Apache Tika to parse legacy Office documents like Word 97 (.doc) files.
It can use either a local Tika server or connect to a remote one.
Args:
tika_server_jar_path (Optional[str]): Path to the Tika server JAR file.
If not provided, will download and use the default Tika server JAR.
tika_server_url (Optional[str]): URL of remote Tika server.
If provided, will use remote server instead of starting local one.
cache_dir (Optional[str]): Directory to cache the Tika server JAR.
Defaults to ~/.cache/llama_index/tika
excluded_embed_metadata_keys (Optional[List[str]]): Metadata keys to exclude from embedding.
excluded_llm_metadata_keys (Optional[List[str]]): Metadata keys to exclude from LLM.
"""
def __init__(
self,
tika_server_jar_path: Optional[str] = None,
tika_server_url: Optional[str] = None,
cache_dir: Optional[str] = None,
excluded_embed_metadata_keys: Optional[List[str]] = None,
excluded_llm_metadata_keys: Optional[List[str]] = None,
) -> None:
"""Initialize with parameters."""
super().__init__()
try:
import tika
from tika import parser
except ImportError as err:
raise ImportError(
"`tika` package not found, please run `pip install tika`"
) from err
self.parser = parser
self.excluded_embed_metadata_keys = excluded_embed_metadata_keys or []
self.excluded_llm_metadata_keys = excluded_llm_metadata_keys or []
# Set up cache directory
if cache_dir is None:
cache_dir = os.path.expanduser("~/.cache/llama_index/tika")
self.cache_dir = Path(cache_dir)
self.cache_dir.mkdir(parents=True, exist_ok=True)
# Handle remote server configuration
if tika_server_url:
logger.info(f"Using remote Tika server at {tika_server_url}")
os.environ["TIKA_SERVER_ENDPOINT"] = tika_server_url
return
# Set up local Tika server
if tika_server_jar_path:
os.environ["TIKA_SERVER_JAR"] = tika_server_jar_path
else:
# Use cached JAR if available
cached_jar = self.cache_dir / "tika-server.jar"
if cached_jar.exists():
logger.info("Using cached Tika server JAR")
os.environ["TIKA_SERVER_JAR"] = str(cached_jar)
else:
logger.info("Downloading Tika server JAR (this may take a while)...")
os.environ["TIKA_SERVER_JAR"] = str(cached_jar)
# Check if Tika server is already running
try:
response = requests.get("http://localhost:9998/version")
if response.status_code == 200:
logger.info("Using existing Tika server on port 9998")
os.environ["TIKA_SERVER_ENDPOINT"] = "http://localhost:9998"
return
except requests.RequestException:
# Server not running, will start it
pass
# Initialize Tika
logger.info("Initializing Tika server...")
tika.initVM()
# Set server endpoint
os.environ["TIKA_SERVER_ENDPOINT"] = "http://localhost:9998"
logger.info("Tika server will run on port 9998")
def _process_metadata(
self, tika_metadata: Dict[str, Any], file_path: str
) -> Dict[str, Any]:
"""
Process Tika metadata into LlamaIndex format.
Args:
tika_metadata: Raw metadata from Tika
file_path: Path to the document
Returns:
Processed metadata dictionary with essential information only
"""
# Start with basic metadata
metadata = {
"file_path": file_path,
"file_name": Path(file_path).name,
"file_type": Path(file_path).suffix.lower(),
}
# Whitelist of metadata keys to keep
essential_keys = {
# Document properties
"title": "title",
"dc:title": "title",
"dc:creator": "author",
"meta:author": "author",
"meta:word-count": "words",
"meta:character-count": "chars",
"meta:page-count": "pages",
"xmptpg:npages": "pages",
# Dates
"dcterms:created": "created",
"dcterms:modified": "modified",
}
for key, orig_value in tika_metadata.items():
# Skip if not an essential key
normalized_key = essential_keys.get(key.lower())
if not normalized_key:
continue
# Skip empty values
if not orig_value:
continue
# Handle lists by joining with semicolon
processed_value = orig_value
if isinstance(orig_value, list):
processed_value = "; ".join(str(v) for v in orig_value)
# Convert to string and clean up
processed_value = str(processed_value).strip()
if processed_value and ":" in processed_value:
processed_value = processed_value.split(":", 1)[1].strip()
if processed_value:
metadata[normalized_key] = processed_value
return metadata
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
fs: Optional[AbstractFileSystem] = None,
) -> List[Document]:
"""
Load data from legacy Office documents.
Args:
file (Path): Path to the legacy Office document.
extra_info (Optional[Dict]): Optional dictionary of extra metadata to add.
fs (Optional[AbstractFileSystem]): Optional filesystem to use.
Returns:
List[Document]: List of documents parsed from the file.
Raises:
ValueError: If document parsing fails or content is empty.
"""
try:
logger.info(f"Parsing document: {file}")
# Parse the document using Tika
if fs:
with fs.open(file) as f:
parsed = cast(Dict[str, Any], self.parser.from_buffer(f.read()))
else:
parsed = cast(Dict[str, Any], self.parser.from_file(str(file)))
if parsed is None:
raise ValueError(f"Failed to parse document: {file}")
content = str(parsed.get("content", "")).strip()
if not content:
raise ValueError(f"No content found in document: {file}")
# Process metadata
tika_metadata = parsed.get("metadata", {})
if not isinstance(tika_metadata, dict):
tika_metadata = {}
metadata = self._process_metadata(tika_metadata, str(file))
if extra_info:
metadata.update(extra_info)
# Create document with content and metadata
doc = Document(
text=content,
metadata=metadata,
excluded_embed_metadata_keys=self.excluded_embed_metadata_keys,
excluded_llm_metadata_keys=self.excluded_llm_metadata_keys,
)
logger.info(f"Successfully parsed document: {file}")
return [doc]
except Exception as e:
logger.error(f"Error processing document {file}: {e!s}")
raise ValueError(f"Error processing document {file}: {e!s}")
| LegacyOfficeReader |
python | huggingface__transformers | src/transformers/models/qwen3_vl_moe/modeling_qwen3_vl_moe.py | {
"start": 36029,
"end": 40428
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: Qwen3VLMoeTextConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
self.mrope_section = config.rope_parameters.get("mrope_section", [24, 20, 20])
@staticmethod
def compute_default_rope_parameters(
config: Optional[Qwen3VLMoeTextConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
# In contrast to other models, Qwen3VLMoe has different position ids for the grids
# So we expand the inv_freq to shape (3, ...)
if position_ids.ndim == 2:
position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
inv_freq_expanded = self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1)
position_ids_expanded = position_ids[:, :, None, :].float() # shape (3, bs, 1, positions)
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3)
freqs = self.apply_interleaved_mrope(freqs, self.mrope_section)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
def apply_interleaved_mrope(self, freqs, mrope_section):
"""Apply interleaved MRoPE to 3D rotary embeddings.
Reorganizes frequency layout from chunked [TTT...HHH...WWW] to
interleaved [THWTHWTHW...TT], preserving frequency continuity.
args:
x: (3, bs, seq_len, head_dim // 2)
mrope_section: (3,)
returns:
x_t: (bs, seq_len, head_dim // 2)
"""
freqs_t = freqs[0] # just overwrite the first dimension T
for dim, offset in enumerate((1, 2), start=1): # H, W
length = mrope_section[dim] * 3
idx = slice(offset, length, 3)
freqs_t[..., idx] = freqs[dim, ..., idx]
return freqs_t
@auto_docstring(
custom_intro=(
"Text part of Qwen3VLMoe, "
"not a pure text-only model, as DeepStack integrates visual features into the early hidden states."
)
)
| Qwen3VLMoeTextRotaryEmbedding |
python | tensorflow__tensorflow | tensorflow/python/debug/wrappers/framework.py | {
"start": 8174,
"end": 9882
} | class ____:
"""Request from an on-run-start callback.
The caller of the callback can use this response object to specify what
action the debug-wrapper session actually takes on the run() call.
"""
def __init__(self,
action,
debug_urls,
debug_ops="DebugIdentity",
node_name_regex_allowlist=None,
op_type_regex_allowlist=None,
tensor_dtype_regex_allowlist=None,
tolerate_debug_op_creation_failures=False):
"""Constructor of `OnRunStartResponse`.
Args:
action: (`OnRunStartAction`) the action actually taken by the wrapped
session for the run() call.
debug_urls: (`list` of `str`) debug_urls used in watching the tensors
during the run() call.
debug_ops: (`str` or `list` of `str`) Debug op(s) to be used by the
debugger.
node_name_regex_allowlist: Regular-expression allowlist for node
name.
op_type_regex_allowlist: Regular-expression allowlist for op type.
tensor_dtype_regex_allowlist: Regular-expression allowlist for tensor
dtype.
tolerate_debug_op_creation_failures: Whether debug op creation failures
are to be tolerated.
"""
_check_type(action, str)
self.action = action
_check_type(debug_urls, list)
self.debug_urls = debug_urls
self.debug_ops = debug_ops
self.node_name_regex_allowlist = node_name_regex_allowlist
self.op_type_regex_allowlist = op_type_regex_allowlist
self.tensor_dtype_regex_allowlist = tensor_dtype_regex_allowlist
self.tolerate_debug_op_creation_failures = (
tolerate_debug_op_creation_failures)
| OnRunStartResponse |
python | Pylons__pyramid | src/pyramid/interfaces.py | {
"start": 31522,
"end": 31637
} | class ____(Interface):
"""Interface representing a PEP 282 logger"""
ILogger = IDebugLogger # b/c
| IDebugLogger |
python | openai__openai-python | src/openai/types/evals/runs/output_item_list_response.py | {
"start": 1457,
"end": 1633
} | class ____(BaseModel):
content: str
"""The content of the message."""
role: str
"""The role of the message sender (e.g., system, user, developer)."""
| SampleInput |
python | vyperlang__vyper | vyper/exceptions.py | {
"start": 12467,
"end": 12582
} | class ____(VyperInternalException):
"""Constant folding logic cannot be applied to an AST node."""
| UnfoldableNode |
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 9171,
"end": 9515
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.layers = torch.nn.ModuleDict(
{
"0": torch.nn.Linear(10, 10),
}
)
def forward(self, x):
# TODO(future PR): handle more logic
x = self.layers["0"](x)
return x
| ModuleDict |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 55310,
"end": 55505
} | class ____(VegaLiteSchema):
"""AutosizeType schema wrapper."""
_schema = {"$ref": "#/definitions/AutosizeType"}
def __init__(self, *args):
super().__init__(*args)
| AutosizeType |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/representer.py | {
"start": 19408,
"end": 44563
} | class ____(SafeRepresenter):
# need to add type here and write out the .comment
# in serializer and emitter
def __init__(self, default_style=None, default_flow_style=None, dumper=None):
# type: (Any, Any, Any) -> None
if not hasattr(dumper, 'typ') and default_flow_style is None:
default_flow_style = False
SafeRepresenter.__init__(
self,
default_style=default_style,
default_flow_style=default_flow_style,
dumper=dumper,
)
def ignore_aliases(self, data):
# type: (Any) -> bool
try:
if data.anchor is not None and data.anchor.value is not None:
return False
except AttributeError:
pass
return SafeRepresenter.ignore_aliases(self, data)
def represent_none(self, data):
# type: (Any) -> Any
if len(self.represented_objects) == 0 and not self.serializer.use_explicit_start:
# this will be open ended (although it is not yet)
return self.represent_scalar('tag:yaml.org,2002:null', 'null')
return self.represent_scalar('tag:yaml.org,2002:null', "")
def represent_literal_scalarstring(self, data):
# type: (Any) -> Any
tag = None
style = '|'
anchor = data.yaml_anchor(any=True)
tag = 'tag:yaml.org,2002:str'
return self.represent_scalar(tag, data, style=style, anchor=anchor)
represent_preserved_scalarstring = represent_literal_scalarstring
def represent_folded_scalarstring(self, data):
# type: (Any) -> Any
tag = None
style = '>'
anchor = data.yaml_anchor(any=True)
for fold_pos in reversed(getattr(data, 'fold_pos', [])):
if (
data[fold_pos] == ' '
and (fold_pos > 0 and not data[fold_pos - 1].isspace())
and (fold_pos < len(data) and not data[fold_pos + 1].isspace())
):
data = data[:fold_pos] + '\a' + data[fold_pos:]
tag = 'tag:yaml.org,2002:str'
return self.represent_scalar(tag, data, style=style, anchor=anchor)
def represent_single_quoted_scalarstring(self, data):
# type: (Any) -> Any
tag = None
style = "'"
anchor = data.yaml_anchor(any=True)
tag = 'tag:yaml.org,2002:str'
return self.represent_scalar(tag, data, style=style, anchor=anchor)
def represent_double_quoted_scalarstring(self, data):
# type: (Any) -> Any
tag = None
style = '"'
anchor = data.yaml_anchor(any=True)
tag = 'tag:yaml.org,2002:str'
return self.represent_scalar(tag, data, style=style, anchor=anchor)
def represent_plain_scalarstring(self, data):
# type: (Any) -> Any
tag = None
style = ''
anchor = data.yaml_anchor(any=True)
tag = 'tag:yaml.org,2002:str'
return self.represent_scalar(tag, data, style=style, anchor=anchor)
def insert_underscore(self, prefix, s, underscore, anchor=None):
# type: (Any, Any, Any, Any) -> Any
if underscore is None:
return self.represent_scalar('tag:yaml.org,2002:int', prefix + s, anchor=anchor)
if underscore[0]:
sl = list(s)
pos = len(s) - underscore[0]
while pos > 0:
sl.insert(pos, '_')
pos -= underscore[0]
s = "".join(sl)
if underscore[1]:
s = '_' + s
if underscore[2]:
s += '_'
return self.represent_scalar('tag:yaml.org,2002:int', prefix + s, anchor=anchor)
def represent_scalar_int(self, data):
# type: (Any) -> Any
if data._width is not None:
s = '{:0{}d}'.format(data, data._width)
else:
s = format(data, 'd')
anchor = data.yaml_anchor(any=True)
return self.insert_underscore("", s, data._underscore, anchor=anchor)
def represent_binary_int(self, data):
# type: (Any) -> Any
if data._width is not None:
# cannot use '{:#0{}b}', that strips the zeros
s = '{:0{}b}'.format(data, data._width)
else:
s = format(data, 'b')
anchor = data.yaml_anchor(any=True)
return self.insert_underscore('0b', s, data._underscore, anchor=anchor)
def represent_octal_int(self, data):
# type: (Any) -> Any
if data._width is not None:
# cannot use '{:#0{}o}', that strips the zeros
s = '{:0{}o}'.format(data, data._width)
else:
s = format(data, 'o')
anchor = data.yaml_anchor(any=True)
return self.insert_underscore('0o', s, data._underscore, anchor=anchor)
def represent_hex_int(self, data):
# type: (Any) -> Any
if data._width is not None:
# cannot use '{:#0{}x}', that strips the zeros
s = '{:0{}x}'.format(data, data._width)
else:
s = format(data, 'x')
anchor = data.yaml_anchor(any=True)
return self.insert_underscore('0x', s, data._underscore, anchor=anchor)
def represent_hex_caps_int(self, data):
# type: (Any) -> Any
if data._width is not None:
# cannot use '{:#0{}X}', that strips the zeros
s = '{:0{}X}'.format(data, data._width)
else:
s = format(data, 'X')
anchor = data.yaml_anchor(any=True)
return self.insert_underscore('0x', s, data._underscore, anchor=anchor)
def represent_scalar_float(self, data):
# type: (Any) -> Any
""" this is way more complicated """
value = None
anchor = data.yaml_anchor(any=True)
if data != data or (data == 0.0 and data == 1.0):
value = '.nan'
elif data == self.inf_value:
value = '.inf'
elif data == -self.inf_value:
value = '-.inf'
if value:
return self.represent_scalar('tag:yaml.org,2002:float', value, anchor=anchor)
if data._exp is None and data._prec > 0 and data._prec == data._width - 1:
# no exponent, but trailing dot
value = '{}{:d}.'.format(data._m_sign if data._m_sign else "", abs(int(data)))
elif data._exp is None:
# no exponent, "normal" dot
prec = data._prec
ms = data._m_sign if data._m_sign else ""
# -1 for the dot
value = '{}{:0{}.{}f}'.format(
ms, abs(data), data._width - len(ms), data._width - prec - 1
)
if prec == 0 or (prec == 1 and ms != ""):
value = value.replace('0.', '.')
while len(value) < data._width:
value += '0'
else:
# exponent
m, es = '{:{}.{}e}'.format(
# data, data._width, data._width - data._prec + (1 if data._m_sign else 0)
data,
data._width,
data._width + (1 if data._m_sign else 0),
).split('e')
w = data._width if data._prec > 0 else (data._width + 1)
if data < 0:
w += 1
m = m[:w]
e = int(es)
m1, m2 = m.split('.') # always second?
while len(m1) + len(m2) < data._width - (1 if data._prec >= 0 else 0):
m2 += '0'
if data._m_sign and data > 0:
m1 = '+' + m1
esgn = '+' if data._e_sign else ""
if data._prec < 0: # mantissa without dot
if m2 != '0':
e -= len(m2)
else:
m2 = ""
while (len(m1) + len(m2) - (1 if data._m_sign else 0)) < data._width:
m2 += '0'
e -= 1
value = m1 + m2 + data._exp + '{:{}0{}d}'.format(e, esgn, data._e_width)
elif data._prec == 0: # mantissa with trailing dot
e -= len(m2)
value = m1 + m2 + '.' + data._exp + '{:{}0{}d}'.format(e, esgn, data._e_width)
else:
if data._m_lead0 > 0:
m2 = '0' * (data._m_lead0 - 1) + m1 + m2
m1 = '0'
m2 = m2[: -data._m_lead0] # these should be zeros
e += data._m_lead0
while len(m1) < data._prec:
m1 += m2[0]
m2 = m2[1:]
e -= 1
value = m1 + '.' + m2 + data._exp + '{:{}0{}d}'.format(e, esgn, data._e_width)
if value is None:
value = repr(data).lower()
return self.represent_scalar('tag:yaml.org,2002:float', value, anchor=anchor)
def represent_sequence(self, tag, sequence, flow_style=None):
# type: (Any, Any, Any) -> Any
value = [] # type: List[Any]
# if the flow_style is None, the flow style tacked on to the object
# explicitly will be taken. If that is None as well the default flow
# style rules
try:
flow_style = sequence.fa.flow_style(flow_style)
except AttributeError:
flow_style = flow_style
try:
anchor = sequence.yaml_anchor()
except AttributeError:
anchor = None
node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
try:
comment = getattr(sequence, comment_attrib)
node.comment = comment.comment
# reset any comment already printed information
if node.comment and node.comment[1]:
for ct in node.comment[1]:
ct.reset()
item_comments = comment.items
for v in item_comments.values():
if v and v[1]:
for ct in v[1]:
ct.reset()
item_comments = comment.items
if node.comment is None:
node.comment = comment.comment
else:
# as we are potentially going to extend this, make a new list
node.comment = comment.comment[:]
try:
node.comment.append(comment.end)
except AttributeError:
pass
except AttributeError:
item_comments = {}
for idx, item in enumerate(sequence):
node_item = self.represent_data(item)
self.merge_comments(node_item, item_comments.get(idx))
if not (isinstance(node_item, ScalarNode) and not node_item.style):
best_style = False
value.append(node_item)
if flow_style is None:
if len(sequence) != 0 and self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def merge_comments(self, node, comments):
# type: (Any, Any) -> Any
if comments is None:
assert hasattr(node, 'comment')
return node
if getattr(node, 'comment', None) is not None:
for idx, val in enumerate(comments):
if idx >= len(node.comment):
continue
nc = node.comment[idx]
if nc is not None:
assert val is None or val == nc
comments[idx] = nc
node.comment = comments
return node
def represent_key(self, data):
# type: (Any) -> Any
if isinstance(data, CommentedKeySeq):
self.alias_key = None
return self.represent_sequence('tag:yaml.org,2002:seq', data, flow_style=True)
if isinstance(data, CommentedKeyMap):
self.alias_key = None
return self.represent_mapping('tag:yaml.org,2002:map', data, flow_style=True)
return SafeRepresenter.represent_key(self, data)
def represent_mapping(self, tag, mapping, flow_style=None):
# type: (Any, Any, Any) -> Any
value = [] # type: List[Any]
try:
flow_style = mapping.fa.flow_style(flow_style)
except AttributeError:
flow_style = flow_style
try:
anchor = mapping.yaml_anchor()
except AttributeError:
anchor = None
node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
# no sorting! !!
try:
comment = getattr(mapping, comment_attrib)
if node.comment is None:
node.comment = comment.comment
else:
# as we are potentially going to extend this, make a new list
node.comment = comment.comment[:]
if node.comment and node.comment[1]:
for ct in node.comment[1]:
ct.reset()
item_comments = comment.items
if self.dumper.comment_handling is None:
for v in item_comments.values():
if v and v[1]:
for ct in v[1]:
ct.reset()
try:
node.comment.append(comment.end)
except AttributeError:
pass
else:
# NEWCMNT
pass
except AttributeError:
item_comments = {}
merge_list = [m[1] for m in getattr(mapping, merge_attrib, [])]
try:
merge_pos = getattr(mapping, merge_attrib, [[0]])[0][0]
except IndexError:
merge_pos = 0
item_count = 0
if bool(merge_list):
items = mapping.non_merged_items()
else:
items = mapping.items()
for item_key, item_value in items:
item_count += 1
node_key = self.represent_key(item_key)
node_value = self.represent_data(item_value)
item_comment = item_comments.get(item_key)
if item_comment:
# assert getattr(node_key, 'comment', None) is None
# issue 351 did throw this because the comment from the list item was
# moved to the dict
node_key.comment = item_comment[:2]
nvc = getattr(node_value, 'comment', None)
if nvc is not None: # end comment already there
nvc[0] = item_comment[2]
nvc[1] = item_comment[3]
else:
node_value.comment = item_comment[2:]
if not (isinstance(node_key, ScalarNode) and not node_key.style):
best_style = False
if not (isinstance(node_value, ScalarNode) and not node_value.style):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
if ((item_count != 0) or bool(merge_list)) and self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
if bool(merge_list):
# because of the call to represent_data here, the anchors
# are marked as being used and thereby created
if len(merge_list) == 1:
arg = self.represent_data(merge_list[0])
else:
arg = self.represent_data(merge_list)
arg.flow_style = True
value.insert(merge_pos, (ScalarNode('tag:yaml.org,2002:merge', '<<'), arg))
return node
def represent_omap(self, tag, omap, flow_style=None):
# type: (Any, Any, Any) -> Any
value = [] # type: List[Any]
try:
flow_style = omap.fa.flow_style(flow_style)
except AttributeError:
flow_style = flow_style
try:
anchor = omap.yaml_anchor()
except AttributeError:
anchor = None
node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
try:
comment = getattr(omap, comment_attrib)
if node.comment is None:
node.comment = comment.comment
else:
# as we are potentially going to extend this, make a new list
node.comment = comment.comment[:]
if node.comment and node.comment[1]:
for ct in node.comment[1]:
ct.reset()
item_comments = comment.items
for v in item_comments.values():
if v and v[1]:
for ct in v[1]:
ct.reset()
try:
node.comment.append(comment.end)
except AttributeError:
pass
except AttributeError:
item_comments = {}
for item_key in omap:
item_val = omap[item_key]
node_item = self.represent_data({item_key: item_val})
# node_item.flow_style = False
# node item has two scalars in value: node_key and node_value
item_comment = item_comments.get(item_key)
if item_comment:
if item_comment[1]:
node_item.comment = [None, item_comment[1]]
assert getattr(node_item.value[0][0], 'comment', None) is None
node_item.value[0][0].comment = [item_comment[0], None]
nvc = getattr(node_item.value[0][1], 'comment', None)
if nvc is not None: # end comment already there
nvc[0] = item_comment[2]
nvc[1] = item_comment[3]
else:
node_item.value[0][1].comment = item_comment[2:]
# if not (isinstance(node_item, ScalarNode) \
# and not node_item.style):
# best_style = False
value.append(node_item)
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def represent_set(self, setting):
# type: (Any) -> Any
flow_style = False
tag = 'tag:yaml.org,2002:set'
# return self.represent_mapping(tag, value)
value = [] # type: List[Any]
flow_style = setting.fa.flow_style(flow_style)
try:
anchor = setting.yaml_anchor()
except AttributeError:
anchor = None
node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
# no sorting! !!
try:
comment = getattr(setting, comment_attrib)
if node.comment is None:
node.comment = comment.comment
else:
# as we are potentially going to extend this, make a new list
node.comment = comment.comment[:]
if node.comment and node.comment[1]:
for ct in node.comment[1]:
ct.reset()
item_comments = comment.items
for v in item_comments.values():
if v and v[1]:
for ct in v[1]:
ct.reset()
try:
node.comment.append(comment.end)
except AttributeError:
pass
except AttributeError:
item_comments = {}
for item_key in setting.odict:
node_key = self.represent_key(item_key)
node_value = self.represent_data(None)
item_comment = item_comments.get(item_key)
if item_comment:
assert getattr(node_key, 'comment', None) is None
node_key.comment = item_comment[:2]
node_key.style = node_value.style = '?'
if not (isinstance(node_key, ScalarNode) and not node_key.style):
best_style = False
if not (isinstance(node_value, ScalarNode) and not node_value.style):
best_style = False
value.append((node_key, node_value))
best_style = best_style
return node
def represent_dict(self, data):
# type: (Any) -> Any
"""write out tag if saved on loading"""
try:
t = data.tag.value
except AttributeError:
t = None
if t:
if t.startswith('!!'):
tag = 'tag:yaml.org,2002:' + t[2:]
else:
tag = t
else:
tag = 'tag:yaml.org,2002:map'
return self.represent_mapping(tag, data)
def represent_list(self, data):
# type: (Any) -> Any
try:
t = data.tag.value
except AttributeError:
t = None
if t:
if t.startswith('!!'):
tag = 'tag:yaml.org,2002:' + t[2:]
else:
tag = t
else:
tag = 'tag:yaml.org,2002:seq'
return self.represent_sequence(tag, data)
def represent_datetime(self, data):
# type: (Any) -> Any
inter = 'T' if data._yaml['t'] else ' '
_yaml = data._yaml
if _yaml['delta']:
data += _yaml['delta']
value = data.isoformat(inter)
else:
value = data.isoformat(inter)
if _yaml['tz']:
value += _yaml['tz']
return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
def represent_tagged_scalar(self, data):
# type: (Any) -> Any
try:
tag = data.tag.value
except AttributeError:
tag = None
try:
anchor = data.yaml_anchor()
except AttributeError:
anchor = None
return self.represent_scalar(tag, data.value, style=data.style, anchor=anchor)
def represent_scalar_bool(self, data):
# type: (Any) -> Any
try:
anchor = data.yaml_anchor()
except AttributeError:
anchor = None
return SafeRepresenter.represent_bool(self, data, anchor=anchor)
def represent_yaml_object(self, tag, data, cls, flow_style=None):
# type: (Any, Any, Any, Optional[Any]) -> Any
if hasattr(data, '__getstate__'):
state = data.__getstate__()
else:
state = data.__dict__.copy()
anchor = state.pop(Anchor.attrib, None)
res = self.represent_mapping(tag, state, flow_style=flow_style)
if anchor is not None:
res.anchor = anchor
return res
RoundTripRepresenter.add_representer(type(None), RoundTripRepresenter.represent_none)
RoundTripRepresenter.add_representer(
LiteralScalarString, RoundTripRepresenter.represent_literal_scalarstring
)
RoundTripRepresenter.add_representer(
FoldedScalarString, RoundTripRepresenter.represent_folded_scalarstring
)
RoundTripRepresenter.add_representer(
SingleQuotedScalarString, RoundTripRepresenter.represent_single_quoted_scalarstring
)
RoundTripRepresenter.add_representer(
DoubleQuotedScalarString, RoundTripRepresenter.represent_double_quoted_scalarstring
)
RoundTripRepresenter.add_representer(
PlainScalarString, RoundTripRepresenter.represent_plain_scalarstring
)
# RoundTripRepresenter.add_representer(tuple, Representer.represent_tuple)
RoundTripRepresenter.add_representer(ScalarInt, RoundTripRepresenter.represent_scalar_int)
RoundTripRepresenter.add_representer(BinaryInt, RoundTripRepresenter.represent_binary_int)
RoundTripRepresenter.add_representer(OctalInt, RoundTripRepresenter.represent_octal_int)
RoundTripRepresenter.add_representer(HexInt, RoundTripRepresenter.represent_hex_int)
RoundTripRepresenter.add_representer(HexCapsInt, RoundTripRepresenter.represent_hex_caps_int)
RoundTripRepresenter.add_representer(ScalarFloat, RoundTripRepresenter.represent_scalar_float)
RoundTripRepresenter.add_representer(ScalarBoolean, RoundTripRepresenter.represent_scalar_bool)
RoundTripRepresenter.add_representer(CommentedSeq, RoundTripRepresenter.represent_list)
RoundTripRepresenter.add_representer(CommentedMap, RoundTripRepresenter.represent_dict)
RoundTripRepresenter.add_representer(
CommentedOrderedMap, RoundTripRepresenter.represent_ordereddict
)
if sys.version_info >= (2, 7):
import collections
RoundTripRepresenter.add_representer(
collections.OrderedDict, RoundTripRepresenter.represent_ordereddict
)
RoundTripRepresenter.add_representer(CommentedSet, RoundTripRepresenter.represent_set)
RoundTripRepresenter.add_representer(
TaggedScalar, RoundTripRepresenter.represent_tagged_scalar
)
RoundTripRepresenter.add_representer(TimeStamp, RoundTripRepresenter.represent_datetime)
| RoundTripRepresenter |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP008.py | {
"start": 6693,
"end": 6957
} | class ____(B):
def f(self):
C = B # Local variable C shadows the class name
return super(C, self).f() # Should NOT trigger UP008
# See: https://github.com/astral-sh/ruff/issues/20491
# UP008 should not apply when __class__ is a local variable
| C |
python | pytorch__pytorch | torch/_decomp/decompositions.py | {
"start": 1211,
"end": 181968
} | class ____(Enum):
NONE = 0
MEAN = 1
SUM = 2
# This wraps a decomposition and performs various type promotion logic within it, depending on the strategy provided
# We're currently reusing ELEMENTWISE_TYPE_PROMOTION_KIND, although some of the usages are on non-elementwise ops
# Will need to validate the non-elementwise uses
def type_casts(
f: Callable,
type_promotion: utils.ELEMENTWISE_TYPE_PROMOTION_KIND,
compute_dtype_only: bool = False,
include_non_tensor_args: bool = False,
):
@functools.wraps(f)
def inner(*args, **kwargs):
allowed_types = (
(Tensor, torch.types._Number) if include_non_tensor_args else (Tensor,)
) # type: ignore[arg-type]
flat_args = [
x
for x in pytree.arg_tree_leaves(*args, **kwargs)
if isinstance(x, allowed_types)
]
computation_dtype, result_dtype = utils.elementwise_dtypes(
*flat_args, type_promotion_kind=type_promotion
)
# TODO: pretty sure this is not quite right
def increase_prec(x):
if isinstance(x, Tensor):
return x.to(computation_dtype)
else:
return x
def decrease_prec(x):
if isinstance(x, Tensor):
return x.to(result_dtype)
else:
return x
r = f(*tree_map(increase_prec, args), **tree_map(increase_prec, kwargs))
if compute_dtype_only:
return r
else:
return tree_map(decrease_prec, r)
return inner
compute_only_pw_cast_for_opmath = partial(
type_casts,
type_promotion=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
compute_dtype_only=True,
)
pw_cast_for_opmath = partial(
type_casts, type_promotion=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
)
pw_cast_for_opmath_non_tensor_args = partial(
type_casts,
type_promotion=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
include_non_tensor_args=True,
)
pw_cast_for_int_to_real = partial(
type_casts, type_promotion=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
# This expands x until x.dim() == dim. Might be useful as an operator
def _unsqueeze_to_dim(x: Tensor, dim: int) -> Tensor:
for _ in range(dim - x.dim()):
x = x.unsqueeze(-1)
return x
@register_decomposition(aten.tanh_backward)
@out_wrapper("grad_input")
@pw_cast_for_opmath
def tanh_backward(out_grad: Tensor, y: Tensor):
return out_grad * (1 - y * y).conj_physical()
@register_decomposition(aten.sigmoid_backward)
@out_wrapper("grad_input")
@pw_cast_for_opmath
def sigmoid_backward(out_grad: Tensor, y: Tensor):
return out_grad * (y * (1 - y)).conj_physical()
@register_decomposition(aten.softplus_backward)
@out_wrapper("grad_input")
@pw_cast_for_opmath
def softplus_backward(out_grad: Tensor, x: Tensor, beta: float, threshold: float):
z = (x * beta).exp()
return torch.where((x * beta) > threshold, out_grad, out_grad * z / (z + 1.0))
@register_decomposition(aten.elu_backward)
@out_wrapper("grad_input")
@pw_cast_for_opmath
def elu_backward(
grad_output: Tensor,
alpha: float,
scale: float,
input_scale: float,
is_result: bool,
self_or_result: Tensor,
):
negcoef = alpha * scale
poscoef = scale
negiptcoef = input_scale
if is_result:
return torch.where(
self_or_result <= 0,
grad_output * negiptcoef * (self_or_result + negcoef),
grad_output * poscoef,
)
else:
return torch.where(
self_or_result <= 0,
grad_output * negiptcoef * negcoef * torch.exp(self_or_result * negiptcoef),
grad_output * poscoef,
)
@register_decomposition([aten.fill.Scalar])
def fill_scalar(self, value):
return torch.full_like(self, value)
@register_decomposition([aten.fill.Tensor])
def fill_tensor(self, value: Tensor):
torch._check(
value.dim() == 0,
lambda: f"fill only supports 0-dimension value tensor but got tensor with {value.dim()} dimensions",
)
return aten.copy(self, value)
@register_decomposition(aten.hardsigmoid)
@out_wrapper()
@pw_cast_for_opmath
def hardsigmoid(self: Tensor) -> Tensor:
return torch.clamp(torch.clamp(self + 3, min=0), max=6) / 6
@register_decomposition(aten.hardsigmoid_backward)
@out_wrapper("grad_input")
@pw_cast_for_opmath
def hardsigmoid_backward(grad_output: Tensor, self: Tensor):
return torch.where(
(self > -3.0) & (self < 3.0),
grad_output * (1.0 / 6.0),
0.0,
)
@register_decomposition(aten.hardtanh_backward)
@out_wrapper("grad_input")
def hardtanh_backward(
grad_output: Tensor, self: Tensor, min_val: float, max_val: float
):
return torch.where((self <= min_val) | (self >= max_val), 0.0, grad_output)
@register_decomposition(aten.hardswish)
@out_wrapper()
@pw_cast_for_opmath
def hardswish(self: Tensor) -> Tensor:
return self * torch.clamp(torch.clamp(self + 3, min=0), max=6) / 6
@register_decomposition(aten.hardswish_backward)
@out_wrapper()
@pw_cast_for_opmath
def hardswish_backward(grad_output: Tensor, self: Tensor) -> Tensor:
return torch.where(
self <= -3,
0.0,
torch.where(self < 3, grad_output * ((self / 3) + 0.5), grad_output),
)
@register_decomposition(aten.threshold_backward)
@out_wrapper("grad_input")
def threshold_backward(grad_output: Tensor, self: Tensor, threshold: float):
return torch.where(self <= threshold, 0, grad_output)
@register_decomposition(aten.leaky_relu_backward)
@out_wrapper("grad_input")
@pw_cast_for_opmath
def leaky_relu_backward(
grad_output: Tensor, self: Tensor, negative_slope: float, self_is_result: bool
):
return torch.where(self > 0, grad_output, grad_output * negative_slope)
@register_decomposition(aten.gelu_backward)
@out_wrapper("grad_input")
@pw_cast_for_opmath
def gelu_backward(grad: Tensor, self: Tensor, approximate: str = "none"):
M_SQRT2 = 1.41421356237309504880
M_SQRT1_2 = 0.70710678118654752440
M_2_SQRTPI = 1.12837916709551257390
if approximate == "tanh":
kBeta = M_SQRT2 * M_2_SQRTPI * 0.5
kKappa = 0.044715
x_sq = self * self
x_cube = x_sq * self
inner = kBeta * (self + kKappa * x_cube)
tanh_inner = torch.tanh(inner)
left = 0.5 * self
right = 1 + tanh_inner
left_derivative = 0.5 * right
tanh_derivative = 1 - tanh_inner * tanh_inner
inner_derivative = kBeta * (1 + 3 * kKappa * x_sq)
right_derivative = left * tanh_derivative * inner_derivative
return grad * (left_derivative + right_derivative)
else:
kAlpha = M_SQRT1_2
kBeta = M_2_SQRTPI * M_SQRT1_2 * 0.5
cdf = 0.5 * (1 + torch.erf(self * kAlpha))
pdf = kBeta * torch.exp(self * self * -0.5)
return grad * (cdf + self * pdf)
@register_decomposition(aten.mish_backward)
@pw_cast_for_opmath
def mish_backward(grad_output: Tensor, input: Tensor):
input_tanh_softplus = torch.tanh(F.softplus(input))
input_sigmoid = torch.sigmoid(input)
out = input * input_sigmoid * (1 - input_tanh_softplus * input_tanh_softplus)
return grad_output * (input_tanh_softplus + out)
@register_decomposition(aten.silu)
@out_wrapper()
@pw_cast_for_opmath
def silu(self: Tensor) -> Tensor:
return self * torch.sigmoid(self)
@register_decomposition(aten.silu_backward)
@out_wrapper("grad_input")
@pw_cast_for_opmath
def silu_backward(grad_output: Tensor, self: Tensor) -> Tensor:
sigmoid = 1 / (1 + torch.exp(-self))
return grad_output * sigmoid * (1 + self * (1 - sigmoid))
@register_decomposition(aten._prelu_kernel)
def _prelu_kernel(self: Tensor, weight: Tensor) -> Tensor:
return torch.where(self > 0, self, weight * self)
@register_decomposition(aten._prelu_kernel_backward)
def _prelu_kernel_backward(
grad_output: Tensor,
self: Tensor,
weight: Tensor,
) -> tuple[Tensor, Tensor]:
input_grad = torch.where(self > 0, grad_output, weight * grad_output)
weight_grad = torch.where(self > 0, 0.0, self * grad_output)
return (input_grad, weight_grad)
@register_decomposition(aten.rrelu_with_noise_backward)
@out_wrapper()
@pw_cast_for_opmath
def rrelu_with_noise_backward(
grad_output: Tensor,
self: Tensor,
noise: Tensor,
lower: float,
upper: float,
training: bool,
self_is_result: bool,
) -> Tensor:
if training and upper - lower > 1e-6:
return grad_output.mul(noise)
else:
negative_slope = (lower + upper) / 2
return aten.leaky_relu_backward(
grad_output, self, negative_slope, self_is_result
)
@register_decomposition(aten.log_sigmoid_backward)
@out_wrapper("grad_input")
@pw_cast_for_opmath
def log_sigmoid_backward(grad_output: Tensor, self: Tensor, buffer: Tensor) -> Tensor:
in_negative = self < 0
max_deriv = torch.where(in_negative, 1, 0)
sign = torch.where(in_negative, 1, -1)
z = torch.exp(-torch.abs(self))
return grad_output * (max_deriv - sign * (z / (1 + z)))
# CPU has a special formula that uses buffer, but disabled for convenience sake
# return (max_deriv - sign * (buffer / (1 + buffer))) * grad_output
def apply_loss_reduction(loss: Tensor, reduction: int):
if reduction == Reduction.MEAN.value:
return torch.mean(loss)
elif reduction == Reduction.SUM.value:
return torch.sum(loss)
else:
return loss
def to_real_dtype(dtype: torch.dtype):
if dtype == torch.complex32:
return torch.float16
elif dtype == torch.complex64:
return torch.float32
elif dtype == torch.complex128:
return torch.float64
# TODO: None of these loss castings are quite correct, see
# https://github.com/pytorch/pytorch/issues/76870. Also, the ATen kernels
# perform the pointwise portion in opmath, but don't maintain it between the
# pointwise portion and the reduction
@register_decomposition(aten.mse_loss)
@out_wrapper()
@pw_cast_for_opmath
def mse_loss(
self: Tensor, target: Tensor, reduction: int = Reduction.MEAN.value
) -> Tensor:
# pyrefly: ignore [unsupported-operation]
loss = (self - target) ** 2
return apply_loss_reduction(loss, reduction)
@register_decomposition(aten.mse_loss_backward)
@out_wrapper("grad_input")
@pw_cast_for_opmath
def mse_loss_backward(
grad_output: Tensor, input: Tensor, target: Tensor, reduction: int
):
norm = 2.0 / input.numel() if reduction == Reduction.MEAN.value else 2.0
return norm * (input - target) * grad_output
@register_decomposition(aten._safe_softmax)
def safe_softmax(self, dim, dtype=None):
out = torch.softmax(self, dim=dim, dtype=dtype)
masked = self.eq(float("-inf"))
masked_rows = torch.all(masked, dim=dim, keepdim=True)
zeros = torch.zeros_like(out)
return torch.where(masked_rows, zeros, out)
@register_decomposition(aten.smooth_l1_loss)
@out_wrapper()
@pw_cast_for_opmath
def smooth_l1_loss(
self: Tensor,
target: Tensor,
reduction: int = Reduction.MEAN.value,
beta: float = 1.0,
):
loss = (self - target).abs()
# pyrefly: ignore [unsupported-operation]
loss = torch.where(loss < beta, 0.5 * loss**2 / beta, loss - 0.5 * beta)
return apply_loss_reduction(loss, reduction)
@register_decomposition(aten.smooth_l1_loss_backward.default)
@pw_cast_for_opmath
def smooth_l1_loss_backward(
grad_output: Tensor, self: Tensor, target: Tensor, reduction: int, beta: float
):
norm = 1.0 / self.numel() if reduction == Reduction.MEAN.value else 1.0
x = self - target
abs_x = torch.abs(x)
norm_grad = norm * grad_output
return torch.where(
abs_x < beta,
norm_grad * x / beta,
norm_grad * torch.sign(x),
)
@register_decomposition(aten.smooth_l1_loss_backward.grad_input)
@pw_cast_for_opmath
def smooth_l1_loss_backward_out(
grad_output: Tensor,
self: Tensor,
target: Tensor,
reduction: int,
beta: float,
grad_input: Tensor,
):
result = smooth_l1_loss_backward(grad_output, self, target, reduction, beta)
_maybe_resize_out(grad_input, result.shape)
return _safe_copy_out(copy_from=result, copy_to=grad_input, exact_dtype=True)
@register_decomposition(aten.huber_loss_backward.default)
@pw_cast_for_opmath
def huber_loss_backward(
grad_output: Tensor, self: Tensor, target: Tensor, reduction: int, delta: float
):
norm = 1.0 / self.numel() if reduction == Reduction.MEAN.value else 1.0
x = self - target
return torch.where(
x < -delta,
-norm * grad_output * delta,
torch.where(x > delta, norm * grad_output * delta, norm * x * grad_output),
)
# We cannot use @out_wrapper() here, because the output tensor is not named 'out', it's 'grad_input'
@register_decomposition(aten.huber_loss_backward.out)
@pw_cast_for_opmath
def huber_loss_backward_out(
grad_output: Tensor,
self: Tensor,
target: Tensor,
reduction: int,
delta: float,
grad_input: Tensor,
):
result = huber_loss_backward(grad_output, self, target, reduction, delta)
_maybe_resize_out(grad_input, result.shape)
return _safe_copy_out(copy_from=result, copy_to=grad_input, exact_dtype=True)
def _nll_loss_backward(
    grad_output: Tensor,
    self: Tensor,
    target: Tensor,
    weight: Optional[Tensor],
    reduction: int,
    ignore_index: int,
    total_weight: Tensor,
) -> Tensor:
    """Shared backward for nll_loss / nll_loss2d.

    Scatters -1 at each target's class position, scales by grad_output (and
    the per-class weight when given), and zeroes positions whose target equals
    ``ignore_index``.
    """
    # Class dimension is 0 for unbatched 1D input, 1 otherwise.
    channel_dim = 0 if self.dim() < 2 else 1
    if reduction == Reduction.MEAN.value:
        grad_output = grad_output / total_weight
    target = target.unsqueeze(channel_dim)
    # Replace ignored targets with a valid index (0) so the scatter stays
    # in-bounds; their contribution is masked to zero further below.
    safe_target = torch.where(target != ignore_index, target, 0)
    grad_input = torch.zeros_like(self)
    grad_input = torch.scatter(grad_input, channel_dim, safe_target, -1.0)
    if grad_input.dim() > grad_output.dim() > 0:
        grad_output = grad_output.unsqueeze(channel_dim)
    if weight is not None:
        # Reshape weight so it broadcasts along the class dimension only.
        new_shape = [1 for _ in range(self.dim())]
        new_shape[channel_dim] = weight.shape[0]
        weight = weight.reshape(new_shape)
        grad_output = grad_output * weight
    grad_output = torch.where(target != ignore_index, grad_output, 0)
    return grad_input * grad_output
@register_decomposition(aten.glu_backward)
@out_wrapper("grad_input")
@pw_cast_for_opmath
def glu_backward(grad_output: Tensor, self: Tensor, dim: int) -> Tensor:
    """Backward of GLU along ``dim``.

    Forward is a * sigmoid(b) for the two halves a, b of the input; this
    returns d/da and d/db concatenated back along ``dim``.
    """
    assert self.dim() > 0, "glu does not support 0-dimensional tensors"
    wrap_dim = utils.canonicalize_dim(self.dim(), dim)
    nIn = self.size(wrap_dim)
    assert nIn % 2 == 0, (
        f"Halving dimension must be even, but dimension {wrap_dim} is size {nIn}"
    )
    inputSize = nIn // 2
    firstHalf = self.narrow(wrap_dim, 0, inputSize)
    secondHalf = self.narrow(wrap_dim, inputSize, inputSize)
    # d/da = sigmoid(b) * grad; d/db = sigmoid'(b) * a * grad, using
    # sigmoid'(b) = sigmoid(b) * (1 - sigmoid(b)).
    gradInputFirstHalf = torch.sigmoid(secondHalf)
    gradInputSecondHalf = (
        (1.0 - gradInputFirstHalf) * gradInputFirstHalf * firstHalf * grad_output
    )
    gradInputFirstHalf = gradInputFirstHalf * grad_output
    return torch.cat([gradInputFirstHalf, gradInputSecondHalf], dim=wrap_dim)
@register_decomposition(aten.nll_loss_backward)
@out_wrapper("grad_input")
def nll_loss_backward(
    grad_output: Tensor,
    self: Tensor,
    target: Tensor,
    weight: Optional[Tensor],
    reduction: int,
    ignore_index: int,
    total_weight: Tensor,
) -> Tensor:
    """Decomposition of aten.nll_loss_backward for 1D/2D inputs.

    Validates input/target/grad_output/weight shapes, then delegates to the
    shared ``_nll_loss_backward`` helper.
    """
    assert 0 <= self.dim() <= 2, "input tensor should be 1D or 2D"
    assert target.dim() <= 1, (
        "0D or 1D target tensor expected, multi-target not supported"
    )
    no_batch_dim = self.dim() == 1 and target.dim() == 0
    assert no_batch_dim or (self.shape[0] == target.shape[0]), (
        f"size mismatch (got input: {self.shape}, target: {target.shape})"
    )
    assert total_weight.numel() == 1, (
        "expected total_weight to be a single element tensor, got: ",
        f"{total_weight.shape} ({total_weight.numel()} elements)",
    )
    assert weight is None or weight.numel() == self.shape[-1], (
        "weight tensor should be defined either for all or no classes"
    )
    # Unreduced 2D loss has one grad value per batch row; any reduced loss
    # has a single scalar grad.
    if reduction == Reduction.NONE.value and self.dim() == 2:
        assert grad_output.dim() == 1 and grad_output.shape[0] == self.shape[0], (
            f"Expected a tensor of dimension 1 and tensor.size[0] == {self.shape[0]} but "
            f"got: dimension {grad_output.dim()} and tensor.size[0] == {grad_output.shape[0]}"
        )
    else:
        assert grad_output.dim() <= 1 and grad_output.numel() == 1, (
            f"Expected a single element grad_output tensor, but got: {grad_output.shape}"
        )
    return _nll_loss_backward(
        grad_output, self, target, weight, reduction, ignore_index, total_weight
    )
@register_decomposition(aten.nll_loss2d_backward)
@out_wrapper("grad_input")
def nll_loss2d_backward(
    grad_output: Tensor,
    self: Tensor,
    target: Tensor,
    weight: Optional[Tensor],
    reduction: int,
    ignore_index: int,
    total_weight: Tensor,
) -> Tensor:
    """Decomposition of aten.nll_loss2d_backward (4D spatial inputs).

    Validates the (N, C, H, W) input against the (N, H, W) target, then
    delegates to the shared ``_nll_loss_backward`` helper.
    """
    assert self.dim() == 4, (
        f"only batches of spatial inputs supported (4D tensors), but got input of dimension: {self.dim()}"
    )
    assert target.dim() == 3, (
        f"only batches of spatial targets supported (3D tensors) but got targets of dimension: {target.dim()}"
    )
    assert (
        self.shape[0] == target.shape[0]
        and self.shape[2] == target.shape[1]
        and self.shape[3] == target.shape[2]
    ), f"size mismatch (got input: {self.shape}, target: {target.shape}"
    assert total_weight.numel() == 1, (
        "expected total_weight to be a single element tensor, "
        f"got: {total_weight.shape} ( {total_weight.numel()}, elements)"
    )
    return _nll_loss_backward(
        grad_output, self, target, weight, reduction, ignore_index, total_weight
    )
@register_decomposition(aten.binary_cross_entropy)
@out_wrapper()
@pw_cast_for_opmath
def binary_cross_entropy(
    self: Tensor,
    target: Tensor,
    weight: Optional[Tensor] = None,
    reduction: int = Reduction.MEAN.value,
) -> Tensor:
    """Decomposition of aten.binary_cross_entropy.

    Computes -(target * log(self) + (1 - target) * log(1 - self)), with each
    log clamped below at -100 so inputs exactly at 0 or 1 stay finite.
    """
    # We cannot currently model this without introducing data-dependent control flow
    # TORCH_CHECK(
    #     (input_val >= 0) && (input_val <= 1),
    #     "all elements of input should be between 0 and 1"
    # )
    loss = (target - 1) * torch.maximum(
        torch.log1p(-self), self.new_full((), -100)
    ) - target * torch.maximum(torch.log(self), self.new_full((), -100))
    if weight is not None:
        loss = loss * weight
    return apply_loss_reduction(loss, reduction)
@register_decomposition(aten.binary_cross_entropy_backward)
@out_wrapper("grad_input")
@pw_cast_for_opmath
def binary_cross_entropy_backward(
    grad_output: Tensor,
    self: Tensor,
    target: Tensor,
    weight: Optional[Tensor] = None,
    reduction: int = Reduction.MEAN.value,
) -> Tensor:
    """Backward of binary_cross_entropy.

    grad = grad_output * (self - target) / (self * (1 - self)), with the
    denominator clamped at EPSILON to avoid division by zero when self is
    exactly 0 or 1.
    """
    EPSILON = 1e-12
    result = grad_output * (self - target) / torch.clamp(self * (1 - self), min=EPSILON)
    if weight is not None:
        result = result * weight
    if reduction == Reduction.MEAN.value:
        result = result / self.numel()
    return result
@register_decomposition(aten.soft_margin_loss)
@out_wrapper()
@pw_cast_for_opmath
def soft_margin_loss(
    input: Tensor,
    target: Tensor,
    reduction: int = Reduction.MEAN.value,
) -> Tensor:
    """Decomposition of aten.soft_margin_loss: log(1 + exp(-input * target)),
    reduced according to ``reduction``."""
    elementwise = torch.log1p(torch.exp(-input * target))
    return apply_loss_reduction(elementwise, reduction)
@register_decomposition(aten.soft_margin_loss_backward)
@out_wrapper("grad_input")
@pw_cast_for_opmath
def soft_margin_loss_backward(
    grad_output: Tensor,
    self: Tensor,
    target: Tensor,
    reduction: int = Reduction.MEAN.value,
) -> Tensor:
    """Backward of soft_margin_loss.

    d/dx log1p(exp(-x * t)) = t * (sigmoid(t * x) - 1); mean reduction also
    divides by numel.
    """
    grad_input = target * grad_output * (torch.sigmoid(target * self) - 1)
    if reduction == Reduction.MEAN.value:
        grad_input = grad_input / self.numel()
    return grad_input
@register_decomposition(aten.dist)
@out_wrapper()
def dist(input: Tensor, other: Tensor, p: float = 2):
    """Decomposition of aten.dist: the p-norm of (input - other)."""
    difference = input - other
    return aten.norm(difference, p=p)
@register_decomposition(aten._euclidean_dist)
@out_wrapper()
def _euclidean_dist(x1: Tensor, x2: Tensor) -> Tensor:
    """Pairwise Euclidean distance computed with a single matmul.

    Uses ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2: both operands are augmented
    with their squared norms and a column of ones so one matmul yields all
    three terms at once.
    """
    x1_norm = x1.pow(2).sum(-1, True)
    x1_pad = torch.ones_like(x1_norm, memory_format=torch.contiguous_format)
    x2_norm = x2.pow(2).sum(-1, True)
    x2_pad = torch.ones_like(x2_norm, memory_format=torch.contiguous_format)
    x1_ = torch.cat([x1.mul(-2), x1_norm, x1_pad], -1)
    x2_ = torch.cat([x2, x2_pad, x2_norm], -1)
    result = x1_.matmul(x2_.mT)
    # Floating-point rounding can make the squared distance slightly
    # negative; clamp before the sqrt.
    return result.clamp_min(0).sqrt()
@register_decomposition(aten.slice_backward)
@out_wrapper()
def slice_backward(
    grad_output: Tensor,
    input_sizes: list[int],
    dim: int,
    start: int,
    end: int,
    step: int,
):
    """Backward of slice: scatter ``grad_output`` into a zero tensor shaped
    like the original input."""
    zeros = grad_output.new_zeros(input_sizes)
    return torch.slice_scatter(zeros, grad_output, dim, start, end, step)
@register_decomposition(aten.slice.Tensor)
def slice_forward(
    # Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1
    self: Tensor,
    dim: int = 0,
    start: Optional[int] = None,
    end: Optional[int] = None,
    step: int = 1,
):
    """Decomposition of aten.slice.Tensor as an as_strided view.

    Normalizes start/end (negative wrapping, clamping to the dim size) and
    returns a view with the sliced size, scaled stride, and adjusted storage
    offset.
    """
    from torch.fx.experimental.symbolic_shapes import statically_known_true
    ndim = self.dim()
    if ndim == 0:
        raise RuntimeError("slice() cannot be applied to a 0-dim tensor.")
    dim = utils.canonicalize_dim(self.dim(), dim)
    sizes = list(self.size())
    strides = list(self.stride())
    if step <= 0:
        raise RuntimeError("slice step must be positive")
    # A missing end means "to the end"; sys.maxsize is the ATen sentinel.
    start_val = start if start is not None else 0
    end_val = end if end is not None else sys.maxsize  # 2^63 - 1
    # Negative indices wrap once, then both endpoints are clamped to [0, size].
    if start_val < 0:
        start_val += sizes[dim]
    if end_val < 0:
        end_val += sizes[dim]
    if start_val < 0:
        start_val = 0
    elif start_val > sizes[dim]:
        start_val = sizes[dim]
    if statically_known_true(end_val == sys.maxsize):
        end_val = sizes[dim]
    elif end_val < start_val:
        end_val = start_val
    elif end_val > sizes[dim]:
        end_val = sizes[dim]
    storage_offset = self.storage_offset() + start_val * strides[dim]
    len = end_val - start_val
    # Ceil-divide the span by step to get the number of selected elements.
    sizes[dim] = (len + step - 1) // step
    strides[dim] *= step
    if self.is_quantized:
        raise NotImplementedError(
            "Slice decomposition for quantized tensors aren't implemented"
        )
    else:
        return self.as_strided(sizes, strides, storage_offset)
def _normalize_start_end(
    x: Tensor, dim: int, start: Optional[int], end: Optional[int]
) -> tuple[int, int]:
    """Resolve slice endpoints for ``x`` along ``dim``.

    Returns (start, end) with both values in [0, x.shape[dim]] and
    start <= end; ``None`` means "from the beginning" / "to the end".
    """
    size = x.shape[dim]

    def _resolve(value, lower, upper, fallback) -> int:
        if value is None:
            return fallback
        if value < 0:
            value += size
        return min(max(value, lower), upper)

    lo = _resolve(start, 0, size, 0)
    hi = _resolve(end, lo, size, size)
    return lo, hi
# This is not in torch._refs because aten.index used by
# aten._unsafe_masked_index does not have a decomposition.
@register_decomposition(aten.slice_scatter)
@out_wrapper()
def slice_scatter(
    input: Tensor,
    src: Tensor,
    dim: int = 0,
    start: Optional[int] = None,
    end: Optional[int] = None,
    step: int = 1,
):
    """Decomposition of aten.slice_scatter.

    Writes ``src`` into the slice input[..., start:end:step, ...] along
    ``dim`` using a boolean mask plus a masked gather, leaving the rest of
    ``input`` unchanged.
    """
    dim = utils.canonicalize_dim(input.ndim, dim)
    dim_size = input.shape[dim]
    start, end = _normalize_start_end(input, dim, start, end)

    src_size = list(input.shape)
    src_size[dim] = (end - start + (step - 1)) // step
    src = src.expand(src_size)

    # Whole-dim scatter replaces everything; just return a copy of src.
    if start == 0 and end == dim_size and step == 1:
        return src.clone()

    indices: list[Optional[Tensor]] = [None] * input.dim()
    idx = torch.arange(dim_size, device=input.device)
    # Map each position in the full dim to its source index in src.
    indices[dim] = (idx - start) // step

    # Mask marks positions inside [start, end) that land on the step grid.
    mask = torch.ones(dim_size, device=input.device, dtype=torch.bool)
    if start != 0:
        mask = torch.logical_and(mask, idx >= start)
    if end != dim_size:
        mask = torch.logical_and(mask, idx < end)

    if step != 1:
        mask = torch.logical_and(mask, (idx - start) % step == 0)

    mask_shape = [1] * input.dim()
    mask_shape[dim] = -1
    mask = mask.view(mask_shape)
    return aten.where(mask, aten._unsafe_masked_index(src, mask, indices, 0), input)
@register_decomposition(aten.select_backward)
@out_wrapper()
def select_backward(grad_output: Tensor, input_sizes: list[int], dim: int, index: int):
    """Backward of select: place ``grad_output`` at ``index`` along ``dim`` in
    a zero tensor shaped like the original input."""
    zeros = grad_output.new_zeros(input_sizes)
    return torch.select_scatter(zeros, grad_output, dim, index)
@register_decomposition(aten.diagonal_backward)
@out_wrapper()
def diagonal_backward(
    grad_output: Tensor, input_sizes: list[int], offset: int, dim1: int, dim2: int
):
    """Backward of diagonal: scatter ``grad_output`` onto the selected
    diagonal of a zero tensor shaped like the original input."""
    zeros = grad_output.new_zeros(input_sizes)
    return torch.diagonal_scatter(zeros, grad_output, offset, dim1, dim2)
def _cast_grad_to_input_dtype(
    grad_output: Tensor, grad_input: Tensor, input_dtype: torch.dtype
):
    """Cast ``grad_input`` to ``input_dtype`` when ``grad_output`` was computed
    in a different dtype; otherwise return it unchanged."""
    if grad_output.dtype == input_dtype:
        return grad_input
    return grad_input.to(input_dtype)
@register_decomposition(aten._softmax_backward_data)
@out_wrapper("grad_input")
@compute_only_pw_cast_for_opmath
def _softmax_backward_data(
    grad_output: Tensor, output: Tensor, dim: int, input_dtype: torch.dtype
):
    """Backward of softmax: out * (grad - sum(grad * out, dim)).

    The result is cast back to ``input_dtype`` and made contiguous to match
    the eager kernel's output.
    """
    new_grad_output = grad_output * output
    grad_input = new_grad_output - output * torch.sum(
        new_grad_output, dim=dim, keepdim=True
    )

    # CPU kernel doesn't respect input_dtype, but following check doesn't work for meta tensor
    # if grad_output.device == torch.device("cpu"):
    #     return grad_input.contiguous()
    return _cast_grad_to_input_dtype(grad_output, grad_input, input_dtype).contiguous()
@register_decomposition(aten._log_softmax_backward_data)
@out_wrapper()
@compute_only_pw_cast_for_opmath
def _log_softmax_backward_data(
    grad_output: Tensor, output: Tensor, dim: int, input_dtype: torch.dtype
):
    """Backward of log_softmax: grad - exp(out) * sum(grad, dim).

    ``output`` holds log-probabilities, so exp(output) recovers the softmax.
    """
    grad_input = grad_output - torch.exp(output) * torch.sum(
        grad_output, dim=dim, keepdim=True
    )
    return _cast_grad_to_input_dtype(grad_output, grad_input, input_dtype)
def _im2col_col2im_indices_along_dim(
    input_d, kernel_d, dilation_d, padding_d, stride_d, device
):
    """Utility function to implement im2col and col2im.

    Returns a (kernel_d, num_blocks) int64 tensor whose entry [k, b] is the
    index along this spatial dim of kernel element k within block b, in the
    padded input's coordinate space.
    """
    # Extent of the padded input that block starts may cover.
    span = input_d + 2 * padding_d - dilation_d * (kernel_d - 1)

    make_arange = partial(torch.arange, dtype=torch.int64, device=device)

    # Starting index of each sliding block along this dim, shape (1, B).
    window_starts = make_arange(0, span, stride_d).unsqueeze(0)

    # Dilated offsets of the kernel elements, shape (K, 1).
    kernel_offsets = make_arange(0, kernel_d * dilation_d, dilation_d).unsqueeze(-1)

    # Broadcast-add: every kernel offset against every block start.
    return window_starts + kernel_offsets
@register_decomposition(aten.im2col)
@out_wrapper()
def im2col(
    input: Tensor,
    kernel_size: list[int],
    dilation: list[int],
    padding: list[int],
    stride: list[int],
) -> Tensor:
    """Decomposition of aten.im2col (the op behind ``F.unfold``).

    Extracts sliding local blocks of shape ``kernel_size`` from a 3D
    (unbatched) or 4D (batched) input and returns them flattened as
    (N, C * kernel_h * kernel_w, num_blocks).
    """
    torch._check(len(kernel_size) == 2, lambda: "im2col(): only 2D kernel supported")
    torch._check(len(dilation) == 2, lambda: "im2col(): only 2D dilation supported")
    torch._check(len(padding) == 2, lambda: "im2col(): only 2D padding supported")
    torch._check(len(stride) == 2, lambda: "im2col(): only 2D stride supported")

    def check_positive(param, param_name, strict=True):
        # strict=True requires > 0; strict=False also allows 0 (padding).
        cond = all(p > 0 for p in param) if strict else all(p >= 0 for p in param)
        torch._check(
            cond, lambda: f"{param_name} should be greater than zero, but got {param}"
        )

    check_positive(kernel_size, "kernel_size")
    check_positive(dilation, "dilation")
    # Fix: this previously re-validated `dilation` under the name "padding",
    # leaving `padding` itself unchecked. Validate padding (>= 0), matching
    # the corresponding check in col2im.
    check_positive(padding, "padding", strict=False)
    check_positive(stride, "stride")

    shape = input.shape
    ndim = len(shape)
    torch._check(
        ndim in (3, 4) and all(d != 0 for d in shape[-3:]),
        lambda: "Expected 3D or 4D (batch mode) tensor for input with possible 0 batch size "
        f"and non-zero dimensions, but got: {tuple(shape)}",
    )
    # Number of block positions per spatial dim (standard conv output size).
    output_size = tuple(
        1 + (out + 2 * pad - dil * (ker - 1) - 1) // st
        for out, pad, dil, ker, st in zip(
            shape[-2:], padding, dilation, kernel_size, stride
        )
    )
    torch._check(
        all(c > 0 for c in output_size),
        lambda: f"Given an input with spatial size {tuple(shape[-2:])}, "
        f"kernel_size={kernel_size}, dilation={dilation}, "
        f"padding={padding}, stride={stride}, "
        "the calculated shape of the array of sliding blocks "
        f"is {output_size}, but its components must be at least one.",
    )
    batched_input = ndim == 4
    if not batched_input:
        input = input.unsqueeze(0)

    batch_dim, channel_dim, input_h, input_w = input.shape

    stride_h, stride_w = stride
    padding_h, padding_w = padding
    dilation_h, dilation_w = dilation
    kernel_h, kernel_w = kernel_size

    blocks_row_indices = _im2col_col2im_indices_along_dim(
        input_h, kernel_h, dilation_h, padding_h, stride_h, input.device
    )
    blocks_col_indices = _im2col_col2im_indices_along_dim(
        input_w, kernel_w, dilation_w, padding_w, stride_w, input.device
    )

    # Note that F.pad takes (padding_left, padding_right, padding_top, padding_bottom)
    padded_input = F.pad(input, (padding_w, padding_w, padding_h, padding_h))

    blocks_row_indices = blocks_row_indices.unsqueeze(-1).unsqueeze(-1)
    output = padded_input[:, :, blocks_row_indices, blocks_col_indices]
    # Reorder to (N, C, kernel_h, kernel_w, rows, cols) before flattening the
    # kernel dims into channels and the block dims into one axis.
    output = output.permute(0, 1, 2, 4, 3, 5)
    num_blocks_row = blocks_row_indices.size(1)
    num_blocks_col = blocks_col_indices.size(1)
    output = output.reshape(
        batch_dim, channel_dim * kernel_h * kernel_w, num_blocks_row * num_blocks_col
    )
    if not batched_input:
        output = output.squeeze(0)
    return output
@register_decomposition(aten.col2im)
@out_wrapper()
@pw_cast_for_opmath
def col2im(
    input: Tensor,
    output_size: list[int],
    kernel_size: list[int],
    dilation: list[int],
    padding: list[int],
    stride: list[int],
) -> Tensor:
    """Decomposition of aten.col2im (the op behind ``F.fold``).

    Inverse of im2col: sums sliding blocks back into an image of spatial size
    ``output_size``; implemented as the hand-derived backward of im2col.
    """
    torch._check(len(output_size) == 2, lambda: "only 2D output_size supported")
    torch._check(len(kernel_size) == 2, lambda: "only 2D kernel supported")
    torch._check(len(dilation) == 2, lambda: "only 2D dilation supported")
    torch._check(len(padding) == 2, lambda: "only 2D padding supported")
    torch._check(len(stride) == 2, lambda: "only 2D stride supported")

    def check_positive(param, param_name, strict=True):
        # strict=True requires > 0; strict=False also allows 0 (padding).
        cond = all(p > 0 for p in param) if strict else all(p >= 0 for p in param)
        torch._check(
            cond, lambda: f"{param_name} should be greater than zero, but got {param}"
        )

    check_positive(kernel_size, "kernel_size")
    check_positive(dilation, "dilation")
    check_positive(padding, "padding", strict=False)
    check_positive(stride, "stride")
    check_positive(output_size, "output_size")

    shape = input.shape
    ndim = len(shape)
    torch._check(
        ndim in (2, 3) and all(d != 0 for d in shape[-2:]),
        lambda: "Expected 2D or 3D (batch mode) tensor for input with possible 0 batch size "
        f"and non-zero dimensions, but got: {tuple(shape)}",
    )
    prod_kernel_size = kernel_size[0] * kernel_size[1]
    torch._check(
        shape[-2] % prod_kernel_size == 0,
        lambda: "Expected size of input's first non-batch dimension to be divisible by the "
        f"product of kernel_size, but got input.shape[-2] = {shape[-2]} and "
        f"kernel_size={kernel_size}",
    )
    # Number of block positions per spatial dim (must match input.size(-1)).
    col = [
        1 + (out + 2 * pad - dil * (ker - 1) - 1) // st
        for out, pad, dil, ker, st in zip(
            output_size, padding, dilation, kernel_size, stride
        )
    ]
    L = col[0] * col[1]
    torch._check(
        shape[-1] == L,
        lambda: f"Given output_size={output_size}, kernel_size={kernel_size}, "
        f"dilation={dilation}, padding={padding}, stride={stride}, "
        f"expected input.size(-1) to be {L} but got {shape[-1]}.",
    )
    torch._check(
        L > 0,
        lambda: f"Given output_size={output_size}, kernel_size={kernel_size}, "
        f"dilation={dilation}, padding={padding}, stride={stride}, "
        f"expected input.size(-1) to be {L} but got {shape[-1]}.",
    )
    batched_input = ndim == 3
    if not batched_input:
        input = input.unsqueeze(0)

    shape = input.shape

    out_h, out_w = output_size
    stride_h, stride_w = stride
    padding_h, padding_w = padding
    dilation_h, dilation_w = dilation
    kernel_h, kernel_w = kernel_size

    # col2im is defined as the backwards of im2col, so we differentiate its decomposition by hand
    input = input.reshape([shape[0], shape[1] // prod_kernel_size] + kernel_size + col)
    input = input.permute(0, 1, 2, 4, 3, 5)

    indices_row = _im2col_col2im_indices_along_dim(
        out_h, kernel_h, dilation_h, padding_h, stride_h, input.device
    )
    indices_row = _unsqueeze_to_dim(indices_row, 4)
    indices_col = _im2col_col2im_indices_along_dim(
        out_w, kernel_w, dilation_w, padding_w, stride_w, input.device
    )

    # Accumulate overlapping blocks into the padded output with index_put,
    # then crop the padding off with negative F.pad values.
    output_padded_size = [o + 2 * p for o, p in zip(output_size, padding)]
    output = input.new_zeros(
        [shape[0], shape[1] // prod(kernel_size)] + output_padded_size
    )
    idx = (None, None, indices_row, indices_col)
    output = aten._unsafe_index_put(output, idx, input, accumulate=True)
    output = F.pad(output, (-padding_w, -padding_w, -padding_h, -padding_h))

    if not batched_input:
        output = output.squeeze(0)
    return output
@register_decomposition(aten.native_dropout_backward)
@out_wrapper()
def native_dropout_backward(grad_output: Tensor, mask: Tensor, scale: float):
    """Backward of native_dropout: grad * mask * scale."""
    # According to the CUDA kernel implementation we should have this test;
    # but it seems to fail tests!
    # torch._check(mask.dtype == torch.bool, lambda: f"Mask should be Bool Scalar Type {mask.dtype}")

    # Mimicking CUDA kernel's behavior for output stride: output follow input's memory format
    # This different from TensorIterator's behavior
    r = (grad_output * (mask.type_as(grad_output) * scale)).clone(
        memory_format=utils.suggest_memory_format(grad_output)
    )
    return r
@register_decomposition(aten.unfold_backward)
@out_wrapper()
def unfold_backward(
    grad: Tensor, input_size: list[int], dimension: int, size: int, step: int
) -> Tensor:
    """Backward of Tensor.unfold: accumulate window gradients back into a
    zero tensor of ``input_size`` (overlapping windows sum)."""
    if len(input_size) == 0:
        return torch.squeeze_copy(grad, 0)
    dim = utils.canonicalize_dim(len(input_size), dimension)
    # Unfold the index tensor the same way the input was unfolded; idx then
    # maps each windowed gradient element to its source position.
    idx = torch.arange(input_size[dim], device=grad.device, dtype=torch.int32)
    idx = idx.unfold(0, size, step).flatten()
    grad = grad.movedim(-1, dim + 1).flatten(dim, dim + 1)
    # nb. At the moment this generates two kernels in triton
    # It could potentially be fused into one call to scatter_reduce,
    # in the case step <= size provided scatter_reduce generates 1 kernel
    grad_input = grad.new_zeros(input_size)
    index = (None,) * dim + (idx,)
    return aten._unsafe_index_put(grad_input, index, grad, accumulate=True).contiguous()
@register_decomposition(aten.logit_backward.default)
@pw_cast_for_opmath
def logit_backward(
    grad_output: Tensor, self: Tensor, eps: Optional[float] = None
) -> Tensor:
    """Backward of logit: grad / (x * (1 - x)) inside the valid range.

    With ``eps``, inputs outside [eps, 1 - eps] get zero gradient; without
    it, inputs outside [0, 1] produce NaN.
    """
    if eps is not None:
        lo = eps
        hi = 1.0 - lo
        return torch.where(
            torch.logical_and(self >= lo, self <= hi),
            grad_output / (self * (1.0 - self)),
            0.0,
        )
    else:
        return torch.where(
            torch.logical_and(self >= 0.0, self <= 1.0),
            grad_output / (self * (1.0 - self)),
            self.new_full((), float("nan")),
        )
@register_decomposition(aten.dropout)
@aten.dropout.default.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten.dropout.default.py_impl(DispatchKey.Autograd)
def dropout(input: Tensor, p: float, train: Optional[bool]):
    """Decomposition of aten.dropout: native_dropout while training (p > 0),
    otherwise a plain clone of the input."""
    if not train or p == 0:
        return input.clone()
    return aten.native_dropout(input, p, train)[0]
@register_decomposition(aten.native_dropout)
@out_wrapper("out0", "out1")
def native_dropout(input: Tensor, p: float, train: Optional[bool]):
    """Decomposition of aten.native_dropout.

    Training: returns (input masked and scaled by 1/(1-p), bool keep-mask);
    eval or p == 0: returns the input itself (no copy) with an all-ones mask.
    """
    if train and p != 0:
        # p == 1 would divide by zero below; short-circuit to all-zeros.
        if p == 1:
            return (torch.zeros_like(input), torch.zeros_like(input, dtype=torch.bool))
        if not input.dtype.is_floating_point:
            raise RuntimeError(
                "result type Float can't be cast to the desired output type Long"
            )
        bool_mask = torch.rand_like(input) > p
        res = bool_mask * input * float(1.0 / (1.0 - p))
        return (res, bool_mask)
    else:
        return (input, torch.ones_like(input, dtype=torch.bool))
@register_decomposition(aten._softmax)
@out_wrapper()
def _softmax(x: Tensor, dim: int, half_to_float: bool):
    """Decomposition of aten._softmax using the max-subtraction trick for
    numerical stability; ``half_to_float`` keeps the fp32 result for half inputs."""
    from torch.fx.experimental.symbolic_shapes import guard_or_false

    # eager softmax returns a contiguous tensor. Ensure that decomp also returns
    # a contiguous tensor.
    x = x.contiguous()
    if half_to_float:
        assert x.dtype == torch.half
    computation_dtype, result_dtype = utils.elementwise_dtypes(
        x, type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
    )
    x = x.to(computation_dtype)
    # Skip the max subtraction for empty tensors, where amax has no values
    # to reduce over.
    if guard_or_false(x.numel() == 0):
        unnormalized = torch.exp(x)
    else:
        x_max = torch.amax(x, dim, keepdim=True)
        unnormalized = torch.exp(x - x_max)
    result = unnormalized / torch.sum(unnormalized, dim, keepdim=True)
    if not half_to_float:
        result = result.to(result_dtype)
    return result
@register_decomposition(aten._log_softmax)
@out_wrapper(exact_dtype=True)
def _log_softmax(x: Tensor, dim: int, half_to_float: bool):
    """Decomposition of aten._log_softmax: x - max - log(sum(exp(x - max))),
    with max subtraction for numerical stability."""
    from torch.fx.experimental.symbolic_shapes import guard_or_false

    # eager log_softmax returns a contiguous tensor. Ensure that decomp also
    # returns a contiguous tensor.
    x = x.contiguous()
    if half_to_float:
        assert x.dtype == torch.half
    computation_dtype, result_dtype = utils.elementwise_dtypes(
        x, type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
    )
    x = x.to(computation_dtype)
    # Skip the max subtraction for empty tensors, where amax has no values
    # to reduce over.
    if guard_or_false(x.numel() == 0):
        shifted = x
    else:
        x_max = torch.amax(x, dim, keepdim=True)
        shifted = x - x_max
    shifted_logsumexp = torch.log(torch.sum(torch.exp(shifted), dim, keepdim=True))
    result = shifted - shifted_logsumexp
    if not half_to_float:
        result = result.to(result_dtype)
    return result
@register_decomposition(aten.embedding)
@out_wrapper()
def embedding(
    weight: Tensor,
    indices: Tensor,
    padding_idx: int = -1,
    scale_grad_by_freq: bool = False,
    sparse: bool = False,
) -> Tensor:
    """Decomposition of aten.embedding: gather rows of ``weight`` by ``indices``.

    ``padding_idx``, ``scale_grad_by_freq`` and ``sparse`` only affect the
    backward, so they are ignored here.
    """
    assert weight.dim() == 2, "'weight' must be 2-D"
    # Nb. scale_grad_by_freq is not used in the forward
    if indices.ndim <= 1:
        # We need this one as weight[indices] calls item() in these cases
        out = weight.index_select(0, indices)
        if indices.ndim == 0:
            # A scalar index yields a single embedding row, not a batch of one.
            out = out.squeeze(0)
        return out
    else:
        return weight[indices]
@register_decomposition(aten.embedding_dense_backward)
@out_wrapper()
def embedding_dense_backward(
    grad_output: Tensor,
    indices: Tensor,
    num_weights: int,
    padding_idx: int,
    scale_grad_by_freq: bool,
):
    """Backward of embedding: accumulate grads per index into the weight grad.

    Rows indexed multiple times sum their gradients; the ``padding_idx`` row
    receives zero gradient; ``scale_grad_by_freq`` divides each contribution
    by how often its index occurs.
    """
    computation_dtype, result_dtype = utils.elementwise_dtypes(
        grad_output, type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
    )
    grad_output = grad_output.to(computation_dtype)
    indices = _maybe_convert_to_dtype(indices, torch.long)  # type: ignore[assignment]
    if scale_grad_by_freq:
        # Histogram of index occurrences, used to rescale each gradient.
        counts = indices.new_zeros((num_weights,))
        ones = torch.ones_like(indices)
        counts = aten._unsafe_index_put(counts, [indices], ones, accumulate=True)
        grad_weights_scale = counts[indices]
        grad_output = grad_output / grad_weights_scale.unsqueeze(-1)

    # Zero out contributions at padding_idx before scattering.
    mask = _unsqueeze_to_dim(indices == padding_idx, grad_output.ndim)
    grad = grad_output.masked_fill(mask, 0)
    grad_weight = grad_output.new_zeros(
        (num_weights,) + grad_output.shape[indices.ndim :]
    )
    return aten._unsafe_index_put(grad_weight, [indices], grad, accumulate=True).to(
        result_dtype
    )
def prod(x: list[int]):
    """Return the product of the ints in ``x`` (1 for an empty list)."""
    result = 1
    for factor in x:
        result = result * factor
    return result
def _pad_chunk(
    tensors: list[Tensor],
    dim: int,
    num_chunks: int,
) -> list[Tensor]:
    """Pad each tensor along ``dim`` up to a multiple of ``num_chunks`` and
    reshape it so that dim splits into (num_chunks, -1).

    Helper for _chunk_cat.
    """
    padded_tensors = []
    for tensor in tensors:
        tensor_size = tensor.size()
        # Round the dim size up to the next multiple of num_chunks.
        pad_along_dim = (tensor_size[dim] + num_chunks - 1) // num_chunks * num_chunks
        if pad_along_dim != tensor_size[dim]:
            # Use aten.constant_pad_nd instead of copy_ for functionalization
            pad = [0] * 2 * (tensor.ndim - dim - 1) + [
                0,
                pad_along_dim - tensor_size[dim],
            ]
            tensor = aten.constant_pad_nd(tensor, pad, 0)
        # Collapse everything from dim onward into (num_chunks, -1).
        view_size = tensor_size[:dim] + torch.Size([num_chunks, -1])
        padded_tensors.append(tensor.reshape(view_size))
    return padded_tensors
def have_same_ndims(tensors: list[Tensor]):
    """Return True when every tensor in ``tensors`` has the same number of
    dimensions as the first one."""
    reference_ndim = tensors[0].ndim
    return all(tensor.ndim == reference_ndim for tensor in tensors)
def leading_dimension_matches(tensors: list[Tensor], dim: int):
leading_dim_sizes = tensors[0].size()[:dim]
for tensor in tensors:
torch._check(
tensor.size()[:dim] == leading_dim_sizes,
lambda: "_chunk_cat expects same sizes of 0,...,dim-1 dimensions for all tensors",
)
def _preprocess_chunk_cat_inputs(
    tensors: list[Tensor],
    dim: int,
    num_chunks: int,
):
    """Validate _chunk_cat inputs and return the canonicalized ``dim``.

    All tensors must be non-empty, share dtype and device, and agree on
    dimensions 0..dim-1; when ndims differ, ``dim`` must be non-negative and
    valid for every tensor.
    """
    torch._check(num_chunks >= 1, lambda: "_chunk_cat expects positive num_chunks")
    torch._check(
        len(tensors) > 0, lambda: "_chunk_cat expects a non-empty input tensor list"
    )
    expected_dtype = tensors[0].dtype
    expected_device = tensors[0].device
    for tensor in tensors:
        torch._check(tensor.numel() > 0, lambda: "_chunk_cat expects non-empty tensor")
        torch._check(
            tensor.dtype == expected_dtype,
            lambda: "_chunk_cat expects all input tensors with the same dtype",
        )
        torch._check(
            tensor.device == expected_device,
            lambda: "_chunk_cat expects all inputs tensors on the same device",
        )
    if have_same_ndims(tensors):
        dim = utils.canonicalize_dim(tensors[0].dim(), dim)
    else:
        # With mixed ndims a negative dim is ambiguous, so it is rejected.
        torch._check(
            dim >= 0,
            lambda: "_chunk_cat expects non-negative dim when input tensors have different ndims",
        )
        for tensor in tensors:
            torch._check(
                dim < tensor.ndim,
                lambda: "_chunk_cat expects dim < ndim for all input tensors",
            )
    leading_dimension_matches(tensors, dim)
    return dim
@register_decomposition([aten._chunk_cat.default, aten._chunk_cat.out])
def _chunk_cat(
    tensors: list[Tensor],
    dim: int,
    num_chunks: int,
    out: Optional[Tensor] = None,
) -> Tensor:
    """Decomposition of aten._chunk_cat.

    Pads each tensor along ``dim`` to a multiple of ``num_chunks``, splits it
    into num_chunks pieces, and concatenates all pieces; supports the
    out-variant via the optional ``out`` tensor.
    """
    dim = _preprocess_chunk_cat_inputs(tensors, dim, num_chunks)
    padded_tensors = _pad_chunk(tensors, dim, num_chunks)
    # _pad_chunk reshaped dim into (num_chunks, chunk_size), so the chunk
    # pieces now live at dim + 1.
    if out is None:
        return torch.cat(padded_tensors, dim + 1)
    else:
        torch.cat(padded_tensors, dim + 1, out=out)
        return out
# out_wrapper currently does not allow optional outputs
@register_decomposition(
    [aten.split_with_sizes_copy.default, aten.split_with_sizes_copy.out]
)
def split_with_sizes_copy(
    self: Tensor,
    split_sizes: list[int],
    dim: int = 0,
    out: Optional[list[Tensor]] = None,
) -> Optional[list[Tensor]]:
    """Decomposition of aten.split_with_sizes_copy.

    Like split_with_sizes but returns contiguous copies instead of views;
    with ``out`` it copies into the provided tensors and returns None.
    """
    splits = aten.split_with_sizes(self, split_sizes, dim=dim)
    if out is None:
        return [s.clone(memory_format=torch.contiguous_format) for s in splits]
    else:
        for output, split in zip(out, splits):
            _maybe_resize_out(output, split.shape)
            _safe_copy_out(copy_from=split, copy_to=output, exact_dtype=True)
        return None
@register_decomposition(aten.unsafe_split.Tensor)
def unsafe_split(input: Tensor, split_size: int, dim: int = 0) -> tuple[Tensor, ...]:
    """Alias decomposition: "unsafe" split shares the semantics of split."""
    pieces = aten.split.Tensor(input, split_size, dim)
    return pieces
@register_decomposition(aten.unsafe_split_with_sizes.default)
def unsafe_split_with_sizes(
    input: Tensor, split_sizes: list[int], dim: int = 0
) -> tuple[Tensor, ...]:
    """Alias decomposition: "unsafe" split_with_sizes shares the semantics of
    split_with_sizes."""
    pieces = aten.split_with_sizes.default(input, split_sizes, dim)
    return pieces
@register_decomposition(aten.split.Tensor)
def split(self: Tensor, split_size: int, dim: int = 0) -> tuple[Tensor, ...]:
    """Decomposition of aten.split.Tensor in terms of split_with_sizes.

    Produces ceil(dim_size / split_size) chunks of ``split_size`` with a
    possibly smaller final chunk; split_size == 0 is only valid when the dim
    is empty.
    """
    input_sizes = self.shape
    dim_size = input_sizes[dim]
    if split_size == 0:
        assert dim_size == 0
        return (self.detach(),)
    chunks = (dim_size + split_size - 1) // split_size

    # Avoid importing sympy at a module level
    from torch.fx.experimental.symbolic_shapes import guard_int

    # The chunk count must be a concrete int to build the size list below.
    chunks = guard_int(chunks)
    split_sizes = [split_size for i in range(chunks)]
    # Last chunk absorbs the remainder when dim_size isn't a multiple.
    split_sizes[-1] = split_size - (split_size * chunks - dim_size)
    return torch.split(self, split_sizes, dim)
@aten.tensor_split.tensor_indices_or_sections.py_impl(
    DispatchKey.CompositeImplicitAutograd
)
def tensor_split_tensor_indices_or_sections_py_impl(
    self: Tensor,
    tensor_indices_or_sections: Tensor,
    dim: int = 0,
) -> tuple[Tensor, ...]:
    """Implementation of tensor_split's tensor-argument overload.

    A 0-D int64 CPU tensor is treated as a section count; a 1-D one as a
    list of split indices. Either way the values are extracted with .item()
    and dispatched to the plain tensor_split overloads.
    """
    assert tensor_indices_or_sections.device.type == "cpu"
    assert tensor_indices_or_sections.dtype == torch.int64
    split_dim = tensor_indices_or_sections.dim()
    torch._check(
        split_dim == 1 or split_dim == 0,
        lambda: "tensor_split expected tensor_indices_or_sections to be a zero-dimensional "
        f"or one-dimensional tensor, but got a tensor with {split_dim} dims",
    )
    if split_dim == 0:
        sections = tensor_indices_or_sections.item()
        assert isinstance(sections, IntLike)
        return self.tensor_split(sections, dim)
    else:
        ctx = nullcontext
        if (fake_mode := torch._guards.detect_fake_mode()) and (
            shape_env := fake_mode.shape_env
        ):
            ctx = shape_env.ignore_fresh_unbacked_symbols  # type: ignore[assignment]
            # In fake tensor prop, we end up calling slice() with these unbacked indices.
            # Because slice has flexible semantics, the unbacked handling generates new output sizes
            # for each slice, effectively clobbering over these index symbols.
            # To avoid PendingUnbackedSymbolNotFound errors, we tell the compiler it's fine to not bind these.
        with ctx():
            indices = [i.item() for i in tensor_indices_or_sections]
        # WARNING: Tempted to torch._check(x>0) on the indices here?  You
        # can't: tensor_split works with negative values in indices:
        #
        # >>> torch.tensor_split(torch.randn(10), torch.tensor([-5, 5]))
        # (tensor([ 0.3540,  2.1074, -0.8507,  1.1639,  0.3055]), tensor([]),
        # tensor([-0.4285,  1.0692, -0.1776,  0.9362,  1.6143]))
        #
        # Sorry, I don't make the rules.  Explicitly do the item call in user
        # code if you KNOW that they are non-negative.
        return self.tensor_split(indices, dim)
# TODO: this doesn't appear to have enough precision in bfloat16
@register_decomposition(aten.addmm)
@out_wrapper(exact_dtype=True)
@pw_cast_for_opmath
def addmm(self: Tensor, mat1: Tensor, mat2: Tensor, beta: int = 1, alpha: int = 1):
    """Decomposition of aten.addmm: beta * self + alpha * (mat1 @ mat2)."""
    # Integer tensors require integer scalars for exact arithmetic.
    if not self.is_floating_point() and not self.is_complex():
        beta = int(beta)
        alpha = int(alpha)
    out = alpha * torch.mm(mat1, mat2)
    if beta == 0:
        return out

    # The output of aten.addmm is contiguous, we need to match this behavior in the decomposition.
    # The original implementation 'beta * self + out' would return a strided tensor if `self` is strided.
    # We thus use `out`, the output of torch.mm, which is always contiguous, as the first argument for addition.
    # This is relying on TensorIterator's behavior that it takes higher precedence on the stride of first input.
    # Alternative, we can write `(beta * self + out).contiguous()`, but it introduces another copy in some cases.
    # This implementation is not ideal, and we should revisit this when we have a better solution.
    return out + beta * self
@register_decomposition(aten._addmm_activation)
@out_wrapper()
@pw_cast_for_opmath
def _addmm_activation(
    self: Tensor,
    mat1: Tensor,
    mat2: Tensor,
    beta: int = 1,
    alpha: int = 1,
    use_gelu: bool = False,
):
    """addmm fused with an activation: GELU when ``use_gelu``, else ReLU."""
    out = addmm(self, mat1, mat2, beta, alpha)
    if use_gelu:
        # CUDA's fused kernel uses the tanh approximation of GELU.
        if self.is_cuda:
            return aten.gelu(out, approximate="tanh")
        else:
            return aten.gelu(out)
    return aten.relu(out)
@register_decomposition(aten.addmv)
@out_wrapper(exact_dtype=True)
@pw_cast_for_opmath
def addmv(self: Tensor, mat1: Tensor, vec: Tensor, beta: int = 1, alpha: int = 1):
    """Decomposition of aten.addmv: beta * self + alpha * (mat1 @ vec)."""
    # Integer tensors require integer scalars for exact arithmetic.
    if not self.is_floating_point() and not self.is_complex():
        beta = int(beta)
        alpha = int(alpha)
    out = alpha * torch.mv(mat1, vec)
    if beta == 0:
        return out
    if out.numel() == 0:  # handle empty matrix
        return beta * self
    return out + beta * self
@register_decomposition(aten.native_group_norm_backward.default)
@pw_cast_for_opmath
def native_group_norm_backward(
    grad_output: Tensor,
    input: Tensor,
    mean: Tensor,
    rstd: Tensor,
    gamma: Optional[Tensor],
    N: int,
    C: int,
    HxW: int,
    group: int,
    output_mask: list[bool],
) -> tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor]]:
    """Backward of group norm.

    ``mean`` and ``rstd`` are the per-(sample, group) statistics saved by the
    forward pass, shaped ``(N, group)``.  Returns ``(d_input, d_gamma,
    d_bias)``; each entry is ``None`` when its ``output_mask`` flag is False.
    """
    # Sanity-check the saved tensors against the inputs before doing any math.
    utils.check_same_device(
        grad_output, input, mean, rstd, allow_cpu_scalar_tensors=False
    )
    utils.check_same_shape(input, grad_output, allow_cpu_scalar_tensors=False)
    utils.check_same_shape(mean, rstd, allow_cpu_scalar_tensors=False)
    torch._check(
        input.numel() == N * C * HxW,
        lambda: f"Expect input to have {N * C * HxW} elements",
    )
    torch._check(
        mean.shape == (N, group),
        lambda: f"Expect mean to have shape ({N}, {group}, but got {mean.shape}",
    )
    torch._check(
        gamma is None or gamma.numel() == C,
        lambda: f"Expect gamma to have {C} elements but got {gamma.numel() if gamma is not None else -1}",
    )
    # Channels per group.
    cpg = C // group
    torch._check(
        C == cpg * group,
        lambda: f"Expect number of channels {C} to be evenly-divisible by number of groups {group}",
    )
    # Compute Internal gradients
    # Per-(sample, channel) spatial reductions: ds = sum(grad * x), db = sum(grad).
    ds = torch.mul(grad_output, input).view(N, C, HxW).sum(dim=[2])
    db = grad_output.view(N, C, HxW).sum(dim=[2])
    d_input: Optional[Tensor] = None
    d_gamma: Optional[Tensor] = None
    d_bias: Optional[Tensor] = None
    if output_mask[0]:
        # s = 1 / (number of elements per group).
        s = 1.0 / (HxW * cpg)
        if gamma is not None:
            ds_val = torch.mul(ds, gamma.unsqueeze(0)).reshape(N, group, cpg).sum(2)
            db_val = torch.mul(db, gamma.unsqueeze(0)).reshape(N, group, cpg).sum(2)
            c1 = torch.mul(
                rstd.unsqueeze(-1),
                gamma.reshape(1, group, cpg),
            )
        else:
            ds_val = ds.reshape(N, group, cpg).sum(2)
            db_val = db.reshape(N, group, cpg).sum(2)
            c1 = torch.mul(
                rstd.unsqueeze(-1),
                torch.ones((1, group, cpg), device=rstd.device),
            )
        # d_input is computed as the per-group affine form
        # c1 * grad_output + c2 * input + c3.
        c2 = (db_val * mean - ds_val) * rstd * rstd * rstd * s
        c3 = -c2 * mean - db_val * rstd * s
        c1 = c1.unsqueeze(-1)
        c2 = _unsqueeze_to_dim(c2, 4)
        c3 = _unsqueeze_to_dim(c3, 4)
        d_input = (
            torch.mul(grad_output.reshape(N, group, cpg, HxW), c1)
            + torch.mul(input.reshape(N, group, cpg, HxW), c2)
            + c3
        )
        d_input = d_input.reshape(input.shape).to(input.dtype)
    if output_mask[1]:
        # d_gamma = sum over the batch of grad_output * x_hat, per channel.
        d_gamma = (
            (
                (ds.view(N, group, cpg) - db.view(N, group, cpg) * mean.unsqueeze(-1))
                * rstd.unsqueeze(-1)
            )
            .sum(dim=[0])
            .reshape(C)
        )
    if output_mask[2]:
        # d_bias is just grad_output summed over batch and space.
        d_bias = db.sum(dim=[0])
    return (d_input, d_gamma, d_bias)
# out_wrapper currently does not allow optional outputs
@register_decomposition(aten.native_group_norm_backward.out)
def native_group_norm_backward_out(
    grad_output: Tensor,
    input: Tensor,
    mean: Tensor,
    rstd: Tensor,
    gamma: Optional[Tensor],
    N: int,
    C: int,
    HxW: int,
    group: int,
    output_mask: list[bool],
    *,
    out0: torch.Tensor,
    out1: torch.Tensor,
    out2: torch.Tensor,
) -> tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor]]:
    """``out=`` variant: run the functional backward, then copy into the outs."""
    grads = native_group_norm_backward(
        grad_output, input, mean, rstd, gamma, N, C, HxW, group, output_mask
    )
    outs = (out0, out1, out2)
    for dest, src in zip(outs, grads):
        if src is None:
            continue
        _maybe_resize_out(dest, src.shape)
        _safe_copy_out(copy_from=src, copy_to=dest, exact_dtype=True)
    return outs
def _maybe_cast(x: Optional[Tensor], dtype) -> Optional[Tensor]:
    """Cast ``x`` to ``dtype``, passing ``None`` through unchanged."""
    return None if x is None else x.to(dtype)
# TODO: Take a closer look at the type promotion semantics
@register_decomposition(aten.native_layer_norm_backward.default)
def native_layer_norm_backward(
    grad_out: Tensor,
    input: Tensor,
    normalized_shape: list[int],
    mean: Tensor,
    rstd: Tensor,
    weight: Optional[Tensor],
    bias: Optional[Tensor],
    output_mask: list[bool],
) -> tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor]]:
    """Backward of layer norm.

    ``mean``/``rstd`` are the statistics saved by the forward pass.  Returns
    ``(d_input, d_weight, d_bias)``; each entry is ``None`` when its
    ``output_mask`` flag is False (or when the corresponding parameter is
    ``None``).
    """
    input_shape = input.shape
    input_ndim = input.dim()
    # Do the math in a higher-precision dtype (e.g. fp32 for fp16/bf16).
    computation_dtype = utils.get_computation_dtype(input.dtype)
    grad_out_cast, input_cast, weight_cast, bias_cast = (
        x.to(computation_dtype, memory_format=torch.contiguous_format)
        if x is not None
        else x
        for x in (grad_out, input, weight, bias)
    )
    assert grad_out_cast is not None
    # Dims >= axis are the normalized ("inner") dims; the rest are "outer".
    axis = input_ndim - len(normalized_shape)
    inner_dims = input_shape[axis:]
    outer_dims = input_shape[:axis]
    inner_dim_indices: list[int] = []
    outer_dim_indices: list[int] = []
    for i in range(input_ndim):
        if i >= axis:
            inner_dim_indices.append(i)
        else:
            outer_dim_indices.append(i)
    N = prod(inner_dims)  # type: ignore[arg-type]
    M = prod(outer_dims)  # type: ignore[arg-type]
    from torch.fx.experimental.symbolic_shapes import statically_known_true
    # Degenerate case: nothing to reduce over -> all-zero gradients.
    if statically_known_true(M == 0) or statically_known_true(N == 0):
        return (
            input.new_zeros(input_shape) if output_mask[0] else None,
            input.new_zeros(input_shape[axis:]) if output_mask[1] else None,
            input.new_zeros(input_shape[axis:]) if output_mask[2] else None,
        )
    mean = _unsqueeze_to_dim(mean, input_cast.dim())  # type: ignore[union-attr]
    rstd = _unsqueeze_to_dim(rstd, input_cast.dim())  # type: ignore[union-attr]
    assert input_cast is not None
    # x_hat is the normalized input.
    x_hat = (input_cast - mean) * rstd
    if weight_cast is not None:
        grad_x_hat = grad_out_cast * weight_cast
    else:
        grad_x_hat = grad_out_cast
    # Standard layer-norm input gradient: (rstd / N) * (N*g - sum(g) - x_hat*sum(g*x_hat)).
    a = grad_x_hat * N
    b = torch.sum(grad_x_hat, inner_dim_indices, True)
    c1 = torch.mul(grad_x_hat, x_hat)
    c2 = torch.sum(c1, inner_dim_indices, True)
    c3 = torch.mul(x_hat, c2)
    inner = a - b - c3
    d_input: Optional[Tensor] = None
    d_weight: Optional[Tensor] = None
    d_bias: Optional[Tensor] = None
    if output_mask[0]:
        d_input = (rstd / N) * inner
    if output_mask[1] and weight_cast is not None:
        if len(outer_dim_indices) > 0:
            d_weight = torch.sum(grad_out_cast * x_hat, outer_dim_indices, False)
        else:
            d_weight = grad_out_cast * x_hat
    if output_mask[2] and bias_cast is not None:
        if len(outer_dim_indices) > 0:
            d_bias = torch.sum(grad_out_cast, outer_dim_indices, False)
        else:
            d_bias = grad_out_cast.clone()
    # Cast results back to the original parameter dtypes.
    return (
        _maybe_cast(d_input, input.dtype),
        _maybe_cast(d_weight, weight.dtype if weight is not None else None),
        _maybe_cast(d_bias, bias.dtype if bias is not None else None),
    )
# out_wrapper currently does not allow optional outputs
@register_decomposition(aten.native_layer_norm_backward.out)
def native_layer_norm_backward_out(
    grad_out: Tensor,
    input: Tensor,
    normalized_shape: list[int],
    mean: Tensor,
    rstd: Tensor,
    weight: Optional[Tensor],
    bias: Optional[Tensor],
    output_mask: list[bool],
    *,
    out0: torch.Tensor,
    out1: torch.Tensor,
    out2: torch.Tensor,
) -> tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor]]:
    """``out=`` variant: run the functional backward, then copy into the outs."""
    grads = native_layer_norm_backward(
        grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask
    )
    outs = (out0, out1, out2)
    for dest, src in zip(outs, grads):
        if src is None:
            continue
        _maybe_resize_out(dest, src.shape)
        _safe_copy_out(copy_from=src, copy_to=dest, exact_dtype=True)
    return outs
@register_decomposition(aten._fused_rms_norm.default)
def _fused_rms_norm(
    input: Tensor,
    normalized_shape: list[int],
    weight: Optional[Tensor],
    eps: Optional[float],
) -> tuple[Tensor, Tensor]:
    """RMS norm forward.

    Normalizes over the trailing ``len(normalized_shape)`` dims by the root
    mean square.  Returns ``(result, rsqrt_of_mean_square)``; the second
    output is saved for the backward pass.
    """
    # Reduce over the trailing dims covered by normalized_shape.
    dims_to_reduce: list[int] = []
    for i in range(len(normalized_shape)):
        dims_to_reduce.append(input.dim() - i - 1)
    # upcast is needed for fp16 and bf16
    computation_dtype = utils.get_computation_dtype(input.dtype)
    upcasted_input = input.to(computation_dtype)
    # computation_dtype would be one of [Double, Float, ComplexFloat, ComplexDouble]
    # Default eps is the machine epsilon of the computation precision.
    if eps is None:
        if computation_dtype in (torch.float32, torch.complex64):
            eps_val = torch.finfo(torch.float32).eps
        else:
            eps_val = torch.finfo(torch.float64).eps
    else:
        eps_val = eps
    rqrst_input = torch.rsqrt(
        # NB: don't inplace here, will violate functional IR invariant
        # NB: carefully use the Scalar overload of add to ensure compatibility with the C++ decomp
        torch.ops.aten.add.Scalar(
            torch.pow(upcasted_input, 2).mean(dim=dims_to_reduce, keepdim=True), eps_val
        )
    )
    upcasted_result = upcasted_input.mul(rqrst_input)
    if weight is not None:
        upcasted_result = upcasted_result.mul(weight)
    # NB: nested should be dead here, just here for fidelity
    is_nested = input.is_nested or (weight is not None and weight.is_nested)
    memory_format = utils.suggest_memory_format(input)
    is_channels_last = memory_format in (
        torch.channels_last,
        torch.channels_last_3d,
    )
    # Match eager's output layout: contiguous unless nested/channels-last.
    if not is_nested and not is_channels_last:
        upcasted_result = upcasted_result.contiguous()
        rqrst_input = rqrst_input.contiguous()
    # Cast normalized result back to original input type
    result = upcasted_result.type_as(input)
    return result, rqrst_input
@register_decomposition(aten._fused_rms_norm_backward.default)
def _fused_rms_norm_backward(
    grad_out: Tensor,
    input: Tensor,
    normalized_shape: list[int],
    rstd: Tensor,
    weight: Optional[Tensor],
    output_mask: list[bool],
) -> tuple[Optional[Tensor], Optional[Tensor]]:
    """Backward of RMS norm.

    ``rstd`` is the reciprocal root-mean-square saved by the forward pass.
    Returns ``(d_input, d_weight)``; each entry is ``None`` when its
    ``output_mask`` flag is False.
    """
    input_shape = input.shape
    input_ndim = input.dim()
    # Do the math in a higher-precision dtype (e.g. fp32 for fp16/bf16).
    computation_dtype = utils.get_computation_dtype(input.dtype)
    grad_out_cast = grad_out.to(
        computation_dtype, memory_format=torch.contiguous_format
    )
    input_cast = input.to(computation_dtype, memory_format=torch.contiguous_format)
    weight_cast = (
        weight.to(computation_dtype, memory_format=torch.contiguous_format)
        if weight is not None
        else None
    )
    assert grad_out_cast is not None
    # Dims >= axis are the normalized ("inner") dims; the rest are "outer".
    axis = input_ndim - len(normalized_shape)
    inner_dims = input_shape[axis:]
    outer_dims = input_shape[:axis]
    inner_dim_indices: list[int] = []
    outer_dim_indices: list[int] = []
    for i in range(input_ndim):
        if i >= axis:
            inner_dim_indices.append(i)
        else:
            outer_dim_indices.append(i)
    N = prod(inner_dims)  # type: ignore[arg-type]
    M = prod(outer_dims)  # type: ignore[arg-type]
    from torch.fx.experimental.symbolic_shapes import guard_or_false
    # Degenerate case: nothing to reduce over -> all-zero gradients.
    if guard_or_false(M == 0) or guard_or_false(N == 0):
        return (
            input.new_zeros(input_shape) if output_mask[0] else None,
            input.new_zeros(input_shape[axis:]) if output_mask[1] else None,
        )
    rstd = _unsqueeze_to_dim(rstd, input_cast.dim())  # type: ignore[union-attr]
    if weight_cast is not None:
        grad_x_hat = grad_out_cast * weight_cast
    else:
        grad_x_hat = grad_out_cast
    d_input: Optional[Tensor] = None
    d_weight: Optional[Tensor] = None
    # x_hat is the normalized input.
    x_hat = input_cast * rstd
    if output_mask[0]:
        # d_input = rstd * (g - x_hat * mean(g * x_hat)) with mean over inner dims.
        sum_val = torch.sum(x_hat * grad_x_hat, dim=inner_dim_indices, keepdim=True)
        d_input = (grad_x_hat - (x_hat / N) * sum_val) * rstd
    if output_mask[1] and weight_cast is not None:
        # d_weight = sum over outer dims of grad_out * x_hat.
        d_weight_full_shape = grad_out_cast * x_hat
        if len(outer_dim_indices) > 0:
            d_weight = torch.sum(
                d_weight_full_shape, dim=outer_dim_indices, keepdim=False
            )
        else:
            d_weight = d_weight_full_shape
    return (
        _maybe_cast(d_input, input.dtype),
        _maybe_cast(d_weight, input.dtype),
    )
def native_batch_norm_helper(
    input: Tensor,
    weight: Optional[Tensor],
    bias: Optional[Tensor],
    running_mean: Optional[Tensor],
    running_var: Optional[Tensor],
    training: bool,
    momentum: float,
    eps: float,
    functional: bool,
) -> tuple[Tensor, Tensor, Tensor, Optional[Tensor], Optional[Tensor]]:
    """Shared implementation behind the batch-norm decompositions.

    Returns ``(output, save_mean, save_rstd, new_running_mean,
    new_running_var)``.  When ``functional`` is False the running stats are
    updated in place (matching the mutating aten ops); when True the updated
    stats are only returned.
    """
    # Normalize over batch and all spatial dims; dim 1 is the channel dim.
    reduction_dims = [0] + list(range(2, input.dim()))
    computation_dtype = utils.get_computation_dtype(input.dtype)
    new_running_mean = running_mean
    new_running_var = running_var
    if training:
        computation_dtype = utils.get_computation_dtype(input.dtype)
        input_acc = input.to(dtype=computation_dtype)
        # Batch statistics (biased variance, correction=0) in high precision.
        biased_var, mean = torch.var_mean(
            input_acc, dim=reduction_dims, correction=0, keepdim=True
        )
        rstd = torch.rsqrt(biased_var + eps)
        output = (input - mean) * rstd
        save_mean = torch.squeeze(mean, reduction_dims)
        save_rstd = torch.squeeze(rstd, reduction_dims)
        if running_mean is not None:
            # EMA update of the running mean.
            new_running_mean = momentum * save_mean + (1 - momentum) * running_mean
            if not functional:
                running_mean.copy_(new_running_mean)
        if running_var is not None:
            # Running variance uses the *unbiased* estimate (Bessel correction).
            n = input.numel() / input.shape[1]
            # This doesn't strictly match eager's numerics, which accumulates var sum and then directly applies the correction
            # But... that would require re-implementing var here, for negligible numerics gain on a tensor whose
            # numerics probably don't matter.
            squeezed_var = torch.squeeze(biased_var, reduction_dims)
            unbiased_var = squeezed_var * (n / (n - 1))
            new_running_var = momentum * unbiased_var + (1 - momentum) * running_var
            if not functional:
                running_var.copy_(new_running_var)
    else:
        # Eval mode: normalize with the running statistics.
        assert running_mean is not None and running_var is not None
        running_mean = running_mean.to(dtype=computation_dtype, copy=True)
        new_running_mean = running_mean
        running_var = running_var.to(dtype=computation_dtype, copy=True)
        new_running_var = running_var
        mean = running_mean
        invstd = 1 / (torch.sqrt(running_var + eps))
        # Very annoying inconsistency where CPU and CUDA give different shapes
        if input.device.type != "cpu":
            save_mean = running_mean
            save_rstd = invstd
        else:
            save_mean = input.new_zeros((0,))
            save_rstd = input.new_zeros((0,))
        mean = _unsqueeze_to_dim(mean, input.dim() - 1)
        invstd = _unsqueeze_to_dim(invstd, input.dim() - 1)
        output = (input - mean) * invstd
    # Apply the optional per-channel affine transform.
    if weight is not None:
        weight = weight.flatten()
        weight = _unsqueeze_to_dim(weight, input.dim() - 1)
        output = output * weight
    if bias is not None:
        bias = bias.flatten()
        bias = _unsqueeze_to_dim(bias, input.dim() - 1)
        output = output + bias
    # CPU kernels return saved stats in the input dtype.
    if input.device.type == "cpu":
        save_mean = save_mean.to(dtype=input.dtype)
        save_rstd = save_rstd.to(dtype=input.dtype)
    return (
        output.to(dtype=input.dtype),
        save_mean,
        save_rstd,
        new_running_mean,
        new_running_var,
    )
@register_decomposition(aten.native_batch_norm)
@out_wrapper("out", "save_mean", "save_invstd")
def native_batch_norm(
    input: Tensor,
    weight: Optional[Tensor],
    bias: Optional[Tensor],
    running_mean: Optional[Tensor],
    running_var: Optional[Tensor],
    training: bool,
    momentum: float,
    eps: float,
) -> tuple[Tensor, Tensor, Tensor]:
    """Forward batch norm via the shared helper (in-place running-stat mode)."""
    result = native_batch_norm_helper(
        input, weight, bias, running_mean, running_var, training, momentum, eps, False
    )
    # The helper also returns updated running stats; this overload mutates
    # them in place instead, so only the first three results are returned.
    return result[0], result[1], result[2]
# TODO: this decomposition is NOT here to stay. We would much prefer replacing native_batch_norm
# with our new correctly schema'd _native_batch_norm_legit and its variants, but
# we cannot do that immediately in the C++ because it would be forwards incompatible
# with some mobile use cases.
#
# Since this change is most impactful for aot autograd/functionalization, we simply
# register this decomposition on the Autograd key for the python dispatcher (which is
# currently only used by aot autograd/functionalization and no one else, really).
# In two weeks or so, we should remove this decomposition and phase out the current native_batch_norm
# to be _native_batch_norm_legit and have the right schema (stating that there are input mutations).
@aten.native_batch_norm.default.py_impl(DispatchKey.Autograd)
@aten.native_batch_norm.default.py_impl(DispatchKey.CompositeImplicitAutograd)
def native_batch_norm_decomposition(
    input: Tensor,
    weight: Optional[Tensor],
    bias: Optional[Tensor],
    running_mean: Optional[Tensor],
    running_var: Optional[Tensor],
    training: bool,
    momentum: float,
    eps: float,
) -> tuple[Tensor, Tensor, Tensor]:
    """Route native_batch_norm to the appropriately-schema'd legit variant."""
    have_mean = running_mean is not None
    have_var = running_var is not None
    if not have_mean and not have_var:
        # No running stats at all -> the stat-less overload.
        return aten._native_batch_norm_legit(
            input, weight, bias, training, momentum, eps
        )
    if not have_mean:
        raise RuntimeError(
            "running_mean is None, but running_var is provided. "
            "They should both be None or both be provided."
        )
    if not have_var:
        raise RuntimeError(
            "running_var is None, but running_mean is provided. "
            "They should both be None or both be provided."
        )
    if not training:
        return aten._native_batch_norm_legit_no_training(
            input, weight, bias, running_mean, running_var, momentum, eps
        )
    # HACK: batch norm consolidation should clean this up so this op doesn't take in a training arg.
    return aten._native_batch_norm_legit(
        input, weight, bias, running_mean, running_var, training, momentum, eps
    )
@aten.unsafe_chunk.default.py_impl(DispatchKey.CompositeImplicitAutograd)
def unsafe_chunk_py_impl(tensor, chunks, dim=0) -> list[Tensor]:
    """Split ``tensor`` into ``chunks`` pieces along ``dim`` without autograd
    view tracking.

    The common case lowers to ``unsafe_split`` with a uniform split size.  The
    ``dim_size == 0`` corner case must use explicit split sizes so that exactly
    ``chunks`` empty pieces are produced.
    """
    dim_size = tensor.size(dim)
    # Ceiling division: size of each chunk except possibly the last.
    split_size = (dim_size + chunks - 1) // chunks
    if split_size == 0 and dim_size == 0:
        # BUG FIX: `chunks` is an int; the previous `[split_size for _ in chunks]`
        # tried to iterate it and raised TypeError.  Build the list by repetition.
        split_sizes = [split_size] * chunks
        split_sizes[chunks - 1] = split_size - (split_size * chunks - dim_size)
        return torch.ops.aten.unsafe_split_with_sizes.default(tensor, split_sizes, dim)
    return torch.ops.aten.unsafe_split.Tensor(tensor, split_size, dim)
@register_decomposition(aten._native_batch_norm_legit_no_training.default)
def _native_batch_norm_legit_no_training(
    input: Tensor,
    weight: Optional[Tensor],
    bias: Optional[Tensor],
    running_mean: Tensor,
    running_var: Tensor,
    momentum: float,
    eps: float,
) -> tuple[Tensor, Tensor, Tensor]:
    """Eval-mode batch norm: forwards to the legit overload with training=False."""
    training = False
    return aten._native_batch_norm_legit.default(
        input, weight, bias, running_mean, running_var, training, momentum, eps
    )
@register_decomposition(aten._native_batch_norm_legit.default)
def _native_batch_norm_legit(
    input: Tensor,
    weight: Optional[Tensor],
    bias: Optional[Tensor],
    running_mean: Tensor,
    running_var: Tensor,
    training: bool,
    momentum: float,
    eps: float,
) -> tuple[Tensor, Tensor, Tensor]:
    """Batch norm with running stats, mutating them in place when training."""
    result = native_batch_norm_helper(
        input, weight, bias, running_mean, running_var, training, momentum, eps, False
    )
    # Drop the helper's returned running stats; this overload mutates instead.
    return result[0], result[1], result[2]
@register_decomposition(aten._native_batch_norm_legit.no_stats)
def _native_batch_norm_legit_no_stats(
    input: Tensor,
    weight: Optional[Tensor],
    bias: Optional[Tensor],
    training: bool,
    momentum: float,
    eps: float,
) -> tuple[Tensor, Tensor, Tensor]:
    """Batch norm without running statistics (batch stats only)."""
    result = native_batch_norm_helper(
        input, weight, bias, None, None, training, momentum, eps, False
    )
    return result[0], result[1], result[2]
@register_decomposition(aten._native_batch_norm_legit_functional.default)
def _native_batch_norm_legit_functional(
    input: Tensor,
    weight: Optional[Tensor],
    bias: Optional[Tensor],
    running_mean: Tensor,
    running_var: Tensor,
    training: bool,
    momentum: float,
    eps: float,
) -> tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
    """Functional batch norm: returns updated running stats instead of mutating."""
    out, saved_mean, saved_rstd, new_mean, new_var = native_batch_norm_helper(
        input, weight, bias, running_mean, running_var, training, momentum, eps, True
    )
    assert new_mean is not None, "new_running_mean should not be None"
    assert new_var is not None, "new_running_var should not be None"
    return out, saved_mean, saved_rstd, new_mean, new_var
def _get_batch_norm_reserve_tensor(
input: Tensor,
weight: Optional[Tensor],
bias: Optional[Tensor],
running_mean: Tensor,
running_var: Tensor,
eps: float,
training: bool,
) -> Tensor:
"""
Return a reserve tensor for batch norm, used only by cudnn to pass forward state to the
backward pass. This is needed for `_batch_norm_with_update` and `_batch_norm_no_update`,
which support a variety of backends including cudnn. We create this tensor here to get
the correct shape in the traced graph if we detect that will call the cudnn kernel,
and rely on DCE to avoid materializing this tensor.
"""
backend = torch._C._select_batch_norm_backend( # type: ignore[attr-defined]
input, weight, bias, running_mean, running_var, True, eps
)
reserve_size = 0
if backend == torch._C._BatchNormBackend.Cudnn: # type: ignore[attr-defined]
reserve_size = torch._C._get_cudnn_batch_norm_reserve_space_size( # type: ignore[attr-defined]
input, training
)
return torch.empty(
reserve_size, dtype=torch.uint8, layout=input.layout, device=input.device
)
@register_decomposition(aten._batch_norm_with_update.default)
def _batch_norm_with_update(
    input: Tensor,
    weight: Optional[Tensor],
    bias: Optional[Tensor],
    running_mean: Tensor,
    running_var: Tensor,
    momentum: float,
    eps: float,
) -> tuple[Tensor, Tensor, Tensor, Tensor]:
    """Training-mode batch norm that also returns a (cudnn-only) reserve tensor."""
    out, saved_mean, saved_rstd, _, _ = native_batch_norm_helper(
        input,
        weight,
        bias,
        running_mean,
        running_var,
        True,  # training
        momentum,
        eps,
        False,  # functional
    )
    reserve = _get_batch_norm_reserve_tensor(
        input, weight, bias, running_mean, running_var, eps, training=True
    )
    return out, saved_mean, saved_rstd, reserve
@register_decomposition(aten._batch_norm_with_update_functional.default)
def _batch_norm_with_update_functional(
    input: Tensor,
    weight: Optional[Tensor],
    bias: Optional[Tensor],
    running_mean: Tensor,
    running_var: Tensor,
    momentum: float,
    eps: float,
) -> tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
    """Functional training-mode batch norm: returns updated stats and a reserve."""
    out, saved_mean, saved_rstd, new_rm, new_rv = native_batch_norm_helper(
        input, weight, bias, running_mean, running_var, True, momentum, eps, True
    )
    reserve = _get_batch_norm_reserve_tensor(
        input, weight, bias, running_mean, running_var, eps, training=True
    )
    assert new_rm is not None, "new_running_mean should not be None"
    assert new_rv is not None, "new_running_var should not be None"
    return (out, saved_mean, saved_rstd, reserve, new_rm, new_rv)
@register_decomposition(aten._batch_norm_no_update.default)
def _batch_norm_no_update(
    input: Tensor,
    weight: Optional[Tensor],
    bias: Optional[Tensor],
    running_mean: Tensor,
    running_var: Tensor,
    momentum: float,
    eps: float,
) -> tuple[Tensor, Tensor, Tensor, Tensor]:
    """Eval-mode batch norm that also returns a (cudnn-only) reserve tensor."""
    out, saved_mean, saved_rstd, _, _ = native_batch_norm_helper(
        input,
        weight,
        bias,
        running_mean,
        running_var,
        False,  # training
        momentum,
        eps,
        False,  # functional
    )
    reserve = _get_batch_norm_reserve_tensor(
        input, weight, bias, running_mean, running_var, eps, training=False
    )
    return out, saved_mean, saved_rstd, reserve
@register_decomposition(aten._fused_dropout)
@out_wrapper("out0", "out1")
@pw_cast_for_opmath
def _fused_dropout_decomposition(input, p, generator=None):
    """Fused dropout returning (scaled output, uint8 keep-mask).

    NOTE(review): the code keeps elements where ``rand < p`` and scales by
    ``1/p``, i.e. ``p`` acts as the *retain* probability here.
    """
    assert generator is None
    keep = (torch.rand_like(input) < p).to(dtype=torch.uint8)
    scaled = keep.type_as(input) * input * (1.0 / p)
    return (scaled, keep)
@register_decomposition(aten._to_copy)
@out_wrapper()
def _to_copy(
    x: Union[Tensor, NumberType],
    *,
    dtype: Optional[torch.dtype] = None,
    layout=None,
    device: Optional[torch.device] = None,
    pin_memory: bool = False,
    non_blocking: bool = False,
    memory_format: Optional[torch.memory_format] = None,
):
    """Decompose ``_to_copy`` into elementary dtype/device conversions.

    Accepts either a tensor or a Python scalar (which is wrapped into a
    0-dim tensor first).  Strided layout only; pin_memory unsupported.
    """
    assert not layout or layout == torch.strided, "TODO"
    assert not pin_memory, "TODO"
    assert isinstance(x, (torch.Tensor, int, float, bool, complex))
    # Nothing to convert: return a plain clone (or the scalar unchanged).
    if device is None and dtype is None and memory_format is None:
        if isinstance(x, torch.Tensor):
            return x.clone()
        else:
            return x
    dtype_converted = False
    if isinstance(x, torch.Tensor):
        x_tensor = x
    else:
        x_tensor = torch.scalar_tensor(x)
    if device is not None and device != x_tensor.device:
        # avoid conversions on cpu
        # (convert dtype *before* leaving CPU so the transfer moves fewer/
        # already-correct bytes — only when the source is on CPU).
        if dtype is not None and device.type == "cpu":
            x_tensor = torch._prims.convert_element_type(x_tensor, dtype)
            dtype_converted = True
        x_tensor = torch._prims.device_put(x_tensor, device, non_blocking)
    if dtype is not None and not dtype_converted:
        x_tensor = torch._prims.convert_element_type(x_tensor, dtype)
        dtype_converted = True
    if memory_format is not None:  # no ref/prim for memory format
        return torch.clone(x_tensor, memory_format=memory_format)
    return x_tensor
# Questionable decompositions
# This is only valid if we're running the graph without autograd, such as if the backward pass has been traced.
# Note that this decomposition causes issues with in-place ops
@register_decomposition([aten.detach, aten.lift, aten.lift_fresh])
@out_wrapper()
def nop_decomposition(x):
    """These ops are no-ops at this level: decompose to an alias of the input."""
    return aten.alias(x)
# Also register to the Autograd dispatch key, so this decomp can run above autograd.
# native_batch_norm needs to decompose into other ops before autograd.
@aten.cudnn_batch_norm.default.py_impl(DispatchKey.Autograd)
@register_decomposition(aten.cudnn_batch_norm)
@out_wrapper("out0", "out1", "out2", "out3")
def cudnn_batch_norm(
    input: Tensor,
    weight: Tensor,
    bias: Optional[Tensor],
    running_mean: Optional[Tensor],
    running_var: Optional[Tensor],
    training: bool,
    exponential_average_factor: float,
    epsilon: float,
):
    """Lower cudnn_batch_norm to native_batch_norm plus an empty reserve tensor."""
    out, saved_mean, saved_var = aten.native_batch_norm(
        input,
        weight,
        bias,
        running_mean,
        running_var,
        training,
        exponential_average_factor,
        epsilon,
    )
    reserve = input.new_zeros((0,), dtype=torch.uint8)
    # Cudnn return running mean and variance when training is True
    if training:
        return (out, saved_mean, saved_var, reserve)
    # Eval mode: cudnn reports empty saved statistics.
    return (out, weight.new_zeros((0,)), weight.new_zeros((0,)), reserve)
def _broadcast_batch_norm_backward(x, broadcast_mask):
    """Unsqueeze singleton dims into ``x`` so it broadcasts per ``broadcast_mask``."""
    for dim, flag in enumerate(broadcast_mask):
        already_singleton = dim < x.ndim and x.shape[dim] == flag
        if flag == 1 and not already_singleton:
            x = x.unsqueeze(dim)
    return x
@register_decomposition(aten.batch_norm_backward.default)
def batch_norm_backward(
    grad_out: Tensor,
    input: Tensor,
    weight: Optional[Tensor],
    running_mean: Optional[Tensor],
    running_var: Optional[Tensor],
    save_mean: Optional[Tensor],
    save_invstd: Optional[Tensor],
    train: bool,
    eps: float,
    output_mask: list[bool],
    reserve: Tensor,
) -> tuple[Tensor, Optional[Tensor], Optional[Tensor]]:
    """Thin wrapper over native_batch_norm_backward; ``reserve`` is cudnn-only
    and unused by this decomposition."""
    del reserve
    return native_batch_norm_backward(
        grad_out,
        input,
        weight,
        running_mean,
        running_var,
        save_mean,
        save_invstd,
        train,
        eps,
        output_mask,
    )
@register_decomposition(aten.native_batch_norm_backward.default)
def native_batch_norm_backward(
    grad_out: Tensor,
    input: Tensor,
    weight: Optional[Tensor],
    running_mean: Optional[Tensor],
    running_var: Optional[Tensor],
    save_mean: Optional[Tensor],
    save_invstd: Optional[Tensor],
    train: bool,
    eps: float,
    output_mask: list[bool],
) -> tuple[Tensor, Optional[Tensor], Optional[Tensor]]:
    """Backward of batch norm.

    In training mode uses the saved batch statistics; in eval mode the
    running statistics.  Returns ``(grad_input, grad_weight, grad_bias)``;
    the last two are ``None`` when their ``output_mask`` flag is False.
    """
    input_dtype = input.dtype
    if weight is not None:
        weight_dtype = weight.dtype
    else:
        weight_dtype = input_dtype
    # Do the math in a higher-precision dtype (e.g. fp32 for fp16/bf16).
    computation_dtype = utils.get_computation_dtype(input.dtype)
    (
        grad_out_cast,
        input_cast,
        weight_cast,
        running_mean_cast,
        running_var_cast,
        save_mean_cast,
        save_invstd_cast,
    ) = (
        x.to(computation_dtype) if x is not None else x
        for x in (
            grad_out,
            input,
            weight,
            running_mean,
            running_var,
            save_mean,
            save_invstd,
        )
    )
    input_shape = input.shape
    input_rank = input.dim()
    assert input_rank >= 2, "rank of the input must be at least 2"
    # Dim 1 is the channel dim; reductions run over batch + spatial dims.
    axis = 1
    num_features = prod(list(input_shape)) / input_shape[axis]
    mean = save_mean_cast
    invstd = save_invstd_cast
    if train:
        assert mean is not None and invstd is not None
    else:
        # Eval mode uses the running statistics instead of the saved ones.
        assert running_mean_cast is not None and running_var_cast is not None
        mean = running_mean_cast
        invstd = torch.rsqrt(running_var_cast + eps)
    broadcast_mask: list[int] = [1] * input_rank
    broadcast_mask[axis] = input_shape[axis]
    reduction_axes: list[int] = []
    for i in range(input_rank):
        if i != axis:
            reduction_axes.append(i)
    mean = _broadcast_batch_norm_backward(mean, broadcast_mask)  # type: ignore[arg-type]
    norm = 1.0 / num_features
    # Per-channel reductions: sum(grad) and sum(grad * (x - mean)).
    grad_output_sum = torch.sum(grad_out_cast, reduction_axes)  # type: ignore[arg-type]
    dot_p = torch.sum(grad_out_cast * (input_cast - mean), reduction_axes)  # type: ignore[operator]
    grad_mean = _broadcast_batch_norm_backward(grad_output_sum * norm, broadcast_mask)
    proj_scale = _broadcast_batch_norm_backward(
        torch.mul(dot_p * norm, invstd * invstd),  # type: ignore[operator]
        broadcast_mask,
    )
    if weight_cast is None:
        grad_scale = _broadcast_batch_norm_backward(invstd, broadcast_mask) * 1.0  # type: ignore[arg-type]
    else:
        grad_scale = _broadcast_batch_norm_backward(
            invstd * weight_cast, broadcast_mask
        )
    if train:
        # Training: subtract the projection onto the batch statistics.
        proj = (input_cast - mean) * proj_scale  # type: ignore[operator]
        grad_input = ((grad_out_cast - proj) - grad_mean) * grad_scale
    else:
        # Eval: statistics are constants, so the gradient is a simple rescale.
        grad_input = grad_out_cast * grad_scale
    if output_mask[1]:
        grad_weight = dot_p * invstd
    else:
        grad_weight = None  # "None" doesn't work with vjp, should use zeros for vjp
    if output_mask[2]:
        grad_bias = grad_output_sum
    else:
        grad_bias = None  # "None" doesn't work with vjp, should use zeros for vjp
    return (
        grad_input.to(input_dtype),
        _maybe_cast(grad_weight, weight_dtype),
        _maybe_cast(grad_bias, weight_dtype),
    )
# out_wrapper currently does not allow optional outputs
@register_decomposition(aten.native_batch_norm_backward.out)
def native_batch_norm_backward_out(
    grad_out: Tensor,
    input: Tensor,
    weight: Optional[Tensor],
    running_mean: Optional[Tensor],
    running_var: Optional[Tensor],
    save_mean: Optional[Tensor],
    save_invstd: Optional[Tensor],
    train: bool,
    eps: float,
    output_mask: list[bool],
    *,
    out0: torch.Tensor,
    out1: torch.Tensor,
    out2: torch.Tensor,
) -> tuple[Tensor, Optional[Tensor], Optional[Tensor]]:
    """``out=`` variant: run the functional backward, then copy into the outs."""
    grads = native_batch_norm_backward(
        grad_out,
        input,
        weight,
        running_mean,
        running_var,
        save_mean,
        save_invstd,
        train,
        eps,
        output_mask,
    )
    outs = (out0, out1, out2)
    for dest, src in zip(outs, grads):
        if src is None:
            continue
        _maybe_resize_out(dest, src.shape)
        _safe_copy_out(copy_from=src, copy_to=dest, exact_dtype=True)
    return outs
@register_decomposition(aten.miopen_batch_norm_backward)
@out_wrapper("out0", "out1", "out2")
def miopen_batch_norm_backward(
    input: Tensor,
    grad_output: Tensor,
    weight: Tensor,
    running_mean: Optional[Tensor],
    running_var: Optional[Tensor],
    save_mean: Optional[Tensor],
    save_var: Optional[Tensor],
    epsilon: float,
):
    """miopen batch-norm backward lowers to the generic native backward.

    The miopen op only runs in training mode and always wants all three
    gradients, hence the hard-coded flags.
    """
    return aten.native_batch_norm_backward(
        grad_output,
        input,
        weight,
        running_mean,
        running_var,
        save_mean,
        save_var,
        True,
        epsilon,
        [True, True, True],
    )
@register_decomposition(aten.cudnn_batch_norm_backward)
@out_wrapper("out0", "out1", "out2")
def cudnn_batch_norm_backward(
    input: Tensor,
    grad_output: Tensor,
    weight: Tensor,
    running_mean: Optional[Tensor],
    running_var: Optional[Tensor],
    save_mean: Optional[Tensor],
    save_var: Optional[Tensor],
    epsilon: float,
    reserveSpace: Tensor,
):
    """cudnn batch-norm backward lowers to the generic native backward.

    ``reserveSpace`` is cudnn-kernel state and is ignored here.  The cudnn op
    only runs in training mode and always wants all three gradients.
    """
    return aten.native_batch_norm_backward(
        grad_output,
        input,
        weight,
        running_mean,
        running_var,
        save_mean,
        save_var,
        True,
        epsilon,
        [True, True, True],
    )
@register_decomposition(aten._adaptive_avg_pool2d)
@out_wrapper()
@pw_cast_for_opmath
def adaptive_avg_pool2d(input: Tensor, output_size: tuple[int, int]):
    """Decompose adaptive average pooling into gather + masked mean.

    Fast path: when the input sizes divide evenly by the output sizes this is
    an ordinary avg_pool2d.  Otherwise each output cell averages a variable
    number of input elements; those are gathered with precomputed indices,
    masked where out of range, and averaged by the true window lengths.
    """
    # Preconditions
    device = input.device
    shape = input.shape
    ndim = len(shape)
    torch._check(
        ndim in (3, 4),
        lambda: f"adaptive_avg_pool2d(): Expected 3D or 4D tensor, but got {ndim}",
    )
    for d in input.shape[-2:]:
        torch._check(
            d != 0,
            lambda: "adaptive_avg_pool2d(): Expected input to have non-zero size for "
            f"non-batch dimensions, but input has shape {tuple(shape)}.",
        )

    # Optimisation (we should also do this in the kernel implementation)
    if shape[-2] % output_size[-2] == 0 and shape[-1] % output_size[-1] == 0:
        stride = tuple(i // o for i, o in zip(shape[-2:], output_size))
        kernel = tuple(
            i - (o - 1) * s for i, o, s in zip(shape[-2:], output_size, stride)
        )
        return torch.nn.functional.avg_pool2d(input, kernel, stride)

    # Window [start_index(o), end_index(o)) for output cell o, matching the
    # C++ kernel's integer arithmetic.
    def start_index(a, b, c):
        return torch.div(a * c, b, rounding_mode="trunc")

    def end_index(a, b, c):
        return torch.div((a + 1) * c + b - 1, b, rounding_mode="trunc")

    def compute_idx(in_size, out_size):
        orange = torch.arange(out_size, device=device, dtype=torch.int64)
        i0 = start_index(orange, out_size, in_size)
        # Let length = end_index - start_index, i.e. the length of the pooling kernels
        # length.max() can be computed analytically as follows:
        maxlength = in_size // out_size + 1
        in_size_mod = in_size % out_size
        # adaptive = True iff there are kernels with different lengths
        adaptive = not (in_size_mod == 0 or out_size % in_size_mod == 0)
        if adaptive:
            maxlength += 1
        elif in_size_mod == 0:
            maxlength -= 1

        range_max = torch.arange(maxlength, device=device, dtype=torch.int64)
        idx = i0.unsqueeze(-1) + range_max
        if adaptive:
            # Need to clamp to avoid accessing out-of-bounds memory
            # TODO make minimum accept scalars
            maxval = torch.scalar_tensor(
                in_size - 1, dtype=idx.dtype, device=idx.device
            )
            idx = torch.minimum(idx, maxval)

            # Compute the length
            i1 = end_index(orange, out_size, in_size)
            length = i1 - i0
        else:
            length = maxlength
        return idx, length, range_max, adaptive

    # length is not None if it's constant, otherwise we'll need to compute it
    idxh, length_h, range_max_h, adaptive_h = compute_idx(shape[-2], output_size[-2])
    idxw, length_w, range_max_w, adaptive_w = compute_idx(shape[-1], output_size[-1])

    # Gather every candidate window element: shape (..., out_h, k_h, out_w, k_w).
    vals = input[..., _unsqueeze_to_dim(idxh, 4), idxw]
    # Shortcut for the simpler case
    if not adaptive_h and not adaptive_w:
        return torch.mean(vals, dim=(-3, -1))

    def maybe_mask(vals, length, range_max, adaptive, dim):
        if isinstance(length, IntLike):
            return vals, length
        else:
            # zero-out the things we didn't really want to select
            assert dim < 0
            # hack
            mask = range_max >= length.unsqueeze(-1)
            if dim == -2:
                mask = _unsqueeze_to_dim(mask, 4)
            vals = torch.masked_fill(vals, mask, 0.0)
            # Compute the length of each window
            length = _unsqueeze_to_dim(length, -dim)
            return vals, length

    vals, length_h = maybe_mask(
        vals, length_h, range_max_h, adaptive=adaptive_h, dim=-2
    )
    vals, length_w = maybe_mask(
        vals, length_w, range_max_w, adaptive=adaptive_w, dim=-1
    )

    # We unroll the sum as we assume that the kernels are going to be small
    ret = None
    for i, j in product(range(vals.shape[-3]), range(vals.shape[-1])):
        if ret is None:
            ret = vals[..., i, :, j]
        else:
            ret = ret + vals[..., i, :, j]
    return ret / (length_h * length_w)
def _max_unpoolnd(
    self: TensorLike, indices: TensorLike, output_size: list[int], dim: int
):
    """Shared scatter-based implementation behind max_unpool2d/3d.

    Flattens the leading (batch/channel) dimensions and scatters the input
    values into a zero-filled output at the positions given by ``indices``.

    If ``self`` and ``indices`` came from a max_pool call as required by the
    documentation, this is deterministic: equal entries in ``indices`` then
    carry equal values in ``self``. Otherwise one of the colliding values
    'wins' nondeterministically.
    """
    utils.alert_not_deterministic(f"max_unpooling{dim}d_forward_out")
    # Number of flattened leading (non-spatial) slices, and output spatial numel.
    lead_numel = reduce(operator.mul, self.shape[:-dim])
    spatial_numel = reduce(operator.mul, output_size)
    # Offset each leading slice's indices into its own spatial chunk.
    offset_shape = [1] * self.ndim
    offset_shape[:-dim] = self.shape[:-dim]
    offsets = aten.arange(lead_numel, device=self.device).view(offset_shape)
    flat_idx = (indices + offsets * spatial_numel).reshape(-1)
    out = self.new_zeros(list(self.shape[:-dim]) + list(output_size))
    scattered = aten._unsafe_index_put(
        out.reshape(-1), [flat_idx], self.reshape(-1), accumulate=False
    )
    return scattered.view(out.shape)
@register_decomposition(aten.max_unpool2d)
@out_wrapper()
def max_unpool2d(
    self: TensorLike,
    indices: TensorLike,
    output_size: list[int],
):
    """Decomposition of max_unpool2d.

    Validates dtype/rank/shape of the arguments with the same error messages
    as the eager kernel, then scatters ``self`` into a zero tensor of spatial
    size ``output_size`` at the flat positions stored in ``indices``
    (see ``_max_unpoolnd``).
    """
    torch._check(
        indices.dtype == torch.int64,
        lambda: f"elements in indices should be type int64 but got: {indices.dtype}",
    )
    torch._check(
        len(output_size) == 2,
        lambda: (
            f"There should be exactly two elements (height, width) in output_size, "
            f"but got {len(output_size)} elements."
        ),
    )
    torch._check(
        self.ndim in (3, 4),
        lambda: (
            f"Input to max_unpooling2d should be a 3d or 4d Tensor, "
            f"but got a tensor with {self.ndim} dimensions."
        ),
    )
    torch._check(
        self.shape == indices.shape,
        lambda: (
            f"Expected shape of indices to be same as that of the input tensor ({self.shape}) "
            f"but got indices tensor with shape: {indices.shape}"
        ),
    )
    # Non-batch dimensions must be non-empty (dim 0, the batch dim for 4-d
    # input, is deliberately not checked).
    for i in range(1, self.ndim):
        torch._check(
            self.size(i) > 0,
            lambda: (
                f"max_unpooling2d(): "
                f"Expected input to have non-zero size for non-batch dimensions, "
                f"but got {self.shape} with dimension {i} being empty."
            ),
        )
    return _max_unpoolnd(self, indices, output_size, 2)
@register_decomposition(aten.max_unpool3d)
@out_wrapper()
def max_unpool3d(
    input: TensorLike,
    indices: TensorLike,
    output_size: list[int],
    stride: list[int],
    padding: list[int],
):
    """Decomposition of max_unpool3d.

    Validates the arguments with the same error messages as the eager kernel,
    then scatters ``input`` into a zero tensor of spatial size ``output_size``
    at the flat positions stored in ``indices`` (see ``_max_unpoolnd``).
    ``stride`` and ``padding`` are validated but not used by the scatter
    itself.
    """
    torch._check(
        indices.dtype == torch.int64, lambda: "elements in indices should be type int64"
    )
    torch._check(
        input.ndim in (4, 5),
        lambda: f"Input to max_unpooling3d should be a 4d or 5d Tensor, but got a tensor with {input.ndim} dimensions.",
    )
    torch._check(
        len(output_size) == 3,
        lambda: (
            f"There should be exactly three elements (depth, height, width) in output_size, "
            f"but got {len(output_size)} elements."
        ),
    )
    torch._check(
        len(stride) == 3,
        lambda: f"There should be exactly three elements (depth, height, width) in stride, but got: {len(stride)} elements.",
    )
    torch._check(
        len(padding) == 3,
        lambda: f"There should be exactly three elements (depth, height, width) in padding, but got: {len(padding)} elements.",
    )
    torch._check(
        input.shape == indices.shape,
        lambda: (
            f"Expected shape of indices to be same as that of the input tensor ({input.shape}) "
            f"but got indices tensor with shape: {indices.shape}"
        ),
    )
    # Non-batch dimensions must be non-empty (dim 0 is not checked).
    for i in range(1, input.ndim):
        torch._check(
            input.size(i) > 0,
            lambda: (
                f"max_unpooling3d(): "
                f"Expected input to have non-zero size for non-batch dimensions, "
                f"but got {input.shape} with dimension {i} being empty."
            ),
        )
    torch._check(
        stride[0] > 0 and stride[1] > 0 and stride[2] > 0,
        lambda: f"strides should be greater than zero, but got stride: {stride}",
    )
    return _max_unpoolnd(input, indices, output_size, 3)
@register_decomposition(aten.index_add_)
def index_add_(
    x: TensorLike,
    dim: int,
    index: TensorLike,
    tensor: TensorLike,
    *,
    alpha: NumberType = 1,
):
    """In-place index_add: accumulate ``alpha * tensor`` into ``x`` along
    ``dim`` at the positions selected by ``index``.
    """
    return _index_add(x, dim, index, tensor, alpha=alpha, inplace=True)
@register_decomposition(aten.index_add)
@out_wrapper()
def index_add(
    x: TensorLike,
    dim: int,
    index: TensorLike,
    tensor: TensorLike,
    *,
    alpha: NumberType = 1,
):
    """Out-of-place index_add: return a copy of ``x`` with ``alpha * tensor``
    accumulated along ``dim`` at the positions selected by ``index``.
    """
    return _index_add(x, dim, index, tensor, alpha=alpha, inplace=False)
def _index_add(
    x: TensorLike,
    dim: int,
    index: TensorLike,
    tensor: TensorLike,
    *,
    inplace: bool,
    alpha: NumberType = 1,
):
    """Common implementation for index_add / index_add_.

    Accumulates ``alpha * tensor`` into ``x`` along ``dim`` at the rows
    selected by ``index`` via index_put with ``accumulate=True``. Zero-dim
    ``x`` is handled by viewing it as a one-element vector.
    """
    dim = utils.canonicalize_dims(x.ndim, dim)
    torch._check(
        index.ndim <= 1,
        lambda: f"Index should have dimension 1 or 0 (got {index.ndim})",
    )
    index_size = index.size(0) if index.ndim == 1 else 1
    tensor_size = tensor.size(dim) if tensor.ndim > 0 else 1
    torch._check(
        tensor_size == index_size,
        lambda: f"Number of indices ({index_size}) should be equal to tensor.size(dim) ({tensor_size}), for {dim=}",
    )
    if alpha != 1:
        # alpha must be losslessly representable in x's element type.
        python_type = utils.dtype_to_type(x.dtype)
        torch._check(
            python_type is bool
            or utils.is_weakly_lesser_type(type(alpha), python_type),
            lambda: f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!",
        )
        tensor = tensor * alpha
    # A scalar x is treated as an element of R^1 for the index_put call.
    zero_dim = x.ndim == 0
    work = x.unsqueeze(0) if zero_dim else x
    put_index = (None,) * dim + (index,)
    put_op = aten.index_put_ if inplace else aten.index_put
    result = put_op(work, put_index, tensor, accumulate=True)
    if inplace:
        return x
    return result.squeeze(0) if zero_dim else result.contiguous()
@register_decomposition(aten.pad_sequence.default)
@aten.pad_sequence.default.py_impl(DispatchKey.CompositeImplicitAutograd)
def pad_sequence(sequences, batch_first=False, padding_value=0.0):
    """Pad a list of variable-length tensors to a common first-dim length.

    Each sequence is right-padded with ``padding_value`` up to the longest
    sequence's length, then written into slot ``i`` of the output with
    select_scatter (along dim 0 when ``batch_first``, else dim 1).
    """
    torch._check(len(sequences) > 0, lambda: "received an empty list of sequences")
    num_seqs = len(sequences)
    trailing_dims = sequences[0].size()[1:]
    max_len = max(seq.size(0) for seq in sequences)
    leading = (num_seqs, max_len) if batch_first else (max_len, num_seqs)
    out = sequences[0].new_full(leading + trailing_dims, padding_value)
    # constant_pad_nd pads last-dim-first: zero padding for every trailing
    # dim, then the time-dimension padding appended per sequence.
    pad_prefix = (0, 0) * len(trailing_dims)
    scatter_dim = 0 if batch_first else 1
    for i, seq in enumerate(sequences):
        padded = aten.constant_pad_nd(
            seq, pad_prefix + (0, max_len - seq.size(0)), padding_value
        )
        out = aten.select_scatter(out, padded, dim=scatter_dim, index=i)
    return out
@register_decomposition(aten.index_copy_)
def index_copy_(x: TensorLike, dim: int, index: TensorLike, tensor: TensorLike):
    """In-place index_copy: write rows of ``tensor`` into ``x`` at ``index``."""
    return _index_copy(x, dim, index, tensor, inplace=True)
@register_decomposition(aten.index_copy)
@out_wrapper()
def index_copy(x: TensorLike, dim: int, index: TensorLike, tensor: TensorLike):
    """Out-of-place index_copy: copy of ``x`` with rows of ``tensor`` written
    at ``index``.
    """
    return _index_copy(x, dim, index, tensor, inplace=False)
def _index_copy(
    x: TensorLike, dim: int, index: TensorLike, tensor: TensorLike, *, inplace: bool
):
    """Common implementation for index_copy / index_copy_: write rows of
    ``tensor`` into ``x`` along ``dim`` at ``index`` via index_put.

    Zero-dim ``x`` and ``index`` are viewed as one-element vectors first.
    """
    dim = utils.canonicalize_dims(x.ndim, dim)
    torch._check(
        index.ndim <= 1,
        lambda: f"Index should have dimension 1 or 0 (got {index.ndim})",
    )
    zero_dim = x.ndim == 0
    work = x.unsqueeze(0) if zero_dim else x
    index_1d = index.unsqueeze(0) if index.ndim == 0 else index
    put_index = (None,) * dim + (index_1d,)
    put_op = aten.index_put_ if inplace else aten.index_put
    result = put_op(work, put_index, tensor)
    if inplace:
        return x
    return result.squeeze(0) if zero_dim else result.contiguous()
# nb: Should use acc_t, not op_math
@register_decomposition(aten.log_sigmoid_forward)
@out_wrapper("output", "buffer")
@pw_cast_for_opmath
def log_sigmoid_forward(self: Tensor) -> tuple[Tensor, Tensor]:
    """Numerically stable log(sigmoid(x)) = min(0, x) - log1p(exp(-|x|)).

    The second return is the backward buffer exp(-|x|); on CUDA/XPU it is an
    empty tensor, mirroring those kernels' contract.
    """
    neg_abs_exp = torch.exp(-torch.abs(self))
    clamped = torch.minimum(self.new_zeros(()), self)
    is_accel = self.is_cuda or self.is_xpu
    buffer = self.new_zeros((0,)) if is_accel else neg_abs_exp
    return clamped - torch.log1p(neg_abs_exp), buffer
@register_decomposition(aten.uniform)
@out_wrapper()
def uniform(
    x: Tensor,
    low: Union[bool, int, float] = 0.0,
    high: Union[bool, int, float] = 1.0,
    generator: Optional[torch.Generator] = None,
):
    """Sample a tensor shaped/typed like ``x`` from U[low, high)."""
    lo = sym_float(low)
    hi = sym_float(high)
    return prims._uniform_helper(
        x.shape, low=lo, high=hi, dtype=x.dtype, device=x.device, generator=generator
    )
@register_decomposition(aten.uniform_)
def uniform_(self, low=0, high=1, generator=None):
    """In-place uniform fill: copy freshly sampled U[low, high) values into
    ``self``.
    """
    sampled = uniform(self, low, high, generator)
    return self.copy_(sampled)
# aten/src/ATen/native/UpSample.cpp compute_output_size
def upsample_compute_output_size(input_size, output_size, scale_factors):
spatial_dimensions = len(input_size) - 2
if output_size is not None:
torch._check(
scale_factors is None,
lambda: "Must specify exactly one of output_size and scale_factors",
)
torch._check(len(output_size) == spatial_dimensions, lambda: "")
return output_size
if scale_factors is not None:
# NB: this isn't necessary lol
torch._check(
output_size is None,
lambda: "Must specify exactly one of output_size and scale_factors",
)
torch._check(len(scale_factors) == spatial_dimensions, lambda: "")
output_size = []
for i, s in enumerate(scale_factors):
if int(s) == s:
output_size.append(input_size[i + 2] * int(s))
else:
output_size.append(sym_int(input_size[i + 2] * s))
return output_size
torch._check(
False, lambda: "Must specify exactly one of output_size and scale_factors"
)
def get_scale_value(scales, idx):
    """Return ``scales[idx]``, or None when no scales were provided."""
    return None if scales is None else scales[idx]
@register_decomposition(aten.upsample_nearest1d.vec)
@register_decomposition(aten.upsample_nearest2d.vec)
@register_decomposition(aten.upsample_nearest3d.vec)
@aten.upsample_nearest1d.vec.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten.upsample_nearest1d.vec.py_impl(DispatchKey.Autograd)
@aten.upsample_nearest2d.vec.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten.upsample_nearest2d.vec.py_impl(DispatchKey.Autograd)
@aten.upsample_nearest3d.vec.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten.upsample_nearest3d.vec.py_impl(DispatchKey.Autograd)
def _upsample_nearest_vec(
    input: Tensor,
    output_size: Optional[list[int]],
    scale_factors: Optional[list[float]],
) -> Tensor:
    """Vec overloads of upsample_nearest{1,2,3}d: resolve the output size
    from either ``output_size`` or ``scale_factors``, then dispatch to the
    shared nearest-neighbor implementation.
    """
    osize = upsample_compute_output_size(input.size(), output_size, scale_factors)
    if scale_factors:
        scales = scale_factors
    else:
        scales = [None] * len(osize)  # type: ignore[list-item]
    return _upsample_nearest(input, osize, scales)
@register_decomposition(aten._upsample_nearest_exact1d.vec)
@register_decomposition(aten._upsample_nearest_exact2d.vec)
@register_decomposition(aten._upsample_nearest_exact3d.vec)
@aten._upsample_nearest_exact1d.vec.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten._upsample_nearest_exact1d.vec.py_impl(DispatchKey.Autograd)
@aten._upsample_nearest_exact2d.vec.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten._upsample_nearest_exact2d.vec.py_impl(DispatchKey.Autograd)
@aten._upsample_nearest_exact3d.vec.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten._upsample_nearest_exact3d.vec.py_impl(DispatchKey.Autograd)
def _upsample_nearest_exact_vec(
    input: Tensor,
    output_size: Optional[list[int]],
    scale_factors: Optional[list[float]],
) -> Tensor:
    """Vec overloads of _upsample_nearest_exact{1,2,3}d: like
    ``_upsample_nearest_vec`` but using the half-pixel ('exact') index rule.
    """
    osize = upsample_compute_output_size(input.size(), output_size, scale_factors)
    if scale_factors:
        scales = scale_factors
    else:
        scales = [None] * len(osize)  # type: ignore[list-item]
    return _upsample_nearest(input, osize, scales, exact=True)
def _compute_upsample_nearest_indices(input, output_size, scales, exact=False):
# For each dim in output_size, compute the set of input indices used
# to produce the upsampled output.
indices = []
num_spatial_dims = len(output_size)
offset = 0.5 if exact else 0.0
for d in range(num_spatial_dims):
# Math matches aten/src/ATen/native/cpu/UpSampleKernel.cpp
#
# Indices are computed as following:
# scale = isize / osize
# Case: exact=False
# input_index = floor(output_index * scale)
# Same as OpenCV INTER_NEAREST
#
# Case: exact=False
# index_f32 = (output_index + 0.5) * scale - 0.5
# input_index = round(index_f32)
# Same as Pillow and Scikit-Image/Scipy ndi.zoom
osize = output_size[d]
isize = input.shape[-num_spatial_dims + d]
scale = isize / (isize * scales[d]) if scales[d] is not None else isize / osize
output_indices = torch.arange(osize, dtype=torch.float32, device=input.device)
input_indices = ((output_indices + offset) * scale).to(torch.int64)
for _ in range(num_spatial_dims - 1 - d):
input_indices = input_indices.unsqueeze(-1)
indices.append(input_indices)
return indices
@register_decomposition([aten.upsample_nearest1d.default, aten.upsample_nearest1d.out])
@aten.upsample_nearest1d.default.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten.upsample_nearest1d.default.py_impl(DispatchKey.Autograd)
@out_wrapper(preserve_memory_format=True, exact_dtype=True)
def upsample_nearest1d(
    input: Tensor,
    output_size: list[int],
    scales: Optional[float] = None,
) -> Tensor:
    """1-d nearest-neighbor upsampling (floor index rule)."""
    scale_list = [scales]
    return _upsample_nearest(input, output_size, scale_list)
@register_decomposition(
    [aten._upsample_nearest_exact1d.default, aten._upsample_nearest_exact1d.out]
)
@aten._upsample_nearest_exact1d.default.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten._upsample_nearest_exact1d.default.py_impl(DispatchKey.Autograd)
@out_wrapper(preserve_memory_format=True, exact_dtype=True)
def upsample_nearest_exact1d(
    input: Tensor,
    output_size: list[int],
    scales: Optional[float] = None,
) -> Tensor:
    """1-d nearest-exact upsampling (half-pixel index rule)."""
    scale_list = [scales]
    return _upsample_nearest(input, output_size, scale_list, exact=True)
@register_decomposition([aten.upsample_nearest2d.default, aten.upsample_nearest2d.out])
@aten.upsample_nearest2d.default.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten.upsample_nearest2d.default.py_impl(DispatchKey.Autograd)
@out_wrapper(preserve_memory_format=True, exact_dtype=True)
def upsample_nearest2d(
    input: Tensor,
    output_size: list[int],
    scales_h: Optional[float] = None,
    scales_w: Optional[float] = None,
) -> Tensor:
    """2-d nearest-neighbor upsampling (floor index rule)."""
    scale_list = [scales_h, scales_w]
    return _upsample_nearest(input, output_size, scale_list)
@register_decomposition(
    [aten._upsample_nearest_exact2d.default, aten._upsample_nearest_exact2d.out]
)
@aten._upsample_nearest_exact2d.default.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten._upsample_nearest_exact2d.default.py_impl(DispatchKey.Autograd)
@out_wrapper(preserve_memory_format=True, exact_dtype=True)
def _upsample_nearest_exact2d(
    input: Tensor,
    output_size: list[int],
    scales_h: Optional[float] = None,
    scales_w: Optional[float] = None,
) -> Tensor:
    """2-d nearest-exact upsampling (half-pixel index rule)."""
    scale_list = [scales_h, scales_w]
    return _upsample_nearest(input, output_size, scale_list, exact=True)
@register_decomposition([aten.upsample_nearest3d.default, aten.upsample_nearest3d.out])
@aten.upsample_nearest3d.default.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten.upsample_nearest3d.default.py_impl(DispatchKey.Autograd)
@out_wrapper(preserve_memory_format=True, exact_dtype=True)
def upsample_nearest3d(
    input: Tensor,
    output_size: list[int],
    scales_d: Optional[float] = None,
    scales_h: Optional[float] = None,
    scales_w: Optional[float] = None,
) -> Tensor:
    """3-d nearest-neighbor upsampling (floor index rule)."""
    scale_list = [scales_d, scales_h, scales_w]
    return _upsample_nearest(input, output_size, scale_list)
@register_decomposition(
    [aten._upsample_nearest_exact3d.default, aten._upsample_nearest_exact3d.out]
)
@aten._upsample_nearest_exact3d.default.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten._upsample_nearest_exact3d.default.py_impl(DispatchKey.Autograd)
@out_wrapper(preserve_memory_format=True, exact_dtype=True)
def _upsample_nearest_exact3d(
    input: Tensor,
    output_size: list[int],
    scales_d: Optional[float] = None,
    scales_h: Optional[float] = None,
    scales_w: Optional[float] = None,
) -> Tensor:
    """3-d nearest-exact upsampling (half-pixel index rule)."""
    scale_list = [scales_d, scales_h, scales_w]
    return _upsample_nearest(input, output_size, scale_list, exact=True)
@pw_cast_for_opmath
def _upsample_nearest(
    input: Tensor,
    output_size: list[int],
    scales: list[Optional[float]],
    exact: bool = False,
) -> Tensor:
    """Nearest-neighbor upsampling via advanced indexing.

    Gathers input values at the precomputed per-dimension source indices;
    batch and channel dims pass through unchanged (None index). A 4-d result
    is made contiguous in the suggested memory format, except on CUDA with
    fewer than 4 channels, where the channels_last path is slower than the
    contiguous one.
    """
    gather_idx = [None, None] + _compute_upsample_nearest_indices(
        input, output_size, scales, exact=exact
    )
    out = aten._unsafe_index(input, gather_idx)
    if out.ndim != 4:
        return out
    memory_format = utils.suggest_memory_format(input)
    if input.device.type == "cuda" and input.shape[1] < 4:
        memory_format = torch.contiguous_format
    return out.contiguous(memory_format=memory_format)
def gather_params(params, has_biases, has_projections):
    """Group the flat RNN weight list into per-layer/direction tuples.

    The group size is 2 base weight matrices, plus 2 bias vectors when
    ``has_biases``, plus 1 projection weight when ``has_projections``.
    """
    group_size = 2 + (2 if has_biases else 0) + (1 if has_projections else 0)
    assert len(params) % group_size == 0, len(params)
    return [
        tuple(params[start : start + group_size])
        for start in range(0, len(params), group_size)
    ]
def params_hiddens(params, hiddens, i, bidirectional):
    """Pick layer ``i``'s forward (and, if bidirectional, backward) parameter
    group and initial hidden state.

    Bidirectional layouts interleave directions: forward at 2*i, backward
    at 2*i + 1; the backward slots are None otherwise.
    """
    if not bidirectional:
        return params[i], hiddens[i], None, None
    fwd = 2 * i
    return params[fwd], hiddens[fwd], params[fwd + 1], hiddens[fwd + 1]
def update_hidden_for_packed(cur_hidden, last_batch_size, batch_size, hiddens):
    """Shrink the running hidden state when the packed batch gets smaller.

    Rows for sequences that just ended are final; stash them in ``hiddens``
    and keep only the first ``batch_size`` rows.
    """
    assert last_batch_size > batch_size
    finished = cur_hidden.narrow(0, batch_size, last_batch_size - batch_size)
    hiddens.append(finished)
    return cur_hidden.narrow(0, 0, batch_size)
def update_hidden_for_packed_reverse(
    cur_hidden, last_batch_size, batch_size, inp_hidden
):
    """Grow the running hidden state when a reverse pass over a packed
    sequence reaches a larger batch: rows for the newly active sequences are
    taken from the initial hidden state ``inp_hidden``.
    """
    if last_batch_size == batch_size:
        return cur_hidden
    assert last_batch_size < batch_size
    new_rows = inp_hidden.narrow(0, last_batch_size, batch_size - last_batch_size)
    return torch.concat((cur_hidden, new_rows))
def one_layer_rnn_data(
    inp, hidden, params, has_biases, hidden_fn, batch_sizes, reverse=False
):
    """Run one RNN layer over a packed (data, batch_sizes) sequence.

    The running hidden state shrinks as sequences end (forward pass; their
    final rows are stashed in ``hiddens``) or grows as sequences become
    active (reverse pass; new rows come from the initial state). Returns the
    packed per-step outputs and the final hidden-state rows reassembled in
    original batch order.

    Note:
        ``hidden_fn`` receives the raw step input — unlike ``one_layer_rnn``
        the input projection is NOT precomputed here, so the ``*_data`` cell
        variants must be used.
    """
    ih_weight = params[0]
    hh_weight = params[1]
    ih_bias = params[2] if has_biases else None
    hh_bias = params[3] if has_biases else None
    step_output = []
    hiddens: list[torch.Tensor] = []
    # Effective batch size of the first chunk we will process.
    last_batch_size = batch_sizes[-1] if reverse else batch_sizes[0]
    cur_hidden = hidden.narrow(0, 0, last_batch_size)
    split_inp = torch.split(inp, list(batch_sizes))
    if reverse:
        split_inp = split_inp[::-1]
    for inp in split_inp:
        i = inp.shape[0]
        if last_batch_size == i:
            pass  # don't update cur_hidden
        # this will only happen when reverse=False, since batch sizes are sorted largest -> smallest
        elif reverse:
            cur_hidden = update_hidden_for_packed_reverse(
                cur_hidden, last_batch_size, i, hidden
            )
        else:
            cur_hidden = update_hidden_for_packed(
                cur_hidden, last_batch_size, i, hiddens
            )
        cur_hidden = hidden_fn(inp, cur_hidden, ih_weight, ih_bias, hh_weight, hh_bias)
        last_batch_size = i
        step_output.append(cur_hidden)
    if reverse:
        step_output.reverse()
    else:
        hiddens.append(cur_hidden)
        hiddens.reverse()
    out = torch.cat(step_output, 0)
    # Forward: concatenate the stashed final rows (reversed back to batch
    # order); reverse: the running state already covers the full batch.
    hidden_out = torch.cat(hiddens, 0) if not reverse else cur_hidden
    return out, hidden_out
def rnn_cell(nonlinearity):
    """Build a single-step RNN cell for pre-projected inputs: the returned
    function computes nonlinearity(W_hh @ h + b_hh + i), where ``i`` already
    includes the input projection (ih_weight/ih_bias are accepted but unused).
    """

    def step(i, cur_hidden, ih_weight, ih_bias, hh_weight, hh_bias):
        hidden_proj = F.linear(cur_hidden, hh_weight, hh_bias)
        return nonlinearity(hidden_proj + i)

    return step
def rnn_cell_data(nonlinearity):
    """Like ``rnn_cell`` but for packed data: projects the raw step input
    with W_ih/b_ih before combining it with the hidden projection.
    """

    def step(i, cur_hidden, ih_weight, ih_bias, hh_weight, hh_bias):
        input_proj = F.linear(i, ih_weight, ih_bias)
        hidden_proj = F.linear(cur_hidden, hh_weight, hh_bias)
        return nonlinearity(hidden_proj + input_proj)

    return step
def one_layer_rnn(inp, hidden, params, has_biases, hidden_fn, reverse=False):
    """Run one RNN layer over a (seq, batch, *) input.

    The input projection is precomputed for the whole sequence with a single
    linear call; ``hidden_fn`` is then applied step by step. Returns the
    per-step outputs concatenated over time and the final hidden state.
    """
    ih_weight, hh_weight = params[0], params[1]
    ih_bias = params[2] if has_biases else None
    hh_bias = params[3] if has_biases else None

    projected = F.linear(inp, ih_weight, ih_bias)
    if reverse:
        projected = projected.flip(0)

    cur_hidden = hidden.unsqueeze(0)
    outputs = []
    for step_inp in projected:
        cur_hidden = hidden_fn(
            step_inp, cur_hidden, ih_weight, ih_bias, hh_weight, hh_bias
        )
        outputs.append(cur_hidden)
    if reverse:
        outputs.reverse()
    return torch.cat(outputs, 0), cur_hidden.squeeze(0)
def mkldnn_one_layer_lstm(inp, hidden, params, has_biases, reverse=False):
    """Run one LSTM layer through the fused mkldnn_rnn_layer op.

    Drop-in replacement for ``one_layer_lstm`` on the CPU/mkldnn fast path
    selected by ``select_one_layer_lstm_function`` (inference only, no
    hidden-state projections). Returns (output, (h_n, c_n)).
    """
    w0 = params[0]
    w1 = params[1]
    if has_biases:
        w2 = params[2]
        w3 = params[3]
    else:
        # The fused op always takes bias tensors; substitute zeros.
        # NOTE(review): torch.zeros defaults to float32 — for bfloat16
        # weights this creates mismatched-dtype biases; confirm against the
        # mkldnn kernel's expectations.
        w2 = torch.zeros(w0.size())
        w3 = torch.zeros(w1.size())
    hx = hidden[0].unsqueeze(0)
    cx = hidden[1].unsqueeze(0)
    batch_sizes: list[int] = []
    mode = 2  # third_party/ideep/include/ideep/abstract_types.hpp: ideep::rnn_kind::LSTM = 2
    hidden_size = hx.size(2)
    num_layers = 1
    # _rnn_helper already handles bidirectional and batch_first so we hard-code them to False here
    bidirectional = False
    batch_first = False
    train = False
    # If batch_first, inp has been permuted in _rnn_helper. Convert to contiguous here.
    # Same as aten/src/ATen/native/mkldnn/RNN.cpp: mkldnn_rnn: input = input.contiguous();
    inp = inp.contiguous()
    hx = hx.contiguous()
    cx = cx.contiguous()
    outputs = torch.ops.aten.mkldnn_rnn_layer.default(
        inp,
        w0,
        w1,
        w2,
        w3,
        hx,
        cx,
        reverse,
        batch_sizes,
        mode,
        hidden_size,
        num_layers,
        has_biases,
        bidirectional,
        batch_first,
        train,
    )
    y, hy, cy = outputs[0], outputs[1], outputs[2]
    # Drop the leading singleton layer dim from the final states.
    return y, (hy.squeeze(0), cy.squeeze(0))
def _rnn_helper(
    input,
    hidden,
    params,
    has_biases,
    num_layers,
    dropout,
    train,
    bidirectional,
    batch_first,
    layer_fn,
):
    """Run a multi-layer (optionally bidirectional) RNN by applying
    ``layer_fn`` once per layer and direction.

    Args:
        input: input sequence; transposed to seq-first if ``batch_first``.
        hidden: per-layer (and per-direction) initial hidden states.
        params: weight groups per layer/direction (see ``gather_params``).
        layer_fn: runs a single layer; returns (layer output, final hidden).

    Returns:
        Tuple of (output sequence, list of final hidden states).
    """
    input = input.transpose(0, 1) if batch_first else input
    final_hiddens = []
    for i in range(num_layers):
        cur_params, cur_hidden, bidir_params, bidir_hidden = params_hiddens(
            params, hidden, i, bidirectional
        )
        # Dropout applies between layers only (never after the last) and only
        # in training mode, matching torch.nn.RNN/LSTM/GRU.
        # Fix: the previous condition `num_layers < i - 1` was never true for
        # i in range(num_layers) and its reassignment also clobbered
        # `dropout` for every later layer, so inter-layer dropout was
        # silently never applied.
        layer_dropout = dropout if (train and i < num_layers - 1) else 0.0
        fwd_inp, fwd_hidden = layer_fn(input, cur_hidden, cur_params, has_biases)
        final_hiddens.append(fwd_hidden)
        if bidirectional:
            bwd_inp, bwd_hidden = layer_fn(
                input, bidir_hidden, bidir_params, has_biases, reverse=True
            )
            final_hiddens.append(bwd_hidden)
            # Concatenate the two directions along the feature dim.
            input = torch.cat([fwd_inp, bwd_inp], fwd_inp.dim() - 1)
        else:
            input = fwd_inp
        if layer_dropout != 0:
            input = torch.dropout(input, layer_dropout, train=True)
    input = input.transpose(0, 1) if batch_first else input
    return input, final_hiddens
@register_decomposition(aten.rnn_tanh.input)
@aten.rnn_tanh.input.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten.rnn_tanh.input.py_impl(DispatchKey.Autograd)
def rnn_tanh_input(
    input,
    hx,
    params,
    has_biases,
    num_layers,
    dropout,
    train,
    bidirectional,
    batch_first,
):
    """Decomposition of the tanh RNN over a padded (non-packed) input."""
    layer_params = gather_params(params, has_biases, False)
    out, final_hiddens = _rnn_helper(
        input,
        hx.unbind(0),
        layer_params,
        has_biases,
        num_layers,
        dropout,
        train,
        bidirectional,
        batch_first,
        partial(one_layer_rnn, hidden_fn=rnn_cell(torch.tanh)),
    )
    return out, torch.stack(final_hiddens, 0)
@register_decomposition(aten.rnn_relu.input)
@aten.rnn_relu.input.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten.rnn_relu.input.py_impl(DispatchKey.Autograd)
def rnn_relu_input(
    input,
    hx,
    params,
    has_biases,
    num_layers,
    dropout,
    train,
    bidirectional,
    batch_first,
):
    """Decomposition of the relu RNN over a padded (non-packed) input."""
    layer_params = gather_params(params, has_biases, False)
    out, final_hiddens = _rnn_helper(
        input,
        hx.unbind(0),
        layer_params,
        has_biases,
        num_layers,
        dropout,
        train,
        bidirectional,
        batch_first,
        partial(one_layer_rnn, hidden_fn=rnn_cell(torch.relu)),
    )
    return out, torch.stack(final_hiddens, 0)
@register_decomposition(aten.rnn_relu.data)
@aten.rnn_relu.data.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten.rnn_relu.data.py_impl(DispatchKey.Autograd)
def rnn_relu_data(
    data,
    batch_sizes,
    hx,
    params,
    has_biases,
    num_layers,
    dropout,
    train,
    bidirectional,
):
    """Decomposition of the relu RNN over a packed (data, batch_sizes) input."""
    layer_params = gather_params(params, has_biases, False)
    layer_fn = partial(
        one_layer_rnn_data,
        batch_sizes=batch_sizes,
        hidden_fn=rnn_cell_data(torch.relu),
    )
    out, final_hiddens = _rnn_helper(
        data,
        hx.unbind(0),
        layer_params,
        has_biases,
        num_layers,
        dropout,
        train,
        bidirectional,
        False,
        layer_fn,
    )
    return out, torch.stack(final_hiddens, 0)
@register_decomposition(aten.rnn_tanh.data)
@aten.rnn_tanh.data.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten.rnn_tanh.data.py_impl(DispatchKey.Autograd)
def rnn_tanh_data(
    data,
    batch_sizes,
    hx,
    params,
    has_biases,
    num_layers,
    dropout,
    train,
    bidirectional,
):
    """Decomposition of the tanh RNN over a packed (data, batch_sizes) input."""
    layer_params = gather_params(params, has_biases, False)
    layer_fn = partial(
        one_layer_rnn_data,
        batch_sizes=batch_sizes,
        hidden_fn=rnn_cell_data(torch.tanh),
    )
    out, final_hiddens = _rnn_helper(
        data,
        hx.unbind(0),
        layer_params,
        has_biases,
        num_layers,
        dropout,
        train,
        bidirectional,
        False,
        layer_fn,
    )
    return out, torch.stack(final_hiddens, 0)
def lstm_cell(inp, hx, cx, hh_weight, hh_bias, hr_weight, chunk_dim):
    """One LSTM time step on a pre-projected input.

    ``inp`` must already contain the input projection; the four gates are
    recovered by chunking along ``chunk_dim``. An optional ``hr_weight``
    applies a hidden-state projection to the output.
    """
    gates = F.linear(hx, hh_weight, hh_bias) + inp
    i_gate, f_gate, g_gate, o_gate = gates.chunk(4, chunk_dim)
    in_gate = i_gate.sigmoid()
    forget_gate = f_gate.sigmoid()
    cell_gate = g_gate.tanh()
    out_gate = o_gate.sigmoid()
    cy = forget_gate * cx + in_gate * cell_gate
    hy = out_gate * cy.tanh()
    if hr_weight is not None:
        hy = F.linear(hy, hr_weight, None)
    return hy, cy
def one_layer_lstm(inp, hidden, params, has_biases, reverse=False):
    """Run one LSTM layer over a (seq, batch, *) input.

    The input projection for the whole sequence is done with one linear
    call; ``lstm_cell`` is then applied per time step. ``params`` may carry
    an extra hidden-projection weight (5 entries with biases, 3 without).

    Returns:
        (output, (h_n, c_n)): per-step hidden states stacked along time,
        plus the final states with the leading singleton dim removed.
    """
    ih_weight = params[0]
    hh_weight = params[1]
    ih_bias = params[2] if has_biases else None
    hh_bias = params[3] if has_biases else None
    hr_weight = (
        params[4] if len(params) == 5 else params[2] if len(params) == 3 else None
    )
    # hx/cx carry a leading singleton dim so lstm_cell can chunk on dim 2.
    hx = hidden[0].unsqueeze(0)
    cx = hidden[1].unsqueeze(0)
    precomputed_input = F.linear(inp, ih_weight, ih_bias)
    precomputed_input = precomputed_input.flip(0) if reverse else precomputed_input
    step_output = []
    for inp in precomputed_input:
        hx, cx = lstm_cell(inp, hx, cx, hh_weight, hh_bias, hr_weight, chunk_dim=2)
        step_output.append(hx)
    if reverse:
        step_output.reverse()
    out = torch.cat(step_output, 0)
    # Fix: squeeze dim 0, not dim 1 — hx/cx have shape (1, batch, hidden),
    # so squeeze(1) was a no-op for batch > 1 and returned wrongly shaped
    # final states. This matches one_layer_rnn (cur_hidden.squeeze(0)) and
    # mkldnn_one_layer_lstm (hy.squeeze(0)).
    return out, (hx.squeeze(0), cx.squeeze(0))
def one_layer_lstm_data(inp, hidden, params, has_biases, batch_sizes, reverse=False):
    """Run one LSTM layer over a packed (data, batch_sizes) sequence.

    The running (hx, cx) pair shrinks as sequences end in the forward pass
    (their final rows are stashed in ``hiddens``) and grows as sequences
    become active in the reverse pass (new rows come from the initial
    state). Returns the packed per-step outputs and the final (h, c) rows
    reassembled in original batch order.
    """
    ih_weight = params[0]
    hh_weight = params[1]
    ih_bias = params[2] if has_biases else None
    hh_bias = params[3] if has_biases else None
    # Optional hidden-projection weight (5 params with biases, 3 without).
    hr_weight = (
        params[4] if len(params) == 5 else params[2] if len(params) == 3 else None
    )
    step_output = []
    hiddens = []
    last_batch_size = batch_sizes[-1] if reverse else batch_sizes[0]
    split_inp = torch.split(inp, list(batch_sizes))
    if reverse:
        split_inp = split_inp[::-1]
    orig_hx = hidden[0]
    orig_cx = hidden[1]
    hx, cx = (
        orig_hx.narrow(0, 0, last_batch_size),
        orig_cx.narrow(0, 0, last_batch_size),
    )
    for inp in split_inp:
        i = inp.shape[0]
        # Project the step input here; chunk_dim=1 below because hx/cx stay 2-d.
        inp = F.linear(inp, ih_weight, ih_bias)
        # this will only happen when reverse=False, since batch sizes are sorted largest -> smallest
        if i < last_batch_size:
            hiddens.append(
                (
                    hx.narrow(0, i, last_batch_size - i),
                    cx.narrow(0, i, last_batch_size - i),
                )
            )
            hx, cx = hx.narrow(0, 0, i), cx.narrow(0, 0, i)
        # this will only happen when reverse=True
        if i > last_batch_size:
            hx = torch.concat(
                (hx, orig_hx.narrow(0, last_batch_size, i - last_batch_size)), 0
            )
            cx = torch.concat(
                (cx, orig_cx.narrow(0, last_batch_size, i - last_batch_size)), 0
            )
        hx, cx = lstm_cell(inp, hx, cx, hh_weight, hh_bias, hr_weight, chunk_dim=1)
        last_batch_size = i
        step_output.append(hx)
    if reverse:
        step_output.reverse()
        hidden_out = (hx, cx)
    else:
        hiddens.append((hx, cx))
        hiddens.reverse()
        hidden0, hidden1 = zip(*hiddens)
        hidden_out = torch.cat(hidden0, 0), torch.cat(hidden1, 0)
    out = torch.cat(step_output, 0)
    return out, hidden_out
def select_one_layer_lstm_function(input, hx, params):
    """Pick the single-layer LSTM kernel: the fused mkldnn path when
    applicable, otherwise the pure decomposition.

    The mkldnn path requires all of: mkldnn enabled, every tensor on CPU,
    only float32/bfloat16 dtypes, no autograd on the input (inference), and
    no hidden-state projections.

    Args:
        input: the input sequence to LSTM
        hx: tuple (h_0, c_0) of initial hidden and cell states
        params: the weight and bias tensors of LSTM
    """

    def _mkldnn_applicable():
        if not torch._C._get_mkldnn_enabled():
            return False
        tensors = [input] + list(hx) + list(chain.from_iterable(params))
        if any(t.device != torch.device("cpu") for t in tensors):
            return False
        # With autocast it is possible to see mixed dtypes here.
        if any(t.dtype not in (torch.float, torch.bfloat16) for t in tensors):
            return False
        if input.requires_grad:
            return False
        # A projection changes the hidden size: h_0 and c_0 differ in dim 2.
        return hx[0].size(2) == hx[1].size(2)

    # mkldnn_one_layer_lstm does not depend on seq_len while one_layer_lstm
    # will expand over the seq_len dim.
    return mkldnn_one_layer_lstm if _mkldnn_applicable() else one_layer_lstm
@register_decomposition(aten.lstm.input)
@aten.lstm.input.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten.lstm.input.py_impl(DispatchKey.Autograd)
def lstm_impl(
    input,
    hx,
    params,
    has_biases,
    num_layers,
    dropout,
    train,
    bidirectional,
    batch_first,
):
    """Decomposition of LSTM over a padded input; runs each layer through
    either the mkldnn-fused or the pure single-layer kernel.
    """
    assert len(hx) == 2, "lstm expects two hidden states"
    # A projection makes h_0 and c_0 differ in their last dim.
    layer_params = gather_params(params, has_biases, hx[0].size(2) != hx[1].size(2))
    layer_fn = select_one_layer_lstm_function(input, hx, layer_params)
    out, final_hiddens = _rnn_helper(
        input,
        list(zip(hx[0], hx[1])),
        layer_params,
        has_biases,
        num_layers,
        dropout,
        train,
        bidirectional,
        batch_first,
        layer_fn,
    )
    hn, cn = zip(*final_hiddens)
    return out, torch.stack(hn, 0), torch.stack(cn, 0)
@register_decomposition(aten.lstm.data)
@aten.lstm.data.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten.lstm.data.py_impl(DispatchKey.Autograd)
def lstm_data_impl(
    data,
    batch_sizes,
    hx,
    params,
    has_biases,
    num_layers,
    dropout,
    train,
    bidirectional,
):
    """Decomposition of LSTM over a packed (data, batch_sizes) input."""
    assert len(hx) == 2, "lstm expects two hidden states"
    layer_params = gather_params(params, has_biases, hx[0].size(2) != hx[1].size(2))
    out, final_hiddens = _rnn_helper(
        data,
        list(zip(hx[0], hx[1])),
        layer_params,
        has_biases,
        num_layers,
        dropout,
        train,
        bidirectional,
        False,
        partial(one_layer_lstm_data, batch_sizes=batch_sizes),
    )
    hn, cn = zip(*final_hiddens)
    return out, torch.stack(hn, 0), torch.stack(cn, 0)
def gru_cell(inp, cur_hidden, ih_weight, ih_bias, hh_weight, hh_bias):
    """One GRU step for pre-projected input: ``inp`` already holds the input
    projection (chunked on dim 1); hidden-side gates are chunked on dim 2.
    ih_weight/ih_bias are accepted for signature parity but unused.
    """
    ig_r, ig_z, ig_n = inp.chunk(3, 1)
    hg_r, hg_z, hg_n = F.linear(cur_hidden, hh_weight, hh_bias).chunk(3, 2)
    reset_gate = (hg_r + ig_r).sigmoid()
    update_gate = (hg_z + ig_z).sigmoid()
    candidate = (ig_n + hg_n * reset_gate).tanh()
    return (cur_hidden - candidate) * update_gate + candidate
def gru_cell_data(inp, cur_hidden, ih_weight, ih_bias, hh_weight, hh_bias):
    """One GRU step for packed data: projects the raw step input with
    W_ih/b_ih first; both gate sets are chunked on dim 1.
    """
    ig_r, ig_z, ig_n = F.linear(inp, ih_weight, ih_bias).chunk(3, 1)
    hg_r, hg_z, hg_n = F.linear(cur_hidden, hh_weight, hh_bias).chunk(3, 1)
    reset_gate = (hg_r + ig_r).sigmoid()
    update_gate = (hg_z + ig_z).sigmoid()
    candidate = (ig_n + hg_n * reset_gate).tanh()
    return (cur_hidden - candidate) * update_gate + candidate
@register_decomposition(aten.gru.data)
@aten.gru.data.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten.gru.data.py_impl(DispatchKey.Autograd)
def gru_impl_data(
    data,
    batch_sizes,
    hx,
    params,
    has_biases,
    num_layers,
    dropout,
    train,
    bidirectional,
):
    """Decomposition of GRU over a packed (data, batch_sizes) input."""
    layer_params = gather_params(params, has_biases, False)
    layer_fn = partial(
        one_layer_rnn_data, batch_sizes=batch_sizes, hidden_fn=gru_cell_data
    )
    out, final_hiddens = _rnn_helper(
        data,
        hx.unbind(0),
        layer_params,
        has_biases,
        num_layers,
        dropout,
        train,
        bidirectional,
        False,
        layer_fn,
    )
    return out, torch.stack(final_hiddens, 0)
@register_decomposition(aten.gru.input)
@aten.gru.input.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten.gru.input.py_impl(DispatchKey.Autograd)
def gru_impl(
    input,
    hx,
    params,
    has_biases,
    num_layers,
    dropout,
    train,
    bidirectional,
    batch_first,
):
    """Decomposition of GRU over a padded (non-packed) input."""
    layer_params = gather_params(params, has_biases, False)
    out, final_hiddens = _rnn_helper(
        input,
        hx.unbind(0),
        layer_params,
        has_biases,
        num_layers,
        dropout,
        train,
        bidirectional,
        batch_first,
        partial(one_layer_rnn, hidden_fn=gru_cell),
    )
    return out, torch.stack(final_hiddens, 0)
@register_decomposition(aten._upsample_bilinear2d_aa.vec)
@aten._upsample_bilinear2d_aa.vec.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten._upsample_bilinear2d_aa.vec.py_impl(DispatchKey.Autograd)
def upsample_bilinear2d_aa_vec(input, output_size, align_corners, scale_factors):
    """Resolve size/scales for the anti-aliased bilinear vec overload and
    forward to the non-vec op.
    """
    osize = upsample_compute_output_size(input.size(), output_size, scale_factors)
    return torch.ops.aten._upsample_bilinear2d_aa(
        input,
        osize,
        align_corners,
        get_scale_value(scale_factors, 0),
        get_scale_value(scale_factors, 1),
    )
@register_decomposition(aten._upsample_bicubic2d_aa.vec)
@aten._upsample_bicubic2d_aa.vec.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten._upsample_bicubic2d_aa.vec.py_impl(DispatchKey.Autograd)
def upsample_bicubic2d_aa_vec(input, output_size, align_corners, scale_factors):
    """Resolve size/scales for the anti-aliased bicubic vec overload and
    forward to the non-vec op.
    """
    osize = upsample_compute_output_size(input.size(), output_size, scale_factors)
    return torch.ops.aten._upsample_bicubic2d_aa(
        input,
        osize,
        align_corners,
        get_scale_value(scale_factors, 0),
        get_scale_value(scale_factors, 1),
    )
@register_decomposition(aten.upsample_bilinear2d.vec)
@register_decomposition(aten.upsample_trilinear3d.vec)
@aten.upsample_linear1d.vec.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten.upsample_linear1d.vec.py_impl(DispatchKey.Autograd)
@aten.upsample_bilinear2d.vec.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten.upsample_bilinear2d.vec.py_impl(DispatchKey.Autograd)
@aten.upsample_trilinear3d.vec.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten.upsample_trilinear3d.vec.py_impl(DispatchKey.Autograd)
def _upsample_linear_vec(input, output_size, align_corners, scale_factors):
    """Shared .vec entry point for linear/bilinear/trilinear upsampling:
    resolve the output size, then defer to the generic implementation."""
    osize = upsample_compute_output_size(input.size(), output_size, scale_factors)
    if scale_factors:
        scales = scale_factors
    else:
        scales = [None] * len(osize)
    return _upsample_linear(input, osize, align_corners, scales)
@register_decomposition([aten.upsample_linear1d.default, aten.upsample_linear1d.out])
@out_wrapper()
def upsample_linear1d(
    input: Tensor,
    output_size: list[int],
    align_corners: bool,
    scales_w: Optional[float] = None,
) -> Tensor:
    """1d linear upsampling: one spatial dim, one optional scale."""
    return _upsample_linear(input, output_size, align_corners, [scales_w])
@register_decomposition(
    [aten.upsample_bilinear2d.default, aten.upsample_bilinear2d.out]
)
@aten.upsample_bilinear2d.default.py_impl(DispatchKey.Autograd)
@out_wrapper()
def upsample_bilinear2d(
    input: Tensor,
    output_size: list[int],
    align_corners: bool,
    scales_h: Optional[float] = None,
    scales_w: Optional[float] = None,
) -> Tensor:
    """2d bilinear upsampling: two spatial dims, two optional scales."""
    return _upsample_linear(input, output_size, align_corners, [scales_h, scales_w])
@register_decomposition(
    [aten.upsample_trilinear3d.default, aten.upsample_trilinear3d.out]
)
@out_wrapper()
def upsample_trilinear3d(
    input: Tensor,
    output_size: list[int],
    align_corners: bool,
    scales_d: Optional[float] = None,
    scales_h: Optional[float] = None,
    scales_w: Optional[float] = None,
) -> Tensor:
    """3d trilinear upsampling: three spatial dims, three optional scales."""
    scales = [scales_d, scales_h, scales_w]
    return _upsample_linear(input, output_size, align_corners, scales)
def _compute_scale(in_size, out_size, align_corners, scale=None):
if align_corners:
return (in_size - 1.0) / (out_size - 1.0) if out_size > 1 else 0
else:
return 1.0 / scale if scale is not None and scale > 0 else in_size / out_size
def _compute_source_index(scale, dst_index, align_corners):
if align_corners:
return scale * dst_index
else:
return scale * (dst_index + 0.5) - 0.5
def _sum_tensors_uint8(
    src: Iterable[Tensor], weights: Iterable[Tensor], weights_precision: Tensor
) -> Tensor:
    """Fixed-point weighted sum for the uint8 upsampling path.

    ``weights`` are integer weights scaled by 2**weights_precision; the
    accumulator adds half a unit before shifting so the result rounds to
    nearest, then clamps back into uint8 range.
    """
    acc = _sum_tensors(
        s.to(torch.int32) * w.to(torch.int32) for s, w in zip(src, weights)
    )
    # half-unit bias so the arithmetic right shift rounds to nearest
    acc = (acc + (1 << (weights_precision - 1))) >> weights_precision
    return torch.clamp(acc, 0, 255).to(torch.uint8)
def _compute_weight_precision(weights: TensorSequenceType) -> Tensor:
max_weight = torch.stack(weights).max()
max_weight_precision = 22
precisions = torch.arange(max_weight_precision, device=max_weight.device)
values = 0.5 + max_weight * (1 << (precisions + 1))
mask = values >= (1 << 15)
return max_weight_precision - mask.sum()
@pw_cast_for_opmath
def _upsample_linear(
    input: Tensor,
    output_size: list[int],
    align_corners: bool,
    scales: list[Optional[float]],
) -> Tensor:
    """Shared separable linear upsampling over the trailing spatial dims.

    For each spatial dim it precomputes integer neighbour indices and a
    fractional blend weight, gathers all 2**n_dims neighbour combinations,
    then reduces one dimension at a time with a lerp.
    """
    # get dimensions of original image
    n_channels = input.shape[1]
    inp_sizes = input.shape[2:]
    n_dims = len(inp_sizes)

    _, dtype = utils.elementwise_dtypes(
        input,
        type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
    )

    def get_values(inp_size, out_size, scales, nsqueeze):
        # Per-dimension: fractional source coords, floor index, and the
        # clamped next index (floor + 1).
        # First Calculate scaling factor
        scale_factor = _compute_scale(inp_size, out_size, align_corners, scales)
        # We have to create arange with int64 dtype and use .to in order to avoid
        # additional kernels creation in inductor and get a perf slowdown
        i = torch.arange(out_size, device=input.device).to(dtype=dtype)

        x_f32 = _compute_source_index(scale_factor, i, align_corners).clamp(min=0.0)
        # trailing singleton dims so each dim's indices broadcast correctly
        x_f32 = x_f32.reshape(x_f32.shape[0], *[1] * (nsqueeze))
        x = x_f32.to(torch.int64)
        xp1 = (x + 1).clamp(max=inp_size - 1)
        return x_f32, x, xp1

    values = [
        get_values(inp_size, out_size, scales, n_dims - 1 - i)
        for i, (inp_size, out_size, scales) in enumerate(
            zip(inp_sizes, output_size, scales)
        )
    ]
    xs_f32, xs, xp1s = list(zip(*values))

    vs = []
    # gather the 2**n_dims neighbour corners (a selects floor/ceil per dim)
    for a in product(*[[0, 1]] * n_dims):
        idx = [None, None] + [xs[k] if a[k] == 0 else xp1s[k] for k in range(n_dims)]
        v = aten._unsafe_index(input, idx)
        v = _maybe_convert_to_dtype(v, dtype)
        vs.append(v)

    # reduce pairs along each dimension, innermost last
    for i in reversed(range(n_dims)):
        xscale = (xs_f32[i] - xs[i]).clamp(0.0, 1.0).to(dtype)
        vs = [
            # x1 * (1 - alpha) + x2 * alpha == x1 + (x2 - x1) * alpha
            v1 + torch.mul(v2 - v1, xscale)
            for v1, v2 in zip(vs[::2], vs[1::2])
        ]

    assert len(vs) == 1
    result = vs[0]

    # convert output to correct memory format, if necessary
    memory_format = utils.suggest_memory_format(input)

    # following "heuristic: only use channels_last path when it's faster than the contiguous path"
    if input.device.type == "cuda" and n_channels < 16:
        memory_format = torch.contiguous_format

    assert isinstance(result, torch.Tensor)

    result = result.contiguous(memory_format=memory_format)

    if not input.is_floating_point():
        result = result.round()

    return result
# We should be applying decompositions after all transformations
@register_decomposition(aten.is_same_size.default)
def is_same_size(a: Tensor, b: Tensor) -> bool:
    """Return True iff the two tensors have identical shapes."""
    return a.shape == b.shape
@register_decomposition([aten._reshape_alias, aten._unsafe_view])
@out_wrapper()
def _reshape_alias(x, shape, *args):
    """Both ops reduce to a plain view; extra args (e.g. strides) are ignored."""
    return aten.view(x, shape)
@register_decomposition([aten._unsafe_index])
def _unsafe_index(x, indices):
    """The "unsafe" variant decomposes to ordinary advanced indexing."""
    return aten.index(x, indices)
@register_decomposition([aten._unsafe_index_put])
def _unsafe_index_put(x, indices, value, accumulate=False):
    """The "unsafe" variant decomposes to ordinary index_put."""
    return aten.index_put(x, indices, value, accumulate)
@register_decomposition([aten._unsafe_masked_index])
def _unsafe_masked_index(x, mask, indices, fill):
    """Decomposition of ``aten._unsafe_masked_index``.

    Gathers ``x`` at ``indices`` where ``mask`` is True and yields ``fill``
    everywhere else.  Out-of-range indices are clamped into range; the
    clamped value is assumed never observed because those positions are
    masked out.

    Args:
        x: source tensor.
        mask: bool tensor selecting the valid gather positions.
        indices: per-dimension index tensors (entries may be None).
        fill: scalar used wherever ``mask`` is False.
    """
    for index in indices:
        if index is not None:
            torch._check(
                index.dtype in [torch.long, torch.int],
                lambda: "tensors used as indices must be long or int tensors",
            )

    torch._check(
        mask.dtype == torch.bool,
        lambda: "tensors used as masks must be bool tensors",
    )

    from torch.fx.experimental.symbolic_shapes import guard_or_false

    if guard_or_false(x.numel() == 0):
        meta_result = torch._meta_registrations.meta_index_Tensor(x, indices)
        return x.new_full(meta_result.shape, fill)

    # Clamp into range WITHOUT mutating the caller's ``indices`` list
    # (the original implementation assigned back into ``indices`` in place,
    # leaking the clamped tensors to the caller).
    clamped = [
        index if index is None else index.clamp(min=0, max=x.size(i) - 1)
        for i, index in enumerate(indices)
    ]
    return aten._unsafe_index(x, clamped).masked_fill(~mask, fill)
@register_decomposition([aten._unsafe_masked_index_put_accumulate])
def _unsafe_masked_index_put_accumulate(x, mask, indices, values):
    """Decomposition of ``aten._unsafe_masked_index_put_accumulate``.

    Accumulates ``values`` into ``x`` at ``indices`` where ``mask`` is True;
    masked-out values are zeroed so they contribute nothing.  Indices are
    clamped into the valid (negative-wrapping) range.
    """
    for index in indices:
        if index is not None:
            torch._check(
                index.dtype in [torch.long, torch.int],
                lambda: "tensors used as indices must be long or int tensors",
            )

    torch._check(
        mask.dtype == torch.bool,
        lambda: "tensors used as masks must be bool tensors",
    )

    if x.numel() == 0:
        return x.clone()

    # Clamp into range WITHOUT mutating the caller's ``indices`` list
    # (the original implementation assigned back into ``indices`` in place,
    # leaking the clamped tensors to the caller).
    clamped = [
        index if index is None else index.clamp(min=-x.size(i), max=x.size(i) - 1)
        for i, index in enumerate(indices)
    ]
    masked_value = values.masked_fill(~mask, 0)
    return aten._unsafe_index_put(x, clamped, masked_value, accumulate=True)
def _nll_loss_forward(
    self: Tensor,
    target: Tensor,
    weight: Optional[Tensor],
    reduction: int,
    ignore_index: int,
) -> tuple[Tensor, Tensor]:
    """Shared implementation behind nll_loss_forward / nll_loss2d_forward.

    Returns ``(loss, total_weight)``.  ``total_weight`` is the summed class
    weight (or plain count when ``weight`` is None) of the non-ignored
    targets; for the unreduced multi-dim case it is a zero scalar.
    """
    # self can be [N, C] or [C]
    # target can be [N] or []

    n_dims = self.dim()
    channel_dim = 1
    if n_dims < 2:
        channel_dim = 0

    if weight is not None:
        if n_dims > 1:
            # reshape weight to broadcast along the channel dimension
            shape = [
                1,
            ] * n_dims
            shape[channel_dim] = weight.shape[0]
            w = weight.view(shape)
        else:
            w = weight
        self = self * w
    # ignored targets are replaced by a valid index (0) so gather is safe;
    # their contribution is zeroed out afterwards
    safe_target = torch.where(target != ignore_index, target, 0)
    safe_target_ = safe_target.unsqueeze(channel_dim)
    # target can be [N, 1] or [1]

    result = -torch.gather(self, channel_dim, safe_target_).squeeze(channel_dim)

    result = torch.where(target != ignore_index, result, 0)

    if reduction == Reduction.NONE.value and n_dims > 1:
        total_weight = self.new_full((), 0.0)
        return result, total_weight

    if weight is not None:
        # pyrefly: ignore  [unbound-name]
        w = w.expand(self.shape)
        wsum = torch.gather(w, channel_dim, safe_target_).squeeze(channel_dim)
        wsum = torch.where(target != ignore_index, wsum, 0)
        total_weight = wsum.sum()
    else:
        total_weight = (target != ignore_index).sum().to(self)

    if reduction == Reduction.SUM.value:
        result = result.sum()
    elif reduction == Reduction.MEAN.value:
        result = result.sum() / total_weight

    return result, total_weight
@register_decomposition(aten.nll_loss_forward)
@out_wrapper("output", "total_weight")
def nll_loss_forward(
    self: Tensor,
    target: Tensor,
    weight: Optional[Tensor],
    reduction: int,
    ignore_index: int,
) -> tuple[Tensor, Tensor]:
    """Validate 1D/2D NLL loss inputs, then defer to the shared helper."""
    n_input_dims = self.dim()
    assert n_input_dims > 0 and n_input_dims <= 2, "input tensor should be 1D or 2D"
    assert target.dim() <= 1, (
        "0D or 1D target tensor expected, multi-target not supported"
    )

    # batch sizes must agree unless both inputs carry no batch dimension
    no_batch_dim = n_input_dims == 1 and target.dim() == 0
    assert no_batch_dim or (self.shape[0] == target.shape[0]), (
        f"size mismatch (got input: {self.shape}, target: {target.shape})"
    )

    n_classes = self.shape[-1]
    assert weight is None or (weight.dim() == 1 and weight.numel() == n_classes), (
        f"weight tensor should be defined either for all {n_classes} classes or no classes "
        f"but got weight tensor of shape: {weight.shape}"
    )

    return _nll_loss_forward(self, target, weight, reduction, ignore_index)
@register_decomposition(aten.nll_loss2d_forward)
@out_wrapper("output", "total_weight")
def nll_loss2d_forward(
    self: Tensor,
    target: Tensor,
    weight: Optional[Tensor],
    reduction: int,
    ignore_index: int,
) -> tuple[Tensor, Tensor]:
    """2d NLL loss: the shared helper already handles both layouts."""
    return _nll_loss_forward(self, target, weight, reduction, ignore_index)
# These are adapted from aten/src/ATen/native/UpSample.h, which is based on
# https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm
def _upsample_cubic_convolution1(x: Tensor, A: float) -> Tensor:
return ((A + 2) * x - (A + 3)) * x * x + 1
def _upsample_cubic_convolution2(x: Tensor, A: float) -> Tensor:
return ((A * x - 5 * A) * x + 8 * A) * x - 4 * A
def _upsample_get_cubic_coefficients(t: Tensor) -> TensorSequenceType:
    """Return the four cubic-convolution weights for fractional offset ``t``.

    On CPU the four kernel evaluations are batched into two stacked calls to
    cut down on dispatch overhead; elsewhere they are evaluated directly.
    """
    A = -0.75

    if t.device == torch.device("cpu"):
        outer = _upsample_cubic_convolution2(
            torch.stack([t + 1.0, 2.0 - t], dim=0), A
        )
        inner = _upsample_cubic_convolution1(torch.stack([t, 1.0 - t], dim=0), A)
        w0, w3 = torch.unbind(outer, dim=0)
        w1, w2 = torch.unbind(inner, dim=0)
        return w0, w1, w2, w3

    return (
        _upsample_cubic_convolution2(t + 1.0, A),
        _upsample_cubic_convolution1(t, A),
        _upsample_cubic_convolution1(1.0 - t, A),
        _upsample_cubic_convolution2(2.0 - t, A),
    )
def _upsample_cubic_interp1d(coeffs: TensorSequenceType, ts: Tensor) -> Tensor:
    """Blend four neighbouring values with cubic weights evaluated at ``ts``."""
    weights = _upsample_get_cubic_coefficients(ts)
    return _sum_tensors(v * w for v, w in zip(coeffs, weights))
# Need this instead of just sum() to keep mypy happy
def _sum_tensors(ts: Iterable[Tensor]) -> Tensor:
return reduce(torch.add, ts)
def _linspace_from_neg_one(
num_steps: int, align_corners: bool, dtype: torch.dtype, device: torch.device
):
if num_steps <= 1:
return torch.tensor(0, device=device, dtype=dtype)
a = ((num_steps - 1) / num_steps) if not align_corners else 1
return torch.linspace(-a, a, steps=num_steps, device=device, dtype=dtype)
def _make_base_grid_4d(theta: Tensor, h: int, w: int, align_corners: bool):
    """Build the (h, w, 3) base grid of homogeneous (x, y, 1) coordinates."""
    dtype, device = theta.dtype, theta.device

    # Padding + summation generates a single kernel, vs torch.stack which
    # generates three (one per stacked tensor).
    xs = _linspace_from_neg_one(w, align_corners, dtype, device).view(1, w, 1)
    ys = _linspace_from_neg_one(h, align_corners, dtype, device).view(h, 1, 1)
    ones = torch.ones((1, 1, 1), dtype=dtype, device=device)

    # Temporary hack: pad each component into its own slot of the last dim
    # and add; switch to torch.stack once #104480 is merged.
    xs = torch.nn.functional.pad(xs, pad=(0, 2), mode="constant", value=0)
    ys = torch.nn.functional.pad(ys, pad=(1, 1), mode="constant", value=0)
    ones = torch.nn.functional.pad(ones, pad=(2, 0), mode="constant", value=0)
    return xs + ys + ones
def _make_base_grid_5d(theta: Tensor, d: int, h: int, w: int, align_corners: bool):
    """Build the (d, h, w, 4) base grid of homogeneous (x, y, z, 1) coords."""
    dtype, device = theta.dtype, theta.device

    xs = _linspace_from_neg_one(w, align_corners, dtype, device).view(1, 1, w, 1)
    ys = _linspace_from_neg_one(h, align_corners, dtype, device).view(1, h, 1, 1)
    zs = _linspace_from_neg_one(d, align_corners, dtype, device).view(d, 1, 1, 1)
    ones = torch.ones((1, 1, 1, 1), dtype=dtype, device=device)

    # Temporary hack: pad each component into its own slot of the last dim
    # and add; switch to torch.stack once #104480 is merged.
    xs = torch.nn.functional.pad(xs, pad=(0, 3), mode="constant", value=0)
    ys = torch.nn.functional.pad(ys, pad=(1, 2), mode="constant", value=0)
    zs = torch.nn.functional.pad(zs, pad=(2, 1), mode="constant", value=0)
    ones = torch.nn.functional.pad(ones, pad=(3, 0), mode="constant", value=0)
    return xs + ys + zs + ones
def _affine_grid_generator_4d(theta: Tensor, size: list[int], align_corners: bool):
    """Apply a batch of 2x3 affine matrices to the spatial base grid."""
    n, _, h, w = size
    base_grid = _make_base_grid_4d(theta, h, w, align_corners=align_corners)
    # Manual batched matmul (faster than mm() here):
    # base_grid is (h, w, 3), theta is (n, 2, 3);
    # (h*w, 3, 1) * (n, 1, 3, 2) summed over dim -2 -> (n, h*w, 2)
    transformed = (base_grid.view(-1, 3, 1) * theta.mT.unsqueeze(1)).sum(-2)
    return transformed.view(n, h, w, 2)
def _affine_grid_generator_5d(theta: Tensor, size: list[int], align_corners: bool):
    """Apply a batch of 3x4 affine matrices to the volumetric base grid."""
    n, _, d, h, w = size
    base_grid = _make_base_grid_5d(theta, d, h, w, align_corners=align_corners)
    # Manual batched matmul (faster than mm() here):
    # base_grid is (d, h, w, 4), theta is (n, 3, 4);
    # (d*h*w, 4, 1) * (n, 1, 4, 3) summed over dim -2 -> (n, d*h*w, 3)
    transformed = (base_grid.view(-1, 4, 1) * theta.mT.unsqueeze(1)).sum(-2)
    return transformed.view(n, d, h, w, 3)
@register_decomposition(aten.affine_grid_generator)
@out_wrapper()
@pw_cast_for_opmath
def affine_grid_generator(theta: Tensor, size: list[int], align_corners: bool):
    """Dispatch to the 4d (spatial) or 5d (volumetric) grid generator."""
    torch._check(
        len(size) in (4, 5),
        lambda: "affine_grid_generator needs 4d (spatial) or 5d (volumetric) inputs.",
    )

    if len(size) == 4:
        return _affine_grid_generator_4d(theta, size, align_corners=align_corners)
    return _affine_grid_generator_5d(theta, size, align_corners=align_corners)
def _grid_sampler_2d(
    a: Tensor,
    grid: Tensor,
    interpolation_mode: int = 0,
    padding_mode: int = 0,
    align_corners: bool = False,
    _expand_grid: bool = True,
) -> Tensor:
    """Decomposition of grid_sampler_2d.

    Samples ``a`` (N, C, iH, iW) at the normalized coordinates in ``grid``
    (N, oH, oW, 2).  interpolation_mode: 0=bilinear, 1=nearest, 2=bicubic;
    padding_mode: 0=zeros, 1=border, 2=reflection.  ``_expand_grid`` is a
    perf-only knob explained below.
    """
    # This method is a copy of grid_sampler_2d implementation and introduced with additional arg _expand_grid to
    # optionally expand the input grid for performance reasons.
    # Experimenting locally it was found that compiled CUDA code is accelerated by ~5x
    # and CPU code by ~2x on bicubic mode, if we expand the grid from (N, H, W, 2) into (N, C, H, W, 2)
    # However, this leads to a slowdown around ~0.8x on CPU bilinear mode, channels first.
    # Thus we apply this hack to not expand the grid for this case.

    torch._check(
        interpolation_mode in (0, 1, 2),
        lambda: f"Invalid interpolation mode {interpolation_mode}",
    )
    torch._check(
        padding_mode in (0, 1, 2), lambda: f"Invalid padding mode {padding_mode}"
    )

    def unnormalize(coords: Tensor, size: int) -> Tensor:
        # Rescale coordinates from [-1, 1] to:
        #   [0, size - 1] if align_corners is True
        #   [-.5, size -.5] if align_corners is False
        mul = (size * 0.5 - 0.5) if align_corners else (size * 0.5)
        ofs = size * 0.5 - 0.5
        return coords * mul + ofs

    # Reflects coordinates until they fall between low and high (inclusive).
    # The bounds are passed as twice their value so that half-integer values
    # can be represented as ints.
    def reflect_coordinates(coords: Tensor, twice_low: int, twice_high: int) -> Tensor:
        if twice_low == twice_high:
            return torch.zeros_like(coords)
        coords_min = twice_low / 2
        coords_span = (twice_high - twice_low) / 2
        coords2 = (coords - coords_min).abs()
        extra = torch.fmod(coords2, coords_span)
        flips = (coords2 / coords_span).floor().to(dtype=torch.int8)
        # even number of flips -> original direction, odd -> mirrored
        return torch.where(
            flips & 1 == 0, extra + coords_min, coords_span + coords_min - extra
        )

    def compute_coordinates(coords: Tensor, size: int) -> Tensor:
        if padding_mode == 0:  # Zero
            return coords
        elif padding_mode == 1:  # Borders
            return torch.clamp(coords, 0, size - 1)
        else:  # padding_mode == 2, Reflection
            if align_corners:
                coords_reflected = reflect_coordinates(coords, 0, 2 * (size - 1))
            else:
                coords_reflected = reflect_coordinates(coords, -1, 2 * size - 1)
            return torch.clamp(coords_reflected, 0, size - 1)

    def compute_source_index(coords: Tensor, size: int) -> Tensor:
        # normalized -> pixel coordinate, with padding applied
        coords_un = unnormalize(coords, size)
        return compute_coordinates(coords_un, size)

    N, C, iH, iW = a.shape
    _, oH, oW, two = grid.shape
    assert two == 2

    if _expand_grid:
        # Let's expand grid to [N, C, oH, oW, 2]
        # This allows to generate a single triton cuda kernel instead of two kernels.
        # Two kernels are due source indices, weights have shape (N, 1, oH, oW), xnumel=N*oH*oW
        # and output has shape (N, C, oH, oW), xnumel=N*C*oH*oW
        # Expanding grid to (N, C, oH, oW, two) unifies xnumel to N*C*oH*oW
        grid = grid.view(N, 1, oH, oW, two).expand(N, C, oH, oW, 2)

    def in_bounds_cond(xs: Tensor, ys: Tensor) -> Tensor:
        return torch.logical_and(
            0 <= xs, torch.logical_and(xs < iW, torch.logical_and(0 <= ys, ys < iH))
        )

    N_idx = torch.arange(N, device=a.device).view(N, 1, 1, 1)
    C_idx = torch.arange(C, device=a.device).view(1, C, 1, 1)

    def clip(xs: Tensor, ys: Tensor, ws: Tensor) -> TensorSequenceType:
        cond = in_bounds_cond(xs, ys)
        # To clip to inside valid coordinates, we map the coordinates
        # to (x, y) = (0, 0) and also set the weight to 0
        # We also change the shape of the tensor to the appropriate one for
        # broadcasting with N_idx, C_idx for the purposes of advanced indexing
        c = C if _expand_grid else 1
        return tuple(
            torch.where(cond, t, 0).view(N, c, oH, oW)
            for t in (xs.to(dtype=torch.int64), ys.to(dtype=torch.int64), ws)
        )

    def get_summand(ix: Tensor, iy: Tensor, w) -> Tensor:
        # Perform clipping, index into input tensor and multiply by weight
        idx_x, idx_y, w_ = clip(ix, iy, w)
        return a[N_idx, C_idx, idx_y, idx_x] * w_

    x = grid[..., 0]
    y = grid[..., 1]

    if interpolation_mode == 0:  # Bilinear
        ix = compute_source_index(x, iW)
        iy = compute_source_index(y, iH)

        # four neighbouring corners: north-west/east, south-west/east
        ix_nw, iy_nw = ix.floor(), iy.floor()
        ix_ne, iy_ne = ix_nw + 1, iy_nw
        ix_sw, iy_sw = ix_nw, iy_nw + 1
        ix_se, iy_se = ix_ne, iy_sw

        # area-weighted blend of the four corners
        w_nw = (ix_se - ix) * (iy_se - iy)
        w_ne = (ix - ix_sw) * (iy_sw - iy)
        w_sw = (ix_ne - ix) * (iy - iy_ne)
        w_se = (ix - ix_nw) * (iy - iy_nw)

        return _sum_tensors(
            get_summand(ix, iy, w)
            for (ix, iy, w) in (
                (ix_nw, iy_nw, w_nw),
                (ix_ne, iy_ne, w_ne),
                (ix_sw, iy_sw, w_sw),
                (ix_se, iy_se, w_se),
            )
        )
    elif interpolation_mode == 1:  # Nearest
        ix = compute_source_index(x, iW)
        iy = compute_source_index(y, iH)

        ix_nearest = ix.round()
        iy_nearest = iy.round()

        return get_summand(ix_nearest, iy_nearest, 1)
    else:  # interpolation_mode == 2, Bicubic
        ix = unnormalize(x, iW)
        iy = unnormalize(y, iH)

        ix_nw = ix.floor()
        iy_nw = iy.floor()

        tx = ix - ix_nw
        ty = iy - iy_nw

        if not _expand_grid:
            tx = tx.unsqueeze(1)
            ty = ty.unsqueeze(1)

        def get_value_bounded(ix: Tensor, iy: Tensor) -> Tensor:
            x = compute_coordinates(ix, iW)
            y = compute_coordinates(iy, iH)
            return get_summand(x, y, 1)

        def get_coeff(ofs: int) -> Tensor:
            # interpolate one row of the 4x4 neighbourhood along x
            iy_ofs = iy_nw + (ofs - 1)
            cs = (
                get_value_bounded(ix_nw - 1, iy_ofs),
                get_value_bounded(ix_nw, iy_ofs),
                get_value_bounded(ix_nw + 1, iy_ofs),
                get_value_bounded(ix_nw + 2, iy_ofs),
            )
            return _upsample_cubic_interp1d(cs, tx)

        coeffs = tuple(get_coeff(ofs) for ofs in range(4))
        # then interpolate the four row results along y
        return _upsample_cubic_interp1d(coeffs, ty)
@register_decomposition(aten.grid_sampler_2d)
@out_wrapper()
@pw_cast_for_opmath
def grid_sampler_2d(
    a: Tensor,
    grid: Tensor,
    interpolation_mode: int = 0,
    padding_mode: int = 0,
    align_corners: bool = False,
) -> Tensor:
    """Public decomposition entry point; the private helper carries the
    extra _expand_grid perf knob at its default value."""
    return _grid_sampler_2d(
        a,
        grid,
        interpolation_mode,
        padding_mode,
        align_corners,
    )
@register_decomposition(aten.mv)
@out_wrapper(exact_dtype=True)
@pw_cast_for_opmath
def mv(self, vec):
    """Matrix-vector product: broadcast-multiply rows by vec, reduce dim 1."""
    torch._check(
        self.dim() == 2 and vec.dim() == 1,
        lambda: f"matrix @ vector expected, got {self.dim()}, {vec.dim()}",
    )
    torch._check(
        self.size(1) == vec.size(0),
        lambda: f"size mismatch, got input ({self.size(0)}x{self.size(1)}), vec ({vec.size(0)})",
    )
    return torch.sum(self * vec, dim=1)
@register_decomposition(aten.binary_cross_entropy_with_logits)
@out_wrapper()
def binary_cross_entropy_with_logits(
    self, target, weight=None, pos_weight=None, reduction=Reduction.MEAN.value
):
    """BCE-with-logits decomposition using the numerically stable
    log-sigmoid form, with optional positive-class and per-element weights."""
    neg_term = (1 - target) * self
    if pos_weight is not None:
        # scale the positive-class log-likelihood term
        log_weight = (pos_weight - 1) * target + 1
        loss = neg_term - (log_weight * F.logsigmoid(self))
    else:
        loss = neg_term - F.logsigmoid(self)

    if weight is not None:
        loss = loss * weight

    return apply_loss_reduction(loss, reduction)
def should_fold(tensor1: torch.Tensor, tensor2: torch.Tensor, is_out: bool) -> bool:
    """Decide whether matmul may fold the >=3d operand's batch dims into its
    leading matrix dim and use mm instead of bmm.

    Mirrors the eager heuristic; guard_or_false presumably resolves
    undecidable symbolic comparisons to the safe False answer — see
    torch.fx.experimental.symbolic_shapes.
    """
    # For comments of the logic of this function see eager in /native/LinearAlgebra.cpp

    t1, t2 = (tensor1, tensor2) if tensor1.ndim >= tensor2.ndim else (tensor2, tensor1)

    from torch.fx.experimental.symbolic_shapes import guard_or_false

    if not (t1.ndim >= 3 and t2.ndim <= 2):
        return False
    if t2.requires_grad and not is_out:
        return True
    if tensor1.ndim == 2:
        return False
    if guard_or_false(t1.numel() == 0):
        return True

    t1_shape = t1.shape
    t1_stride = t1.stride()

    # Check the contiguous, we can skip the dim with size of 1
    # as aten: https://github.com/pytorch/pytorch/blob/e201460f8aa1510b4c4686627d57b69756c4b916/aten/src/ATen/TensorGeometry.cpp#L17
    expected_stride = [1]
    for size in reversed(t1_shape[1:]):
        expected_stride.append(size * expected_stride[-1])
    return all(
        guard_or_false(size == 1) or guard_or_false(left == right)
        for left, right, size in zip(
            t1_stride, list(reversed(expected_stride)), t1_shape
        )
    )
@aten.matmul.default.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten.matmul.out.py_impl(DispatchKey.CompositeImplicitAutograd)
@out_wrapper(pass_is_out=True)
def matmul(tensor1, tensor2, *, is_out=False):
    """Decomposition of aten.matmul covering every dim combination:
    dot (1x1), mv (2x1), mm (2x2 and the folded >=3d case), and a
    broadcast-then-bmm fallback for general batched inputs.
    """
    from torch.fx.experimental.symbolic_shapes import guard_or_false, guard_or_true

    dim_tensor1 = tensor1.dim()
    dim_tensor2 = tensor2.dim()
    assert dim_tensor1 != 0 and dim_tensor2 != 0
    if dim_tensor1 == 1 and dim_tensor2 == 1:
        return torch.dot(tensor1, tensor2)
    elif dim_tensor1 == 2 and dim_tensor2 == 1:
        return torch.mv(tensor1, tensor2)
    elif dim_tensor1 == 1 and dim_tensor2 == 2:
        # treat the vector as a 1-row matrix, then drop the row dim again
        return torch.squeeze(torch.mm(torch.unsqueeze(tensor1, 0), tensor2), 0)
    elif dim_tensor1 == 2 and dim_tensor2 == 2:
        return torch.mm(tensor1, tensor2)
    elif should_fold(tensor1, tensor2, is_out):
        # dim_tensor1 >=3 && (dim_tensor2 == 1 || dim_tensor2 == 2) ||
        # dim_tensor2 >=3 && (dim_tensor1 == 1 || dim_tensor1 == 2)
        # and some condition on the strides is fulfilled

        # optimization: use mm instead of bmm by folding the batch of the larger tensor
        # into its leading matrix dimension
        transpose = dim_tensor2 > dim_tensor1
        t1 = tensor2.mT if transpose else tensor1
        t2 = (
            tensor2 if not transpose else (tensor1.t() if dim_tensor1 == 2 else tensor1)
        )
        # Invariant: t1.dim() >= 3 && (t2.dim() == 1 || t2.dim() == 2)
        #            and t1 and t2 are matmul-compatible

        # Why not t1.view(-1, sizes_1[-1])?
        # If the last dim is 0, then view(-1, 0) won't work because the -1 becomes ambiguous.
        # This can happen in e.g. [3, 5, 0] @ [0, 0].

        sizes_1 = t1.shape
        output_shape = list(sizes_1[:-1])
        folded_dim1 = reduce(operator.mul, output_shape)

        # Readjust output_shape if we are multiplying by a matrix
        t2_is_matrix = t2.dim() == 2
        if t2_is_matrix:
            output_shape.append(t2.shape[1])

        # This will almost always be a view.
        # It may not be a view if t2->requires_grad(). See should_fold in aten/ for an explanation
        t1_folded = t1.reshape(folded_dim1, sizes_1[-1])
        if t2_is_matrix:
            # This copies if we perform a 2D @ 3D and the first tensor requires_grad
            # See should_fold native/LinearAlgebra.cpp for why.
            output = torch.ops.aten._unsafe_view(t1_folded.mm(t2), output_shape)
            return output.mT.contiguous() if transpose else output
        else:
            return torch.ops.aten._unsafe_view(t1_folded.mv(t2), output_shape)

    elif dim_tensor1 >= 1 and dim_tensor2 >= 1:
        # We are multiplying b1 x n x m1 by x2 x m2 x p (where b1 can be a list);
        # we track m1 vs m2 separately even though they must match for nicer error messages
        n = tensor1.size(-2) if dim_tensor1 > 1 else 1
        m1 = tensor1.size(-1)
        batch_tensor1 = tensor1.shape[:-2]
        m2 = tensor2.size(-2) if dim_tensor2 > 1 else tensor2.size(-1)
        p = tensor2.size(-1) if dim_tensor2 > 1 else 1

        batch_tensor2: list[int] = []
        # TODO: handling of slice
        for i in range(dim_tensor2 - 2):
            batch_tensor2.append(tensor2.size(i))

        # Same optimization for the gradients as that in should_fold
        # If we're going to broadcast, we force it to go through the should_fold branch
        if (
            dim_tensor1 == 3
            and dim_tensor2 == 3
            and guard_or_true(batch_tensor1[0] != batch_tensor2[0])
        ):
            if guard_or_false(batch_tensor1[0] == 1) and tensor1.requires_grad:
                return matmul(tensor1.squeeze(0), tensor2)
            if guard_or_false(batch_tensor2[0] == 1) and tensor2.requires_grad:
                return matmul(tensor1, tensor2.squeeze(0))

        # expand the batch portion (i.e. cut off matrix dimensions and expand rest)
        expand_batch_portion = list(
            torch.broadcast_shapes(batch_tensor1, batch_tensor2)
        )

        tensor1_expand_size = expand_batch_portion + [n, m1]

        expand_batch_product = prod(expand_batch_portion)

        # HACK: We need reshape with symint support
        tensor1_expanded = tensor1.expand(tensor1_expand_size).reshape(
            expand_batch_product, n, m1
        )

        vector_rhs = dim_tensor2 == 1
        if vector_rhs:
            tensor2_expand_size = expand_batch_portion + [m2]
            tensor2_expanded = (
                tensor2.expand(tensor2_expand_size)
                .reshape(expand_batch_product, m2)
                .unsqueeze(2)
            )
        else:
            tensor2_expand_size = expand_batch_portion + [m2, p]
            tensor2_expanded = tensor2.expand(tensor2_expand_size).reshape(
                expand_batch_product, m2, p
            )

        output_shape = expand_batch_portion
        if dim_tensor1 > 1:
            output_shape.append(n)

        if dim_tensor2 > 1:
            output_shape.append(p)

        if vector_rhs:
            return tensor1_expanded.bmm(tensor2_expanded).squeeze(-1).view(output_shape)
        else:
            return tensor1_expanded.bmm(tensor2_expanded).view(output_shape)
    else:
        torch._check(False, lambda: "both arguments to matmul need to be at least 1D")
@register_decomposition([aten.upsample_bicubic2d.default, aten.upsample_bicubic2d.out])
@aten.upsample_bicubic2d.default.py_impl(DispatchKey.Autograd)
@out_wrapper()
@pw_cast_for_opmath
def upsample_bicubic2d_default(
    input: Tensor,
    output_size: tuple[int, int],
    align_corners: bool,
    scale_h: Optional[float] = None,
    scale_w: Optional[float] = None,
) -> Tensor:
    """Bicubic 2d upsampling over the last two dims.

    Interpolates separably (rows along x, then the four row results along
    y) from a 4x4 neighbourhood per output pixel.  uint8 inputs take a
    fixed-point integer path that mirrors the eager kernel's rounding.
    """
    # get dimensions of original image
    _, _, in_h, in_w = input.shape

    # Calculate horizontal and vertical scaling factor
    h_scale_factor = _compute_scale(in_h, output_size[0], align_corners, scale_h)
    w_scale_factor = _compute_scale(in_w, output_size[1], align_corners, scale_w)

    _, dtype = utils.elementwise_dtypes(
        input, type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
    )

    # We have to create arange with int64 dtype and use .to in order to avoid
    # additional kernels creation in inductor and get a perf slowdown
    i = torch.arange(output_size[0], device=input.device).to(dtype=dtype)
    j = torch.arange(output_size[1], device=input.device).to(dtype=dtype)

    x_float = _compute_source_index(w_scale_factor, j, align_corners)
    y_float = _compute_source_index(h_scale_factor, i, align_corners)
    y_float = y_float.unsqueeze(-1)

    x = x_float.floor()
    y = y_float.floor()

    # We should also clamp xscale/yscale
    # See guard_index_and_lambda in UpSample.h
    yscale = (y_float - y).clamp(0.0, 1.0)
    xscale = (x_float - x).clamp(0.0, 1.0)
    x = x.to(torch.int64)
    y = y.to(torch.int64)

    # the 4x4 neighbourhood offsets around the floor index
    iys_ofs = (y - 1, y, y + 1, y + 2)
    ixs_ofs = (x - 1, x, x + 1, x + 2)

    weights_x = _upsample_get_cubic_coefficients(xscale)
    weights_y = _upsample_get_cubic_coefficients(yscale)

    weights_precision_x, weights_precision_y = None, None
    if input.dtype == torch.uint8:
        # quantize the float weights to int16 fixed point (round half away
        # from zero via the signed 0.5 bias)
        weights_precision_x = _compute_weight_precision(weights_x)
        weights_precision_y = _compute_weight_precision(weights_y)

        weights_x = [
            (w * (1 << weights_precision_x) + torch.sign(w) * 0.5).to(torch.int16)
            for w in weights_x
        ]
        weights_y = [
            (w * (1 << weights_precision_y) + torch.sign(w) * 0.5).to(torch.int16)
            for w in weights_y
        ]

    def load_bounded(ys, xs):
        # clamp neighbour coordinates to the image border (replicate)
        y_idx = torch.clamp(ys, 0, in_h - 1)
        x_idx = torch.clamp(xs, 0, in_w - 1)
        v = aten._unsafe_index(input, [None, None, y_idx, x_idx])
        return v

    def get_x_interp(y):
        # interpolate one neighbourhood row along x
        src_x = tuple(load_bounded(y, x_ofs) for x_ofs in ixs_ofs)
        if input.dtype == torch.uint8:
            assert weights_precision_x is not None
            return _sum_tensors_uint8(src_x, weights_x, weights_precision_x)
        return _sum_tensors(c1 * c2 for (c1, c2) in zip(src_x, weights_x))

    src_y = tuple(get_x_interp(y_ofs) for y_ofs in iys_ofs)
    if input.dtype == torch.uint8:
        assert weights_precision_y is not None
        result = _sum_tensors_uint8(src_y, weights_y, weights_precision_y)
    else:
        result = _sum_tensors(c1 * c2 for (c1, c2) in zip(src_y, weights_y))

    # convert output to correct memory format, if necessary
    memory_format = utils.suggest_memory_format(input)
    result = result.contiguous(memory_format=memory_format)
    return result
@register_decomposition(aten.upsample_bicubic2d.vec)
@aten.upsample_bicubic2d.vec.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten.upsample_bicubic2d.vec.py_impl(DispatchKey.Autograd)
@out_wrapper()
@pw_cast_for_opmath
def upsample_bicubic2d_vec(
    a: Tensor,
    output_size: Optional[tuple[int, int]],
    align_corners: bool,
    scale_factors: Optional[tuple[float, float]] = None,
) -> Tensor:
    """Resolve the .vec overload (exactly one of size/scales given) and
    forward to the default bicubic implementation."""
    torch._check(
        bool(output_size) + bool(scale_factors) == 1,
        lambda: "Must specify exactly one of output_size and scale_factors.",
    )
    if output_size is None:
        assert scale_factors is not None
        computed = tuple(
            sym_int(sym_float(dim) * factor)
            for dim, factor in zip(a.shape[2:], scale_factors)
        )
        output_size = cast(tuple[int, int], computed)
    if scale_factors:
        scale_h, scale_w = scale_factors
    else:
        scale_h = scale_w = None
    return upsample_bicubic2d_default(a, output_size, align_corners, scale_h, scale_w)
@register_decomposition(aten.reflection_pad1d)
@register_decomposition(aten.reflection_pad2d)
@register_decomposition(aten.reflection_pad3d)
@pw_cast_for_opmath
@out_wrapper()
def _reflection_pad(a: Tensor, padding: tuple[int, ...]) -> Tensor:
    """Reflection padding (1d/2d/3d) implemented via index gathering."""

    def reflect_indices(left, middle, right):
        # positions -left .. middle+right-1, folded back into [0, middle)
        # by mirroring around both edges without repeating the border element
        positions = torch.arange(-left, middle + right, device=a.device)
        return middle - 1 - (middle - 1 - positions.abs()).abs()

    return _reflection_or_replication_pad(a, padding, reflect_indices)
@register_decomposition(aten.replication_pad1d)
@register_decomposition(aten.replication_pad2d)
@register_decomposition(aten.replication_pad3d)
@pw_cast_for_opmath
@out_wrapper()
def _replication_pad(a: Tensor, padding: tuple[int, ...]) -> Tensor:
    """Replication (edge) padding (1d/2d/3d) implemented via index gathering."""

    def edge_indices(left, middle, right):
        # positions -left .. middle+right-1, clamped to the valid index range
        positions = torch.arange(-left, middle + right, device=a.device)
        return torch.clamp(positions, 0, middle - 1)

    return _reflection_or_replication_pad(a, padding, edge_indices)
def _reflection_or_replication_pad(
a: Tensor,
padding: tuple[int, ...],
idx_fn: Callable[[int, int, int], Tensor],
) -> Tensor:
dim = len(padding) // 2
torch._check(
a.dim() in (dim + 1, dim + 2),
lambda: f"reflection_pad{dim}d requires {dim + 1}D or {dim + 2}D input",
)
inp_shape = a.shape[-dim:]
nc_dim = a.dim() - dim
padding_left = [padding[2 * (dim - 1 - i)] for i in range(dim)]
padding_right = [padding[2 * (dim - 1 - i) + 1] for i in range(dim)]
result = a
for i in range(dim):
idx: list[Any] = [None] * result.dim()
idx[i + nc_dim] = idx_fn(padding_left[i], inp_shape[i], padding_right[i])
result = aten._unsafe_index(result, idx)
# convert output to correct memory format, if necessary
memory_format = utils.suggest_memory_format(result)
result = result.contiguous(memory_format=memory_format)
return result
@register_decomposition(aten.reflection_pad1d_backward)
@register_decomposition(aten.reflection_pad2d_backward)
@register_decomposition(aten.reflection_pad3d_backward)
@out_wrapper("grad_input")
def _reflection_pad_backward(grad_output, x, padding):
dim = len(padding) // 2
dhw = [h - 1 for h in x.shape[-dim:]]
padding_left = [padding[2 * (dim - 1 - i)] for i in range(dim)]
padding_right = [padding[2 * (dim - 1 - i) + 1] for i in range(dim)]
indices = []
for i in range(x.ndim):
view_shape = [1] * x.ndim
view_shape[i] = -1
indices.append(torch.arange(x.shape[i], device=x.device).view(view_shape))
b = indices[:-dim]
xyz = indices[-dim:]
def index_range_condition(index_range):
i, lb, ub = index_range
return torch.logical_and(i >= lb, i <= ub)
# Areas after reflection:
#
# top-left | top | top-right
# -----------------------------------------
# left | center | right
# -----------------------------------------
# bottom-left | bottom | bottom-right
#
# The center area is the original matrix. Other areas are reflections.
center = [xyz[i] + padding_left[i] for i in range(dim)]
left_reflect = [padding_left[i] - xyz[i] for i in range(dim)]
right_reflect = [2 * dhw[i] + padding_left[i] - xyz[i] for i in range(dim)]
# Accumulate gradients from different areas
# If some of the padding is negative, center load is not always valid
range_c = [
(center[i], 0, dhw[i] + padding_left[i] + padding_right[i]) for i in range(dim)
]
cond = functools.reduce(
aten.logical_and, [index_range_condition(range_c[i]) for i in range(dim)]
)
grad = aten._unsafe_masked_index(grad_output, cond, b + center, 0.0)
def accumulate(grad, out, index_ranges):
# If the upper bound is less than the lower bound, we can get rid of one accumulation.
# This happens when the padding size is zero.
for i in range(dim):
upper_less_than_lower = index_ranges[i][2] < index_ranges[i][1]
if isinstance(upper_less_than_lower, bool) and upper_less_than_lower:
return grad
cond = functools.reduce(
aten.logical_and,
[index_range_condition(index_range) for index_range in index_ranges],
)
g = aten._unsafe_masked_index(grad_output, cond, b + out, 0.0)
return grad + g
for area in itertools.product(*[[-1, 0, 1] for _ in range(dim)]):
if area == tuple([0] * dim):
# center, this is already done.
continue
outs = []
index_ranges = []
for i in range(dim):
if area[i] == 0:
out = center[i]
index_range = range_c[i]
elif area[i] == -1:
out = left_reflect[i]
index_range = (xyz[i], 1, padding_left[i])
elif area[i] == 1:
out = right_reflect[i]
index_range = (xyz[i], dhw[i] - padding_right[i], dhw[i] - 1)
outs.append(out) # type: ignore[possibly-undefined]
index_ranges.append(index_range) # type: ignore[possibly-undefined]
grad = accumulate(grad, outs, index_ranges)
return grad
@register_decomposition(aten.aminmax)
@out_wrapper("min", "max")
def aminmax(self, *, dim=None, keepdim=False):
# pyrefly: ignore [bad-argument-type]
amin = torch.amin(self, dim=dim, keepdim=keepdim)
# pyrefly: ignore [bad-argument-type]
amax = torch.amax(self, dim=dim, keepdim=keepdim)
return amin, amax
@register_decomposition(aten.nansum)
@out_wrapper()
def nansum(self, dim=None, keepdim=False, *, dtype=None):
return aten.sum(torch.where(torch.isnan(self), 0, self), dim, keepdim, dtype=dtype)
@register_decomposition([aten.arange.default, aten.arange.out])
@out_wrapper()
def arange_default(
end: NumberType,
*,
dtype: Optional[torch.dtype] = None,
layout: torch.layout = torch.strided,
device: Optional[torch.device] = None,
pin_memory: bool = False,
):
return aten.arange.start_step(
0, end, 1, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
)
@register_decomposition([aten.arange.start])
def arange_start(
start: NumberType,
end: NumberType,
*,
dtype: Optional[torch.dtype] = None,
layout: torch.layout = torch.strided,
device: Optional[torch.device] = None,
pin_memory: bool = False,
):
return aten.arange.start_step(
start, end, 1, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
)
@register_decomposition(out_dtype)
def out_dtype_decomp(*args, **kwargs):
from torch._higher_order_ops.out_dtype import out_dtype_dense
return out_dtype_dense(*args, **kwargs)
@register_decomposition(aten.multi_margin_loss)
@aten.multi_margin_loss.default.py_impl(DispatchKey.Autograd)
@out_wrapper()
def multi_margin_loss(
input: Tensor,
target: Tensor,
p: NumberType = 1,
margin: NumberType = 1,
weight: Optional[Tensor] = None,
reduction: int = Reduction.MEAN.value,
) -> Tensor:
input = torch.atleast_2d(input)
target = torch.atleast_1d(target)
nframe = input.shape[0]
dim = input.shape[1]
torch._check(p == 1 or p == 2, lambda: "only p == 1 and p == 2 supported")
torch._check(
input.ndim == 2 and dim != 0,
lambda: f"Expected non-empty vector or matrix with optional 0-dim batch size, but got: {input.shape}",
)
torch._check(
target.ndim == 1 and target.numel() == nframe,
lambda: f"inconsistent target size, expected {nframe} but got {target.shape}",
)
if weight is not None:
weight = torch.atleast_1d(weight)
torch._check(
weight.ndim == 1 and weight.numel() == dim, # type: ignore[union-attr]
lambda: f"inconsistent weight size, expected {dim} but got {weight.shape}", # type: ignore[union-attr]
)
target = target.unsqueeze(1)
u = torch.gather(input, dim=1, index=target)
z = margin - u + input
z = z.clamp_min(0)
z = z if p == 1 else z * z
if weight is not None:
z = z * weight[target]
idx = torch.arange(dim, device=input.device)
z = torch.where(idx != target, z, 0)
if reduction == Reduction.MEAN.value:
return z.mean()
elif reduction == Reduction.SUM.value:
return z.sum() / z.shape[1]
else:
return z.mean(dim=1)
@register_decomposition(aten.multilabel_margin_loss_forward)
@aten.multilabel_margin_loss_forward.default.py_impl(DispatchKey.Autograd)
@out_wrapper("output", "is_target")
def multilabel_margin_loss_forward(
input: Tensor,
target: Tensor,
reduction: int,
) -> tuple[Tensor, Tensor]:
orig_input_shape = input.shape
orig_target_shape = target.shape
input = torch.atleast_2d(input)
target = torch.atleast_2d(target)
dim = input.shape[1]
torch._check(
len(orig_input_shape) <= 2 and dim != 0,
lambda: f"Expected non-empty vector or matrix with optional 0-dim batch size, but got: {orig_input_shape}",
)
torch._check(
len(orig_target_shape) <= 2 and orig_target_shape == orig_input_shape,
lambda: f"inconsistent target size: {orig_target_shape} for input of size: {orig_input_shape}",
)
# ignores labels after the first -1, detects when -1 is not present
idx = torch.arange(dim, device=target.device)
is_end = target == -1
end_idx = torch.amin(torch.where(is_end, idx, dim), dim=-1, keepdim=True)
# target indices
target_mask = idx < end_idx
# masks target to be able to use gather, which doesn't allow -1
tidx0 = torch.where(target_mask, target, 0)
u = torch.gather(input, dim=-1, index=tidx0)
# is_target
tidx1 = torch.where(target_mask, target, -1)
is_target = torch.any(idx == tidx1.unsqueeze(dim=-1), dim=1)
# loss
z = 1.0 - u.T.unsqueeze(dim=-1) + input
z = z.clamp_min(0)
z = z / dim
# masks loss
z = torch.where(is_target, 0, z)
# reduction
if reduction == Reduction.MEAN.value:
z = z.sum(dim=(0, -1)).mean()
elif reduction == Reduction.SUM.value:
z = z.sum()
else:
z = z.sum(dim=(0, -1))
# result
is_target = is_target.to(input.dtype).reshape(orig_target_shape)
return z, is_target
# scaled_dot_product_attention used to be decomposed in pre-autograd, given that
# it calls _scaled_dot_product_attention_math and
# _scaled_dot_product_attention_math only has a CompositeImplicitAutograd
# kernel. As a result it's decomposed into ops with finer granularity.
# However recent PRs (#103826 #105131 #115913) added new logic in
# scaled_dot_product_attention and now it calls
# _scaled_dot_product_flash_attention_for_cpu in export path. This results
# in _scaled_dot_product_flash_attention_for_cpu showing up in export result.
# This decomposition ensures scaled_dot_product_attention is still decomposed
# the same way as before, i.e., going through
# _scaled_dot_product_attention_math. Notice that this decomp rule should be
# excluded by inductor.
@register_decomposition(aten._scaled_dot_product_flash_attention_for_cpu.default)
def scaled_dot_product_flash_attention_for_cpu(
query: Tensor,
key: Tensor,
value: Tensor,
dropout_p: float = 0.0,
is_causal: bool = False,
*,
attn_mask: Optional[Tensor] = None,
scale: Optional[float] = None,
) -> tuple[Tensor, Tensor]:
torch._check(
torch.is_floating_point(query),
lambda: f"query must be FP32, FP64, BF16, FP16 but got {query.dtype}",
)
torch._check(
query.dim() == 4 and key.dim() == 4 and value.dim() == 4,
lambda: f"q, k, v must be a 4 dimensional tensor, got {query.dim()}, {key.dim()}, {value.dim()}",
)
torch._check(
dropout_p == 0.0, lambda: f"dropout probability must be zero, got {dropout_p}"
)
torch._check(
query.shape[3] == value.shape[3] and key.shape[3] == value.shape[3],
lambda: "q, k, v should have the same head size",
)
output, attn = aten._scaled_dot_product_attention_math.default(
query,
key,
value,
attn_mask=attn_mask,
dropout_p=dropout_p,
is_causal=is_causal,
dropout_mask=None,
scale=scale,
enable_gqa=query.size(1) != key.size(1),
)
# Why this change?
# In pre-dispatch export scaled_dot_product_attention is executed via
# * flash_attention.
# flash_attention allocates output tensor as (N, H, L, E) (see PR #134656)
# assume x: [N, H, L, E] is the output sdpa
# In MHA code, this output is then permuted via (2, 0, 1, 3) to get
# (L, N, H, E) dim tensor
# x = x.permute(2, 0, 1, 3).contiguous() and the viewed via
# x = x.view(L * N, H * E)
# During pre autograd dispatch call to contiguous is not traced because
# flash_attention output after the x.permute is already contiguous
# on which the view is valid
# However, during 2nd stage export, post-dispatch, we run _match variant
# instead of flash* to get the decomposition. _match variant returns
# x: [N, H, L, E] applying x.permute(2, 0, 1, 3) returns
# x: [L, N, H, E] and without converting this to contiguous tensor
# subsequent view is not valid and the export fails
# solution is to maintain the return tensor view from the decomp to be
# exactly same as *flash* variant.
# Really the invariant you want to maintain is:
# pre-dispatch op-output and its decomposed representation must
# return tensor with same view and dims
output = (
output.permute(2, 0, 1, 3)
.contiguous(memory_format=torch.contiguous_format)
.permute(1, 2, 0, 3)
)
return output, attn
def register_inplace(aten_op, outplace_op):
@register_decomposition(aten_op)
def inplace_op(*args, **kwargs):
out = outplace_op(*args, **kwargs)
return args[0].copy_(out)
return inplace_op
@register_decomposition([aten.baddbmm])
@out_wrapper(exact_dtype=True)
@pw_cast_for_opmath
def baddbmm(self, batch1, batch2, beta=1, alpha=1):
if not self.is_floating_point() and not self.is_complex():
beta = int(beta)
alpha = int(alpha)
result = torch.bmm(batch1, batch2)
if not isinstance(alpha, numbers.Number) or alpha != 1:
# pyrefly: ignore [unsupported-operation]
result = result * alpha
if beta == 0:
return result
if not isinstance(beta, numbers.Number) or beta != 1:
self = self * beta
return self + result
@register_decomposition(aten.floor_divide)
@out_wrapper()
def floor_divide(self, other):
return torch.div(self, other, rounding_mode="floor")
@register_decomposition(aten.sym_numel)
def sym_numel(t):
return functools.reduce(operator.mul, t.shape, 1)
@register_decomposition([aten.sum.default, aten.sum.out])
def sum_default(
self: Tensor,
*,
dtype: Optional[torch.dtype] = None,
out: Optional[Tensor] = None,
) -> Tensor:
if out is None:
return aten.sum.dim_IntList(self, [], dtype=dtype)
else:
return aten.sum.IntList_out(self, [], dtype=dtype, out=out)
@register_decomposition([aten.squeeze.default, aten.squeeze.dim])
def squeeze_default(self: Tensor, dim: Optional[int] = None):
# handle a scalar directly
if not isinstance(self, torch.Tensor):
return self
# perform squeeze
if dim is None:
return aten.squeeze.dims(self, list(range(self.dim())))
else:
return aten.squeeze.dims(self, [dim])
@register_decomposition(torch.ops.aten._weight_norm_interface)
def _weight_norm_interface(v, g, dim=0):
# https://github.com/pytorch/pytorch/blob/852f8526c52190125446adc9a6ecbcc28fb66182/aten/src/ATen/native/WeightNorm.cpp#L58
keep_dim = tuple(i for i in range(len(v.shape)) if i != dim)
# align with cuda behavior, keep norm in 'float' when g is 'bfloat16'
norm_dtype = torch.float if g.dtype == torch.bfloat16 else None
norm = v.norm(2, keep_dim, keepdim=True, dtype=norm_dtype)
return v * (g / norm.to(g.dtype)), norm
@register_decomposition(aten.isin)
@out_wrapper()
def isin(elements, test_elements, *, assume_unique=False, invert=False):
# handle when either elements or test_elements are Scalars (they can't both be)
if not isinstance(elements, torch.Tensor):
elements = torch.tensor(elements, device=test_elements.device)
if not isinstance(test_elements, torch.Tensor):
if invert:
return torch.ne(elements, test_elements)
else:
return torch.eq(elements, test_elements)
if test_elements.numel() < 10.0 * pow(elements.numel(), 0.145):
return isin_default(elements, test_elements, invert=invert)
else:
return isin_sorting(
elements, test_elements, assume_unique=assume_unique, invert=invert
)
@register_decomposition(aten.bernoulli.default)
def bernoulli(
self: torch.Tensor,
*,
generator: Optional[torch.Generator] = None,
) -> torch.Tensor:
if generator is None:
raw_p = torch.rand(self.size(), dtype=torch.float32, device=self.device)
else:
raw_p = torch.rand(
self.size(),
generator=generator,
dtype=torch.float32,
device=self.device,
)
p = (raw_p < self).to(self.dtype)
return p
def isin_default(elements, test_elements, *, invert=False):
if elements.numel() == 0:
return torch.empty_like(elements, dtype=torch.bool)
expanded_elem_shape = elements.shape + (1,) * test_elements.ndim
x = elements.view(expanded_elem_shape)
dim = tuple(range(-1, -test_elements.ndim - 1, -1))
res = (x == test_elements).any(dim=dim)
return ~res if invert else res
def isin_sorting(elements, test_elements, *, assume_unique=False, invert=False):
elements_flat = elements.flatten()
test_elements_flat = test_elements.flatten()
if assume_unique:
# This is the same as the aten implementation. For
# assume_unique=False, we cannot use unique() here, so we use a
# version with searchsorted instead.
all_elements = torch.cat([elements_flat, test_elements_flat])
sorted_elements, sorted_order = torch.sort(all_elements, stable=True)
duplicate_mask = sorted_elements[1:] == sorted_elements[:-1]
duplicate_mask = torch.constant_pad_nd(duplicate_mask, [0, 1], False)
if invert:
duplicate_mask = duplicate_mask.logical_not()
mask = torch.empty_like(duplicate_mask)
mask = mask.index_copy(0, sorted_order, duplicate_mask)
return mask[0 : elements.numel()]
else:
sorted_test_elements, _ = torch.sort(test_elements_flat)
idx = torch.searchsorted(sorted_test_elements, elements_flat)
test_idx = torch.where(idx < sorted_test_elements.numel(), idx, 0)
cmp = sorted_test_elements[test_idx] == elements_flat
cmp = cmp.logical_not() if invert else cmp
return cmp.reshape(elements.shape)
@register_decomposition(aten.take)
@out_wrapper()
def take(self, index):
flattened = self.reshape(-1)
return flattened[index]
@register_decomposition(aten.resize_as)
def resize_as(self, other, memory_format=None):
if memory_format is None:
memory_format = torch.contiguous_format
if memory_format == torch.preserve_format:
memory_format = suggest_memory_format(other)
return aten.resize(self, other.shape, memory_format=memory_format)
register_inplace(aten.addbmm_, aten.addbmm)
register_inplace(aten.addmm_, aten.addmm)
register_inplace(aten.addmv_, aten.addmv)
register_inplace(aten.baddbmm_, aten.baddbmm)
register_inplace(aten.fill_, aten.fill)
register_inplace(aten.gelu_, aten.gelu)
register_inplace(aten.hardswish_, aten.hardswish)
register_inplace(aten.hardtanh_, aten.hardtanh)
register_inplace(aten.hardsigmoid_, aten.hardsigmoid)
register_inplace(aten.__iand__, aten.__and__)
register_inplace(aten.__ilshift__, aten.__lshift__)
register_inplace(aten.index_put_, aten.index_put)
register_inplace(aten.index_reduce_, aten.index_reduce)
register_inplace(aten.__ior__, aten.__or__)
register_inplace(aten.__irshift__, aten.__rshift__)
register_inplace(aten.__ixor__, aten.__xor__)
register_inplace(aten.leaky_relu_, aten.leaky_relu)
register_inplace(aten.logit_, aten.logit)
register_inplace(aten.relu_, aten.relu)
register_inplace(aten.renorm_, aten.renorm)
register_inplace(aten.round_, aten.round)
register_inplace(aten.scatter_, aten.scatter)
register_inplace(aten.scatter_add_, aten.scatter_add)
register_inplace(aten.scatter_reduce_, aten.scatter_reduce)
register_inplace(aten.silu_, aten.silu)
| Reduction |
python | python__mypy | mypy/fastparse.py | {
"start": 73215,
"end": 84921
} | class ____:
def __init__(
self,
errors: Errors | None,
line: int = -1,
override_column: int = -1,
is_evaluated: bool = True,
) -> None:
self.errors = errors
self.line = line
self.override_column = override_column
self.node_stack: list[AST] = []
self.is_evaluated = is_evaluated
def convert_column(self, column: int) -> int:
"""Apply column override if defined; otherwise return column.
Column numbers are sometimes incorrect in the AST and the column
override can be used to work around that.
"""
if self.override_column < 0:
return column
else:
return self.override_column
def invalid_type(self, node: AST, note: str | None = None) -> RawExpressionType:
"""Constructs a type representing some expression that normally forms an invalid type.
For example, if we see a type hint that says "3 + 4", we would transform that
expression into a RawExpressionType.
The semantic analysis layer will report an "Invalid type" error when it
encounters this type, along with the given note if one is provided.
See RawExpressionType's docstring for more details on how it's used.
"""
return RawExpressionType(
None, "typing.Any", line=self.line, column=getattr(node, "col_offset", -1), note=note
)
@overload
def visit(self, node: ast3.expr) -> ProperType: ...
@overload
def visit(self, node: AST | None) -> ProperType | None: ...
def visit(self, node: AST | None) -> ProperType | None:
"""Modified visit -- keep track of the stack of nodes"""
if node is None:
return None
self.node_stack.append(node)
try:
method = "visit_" + node.__class__.__name__
visitor = getattr(self, method, None)
if visitor is not None:
typ = visitor(node)
assert isinstance(typ, ProperType)
return typ
else:
return self.invalid_type(node)
finally:
self.node_stack.pop()
def parent(self) -> AST | None:
"""Return the AST node above the one we are processing"""
if len(self.node_stack) < 2:
return None
return self.node_stack[-2]
def fail(self, msg: ErrorMessage, line: int, column: int) -> None:
if self.errors:
self.errors.report(line, column, msg.value, blocker=True, code=msg.code)
def note(self, msg: str, line: int, column: int) -> None:
if self.errors:
self.errors.report(line, column, msg, severity="note", code=codes.SYNTAX)
def translate_expr_list(self, l: Sequence[ast3.expr]) -> list[Type]:
return [self.visit(e) for e in l]
def visit_Call(self, e: Call) -> Type:
# Parse the arg constructor
f = e.func
constructor = stringify_name(f)
if not isinstance(self.parent(), ast3.List):
note = None
if constructor:
note = "Suggestion: use {0}[...] instead of {0}(...)".format(constructor)
return self.invalid_type(e, note=note)
if not constructor:
self.fail(message_registry.ARG_CONSTRUCTOR_NAME_EXPECTED, e.lineno, e.col_offset)
name: str | None = None
default_type = AnyType(TypeOfAny.special_form)
typ: Type = default_type
for i, arg in enumerate(e.args):
if i == 0:
converted = self.visit(arg)
assert converted is not None
typ = converted
elif i == 1:
name = self._extract_argument_name(arg)
else:
self.fail(message_registry.ARG_CONSTRUCTOR_TOO_MANY_ARGS, f.lineno, f.col_offset)
for k in e.keywords:
value = k.value
if k.arg == "name":
if name is not None:
self.fail(
message_registry.MULTIPLE_VALUES_FOR_NAME_KWARG.format(constructor),
f.lineno,
f.col_offset,
)
name = self._extract_argument_name(value)
elif k.arg == "type":
if typ is not default_type:
self.fail(
message_registry.MULTIPLE_VALUES_FOR_TYPE_KWARG.format(constructor),
f.lineno,
f.col_offset,
)
converted = self.visit(value)
assert converted is not None
typ = converted
else:
self.fail(
message_registry.ARG_CONSTRUCTOR_UNEXPECTED_ARG.format(k.arg),
value.lineno,
value.col_offset,
)
return CallableArgument(typ, name, constructor, e.lineno, e.col_offset)
def translate_argument_list(self, l: Sequence[ast3.expr]) -> TypeList:
return TypeList([self.visit(e) for e in l], line=self.line)
def _extract_argument_name(self, n: ast3.expr) -> str | None:
if isinstance(n, ast3.Constant) and isinstance(n.value, str):
return n.value.strip()
elif isinstance(n, ast3.Constant) and n.value is None:
return None
self.fail(
message_registry.ARG_NAME_EXPECTED_STRING_LITERAL.format(type(n).__name__),
self.line,
0,
)
return None
def visit_Name(self, n: Name) -> Type:
return UnboundType(n.id, line=self.line, column=self.convert_column(n.col_offset))
def visit_BinOp(self, n: ast3.BinOp) -> Type:
if not isinstance(n.op, ast3.BitOr):
return self.invalid_type(n)
left = self.visit(n.left)
right = self.visit(n.right)
return UnionType(
[left, right],
line=self.line,
column=self.convert_column(n.col_offset),
is_evaluated=self.is_evaluated,
uses_pep604_syntax=True,
)
def visit_Constant(self, n: ast3.Constant) -> Type:
val = n.value
if val is None:
# None is a type.
return UnboundType("None", line=self.line)
if isinstance(val, str):
# Parse forward reference.
return parse_type_string(val, "builtins.str", self.line, n.col_offset)
if val is Ellipsis:
# '...' is valid in some types.
return EllipsisType(line=self.line)
if isinstance(val, bool):
# Special case for True/False.
return RawExpressionType(val, "builtins.bool", line=self.line)
if isinstance(val, (int, float, complex)):
return self.numeric_type(val, n)
if isinstance(val, bytes):
contents = bytes_to_human_readable_repr(val)
return RawExpressionType(contents, "builtins.bytes", self.line, column=n.col_offset)
# Everything else is invalid.
# UnaryOp(op, operand)
def visit_UnaryOp(self, n: UnaryOp) -> Type:
# We support specifically Literal[-4], Literal[+4], and nothing else.
# For example, Literal[~6] or Literal[not False] is not supported.
typ = self.visit(n.operand)
if (
isinstance(typ, RawExpressionType)
# Use type() because we do not want to allow bools.
and type(typ.literal_value) is int
):
if isinstance(n.op, USub):
typ.literal_value *= -1
return typ
if isinstance(n.op, UAdd):
return typ
return self.invalid_type(n)
def numeric_type(self, value: object, n: AST) -> Type:
# The node's field has the type complex, but complex isn't *really*
# a parent of int and float, and this causes isinstance below
# to think that the complex branch is always picked. Avoid
# this by throwing away the type.
if isinstance(value, int):
numeric_value: int | None = value
type_name = "builtins.int"
else:
# Other kinds of numbers (floats, complex) are not valid parameters for
# RawExpressionType so we just pass in 'None' for now. We'll report the
# appropriate error at a later stage.
numeric_value = None
type_name = f"builtins.{type(value).__name__}"
return RawExpressionType(
numeric_value, type_name, line=self.line, column=getattr(n, "col_offset", -1)
)
def visit_Slice(self, n: ast3.Slice) -> Type:
return self.invalid_type(n, note="did you mean to use ',' instead of ':' ?")
# Subscript(expr value, expr slice, expr_context ctx)
def visit_Subscript(self, n: ast3.Subscript) -> Type:
empty_tuple_index = False
if isinstance(n.slice, ast3.Tuple):
params = self.translate_expr_list(n.slice.elts)
if len(n.slice.elts) == 0:
empty_tuple_index = True
else:
params = [self.visit(n.slice)]
value = self.visit(n.value)
if isinstance(value, UnboundType) and not value.args:
result = UnboundType(
value.name,
params,
line=self.line,
column=value.column,
empty_tuple_index=empty_tuple_index,
)
result.end_column = getattr(n, "end_col_offset", None)
result.end_line = getattr(n, "end_lineno", None)
return result
else:
return self.invalid_type(n)
def visit_Tuple(self, n: ast3.Tuple) -> Type:
return TupleType(
self.translate_expr_list(n.elts),
_dummy_fallback,
implicit=True,
line=self.line,
column=self.convert_column(n.col_offset),
)
def visit_Dict(self, n: ast3.Dict) -> Type:
if not n.keys:
return self.invalid_type(n)
items: dict[str, Type] = {}
extra_items_from = []
for item_name, value in zip(n.keys, n.values):
if not isinstance(item_name, ast3.Constant) or not isinstance(item_name.value, str):
if item_name is None:
extra_items_from.append(self.visit(value))
continue
return self.invalid_type(n)
items[item_name.value] = self.visit(value)
result = TypedDictType(items, set(), set(), _dummy_fallback, n.lineno, n.col_offset)
result.extra_items_from = extra_items_from
return result
# Attribute(expr value, identifier attr, expr_context ctx)
def visit_Attribute(self, n: Attribute) -> Type:
before_dot = self.visit(n.value)
if isinstance(before_dot, UnboundType) and not before_dot.args:
return UnboundType(f"{before_dot.name}.{n.attr}", line=self.line, column=n.col_offset)
else:
return self.invalid_type(n)
# Used for Callable[[X *Ys, Z], R] etc.
def visit_Starred(self, n: ast3.Starred) -> Type:
return UnpackType(self.visit(n.value), from_star_syntax=True)
# List(expr* elts, expr_context ctx)
def visit_List(self, n: ast3.List) -> Type:
assert isinstance(n.ctx, ast3.Load)
result = self.translate_argument_list(n.elts)
return result
def stringify_name(n: AST) -> str | None:
if isinstance(n, Name):
return n.id
elif isinstance(n, Attribute):
sv = stringify_name(n.value)
if sv is not None:
return f"{sv}.{n.attr}"
return None # Can't do it.
| TypeConverter |
python | tensorflow__tensorflow | tensorflow/python/ops/numpy_ops/integration_test/benchmarks/micro_benchmarks.py | {
"start": 1502,
"end": 5477
} | class ____(tf.test.Benchmark):
"""Main micro benchmark class."""
def _benchmark_and_report(
self,
name,
fn,
repeat=None,
number=None):
"""Run fn repeat * number times, report time, and return fastest time."""
# Can't make these default above since the flags may not have been parsed
# at module import time.
repeat = repeat or int(FLAGS.repeat)
number = number or int(FLAGS.number)
# Warmup
fn()
times = []
for _ in range(repeat):
gc.disable()
start = time.time()
for _ in range(number):
fn()
times.append(time.time() - start)
gc.enable()
gc.collect()
# Regular benchmark to report numbers.
fastest_time_us = min(times) * 1e6 / number
total_time = sum(times)
self.report_benchmark(name=name,
wall_time=total_time,
extras={'fastest_time_us': fastest_time_us})
return fastest_time_us
def benchmark_tf_np_mlp_inference_batch_1_cpu(self):
with tf.device('/CPU:0'):
model = tf_numpy_mlp.MLP()
x = tfnp.ones(shape=(1, 10)).astype(np.float32)
self._benchmark_and_report(self._get_name(), lambda: model.inference(x))
def benchmark_tf_np_tf_function_mlp_inference_batch_1_cpu(self):
with tf.device('/CPU:0'):
model = tf_numpy_mlp.MLP()
x = tfnp.ones(shape=(1, 10)).astype(np.float32)
self._benchmark_and_report(
self._get_name(), tf.function(lambda: model.inference(x)))
def benchmark_numpy_mlp_inference_batch_1_cpu(self):
model = numpy_mlp.MLP()
x = np.random.uniform(size=(1, 10)).astype(np.float32, copy=False)
self._benchmark_and_report(self._get_name(), lambda: model.inference(x))
def _benchmark_np_and_tf_np(self, name, op, args, repeat=None): # pylint: disable=redefined-builtin
fn = getattr(np, op)
assert fn is not None
np_time = self._benchmark_and_report(
'{}_numpy'.format(name), lambda: fn(*args), repeat=repeat)
fn = getattr(tfnp, op)
assert fn is not None
with tf.device('CPU:0'):
tf_time = self._benchmark_and_report(
'{}_tfnp_cpu'.format(name), lambda: fn(*args), repeat=repeat)
return np_time, tf_time
def _print_times(self, op, sizes, times):
# For easy reporting.
print('For np.{}:'.format(op))
print('{:<15} {:>11} {:>11}'.format('Size', 'NP time', 'TF NP Time'))
for size, (np_time, tf_time) in zip(sizes, times):
print('{:<15} {:>10.5}us {:>10.5}us'.format(
str(size), np_time, tf_time))
print()
def _benchmark_np_and_tf_np_unary(self, op):
sizes = [(100,), (10000,), (1000000,)]
repeats = [FLAGS.repeat] * 2 + [10]
times = []
for size, repeat in zip(sizes, repeats):
x = np.random.uniform(size=size).astype(np.float32, copy=False)
name = '{}_{}'.format(self._get_name(), size)
times.append(self._benchmark_np_and_tf_np(name, op, (x,), repeat))
self._print_times(op, sizes, times)
def benchmark_count_nonzero(self):
self._benchmark_np_and_tf_np_unary('count_nonzero')
def benchmark_log(self):
self._benchmark_np_and_tf_np_unary('log')
def benchmark_exp(self):
self._benchmark_np_and_tf_np_unary('exp')
def benchmark_tanh(self):
self._benchmark_np_and_tf_np_unary('tanh')
def benchmark_matmul(self):
sizes = [(2, 2), (10, 10), (100, 100), (200, 200), (1000, 1000)]
# Override repeat flag since this can be very slow.
repeats = [FLAGS.repeat] * 3 + [50, 10]
times = []
for size, repeat in zip(sizes, repeats):
x = np.random.uniform(size=size).astype(np.float32, copy=False)
name = '{}_{}'.format(self._get_name(), size)
times.append(
self._benchmark_np_and_tf_np(name, 'matmul', (x, x), repeat=repeat))
self._print_times('matmul', sizes, times)
if __name__ == '__main__':
logging.set_verbosity(logging.WARNING)
tf.enable_v2_behavior()
tf.test.main()
| MicroBenchmarks |
python | ray-project__ray | release/ray_release/test.py | {
"start": 1730,
"end": 1946
} | class ____(enum.Enum):
"""
Overall state of the test
"""
JAILED = "jailed"
FAILING = "failing"
FLAKY = "flaky"
CONSITENTLY_FAILING = "consistently_failing"
PASSING = "passing"
| TestState |
python | encode__django-rest-framework | tests/authentication/migrations/0001_initial.py | {
"start": 76,
"end": 563
} | class ____(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CustomToken',
fields=[
('key', models.CharField(max_length=40, primary_key=True, serialize=False)),
('user', models.OneToOneField(on_delete=models.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| Migration |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_length/invalid_length_returned.py | {
"start": 204,
"end": 306
} | class ____:
"""__len__ returns <type 'int'>"""
def __len__(self):
return 0
| FirstGoodLen |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_hyperlink52.py | {
"start": 304,
"end": 857
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("hyperlink52.xlsx")
def test_create_file(self):
"""
Test the creation of a simple XlsxWriter file with hyperlinks.
"""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write_url(0, 0, "dynamicsnav://www.example.com")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pydantic__pydantic | tests/benchmarks/test_discriminated_unions.py | {
"start": 239,
"end": 324
} | class ____(BaseModel):
state_type: Literal['loop']
substate: AnyState
| LoopState |
python | allegroai__clearml | examples/hyperdatasets/create_qa_entries.py | {
"start": 1574,
"end": 12048
} | class ____(DataEntry):
def __init__(
self,
question: str,
answer: str,
*,
reference: Optional[str] = None,
tags: Optional[Iterable[str]] = None,
):
metadata = {
"question": question,
"answer": answer,
}
if reference:
metadata["reference"] = reference
if tags:
metadata["tags"] = list(tags)
super().__init__(metadata=metadata)
@property
def question(self) -> str:
return self._metadata.get("question", "")
@property
def answer(self) -> str:
return self._metadata.get("answer", "")
def parse_args() -> argparse.Namespace:
description = textwrap.dedent(__doc__ or "").strip()
parser = argparse.ArgumentParser(
description=description or None,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("--project", default="HyperDatasets Examples", help="ClearML project name")
parser.add_argument("--dataset-name", required=True, help="HyperDataset collection name")
parser.add_argument("--version-name", required=True, help="HyperDataset version name")
parser.add_argument("--description", default="Q&A demo HyperDataset", help="Version description")
parser.add_argument(
"--qa-json",
type=Path,
help="Path to a JSON file containing a list of {question, answer, reference?, tags?}",
)
parser.add_argument("--embed", action="store_true", help="Generate vector embeddings for each Q&A pair")
parser.add_argument(
"--embedding-model",
default="sentence-transformers/all-MiniLM-L6-v2",
help="SentenceTransformer model name or local path used when --embed is set",
)
parser.add_argument(
"--embedding-device",
default=None,
help="Optional device string passed to SentenceTransformer (e.g. 'cpu', 'cuda')",
)
parser.add_argument(
"--vector-field",
help="Metadata field used to store the embedding vector",
)
parser.add_argument(
"--normalize",
action="store_true",
help="L2-normalize the generated embeddings before storing them",
)
return parser.parse_args()
def load_qa_pairs(qa_json: Optional[Path]) -> List[dict]:
if qa_json:
with qa_json.open("r", encoding="utf-8") as fp:
data = json.load(fp)
if not isinstance(data, list):
raise ValueError("QA JSON must contain a list of objects")
return data
# Default in-script examples.
return [
{
"question": "What is ClearML HyperDataset?",
"answer": "A ClearML data structure that lets you manage structured data entries with optional assets.",
"reference": "https://clear.ml/docs/latest/docs/hyperdatasets",
"tags": ["docs", "product"],
},
{
"question": "How can I add items into a HyperDataset?",
"answer": "Create DataEntry instances, attach sub-entries when needed, and call add_data_entries().",
"tags": ["usage"],
},
{
"question": "Why store embeddings on a HyperDataset?",
"answer": "Embeddings enable semantic search and similarity queries directly on dataset entries.",
"tags": ["search"],
},
]
def maybe_encode_embeddings(
qa_pairs: List[dict],
*,
model_name: str,
device: Optional[str],
normalize: bool,
) -> Optional[List[List[float]]]:
try:
from sentence_transformers import SentenceTransformer
except ImportError as exc:
raise RuntimeError("sentence-transformers is required for --embed") from exc
model = SentenceTransformer(model_name, device=device)
sentences = [
f"question: {item.get('question', '')}\nanswer: {item.get('answer', '')}"
for item in qa_pairs
]
embeddings = model.encode(sentences, normalize_embeddings=normalize)
return [list(map(float, emb)) for emb in embeddings]
def build_entries(
qa_pairs: List[dict],
embeddings: Optional[List[List[float]]],
vector_field: Optional[str],
preview_sources: Optional[List[Optional[str]]] = None,
) -> Tuple[List[QADataEntry], Optional[int]]:
entries: List[QADataEntry] = []
vector_dims: Optional[int] = None
for idx, item in enumerate(qa_pairs):
question = item.get("question", "")
answer = item.get("answer", "")
if not question or not answer:
raise ValueError(f"Q&A item at index {idx} must include both 'question' and 'answer'")
preview_uri = None
if preview_sources and idx < len(preview_sources):
preview_uri = preview_sources[idx]
entry = QADataEntry(
question=question,
answer=answer,
reference=item.get("reference"),
tags=item.get("tags"),
)
entry.add_sub_entries(
[
QADataSubEntry(name="question", text=question, role="question", preview_source=preview_uri),
QADataSubEntry(name="answer", text=answer, role="answer", preview_source=preview_uri),
]
)
if embeddings:
if not vector_field:
raise ValueError("vector_field must be provided when embeddings are supplied")
vector = embeddings[idx]
entry.set_vector(vector, metadata_field=vector_field)
if vector_dims is None:
vector_dims = len(vector)
elif vector_dims != len(vector):
raise ValueError("All embedding vectors must share the same dimensionality")
entries.append(entry)
return entries, vector_dims
def _resolve_vector_dims(embeddings: Optional[List[List[float]]]) -> Optional[int]:
if not embeddings:
return None
if not embeddings[0]:
raise ValueError("Embedding vectors must not be empty")
dims = len(embeddings[0])
for idx, vector in enumerate(embeddings[1:], start=1):
if len(vector) != dims:
raise ValueError(
f"Embedding at index {idx} has dimensionality {len(vector)}; expected {dims}"
)
return dims
def _build_preview_base_url(app_host: str, version_id: Optional[str]) -> Optional[str]:
host = (app_host or "").rstrip("/")
if not host or not version_id:
return None
return f"{host}/files/datasets/{version_id}/clearml-docs/docs/webapp"
def _preview_file_name(question: str, index: int) -> str:
slug = re.sub(r"[^a-z0-9]+", "-", (question or "").lower()).strip("-")
if slug:
slug = slug[:60].strip("-")
if not slug:
slug = f"qa-entry-{index:04d}"
return f"{slug}-{uuid.uuid4().hex[:8]}"
def _render_preview_html(question: str, answer: str) -> str:
escaped_question = html.escape(question)
escaped_answer = html.escape(answer).replace("\n", "<br />\n")
return f"""<!DOCTYPE html>
<html lang=\"en\">
<head>
<meta charset=\"utf-8\" />
<title>{escaped_question}</title>
<style>body{{font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',sans-serif;margin:1.5rem;line-height:1.5;}} .question{{font-size:1.1rem;font-weight:600;}} .answer{{margin-top:1rem;}}</style>
</head>
<body>
<div class=\"question\">{escaped_question}</div>
<div class=\"answer\">{escaped_answer}</div>
</body>
</html>
"""
def upload_entry_previews(
qa_pairs: List[dict],
preview_base_url: Optional[str],
) -> Optional[List[str]]:
if not preview_base_url:
return None
preview_urls: List[str] = []
with tempfile.TemporaryDirectory() as preview_dir:
preview_dir_path = Path(preview_dir)
for idx, item in enumerate(qa_pairs):
question = item.get("question", "") or ""
answer = item.get("answer", "") or ""
preview_name = _preview_file_name(question, idx)
local_path = preview_dir_path / f"{preview_name}.html"
local_path.write_text(_render_preview_html(question, answer), encoding="utf-8")
remote_url = StorageManager.upload_file(
local_file=local_path.as_posix(),
remote_url=f"{preview_base_url}/{preview_name}.html",
)
preview_urls.append(remote_url)
return preview_urls
def main():
args = parse_args()
qa_pairs = load_qa_pairs(args.qa_json)
embeddings = None
if args.embed:
embeddings = maybe_encode_embeddings(
qa_pairs,
model_name=args.embedding_model,
device=args.embedding_device,
normalize=args.normalize,
)
vector_field = args.vector_field
if embeddings and not vector_field:
raise ValueError("--vector-field must be provided when --embed is used")
vector_dims = _resolve_vector_dims(embeddings)
field_mappings = None
if vector_dims:
field_path = vector_field if vector_field.startswith("meta.") else f"meta.{vector_field}"
# ClearML field mapping informs the server about the dense vector metadata schema
field_mappings = {
field_path: {
"type": "dense_vector",
"element_type": "float",
"dims": vector_dims,
}
}
# ClearML HyperDataset version handle (creates if missing)
dataset = HyperDataset(
project_name=args.project,
dataset_name=args.dataset_name,
version_name=args.version_name,
description=args.description,
field_mappings=field_mappings,
)
app_host = Session.get_files_server_host()
preview_base_url = _build_preview_base_url(app_host, dataset.version_id)
preview_sources = upload_entry_previews(qa_pairs, preview_base_url)
entries, _ = build_entries(qa_pairs, embeddings, vector_field, preview_sources=preview_sources)
# Upload entries so ClearML handles storage and indexing
errors = dataset.add_data_entries(entries, upload_local_files_destination=None, force_upload=True)
if errors.get("register"):
raise RuntimeError(f"Failed registering entries: {errors['register']}")
print(
"Created HyperDataset version: project={project} dataset={dataset} version={version}".format(
project=dataset.project_id,
dataset=dataset.dataset_id,
version=dataset.version_id,
)
)
if __name__ == "__main__":
main()
| QADataEntry |
python | kamyu104__LeetCode-Solutions | Python/check-if-an-original-string-exists-given-two-encoded-strings.py | {
"start": 152,
"end": 3233
} | class ____(object):
def possiblyEquals(self, s1, s2):
"""
:type s1: str
:type s2: str
:rtype: bool
"""
def general_possible_numbers(s): # Time: O(2^l), Space: O(2^l), l is the length of consecutive digits, and l is at most 3
dp = [set() for _ in xrange(len(s))]
for i in xrange(len(s)):
curr, basis = 0, 1
for j in reversed(xrange(i+1)):
curr += int(s[j])*basis
basis *= 10
if s[j] == '0':
continue
if j == 0:
dp[i].add(curr)
else:
dp[i].update(x+curr for x in dp[j-1])
return dp[-1]
def optimized_possible_numbers(s):
assert(len(s) <= 3)
result = {int(s)}
if len(s) >= 2:
if s[1] != '0':
result.add(int(s[:1])+int(s[1:]))
if len(s) >= 3:
if s[2] != '0':
result.add(int(s[:2])+int(s[2:]))
if s[1] != '0':
result.add(int(s[0:1])+int(s[1:2])+int(s[2:]))
return result
def memoization(s1, s2, i, j, k, lookup):
if (i, j, k) not in lookup:
if i == len(s1) and j == len(s2):
lookup[(i, j, k)] = (k == 0)
elif i != len(s1) and s1[i].isdigit():
lookup[(i, j, k)] = False
for ni in xrange(i+1, len(s1)+1):
if ni == len(s1) or not s1[ni].isdigit():
break
for x in optimized_possible_numbers(s1[i:ni]):
if memoization(s1, s2, ni, j, k+x, lookup):
lookup[(i, j, k)] = True
break
elif j != len(s2) and s2[j].isdigit():
lookup[(i, j, k)] = False
for nj in xrange(j+1, len(s2)+1):
if nj == len(s2) or not s2[nj].isdigit():
break
for x in optimized_possible_numbers(s2[j:nj]):
if memoization(s1, s2, i, nj, k-x, lookup):
lookup[(i, j, k)] = True
break
elif k < 0:
lookup[(i, j, k)] = memoization(s1, s2, i+1, j, k+1, lookup) if i != len(s1) else False
elif k > 0:
lookup[(i, j, k)] = memoization(s1, s2, i, j+1, k-1, lookup) if j != len(s2) else False
else:
lookup[(i, j, k)] = memoization(s1, s2, i+1, j+1, k, lookup) if i != len(s1) and j != len(s2) and s1[i] == s2[j] else False
return lookup[(i, j, k)]
return memoization(s1, s2, 0, 0, 0, {})
# Time: O(m * n * k), k is the max number of consecutive digits in s1 and s2
# Space: O(m * n * k)
# top-down dp (faster since accessing less states)
| Solution |
python | walkccc__LeetCode | solutions/858. Mirror Reflection/858.py | {
"start": 0,
"end": 216
} | class ____:
def mirrorReflection(self, p: int, q: int) -> int:
while p % 2 == 0 and q % 2 == 0:
p //= 2
q //= 2
if p % 2 == 0:
return 2
if q % 2 == 0:
return 0
return 1
| Solution |
python | dask__distributed | distributed/http/worker/prometheus/core.py | {
"start": 505,
"end": 9538
} | class ____(PrometheusCollector):
server: Worker
def __init__(self, server: Worker):
super().__init__(server)
self.subsystem = "worker"
self.crick_available = True
try:
import crick # noqa: F401
except ImportError:
self.crick_available = False
logger.debug(
"Not all prometheus metrics available are exported. "
"Digest-based metrics require crick to be installed."
)
def collect(self) -> Iterator[Metric]:
self.server.monitor.update()
ws = self.server.state
tasks = GaugeMetricFamily(
self.build_name("tasks"),
"Number of tasks at worker.",
labels=["state"],
)
for state, n in ws.task_counter.current_count(by_prefix=False).items():
if state == "memory" and hasattr(self.server.data, "slow"):
n_spilled = len(self.server.data.slow)
if n - n_spilled > 0:
tasks.add_metric(["memory"], n - n_spilled)
if n_spilled > 0:
tasks.add_metric(["disk"], n_spilled)
else:
tasks.add_metric([state], n)
yield tasks
yield GaugeMetricFamily(
self.build_name("concurrent_fetch_requests"),
(
"Deprecated: This metric has been renamed to transfer_incoming_count.\n"
"Number of open fetch requests to other workers"
),
value=ws.transfer_incoming_count,
)
if self.server.monitor.monitor_gil_contention:
yield CounterMetricFamily(
self.build_name("gil_contention"),
"GIL contention metric",
value=self.server.monitor.cumulative_gil_contention,
unit="seconds",
)
yield CounterMetricFamily(
self.build_name("gc_collection"),
"Total time spent on garbage collection",
value=gc_collect_duration(),
unit="seconds",
)
yield GaugeMetricFamily(
self.build_name("threads"),
"Number of worker threads",
value=ws.nthreads,
)
yield GaugeMetricFamily(
self.build_name("latency"),
"Latency of worker connection",
unit="seconds",
value=self.server.latency,
)
try:
spilled_memory, spilled_disk = self.server.data.spilled_total # type: ignore
except AttributeError:
spilled_memory, spilled_disk = 0, 0 # spilling is disabled
process_memory = self.server.monitor.get_process_memory()
managed_memory = min(process_memory, ws.nbytes - spilled_memory)
memory = GaugeMetricFamily(
self.build_name("memory_bytes"),
"Memory breakdown",
labels=["type"],
)
memory.add_metric(["managed"], managed_memory)
memory.add_metric(["unmanaged"], process_memory - managed_memory)
memory.add_metric(["spilled"], spilled_disk)
yield memory
yield GaugeMetricFamily(
self.build_name("transfer_incoming_bytes"),
"Total size of open data transfers from other workers",
value=ws.transfer_incoming_bytes,
)
yield GaugeMetricFamily(
self.build_name("transfer_incoming_count"),
"Number of open data transfers from other workers",
value=ws.transfer_incoming_count,
)
yield CounterMetricFamily(
self.build_name("transfer_incoming_count_total"),
(
"Total number of data transfers from other workers "
"since the worker was started"
),
value=ws.transfer_incoming_count_total,
)
yield GaugeMetricFamily(
self.build_name("transfer_outgoing_bytes"),
"Total size of open data transfers to other workers",
value=self.server.transfer_outgoing_bytes,
)
yield GaugeMetricFamily(
self.build_name("transfer_outgoing_count"),
"Number of open data transfers to other workers",
value=self.server.transfer_outgoing_count,
)
yield CounterMetricFamily(
self.build_name("transfer_outgoing_bytes_total"),
(
"Total size of data transfers to other workers "
"since the worker was started (including in-progress and failed transfers)"
),
value=self.server.transfer_outgoing_bytes_total,
)
yield CounterMetricFamily(
self.build_name("transfer_outgoing_count_total"),
(
"Total number of data transfers to other workers "
"since the worker was started"
),
value=self.server.transfer_outgoing_count_total,
)
yield from self.collect_crick()
yield from self.collect_spillbuffer()
now = time()
max_tick_duration = max(
self.server.digests_max.pop("tick_duration", 0),
now - self.server._last_tick,
)
yield GaugeMetricFamily(
self.build_name("tick_duration_maximum"),
"Maximum tick duration observed since Prometheus last scraped metrics",
unit="seconds",
value=max_tick_duration,
)
yield CounterMetricFamily(
self.build_name("tick_count"),
"Total number of ticks observed since the server started",
value=self.server._tick_counter,
)
def collect_crick(self) -> Iterator[Metric]:
# All metrics using digests require crick to be installed.
# The following metrics will export NaN, if the corresponding digests are None
if not self.crick_available:
return
assert self.server.digests
yield GaugeMetricFamily(
self.build_name("tick_duration_median"),
"Median tick duration at worker",
unit="seconds",
value=self.server.digests["tick-duration"].components[1].quantile(50),
)
yield GaugeMetricFamily(
self.build_name("task_duration_median"),
"Median task runtime at worker",
unit="seconds",
value=self.server.digests["task-duration"].components[1].quantile(50),
)
yield GaugeMetricFamily(
self.build_name("transfer_bandwidth_median"),
"Bandwidth for transfer at worker",
unit="bytes",
value=self.server.digests["transfer-bandwidth"].components[1].quantile(50),
)
def collect_spillbuffer(self) -> Iterator[Metric]:
"""SpillBuffer-specific metrics.
Additionally, you can obtain derived metrics as follows:
cache hit ratios:
by keys = spill_count.memory_read / (spill_count.memory_read + spill_count.disk_read)
by bytes = spill_bytes.memory_read / (spill_bytes.memory_read + spill_bytes.disk_read)
mean times per key:
pickle = spill_time.pickle / spill_count.disk_write
write = spill_time.disk_write / spill_count.disk_write
unpickle = spill_time.unpickle / spill_count.disk_read
read = spill_time.disk_read / spill_count.disk_read
mean bytes per key:
write = spill_bytes.disk_write / spill_count.disk_write
read = spill_bytes.disk_read / spill_count.disk_read
mean bytes per second:
write = spill_bytes.disk_write / spill_time.disk_write
read = spill_bytes.disk_read / spill_time.disk_read
"""
try:
metrics = self.server.data.cumulative_metrics # type: ignore
except AttributeError:
return # spilling is disabled
counters = {
"bytes": CounterMetricFamily(
self.build_name("spill_bytes"),
"Total size of memory and disk accesses caused by managed data "
"since the latest worker restart",
labels=["activity"],
),
"count": CounterMetricFamily(
self.build_name("spill_count"),
"Total number of memory and disk accesses caused by managed data "
"since the latest worker restart",
labels=["activity"],
),
"seconds": CounterMetricFamily(
self.build_name("spill_time"),
"Total time spent spilling/unspilling since the latest worker restart",
unit="seconds",
labels=["activity"],
),
}
# Note: memory_read is used to calculate cache hit ratios (see docstring)
for (label, unit), value in metrics.items():
counters[unit].add_metric([label], value)
yield from counters.values()
| WorkerMetricCollector |
python | falconry__falcon | e2e-tests/server/hub.py | {
"start": 2305,
"end": 2482
} | class ____:
def __init__(self, hub: Hub):
self._hub = hub
async def on_get(self, req: Request, resp: Response) -> None:
resp.sse = self._hub.events()
| Events |
python | docker__docker-py | tests/unit/api_test.py | {
"start": 2208,
"end": 3092
} | class ____(unittest.TestCase):
def setUp(self):
self.patcher = mock.patch.multiple(
'docker.api.client.APIClient',
get=fake_get,
post=fake_post,
put=fake_put,
delete=fake_delete,
_read_from_socket=fake_read_from_socket
)
self.patcher.start()
self.client = APIClient(version=DEFAULT_DOCKER_API_VERSION)
def tearDown(self):
self.client.close()
self.patcher.stop()
def base_create_payload(self, img='busybox', cmd=None):
if not cmd:
cmd = ['true']
return {"Tty": False, "Image": img, "Cmd": cmd,
"AttachStdin": False,
"AttachStderr": True, "AttachStdout": True,
"StdinOnce": False,
"OpenStdin": False, "NetworkDisabled": False,
}
| BaseAPIClientTest |
python | astropy__astropy | astropy/units/tests/test_quantity_annotations.py | {
"start": 586,
"end": 9880
} | class ____:
"""Test Quantity[Unit] type annotation."""
def test_simple_annotation(self):
@u.quantity_input
def func(x: Quantity[u.m], y: str):
return x, y
i_q, i_str = 2 * u.m, "cool string"
o_q, o_str = func(i_q, i_str)
assert i_q == o_q
assert i_str == o_str
# checks the input on the 1st arg
with pytest.raises(u.UnitsError):
func(1 * u.s, i_str)
# but not the second
o_q, o_str = func(i_q, {"not": "a string"})
assert i_q == o_q
assert i_str != o_str
def test_multiple_annotation(self):
@u.quantity_input
def multi_func(a: Quantity[u.km]) -> Quantity[u.m]:
return a
i_q = 2 * u.km
o_q = multi_func(i_q)
assert o_q == i_q
assert o_q.unit == u.m
def test_optional_and_annotated(self):
@u.quantity_input
def opt_func(x: Quantity[u.m] | None = None) -> Quantity[u.km]:
if x is None:
return 1 * u.km
return x
i_q = 250 * u.m
o_q = opt_func(i_q)
assert o_q.unit == u.km
assert o_q == i_q
i_q = None
o_q = opt_func(i_q)
assert o_q == 1 * u.km
def test_union_and_annotated(self):
# Union and Annotated
@u.quantity_input
def union_func(x: Quantity[u.m] | (Quantity[u.s] | None)):
if x is None:
return None
else:
return 2 * x
i_q = 1 * u.m
o_q = union_func(i_q)
assert o_q == 2 * i_q
i_q = 1 * u.s
o_q = union_func(i_q)
assert o_q == 2 * i_q
i_q = None
o_q = union_func(i_q)
assert o_q is None
def test_not_unit_or_ptype(self):
with pytest.raises(TypeError, match="unit annotation is not"):
Quantity["definitely not a unit"]
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.arcsec), ("angle", "angle")]
)
def test_args3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary
solarx, solary = myfunc_args(1 * u.arcsec, 1 * u.arcsec)
assert isinstance(solarx, Quantity)
assert isinstance(solary, Quantity)
assert solarx.unit == u.arcsec
assert solary.unit == u.arcsec
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.arcsec), ("angle", "angle")]
)
def test_args_noconvert3(solarx_unit, solary_unit):
@u.quantity_input()
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary
solarx, solary = myfunc_args(1 * u.deg, 1 * u.arcmin)
assert isinstance(solarx, Quantity)
assert isinstance(solary, Quantity)
assert solarx.unit == u.deg
assert solary.unit == u.arcmin
@pytest.mark.parametrize("solarx_unit", [u.arcsec, "angle"])
def test_args_nonquantity3(solarx_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary):
return solarx, solary
solarx, solary = myfunc_args(1 * u.arcsec, 100)
assert isinstance(solarx, Quantity)
assert isinstance(solary, int)
assert solarx.unit == u.arcsec
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.eV), ("angle", "energy")]
)
def test_arg_equivalencies3(solarx_unit, solary_unit):
@u.quantity_input(equivalencies=u.mass_energy())
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary + (10 * u.J) # Add an energy to check equiv is working
solarx, solary = myfunc_args(1 * u.arcsec, 100 * u.gram)
assert isinstance(solarx, Quantity)
assert isinstance(solary, Quantity)
assert solarx.unit == u.arcsec
assert solary.unit == u.gram
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_wrong_unit3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary
with pytest.raises(
u.UnitsError,
match=(
"Argument 'solary' to function 'myfunc_args' must be in units "
f"convertible to '{str(solary_unit)}'."
),
):
solarx, solary = myfunc_args(1 * u.arcsec, 100 * u.km)
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_not_quantity3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary
with pytest.raises(
TypeError,
match=(
"Argument 'solary' to function 'myfunc_args' has no 'unit' "
"attribute. You should pass in an astropy Quantity instead."
),
):
solarx, solary = myfunc_args(1 * u.arcsec, 100)
def test_decorator_override():
@u.quantity_input(solarx=u.arcsec)
def myfunc_args(solarx: u.km, solary: u.arcsec):
return solarx, solary
solarx, solary = myfunc_args(1 * u.arcsec, 1 * u.arcsec)
assert isinstance(solarx, Quantity)
assert isinstance(solary, Quantity)
assert solarx.unit == u.arcsec
assert solary.unit == u.arcsec
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_kwargs3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary, myk: solary_unit = 1 * u.arcsec):
return solarx, solary, myk
solarx, solary, myk = myfunc_args(1 * u.arcsec, 100, myk=100 * u.deg)
assert isinstance(solarx, Quantity)
assert isinstance(solary, int)
assert isinstance(myk, Quantity)
assert myk.unit == u.deg
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_unused_kwargs3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(
solarx: solarx_unit, solary, myk: solary_unit = 1 * u.arcsec, myk2=1000
):
return solarx, solary, myk, myk2
solarx, solary, myk, myk2 = myfunc_args(1 * u.arcsec, 100, myk=100 * u.deg, myk2=10)
assert isinstance(solarx, Quantity)
assert isinstance(solary, int)
assert isinstance(myk, Quantity)
assert isinstance(myk2, int)
assert myk.unit == u.deg
assert myk2 == 10
@pytest.mark.parametrize("solarx_unit,energy", [(u.arcsec, u.eV), ("angle", "energy")])
def test_kwarg_equivalencies3(solarx_unit, energy):
@u.quantity_input(equivalencies=u.mass_energy())
def myfunc_args(solarx: solarx_unit, energy: energy = 10 * u.eV):
return solarx, energy + (10 * u.J) # Add an energy to check equiv is working
solarx, energy = myfunc_args(1 * u.arcsec, 100 * u.gram)
assert isinstance(solarx, Quantity)
assert isinstance(energy, Quantity)
assert solarx.unit == u.arcsec
assert energy.unit == u.gram
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_kwarg_wrong_unit3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit = 10 * u.deg):
return solarx, solary
with pytest.raises(
u.UnitsError,
match=(
"Argument 'solary' to function 'myfunc_args' must be in "
f"units convertible to '{str(solary_unit)}'."
),
):
solarx, solary = myfunc_args(1 * u.arcsec, solary=100 * u.km)
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_kwarg_not_quantity3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit = 10 * u.deg):
return solarx, solary
with pytest.raises(
TypeError,
match=(
"Argument 'solary' to function 'myfunc_args' has no 'unit' attribute. "
"You should pass in an astropy Quantity instead."
),
):
solarx, solary = myfunc_args(1 * u.arcsec, solary=100)
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_kwarg_default3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit = 10 * u.deg):
return solarx, solary
solarx, solary = myfunc_args(1 * u.arcsec)
def test_return_annotation():
@u.quantity_input
def myfunc_args(solarx: u.arcsec) -> u.deg:
return solarx
solarx = myfunc_args(1 * u.arcsec)
assert solarx.unit is u.deg
def test_return_annotation_none():
@u.quantity_input
def myfunc_args(solarx: u.arcsec) -> None:
pass
solarx = myfunc_args(1 * u.arcsec)
assert solarx is None
def test_return_annotation_notUnit():
@u.quantity_input
def myfunc_args(solarx: u.arcsec) -> int:
return 0
solarx = myfunc_args(1 * u.arcsec)
assert solarx == 0
def test_enum_annotation():
# Regression test for gh-9932
from enum import Enum, auto
class BasicEnum(Enum):
AnOption = auto()
@u.quantity_input
def myfunc_args(a: BasicEnum, b: u.arcsec) -> None:
pass
myfunc_args(BasicEnum.AnOption, 1 * u.arcsec)
| TestQuantityUnitAnnotations |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zendesk-support/unit_tests/integrations/zs_responses/ticket_metrics_response_builder.py | {
"start": 310,
"end": 1057
} | class ____(HttpResponseBuilder):
@classmethod
def stateful_ticket_metrics_response(cls) -> "TicketMetricsResponseBuilder":
return cls(find_template("stateful_ticket_metrics", __file__), FieldPath("ticket_metric"), CursorBasedPaginationStrategy())
@classmethod
def stateless_ticket_metrics_response(cls) -> "TicketMetricsResponseBuilder":
return cls(find_template("stateless_ticket_metrics", __file__), NestedPath(["ticket_metrics", 0]), CursorBasedPaginationStrategy())
def build(self) -> HttpResponse:
for record in self._records:
self._records_path.update(self._response, record.build())
return HttpResponse(json.dumps(self._response), self._status_code)
| TicketMetricsResponseBuilder |
python | scipy__scipy | scipy/special/tests/test_basic.py | {
"start": 125393,
"end": 127712
} | class ____:
def test_gamma(self):
gam = special.gamma(5)
assert_equal(gam,24.0)
def test_gammaln(self):
gamln = special.gammaln(3)
lngam = log(special.gamma(3))
assert_allclose(gamln, lngam, atol=1.5e-8, rtol=0)
def test_gammainccinv(self):
gccinv = special.gammainccinv(.5,.5)
gcinv = special.gammaincinv(.5,.5)
assert_allclose(gccinv, gcinv, atol=1.5e-8, rtol=0)
@with_special_errors
def test_gammaincinv(self):
y = special.gammaincinv(.4,.4)
x = special.gammainc(.4,y)
assert_allclose(x, 0.4, atol=1.5e-10, rtol=0)
y = special.gammainc(10, 0.05)
x = special.gammaincinv(10, 2.5715803516000736e-20)
assert_allclose(0.05, x, atol=1.5e-10, rtol=0)
assert_allclose(y, 2.5715803516000736e-20, atol=1.5e-10, rtol=0)
x = special.gammaincinv(50, 8.20754777388471303050299243573393e-18)
assert_allclose(11.0, x, atol=1.5e-10, rtol=0)
@with_special_errors
def test_975(self):
# Regression test for ticket #975 -- switch point in algorithm
# check that things work OK at the point, immediately next floats
# around it, and a bit further away
pts = [0.25,
np.nextafter(0.25, 0), 0.25 - 1e-12,
np.nextafter(0.25, 1), 0.25 + 1e-12]
for pt in pts:
y = special.gammaincinv(.4, pt)
x = special.gammainc(0.4, y)
assert_allclose(x, pt, rtol=1e-12)
def test_rgamma(self):
rgam = special.rgamma(8)
rlgam = 1/special.gamma(8)
assert_allclose(rgam, rlgam, atol=1.5e-8, rtol=0)
def test_infinity(self):
assert_equal(special.rgamma(-1), 0)
@pytest.mark.parametrize(
"x,expected",
[
# infinities
([-np.inf, np.inf], [np.nan, np.inf]),
# negative and positive zero
([-0.0, 0.0], [-np.inf, np.inf]),
# small poles
(range(-32, 0), np.full(32, np.nan)),
# medium sized poles
(range(-1024, -32, 99), np.full(11, np.nan)),
# large pole
([-4.141512231792294e+16], [np.nan]),
]
)
def test_poles(self, x, expected):
assert_array_equal(special.gamma(x), expected)
| TestGamma |
python | doocs__leetcode | solution/0600-0699/0600.Non-negative Integers without Consecutive Ones/Solution.py | {
"start": 0,
"end": 465
} | class ____:
def findIntegers(self, n: int) -> int:
@cache
def dfs(i: int, pre: int, limit: bool) -> int:
if i < 0:
return 1
up = (n >> i & 1) if limit else 1
ans = 0
for j in range(up + 1):
if pre and j:
continue
ans += dfs(i - 1, j, limit and j == up)
return ans
return dfs(n.bit_length() - 1, 0, True)
| Solution |
python | astropy__astropy | astropy/coordinates/tests/test_separation.py | {
"start": 666,
"end": 13309
} | class ____(NamedTuple):
"""
The coordinates the position angle and separations are relative to
are different for different tests.
"""
coord: BaseCoordinateFrame | SkyCoord
pytest_id: str
position_angle: u.Quantity
separation: u.Quantity
separation_3d: u.Quantity
reversed_position_angle: u.Quantity
@property
def reversed_separation(self) -> u.Quantity:
return self.separation
@property
def reversed_separation_3d(self) -> u.Quantity:
return self.separation_3d
@pytest.mark.parametrize("coord_class", [SkyCoord, ICRS])
@pytest.mark.parametrize(
"other_coord",
[
SeparationExpectation(
ICRS(0 * u.deg, 0 * u.deg, 3 * u.pc),
"no_separation",
0 * u.deg,
0 * u.deg,
0 * u.pc,
0 * u.deg,
),
SeparationExpectation(
ICRS(0 * u.deg, 1 * u.deg, 3 * u.pc),
"dec_separation",
0 * u.deg,
1 * u.deg,
0.05235921 * u.pc,
180 * u.deg,
),
SeparationExpectation(
ICRS(1 * u.deg, 0 * u.deg, 3 * u.pc),
"ra_separation",
90 * u.deg,
1 * u.deg,
0.05235921 * u.pc,
270 * u.deg,
),
SeparationExpectation(
ICRS(0 * u.deg, 0 * u.deg, 10 * u.pc),
"distance_separation",
0 * u.deg,
0 * u.deg,
7 * u.pc,
0 * u.deg,
),
SeparationExpectation(
SkyCoord(1 * u.deg, 1 * u.deg, 9 * u.pc),
"SkyCoord_input",
44.995636 * u.deg,
1.4141777 * u.deg,
6.00137 * u.pc,
225.00436354 * u.deg,
),
],
ids=lambda x: x.pytest_id,
)
@pytest.mark.parametrize("method", ["position_angle", "separation", "separation_3d"])
def test_scalar_coords(coord_class, other_coord, method):
vernal_equinox = coord_class(0 * u.deg, 0 * u.deg, 3 * u.pc)
assert_quantity_allclose(
getattr(vernal_equinox, method)(other_coord.coord), getattr(other_coord, method)
)
assert_quantity_allclose(
getattr(other_coord.coord, method)(vernal_equinox),
getattr(other_coord, f"reversed_{method}"),
)
@pytest.mark.parametrize("coord_class", [SkyCoord, ICRS])
@pytest.mark.parametrize(
"other_coord",
[
SeparationExpectation(
FK5(0 * u.deg, 90 * u.deg, 2 * u.pc),
"FK5_input",
245.42603114 * u.deg,
6.07832112e-06 * u.deg,
2.12173433e-07 * u.pc,
65.42602474 * u.deg,
),
SeparationExpectation(
Galactic(0 * u.deg, 90 * u.deg, 9 * u.pc),
"Galactic_input",
347.14052211 * u.deg,
62.871748 * u.deg,
8.28158093 * u.pc,
57.06807474 * u.deg,
),
],
ids=lambda x: x.pytest_id,
)
@pytest.mark.parametrize("method", ["position_angle", "separation", "separation_3d"])
def test_scalar_coords_frame_transformation(coord_class, other_coord, method):
north_pole = coord_class(0 * u.deg, 90 * u.deg, 2 * u.pc)
assert_quantity_allclose(
getattr(north_pole, method)(other_coord.coord), getattr(other_coord, method)
)
assert_quantity_allclose(
getattr(other_coord.coord, method)(north_pole),
getattr(other_coord, f"reversed_{method}"),
)
@pytest.mark.parametrize("coord_class", [SkyCoord, ICRS])
@pytest.mark.parametrize(
"other_coord",
[
SeparationExpectation(
ICRS([-1, -2, -3] * u.deg, [0.1, 1.1, 2.1] * u.deg, [11, 13, 17] * u.pc),
"ICRS_input",
[275.710887, 272.880921, 271.963607] * u.deg,
[1.0049871, 2.0021627, 2.9997464] * u.deg,
[8.000635, 8.004959, 10.016293] * u.pc,
[95.71001423, 92.84426777, 91.8562681] * u.deg,
),
SeparationExpectation(
SkyCoord([1, -2, 3] * u.deg, [-0.1, 1.1, 2.1] * u.deg, [11, 13, 17] * u.pc),
"SkyCoord_input",
[95.71088692, 272.880921, 88.03639251] * u.deg,
[1.0049871, 2.0021627, 2.9997464] * u.deg,
[8.000635, 8.004959, 10.016293] * u.pc,
[275.71001423, 92.84426777, 268.1437319] * u.deg,
),
],
ids=lambda x: x.pytest_id,
)
@pytest.mark.parametrize("method", ["position_angle", "separation", "separation_3d"])
def test_array_coords(coord_class, other_coord, method):
coord = coord_class(0 * u.deg, [0, 1, 2] * u.deg, [3, 5, 7] * u.pc)
assert_quantity_allclose(
getattr(coord, method)(other_coord.coord), getattr(other_coord, method)
)
assert_quantity_allclose(
getattr(other_coord.coord, method)(coord),
getattr(other_coord, f"reversed_{method}"),
)
@pytest.mark.parametrize(
"coord",
[
pytest.param(FK5(1 * u.deg, 0 * u.deg, 1 * u.pc), id="FK5"),
pytest.param(
SkyCoord(1 * u.deg, 0 * u.deg, 1 * u.pc, frame="fk5"), id="SkyCoord"
),
],
)
@pytest.mark.parametrize(
"other_coord",
[
SeparationExpectation(
FK5(1 * u.deg, 0 * u.deg, 1 * u.pc, equinox="B1950"),
"FK5_B1950",
66.51310007 * u.deg,
0.69835342 * u.deg,
0.01218849 * u.pc,
246.50823798 * u.deg,
),
SeparationExpectation(
SkyCoord(1 * u.deg, 0 * u.deg, 10 * u.pc, frame="fk5", equinox="B1950"),
"SkyCoord_B1950",
66.51310007 * u.deg,
0.69835342 * u.deg,
9.000083 * u.pc,
246.50823798 * u.deg,
),
],
ids=lambda x: x.pytest_id,
)
@pytest.mark.parametrize("method", ["position_angle", "separation", "separation_3d"])
def test_equinox_conversion(coord, other_coord, method):
"""
Regression test for
- #868, #891: methods raised errors in case of frames with equinoxes
- #3106, #15659: unspecified equinox should be interpreted as frame default,
both in `SkyCoord` methods (#3106) and in `BaseCoordinateFrame` methods with
`SkyCoord` input (#15659).
- #5702: `position_angle()` didn't check if equinoxes differed
"""
assert_quantity_allclose(
getattr(coord, method)(other_coord.coord), getattr(other_coord, method)
)
assert_quantity_allclose(
getattr(other_coord.coord, method)(coord),
getattr(other_coord, f"reversed_{method}"),
)
@pytest.mark.parametrize("other_class", [SkyCoord, ICRS])
@pytest.mark.parametrize("coord_class", [SkyCoord, ICRS])
def test_separation_3d_dimensionless_distance(coord_class, other_class):
assert_quantity_allclose(
coord_class(35 * u.deg, 0 * u.deg, 3 * u.one).separation_3d(
other_class(125 * u.deg, 0 * u.deg, 4 * u.one)
),
5 * u.one,
)
@pytest.mark.parametrize("dimensionless_class", [SkyCoord, ICRS])
@pytest.mark.parametrize("length_class", [SkyCoord, ICRS])
def test_separation_3d_distance_dimension_mismatch(length_class, dimensionless_class):
dimensionless_coord = dimensionless_class(1 * u.deg, -2 * u.deg, 14)
length_coord = length_class(-1 * u.deg, 2 * u.deg, 21 * u.pc)
error_message = (
"^Can only apply 'subtract' function to quantities with compatible dimensions$"
)
with pytest.raises(u.UnitConversionError, match=error_message):
dimensionless_coord.separation_3d(length_coord)
with pytest.raises(u.UnitConversionError, match=error_message):
length_coord.separation_3d(dimensionless_coord)
@pytest.mark.parametrize("coord_class", [SkyCoord, ICRS])
def test_separation_3d_no_distance(coord_class):
coord_no_distance = coord_class(0 * u.deg, 0 * u.deg)
coord_with_distance = ICRS(0 * u.deg, 0 * u.deg, distance=3 * u.pc)
with pytest.raises(
ValueError,
match="^This object does not have a distance; cannot compute 3d separation.$",
):
coord_no_distance.separation_3d(coord_with_distance)
with pytest.raises(
u.UnitConversionError,
match=(
"^Can only apply 'subtract' function to quantities with compatible"
" dimensions$"
),
):
coord_with_distance.separation_3d(coord_no_distance)
@pytest.mark.parametrize("coord_class", [SkyCoord, ICRS])
@pytest.mark.parametrize(
"velocity_kwargs",
[
pytest.param({}, id="no_velocity"),
pytest.param({"radial_velocity": -108 * u.km / u.s}, id="radial_velocity"),
pytest.param(
{"pm_ra_cosdec": 7 * u.mas / u.s, "pm_dec": -5 * u.mas / u.s},
id="proper_motion",
),
pytest.param(
{
"radial_velocity": -108 * u.km / u.s,
"pm_ra_cosdec": 7 * u.mas / u.s,
"pm_dec": -5 * u.mas / u.s,
},
id="3d_velocity",
),
],
)
@pytest.mark.parametrize(
"other_coord",
[
SeparationExpectation(
ICRS(
ra=20 * u.deg,
dec=10 * u.deg,
distance=8 * u.pc,
pm_ra_cosdec=14 * u.mas / u.yr,
pm_dec=-11 * u.mas / u.yr,
radial_velocity=5 * u.km / u.s,
),
"ICRS_with_velocity",
134.58168775 * u.deg,
13.89233851 * u.deg,
3.36750833 * u.pc,
317.18593154 * u.deg,
),
SeparationExpectation(
SkyCoord(ra=20 * u.deg, dec=10 * u.deg, distance=8 * u.pc),
"SkyCoord_no_velocity",
134.58168775 * u.deg,
13.89233851 * u.deg,
3.36750833 * u.pc,
317.18593154 * u.deg,
),
],
ids=lambda x: x.pytest_id,
)
@pytest.mark.parametrize("method", ["position_angle", "separation", "separation_3d"])
def test_with_velocities(coord_class, velocity_kwargs, other_coord, method):
coord = coord_class(
ra=10 * u.deg, dec=20 * u.deg, distance=5 * u.pc, **velocity_kwargs
)
assert_quantity_allclose(
getattr(coord, method)(other_coord.coord), getattr(other_coord, method)
)
assert_quantity_allclose(
getattr(other_coord.coord, method)(coord),
getattr(other_coord, f"reversed_{method}"),
)
@pytest.mark.parametrize("coord_class", [SkyCoord, ICRS])
@pytest.mark.parametrize(
"method,output_type",
[
pytest.param(method, type_, id=method)
for method, type_ in (
("position_angle", Angle),
("separation", Angle),
("separation_3d", Distance),
)
],
)
def test_return_types(coord_class, method, output_type):
"""
This is especially important for SkyCoord because SkyCoord instances
expose the methods of their underlying frame at runtime, so they cannot be
checked statically.
"""
coord = coord_class(0 * u.deg, 0 * u.deg, 1 * u.pc)
assert type(getattr(coord, method)(coord)) is output_type
@pytest.mark.parametrize("coord_class", [SkyCoord, ICRS])
@pytest.mark.parametrize(
"origin_mismatch_kwarg,expectation",
[
pytest.param({"origin_mismatch": "ignore"}, nullcontext(), id="ignore"),
pytest.param(
{"origin_mismatch": "warn"},
pytest.warns(
NonRotationTransformationWarning,
match="^transforming other coordinates from <GCRS Frame ",
),
id="warn",
),
pytest.param(
{"origin_mismatch": "error"},
pytest.raises(
NonRotationTransformationError,
match="^refusing to transform other coordinates from <GCRS Frame ",
),
id="error",
),
pytest.param(
{},
pytest.warns(
NonRotationTransformationWarning,
match="^transforming other coordinates from <GCRS Frame ",
),
id="default",
),
pytest.param(
{"origin_mismatch": "bad"},
pytest.raises(
ValueError,
match=(
r"^origin_mismatch='bad' is invalid\. Allowed values are 'ignore', "
r"'warn' or 'error'\.$"
),
),
id="invalid",
),
],
)
def test_separation_origin_mismatch_action(
coord_class, origin_mismatch_kwarg, expectation
):
with expectation:
coord_class(0 * u.deg, 0 * u.deg).separation(
SkyCoord(0 * u.deg, 0 * u.deg, frame=GCRS), **origin_mismatch_kwarg
)
| SeparationExpectation |
python | PyCQA__pycodestyle | tests/test_blank_lines.py | {
"start": 2709,
"end": 3219
} | class ____(object):
pass
""")
self.assertEqual([
'E302:7:1', # another_function
'E302:14:1', # SomeCloseClass
], result)
def test_top_level_more_blank_lines(self):
"""
It will trigger an error when more 2 blank lines are found
before top level definitions.
"""
result = errors_from_src("""# First comment line.
# Second line of comment.
def some_function():
pass
def this_one_is_good():
pass
| AFarEnoughClass |
python | django-guardian__django-guardian | guardian/testapp/tests/test_mixins.py | {
"start": 965,
"end": 1080
} | class ____(PermissionRequiredMixin, RemoveDatabaseView):
permission_required = "testapp.change_post"
| NoObjectView |
python | hynek__structlog | tests/processors/test_processors.py | {
"start": 1009,
"end": 1727
} | class ____:
def test_encodes(self):
"""
Unicode strings get encoded (as UTF-8 by default).
"""
e = UnicodeEncoder()
assert {"foo": b"b\xc3\xa4r"} == e(None, None, {"foo": "b\xe4r"})
def test_passes_arguments(self):
"""
Encoding options are passed into the encoding call.
"""
e = UnicodeEncoder("latin1", "xmlcharrefreplace")
assert {"foo": b"–"} == e(None, None, {"foo": "\u2013"})
def test_bytes_nop(self):
"""
If the string is already bytes, don't do anything.
"""
e = UnicodeEncoder()
assert {"foo": b"b\xc3\xa4r"} == e(None, None, {"foo": b"b\xc3\xa4r"})
| TestUnicodeEncoder |
python | django__django | tests/many_to_many/models.py | {
"start": 1838,
"end": 1891
} | class ____(AbstractArticle):
pass
| InheritedArticleA |
python | django__django | tests/sessions_tests/models.py | {
"start": 640,
"end": 1242
} | class ____(DBStore):
"""
A database session store, that handles updating the account ID column
inside the custom session model.
"""
@classmethod
def get_model_class(cls):
return CustomSession
def create_model_instance(self, data):
obj = super().create_model_instance(data)
try:
account_id = int(data.get("_auth_user_id"))
except (ValueError, TypeError):
account_id = None
obj.account_id = account_id
return obj
def get_session_cookie_age(self):
return 60 * 60 * 24 # One day.
| SessionStore |
python | pytorch__pytorch | benchmarks/operator_benchmark/common/tests/pt_cpu_gpu_forward_backward_test.py | {
"start": 168,
"end": 714
} | class ____(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
self.input_one = torch.rand(M, N, K, device=device, requires_grad=True)
self.input_two = torch.rand(M, N, K, device=device, requires_grad=True)
self.set_module_name("add")
def forward(self):
return torch.add(self.input_one, self.input_two)
op_bench.generate_pt_test(add_configs, AddBenchmark)
op_bench.generate_pt_gradient_test(add_configs, AddBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| AddBenchmark |
python | doocs__leetcode | solution/2500-2599/2583.Kth Largest Sum in a Binary Tree/Solution.py | {
"start": 192,
"end": 690
} | class ____:
def kthLargestLevelSum(self, root: Optional[TreeNode], k: int) -> int:
arr = []
q = deque([root])
while q:
t = 0
for _ in range(len(q)):
root = q.popleft()
t += root.val
if root.left:
q.append(root.left)
if root.right:
q.append(root.right)
arr.append(t)
return -1 if len(arr) < k else nlargest(k, arr)[-1]
| Solution |
python | getsentry__sentry | src/sentry/preprod/migrations/0013_binary_uuid.py | {
"start": 155,
"end": 1487
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("preprod", "0012_installablepreprod"),
]
operations = [
migrations.AddField(
model_name="preprodartifact",
name="main_binary_identifier",
field=models.CharField(db_index=True, max_length=255, null=True),
),
]
| Migration |
python | getsentry__sentry | tests/sentry/ratelimits/utils/test_enforce_rate_limit.py | {
"start": 463,
"end": 738
} | class ____(Endpoint):
permission_classes = (AllowAny,)
rate_limits = RateLimitConfig(
limit_overrides={"GET": {RateLimitCategory.IP: RateLimit(limit=1, window=100)}}
)
def get(self, request):
return Response({"ok": True})
| RateLimitTestEndpoint |
python | streamlit__streamlit | lib/tests/streamlit/runtime/caching/cache_data_api_test.py | {
"start": 9020,
"end": 18282
} | class ____(DeltaGeneratorTestCase):
"""st.cache_data disk persistence tests"""
def setUp(self) -> None:
super().setUp()
mock_runtime = MagicMock(spec=Runtime)
mock_runtime.cache_storage_manager = LocalDiskCacheStorageManager()
Runtime._instance = mock_runtime
def tearDown(self) -> None:
st.cache_data.clear()
super().tearDown()
@patch("streamlit.runtime.caching.storage.local_disk_cache_storage.streamlit_write")
def test_dont_persist_by_default(self, mock_write):
@st.cache_data
def foo():
return "data"
foo()
mock_write.assert_not_called()
@patch("streamlit.runtime.caching.storage.local_disk_cache_storage.streamlit_write")
def test_persist_path(self, mock_write):
"""Ensure we're writing to ~/.streamlit/cache/*.memo"""
@st.cache_data(persist="disk")
def foo():
return "data"
foo()
mock_write.assert_called_once()
write_path = mock_write.call_args[0][0]
match = re.fullmatch(
r"/mock/home/folder/.streamlit/cache/.*?\.memo", write_path
)
assert match is not None
@patch("streamlit.file_util.os.stat", MagicMock())
@patch(
"streamlit.file_util.open",
mock_open(read_data=pickle.dumps(as_cached_result("mock_pickled_value"))),
)
@patch(
"streamlit.runtime.caching.storage.local_disk_cache_storage.streamlit_read",
wraps=file_util.streamlit_read,
)
def test_read_persisted_data(self, mock_read):
"""We should read persisted data from disk on cache miss."""
@st.cache_data(persist="disk")
def foo():
return "actual_value"
data = foo()
mock_read.assert_called_once()
assert data == "mock_pickled_value"
@patch("streamlit.file_util.os.stat", MagicMock())
@patch("streamlit.file_util.open", mock_open(read_data="bad_pickled_value"))
@patch(
"streamlit.runtime.caching.storage.local_disk_cache_storage.streamlit_read",
wraps=file_util.streamlit_read,
)
def test_read_bad_persisted_data(self, mock_read):
"""If our persisted data is bad, we raise an exception."""
@st.cache_data(persist="disk")
def foo():
return "actual_value"
with pytest.raises(CacheError) as error:
foo()
mock_read.assert_called_once()
assert str(error.value) == "Unable to read from cache"
@patch("streamlit.file_util.os.stat", MagicMock())
@patch("streamlit.file_util.open", mock_open(read_data=b"bad_binary_pickled_value"))
@patch(
"streamlit.runtime.caching.storage.local_disk_cache_storage.streamlit_read",
wraps=file_util.streamlit_read,
)
def test_read_bad_persisted_binary_data(self, mock_read):
"""If our persisted data is bad, we raise an exception."""
@st.cache_data(persist="disk")
def foo():
return "actual_value"
with pytest.raises(CacheError) as error:
foo()
mock_read.assert_called_once()
assert "Failed to unpickle" in str(error.value)
def test_bad_persist_value(self):
"""Throw an error if an invalid value is passed to 'persist'."""
with pytest.raises(StreamlitAPIException) as e:
@st.cache_data(persist="yesplz")
def foo():
pass
assert (
str(e.value)
== "Unsupported persist option 'yesplz'. Valid values are 'disk' or None."
)
@patch("shutil.rmtree")
def test_clear_all_disk_caches(self, mock_rmtree):
"""`clear_all` should remove the disk cache directory if it exists."""
# If the cache dir exists, we should delete it.
with patch("os.path.isdir", MagicMock(return_value=True)):
st.cache_data.clear()
mock_rmtree.assert_called_once_with(get_cache_folder_path())
mock_rmtree.reset_mock()
# If the cache dir does not exist, we shouldn't try to delete it.
with patch("os.path.isdir", MagicMock(return_value=False)):
st.cache_data.clear()
mock_rmtree.assert_not_called()
@patch("streamlit.file_util.os.stat", MagicMock())
@patch(
"streamlit.file_util.open",
wraps=mock_open(read_data=pickle.dumps(as_cached_result("mock_pickled_value"))),
)
@patch("streamlit.runtime.caching.storage.local_disk_cache_storage.os.remove")
def test_clear_one_disk_cache(self, mock_os_remove: Mock, mock_open: Mock):
"""A memoized function's clear_cache() property should just clear
that function's cache."""
@st.cache_data(persist="disk")
def foo(val):
return "actual_value"
foo(0)
foo(1)
# We should've opened two files, one for each distinct "foo" call.
assert mock_open.call_count == 2
# Get the names of the two files that were created. These will look
# something like '/mock/home/folder/.streamlit/cache/[long_hash].memo'
created_filenames = {
mock_open.call_args_list[0][0][0],
mock_open.call_args_list[1][0][0],
}
created_files_base_names = [
os.path.basename(filename) for filename in created_filenames
]
mock_os_remove.assert_not_called()
with (
patch("os.listdir", MagicMock(return_value=created_files_base_names)),
patch("os.path.isdir", MagicMock(return_value=True)),
):
# Clear foo's cache
foo.clear()
# os.remove should have been called once for each of our created cache files
assert mock_os_remove.call_count == 2
removed_filenames = {
mock_os_remove.call_args_list[0][0][0],
mock_os_remove.call_args_list[1][0][0],
}
# The two files we removed should be the same two files we created.
assert created_filenames == removed_filenames
@patch("streamlit.file_util.os.stat", MagicMock())
@patch(
"streamlit.file_util.open",
wraps=mock_open(read_data=pickle.dumps(as_replay_test_data())),
)
def test_cached_st_function_replay(self, _):
@st.cache_data(persist="disk")
def foo(i):
st.text(i)
return i
foo(1)
deltas = self.get_all_deltas_from_queue()
text = [
element.text.body
for element in (delta.new_element for delta in deltas)
if element.WhichOneof("type") == "text"
]
assert text == ["1"]
@patch("streamlit.file_util.os.stat", MagicMock())
@patch(
"streamlit.runtime.caching.storage.local_disk_cache_storage.streamlit_write",
MagicMock(),
)
@patch(
"streamlit.file_util.open",
wraps=mock_open(read_data=pickle.dumps(1)),
)
def test_cached_st_function_clear_args_persist(self, _):
self.x = 0
@st.cache_data(persist="disk")
def foo(y):
self.x += y
return self.x
assert foo(1) == 1
foo.clear(2)
assert foo(1) == 1
foo.clear(1)
assert foo(1) == 2
@patch("streamlit.file_util.os.stat", MagicMock())
@patch(
"streamlit.runtime.caching.storage.local_disk_cache_storage.streamlit_write",
MagicMock(),
)
@patch(
"streamlit.file_util.open",
wraps=mock_open(read_data=pickle.dumps(1)),
)
def test_cached_format_migration(self, _):
@st.cache_data(persist="disk")
def foo(i):
st.text(i)
return i
# Executes normally, without raising any errors
foo(1)
@patch("streamlit.runtime.caching.storage.local_disk_cache_storage.streamlit_write")
def test_warning_memo_ttl_persist(self, _):
"""Using @st.cache_data with ttl and persist produces a warning."""
with self.assertLogs(
"streamlit.runtime.caching.storage.local_disk_cache_storage",
level=logging.WARNING,
) as logs:
@st.cache_data(ttl=60, persist="disk")
def user_function():
return 42
st.write(user_function())
output = "".join(logs.output)
assert (
"The cached function 'user_function' has a TTL that will be ignored."
in output
)
@parameterized.expand(
[
("disk", "disk", True),
("True", True, True),
("None", None, False),
("False", False, False),
]
)
@patch("streamlit.runtime.caching.storage.local_disk_cache_storage.streamlit_write")
def test_persist_param_value(
self,
_,
persist_value: str | bool | None,
should_persist: bool,
mock_write: Mock,
):
"""Passing "disk" or `True` enables persistence; `None` or `False` disables it."""
@st.cache_data(persist=persist_value)
def foo():
return "data"
foo()
if should_persist:
mock_write.assert_called_once()
else:
mock_write.assert_not_called()
| CacheDataPersistTest |
python | python__mypy | mypyc/ir/ops.py | {
"start": 6120,
"end": 7208
} | class ____(Value):
"""Short integer literal.
Integer literals are treated as constant values and are generally
not included in data flow analyses and such, unlike Register and
Op subclasses.
Integer can represent multiple types:
* Short tagged integers (short_int_primitive type; the tag bit is clear)
* Ordinary fixed-width integers (e.g., int32_rprimitive)
* Values of other unboxed primitive types that are represented as integers
(none_rprimitive, bool_rprimitive)
* Null pointers (value 0) of various types, including object_rprimitive
"""
def __init__(self, value: int, rtype: RType = short_int_rprimitive, line: int = -1) -> None:
if is_short_int_rprimitive(rtype) or is_int_rprimitive(rtype):
self.value = value * 2
else:
self.value = value
self.type = rtype
self.line = line
def numeric_value(self) -> int:
if is_short_int_rprimitive(self.type) or is_int_rprimitive(self.type):
return self.value // 2
return self.value
@final
| Integer |
python | kubernetes-client__python | kubernetes/client/models/v1_custom_resource_definition_status.py | {
"start": 383,
"end": 6600
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'accepted_names': 'V1CustomResourceDefinitionNames',
'conditions': 'list[V1CustomResourceDefinitionCondition]',
'stored_versions': 'list[str]'
}
attribute_map = {
'accepted_names': 'acceptedNames',
'conditions': 'conditions',
'stored_versions': 'storedVersions'
}
def __init__(self, accepted_names=None, conditions=None, stored_versions=None, local_vars_configuration=None): # noqa: E501
"""V1CustomResourceDefinitionStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._accepted_names = None
self._conditions = None
self._stored_versions = None
self.discriminator = None
if accepted_names is not None:
self.accepted_names = accepted_names
if conditions is not None:
self.conditions = conditions
if stored_versions is not None:
self.stored_versions = stored_versions
@property
def accepted_names(self):
"""Gets the accepted_names of this V1CustomResourceDefinitionStatus. # noqa: E501
:return: The accepted_names of this V1CustomResourceDefinitionStatus. # noqa: E501
:rtype: V1CustomResourceDefinitionNames
"""
return self._accepted_names
@accepted_names.setter
def accepted_names(self, accepted_names):
"""Sets the accepted_names of this V1CustomResourceDefinitionStatus.
:param accepted_names: The accepted_names of this V1CustomResourceDefinitionStatus. # noqa: E501
:type: V1CustomResourceDefinitionNames
"""
self._accepted_names = accepted_names
@property
def conditions(self):
"""Gets the conditions of this V1CustomResourceDefinitionStatus. # noqa: E501
conditions indicate state for particular aspects of a CustomResourceDefinition # noqa: E501
:return: The conditions of this V1CustomResourceDefinitionStatus. # noqa: E501
:rtype: list[V1CustomResourceDefinitionCondition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this V1CustomResourceDefinitionStatus.
conditions indicate state for particular aspects of a CustomResourceDefinition # noqa: E501
:param conditions: The conditions of this V1CustomResourceDefinitionStatus. # noqa: E501
:type: list[V1CustomResourceDefinitionCondition]
"""
self._conditions = conditions
@property
def stored_versions(self):
"""Gets the stored_versions of this V1CustomResourceDefinitionStatus. # noqa: E501
storedVersions lists all versions of CustomResources that were ever persisted. Tracking these versions allows a migration path for stored versions in etcd. The field is mutable so a migration controller can finish a migration to another version (ensuring no old objects are left in storage), and then remove the rest of the versions from this list. Versions may not be removed from `spec.versions` while they exist in this list. # noqa: E501
:return: The stored_versions of this V1CustomResourceDefinitionStatus. # noqa: E501
:rtype: list[str]
"""
return self._stored_versions
@stored_versions.setter
def stored_versions(self, stored_versions):
"""Sets the stored_versions of this V1CustomResourceDefinitionStatus.
storedVersions lists all versions of CustomResources that were ever persisted. Tracking these versions allows a migration path for stored versions in etcd. The field is mutable so a migration controller can finish a migration to another version (ensuring no old objects are left in storage), and then remove the rest of the versions from this list. Versions may not be removed from `spec.versions` while they exist in this list. # noqa: E501
:param stored_versions: The stored_versions of this V1CustomResourceDefinitionStatus. # noqa: E501
:type: list[str]
"""
self._stored_versions = stored_versions
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1CustomResourceDefinitionStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1CustomResourceDefinitionStatus):
return True
return self.to_dict() != other.to_dict()
| V1CustomResourceDefinitionStatus |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/errors.py | {
"start": 16035,
"end": 16226
} | class ____(DagsterError):
"""Indicates that an error has occurred when an op has been invoked, but before the actual
core compute has been reached.
"""
| DagsterInvalidInvocationError |
python | google__jax | tests/custom_partitioning_test.py | {
"start": 1065,
"end": 16330
} | class ____(jtu.JaxTestCase):
def skip_if_custom_partitioning_not_supported(self):
if jtu.is_cloud_tpu():
raise unittest.SkipTest("Custom partitioning is not supported on libtpu.")
@jtu.skip_on_devices('cpu') # Collectives don't seem to work on CPU.
def test_custom_partitioner(self):
self.skip_if_custom_partitioning_not_supported()
def partition(precision, mesh, arg_shapes, result_shape):
arg_shardings = jax.tree.map(lambda s: s.sharding, arg_shapes)
result_sharding = result_shape[0].sharding
self.assertEqual(arg_shardings[0], result_sharding)
self.assertEqual(P('x', None), result_sharding.spec)
self.assertEqual(P('y', None), arg_shardings[1].spec)
def lower_fn(x, y):
axis_name = arg_shardings[1].spec[0][0]
i = jax.lax.axis_index(axis_name)
# Use offset i * 0 instead of 0 to ensure that the two offsets have the
# same dtype regardless the value of config.enable_x64.
z = jax.lax.psum(
jax.lax.dynamic_slice(x, (i * 0, i * 8), (8, 8)) @ y, (axis_name)
)
return z, z * z
return mesh, lower_fn, (result_sharding, result_sharding), arg_shardings
def infer_sharding_from_operands(precision, mesh, arg_shapes, result_shape):
arg_shardings = jax.tree.map(lambda s: s.sharding, arg_shapes)
x_shard, y_shard = arg_shardings
x_shape, y_shape = arg_shapes
x_names = tuple(x_shard.spec) + tuple(
None for _ in range(len(x_shape.shape) - len(x_shard.spec)))
y_names = tuple(y_shard.spec) + tuple(
None for _ in range(len(y_shape.shape) - len(y_shard.spec)))
z_shard = NamedSharding(y_shard.mesh, P(*(x_names[:-1] + y_names[1:])))
return z_shard, z_shard
@partial(custom_partitioning, static_argnums=(2,))
def f(x, y, precision=None):
z = jnp.matmul(x, y, precision=precision)
return z, z * z
f.def_partition(
infer_sharding_from_operands=infer_sharding_from_operands,
partition=partition,
sharding_rule=SdyShardingRule(operand_mappings=(('i', 'j'), ('j', 'k')), result_mappings=(('i', 'k'), ('i', 'k'))))
with jax.set_mesh(jtu.create_mesh((4, 2), ('x', 'y'))):
jit_f = jax.jit(f, in_shardings=(P('x'), P('y')), out_shardings=P('x'))
x = np.asarray(np.random.randint(0, 20, (32, 16)), dtype=np.float32)
y = np.asarray(np.random.randint(0, 20, (16, 32)), dtype=np.float32)
x_sharded = jax.device_put(x, P('x'))
y_sharded = jax.device_put(y, P('y'))
result1 = jax.jit(f)(x_sharded, y_sharded)
result2 = f(x, y)
result0 = jit_f(x_sharded, y_sharded)
self.assertArraysEqual(result0, result1)
self.assertArraysEqual(result1, result2)
def test_custom_partitioner_propagate_user_sharding(self):
self.skip_if_custom_partitioning_not_supported()
def partition(mesh, arg_shapes, result_shape):
def lower_fn(x):
return x
return (
mesh,
lower_fn,
arg_shapes[0].sharding,
(arg_shapes[0].sharding,),
)
def infer_sharding_from_operands(mesh, arg_shapes, result_shape):
return arg_shapes[0].sharding
def propagate_user_sharding(mesh, user_shape):
return user_shape.sharding
@custom_partitioning
def f(x):
return x
f.def_partition(
infer_sharding_from_operands=infer_sharding_from_operands,
partition=partition,
propagate_user_sharding=propagate_user_sharding,
sharding_rule='i j -> i j',
)
def f2(a):
return a + f(a)
with jax.set_mesh(jtu.create_mesh((4, 2), ('x', 'y'))):
jit_f = jax.jit(f2, in_shardings=(P(None, 'x')), out_shardings=P('x'))
x = np.asarray(np.random.randint(0, 20, (32, 16)), dtype=np.float32)
self.assertArraysEqual(x + x, jit_f(jax.device_put(x, P(None, 'x'))))
def test_custom_partitioner_sharding_override(self):
self.skip_if_custom_partitioning_not_supported()
def partition(mesh, arg_shapes, result_shape):
def lower_fn(x):
return x
y_shard = arg_shapes[0].sharding
return (
mesh,
lower_fn,
NamedSharding(y_shard.mesh, P(None)),
(NamedSharding(y_shard.mesh, P(None)),),
)
def infer_sharding_from_operands(mesh, arg_shapes, result_shape):
y_shard = arg_shapes[0].sharding
return NamedSharding(y_shard.mesh, P('x'))
@custom_partitioning
def f(x):
return x
f.def_partition(
infer_sharding_from_operands=infer_sharding_from_operands,
partition=partition,
sharding_rule=SdyShardingRule(operand_mappings=((BATCHING, 'i'),), result_mappings=((BATCHING, 'i'),)))
with jax.set_mesh(jtu.create_mesh((4, 2), ('x', 'y'))):
jit_f = jax.jit(f, in_shardings=(P(None, 'x')), out_shardings=P('x'))
x = np.asarray(np.random.randint(0, 20, (32, 16)), dtype=np.float32)
self.assertArraysEqual(x, jit_f(jax.device_put(x, P(None, 'x'))))
def test_custom_partitioner_invalid_sharding(self):
self.skip_if_custom_partitioning_not_supported()
def partition(mesh, arg_shapes, result_shape):
def lower_fn(x):
return x
y_shard = arg_shapes[0].sharding
return (
mesh,
lower_fn,
NamedSharding(y_shard.mesh, P(None)),
(NamedSharding(y_shard.mesh, P(None, 'x')),),
)
def infer_sharding_from_operands(mesh, arg_shapes, result_shape):
y_shard = arg_shapes[0].sharding
return NamedSharding(y_shard.mesh, P('x'))
@custom_partitioning
def f(x):
return x
f.def_partition(
infer_sharding_from_operands=infer_sharding_from_operands,
partition=partition,
sharding_rule='i j -> i j',
)
with jax.set_mesh(jtu.create_mesh((4, 2), ('x', 'y'))):
jit_f = jax.jit(f, in_shardings=(P(None, 'x')), out_shardings=P('x'))
x = np.asarray(np.random.randint(0, 20, (32, 16)), dtype=np.float32)
with self.assertRaisesRegex(Exception, 'Mismatch in result shapes.'):
jit_f(jax.device_put(x, P(None, 'x'))).block_until_ready()
def test_custom_partitioner_jit_annotated_function(self):
"""Test correct lowering of function with a @jax.jit annotated callee.
Annotating a callee with @jax.jit results in a module with a HLO CallOp.
This test is makes sure that the custom partitioner lowering supports
CallOps.
"""
self.skip_if_custom_partitioning_not_supported()
@custom_partitioning
def f(x):
return x
def partition(mesh, arg_shapes, result_shape):
def lower_fn(x):
@jax.jit
def g(y):
return y
return g(x)
x_shard = arg_shapes[0].sharding
return (
mesh,
lower_fn,
NamedSharding(x_shard.mesh, P('x')),
(NamedSharding(x_shard.mesh, P('x')),),
)
def infer_sharding_from_operands(mesh, arg_shapes, result_shape):
x_shard = arg_shapes[0].sharding
return NamedSharding(x_shard.mesh, P('x'))
f.def_partition(
infer_sharding_from_operands=infer_sharding_from_operands,
partition=partition,
sharding_rule='i -> i',
)
with jax.set_mesh(jtu.create_mesh((4,), ('x',))):
jit_f = jax.jit(f)
x = np.asarray(np.random.randint(0, 20, (32,)), dtype=np.float32)
jit_f = jax.jit(jit_f, in_shardings=(P('x')), out_shardings=P('x'))
self.assertArraysEqual(x, jit_f(jax.device_put(x, P('x'))))
def test_custom_partitioner_with_scan(self):
self.skip_if_custom_partitioning_not_supported()
# This is a reproducer from https://github.com/jax-ml/jax/issues/20864.
@custom_partitioning
def f(x):
return jnp.sum(x)
def partition(mesh, arg_shapes, result_shape):
def lower_fn(xs):
def f(carry, x):
return carry + jax.lax.psum(jnp.sum(x), axis_name='x'), None
carry, _ = jax.lax.scan(f, 0, xs)
return carry
result_shardings = jax.tree.map(lambda x: x.sharding, result_shape)
arg_shardings = jax.tree.map(lambda x: x.sharding, arg_shapes)
return mesh, lower_fn, result_shardings, arg_shardings
f.def_partition(
partition,
infer_sharding_from_operands=lambda mesh, *_: NamedSharding(mesh, P()),
propagate_user_sharding=lambda _, user_shape: user_shape.sharding,
sharding_rule='i j -> ') # Result is a scalar.
with jax.set_mesh(jtu.create_mesh((4,), ('x',))):
jit_f = jax.jit(f, in_shardings=P(None, 'x'))
xs = jax.device_put(jnp.ones([32, 16]), P(None, 'x'))
self.assertEqual(jit_f(xs), xs.sum())
def test_custom_partitioning_no_mesh_context(self):
self.skip_if_custom_partitioning_not_supported()
@custom_partitioning
def f(x):
return x
def partition(mesh, arg_shapes, result_shape):
def lower_fn(x):
@jax.jit
def g(y):
return y
return g(x)
x_shard = arg_shapes[0].sharding
return (
mesh,
lower_fn,
NamedSharding(x_shard.mesh, P('x')),
(NamedSharding(x_shard.mesh, P('x')),),
)
def infer_sharding_from_operands(mesh, arg_shapes, result_shape):
x_shard = arg_shapes[0].sharding
return NamedSharding(x_shard.mesh, P('x'))
f.def_partition(
infer_sharding_from_operands=infer_sharding_from_operands,
partition=partition,
sharding_rule='i -> i',
)
mesh = jtu.create_mesh((4,), ('x',))
x = np.asarray(np.random.randint(0, 20, (32,)), dtype=np.float32)
s = NamedSharding(mesh, P('x'))
jit_f = jax.jit(f, in_shardings=s, out_shardings=s)
self.assertArraysEqual(x, jit_f(x))
def test_custom_partitioner_pytree_inputs(self):
self.skip_if_custom_partitioning_not_supported()
def partition(mesh, arg_shapes, result_shape):
def lower_fn(xs):
x, y, z = xs
return x + y + z
return (
mesh,
lower_fn,
arg_shapes[0][0].sharding,
jax.tree.map(lambda x: x.sharding, arg_shapes),
)
def infer_sharding_from_operands(mesh, arg_shapes, result_shape):
return arg_shapes[0][0].sharding
def propagate_user_sharding(mesh, user_shape):
return user_shape.sharding
@custom_partitioning
def f(xs):
x, y, z = xs
return x + y + z
f.def_partition(
infer_sharding_from_operands=infer_sharding_from_operands,
partition=partition,
propagate_user_sharding=propagate_user_sharding,
sharding_rule='i j, i j, i j -> i j',
)
def f2(a):
return a + f((a, a, a))
with jax.set_mesh(jtu.create_mesh((4, 2), ('x', 'y'))):
jit_f = jax.jit(f2, in_shardings=(P(None, 'x')), out_shardings=P('x'))
x = np.asarray(np.random.randint(0, 20, (32, 16)), dtype=np.float32)
self.assertArraysEqual(x * 4, jit_f(jax.device_put(x, P(None, 'x'))))
@jtu.skip_on_devices('cpu')
def test_custom_partition_with_sharding_rule_callback(self):
self.skip_if_custom_partitioning_not_supported()
def partition(static_arg0, static_arg1, mesh, arg_shapes, result_shape):
arg_shardings = jax.tree.map(lambda s: s.sharding, arg_shapes)
result_sharding = result_shape.sharding
rank = len(arg_shapes[0].shape)
self.assertEqual(static_arg0, 1)
self.assertEqual(static_arg1, 2)
def lower_fn(x, y):
axis_name = arg_shardings[1].spec[rank-2][0]
i = jax.lax.axis_index(axis_name)
z = jax.lax.psum(jax.lax.dynamic_slice_in_dim(
jax.lax.dynamic_slice_in_dim(x, i * 0, 8, axis=rank-2),
i * 8, 8, axis=rank-1) @ y, (axis_name))
return z
return mesh, lower_fn, (result_sharding), arg_shardings
def produce_sharding_rule(static_arg0, static_arg1, mesh, arg_shapes, result_shape):
self.assertEqual(static_arg0, 1)
self.assertEqual(static_arg1, 2)
rank = len(arg_shapes[0].shape)
leading_axes = ""
for i in range(rank - 2):
leading_axes += f" b{i}"
return f"{leading_axes} i j, {leading_axes} j k -> {leading_axes} i k" , dict(reduction_factors=("j",))
@partial(custom_partitioning, static_argnums=(2,3))
def f(x, y, static_arg0=1, static_arg1=2):
return jnp.matmul(x, y)
f.def_partition(
infer_sharding_from_operands=None,
partition=partition,
sharding_rule=produce_sharding_rule)
mesh = jtu.create_mesh((4, 2), ('x', 'y'))
x = jax.device_put(np.arange(2 * 3 * 32 * 16).reshape(2, 3, 32, 16),
NamedSharding(mesh, P(None, None, 'x')))
y = jax.device_put(np.arange(2 * 3 * 16 * 32).reshape(2, 3, 16, 32),
NamedSharding(mesh, P(None, None,'y')))
result = jax.jit(f)(x, y)
expected_result = f(x, y)
self.assertArraysEqual(result, expected_result)
self.assertEqual(result.sharding, NamedSharding(mesh, P(None, None, 'x')))
def test_custom_partition_shardy_migration(self):
self.skip_if_custom_partitioning_not_supported()
def partition(mesh, arg_shapes, result_shape):
def lower_fn(x):
return x
return (
mesh,
lower_fn,
arg_shapes[0].sharding,
(arg_shapes[0].sharding,),
)
def infer_sharding_from_operands(mesh, arg_shapes, result_shape):
return arg_shapes[0].sharding
def propagate_user_sharding(mesh, user_shape):
return user_shape.sharding
@custom_partitioning
def f(x):
return x
f.def_partition(
infer_sharding_from_operands=infer_sharding_from_operands,
partition=partition,
propagate_user_sharding=propagate_user_sharding,
)
mesh = jtu.create_mesh((4, 2), ('x', 'y'))
x = jax.device_put(np.arange(32 * 16).reshape(32, 16),
NamedSharding(mesh, P(None, 'x')))
with self.assertRaisesRegex(
NotImplementedError, 'provide sharding_rule to migrate to Shardy'):
jax.jit(f)(x)
def test_custom_partitioner_reshape(self):
self.skip_if_custom_partitioning_not_supported()
def partition(mesh, arg_shapes, result_shape):
arg_shardings = jax.tree.map(lambda s: s.sharding, arg_shapes)
result_sharding = result_shape.sharding
def lower_fn(x, y):
return x.reshape((4,)) + y
return mesh, lower_fn, (result_sharding), arg_shardings
@partial(custom_partitioning)
def f(x, y):
x = x.reshape((8,))
return x + y
f.def_partition(
infer_sharding_from_operands=None,
propagate_user_sharding=None,
partition=partition,
sharding_rule='(i k) j, (i k j) -> (i k j)', i=2, k=2, need_replication_factors=('k',))
mesh = jtu.create_mesh((2, 4), ('x', 'y'))
x = jax.device_put(np.arange(8).reshape(4, 2),
NamedSharding(mesh, P('x', None)))
y = jax.device_put(np.arange(8),
NamedSharding(mesh, P('x')))
jitted_result = jax.jit(f)(x, y)
unjitted_result = f(x, y)
self.assertArraysEqual(jitted_result, unjitted_result)
self.assertEqual(jitted_result.sharding, NamedSharding(mesh, P('x')))
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| CustomPartitionerTest |
python | huggingface__transformers | src/transformers/models/edgetam/modeling_edgetam.py | {
"start": 8531,
"end": 11619
} | class ____(nn.Module):
def __init__(self, config: EdgeTamMaskDecoderConfig, skip_first_layer_pe: bool = False):
"""
A transformer block with four layers:
(1) self-attention of sparse inputs (2) cross attention of sparse inputs -> dense inputs (3) mlp block on
sparse inputs (4) cross attention of dense inputs -> sparse inputs
Arguments:
config (`EdgeTamMaskDecoderConfig`):
The configuration file used to instantiate the block
attention_downsample_rate (*optionalk*, int, defaults to 2):
The downsample ratio of the block used to reduce the inner dim of the attention.
skip_first_layer_pe (*optional*, bool, defaults to `False`):
Whether or not to skip the addition of the query_point_embedding on the first layer.
"""
super().__init__()
self.self_attn = EdgeTamAttention(config, downsample_rate=1)
self.layer_norm1 = nn.LayerNorm(config.hidden_size)
self.cross_attn_token_to_image = EdgeTamAttention(config)
self.layer_norm2 = nn.LayerNorm(config.hidden_size)
self.mlp = EdgeTamFeedForward(
config.hidden_size, config.mlp_dim, config.hidden_size, num_layers=config.num_hidden_layers
)
self.layer_norm3 = nn.LayerNorm(config.hidden_size)
self.layer_norm4 = nn.LayerNorm(config.hidden_size)
self.cross_attn_image_to_token = EdgeTamAttention(config)
self.skip_first_layer_pe = skip_first_layer_pe
def forward(
self,
queries: Tensor,
keys: Tensor,
query_point_embedding: Tensor,
key_point_embedding: Tensor,
attention_similarity: Tensor,
**kwargs: Unpack[TransformersKwargs],
):
# Self attention block
if self.skip_first_layer_pe:
queries, _ = self.self_attn(query=queries, key=queries, value=queries)
else:
query = queries + query_point_embedding
attn_out, _ = self.self_attn(query=query, key=query, value=queries)
queries = queries + attn_out
queries = self.layer_norm1(queries)
# Cross attention block, tokens attending to image embedding
query = queries + query_point_embedding
key = keys + key_point_embedding
attn_out, _ = self.cross_attn_token_to_image(
query=query, key=key, value=keys, attention_similarity=attention_similarity
)
queries = queries + attn_out
queries = self.layer_norm2(queries)
# MLP block
mlp_out = self.mlp(queries)
queries = queries + mlp_out
queries = self.layer_norm3(queries)
# Cross attention block, image embedding attending to tokens
query = queries + query_point_embedding
key = keys + key_point_embedding
attn_out, _ = self.cross_attn_image_to_token(query=key, key=query, value=queries)
keys = keys + attn_out
keys = self.layer_norm4(keys)
return queries, keys, attn_out
| EdgeTamTwoWayAttentionBlock |
python | walkccc__LeetCode | solutions/2025. Maximum Number of Ways to Partition an Array/2025.py | {
"start": 0,
"end": 662
} | class ____:
def waysToPartition(self, nums: list[int], k: int) -> int:
n = len(nums)
summ = sum(nums)
prefix = 0
# Count of sum(A[0..k)) - sum(A[k..n)) for k in [0..i)
l = collections.Counter()
# Count of sum(A[0..k)) - sum(A[k..n)) for k in [i..n)
r = collections.Counter()
for pivot in range(1, n):
prefix += nums[pivot - 1]
suffix = summ - prefix
r[prefix - suffix] += 1
ans = r[0]
prefix = 0
for num in nums:
ans = max(ans, l[k - num] + r[num - k])
prefix += num
suffix = summ - prefix
diff = prefix - suffix
r[diff] -= 1
l[diff] += 1
return ans
| Solution |
python | ansible__ansible | test/lib/ansible_test/_internal/cli/parsers/key_value_parsers.py | {
"start": 3067,
"end": 4811
} | class ____(KeyValueParser):
"""Composite argument parser for docker key/value pairs."""
def __init__(self, image: str, controller: bool) -> None:
self.controller = controller
self.versions = get_docker_pythons(image, controller, False)
self.allow_default = bool(get_docker_pythons(image, controller, True))
def get_parsers(self, state: ParserState) -> dict[str, Parser]:
"""Return a dictionary of key names and value parsers."""
return dict(
python=PythonParser(versions=self.versions, allow_venv=False, allow_default=self.allow_default),
seccomp=ChoicesParser(SECCOMP_CHOICES),
cgroup=EnumValueChoicesParser(CGroupVersion),
audit=EnumValueChoicesParser(AuditMode),
privileged=BooleanParser(),
memory=IntegerParser(),
)
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
python_parser = PythonParser(versions=[], allow_venv=False, allow_default=self.allow_default)
section_name = 'docker options'
state.sections[f'{"controller" if self.controller else "target"} {section_name} (comma separated):'] = '\n'.join([
f' python={python_parser.document(state)}',
f' seccomp={ChoicesParser(SECCOMP_CHOICES).document(state)}',
f' cgroup={EnumValueChoicesParser(CGroupVersion).document(state)}',
f' audit={EnumValueChoicesParser(AuditMode).document(state)}',
f' privileged={BooleanParser().document(state)}',
f' memory={IntegerParser().document(state)} # bytes',
])
return f'{{{section_name}}}'
| DockerKeyValueParser |
python | readthedocs__readthedocs.org | readthedocs/projects/tests/test_build_tasks.py | {
"start": 106003,
"end": 108529
} | class ____(BuildEnvironmentBase):
def _trigger_sync_repository_task(self):
sync_repository_task.delay(self.version.pk, build_api_key="1234")
@mock.patch("readthedocs.projects.tasks.builds.clean_build")
def test_clean_build_after_sync_repository(self, clean_build):
self._trigger_sync_repository_task()
clean_build.assert_called_once()
@mock.patch("readthedocs.projects.tasks.builds.SyncRepositoryTask.execute")
@mock.patch("readthedocs.projects.tasks.builds.clean_build")
def test_clean_build_after_failure_in_sync_repository(self, clean_build, execute):
execute.side_effect = Exception("Something weird happen")
self._trigger_sync_repository_task()
clean_build.assert_called_once()
@pytest.mark.parametrize(
"verbose_name",
[
"stable",
"latest",
],
)
@mock.patch("readthedocs.projects.tasks.builds.SyncRepositoryTask.on_failure")
def test_check_duplicate_reserved_version_latest(self, on_failure, verbose_name):
# `repository.tags` and `repository.branch` both will return a tag/branch named `latest/stable`
with mock.patch(
"readthedocs.vcs_support.backends.git.Backend.lsremote",
return_value=[
[mock.MagicMock(identifier="branch/a1b2c3", verbose_name=verbose_name)],
[mock.MagicMock(identifier="tag/a1b2c3", verbose_name=verbose_name)],
],
):
self._trigger_sync_repository_task()
on_failure.assert_called_once_with(
# This argument is the exception we are intereste, but I don't know
# how to assert it here. It's checked in the following assert.
mock.ANY,
mock.ANY,
[self.version.pk],
{
"build_api_key": mock.ANY,
},
mock.ANY,
)
exception = on_failure.call_args[0][0]
assert isinstance(exception, RepositoryError) == True
assert exception.message_id == RepositoryError.DUPLICATED_RESERVED_VERSIONS
@mock.patch("readthedocs.builds.tasks.sync_versions_task")
@mock.patch("readthedocs.vcs_support.backends.git.Backend.lsremote")
def test_skip_sync_version_task_if_lsremote_fails(self, lsremote, sync_versions_task):
lsremote.side_effect = RepositoryError(RepositoryError.FAILED_TO_GET_VERSIONS)
self._trigger_sync_repository_task()
sync_versions_task.assert_not_called()
| TestSyncRepositoryTask |
python | python__mypy | mypy/test/meta/test_parse_data.py | {
"start": 416,
"end": 1983
} | class ____(Suite):
def test_parse_invalid_case(self) -> None:
# Act
result = _run_pytest(
"""
[case abc]
s: str
[case foo-XFAIL]
s: str
"""
)
# Assert
assert "Invalid testcase id 'foo-XFAIL'" in result.stdout
def test_parse_invalid_section(self) -> None:
# Act
result = _run_pytest(
"""
[case abc]
s: str
[unknownsection]
abc
"""
)
# Assert
expected_lineno = result.input.splitlines().index("[unknownsection]") + 1
expected = (
f".test:{expected_lineno}: Invalid section header [unknownsection] in case 'abc'"
)
assert expected in result.stdout
def test_bad_ge_version_check(self) -> None:
# Act
actual = _run_pytest(
"""
[case abc]
s: str
[out version>=3.10]
abc
"""
)
# Assert
assert (
"version>=3.10 always true since minimum runtime version is (3, 10)" in actual.stdout
)
def test_bad_eq_version_check(self) -> None:
# Act
actual = _run_pytest(
"""
[case abc]
s: str
[out version==3.7]
abc
"""
)
# Assert
assert (
"version==3.7 always false since minimum runtime version is (3, 10)" in actual.stdout
)
| ParseTestDataSuite |
python | Textualize__textual | src/textual/_animator.py | {
"start": 7272,
"end": 20530
} | class ____:
"""An object to manage updates to a given attribute over a period of time."""
def __init__(self, app: App, frames_per_second: int = 60) -> None:
"""Initialise the animator object.
Args:
app: The application that owns the animator.
frames_per_second: The number of frames/second to run the animation at.
"""
self._animations: dict[AnimationKey, Animation] = {}
"""Dictionary that maps animation keys to the corresponding animation instances."""
self._scheduled: dict[AnimationKey, Timer] = {}
"""Dictionary of scheduled animations, comprising of their keys and the timer objects."""
self.app = app
"""The app that owns the animator object."""
self._timer = Timer(
app,
1 / frames_per_second,
name="Animator",
callback=self,
pause=True,
)
@cached_property
def _idle_event(self) -> asyncio.Event:
"""The timer that runs the animator."""
return asyncio.Event()
@cached_property
def _complete_event(self) -> asyncio.Event:
"""Flag if no animations are currently taking place."""
return asyncio.Event()
async def start(self) -> None:
"""Start the animator task."""
self._idle_event.set()
self._complete_event.set()
self._timer._start()
async def stop(self) -> None:
"""Stop the animator task."""
try:
self._timer.stop()
except asyncio.CancelledError:
pass
finally:
self._idle_event.set()
self._complete_event.set()
def bind(self, obj: object) -> BoundAnimator:
"""Bind the animator to a given object.
Args:
obj: The object to bind to.
Returns:
The bound animator.
"""
return BoundAnimator(self, obj)
def is_being_animated(self, obj: object, attribute: str) -> bool:
"""Does the object/attribute pair have an ongoing or scheduled animation?
Args:
obj: An object to check for.
attribute: The attribute on the object to test for.
Returns:
`True` if that attribute is being animated for that object, `False` if not.
"""
key = (id(obj), attribute)
return key in self._animations or key in self._scheduled
def animate(
self,
obj: object,
attribute: str,
value: Any,
*,
final_value: object = ...,
duration: float | None = None,
speed: float | None = None,
easing: EasingFunction | str = DEFAULT_EASING,
delay: float = 0.0,
on_complete: CallbackType | None = None,
level: AnimationLevel = "full",
) -> None:
"""Animate an attribute to a new value.
Args:
obj: The object containing the attribute.
attribute: The name of the attribute.
value: The destination value of the attribute.
final_value: The final value, or ellipsis if it is the same as ``value``.
duration: The duration of the animation, or ``None`` to use speed.
speed: The speed of the animation.
easing: An easing function.
delay: Number of seconds to delay the start of the animation by.
on_complete: Callback to run after the animation completes.
level: Minimum level required for the animation to take place (inclusive).
"""
self._record_animation(attribute)
animate_callback = partial(
self._animate,
obj,
attribute,
value,
final_value=final_value,
duration=duration,
speed=speed,
easing=easing,
on_complete=on_complete,
level=level,
)
if delay:
self._complete_event.clear()
self._scheduled[(id(obj), attribute)] = self.app.set_timer(
delay, animate_callback
)
else:
animate_callback()
def _record_animation(self, attribute: str) -> None:
"""Called when an attribute is to be animated.
Args:
attribute: Attribute being animated.
"""
def _animate(
self,
obj: object,
attribute: str,
value: Any,
*,
final_value: object = ...,
duration: float | None = None,
speed: float | None = None,
easing: EasingFunction | str = DEFAULT_EASING,
on_complete: CallbackType | None = None,
level: AnimationLevel = "full",
) -> None:
"""Animate an attribute to a new value.
Args:
obj: The object containing the attribute.
attribute: The name of the attribute.
value: The destination value of the attribute.
final_value: The final value, or ellipsis if it is the same as ``value``.
duration: The duration of the animation, or ``None`` to use speed.
speed: The speed of the animation.
easing: An easing function.
on_complete: Callback to run after the animation completes.
level: Minimum level required for the animation to take place (inclusive).
"""
if not hasattr(obj, attribute):
raise AttributeError(
f"Can't animate attribute {attribute!r} on {obj!r}; attribute does not exist"
)
assert (duration is not None and speed is None) or (
duration is None and speed is not None
), "An Animation should have a duration OR a speed"
# If an animation is already scheduled for this attribute, unschedule it.
animation_key = (id(obj), attribute)
try:
del self._scheduled[animation_key]
except KeyError:
pass
if final_value is ...:
final_value = value
start_time = self._get_time()
easing_function = EASING[easing] if isinstance(easing, str) else easing
animation: Animation | None = None
if hasattr(obj, "__textual_animation__"):
animation = getattr(obj, "__textual_animation__")(
attribute,
getattr(obj, attribute),
value,
start_time,
duration=duration,
speed=speed,
easing=easing_function,
on_complete=on_complete,
level=level,
)
if animation is None:
if not isinstance(value, (int, float)) and not isinstance(
value, Animatable
):
raise AnimationError(
f"Don't know how to animate {value!r}; "
"Can only animate <int>, <float>, or objects with a blend method"
)
start_value = getattr(obj, attribute)
if start_value == value:
self._animations.pop(animation_key, None)
return
if duration is not None:
animation_duration = duration
else:
if hasattr(value, "get_distance_to"):
animation_duration = value.get_distance_to(start_value) / (
speed or 50
)
else:
animation_duration = abs(value - start_value) / (speed or 50)
animation = SimpleAnimation(
obj,
attribute=attribute,
start_time=start_time,
duration=animation_duration,
start_value=start_value,
end_value=value,
final_value=final_value,
easing=easing_function,
on_complete=(
partial(self.app.call_later, on_complete)
if on_complete is not None
else None
),
level=level,
)
assert animation is not None, "animation expected to be non-None"
current_animation = self._animations.get(animation_key)
if current_animation is not None and current_animation == animation:
return
self._animations[animation_key] = animation
self._timer.resume()
self._idle_event.clear()
self._complete_event.clear()
async def _stop_scheduled_animation(
self, key: AnimationKey, complete: bool
) -> None:
"""Stop a scheduled animation.
Args:
key: The key for the animation to stop.
complete: Should the animation be moved to its completed state?
"""
# First off, pull the timer out of the schedule and stop it; it
# won't be needed.
try:
schedule = self._scheduled.pop(key)
except KeyError:
return
schedule.stop()
# If we've been asked to complete (there's no point in making the
# animation only to then do nothing with it), and if there was a
# callback (there will be, but this just keeps type checkers happy
# really)...
if complete and schedule._callback is not None:
# ...invoke it to get the animator created and in the running
# animations. Yes, this does mean that a stopped scheduled
# animation will start running early...
await invoke(schedule._callback)
# ...but only so we can call on it to run right to the very end
# right away.
await self._stop_running_animation(key, complete)
async def _stop_running_animation(self, key: AnimationKey, complete: bool) -> None:
"""Stop a running animation.
Args:
key: The key for the animation to stop.
complete: Should the animation be moved to its completed state?
"""
try:
animation = self._animations.pop(key)
except KeyError:
return
await animation.stop(complete)
async def stop_animation(
self, obj: object, attribute: str, complete: bool = True
) -> None:
"""Stop an animation on an attribute.
Args:
obj: The object containing the attribute.
attribute: The name of the attribute.
complete: Should the animation be set to its final value?
Note:
If there is no animation scheduled or running, this is a no-op.
"""
key = (id(obj), attribute)
if key in self._scheduled:
await self._stop_scheduled_animation(key, complete)
elif key in self._animations:
await self._stop_running_animation(key, complete)
def force_stop_animation(self, obj: object, attribute: str) -> None:
"""Force stop an animation on an attribute. This will immediately stop the animation,
without running any associated callbacks, setting the attribute to its final value.
Args:
obj: The object containing the attribute.
attribute: The name of the attribute.
Note:
If there is no animation scheduled or running, this is a no-op.
"""
from textual.css.scalar_animation import ScalarAnimation
animation_key = (id(obj), attribute)
try:
animation = self._animations.pop(animation_key)
except KeyError:
return
if isinstance(animation, SimpleAnimation):
setattr(obj, attribute, animation.end_value)
elif isinstance(animation, ScalarAnimation):
setattr(obj, attribute, animation.final_value)
if animation.on_complete is not None:
animation.on_complete()
def __call__(self) -> None:
if not self._animations:
self._timer.pause()
self._idle_event.set()
if not self._scheduled:
self._complete_event.set()
else:
app_animation_level = self.app.animation_level
animation_time = self._get_time()
animation_keys = list(self._animations.keys())
for animation_key in animation_keys:
animation = self._animations[animation_key]
animation_complete = animation(animation_time, app_animation_level)
if animation_complete:
del self._animations[animation_key]
if animation.on_complete is not None:
animation.on_complete()
def _get_time(self) -> float:
"""Get the current wall clock time, via the internal Timer.
Returns:
The wall clock time.
"""
# N.B. We could remove this method and always call `self._timer.get_time()` internally,
# but it's handy to have in mocking situations.
return _time.get_time()
async def wait_for_idle(self) -> None:
"""Wait for any animations to complete."""
await self._idle_event.wait()
async def wait_until_complete(self) -> None:
"""Wait for any current and scheduled animations to complete."""
await self._complete_event.wait()
| Animator |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 185212,
"end": 187450
} | class ____(Request):
"""
Delete models from task
:param task: ID of the task
:type task: str
:param models: The list of models to delete
:type models: Sequence[dict]
"""
_service = "tasks"
_action = "delete_models"
_version = "2.13"
_schema = {
"definitions": {"model_type_enum": {"enum": ["input", "output"], "type": "string"}},
"properties": {
"models": {
"description": "The list of models to delete",
"items": {
"properties": {
"name": {
"description": "The task model name",
"type": "string",
},
"type": {
"$ref": "#/definitions/model_type_enum",
"description": "The task model type",
},
},
"required": ["name", "type"],
"type": "object",
},
"type": "array",
},
"task": {"description": "ID of the task", "type": "string"},
},
"required": ["task", "models"],
"type": "object",
}
def __init__(self, task: str, models: List[dict], **kwargs: Any) -> None:
super(DeleteModelsRequest, self).__init__(**kwargs)
self.task = task
self.models = models
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("models")
def models(self) -> List[dict]:
return self._property_models
@models.setter
def models(self, value: List[dict]) -> None:
if value is None:
self._property_models = None
return
self.assert_isinstance(value, "models", (list, tuple))
self.assert_isinstance(value, "models", (dict,), is_array=True)
self._property_models = value
| DeleteModelsRequest |
python | apache__airflow | providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/utils/pod_manager.py | {
"start": 11629,
"end": 11808
} | class ____:
"""Return the status of the pod and last log time when exiting from `fetch_container_logs`."""
running: bool
last_log_time: DateTime | None
| PodLoggingStatus |
python | huggingface__transformers | src/transformers/models/diffllama/modeling_diffllama.py | {
"start": 25198,
"end": 26085
} | class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
DiffLlamaRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
DIFFLLAMA_ATTENTION_CLASSES = {
"eager": DiffLlamaAttention,
"flash_attention_2": DiffLlamaFlashAttention2,
"sdpa": DiffLlamaSdpaAttention,
}
| DiffLlamaRMSNorm |
python | spyder-ide__spyder | spyder/plugins/help/widgets.py | {
"start": 2629,
"end": 4928
} | class ____(EditableComboBox):
"""
QComboBox handling object names
"""
# Signals
valid = Signal(bool, bool)
def __init__(self, parent, id_=None):
EditableComboBox.__init__(self, parent)
self.help = parent
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
self.tips = {True: '', False: ''}
if id_ is not None:
self.ID = id_
def is_valid(self, qstr=None):
"""Return True if string is valid"""
if not self.help.source_is_console():
return True
if qstr is None:
qstr = self.currentText()
if not re.search(r'^[a-zA-Z0-9_\.]*$', str(qstr), 0):
return False
objtxt = str(qstr)
shell_is_defined = False
if self.help.get_conf('automatic_import'):
shell = self.help.internal_shell
if shell is not None:
shell_is_defined = shell.is_defined(objtxt, force_import=True)
if not shell_is_defined:
shell = self.help.get_shell()
if shell is not None:
try:
shell_is_defined = shell.is_defined(objtxt)
except socket.error:
shell = self.help.get_shell()
try:
shell_is_defined = shell.is_defined(objtxt)
except socket.error:
# Well... too bad!
pass
return shell_is_defined
def validate_current_text(self):
self.validate(self.currentText())
def validate(self, qstr, editing=True):
"""Reimplemented to avoid formatting actions"""
valid = self.is_valid(qstr)
if self.hasFocus() and valid is not None:
if editing and not valid:
# Combo box text is being modified: invalidate the entry
self.show_tip(self.tips[valid])
self.valid.emit(False, False)
else:
# A new item has just been selected
if valid:
self.selected()
# See spyder-ide/spyder#9542.
self.lineEdit().cursorWordForward(False)
else:
self.valid.emit(False, False)
| ObjectComboBox |
python | huggingface__transformers | src/transformers/utils/auto_docstring.py | {
"start": 20106,
"end": 29657
} | class ____:
last_hidden_state = {
"description": """
Sequence of hidden-states at the output of the last layer of the model.
""",
"shape": "of shape `(batch_size, sequence_length, hidden_size)`",
}
past_key_values = {
"description": """
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
`config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
input) to speed up sequential decoding.
""",
"shape": None,
"additional_info": "returned when `use_cache=True` is passed or when `config.use_cache=True`",
}
hidden_states = {
"description": """
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
""",
"shape": None,
"additional_info": "returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`",
}
attentions = {
"description": """
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
""",
"shape": None,
"additional_info": "returned when `output_attentions=True` is passed or when `config.output_attentions=True`",
}
pooler_output = {
"description": """
Last layer hidden-state after a pooling operation on the spatial dimensions.
""",
"shape": "of shape `(batch_size, hidden_size)`",
}
cross_attentions = {
"description": """
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
""",
"shape": None,
"additional_info": "returned when `output_attentions=True` is passed or when `config.output_attentions=True`",
}
decoder_hidden_states = {
"description": """
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
""",
"shape": None,
"additional_info": "returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`",
}
decoder_attentions = {
"description": """
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
""",
"shape": None,
"additional_info": "returned when `output_attentions=True` is passed or when `config.output_attentions=True`",
}
encoder_last_hidden_state = {
"description": """
Sequence of hidden-states at the output of the last layer of the encoder of the model.
""",
"shape": "of shape `(batch_size, sequence_length, hidden_size)`",
}
encoder_hidden_states = {
"description": """
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
""",
"shape": None,
"additional_info": "returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`",
}
encoder_attentions = {
"description": """
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
""",
"shape": None,
"additional_info": "returned when `output_attentions=True` is passed or when `config.output_attentions=True`",
}
router_logits = {
"description": """
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
Router logits of the model, useful to compute the auxiliary loss for Mixture of Experts models.
""",
"shape": None,
"additional_info": "returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`",
}
router_probs = {
"description": """
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
Raw router probabilities that are computed by MoE routers, these terms are used to compute the auxiliary
loss and the z_loss for Mixture of Experts models.
""",
"shape": None,
"additional_info": "returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`",
}
z_loss = {
"description": """
z_loss for the sparse modules.
""",
"shape": None,
"additional_info": "returned when `labels` is provided",
}
aux_loss = {
"description": """
aux_loss for the sparse modules.
""",
"shape": None,
"additional_info": "returned when `labels` is provided",
}
start_logits = {
"description": """
Span-start scores (before SoftMax).
""",
"shape": "of shape `(batch_size, sequence_length)`",
}
end_logits = {
"description": """
Span-end scores (before SoftMax).
""",
"shape": "of shape `(batch_size, sequence_length)`",
}
feature_maps = {
"description": """
Feature maps of the stages.
""",
"shape": "of shape `(batch_size, num_channels, height, width)`",
}
reconstruction = {
"description": """
Reconstructed / completed images.
""",
"shape": "of shape `(batch_size, num_channels, height, width)`",
}
spectrogram = {
"description": """
The predicted spectrogram.
""",
"shape": "of shape `(batch_size, sequence_length, num_bins)`",
}
predicted_depth = {
"description": """
Predicted depth for each pixel.
""",
"shape": "of shape `(batch_size, height, width)`",
}
sequences = {
"description": """
Sampled values from the chosen distribution.
""",
"shape": "of shape `(batch_size, num_samples, prediction_length)` or `(batch_size, num_samples, prediction_length, input_size)`",
}
params = {
"description": """
Parameters of the chosen distribution.
""",
"shape": "of shape `(batch_size, num_samples, num_params)`",
}
loc = {
"description": """
Shift values of each time series' context window which is used to give the model inputs of the same
magnitude and then used to shift back to the original magnitude.
""",
"shape": "of shape `(batch_size,)` or `(batch_size, input_size)`",
}
scale = {
"description": """
Scaling values of each time series' context window which is used to give the model inputs of the same
magnitude and then used to rescale back to the original magnitude.
""",
"shape": "of shape `(batch_size,)` or `(batch_size, input_size)`",
}
static_features = {
"description": """
Static features of each time series' in a batch which are copied to the covariates at inference time.
""",
"shape": "of shape `(batch_size, feature size)`",
}
embeddings = {
"description": """
Utterance embeddings used for vector similarity-based retrieval.
""",
"shape": "of shape `(batch_size, config.xvector_output_dim)`",
}
extract_features = {
"description": """
Sequence of extracted feature vectors of the last convolutional layer of the model.
""",
"shape": "of shape `(batch_size, sequence_length, conv_dim[-1])`",
}
projection_state = {
"description": """
Text embeddings before the projection layer, used to mimic the last hidden state of the teacher encoder.
""",
"shape": "of shape `(batch_size,config.project_dim)`",
}
image_hidden_states = {
"description": """
Image hidden states of the model produced by the vision encoder and after projecting the last hidden state.
""",
"shape": "of shape `(batch_size, num_images, sequence_length, hidden_size)`",
}
video_hidden_states = {
"description": """
Video hidden states of the model produced by the vision encoder and after projecting the last hidden state.
""",
"shape": "of shape `(batch_size * num_frames, num_images, sequence_length, hidden_size)`",
}
| ModelOutputArgs |
python | sympy__sympy | sympy/assumptions/predicates/calculus.py | {
"start": 87,
"end": 1058
} | class ____(Predicate):
"""
Finite number predicate.
Explanation
===========
``Q.finite(x)`` is true if ``x`` is a number but neither an infinity
nor a ``NaN``. In other words, ``ask(Q.finite(x))`` is true for all
numerical ``x`` having a bounded absolute value.
Examples
========
>>> from sympy import Q, ask, S, oo, I, zoo
>>> from sympy.abc import x
>>> ask(Q.finite(oo))
False
>>> ask(Q.finite(-oo))
False
>>> ask(Q.finite(zoo))
False
>>> ask(Q.finite(1))
True
>>> ask(Q.finite(2 + 3*I))
True
>>> ask(Q.finite(x), Q.positive(x))
True
>>> print(ask(Q.finite(S.NaN)))
None
References
==========
.. [1] https://en.wikipedia.org/wiki/Finite
"""
name = 'finite'
handler = Dispatcher(
"FiniteHandler",
doc=("Handler for Q.finite. Test that an expression is bounded respect"
" to all its variables.")
)
| FinitePredicate |
python | facebookresearch__faiss | tests/test_index.py | {
"start": 12813,
"end": 13603
} | class ____(unittest.TestCase):
def test_range_search(self):
d = 4
nt = 100
nq = 10
nb = 50
(xt, xb, xq) = get_dataset(d, nb, nt, nq)
index = faiss.IndexFlatL2(d)
index.add(xb)
Dref, Iref = index.search(xq, 5)
thresh = 0.1 # *squared* distance
lims, D, I = index.range_search(xq, thresh)
for i in range(nq):
Iline = I[lims[i]:lims[i + 1]]
Dline = D[lims[i]:lims[i + 1]]
for j, dis in zip(Iref[i], Dref[i]):
if dis < thresh:
li, = np.where(Iline == j)
self.assertTrue(li.size == 1)
idx = li[0]
self.assertGreaterEqual(1e-4, abs(Dline[idx] - dis))
| TestRangeSearch |
python | apache__airflow | dev/breeze/src/airflow_breeze/global_constants.py | {
"start": 26345,
"end": 26771
} | class ____(Enum):
PULL_REQUEST = "pull_request"
PULL_REQUEST_REVIEW = "pull_request_review"
PULL_REQUEST_TARGET = "pull_request_target"
PULL_REQUEST_WORKFLOW = "pull_request_workflow"
PUSH = "push"
SCHEDULE = "schedule"
WORKFLOW_DISPATCH = "workflow_dispatch"
WORKFLOW_RUN = "workflow_run"
@clearable_cache
def github_events() -> list[str]:
return [e.value for e in GithubEvents]
| GithubEvents |
python | sympy__sympy | sympy/functions/special/error_functions.py | {
"start": 55328,
"end": 59306
} | class ____(TrigonometricIntegral):
r"""
Cosine integral.
Explanation
===========
This function is defined for positive $x$ by
.. math:: \operatorname{Ci}(x) = \gamma + \log{x}
+ \int_0^x \frac{\cos{t} - 1}{t} \mathrm{d}t
= -\int_x^\infty \frac{\cos{t}}{t} \mathrm{d}t,
where $\gamma$ is the Euler-Mascheroni constant.
We have
.. math:: \operatorname{Ci}(z) =
-\frac{\operatorname{E}_1\left(e^{i\pi/2} z\right)
+ \operatorname{E}_1\left(e^{-i \pi/2} z\right)}{2}
which holds for all polar $z$ and thus provides an analytic
continuation to the Riemann surface of the logarithm.
The formula also holds as stated
for $z \in \mathbb{C}$ with $\Re(z) > 0$.
By lifting to the principal branch, we obtain an analytic function on the
cut complex plane.
Examples
========
>>> from sympy import Ci
>>> from sympy.abc import z
The cosine integral is a primitive of $\cos(z)/z$:
>>> Ci(z).diff(z)
cos(z)/z
It has a logarithmic branch point at the origin:
>>> from sympy import exp_polar, I, pi
>>> Ci(z*exp_polar(2*I*pi))
Ci(z) + 2*I*pi
The cosine integral behaves somewhat like ordinary $\cos$ under
multiplication by $i$:
>>> from sympy import polar_lift
>>> Ci(polar_lift(I)*z)
Chi(z) + I*pi/2
>>> Ci(polar_lift(-1)*z)
Ci(z) + I*pi
It can also be expressed in terms of exponential integrals:
>>> from sympy import expint
>>> Ci(z).rewrite(expint)
-expint(1, z*exp_polar(-I*pi/2))/2 - expint(1, z*exp_polar(I*pi/2))/2
See Also
========
Si: Sine integral.
Shi: Hyperbolic sine integral.
Chi: Hyperbolic cosine integral.
Ei: Exponential integral.
expint: Generalised exponential integral.
E1: Special case of the generalised exponential integral.
li: Logarithmic integral.
Li: Offset logarithmic integral.
References
==========
.. [1] https://en.wikipedia.org/wiki/Trigonometric_integral
"""
_trigfunc = cos
_atzero = S.ComplexInfinity
@classmethod
def _atinf(cls):
return S.Zero
@classmethod
def _atneginf(cls):
return I*pi
@classmethod
def _minusfactor(cls, z):
return Ci(z) + I*pi
@classmethod
def _Ifactor(cls, z, sign):
return Chi(z) + I*pi/2*sign
def _eval_rewrite_as_expint(self, z, **kwargs):
return -(E1(polar_lift(I)*z) + E1(polar_lift(-I)*z))/2
def _eval_rewrite_as_Integral(self, z, **kwargs):
from sympy.integrals.integrals import Integral
t = Dummy(uniquely_named_symbol('t', [z]).name)
return S.EulerGamma + log(z) - Integral((1-cos(t))/t, (t, 0, z))
def _eval_as_leading_term(self, x, logx, cdir):
arg = self.args[0].as_leading_term(x, logx=logx, cdir=cdir)
arg0 = arg.subs(x, 0)
if arg0 is S.NaN:
arg0 = arg.limit(x, 0, dir='-' if re(cdir).is_negative else '+')
if arg0.is_zero:
c, e = arg.as_coeff_exponent(x)
logx = log(x) if logx is None else logx
return log(c) + e*logx + EulerGamma
elif arg0.is_finite:
return self.func(arg0)
else:
return self
def _eval_aseries(self, n, args0, x, logx):
from sympy.series.order import Order
point = args0[0]
if point in (S.Infinity, S.NegativeInfinity):
z = self.args[0]
p = [S.NegativeOne**k * factorial(2*k) / z**(2*k + 1)
for k in range(n//2 + 1)] + [Order(1/z**n, x)]
q = [S.NegativeOne**k * factorial(2*k + 1) / z**(2*(k + 1))
for k in range(n//2)] + [Order(1/z**n, x)]
result = sin(z)*(Add(*p)) - cos(z)*(Add(*q))
if point is S.NegativeInfinity:
result += I*pi
return result
return super(Ci, self)._eval_aseries(n, args0, x, logx)
| Ci |
python | tensorflow__tensorflow | tensorflow/python/checkpoint/checkpoint_metrics_test.py | {
"start": 1112,
"end": 4369
} | class ____(test.TestCase):
def _get_write_histogram_proto(self, api_label):
proto_bytes = metrics.GetCheckpointWriteDurations(api_label=api_label)
histogram_proto = summary_pb2.HistogramProto()
histogram_proto.ParseFromString(proto_bytes)
return histogram_proto
def _get_read_histogram_proto(self, api_label):
proto_bytes = metrics.GetCheckpointReadDurations(api_label=api_label)
histogram_proto = summary_pb2.HistogramProto()
histogram_proto.ParseFromString(proto_bytes)
return histogram_proto
def _get_time_saved(self, api_label):
return metrics.GetTrainingTimeSaved(api_label=api_label)
def _get_checkpoint_size(self, api_label, filesize):
return metrics.GetCheckpointSize(api_label=api_label, filesize=filesize)
def test_metrics_v2(self):
api_label = util._CHECKPOINT_V2
prefix = os.path.join(self.get_temp_dir(), 'ckpt')
with context.eager_mode():
ckpt = util.Checkpoint(v=variables_lib.Variable(1.))
self.assertEqual(self._get_time_saved(api_label), 0.0)
self.assertEqual(self._get_write_histogram_proto(api_label).num, 0.0)
for i in range(3):
time_saved = self._get_time_saved(api_label)
time.sleep(1)
ckpt_path = ckpt.write(file_prefix=prefix)
filesize = util._get_checkpoint_size(ckpt_path)
self.assertEqual(self._get_checkpoint_size(api_label, filesize), i + 1)
self.assertGreater(self._get_time_saved(api_label), time_saved)
self.assertEqual(self._get_write_histogram_proto(api_label).num, 3.0)
self.assertEqual(self._get_read_histogram_proto(api_label).num, 0.0)
time_saved = self._get_time_saved(api_label)
with context.eager_mode():
ckpt.restore(ckpt_path)
self.assertEqual(self._get_read_histogram_proto(api_label).num, 1.0)
# Restoring a checkpoint in the same "job" does not increase training time
# saved.
self.assertEqual(self._get_time_saved(api_label), time_saved)
def test_metrics_v1(self):
api_label = util._CHECKPOINT_V1
prefix = os.path.join(self.get_temp_dir(), 'ckpt')
with self.cached_session():
ckpt = util.CheckpointV1()
v = variables_lib.Variable(1.)
self.evaluate(v.initializer)
ckpt.v = v
self.assertEqual(self._get_time_saved(api_label), 0.0)
self.assertEqual(self._get_write_histogram_proto(api_label).num, 0.0)
for i in range(3):
time_saved = self._get_time_saved(api_label)
time.sleep(1)
ckpt_path = ckpt.write(file_prefix=prefix)
filesize = util._get_checkpoint_size(ckpt_path)
self.assertEqual(self._get_checkpoint_size(api_label, filesize), i + 1)
self.assertGreater(self._get_time_saved(api_label), time_saved)
self.assertEqual(self._get_write_histogram_proto(api_label).num, 3.0)
self.assertEqual(self._get_read_histogram_proto(api_label).num, 0.0)
time_saved = self._get_time_saved(api_label)
ckpt.restore(ckpt_path)
self.assertEqual(self._get_read_histogram_proto(api_label).num, 1.0)
# Restoring a checkpoint in the same "job" does not increase training time
# saved.
self.assertEqual(self._get_time_saved(api_label), time_saved)
if __name__ == '__main__':
test.main()
| CheckpointMetricTests |
python | pydantic__pydantic | tests/mypy/modules/plugin_success.py | {
"start": 2703,
"end": 2793
} | class ____(BaseModel):
__type_alias_attribute__ = Union[str, bytes]
| TypeAliasAsAttribute |
python | huggingface__transformers | tests/utils/test_deprecation.py | {
"start": 968,
"end": 8820
} | class ____(unittest.TestCase):
def test_rename_kwarg(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
@deprecate_kwarg("deprecated_name", new_name="new_name", version=INFINITE_VERSION)
def dummy_function(new_name=None, other_name=None):
return new_name, other_name
# Test keyword argument is renamed
value, other_value = dummy_function(deprecated_name="old_value")
self.assertEqual(value, "old_value")
self.assertIsNone(other_value)
# Test deprecated keyword argument not passed
value, other_value = dummy_function(new_name="new_value")
self.assertEqual(value, "new_value")
self.assertIsNone(other_value)
# Test other keyword argument
value, other_value = dummy_function(other_name="other_value")
self.assertIsNone(value)
self.assertEqual(other_value, "other_value")
# Test deprecated and new args are passed, the new one should be returned
value, other_value = dummy_function(deprecated_name="old_value", new_name="new_value")
self.assertEqual(value, "new_value")
self.assertIsNone(other_value)
def test_rename_multiple_kwargs(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
@deprecate_kwarg("deprecated_name1", new_name="new_name1", version=INFINITE_VERSION)
@deprecate_kwarg("deprecated_name2", new_name="new_name2", version=INFINITE_VERSION)
def dummy_function(new_name1=None, new_name2=None, other_name=None):
return new_name1, new_name2, other_name
# Test keyword argument is renamed
value1, value2, other_value = dummy_function(deprecated_name1="old_value1", deprecated_name2="old_value2")
self.assertEqual(value1, "old_value1")
self.assertEqual(value2, "old_value2")
self.assertIsNone(other_value)
# Test deprecated keyword argument is not passed
value1, value2, other_value = dummy_function(new_name1="new_value1", new_name2="new_value2")
self.assertEqual(value1, "new_value1")
self.assertEqual(value2, "new_value2")
self.assertIsNone(other_value)
# Test other keyword argument is passed and correctly returned
value1, value2, other_value = dummy_function(other_name="other_value")
self.assertIsNone(value1)
self.assertIsNone(value2)
self.assertEqual(other_value, "other_value")
def test_warnings(self):
# Test warning is raised for future version
@deprecate_kwarg("deprecated_name", new_name="new_name", version=INFINITE_VERSION)
def dummy_function(new_name=None, other_name=None):
return new_name, other_name
with self.assertWarns(FutureWarning):
dummy_function(deprecated_name="old_value")
# Test warning is not raised for past version, but arg is still renamed
@deprecate_kwarg("deprecated_name", new_name="new_name", version="0.0.0")
def dummy_function(new_name=None, other_name=None):
return new_name, other_name
with warnings.catch_warnings(record=True) as raised_warnings:
warnings.simplefilter("always")
value, other_value = dummy_function(deprecated_name="old_value")
self.assertEqual(value, "old_value")
self.assertIsNone(other_value)
self.assertEqual(len(raised_warnings), 0, f"Warning raised: {[w.message for w in raised_warnings]}")
# Test warning is raised for future version if warn_if_greater_or_equal_version is set
@deprecate_kwarg("deprecated_name", version="0.0.0", warn_if_greater_or_equal_version=True)
def dummy_function(deprecated_name=None):
return deprecated_name
with self.assertWarns(FutureWarning):
value = dummy_function(deprecated_name="deprecated_value")
self.assertEqual(value, "deprecated_value")
# Test arg is not renamed if new_name is not specified, but warning is raised
@deprecate_kwarg("deprecated_name", version=INFINITE_VERSION)
def dummy_function(deprecated_name=None):
return deprecated_name
with self.assertWarns(FutureWarning):
value = dummy_function(deprecated_name="deprecated_value")
self.assertEqual(value, "deprecated_value")
def test_raises(self):
# Test if deprecated name and new name are both passed and raise_if_both_names is set -> raise error
@deprecate_kwarg("deprecated_name", new_name="new_name", version=INFINITE_VERSION, raise_if_both_names=True)
def dummy_function(new_name=None, other_name=None):
return new_name, other_name
with self.assertRaises(ValueError):
dummy_function(deprecated_name="old_value", new_name="new_value")
# Test for current version == deprecation version
@deprecate_kwarg("deprecated_name", version=__version__, raise_if_greater_or_equal_version=True)
def dummy_function(deprecated_name=None):
return deprecated_name
with self.assertRaises(ValueError):
dummy_function(deprecated_name="old_value")
# Test for current version > deprecation version
@deprecate_kwarg("deprecated_name", version="0.0.0", raise_if_greater_or_equal_version=True)
def dummy_function(deprecated_name=None):
return deprecated_name
with self.assertRaises(ValueError):
dummy_function(deprecated_name="old_value")
def test_additional_message(self):
# Test additional message is added to the warning
@deprecate_kwarg("deprecated_name", version=INFINITE_VERSION, additional_message="Additional message")
def dummy_function(deprecated_name=None):
return deprecated_name
with warnings.catch_warnings(record=True) as raised_warnings:
warnings.simplefilter("always")
dummy_function(deprecated_name="old_value")
self.assertTrue("Additional message" in str(raised_warnings[0].message))
@parameterized.expand(["0.0.0", __version__, INFINITE_VERSION])
def test_warning_for_both_names(self, version):
# We should raise warning if both names are passed for any specified version
@deprecate_kwarg("deprecated_name", new_name="new_name", version=version)
def dummy_function(new_name=None, **kwargs):
return new_name
with self.assertWarns(FutureWarning):
result = dummy_function(deprecated_name="old_value", new_name="new_value")
self.assertEqual(result, "new_value")
@pytest.mark.torch_compile_test
@require_torch_accelerator
def test_compile_safe(self):
@deprecate_kwarg("deprecated_factor", new_name="new_factor", version=INFINITE_VERSION)
def dummy_function(new_factor=None, **kwargs):
return new_factor * torch.ones(1, device=torch_device)
compiled_function = torch.compile(dummy_function, fullgraph=True)
# Check that we can correctly call the compiled function with the old name, without raising errors
out = compiled_function(deprecated_factor=2)
self.assertEqual(out.item(), 2)
# Check that we can correctly call the compiled function with the new name, without raising errors
out = compiled_function(new_factor=2)
self.assertEqual(out.item(), 2)
# Check that we can correctly call the compiled function with both names, without raising errors
out = compiled_function(new_factor=2, deprecated_factor=10)
self.assertEqual(out.item(), 2)
| DeprecationDecoratorTester |
python | PyCQA__pylint | pylint/typing.py | {
"start": 1821,
"end": 2576
} | class ____(NamedTuple):
"""Tuple with information about a managed message of the linter."""
name: str | None
msgid: str
symbol: str
line: int | None
is_disabled: bool
MessageTypesFullName = Literal[
"convention", "error", "fatal", "info", "refactor", "statement", "warning"
]
"""All possible message categories."""
OptionDict = dict[
str,
None
| str
| bool
| int
| Pattern[str]
| Iterable[str | int | Pattern[str]]
| type["_CallbackAction"]
| Callable[[Any], Any]
| Callable[[Any, Any, Any, Any], Any],
]
Options = tuple[tuple[str, OptionDict], ...]
ReportsCallable = Callable[["Section", "LinterStats", "LinterStats | None"], None]
"""Callable to create a report."""
| ManagedMessage |
python | mitmproxy__pdoc | test/testdata/enums.py | {
"start": 339,
"end": 414
} | class ____(enum.IntEnum):
FOO = enum.auto()
BAR = enum.auto()
| IntEnum |
python | joke2k__faker | faker/providers/person/gu_IN/__init__.py | {
"start": 44,
"end": 2991
} | class ____(PersonProvider):
formats_male = (
"{{first_name_male}} {{last_name}}",
"{{prefix_male}} {{first_name_male}} {{last_name}}",
)
formats_female = (
"{{first_name_female}} {{last_name}}",
"{{prefix_female}} {{first_name_female}} {{last_name}}",
)
formats = (
"{{first_name}} {{last_name}}",
"{{prefix}} {{first_name}} {{last_name}}",
)
# names taken from https://www.behindthename.com/names/gender/feminine/usage/gujarati
first_names_female = (
"અંકિતા",
"અવની",
"હીરા",
"કાજલ",
"કિરણ",
"નેહા",
"નિશા",
"પૂજા",
"પ્રાચી",
"પ્રીતિ",
"પ્રીતિ",
"પૂજા",
"રચના",
"રાધીકા",
"શ્રેયા",
"શ્વેતા",
"સોનલ",
"તન્વી",
"તેજલ",
"ઉર્વી",
"વર્ષા",
)
# names taken from https://www.behindthename.com/names/gender/masculine/usage/gujarati
first_names_male = (
"અભિષેક",
"અજય",
"અક્ષય",
"આનંદ",
"અનિલ",
"અંકિત",
"અર્જુન",
"અરુણ",
"આશિષ",
"અશોક",
"ભારત",
"બ્રિજેશ",
"ચેતન",
"ચિરાગ",
"દર્શન",
"દીપા",
"દીપક",
"ધવલ",
"દિલીપ",
"દિનેશ",
"દીપા",
"દીપક",
"હરીશ",
"હર્ષ",
"હર્ષલ",
"હીરા",
"જગદીશ",
"જય",
"જયેશ",
"જિતેન્દ્ર",
"કાજલ",
"કમલ",
"કરણ",
"કિરણ",
"કિશન",
"કૃષ્ણ",
"કુમાર",
"કુનાલ",
"મહેન્દ્ર",
"મહેશ",
"મનોજ",
"મયૂર",
"મિતુલ",
"મુકેશ",
"નરેન્દ્ર",
"નીરજ",
"નિખિલ",
"નીરજ",
"નીરવ",
"નિશાંત",
"નિતિન",
"પંકજ",
"પાર્થ",
"પ્રકાશ",
"પ્રણવ",
"પ્રતિક",
"પ્રતિક",
"પ્રવીણ",
"પ્રવીણ",
"રાહુલ",
"રાજ",
"રાજેન્દ્ર",
"રાજેશ",
"રાકેશ",
"રમેશ",
"રવિ",
"રોહિત",
"સચિન",
"સમીર",
"સમીર",
"સંદિપ",
"સંદિપ",
"સંજય",
"સંજીવ",
"સંજીવ",
"શેખર",
"સિદ્ધાર્થ",
"સુભાષ",
"સુનીલ",
"સૂરજ",
"તુષાર",
"વસંત",
"વિક્રમ",
"વિપુલ",
"વિરાજ",
"વિશાલ",
"વિવેક",
"યશ",
)
first_names = first_names_female + first_names_male
# last names taken from https://surnames.behindthename.com/names/usage/gujarati
last_names = (
"ચૌધરી",
"ચૌધરી",
"ગઢવી",
"ગુપ્તા",
"જૈન",
"જોષી",
"કુમાર",
"પટેલ",
"શર્મા",
)
prefixes_female = ("શ્રીમતી", "કુમારી")
prefixes_male = ("શ્રી", "શ્રી માન")
prefixes = prefixes_female + prefixes_male
| Provider |
python | lxml__lxml | src/lxml/html/__init__.py | {
"start": 7686,
"end": 23266
} | class ____:
def set(self, key, value=None):
"""set(self, key, value=None)
Sets an element attribute. If no value is provided, or if the value is None,
creates a 'boolean' attribute without value, e.g. "<form novalidate></form>"
for ``form.set('novalidate')``.
"""
super().set(key, value)
@property
def classes(self):
"""
A set-like wrapper around the 'class' attribute.
"""
return Classes(self.attrib)
@classes.setter
def classes(self, classes):
assert isinstance(classes, Classes) # only allow "el.classes |= ..." etc.
value = classes._get_class_value()
if value:
self.set('class', value)
elif self.get('class') is not None:
del self.attrib['class']
@property
def base_url(self):
"""
Returns the base URL, given when the page was parsed.
Use with ``urlparse.urljoin(el.base_url, href)`` to get
absolute URLs.
"""
return self.getroottree().docinfo.URL
@property
def forms(self):
"""
Return a list of all the forms
"""
return _forms_xpath(self)
@property
def body(self):
"""
Return the <body> element. Can be called from a child element
to get the document's head.
"""
for element in self.getroottree().iter("body", f"{{{XHTML_NAMESPACE}}}body"):
return element
return None
@property
def head(self):
"""
Returns the <head> element. Can be called from a child
element to get the document's head.
"""
for element in self.getroottree().iter("head", f"{{{XHTML_NAMESPACE}}}head"):
return element
return None
@property
def label(self):
"""
Get or set any <label> element associated with this element.
"""
id = self.get('id')
if not id:
return None
result = _label_xpath(self, id=id)
if not result:
return None
else:
return result[0]
@label.setter
def label(self, label):
id = self.get('id')
if not id:
raise TypeError(
"You cannot set a label for an element (%r) that has no id"
% self)
if _nons(label.tag) != 'label':
raise TypeError(
"You can only assign label to a label element (not %r)"
% label)
label.set('for', id)
@label.deleter
def label(self):
label = self.label
if label is not None:
del label.attrib['for']
def drop_tree(self):
"""
Removes this element from the tree, including its children and
text. The tail text is joined to the previous element or
parent.
"""
parent = self.getparent()
assert parent is not None
if self.tail:
previous = self.getprevious()
if previous is None:
parent.text = (parent.text or '') + self.tail
else:
previous.tail = (previous.tail or '') + self.tail
parent.remove(self)
def drop_tag(self):
"""
Remove the tag, but not its children or text. The children and text
are merged into the parent.
Example::
>>> h = fragment_fromstring('<div>Hello <b>World!</b></div>')
>>> h.find('.//b').drop_tag()
>>> print(tostring(h, encoding='unicode'))
<div>Hello World!</div>
"""
parent = self.getparent()
assert parent is not None
previous = self.getprevious()
if self.text and isinstance(self.tag, str):
# not a Comment, etc.
if previous is None:
parent.text = (parent.text or '') + self.text
else:
previous.tail = (previous.tail or '') + self.text
if self.tail:
if len(self):
last = self[-1]
last.tail = (last.tail or '') + self.tail
elif previous is None:
parent.text = (parent.text or '') + self.tail
else:
previous.tail = (previous.tail or '') + self.tail
index = parent.index(self)
parent[index:index+1] = self[:]
def find_rel_links(self, rel):
"""
Find any links like ``<a rel="{rel}">...</a>``; returns a list of elements.
"""
rel = rel.lower()
return [el for el in _rel_links_xpath(self)
if el.get('rel').lower() == rel]
def find_class(self, class_name):
"""
Find any elements with the given class name.
"""
return _class_xpath(self, class_name=class_name)
def get_element_by_id(self, id, *default):
"""
Get the first element in a document with the given id. If none is
found, return the default argument if provided or raise KeyError
otherwise.
Note that there can be more than one element with the same id,
and this isn't uncommon in HTML documents found in the wild.
Browsers return only the first match, and this function does
the same.
"""
try:
# FIXME: should this check for multiple matches?
# browsers just return the first one
return _id_xpath(self, id=id)[0]
except IndexError:
if default:
return default[0]
else:
raise KeyError(id)
def text_content(self):
"""
Return the text content of the tag (and the text in any children).
"""
return _collect_string_content(self)
def cssselect(self, expr, translator='html'):
"""
Run the CSS expression on this element and its children,
returning a list of the results.
Equivalent to lxml.cssselect.CSSSelect(expr, translator='html')(self)
-- note that pre-compiling the expression can provide a substantial
speedup.
"""
# Do the import here to make the dependency optional.
from lxml.cssselect import CSSSelector
return CSSSelector(expr, translator=translator)(self)
########################################
## Link functions
########################################
def make_links_absolute(self, base_url=None, resolve_base_href=True,
handle_failures=None):
"""
Make all links in the document absolute, given the
``base_url`` for the document (the full URL where the document
came from), or if no ``base_url`` is given, then the ``.base_url``
of the document.
If ``resolve_base_href`` is true, then any ``<base href>``
tags in the document are used *and* removed from the document.
If it is false then any such tag is ignored.
If ``handle_failures`` is None (default), a failure to process
a URL will abort the processing. If set to 'ignore', errors
are ignored. If set to 'discard', failing URLs will be removed.
"""
if base_url is None:
base_url = self.base_url
if base_url is None:
raise TypeError(
"No base_url given, and the document has no base_url")
if resolve_base_href:
self.resolve_base_href()
if handle_failures == 'ignore':
def link_repl(href):
try:
return urljoin(base_url, href)
except ValueError:
return href
elif handle_failures == 'discard':
def link_repl(href):
try:
return urljoin(base_url, href)
except ValueError:
return None
elif handle_failures is None:
def link_repl(href):
return urljoin(base_url, href)
else:
raise ValueError(
"unexpected value for handle_failures: %r" % handle_failures)
self.rewrite_links(link_repl)
def resolve_base_href(self, handle_failures=None):
"""
Find any ``<base href>`` tag in the document, and apply its
values to all links found in the document. Also remove the
tag once it has been applied.
If ``handle_failures`` is None (default), a failure to process
a URL will abort the processing. If set to 'ignore', errors
are ignored. If set to 'discard', failing URLs will be removed.
"""
base_href = None
basetags = self.xpath('//base[@href]|//x:base[@href]',
namespaces={'x': XHTML_NAMESPACE})
for b in basetags:
base_href = b.get('href')
b.drop_tree()
if not base_href:
return
self.make_links_absolute(base_href, resolve_base_href=False,
handle_failures=handle_failures)
    def iterlinks(self):
        """
        Yield (element, attribute, link, pos), where attribute may be None
        (indicating the link is in the text).  ``pos`` is the position
        where the link occurs; often 0, but sometimes something else in
        the case of links in stylesheets or style tags.

        Note: <base href> is *not* taken into account in any way.  The
        link you get is exactly the link in the document.

        Note: multiple links inside of a single text string or
        attribute value are returned in reversed order.  This makes it
        possible to replace or delete them from the text string value
        based on their reported text positions.  Otherwise, a
        modification at one text position can change the positions of
        links reported later on.
        """
        # Attribute names that may hold links (defined in lxml.html.defs).
        link_attrs = defs.link_attrs
        for el in self.iter(etree.Element):
            attribs = el.attrib
            tag = _nons(el.tag)
            if tag == 'object':
                codebase = None
                ## <object> tags have attributes that are relative to
                ## codebase
                if 'codebase' in attribs:
                    codebase = el.get('codebase')
                    yield (el, 'codebase', codebase, 0)
                for attrib in ('classid', 'data'):
                    if attrib in attribs:
                        value = el.get(attrib)
                        if codebase is not None:
                            # Resolve relative to the codebase yielded above.
                            value = urljoin(codebase, value)
                        yield (el, attrib, value, 0)
                if 'archive' in attribs:
                    # 'archive' may contain several whitespace-separated URLs.
                    for match in _archive_re.finditer(el.get('archive')):
                        value = match.group(0)
                        if codebase is not None:
                            value = urljoin(codebase, value)
                        yield (el, 'archive', value, match.start())
            else:
                # Generic case: yield every known link-bearing attribute.
                for attrib in link_attrs:
                    if attrib in attribs:
                        yield (el, attrib, attribs[attrib], 0)
            if tag == 'meta':
                http_equiv = attribs.get('http-equiv', '').lower()
                if http_equiv == 'refresh':
                    content = attribs.get('content', '')
                    match = _parse_meta_refresh_url(content)
                    url = (match.group('url') if match else content).strip()
                    # unexpected content means the redirect won't work, but we might
                    # as well be permissive and return the entire string.
                    if url:
                        url, pos = _unquote_match(
                            url, match.start('url') if match else content.find(url))
                        yield (el, 'content', url, pos)
            elif tag == 'param':
                valuetype = el.get('valuetype') or ''
                if valuetype.lower() == 'ref':
                    ## FIXME: while it's fine we *find* this link,
                    ## according to the spec we aren't supposed to
                    ## actually change the value, including resolving
                    ## it.  It can also still be a link, even if it
                    ## doesn't have a valuetype="ref" (which seems to be the norm)
                    ## http://www.w3.org/TR/html401/struct/objects.html#adef-valuetype
                    yield (el, 'value', el.get('value'), 0)
            elif tag == 'style' and el.text:
                # Collect url(...) references and @import targets from the CSS.
                urls = [
                    # (start_pos, url)
                    _unquote_match(match.group(1), match.start(1))[::-1]
                    for match in _iter_css_urls(el.text)
                ] + [
                    (match.start(1), match.group(1))
                    for match in _iter_css_imports(el.text)
                ]
                if urls:
                    # sort by start pos to bring both match sets back into order
                    # and reverse the list to report correct positions despite
                    # modifications
                    urls.sort(reverse=True)
                    for start, url in urls:
                        yield (el, None, url, start)
            if 'style' in attribs:
                # Inline style attributes may also contain url(...) references.
                urls = list(_iter_css_urls(attribs['style']))
                if urls:
                    # return in reversed order to simplify in-place modifications
                    for match in urls[::-1]:
                        url, start = _unquote_match(match.group(1), match.start(1))
                        yield (el, 'style', url, start)
    def rewrite_links(self, link_repl_func, resolve_base_href=True,
                      base_href=None):
        """
        Rewrite all the links in the document.  For each link
        ``link_repl_func(link)`` will be called, and the return value
        will replace the old link.

        Note that links may not be absolute (unless you first called
        ``make_links_absolute()``), and may be internal (e.g.,
        ``'#anchor'``).  They can also be values like
        ``'mailto:email'`` or ``'javascript:expr'``.

        If you give ``base_href`` then all links passed to
        ``link_repl_func()`` will take that into account.

        If the ``link_repl_func`` returns None, the attribute or
        tag text will be removed completely.
        """
        if base_href is not None:
            # FIXME: this can be done in one pass with a wrapper
            # around link_repl_func
            self.make_links_absolute(
                base_href, resolve_base_href=resolve_base_href)
        elif resolve_base_href:
            self.resolve_base_href()
        # iterlinks() reports links in reversed order within a single text
        # string/attribute, so in-place edits below don't shift later positions.
        for el, attrib, link, pos in self.iterlinks():
            new_link = link_repl_func(link.strip())
            if new_link == link:
                continue
            if new_link is None:
                # Remove the attribute or element content
                if attrib is None:
                    el.text = ''
                else:
                    del el.attrib[attrib]
                continue
            if attrib is None:
                # Link lives inside element text (e.g. a <style> block).
                new = el.text[:pos] + new_link + el.text[pos+len(link):]
                el.text = new
            else:
                cur = el.get(attrib)
                if not pos and len(cur) == len(link):
                    new = new_link # most common case
                else:
                    # Splice the replacement into the attribute value.
                    new = cur[:pos] + new_link + cur[pos+len(link):]
                el.set(attrib, new)
| HtmlMixin |
python | bokeh__bokeh | tests/unit/bokeh/document/_util_document.py | {
"start": 1467,
"end": 2043
} | class ____(Model):
foo = DistanceSpec(2)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| ModelWithSpecInTestDocument |
python | Textualize__textual | src/textual/widgets/_markdown.py | {
"start": 5692,
"end": 11245
} | class ____(Static):
"""The base class for a Markdown Element."""
COMPONENT_CLASSES = {"em", "strong", "s", "code_inline"}
"""
These component classes target standard inline markdown styles.
Changing these will potentially break the standard markdown formatting.
| Class | Description |
| :- | :- |
| `code_inline` | Target text that is styled as inline code. |
| `em` | Target text that is emphasized inline. |
| `s` | Target text that is styled inline with strikethrough. |
| `strong` | Target text that is styled inline with strong. |
"""
DEFAULT_CSS = """
MarkdownBlock {
width: 1fr;
height: auto;
}
"""
def __init__(
self,
markdown: Markdown,
token: Token,
source_range: tuple[int, int] | None = None,
*args,
**kwargs,
) -> None:
self._markdown: Markdown = markdown
"""A reference to the Markdown document that contains this block."""
self._content: Content = Content()
self._token: Token = token
self._blocks: list[MarkdownBlock] = []
self._inline_token: Token | None = None
self.source_range: tuple[int, int] = source_range or (
(token.map[0], token.map[1]) if token.map is not None else (0, 0)
)
super().__init__(
*args, name=token.type, classes=f"level-{token.level}", **kwargs
)
@property
def select_container(self) -> Widget:
return self.query_ancestor(Markdown)
@property
def source(self) -> str | None:
"""The source of this block if known, otherwise `None`."""
if self.source_range is None:
return None
start, end = self.source_range
return "".join(self._markdown.source.splitlines(keepends=True)[start:end])
    def _copy_context(self, block: MarkdownBlock) -> None:
        """Copy the context from another block.

        Args:
            block: The block whose parser token should be adopted.
        """
        self._token = block._token
    def compose(self) -> ComposeResult:
        # Yield the staged child blocks; because this is a generator, the
        # list is cleared only after the caller has consumed every child.
        yield from self._blocks
        self._blocks.clear()
    def set_content(self, content: Content) -> None:
        # Store the content and refresh the rendered widget in one step.
        self._content = content
        self.update(content)
    async def _update_from_block(self, block: MarkdownBlock) -> None:
        # Replace this block in the DOM: remove self, then mount the new block
        # on the owning Markdown widget.
        await self.remove()
        await self._markdown.mount(block)
    async def action_link(self, href: str) -> None:
        """Called on link click."""
        # Bubble the click up as a LinkClicked message from the owning Markdown.
        self.post_message(Markdown.LinkClicked(self._markdown, href))
def build_from_token(self, token: Token) -> None:
"""Build inline block content from its source token.
Args:
token: The token from which this block is built.
"""
self._inline_token = token
content = self._token_to_content(token)
self.set_content(content)
def _token_to_content(self, token: Token) -> Content:
"""Convert an inline token to Textual Content.
Args:
token: A markdown token.
Returns:
Content instance.
"""
if token.children is None:
return Content("")
tokens: list[str] = []
spans: list[Span] = []
style_stack: list[tuple[Style | str, int]] = []
position: int = 0
def add_content(text: str) -> None:
"""Add text to the tokens list, and advance the position.
Args:
text: Text to add.
"""
nonlocal position
tokens.append(text)
position += len(text)
def add_style(style: Style | str) -> None:
"""Add a style to the stack.
Args:
style: A style as Style instance or string.
"""
style_stack.append((style, position))
position = 0
def close_tag() -> None:
style, start = style_stack.pop()
spans.append(Span(start, position, style))
for child in token.children:
child_type = child.type
if child_type == "text":
add_content(re.sub(r"\s+", " ", child.content))
if child_type == "hardbreak":
add_content("\n")
if child_type == "softbreak":
add_content(" ")
elif child_type == "code_inline":
add_style(".code_inline")
add_content(child.content)
close_tag()
elif child_type == "em_open":
add_style(".em")
elif child_type == "strong_open":
add_style(".strong")
elif child_type == "s_open":
add_style(".s")
elif child_type == "link_open":
href = child.attrs.get("href", "")
action = f"link({href!r})"
add_style(Style.from_meta({"@click": action}))
elif child_type == "image":
href = child.attrs.get("src", "")
alt = child.attrs.get("alt", "")
action = f"link({href!r})"
add_style(Style.from_meta({"@click": action}))
add_content("🖼 ")
if alt:
add_content(f"({alt})")
if child.children is not None:
for grandchild in child.children:
add_content(grandchild.content)
close_tag()
elif child_type.endswith("_close"):
close_tag()
content = Content("".join(tokens), spans=spans)
return content
| MarkdownBlock |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.