|
|
|
|
|
|
|
|
import torch |
|
|
from torch.package import PackageExporter |
|
|
from torch import Tensor |
|
|
from enum import Enum |
|
|
from pathlib import Path |
|
|
from typing import ( |
|
|
Any, BinaryIO, Callable, ContextManager, Dict, Iterable, Iterator, List, |
|
|
NamedTuple, Optional, overload, Sequence, Tuple, TypeVar, Type, Union, |
|
|
Generic, Set, AnyStr) |
|
|
from typing_extensions import Literal |
|
|
from torch._six import inf |
|
|
|
|
|
from torch.types import ( |
|
|
_int, _float, _bool, _dtype, _device, _qscheme, _size, _layout, Device, Number, Storage, SymInt, _dispatchkey |
|
|
) |
|
|
from torch.storage import TypedStorage |
|
|
|
|
|
import builtins |
|
|
|
|
|
|
|
|
|
|
|
from . import _nn as _nn |
|
|
from . import _onnx as _onnx |
|
|
from . import _VariableFunctions as _VariableFunctions |
|
|
from . import _functorch as _functorch |
|
|
from . import _lazy as _lazy |
|
|
from . import _lazy_ts_backend as _lazy_ts_backend |
|
|
|
|
|
T = TypeVar('T') |
|
|
S = TypeVar("S", bound="torch.Tensor") |
|
|
|
|
|
|
|
|
class device: |
|
|
type: str |
|
|
index: _int |
|
|
|
|
|
def __get__(self, instance, owner=None) -> device: ... |
|
|
|
|
|
|
|
|
@overload |
|
|
def __init__(self, device: Union[_device, _int, str]) -> None: ... |
|
|
|
|
|
@overload |
|
|
def __init__(self, type: str, index: _int) -> None: ... |
|
|
|
|
|
def __reduce__(self) -> Tuple[Any, ...]: ... |
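
# Usage sketch (illustrative comment, not part of the stub itself): the two
# __init__ overloads above mean a device can be built from a single string
# or from an explicit (type, index) pair.
#
#   >>> torch.device("cuda:1")             # type='cuda', index=1
#   >>> torch.device("cuda", 1)            # equivalent spelling
#   >>> torch.device("cpu").type           # 'cpu'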
|
|
|
|
|
|
|
|
class Stream: |
|
|
_cdata: _int |
|
|
device: device |
|
|
|
|
|
... |
|
|
|
|
|
|
|
|
class Size(Tuple[_int, ...]): |
|
|
|
|
|
|
|
|
@overload |
|
|
def __getitem__(self: Size, key: _int) -> _int: ... |
|
|
|
|
|
@overload |
|
|
def __getitem__(self: Size, key: slice) -> Size: ... |
|
|
|
|
|
def numel(self: Size) -> _int: ... |
|
|
|
|
|
... |
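
# Usage sketch: Size is the tuple subclass returned by Tensor.shape; integer
# indexing yields an int while slicing preserves the Size type, matching the
# __getitem__ overloads above.
#
#   >>> s = torch.zeros(2, 3, 4).shape     # torch.Size([2, 3, 4])
#   >>> s[0]                               # 2
#   >>> s[1:]                              # torch.Size([3, 4])
#   >>> s.numel()                          # 24, the product of all dims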
|
|
|
|
|
|
|
|
class dtype: |
|
|
|
|
|
is_floating_point: _bool |
|
|
is_complex: _bool |
|
|
is_signed: _bool |
|
|
... |
|
|
|
|
|
|
|
|
class iinfo: |
|
|
bits: _int |
|
|
min: _int |
|
|
max: _int |
|
|
dtype: str |
|
|
|
|
|
def __init__(self, dtype: _dtype) -> None: ... |
|
|
|
|
|
class finfo: |
|
|
bits: _int |
|
|
min: _float |
|
|
max: _float |
|
|
eps: _float |
|
|
tiny: _float |
|
|
smallest_normal: _float |
|
|
resolution: _float |
|
|
dtype: str |
|
|
|
|
|
@overload |
|
|
def __init__(self, dtype: _dtype) -> None: ... |
|
|
|
|
|
@overload |
|
|
def __init__(self) -> None: ... |
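
# Usage sketch: iinfo and finfo report the numeric limits of a dtype; the
# no-argument finfo overload uses the current default floating dtype.
#
#   >>> torch.iinfo(torch.int8).max        # 127
#   >>> torch.finfo(torch.float32).eps     # ~1.19e-07
#   >>> torch.finfo().bits                 # limits of the default dtype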
|
|
|
|
|
float32: dtype = ... |
|
|
float: dtype = ... |
|
|
float64: dtype = ... |
|
|
double: dtype = ... |
|
|
float16: dtype = ... |
|
|
bfloat16: dtype = ... |
|
|
half: dtype = ... |
|
|
uint8: dtype = ... |
|
|
int8: dtype = ... |
|
|
int16: dtype = ... |
|
|
short: dtype = ... |
|
|
int32: dtype = ... |
|
|
int: dtype = ... |
|
|
int64: dtype = ... |
|
|
long: dtype = ... |
|
|
complex32: dtype = ... |
|
|
complex64: dtype = ... |
|
|
cfloat: dtype = ... |
|
|
complex128: dtype = ... |
|
|
cdouble: dtype = ... |
|
|
quint8: dtype = ... |
|
|
qint8: dtype = ... |
|
|
qint32: dtype = ... |
|
|
bool: dtype = ... |
|
|
quint4x2: dtype = ... |
|
|
quint2x4: dtype = ... |
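
# Note: several of the names above are aliases (torch.float is torch.float32,
# torch.double is torch.float64, torch.long is torch.int64, and so on), so
# comparisons against either spelling behave the same:
#
#   >>> torch.tensor([1.0]).dtype == torch.float32   # True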
|
|
|
|
|
|
|
|
class layout: |
|
|
... |
|
|
|
|
|
|
|
|
def DisableTorchFunction(): ... |
|
|
|
|
|
|
|
|
strided: layout = ...

sparse_coo: layout = ...

sparse_csr: layout = ...

sparse_csc: layout = ...

sparse_bsr: layout = ...

sparse_bsc: layout = ...

_mkldnn: layout = ...

|
|
|
|
|
|
|
|
class memory_format: ... |
|
|
|
|
|
|
|
|
contiguous_format: memory_format = ... |
|
|
channels_last: memory_format = ... |
|
|
channels_last_3d: memory_format = ... |
|
|
preserve_format: memory_format = ... |
|
|
|
|
|
|
|
|
class qscheme: ... |
|
|
|
|
|
|
|
|
per_tensor_affine: qscheme = ... |
|
|
per_channel_affine: qscheme = ... |
|
|
per_tensor_symmetric: qscheme = ... |
|
|
per_channel_symmetric: qscheme = ... |
|
|
per_channel_affine_float_qparams: qscheme = ... |
|
|
|
|
|
|
|
|
class _FunctionBase(object): |
|
|
... |
|
|
|
|
|
|
|
|
class _LegacyVariableBase(object): |
|
|
def __init__( |
|
|
self, |
|
|
data: Optional[Tensor]=..., |
|
|
requires_grad: Optional[_bool]=..., |
|
|
volatile: Optional[_bool]=..., |
|
|
_grad_fn: Optional[_FunctionBase]=... |
|
|
) -> None: ... |
|
|
|
|
|
|
|
|
class IODescriptor: ... |
|
|
|
|
|
class JITException: ... |
|
|
|
|
|
class Future(object): |
|
|
def __init__(self, devices: List[device]) -> None: ... |
|
|
def done(self) -> _bool: ... |
|
|
def value(self) -> Any: ... |
|
|
def wait(self) -> Any: ... |
|
|
def add_done_callback(self, callback: Callable) -> None: ... |
|
|
def then(self, callback: Callable) -> Future: ... |
|
|
def set_result(self, result: Any) -> None: ... |
|
|
def _set_unwrap_func(self, callback: Callable) -> None: ... |
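
# Usage sketch (via the public torch.jit wrappers around the fork/wait
# bindings declared below), assuming a scripted function:
#
#   >>> @torch.jit.script
#   ... def slow_add(x: Tensor, y: Tensor) -> Tensor:
#   ...     return x + y
#   >>> fut = torch.jit.fork(slow_add, torch.ones(2), torch.ones(2))
#   >>> torch.jit.wait(fut)                # blocks until the result is ready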
|
|
|
|
|
def _jit_set_num_profiled_runs(num: _size) -> _size: ... |
|
|
|
|
|
class SymIntNode(object): |
|
|
def get_pyobj(self) -> Any: ... |
|
|
|
|
|
@staticmethod |
|
|
def new_symint(obj) -> SymIntNode: ... |
|
|
|
|
|
class SymFloatNode(object): |
|
|
def get_pyobj(self) -> Any: ... |
|
|
|
|
|
@staticmethod |
|
|
def new_symfloat(obj) -> SymFloatNode: ... |
|
|
|
|
|
|
|
|
class MobileOptimizerType: |
|
|
... |
|
|
|
|
|
CONV_BN_FUSION: MobileOptimizerType |
|
|
INSERT_FOLD_PREPACK_OPS: MobileOptimizerType |
|
|
REMOVE_DROPOUT: MobileOptimizerType |
|
|
FUSE_ADD_RELU: MobileOptimizerType |
|
|
HOIST_CONV_PACKED_PARAMS: MobileOptimizerType |
|
|
|
|
|
def fork(*args: Any, **kwargs: Any) -> Future: ... |
|
|
def wait(fut: Future) -> Any: ... |
|
|
def _collect_all(futures: List[Future]) -> Future: ... |
|
|
def _set_print_stack_traces_on_fatal_signal(print: _bool) -> None: ... |
|
|
|
|
|
def unify_type_list(types: List[JitType]) -> JitType: ... |
|
|
def _freeze_module(module: ScriptModule, |
|
|
preserved_attrs: List[str] = [], |
|
|
freeze_interfaces: _bool = True, |
|
|
preserveParameters: _bool = True) -> ScriptModule: ... |
|
|
def _jit_pass_optimize_frozen_graph(graph: Graph, optimize_numerics: _bool = True) -> None: ...
|
|
def _jit_pass_optimize_for_inference(module: 'torch.jit.ScriptModule', |
|
|
other_methods: List[str] = []) -> None: ... |
|
|
def _jit_pass_fold_frozen_conv_bn(graph: Graph): ... |
|
|
def _jit_pass_fold_frozen_conv_add_or_sub(graph: Graph): ... |
|
|
def _jit_pass_fold_frozen_conv_mul_or_div(graph: Graph): ... |
|
|
def _jit_pass_fuse_frozen_conv_add_relu(graph: Graph): ... |
|
|
def _jit_pass_concat_frozen_linear(graph: Graph): ... |
|
|
def _jit_pass_convert_frozen_ops_to_mkldnn(graph: Graph): ... |
|
|
def _jit_pass_transpose_frozen_linear(graph: Graph): ...
|
|
def _jit_pass_remove_dropout(module: 'torch.jit.ScriptModule'): ... |
|
|
|
|
|
def _is_tracing() -> _bool: ... |
|
|
def _jit_init() -> _bool: ... |
|
|
def _jit_flatten(arg: Any) -> Tuple[List[Tensor], IODescriptor]: ... |
|
|
def _jit_unflatten(vars: List[Tensor], desc: IODescriptor) -> Any: ... |
|
|
def _jit_get_operation(op_name: str) -> Tuple[Callable, List[str]]: ... |
|
|
def _get_operation_overload(op_name: str, op_overload_name: str) -> Tuple[Callable, Callable, List[Any]]: ... |
|
|
def _get_schema(op_name: str, overload_name: str) -> FunctionSchema: ... |
|
|
def _jit_pass_optimize_for_mobile(module: 'torch.jit.ScriptModule', |
|
|
optimization_blocklist: Set[MobileOptimizerType], |
|
|
preserved_methods: List[AnyStr]) -> 'torch.jit.ScriptModule': ... |
|
|
def _clone_module_with_class(module: 'torch.jit.ScriptModule', |
|
|
ignored_methods: List[AnyStr], |
|
|
ignored_attributes: List[AnyStr]) -> 'torch.jit.ScriptModule': ... |
|
|
def _jit_pass_vulkan_optimize_for_mobile(module: 'torch.jit.ScriptModule', |
|
|
preserved_methods: List[AnyStr]) -> 'torch.jit.ScriptModule': ... |
|
|
def _jit_pass_metal_optimize_for_mobile(module: 'torch.jit.ScriptModule', |
|
|
preserved_methods: List[AnyStr]) -> 'torch.jit.ScriptModule': ... |
|
|
def _jit_pass_inline(graph: Graph) -> None: ...

def _jit_pass_constant_propagation(graph: Graph) -> None: ...

def _jit_pass_propagate_shapes_on_graph(graph: Graph) -> None: ...

def _jit_register_decomposition_for_schema(schema: FunctionSchema, graph: Graph) -> None: ...

def _jit_erase_non_input_shape_information(graph: Graph) -> None: ...
|
|
def _jit_get_schemas_for_operator(name: str) -> List[FunctionSchema]: ...
|
|
def _jit_get_all_schemas() -> List[FunctionSchema]: ... |
|
|
def _jit_check_alias_annotation(g: Graph, args: Tuple[Any, ...], unqualified_op_name: str): ... |
|
|
def _jit_can_fuse_on_cpu() -> _bool: ... |
|
|
def _jit_can_fuse_on_gpu() -> _bool: ... |
|
|
def _jit_can_fuse_on_cpu_legacy() -> _bool: ... |
|
|
def _debug_get_fusion_group_inlining() -> _bool: ... |
|
|
def _debug_set_fusion_group_inlining(enable: _bool): ... |
|
|
def _jit_texpr_fuser_enabled() -> _bool: ... |
|
|
def _jit_nvfuser_enabled() -> _bool: ... |
|
|
def _jit_llga_enabled() -> _bool: ... |
|
|
def _jit_set_llga_enabled(enable: _bool): ... |
|
|
def _llvm_enabled() -> _bool: ... |
|
|
def _jit_override_can_fuse_on_cpu(override: _bool): ... |
|
|
def _jit_override_can_fuse_on_gpu(override: _bool): ... |
|
|
def _jit_override_can_fuse_on_cpu_legacy(override: _bool): ... |
|
|
def _jit_set_symbolic_shapes_test_mode(override: _bool): ... |
|
|
def _jit_symbolic_shapes_test_mode_enabled() -> _bool: ... |
|
|
def _jit_set_texpr_fuser_enabled(enable: _bool): ... |
|
|
def _jit_set_te_must_use_llvm_cpu(use_llvm: _bool): ... |
|
|
def _jit_set_nvfuser_enabled(enable: _bool) -> _bool: ... |
|
|
def _jit_cat_wo_conditionals(optimize_cat: _bool): ... |
|
|
def _jit_opt_conditionals(opt_conds: _bool): ... |
|
|
def _jit_pass_canonicalize(graph: Graph, keep_unique_names: _bool = True): ... |
|
|
def _jit_pass_erase_shape_information(graph: Graph): ... |
|
|
def _jit_pass_fold_convbn(module: 'torch.jit.ScriptModule'): ... |
|
|
def _jit_pass_insert_observers(module: 'torch.jit.ScriptModule', |
|
|
method_name: str, |
|
|
qconfig_dict: Dict[str, Any], |
|
|
inplace: _bool, |
|
|
quant_type: _int): ... |
|
|
def _jit_pass_insert_quant_dequant(module: 'torch.jit.ScriptModule', |
|
|
method_name: str, |
|
|
inplace: _bool, |
|
|
debug: _bool, |
|
|
quant_type: _int): ... |
|
|
def _jit_pass_insert_quant_dequant_for_ondevice_ptq(module: 'torch.jit.ScriptModule', |
|
|
method_name: str, |
|
|
inplace: _bool, |
|
|
debug: _bool, |
|
|
quant_type: _int): ... |
|
|
def _jit_pass_quant_finalize(module: 'torch.jit.ScriptModule', |
|
|
quant_type: _int, |
|
|
preserved_attrs: Sequence[str]): ... |
|
|
def _jit_pass_quant_finalize_for_ondevice_ptq(module: 'torch.jit.ScriptModule', |
|
|
quant_type: _int, |
|
|
method_name: str): ... |
|
|
def _jit_pass_insert_observer_method_for_ondevice_ptq(module: 'torch.jit.ScriptModule', |
|
|
method_name: str, |
|
|
qconfig_dict: Dict[str, Any], |
|
|
inplace: _bool, |
|
|
quant_type: _int): ... |
|
|
def _jit_set_profiling_executor(profiling_flag: _bool) -> _bool: ... |
|
|
def _jit_set_profiling_mode(profiling_flag: _bool) -> _bool: ... |
|
|
def _jit_set_fusion_strategy(strategy: List[Tuple[str, _int]]) -> List[Tuple[str, _int]]: ... |
|
|
def _jit_try_infer_type(obj: Any) -> InferredType: ... |
|
|
def _jit_get_trigger_value(trigger_name: str) -> _int: ... |
|
|
|
|
|
|
|
|
ResolutionCallback = Callable[[str], Callable[..., Any]] |
|
|
|
|
|
|
|
|
|
|
|
def _create_function_from_graph(qualname: str, graph: Graph) -> ScriptFunction: ... |
|
|
def _debug_set_autodiff_subgraph_inlining(disabled: _bool) -> None: ... |
|
|
def _ivalue_tags_match(lhs: ScriptModule, rhs: ScriptModule) -> _bool: ... |
|
|
def _jit_assert_is_instance(obj: Any, type: JitType): ... |
|
|
def _jit_clear_class_registry() -> None: ... |
|
|
def _jit_set_emit_hooks(ModuleHook: Optional[Callable], FunctionHook: Optional[Callable]) -> None: ... |
|
|
def _jit_get_emit_hooks() -> Tuple[Callable, Callable]: ... |
|
|
def _load_for_lite_interpreter(filename: Union[str, Path], map_location: Union[_device, str, None]): ... |
|
|
def _load_for_lite_interpreter_from_buffer(buffer: BinaryIO, map_location: Union[_device, str, None]): ... |
|
|
def _export_operator_list(module: LiteScriptModule): ... |
|
|
def _quantize_ondevice_ptq_dynamic(module: LiteScriptModule, method_name: str): ... |
|
|
def _get_model_bytecode_version(filename: Union[str, Path]) -> _int: ... |
|
|
def _get_model_bytecode_version_from_buffer(buffer: BinaryIO) -> _int: ... |
|
|
def _backport_for_mobile(filename_input: Union[str, Path], filename_output: Union[str, Path], to_version: _int) -> None: ... |
|
|
def _backport_for_mobile_from_buffer(buffer: BinaryIO, filename_output: Union[str, Path], to_version: _int) -> None: ... |
|
|
def _backport_for_mobile_to_buffer(filename_input: Union[str, Path], to_version: _int) -> bytes: ...

def _backport_for_mobile_from_buffer_to_buffer(buffer: BinaryIO, to_version: _int) -> bytes: ...
|
|
def _get_model_ops_and_info(filename: Union[str, Path]): ... |
|
|
def _get_model_ops_and_info_from_buffer(buffer: BinaryIO): ... |
|
|
def _get_mobile_model_contained_types(filename: Union[str, Path]): ... |
|
|
def _get_mobile_model_contained_types_from_buffer(buffer: BinaryIO): ... |
|
|
def _logging_set_logger(logger: LoggerBase) -> LoggerBase: ... |
|
|
def _get_graph_executor_optimize(optimize: Optional[_bool] = None) -> _bool: ... |
|
|
def _set_graph_executor_optimize(optimize: _bool): ... |
|
|
def _export_opnames(module: ScriptModule) -> List[str]: ... |
|
|
def _create_function_from_trace( |
|
|
qualname: str, |
|
|
func: Callable[..., Any], |
|
|
input_tuple: Tuple[Any, ...], |
|
|
var_lookup_fn: Callable[[Tensor], str], |
|
|
strict: _bool, |
|
|
force_outplace: _bool, |
|
|
argument_names: List[str] |
|
|
) -> Tuple[Graph, Stack]: ... |
|
|
def _jit_is_script_object(obj: Any) -> _bool: ... |
|
|
def _last_executed_optimized_graph() -> Graph: ... |
|
|
def parse_type_comment(comment: str) -> Decl: ... |
|
|
def _get_upgraders_map_size() -> _int: ... |
|
|
def _dump_upgraders_map() -> Dict[str, str]: ... |
|
|
def _test_only_populate_upgraders(content: Dict[str, str]) -> None: ... |
|
|
def _test_only_remove_upgraders(content: Dict[str, str]) -> None: ... |
|
|
def merge_type_from_type_comment(decl: Decl, type_annotation_decl: Decl, is_method: _bool) -> Decl: ... |
|
|
def parse_ir(input: str, parse_tensor_constants: _bool) -> Graph: ... |
|
|
def parse_schema(schema: str) -> FunctionSchema: ... |
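
# Usage sketch: parse_schema round-trips the textual schema format used
# throughout this file into the FunctionSchema class declared further down.
#
#   >>> s = torch._C.parse_schema(
#   ...     "aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor")
#   >>> s.name, s.overload_name            # ('aten::add', 'Tensor')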
|
|
def get_device(input: Tensor) -> _int: ... |
|
|
|
|
|
def _resolve_type_from_object(obj: Any, range: SourceRange, rcb: ResolutionCallback) -> JitType: ... |
|
|
def _create_module_with_type(ty: JitType) -> ScriptModule: ... |
|
|
def _create_object_with_type(ty: ClassType) -> ScriptObject: ... |
|
|
def _run_emit_module_hook(m: ScriptModule): ... |
|
|
def _replace_overloaded_method_decl(overload_decl: Decl, implementation_def: Def, new_name: str) -> Def: ... |
|
|
|
|
|
def _jit_pass_lower_all_tuples(graph: Graph) -> None: ... |
|
|
def _jit_pass_onnx_set_dynamic_input_shape(graph: Graph, dynamic_axes: Dict[str, Dict[_int, str]], input_names: List[str]) -> None: ... |
|
|
def _jit_pass_onnx_graph_shape_type_inference(graph: Graph, paramsDict: Dict[str, IValue], opset_version: _int) -> None: ... |
|
|
def _jit_pass_onnx_assign_output_shape(graph: Graph, tensors: List[Tensor], desc: IODescriptor, onnx_shape_inference: _bool, is_script: _bool) -> None: ... |
|
|
def _jit_pass_onnx_remove_inplace_ops_for_onnx(graph: Graph, module: Optional[ScriptModule] = None) -> None: ... |
|
|
def _jit_pass_remove_inplace_ops(graph: Graph) -> None: ... |
|
|
def _jit_pass_canonicalize_graph_fuser_ops(graph: Graph) -> None: ... |
|
|
def _jit_pass_peephole(graph: Graph, disable_shape_peepholes: _bool = False) -> None: ... |
|
|
def _jit_pass_onnx_autograd_function_process(graph: Graph) -> None: ... |
|
|
def _jit_pass_fuse_addmm(graph: Graph) -> None: ... |
|
|
def _jit_pass_onnx_preprocess(graph: Graph) -> None: ... |
|
|
def _jit_pass_prepare_division_for_onnx(graph: Graph) -> None: ... |
|
|
def _jit_pass_onnx_remove_print(graph: Graph) -> None: ... |
|
|
def _jit_pass_onnx_preprocess_caffe2(graph: Graph) -> None: ... |
|
|
def _jit_pass_onnx_unpack_quantized_weights( |
|
|
graph: Graph, |
|
|
paramsDict: Dict[str, IValue], |
|
|
caffe2: _bool |
|
|
) -> Dict[str, IValue]: ... |
|
|
def _jit_pass_onnx_quantization_insert_permutes( |
|
|
graph: Graph, |
|
|
paramsDict: Dict[str, IValue] |
|
|
) -> Dict[str, IValue]: ... |
|
|
def _jit_pass_custom_pattern_based_rewrite_graph(pattern: str, fused_node_name: str, graph: Graph) -> None: ... |
|
|
def _jit_onnx_list_model_parameters(module: ScriptModule) -> Tuple[ScriptModule, List[IValue]]: ... |
|
|
def _jit_pass_erase_number_types(graph: Graph) -> None: ... |
|
|
def _jit_pass_onnx_lint(graph: Graph) -> None: ... |
|
|
def _jit_pass_onnx(graph: Graph, operator_export_type: _onnx.OperatorExportTypes) -> Graph: ...
|
|
def _jit_pass_onnx_scalar_type_analysis(graph: Graph, lowprecision_cast: _bool, opset_version: _int) -> None: ... |
|
|
def _jit_pass_onnx_peephole(graph: Graph, opset_version: _int, fixed_batch_size: _bool) -> None: ... |
|
|
def _jit_pass_dce_allow_deleting_nodes_with_side_effects(graph: Graph) -> None: ... |
|
|
def _jit_pass_onnx_function_substitution(graph: Graph) -> None: ... |
|
|
def _jit_pass_onnx_function_extraction(graph: Graph, module_names: Set[str], param_names: List[str]) -> Dict[Node, Dict[str, str]]: ...
|
|
def _jit_pass_onnx_clear_scope_records() -> None: ... |
|
|
def _jit_pass_onnx_track_scope_attributes(graph: Graph, onnx_attrs: Dict[str, Any]) -> None: ... |
|
|
def _jit_is_onnx_log_enabled() -> _bool: ... |
|
|
def _jit_set_onnx_log_enabled(enabled: _bool) -> None: ... |
|
|
def _jit_set_onnx_log_output_stream(stream_name: str) -> None: ... |
|
|
def _jit_onnx_log(*args: Any) -> None: ... |
|
|
def _jit_pass_lower_graph(graph: Graph, m: Module) -> Tuple[Graph, List[IValue]]: ... |
|
|
def _jit_pass_inline_fork_wait(graph: Graph) -> None: ... |
|
|
def _jit_pass_onnx_deduplicate_initializers(graph: Graph, params_dict: Dict[str, IValue], is_train: _bool) -> Dict[str, IValue]: ... |
|
|
def _jit_pass_onnx_eval_peephole(graph: Graph, paramsDict: Dict[str, IValue]) -> Dict[str, IValue]: ... |
|
|
def _jit_pass_onnx_constant_fold(graph: Graph, paramsDict: Dict[str, IValue], opset_version: _int) -> Dict[str, IValue]: ... |
|
|
def _jit_pass_onnx_eliminate_unused_items(graph: Graph, paramsDict: Dict[str, IValue]) -> Dict[str, IValue]: ... |
|
|
def _jit_pass_onnx_cast_all_constant_to_floating(graph: Graph) -> None: ... |
|
|
def _jit_pass_filter_non_tensor_arguments(params: Dict[str, IValue]) -> Dict[str, Tensor]: ... |
|
|
def _jit_decay_packed_param_input_types(graph: Graph) -> None: ... |
|
|
def _jit_pass_onnx_node_shape_type_inference(n: Node, paramsDict: Dict[str, IValue], opset_version: _int) -> None: ... |
|
|
def _jit_onnx_convert_pattern_from_subblock(block: Block, n: Node, env: Dict[Value, Value]) -> List[Value]: ... |
|
|
def _jit_pass_onnx_block( |
|
|
old_block: Block, |
|
|
new_block: Block, |
|
|
operator_export_type: _onnx.OperatorExportTypes, |
|
|
env: Dict[Value, Value], |
|
|
is_sub_block: _bool |
|
|
) -> Dict[Value, Value]: ... |
|
|
def _jit_pass_onnx_assign_scoped_names_for_node_and_value(graph: Graph) -> None: ... |
|
|
def _jit_pass_fixup_onnx_controlflow_node(n: Node, opset_version: _int) -> List[Value]: ... |
|
|
def _jit_onnx_create_full_scope_name(class_name: str, variable_name: str) -> str: ... |
|
|
|
|
|
def _compile_graph_to_code_table(name: str, graph: Graph) -> IValue: ... |
|
|
|
|
|
def _generate_upgraders_graph() -> Dict[str, Graph]: ... |
|
|
|
|
|
def _calculate_package_version_based_on_upgraders(val: _bool): ... |
|
|
|
|
|
def _get_version_calculator_flag() -> _bool: ... |
|
|
|
|
|
def _jit_script_interface_compile(name: str, class_def: ClassDef, rcb: ResolutionCallback, is_module: _bool): ... |
|
|
def _jit_script_compile_overload( |
|
|
qualname: str, |
|
|
overload_decl: Decl, |
|
|
implementation_def: Def, |
|
|
rcb: ResolutionCallback, |
|
|
implementation_defaults: Dict[str, Any], |
|
|
signature: Any |
|
|
): ... |
|
|
def _jit_script_compile( |
|
|
qual_name: str, |
|
|
definition: Def, |
|
|
rcb: ResolutionCallback, |
|
|
defaults: Dict[str, Any] |
|
|
): ... |
|
|
def _jit_script_class_compile( |
|
|
qual_name: str, |
|
|
definition: ClassDef, |
|
|
defaults: Dict[str, Dict[str, Any]], |
|
|
rcb: ResolutionCallback |
|
|
): ... |
|
|
def _parse_source_def(src: str) -> Def: ... |
|
|
def import_ir_module( |
|
|
cu: CompilationUnit, |
|
|
filename: Union[str, Path], |
|
|
map_location: Union[_device, str, None], |
|
|
extra_files: Dict[str, Any] |
|
|
) -> ScriptModule: ... |
|
|
def import_ir_module_from_buffer( |
|
|
cu: CompilationUnit, |
|
|
buffer: BinaryIO, |
|
|
map_location: Union[_device, str, None], |
|
|
extra_files: Dict[str, Any] |
|
|
) -> ScriptModule: ... |
|
|
def _import_ir_module_from_package( |
|
|
cu: CompilationUnit, |
|
|
reader: PyTorchFileReader, |
|
|
storage_context: DeserializationStorageContext, |
|
|
map_location: Union[_device, str, None], |
|
|
ts_id: str |
|
|
) -> ScriptModule: ... |
|
|
|
|
|
def _assign_output_shapes(graph: Graph, inputs: List[Tensor]) -> Graph: ... |
|
|
def _check_onnx_proto(proto: str, full_check: _bool = False) -> None: ... |
|
|
def _propagate_and_assign_input_shapes( |
|
|
graph: Graph, |
|
|
inputs: Tuple[Tensor, ...], |
|
|
param_count_list: List[_int], |
|
|
with_grad: _bool, |
|
|
propagate: _bool |
|
|
) -> Graph: ... |
|
|
|
|
|
|
|
|
class GraphExecutorState: |
|
|
... |
|
|
|
|
|
|
|
|
class AliasDb: |
|
|
def __str__(self) -> str: ... |
|
|
... |
|
|
|
|
|
class _InsertPoint: |
|
|
def __enter__(self) -> None: ... |
|
|
def __exit__(self, *args) -> None: ... |
|
|
|
|
|
|
|
|
class Use: |
|
|
@property |
|
|
def user(self) -> Node: ... |
|
|
@property |
|
|
def offset(self) -> _int: ... |
|
|
def isAfter(self, other: Use) -> _bool: ... |
|
|
... |
|
|
|
|
|
|
|
|
class Value: |
|
|
def type(self) -> JitType: ...
|
|
def setType(self, t: JitType) -> Value: ... |
|
|
def setTypeAs(self, other: Value) -> Value: ... |
|
|
def inferTypeFrom(self, t: Tensor) -> None: ... |
|
|
def debugName(self) -> str: ... |
|
|
def setDebugName(self, name: str) -> None: ... |
|
|
def unique(self) -> _int: ... |
|
|
def offset(self) -> _int: ... |
|
|
def node(self) -> Node: ... |
|
|
def uses(self) -> List[Use]: ... |
|
|
def replaceAllUsesWith(self, val: Value) -> None: ... |
|
|
def replaceAllUsesAfterNodeWith(self, node: Node, val: Value) -> None: ... |
|
|
def requires_grad(self) -> _bool: ... |
|
|
def requiresGrad(self) -> _bool: ... |
|
|
def copyMetadata(self, other: Value) -> Value: ... |
|
|
def isCompleteTensor(self) -> _bool: ... |
|
|
def toIValue(self) -> IValue: ... |
|
|
... |
|
|
|
|
|
|
|
|
class Block: |
|
|
def inputs(self) -> List[Value]: ... |
|
|
def outputs(self) -> List[Value]: ... |
|
|
def nodes(self) -> Iterator[Node]: ... |
|
|
def paramNode(self) -> Node: ... |
|
|
def returnNode(self) -> Node: ... |
|
|
def owningNode(self) -> Node: ... |
|
|
def registerOutput(self, n: Value) -> _int: ... |
|
|
def addNode(self, name: str, inputs: Sequence[Value]) -> Node: ... |
|
|
... |
|
|
|
|
|
|
|
|
class Node: |
|
|
def __getitem__(self, key: str) -> Any: ... |
|
|
def schema(self) -> str: ... |
|
|
def input(self) -> Value: ... |
|
|
def inputs(self) -> List[Value]: ... |
|
|
def inputsAt(self, idx: _int) -> Value: ... |
|
|
def inputsSize(self) -> _int: ... |
|
|
def output(self) -> Value: ... |
|
|
def outputs(self) -> List[Value]: ... |
|
|
def outputsAt(self, idx: _int) -> Value: ... |
|
|
def outputsSize(self) -> _int: ... |
|
|
def hasMultipleOutputs(self) -> _bool: ... |
|
|
def blocks(self) -> List[Block]: ... |
|
|
def addBlock(self) -> Block: ... |
|
|
def mustBeNone(self) -> _bool: ... |
|
|
def matches(self, pattern: str) -> _bool: ... |
|
|
def kind(self) -> str: ... |
|
|
def kindOf(self, name: str) -> str: ... |
|
|
def addInput(self, name: str) -> Value: ... |
|
|
def replaceInput(self, i: _int, newValue: Value) -> Value: ... |
|
|
def replaceInputWith(self, from_: Value, to: Value) -> None: ... |
|
|
def replaceAllUsesWith(self, n: Node) -> None: ... |
|
|
def insertBefore(self, n: Node) -> Node: ... |
|
|
def insertAfter(self, n: Node) -> Node: ... |
|
|
def isBefore(self, n: Node) -> _bool: ... |
|
|
def isAfter(self, n: Node) -> _bool: ... |
|
|
def moveBefore(self, n: Node) -> None: ... |
|
|
def moveAfter(self, n: Node) -> None: ... |
|
|
def removeInput(self, i: _int) -> None: ... |
|
|
def removeAllInputs(self, i: _int) -> None: ... |
|
|
def hasUses(self) -> _bool: ... |
|
|
def eraseOutput(self, i: _int) -> None: ... |
|
|
def addOutput(self) -> Value: ... |
|
|
def scopeName(self) -> str: ... |
|
|
def isNondeterministic(self) -> _bool: ... |
|
|
def copyAttributes(self, rhs: Node) -> Node: ... |
|
|
def copyMetadata(self, rhs: Node) -> Node: ... |
|
|
def hasAttributes(self) -> _bool: ... |
|
|
def hasAttribute(self, name: str) -> _bool: ... |
|
|
def removeAttribute(self, attr: str) -> Node: ... |
|
|
def namedInput(self, name: str) -> Value: ... |
|
|
def sourceRange(self) -> SourceRange: ... |
|
|
def owningBlock(self) -> Block: ... |
|
|
def findNode(self, kind: str, recurse: _bool = True) -> Node: ... |
|
|
def findAllNodes(self, kind: str, recurse: _bool = True) -> List[Node]: ... |
|
|
def getModuleHierarchy(self) -> str: ... |
|
|
def prev(self) -> Node: ... |
|
|
def destroy(self) -> None: ... |
|
|
def attributeNames(self) -> List[str]: ... |
|
|
|
|
|
|
|
|
def f(self, name: str) -> _float: ... |
|
|
def f_(self, name: str, val: _float) -> Node: ... |
|
|
def fs(self, name: str) -> List[_float]: ... |
|
|
def fs_(self, name: str, val: List[_float]) -> Node: ... |
|
|
def c(self, name: str) -> complex: ... |
|
|
def c_(self, name: str, val: complex) -> Node: ... |
|
|
def s(self, name: str) -> str: ... |
|
|
def s_(self, name: str, val: str) -> Node: ... |
|
|
def ss(self, name: str) -> List[str]: ... |
|
|
def ss_(self, name: str, val: List[str]) -> Node: ... |
|
|
def i(self, name: str) -> _int: ... |
|
|
def i_(self, name: str, val: _int) -> Node: ... |
|
|
|
|
|
|
|
|
|
|
|
def g(self, name: str) -> Graph: ... |
|
|
def g_(self, name: str, val: Graph) -> Node: ... |
|
|
def gs(self, name: str) -> List[Graph]: ... |
|
|
def gs_(self, name: str, val: List[Graph]) -> Node: ... |
|
|
def ival(self, name: str) -> IValue: ... |
|
|
def ival_(self, name: str, val: IValue) -> Node: ... |
|
|
def t(self, name: str) -> Tensor: ... |
|
|
def t_(self, name: str, val: Tensor) -> Node: ... |
|
|
def ts(self, name: str) -> List[Tensor]: ... |
|
|
def ts_(self, name: str, val: List[Tensor]) -> Node: ... |
|
|
def ty_(self, name: str, val: JitType) -> Node: ... |
|
|
def tys_(self, name: str, val: List[JitType]) -> Node: ... |
|
|
... |
|
|
|
|
|
|
|
|
class Graph: |
|
|
def inputs(self) -> List[Value]: ... |
|
|
def outputs(self) -> List[Value]: ... |
|
|
def nodes(self) -> Iterator[Node]: ... |
|
|
def param_node(self) -> Node: ... |
|
|
def return_node(self) -> Node: ... |
|
|
def addInput(self, name: str) -> Value: ... |
|
|
def eraseInput(self, i: _int) -> None: ... |
|
|
def registerOutput(self, n: Value) -> _int: ... |
|
|
def eraseOutput(self, i: _int) -> None: ... |
|
|
def create(self, name: str, args, num_outputs: _int) -> Node: ... |
|
|
def appendNode(self, n: Node) -> Node: ... |
|
|
def prependNode(self, n: Node) -> Node: ... |
|
|
def insertNode(self, n: Node) -> Node: ... |
|
|
def block(self) -> Block: ... |
|
|
def lint(self) -> None: ... |
|
|
def alias_db(self) -> AliasDb: ... |
|
|
def setInsertPoint(self, n: Union[Block, Node]) -> None: ... |
|
|
def insert_point_guard(self, n: Union[Block, Node]) -> _InsertPoint: ... |
|
|
def insertPoint(self) -> Node: ... |
|
|
def insertGraph(self, callee: Graph, inputs: List[Value]) -> List[Value]: ... |
|
|
def makeMultiOutputIntoTuple(self) -> None: ... |
|
|
... |
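
# Usage sketch: a Graph is usually obtained from a scripted function and
# inspected through the accessors above.
#
#   >>> @torch.jit.script
#   ... def f(x: Tensor) -> Tensor:
#   ...     return x * 2 + 1
#   >>> [n.kind() for n in f.graph.nodes()]   # includes 'aten::mul', 'aten::add'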
|
|
|
|
|
|
|
|
|
|
|
class AliasInfo: |
|
|
is_write: _bool |
|
|
before_set: Set[str] |
|
|
after_set: Set[str] |
|
|
|
|
|
|
|
|
|
|
|
class Argument: |
|
|
name: str |
|
|
type: JitType |
|
|
default_value: Optional[Any] |
|
|
def has_default_value(self) -> _bool: ... |
|
|
kwarg_only: _bool
|
|
is_out: _bool |
|
|
alias_info: Optional[AliasInfo] |
|
|
... |
|
|
class FunctionSchema: |
|
|
arguments: List[Argument] |
|
|
returns: List[Argument] |
|
|
name: str |
|
|
overload_name: str |
|
|
... |
|
|
|
|
|
class _UpgraderEntry: |
|
|
bumped_at_version: _int |
|
|
upgrader_name: str |
|
|
old_schema: str |
|
|
def __init__(self, bumped_at_version: _int, upgrader_name: str, old_schema: str) -> None: ... |
|
|
|
|
|
class _UpgraderRange: |
|
|
min_version: _int |
|
|
max_version: _int |
|
|
|
|
|
def _get_max_operator_version() -> _int: ... |
|
|
|
|
|
def _get_operator_version_map() -> Dict[str, List[_UpgraderEntry]]: ... |
|
|
|
|
|
def _get_upgrader_ranges(name: str) -> List[_UpgraderRange]: ... |
|
|
|
|
|
def _test_only_add_entry_to_op_version(op_name: str, entry: _UpgraderEntry) -> None: ... |
|
|
|
|
|
def _test_only_remove_entry_to_op_version(op_name: str) -> None: ... |
|
|
|
|
|
|
|
|
class ScriptModuleSerializer(object): |
|
|
def __init__(self, export_writer: PyTorchFileWriter) -> None: ... |
|
|
def serialize(self, model: ScriptModule, script_module_id: _int) -> None: ... |
|
|
def write_files(self) -> None: ... |
|
|
def storage_context(self) -> SerializationStorageContext: ... |
|
|
... |
|
|
|
|
|
|
|
|
class SerializationStorageContext(object): |
|
|
def __init__(self) -> None: ... |
|
|
def has_storage(self, storage: Storage) -> _bool: ... |
|
|
def get_or_add_storage(self, storage: Storage) -> _int: ... |
|
|
... |
|
|
|
|
|
|
|
|
class DeserializationStorageContext(object): |
|
|
def __init__(self) -> None: ... |
|
|
def get_storage(self, name: str, dtype: _dtype) -> Tensor: ... |
|
|
def has_storage(self, name: str) -> _bool: ... |
|
|
def add_storage(self, name: str, tensor: Tensor) -> _int: ... |
|
|
... |
|
|
|
|
|
|
|
|
class ConcreteModuleTypeBuilder: |
|
|
def __init__(self, obj: Any) -> None: ... |
|
|
def set_module_dict(self): ... |
|
|
def set_module_list(self): ... |
|
|
def set_parameter_list(self): ... |
|
|
def set_parameter_dict(self): ... |
|
|
def add_attribute(self, name: str, ty: JitType, is_param: _bool, is_buffer: _bool): ... |
|
|
def add_module(self, name: str, meta: ConcreteModuleType): ... |
|
|
def add_constant(self, name: str, value: Any): ... |
|
|
def add_overload(self, method_name: str, overloaded_method_names: List[str]): ... |
|
|
def add_builtin_function(self, name: str, symbol_name: str): ... |
|
|
def add_failed_attribute(self, name: str, failure_reason: str): ... |
|
|
def add_function_attribute(self, name: str, ty: JitType, func: Callable[..., Any]): ... |
|
|
def add_ignored_attribute(self, name: str): ... |
|
|
def add_ignored_attributes(self, names: List[str]): ... |
|
|
def add_forward_hook(self, hook: Callable[..., Any]): ... |
|
|
def add_forward_pre_hook(self, pre_hook: Callable[..., Any]): ... |
|
|
|
|
|
class ConcreteModuleType: |
|
|
def get_constants(self) -> Dict[str, Any]: ... |
|
|
def equals(self, other: 'ConcreteModuleType') -> _bool: ... |
|
|
|
|
|
@staticmethod |
|
|
def from_jit_type(ty: JitType) -> ConcreteModuleType: ... |
|
|
|
|
|
class CallStack: |
|
|
def __init__(self, name: str, range: SourceRange): ... |
|
|
|
|
|
class ErrorReport: |
|
|
def __init__(self, range: SourceRange) -> None: ... |
|
|
def what(self) -> str: ... |
|
|
|
|
|
@staticmethod |
|
|
def call_stack() -> str: ... |
|
|
|
|
|
class CompilationUnit: |
|
|
def __init__(self, lang: str=..., _frames_up: _int=...) -> None: ... |
|
|
def find_function(self, name: str) -> ScriptFunction: ... |
|
|
def __getattr__(self, name: str) -> ScriptFunction: ... |
|
|
def define(self, script: str, rcb: ResolutionCallback=..., _frames_up: _int=...): ... |
|
|
def get_interface(self, name: str) -> InterfaceType: ... |
|
|
def get_functions(self) -> List[ScriptFunction]: ... |
|
|
def create_function(self, name: str, graph: Graph, shouldMangle: _bool=...) -> ScriptFunction: ... |
|
|
def get_class(self, name: str) -> ClassType: ... |
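
# Usage sketch (through the public torch.jit.CompilationUnit wrapper):
# define() compiles TorchScript source, and the resulting function is then
# reachable via __getattr__ as declared above.
#
#   >>> cu = torch.jit.CompilationUnit()
#   >>> cu.define("def double(x: Tensor) -> Tensor:\n    return x * 2")
#   >>> cu.double(torch.ones(2))            # tensor([2., 2.])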
|
|
|
|
|
class ScriptObject: |
|
|
def setattr(self, name: str, value: Any): ... |
|
|
|
|
|
class ScriptModule(ScriptObject): |
|
|
def _method_names(self) -> List[str]: ... |
|
|
def _get_method(self, name: str) -> ScriptMethod: ... |
|
|
|
|
|
class LiteScriptModule: |
|
|
def __call__(self, *input): ... |
|
|
def find_method(self, method_name: str): ... |
|
|
def forward(self, *input) -> List[str]: ... |
|
|
def run_method(self, method_name: str, *input): ... |
|
|
|
|
|
class ScriptFunction: |
|
|
def __call__(self, *args, **kwargs) -> Tensor: ... |
|
|
def save(self, filename: str, _extra_files: Dict[str, bytes]) -> None: ... |
|
|
def save_to_buffer(self, _extra_files: Dict[str, bytes]) -> bytes: ... |
|
|
@property |
|
|
def graph(self) -> Graph: ... |
|
|
def inlined_graph(self) -> Graph: ... |
|
|
def schema(self) -> FunctionSchema: ... |
|
|
def code(self) -> str: ... |
|
|
def name(self) -> str: ... |
|
|
@property |
|
|
def qualified_name(self) -> str: ... |
|
|
|
|
|
class ScriptMethod: |
|
|
graph: Graph |
|
|
@property |
|
|
def owner(self) -> ScriptModule: ... |
|
|
@property |
|
|
def name(self) -> str: ... |
|
|
|
|
|
class ModuleDict: |
|
|
def __init__(self, mod: ScriptModule) -> None: ... |
|
|
def items(self) -> List[Tuple[str, Any]]: ... |
|
|
|
|
|
class ParameterDict: |
|
|
def __init__(self, mod: ScriptModule) -> None: ... |
|
|
|
|
|
class BufferDict: |
|
|
def __init__(self, mod: ScriptModule) -> None: ... |
|
|
|
|
|
|
|
|
class Module: |
|
|
... |
|
|
|
|
|
|
|
|
def _initExtension(shm_manager_path: str) -> None: ... |
|
|
def _autograd_init() -> _bool: ... |
|
|
def _add_docstr(obj: T, doc_obj: str) -> T: ... |
|
|
def _init_names(arg: Sequence[Type]) -> None: ... |
|
|
def _has_distributed() -> _bool: ... |
|
|
def _set_default_tensor_type(type) -> None: ... |
|
|
def _set_default_dtype(d: _dtype) -> None: ... |
|
|
def _infer_size(arg1: Size, arg2: Size) -> Size: ... |
|
|
def _crash_if_csrc_asan() -> _int: ... |
|
|
def _crash_if_csrc_ubsan() -> _int: ... |
|
|
def _crash_if_aten_asan() -> _int: ... |
|
|
def _show_config() -> str: ... |
|
|
def _cxx_flags() -> str: ... |
|
|
def _parallel_info() -> str: ... |
|
|
def _set_backcompat_broadcast_warn(arg: _bool) -> None: ... |
|
|
def _get_backcompat_broadcast_warn() -> _bool: ... |
|
|
def _set_backcompat_keepdim_warn(arg: _bool) -> None: ... |
|
|
def _get_backcompat_keepdim_warn() -> _bool: ... |
|
|
def get_num_threads() -> _int: ...
|
|
def set_num_threads(nthreads: _int) -> None: ... |
|
|
def get_num_interop_threads() -> _int: ... |
|
|
def set_num_interop_threads(nthreads: _int) -> None: ... |
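
# Usage sketch: the intra-op and inter-op thread pools are sized
# independently through the public torch wrappers of these bindings.
#
#   >>> torch.set_num_threads(4)            # intra-op parallelism
#   >>> torch.set_num_interop_threads(2)    # inter-op parallelism
#   >>> torch.get_num_threads()             # 4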
|
|
def _get_cudnn_enabled() -> _bool: ... |
|
|
def _set_cudnn_enabled(arg: _bool) -> None: ... |
|
|
def _get_flash_sdp_enabled() -> _bool: ... |
|
|
def _set_sdp_use_flash(arg: _bool) -> None: ... |
|
|
def _get_math_sdp_enabled() -> _bool: ... |
|
|
def _set_sdp_use_math(arg: _bool) -> None: ... |
|
|
def _get_mkldnn_enabled() -> _bool: ... |
|
|
def _set_mkldnn_enabled(arg: _bool) -> None: ... |
|
|
def _get_cudnn_benchmark() -> _bool: ... |
|
|
def _set_cudnn_benchmark(arg: _bool) -> None: ... |
|
|
def _get_cudnn_deterministic() -> _bool: ... |
|
|
def _set_cudnn_deterministic(arg: _bool) -> None: ... |
|
|
def _get_deterministic_algorithms() -> _bool: ... |
|
|
def _get_deterministic_algorithms_warn_only() -> _bool: ... |
|
|
def _set_deterministic_algorithms(mode: _bool, *, warn_only: _bool=...) -> None: ... |
|
|
def _get_warnAlways() -> _bool: ... |
|
|
def _set_warnAlways(arg: _bool) -> None: ... |
|
|
def _get_cudnn_allow_tf32() -> _bool: ... |
|
|
def _set_cudnn_allow_tf32(arg: _bool) -> None: ... |
|
|
def _get_cublas_allow_tf32() -> _bool: ... |
|
|
def _set_cublas_allow_tf32(arg: _bool) -> None: ... |
|
|
def _get_float32_matmul_precision() -> str: ... |
|
|
def _set_float32_matmul_precision(arg: str) -> None: ... |
|
|
def _get_cublas_allow_fp16_reduced_precision_reduction() -> _bool: ... |
|
|
def _set_cublas_allow_fp16_reduced_precision_reduction(arg: _bool) -> None: ... |
|
|
def _set_conj(x: Tensor, conj: _bool) -> None: ... |
|
|
def _set_neg(x: Tensor, neg: _bool) -> None: ... |
|
|
def _add_meta_to_tls_dispatch_include() -> None: ... |
|
|
def _meta_in_tls_dispatch_include() -> _bool: ... |
|
|
def _remove_meta_from_tls_dispatch_include() -> None: ... |
|
|
def _has_storage(x: Tensor) -> _bool: ... |
|
|
def _should_allow_numbers_as_tensors(func_name: str) -> _bool: ... |
|
|
|
|
|
|
|
|
def _to_dlpack(data: Tensor) -> Any: ... |
|
|
def _from_dlpack(data: Any) -> Tensor: ... |
|
|
def _get_cpp_backtrace(frames_to_skip: _int, maximum_number_of_frames: _int) -> str: ... |
|
|
def set_flush_denormal(arg: _bool) -> _bool: ... |
|
|
def get_default_dtype() -> _dtype: ... |
|
|
def _get_default_device() -> str: ... |
|
|
def _get_qengine() -> _int: ... |
|
|
def _set_qengine(qengine: _int) -> None: ...
|
|
def _supported_qengines() -> List[_int]: ... |
|
|
def _is_xnnpack_enabled() -> _bool: ... |
|
|
def _set_default_mobile_cpu_allocator() -> None: ... |
|
|
def _unset_default_mobile_cpu_allocator() -> None: ... |
|
|
def _is_torch_function_enabled() -> _bool: ... |
|
|
def _has_torch_function(args: Iterable[Any]) -> _bool: ... |
|
|
def _has_torch_function_unary(obj: Any) -> _bool: ...
|
|
def _has_torch_function_variadic(*args: Any) -> _bool: ... |
|
|
def _vmapmode_increment_nesting() -> _int: ... |
|
|
def _vmapmode_decrement_nesting() -> _int: ... |
|
|
def _log_api_usage_once(event: str) -> None: ...

def _demangle(name: str) -> str: ...
|
|
def _disabled_torch_function_impl(func: Callable, types: Iterable[Type], args: Tuple, kwargs: Dict) -> Any: ... |
|
|
def _disabled_torch_dispatch_impl(func: Callable, types: Iterable[Type], args: Tuple, kwargs: Dict) -> Any: ... |
|
|
def _get_linalg_preferred_backend() -> torch._C._LinalgBackend: ... |
|
|
def _set_linalg_preferred_backend(arg: torch._C._LinalgBackend): ... |
|
|
def _is_mps_available() -> _bool: ... |
|
|
class _LinalgBackend: |
|
|
Default: _LinalgBackend |
|
|
Cusolver: _LinalgBackend |
|
|
Magma: _LinalgBackend |
|
|
|
|
|
|
|
|
def _valgrind_supported_platform() -> _bool: ... |
|
|
def _valgrind_toggle() -> None: ... |
|
|
def _valgrind_toggle_and_dump_stats() -> None: ... |
|
|
|
|
|
has_openmp: _bool |
|
|
has_mkl: _bool |
|
|
has_mps: _bool |
|
|
has_lapack: _bool |
|
|
has_cuda: _bool |
|
|
has_mkldnn: _bool |
|
|
has_cudnn: _bool |
|
|
has_spectral: _bool |
|
|
_GLIBCXX_USE_CXX11_ABI: _bool |
|
|
default_generator: Generator |
|
|
|
|
|
|
|
|
def _set_grad_enabled(enabled: _bool) -> None: ... |
|
|
def is_grad_enabled() -> _bool: ... |
|
|
def is_inference_mode_enabled() -> _bool: ... |
|
|
def set_autocast_enabled(enabled: _bool) -> None: ... |
|
|
def is_autocast_enabled() -> _bool: ... |
|
|
def clear_autocast_cache() -> None: ... |
|
|
def set_autocast_cpu_enabled(enabled: _bool) -> None: ... |
|
|
def is_autocast_cpu_enabled() -> _bool: ... |
|
|
def set_autocast_cpu_dtype(dtype: _dtype) -> None: ... |
|
|
def set_autocast_gpu_dtype(dtype: _dtype) -> None: ... |
|
|
def get_autocast_cpu_dtype() -> _dtype: ... |
|
|
def get_autocast_gpu_dtype() -> _dtype: ... |
|
|
def autocast_increment_nesting() -> _int: ... |
|
|
def autocast_decrement_nesting() -> _int: ... |
|
|
def is_autocast_cache_enabled() -> _bool: ... |
|
|
def set_autocast_cache_enabled(enabled: _bool) -> None: ... |
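
# Usage sketch: these flags back the public torch.autocast context manager
# rather than being toggled directly (a, b are illustrative float32 tensors):
#
#   >>> a = torch.randn(4, 4); b = torch.randn(4, 4)
#   >>> with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
#   ...     y = torch.mm(a, b)              # runs under autocast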
|
|
def set_anomaly_enabled(enabled: _bool, check_nan: _bool = True) -> None: ... |
|
|
def is_anomaly_enabled() -> _bool: ... |
|
|
def is_anomaly_check_nan_enabled() -> _bool: ... |
|
|
def _enter_dual_level() -> _int: ... |
|
|
def _exit_dual_level(level: _int) -> None: ... |
|
|
def _make_dual(tensor: Tensor, tangent: Tensor, level: _int) -> Tensor: ... |
|
|
def _unpack_dual(tensor: Tensor, level: _int) -> Tensor: ... |
|
|
def __set_forward_AD_enabled(enabled: _bool) -> None: ... |
|
|
def __is_forward_AD_enabled() -> _bool: ... |
|
|
def _register_default_hooks(pack_hook: Callable, unpack_hook: Callable) -> None: ... |
|
|
def _reset_default_hooks() -> None: ... |
|
|
|
|
|
def _is_torch_function_mode_enabled() -> _bool: ...
|
|
def _set_torch_function_mode(cls: Any) -> None: ... |
|
|
def _push_on_torch_function_stack(cls: Any) -> None: ... |
|
|
def _pop_torch_function_stack() -> Any: ... |
|
|
def _get_function_stack_at(idx: _int) -> Any: ... |
|
|
def _len_torch_function_stack() -> _int: ... |
|
|
|
|
|
def _set_torch_dispatch_mode(cls: Any) -> None: ... |
|
|
def _push_on_torch_dispatch_stack(cls: Any) -> None: ... |
|
|
def _pop_torch_dispatch_stack() -> Any: ... |
|
|
def _get_dispatch_stack_at(idx: _int) -> Any: ... |
|
|
def _len_torch_dispatch_stack() -> _int: ... |
|
|
|
|
|
class _InferenceMode(object): |
|
|
def __init__(self, mode: _bool) -> None: ... |
|
|
|
|
|
class _DisableFuncTorch: |
|
|
def __init__(self) -> None: ... |
|
|
|
|
|
class _EnableTorchFunction: |
|
|
def __init__(self) -> None: ... |
|
|
|
|
|
|
|
|
class LoggerBase(object): |
|
|
... |
|
|
|
|
|
class NoopLogger(LoggerBase): |
|
|
... |
|
|
|
|
|
class LockingLogger(LoggerBase): |
|
|
... |
|
|
|
|
|
class AggregationType(Enum): |
|
|
SUM = 0 |
|
|
AVG = 1 |
|
|
|
|
|
class FileCheck(object): |
|
|
|
|
|
def check_source_highlighted(self, highlight: str) -> 'FileCheck': ... |
|
|
def run(self, test_string: str) -> None: ... |
|
|
def check(self, test_string: str) -> 'FileCheck': ... |
|
|
def check_not(self, test_string: str) -> 'FileCheck': ... |
|
|
... |
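
# Usage sketch: FileCheck chains assertions and then runs them against a
# string, commonly a graph dump (scripted_fn here is any scripted function):
#
#   >>> torch._C.FileCheck().check("aten::mul") \
#   ...     .check_not("aten::div").run(str(scripted_fn.graph))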
|
|
|
|
|
|
|
|
class PyTorchFileReader(object): |
|
|
@overload |
|
|
def __init__(self, name: str) -> None: ... |
|
|
@overload |
|
|
def __init__(self, buffer: BinaryIO) -> None: ... |
|
|
def get_record(self, name: str) -> bytes: ... |
|
|
... |
|
|
|
|
|
class PyTorchFileWriter(object): |
|
|
@overload |
|
|
def __init__(self, name: str) -> None: ... |
|
|
@overload |
|
|
def __init__(self, buffer: BinaryIO) -> None: ... |
|
|
def write_record(self, name: str, data: Union[bytes, _int], size: _int) -> None: ... |
|
|
def write_end_of_file(self) -> None: ... |
|
|
def set_min_version(self, version: _int) -> None: ... |
|
|
def get_all_written_records(self) -> List[str]: ... |
|
|
def archive_name(self) -> str: ... |
|
|
... |
|
|
|
|
|
def _jit_get_inline_everything_mode() -> _bool: ... |
|
|
def _jit_set_inline_everything_mode(enabled: _bool) -> None: ... |
|
|
def _jit_get_logging_option() -> str: ... |
|
|
def _jit_set_logging_option(option: str) -> None: ... |
|
|
def _jit_set_logging_stream(stream_name: str) -> None: ... |
|
|
def _jit_pass_cse(graph: Graph) -> _bool: ...

def _jit_pass_dce(graph: Graph) -> None: ...

def _jit_pass_lint(graph: Graph) -> None: ...
|
|
|
|
|
|
|
|
def _get_custom_class_python_wrapper(name: str, attr: str) -> Any: ... |
|
|
|
|
|
|
|
|
class Generator(object): |
|
|
device: _device |
|
|
def __init__(self, device: Union[_device, str, None] = None) -> None: ... |
|
|
def get_state(self) -> Tensor: ... |
|
|
def set_state(self, _new_state: Tensor) -> Generator: ... |
|
|
def manual_seed(self, seed: _int) -> Generator: ... |
|
|
def seed(self) -> _int: ... |
|
|
def initial_seed(self) -> _int: ... |
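
# Usage sketch: a Generator gives reproducible sampling without touching the
# global RNG state (manual_seed returns the generator, so calls chain):
#
#   >>> g = torch.Generator().manual_seed(42)
#   >>> a = torch.randn(3, generator=g)
#   >>> b = torch.randn(3, generator=torch.Generator().manual_seed(42))
#   >>> torch.equal(a, b)                   # True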
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class _DispatchOperatorHandle: |
|
|
def schema(self) -> FunctionSchema: ... |
|
|
|
|
|
class _DispatchModule: |
|
|
def def_(self, schema: str, alias: str = "") -> _DispatchModule: ... |
|
|
def def_legacy(self, schema: str) -> _DispatchModule: ... |
|
|
def def_name_t_t(self, name: str, dispatch: str, debug: str = "default_def_name_t_t") -> _DispatchModule: ... |
|
|
def def_schema_t_t(self, schema: str, dispatch: str, alias: str, debug: str = "default_def_schema_t_t") -> _DispatchModule: ... |
|
|
def impl_t_t(self, name: str, dispatch: str, debug: str = "impl_t_t") -> _DispatchModule: ... |
|
|
def impl_tt_t(self, name: str, dispatch: str, debug: str = "impl_tt_t") -> _DispatchModule: ... |
|
|
def impl(self, name: str, dispatch: str, func: Callable) -> _DispatchModule: ... |
|
|
def define(self, schema: str, alias: str = "") -> _DispatchModule: ... |
|
|
def fallback_fallthrough(self, dispatch: str = "") -> _DispatchModule: ... |
|
|
|
|
|
def _dispatch_library(kind: str, name: str, dispatch: str, file: str = "", linenum: Any = 0) -> _DispatchModule: ... |
|
|
def _dispatch_dump(name: str) -> str: ... |
|
|
def _dispatch_dump_table(name: str) -> str: ... |
|
|
def _dispatch_check_invariants(name: str) -> None: ... |
|
|
def _dispatch_check_all_invariants() -> None: ... |
|
|
def _dispatch_has_kernel(name: str) -> _bool: ... |
|
|
def _dispatch_has_kernel_for_dispatch_key(name: str, dispatch: _dispatchkey) -> _bool: ... |
|
|
def _dispatch_has_kernel_for_any_dispatch_key(name: str, dispatch_key_set: DispatchKeySet) -> _bool: ... |
|
|
def _dispatch_has_computed_kernel_for_dispatch_key(name: str, dispatch: _dispatchkey) -> _bool: ... |
|
|
def _dispatch_find_dangling_impls() -> List[str]: ... |
|
|
def _dispatch_tls_set_dispatch_key_excluded(dispatch: _dispatchkey, val: _bool) -> None: ... |
|
|
def _dispatch_tls_is_dispatch_key_excluded(dispatch: _dispatchkey) -> _bool: ... |
|
|
def _dispatch_isTensorSubclassLike(tensor: Tensor) -> _bool: ... |
|
|
def _dispatch_key_name(dispatch: _dispatchkey) -> str: ... |
|
|
def _dispatch_key_parse(dispatch: _dispatchkey) -> DispatchKey: ... |
|
|
def _dispatch_num_backends() -> _int: ... |
|
|
|
|
|
class DispatchKey(Enum): |
|
|
Undefined: DispatchKey = ... |
|
|
FPGA: DispatchKey = ... |
|
|
ORT: DispatchKey = ... |
|
|
Vulkan: DispatchKey = ... |
|
|
Metal: DispatchKey = ... |
|
|
MKLDNN: DispatchKey = ... |
|
|
OpenGL: DispatchKey = ... |
|
|
OpenCL: DispatchKey = ... |
|
|
IDEEP: DispatchKey = ... |
|
|
CustomRNGKeyId: DispatchKey = ... |
|
|
MkldnnCPU: DispatchKey = ... |
|
|
Sparse: DispatchKey = ... |
|
|
SparseCsrCPU: DispatchKey = ... |
|
|
SparseCsrCUDA: DispatchKey = ... |
|
|
Python: DispatchKey = ... |
|
|
ZeroTensor: DispatchKey = ... |
|
|
BackendSelect: DispatchKey = ... |
|
|
Named: DispatchKey = ... |
|
|
AutogradOther: DispatchKey = ... |
|
|
AutogradFunctionality: DispatchKey = ... |
|
|
AutogradNestedTensor: DispatchKey = ... |
|
|
Tracer: DispatchKey = ... |
|
|
Autocast: DispatchKey = ... |
|
|
Batched: DispatchKey = ... |
|
|
VmapMode: DispatchKey = ... |
|
|
TESTING_ONLY_GenericWrapper: DispatchKey = ... |
|
|
TESTING_ONLY_GenericMode: DispatchKey = ... |
|
|
Autograd: DispatchKey = ... |
|
|
CompositeImplicitAutograd: DispatchKey = ... |
|
|
CompositeImplicitAutogradNestedTensor: DispatchKey = ... |
|
|
CompositeExplicitAutograd: DispatchKey = ... |
|
|
CompositeExplicitAutogradNonFunctional: DispatchKey = ... |
|
|
CPU: DispatchKey = ... |
|
|
CUDA: DispatchKey = ... |
|
|
HIP: DispatchKey = ... |
|
|
XLA: DispatchKey = ... |
|
|
MPS: DispatchKey = ... |
|
|
IPU: DispatchKey = ... |
|
|
XPU: DispatchKey = ... |
|
|
HPU: DispatchKey = ... |
|
|
VE: DispatchKey = ... |
|
|
Lazy: DispatchKey = ... |
|
|
Meta: DispatchKey = ... |
|
|
PrivateUse1: DispatchKey = ... |
|
|
PrivateUse2: DispatchKey = ... |
|
|
PrivateUse3: DispatchKey = ... |
|
|
QuantizedCPU: DispatchKey = ... |
|
|
QuantizedCUDA: DispatchKey = ... |
|
|
QuantizedHIP: DispatchKey = ... |
|
|
QuantizedXLA: DispatchKey = ... |
|
|
QuantizedMPS: DispatchKey = ... |
|
|
QuantizedIPU: DispatchKey = ... |
|
|
QuantizedXPU: DispatchKey = ... |
|
|
QuantizedHPU: DispatchKey = ... |
|
|
QuantizedVE: DispatchKey = ... |
|
|
QuantizedLazy: DispatchKey = ... |
|
|
QuantizedMeta: DispatchKey = ... |
|
|
QuantizedPrivateUse1: DispatchKey = ... |
|
|
QuantizedPrivateUse2: DispatchKey = ... |
|
|
QuantizedPrivateUse3: DispatchKey = ... |
|
|
SparseCPU: DispatchKey = ... |
|
|
SparseCUDA: DispatchKey = ... |
|
|
SparseHIP: DispatchKey = ... |
|
|
SparseXLA: DispatchKey = ... |
|
|
SparseMPS: DispatchKey = ... |
|
|
SparseIPU: DispatchKey = ... |
|
|
SparseXPU: DispatchKey = ... |
|
|
SparseHPU: DispatchKey = ... |
|
|
SparseVE: DispatchKey = ... |
|
|
SparseLazy: DispatchKey = ... |
|
|
SparseMeta: DispatchKey = ... |
|
|
SparsePrivateUse1: DispatchKey = ... |
|
|
SparsePrivateUse2: DispatchKey = ... |
|
|
SparsePrivateUse3: DispatchKey = ... |
|
|
NestedTensorCPU: DispatchKey = ... |
|
|
NestedTensorCUDA: DispatchKey = ... |
|
|
NestedTensorHIP: DispatchKey = ... |
|
|
NestedTensorXLA: DispatchKey = ... |
|
|
NestedTensorMPS: DispatchKey = ... |
|
|
NestedTensorIPU: DispatchKey = ... |
|
|
NestedTensorXPU: DispatchKey = ... |
|
|
NestedTensorHPU: DispatchKey = ... |
|
|
NestedTensorVE: DispatchKey = ... |
|
|
NestedTensorLazy: DispatchKey = ... |
|
|
NestedTensorMeta: DispatchKey = ... |
|
|
NestedTensorPrivateUse1: DispatchKey = ... |
|
|
NestedTensorPrivateUse2: DispatchKey = ... |
|
|
NestedTensorPrivateUse3: DispatchKey = ... |
|
|
AutogradCPU: DispatchKey = ... |
|
|
AutogradCUDA: DispatchKey = ... |
|
|
AutogradHIP: DispatchKey = ... |
|
|
AutogradXLA: DispatchKey = ... |
|
|
AutogradMPS: DispatchKey = ... |
|
|
AutogradIPU: DispatchKey = ... |
|
|
AutogradXPU: DispatchKey = ... |
|
|
AutogradHPU: DispatchKey = ... |
|
|
AutogradVE: DispatchKey = ... |
|
|
AutogradLazy: DispatchKey = ... |
|
|
AutogradMeta: DispatchKey = ... |
|
|
AutogradPrivateUse1: DispatchKey = ... |
|
|
AutogradPrivateUse2: DispatchKey = ... |
|
|
AutogradPrivateUse3: DispatchKey = ... |
|
|
|
|
|
class DispatchKeySet: |
|
|
def __or__(self, other: DispatchKeySet) -> DispatchKeySet: ... |
|
|
def __sub__(self, other: DispatchKeySet) -> DispatchKeySet: ... |
|
|
def __and__(self, other: DispatchKeySet) -> DispatchKeySet: ... |
|
|
def highestPriorityTypeId(self) -> DispatchKey: ... |
|
|
def has(self, k: _dispatchkey) -> _bool: ... |
|
|
def __repr__(self) -> str: ... |
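
# Introspection sketch (private helpers, for debugging only; assumes has()
# accepts a key name string per the _dispatchkey union): the key set carried
# by a tensor can be inspected via _dispatch_keys, declared below.
#
#   >>> ks = torch._C._dispatch_keys(torch.randn(2))
#   >>> ks.has("CPU")                       # True for a plain CPU tensor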
|
|
|
|
|
_dispatch_autogradother_backends: DispatchKeySet |
|
|
def _dispatch_has_backend_fallback(dispatch: _dispatchkey) -> _bool: ... |
|
|
def _dispatch_keyset_full_after(t: _dispatchkey) -> DispatchKeySet: ... |
|
|
def _dispatch_keyset_to_string(keyset: DispatchKeySet) -> str: ... |
|
|
def _dispatch_get_backend_keyset_from_autograd(dispatch: _dispatchkey) -> DispatchKeySet: ... |
|
|
def _dispatch_keys(tensor: Tensor) -> DispatchKeySet: ... |
|
|
def _dispatch_tls_local_exclude_set() -> DispatchKeySet: ... |
|
|
def _dispatch_tls_local_include_set() -> DispatchKeySet: ... |
|
|
def _dispatch_is_included_in_alias(dispatch_a: _dispatchkey, dispatch_b: _dispatchkey) -> _bool: ... |
|
|
|
|
|
class ExcludeDispatchKeyGuard: |
|
|
pass |
|
|
|
|
|
class _AutoDispatchBelowAutograd: |
|
|
pass |
|
|
|
|
|
def _dispatch_print_registrations_for_dispatch_key(dispatch_key: str = "") -> None: ... |
|
|
def _dispatch_get_registrations_for_dispatch_key(dispatch_key: str = "") -> List[str]: ... |
|
|
|
|
|
def _are_functorch_transforms_active() -> _bool: ... |
|
|
|
|
|
|
|
|
class _DisablePythonDispatcher(object): |
|
|
pass |
|
|
|
|
|
class _EnablePythonDispatcher(object): |
|
|
pass |
|
|
|
|
|
def _set_python_dispatcher(dispatcher: object) -> None: ... |
|
|
|
|
|
|
|
|
|
|
|
class BenchmarkConfig(object): |
|
|
num_calling_threads: _int |
|
|
num_worker_threads: _int |
|
|
num_warmup_iters: _int |
|
|
num_iters: _int |
|
|
profiler_output_path: str |
|
|
|
|
|
class BenchmarkExecutionStats(object): |
|
|
latency_avg_ms: _float |
|
|
num_iters: _int |
|
|
|
|
|
class ThroughputBenchmark(object): |
|
|
def __init__(self, module: Any) -> None: ... |
|
|
def add_input(self, *args: Any, **kwargs: Any) -> None: ... |
|
|
def run_once(self, *args: Any, **kwargs: Any) -> Any: ... |
|
|
def benchmark(self, config: BenchmarkConfig) -> BenchmarkExecutionStats: ... |
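
# Usage sketch (via the public torch.utils wrapper, which fills in a
# BenchmarkConfig from keyword arguments; scripted_module is any module):
#
#   >>> from torch.utils.throughput_benchmark import ThroughputBenchmark
#   >>> bench = ThroughputBenchmark(scripted_module)
#   >>> bench.add_input(torch.randn(1, 16))
#   >>> stats = bench.benchmark(num_calling_threads=4, num_warmup_iters=10, num_iters=100)
#   >>> stats.latency_avg_ms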
|
|
|
|
|
|
|
|
class StorageBase(object): ... |
|
|
|
|
|
|
|
|
class DoubleTensor(Tensor): ... |
|
|
class FloatTensor(Tensor): ... |
|
|
class LongTensor(Tensor): ... |
|
|
class IntTensor(Tensor): ... |
|
|
class ShortTensor(Tensor): ... |
|
|
class HalfTensor(Tensor): ... |
|
|
class CharTensor(Tensor): ... |
|
|
class ByteTensor(Tensor): ... |
|
|
class BoolTensor(Tensor): ... |
|
|
|
|
|
|
|
|
class _ImperativeEngine: |
|
|
... |
|
|
|
|
|
|
|
|
class _TensorMeta(type): |
|
|
pass |
|
|
|
|
|
|
|
|
class _TensorBase(metaclass=_TensorMeta): |
|
|
requires_grad: _bool |
|
|
shape: Size |
|
|
data: Tensor |
|
|
names: List[str] |
|
|
device: _device |
|
|
dtype: _dtype |
|
|
layout: _layout |
|
|
real: Tensor |
|
|
imag: Tensor |
|
|
T: Tensor |
|
|
H: Tensor |
|
|
mT: Tensor |
|
|
mH: Tensor |
|
|
ndim: _int |
|
|
output_nr: _int |
|
|
_version: _int |
|
|
_base: Optional[Tensor] |
|
|
_cdata: _int |
|
|
grad_fn: Any |
|
|
_grad_fn: Any |
|
|
_grad: Optional[Tensor] |
|
|
grad: Optional[Tensor] |
|
|
_backward_hooks: Optional[Dict[_int, Callable[[Tensor], Optional[Tensor]]]] |
|
|
def __abs__(self) -> Tensor: ... |
|
|
def __add__(self, other: Any) -> Tensor: ... |
|
|
@overload |
|
|
def __and__(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def __and__(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def __and__(self, other: Any) -> Tensor: ... |
|
|
def __bool__(self) -> builtins.bool: ... |
|
|
def __complex__(self) -> builtins.complex: ... |
|
|
def __div__(self, other: Any) -> Tensor: ... |
|
|
def __eq__(self, other: Any) -> Tensor: ... |
|
|
def __float__(self) -> builtins.float: ... |
|
|
def __floordiv__(self, other: Any) -> Tensor: ... |
|
|
def __ge__(self, other: Any) -> Tensor: ... |
|
|
def __getitem__(self, indices: Union[None, _int, slice, Tensor, List, Tuple]) -> Tensor: ... |
|
|
def __gt__(self, other: Any) -> Tensor: ... |
|
|
def __iadd__(self, other: Any) -> Tensor: ... |
|
|
@overload |
|
|
def __iand__(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def __iand__(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def __iand__(self, other: Any) -> Tensor: ... |
|
|
def __idiv__(self, other: Any) -> Tensor: ... |
|
|
def __ifloordiv__(self, other: Any) -> Tensor: ... |
|
|
@overload |
|
|
def __ilshift__(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def __ilshift__(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def __ilshift__(self, other: Any) -> Tensor: ... |
|
|
def __imod__(self, other: Any) -> Tensor: ... |
|
|
def __imul__(self, other: Any) -> Tensor: ... |
|
|
def __index__(self) -> builtins.int: ... |
|
|
@overload |
|
|
def __init__(self, *args: Any, device: Device=None) -> None: ... |
|
|
@overload |
|
|
def __init__(self, storage: Storage) -> None: ... |
|
|
@overload |
|
|
def __init__(self, other: Tensor) -> None: ... |
|
|
@overload |
|
|
def __init__(self, size: _size, *, device: Device=None) -> None: ... |
|
|
def __int__(self) -> builtins.int: ... |
|
|
def __invert__(self) -> Tensor: ... |
|
|
@overload |
|
|
def __ior__(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def __ior__(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def __ior__(self, other: Any) -> Tensor: ... |
|
|
@overload |
|
|
def __irshift__(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def __irshift__(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def __irshift__(self, other: Any) -> Tensor: ... |
|
|
def __isub__(self, other: Any) -> Tensor: ... |
|
|
@overload |
|
|
def __ixor__(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def __ixor__(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def __ixor__(self, other: Any) -> Tensor: ... |
|
|
def __le__(self, other: Any) -> Tensor: ... |
|
|
def __long__(self) -> builtins.int: ... |
|
|
@overload |
|
|
def __lshift__(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def __lshift__(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def __lshift__(self, other: Any) -> Tensor: ... |
|
|
def __lt__(self, other: Any) -> Tensor: ... |
|
|
def __matmul__(self, other: Any) -> Tensor: ... |
|
|
def __mod__(self, other: Any) -> Tensor: ... |
|
|
def __mul__(self, other: Any) -> Tensor: ... |
|
|
def __ne__(self, other: Any) -> Tensor: ... |
|
|
def __neg__(self) -> Tensor: ... |
|
|
def __nonzero__(self) -> builtins.bool: ... |
|
|
@overload |
|
|
def __or__(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def __or__(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def __or__(self, other: Any) -> Tensor: ... |
|
|
def __pow__(self, other: Any) -> Tensor: ... |
|
|
def __radd__(self, other: Any) -> Tensor: ... |
|
|
def __rand__(self, other: Any) -> Tensor: ... |
|
|
def __rfloordiv__(self, other: Any) -> Tensor: ... |
|
|
def __rmul__(self, other: Any) -> Tensor: ... |
|
|
def __ror__(self, other: Any) -> Tensor: ... |
|
|
def __rpow__(self, other: Any) -> Tensor: ... |
|
|
@overload |
|
|
def __rshift__(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def __rshift__(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def __rshift__(self, other: Any) -> Tensor: ... |
|
|
def __rsub__(self, other: Any) -> Tensor: ... |
|
|
def __rtruediv__(self, other: Any) -> Tensor: ... |
|
|
def __rxor__(self, other: Any) -> Tensor: ... |
|
|
def __setitem__(self, indices: Union[None, _int, slice, Tensor, List, Tuple], val: Union[Tensor, Number]) -> None: ... |
|
|
def __sub__(self, other: Any) -> Tensor: ... |
|
|
def __truediv__(self, other: Any) -> Tensor: ... |
|
|
@overload |
|
|
def __xor__(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def __xor__(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def __xor__(self, other: Any) -> Tensor: ... |
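# Methods with a leading underscore are internal helpers; they are not part
# of the stable public API and may change between releases.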
|
|
def _addmm_activation(self, mat1: Tensor, mat2: Tensor, *, beta: Number=1, alpha: Number=1, use_gelu: _bool=False) -> Tensor: ... |
|
|
def _autocast_to_full_precision(self, cuda_enabled: _bool, cpu_enabled: _bool) -> Tensor: ... |
|
|
def _autocast_to_reduced_precision(self, cuda_enabled: _bool, cpu_enabled: _bool, cuda_dtype: _dtype, cpu_dtype: _dtype) -> Tensor: ... |
|
|
def _coalesced_(self, coalesced: _bool) -> Tensor: ... |
|
|
def _conj(self) -> Tensor: ... |
|
|
def _conj_physical(self) -> Tensor: ... |
|
|
def _dimI(self) -> _int: ... |
|
|
def _dimV(self) -> _int: ... |
|
|
def _indices(self) -> Tensor: ... |
|
|
def _is_view(self) -> _bool: ... |
|
|
def _is_zerotensor(self) -> _bool: ... |
|
|
def _make_subclass(cls, data: Tensor, require_grad: _bool=False, dispatch_strides: _bool=False, dispatch_device: _bool=False, device_for_backend_keys: Optional[_device]=None) -> Tensor: ...
|
|
def _neg_view(self) -> Tensor: ... |
|
|
def _nested_tensor_layer_norm(self, weight: Optional[Tensor], bias: Optional[Tensor], eps: _float) -> Tensor: ... |
|
|
def _nested_tensor_size(self) -> Tensor: ... |
|
|
def _nnz(self) -> _int: ... |
|
|
def _to_dense(self, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
def _values(self) -> Tensor: ... |
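# Public methods, listed alphabetically. By PyTorch convention a trailing
# underscore marks the in-place variant: abs_() mutates self, abs() returns
# a new tensor.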
|
|
def abs(self) -> Tensor: ... |
|
|
def abs_(self) -> Tensor: ... |
|
|
def absolute(self) -> Tensor: ... |
|
|
def absolute_(self) -> Tensor: ... |
|
|
def acos(self) -> Tensor: ... |
|
|
def acos_(self) -> Tensor: ... |
|
|
def acosh(self) -> Tensor: ... |
|
|
def acosh_(self) -> Tensor: ... |
|
|
def add(self, other: Union[Tensor, Number, torch.SymIntNode, torch.SymFloatNode], *, alpha: Optional[Number]=1, out: Optional[Tensor]=None) -> Tensor: ... |
|
|
def add_(self, other: Union[Tensor, Number, torch.SymIntNode, torch.SymFloatNode], *, alpha: Optional[Number]=1) -> Tensor: ... |
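# add/add_ compute self + alpha * other; for example, x.add(y, alpha=2) is
# equivalent to x + 2 * y.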
|
|
def addbmm(self, batch1: Tensor, batch2: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ... |
|
|
def addbmm_(self, batch1: Tensor, batch2: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ... |
|
|
def addcdiv(self, tensor1: Tensor, tensor2: Tensor, *, value: Number=1) -> Tensor: ... |
|
|
def addcdiv_(self, tensor1: Tensor, tensor2: Tensor, *, value: Number=1) -> Tensor: ... |
|
|
def addcmul(self, tensor1: Tensor, tensor2: Tensor, *, value: Number=1) -> Tensor: ... |
|
|
def addcmul_(self, tensor1: Tensor, tensor2: Tensor, *, value: Number=1) -> Tensor: ... |
|
|
def addmm(self, mat1: Tensor, mat2: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ... |
|
|
def addmm_(self, mat1: Tensor, mat2: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ... |
|
|
def addmv(self, mat: Tensor, vec: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ... |
|
|
def addmv_(self, mat: Tensor, vec: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ... |
|
|
def addr(self, vec1: Tensor, vec2: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ... |
|
|
def addr_(self, vec1: Tensor, vec2: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ... |
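# The addbmm/addcdiv/addcmul/addmm/addmv/addr family above fuses an
# accumulation into self with a scaled product (or quotient) of the operands,
# e.g. addmm computes beta * self + alpha * (mat1 @ mat2).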
|
|
def adjoint(self) -> Tensor: ... |
|
|
def align_as(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def align_to(self, order: Sequence[Union[str, ellipsis, None]], ellipsis_idx: _int) -> Tensor: ... |
|
|
@overload |
|
|
def align_to(self, names: Sequence[Union[str, ellipsis, None]]) -> Tensor: ... |
|
|
@overload |
|
|
def all(self) -> Tensor: ... |
|
|
@overload |
|
|
def all(self, dim: _int, keepdim: _bool=False) -> Tensor: ... |
|
|
@overload |
|
|
def all(self, dim: Union[str, ellipsis, None], keepdim: _bool=False) -> Tensor: ... |
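# Overloads whose dim parameter is typed Union[str, ellipsis, None] accept
# named-tensor dimension names in addition to integer indices.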
|
|
def allclose(self, other: Tensor, rtol: _float=1e-05, atol: _float=1e-08, equal_nan: _bool=False) -> _bool: ... |
|
|
def amax(self, dim: Union[_int, _size]=(), keepdim: _bool=False) -> Tensor: ... |
|
|
def amin(self, dim: Union[_int, _size]=(), keepdim: _bool=False) -> Tensor: ... |
|
|
def aminmax(self, *, dim: Optional[_int]=None, keepdim: _bool=False) -> torch.return_types.aminmax: ... |
|
|
def angle(self) -> Tensor: ... |
|
|
@overload |
|
|
def any(self) -> Tensor: ... |
|
|
@overload |
|
|
def any(self, dim: _int, keepdim: _bool=False) -> Tensor: ... |
|
|
@overload |
|
|
def any(self, dim: Union[str, ellipsis, None], keepdim: _bool=False) -> Tensor: ... |
|
|
def apply_(self, callable: Callable) -> Tensor: ... |
|
|
def arccos(self) -> Tensor: ... |
|
|
def arccos_(self) -> Tensor: ... |
|
|
def arccosh(self) -> Tensor: ... |
|
|
def arccosh_(self) -> Tensor: ... |
|
|
def arcsin(self) -> Tensor: ... |
|
|
def arcsin_(self) -> Tensor: ... |
|
|
def arcsinh(self) -> Tensor: ... |
|
|
def arcsinh_(self) -> Tensor: ... |
|
|
def arctan(self) -> Tensor: ... |
|
|
def arctan2(self, other: Tensor) -> Tensor: ... |
|
|
def arctan2_(self, other: Tensor) -> Tensor: ... |
|
|
def arctan_(self) -> Tensor: ... |
|
|
def arctanh(self) -> Tensor: ... |
|
|
def arctanh_(self) -> Tensor: ... |
|
|
def argmax(self, dim: Optional[_int]=None, keepdim: _bool=False) -> Tensor: ... |
|
|
def argmin(self, dim: Optional[_int]=None, keepdim: _bool=False) -> Tensor: ... |
|
|
@overload |
|
|
def argsort(self, *, stable: _bool, dim: _int=-1, descending: _bool=False) -> Tensor: ... |
|
|
@overload |
|
|
def argsort(self, dim: _int=-1, descending: _bool=False) -> Tensor: ... |
|
|
@overload |
|
|
def argsort(self, dim: Union[str, ellipsis, None], descending: _bool=False) -> Tensor: ... |
|
|
def argwhere(self) -> Tensor: ... |
|
|
def as_strided(self, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]]=None) -> Tensor: ... |
|
|
def as_strided_(self, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]]=None) -> Tensor: ... |
|
|
def as_strided_scatter(self, src: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]]=None) -> Tensor: ... |
|
|
def as_subclass(self, cls: Type[S]) -> S: ... |
|
|
def asin(self) -> Tensor: ... |
|
|
def asin_(self) -> Tensor: ... |
|
|
def asinh(self) -> Tensor: ... |
|
|
def asinh_(self) -> Tensor: ... |
|
|
def atan(self) -> Tensor: ... |
|
|
def atan2(self, other: Tensor) -> Tensor: ... |
|
|
def atan2_(self, other: Tensor) -> Tensor: ... |
|
|
def atan_(self) -> Tensor: ... |
|
|
def atanh(self) -> Tensor: ... |
|
|
def atanh_(self) -> Tensor: ... |
|
|
def baddbmm(self, batch1: Tensor, batch2: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ... |
|
|
def baddbmm_(self, batch1: Tensor, batch2: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ... |
|
|
@overload |
|
|
def bernoulli(self, *, generator: Optional[Generator]=None) -> Tensor: ... |
|
|
@overload |
|
|
def bernoulli(self, p: _float, *, generator: Optional[Generator]=None) -> Tensor: ... |
|
|
@overload |
|
|
def bernoulli_(self, p: Tensor, *, generator: Optional[Generator]=None) -> Tensor: ... |
|
|
@overload |
|
|
def bernoulli_(self, p: _float=0.5, *, generator: Optional[Generator]=None) -> Tensor: ... |
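# bernoulli/bernoulli_ sample elementwise from Bernoulli(p); the keyword-only
# generator argument selects the torch.Generator used, as it does for the
# other random in-place methods below.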
|
|
def bfloat16(self) -> Tensor: ... |
|
|
def bincount(self, weights: Optional[Tensor]=None, minlength: _int=0) -> Tensor: ... |
|
|
@overload |
|
|
def bitwise_and(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def bitwise_and(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def bitwise_and_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def bitwise_and_(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def bitwise_left_shift(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def bitwise_left_shift(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def bitwise_left_shift_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def bitwise_left_shift_(self, other: Number) -> Tensor: ... |
|
|
def bitwise_not(self) -> Tensor: ... |
|
|
def bitwise_not_(self) -> Tensor: ... |
|
|
@overload |
|
|
def bitwise_or(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def bitwise_or(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def bitwise_or_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def bitwise_or_(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def bitwise_right_shift(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def bitwise_right_shift(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def bitwise_right_shift_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def bitwise_right_shift_(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def bitwise_xor(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def bitwise_xor(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def bitwise_xor_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def bitwise_xor_(self, other: Number) -> Tensor: ... |
|
|
def bmm(self, mat2: Tensor) -> Tensor: ... |
|
|
def bool(self) -> Tensor: ... |
|
|
@overload |
|
|
def broadcast_to(self, size: _size) -> Tensor: ... |
|
|
@overload |
|
|
def broadcast_to(self, *size: _int) -> Tensor: ... |
|
|
def byte(self) -> Tensor: ... |
|
|
def cauchy_(self, median: _float=0, sigma: _float=1, *, generator: Optional[Generator]=None) -> Tensor: ... |
|
|
def ccol_indices(self) -> Tensor: ... |
|
|
def ceil(self) -> Tensor: ... |
|
|
def ceil_(self) -> Tensor: ... |
|
|
def chalf(self, *, memory_format: Optional[memory_format]=None) -> Tensor: ... |
|
|
def char(self) -> Tensor: ... |
|
|
def cholesky(self, upper: _bool=False) -> Tensor: ... |
|
|
def cholesky_inverse(self, upper: _bool=False) -> Tensor: ... |
|
|
def cholesky_solve(self, input2: Tensor, upper: _bool=False) -> Tensor: ... |
|
|
def chunk(self, chunks: _int, dim: _int=0) -> List[Tensor]: ... |
|
|
@overload |
|
|
def clamp(self, min: Optional[Tensor]=None, max: Optional[Tensor]=None) -> Tensor: ... |
|
|
@overload |
|
|
def clamp(self, min: Optional[Number]=None, max: Optional[Number]=None) -> Tensor: ... |
|
|
@overload |
|
|
def clamp_(self, min: Optional[Tensor]=None, max: Optional[Tensor]=None) -> Tensor: ... |
|
|
@overload |
|
|
def clamp_(self, min: Optional[Number]=None, max: Optional[Number]=None) -> Tensor: ... |
|
|
@overload |
|
|
def clamp_max(self, max: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def clamp_max(self, max: Number) -> Tensor: ... |
|
|
@overload |
|
|
def clamp_max_(self, max: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def clamp_max_(self, max: Number) -> Tensor: ... |
|
|
@overload |
|
|
def clamp_min(self, min: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def clamp_min(self, min: Number) -> Tensor: ... |
|
|
@overload |
|
|
def clamp_min_(self, min: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def clamp_min_(self, min: Number) -> Tensor: ... |
|
|
@overload |
|
|
def clip(self, min: Optional[Tensor]=None, max: Optional[Tensor]=None) -> Tensor: ... |
|
|
@overload |
|
|
def clip(self, min: Optional[Number]=None, max: Optional[Number]=None) -> Tensor: ... |
|
|
@overload |
|
|
def clip_(self, min: Optional[Tensor]=None, max: Optional[Tensor]=None) -> Tensor: ... |
|
|
@overload |
|
|
def clip_(self, min: Optional[Number]=None, max: Optional[Number]=None) -> Tensor: ... |
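# clip/clip_ are NumPy-compatible aliases of clamp/clamp_.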
|
|
def clone(self, *, memory_format: Optional[memory_format]=None) -> Tensor: ... |
|
|
def coalesce(self) -> Tensor: ... |
|
|
def col_indices(self) -> Tensor: ... |
|
|
def conj(self) -> Tensor: ... |
|
|
def conj_physical(self) -> Tensor: ... |
|
|
def conj_physical_(self) -> Tensor: ... |
|
|
def contiguous(self, memory_format=torch.contiguous_format) -> Tensor: ... |
|
|
def copy_(self, src: Tensor, non_blocking: _bool=False) -> Tensor: ... |
|
|
@overload |
|
|
def copysign(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def copysign(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def copysign_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def copysign_(self, other: Number) -> Tensor: ... |
|
|
def corrcoef(self) -> Tensor: ... |
|
|
def cos(self) -> Tensor: ... |
|
|
def cos_(self) -> Tensor: ... |
|
|
def cosh(self) -> Tensor: ... |
|
|
def cosh_(self) -> Tensor: ... |
|
|
@overload |
|
|
def count_nonzero(self, dim: Optional[_int]=None) -> Tensor: ... |
|
|
@overload |
|
|
def count_nonzero(self, dim: _size) -> Tensor: ... |
|
|
@overload |
|
|
def count_nonzero(self, *dim: _int) -> Tensor: ... |
|
|
def cov(self, *, correction: _int=1, fweights: Optional[Tensor]=None, aweights: Optional[Tensor]=None) -> Tensor: ... |
|
|
def cpu(self) -> Tensor: ... |
|
|
def cross(self, other: Tensor, dim: Optional[_int]=None) -> Tensor: ... |
|
|
def crow_indices(self) -> Tensor: ... |
|
|
def cuda(self, device: Optional[Union[_device, _int, str]]=None, non_blocking: _bool=False) -> Tensor: ... |
|
|
@overload |
|
|
def cummax(self, dim: _int) -> torch.return_types.cummax: ... |
|
|
@overload |
|
|
def cummax(self, dim: Union[str, ellipsis, None]) -> torch.return_types.cummax: ... |
|
|
@overload |
|
|
def cummin(self, dim: _int) -> torch.return_types.cummin: ... |
|
|
@overload |
|
|
def cummin(self, dim: Union[str, ellipsis, None]) -> torch.return_types.cummin: ... |
|
|
@overload |
|
|
def cumprod(self, dim: _int, *, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
@overload |
|
|
def cumprod(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
@overload |
|
|
def cumprod_(self, dim: _int, *, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
@overload |
|
|
def cumprod_(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
@overload |
|
|
def cumsum(self, dim: _int, *, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
@overload |
|
|
def cumsum(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
@overload |
|
|
def cumsum_(self, dim: _int, *, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
@overload |
|
|
def cumsum_(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
def data_ptr(self) -> _int: ... |
|
|
def deg2rad(self) -> Tensor: ... |
|
|
def deg2rad_(self) -> Tensor: ... |
|
|
def dense_dim(self) -> _int: ... |
|
|
def dequantize(self) -> Tensor: ... |
|
|
def det(self) -> Tensor: ... |
|
|
def detach(self) -> Tensor: ... |
|
|
def detach_(self) -> Tensor: ... |
|
|
def diag(self, diagonal: _int=0) -> Tensor: ... |
|
|
def diag_embed(self, offset: _int=0, dim1: _int=-2, dim2: _int=-1) -> Tensor: ... |
|
|
def diagflat(self, offset: _int=0) -> Tensor: ... |
|
|
@overload |
|
|
def diagonal(self, offset: _int=0, dim1: _int=0, dim2: _int=1) -> Tensor: ... |
|
|
@overload |
|
|
def diagonal(self, *, outdim: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None], dim2: Union[str, ellipsis, None], offset: _int=0) -> Tensor: ... |
|
|
def diagonal_scatter(self, src: Tensor, offset: _int=0, dim1: _int=0, dim2: _int=1) -> Tensor: ... |
|
|
def diff(self, n: _int=1, dim: _int=-1, prepend: Optional[Tensor]=None, append: Optional[Tensor]=None) -> Tensor: ... |
|
|
def digamma(self) -> Tensor: ... |
|
|
def digamma_(self) -> Tensor: ... |
|
|
def dim(self) -> _int: ... |
|
|
def dist(self, other: Tensor, p: Number=2) -> Tensor: ... |
|
|
def div(self, other: Union[Tensor, Number], *, rounding_mode: Optional[str]=None) -> Tensor: ...
|
|
def div_(self, other: Union[Tensor, Number], *, rounding_mode: Optional[str]=None) -> Tensor: ...
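# rounding_mode is None (true division), "trunc" (round toward zero), or
# "floor" (round toward negative infinity).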
|
|
@overload |
|
|
def divide(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def divide(self, other: Tensor, *, rounding_mode: Optional[str]) -> Tensor: ... |
|
|
@overload |
|
|
def divide(self, other: Number, *, rounding_mode: Optional[str]) -> Tensor: ... |
|
|
@overload |
|
|
def divide(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def divide_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def divide_(self, other: Tensor, *, rounding_mode: Optional[str]) -> Tensor: ... |
|
|
@overload |
|
|
def divide_(self, other: Number, *, rounding_mode: Optional[str]) -> Tensor: ... |
|
|
@overload |
|
|
def divide_(self, other: Number) -> Tensor: ... |
|
|
def dot(self, tensor: Tensor) -> Tensor: ... |
|
|
def double(self) -> Tensor: ... |
|
|
@overload |
|
|
def dsplit(self, sections: _int) -> List[Tensor]: ... |
|
|
@overload |
|
|
def dsplit(self, indices: _size) -> List[Tensor]: ... |
|
|
@overload |
|
|
def dsplit(self, *indices: _int) -> List[Tensor]: ... |
|
|
def element_size(self) -> _int: ... |
|
|
@overload |
|
|
def eq(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def eq(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def eq_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def eq_(self, other: Number) -> Tensor: ... |
|
|
def equal(self, other: Tensor) -> _bool: ... |
|
|
def erf(self) -> Tensor: ... |
|
|
def erf_(self) -> Tensor: ... |
|
|
def erfc(self) -> Tensor: ... |
|
|
def erfc_(self) -> Tensor: ... |
|
|
def erfinv(self) -> Tensor: ... |
|
|
def erfinv_(self) -> Tensor: ... |
|
|
def exp(self) -> Tensor: ... |
|
|
def exp2(self) -> Tensor: ... |
|
|
def exp2_(self) -> Tensor: ... |
|
|
def exp_(self) -> Tensor: ... |
|
|
@overload |
|
|
def expand(self, size: Sequence[Union[_int, SymInt]], *, implicit: _bool=False) -> Tensor: ... |
|
|
@overload |
|
|
def expand(self, *size: _int, implicit: _bool=False) -> Tensor: ... |
|
|
def expand_as(self, other: Tensor) -> Tensor: ... |
|
|
def expm1(self) -> Tensor: ... |
|
|
def expm1_(self) -> Tensor: ... |
|
|
def exponential_(self, lambd: _float=1, *, generator: Optional[Generator]=None) -> Tensor: ... |
|
|
@overload |
|
|
def fill_(self, value: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def fill_(self, value: Number) -> Tensor: ... |
|
|
def fill_diagonal_(self, fill_value: Number, wrap: _bool=False) -> Tensor: ... |
|
|
def fix(self) -> Tensor: ... |
|
|
def fix_(self) -> Tensor: ... |
|
|
@overload |
|
|
def flatten(self, start_dim: _int=0, end_dim: _int=-1) -> Tensor: ... |
|
|
@overload |
|
|
def flatten(self, start_dim: _int, end_dim: _int, out_dim: Union[str, ellipsis, None]) -> Tensor: ... |
|
|
@overload |
|
|
def flatten(self, start_dim: Union[str, ellipsis, None], end_dim: Union[str, ellipsis, None], out_dim: Union[str, ellipsis, None]) -> Tensor: ... |
|
|
@overload |
|
|
def flatten(self, dims: Sequence[Union[str, ellipsis, None]], out_dim: Union[str, ellipsis, None]) -> Tensor: ... |
|
|
@overload |
|
|
def flip(self, dims: _size) -> Tensor: ... |
|
|
@overload |
|
|
def flip(self, *dims: _int) -> Tensor: ... |
|
|
def fliplr(self) -> Tensor: ... |
|
|
def flipud(self) -> Tensor: ... |
|
|
def float(self) -> Tensor: ... |
|
|
@overload |
|
|
def float_power(self, exponent: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def float_power(self, exponent: Number) -> Tensor: ... |
|
|
@overload |
|
|
def float_power_(self, exponent: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def float_power_(self, exponent: Number) -> Tensor: ... |
|
|
def floor(self) -> Tensor: ... |
|
|
def floor_(self) -> Tensor: ... |
|
|
def floor_divide(self, other: Union[Tensor, Number, torch.SymIntNode, torch.SymFloatNode], *, out: Optional[Tensor]=None) -> Tensor: ... |
|
|
def floor_divide_(self, other: Union[Tensor, Number, torch.SymIntNode, torch.SymFloatNode]) -> Tensor: ... |
|
|
def fmax(self, other: Tensor) -> Tensor: ... |
|
|
def fmin(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def fmod(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def fmod(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def fmod_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def fmod_(self, other: Number) -> Tensor: ... |
|
|
def frac(self) -> Tensor: ... |
|
|
def frac_(self) -> Tensor: ... |
|
|
def frexp(self) -> torch.return_types.frexp: ... |
|
|
@overload |
|
|
def gather(self, dim: _int, index: Tensor, *, sparse_grad: _bool=False) -> Tensor: ... |
|
|
@overload |
|
|
def gather(self, dim: Union[str, ellipsis, None], index: Tensor, *, sparse_grad: _bool=False) -> Tensor: ... |
|
|
def gcd(self, other: Tensor) -> Tensor: ... |
|
|
def gcd_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def ge(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def ge(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def ge_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def ge_(self, other: Number) -> Tensor: ... |
|
|
def geometric_(self, p: _float, *, generator: Optional[Generator]=None) -> Tensor: ... |
|
|
def geqrf(self) -> torch.return_types.geqrf: ... |
|
|
def ger(self, vec2: Tensor) -> Tensor: ... |
|
|
def get_device(self) -> _int: ... |
|
|
@overload |
|
|
def greater(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def greater(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def greater_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def greater_(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def greater_equal(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def greater_equal(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def greater_equal_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def greater_equal_(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def gt(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def gt(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def gt_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def gt_(self, other: Number) -> Tensor: ... |
|
|
def half(self) -> Tensor: ... |
|
|
def hardshrink(self, lambd: Number=0.5) -> Tensor: ... |
|
|
def has_names(self) -> _bool: ... |
|
|
def heaviside(self, values: Tensor) -> Tensor: ... |
|
|
def heaviside_(self, values: Tensor) -> Tensor: ... |
|
|
def histc(self, bins: _int=100, min: Number=0, max: Number=0) -> Tensor: ... |
|
|
@overload |
|
|
def histogram(self, bins: Tensor, *, weight: Optional[Tensor]=None, density: _bool=False) -> torch.return_types.histogram: ... |
|
|
@overload |
|
|
def histogram(self, bins: _int=100, *, range: Optional[Sequence[_float]]=None, weight: Optional[Tensor]=None, density: _bool=False) -> torch.return_types.histogram: ... |
|
|
@overload |
|
|
def hsplit(self, sections: _int) -> List[Tensor]: ... |
|
|
@overload |
|
|
def hsplit(self, indices: _size) -> List[Tensor]: ... |
|
|
@overload |
|
|
def hsplit(self, *indices: _int) -> List[Tensor]: ... |
|
|
def hypot(self, other: Tensor) -> Tensor: ... |
|
|
def hypot_(self, other: Tensor) -> Tensor: ... |
|
|
def i0(self) -> Tensor: ... |
|
|
def i0_(self) -> Tensor: ... |
|
|
def igamma(self, other: Tensor) -> Tensor: ... |
|
|
def igamma_(self, other: Tensor) -> Tensor: ... |
|
|
def igammac(self, other: Tensor) -> Tensor: ... |
|
|
def igammac_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def index_add(self, dim: _int, index: Tensor, source: Tensor, *, alpha: Number=1) -> Tensor: ... |
|
|
@overload |
|
|
def index_add(self, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor, *, alpha: Number=1) -> Tensor: ... |
|
|
def index_add_(self, dim: _int, index: Tensor, source: Tensor, *, alpha: Number=1) -> Tensor: ... |
|
|
@overload |
|
|
def index_copy(self, dim: _int, index: Tensor, source: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def index_copy(self, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def index_copy_(self, dim: _int, index: Tensor, source: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def index_copy_(self, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def index_fill(self, dim: _int, index: Tensor, value: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def index_fill(self, dim: Union[str, ellipsis, None], index: Tensor, value: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def index_fill(self, dim: _int, index: Tensor, value: Number) -> Tensor: ... |
|
|
@overload |
|
|
def index_fill(self, dim: Union[str, ellipsis, None], index: Tensor, value: Number) -> Tensor: ... |
|
|
@overload |
|
|
def index_fill_(self, dim: _int, index: Tensor, value: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def index_fill_(self, dim: Union[str, ellipsis, None], index: Tensor, value: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def index_fill_(self, dim: _int, index: Tensor, value: Number) -> Tensor: ... |
|
|
@overload |
|
|
def index_fill_(self, dim: Union[str, ellipsis, None], index: Tensor, value: Number) -> Tensor: ... |
|
|
def index_put(self, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool=False) -> Tensor: ... |
|
|
def index_put_(self, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool=False) -> Tensor: ... |
|
|
def index_reduce(self, dim: _int, index: Tensor, source: Tensor, reduce: str, *, include_self: _bool=True) -> Tensor: ... |
|
|
def index_reduce_(self, dim: _int, index: Tensor, source: Tensor, reduce: str, *, include_self: _bool=True) -> Tensor: ... |
|
|
@overload |
|
|
def index_select(self, dim: _int, index: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def index_select(self, dim: Union[str, ellipsis, None], index: Tensor) -> Tensor: ... |
|
|
def indices(self) -> Tensor: ... |
|
|
def inner(self, other: Tensor) -> Tensor: ... |
|
|
def int(self) -> Tensor: ... |
|
|
def int_repr(self) -> Tensor: ... |
|
|
def inverse(self) -> Tensor: ... |
|
|
def is_coalesced(self) -> _bool: ... |
|
|
def is_complex(self) -> _bool: ... |
|
|
def is_conj(self) -> _bool: ... |
|
|
def is_contiguous(self, memory_format=torch.contiguous_format) -> _bool: ... |
|
|
is_cuda: _bool |
|
|
def is_distributed(self) -> _bool: ... |
|
|
def is_floating_point(self) -> _bool: ... |
|
|
def is_inference(self) -> _bool: ... |
|
|
is_ipu: _bool |
|
|
is_leaf: _bool |
|
|
is_meta: _bool |
|
|
is_mkldnn: _bool |
|
|
is_mps: _bool |
|
|
def is_neg(self) -> _bool: ... |
|
|
is_nested: _bool |
|
|
def is_nonzero(self) -> _bool: ... |
|
|
is_ort: _bool |
|
|
def is_pinned(self, device: Optional[Union[_device, str, None]]=None) -> _bool: ... |
|
|
is_quantized: _bool |
|
|
def is_same_size(self, other: Tensor) -> _bool: ... |
|
|
def is_set_to(self, tensor: Tensor) -> _bool: ... |
|
|
def is_signed(self) -> _bool: ... |
|
|
is_sparse: _bool |
|
|
is_sparse_csr: _bool |
|
|
is_vulkan: _bool |
|
|
def isclose(self, other: Tensor, rtol: _float=1e-05, atol: _float=1e-08, equal_nan: _bool=False) -> Tensor: ... |
|
|
def isfinite(self) -> Tensor: ... |
|
|
def isinf(self) -> Tensor: ... |
|
|
def isnan(self) -> Tensor: ... |
|
|
def isneginf(self) -> Tensor: ... |
|
|
def isposinf(self) -> Tensor: ... |
|
|
def isreal(self) -> Tensor: ... |
|
|
def istft(self, n_fft: _int, hop_length: Optional[_int]=None, win_length: Optional[_int]=None, window: Optional[Tensor]=None, center: _bool=True, normalized: _bool=False, onesided: Optional[_bool]=None, length: Optional[_int]=None, return_complex: _bool=False) -> Tensor: ... |
|
|
def item(self) -> Number: ... |
|
|
def kron(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def kthvalue(self, k: _int, dim: _int=-1, keepdim: _bool=False) -> torch.return_types.kthvalue: ... |
|
|
@overload |
|
|
def kthvalue(self, k: _int, dim: Union[str, ellipsis, None], keepdim: _bool=False) -> torch.return_types.kthvalue: ... |
|
|
def lcm(self, other: Tensor) -> Tensor: ... |
|
|
def lcm_(self, other: Tensor) -> Tensor: ... |
|
|
def ldexp(self, other: Tensor) -> Tensor: ... |
|
|
def ldexp_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def le(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def le(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def le_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def le_(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def lerp(self, end: Tensor, weight: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def lerp(self, end: Tensor, weight: Number) -> Tensor: ... |
|
|
@overload |
|
|
def lerp_(self, end: Tensor, weight: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def lerp_(self, end: Tensor, weight: Number) -> Tensor: ... |
|
|
@overload |
|
|
def less(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def less(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def less_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def less_(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def less_equal(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def less_equal(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def less_equal_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def less_equal_(self, other: Number) -> Tensor: ... |
|
|
def lgamma(self) -> Tensor: ... |
|
|
def lgamma_(self) -> Tensor: ... |
|
|
def log(self) -> Tensor: ... |
|
|
def log10(self) -> Tensor: ... |
|
|
def log10_(self) -> Tensor: ... |
|
|
def log1p(self) -> Tensor: ... |
|
|
def log1p_(self) -> Tensor: ... |
|
|
def log2(self) -> Tensor: ... |
|
|
def log2_(self) -> Tensor: ... |
|
|
def log_(self) -> Tensor: ... |
|
|
def log_normal_(self, mean: _float=1, std: _float=2, *, generator: Optional[Generator]=None) -> Tensor: ... |
|
|
@overload |
|
|
def log_softmax(self, dim: _int, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
@overload |
|
|
def log_softmax(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
def logaddexp(self, other: Tensor) -> Tensor: ... |
|
|
def logaddexp2(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def logcumsumexp(self, dim: _int) -> Tensor: ... |
|
|
@overload |
|
|
def logcumsumexp(self, dim: Union[str, ellipsis, None]) -> Tensor: ... |
|
|
def logdet(self) -> Tensor: ... |
|
|
def logical_and(self, other: Tensor) -> Tensor: ... |
|
|
def logical_and_(self, other: Tensor) -> Tensor: ... |
|
|
def logical_not(self) -> Tensor: ... |
|
|
def logical_not_(self) -> Tensor: ... |
|
|
def logical_or(self, other: Tensor) -> Tensor: ... |
|
|
def logical_or_(self, other: Tensor) -> Tensor: ... |
|
|
def logical_xor(self, other: Tensor) -> Tensor: ... |
|
|
def logical_xor_(self, other: Tensor) -> Tensor: ... |
|
|
def logit(self, eps: Optional[_float]=None) -> Tensor: ... |
|
|
def logit_(self, eps: Optional[_float]=None) -> Tensor: ... |
|
|
@overload |
|
|
def logsumexp(self, dim: Union[_int, _size], keepdim: _bool=False) -> Tensor: ... |
|
|
@overload |
|
|
def logsumexp(self, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool=False) -> Tensor: ... |
|
|
def long(self) -> Tensor: ... |
|
|
@overload |
|
|
def lt(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def lt(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def lt_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def lt_(self, other: Number) -> Tensor: ... |
|
|
def lu_solve(self, LU_data: Tensor, LU_pivots: Tensor) -> Tensor: ... |
|
|
def map2_(self, x: Tensor, y: Tensor, callable: Callable) -> Tensor: ... |
|
|
def map_(self, tensor: Tensor, callable: Callable) -> Tensor: ... |
|
|
@overload |
|
|
def masked_fill(self, mask: Tensor, value: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def masked_fill(self, mask: Tensor, value: Number) -> Tensor: ... |
|
|
@overload |
|
|
def masked_fill_(self, mask: Tensor, value: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def masked_fill_(self, mask: Tensor, value: Number) -> Tensor: ... |
|
|
def masked_scatter(self, mask: Tensor, source: Tensor) -> Tensor: ... |
|
|
def masked_scatter_(self, mask: Tensor, source: Tensor) -> Tensor: ... |
|
|
def masked_select(self, mask: Tensor) -> Tensor: ... |
|
|
def matmul(self, other: Tensor) -> Tensor: ... |
|
|
def matrix_exp(self) -> Tensor: ... |
|
|
def matrix_power(self, n: _int) -> Tensor: ... |
|
|
@overload |
|
|
def max(self) -> Tensor: ... |
|
|
@overload |
|
|
def max(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def max(self, dim: _int, keepdim: _bool=False) -> torch.return_types.max: ... |
|
|
@overload |
|
|
def max(self, dim: Union[str, ellipsis, None], keepdim: _bool=False) -> torch.return_types.max: ... |
|
|
def maximum(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def mean(self, *, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
@overload |
|
|
def mean(self, dim: Optional[Union[_int, _size]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
@overload |
|
|
def mean(self, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
@overload |
|
|
def median(self) -> Tensor: ... |
|
|
@overload |
|
|
def median(self, dim: _int, keepdim: _bool=False) -> torch.return_types.median: ... |
|
|
@overload |
|
|
def median(self, dim: Union[str, ellipsis, None], keepdim: _bool=False) -> torch.return_types.median: ... |
|
|
@overload |
|
|
def min(self) -> Tensor: ... |
|
|
@overload |
|
|
def min(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def min(self, dim: _int, keepdim: _bool=False) -> torch.return_types.min: ... |
|
|
@overload |
|
|
def min(self, dim: Union[str, ellipsis, None], keepdim: _bool=False) -> torch.return_types.min: ... |
|
|
def minimum(self, other: Tensor) -> Tensor: ... |
|
|
def mm(self, mat2: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def mode(self, dim: _int=-1, keepdim: _bool=False) -> torch.return_types.mode: ... |
|
|
@overload |
|
|
def mode(self, dim: Union[str, ellipsis, None], keepdim: _bool=False) -> torch.return_types.mode: ... |
|
|
@overload |
|
|
def moveaxis(self, source: _int, destination: _int) -> Tensor: ... |
|
|
@overload |
|
|
def moveaxis(self, source: _size, destination: _size) -> Tensor: ... |
|
|
@overload |
|
|
def movedim(self, source: _int, destination: _int) -> Tensor: ... |
|
|
@overload |
|
|
def movedim(self, source: _size, destination: _size) -> Tensor: ... |
|
|
def msort(self) -> Tensor: ... |
|
|
def mul(self, other: Union[Tensor, Number, torch.SymIntNode, torch.SymFloatNode], *, out: Optional[Tensor]=None) -> Tensor: ... |
|
|
def mul_(self, other: Union[Tensor, Number, torch.SymIntNode, torch.SymFloatNode]) -> Tensor: ... |
|
|
def multinomial(self, num_samples: _int, replacement: _bool=False, *, generator: Optional[Generator]=None) -> Tensor: ... |
|
|
@overload |
|
|
def multiply(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def multiply(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def multiply_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def multiply_(self, other: Number) -> Tensor: ... |
|
|
def mv(self, vec: Tensor) -> Tensor: ... |
|
|
def mvlgamma(self, p: _int) -> Tensor: ... |
|
|
def mvlgamma_(self, p: _int) -> Tensor: ... |
|
|
def nan_to_num(self, nan: Optional[_float]=None, posinf: Optional[_float]=None, neginf: Optional[_float]=None) -> Tensor: ... |
|
|
def nan_to_num_(self, nan: Optional[_float]=None, posinf: Optional[_float]=None, neginf: Optional[_float]=None) -> Tensor: ... |
|
|
def nanmean(self, dim: Optional[Union[_int, _size]]=None, keepdim: _bool=False, *, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
@overload |
|
|
def nanmedian(self) -> Tensor: ... |
|
|
@overload |
|
|
def nanmedian(self, dim: _int, keepdim: _bool=False) -> torch.return_types.nanmedian: ... |
|
|
@overload |
|
|
def nanmedian(self, dim: Union[str, ellipsis, None], keepdim: _bool=False) -> torch.return_types.nanmedian: ... |
|
|
@overload |
|
|
def nanquantile(self, q: Tensor, dim: Optional[_int]=None, keepdim: _bool=False, *, interpolation: str="linear") -> Tensor: ... |
|
|
@overload |
|
|
def nanquantile(self, q: _float, dim: Optional[_int]=None, keepdim: _bool=False, *, interpolation: str="linear") -> Tensor: ... |
|
|
def nansum(self, dim: Optional[Union[_int, _size]]=None, keepdim: _bool=False, *, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
@overload |
|
|
def narrow(self, dim: _int, start: Tensor, length: _int) -> Tensor: ... |
|
|
@overload |
|
|
def narrow(self, dim: _int, start: _int, length: _int) -> Tensor: ... |
|
|
def narrow_copy(self, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt]) -> Tensor: ... |
|
|
def ndimension(self) -> _int: ... |
|
|
@overload |
|
|
def ne(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def ne(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def ne_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def ne_(self, other: Number) -> Tensor: ... |
|
|
def neg(self) -> Tensor: ... |
|
|
def neg_(self) -> Tensor: ... |
|
|
def negative(self) -> Tensor: ... |
|
|
def negative_(self) -> Tensor: ... |
|
|
def nelement(self) -> _int: ... |
|
|
@overload |
|
|
def new(self, *args: Any, device: Device=None) -> Tensor: ...
|
|
@overload |
|
|
def new(self, storage: Storage) -> Tensor: ... |
|
|
@overload |
|
|
def new(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def new(self, size: _size, *, device: Device=None) -> Tensor: ... |
|
|
@overload |
|
|
def new_empty(self, size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ... |
|
|
@overload |
|
|
def new_empty(self, *size: _int, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ... |
|
|
def new_empty_strided(self, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ... |
|
|
def new_full(self, size: Sequence[Union[_int, SymInt]], fill_value: Number, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ... |
|
|
@overload |
|
|
def new_ones(self, size: _size, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ... |
|
|
@overload |
|
|
def new_ones(self, size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ... |
|
|
@overload |
|
|
def new_ones(self, *size: _int, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ... |
|
|
def new_tensor(self, data: Any, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ... |
|
|
@overload |
|
|
def new_zeros(self, size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ... |
|
|
@overload |
|
|
def new_zeros(self, *size: _int, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ... |
|
|
def nextafter(self, other: Tensor) -> Tensor: ... |
|
|
def nextafter_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def nonzero(self, *, as_tuple: Literal[False]=False) -> Tensor: ... |
|
|
@overload |
|
|
def nonzero(self, *, as_tuple: Literal[True]) -> Tuple[Tensor, ...]: ... |
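# The Literal overloads let type checkers infer nonzero()'s return type from
# as_tuple: a single tensor of index rows when False, a per-dimension tuple
# of index tensors when True.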
|
|
def normal_(self, mean: _float=0, std: _float=1, *, generator: Optional[Generator]=None) -> Tensor: ... |
|
|
@overload |
|
|
def not_equal(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def not_equal(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def not_equal_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def not_equal_(self, other: Number) -> Tensor: ... |
|
|
def numel(self) -> _int: ... |
|
|
def numpy(self, *, force: _bool=False) -> Any: ... |
|
|
def orgqr(self, input2: Tensor) -> Tensor: ... |
|
|
def ormqr(self, input2: Tensor, input3: Tensor, left: _bool=True, transpose: _bool=False) -> Tensor: ... |
|
|
def outer(self, vec2: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def permute(self, dims: _size) -> Tensor: ... |
|
|
@overload |
|
|
def permute(self, *dims: _int) -> Tensor: ... |
|
|
def pin_memory(self, device: Optional[Union[_device, str, None]]=None) -> Tensor: ... |
|
|
def pinverse(self, rcond: _float=1e-15) -> Tensor: ... |
|
|
def polygamma(self, n: _int) -> Tensor: ... |
|
|
def polygamma_(self, n: _int) -> Tensor: ... |
|
|
def positive(self) -> Tensor: ... |
|
|
@overload |
|
|
def pow(self, exponent: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def pow(self, exponent: Number) -> Tensor: ... |
|
|
@overload |
|
|
def pow_(self, exponent: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def pow_(self, exponent: Number) -> Tensor: ... |
|
|
def prelu(self, weight: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def prod(self, *, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
@overload |
|
|
def prod(self, dim: _int, keepdim: _bool=False, *, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
@overload |
|
|
def prod(self, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
def put(self, index: Tensor, source: Tensor, accumulate: _bool=False) -> Tensor: ... |
|
|
def put_(self, index: Tensor, source: Tensor, accumulate: _bool=False) -> Tensor: ... |
|
|
def q_per_channel_axis(self) -> _int: ... |
|
|
def q_per_channel_scales(self) -> Tensor: ... |
|
|
def q_per_channel_zero_points(self) -> Tensor: ... |
|
|
def q_scale(self) -> _float: ... |
|
|
def q_zero_point(self) -> _int: ... |
|
|
def qr(self, some: _bool=True) -> torch.return_types.qr: ... |
|
|
def qscheme(self) -> _qscheme: ... |
|
|
@overload |
|
|
def quantile(self, q: Tensor, dim: Optional[_int]=None, keepdim: _bool=False, *, interpolation: str="linear") -> Tensor: ... |
|
|
@overload |
|
|
def quantile(self, q: _float, dim: Optional[_int]=None, keepdim: _bool=False, *, interpolation: str="linear") -> Tensor: ... |
|
|
def rad2deg(self) -> Tensor: ... |
|
|
def rad2deg_(self) -> Tensor: ... |
|
|
@overload |
|
|
def random_(self, *, generator: Optional[Generator]=None) -> Tensor: ... |
|
|
@overload |
|
|
def random_(self, from_: _int, to: Optional[_int], *, generator: Optional[Generator]=None) -> Tensor: ... |
|
|
@overload |
|
|
def random_(self, to: _int, *, generator: Optional[Generator]=None) -> Tensor: ... |
|
|
def ravel(self) -> Tensor: ... |
|
|
def reciprocal(self) -> Tensor: ... |
|
|
def reciprocal_(self) -> Tensor: ... |
|
|
def record_stream(self, s: Stream) -> None: ... |
|
|
def refine_names(self, names: Sequence[Union[str, ellipsis, None]]) -> Tensor: ... |
|
|
def relu(self) -> Tensor: ... |
|
|
def relu_(self) -> Tensor: ... |
|
|
@overload |
|
|
def remainder(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def remainder(self, other: Number) -> Tensor: ... |
|
|
@overload |
|
|
def remainder_(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def remainder_(self, other: Number) -> Tensor: ... |
|
|
def rename(self, names: Optional[Sequence[Union[str, ellipsis, None]]]) -> Tensor: ... |
|
|
def rename_(self, names: Optional[Sequence[Union[str, ellipsis, None]]]) -> Tensor: ... |
|
|
def renorm(self, p: Number, dim: _int, maxnorm: Number) -> Tensor: ... |
|
|
def renorm_(self, p: Number, dim: _int, maxnorm: Number) -> Tensor: ... |
|
|
@overload |
|
|
def repeat(self, repeats: Sequence[Union[_int, SymInt]]) -> Tensor: ... |
|
|
@overload |
|
|
def repeat(self, *repeats: _int) -> Tensor: ... |
|
|
@overload |
|
|
def repeat_interleave(self, repeats: Tensor, dim: Optional[_int]=None, *, output_size: Optional[_int]=None) -> Tensor: ... |
|
|
@overload |
|
|
def repeat_interleave(self, repeats: _int, dim: Optional[_int]=None, *, output_size: Optional[_int]=None) -> Tensor: ... |
|
|
def requires_grad_(self, mode: _bool=True) -> Tensor: ... |
|
|
@overload |
|
|
def reshape(self, shape: Sequence[Union[_int, SymInt]]) -> Tensor: ... |
|
|
@overload |
|
|
def reshape(self, *shape: _int) -> Tensor: ... |
|
|
def reshape_as(self, other: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def resize_(self, size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None) -> Tensor: ... |
|
|
@overload |
|
|
def resize_(self, *size: _int, memory_format: Optional[memory_format]=None) -> Tensor: ... |
|
|
def resize_as_(self, the_template: Tensor, *, memory_format: Optional[memory_format]=None) -> Tensor: ... |
|
|
def resize_as_sparse_(self, the_template: Tensor) -> Tensor: ... |
|
|
def resolve_conj(self) -> Tensor: ... |
|
|
def resolve_neg(self) -> Tensor: ... |
|
|
def retain_grad(self) -> None: ... |
|
|
def roll(self, shifts: Union[_int, _size], dims: Union[_int, _size]=()) -> Tensor: ... |
|
|
def rot90(self, k: _int=1, dims: _size=(0,1)) -> Tensor: ... |
|
|
@overload |
|
|
def round(self) -> Tensor: ... |
|
|
@overload |
|
|
def round(self, *, decimals: _int) -> Tensor: ... |
|
|
@overload |
|
|
def round_(self) -> Tensor: ... |
|
|
@overload |
|
|
def round_(self, *, decimals: _int) -> Tensor: ... |
|
|
def row_indices(self) -> Tensor: ... |
|
|
def rsqrt(self) -> Tensor: ... |
|
|
def rsqrt_(self) -> Tensor: ... |
|
|
@overload |
|
|
def scatter(self, dim: _int, index: Tensor, src: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def scatter(self, dim: _int, index: Tensor, src: Tensor, *, reduce: str) -> Tensor: ... |
|
|
@overload |
|
|
def scatter(self, dim: _int, index: Tensor, value: Number, *, reduce: str) -> Tensor: ... |
|
|
@overload |
|
|
def scatter(self, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def scatter(self, dim: _int, index: Tensor, value: Number) -> Tensor: ... |
|
|
@overload |
|
|
def scatter(self, dim: Union[str, ellipsis, None], index: Tensor, value: Number) -> Tensor: ... |
|
|
@overload |
|
|
def scatter_(self, dim: _int, index: Tensor, src: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def scatter_(self, dim: _int, index: Tensor, src: Tensor, *, reduce: str) -> Tensor: ... |
|
|
@overload |
|
|
def scatter_(self, dim: _int, index: Tensor, value: Number, *, reduce: str) -> Tensor: ... |
|
|
@overload |
|
|
def scatter_(self, dim: _int, index: Tensor, value: Number) -> Tensor: ... |
|
|
@overload |
|
|
def scatter_add(self, dim: _int, index: Tensor, src: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def scatter_add(self, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor: ... |
|
|
def scatter_add_(self, dim: _int, index: Tensor, src: Tensor) -> Tensor: ... |
|
|
def scatter_reduce(self, dim: _int, index: Tensor, src: Tensor, reduce: str, *, include_self: _bool=True) -> Tensor: ... |
|
|
def scatter_reduce_(self, dim: _int, index: Tensor, src: Tensor, reduce: str, *, include_self: _bool=True) -> Tensor: ... |
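# The scatter family writes values from src (or a scalar value) into self at
# the positions selected by index along dim; the reduce argument chooses how
# colliding writes are combined.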
|
|
@overload |
|
|
def select(self, dim: _int, index: _int) -> Tensor: ... |
|
|
@overload |
|
|
def select(self, dim: Union[str, ellipsis, None], index: _int) -> Tensor: ... |
|
|
def select_scatter(self, src: Tensor, dim: _int, index: _int) -> Tensor: ... |
|
|
@overload |
|
|
def set_(self, storage: Union[Storage, TypedStorage], offset: _int, size: _size, stride: _size) -> Tensor: ... |
|
|
@overload |
|
|
def set_(self, storage: Union[Storage, TypedStorage]) -> Tensor: ... |
|
|
def sgn(self) -> Tensor: ... |
|
|
def sgn_(self) -> Tensor: ... |
|
|
def short(self) -> Tensor: ... |
|
|
def sigmoid(self) -> Tensor: ... |
|
|
def sigmoid_(self) -> Tensor: ... |
|
|
def sign(self) -> Tensor: ... |
|
|
def sign_(self) -> Tensor: ... |
|
|
def signbit(self) -> Tensor: ... |
|
|
def sin(self) -> Tensor: ... |
|
|
def sin_(self) -> Tensor: ... |
|
|
def sinc(self) -> Tensor: ... |
|
|
def sinc_(self) -> Tensor: ... |
|
|
def sinh(self) -> Tensor: ... |
|
|
def sinh_(self) -> Tensor: ... |
|
|
@overload |
|
|
def size(self) -> Size: ... |
|
|
@overload |
|
|
def size(self, dim: _int) -> _int: ... |
|
|
def slice_scatter(self, src: Tensor, dim: _int=0, start: Optional[Union[_int, SymInt]]=None, end: Optional[Union[_int, SymInt]]=None, step: Union[_int, SymInt]=1) -> Tensor: ... |
|
|
def slogdet(self) -> torch.return_types.slogdet: ... |
|
|
def smm(self, mat2: Tensor) -> Tensor: ... |
|
|
@overload |
|
|
def softmax(self, dim: _int, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
@overload |
|
|
def softmax(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
@overload |
|
|
def sort(self, *, stable: Optional[_bool], dim: _int=-1, descending: _bool=False) -> torch.return_types.sort: ... |
|
|
@overload |
|
|
def sort(self, dim: _int=-1, descending: _bool=False) -> torch.return_types.sort: ... |
|
|
@overload |
|
|
def sort(self, *, stable: Optional[_bool], dim: Union[str, ellipsis, None], descending: _bool=False) -> torch.return_types.sort: ... |
|
|
@overload |
|
|
def sort(self, dim: Union[str, ellipsis, None], descending: _bool=False) -> torch.return_types.sort: ... |
|
|
def sparse_dim(self) -> _int: ... |
|
|
def sparse_mask(self, mask: Tensor) -> Tensor: ... |
|
|
def sparse_resize_(self, size: _size, sparse_dim: _int, dense_dim: _int) -> Tensor: ... |
|
|
def sparse_resize_and_clear_(self, size: _size, sparse_dim: _int, dense_dim: _int) -> Tensor: ... |
|
|
@overload |
|
|
def split(self, split_size: _int, dim: _int=0) -> Sequence[Tensor]: ... |
|
|
@overload |
|
|
def split(self, split_size: Tuple[_int, ...], dim: _int=0) -> Sequence[Tensor]: ... |
|
|
def split_with_sizes(self, split_sizes: _size, dim: _int=0) -> List[Tensor]: ... |
|
|
def sqrt(self) -> Tensor: ... |
|
|
def sqrt_(self) -> Tensor: ... |
|
|
def square(self) -> Tensor: ... |
|
|
def square_(self) -> Tensor: ... |
|
|
@overload |
|
|
def squeeze(self) -> Tensor: ... |
|
|
@overload |
|
|
def squeeze(self, dim: _int) -> Tensor: ... |
|
|
@overload |
|
|
def squeeze(self, dim: Union[str, ellipsis, None]) -> Tensor: ... |
|
|
@overload |
|
|
def squeeze_(self) -> Tensor: ... |
|
|
@overload |
|
|
def squeeze_(self, dim: _int) -> Tensor: ... |
|
|
@overload |
|
|
def squeeze_(self, dim: Union[str, ellipsis, None]) -> Tensor: ... |
|
|
def sspaddmm(self, mat1: Tensor, mat2: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ... |
|
|
@overload |
|
|
def std(self, dim: Optional[Union[_int, _size]], *, correction: Optional[_int], keepdim: _bool=False) -> Tensor: ... |
|
|
@overload |
|
|
def std(self, dim: Optional[Union[_int, _size]], unbiased: _bool=True, keepdim: _bool=False) -> Tensor: ... |
|
|
@overload |
|
|
def std(self, unbiased: _bool=True) -> Tensor: ... |
|
|
@overload |
|
|
def std(self, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[_int], keepdim: _bool=False) -> Tensor: ... |
|
|
@overload |
|
|
def std(self, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool=True, keepdim: _bool=False) -> Tensor: ... |
|
|
def _storage(self) -> Storage: ... |
|
|
def storage_offset(self) -> _int: ... |
|
|
def storage_type(self) -> Storage: ... |
|
|
@overload |
|
|
def stride(self) -> Tuple[_int, ...]: ...
|
|
@overload |
|
|
def stride(self, dim: _int) -> _int: ...
|
|
def sub(self, other: Union[Tensor, Number, torch.SymIntNode, torch.SymFloatNode], *, alpha: Optional[Number]=1, out: Optional[Tensor]=None) -> Tensor: ... |
|
|
def sub_(self, other: Union[Tensor, Number, torch.SymIntNode, torch.SymFloatNode], *, alpha: Optional[Number]=1) -> Tensor: ... |
|
|
@overload |
|
|
def subtract(self, other: Tensor, *, alpha: Number=1) -> Tensor: ... |
|
|
@overload |
|
|
def subtract(self, other: Number, alpha: Number=1) -> Tensor: ... |
|
|
@overload |
|
|
def subtract_(self, other: Tensor, *, alpha: Number=1) -> Tensor: ... |
|
|
@overload |
|
|
def subtract_(self, other: Number, alpha: Number=1) -> Tensor: ... |
|
|
@overload |
|
|
def sum(self, *, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
@overload |
|
|
def sum(self, dim: Optional[Union[_int, _size]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
@overload |
|
|
def sum(self, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
@overload |
|
|
def sum_to_size(self, size: _size) -> Tensor: ... |
|
|
@overload |
|
|
def sum_to_size(self, *size: _int) -> Tensor: ... |
|
|
def svd(self, some: _bool=True, compute_uv: _bool=True) -> torch.return_types.svd: ... |
|
|
def swapaxes(self, axis0: _int, axis1: _int) -> Tensor: ... |
|
|
def swapaxes_(self, axis0: _int, axis1: _int) -> Tensor: ... |
|
|
def swapdims(self, dim0: _int, dim1: _int) -> Tensor: ... |
|
|
def swapdims_(self, dim0: _int, dim1: _int) -> Tensor: ... |
|
|
def symeig(self, eigenvectors: _bool=False, upper: _bool=True) -> torch.return_types.symeig: ... |
|
|
def t(self) -> Tensor: ... |
|
|
def t_(self) -> Tensor: ... |
|
|
def take(self, index: Tensor) -> Tensor: ... |
|
|
def take_along_dim(self, indices: Tensor, dim: Optional[_int]=None) -> Tensor: ... |
|
|
def tan(self) -> Tensor: ... |
|
|
def tan_(self) -> Tensor: ... |
|
|
def tanh(self) -> Tensor: ... |
|
|
def tanh_(self) -> Tensor: ... |
|
|
@overload |
|
|
def tensor_split(self, tensor_indices_or_sections: Tensor, dim: _int=0) -> List[Tensor]: ... |
|
|
@overload |
|
|
def tensor_split(self, sections: _int, dim: _int=0) -> List[Tensor]: ... |
|
|
@overload |
|
|
def tensor_split(self, indices: _size, dim: _int=0) -> List[Tensor]: ... |
|
|
@overload |
|
|
def tile(self, dims: _size) -> Tensor: ... |
|
|
@overload |
|
|
def tile(self, *dims: _int) -> Tensor: ... |
|
|
@overload |
|
|
def to(self, dtype: _dtype, non_blocking: _bool=False, copy: _bool=False) -> Tensor: ... |
|
|
@overload |
|
|
def to(self, device: Optional[Union[_device, str]]=None, dtype: Optional[_dtype]=None, non_blocking: _bool=False, copy: _bool=False) -> Tensor: ... |
|
|
@overload |
|
|
def to(self, other: Tensor, non_blocking: _bool=False, copy: _bool=False) -> Tensor: ... |
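# to() is overloaded three ways: convert to a dtype, move to a device
# (optionally converting dtype as well), or match another tensor's device
# and dtype.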
|
|
def to_dense(self, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
def to_mkldnn(self, dtype: Optional[_dtype]=None) -> Tensor: ... |
|
|
def to_padded_tensor(self, padding: _float, output_size: Optional[_size]=None) -> Tensor: ... |
|
|
@overload |
|
|
def to_sparse(self) -> Tensor: ... |
|
|
@overload |
|
|
def to_sparse(self, sparse_dim: _int) -> Tensor: ... |
|
|
@overload |
|
|
def to_sparse_bsc(self, blocksize: Union[_int, _size]) -> Tensor: ... |
|
|
@overload |
|
|
def to_sparse_bsc(self, *blocksize: _int) -> Tensor: ... |
|
|
@overload |
|
|
def to_sparse_bsr(self, blocksize: Union[_int, _size]) -> Tensor: ... |
|
|
@overload |
|
|
def to_sparse_bsr(self, *blocksize: _int) -> Tensor: ... |
|
|
def to_sparse_csc(self) -> Tensor: ... |
|
|
def to_sparse_csr(self) -> Tensor: ... |
|
|
def tolist(self) -> List: ... |
|
|
def topk(self, k: _int, dim: _int=-1, largest: _bool=True, sorted: _bool=True) -> torch.return_types.topk: ... |
|
|
def trace(self) -> Tensor: ... |
|
|
@overload |
|
|
def transpose(self, dim0: _int, dim1: _int) -> Tensor: ... |
|
|
@overload |
|
|
def transpose(self, dim0: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None]) -> Tensor: ... |
|
|
def transpose_(self, dim0: _int, dim1: _int) -> Tensor: ... |
|
|
def triangular_solve(self, A: Tensor, upper: _bool=True, transpose: _bool=False, unitriangular: _bool=False) -> torch.return_types.triangular_solve: ... |
|
|
def tril(self, diagonal: _int=0) -> Tensor: ... |
|
|
def tril_(self, diagonal: _int=0) -> Tensor: ... |
|
|
def triu(self, diagonal: _int=0) -> Tensor: ... |
|
|
def triu_(self, diagonal: _int=0) -> Tensor: ... |
|
|
def true_divide(self, other: Union[Tensor, Number, torch.SymIntNode, torch.SymFloatNode], *, out: Optional[Tensor]=None) -> Tensor: ... |
|
|
def true_divide_(self, other: Union[Tensor, Number, torch.SymIntNode, torch.SymFloatNode]) -> Tensor: ... |
|
|
def trunc(self) -> Tensor: ... |
|
|
def trunc_(self) -> Tensor: ... |
|
|
@overload |
|
|
def type(self, dtype: None=None, non_blocking: _bool=False) -> str: ... |
|
|
@overload |
|
|
def type(self, dtype: Union[str, _dtype], non_blocking: _bool=False) -> Tensor: ... |

def type_as(self, other: Tensor) -> Tensor: ...

@overload
def unbind(self, dim: _int=0) -> List[Tensor]: ...
@overload
def unbind(self, dim: Union[str, ellipsis, None]) -> List[Tensor]: ...

@overload
def unflatten(self, dim: _int, sizes: _size) -> Tensor: ...
@overload
def unflatten(self, dim: Union[str, ellipsis, None], sizes: _size, names: Sequence[Union[str, ellipsis, None]]) -> Tensor: ...

def unfold(self, dimension: _int, size: _int, step: _int) -> Tensor: ...
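
# `unfold` extracts sliding windows along one dimension:
#   torch.arange(5).unfold(0, 2, 1)
#   # -> tensor([[0, 1], [1, 2], [2, 3], [3, 4]])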

def uniform_(self, from_: _float=0, to: _float=1, *, generator: Optional[Generator]=None) -> Tensor: ...

def unsafe_chunk(self, chunks: _int, dim: _int=0) -> List[Tensor]: ...

def unsafe_split(self, split_size: _int, dim: _int=0) -> List[Tensor]: ...

def unsafe_split_with_sizes(self, split_sizes: _size, dim: _int=0) -> List[Tensor]: ...

def unsqueeze(self, dim: _int) -> Tensor: ...

def unsqueeze_(self, dim: _int) -> Tensor: ...

def values(self) -> Tensor: ...

@overload
def var(self, dim: Optional[Union[_int, _size]], *, correction: Optional[_int], keepdim: _bool=False) -> Tensor: ...
@overload
def var(self, dim: Optional[Union[_int, _size]], unbiased: _bool=True, keepdim: _bool=False) -> Tensor: ...
@overload
def var(self, unbiased: _bool=True) -> Tensor: ...
@overload
def var(self, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[_int], keepdim: _bool=False) -> Tensor: ...
@overload
def var(self, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool=True, keepdim: _bool=False) -> Tensor: ...
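
# `correction` generalizes `unbiased`: unbiased=True is correction=1
# (Bessel's correction) and unbiased=False is correction=0, e.g.:
#   x.var(dim=0, correction=0)  # population variance
#   x.var(dim=0)                # sample variance (correction=1)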

def vdot(self, other: Tensor) -> Tensor: ...

@overload
def view(self, dtype: _dtype) -> Tensor: ...
@overload
def view(self, size: Sequence[Union[_int, SymInt]]) -> Tensor: ...
@overload
def view(self, *size: _int) -> Tensor: ...
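
# `view` is overloaded for reshaping and for reinterpreting storage:
#   torch.arange(6).view(2, 3)        # shape overloads
#   torch.zeros(4).view(torch.int32)  # dtype overload: same bytes, new dtype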

def view_as(self, other: Tensor) -> Tensor: ...

@overload
def vsplit(self, sections: _int) -> List[Tensor]: ...
@overload
def vsplit(self, indices: _size) -> List[Tensor]: ...
@overload
def vsplit(self, *indices: _int) -> List[Tensor]: ...

def where(self, condition: Tensor, other: Tensor) -> Tensor: ...

@overload
def xlogy(self, other: Tensor) -> Tensor: ...
@overload
def xlogy(self, other: Number) -> Tensor: ...

@overload
def xlogy_(self, other: Tensor) -> Tensor: ...
@overload
def xlogy_(self, other: Number) -> Tensor: ...

def zero_(self) -> Tensor: ...
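
# --- End of the Tensor method stubs; module-level bindings follow. ---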

def _multiprocessing_init() -> None: ...
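
# _multiprocessing_init above is the internal hook run by torch.multiprocessing
# at import time.  The _cuda_* functions below are the private bindings behind
# the public torch.cuda API; for example, torch.cuda.synchronize,
# torch.cuda.empty_cache and torch.cuda.memory_stats are thin wrappers over
# _cuda_synchronize, _cuda_emptyCache and _cuda_memoryStats.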

def _cuda_getCurrentStream(device: _int) -> _int: ...

def _cuda_getCurrentRawStream(device: _int) -> _int: ...

def _cuda_getDefaultStream(device: _int) -> _int: ...

def _cuda_getCurrentBlasHandle() -> _int: ...

def _cuda_setDevice(device: _int) -> None: ...

def _cuda_getDevice() -> _int: ...

def _cuda_getDeviceCount() -> _int: ...

def _cuda_set_sync_debug_mode(warn_level: Union[_int, str]) -> None: ...

def _cuda_get_sync_debug_mode() -> _int: ...

def _cuda_sleep(cycles: _int) -> None: ...

def _cuda_synchronize() -> None: ...

def _cuda_ipc_collect() -> None: ...

def _cuda_getArchFlags() -> Optional[str]: ...

def _cuda_init() -> None: ...

def _cuda_setStream(cuda_stream: _int) -> None: ...

def _cuda_getCompiledVersion() -> _int: ...

def _cuda_cudaHostAllocator() -> _int: ...

def _cuda_cudaCachingAllocator_raw_alloc(size: _int, cuda_stream: _int) -> _int: ...

def _cuda_cudaCachingAllocator_raw_delete(ptr: _int) -> None: ...

def _cuda_cudaCachingAllocator_set_allocator_settings(env: str) -> None: ...

def _cuda_setMemoryFraction(fraction: _float, device: _int) -> None: ...

def _cuda_emptyCache() -> None: ...

def _cuda_memoryStats(device: _int) -> Dict[str, Any]: ...

def _cuda_resetAccumulatedMemoryStats(device: _int) -> None: ...

def _cuda_resetPeakMemoryStats(device: _int) -> None: ...

def _cuda_memorySnapshot() -> List[Dict[str, Any]]: ...

def _cuda_recordMemoryHistory(enabled: _bool) -> None: ...

def _cuda_lock_mutex() -> None: ...

def _cuda_unlock_mutex() -> None: ...

def _cuda_canDeviceAccessPeer(device: _int, peer_device: _int) -> _bool: ...

def _cuda_jiterator_compile_and_launch_kernel(code_string: str,
                                              kernel_name: str,
                                              return_by_ref: _bool,
                                              num_outputs: _int,
                                              tensors: Tuple,
                                              kwargs: Dict[str, Union[_int, _float, _bool]]) -> Tensor: ...

def _cuda_get_cudnn_benchmark_limit() -> _int: ...

def _cuda_set_cudnn_benchmark_limit(arg: _int) -> None: ...

def _nccl_version() -> _int: ...

def _nccl_unique_id() -> bytes: ...

def _nccl_init_rank(nranks: _int, comm_id: bytes, rank: _int) -> object: ...

def _nccl_reduce(input: Sequence[Tensor],
                 output: Tensor,
                 root: _int,
                 op: _int,
                 streams: Optional[Sequence[_CudaStreamBase]],
                 comms: Optional[Sequence[object]]) -> None: ...

def _nccl_all_reduce(input: Sequence[Tensor],
                     output: Sequence[Tensor],
                     op: _int,
                     streams: Optional[Sequence[_CudaStreamBase]],
                     comms: Optional[Sequence[object]]) -> None: ...

def _nccl_broadcast(input: Sequence[Tensor],
                    root: _int,
                    streams: Optional[Sequence[_CudaStreamBase]],
                    comms: Optional[Sequence[object]]) -> None: ...

def _nccl_all_gather(input: Sequence[Tensor],
                     output: Sequence[Tensor],
                     streams: Optional[Sequence[_CudaStreamBase]],
                     comms: Optional[Sequence[object]]) -> None: ...

def _nccl_reduce_scatter(input: Sequence[Tensor],
                         output: Sequence[Tensor],
                         op: _int,
                         streams: Optional[Sequence[_CudaStreamBase]],
                         comms: Optional[Sequence[object]]) -> None: ...
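
# The _nccl_* bindings above back the thin wrappers in torch.cuda.nccl, e.g.
# (inputs must be same-shaped tensors, one per device):
#   import torch.cuda.nccl as nccl
#   nccl.all_reduce(inputs)  # in-place all-reduce across the inputs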

def _rocm_is_backward_pass() -> _bool: ...

class _CudaDeviceProperties:
    name: str
    major: _int
    minor: _int
    multi_processor_count: _int
    total_memory: _int
    is_integrated: _int
    is_multi_gpu_board: _int

def _broadcast(tensor: Tensor, devices: List[_int]) -> List[Tensor]: ...

def _broadcast_out(tensor: Tensor, out_tensors: List[Tensor]) -> List[Tensor]: ...

def _broadcast_coalesced(
    tensors: List[Tensor],
    devices: List[_int],
    buffer_size: _int
) -> List[List[Tensor]]: ...

def _scatter(tensor: Tensor, devices: List[_int], chunk_sizes: Optional[List[_int]], dim: _int, streams: Optional[List[Stream]]) -> List[Tensor]: ...

def _scatter_out(tensor: Tensor, out_tensors: List[Tensor], dim: _int, streams: Optional[List[Stream]]) -> List[Tensor]: ...

def _gather(tensors: List[Tensor], dim: _int, destination_index: Optional[_int]) -> Tensor: ...

def _gather_out(tensors: List[Tensor], out_tensor: Tensor, dim: _int) -> Tensor: ...
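
# The _broadcast*/_scatter*/_gather* helpers above implement torch.cuda.comm
# (broadcast/scatter/gather), which torch.nn.DataParallel builds on:
#   import torch.cuda.comm as comm
#   replicas = comm.broadcast(t, devices=[0, 1])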

class _CudaStreamBase:
    _cdata: _int
    device: _device
    cuda_stream: _int
    priority: _int

    def __new__(cls, priority: _int = 0, _cdata: _int = 0, stream_ptr: _int = 0) -> _CudaStreamBase: ...
    def query(self) -> _bool: ...
    def synchronize(self) -> None: ...
    def priority_range(self) -> Tuple[_int, _int]: ...
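
# _CudaStreamBase is the C-level base of the public torch.cuda.Stream:
#   s = torch.cuda.Stream()
#   with torch.cuda.stream(s):
#       ...  # kernels launched here are queued on s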

class _CudaEventBase:
    device: _device
    cuda_event: _int

    def __new__(cls, enable_timing: _bool = False, blocking: _bool = False, interprocess: _bool = False) -> _CudaEventBase: ...
    @classmethod
    def from_ipc_handle(cls, device: _device, ipc_handle: bytes) -> _CudaEventBase: ...
    def record(self, stream: _CudaStreamBase) -> None: ...
    def wait(self, stream: _CudaStreamBase) -> None: ...
    def query(self) -> _bool: ...
    def elapsed_time(self, other: _CudaEventBase) -> _float: ...
    def synchronize(self) -> None: ...
    def ipc_handle(self) -> bytes: ...
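
# _CudaEventBase is the base of torch.cuda.Event; the usual timing pattern
# (work() stands in for any queued GPU work):
#   start = torch.cuda.Event(enable_timing=True)
#   end = torch.cuda.Event(enable_timing=True)
#   start.record(); work(); end.record()
#   torch.cuda.synchronize()
#   elapsed_ms = start.elapsed_time(end)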

class _CUDAGraph:
    def capture_begin(self,
                      pool: Optional[Tuple[_int, _int]]=...) -> None: ...
    def capture_end(self) -> None: ...
    def replay(self) -> None: ...
    def reset(self) -> None: ...
    def pool(self) -> Tuple[_int, _int]: ...

def _cuda_isCurrentStreamCapturing() -> _bool: ...

def _graph_pool_handle() -> Tuple[_int, _int]: ...
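
# _CUDAGraph and _graph_pool_handle back torch.cuda.CUDAGraph and the
# torch.cuda.graph context manager (model/static_in are placeholders):
#   g = torch.cuda.CUDAGraph()
#   with torch.cuda.graph(g):
#       static_out = model(static_in)  # captured, not eagerly executed
#   g.replay()                         # re-runs the captured kernels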

def _set_worker_signal_handlers(*arg: Any) -> None: ...

def _set_worker_pids(key: _int, child_pids: Tuple[_int, ...]) -> None: ...

def _remove_worker_pids(loader_id: _int) -> None: ...

def _error_if_any_worker_fails() -> None: ...
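
# The four functions above are internal DataLoader worker-management hooks
# used by torch.utils.data (worker signal handling and pid bookkeeping).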

class TracingState:
    def push_scope(self, scope_name: str) -> None: ...
    def pop_scope(self) -> None: ...
    def current_scope(self) -> str: ...
    def set_graph(self, graph: Graph) -> None: ...
    def graph(self) -> Graph: ...

def _create_graph_by_tracing(
    func: Callable[..., Any],
    inputs: Any,
    var_name_lookup_fn: Callable[[Tensor], str],
    strict: Any,
    force_outplace: Any,
    self: Any = None,
    argument_names: List[str] = ...
) -> Tuple[Graph, Stack]: ...

def _tracer_warn_use_python() -> None: ...

def _get_tracing_state() -> TracingState: ...
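
# Tracing entry points used by torch.jit.trace: _create_graph_by_tracing runs
# the traced function under the tracer and returns the recorded Graph together
# with the output Stack.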

class IValue:
    ...

Stack = List[IValue]

class JitType:
    annotation_str: str
    def isSubtypeOf(self, other: JitType) -> _bool: ...
    def with_dtype(self, dtype: _dtype) -> JitType: ...
    def with_sizes(self, sizes: List[Optional[_int]]) -> JitType: ...
    def kind(self) -> str: ...
    def scalarType(self) -> Optional[str]: ...

class InferredType:
    def __init__(self, arg: Union[JitType, str]) -> None: ...
    def type(self) -> JitType: ...
    def success(self) -> _bool: ...
    def reason(self) -> str: ...

R = TypeVar('R', bound=JitType)

class AnyType(JitType):
    @staticmethod
    def get() -> AnyType: ...

class NoneType(JitType):
    @staticmethod
    def get() -> NoneType: ...

class BoolType(JitType):
    @staticmethod
    def get() -> BoolType: ...

class FloatType(JitType):
    @staticmethod
    def get() -> FloatType: ...

class ComplexType(JitType):
    @staticmethod
    def get() -> ComplexType: ...

class IntType(JitType):
    @staticmethod
    def get() -> IntType: ...

class NumberType(JitType):
    @staticmethod
    def get() -> NumberType: ...

class StringType(JitType):
    @staticmethod
    def get() -> StringType: ...

class DeviceObjType(JitType):
    @staticmethod
    def get() -> DeviceObjType: ...

class StreamObjType(JitType):
    @staticmethod
    def get() -> StreamObjType: ...

class ListType(JitType):
    def __init__(self, a: JitType) -> None: ...
    def getElementType(self) -> JitType: ...

    @staticmethod
    def ofInts() -> ListType: ...
    @staticmethod
    def ofTensors() -> ListType: ...
    @staticmethod
    def ofFloats() -> ListType: ...
    @staticmethod
    def ofComplexDoubles() -> ListType: ...
    @staticmethod
    def ofBools() -> ListType: ...
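
# JIT types can be built directly from Python, e.g.:
#   torch._C.ListType(torch._C.IntType.get())  # List[int]
#   torch._C.ListType.ofInts()                 # the same, via the factory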

class DictType(JitType):
    def __init__(self, key: JitType, value: JitType) -> None: ...
    def getKeyType(self) -> JitType: ...
    def getValueType(self) -> JitType: ...

class TupleType(JitType):
    def __init__(self, a: List[Optional[JitType]]) -> None: ...
    def elements(self) -> List[JitType]: ...

class UnionType(JitType):
    def __init__(self, a: List[JitType]) -> None: ...

class ClassType(JitType):
    def __init__(self, qualified_name: str) -> None: ...

class InterfaceType(JitType):
    def __init__(self, qualified_name: str) -> None: ...
    def getMethod(self, name: str) -> Optional[FunctionSchema]: ...
    def getMethodNames(self) -> List[str]: ...

class OptionalType(JitType, Generic[R]):
    def __init__(self, a: JitType) -> None: ...
    def getElementType(self) -> JitType: ...

    @staticmethod
    def ofTensor() -> OptionalType: ...

class FutureType(JitType):
    def __init__(self, a: JitType) -> None: ...
    def getElementType(self) -> JitType: ...

class RRefType(JitType):
    def __init__(self, a: JitType) -> None: ...

class EnumType(JitType):
    def __init__(
        self,
        qualified_name: str,
        value_type: JitType,
        enum_names_values: List[Any]
    ) -> None: ...

class TensorType(JitType):
    @classmethod
    def get(cls) -> TensorType: ...
    @classmethod
    def getInferred(cls) -> TensorType: ...
    def with_sizes(self, other: Optional[List[Optional[_int]]]) -> TensorType: ...
    def sizes(self) -> Optional[List[_int]]: ...
    def varyingSizes(self) -> Optional[List[Optional[_int]]]: ...
    def strides(self) -> Optional[List[_int]]: ...
    def device(self) -> Optional[_device]: ...
    def dim(self) -> _int: ...
    def dtype(self) -> Optional[_dtype]: ...

    @staticmethod
    def create_from_tensor(t: Tensor) -> TensorType: ...
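
# e.g. torch._C.TensorType.create_from_tensor(torch.zeros(2, 3)) yields a
# fully-specified type: sizes [2, 3], dtype torch.float32.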

class SourceRange:
    ...

class TreeView:
    ...

class Ident(TreeView):
    @property
    def name(self) -> str: ...

class ClassDef(TreeView):
    ...

class Def(TreeView):
    def name(self) -> Ident: ...

class Decl(TreeView):
    ...

def _rpc_init() -> _bool: ...

def _dist_autograd_init() -> _bool: ...

def _c10d_init() -> _bool: ...

def _faulty_agent_init() -> _bool: ...
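
# The *_init functions above are invoked by the torch.distributed submodules
# at import time to register their C++ bindings; each returns True on success.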

def _enable_minidumps(directory: str) -> None: ...

def _disable_minidumps() -> None: ...

def _enable_minidumps_on_exceptions() -> None: ...

def _register_py_class_for_device(device: str, cls: Any) -> None: ...

def _activate_cuda_trace() -> None: ...

# Raised when a CUDA allocation fails; surfaced as torch.cuda.OutOfMemoryError,
# which subclasses RuntimeError so existing `except RuntimeError` handlers
# still catch it.
class _OutOfMemoryError(RuntimeError): ...