| |
| |
|
|
| from collections.abc import Sequence |
| from typing import Literal, overload |
|
|
| from torch import memory_format, Tensor |
| from torch.types import _bool, _device, _dtype, _int, _size |
|
|
| |
|
|
# Adaptive pooling stubs: `output_size` fixes the spatial size of the output
# regardless of the input's spatial size. The max-pool variants return a pair
# of tensors; the second is presumably the argmax indices (as in
# torch.nn.functional.adaptive_max_pool2d with return_indices=True) -- confirm.
def adaptive_avg_pool2d(input: Tensor, output_size: _int | _size) -> Tensor: ...
def adaptive_avg_pool3d(input: Tensor, output_size: _int | _size) -> Tensor: ...
def adaptive_max_pool2d(
    input: Tensor,
    output_size: _int | _size,
) -> tuple[Tensor, Tensor]: ...
def adaptive_max_pool3d(
    input: Tensor,
    output_size: _int | _size,
) -> tuple[Tensor, Tensor]: ...
# Fixed-kernel average pooling over 2D/3D spatial dims.
# NOTE(review): stride=None presumably means "defaults to kernel_size" and
# divisor_override replaces the averaging denominator when set, as in
# torch.nn.functional.avg_pool2d -- confirm against the native op.
def avg_pool2d(
    input: Tensor,
    kernel_size: _int | _size,
    stride: _int | _size | None = None,
    padding: _int | _size = 0,
    ceil_mode: bool = False,
    count_include_pad: bool = True,
    divisor_override: int | None = None,
) -> Tensor: ...
def avg_pool3d(
    input: Tensor,
    kernel_size: _int | _size,
    stride: _int | _size | None = None,
    padding: _int | _size = 0,
    ceil_mode: bool = False,
    count_include_pad: bool = True,
    divisor_override: int | None = None,
) -> Tensor: ...
# Binary cross-entropy loss with optional per-element rescaling `weight`.
# `reduction: str = ...` -- the default value is elided in this stub (the
# runtime default lives in the native implementation).
def binary_cross_entropy(
    input: Tensor,
    target: Tensor,
    weight: Tensor | None = None,
    reduction: str = ...,
) -> Tensor: ...
# Rearranges sliding-window columns back into an image tensor (the inverse
# of im2col / unfold; backs torch.nn.Fold).
def col2im(
    input: Tensor,
    output_size: _int | _size,
    kernel_size: _int | _size,
    dilation: _int | _size,
    stride: _int | _size | None = None,
    padding: _int | _size = 0,
) -> Tensor: ...
# Combined log-softmax + NLL loss. `ignore_index=-100` marks target entries
# excluded from the loss; `label_smoothing` in [0, 1] blends the one-hot
# target with a uniform distribution. `reduction` default elided in the stub.
def cross_entropy_loss(
    input: Tensor,
    target: Tensor,
    weight: Tensor | None = None,
    reduction: str = ...,
    ignore_index: int = -100,
    label_smoothing: float = 0.0,
) -> Tensor: ...
# Exponential Linear Unit activation. `elu_` is the in-place variant
# (PyTorch trailing-underscore convention); its alpha default is elided here.
def elu(
    input: Tensor,
    alpha: float = 1.0,
    scale: float = 1.0,
    input_scale: float = 1.0,
) -> Tensor: ...
def elu_(input: Tensor, alpha: float = ...) -> Tensor: ...
# Fractional max pooling; `_random_samples` supplies the random pooling-region
# offsets (callers pass pre-generated samples for determinism). Returns a pair
# of tensors -- the second is presumably the max indices; confirm.
def fractional_max_pool2d(
    input: Tensor,
    kernel_size: _int | _size,
    output_size: _int | _size,
    _random_samples: Tensor,
) -> tuple[Tensor, Tensor]: ...
def fractional_max_pool3d(
    input: Tensor,
    kernel_size: _int | _size,
    output_size: _int | _size,
    _random_samples: Tensor,
) -> tuple[Tensor, Tensor]: ...
# Element-wise activation stubs. Trailing-underscore names are the in-place
# variants (PyTorch convention). gelu's `approximate` default is elided here
# (runtime default is defined by the native op).
def gelu(input: Tensor, approximate: str = ...) -> Tensor: ...
def glu(input: Tensor, dim: int = -1) -> Tensor: ...
def hardsigmoid(input: Tensor, *, out: Tensor | None = None) -> Tensor: ...
def hardsigmoid_(input: Tensor) -> Tensor: ...
def hardswish(input: Tensor) -> Tensor: ...
def hardswish_(input: Tensor) -> Tensor: ...
# Clamp-style activation: clips input to [min_val, max_val]. Defaults are
# elided in the stub. `hardtanh_` mutates `input` in place (trailing
# underscore convention) and so takes no `out` parameter.
def hardtanh(
    input: Tensor,
    min_val: float = ...,
    max_val: float = ...,
    *,
    out: Tensor | None = None,
) -> Tensor: ...
def hardtanh_(
    input: Tensor,
    min_val: float = ...,
    max_val: float = ...,
) -> Tensor: ...
# Huber loss: quadratic for residuals below `delta`, linear above.
# `reduction` default elided in the stub.
def huber_loss(
    input: Tensor,
    target: Tensor,
    reduction: str = ...,
    delta: float = 1.0,
) -> Tensor: ...
# LeakyReLU activation; `negative_slope` default elided in the stub.
# `leaky_relu_` is the in-place variant.
def leaky_relu(
    input: Tensor,
    negative_slope: float = ...,
    *,
    out: Tensor | None = None,
) -> Tensor: ...
def leaky_relu_(input: Tensor, negative_slope: float = ...) -> Tensor: ...
# Affine transform: presumably input @ weight.T + bias (standard
# torch.nn.functional.linear contract) -- the stub shows only the signature.
def linear(
    input: Tensor,
    weight: Tensor,
    bias: Tensor | None = None,
) -> Tensor: ...
def log_sigmoid(input: Tensor) -> Tensor: ...
# Max pooling returning (values, indices) pairs; the indices tensor feeds
# max_unpool*. NOTE(review): stride=None presumably defaults to kernel_size
# as in torch.nn.functional.max_pool2d -- confirm.
def max_pool2d_with_indices(
    input: Tensor,
    kernel_size: _int | _size,
    stride: _int | _size | None = None,
    padding: _int | _size = 0,
    dilation: _int | _size = 1,
    ceil_mode: bool = False,
) -> tuple[Tensor, Tensor]: ...
def max_pool3d_with_indices(
    input: Tensor,
    kernel_size: _int | _size,
    stride: _int | _size | None = None,
    padding: _int | _size = 0,
    dilation: _int | _size = 1,
    ceil_mode: bool = False,
) -> tuple[Tensor, Tensor]: ...
# Partial inverse of max pooling: scatters `input` values to the positions
# recorded in `indices` (from max_pool*_with_indices). Note the asymmetry:
# only the 3D variant takes stride/padding, and output_size has no default
# in either -- callers must pass it (possibly None) positionally.
def max_unpool2d(
    input: Tensor,
    indices: Tensor,
    output_size: Sequence[int] | None,
) -> Tensor: ...
def max_unpool3d(
    input: Tensor,
    indices: Tensor,
    output_size: Sequence[int] | None,
    stride: _int | _size,
    padding: _int | _size,
) -> Tensor: ...
# one_hot: encode integer class indices as one-hot vectors; `num_classes`
# default elided (runtime presumably infers it from tensor.max() -- confirm).
def one_hot(tensor: Tensor, num_classes: int = ...) -> Tensor: ...
# pad: `pad` lists per-dimension (before, after) amounts, last dim first;
# `mode` default elided; `value` applies to constant padding.
def pad(
    input: Tensor,
    pad: Sequence[int],
    mode: str = ...,
    value: float | None = None,
) -> Tensor: ...
# Fused attention kernel entry point: softmax(QK^T * scale + mask) @ V.
# scale=None presumably means the default 1/sqrt(head_dim); `is_causal`
# applies a causal mask (mutually exclusive with attn_mask at runtime --
# confirm); `enable_gqa` permits grouped-query attention (fewer K/V heads
# than Q heads).
def scaled_dot_product_attention(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    attn_mask: Tensor | None = None,
    dropout_p: float = 0.0,
    is_causal: bool = False,
    scale: float | None = None,
    enable_gqa: bool = False,
) -> Tensor: ...
# softplus: smooth ReLU approximation; beta/threshold defaults elided
# (threshold presumably switches to the linear regime for large inputs).
def softplus(
    input: Tensor,
    beta: float = ...,
    threshold: float = ...,
) -> Tensor: ...
# softshrink: soft-thresholding activation; lambd default elided.
def softshrink(input: Tensor, lambd: float = ...) -> Tensor: ...
|
|
| |
# MKL-DNN (oneDNN) backed linear op. Unlike `linear` above, `bias` has no
# default here -- callers must pass it explicitly (possibly None).
def mkldnn_linear(input: Tensor, weight: Tensor, bias: Tensor | None) -> Tensor: ...
|
|
| |
# Reorder a conv weight tensor into the MKL-DNN blocked layout for the given
# conv configuration.
# NOTE(review): `dilatation` looks like a typo for `dilation`, but it must
# match the native op's keyword name -- verify against the C++ schema before
# renaming, since stubs constrain keyword callers.
# NOTE(review): the bare `list` annotations are unparameterized; presumably
# `list[int]` -- confirm before tightening.
def mkldnn_reorder_conv2d_weight(
    self: Tensor,
    padding: list,
    stride: list,
    dilatation: list,
    groups: int,
) -> Tensor: ...
def mkldnn_reorder_conv3d_weight(
    self: Tensor,
    padding: list,
    stride: list,
    dilatation: list,
    groups: int,
) -> Tensor: ...
|
|
| |
# MKL-DNN backed PReLU: `weight` holds the learnable negative-slope values.
def mkldnn_prelu(input: Tensor, weight: Tensor) -> Tensor: ...
|
|
| |
# Overloads for the three call shapes of Tensor.to()-style argument parsing:
# (device, dtype, ...), (dtype, ...), or (tensor, ...). All normalize to the
# same (device, dtype, non_blocking, memory_format) tuple.
@overload
def _parse_to(
    device: _device,
    dtype: _dtype,
    non_blocking: _bool,
    copy: _bool,
    *,
    memory_format: memory_format,
) -> tuple[_device, _dtype, _bool, memory_format]: ...
@overload
def _parse_to(
    dtype: _dtype,
    non_blocking: _bool,
    copy: _bool,
    *,
    memory_format: memory_format,
) -> tuple[_device, _dtype, _bool, memory_format]: ...
@overload
def _parse_to(
    tensor: Tensor,
    non_blocking: _bool,
    copy: _bool,
    *,
    memory_format: memory_format,
) -> tuple[_device, _dtype, _bool, memory_format]: ...
|
|
| |
# Stack variable-length sequences into one padded tensor. batch_first selects
# (B, T, ...) vs (T, B, ...) layout; padding_side chooses which end of each
# sequence receives the `padding_value` fill.
def pad_sequence(
    sequences: list[Tensor] | tuple[Tensor, ...],
    batch_first: bool = False,
    padding_value: float = 0.0,
    padding_side: Literal["left", "right"] = "right",
) -> Tensor: ...
|
|
| |
# Nearest-neighbor upsampling stubs (1D/2D/3D). Exactly one of output_size /
# scale_factors is presumably non-None at runtime -- the stub cannot express
# that constraint. The `_upsample_nearest_exact*` variants use the corrected
# "exact" coordinate mapping (see torch.nn.functional.interpolate's
# 'nearest-exact' mode).
def upsample_nearest1d(
    input: Tensor,
    output_size: Sequence[int] | None,
    scale_factors: Sequence[float] | None,
) -> Tensor: ...
def upsample_nearest2d(
    input: Tensor,
    output_size: Sequence[int] | None,
    scale_factors: Sequence[float] | None,
) -> Tensor: ...
def upsample_nearest3d(
    input: Tensor,
    output_size: Sequence[int] | None,
    scale_factors: Sequence[float] | None,
) -> Tensor: ...
def _upsample_nearest_exact1d(
    input: Tensor,
    output_size: Sequence[int] | None,
    scale_factors: Sequence[float] | None,
) -> Tensor: ...
def _upsample_nearest_exact2d(
    input: Tensor,
    output_size: Sequence[int] | None,
    scale_factors: Sequence[float] | None,
) -> Tensor: ...
def _upsample_nearest_exact3d(
    input: Tensor,
    output_size: Sequence[int] | None,
    scale_factors: Sequence[float] | None,
) -> Tensor: ...
# Interpolating upsampling stubs (linear/bilinear/trilinear/bicubic). All
# share the shape: target given by output_size OR scale_factors (one of the
# two, presumably), with `align_corners` selecting the corner-aligned
# coordinate mapping. The `_aa` variants apply anti-aliasing on downscale.
def upsample_linear1d(
    input: Tensor,
    output_size: Sequence[int] | None,
    align_corners: bool,
    scale_factors: Sequence[float] | None,
) -> Tensor: ...
def _upsample_bilinear2d_aa(
    input: Tensor,
    output_size: Sequence[int] | None,
    align_corners: bool,
    scale_factors: Sequence[float] | None,
) -> Tensor: ...
def upsample_bilinear2d(
    input: Tensor,
    output_size: Sequence[int] | None,
    align_corners: bool,
    scale_factors: Sequence[float] | None,
) -> Tensor: ...
def upsample_trilinear3d(
    input: Tensor,
    output_size: Sequence[int] | None,
    align_corners: bool,
    scale_factors: Sequence[float] | None,
) -> Tensor: ...
def _upsample_bicubic2d_aa(
    input: Tensor,
    output_size: Sequence[int] | None,
    align_corners: bool,
    scale_factors: Sequence[float] | None,
) -> Tensor: ...
def upsample_bicubic2d(
    input: Tensor,
    output_size: Sequence[int] | None,
    align_corners: bool,
    scale_factors: Sequence[float] | None,
) -> Tensor: ...
# Flatten a list of tensors into one contiguous 1D buffer, and split such a
# buffer back using the original tensors' shapes as the template (used for
# gradient bucketing in distributed/optimizer code).
def flatten_dense_tensors(tensors: list[Tensor]) -> Tensor: ...
def unflatten_dense_tensors(flat: Tensor, tensors: list[Tensor]) -> list[Tensor]: ...
|
|