File size: 5,146 Bytes
59f1501
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
# @generated by tools/pyi/gen_pyi.py from torch/_C/_nn.pyi.in
# mypy: disable-error-code="type-arg"

from collections.abc import Sequence
from typing import Literal, overload

from torch import memory_format, Tensor
from torch.types import _bool, _device, _dtype, _int, _size

# Defined in tools/autograd/templates/python_nn_functions.cpp

# Stub: 2-D adaptive average pooling; output_size is a single int or a size sequence.
def adaptive_avg_pool2d(input: Tensor, output_size: _int | _size) -> Tensor: ...
# Stub: 3-D adaptive average pooling; output_size is a single int or a size sequence.
def adaptive_avg_pool3d(input: Tensor, output_size: _int | _size) -> Tensor: ...
def adaptive_max_pool2d(

    input: Tensor,

    output_size: _int | _size,

) -> tuple[Tensor, Tensor]: ...
def adaptive_max_pool3d(

    input: Tensor,

    output_size: _int | _size,

) -> tuple[Tensor, Tensor]: ...
def avg_pool2d(

    input: Tensor,

    kernel_size: _int | _size,

    stride: _int | _size | None = None,

    padding: _int | _size = 0,

    ceil_mode: bool = False,

    count_include_pad: bool = True,

    divisor_override: int | None = None,

) -> Tensor: ...
def avg_pool3d(

    input: Tensor,

    kernel_size: _int | _size,

    stride: _int | _size | None = None,

    padding: _int | _size = 0,

    ceil_mode: bool = False,

    count_include_pad: bool = True,

    divisor_override: int | None = None,

) -> Tensor: ...
def binary_cross_entropy(

    input: Tensor,

    target: Tensor,

    weight: Tensor | None = None,

    reduction: str = ...,

) -> Tensor: ...
def col2im(

    input: Tensor,

    output_size: _int | _size,

    kernel_size: _int | _size,

    dilation: _int | _size,

    stride: _int | _size | None = None,

    padding: _int | _size = 0,

) -> Tensor: ...
# Stub: in-place ELU; the trailing underscore marks the in-place variant,
# and the Ellipsis default for alpha is resolved by the C++ binding.
def elu_(input: Tensor, alpha: float = ...) -> Tensor: ...
def fractional_max_pool2d(

    input: Tensor,

    kernel_size: _int | _size,

    output_size: _int | _size,

    _random_samples: Tensor,

) -> tuple[Tensor, Tensor]: ...
def fractional_max_pool3d(

    input: Tensor,

    kernel_size: _int | _size,

    output_size: _int | _size,

    _random_samples: Tensor,

) -> tuple[Tensor, Tensor]: ...
# Stub: GELU activation; `approximate` selects the approximation mode
# (string-valued; default resolved by the C++ binding).
def gelu(input: Tensor, approximate: str = ...) -> Tensor: ...
# Stub: hard sigmoid activation; `out` is keyword-only and receives the result in place.
def hardsigmoid(input: Tensor, *, out: Tensor | None = None) -> Tensor: ...
def hardtanh(

    input: Tensor,

    min_val: float = ...,

    max_val: float = ...,

    *,

    out: Tensor | None = None,

) -> Tensor: ...
# Stub: in-place hardtanh (trailing underscore = in-place variant).
def hardtanh_(input: Tensor, min_val: float = ..., max_val: float = ...) -> Tensor: ...
def leaky_relu(

    input: Tensor,

    negative_slope: float = ...,

    *,

    out: Tensor | None = None,

) -> Tensor: ...
# Stub: in-place leaky ReLU (trailing underscore = in-place variant).
def leaky_relu_(input: Tensor, negative_slope: float = ...) -> Tensor: ...
def linear(

    input: Tensor,

    weight: Tensor,

    bias: Tensor | None = None,

) -> Tensor: ...
# Stub: elementwise log-sigmoid activation.
def log_sigmoid(input: Tensor) -> Tensor: ...
# Stub: one-hot encoding; num_classes default (Ellipsis) is resolved by the binding.
def one_hot(tensor: Tensor, num_classes: int = ...) -> Tensor: ...
def pad(

    input: Tensor,

    pad: Sequence[int],

    mode: str = ...,

    value: float | None = None,

) -> Tensor: ...
def scaled_dot_product_attention(

    query: Tensor,

    key: Tensor,

    value: Tensor,

    attn_mask: Tensor | None = None,

    dropout_p: float = 0.0,

    is_causal: bool = False,

    scale: float | None = None,

    enable_gqa: bool = False,

) -> Tensor: ...
# Stub: softplus activation; beta/threshold defaults resolved by the C++ binding.
def softplus(input: Tensor, beta: float = ..., threshold: float = ...) -> Tensor: ...
# Stub: soft shrinkage; lambd default (Ellipsis) is resolved by the binding.
def softshrink(input: Tensor, lambd: float = ...) -> Tensor: ...

# Defined in aten/src/ATen/native/mkldnn/Linear.cpp
# Stub: MKL-DNN (oneDNN) linear layer; note bias has no default here, unlike `linear`.
def mkldnn_linear(input: Tensor, weight: Tensor, bias: Tensor | None) -> Tensor: ...

# Defined at aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp
# Stub: reorders a conv2d weight tensor into MKL-DNN's blocked layout.
# NOTE(review): the 'dilatation' spelling mirrors the native binding's
# parameter name — do not rename here without changing the C++ side.
def mkldnn_reorder_conv2d_weight(
    self: Tensor,
    padding: list,
    stride: list,
    dilatation: list,
    groups: int,
) -> Tensor: ...
# Stub: reorders a conv3d weight tensor into MKL-DNN's blocked layout.
# NOTE(review): 'dilatation' spelling matches the native binding — keep as-is.
def mkldnn_reorder_conv3d_weight(
    self: Tensor,
    padding: list,
    stride: list,
    dilatation: list,
    groups: int,
) -> Tensor: ...

# Defined in aten/src/ATen/native/mkldnn/Prelu.cpp
# Stub: MKL-DNN (oneDNN) PReLU with a learnable per-channel weight tensor.
def mkldnn_prelu(input: Tensor, weight: Tensor) -> Tensor: ...

# Defined at tools/autograd/templates/python_nn_functions.cpp
# _parse_to mirrors the argument parsing used by Tensor.to: the three
# overloads cover the (device, dtype, ...), (dtype, ...), and (tensor, ...)
# calling conventions; all return the normalized
# (device, dtype, non_blocking, memory_format) tuple.
@overload
def _parse_to(

    device: _device,

    dtype: _dtype,

    non_blocking: _bool,

    copy: _bool,

    *,

    memory_format: memory_format,

) -> tuple[_device, _dtype, _bool, memory_format]: ...
# Overload: dtype-first form (no explicit device).
@overload
def _parse_to(

    dtype: _dtype,

    non_blocking: _bool,

    copy: _bool,

    *,

    memory_format: memory_format,

) -> tuple[_device, _dtype, _bool, memory_format]: ...
# Overload: tensor-first form — target device/dtype taken from `tensor`.
@overload
def _parse_to(

    tensor: Tensor,

    non_blocking: _bool,

    copy: _bool,

    *,

    memory_format: memory_format,

) -> tuple[_device, _dtype, _bool, memory_format]: ...

# Defined in aten/src/ATen/native/PackedSequence.cpp
def pad_sequence(

    sequences: list[Tensor] | tuple[Tensor, ...],

    batch_first: bool = False,

    padding_value: float = 0.0,

    padding_side: Literal["left", "right"] = "right",

) -> Tensor: ...
# Stub: flattens a list of dense tensors into one contiguous 1-D tensor.
def flatten_dense_tensors(tensors: list[Tensor]) -> Tensor: ...
# Stub: inverse of flatten_dense_tensors — splits `flat` back into views
# shaped like the tensors in `tensors`.
def unflatten_dense_tensors(flat: Tensor, tensors: list[Tensor]) -> list[Tensor]: ...