# flake8: noqa: F401
r"""Quantized Modules.

This file is in the process of migration to `torch/ao/nn/quantized`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate file under the `torch/ao/nn/quantized/modules`,
while adding an import statement here.
"""

# Re-export from the new (post-migration) location so existing imports of
# this module keep working unchanged.
from torch.ao.nn.quantized.modules.normalization import (
    GroupNorm,
    InstanceNorm1d,
    InstanceNorm2d,
    InstanceNorm3d,
    LayerNorm,
)


# Explicit public API of this compatibility shim; `from ... import *`
# picks up exactly these re-exported names.
__all__ = [
    "LayerNorm",
    "GroupNorm",
    "InstanceNorm1d",
    "InstanceNorm2d",
    "InstanceNorm3d",
]