# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please add it to the
`torch/ao/quantization/stubs.py`, while adding an import statement
here.
"""
from torch.ao.quantization.stubs import (
QuantStub,
DeQuantStub,
QuantWrapper
)