diff --git a/deepseek/lib/python3.10/site-packages/numpy/array_api/__pycache__/__init__.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/numpy/array_api/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31ae0290ea8550703d96dac43189bcc42b83158a Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/numpy/array_api/__pycache__/__init__.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/numpy/array_api/__pycache__/_constants.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/numpy/array_api/__pycache__/_constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd1ee6e7459f3826df8fe52e4b4ec521ef68f750 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/numpy/array_api/__pycache__/_constants.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/numpy/doc/__init__.py b/deepseek/lib/python3.10/site-packages/numpy/doc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8a944fecd865487e489ecefb90700f5eed38cd44 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/numpy/doc/__init__.py @@ -0,0 +1,26 @@ +import os + +ref_dir = os.path.join(os.path.dirname(__file__)) + +__all__ = sorted(f[:-3] for f in os.listdir(ref_dir) if f.endswith('.py') and + not f.startswith('__')) + +for f in __all__: + __import__(__name__ + '.' 
+ f) + +del f, ref_dir + +__doc__ = """\ +Topical documentation +===================== + +The following topics are available: +%s + +You can view them by + +>>> help(np.doc.TOPIC) #doctest: +SKIP + +""" % '\n- '.join([''] + __all__) + +__all__.extend(['__doc__']) diff --git a/deepseek/lib/python3.10/site-packages/numpy/doc/__pycache__/__init__.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/numpy/doc/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1de89845f8484a8f30c06dbd5699d8ace8bde265 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/numpy/doc/__pycache__/__init__.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/numpy/doc/__pycache__/constants.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/numpy/doc/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40caa0de93f834c3b88a6148d22fe02754df4d52 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/numpy/doc/__pycache__/constants.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/numpy/doc/__pycache__/ufuncs.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/numpy/doc/__pycache__/ufuncs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0d5eea5b0a9ab237c5986bbd1de60abb297aa6a Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/numpy/doc/__pycache__/ufuncs.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/numpy/random/LICENSE.md b/deepseek/lib/python3.10/site-packages/numpy/random/LICENSE.md new file mode 100644 index 0000000000000000000000000000000000000000..a6cf1b17e99725556ac56ce3661498df1ee2276a --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/numpy/random/LICENSE.md @@ -0,0 +1,71 @@ +**This software is dual-licensed under the The University of Illinois/NCSA +Open Source License (NCSA) and The 3-Clause BSD License** + +# 
NCSA Open Source License +**Copyright (c) 2019 Kevin Sheppard. All rights reserved.** + +Developed by: Kevin Sheppard (, +) +[http://www.kevinsheppard.com](http://www.kevinsheppard.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal with +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimers. + +Redistributions in binary form must reproduce the above copyright notice, this +list of conditions and the following disclaimers in the documentation and/or +other materials provided with the distribution. + +Neither the names of Kevin Sheppard, nor the names of any contributors may be +used to endorse or promote products derived from this Software without specific +prior written permission. + +**THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH +THE SOFTWARE.** + + +# 3-Clause BSD License +**Copyright (c) 2019 Kevin Sheppard. All rights reserved.** + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. 
Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +**THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE.** + +# Components + +Many parts of this module have been derived from original sources, +often the algorithm's designer. Component licenses are located with +the component code. diff --git a/deepseek/lib/python3.10/site-packages/numpy/random/__init__.py b/deepseek/lib/python3.10/site-packages/numpy/random/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2e8f99fe3045b9c2b691a8ece67d0f06d9d73b08 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/numpy/random/__init__.py @@ -0,0 +1,215 @@ +""" +======================== +Random Number Generation +======================== + +Use ``default_rng()`` to create a `Generator` and call its methods. 
+ +=============== ========================================================= +Generator +--------------- --------------------------------------------------------- +Generator Class implementing all of the random number distributions +default_rng Default constructor for ``Generator`` +=============== ========================================================= + +============================================= === +BitGenerator Streams that work with Generator +--------------------------------------------- --- +MT19937 +PCG64 +PCG64DXSM +Philox +SFC64 +============================================= === + +============================================= === +Getting entropy to initialize a BitGenerator +--------------------------------------------- --- +SeedSequence +============================================= === + + +Legacy +------ + +For backwards compatibility with previous versions of numpy before 1.17, the +various aliases to the global `RandomState` methods are left alone and do not +use the new `Generator` API. + +==================== ========================================================= +Utility functions +-------------------- --------------------------------------------------------- +random Uniformly distributed floats over ``[0, 1)`` +bytes Uniformly distributed random bytes. +permutation Randomly permute a sequence / generate a random sequence. +shuffle Randomly permute a sequence in place. +choice Random sample from 1-D array. +==================== ========================================================= + +==================== ========================================================= +Compatibility +functions - removed +in the new API +-------------------- --------------------------------------------------------- +rand Uniformly distributed values. +randn Normally distributed values. +ranf Uniformly distributed floating point numbers. +random_integers Uniformly distributed integers in a given range. 
+ (deprecated, use ``integers(..., closed=True)`` instead) +random_sample Alias for `random_sample` +randint Uniformly distributed integers in a given range +seed Seed the legacy random number generator. +==================== ========================================================= + +==================== ========================================================= +Univariate +distributions +-------------------- --------------------------------------------------------- +beta Beta distribution over ``[0, 1]``. +binomial Binomial distribution. +chisquare :math:`\\chi^2` distribution. +exponential Exponential distribution. +f F (Fisher-Snedecor) distribution. +gamma Gamma distribution. +geometric Geometric distribution. +gumbel Gumbel distribution. +hypergeometric Hypergeometric distribution. +laplace Laplace distribution. +logistic Logistic distribution. +lognormal Log-normal distribution. +logseries Logarithmic series distribution. +negative_binomial Negative binomial distribution. +noncentral_chisquare Non-central chi-square distribution. +noncentral_f Non-central F distribution. +normal Normal / Gaussian distribution. +pareto Pareto distribution. +poisson Poisson distribution. +power Power distribution. +rayleigh Rayleigh distribution. +triangular Triangular distribution. +uniform Uniform distribution. +vonmises Von Mises circular distribution. +wald Wald (inverse Gaussian) distribution. +weibull Weibull distribution. +zipf Zipf's distribution over ranked data. +==================== ========================================================= + +==================== ========================================================== +Multivariate +distributions +-------------------- ---------------------------------------------------------- +dirichlet Multivariate generalization of Beta distribution. +multinomial Multivariate generalization of the binomial distribution. +multivariate_normal Multivariate generalization of the normal distribution. 
+==================== ========================================================== + +==================== ========================================================= +Standard +distributions +-------------------- --------------------------------------------------------- +standard_cauchy Standard Cauchy-Lorentz distribution. +standard_exponential Standard exponential distribution. +standard_gamma Standard Gamma distribution. +standard_normal Standard normal distribution. +standard_t Standard Student's t-distribution. +==================== ========================================================= + +==================== ========================================================= +Internal functions +-------------------- --------------------------------------------------------- +get_state Get tuple representing internal state of generator. +set_state Set state of generator. +==================== ========================================================= + + +""" +__all__ = [ + 'beta', + 'binomial', + 'bytes', + 'chisquare', + 'choice', + 'dirichlet', + 'exponential', + 'f', + 'gamma', + 'geometric', + 'get_state', + 'gumbel', + 'hypergeometric', + 'laplace', + 'logistic', + 'lognormal', + 'logseries', + 'multinomial', + 'multivariate_normal', + 'negative_binomial', + 'noncentral_chisquare', + 'noncentral_f', + 'normal', + 'pareto', + 'permutation', + 'poisson', + 'power', + 'rand', + 'randint', + 'randn', + 'random', + 'random_integers', + 'random_sample', + 'ranf', + 'rayleigh', + 'sample', + 'seed', + 'set_state', + 'shuffle', + 'standard_cauchy', + 'standard_exponential', + 'standard_gamma', + 'standard_normal', + 'standard_t', + 'triangular', + 'uniform', + 'vonmises', + 'wald', + 'weibull', + 'zipf', +] + +# add these for module-freeze analysis (like PyInstaller) +from . import _pickle +from . import _common +from . 
import _bounded_integers + +from ._generator import Generator, default_rng +from .bit_generator import SeedSequence, BitGenerator +from ._mt19937 import MT19937 +from ._pcg64 import PCG64, PCG64DXSM +from ._philox import Philox +from ._sfc64 import SFC64 +from .mtrand import * + +__all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937', + 'Philox', 'PCG64', 'PCG64DXSM', 'SFC64', 'default_rng', + 'BitGenerator'] + + +def __RandomState_ctor(): + """Return a RandomState instance. + + This function exists solely to assist (un)pickling. + + Note that the state of the RandomState returned here is irrelevant, as this + function's entire purpose is to return a newly allocated RandomState whose + state pickle can set. Consequently the RandomState returned by this function + is a freshly allocated copy with a seed=0. + + See https://github.com/numpy/numpy/issues/4763 for a detailed discussion + + """ + return RandomState(seed=0) + + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/deepseek/lib/python3.10/site-packages/numpy/random/__init__.pyi b/deepseek/lib/python3.10/site-packages/numpy/random/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..99ef6f3e2f2a0e45db86589e73c3c8bb36b18ea2 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/numpy/random/__init__.pyi @@ -0,0 +1,72 @@ +from numpy._pytesttester import PytestTester + +from numpy.random._generator import Generator as Generator +from numpy.random._generator import default_rng as default_rng +from numpy.random._mt19937 import MT19937 as MT19937 +from numpy.random._pcg64 import ( + PCG64 as PCG64, + PCG64DXSM as PCG64DXSM, +) +from numpy.random._philox import Philox as Philox +from numpy.random._sfc64 import SFC64 as SFC64 +from numpy.random.bit_generator import BitGenerator as BitGenerator +from numpy.random.bit_generator import SeedSequence as SeedSequence +from numpy.random.mtrand import ( + RandomState as 
RandomState, + beta as beta, + binomial as binomial, + bytes as bytes, + chisquare as chisquare, + choice as choice, + dirichlet as dirichlet, + exponential as exponential, + f as f, + gamma as gamma, + geometric as geometric, + get_bit_generator as get_bit_generator, + get_state as get_state, + gumbel as gumbel, + hypergeometric as hypergeometric, + laplace as laplace, + logistic as logistic, + lognormal as lognormal, + logseries as logseries, + multinomial as multinomial, + multivariate_normal as multivariate_normal, + negative_binomial as negative_binomial, + noncentral_chisquare as noncentral_chisquare, + noncentral_f as noncentral_f, + normal as normal, + pareto as pareto, + permutation as permutation, + poisson as poisson, + power as power, + rand as rand, + randint as randint, + randn as randn, + random as random, + random_integers as random_integers, + random_sample as random_sample, + ranf as ranf, + rayleigh as rayleigh, + sample as sample, + seed as seed, + set_bit_generator as set_bit_generator, + set_state as set_state, + shuffle as shuffle, + standard_cauchy as standard_cauchy, + standard_exponential as standard_exponential, + standard_gamma as standard_gamma, + standard_normal as standard_normal, + standard_t as standard_t, + triangular as triangular, + uniform as uniform, + vonmises as vonmises, + wald as wald, + weibull as weibull, + zipf as zipf, +) + +__all__: list[str] +__path__: list[str] +test: PytestTester diff --git a/deepseek/lib/python3.10/site-packages/numpy/random/_bounded_integers.pxd b/deepseek/lib/python3.10/site-packages/numpy/random/_bounded_integers.pxd new file mode 100644 index 0000000000000000000000000000000000000000..7e41463a903e0238d18c553b295c39b6ed8af938 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/numpy/random/_bounded_integers.pxd @@ -0,0 +1,29 @@ +from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t, + int8_t, int16_t, int32_t, int64_t, intptr_t) +import numpy as np +cimport numpy as np +ctypedef 
np.npy_bool bool_t + +from numpy.random cimport bitgen_t + +cdef inline uint64_t _gen_mask(uint64_t max_val) nogil: + """Mask generator for use in bounded random numbers""" + # Smallest bit mask >= max + cdef uint64_t mask = max_val + mask |= mask >> 1 + mask |= mask >> 2 + mask |= mask >> 4 + mask |= mask >> 8 + mask |= mask >> 16 + mask |= mask >> 32 + return mask + +cdef object _rand_uint64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_uint32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_uint16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_uint8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_bool(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) diff --git a/deepseek/lib/python3.10/site-packages/numpy/random/_common.pxd b/deepseek/lib/python3.10/site-packages/numpy/random/_common.pxd new file mode 100644 index 0000000000000000000000000000000000000000..659da0d2daa789089d4d7987c885161237b85141 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/numpy/random/_common.pxd @@ -0,0 +1,106 @@ +#cython: language_level=3 + +from libc.stdint cimport uint32_t, uint64_t, int32_t, int64_t + +import numpy as np +cimport numpy as np + +from numpy.random cimport 
bitgen_t + +cdef double POISSON_LAM_MAX +cdef double LEGACY_POISSON_LAM_MAX +cdef uint64_t MAXSIZE + +cdef enum ConstraintType: + CONS_NONE + CONS_NON_NEGATIVE + CONS_POSITIVE + CONS_POSITIVE_NOT_NAN + CONS_BOUNDED_0_1 + CONS_BOUNDED_GT_0_1 + CONS_BOUNDED_LT_0_1 + CONS_GT_1 + CONS_GTE_1 + CONS_POISSON + LEGACY_CONS_POISSON + +ctypedef ConstraintType constraint_type + +cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method) +cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output) +cdef object prepare_cffi(bitgen_t *bitgen) +cdef object prepare_ctypes(bitgen_t *bitgen) +cdef int check_constraint(double val, object name, constraint_type cons) except -1 +cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1 + +cdef extern from "include/aligned_malloc.h": + cdef void *PyArray_realloc_aligned(void *p, size_t n) + cdef void *PyArray_malloc_aligned(size_t n) + cdef void *PyArray_calloc_aligned(size_t n, size_t s) + cdef void PyArray_free_aligned(void *p) + +ctypedef void (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) noexcept nogil +ctypedef double (*random_double_0)(void *state) noexcept nogil +ctypedef double (*random_double_1)(void *state, double a) noexcept nogil +ctypedef double (*random_double_2)(void *state, double a, double b) noexcept nogil +ctypedef double (*random_double_3)(void *state, double a, double b, double c) noexcept nogil + +ctypedef void (*random_float_fill)(bitgen_t *state, np.npy_intp count, float* out) noexcept nogil +ctypedef float (*random_float_0)(bitgen_t *state) noexcept nogil +ctypedef float (*random_float_1)(bitgen_t *state, float a) noexcept nogil + +ctypedef int64_t (*random_uint_0)(void *state) noexcept nogil +ctypedef int64_t (*random_uint_d)(void *state, double a) noexcept nogil +ctypedef int64_t (*random_uint_dd)(void *state, double a, double b) noexcept nogil +ctypedef int64_t (*random_uint_di)(void *state, double a, 
uint64_t b) noexcept nogil +ctypedef int64_t (*random_uint_i)(void *state, int64_t a) noexcept nogil +ctypedef int64_t (*random_uint_iii)(void *state, int64_t a, int64_t b, int64_t c) noexcept nogil + +ctypedef uint32_t (*random_uint_0_32)(bitgen_t *state) noexcept nogil +ctypedef uint32_t (*random_uint_1_i_32)(bitgen_t *state, uint32_t a) noexcept nogil + +ctypedef int32_t (*random_int_2_i_32)(bitgen_t *state, int32_t a, int32_t b) noexcept nogil +ctypedef int64_t (*random_int_2_i)(bitgen_t *state, int64_t a, int64_t b) noexcept nogil + +cdef double kahan_sum(double *darr, np.npy_intp n) noexcept + +cdef inline double uint64_to_double(uint64_t rnd) noexcept nogil: + return (rnd >> 11) * (1.0 / 9007199254740992.0) + +cdef object double_fill(void *func, bitgen_t *state, object size, object lock, object out) + +cdef object float_fill(void *func, bitgen_t *state, object size, object lock, object out) + +cdef object float_fill_from_double(void *func, bitgen_t *state, object size, object lock, object out) + +cdef object wrap_int(object val, object bits) + +cdef np.ndarray int_to_array(object value, object name, object bits, object uint_size) + +cdef validate_output_shape(iter_shape, np.ndarray output) + +cdef object cont(void *func, void *state, object size, object lock, int narg, + object a, object a_name, constraint_type a_constraint, + object b, object b_name, constraint_type b_constraint, + object c, object c_name, constraint_type c_constraint, + object out) + +cdef object disc(void *func, void *state, object size, object lock, + int narg_double, int narg_int64, + object a, object a_name, constraint_type a_constraint, + object b, object b_name, constraint_type b_constraint, + object c, object c_name, constraint_type c_constraint) + +cdef object cont_f(void *func, bitgen_t *state, object size, object lock, + object a, object a_name, constraint_type a_constraint, + object out) + +cdef object cont_broadcast_3(void *func, void *state, object size, object lock, + 
np.ndarray a_arr, object a_name, constraint_type a_constraint, + np.ndarray b_arr, object b_name, constraint_type b_constraint, + np.ndarray c_arr, object c_name, constraint_type c_constraint) + +cdef object discrete_broadcast_iii(void *func, void *state, object size, object lock, + np.ndarray a_arr, object a_name, constraint_type a_constraint, + np.ndarray b_arr, object b_name, constraint_type b_constraint, + np.ndarray c_arr, object c_name, constraint_type c_constraint) diff --git a/deepseek/lib/python3.10/site-packages/numpy/random/_generator.pyi b/deepseek/lib/python3.10/site-packages/numpy/random/_generator.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e1cdefb15b772149ee390bf201893718e2191f17 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/numpy/random/_generator.pyi @@ -0,0 +1,681 @@ +from collections.abc import Callable +from typing import Any, Union, overload, TypeVar, Literal + +from numpy import ( + bool_, + dtype, + float32, + float64, + int8, + int16, + int32, + int64, + int_, + ndarray, + uint, + uint8, + uint16, + uint32, + uint64, +) +from numpy.random import BitGenerator, SeedSequence +from numpy._typing import ( + ArrayLike, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _DoubleCodes, + _DTypeLikeBool, + _DTypeLikeInt, + _DTypeLikeUInt, + _Float32Codes, + _Float64Codes, + _FloatLike_co, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntCodes, + _ShapeLike, + _SingleCodes, + _SupportsDType, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UIntCodes, +) + +_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any]) + +_DTypeLikeFloat32 = Union[ + dtype[float32], + _SupportsDType[dtype[float32]], + type[float32], + _Float32Codes, + _SingleCodes, +] + +_DTypeLikeFloat64 = Union[ + dtype[float64], + _SupportsDType[dtype[float64]], + type[float], + type[float64], + _Float64Codes, + _DoubleCodes, +] + +class Generator: + def __init__(self, bit_generator: BitGenerator) -> None: ... 
+ def __repr__(self) -> str: ... + def __str__(self) -> str: ... + def __getstate__(self) -> dict[str, Any]: ... + def __setstate__(self, state: dict[str, Any]) -> None: ... + def __reduce__(self) -> tuple[Callable[[str], Generator], tuple[str], dict[str, Any]]: ... + @property + def bit_generator(self) -> BitGenerator: ... + def spawn(self, n_children: int) -> list[Generator]: ... + def bytes(self, length: int) -> bytes: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: None = ..., + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., + out: None = ..., + ) -> float: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_normal( # type: ignore[misc] + self, + *, + out: ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat32 = ..., + out: None | ndarray[Any, dtype[float32]] = ..., + ) -> ndarray[Any, dtype[float32]]: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat64 = ..., + out: None | ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def permutation(self, x: int, axis: int = ...) -> ndarray[Any, dtype[int64]]: ... + @overload + def permutation(self, x: ArrayLike, axis: int = ...) -> ndarray[Any, Any]: ... + @overload + def standard_exponential( # type: ignore[misc] + self, + size: None = ..., + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., + method: Literal["zig", "inv"] = ..., + out: None = ..., + ) -> float: ... + @overload + def standard_exponential( + self, + size: _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_exponential( + self, + *, + out: ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... 
+ @overload + def standard_exponential( + self, + size: _ShapeLike = ..., + *, + method: Literal["zig", "inv"] = ..., + out: None | ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_exponential( + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat32 = ..., + method: Literal["zig", "inv"] = ..., + out: None | ndarray[Any, dtype[float32]] = ..., + ) -> ndarray[Any, dtype[float32]]: ... + @overload + def standard_exponential( + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat64 = ..., + method: Literal["zig", "inv"] = ..., + out: None | ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def random( # type: ignore[misc] + self, + size: None = ..., + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., + out: None = ..., + ) -> float: ... + @overload + def random( + self, + *, + out: ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def random( + self, + size: _ShapeLike = ..., + *, + out: None | ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def random( + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat32 = ..., + out: None | ndarray[Any, dtype[float32]] = ..., + ) -> ndarray[Any, dtype[float32]]: ... + @overload + def random( + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat64 = ..., + out: None | ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def beta( + self, + a: _FloatLike_co, + b: _FloatLike_co, + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def beta( + self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def exponential(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def exponential( + self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... 
+ ) -> ndarray[Any, dtype[float64]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + ) -> int: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: _DTypeLikeBool = ..., + endpoint: bool = ..., + ) -> bool: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: _DTypeLikeInt | _DTypeLikeUInt = ..., + endpoint: bool = ..., + ) -> int: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: _DTypeLikeBool = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[bool_]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[int8]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[int16]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[int32]]: ... 
+ @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[uint8]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[uint16]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[uint32]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[uint64]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[int_]]: ... 
+ @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[uint]]: ... + # TODO: Use a TypeVar _T here to get away from Any output? Should be int->ndarray[Any,dtype[int64]], ArrayLike[_T] -> _T | ndarray[Any,Any] + @overload + def choice( + self, + a: int, + size: None = ..., + replace: bool = ..., + p: None | _ArrayLikeFloat_co = ..., + axis: int = ..., + shuffle: bool = ..., + ) -> int: ... + @overload + def choice( + self, + a: int, + size: _ShapeLike = ..., + replace: bool = ..., + p: None | _ArrayLikeFloat_co = ..., + axis: int = ..., + shuffle: bool = ..., + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def choice( + self, + a: ArrayLike, + size: None = ..., + replace: bool = ..., + p: None | _ArrayLikeFloat_co = ..., + axis: int = ..., + shuffle: bool = ..., + ) -> Any: ... + @overload + def choice( + self, + a: ArrayLike, + size: _ShapeLike = ..., + replace: bool = ..., + p: None | _ArrayLikeFloat_co = ..., + axis: int = ..., + shuffle: bool = ..., + ) -> ndarray[Any, Any]: ... + @overload + def uniform( + self, + low: _FloatLike_co = ..., + high: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def uniform( + self, + low: _ArrayLikeFloat_co = ..., + high: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def normal( + self, + loc: _FloatLike_co = ..., + scale: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def normal( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... 
+ @overload + def standard_gamma( # type: ignore[misc] + self, + shape: _FloatLike_co, + size: None = ..., + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., + out: None = ..., + ) -> float: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + *, + out: ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: None | _ShapeLike = ..., + dtype: _DTypeLikeFloat32 = ..., + out: None | ndarray[Any, dtype[float32]] = ..., + ) -> ndarray[Any, dtype[float32]]: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: None | _ShapeLike = ..., + dtype: _DTypeLikeFloat64 = ..., + out: None | ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def gamma(self, shape: _FloatLike_co, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def gamma( + self, + shape: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def f( + self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def noncentral_f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def noncentral_f( + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def chisquare(self, df: _FloatLike_co, size: None = ...) -> float: ... 
# type: ignore[misc] + @overload + def chisquare( + self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def noncentral_chisquare(self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def noncentral_chisquare( + self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_t(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: None = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def vonmises(self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def vonmises( + self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def pareto(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def pareto( + self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def weibull(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def weibull( + self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def power(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def power( + self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ... 
+ @overload + def laplace( + self, + loc: _FloatLike_co = ..., + scale: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def laplace( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def gumbel( + self, + loc: _FloatLike_co = ..., + scale: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def gumbel( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def logistic( + self, + loc: _FloatLike_co = ..., + scale: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def logistic( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def lognormal( + self, + mean: _FloatLike_co = ..., + sigma: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def lognormal( + self, + mean: _ArrayLikeFloat_co = ..., + sigma: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def rayleigh(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def rayleigh( + self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def wald(self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def wald( + self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... 
+ @overload + def triangular( + self, + left: _FloatLike_co, + mode: _FloatLike_co, + right: _FloatLike_co, + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def triangular( + self, + left: _ArrayLikeFloat_co, + mode: _ArrayLikeFloat_co, + right: _ArrayLikeFloat_co, + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def binomial(self, n: int, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def binomial( + self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def negative_binomial(self, n: _FloatLike_co, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def negative_binomial( + self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def poisson(self, lam: _FloatLike_co = ..., size: None = ...) -> int: ... # type: ignore[misc] + @overload + def poisson( + self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def zipf(self, a: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def zipf( + self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def geometric(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def geometric( + self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def hypergeometric( + self, + ngood: _ArrayLikeInt_co, + nbad: _ArrayLikeInt_co, + nsample: _ArrayLikeInt_co, + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[int64]]: ... 
+ @overload + def logseries(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def logseries( + self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int64]]: ... + def multivariate_normal( + self, + mean: _ArrayLikeFloat_co, + cov: _ArrayLikeFloat_co, + size: None | _ShapeLike = ..., + check_valid: Literal["warn", "raise", "ignore"] = ..., + tol: float = ..., + *, + method: Literal["svd", "eigh", "cholesky"] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + def multinomial( + self, n: _ArrayLikeInt_co, + pvals: _ArrayLikeFloat_co, + size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int64]]: ... + def multivariate_hypergeometric( + self, + colors: _ArrayLikeInt_co, + nsample: int, + size: None | _ShapeLike = ..., + method: Literal["marginals", "count"] = ..., + ) -> ndarray[Any, dtype[int64]]: ... + def dirichlet( + self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + def permuted( + self, x: ArrayLike, *, axis: None | int = ..., out: None | ndarray[Any, Any] = ... + ) -> ndarray[Any, Any]: ... + def shuffle(self, x: ArrayLike, axis: int = ...) -> None: ... + +def default_rng( + seed: None | _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator = ... +) -> Generator: ... 
diff --git a/deepseek/lib/python3.10/site-packages/numpy/random/_mt19937.pyi b/deepseek/lib/python3.10/site-packages/numpy/random/_mt19937.pyi new file mode 100644 index 0000000000000000000000000000000000000000..55cfb2db42b17a80a1e15b6f8eabee1ccbdee282 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/numpy/random/_mt19937.pyi @@ -0,0 +1,22 @@ +from typing import Any, TypedDict + +from numpy import dtype, ndarray, uint32 +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy._typing import _ArrayLikeInt_co + +class _MT19937Internal(TypedDict): + key: ndarray[Any, dtype[uint32]] + pos: int + +class _MT19937State(TypedDict): + bit_generator: str + state: _MT19937Internal + +class MT19937(BitGenerator): + def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... + def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ... + def jumped(self, jumps: int = ...) -> MT19937: ... + @property + def state(self) -> _MT19937State: ... + @state.setter + def state(self, value: _MT19937State) -> None: ... diff --git a/deepseek/lib/python3.10/site-packages/numpy/random/_pcg64.pyi b/deepseek/lib/python3.10/site-packages/numpy/random/_pcg64.pyi new file mode 100644 index 0000000000000000000000000000000000000000..470aee867493b48817670f7c4ff7b24d8be31f26 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/numpy/random/_pcg64.pyi @@ -0,0 +1,42 @@ +from typing import TypedDict + +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy._typing import _ArrayLikeInt_co + +class _PCG64Internal(TypedDict): + state: int + inc: int + +class _PCG64State(TypedDict): + bit_generator: str + state: _PCG64Internal + has_uint32: int + uinteger: int + +class PCG64(BitGenerator): + def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... + def jumped(self, jumps: int = ...) -> PCG64: ... + @property + def state( + self, + ) -> _PCG64State: ... 
+ @state.setter + def state( + self, + value: _PCG64State, + ) -> None: ... + def advance(self, delta: int) -> PCG64: ... + +class PCG64DXSM(BitGenerator): + def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... + def jumped(self, jumps: int = ...) -> PCG64DXSM: ... + @property + def state( + self, + ) -> _PCG64State: ... + @state.setter + def state( + self, + value: _PCG64State, + ) -> None: ... + def advance(self, delta: int) -> PCG64DXSM: ... diff --git a/deepseek/lib/python3.10/site-packages/numpy/random/_philox.pyi b/deepseek/lib/python3.10/site-packages/numpy/random/_philox.pyi new file mode 100644 index 0000000000000000000000000000000000000000..26ce726ecf4a6f0e9fda4d596368a05da5629124 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/numpy/random/_philox.pyi @@ -0,0 +1,36 @@ +from typing import Any, TypedDict + +from numpy import dtype, ndarray, uint64 +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy._typing import _ArrayLikeInt_co + +class _PhiloxInternal(TypedDict): + counter: ndarray[Any, dtype[uint64]] + key: ndarray[Any, dtype[uint64]] + +class _PhiloxState(TypedDict): + bit_generator: str + state: _PhiloxInternal + buffer: ndarray[Any, dtype[uint64]] + buffer_pos: int + has_uint32: int + uinteger: int + +class Philox(BitGenerator): + def __init__( + self, + seed: None | _ArrayLikeInt_co | SeedSequence = ..., + counter: None | _ArrayLikeInt_co = ..., + key: None | _ArrayLikeInt_co = ..., + ) -> None: ... + @property + def state( + self, + ) -> _PhiloxState: ... + @state.setter + def state( + self, + value: _PhiloxState, + ) -> None: ... + def jumped(self, jumps: int = ...) -> Philox: ... + def advance(self, delta: int) -> Philox: ... 
diff --git a/deepseek/lib/python3.10/site-packages/numpy/random/_pickle.py b/deepseek/lib/python3.10/site-packages/numpy/random/_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..073993726eb30b48c7e5f9d6eba3299a14829fa3 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/numpy/random/_pickle.py @@ -0,0 +1,80 @@ +from .mtrand import RandomState +from ._philox import Philox +from ._pcg64 import PCG64, PCG64DXSM +from ._sfc64 import SFC64 + +from ._generator import Generator +from ._mt19937 import MT19937 + +BitGenerators = {'MT19937': MT19937, + 'PCG64': PCG64, + 'PCG64DXSM': PCG64DXSM, + 'Philox': Philox, + 'SFC64': SFC64, + } + + +def __bit_generator_ctor(bit_generator_name='MT19937'): + """ + Pickling helper function that returns a bit generator object + + Parameters + ---------- + bit_generator_name : str + String containing the name of the BitGenerator + + Returns + ------- + bit_generator : BitGenerator + BitGenerator instance + """ + if bit_generator_name in BitGenerators: + bit_generator = BitGenerators[bit_generator_name] + else: + raise ValueError(str(bit_generator_name) + ' is not a known ' + 'BitGenerator module.') + + return bit_generator() + + +def __generator_ctor(bit_generator_name="MT19937", + bit_generator_ctor=__bit_generator_ctor): + """ + Pickling helper function that returns a Generator object + + Parameters + ---------- + bit_generator_name : str + String containing the core BitGenerator's name + bit_generator_ctor : callable, optional + Callable function that takes bit_generator_name as its only argument + and returns an instantized bit generator. 
+ + Returns + ------- + rg : Generator + Generator using the named core BitGenerator + """ + return Generator(bit_generator_ctor(bit_generator_name)) + + +def __randomstate_ctor(bit_generator_name="MT19937", + bit_generator_ctor=__bit_generator_ctor): + """ + Pickling helper function that returns a legacy RandomState-like object + + Parameters + ---------- + bit_generator_name : str + String containing the core BitGenerator's name + bit_generator_ctor : callable, optional + Callable function that takes bit_generator_name as its only argument + and returns an instantized bit generator. + + Returns + ------- + rs : RandomState + Legacy RandomState using the named core BitGenerator + """ + + return RandomState(bit_generator_ctor(bit_generator_name)) diff --git a/deepseek/lib/python3.10/site-packages/numpy/random/_sfc64.pyi b/deepseek/lib/python3.10/site-packages/numpy/random/_sfc64.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e1810e7d5261490d83ba1ec8f4f3df863837a5f0 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/numpy/random/_sfc64.pyi @@ -0,0 +1,28 @@ +from typing import Any, TypedDict + +from numpy import dtype as dtype +from numpy import ndarray as ndarray +from numpy import uint64 +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy._typing import _ArrayLikeInt_co + +class _SFC64Internal(TypedDict): + state: ndarray[Any, dtype[uint64]] + +class _SFC64State(TypedDict): + bit_generator: str + state: _SFC64Internal + has_uint32: int + uinteger: int + +class SFC64(BitGenerator): + def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... + @property + def state( + self, + ) -> _SFC64State: ... + @state.setter + def state( + self, + value: _SFC64State, + ) -> None: ... 
diff --git a/deepseek/lib/python3.10/site-packages/numpy/random/bit_generator.pxd b/deepseek/lib/python3.10/site-packages/numpy/random/bit_generator.pxd new file mode 100644 index 0000000000000000000000000000000000000000..dfa7d0a71c085dfa3dfb2819f47493cb8501d198 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/numpy/random/bit_generator.pxd @@ -0,0 +1,35 @@ +cimport numpy as np +from libc.stdint cimport uint32_t, uint64_t + +cdef extern from "numpy/random/bitgen.h": + struct bitgen: + void *state + uint64_t (*next_uint64)(void *st) nogil + uint32_t (*next_uint32)(void *st) nogil + double (*next_double)(void *st) nogil + uint64_t (*next_raw)(void *st) nogil + + ctypedef bitgen bitgen_t + +cdef class BitGenerator(): + cdef readonly object _seed_seq + cdef readonly object lock + cdef bitgen_t _bitgen + cdef readonly object _ctypes + cdef readonly object _cffi + cdef readonly object capsule + + +cdef class SeedSequence(): + cdef readonly object entropy + cdef readonly tuple spawn_key + cdef readonly Py_ssize_t pool_size + cdef readonly object pool + cdef readonly uint32_t n_children_spawned + + cdef mix_entropy(self, np.ndarray[np.npy_uint32, ndim=1] mixer, + np.ndarray[np.npy_uint32, ndim=1] entropy_array) + cdef get_assembled_entropy(self) + +cdef class SeedlessSequence(): + pass diff --git a/deepseek/lib/python3.10/site-packages/numpy/random/mtrand.pyi b/deepseek/lib/python3.10/site-packages/numpy/random/mtrand.pyi new file mode 100644 index 0000000000000000000000000000000000000000..b5f600652b5444ce647108590c89626250f8bae8 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/numpy/random/mtrand.pyi @@ -0,0 +1,571 @@ +import builtins +from collections.abc import Callable +from typing import Any, Union, overload, Literal + +from numpy import ( + bool_, + dtype, + float32, + float64, + int8, + int16, + int32, + int64, + int_, + ndarray, + uint, + uint8, + uint16, + uint32, + uint64, +) +from numpy.random.bit_generator import BitGenerator +from 
numpy._typing import ( + ArrayLike, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _DoubleCodes, + _DTypeLikeBool, + _DTypeLikeInt, + _DTypeLikeUInt, + _Float32Codes, + _Float64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntCodes, + _ShapeLike, + _SingleCodes, + _SupportsDType, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UIntCodes, +) + +_DTypeLikeFloat32 = Union[ + dtype[float32], + _SupportsDType[dtype[float32]], + type[float32], + _Float32Codes, + _SingleCodes, +] + +_DTypeLikeFloat64 = Union[ + dtype[float64], + _SupportsDType[dtype[float64]], + type[float], + type[float64], + _Float64Codes, + _DoubleCodes, +] + +class RandomState: + _bit_generator: BitGenerator + def __init__(self, seed: None | _ArrayLikeInt_co | BitGenerator = ...) -> None: ... + def __repr__(self) -> str: ... + def __str__(self) -> str: ... + def __getstate__(self) -> dict[str, Any]: ... + def __setstate__(self, state: dict[str, Any]) -> None: ... + def __reduce__(self) -> tuple[Callable[[str], RandomState], tuple[str], dict[str, Any]]: ... + def seed(self, seed: None | _ArrayLikeFloat_co = ...) -> None: ... + @overload + def get_state(self, legacy: Literal[False] = ...) -> dict[str, Any]: ... + @overload + def get_state( + self, legacy: Literal[True] = ... + ) -> dict[str, Any] | tuple[str, ndarray[Any, dtype[uint32]], int, int, float]: ... + def set_state( + self, state: dict[str, Any] | tuple[str, ndarray[Any, dtype[uint32]], int, int, float] + ) -> None: ... + @overload + def random_sample(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def random_sample(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ... + @overload + def random(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def random(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ... + @overload + def beta(self, a: float, b: float, size: None = ...) -> float: ... 
# type: ignore[misc] + @overload + def beta( + self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def exponential(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def exponential( + self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_exponential(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_exponential(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ... + @overload + def tomaxint(self, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def tomaxint(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[int_]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + ) -> int: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: _DTypeLikeBool = ..., + ) -> bool: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: _DTypeLikeInt | _DTypeLikeUInt = ..., + ) -> int: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: _DTypeLikeBool = ..., + ) -> ndarray[Any, dtype[bool_]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., + ) -> ndarray[Any, dtype[int8]]: ... 
+ @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., + ) -> ndarray[Any, dtype[int16]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., + ) -> ndarray[Any, dtype[int32]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., + ) -> ndarray[Any, dtype[uint8]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., + ) -> ndarray[Any, dtype[uint16]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., + ) -> ndarray[Any, dtype[uint32]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., + ) -> ndarray[Any, dtype[uint64]]: ... 
+ @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., + ) -> ndarray[Any, dtype[uint]]: ... + def bytes(self, length: int) -> builtins.bytes: ... + @overload + def choice( + self, + a: int, + size: None = ..., + replace: bool = ..., + p: None | _ArrayLikeFloat_co = ..., + ) -> int: ... + @overload + def choice( + self, + a: int, + size: _ShapeLike = ..., + replace: bool = ..., + p: None | _ArrayLikeFloat_co = ..., + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def choice( + self, + a: ArrayLike, + size: None = ..., + replace: bool = ..., + p: None | _ArrayLikeFloat_co = ..., + ) -> Any: ... + @overload + def choice( + self, + a: ArrayLike, + size: _ShapeLike = ..., + replace: bool = ..., + p: None | _ArrayLikeFloat_co = ..., + ) -> ndarray[Any, Any]: ... + @overload + def uniform(self, low: float = ..., high: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def uniform( + self, + low: _ArrayLikeFloat_co = ..., + high: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def rand(self) -> float: ... + @overload + def rand(self, *args: int) -> ndarray[Any, dtype[float64]]: ... + @overload + def randn(self) -> float: ... + @overload + def randn(self, *args: int) -> ndarray[Any, dtype[float64]]: ... + @overload + def random_integers(self, low: int, high: None | int = ..., size: None = ...) -> int: ... 
# type: ignore[misc] + @overload + def random_integers( + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def standard_normal(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_normal( # type: ignore[misc] + self, size: _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def normal(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def normal( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_gamma( # type: ignore[misc] + self, + shape: float, + size: None = ..., + ) -> float: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def gamma( + self, + shape: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def f( + self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def noncentral_f(self, dfnum: float, dfden: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def noncentral_f( + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def chisquare(self, df: float, size: None = ...) -> float: ... 
# type: ignore[misc] + @overload + def chisquare( + self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def noncentral_chisquare(self, df: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def noncentral_chisquare( + self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_t(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: None = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def vonmises( + self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def pareto( + self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def weibull(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def weibull( + self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def power( + self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ... 
+ @overload + def laplace(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def laplace( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def gumbel(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def gumbel( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def logistic(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def logistic( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def lognormal(self, mean: float = ..., sigma: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def lognormal( + self, + mean: _ArrayLikeFloat_co = ..., + sigma: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def rayleigh( + self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def wald( + self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def triangular(self, left: float, mode: float, right: float, size: None = ...) -> float: ... 
# type: ignore[misc] + @overload + def triangular( + self, + left: _ArrayLikeFloat_co, + mode: _ArrayLikeFloat_co, + right: _ArrayLikeFloat_co, + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def binomial(self, n: int, p: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def binomial( + self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def negative_binomial(self, n: float, p: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def negative_binomial( + self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def poisson(self, lam: float = ..., size: None = ...) -> int: ... # type: ignore[misc] + @overload + def poisson( + self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def zipf(self, a: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def zipf( + self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def geometric(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def geometric( + self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def hypergeometric( + self, + ngood: _ArrayLikeInt_co, + nbad: _ArrayLikeInt_co, + nsample: _ArrayLikeInt_co, + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def logseries(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def logseries( + self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int_]]: ... 
+ def multivariate_normal( + self, + mean: _ArrayLikeFloat_co, + cov: _ArrayLikeFloat_co, + size: None | _ShapeLike = ..., + check_valid: Literal["warn", "raise", "ignore"] = ..., + tol: float = ..., + ) -> ndarray[Any, dtype[float64]]: ... + def multinomial( + self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int_]]: ... + def dirichlet( + self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + def shuffle(self, x: ArrayLike) -> None: ... + @overload + def permutation(self, x: int) -> ndarray[Any, dtype[int_]]: ... + @overload + def permutation(self, x: ArrayLike) -> ndarray[Any, Any]: ... + +_rand: RandomState + +beta = _rand.beta +binomial = _rand.binomial +bytes = _rand.bytes +chisquare = _rand.chisquare +choice = _rand.choice +dirichlet = _rand.dirichlet +exponential = _rand.exponential +f = _rand.f +gamma = _rand.gamma +get_state = _rand.get_state +geometric = _rand.geometric +gumbel = _rand.gumbel +hypergeometric = _rand.hypergeometric +laplace = _rand.laplace +logistic = _rand.logistic +lognormal = _rand.lognormal +logseries = _rand.logseries +multinomial = _rand.multinomial +multivariate_normal = _rand.multivariate_normal +negative_binomial = _rand.negative_binomial +noncentral_chisquare = _rand.noncentral_chisquare +noncentral_f = _rand.noncentral_f +normal = _rand.normal +pareto = _rand.pareto +permutation = _rand.permutation +poisson = _rand.poisson +power = _rand.power +rand = _rand.rand +randint = _rand.randint +randn = _rand.randn +random = _rand.random +random_integers = _rand.random_integers +random_sample = _rand.random_sample +rayleigh = _rand.rayleigh +seed = _rand.seed +set_state = _rand.set_state +shuffle = _rand.shuffle +standard_cauchy = _rand.standard_cauchy +standard_exponential = _rand.standard_exponential +standard_gamma = _rand.standard_gamma +standard_normal = _rand.standard_normal +standard_t = _rand.standard_t +triangular = 
_rand.triangular +uniform = _rand.uniform +vonmises = _rand.vonmises +wald = _rand.wald +weibull = _rand.weibull +zipf = _rand.zipf +# Two legacy that are trivial wrappers around random_sample +sample = _rand.random_sample +ranf = _rand.random_sample + +def set_bit_generator(bitgen: BitGenerator) -> None: + ... + +def get_bit_generator() -> BitGenerator: + ... diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/__init__.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d130b606a924544dd2857ddd343a64288290aca Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/__init__.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_equalize.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_equalize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f07656e17a9cec7e3ebaaed852b88c1d9c360c4 Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_equalize.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_learnable_fake_quantize.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_learnable_fake_quantize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..278cfcc4b323f610db79bc9c445daaaea6053f45 Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_learnable_fake_quantize.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/qconfig.cpython-310.pyc 
b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/qconfig.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72560efa8c75c49922e84a2cc12830d09db28d8f Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/qconfig.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantize_jit.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantize_jit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04ca6009b3d2a29f5e5be6acb8f1f589b34fb49c Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantize_jit.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/utils.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94578c951ab2e5fe5efb36f32733efaa360c1e4f Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/utils.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/_pt2e/__init__.py b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/_pt2e/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/_pt2e/__pycache__/__init__.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/_pt2e/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71638846b01c5da2d95af880963cc65a121cefb4 Binary files /dev/null and 
b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/_pt2e/__pycache__/__init__.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/_pt2e/__pycache__/utils.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/_pt2e/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12c06939d8eb45d596295a8cad7e2f38c845c214 Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/_pt2e/__pycache__/utils.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/_pt2e/utils.py b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/_pt2e/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..686337080d80dbd9c4852812fea6f279040ca595 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/_pt2e/utils.py @@ -0,0 +1,138 @@ +import torch +from torch.fx import GraphModule +from torch.nn.utils.fusion import fuse_conv_bn_weights +# TODO[jerryzh168]: move this to a more general util function +from torch.ao.quantization.fx.prepare import ( + _is_activation_post_process_node, +) +from collections import OrderedDict +import operator + +# TODO[qihan]: longer term, this should happen in the dynamo stack as well +def _get_renamed_nn_module_stack(nn_module_stack): + # initialize with top level parent scope + nn_module_stack_renamed = OrderedDict([("", None)]) + if nn_module_stack: + # Rename module_key, e.g. "self_layer1_1__conv1" to "self.layer1.1._conv1", for easier downstream parsing + prev_key = "" + for key, value in nn_module_stack.items(): + if not prev_key: + if key.startswith("self_"): + new_key = key[5:] + prev_key = new_key + else: + new_key = prev_key + "." 
+ key[len(prev_key) + 6 :] + nn_module_stack_renamed[new_key] = value + prev_key = new_key + return nn_module_stack_renamed + +def _get_tensor_constant_from_node(node, m): + if node is None: + return None + assert node.op == "get_attr" + return getattr(m, node.target) + +# fuse conv bn weights, inplace modification of the graph_module and graph +def _fuse_conv_bn_(m: GraphModule) -> None: + for n in m.graph.nodes: + if n.op != "call_function" or n.target != torch.ops.aten.native_batch_norm.default: + continue + bn_op = n + n = bn_op.args[0] + if n.op != "call_function" or n.target != torch.ops.aten.convolution.default: + continue + conv_op = n + + # conv weight + conv_w = _get_tensor_constant_from_node(conv_op.args[1], m) + # conv bias + conv_b = _get_tensor_constant_from_node(conv_op.args[2], m) + transpose = conv_op.args[6] + + # bn weight + bn_w = _get_tensor_constant_from_node(bn_op.args[1], m) + # bn bias + bn_b = _get_tensor_constant_from_node(bn_op.args[2], m) + # bn running mean + bn_rm = _get_tensor_constant_from_node(bn_op.args[3], m) + # bn running variance + bn_rv = _get_tensor_constant_from_node(bn_op.args[4], m) + bn_eps = bn_op.args[7] + + fused_weight, fused_bias = fuse_conv_bn_weights(conv_w, conv_b, bn_rm, bn_rv, bn_eps, bn_w, bn_b, transpose=False) + + # update the weight and bias for conv + conv_args = list(conv_op.args) + # calling data since the fused_weight and fused_bias are nn.Parameter + weight_attr_name = conv_args[1].target + setattr(m, weight_attr_name, fused_weight) + if conv_args[2] is not None: + bias_attr_name = conv_args[2].target + else: + bias_attr_name = weight_attr_name + "_bias" + with m.graph.inserting_before(conv_op): + get_bias_node = m.graph.get_attr(bias_attr_name) + conv_args[2] = get_bias_node + setattr(m, bias_attr_name, fused_bias) + conv_op.args = tuple(conv_args) + + # native_batch_norm has 3 outputs, we expect getitem calls on the output + # and we want to replace the uses of getitem 0 with the output of conv + # + 
# Before: + # conv -> bn - (first output) -> users1 + # \ - (second output) -> users2 + # \ - (third output) -> users3 + # After: + # conv -> (first output) -> users1 + # bn - + # \ - (second output) -> users2 + # \ - (third output) -> users3 + # if users2 and users3 are empty then bn will be removed through dead code elimination + + for user in bn_op.users: + if user.op != "call_function" or user.target != operator.getitem or user.args[1] != 0: + continue + user.replace_all_uses_with(conv_op) + m.graph.eliminate_dead_code() + m.recompile() + +def _rearrange_weight_observer_for_addmm( + model: GraphModule, +) -> None: + """ + before: + weight - t - observer \ + input - observer - addmm + after: + weight - observer - t \ + input - observer - addmm + """ + named_modules = dict(model.named_modules(remove_duplicate=False)) + for node in model.graph.nodes: + if node.target != torch.ops.aten.addmm.default: + continue + addmm = node + maybe_weight_obs = addmm.args[2] + if not _is_activation_post_process_node(maybe_weight_obs, named_modules): + continue + transpose_node = maybe_weight_obs.args[0] + if transpose_node.target != torch.ops.aten.t.default: + continue + # swap the order of transpose and observation + + maybe_weight_obs.replace_input_with(transpose_node, transpose_node.args[0]) + # remove the transpose node + with model.graph.inserting_after(maybe_weight_obs): + args = list(transpose_node.args) + args[0] = maybe_weight_obs + new_transpose_node = model.graph.create_node( + "call_function", + torch.ops.aten.t.default, + tuple(args), + transpose_node.kwargs + ) + addmm.replace_input_with(maybe_weight_obs, new_transpose_node) + + model.graph.eliminate_dead_code() + model.graph.lint() diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__init__.py b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0e37eaaded975381d6153b2a66c9d9550d07cd03 --- 
/dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__init__.py @@ -0,0 +1,3 @@ +from .prepare import prepare +from .convert import convert +from .fuse import fuse diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/__init__.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..607d478ac10b711250e37ac0774f80fb2204e92d Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/__init__.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_decomposed.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_decomposed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3dbb8018a8630807e77eb669dd1e92dbc7619fb7 Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_decomposed.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_equalize.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_equalize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..051e279111412338c49db39ca1d8b66887bf9a64 Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_equalize.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_lower_to_native_backend.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_lower_to_native_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..003f0fab54d9c4e84ea28c38ced4a5cb82eee858 Binary files 
/dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_lower_to_native_backend.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/convert.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/convert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9beacfadc839bfad87282afb7b61c09feb92d7d4 Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/convert.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/custom_config.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/custom_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3363308c95dfb603d44928cf7bb92e57bbae46f8 Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/custom_config.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/fuse.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/fuse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6bc60b317b9a8de5f9e2003f6cf4ce9a508c246 Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/fuse.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/fuse_handler.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/fuse_handler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e969f3bf4bdc2491cb4428b0499c19fd889e5b6 Binary files /dev/null and 
b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/fuse_handler.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/graph_module.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/graph_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1977ae19682fbc8d7fbffb60fb04821239dba1a Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/graph_module.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/lower_to_fbgemm.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/lower_to_fbgemm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c6f821603c072946744e6717edcb17ee79871a1 Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/lower_to_fbgemm.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/lower_to_qnnpack.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/lower_to_qnnpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d95795cbc557d42c6797c174207c469b6136a67 Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/lower_to_qnnpack.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/match_utils.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/match_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8c875b39241673237c97f4c01c44816b4bb225e Binary files /dev/null and 
b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/match_utils.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/pattern_utils.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/pattern_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2348534bb14a0b2d3dcb6211a1fc873ed1bf227b Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/pattern_utils.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/prepare.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/prepare.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c0201264ccd967a456a9cfde262f1336201aa12 Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/prepare.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/qconfig_mapping_utils.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/qconfig_mapping_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0d4b328896f615153903821c4ecbe2e367abc5f Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/qconfig_mapping_utils.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/quantize_handler.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/quantize_handler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..386c9301fd5a08cc5e047ed9800723f7d3f31b1d Binary files /dev/null and 
b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/quantize_handler.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/tracer.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/tracer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a165ccb4d7246744f3dc5f23e721c9eeb984346 Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/tracer.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/utils.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b2885f98aedfa7bd81a55ad29866182f474c24b Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/utils.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_decomposed.py b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_decomposed.py new file mode 100644 index 0000000000000000000000000000000000000000..6d7d834f2ea78b728bb49e4c9794b8aad43d8904 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_decomposed.py @@ -0,0 +1,416 @@ +import torch +from torch.library import Library, impl +from torch.ao.quantization.utils import determine_qparams, validate_qmin_qmax +from typing import Tuple + + +# Note: decomposed means decomposed quantized tensor, using decomposed so that the +# name is not too long +quantized_decomposed_lib = Library("quantized_decomposed", "DEF") + +_DTYPE_TO_QVALUE_BOUNDS = { + torch.uint8: (0, 255), + torch.int8: (-128, 127), + torch.int32: (-(2**31), 2**31 - 1) +} + +# Helper to check the passed in quant min and max are valid for the dtype +def 
_quant_min_max_bounds_check(quant_min, quant_max, dtype): + if dtype not in _DTYPE_TO_QVALUE_BOUNDS: + raise ValueError(f"Unsupported dtype: {dtype}") + quant_min_lower_bound, quant_max_upper_bound = _DTYPE_TO_QVALUE_BOUNDS[dtype] + + assert quant_min >= quant_min_lower_bound, \ + "quant_min out of bound for dtype, " \ + f"quant_min_lower_bound: {quant_min_lower_bound} quant_min: {quant_min}" + + assert quant_max <= quant_max_upper_bound, \ + "quant_max out of bound for dtype, " \ + f"quant_max_upper_bound: {quant_max_upper_bound} quant_max: {quant_max}" + +quantized_decomposed_lib.define( + "quantize_per_tensor(Tensor input, float scale, int zero_point, " + "int quant_min, int quant_max, ScalarType dtype) -> Tensor") + +@impl(quantized_decomposed_lib, "quantize_per_tensor", "CompositeExplicitAutograd") +def quantize_per_tensor( + input: torch.Tensor, + scale: float, + zero_point: int, + quant_min: int, + quant_max: int, + dtype: torch.dtype +) -> torch.Tensor: + """ Affine quantization for the Tensor using the same quantization parameters to map + from floating point to quantized values + + Args: + input (torch.Tensor): original float32 Tensor + scale (float): quantization parameter for affine quantization + zero_point (int): quantization parameter for affine quantization + quant_min (int): minimum quantized value for output Tensor + quant_max (int): maximum quantized value for output Tensor + dtype (torch.dtype): requested dtype (e.g. torch.uint8) for output Tensor + + Returns: + Tensor with requested dtype (e.g. 
torch.uint8), note the quantization parameters + are not stored in the Tensor, we are storing them in function arguments instead + """ + assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}" + _quant_min_max_bounds_check(quant_min, quant_max, dtype) + + inv_scale = 1.0 / scale + return torch.clamp(torch.round(input * inv_scale) + zero_point, quant_min, quant_max).to(dtype) + +quantized_decomposed_lib.define( + "quantize_per_tensor.tensor(Tensor input, Tensor scale, Tensor zero_point, " + "int quant_min, int quant_max, ScalarType dtype) -> Tensor") + +@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor", "CompositeExplicitAutograd") +def quantize_per_tensor_tensor( + input: torch.Tensor, + scale: torch.Tensor, + zero_point: torch.Tensor, + quant_min: int, + quant_max: int, + dtype: torch.dtype +) -> torch.Tensor: + """ Affine quantization for the Tensor using the same quantization parameters to map + from floating point to quantized values + Same as `quantize_per_tensor` but scale and zero_point are Scalar Tensor instead of + scalar values + """ + assert zero_point.numel() == 1, f"Exepecting zero_point tensor to be one element, but received : {zero_point.numel()}" + assert scale.numel() == 1, f"Exepecting scale tensor to be one element, but received : {scale.numel()}" + return quantize_per_tensor(input, scale.item(), zero_point.item(), quant_min, quant_max, dtype) + +@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor", "Meta") +def quantize_per_tensor_tensor_meta(input, scale, zero_point, quant_min, quant_max, dtype): + assert zero_point.numel() == 1, f"Exepecting zero_point tensor to be one element, but received : {zero_point.numel()}" + assert scale.numel() == 1, f"Exepecting scale tensor to be one element, but received : {scale.numel()}" + assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}" + 
_quant_min_max_bounds_check(quant_min, quant_max, dtype) + return torch.empty_like(input, dtype=dtype) + +# Note: quant_min/quant_max/dtype are not used in the operator, but for now it's kept in +# the signature as metadata for the input Tensor, this might be useful for pattern +# matching in the future +# We will revisit this later if we found there are no use cases for it +quantized_decomposed_lib.define( + "dequantize_per_tensor(Tensor input, float scale, int zero_point, " + "int quant_min, int quant_max, ScalarType dtype) -> Tensor") + +@impl(quantized_decomposed_lib, "dequantize_per_tensor", "CompositeExplicitAutograd") +def dequantize_per_tensor( + input: torch.Tensor, + scale: float, + zero_point: int, + quant_min: int, + quant_max: int, + dtype: torch.dtype +) -> torch.Tensor: + """ Affine dequantization for the Tensor using the same quantization parameters to map + from quantized values to floating point values + + Args: + input (torch.Tensor): Tensor with dtype matching `dtype` argument, + e.g. 
(`torch.uint8`), it is a per tensor quantized Tensor if combined with + quantization parameters in the argument of this function (scale/zero_point) + + scale (float): quantization parameter for affine quantization + + zero_point (int): quantization parameter for affine quantization + + quant_min (int): minimum quantized value for input Tensor (not used in computation, + reserved for pattern matching) + + quant_max (int): maximum quantized value for input Tensor (not used in computation, + reserved for pattern matching) + + dtype (torch.dtype): dtype for input Tensor (not used in computation, + reserved for pattern matching) + + Returns: + dequantized float32 Tensor + """ + assert input.dtype == dtype, f"Expecting input to have dtype: {dtype}" + if dtype in [torch.uint8, torch.int8, torch.int32]: + # TODO: investigate why + # (input - zero_point).to(torch.float32) * scale + # failed the test + return (input.to(torch.float32) - zero_point) * scale + else: + raise ValueError(f"Unsupported dtype in dequantize_per_tensor: {dtype}") + + +quantized_decomposed_lib.define( + "dequantize_per_tensor.tensor(Tensor input, Tensor scale, Tensor zero_point, " + "int quant_min, int quant_max, ScalarType dtype) -> Tensor") + +@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor", "CompositeExplicitAutograd") +def dequantize_per_tensor_tensor( + input: torch.Tensor, + scale: torch.Tensor, + zero_point: torch.Tensor, + quant_min: int, + quant_max: int, + dtype: torch.dtype +) -> torch.Tensor: + """ Affine dequantization for the Tensor using the same quantization parameters to map + from quantized values to floating point values + Same as `dequantize_per_tensor` but scale and zero_point are Scalar Tensor instead of + scalar values + """ + assert zero_point.numel() == 1, f"Exepecting zero_point tensor to be one element, but received : {zero_point.numel()}" + assert scale.numel() == 1, f"Exepecting scale tensor to be one element, but received : {scale.numel()}" + return 
dequantize_per_tensor(input, scale.item(), zero_point.item(), quant_min, quant_max, dtype) + +@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor", "Meta") +def dequantize_per_tensor_tensor_meta(input, scale, zero_point, quant_min, quant_max, dtype): + assert zero_point.numel() == 1, f"Exepecting zero_point tensor to be one element, but received : {zero_point.numel()}" + assert scale.numel() == 1, f"Exepecting scale tensor to be one element, but received : {scale.numel()}" + assert input.dtype == dtype, f"Expecting input to have dtype: {dtype}" + if dtype in [torch.uint8, torch.int8, torch.int32]: + return torch.empty_like(input, dtype=torch.float32) + else: + raise ValueError(f"Unsupported dtype in dequantize_per_tensor: {dtype}") + + +quantized_decomposed_lib.define( + "choose_qparams.tensor(Tensor input, int quant_min, int quant_max, " + "ScalarType dtype) -> (Tensor, Tensor)") + +@impl(quantized_decomposed_lib, "choose_qparams.tensor", "CompositeExplicitAutograd") +def choose_qparams_tensor( + input: torch.Tensor, + qmin: int, + qmax: int, + dtype: torch.dtype +) -> Tuple[torch.Tensor, torch.Tensor]: + """ Given an input Tensor, derive the per tensor affine quantization parameter + (scale and zero_point) for target quantized Tensor from the Tensor + + Args: + input (torch.Tensor): floating point input Tensor + quant_min (int): minimum quantized value for target quantized Tensor + quant_max (int): maximum quantized value for target quantized Tensor + dtype (torch.dtype): dtype for target quantized Tensor + + Returns: + scale (float): quantization parameter for the target quantized Tensor + zero_point (int): quantization parameter for the target quantized Tensor + """ + assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}" + assert dtype == torch.int8 or dtype == torch.uint8 or dtype == torch.int32, \ + f"Expecting target dtype to be int8 uint8 or int32, but got: {dtype}" + 
validate_qmin_qmax(qmin, qmax) + + min_val, max_val = torch.aminmax(input) + + return determine_qparams( + min_val, max_val, qmin, qmax, dtype, torch.Tensor([torch.finfo(torch.float32).eps]), has_customized_qrange=False) + +quantized_decomposed_lib.define( + "choose_qparams_symmetric.tensor(Tensor input, int quant_min, int quant_max, " + "ScalarType dtype) -> (Tensor, Tensor)") + +@impl(quantized_decomposed_lib, "choose_qparams_symmetric.tensor", "CompositeExplicitAutograd") +def choose_qparams_symmetric_tensor( + input: torch.Tensor, + qmin: int, + qmax: int, + dtype: torch.dtype +) -> Tuple[torch.Tensor, torch.Tensor]: + """ Given an input Tensor, derive the per tensor affine quantization parameter + (scale and zero_point) for target quantized Tensor from the Tensor + + Args: + input (torch.Tensor): floating point input Tensor + quant_min (int): minimum quantized value for target quantized Tensor + quant_max (int): maximum quantized value for target quantized Tensor + dtype (torch.dtype): dtype for target quantized Tensor + + Returns: + scale (float): quantization parameter for the target quantized Tensor + zero_point (int): quantization parameter for the target quantized Tensor + """ + assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}" + assert dtype == torch.int8 or dtype == torch.uint8 or dtype == torch.int32, \ + f"Expecting target dtype to be int8 uint8 or int32, but got: {dtype}" + validate_qmin_qmax(qmin, qmax) + + min_val, max_val = torch.aminmax(input) + return determine_qparams( + min_val, + max_val, + qmin, + qmax, + dtype, + torch.Tensor([torch.finfo(torch.float32).eps]), + has_customized_qrange=False, + qscheme=torch.per_tensor_symmetric + ) + +@impl(quantized_decomposed_lib, "choose_qparams.tensor", "Meta") +def choose_qparams_tensor_meta( + input: torch.Tensor, + quant_min: int, + quant_max: int, + dtype: torch.dtype +) -> Tuple[torch.Tensor, torch.Tensor]: + assert input.dtype == 
torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}" + assert quant_min < quant_max, f"Expecting quant_min to be smaller than quant_max but received min: \ + {quant_min} max: {quant_max}" + return torch.empty(1, dtype=torch.float, device=input.device), torch.empty(1, dtype=torch.int32, device=input.device) + +@impl(quantized_decomposed_lib, "choose_qparams_symmetric.tensor", "Meta") +def choose_qparams_symmetric_tensor_meta( + input: torch.Tensor, + quant_min: int, + quant_max: int, + dtype: torch.dtype +) -> Tuple[torch.Tensor, torch.Tensor]: + return torch.empty(1, dtype=torch.float, device=input.device), torch.empty(1, dtype=torch.int32, device=input.device) +# Helper function used to implement per-channel quantization against any axis +def _permute_to_axis_zero(x, axis): + new_axis_list = list(range(x.dim())) + new_axis_list[axis] = 0 + new_axis_list[0] = axis + y = x.permute(tuple(new_axis_list)) + return y, new_axis_list + +quantized_decomposed_lib.define( + "quantize_per_channel(Tensor input, Tensor scales, Tensor zero_points, int axis, " + "int quant_min, int quant_max, ScalarType dtype) -> Tensor") + +@impl(quantized_decomposed_lib, "quantize_per_channel", "CompositeExplicitAutograd") +def quantize_per_channel( + input: torch.Tensor, + scales: torch.Tensor, + zero_points: torch.Tensor, + axis: int, + quant_min: int, + quant_max: int, + dtype: torch.dtype +) -> torch.Tensor: + """ Affine per channel quantization for the Tensor using the same quantization + parameters for each channel/axis to map from floating point to quantized values + + Args: + input (torch.Tensor): original float32 Tensor + scales (torch.Tensor): a list of scale quantization parameter for + affine quantization, one per channel + zero_point (torch.Tensor): a list of zero_point quantization parameter for + affine quantization, one per channel + quant_min (int): minimum quantized value for output Tensor + quant_max (int): maximum quantized value for 
output Tensor + dtype (torch.dtype): requested dtype (e.g. torch.uint8) for output Tensor + + Returns: + Tensor with requested dtype (e.g. torch.uint8), note the quantization parameters + are not stored in the Tensor, we are storing them in function arguments instead + """ + assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}" + assert axis < input.dim(), f"Expecting axis to be < {input.dim()}" + _quant_min_max_bounds_check(quant_min, quant_max, dtype) + input, permute_axis_list = _permute_to_axis_zero(input, axis) + res = torch.zeros_like(input) + + for i in range(input.size(0)): + res[i] = torch.clamp( + torch.round(input[i] * (1.0 / scales[i])) + zero_points[i], + quant_min, + quant_max + ) + + out = res.permute(tuple(permute_axis_list)) + return out.to(dtype) + +@impl(quantized_decomposed_lib, "quantize_per_channel", "Meta") +def quantize_per_channel_meta( + input: torch.Tensor, + scales: torch.Tensor, + zero_points: torch.Tensor, + axis: int, + quant_min: int, + quant_max: int, + dtype: torch.dtype +) -> torch.Tensor: + assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}" + assert axis < input.dim(), f"Expecting axis to be < {input.dim()}" + _quant_min_max_bounds_check(quant_min, quant_max, dtype) + return torch.empty_like(input, dtype=dtype) + +# Note: quant_min/quant_max/dtype are not used in the operator, but for now it's kept in +# the signature as metadata for the input Tensor, this might be useful for pattern +# matching in the future +# We will revisit this later if we found there are no use cases for it +quantized_decomposed_lib.define( + "dequantize_per_channel(Tensor input, Tensor scales, Tensor zero_points, int axis, " + "int quant_min, int quant_max, ScalarType dtype) -> Tensor") + +@impl(quantized_decomposed_lib, "dequantize_per_channel", "CompositeExplicitAutograd") +def dequantize_per_channel( + input: torch.Tensor, + scales: 
torch.Tensor, + zero_points: torch.Tensor, + axis: int, + quant_min: int, + quant_max: int, + dtype: torch.dtype +) -> torch.Tensor: + """ Affine per channel dequantization for the Tensor using the same quantization + parameters for each channel/axis to map from quantized values to floating point values + + Args: + input (torch.Tensor): Tensor with dtype matching `dtype` argument, + e.g. (`torch.uint8`), it is a per channel quantized Tensor if combined with + quantization parameter in the argument of this function (scales/zero_points/axis) + + scales (torch.Tensor): a list of scale quantization parameter for + affine quantization, one per channel + + zero_points (torch.Tensor): a list of zero_point quantization parameter for + affine quantization, one per channel + + quant_min (int): minimum quantized value for output Tensor (not used in computation, + reserved for pattern matching) + + quant_max (int): maximum quantized value for output Tensor (not used in computation, + reserved for pattern matching) + + dtype (torch.dtype): requested dtype for output Tensor (not used in computation, + reserved for pattern matching) + + Returns: + dequantized float32 Tensor + """ + assert input.dtype == dtype, f"Expecting input to have dtype {dtype}, but got dtype: {input.dtype}" + assert axis < input.dim(), f"Expecting axis to be < {input.dim()}" + _quant_min_max_bounds_check(quant_min, quant_max, dtype) + input, permute_axis_list = _permute_to_axis_zero(input, axis) + res = torch.zeros_like(input, dtype=torch.float32) + + for i in range(input.size(0)): + # TODO: investigate why + # (input[i] - zero_points[i]).to(torch.float32) * scales[i] + # failed the test + res[i] = (input[i].to(torch.float32) - zero_points[i]) * scales[i] + + out = res.permute(tuple(permute_axis_list)) + return out + +@impl(quantized_decomposed_lib, "dequantize_per_channel", "Meta") +def dequantize_per_channel_meta( + input: torch.Tensor, + scales: torch.Tensor, + zero_points: torch.Tensor, + axis: int, + 
quant_min: int, + quant_max: int, + dtype: torch.dtype +) -> torch.Tensor: + assert input.dtype == dtype, f"Expecting input to have dtype {dtype}, but got dtype: {input.dtype}" + assert axis < input.dim(), f"Expecting axis to be < {input.dim()}" + _quant_min_max_bounds_check(quant_min, quant_max, dtype) + return torch.empty_like(input, dtype=torch.float32) diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_equalize.py b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_equalize.py new file mode 100644 index 0000000000000000000000000000000000000000..8022f28cbfc50f92020480dde327ffeb121d07f4 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_equalize.py @@ -0,0 +1,824 @@ +import warnings + +from collections import namedtuple +from typing import Any, Dict, List, Optional, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.ao.nn.intrinsic as nni +from torch.fx import GraphModule +from torch.fx.graph import Node +from torch.ao.quantization.fx.graph_module import _get_observed_graph_module_attr + +from torch.ao.quantization.backend_config import get_native_backend_config + +from ..observer import _with_args, ObserverBase, PerChannelMinMaxObserver +from ..utils import _parent_name, check_min_max_valid + +from .utils import ( + get_new_attr_name_with_prefix, + maybe_get_next_module, + node_arg_is_weight, +) + +CUSTOM_MODULE_SUPP_LIST: List[Any] = [] + +def reshape_scale(scale: torch.Tensor, axis: int, input: torch.Tensor) -> torch.Tensor: + """Reshapes the scale so that we can multiply it to the input by the given axis. 
+ """ + new_shape = [1] * input.ndim + new_shape[axis] = input.size(axis) + return scale.view(new_shape) + +qsheme_mapping_per_tensor_to_per_channel = { + torch.per_tensor_affine: torch.per_channel_affine, + torch.per_tensor_symmetric: torch.per_channel_symmetric, +} + + +class _InputEqualizationObserver(nn.Module): + r"""Observer for tracking the running min/max values of input columns, and + computing the quantization parameters for the overall min/max input values. + + Args: + dtype: Quantized data type + qscheme: Quantization scheme + quant_min: Minimum quantization value. If unspecified, it will + follow the 8-bit setup. + quant_max: Maximum quantization value. If unspecified, it will + follow the 8-bit setup. + + The running minimum/maximum :math:`x_\text{min/max}` are computed in the + same way as :class:`~torch.ao.quantization.observer.PerChannelMinMaxObserver`, + with the difference that the running min/max values are stored per column. + This observer is intended to be used along with a WeightEqualizationObserver + to calculate the equalization scale. 
+ """ + + def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine, + quant_min=None, quant_max=None, factory_kwargs=None) -> None: + super().__init__() + + if qscheme not in {torch.per_tensor_affine, torch.per_tensor_symmetric}: + raise TypeError("Input qscheme must be per-tensor") + + self.dtype = dtype + self.qscheme = qscheme + + per_channel_qscheme = qsheme_mapping_per_tensor_to_per_channel[qscheme] + self.input_obs = PerChannelMinMaxObserver(ch_axis=1, dtype=dtype, + qscheme=per_channel_qscheme, + quant_min=quant_min, + quant_max=quant_max, + factory_kwargs=factory_kwargs) + + self.equalization_scale = torch.tensor(1) + self.equalization_shape: List[int] = [] + + def forward(self, x_orig): + if not (x_orig.ndim >= 2 and x_orig.ndim <= 5): + raise ValueError("InputEqualizationObserver only supports Linear and Conv layers") + + # Calculate the shape needed to reshape the equalization scale later (needed for Conv layers) + self.equalization_shape = [1] * x_orig.ndim + self.equalization_shape[1] = x_orig.size(1) + + return self.input_obs(x_orig) + + def get_input_minmax(self): + return (self.input_obs.min_val, self.input_obs.max_val) + + def set_equalization_scale(self, equalization_scale): + # Reshape the equalization scale along axis=1 so that it can be + # multiplied with the input along axis=1 + if equalization_scale.nelement() == 1 and equalization_scale == torch.tensor(1): + return + self.equalization_scale = torch.reshape(equalization_scale, self.equalization_shape) + + def calculate_scaled_minmax(self): + r""" Returns the scaled min/max inputs + """ + if self.equalization_scale.nelement() == 1 and self.equalization_scale == torch.tensor(1): + warnings.warn( + "Must call calculate_equalization_scale before calling calculate_scaled_minmax. " + + "Will not scale the next quantization observer." 
+ ) + return None, None + + # Calculate qparams for the scaled min/max inputs + # Scale the input by the equalization scale located at the same column + # index + (min_inputs, max_inputs) = self.get_input_minmax() + equalization_scale_reshaped = reshape_scale(self.equalization_scale, 0, min_inputs) + min_input_scaled = torch.min(torch.mul(min_inputs, equalization_scale_reshaped)) + max_input_scaled = torch.max(torch.mul(max_inputs, equalization_scale_reshaped)) + + return min_input_scaled, max_input_scaled + + with_args = classmethod(_with_args) + + +class _WeightEqualizationObserver(nn.Module): + r"""Observer for tracking the running min/max values of weight columns and + rows, and computing the quantization parameters for the weight rows. + + Args: + dtype: Quantized data type + qscheme: Quantization scheme + quant_min: Minimum quantization value. If unspecified, it will + follow the 8-bit setup. + quant_max: Maximum quantization value. If unspecified, it will + follow the 8-bit setup. + + This observer is made up of 1 PerChannelMinMaxObserver `weight_col_obs` used + to record the running minimum and maximum of columns of incoming weight + tensors. This observer is intended to be used along with an + InputEqualizationObserver to calculate the equalization scale. + + The running minimum/maximum :math:`w_\text{min/max}` are computed in the + same way as :class:`~torch.ao.quantization.observer.PerChannelMinMaxObserver`. 
+ """ + + def __init__(self, dtype=torch.qint8, qscheme=torch.per_tensor_affine, quant_min=None, + quant_max=None, factory_kwargs=None) -> None: + super().__init__() + + self.dtype = dtype + self.qscheme = qscheme + self.ch_axis = 1 + + per_channel_qscheme = qscheme + if qscheme in {torch.per_tensor_affine, torch.per_tensor_symmetric}: + per_channel_qscheme = qsheme_mapping_per_tensor_to_per_channel[qscheme] + self.weight_col_obs = PerChannelMinMaxObserver(ch_axis=1, dtype=dtype, + qscheme=per_channel_qscheme, + quant_min=quant_min, + quant_max=quant_max, + factory_kwargs=factory_kwargs) + + self.equalization_scale = torch.tensor(1) + + def forward(self, w_orig): + if not (w_orig.ndim >= 2 and w_orig.ndim <= 5): + raise ValueError("InputEqualizationObserver only supports Linear and Conv layers") + + return self.weight_col_obs(w_orig) + + def get_weight_col_minmax(self): + return (self.weight_col_obs.min_val, self.weight_col_obs.max_val) + + def set_equalization_scale(self, equalization_scale): + self.equalization_scale = equalization_scale + + with_args = classmethod(_with_args) + + +def calculate_equalization_scale(input_obs: _InputEqualizationObserver, + weight_obs: _WeightEqualizationObserver) -> torch.Tensor: + r""" Calculates the equalization scale and sets the equalization_scale value + in the observers. + + Args: + input_obs: Observer that tracks the ranges for the input columns + weight_obs: Observer that tracks the ranges for the weight columns + """ + + (min_inputs, max_inputs) = input_obs.get_input_minmax() + (min_weights, max_weights) = weight_obs.get_weight_col_minmax() + + if not (check_min_max_valid(min_inputs, max_inputs) and check_min_max_valid(min_weights, max_weights)): + warnings.warn( + "Must run observer before calling calculate_equalization_scale. " + + "Returning default equalization scale torch.tensor(1)." 
+ ) + return torch.tensor(1) + + if not (min_inputs.shape == min_weights.shape): + raise ValueError( + "Input and Weight must have the same column dimension. " + + f"Found {min_inputs.shape} and {min_weights.shape} shapes instead." + ) + + equalization_scale = torch.sqrt((max_weights - min_weights) / (max_inputs - min_inputs)) + # Replace all 'inf', 'nan', 0's with 1s to prevent errors + equalization_scale[equalization_scale == 0.] = 1 + equalization_scale = torch.nan_to_num(equalization_scale, nan=1, posinf=1, neginf=1) + return equalization_scale + + +class EqualizationQConfig(namedtuple('EqualizationQConfig', ['input_activation', 'weight'])): + """ + Describes how to quantize a layer or a part of the network specifically for + input-weight equalization by providing settings (observer classes) for + inputs, outputs, and weights. + + Note that EqualizationQConfig needs to contain observer **classes** (like + MinMaxObserver) or a callable that returns instances on invocation, not the + concrete observer instances themselves. + Quantization function will instantiate observers multiple times for each of + the layers. + + Observer classes have usually reasonable default arguments, but they can be + overwritten with `with_args` method (that behaves like functools.partial): + + my_qconfig = EqualizationQConfig(input_activation=_InputEqualizationObserver.with_args(dtype=torch.qint8), + weight=_WeightEqualizationObserver.with_args(dtype=torch.qint8)) + """ + def __new__(cls, input_activation=torch.nn.Identity, weight=torch.nn.Identity): + if isinstance(input_activation, nn.Module) or isinstance(weight, nn.Module): + raise ValueError("EqualizationQConfig received observer instance, please pass observer class instead. 
" + + "Use MyObserver.with_args(x=1) to override arguments to constructor if needed") + self = super(EqualizationQConfig, cls).__new__(cls, input_activation, weight) + return self + + +input_equalization_observer = _InputEqualizationObserver.with_args( + dtype=torch.quint8, qscheme=torch.per_tensor_symmetric) +weight_equalization_observer = _WeightEqualizationObserver.with_args( + dtype=torch.qint8, qscheme=torch.per_channel_symmetric) +default_equalization_qconfig = EqualizationQConfig(input_activation=input_equalization_observer, + weight=weight_equalization_observer) + + +def fused_module_supports_equalization(module) -> bool: + """ Checks if the fused node supports equalization. """ + return type(module) in [nni.LinearReLU, nni.ConvReLU1d, nni.ConvReLU2d, nni.ConvReLU3d] + +def nn_module_supports_equalization(module) -> bool: + """ Checks if the torch.nn node supports equalization. """ + return type(module) in [nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d] + +def custom_module_supports_equalization(module) -> bool: + """ Checks if the custom node supports equalization. 
""" + return type(module) in CUSTOM_MODULE_SUPP_LIST + + +def node_supports_equalization(node: Node, modules) -> bool: + """ Checks if the current node supports equalization + Currently we only support nn.Linear/F.Linear and nn.Conv/F.conv layers + """ + if node.op == 'call_module': + return nn_module_supports_equalization(modules[str(node.target)]) or \ + fused_module_supports_equalization(modules[str(node.target)]) or \ + custom_module_supports_equalization(modules[str(node.target)]) + elif node.op == 'call_function': + return node.target in [F.linear, F.conv1d, F.conv2d, F.conv3d] + return False + +def is_equalization_observer(observer: nn.Module) -> bool: + return (isinstance(observer, (_InputEqualizationObserver, _WeightEqualizationObserver))) + + +############################################################################### +# Functions for equalization during convert # +############################################################################### + +def get_op_node_and_weight_eq_obs( + input_eq_obs_node: Node, + model: GraphModule, + modules: Dict[str, nn.Module] +) -> Tuple[Optional[Node], Optional[_WeightEqualizationObserver]]: + """ Gets the following weight equalization observer. There should always + exist a weight equalization observer after an input equalization observer. 
+ + Returns the operation node that follows the input equalizatoin observer node + and the weight equalization observer + """ + + # Find the op node that comes directly after the input equaliation observer + op_node = None + for user in input_eq_obs_node.users.keys(): + if node_supports_equalization(user, modules): + op_node = user + break + + assert(op_node is not None) + if op_node.op == 'call_module': + # If the op_node is a nn.Linear layer, then it must have a + # WeightEqualizationObserver configuration + maybe_equalization_node_name_to_config = _get_observed_graph_module_attr(model, "equalization_node_name_to_qconfig") + assert maybe_equalization_node_name_to_config is not None + equalization_node_name_to_qconfig: Dict[str, Any] = maybe_equalization_node_name_to_config # type: ignore[assignment] + assert(equalization_node_name_to_qconfig.get(op_node.name, None) is not None) + weight_eq_obs = equalization_node_name_to_qconfig.get(op_node.name, None).weight() + + assert(isinstance(weight_eq_obs, _WeightEqualizationObserver)) + return op_node, weight_eq_obs + + elif op_node.op == 'call_function': + weight_node = maybe_get_weight_eq_obs_node(op_node, modules) + if weight_node is not None: + weight_eq_obs = modules[str(weight_node.target)] + assert(isinstance(weight_eq_obs, _WeightEqualizationObserver)) + return op_node, weight_eq_obs + + return None, None + +def maybe_get_weight_eq_obs_node(op_node: Node, modules: Dict[str, nn.Module]) -> Optional[Node]: + """ Gets the weight equalization observer node if it exists. + """ + assert(op_node.op == 'call_function') + # TODO: Pass in backend_config into this function and parent functions. 
+ backend_config = get_native_backend_config() + for node_arg in op_node.args: + if node_arg_is_weight(op_node, node_arg, backend_config): + assert(isinstance(node_arg, Node) and node_arg.op == 'call_module' and + isinstance(modules[str(node_arg.target)], _WeightEqualizationObserver)) + return node_arg + return None + +def maybe_get_next_input_eq_obs(node: Node, modules: Dict[str, nn.Module]) -> Optional[_InputEqualizationObserver]: + """ Gets the following input equalization observer if it exists. + + For example, in the case of connecting linear layers: + x -> inp_obs1 -> eq_obs1 -> linear1 -> out_obs1 -> eq_obs2 -> linear2 -> out_obs2 + If the node being passed in is the linear1 node, then we want to return eq_obs2, + the following equalization observer for linear2. + + However, if there are no connecting layers: + x -> inp_obs1 -> eq_obs1 -> linear1 -> out_obs1 -> add + Then we want to return None. + + In the case of an unfused linear-relu layer with a connecting linear layer: + linear1 -> relu -> out_obs1 -> eq_obs2 -> linear2 -> out_obs2 + Since it is unfused, we want to skip over the relu layer and return eq_obs2, + the following equalization observer for linear2. + """ + + assert(node_supports_equalization(node, modules)) + + # Locate the following nn.ReLU or F.relu node if it exists + maybe_relu_node = maybe_get_next_module(node, modules, nn.ReLU) + if maybe_relu_node is None: + maybe_relu_node = maybe_get_next_module(node, modules, target_functional_type=F.relu) + + # Locate the following output observer if it exists. + # We will skip the relu node if it exists. 
+ maybe_obs_node = ( + maybe_get_next_module(node, modules, ObserverBase) + if maybe_relu_node is None + else maybe_get_next_module(maybe_relu_node, modules, ObserverBase) + ) + if maybe_obs_node is None: + return None + + maybe_eq_obs_node = maybe_get_next_module(maybe_obs_node, modules, _InputEqualizationObserver) + if maybe_eq_obs_node is None: + return None + + maybe_eq_obs = modules[str(maybe_eq_obs_node)] + assert(isinstance(maybe_eq_obs, _InputEqualizationObserver)) + return maybe_eq_obs + +def maybe_get_next_equalization_scale(node: Node, modules: Dict[str, nn.Module]) -> Optional[torch.Tensor]: + """ If the next next node is an InputEqualizationObserver then we want to + return its equalization scale, else we return 1 + + This is used in the case where there are two connecting linear layers: + linear1 -> LinearOutObs -> InputEqObs -> linear2 + In this case, the node given is linear1 and we want to locate the InputEqObs. + """ + next_inp_eq_obs = maybe_get_next_input_eq_obs(node, modules) + if next_inp_eq_obs: + if next_inp_eq_obs.equalization_scale.nelement() == 1 and \ + next_inp_eq_obs.equalization_scale == torch.tensor(1): + return None + return next_inp_eq_obs.equalization_scale + return None + +def scale_input_observer(node: Node, modules: Dict[str, nn.Module]) -> None: + """ Scales the following input quantization observer's min/max values by + updating the values with the scaled min/max values calculated by the input + equalization observer + """ + input_eq_obs = modules[str(node.target)] + assert(isinstance(input_eq_obs, _InputEqualizationObserver)) + + input_quant_obs_node = node.args[0] + assert(isinstance(input_quant_obs_node, Node)) + + input_quant_obs = modules[str(input_quant_obs_node.target)] + if not isinstance(input_quant_obs, ObserverBase): + return + + min_input_scaled, max_input_scaled = input_eq_obs.calculate_scaled_minmax() + if min_input_scaled is None and max_input_scaled is None: + return + input_quant_obs.min_val = 
min_input_scaled + input_quant_obs.max_val = max_input_scaled + +def scale_weight_node( + node: Node, + modules: Dict[str, nn.Module], + equalization_scale: torch.Tensor, + next_equalization_scale: Optional[torch.Tensor], +) -> None: + """ Scale the weights for input-weight equalization by multiplying the + weight by 1/equalization_scale and next_equalization_scale + + Args: + node: Current node whose weights we want to scale + equalization_scale: Current node's calculated equalization scale + next_equalization_scale: Next node's calculated equalization scale if + the following node needs to be equalized, 1 otherwise + """ + if equalization_scale is None: + return + + if fused_module_supports_equalization(modules[str(node.target)]): + op_module = modules[str(node.target)][0] # type: ignore[index] + else: + op_module = modules[str(node.target)] + assert(nn_module_supports_equalization(op_module) or custom_module_supports_equalization(op_module)) + + # Scale the weights for input-weight equalization + # If the following layer needs to be equalized then we will multiply its scale + weight = op_module.weight + assert(isinstance(weight, torch.Tensor)) + + # Scale the weights by the reciprocal of the equalization scale + # Reshape the equalization scale so that we can multiply it to the weight along axis=1 + equalization_scale_reshaped = reshape_scale(equalization_scale, 1, weight) + scaled_weight = torch.mul(weight, torch.reciprocal(equalization_scale_reshaped)) + + if next_equalization_scale is None: + op_module.weight = nn.Parameter(scaled_weight) + return + + # Multiply the weights row wise by the next equalization scale + # Reshape the equalization scale so that we can multiply it to the weight along axis=0 + next_equalization_scale_reshaped = reshape_scale(next_equalization_scale, 0, weight) + scaled_weight = torch.mul(scaled_weight, next_equalization_scale_reshaped) + + op_module.weight = nn.Parameter(scaled_weight) + + # Multiply the bias element wise by the next 
equalization scale + bias = op_module.bias + if bias is None: + return + assert(isinstance(bias, torch.Tensor)) + + # Reshape the equalization scale so that we can multiply it element-wise to the bias + next_equalization_scale_reshaped = reshape_scale(next_equalization_scale, 0, bias) + scaled_bias = torch.mul(bias, next_equalization_scale_reshaped) + op_module.bias = nn.Parameter(scaled_bias) + +def scale_weight_functional( + op_node: Node, + model: GraphModule, + modules: Dict[str, nn.Module], + equalization_scale: torch.Tensor, + next_equalization_scale: Optional[torch.Tensor], +) -> None: + """ Scales the weight value for functional layers + """ + if equalization_scale is None: + return + + # From the given op_node, the path looks like: + # get_attr(weight) -> weight_quant_obs -> weight_eq_obs -> op_node + # So we want to trace back from the op_node to get the equalization observer + # node, then the quantization observer node, and then finally the weight + # node which contains the weight values. 
+ + # Get the equalization observer node + weight_eq_obs_node = maybe_get_weight_eq_obs_node(op_node, modules) + if weight_eq_obs_node is None: + return + + # Get the quantization observer node + weight_quant_obs_node = weight_eq_obs_node.args[0] + if weight_quant_obs_node is None: + return + assert(isinstance(weight_quant_obs_node, Node) and + isinstance(modules[str(weight_quant_obs_node.target)], ObserverBase)) + + # Get the get_attr(weight) node + weight_node = weight_quant_obs_node.args[0] + if weight_node is None: + return + assert(isinstance(weight_node, Node) and weight_node.op == 'get_attr') + + weight_parent_name, weight_name = _parent_name(weight_node.target) + weight = getattr(modules[weight_parent_name], weight_name) + + # Scale the weights for input-weight equalization + # If the following layer needs to be equalized then we will multiply its scale + # Reshape the equalization scale so that we can multiply it to the weight along axis=1 + equalization_scale_reshaped = reshape_scale(equalization_scale, 1, weight) + scaled_weight = torch.mul(weight, torch.reciprocal(equalization_scale_reshaped)) + + if next_equalization_scale is None: + setattr(modules[weight_parent_name], weight_name, scaled_weight) + return + + # Multiply the weights row wise by the next equalization scale + # Reshape the equalization scale so that we can multiply it to the weight along axis=1 + next_equalization_scale_reshaped = reshape_scale(next_equalization_scale, 0, scaled_weight) + scaled_weight = torch.mul(scaled_weight, next_equalization_scale_reshaped) + + setattr(modules[weight_parent_name], weight_name, scaled_weight) + assert(torch.allclose(model.get_buffer(str(weight_node.target)), scaled_weight)) + + # Multiply the bias element wise by the next equalization scale + bias_node = None + for node in op_node.args: + # Find the node containing the weight values + if isinstance(node, Node) and node.op == 'get_attr' and 'bias' in node.name: + bias_node = node + break + if 
# NOTE(review): this chunk opens mid-function.  The statements below are the
# tail of a bias-scaling helper whose `def` line precedes this chunk; they are
# wrapped in a stub signature here only so the fragment stays parseable.
# TODO confirm the true signature against the full file (this appears to be the
# bias-scaling tail of the functional weight-equalization path).
def _scale_bias_fragment(bias_node, modules, next_equalization_scale):
    if bias_node is None:
        return

    bias_parent_name, bias_name = _parent_name(bias_node.target)
    bias = getattr(modules[bias_parent_name], bias_name)

    # Reshape the equalization scale so that we can multiply it element-wise to the bias
    next_equalization_scale_reshaped = reshape_scale(next_equalization_scale, 0, bias)
    scaled_bias = torch.mul(bias, next_equalization_scale_reshaped)
    setattr(modules[bias_parent_name], bias_name, scaled_bias)

def clear_weight_quant_obs_node(op_node: Node, modules: Dict[str, nn.Module]) -> None:
    """ Given the operation node, find the corresponding weight quantization
    observer and reset its min/max values.

    No-op when the op has no weight equalization observer, or when that
    observer has no quantization-observer input.
    """
    weight_eq_obs_node = maybe_get_weight_eq_obs_node(op_node, modules)
    if weight_eq_obs_node is None:
        return

    weight_quant_obs_node = weight_eq_obs_node.args[0]
    if weight_quant_obs_node is None:
        return
    assert(isinstance(weight_quant_obs_node, Node))

    weight_quant_obs = modules[str(weight_quant_obs_node.target)]
    assert(isinstance(modules[str(weight_quant_obs_node.target)], ObserverBase))
    weight_quant_obs.reset_min_max_vals()  # type: ignore[operator]

def remove_node(model: GraphModule, node: Node, prev_node: Node):
    """ Removes the given node from the model by replacing all of its users with
    the given previous node
    """
    # For all of the current node's users, replace the current node with
    # the input quantization observer node
    orig_users = list(node.users.keys())
    for user_node in orig_users:
        user_node.replace_input_with(node, prev_node)

    # Erase the InputEqualizationObserver node
    model.graph.erase_node(node)

def update_obs_for_equalization(model: GraphModule, modules: Dict[str, nn.Module]) -> Dict[str, _WeightEqualizationObserver]:
    """ Update all of the observer's equalization scale. For each
    InputEqualizationObserver, we will find the location of the next
    WeightEqualizationObserver, create it, and calculate the equalization scale
    based on the two observers.

    We will then return a dictionary mapping operation node names to
    the corresponding WeightEqualizationObservers for that operation.
    """
    weight_eq_obs_dict = {}
    for node in model.graph.nodes:
        if node.op == 'call_module' and isinstance(modules[node.target], _InputEqualizationObserver):
            input_eq_obs = modules[node.target]
            assert(isinstance(input_eq_obs, _InputEqualizationObserver))
            op_node, weight_eq_obs = get_op_node_and_weight_eq_obs(node, model, modules)

            if op_node is None or weight_eq_obs is None:
                continue

            if op_node.op == 'call_module':
                # Calibrate the weight equalization observer since it has just
                # been created
                if fused_module_supports_equalization(modules[str(op_node.target)]):
                    module = modules[str(op_node.target)][0]  # type: ignore[index]
                    assert(nn_module_supports_equalization(module))
                    weight_eq_obs(module.weight)
                else:
                    weight_eq_obs(modules[str(op_node.target)].weight)

            # Calculate and set the equalization scale values
            equalization_scale = calculate_equalization_scale(input_eq_obs, weight_eq_obs)
            input_eq_obs.set_equalization_scale(equalization_scale)
            weight_eq_obs.set_equalization_scale(equalization_scale)

            weight_eq_obs_dict[op_node.name] = weight_eq_obs

    return weight_eq_obs_dict

def convert_eq_obs(
    model: GraphModule,
    modules: Dict[str, nn.Module],
    weight_eq_obs_dict: Dict[str, _WeightEqualizationObserver],
) -> None:
    """ Converts the equalization operations and updates the other nodes in the
    following way:
        - Removes the input equalization observers and inserts a mul operator
          along with an equalization scale node wherever applicable (we do not
          want to insert a mul operator between connecting linear layers).
        - Updates the input quantization observers with the scaled input min/max
          values.
        - Scales the weights by the current and next equalization scales.
        - Removes the weight equalization observer node if it exists.

    Before (after prepare):
                                    weight values
                                          |
                                    WeightQuantObs
                                          |
                                      WeightEqObs
                                          |
        x -> InpQuantObs -> InpEqObs -> linear -> OutQuantObs

    After this function:
                                              scaled weight values
                                                      |
         equalization scale                     WeightQuantObs
                |                                     |
        x -> mul -> InpQuantObs (scaled min/max) -> linear -> OutQuantObs

    After convert:
       equalization scale                 scaled weight values
              |                                    |
        x -> mul -> quantize_per_tensor -> quantized::linear

    Note that although the equalization observer appeared after the quantization
    observer after prepare_fx, the mul node appears before the quantization node
    after convert_fx. This is because placing the equalization observer after
    the quantization observer in prepare_fx would allow us to keep the invariant
    that the graph before the current node inserts its observers is not
    modified.

    Having the equalization observer before the quantization observer would also
    cause some inconsistences between the ordering of the quantization and
    equalization observers.
    For example, a single linear layer would look like:
        x -> InpEqObs1 -> InpQuantObs1 -> linear1 -> OutQuantObs1
    But between two connected linear layers, it would look like:
        linear1 -> OutQuantObs1 -> InpEqObs2 -> linear2 -> OutQuantObs2
    """
    for node in model.graph.nodes:
        if node.op == 'call_module' and isinstance(modules[node.target], _InputEqualizationObserver):
            inp_quant_obs_node = node.args[0]
            prev_node = inp_quant_obs_node.args[0]

            # If the previous node is a layer that needs to be equalized, then
            # we will remove the current node because we do not need to add any
            # equalization nodes between two layers that need to be equalized

            # Before: linear1/relu (prev_node) -> output_quant_obs1 (inp_quant_obs_node) -> input_eq_obs2 (node) -> linear2
            # After: linear1/relu (prev_node) -> output_quant_obs1 (inp_quant_obs_node) -> linear2
            if node_supports_equalization(prev_node, modules) or "relu" in prev_node.name:
                remove_node(model, node, inp_quant_obs_node)
                continue

            # Update the following input quantization observer's min/max values
            scale_input_observer(node, modules)

            # Remove the InputEqualization node and add a mul operator before
            # the quantization observer node that appears before the equalization node
            # Before: x -> input_quant_obs -> input_eq_obs -> linear
            # After: x -> mul -> input_quant_obs -> linear

            # Create a node containing the equalization scale
            with model.graph.inserting_before(inp_quant_obs_node):
                get_new_eq_scale_name = get_new_attr_name_with_prefix(prev_node.name + '_equalization_scale')
                name = get_new_eq_scale_name(modules)
                setattr(model, name, modules[node.target].equalization_scale)
                eq_scale_node = model.graph.create_node('get_attr', name)

            # Create a node multiplying the input with the equalization scale
            with model.graph.inserting_after(eq_scale_node):
                inputs = (prev_node, eq_scale_node)
                mul_node = model.graph.create_node("call_function", torch.mul, inputs)

            # Set the mul node to be the input_quant_obs_node's input instead of
            # the previous node
            inp_quant_obs_node.replace_input_with(prev_node, mul_node)
            remove_node(model, node, inp_quant_obs_node)

        elif weight_eq_obs_dict.get(node.name, None) is not None:
            weight_eq_obs = weight_eq_obs_dict.get(node.name)
            assert(isinstance(weight_eq_obs, _WeightEqualizationObserver))
            equalization_scale = weight_eq_obs.equalization_scale

            if equalization_scale.nelement() == 1 and equalization_scale == torch.tensor(1):
                equalization_scale = None  # type: ignore[assignment]
            maybe_next_equalization_scale = maybe_get_next_equalization_scale(node, modules)

            # Scale the weight nodes
            if node.op == 'call_module':
                scale_weight_node(node, modules, equalization_scale, maybe_next_equalization_scale)
            elif node.op == 'call_function':
                scale_weight_functional(node, model, modules, equalization_scale, maybe_next_equalization_scale)

            weight_eq_obs_node = maybe_get_weight_eq_obs_node(node, modules)
            if weight_eq_obs_node is None:
                # NOTE(review): this returns from convert_eq_obs entirely rather
                # than continuing with the remaining graph nodes; preserved
                # as-is to keep behavior unchanged.
                return
            assert(isinstance(modules[str(weight_eq_obs_node.target)], _WeightEqualizationObserver))

            # Clear the quantization observer's min/max values so that they
            # can get updated later based on the new scale values
            clear_weight_quant_obs_node(node, modules)

            # Erase the weight equalization observer node
            prev_node = weight_eq_obs_node.args[0]
            remove_node(model, weight_eq_obs_node, prev_node)
        else:
            # Fixed: the original message was missing the closing quote on
            # 'call_function' and a separator before "Instead", producing
            # "...call_functionInstead got...".
            raise ValueError("Expected operation node to be 'call_module' or 'call_function'. " +
                             f"Instead got node {node.name} as '{node.op}'.")

def _convert_equalization_ref(model: GraphModule):
    """ Reference function which applies changes needed for equalization, but
    does not quantize the nodes
    """
    modules = dict(model.named_modules(remove_duplicate=False))

    # Calculate the equalization scale, update the observers with the scaled
    # inputs, and scale the weight
    weight_eq_obs_dict = update_obs_for_equalization(model, modules)
    convert_eq_obs(model, modules, weight_eq_obs_dict)

    return GraphModule(model, model.graph)


###############################################################################
# Functions for running the equalized model on the Numeric Suite              #
###############################################################################

def get_layer_sqnr_dict(model_a: nn.Module, model_b: nn.Module, x: torch.Tensor) -> Dict[str, float]:
    """ Runs the Numeric Suite on model_a and model_b and returns a dictionary
    containing the SQNR between layers in model_a and model_b.

    Note: In order to support equalized models, this function has a hacky fix in
    which we do not match any torch.mul operators. This is because equalized
    models contain extra mul operators to scale the input by the equalization
    scale, but this edge case has not been resolved yet within the numeric suite code.

    Args:
        model_a: A float model
        model_b: A quantized model
        x: Inputs to use during calibration
    """
    import torch.ao.ns._numeric_suite_fx as ns
    from torch.ao.ns.fx.mappings import get_unmatchable_types_map

    unmatchable_types_map = get_unmatchable_types_map()
    unmatchable_types_map["funs_unmatchable"].add(torch.mul)

    model_a_ns, model_b_ns = ns.add_loggers(
        'fp32', model_a,
        'int8', model_b,
        ns.OutputLogger,
        unmatchable_types_map=unmatchable_types_map
    )

    model_a_ns(x)
    model_b_ns(x)

    activation_comparison_dict = ns.extract_logger_info(
        model_a_ns,
        model_b_ns,
        ns.OutputLogger,
        'int8')
    ns.extend_logger_results_with_comparison(
        activation_comparison_dict,
        'fp32', 'int8',
        torch.ao.ns.fx.utils.compute_sqnr, 'sqnr'
    )

    # Construct a dictionary mapping layer names to the SQNR values
    layer_sqnr_dict = {}
    for key in activation_comparison_dict:
        layer = activation_comparison_dict[key]['node_output']['int8'][0]['fqn']
        sqnr = activation_comparison_dict[key]['node_output']['int8'][0]['sqnr'][0]
        layer_sqnr_dict[layer] = sqnr

    return layer_sqnr_dict

def get_equalization_qconfig_dict(
    layer_sqnr_dict: Dict[str, float],
    num_layers_to_equalize: int
) -> Any:
    """ Given the layer to SQNR dictionary, find the layers with the highest
    quantization errors, and return an equalization_qconfig_dict
    specifying to only equalize those top layers.

    Args:
        layer_sqnr_dict: Dictionary mapping layer names to SQNR values (found
            when comparing an equalized model against a float model)
        num_layers_to_equalize: Number of layers with the highest quantization
           errors to equalize
    """

    # Sort the layer_sqnr_dictionary values and get the layers with the lowest
    # SQNR values (aka highest quantization errors)
    layer_sqnr_sorted = sorted(layer_sqnr_dict.items(), key=lambda item: item[1])
    layers_to_equalize = layer_sqnr_sorted[:num_layers_to_equalize]

    # Constructs an equalization_qconfig_dict that specifies to only equalize
    # the layers with the highest quantization errors
    module_to_qconfig_list = [(item[0], default_equalization_qconfig) for item in layers_to_equalize]
    equalization_qconfig_dict = {"module_name": module_to_qconfig_list}
    return equalization_qconfig_dict

# ---------------------------------------------------------------------------
# NOTE(review): the original paste continues with a unified-diff header for a
# second file, preserved here as a comment:
#   diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_lower_to_native_backend.py
#        b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_lower_to_native_backend.py
#   new file mode 100644
#   index 0000000000000000000000000000000000000000..15bfff03aa0ff154970d635b38f33b60992cb597
#   --- /dev/null
#   +++ b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_lower_to_native_backend.py
#   @@ -0,0 +1,1121 @@
# The new file's import block follows.
# ---------------------------------------------------------------------------
import torch
from torch.fx import map_arg, Node
from torch.fx.graph import Graph
import torch.nn as nn
import torch.nn.functional as F
import torch.ao.nn.intrinsic as nni
import torch.ao.nn.intrinsic.quantized as nniq
import torch.ao.nn.intrinsic.quantized.dynamic as nniqd
import torch.ao.nn.quantized as nnq
import torch.ao.nn.quantized.dynamic as nnqd
# NOTE(review): this chunk is cut mid-statement — a dangling "import" keyword
# ends here; it is completed ("torch.ao.nn.quantized.reference as nnqr") at the
# start of the next chunk.
# NOTE(review): the leading "import" keyword of the next line belongs to the
# previous chunk (the statement was cut at the chunk boundary).
import torch.ao.nn.quantized.reference as nnqr
from torch.ao.nn.quantized.modules.utils import WeightedQuantizedModule
from torch.fx import GraphModule
from .utils import (
    collect_producer_nodes,
    get_linear_prepack_op_for_dtype,
    get_new_attr_name_with_prefix,
    get_qconv_prepack_op,
    graph_module_from_producer_nodes,
)
from ..utils import _parent_name
from ..qconfig import QConfigAny
from ..quantization_mappings import get_quantized_operator
from .utils import create_node_from_old_node_preserve_meta
from typing import Dict, Tuple, Type, List, Callable, Any, Union, Set, Optional
import operator

# Quantized-op keyword arguments that must be dropped when lowering, because the
# quantized kernel does not accept them (e.g. `inplace` flags).
QOP_TO_ARG_NAMES_TO_SKIP = {
    torch._ops.ops.quantized.hardswish: ['inplace'],
    torch._ops.ops.quantized.elu: ['inplace'],
    torch._ops.ops.quantized.dropout: ['inplace'],
    torch._ops.ops.quantized.instance_norm:
    ['running_mean', 'running_var', 'use_input_stats', 'momentum'],
}

def _is_node_in_list(node, modules, func_list, method_list, module_type_list) -> Tuple[bool, bool, bool]:
    # Classify `node` against the three call kinds; returns one flag per kind
    # (call_function target in func_list, call_method target in method_list,
    # call_module whose module type is in module_type_list).
    is_call_function = node.op == "call_function" and node.target in func_list
    is_call_method = node.op == "call_method" and node.target in method_list
    is_call_module = node.op == "call_module" and type(modules[str(node.target)]) in module_type_list
    return is_call_function, is_call_method, is_call_module

def is_fixed_qparams_node(node, modules) -> Tuple[bool, bool, bool]:
    # Ops whose quantized output uses fixed quantization parameters
    # (sigmoid/tanh-style activations).
    func_list = [
        torch.nn.functional.hardsigmoid,
        torch.nn.functional.sigmoid,
        torch.sigmoid,
        torch.tanh,
    ]
    method_list = [
        "hardsigmoid",
        "hardsigmoid_",
        "sigmoid",
        "sigmoid_",
        "tanh",
        "tanh_",
    ]
    module_type_list = [
        torch.nn.Hardsigmoid,
        torch.nn.Sigmoid,
        torch.nn.Tanh,
        torch.nn.Softmax,
    ]
    return _is_node_in_list(node, modules, func_list, method_list, module_type_list)

def is_default_node(node, modules) -> Tuple[bool, bool, bool]:
    # Ops lowered via the "default" special-pattern path (see
    # SPECIAL_PATTERN_LOWER_MODULE_MAP below for the module replacements).
    func_list = [
        torch.nn.functional.elu,
        torch.nn.functional.hardswish,
        torch.nn.functional.instance_norm,
        torch.nn.functional.layer_norm,
        torch.nn.functional.leaky_relu,
        torch.nn.functional.dropout,
    ]
    method_list: List[Any] = []
    module_type_list = [
        nnqr.ConvTranspose1d,
        nnqr.ConvTranspose2d,
        torch.nn.ELU,
        torch.nn.LeakyReLU,
        torch.nn.Hardswish,
        torch.nn.InstanceNorm1d,
        torch.nn.InstanceNorm2d,
        torch.nn.InstanceNorm3d,
        torch.nn.LayerNorm,
        torch.nn.Dropout,
        torch.nn.PReLU,
        torch.nn.BatchNorm2d,
        torch.nn.BatchNorm3d,
        torch.ao.nn.intrinsic.BNReLU2d,
        torch.ao.nn.intrinsic.BNReLU3d,
    ]
    return _is_node_in_list(node, modules, func_list, method_list, module_type_list)

def is_copy_node(node, modules) -> Tuple[bool, bool, bool]:
    # Ops that simply propagate the quantization parameters of their input
    # (pooling, clamp, flatten, ...): the output qparams are copied from input.
    func_list = [
        torch.adaptive_avg_pool1d,
        torch.nn.functional.adaptive_avg_pool2d,
        torch.nn.functional.adaptive_avg_pool3d,
        torch.nn.functional.hardtanh,
        torch.nn.functional.hardtanh_,
        torch.nn.functional.interpolate,
        torch.nn.functional.max_pool1d,
        torch.nn.functional.max_pool2d,
        torch.nn.functional.max_pool3d,
        torch.nn.functional.relu,
        torch.nn.functional.relu6,
        torch.avg_pool1d,
        torch._C._nn.avg_pool2d,
        torch._C._nn.avg_pool3d,
        torch.clamp,
        torch.flatten,
        torch.mean,
        operator.floordiv,
        # F.channel_shuffle and torch.channel_shuffle are essentially the same thing
        # so we only need to put one of them here
        torch.channel_shuffle,
    ]
    method_list = [
        "clamp",
        "mean",
        "relu",
        "relu_",
    ]
    module_type_list = [
        torch.nn.AdaptiveAvgPool1d,
        torch.nn.AdaptiveAvgPool2d,
        torch.nn.AdaptiveAvgPool3d,
        torch.nn.AvgPool1d,
        torch.nn.AvgPool2d,
        torch.nn.AvgPool3d,
        torch.nn.Hardtanh,
        torch.nn.MaxPool1d,
        torch.nn.MaxPool2d,
        torch.nn.MaxPool3d,
        torch.nn.ReLU,
        torch.nn.ReLU6,
        torch.nn.ChannelShuffle,
    ]
    return _is_node_in_list(node, modules, func_list, method_list, module_type_list)

def is_general_tensor_shape_node(node, modules) -> Tuple[bool, bool, bool]:
    # Shape/view-style ops that do not change tensor values, only metadata.
    func_list = [
        torch.transpose,
        torch.repeat_interleave,
        torch.squeeze,
        torch.stack,
        torch.unsqueeze,
    ]
    method_list = [
        "contiguous",
        "detach",
        "detach_",
        "permute",
        "repeat",
        "repeat_interleave",
        "reshape",
        "resize_",
        "shape",
        "size",
        "squeeze",
        "squeeze_",
        "transpose",
        "unsqueeze",
        "unsqueeze_",
        "view",
    ]
    module_type_list = [
        torch.nn.Identity,
    ]
    return _is_node_in_list(node, modules, func_list, method_list, module_type_list)

def is_other_node(node, modules) -> Tuple[bool, bool, bool]:
    # Remaining special-pattern ops that fit none of the categories above.
    func_list = [
        torch.cat,
    ]
    method_list: List[Any] = []
    module_type_list: List[Any] = []
    return _is_node_in_list(node, modules, func_list, method_list, module_type_list)

def is_special_pattern_node(node, modules) -> Tuple[bool, bool, bool]:
    # OR-combination of all the category checkers above, per call kind.
    res_function, res_method, res_module = False, False, False
    for checker in [is_fixed_qparams_node, is_default_node, is_copy_node, is_general_tensor_shape_node, is_other_node]:
        is_call_function, is_call_method, is_call_module = checker(node, modules)
        res_function = res_function or is_call_function
        res_method = res_method or is_call_method
        res_module = res_module or is_call_module
    return res_function, res_method, res_module

def is_dequantize_node(node) -> bool:
    # True when `node` is the `x.dequantize()` call_method produced by
    # reference-quantized graphs.
    return isinstance(node, Node) and node.op == "call_method" and node.target == "dequantize"

def is_getattr_tensor_metadata_node(node) -> bool:
    # Matches `getattr(tensor, "shape")`-style nodes.
    return node.op == "call_function" and \
        node.target == getattr and \
        node.args[1] in ["shape"]

def is_get_tensor_info_node(node) -> bool:
    # Matches `tensor.shape` / `tensor.size()` call_method nodes.
    return node.op == "call_method" and \
        node.target in ["shape", "size"]

def should_skip_lowering(op: torch.fx.node.Node, qconfig_map: Dict[str, QConfigAny]) -> bool:
    """
    Return True if the op is configured with a None qconfig, False otherwise.
    Note: maybe need to generalize this to also check for the dtype, and we
    only lower when dtype matches, but right now fbgemm/qnnpack only support
    a single dtype, so it is OK for now.
    """
    return op.name in qconfig_map and qconfig_map[op.name] is None

# Mapping from reference module class to the replacement static quantized module class for lowering
STATIC_LOWER_MODULE_MAP: Dict[Type[nn.Module], Type[WeightedQuantizedModule]] = {
    nnqr.Linear: nnq.Linear,
    nnqr.Conv1d: nnq.Conv1d,
    nnqr.Conv2d: nnq.Conv2d,
    nnqr.Conv3d: nnq.Conv3d,
}

# Mapping from reference module class to the replacement dynamic quantized module class for lowering
DYNAMIC_LOWER_MODULE_MAP: Dict[Type[nn.Module], Type[nn.Module]] = {
    nnqr.Linear: nnqd.Linear,
    nnqr.GRUCell: nnqd.GRUCell,
    nnqr.LSTMCell: nnqd.LSTMCell,
    nnqr.RNNCell: nnqd.RNNCell,
    nnqr.LSTM: nnqd.LSTM,
    nnqr.GRU: nnqd.GRU,
}

# Mapping from reference module class to the replacement weight only quantized module class for lowering
# TODO: correct the namespace for these modules
WEIGHT_ONLY_LOWER_MODULE_MAP: Dict[Type[nn.Module], Type[nn.Module]] = {
    nnqr.Embedding: nnq.Embedding,
    nnqr.EmbeddingBag: nnq.EmbeddingBag,
}

# TODO: merge with STATIC_LOWER_MODULE_MAP after we merge
# _lower_static_weighted_ref_module and special_pattern_replacement
SPECIAL_PATTERN_LOWER_MODULE_MAP = {
    nn.BatchNorm2d: nnq.BatchNorm2d,
    nn.BatchNorm3d: nnq.BatchNorm3d,
    nnqr.ConvTranspose1d: nnq.ConvTranspose1d,
    nnqr.ConvTranspose2d: nnq.ConvTranspose2d,
    nn.ELU: nnq.ELU,
    nn.LeakyReLU: nnq.LeakyReLU,
    nn.Hardswish: nnq.Hardswish,
    nn.InstanceNorm1d: nnq.InstanceNorm1d,
    nn.InstanceNorm2d: nnq.InstanceNorm2d,
    nn.InstanceNorm3d: nnq.InstanceNorm3d,
    nn.LayerNorm: nnq.LayerNorm,
    nn.Dropout: nnq.Dropout,
    nn.Softmax: nnq.Softmax,
    nn.PReLU: nnq.PReLU,
    nni.BNReLU2d: nniq.BNReLU2d,
    nni.BNReLU3d: nniq.BNReLU3d,
}

# Mapping from fused module class to a 2-tuple of:
#   1) The inner reference module class
#   2) The replacement static quantized module class for lowering
STATIC_LOWER_FUSED_MODULE_MAP: Dict[Type[nn.Module], Tuple[Type[nn.Module], Type[WeightedQuantizedModule]]] = {
    nni.LinearReLU: (nnqr.Linear, nniq.LinearReLU),
    # TODO: LinearLeakyReLU is registered as global but it is only fused and
    # lowered when onednn's backend config is used. Maybe need to separate
    # registration and lowering functions for different backends in the future.
    nni.LinearLeakyReLU: (nnqr.Linear, nniq.LinearLeakyReLU),
    nni.LinearTanh: (nnqr.Linear, nniq.LinearTanh),
    nni.ConvReLU1d: (nnqr.Conv1d, nniq.ConvReLU1d),
    nni.ConvReLU2d: (nnqr.Conv2d, nniq.ConvReLU2d),
    nni.ConvReLU3d: (nnqr.Conv3d, nniq.ConvReLU3d),
}

# The difference between STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP and STATIC_LOWER_FUSED_MODULE_MAP:
# The refer node inside STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP has 2 inputs.
# Mapping from fused module class to a 2-tuple of:
#   1) The inner reference module class
#   2) The replacement static quantized module class for lowering
STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP: Dict[Type[nn.Module], Tuple[Type[nn.Module], Type[WeightedQuantizedModule]]] = {
    nni.ConvAdd2d: (nnqr.Conv2d, nniq.ConvAdd2d),
    nni.ConvAddReLU2d: (nnqr.Conv2d, nniq.ConvAddReLU2d),
}

# Mapping from fused module class to a 2-tuple of:
#   1) The inner reference module class
#   2) The replacement dynamic quantized module class for lowering
DYNAMIC_LOWER_FUSED_MODULE_MAP: Dict[Type[nn.Module], Tuple[Type[nn.Module], Type[nn.Module]]] = {
    nni.LinearReLU: (nnqr.Linear, nniqd.LinearReLU),
}

# Mapping from a functional to lower to a 2-tuple of
#   1) The quantized version of the op
#   2) The quantized version of the op fused with relu, if it exists, else None
STATIC_LOWER_FUNCTIONAL_MAP: Dict[Callable, Tuple[Callable, Callable]] = {
    F.linear: (torch.ops.quantized.linear, torch.ops.quantized.linear_relu),
    F.conv1d: (torch.ops.quantized.conv1d, torch.ops.quantized.conv1d_relu),
    F.conv2d: (torch.ops.quantized.conv2d, torch.ops.quantized.conv2d_relu),
    F.conv3d: (torch.ops.quantized.conv3d, torch.ops.quantized.conv3d_relu),
}

# NOTE(review): this chunk is cut mid-statement — the annotation/assignment for
# WEIGHT_PREPACK_OPS is completed at the start of the next chunk.
# NOTE(review): the name "WEIGHT_PREPACK_OPS" of this statement appears at the
# end of the previous chunk; the full assignment is reproduced here.
# Prepack ops whose weight-producing subgraphs are folded by fold_weight below.
WEIGHT_PREPACK_OPS: Set[Callable] = {
    torch._ops.ops.quantized.linear_prepack,
    torch._ops.ops.quantized.linear_prepack_fp16,
    torch._ops.ops.quantized.conv1d_prepack,
    torch._ops.ops.quantized.conv2d_prepack,
    torch._ops.ops.quantized.conv3d_prepack,
}

# Mapping from a functional to a dictionary, where the key is a 2-tuple of
# (input_activation_dtype, weight_dtype) and the value is a 2-tuple of
#   1) The dynamically quantized version of the op
#   2) The dynamically quantized version of the op fused with relu, if it exists, else None
DYNAMIC_LOWER_FUNCTIONAL_MAP: Dict[Callable, Dict[Tuple[torch.dtype, torch.dtype], Tuple[Callable, Optional[Callable]]]] = {
    F.linear: {
        (torch.quint8, torch.qint8): (torch.ops.quantized.linear_dynamic,
                                      torch.ops.quantized.linear_relu_dynamic),
        (torch.float16, torch.float16): (torch.ops.quantized.linear_dynamic_fp16,
                                         torch.ops.quantized.linear_relu_dynamic_fp16)
    },
    # dynamic conv + relu is not available yet
    F.conv1d: {
        (torch.quint8, torch.qint8): (torch.ops.quantized.conv1d_dynamic, None),
    },
    F.conv2d: {
        (torch.quint8, torch.qint8): (torch.ops.quantized.conv2d_dynamic, None),
    },
    F.conv3d: {
        (torch.quint8, torch.qint8): (torch.ops.quantized.conv3d_dynamic, None),
    },
}

CONV_FUNCTIONAL_OPS: Set[Callable] = {
    F.conv1d,
    F.conv2d,
    F.conv3d,
}

# Quantized counterparts for binary ops, and for binary ops fused with relu.
QBIN_OP_MAPPING: Dict[Union[Callable, str], Callable] = {
    operator.add: torch.ops.quantized.add,
    torch.add: torch.ops.quantized.add,
    operator.mul: torch.ops.quantized.mul,
    torch.mul: torch.ops.quantized.mul,
    torch.matmul: torch.ops.quantized.matmul,
}
QBIN_RELU_OP_MAPPING: Dict[Union[Callable, str], Callable] = {
    operator.add: torch.ops.quantized.add_relu,
    torch.add: torch.ops.quantized.add_relu,
    operator.mul: torch.ops.quantized.mul_relu,
    torch.mul: torch.ops.quantized.mul_relu,
}

def _save_packed_weight(self, destination, prefix, keep_vars):
    # state_dict hook: copy every "_packed_weight*" ScriptObject attribute into
    # the destination dict so packed params survive serialization.
    for attr_name in dir(self):
        if "_packed_weight" in attr_name and \
           isinstance(getattr(self, attr_name), torch._C.ScriptObject):  # type: ignore[attr-defined]
            packed_weight = getattr(self, attr_name)
            destination[prefix + attr_name] = packed_weight

def _load_packed_weight(self, state_dict, prefix, local_metadata, strict,
                        missing_keys, unexpected_keys, error_msgs):
    # load_state_dict pre-hook: restore "_packed_weight*" ScriptObject entries
    # onto the module and remove them from state_dict so the normal loading
    # machinery does not see them.
    attrs_to_pop = []
    for attr_name in state_dict:
        if attr_name.startswith("_packed_weight") and isinstance(state_dict[attr_name], torch._C.ScriptObject):  # type: ignore[attr-defined] # noqa: B950
            setattr(self, attr_name, state_dict[attr_name])
            attrs_to_pop.append(attr_name)

    # pop the packed param attributes
    for attr_name in attrs_to_pop:
        state_dict.pop(attr_name)

def fold_weight(
    quantized_model: GraphModule,
    node_name_to_scope: Dict[str, Tuple[str, type]]
) -> GraphModule:
    """
    Trace back from the weight node until we hit getattr, reconstruct the
    graph module with the traced nodes and run the graph module to pack the
    weight. then replace the original chain of ops with the packed weight.
    """
    packed_weights = {}
    # map from folded node name to the prepacked weight name
    folded_nodes = {}
    # get packed weights
    for node in quantized_model.graph.nodes:
        if node.op == 'call_function' and node.target in WEIGHT_PREPACK_OPS:
            nodes_to_fold = collect_producer_nodes(node)
            if nodes_to_fold is not None:
                for node_to_fold in nodes_to_fold:
                    folded_nodes[node_to_fold.name] = node

                prepacking_module = graph_module_from_producer_nodes(
                    quantized_model, nodes_to_fold)
                # Running the producer subgraph eagerly yields the packed weight
                packed_weight = prepacking_module()
                packed_weights[node.name] = packed_weight

    # remove folded nodes and replace the prepacking node with getattr
    folded_graph = Graph()
    env: Dict[Any, Any] = {}

    def load_arg(a):
        return map_arg(a, lambda node: env[node.name])

    for node in quantized_model.graph.nodes:
        prepack_node = folded_nodes.get(node.name, None)
        if prepack_node is node:
            packed_weight = packed_weights[node.name]
            # add a prepacked attribute to root
            op_node = list(prepack_node.users)[0]
            module_path, _ = node_name_to_scope[op_node.name]
            get_new_packed_weight_name = \
                get_new_attr_name_with_prefix(module_path + '_packed_weight_')
            packed_weight_name = get_new_packed_weight_name(quantized_model)
            setattr(quantized_model, packed_weight_name, packed_weight)
            # replace prepack node with a getattr node
            env[node.name] = folded_graph.create_node(
                'get_attr', packed_weight_name, (), {})
        elif prepack_node is not None:
            # remove the folded node
            continue
        else:
            # copy other nodes
            env[node.name] = folded_graph.node_copy(node, load_arg)

    quantized_model = GraphModule(quantized_model, folded_graph)
    quantized_model._register_state_dict_hook(_save_packed_weight)
    quantized_model._register_load_state_dict_pre_hook(_load_packed_weight, with_module=True)
    return quantized_model

def _get_module(node: Node, modules: Dict[str, nn.Module]) -> Optional[nn.Module]:
    """
    Return the `torch.nn.Module` that corresponds to the specified node's target.
    If no such node exists, return None.
    """
    if node.op == "call_module" and str(node.target) in modules:
        return modules[str(node.target)]
    else:
        return None

def _match_static_pattern(
    node: Node,
    modules: Dict[str, nn.Module],
    qconfig_map: Dict[str, QConfigAny],
    matching_modules_or_ops: List[Callable],
    dequantize_node_arg_indices: List[int]
) -> Union[Tuple[Node, Node, Node], Tuple[None, None, None]]:
    """
    Match the pattern (dequantize - ref node - quantize) against the node provided.

    If there is a match, return a 3-tuple of:
      1) q_node: the quantize node,
      2) relu_node: a relu node wrapping the ref_node, and
      3) ref_node: a reference module or functional node to replace with its quantized counterpart
    Otherwise, if there is no match, return a 3-tuple of (None, None, None).

    Parameters:
      node: The `torch.fx.Node` to match against.
      modules: A mapping from node names to modules in the model graph, used for module lookup.
      qconfig_map: A mapping from node names to the qconfigs associated with the nodes.
          If the corresponding qconfig for the reference node is None, then return no match.
      matching_modules_or_ops: Either a list of functions or a list of `torch.nn.Module`s.
          If the reference node is not in this list, then return no match.
      dequantize_node_arg_indices: A list of indices in the reference node args where dequantize
          nodes may be present. An empty list means skipping the check for dequantize nodes.
    """
    # NOTE(review): the function body continues in the next chunk of this file.
+ """ + SKIP_LOWERING_VALUE = (None, None, None) + + # Match quantize node + if node.op != "call_function" or node.target != torch.quantize_per_tensor: + return SKIP_LOWERING_VALUE + q_node = node + ref_node = q_node.args[0] + assert(isinstance(ref_node, Node)) + + # Handle cases where the node is wrapped in a ReLU + if (ref_node.op == "call_function" and ref_node.target in (F.relu, torch.relu)) or\ + (ref_node.op == "call_module" and type(_get_module(ref_node, modules)) == nn.ReLU): + relu_node = ref_node + ref_node = relu_node.args[0] + assert(isinstance(ref_node, Node)) + else: + relu_node = None + if should_skip_lowering(ref_node, qconfig_map): + return SKIP_LOWERING_VALUE + + # Match reference module or functional + if isinstance(matching_modules_or_ops[0], type) and issubclass(matching_modules_or_ops[0], nn.Module): + expected_op = "call_module" + match_key = type(_get_module(ref_node, modules)) + else: + expected_op = "call_function" + match_key = ref_node.target + if ref_node.op != expected_op or match_key not in matching_modules_or_ops: + return SKIP_LOWERING_VALUE + + # Match dequantize node(s). 
Both of the following conditions must pass: + # (1) All `torch.fx.Node`s at the matching indices must be a dequantize node + # (2) There must be at least one dequantize node + matched_dequantize = False + for i in dequantize_node_arg_indices: + assert i < len(ref_node.args),\ + "Dequantize index %s exceeded reference node's arg length %s" % (i, len(ref_node.args)) + arg = ref_node.args[i] + if is_dequantize_node(arg): + matched_dequantize = True + elif isinstance(arg, Node): + return SKIP_LOWERING_VALUE + if not matched_dequantize: + return SKIP_LOWERING_VALUE + + return (q_node, relu_node, ref_node) + +def _match_static_pattern_with_two_inputs( + node: Node, + modules: Dict[str, nn.Module], + qconfig_map: Dict[str, QConfigAny], + matching_modules_or_ops: List[Callable] +) -> Union[Tuple[Node, Node], Tuple[None, None]]: + """ + (dequantize \ + Match the pattern (dequantize - ref node - quantize) against the node provided. + + If there is a match, return a 2-tuple of: + 1) q_node: the quantize node, + 2) ref_node: a reference module or functional node to replace with its quantized counterpart + Otherwise, if there is no match, return a 2-tuple of (None, None). + + Parameters: + node: The `torch.fx.Node` to match against. + modules: A mapping from node names to modules in the model graph, used for module lookup. + qconfig_map: A mapping from node names to the qconfigs associated with the nodes. + If the corresponding qconfig for the reference node is None, then return no match. + matching_modules_or_ops: Either a list of functions or a list of `torch.nn.Module`s. + If the reference node is not in this list, then return no match. 
+ """ + SKIP_LOWERING_VALUE = (None, None) + + # Match quantize node + if node.op != "call_function" or node.target != torch.quantize_per_tensor: + return SKIP_LOWERING_VALUE + q_node = node + ref_node = q_node.args[0] + assert(isinstance(ref_node, Node)) + + if should_skip_lowering(ref_node, qconfig_map): + return SKIP_LOWERING_VALUE + + # Match reference module or functional + if isinstance(matching_modules_or_ops[0], type) and issubclass(matching_modules_or_ops[0], nn.Module): + expected_op = "call_module" + match_key = type(_get_module(ref_node, modules)) + else: + # This pass only support op of "call_module" + return SKIP_LOWERING_VALUE + + if ref_node.op != expected_op or match_key not in matching_modules_or_ops: + return SKIP_LOWERING_VALUE + + # Check ref_node has 2 input nodes, both are dq node. + if len(ref_node.args) != 2: + return SKIP_LOWERING_VALUE + for i in range(len(ref_node.args)): + arg = ref_node.args[i] + if not is_dequantize_node(arg): + return SKIP_LOWERING_VALUE + + return (q_node, ref_node) + +def _lower_static_weighted_ref_module( + model: GraphModule, + qconfig_map: Dict[str, QConfigAny]): + """ + Traverse the graph and find dequantize - ref module - quantize patterns + and replace them with the quantized version of the ref module. 
+ """ + modules = dict(model.named_modules(remove_duplicate=False)) + nodes = list(model.graph.nodes) + for n in model.graph.nodes: + # Step 0: Find nodes that match this pattern (dequantize - ref module - quantize) + matching_modules = list(STATIC_LOWER_MODULE_MAP.keys()) + list(STATIC_LOWER_FUSED_MODULE_MAP.keys()) + (q_node, relu_node, ref_node) = _match_static_pattern( + n, modules, qconfig_map, matching_modules, dequantize_node_arg_indices=[0]) # type: ignore[arg-type] + if q_node is None: + continue + assert(ref_node is not None) + (_, scale_node, zero_point_node, _) = q_node.args + ref_module = _get_module(ref_node, modules) + ref_class = type(ref_module) + assert(isinstance(scale_node, Node)) + assert(isinstance(zero_point_node, Node)) + assert(issubclass(ref_class, nn.Module)) + + # Step 1: Change this pattern to use the corresponding quantized module + # For fused modules, we also check whether the inner module is a reference module + # If so, we replace the entire fused module with the corresponding quantized module + if ref_class in STATIC_LOWER_FUSED_MODULE_MAP: + inner_ref_class, q_class = STATIC_LOWER_FUSED_MODULE_MAP[ref_class] + if type(ref_module[0]) != inner_ref_class: # type: ignore[index] + continue + else: + q_class = STATIC_LOWER_MODULE_MAP[ref_class] + output_scale = getattr(model, scale_node.target) + output_zero_point = getattr(model, zero_point_node.target) + q_module = q_class.from_reference(ref_module, output_scale, output_zero_point) + # replace reference module with quantized module + parent_name, module_name = _parent_name(ref_node.target) + setattr(modules[parent_name], module_name, q_module) + + # Step 2: Reroute around dq_node, and remove q_node and its args + assert(len(ref_node.args) == 1) + dq_node = ref_node.args[0] + assert(isinstance(dq_node, Node)) + ref_node.replace_input_with(dq_node, dq_node.args[0]) + q_node.replace_all_uses_with(ref_node) + model.graph.erase_node(q_node) + model.graph.erase_node(scale_node) + 
model.graph.erase_node(zero_point_node) + +def _lower_static_weighted_ref_module_with_two_inputs( + model: GraphModule, + qconfig_map: Dict[str, QConfigAny]): + """ + Traverse the graph and find patterns + dequantize dequantize + \\ // + ref module + \\ + quantize + and replace them with the quantized version of the ref module. + """ + modules = dict(model.named_modules(remove_duplicate=False)) + nodes = list(model.graph.nodes) + for n in model.graph.nodes: + # (dequantize \ + # Step 0: Find nodes that match this pattern (dequantize - ref module - quantize) + matching_modules = list(STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP.keys()) + (q_node, ref_node) = _match_static_pattern_with_two_inputs( + n, modules, qconfig_map, matching_modules) # type: ignore[arg-type] + if q_node is None: + continue + assert(ref_node is not None) + (_, scale_node, zero_point_node, _) = q_node.args + ref_module = _get_module(ref_node, modules) + ref_class = type(ref_module) + assert(isinstance(scale_node, Node)) + assert(isinstance(zero_point_node, Node)) + assert(issubclass(ref_class, nn.Module)) + + # Step 1: Change this pattern to use the corresponding quantized module + # For fused modules, we also check whether the inner module is a reference module + # If so, we replace the entire fused module with the corresponding quantized module + if ref_class in STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP: + inner_ref_class, q_class = STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP[ref_class] + if type(ref_module[0]) != inner_ref_class: # type: ignore[index] + continue + else: + continue + output_scale = getattr(model, scale_node.target) + output_zero_point = getattr(model, zero_point_node.target) + q_module = q_class.from_reference(ref_module, output_scale, output_zero_point) + # replace reference module with quantized module + parent_name, module_name = _parent_name(ref_node.target) + setattr(modules[parent_name], module_name, q_module) + + # Step 2: Reroute around dq_node, and remove q_node and its args 
+ assert(len(ref_node.args) == 2) + for arg in ref_node.args: + if not is_dequantize_node(arg): + continue + dq_node = arg + assert(isinstance(dq_node, Node)) + ref_node.replace_input_with(dq_node, dq_node.args[0]) + + q_node.replace_all_uses_with(ref_node) + model.graph.erase_node(q_node) + model.graph.erase_node(scale_node) + model.graph.erase_node(zero_point_node) + +def _lower_dynamic_weighted_ref_module(model: GraphModule): + """ + Traverse the graph and find quantize_per_tensor_dynamic - dequantize - ref_module patterns + and replace them with the dynamically quantized version of the ref module. + """ + named_modules = dict(model.named_modules(remove_duplicate=False)) + for n in model.graph.nodes: + if n.op != "call_module" or \ + type(named_modules[str(n.target)]) not in \ + set(DYNAMIC_LOWER_MODULE_MAP.keys()).union( + set(DYNAMIC_LOWER_FUSED_MODULE_MAP.keys())): + continue + ref_node = n + dq_node = ref_node.args[0] + if dq_node.op != "call_method" or dq_node.target != "dequantize": + continue + + input_dynamic_q_node = dq_node.args[0] + + if input_dynamic_q_node.op != "call_function" or \ + input_dynamic_q_node.target != torch.quantize_per_tensor_dynamic: + continue + + activation_dtype = input_dynamic_q_node.args[1] + is_fp16 = activation_dtype == torch.float16 + is_int8 = activation_dtype in [torch.quint8, torch.qint8] + if not is_int8 and not is_fp16: + continue + + ref_module = named_modules[str(ref_node.target)] + ref_class = type(ref_module) + if ref_class in DYNAMIC_LOWER_FUSED_MODULE_MAP: + inner_ref_class, q_class = DYNAMIC_LOWER_FUSED_MODULE_MAP[ref_class] + if type(ref_module[0]) != inner_ref_class: + continue + else: + q_class = DYNAMIC_LOWER_MODULE_MAP.get(ref_class) # type: ignore[assignment] + # TODO: maybe define a WeightedDynamicallyQuantizedModule + q_module = q_class.from_reference(ref_module) # type: ignore[attr-defined] + + # replace reference module with dynamically quantized module + parent_name, module_name = 
_parent_name(ref_node.target) + setattr(named_modules[parent_name], module_name, q_module) + ref_node.replace_input_with(dq_node, input_dynamic_q_node.args[0]) + +def _lower_weight_only_weighted_ref_module(model: GraphModule): + """ + Traverse the graph and find ref_module patterns + and replace them with the weight only quantized version of the ref module. + """ + named_modules = dict(model.named_modules(remove_duplicate=False)) + for n in model.graph.nodes: + if n.op != "call_module" or \ + type(named_modules[str(n.target)]) not in \ + set(WEIGHT_ONLY_LOWER_MODULE_MAP.keys()): + continue + ref_node = n + ref_module = named_modules[str(ref_node.target)] + ref_class = type(ref_module) + q_class = WEIGHT_ONLY_LOWER_MODULE_MAP.get(ref_class) + # TODO: WeightedQuantizedModule is currently assuming static quant apis + # with output_scale, output_zero_point in from_reference, we may want to + # relax that, or rename this + # TODO: maybe define a WeightedWeightOnlyQuantizedModule + q_module = q_class.from_reference(ref_module) # type: ignore[union-attr] + + # replace reference moduel with dynamically quantized module + parent_name, module_name = _parent_name(ref_node.target) + setattr(named_modules[parent_name], module_name, q_module) + +def _lower_static_weighted_ref_functional( + model: GraphModule, + qconfig_map: Dict[str, QConfigAny]): + """ + Traverse the graph and replace functional reference patterns with their quantized versions. 
+ """ + modules = dict(model.named_modules(remove_duplicate=False)) + nodes = list(model.graph.nodes) + for n in model.graph.nodes: + # Step 0: Find nodes that match this pattern (dequantize - functional op - quantize) + matching_ops = list(STATIC_LOWER_FUNCTIONAL_MAP.keys()) + (q_node, relu_node, func_node) = _match_static_pattern( + n, modules, qconfig_map, matching_ops, dequantize_node_arg_indices=[0, 1]) + if q_node is None: + continue + assert(func_node is not None) + (_, output_scale_node, output_zp_node, _) = q_node.args + (input_dq_node, weight_dq_node, *remaining_func_args) = func_node.args + assert(isinstance(output_zp_node, Node)) + assert(isinstance(input_dq_node, Node)) + assert(isinstance(weight_dq_node, Node)) + quantized_weight = weight_dq_node.args[0] + assert(isinstance(quantized_weight, Node)) + if quantized_weight.op != "call_function" or\ + quantized_weight.target not in (torch.quantize_per_tensor, torch.quantize_per_channel): + continue + + # Step 1: Replace quantized weights with packed weights, which will be folded later + # Use the right prepack op and prepare the corresponding args + # Linear prepack args: (quantized weights[, bias]) + # Conv prepack args: (quantized weights[, bias, stride, padding, dilation, groups]) + prepack_args = [quantized_weight] + remaining_func_args + if func_node.target == F.linear: + weight_dtype = quantized_weight.args[-1] + prepack_op = get_linear_prepack_op_for_dtype(weight_dtype) + elif func_node.target in CONV_FUNCTIONAL_OPS: + prepack_op = get_qconv_prepack_op(func_node.target) # type: ignore[arg-type] + # For conv1d, the stride, padding, and dilation args may be ints, + # in which case we need to convert them to tuples + if func_node.target == F.conv1d: + for i in [2, 3, 4]: + if len(prepack_args) > i and isinstance(prepack_args[i], int): + prepack_args[i] = (prepack_args[i],) + else: + raise ValueError("Lowering is not supported for op '%s'" % func_node.target) + with 
model.graph.inserting_before(output_scale_node): + packed_weight = model.graph.create_node("call_function", prepack_op, tuple(prepack_args), {}) + + # Step 2: Replace reference pattern with the corresponding quantized op + (q_func, q_relu_func) = STATIC_LOWER_FUNCTIONAL_MAP[func_node.target] # type: ignore[index] + func_node.target = q_relu_func if relu_node is not None else q_func + func_node.args = (input_dq_node.args[0], packed_weight, output_scale_node, output_zp_node) + q_node.replace_all_uses_with(func_node) + # Move func_node after output_zp_node in the graph + output_zp_node.append(func_node) + + # Clean up: Remove quantize node, and the relu node if it exists + model.graph.erase_node(q_node) + if relu_node is not None: + model.graph.erase_node(relu_node) + +def _lower_dynamic_weighted_ref_functional( + model: GraphModule, + qconfig_map: Dict[str, QConfigAny]): + """ + Traverse the graph and replace functional reference patterns with their dynamically + quantized versions. + Examples: + quantize_per_tensor_dynamic - dequantize - functional linear --> linear_dynamic + to(torch.float16) - dequantize - functional linear --> linear_dynamic_fp16 + """ + modules = dict(model.named_modules(remove_duplicate=False)) + nodes = list(model.graph.nodes) + # we want to search in reserved order so that we can match the larger patterns first + # e.g. we want to match linear - relu before linear. 
+ for n in reversed(model.graph.nodes): + + # Step 0: Find nodes that match this pattern + # (quantize_per_tensor_dynamic - dequantize - dynamically quantized op) + # We search for the pattern backwards, starting with the quantize node + # Quantize node args: (func, scale, zp, dtype) + func_node = n + # Handle cases where the functional op is wrapped in a ReLU + if func_node.op == "call_function" and func_node.target == F.relu or \ + func_node.op == "call_module" and \ + type(modules[str(func_node.target)]) == torch.nn.ReLU: + relu_node = func_node + func_node = relu_node.args[0] + else: + relu_node = None + if should_skip_lowering(func_node, qconfig_map): + continue + # Linear args: (dequantized inputs, dequantized weights[, bias]) + # Conv args: (dequantized inputs, dequantized weights[, bias, stride, padding, dilation, groups]) + if func_node.op != "call_function" or func_node.target not in DYNAMIC_LOWER_FUNCTIONAL_MAP: + continue + (input_dq_node, weight_dq_node, *remaining_func_args) = func_node.args + if input_dq_node.op != "call_method" or input_dq_node.target != "dequantize" or \ + weight_dq_node.op != "call_method" or weight_dq_node.target != "dequantize": + continue + + input_dynamic_q_node = input_dq_node.args[0] + + if input_dynamic_q_node.op != "call_function" or \ + input_dynamic_q_node.target != torch.quantize_per_tensor_dynamic: + continue + + reduce_range_node = None + (pattern_input, activation_dtype, reduce_range_node) = input_dynamic_q_node.args + is_fp16 = activation_dtype == torch.float16 + is_int8 = activation_dtype in [torch.quint8, torch.qint8] + if not is_int8 and not is_fp16: + continue + + quantized_weight = weight_dq_node.args[0] + weight_dtype = quantized_weight.args[-1] + + # Step 1: Try to select reference pattern with the corresponding quantized op + dynamic_quant_dtype_key = (activation_dtype, weight_dtype) + if dynamic_quant_dtype_key not in DYNAMIC_LOWER_FUNCTIONAL_MAP[func_node.target]: + print(f"Didn't find dtype combination 
{dynamic_quant_dtype_key} during " + f"dynamic quantized op lowering for {func_node.target}") + continue + (q_func, q_relu_func) = DYNAMIC_LOWER_FUNCTIONAL_MAP[func_node.target][dynamic_quant_dtype_key] + + if q_func is None or q_relu_func is None: + print("Didn't find corresponding quantized function or quantized relu function " + f"for {func_node.target}, {dynamic_quant_dtype_key}") + continue + + # Step 2: Replace quantized weights with packed weights, which will be folded later + # Use the right prepack op and prepare the corresponding args + # Linear prepack args: (quantized weights[, bias]) + # Conv prepack args: (quantized weights[, bias, stride, padding, dilation, groups]) + prepack_args = [quantized_weight] + remaining_func_args + if func_node.target == F.linear: + prepack_op = get_linear_prepack_op_for_dtype(weight_dtype) + elif func_node.target in CONV_FUNCTIONAL_OPS: + prepack_op = get_qconv_prepack_op(func_node.target) + # For conv1d, the stride, padding, and dilation args may be ints, + # in which case we need to convert them to tuples + if func_node.target == F.conv1d: + for i in [2, 3, 4]: + if len(prepack_args) > i and isinstance(prepack_args[i], int): + prepack_args[i] = (prepack_args[i],) + else: + raise ValueError("Lowering is not supported for op '%s'" % func_node.target) + with model.graph.inserting_before(func_node): + packed_weight = model.graph.create_node("call_function", prepack_op, tuple(prepack_args), {}) + + # Step 3: Replace reference pattern with the corresponding quantized op + func_node.target = q_relu_func if relu_node is not None else q_func + if is_int8: + func_node.args = (pattern_input, packed_weight, reduce_range_node) + else: + func_node.args = (pattern_input, packed_weight) + + if relu_node is not None: + relu_node.replace_all_uses_with(func_node) + + # Step 4: Remove the relu node if it exists + if relu_node is not None: + model.graph.erase_node(relu_node) + +def _lower_quantized_binary_op( + model: GraphModule, + 
qconfig_map: Dict[str, QConfigAny]): + binary_ops_to_lower: List[Callable] = [operator.add, torch.add, operator.mul, torch.mul, torch.matmul] + modules = dict(model.named_modules(remove_duplicate=False)) + for n in model.graph.nodes: + # Step 0: Find nodes that match this pattern (dequantize - ref module - quantize) + (q_node, relu_node, bop_node) = _match_static_pattern( + n, modules, qconfig_map, binary_ops_to_lower, dequantize_node_arg_indices=[0, 1]) + if q_node is None: + continue + assert(bop_node is not None) + (_, scale_node, zero_point_node, _) = q_node.args + + # Step 1: Remove dequant nodes + num_dq_nodes = 0 + for arg in bop_node.args: + if not is_dequantize_node(arg): + continue + dq_node = arg + assert(isinstance(dq_node, Node)) + dn_input = dq_node.args[0] + bop_node.replace_input_with(dq_node, dn_input) + num_dq_nodes += 1 + assert(num_dq_nodes > 0) + + # Step 2: Swap binary op to quantized binary op + assert bop_node.target in QBIN_OP_MAPPING + binop_to_qbinop = QBIN_OP_MAPPING if relu_node is None else QBIN_RELU_OP_MAPPING + qbin_op = binop_to_qbinop[bop_node.target] + # prepare the args for quantized bianry op + # (x, y) + qop_node_args = list(bop_node.args) + # (x, y, scale, zero_point) + # add scale and zero_point arguments for Tensor - Tensor operation + if num_dq_nodes == 2: + qop_node_args.extend([scale_node, zero_point_node]) + # insert a call to quantized binary op and remove the original binary op + with model.graph.inserting_after(q_node): + qop_node = create_node_from_old_node_preserve_meta( + model.graph, + ("call_function", qbin_op, tuple(qop_node_args), {}), + bop_node) + q_node.replace_all_uses_with(qop_node) + + # Step 3: Remove quantize node, binary op node, and relu node if any + model.graph.erase_node(q_node) + if relu_node is not None: + model.graph.erase_node(relu_node) + model.graph.erase_node(bop_node) + +def special_pattern_replacement(model: GraphModule): + modules = dict(model.named_modules(remove_duplicate=False)) + for 
n in model.graph.nodes: + q_node = n + is_quantize = q_node.target == torch.quantize_per_tensor + is_to_fp16 = q_node.op == "call_method" and q_node.target == "to" and \ + len(q_node.args) == 2 and q_node.args[1] == torch.float16 + if not (is_quantize or is_to_fp16): + continue + ref_node = q_node.args[0] + # get output scale/zero_point/dtype from the quantize node + # ref_node, scale_node, zero_point_node, dtype = q_node.args + # TODO: add safety checks that users for the ref_node and dq_node needs to be one + is_call_function, is_call_method, is_call_module = is_fixed_qparams_node(ref_node, modules) + if is_to_fp16 and (is_call_function or is_call_method or is_call_module): + # TODO: add a warning or error out here? (bc-breaking if error out) + # warnings.warn( + # "Only reference patterns are currently supported for {dtype} dtype with {op} op" + # "".format(dtype=dtypes, op=ref_node)) + continue + + is_call_function, is_call_method, is_call_module = is_default_node(ref_node, modules) + if is_to_fp16 and (is_call_function or is_call_method or is_call_module): + # TODO: add a warning or error out here? 
(bc-breaking if error out) + continue + + # This check includes all supported ops + is_call_function, is_call_method, is_call_module = is_special_pattern_node(ref_node, modules) + if not (is_call_module or is_call_function or is_call_method): + continue + assert len(ref_node.args) > 0 or len(ref_node.kwargs) > 0 + dq_node_or_nodes = ref_node.args[0] if len(ref_node.args) > 0 else list(ref_node.kwargs.values())[0] + assert isinstance(dq_node_or_nodes, (Node, tuple, list)) + is_dequantize = False + if isinstance(dq_node_or_nodes, Node): + is_dequantize = dq_node_or_nodes.op == 'call_method' and \ + dq_node_or_nodes.target == 'dequantize' + elif isinstance(dq_node_or_nodes, (tuple, list)): + is_dequantize = all( + x.op == 'call_method' and x.target == 'dequantize' + for x in dq_node_or_nodes) + + if not is_dequantize: + continue + + # TODO: enable we have patterns that needs to swap the modules + if is_call_module: + ref_module = modules[ref_node.target] + if type(ref_module) in SPECIAL_PATTERN_LOWER_MODULE_MAP and is_quantize: + qmodule_cls = SPECIAL_PATTERN_LOWER_MODULE_MAP.get(type(ref_module)) + scale_node = q_node.args[1] + zero_point_node = q_node.args[2] + output_scale = getattr(model, scale_node.target) + output_zero_point = getattr(model, zero_point_node.target) + + qmodule = qmodule_cls.from_reference(ref_module, output_scale, output_zero_point) # type:ignore[union-attr] + # replace reference module with quantized module + parent_name, module_name = _parent_name(ref_node.target) + setattr(modules[parent_name], module_name, qmodule) + + # reroute around dq node: + dq_nodes: List[Node] = [] + if isinstance(dq_node_or_nodes, Node): + dq_nodes = [dq_node_or_nodes] + elif isinstance(dq_node_or_nodes, (tuple, list)): + dq_nodes = list(dq_node_or_nodes) + + for dq_node in dq_nodes: + dn_input = dq_node.args[0] + ref_node.replace_input_with(dq_node, dn_input) + + # store q node args + qnode_qparams = list(q_node.args)[1:] + # replace uses of q node with input and 
remove q node + q_node_input = q_node.args[0] + q_node.replace_all_uses_with(q_node_input) + model.graph.erase_node(q_node) + + is_call_function, is_call_method, is_call_module = is_default_node(ref_node, modules) + if is_call_function: + # pass scale/zer_point arguments from quantize_per_tensor to the default node operator + # insert an op after the zero_point node so that the scale/zero_point + # nodes are is available + qop = get_quantized_operator(ref_node.target) + args = list(ref_node.args) + kwargs = dict(ref_node.kwargs) + if qop in QOP_TO_ARG_NAMES_TO_SKIP: + args_to_skip = QOP_TO_ARG_NAMES_TO_SKIP[qop] + for arg in args_to_skip: + if arg in kwargs: + kwargs.pop(arg) + kwargs["output_scale"] = qnode_qparams[0] + kwargs["output_zero_point"] = qnode_qparams[1] + with model.graph.inserting_after(qnode_qparams[1]): + qop_node = create_node_from_old_node_preserve_meta( + model.graph, + ("call_function", qop, tuple(args), kwargs), + ref_node) + ref_node.replace_all_uses_with(qop_node) + model.graph.erase_node(ref_node) + else: + # remove scale/zero_point node for quantize node + for n in qnode_qparams: + if isinstance(n, Node): + model.graph.erase_node(n) + + return model + +def _lower_getattr_tensor_metadta_op(model: GraphModule): + """ Modified the graph of the model inplace, to skip extra dequantize op before + the general tensor shape ops when possible + """ + for n in model.graph.nodes: + if is_getattr_tensor_metadata_node(n): + maybe_dq = n.args[0] + if maybe_dq.op != "call_method" or maybe_dq.target != "dequantize": + continue + # skip the dequantize node + args = list(n.args) + args[0] = n.args[0].args[0] + n.args = tuple(args) + +def _lower_get_tensor_info_op(model: GraphModule): + """ Modified the graph of the model inplace, to skip extra dequantize op before + the general tensor shape ops when possible + """ + for n in model.graph.nodes: + if not is_get_tensor_info_node(n): + continue + maybe_dq = n.args[0] + if maybe_dq.op != "call_method" or 
maybe_dq.target != "dequantize": + continue + # skip the dequantize node + args = list(n.args) + args[0] = n.args[0].args[0] + n.args = tuple(args) + +def _lower_to_native_backend( + model: GraphModule, + qconfig_map: Dict[str, QConfigAny], + node_name_to_scope: Dict[str, Tuple[str, type]] +) -> GraphModule: + """ Lower a quantized reference model (with reference quantized operator patterns) + to the native backend in PyTorch (fbgemm/qnnpack), both backends shares the same + operator signature so they can be lowered with the same function + """ + _lower_static_weighted_ref_module(model, qconfig_map) + _lower_static_weighted_ref_module_with_two_inputs(model, qconfig_map) + _lower_dynamic_weighted_ref_module(model) + _lower_weight_only_weighted_ref_module(model) + _lower_static_weighted_ref_functional(model, qconfig_map) + _lower_dynamic_weighted_ref_functional(model, qconfig_map) + _lower_quantized_binary_op(model, qconfig_map) + _lower_getattr_tensor_metadta_op(model) + _lower_get_tensor_info_op(model) + special_pattern_replacement(model) + model.graph.eliminate_dead_code() + model = fold_weight(model, node_name_to_scope) + model.graph.eliminate_dead_code() + model.recompile() + model.graph.lint() + return model diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__init__.py b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/__init__.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..daed6620a750e336dac0f6459282d103ef20ff94 Binary files /dev/null and 
b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/__init__.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/detector.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/detector.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..425d523a285dc36ed964649d51495651c7aaa446 Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/detector.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/model_report.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/model_report.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..562d12ea1753a4cc3da87c534bb1ada81bb70645 Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/model_report.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/model_report_observer.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/model_report_observer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ae11d94911c39b98c78db7a5f9da9c5ad98665e Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/model_report_observer.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/model_report_visualizer.cpython-310.pyc b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/model_report_visualizer.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..c0ceb2b80c86769fabc3c82fcc5d0b2a8f9c6717 Binary files /dev/null and b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/model_report_visualizer.cpython-310.pyc differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/detector.py b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/detector.py new file mode 100644 index 0000000000000000000000000000000000000000..bbca4609a2c669d3c3a7eddec970e7f25aa4d5a8 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/detector.py @@ -0,0 +1,1540 @@ +from typing import Any, Dict, Set, Tuple, Callable, List + +import torch +import torch.nn as nn +import torch.ao.nn.qat as nnqat +from abc import ABC, abstractmethod +from torch.ao.quantization.fake_quantize import FakeQuantize +from torch.ao.quantization.fx.graph_module import GraphModule +from torch.ao.quantization.fx._model_report.model_report_observer import ModelReportObserver +from torch.ao.quantization.qconfig import ( + QConfig, + default_qconfig, + _assert_valid_qconfig, +) +from torch.ao.quantization.observer import ( + ObserverBase, + default_dynamic_quant_observer, + default_per_channel_weight_observer, + default_observer, + default_weight_observer, +) +from torch.ao.quantization.fx._equalize import ( + default_equalization_qconfig, + EqualizationQConfig, +) +from torch.ao.quantization.observer import _is_activation_post_process + +# Names for observer insert keys +DETECTOR_TARGET_NODE_KEY = "target_node" +DETECTOR_OBS_TO_INSERT_KEY = "observer_to_insert" +DETECTOR_IS_POST_OBS_KEY = "is_post_observer" +DETECTOR_OBS_ARGS_KEY = "observer_args" + +# Mapping related code +class DetectorQConfigInfo(): + r""" + This class contains the QConfig information for a single module. 
+ The list of variables / values this contains can grow depending on the + extensibility of the qconfig mapping feature set but this currently includes: + - if activation observer is dynamic + - if weight observer is per channel + + + Args: + module_fqn (str): The fully qualified name (fqn) of the module that this + information contains info relevant to qconfig for + """ + + def __init__(self, module_fqn: str): + super().__init__() + self.module_fqn = module_fqn + + # populate this section with all the variables we might find important + # change from none if your detector is actually using this + self.is_activation_dynamic = False + self.is_weight_per_channel = False + + # equalization related options + self.is_equalization_recommended = False + + def generate_quantization_qconfig(self, module: torch.nn.Module) -> QConfig: + r""" + Args: + module (torch.nn.Module) The module we are generating + the qconfig for + + Returns the generated quantization QConfig according to what a valid configuration is + """ + # Apply suggestions to new qconfig + module_qconfig = default_qconfig + + # keep track of dynamic and per_channel recommendations + recommendations_list = [] + # append as if a list of combinations + recommendations_list.append((self.is_activation_dynamic, self.is_weight_per_channel)) + recommendations_list.append((self.is_activation_dynamic, False)) # only trying dynamic rec + recommendations_list.append((False, self.is_weight_per_channel)) # only trying dynamic + + # now we try each of the combinations + for rec in recommendations_list: + # rec[0] -> dynamic recommended + # rec[1] -> per channel recommended + activation = default_dynamic_quant_observer if rec[0] else default_observer + weight = default_per_channel_weight_observer if rec[1] else default_weight_observer + test_config = QConfig(activation, weight) + try: + _assert_valid_qconfig(test_config, module) + module_qconfig = test_config + break + except AssertionError: + # if not a valid configuration, 
we move on to the next one in priority + continue + + # return the QConfig chosen + return module_qconfig + + def generate_equalization_qconfig(self) -> EqualizationQConfig: + r""" + This returns the equalization configuration for a module. + + For now, it just returns the default, but as more equalization options become + possible, this method can get more fleshed out with more nuanced granularity. + + + Returns the generated equalization QConfig according to what a valid configuration is + """ + # in this case, we just return default equalization config + # we know this is valid because only valid modules would even + # have this option + return default_equalization_qconfig + +# Adding base class for detectors +class DetectorBase(ABC): + r""" Base Detector Module + Any detector class should derive from this class. + + Concrete detectors should follow the same general API, which includes: + - A method to calculate and return observer insertion points + - Should return both the fqns and the Observer class to insert + - A method to return a report based on the the detector + - Should return a str-based report and dict info in Tuple[str,Dict] format + """ + + def __init__(self): + super().__init__() + self.detector_config_info = None + + @abstractmethod + def determine_observer_insert_points(self, model) -> Dict: + r""" + Args + model (nn.Module or subclass): model to find observer insertion points + + Returns a Dict mapping from unique observer fqns (where we want to insert them) to a Dict. 
+ This dict maps string keys to detector specific information + """ + pass + + @abstractmethod + def get_detector_name(self) -> str: + r""" Returns the name of the current detector """ + pass + + + @abstractmethod + def get_qconfig_info(self, model) -> Dict[str, DetectorQConfigInfo]: + r""" Returns the DetectorQConfigInfo for each module_fqn relavent + Args + model (nn.Module or subclass): model to find observer insertion points + + Returns a Dict mapping from unique observer fqns (where we want to insert them) to: + A DetectorQConfigInfo with the information to generate a QConfig for a specific module + """ + pass + + def _get_targeting_node(self, prepared_fx_model: GraphModule, target_fqn: str) -> torch.fx.node.Node: + r""" + Takes in a GraphModule and the target_fqn and finds the node whose target is this fqn. + + If it's not found, it means it is most likely inside a fused layer + We just go one layer up in terms of the fqn we are searching for until we find parent node + If we get to empty string, then we know that it doesn't exist + + The reason for the recursion is that if the model that we are looking for got fused, + we will have module fqn as e.g. x.linear.0 but the graph will only have a node for the fused module, + which would have fqn as x.linear so they will not match. + To handle this, if we don't match, we then take off the last bit of the fqn e.g. x.linear.0 -> x.linear, + or more generally foo.bar.baz -> foo.bar and search again, this will allow us to locate the correct module + even in cases with fusion + + Args: + prepared_fx_model (GraphModule): The prepared Fx GraphModule + target_fqn (str): The fqn of the layer we are trying to target + + Returns the node object we are trying to add observers around + """ + for node in prepared_fx_model.graph.nodes: + # if the node's target is our target, return it + if node.target == target_fqn: + return node + + # getting here means node not found + # if no "." 
we are already at base and failed + parent_fqn_sep_index = target_fqn.rfind(".") + if parent_fqn_sep_index == -1: + raise ValueError("passed in target_fqn not found in graph's targets.") + else: + # recursively call it with parent fqn + return self._get_targeting_node(prepared_fx_model, target_fqn[:parent_fqn_sep_index]) + + @abstractmethod + def generate_detector_report(self, model) -> Tuple[str, Dict[str, Any]]: + r""" + Args + model (nn.Module or subclass): model to find observer insertion points + + Returns a Tuple of two elements: + Str: string report of the suggested improvements + Dict: contains useful data collected by the observer pertinent to this report + """ + pass + +class PerChannelDetector(DetectorBase): + r""" This class is used to detect if any Linear or Conv layers in a model utilize per_channel quantization. + Only Linear and Conv layers can use per_channel as of now so only these two are currently checked. + + per_channel quantization can lead to major benefits in the form of accuracy. 
+ Therefore, if the backend used by the user supports it, it is recommended to use + + Args: + backend (str, optional): the backend the user wishes to use in production + Default value is current torch.backends.quantized.engine + """ + + # Keys for return dictionary + BACKEND_KEY = "backend" + PER_CHAN_SUPPORTED_KEY = "per_channel_quantization_supported" + PER_CHAN_USED_KEY = "per_channel_quantization_used" + + # Default map for representing supported per channel quantization modules for different backends + DEFAULT_BACKEND_PER_CHANNEL_SUPPORTED_MODULES: Dict[str, Set[Any]] = { + "fbgemm": {nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nnqat.Linear, nnqat.Conv1d, nnqat.Conv2d, nnqat.Conv3d}, + "qnnpack": {nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nnqat.Linear, nnqat.Conv1d, nnqat.Conv2d, nnqat.Conv3d}, + "onednn": {nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nnqat.Linear, nnqat.Conv1d, nnqat.Conv2d, nnqat.Conv3d}, + "x86": {nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nnqat.Linear, nnqat.Conv1d, nnqat.Conv2d, nnqat.Conv3d}, + } + + def __init__(self, backend: str = torch.backends.quantized.engine): + super().__init__() + + # store the backend information + self.backend_chosen = backend + self.supported_modules = set() + if self.backend_chosen in self.DEFAULT_BACKEND_PER_CHANNEL_SUPPORTED_MODULES: + self.supported_modules = self.DEFAULT_BACKEND_PER_CHANNEL_SUPPORTED_MODULES[self.backend_chosen] + else: + raise ValueError("Not configured to work with {}. 
Try a different default backend".format(self.backend_chosen)) + + def get_detector_name(self) -> str: + r""" returns the string name of this detector""" + return "per_channel_detector" + + def get_qconfig_info(self, model) -> Dict[str, DetectorQConfigInfo]: + r""" Returns the DetectorQConfigInfo for each module_fqn relavent + Args + model (nn.Module or subclass): model to find observer insertion points + + Returns a Dict mapping from unique observer fqns (where we want to insert them) to: + A DetectorQConfigInfo with the information to generate a QConfig for a specific module + """ + # run the helper function to populate the dictionary + per_channel_info = self._detect_per_channel_helper(model) + + # we actually have a qconfig info object we are populating + module_fqn_to_detector_qconfig_info = {} + + for module_fqn in per_channel_info: + # create a detector info instance + detector_qconfig_info = DetectorQConfigInfo(module_fqn) + + # see if per channel quantization is supported + per_chan_supported: bool = per_channel_info[module_fqn][self.PER_CHAN_SUPPORTED_KEY] + detector_qconfig_info.is_weight_per_channel = per_chan_supported + module_fqn_to_detector_qconfig_info[module_fqn] = detector_qconfig_info + + return module_fqn_to_detector_qconfig_info + + def determine_observer_insert_points(self, model: nn.Module) -> Dict: + r""" + There is no observers inserted for the PerChannelDetector. + + Returns an empty dictionary since no observers are added or needed + """ + return {} + + + def _detect_per_channel_helper(self, model: nn.Module): + r""" + determines if per_channel quantization is supported in modules and submodules. + + Returns a dictionary in the higher level _detect_per_channel function. + Each entry maps the fully-qualified-name to information on whether per_channel quantization. 
+ + Args: + model: The current module that is being checked to see if it is per_channel quantizable + + Returns dictionary mapping fqns to if per_channel quantization is possible + """ + # create dict we will return + per_channel_info: Dict = {} + + # get the fully qualified name and check if in list of modules to include and list of modules to ignore + for fqn, module in model.named_modules(): + + is_in_include_list = sum(list(map(lambda x: isinstance(module, x), self.supported_modules))) > 0 + + # check if the module per_channel is supported + # based on backend + per_channel_supported = False + + if is_in_include_list: + per_channel_supported = True + + # assert statement for MyPy + q_config_file = module.qconfig + assert isinstance(q_config_file, QConfig) + + # this object should either be fake quant or observer + q_or_s_obj = module.qconfig.weight.p.func() + assert isinstance(q_or_s_obj, (FakeQuantize, ObserverBase)) + + per_channel_used = False # will be true if found in qconfig + + if hasattr(q_or_s_obj, "ch_axis"): # then we know that per_channel quantization used + + # all fake quants have channel axis so need to check is_per_channel + if isinstance(q_or_s_obj, FakeQuantize): + if hasattr(q_or_s_obj, "is_per_channel") and q_or_s_obj.is_per_channel: + per_channel_used = True + elif isinstance(q_or_s_obj, ObserverBase): + # should be an observer otherwise + per_channel_used = True + else: + raise ValueError("Should be either observer or fake quant") + + per_channel_info[fqn] = { + self.PER_CHAN_SUPPORTED_KEY: per_channel_supported, + self.PER_CHAN_USED_KEY: per_channel_used, + self.BACKEND_KEY: self.backend_chosen + } + + return per_channel_info + + def generate_detector_report(self, model: nn.Module) -> Tuple[str, Dict[str, Any]]: + r"""Checks if any Linear or Conv layers in the model utilize per_channel quantization. + Only Linear and Conv layers can use per_channel as of now so only these two are currently checked. 
class DynamicStaticDetector(DetectorBase):
    r"""
    Determines whether dynamic or static quantization is more appropriate for a given module.

    Takes advantage of the ModelReportObserver that records range information.
    Stationary distribution of data are strictly above tolerance level for the comparison statistic:

        S = average_batch_activation_range/epoch_activation_range

    Nonstationary distributions are below or at the tolerance level for this metric.

    If the distribution of data right after the module is non-stationary, recommend dynamic quantization
    Otherwise recommend static quantization

    Args:
        tolerance (float, optional): The threshold where S metric is stationary above and non-stationary otherwise. Default: 0.5
    """
    # names for the pre and post observers that are inserted
    DEFAULT_PRE_OBSERVER_NAME = "model_report_pre_observer"
    DEFAULT_POST_OBSERVER_NAME = "model_report_post_observer"

    # naming conventions for stationary vs non-stationary data
    STATIONARY_STR = "stationary"
    NON_STATIONARY_STR = "non-stationary"

    # naming for activation
    INPUT_ACTIVATION_PREFIX = "input_activation_"
    OUTPUT_ACTIVATION_PREFIX = "output_activation_"

    # naming conventions for the keys of the return module info
    TOLERANCE_KEY = "dynamic_static_tolerance"
    DEFAULT_DYNAMIC_REC_KEY = "dynamic_recommended"
    PRE_OBS_COMP_STAT_KEY = INPUT_ACTIVATION_PREFIX + "dynamic_static_comp_stat"
    POST_OBS_COMP_STAT_KEY = OUTPUT_ACTIVATION_PREFIX + "dynamic_static_comp_stat"
    PRE_OBS_DATA_DIST_KEY = INPUT_ACTIVATION_PREFIX + "dynamic_static_data_classification"
    POST_OBS_DATA_DIST_KEY = OUTPUT_ACTIVATION_PREFIX + "dynamic_static_data_classification"
    IS_CURRENTLY_SUPPORTED_KEY = "is_dynamic_supported"

    # modules that are supported both dynamic and static for this report function
    DEFAULT_DYNAMIC_STATIC_CHECK_SUPPORTED = {nn.Linear}

    # modules that will be supported soon for both
    DEFAULT_DYNAMIC_STATIC_FUTURE_SUPPORTED = {nn.Conv1d, nn.Conv2d, nn.Conv3d}

    def __init__(self, tolerance=0.5):
        super().__init__()

        # set tolerance level and initialize a set to keep track of useful fqn locations
        self.tolerance = tolerance
        self.useful_observer_fqns: Set[str] = set()

    def determine_observer_insert_points(self, prepared_fx_model: GraphModule) -> Dict[str, Dict[str, Any]]:
        r"""
        Determines where observers need to be inserted for the Dynamic vs Static detector.
        For this detector, we want to place observers on either side of linear layers in the model.

        Currently inserts observers for:
            linear layers

        Args:
            prepared_fx_model (GraphModule):  The prepared Fx GraphModule

        Returns a Dict mapping from unique observer fqns (where we want to insert them) to a Dict with:
            key "target_node" -> the node we are trying to observe with this observer (torch.fx.node.Node)
            key "observer_to_insert" -> the observer we wish to insert (ObserverBase)
            key "is_post_observer" -> True if this is meant to be a post-observer for target_node, False if pre-observer
            key "observer_args" -> The arguments that are meant to be passed into the observer
        """

        # observer for this detector is ModelReportObserver
        obs_ctr = ModelReportObserver

        # return dict
        obs_fqn_to_info: Dict[str, Dict[str, Any]] = {}

        for fqn, module in prepared_fx_model.named_modules():
            # make sure module is supported
            if self._is_supported(module, insert=True):
                # if it's a supported type, we want to get node and add observer insert locations
                targeted_node = self._get_targeting_node(prepared_fx_model, fqn)

                # add entry for pre-observer
                pre_obs_fqn = fqn + "." + self.DEFAULT_PRE_OBSERVER_NAME

                obs_fqn_to_info[pre_obs_fqn] = {
                    DETECTOR_TARGET_NODE_KEY: targeted_node,
                    DETECTOR_OBS_TO_INSERT_KEY: obs_ctr(),
                    DETECTOR_IS_POST_OBS_KEY: False,
                    DETECTOR_OBS_ARGS_KEY: targeted_node.args
                }

                # add entry for post-observer
                post_obs_fqn = fqn + "." + self.DEFAULT_POST_OBSERVER_NAME

                obs_fqn_to_info[post_obs_fqn] = {
                    DETECTOR_TARGET_NODE_KEY: targeted_node,
                    DETECTOR_OBS_TO_INSERT_KEY: obs_ctr(),
                    DETECTOR_IS_POST_OBS_KEY: True,
                    DETECTOR_OBS_ARGS_KEY: (targeted_node,)
                }

        return obs_fqn_to_info

    def get_detector_name(self) -> str:
        r"""Returns the string name of this detector."""
        return "dynamic_vs_static_detector"

    def get_qconfig_info(self, model) -> Dict[str, DetectorQConfigInfo]:
        r"""Returns the DetectorQConfigInfo for each relevant module_fqn

        Args:
            model (nn.Module or subclass): model to find observer insertion points

        Returns a Dict mapping from unique observer fqns (where we want to insert them) to:
            A DetectorQConfigInfo with the information to generate a QConfig for a specific module
        """
        # run the helper function to populate the dictionary
        dynamic_static_info = self._generate_dict_info(model)

        # build a DetectorQConfigInfo per module from the raw info
        module_fqn_to_detector_qconfig_info = {}

        for module_fqn in dynamic_static_info:
            detector_qconfig_info = DetectorQConfigInfo(module_fqn)

            # record whether dynamic quantization is recommended for this module
            dynamic_static_recommended: bool = dynamic_static_info[module_fqn][self.DEFAULT_DYNAMIC_REC_KEY]
            detector_qconfig_info.is_activation_dynamic = dynamic_static_recommended
            module_fqn_to_detector_qconfig_info[module_fqn] = detector_qconfig_info

        return module_fqn_to_detector_qconfig_info

    def _is_supported(self, module: nn.Module, insert: bool = False) -> bool:
        r"""Returns whether the given module is supported for observers

        Args:
            module: The module to check and ensure is supported
            insert: True if this is check for observer insertion, false if for report gen

        Returns True if the module is supported by observer, False otherwise
        """
        # check to see if module is of a supported type
        is_supported_type = any(isinstance(module, mod_type) for mod_type in self.DEFAULT_DYNAMIC_STATIC_CHECK_SUPPORTED)

        # check if it will be supported
        future_supported_type = any(isinstance(module, mod_type) for mod_type in self.DEFAULT_DYNAMIC_STATIC_FUTURE_SUPPORTED)

        # supported either now or in the future
        supported = is_supported_type or future_supported_type

        if insert:
            # this is check for observer insertion
            return supported
        else:
            # this is for report gen and we also need to check if it contains observers
            has_obs = hasattr(module, self.DEFAULT_PRE_OBSERVER_NAME) and hasattr(module, self.DEFAULT_POST_OBSERVER_NAME)
            return supported and has_obs

    def _generate_dict_info(self, model: GraphModule) -> Dict[str, Any]:
        r"""
        Helper function for generate_detector_report that does the generation of the dictionary.
        This process is done as specified in generate_detector_report documentation

        Args:
            model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers

        Returns a Dictionary mapping modules with ModelReportObservers around them to:
                whether dynamic quantization is recommended
                their S metric of input to module
                whether input to module is stationary or non-stationary
                their S metric of output of module
                whether output of module is stationary or non-stationary
                the tolerance level to decided whether input/output is stationary or non-stationary
                whether it is currently supported or planned for the future
        """
        # store modules dynamic vs static information
        module_dynamic_static_info = {}

        # This for loop goes through the modules, and extracts all relevant information into module_dynamic_static_info
        # This information primarily includes whether the data distributions around a supported module is stationary or not
        # Based on this, it is recorded whether dynamic or static quantization is recommended

        # loop through all submodules including nested ones
        for fqn, module in model.named_modules():
            # if module is supported and has the ModelReportObserver attached to it
            if self._is_supported(module):
                # get pre and post observers for the module
                pre_obs = getattr(module, self.DEFAULT_PRE_OBSERVER_NAME)
                post_obs = getattr(module, self.DEFAULT_POST_OBSERVER_NAME)

                # get the statistics for each module
                pre_stat = pre_obs.get_batch_to_epoch_ratio()
                post_stat = post_obs.get_batch_to_epoch_ratio()

                # record module, pre and post stat, and whether to do dynamic or static based off it
                # true if post observer data distribution is non-stationary, false if it's stationary
                dynamic_recommended = post_stat <= self.tolerance

                # specify the classifications for whether data distributions considered stationary or non-stationary
                pre_obs_dist_classif = self.STATIONARY_STR if pre_stat > self.tolerance else self.NON_STATIONARY_STR
                post_obs_dist_classif = self.STATIONARY_STR if post_stat > self.tolerance else self.NON_STATIONARY_STR

                # check if current support or future support
                is_supported_type = any(
                    isinstance(module, mod_type) for mod_type in self.DEFAULT_DYNAMIC_STATIC_CHECK_SUPPORTED
                )

                # store the set of important information for this module
                module_info = {
                    self.TOLERANCE_KEY: self.tolerance,
                    self.DEFAULT_DYNAMIC_REC_KEY: dynamic_recommended,
                    self.PRE_OBS_COMP_STAT_KEY: pre_stat,
                    self.PRE_OBS_DATA_DIST_KEY: pre_obs_dist_classif,
                    self.POST_OBS_COMP_STAT_KEY: post_stat,
                    self.POST_OBS_DATA_DIST_KEY: post_obs_dist_classif,
                    self.IS_CURRENTLY_SUPPORTED_KEY: is_supported_type,
                }

                module_dynamic_static_info[fqn] = module_info

        return module_dynamic_static_info

    def generate_detector_report(self, model: GraphModule) -> Tuple[str, Dict[str, Any]]:
        r"""
        Determines whether dynamic or static quantization is more appropriate for a given module.

        Takes advantage of the ModelReportObserver that records range information.
        Stationary distribution of data are strictly above tolerance level for the comparison statistic:

            S = average_batch_activation_range/epoch_activation_range

        Nonstationary distributions are below or at the tolerance level for this metric.

        If the distribution of data right after the module is non-stationary, recommend dynamic quantization
        Otherwise recommend static quantization

        This will then generate suggestions for dynamic vs static quantization focused around Linear.

        Args:
            model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers

        Returns a tuple with two elements:
            String report of whether dynamic or static quantization is recommended for certain modules
            Dictionary mapping modules with ModelReportObservers around them to:
                whether dynamic quantization is recommended
                their S metric of input to module
                whether input to module is stationary or non-stationary
                their S metric of output of module
                whether output of module is stationary or non-stationary
                the tolerance level to decided whether input/output is stationary or non-stationary
                whether it is currently supported or planned for the future
        """

        # get the dictionary of the information to format the string report
        module_dynamic_static_info = self._generate_dict_info(model)

        dynamic_vs_static_string = "Dynamic vs. Static Quantization suggestions: \n"

        modules_added: bool = False  # check to make sure at least 1 module added.

        dynamic_benefit = " You will get more accurate results if you use dynamic quantization"
        static_benefit = " You can increase model efficiency if you use static quantization"
        future_support_str = ". This layer is not yet supported for dynamic quantization"
        # This for loop goes through the information collected in module_dynamic_static_info and:
        #   Populates the string based report with the information from module_dynamic_static_info
        #   Compiles the complete report by appending relevant formatted strings

        for module_fqn in module_dynamic_static_info.keys():

            # there is at least 1 module for suggestion
            modules_added = True
            module_info = module_dynamic_static_info[module_fqn]
            suggestion_string_template = "For module {} it is suggested to use {} quantization because {}.\n"

            # decide what string formatting values will be
            quantization_type = ""
            quantization_reasoning = "the distribution of data before {} is {} and the distribution after is {}."

            benefit_str = ""

            # strings for if dynamic quantized per tensor is needed
            recommend_per_tensor = ". We recommend to add a {} before this module if it is static."
            rec_lay_to_add = "dynamic quantize per tensor layer"
            dynamic_per_tensor_string = recommend_per_tensor.format(rec_lay_to_add)
            dynamic_per_tensor_reasoning_string = (
                " This is because the input to this module has a non-stationary distribution"
            )

            # start composing explanation
            if module_info[self.DEFAULT_DYNAMIC_REC_KEY]:
                quantization_type = "dynamic"
                # check if currently supported or future supported
                benefit_str = dynamic_benefit
                if not module_info[self.IS_CURRENTLY_SUPPORTED_KEY]:
                    benefit_str += future_support_str
            else:
                quantization_type = "static"
                benefit_str = static_benefit

            # now set the quantization explanation string
            quantization_reasoning = (
                quantization_reasoning.format(
                    module_fqn, module_info[self.PRE_OBS_DATA_DIST_KEY], module_info[self.POST_OBS_DATA_DIST_KEY]
                )
                + benefit_str
            )

            # if we have a non-stationary input -> linear -> stationary we suggested static
            # however, we want to also recommend they add a dynamic quantize per tensor right if this change is made
            if (
                module_info[self.PRE_OBS_DATA_DIST_KEY] == self.NON_STATIONARY_STR
                and module_info[self.POST_OBS_DATA_DIST_KEY] == self.STATIONARY_STR
            ):
                quantization_reasoning = (
                    quantization_reasoning + dynamic_per_tensor_string + dynamic_per_tensor_reasoning_string
                )

            # format the overall suggestion string with the specific inputs
            module_suggestion_string = suggestion_string_template.format(
                module_fqn, quantization_type, quantization_reasoning
            )

            # append to overall suggestion
            dynamic_vs_static_string += module_suggestion_string

        if not modules_added:
            dynamic_vs_static_string += "No applicable layers for suggestions. Only linear and conv are valid.\n"

        # return the string as well as the dictionary of information
        return (dynamic_vs_static_string, module_dynamic_static_info)
module_info[self.PRE_OBS_DATA_DIST_KEY] == self.NON_STATIONARY_STR + and module_info[self.POST_OBS_DATA_DIST_KEY] == self.STATIONARY_STR + ): + quantization_reasoning = ( + quantization_reasoning + dynamic_per_tensor_string + dynamic_per_tensor_reasoning_string + ) + + # format the overall suggestion string with the specific inputs + module_suggestion_string = suggestion_string_template.format( + module_fqn, quantization_type, quantization_reasoning + ) + + # append to overall suggestion + dynamic_vs_static_string += module_suggestion_string + + if not modules_added: + dynamic_vs_static_string += "No applicable layers for suggestions. Only linear and conv are valid.\n" + + # return the string as well as the dictionary of information + return (dynamic_vs_static_string, module_dynamic_static_info) + + +class InputWeightEqualizationDetector(DetectorBase): + r""" + Determines whether input-weight equalization can help improve quantization for certain modules. + + Specifically, this list of modules includes: + linear + conv + + Determines whether input-weight equalization is recommended based on the comp stat: + s_c = sqrt(w_c/W)/sqrt(i_c/I) + where: + w_c is range of weight for channel c, W is range of weight over all channels + i_c is range of input for channel c, I is range of input over all channels + + if s_c >= threshold or <= 1 / threshold, recommends input-weight equalization + + Args: + ratio_threshold (float): The threshold for s_c to determine if input-weight equalization is sugggested + Should be between 0 and 1 (both non-inclusive) + ch_axis (int, optional): The channel axis being observed to determine input weight equalization + Default: 1 + + * :attr:`ratio_threshold`: The threshold for s_c to determine if input-weight equalization is sugggested + Should be between 0 and 1 + + * :attr:`ch_axis`: The channel axis being observed to determine input weight equalization + + * :attr:`SUPPORTED_MODULES`: This specifies the modules that are supported for 
input-weight equalization + + * :attr:`DEFAULT_PRE_OBSERVER_NAME`: The name of the pre-observer to be inserted for this detector + """ + + SUPPORTED_MODULES: Set[Callable] = {nn.Linear, + nn.Conv1d, + nn.Conv2d, + nn.Conv3d, + nnqat.Linear, + nnqat.Conv1d, + nnqat.Conv2d, + nnqat.Conv3d} + + # names for the pre and post observers that are inserted + DEFAULT_PRE_OBSERVER_NAME: str = "model_report_pre_observer" + + # weight / activation prefix for each of the below info + WEIGHT_PREFIX = "weight_" + ACTIVATION_PREFIX = "input_activation_" + + # string names for keys of info dictionaries + PER_CHANNEL_MAX_KEY = "per_channel_max" + PER_CHANNEL_MIN_KEY = "per_channel_min" + GLOBAL_MAX_KEY = "global_max" + GLOBAL_MIN_KEY = "global_min" + + # keys for return dict of recommendations + RECOMMENDED_KEY = "input_weight_equalization_recommended" + COMP_METRIC_KEY = "input_weight_channel_comparison_metrics" + THRESHOLD_KEY = "input_weight_threshold" + CHANNEL_KEY = "input_weight_channel_axis" + + # default weight and info strings + WEIGHT_STR = "weight" + INPUT_STR = "input" + + # default for what ratio we recommend input weight + DEFAULT_RECOMMEND_INPUT_WEIGHT_CHANNEL_RATIO = 0.4 + + def __init__(self, ratio_threshold: float, ch_axis: int = 1): + # ensure passed in inputs are valid + if ratio_threshold <= 0 or ratio_threshold >= 1: + raise ValueError("Make sure threshold is > 0 and < 1") + + # intialize attributes based on args + self.ratio_threshold: float = ratio_threshold + self.ch_axis: int = ch_axis + + def _is_supported(self, module: nn.Module, insert: bool = False) -> bool: + r"""Returns whether the given module is supported for observers + + Args + module: The module to check and ensure is supported + insert: True if this is check for observer insertion, false if for report gen + + Returns True if the module is supported by observer, False otherwise + """ + # check to see if module is of a supported type + is_supported_type = sum(list(map(lambda x: type(module) is x, 
self.SUPPORTED_MODULES))) > 0 + + # this is check for observer insertion + if insert: + return is_supported_type + else: + # this is for report gen and we also need to check if it contains observers + has_obs = hasattr(module, self.DEFAULT_PRE_OBSERVER_NAME) + return is_supported_type and has_obs + + def get_qconfig_info(self, model) -> Dict[str, DetectorQConfigInfo]: + r""" Returns the DetectorQConfigInfo for each module_fqn relavent + Args + model (nn.Module or subclass): model to find observer insertion points + + Returns a Dict mapping from unique observer fqns (where we want to insert them) to: + A DetectorQConfigInfo with the information to generate a QConfig for a specific module + """ + # run the helper function to populate the dictionary + # find the range of inputs + input_values: Dict[str, Dict] = self._extract_input_info(model) + + # find the range of weights + weight_values: Dict[str, Dict] = self._extract_weight_info(model) + + # calculate per_channel comparison statistic s_c + comp_stats: Dict[str, torch.Tensor] = self._generate_comparison_values(input_values, weight_values) + + # generate the return dictionary + input_weight_equalization_info: Dict[str, Dict] = self._generate_dict_info(input_values, weight_values, comp_stats) + + # we actually have a qconfig info object we are populating + module_fqn_to_detector_qconfig_info = {} + + for module_fqn in input_weight_equalization_info: + # create a detector info instance + detector_qconfig_info = DetectorQConfigInfo(module_fqn) + + # see if per channel quantization is supported + input_weight_recommended: bool = input_weight_equalization_info[module_fqn][self.RECOMMENDED_KEY] + detector_qconfig_info.is_equalization_recommended = input_weight_recommended + module_fqn_to_detector_qconfig_info[module_fqn] = detector_qconfig_info + + return module_fqn_to_detector_qconfig_info + + def determine_observer_insert_points(self, prepared_fx_model: GraphModule) -> Dict[str, Dict[str, Any]]: + r"""Determines where 
observers need to be inserted for the Input Weight Equalization Detector. + For this detector, we want to place observers in front of supported layers. + + Currently inserts observers for: + linear layers + conv layers + + Args: + prepared_fx_model (GraphModule): The prepared Fx GraphModule + + Returns a Dict mapping from unique observer fqns (where we want to insert them) to a Dict with: + key "target_node" -> the node we are trying to observe with this observer (torch.fx.node.Node) + key "observer_to_insert" -> the observer we wish to insert (ObserverBase) + key "is_post_observer" -> True if this is meant to be a post-observer for target_node, False if pre-observer + key "observer_args" -> The arguments that are meant to be passed into the observer + """ + + # observer for this detector is ModelReportObserver + obs_ctr = ModelReportObserver + + # return dict + obs_fqn_to_info: Dict[str, Dict[str, Any]] = {} + + for fqn, module in prepared_fx_model.named_modules(): + # check to see if module is of a supported type + if self._is_supported(module, insert=True): + # if it's a supported type, we want to get node and add observer insert locations + targeted_node = self._get_targeting_node(prepared_fx_model, fqn) + + # add entry for pre-observer + pre_obs_fqn = fqn + "." + self.DEFAULT_PRE_OBSERVER_NAME + + obs_fqn_to_info[pre_obs_fqn] = { + DETECTOR_TARGET_NODE_KEY: targeted_node, + DETECTOR_OBS_TO_INSERT_KEY: obs_ctr(ch_axis=self.ch_axis), + DETECTOR_IS_POST_OBS_KEY: False, + DETECTOR_OBS_ARGS_KEY: targeted_node.args, + } + + return obs_fqn_to_info + + def get_detector_name(self) -> str: + r"""Returns the name of this detector""" + return "input_weight_equalization_detector" + + def _extract_input_info(self, model: GraphModule) -> Dict[str, Dict]: + r""" + Takes in a calibrated GraphModule and then finds the relevant observers. 
+ It then extracts the input information for each observer returns it + + Args + model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers + + Returns a dict mapping relavent module fqns (str) to a dict with keys: + "input_activation_per_channel_max" : maps to the per_channel max values + "input_activation_per_channel_min" : maps to the per_channel min values + "input_activation_global_max" : maps to the global max recorded + "input_activation_global_min" : maps to the global min recorded + """ + + # return dictionary mapping observer fqns to desired info + input_info: Dict[str, Dict] = {} + + for fqn, module in model.named_modules(): + # if module is supported and it has a pre-observer + if self._is_supported(module): + # get pre observer for the module + pre_obs = getattr(module, self.DEFAULT_PRE_OBSERVER_NAME) + + input_info[fqn] = { + self.ACTIVATION_PREFIX + self.PER_CHANNEL_MAX_KEY: pre_obs.max_val, + self.ACTIVATION_PREFIX + self.PER_CHANNEL_MIN_KEY: pre_obs.min_val, + self.ACTIVATION_PREFIX + self.GLOBAL_MAX_KEY: max(pre_obs.max_val), + self.ACTIVATION_PREFIX + self.GLOBAL_MIN_KEY: min(pre_obs.min_val), + } + + return input_info + + def _extract_weight_info(self, model: GraphModule) -> Dict[str, Dict]: + r""" + Takes in a calibrated GraphModule and then finds the relavent observers. + It then extracts the weight information for each layer an observer is attached to. 
+ + Args + model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers + + Returns a dict mapping module fqns (str) to a dict with keys: + "per_channel_max" : maps to the per_channel max values + "per_channel_min" : maps to the per_channel min values + "global_max" : maps to the global max recorded + "global_min" : maps to the global min recorded + """ + # return dictionary mapping observer fqns to desired info + weight_info: Dict[str, Dict] = {} + + for fqn, module in model.named_modules(): + # if module is supported and it has a pre-observer + if self._is_supported(module): + # we don't need actual observer, just the module weights + # calculate min and max vals + min_val: torch.Tensor = torch.tensor([float('inf')]) + max_val: torch.Tensor = torch.tensor([float('-inf')]) + x_copy = module.weight + x_dim = x_copy.size() + + new_axis_list = [i for i in range(len(x_dim))] # noqa: C416 + new_axis_list[self.ch_axis] = 0 + new_axis_list[0] = self.ch_axis + y = x_copy.permute(new_axis_list) + + # Need to match dtype of min/max because the updates to buffers + # are done in place and types need to match for comparisons + y = y.to(min_val.dtype) + y = torch.flatten(y, start_dim=1) + if min_val.numel() == 0 or max_val.numel() == 0: + min_val, max_val = torch.aminmax(y, dim=1) + else: + min_val_cur, max_val_cur = torch.aminmax(y, dim=1) + min_val = torch.min(min_val_cur, min_val) + max_val = torch.max(max_val_cur, max_val) + + weight_info[fqn] = { + self.WEIGHT_PREFIX + self.PER_CHANNEL_MAX_KEY: max_val, + self.WEIGHT_PREFIX + self.PER_CHANNEL_MIN_KEY: min_val, + self.WEIGHT_PREFIX + self.GLOBAL_MAX_KEY: max(max_val), + self.WEIGHT_PREFIX + self.GLOBAL_MIN_KEY: min(min_val), + } + + return weight_info + + def _calculate_range_ratio(self, info_dict: Dict, info_str: str, module_fqn: str) -> torch.Tensor: + r""" + Takes in an info dict and calculates the s_c matrix. 
+ + Args: + info_dict (dict): A dictionary of either input or weight range info + info_str (str): A str describing whether currently looking at weight or input info + Either "weight" or "input" + module_fqn (str): The fqn of the module we are looking at + + Returns a tensor of values, where each value is the s_c stat for a different channel + """ + # calculate the ratios of the info + # get the prefix str + prefix_str = self.ACTIVATION_PREFIX if info_str == self.INPUT_STR else self.WEIGHT_PREFIX + + per_channel_range = info_dict[prefix_str + self.PER_CHANNEL_MAX_KEY] - info_dict[prefix_str + self.PER_CHANNEL_MIN_KEY] + global_range = info_dict[prefix_str + self.GLOBAL_MAX_KEY] - info_dict[prefix_str + self.GLOBAL_MIN_KEY] + + if global_range == 0: + range_zero_explanation = "We recommend removing this channel as it doesn't provide any useful information." + raise ValueError( + "The range of the {} data for module {} is 0, which means you have a constant value channel. {}".format( + info_str, module_fqn, range_zero_explanation + ) + ) + + ratio = per_channel_range / global_range + + return ratio + + def _generate_comparison_values(self, input_info: Dict, weight_info: Dict) -> Dict[str, torch.Tensor]: + r""" + Takes in the information on the min and max values of the inputs and weights and: + Calculates the comp stat for each channel: s_c = sqrt(w_c/W)/sqrt(i_c/I) + + Args: + input_info (dict): A dict mapping each observer to input range information + weight_info (dict): A dict mapping each observer to weight range information + + Returns a dict mapping relavent observer fqns (str) to a 1-D tensor. 
+ Each value is a different s_c value for a different channel + """ + # create return dictionary for each observer + module_fqn_to_channel: Dict[str, torch.Tensor] = {} + + # for each module (both passed in dicts should have same keys) + for module_fqn in input_info: + + # raise error if not in weight info + if module_fqn not in weight_info: + raise KeyError("Unable to find weight range stats for module {}".format(module_fqn)) + + # calculate the ratios of the weight info and input info + weight_ratio = self._calculate_range_ratio(weight_info[module_fqn], self.WEIGHT_STR, module_fqn) + input_ratio = self._calculate_range_ratio(input_info[module_fqn], self.INPUT_STR, module_fqn) + + # if mismatched size, because of grouping, we want to replicate weight enough times + weight_channels = len(weight_ratio) + input_channels = len(input_ratio) + if weight_channels != input_channels: + # we try to replicate + assert input_channels % weight_channels == 0, "input channels should be divisible by weight channels." + # get replication factor + rep_factor: int = input_channels // weight_channels + + # weight ratio is (n,), input ratio is (k,), we just repeat weight ratio k // n + weight_ratio = weight_ratio.repeat(rep_factor) + + # calculate the s metric per channel + s = torch.sqrt(weight_ratio) / torch.sqrt(input_ratio) + module_fqn_to_channel[module_fqn] = s + + # return compiled observer ratios + return module_fqn_to_channel + + def _generate_dict_info(self, input_info: Dict, weight_info: Dict, comp_stats: Dict) -> Dict[str, Dict]: + r""" + Helper function for generate_detector_report that does the generation of the dictionary. 
+ This process is done as specified in generate_detector_report documentation + + Args: + input_info (dict): A dict mapping each module to input range information + weight_info (dict): A dict mapping each module to weight range information + comp_stats (dict): A dict mapping each module to its corresponding comp stat + + Returns a dictionary mapping each module with relavent ModelReportObservers around them to: + whether input weight equalization is recommended + their s_c metric compared to the threshold + the threshold used to make the recommendation + the channel used for recording data + the input channel range info + the weight channel range info + """ + # store modules input weight equalization info + input_weight_equalization_info: Dict[str, Dict] = {} + + # for each module we add separate set of suggestions + for module_fqn in input_info: + + # get relavent info for this module + mod_input_info: Dict = input_info[module_fqn] + mod_weight_info: Dict = weight_info[module_fqn] + mod_comp_stat: Dict = comp_stats[module_fqn] + + # decide if each channel should have input weight equalization or not + channel_rec_vals: list = [] + + for val in mod_comp_stat: + float_rep: float = val.item() + + # decide if recommending input weight equalization + recommended: bool = float_rep >= self.ratio_threshold and float_rep <= 1 / self.ratio_threshold + channel_rec_vals.append(recommended) + + # build the return dict input + # also unpack input and weight dicts into it + input_weight_equalization_info[module_fqn] = { + self.RECOMMENDED_KEY: channel_rec_vals, + self.COMP_METRIC_KEY: mod_comp_stat, + self.THRESHOLD_KEY: self.ratio_threshold, + self.CHANNEL_KEY: self.ch_axis, + **mod_input_info, + **mod_weight_info, + } + + # return our compiled info for each module + return input_weight_equalization_info + + def generate_detector_report(self, model: GraphModule) -> Tuple[str, Dict[str, Any]]: + r""" + Determines whether input weight equalization is appropriate for a given 
module. + + Takes advantage of the ModelReport Observer which records per channel information of input range + It then uses the passed in weight info inconjunction to compute the desired ratio + Finally, it gives suggestions based on this information for each module of interest + + Args: + model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers + + Returns a tuple with two elements: + String report of of whether input weight equalization is recommended for certain modules + Dictionary mapping modules of interest to: + whether input weight equalization is recommended + their s_c metric compared to the threshold + the threshold used to make the recommendation + the channel used for recording data + the input channel range info + the weight channel range info + """ + + # find the range of inputs + input_values: Dict[str, Dict] = self._extract_input_info(model) + + # find the range of weights + weight_values: Dict[str, Dict] = self._extract_weight_info(model) + + # calculate per_channel comparison statistic s_c + comp_stats: Dict[str, torch.Tensor] = self._generate_comparison_values(input_values, weight_values) + + # generate the return dictionary + input_weight_equalization_info: Dict[str, Dict] = self._generate_dict_info(input_values, weight_values, comp_stats) + + # now we can generate report based on this information + input_weight_string = "Input-Weight Equalization suggestions: \n" + + # some strings to be formatted depending on module we are adding + module_suggestion_str = "For Module {} looked at with axis {}: \n" + channel_suggestion_str = "\tWe suggest {} input weight equalization because {}\n" + use_str = "to use" + no_use_str = "to not use" + input_weight_benefit_str = "{}/{} channels would benefit and we expect significant reduction in quantization error." + input_weight_non_benefit_reasoning = "{}/{} channels benefitting from input-weight equalization being applied." 
+ input_weight_non_benefit_str = "we don't expect much improvement from input-weight equalization based on {}" + + # added module check + added_module: bool = False + + # compile the suggestion string + for module_fqn in input_weight_equalization_info: + # we added at least 1 module + added_module = True + # add the module level description + input_weight_string += module_suggestion_str.format(module_fqn, self.ch_axis) + + mod_info: Dict[str, Any] = input_weight_equalization_info[module_fqn] + + # gather info on how many channels would benefit from input weight and + recommendation_per_channel: torch.Tensor = mod_info[self.RECOMMENDED_KEY] + num_recs = sum(recommendation_per_channel) + + if num_recs / len(recommendation_per_channel) >= self.DEFAULT_RECOMMEND_INPUT_WEIGHT_CHANNEL_RATIO: + input_benefit_formatted = input_weight_benefit_str.format(num_recs, len(recommendation_per_channel)) + channel_str = channel_suggestion_str.format(use_str, input_benefit_formatted) + input_weight_string += channel_str + else: + non_benefit_reason_formatted = input_weight_non_benefit_reasoning.format(num_recs, len(recommendation_per_channel)) + non_benefit_str = input_weight_non_benefit_str.format(non_benefit_reason_formatted) + channel_str = channel_suggestion_str.format(no_use_str, non_benefit_str) + input_weight_string += channel_str + + # if no modules looked at, amend return string + if not added_module: + input_weight_string += "No applicable layers for suggestions. Only linear and conv valid.\n" + + # return a tuple with the string explanation and the compiled dict info + return (input_weight_string, input_weight_equalization_info) + + +class OutlierDetector(DetectorBase): + r""" + Determines whether there are significant outliers in activation data around a certain layer. + + This is ideally used in conjunction with information on stationary vs. 
non-stationary distribution: + If the data is stationary, and there are significant outliers, then we want to flag them + We want to do this on a per channel basis for detecting outliers + + Determines whether activation data is flagged as outlier based on if data is stationary and: + p_r = avg(100th percentile / "reference_percentile"th percentile) + where: + p_r is average percentile ratio across all batches in the epoch + reference_percentile is a percentile values between 0 and 100 exclusive + + if p_r is above some threshold, then we consider the activations to have significant outliers + + Args: + ratio_threshold (float, optional): The threshold for p_r to determine if there are outliers in activations + Should be >= 1 + Default: 3.5 + reference_percentile (float, optional): The denominator to find the relative scale of the 100th percentile + Should be between 0 and 1 + Default: 0.975 + fraction_batches_used_threshold (float, optional): Threshold of fraction of batches per channel to determine outlier + If fraction is below this, we deem number of samples used to calculate outliers as insignificant and alert user + regardless of whether we detected outliers or not in channel to take a closer look at channel results + Should be between 0 and 1 + Default: 0.95 + ch_axis (int, optional): The channel axis being observed to determine input weight equalization + Default: 1 + + * :attr:`ratio_threshold`: The threshold for p_r to determine if there are outliers in activations + The p_r value (average ratio of 100th percentile/reference_percentile) is compared to ratio_threshold + If it is significantly greater, then we consider it an outlier + This threshold was calculated based on the ratio of the percentiles in a normal distribution + The calculations behind value choice: https://drive.google.com/file/d/1N2wdtXWI-kOH8S7HH4-PYB_NmqzZil4p/view?usp=sharing + + * :attr:`reference_percentile`: The denominator of the top fraction to find the relative scale of the 100th 
percentile + Should be between 0 and 1 + The calculations behind value choice: https://drive.google.com/file/d/1N2wdtXWI-kOH8S7HH4-PYB_NmqzZil4p/view?usp=sharing + + * :attr:`fraction_batches_used_threshold`: The fraction of batches to determine outliers for each channel should be above this + Some batches may not be used because of 0-based errors, so this is to ensure a good amount of the total batches are used + Should be between 0 and 1 + + * :attr:`ch_axis`: The channel axis being observed to determine outliers + + * :attr:`DEFAULT_PRE_OBSERVER_NAME`: The name of the pre-observer to be inserted for this detector + """ + + # names for the pre observers that are inserted + DEFAULT_PRE_OBSERVER_NAME: str = "model_report_pre_observer" + + # pre activation prefix + INPUT_ACTIVATION_PREFIX = "input_activation_" + + # names for dict keys + OUTLIER_KEY = "outliers_detected" + NUM_BATCHES_KEY = "outlier_detection_batches_used" + IS_SUFFICIENT_BATCHES_KEY = "outlier_detection_is_sufficient_batches" + COMP_METRIC_KEY = "outlier_detection_percentile_ratios" + RATIO_THRES_KEY = "outlier_detection_ratio_threshold" + REF_PERCENTILE_KEY = "outlier_detection_reference_percentile" + CHANNEL_AXIS_KEY = "outlier_detection_channel_axis" + MAX_VALS_KEY = INPUT_ACTIVATION_PREFIX + "per_channel_max" + CONSTANT_COUNTS_KEY = "constant_batch_counts" + + def __init__( + self, + ratio_threshold: float = 3.5, + reference_percentile: float = 0.975, + fraction_batches_used_threshold: float = 0.95, + ch_axis: int = 1, + ): + # initialize the variables of interest + self.ratio_threshold = ratio_threshold + + # make sure passed in percentile is valid + assert reference_percentile >= 0 and reference_percentile <= 1 + assert fraction_batches_used_threshold >= 0 and fraction_batches_used_threshold <= 1 + self.reference_percentile = reference_percentile + self.fraction_batches_used_threshold = fraction_batches_used_threshold + self.ch_axis = ch_axis + + def get_detector_name(self) -> str: + 
r"""Returns the name of this detector""" + return "outlier_detector" + + def _supports_insertion(self, module: nn.Module) -> bool: + r"""Returns whether the given module is supported for observers insertion + + Any module that doesn't have children and isn't an observer itself is supported + + Args + module: The module to check and ensure is supported + + Returns True if the module is supported by observer, False otherwise + """ + # case for insertion of module + # check if the module has any children and isn't observer + num_children = len(list(module.children())) + return num_children == 0 and not _is_activation_post_process(module) + + def get_qconfig_info(self, model) -> Dict[str, DetectorQConfigInfo]: + r""" Returns the DetectorQConfigInfo for each module_fqn relavent + Args + model (nn.Module or subclass): model to find observer insertion points + + Returns a Dict mapping from unique observer fqns (where we want to insert them) to: + A DetectorQConfigInfo with the information to generate a QConfig for a specific module + """ + # currently doesn't do anything for outlier detector + return {} + + def _supports_report_gen(self, module: nn.Module) -> bool: + r"""Returns whether the given module is supported for report generation + + Any module that has a model report pre-observer is supported + + Args + module: The module to check and ensure is supported + + Returns True if the module is supported by observer, False otherwise + """ + return hasattr(module, self.DEFAULT_PRE_OBSERVER_NAME) + + def determine_observer_insert_points(self, prepared_fx_model: GraphModule) -> Dict[str, Dict[str, Any]]: + r""" Determines where observers need to be inserted for the Outlier Detector. + + For this detector, we want to place observers in front of supported layers. 
+ + Currently inserts observers for: + all layers that do not have children (leaf level layers) + + Args: + prepared_fx_model (GraphModule): The prepared Fx GraphModule + + Returns a Dict mapping from unique observer fqns (where we want to insert them) to a Dict with: + key "target_node" -> the node we are trying to observe with this observer (torch.fx.node.Node) + key "observer_to_insert" -> the observer we wish to insert (ObserverBase) + key "is_post_observer" -> True if this is meant to be a post-observer for target_node, False if pre-observer + key "observer_args" -> The arguments that are meant to be passed into the observer + """ + # observer for this detector is ModelReportObserver + obs_ctr = ModelReportObserver + + # return dict + obs_fqn_to_info: Dict[str, Dict[str, Any]] = {} + + for fqn, module in prepared_fx_model.named_modules(): + # check to see if module is of a supported type + if self._supports_insertion(module): + # if it's a supported type, we want to get node and add observer insert locations + targeted_node = self._get_targeting_node(prepared_fx_model, fqn) + + # add entry for pre-observer + pre_obs_fqn = fqn + "." 
+ self.DEFAULT_PRE_OBSERVER_NAME + + obs_fqn_to_info[pre_obs_fqn] = { + DETECTOR_TARGET_NODE_KEY: targeted_node, + DETECTOR_OBS_TO_INSERT_KEY: obs_ctr(ch_axis=self.ch_axis, comp_percentile=self.reference_percentile), + DETECTOR_IS_POST_OBS_KEY: False, + DETECTOR_OBS_ARGS_KEY: targeted_node.args, + } + + return obs_fqn_to_info + + def _calculate_outlier_info( + self, + percentile_ratios: torch.Tensor, + counted_batches: torch.Tensor, + total_batches: int, + ) -> Dict[str, List[bool]]: + r""" + Gives info on whether the percentile ratios calculated would be considered outliers + Also gives information on whether the collected data is statistically significant to make this claim + + Args: + percentile_ratios (torch.Tensor): The average percentile_ratios per channel calculated by the observer + counted_batches (torch.Tensor): The number of batches used for average calculation per tensor + total_batches (int): The total number of batches that passed through observer in this epoch + + Returns a dictionary mapping: + "outliers_detected" : list of bools per channel that are true if it is considered an outlier + "is_sufficient_batches": if o_r was >= fraction_batches_used_threshold: + where o_r = counted_batches / total_batches + """ + outlier_dict: Dict[str, List[bool]] = {self.OUTLIER_KEY: [], self.IS_SUFFICIENT_BATCHES_KEY: []} + + # get both as flattened lists for easy mapping + ratios_list: List = percentile_ratios.tolist() + num_batches_list: List = counted_batches.tolist() + + # calculate whether channels were statistically significant + significant_size = [ + batch_size / total_batches >= self.fraction_batches_used_threshold for batch_size in num_batches_list + ] + outlier_dict[self.IS_SUFFICIENT_BATCHES_KEY] = significant_size + + # calculate for each channel whether it's an outlier or not based on ratio + outlier_detected = [ratio > self.ratio_threshold for ratio in ratios_list] + outlier_dict[self.OUTLIER_KEY] = outlier_detected + + # return the dictionary with 
the two lists + return outlier_dict + + def _generate_info_dict(self, model: GraphModule) -> Dict[str, Dict]: + r""" + Helper function for generate_detector_report that does the generation of the dictionary. + This process is done as specified in generate_detector_report documentation + + Args: + model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers + + Returns a dict mapping relavent module fqns to: + whether there were outliers found in activation before + the number of batches used for each channel + whether fraction of applicable batches used is above fraction_batches_used_threshold + their p_r metric compared to the threshold + the threshold used to make the recommendation + the reference_percentile used to make the recommendation + the channel axis used to determine individual channels + the constant batch counts per channel + the per channel max values + """ + # return dictionary mapping observer fqns to desired info + info_dict: Dict[str, Dict] = {} + + for fqn, module in model.named_modules(): + # if module is supported and it has a pre-observer + if self._supports_report_gen(module): + # get pre observer for the module + pre_obs: ModelReportObserver = getattr(module, self.DEFAULT_PRE_OBSERVER_NAME) + + # get the number of batches and calculated ratio thresholds + num_batches: torch.Tensor = pre_obs.percentile_batches_tracked + average_ratios: torch.Tensor = pre_obs.average_percentile_ratio + channel_batch_cnts: torch.Tensor = pre_obs.constant_channels + total_batches: int = pre_obs.num_batches_tracked + + # also get the max values + max_vals: torch.Tensor = pre_obs.max_val + + # we have to specifically modify how we are recording negative ratio for pre-relu layers + for index, ratio_val in enumerate(average_ratios): + # check if we have a negative ratio + # a ratio might be negative if we have a situation where the 100th percentile is + # > 0 while the nth percentile is < 0, in which case this would not be 
detected + # as an outlier. Since we care more about magnitude, we make it positive. + if ratio_val.item() < 0: + # first make it positive + average_ratios[index] = -ratio_val + + if ratio_val.item() < 1: + # if it's less than 1 we have the flip it as well + average_ratios[index] = 1 / ratio_val + + outlier_calcs = self._calculate_outlier_info(average_ratios, num_batches, total_batches) + + # calculate whether ratios were outliers + info_dict[fqn] = { + self.CHANNEL_AXIS_KEY: self.ch_axis, + self.REF_PERCENTILE_KEY: self.reference_percentile, + self.RATIO_THRES_KEY: self.ratio_threshold, + self.COMP_METRIC_KEY: average_ratios, + self.NUM_BATCHES_KEY: num_batches, + self.OUTLIER_KEY: outlier_calcs[self.OUTLIER_KEY], + self.IS_SUFFICIENT_BATCHES_KEY: outlier_calcs[self.IS_SUFFICIENT_BATCHES_KEY], + self.CONSTANT_COUNTS_KEY: channel_batch_cnts, + self.MAX_VALS_KEY: max_vals + } + + return info_dict + + def generate_detector_report(self, model: GraphModule) -> Tuple[str, Dict[str, Any]]: + r""" + Determines whether input weight equalization is appropriate for a given module. 
+ + Takes advantage of the ModelReport Observer which records the relavent percentile information + + Args: + model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers + + Returns a tuple with two elements: + String report of of whether there are outliers in the activations around certain modules + Dictionary mapping modules of interest to: + whether there were outliers found in activation before + the number of batches used for each channel + whether fraction of applicable batches used is above fraction_batches_used_threshold + their p_r metric compared to the threshold + the threshold used to make the recommendation + the reference_percentile used to make the recommendation + the channel axis used to determine individual channels + the constant batch counts per channel + the per channel max values + """ + # generate the information dictionary of outlier information + info_dict = self._generate_info_dict(model) + + # now we can generate report based on this information + outlier_string = "Outlier detection report: \n" + + # added module check + added_module: bool = False + + # some strings to be formatted depending on module we are adding + module_suggestion_str = "For Module {} looked at with axis {}: \n" + channel_suggestion_str = "\tFor channel {}, we found outliers in the preceding activation data with {}.\n" + channel_max_value_str = "a max value across all batches of {}" + note_string = "Note: outlier detection is only reliable for {}. We recommend {} to ensure the most accurate results." + note_distribution = "stationary distributions" + note_rec = "running the static vs. dynamic detector to ensure activation data before modules above is stationary" + + # suggestion for constant batch check since that can make it no outliers + constant_str = "\tFor channel {}, we found {} constant value batches. {}\n" + constant_suggestion = "We recommend taking a look at the dict and data to see how frequent this occurred and why." 
+ + # compile the suggestion string + for module_fqn in info_dict: + # get module specific info + mod_info: Dict[str, Any] = info_dict[module_fqn] + # check to see if we already added high level model desc + added_model_desc = False + # look at each individual channel and add a suggestion + for index, outlier_detected in enumerate(mod_info[self.OUTLIER_KEY]): + if outlier_detected: + # we found at least 1 outlier + if not added_model_desc: + # add the module level description + outlier_string += module_suggestion_str.format(module_fqn, self.ch_axis) + added_model_desc = True + + # we mark that we found at least one outlier + added_module = True + max_value_found_str = channel_max_value_str.format(mod_info[self.MAX_VALS_KEY][index]) + channel_str = channel_suggestion_str.format(index, max_value_found_str) + outlier_string += channel_str + + # also check if we found constant batch + if mod_info[self.CONSTANT_COUNTS_KEY][index] != 0: + # make sure we add a module level highlight. + if not added_model_desc: + # add the module level description + outlier_string += module_suggestion_str.format(module_fqn, self.ch_axis) + added_model_desc = True + + constant_values_for_channel = mod_info[self.CONSTANT_COUNTS_KEY][index] + formatted_str = constant_str.format(index, constant_values_for_channel, constant_suggestion) + outlier_string += formatted_str + # we also added at least one thing to description + added_module = True + + + # if found outlier, give suggestion, else give default response + if added_module: + # compose the note string + note_composed = note_string.format(note_distribution, note_rec) + outlier_string += note_composed + else: + outlier_string += "There were no outliers found in the activations.\n" + + return (outlier_string, info_dict) diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/model_report.py b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/model_report.py new file mode 
100644 index 0000000000000000000000000000000000000000..8bc2aec1350363bf24f8ed6d4d3cf8c24096773a --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/model_report.py @@ -0,0 +1,606 @@ +from typing import Any, Dict, Set, Tuple, Callable +from collections import OrderedDict +import torch +from torch.ao.quantization.fx._model_report.detector import ( + DetectorBase, + DETECTOR_OBS_ARGS_KEY, + DETECTOR_OBS_TO_INSERT_KEY, + DETECTOR_IS_POST_OBS_KEY, + DETECTOR_TARGET_NODE_KEY, + DetectorQConfigInfo +) +from torch.ao.quantization.fx._model_report.model_report_visualizer import ModelReportVisualizer +from torch.ao.quantization.fx.graph_module import GraphModule +from torch.ao.quantization.observer import ObserverBase +from torch.ao.quantization.qconfig_mapping import QConfigMapping, QConfig +from torch.ao.quantization.fx._equalize import EqualizationQConfig + +class ModelReport: + r""" + The ModelReport class aims to provide users an easy way to diagnose issues that they run into + with their models. The class works with all traceable GraphModules to help diagnose issues, + though the requirements on the type of model more-so depends on the specific report the user + is trying to generate. With respect to the reports, the ModelReport class is intialized with + a set of Detector classes, each of which generate reports on quantization configuration + issues a use might have. + + Currently supports generating reports on: + - Suggestions for per-channel vs. 
per-tensor quantization (nn.Module) + - Suggestions for dynamic vs static quantization for linear layers (Graph Modules) + - Suggestions for input-weight equalization for linear and conv layers (Graph Modules) + - Suggestions for outlier detection for all layers (Graph Modules) + + The ModelReport class has the primary functionality of inserting observers (primarily the ModelReportObserver) + where needed for each detector to gather the information it needs, and then after callibration, the ModelReport + class compiles the report generated by each Detector class into a single report to return to the user. It also + has the capability to remove all the observers it inserted as well. + + * :attr:`_model` The model we wish to generate the report for. Must be a traceable GraphModule + + * :attr:`_desired_report_detectors` The set of Detectors representing desired reports from the ModelReport class + Make sure that these are all unique types of detectors [do not have more than 1 of the same class] + + * :attr:`_desired_detector_names` The set of detector names of the _desired_report_detectors. + This set is generated by calling the get_detector_name() of each detector + + * :attr:`_detector_name_to_observer_fqns` The mapping from each detector to fqns of observers of interest + The purpose of this is to keep track of what observers were inserted for each detector, so that they + can be removed at the end if desired + + * :attr:`_prepared_flag` A boolean flag that keeps track of whether we have prepared the model or not + This is to ensure we only insert observers once with the ModelReport instance + + * :attr:`_removed_observers` A boolean to track if we have removed observers already + The purpose is to ensure we don't attempt to remove observers twice with the same ModelReport + instance. This also allows the functionality where we can generate the report multiple times + as long as we haven't removed the observers yet. 
+ + Note: + This class was initially designed to work with the Fx Graph Mode workflow in mind. However, + full functionality is available as long as there is a traceable GraphModule that is being used. + One method to get a traceable GraphModule without going through the Fx workflow is to use + the QuantizationTracer class. + + General Flow for Fx workflow: + 1.) Initialize ModelReport object with reports of interest by passing in initialized detector objects and model + 2.) Prepare your model with prepare_fx + 3.) Call model_report.prepare_detailed_calibration to add relavent observers + 4.) Callibrate your model with data + 5.) Call model_report.generate_report on your model to generate report and optionally remove added observers + Optional + 6.) Call model_report.generate_visualizer to get a ModelReportVisualizer instance + 7.) To help in parsing report information and debugging, view report info as a: + - Table + - Histogram + - Line plot + 8.) Call model_report.generate_qconfigs to generate the qconfigs based on the report suggestions + + Example (with QuantizationTracer): + >>> # xdoctest: +SKIP + >>> # get the necessary qconfig + >>> config = PrepareCustomConfig() + >>> skipped_module_names, skipped_module_classes = get_skipped_module_name_and_classes(config, False) + + >>> # initialize our model and get GraphModule + >>> model = SomeModel() + >>> tracer = QuantizationTracer(skipped_module_names, skipped_module_classes) + >>> graph_module = GraphModule(model, tracer.trace(model)) + + >>> # get our set of detectors and ModelReport instance + >>> detector_set = set([DynamicStaticDetector(tolerance=0.5), InputWeightEqualizationDetector(ratio_threshold=0.7)]) + >>> tracer_reporter = ModelReport(graph_module, tracer_detector_set) + + >>> # now we insert the observers and callibrate the model + >>> tracer_model_with_observers = tracer_reporter.prepare_detailed_calibration() + >>> for i in range(num_callibration_batches): + >>> example_input = 
get_callibration_input() + >>> tracer_model_with_observers(example_input) + + >>> # finally we generate the reports and optionally remove the observers we inserted + >>> reports = tracer_reporter.generate_model_report(remove_inserted_observers=True) + + >>> # Optional: we can generate the qconfig mapping based on the suggestions + >>> qconfigs = model_report.generate_qconfig_mapping() + + >>> # Optional: we can generate the equalization mapping based on the suggestions + >>> qconfigs = model_report.generate_equalization_mapping() + + >>> # Optional: we get a ModelReportVisualizer instance to do any visualizations desired + >>> model_report_visualizer = tracer_reporter.generate_visualizer() + + """ + + def __init__(self, model: GraphModule, desired_report_detectors: Set[DetectorBase]): + + if len(desired_report_detectors) == 0: + raise ValueError("Should include at least 1 desired report") + + # keep track of the model we wish to generate report for + self._model: GraphModule = model + + # keep the reports private so they can't be modified + self._desired_report_detectors = desired_report_detectors + self._desired_detector_names = {detector.get_detector_name() for detector in desired_report_detectors} + + # keep a mapping of desired reports to observers of interest + # this is to get the readings, and to remove them, can create a large set + # this set can then be used to traverse the graph and remove added observers + self._detector_name_to_observer_fqns: Dict[str, Set[str]] = {} + + # initialize each report to have empty set of observers of interest + for desired_report in self._desired_detector_names: + self._detector_name_to_observer_fqns[desired_report] = set() + + # flags to ensure that we can only prepare and remove observers once + self._prepared_flag = False + self._removed_observers = False + + # store the reports that we generated for visualization purposes + # intially empty since no reports generated + self._generated_reports: Dict[str, Dict] = {} + + 
def get_desired_reports_names(self) -> Set[str]: + """ Returns a copy of the desired reports for viewing """ + return self._desired_detector_names.copy() + + def get_observers_of_interest(self) -> Dict[str, Set[str]]: + """ Returns a copy of the observers of interest for viewing """ + return self._detector_name_to_observer_fqns.copy() + + def prepare_detailed_calibration(self) -> GraphModule: + r""" + Takes in a graph model and inserts the following observers: + - ModelReportObserver + + Each observer is inserted based on the desired_reports into the relavent locations + + Right now, each report in self._desired_detector_names has independent insertions + However, if a module already has a Observer of the same type, the insertion will not occur + This is because all of the same type of Observer collect same information, so redundant + + Returns the same GraphModule with the observers inserted + """ + + # if already prepared once, cannot prepare again + if self._prepared_flag: + raise ValueError("Already ran preparing detailed callibration. 
Run the report generation next after callibration.") + + # loop through each detector, find where placements should be, and keep track + insert_observers_fqns: Dict[str, Any] = {} + + for detector in self._desired_report_detectors: + # determine observer points for each detector + obs_fqn_to_info = detector.determine_observer_insert_points(self._model) + # map each insert point to the observer to use + insert_observers_fqns.update(obs_fqn_to_info) + # update the set of observers this report cares about + self._detector_name_to_observer_fqns[detector.get_detector_name()] = set(obs_fqn_to_info.keys()) + + # now insert all the observers at their desired locations + for observer_fqn in insert_observers_fqns: + target_node = insert_observers_fqns[observer_fqn][DETECTOR_TARGET_NODE_KEY] + insert_obs = insert_observers_fqns[observer_fqn][DETECTOR_OBS_TO_INSERT_KEY] + insert_post = insert_observers_fqns[observer_fqn][DETECTOR_IS_POST_OBS_KEY] + observer_args = insert_observers_fqns[observer_fqn][DETECTOR_OBS_ARGS_KEY] + self._insert_observer_around_module( + observer_fqn, target_node, insert_obs, observer_args, insert_post + ) + + self._prepared_flag = True + + return self._model + + def _insert_observer_around_module( + self, + obs_fqn: str, + target_node: torch.fx.node.Node, + obs_to_insert: ObserverBase, + observer_args: Tuple, + insert_post: bool + ): + r""" + Helper function that inserts the observer into both the graph structure and the module of the model + + Args + node_fqn (str): The fully qualified name of the observer we want to insert + target_node (torch.fx.node.Node): The node in model we are inserting observers around + obs_to_insert (ObserverBase): The observer we are inserting around target_node + observer_args (Tuple): The arguments we want to pass into the observer + insert_post (bool): whether this is meant to be a post observer for this node + """ + # if we are inserting post, then our target node is the next node + if insert_post: + target_node = 
def _get_node_from_fqn(self, node_fqn: str) -> torch.fx.node.Node:
    r"""Look up a node in the traced graph by its fully qualified name.

    Args:
        node_fqn (str): The fully qualified name of the node we want to find in model

    Returns the Node object matching node_fqn

    Raises:
        ValueError: if no node in the graph has node_fqn as its target
    """
    # observer insertion creates call_module nodes whose target is the fqn,
    # so a direct target comparison finds the node
    for node in self._model.graph.nodes:
        if node.target == node_fqn:
            # assert for MyPy
            assert isinstance(node, torch.fx.node.Node)
            return node

    # bug fix: error message previously read "The node_fqn is was not found"
    raise ValueError("The node_fqn was not found within the module.")

def generate_model_report(
    self, remove_inserted_observers: bool
) -> Dict[str, Tuple[str, Dict]]:
    r"""Generates all the requested reports.

    Note:
        You should have calibrated the model with relevant data before calling this

    Args:
        remove_inserted_observers (bool): True to remove the observers inserted by this ModelReport instance

    Returns a mapping of each desired report name to a tuple with:
        The textual summary of that report information
        A dictionary containing relevant statistics or information for that report

    Note:
        Throws exception if we try to generate report on model we already removed observers from
        Throws exception if we try to generate report without preparing for callibration
    """
    # if we haven't prepped model for callibration, then we shouldn't generate report yet
    if not self._prepared_flag:
        raise Exception("Cannot generate report without preparing model for callibration")

    # if we already removed the observers, we cannot generate report
    if self._removed_observers:
        raise Exception("Cannot generate report on model you already removed observers from")

    # keep track of all the reports of interest and their outputs
    reports_of_interest = {}

    for detector in self._desired_report_detectors:
        # generate the individual report for the detector
        report_output = detector.generate_detector_report(self._model)
        reports_of_interest[detector.get_detector_name()] = report_output

    # if user wishes to remove inserted observers, go ahead and remove
    if remove_inserted_observers:
        self._removed_observers = True
        # get the set of all Observers inserted by this instance of ModelReport
        all_observers_of_interest: Set[str] = set()
        for desired_report in self._detector_name_to_observer_fqns:
            observers_of_interest = self._detector_name_to_observer_fqns[desired_report]
            all_observers_of_interest.update(observers_of_interest)

        # go through all_observers_of_interest and remove them from the graph and model
        for observer_fqn in all_observers_of_interest:
            # remove the observer from the model
            self._model.delete_submodule(observer_fqn)

            # remove the observer from the graph structure
            node_obj = self._get_node_from_fqn(observer_fqn)

            if node_obj:
                self._model.graph.erase_node(node_obj)
            else:
                raise ValueError("Node no longer exists in GraphModule structure")

        # remember to recompile the model after graph surgery
        self._model.recompile()

    # save the generated reports (stat dicts only) for visualization purposes
    saved_reports: Dict[str, Dict] = {
        report_name : report_tuple[1] for report_name, report_tuple in reports_of_interest.items()
    }

    self._generated_reports = saved_reports

    # return the reports of interest
    return reports_of_interest

def _is_same_info_for_same_key(self, info_dict_a: Dict, info_dict_b: Dict) -> bool:
    r"""
    Takes in two dictionaries and ensures that any common keys between the two have the same
    values.

    Args:
        info_dict_a (Dict): First dictionary we wish to compare
        info_dict_b (Dict): Second dictionary we wish to compare

    Returns True if all shared keys have same values, false otherwise
    """
    # get the set of keys for both
    dict_a_keys: Set = set(info_dict_a.keys())
    dict_b_keys: Set = set(info_dict_b.keys())

    # get the intersection keys and check if same value for both dicts
    intersecting_keys: Set = dict_a_keys.intersection(dict_b_keys)

    for key in intersecting_keys:
        dict_a_val = info_dict_a[key]
        dict_b_val = info_dict_b[key]

        # if it's a tensor we have to handle separately
        if isinstance(dict_a_val, torch.Tensor):
            # bug fix: the old `sum(a != b) != 0` raised on 0-d tensors
            # (not iterable) and on shape mismatches; torch.equal handles
            # any shape and returns False on size/value mismatch
            if not isinstance(dict_b_val, torch.Tensor) or not torch.equal(dict_a_val, dict_b_val):
                return False
        else:
            # for non-tensor vals
            if dict_a_val != dict_b_val:
                return False

    # if no non matching shared keys found, return true
    return True
def _reformat_reports_for_visualizer(self) -> OrderedDict:
    r"""Reshape the generated reports into the layout the ModelReportVisualizer expects.

    Returns an OrderedDict mapping module_fqns to their features
    """
    # fold every detector's per-module info into a single dict keyed by module
    merged_features: Dict[str, Dict] = {}

    for report_info in self._generated_reports.values():
        for module_fqn, new_info in report_info.items():
            if module_fqn not in merged_features:
                # first detector to mention this module: take its info as-is
                merged_features[module_fqn] = new_info
                continue

            present_info: Dict = merged_features[module_fqn]

            # shared keys must agree across detectors before we union the dicts
            if not self._is_same_info_for_same_key(new_info, present_info):
                error_str = "You have the same key with different values across detectors. "
                error_str += "Someone incorrectly implemented a detector with conflicting keys to existing detectors."
                raise ValueError(error_str)

            merged_features[module_fqn] = {**new_info, **present_info}

    # order modules by their appearance in the traced model
    features_by_module: OrderedDict[str, Dict] = OrderedDict()
    for fqn, _module in self._model.named_modules():
        if fqn in merged_features:
            features_by_module[fqn] = merged_features[fqn]

    return features_by_module

def generate_visualizer(self) -> ModelReportVisualizer:
    r"""Create a ModelReportVisualizer from the reports produced by generate_model_report().

    Returns the generated ModelReportVisualizer instance initialized

    Note:
        Throws exception if attempt to get visualizers without generating report
    """
    # visualization needs at least one generated report to work from
    if len(self._generated_reports) == 0:
        raise Exception("Unable to generate visualizers without first generating reports")

    # modules mapped to their full set of collected features, in model order
    module_fqns_to_features: OrderedDict = self._reformat_reports_for_visualizer()
    return ModelReportVisualizer(module_fqns_to_features)

def _generate_qconfig_mapping_helper(
    self,
    detector_qconfig_info_combined: Dict[str, DetectorQConfigInfo],
    generation_function: Callable
) -> QConfigMapping:
    r"""Merge the compiled per-module detector qconfig info into one QConfigMapping
    using the supplied generation function.
    """
    qconfig_mapping = QConfigMapping()

    # walk the model so only modules that actually exist receive an entry
    for fqn, module in self._model.named_modules():
        if fqn in detector_qconfig_info_combined:
            compiled_info = detector_qconfig_info_combined[fqn]
            qconfig_mapping.set_module_name(fqn, generation_function(compiled_info, module))

    return qconfig_mapping

def _update_detector_quantizaiton_qconfig_info(self, combined_info: DetectorQConfigInfo, new_info: DetectorQConfigInfo):
    r"""Fold new_info's quantization flags into combined_info.

    Args:
        combined_info (DetectorQConfigInfo): The DetectorQConfigInfo we are compiling all of the information in
        new_info (DetectorQConfigInfo): The DetectorQConfigInfo with the information we are trying to merge the new info
            into it
    """
    # any detector asking for dynamic activations / per-channel weights wins
    activation_dynamic = combined_info.is_activation_dynamic or new_info.is_activation_dynamic
    weight_per_channel = combined_info.is_weight_per_channel or new_info.is_weight_per_channel
    combined_info.is_activation_dynamic = activation_dynamic
    combined_info.is_weight_per_channel = weight_per_channel

def _update_detector_equalization_qconfig_info(self, combined_info: DetectorQConfigInfo, new_info: DetectorQConfigInfo):
    r"""Fold new_info's equalization recommendation into combined_info.

    Args:
        combined_info (DetectorQConfigInfo): The DetectorQConfigInfo we are compiling all of the information in
        new_info (DetectorQConfigInfo): The DetectorQConfigInfo with the information we are trying to merge the new info
            into it
    """
    # a single recommendation from any detector is enough
    combined_info.is_equalization_recommended = (
        combined_info.is_equalization_recommended or new_info.is_equalization_recommended
    )
def _generate_module_fqn_to_detector_info_mapping(
    self,
    update_qconfig_info_function: Callable
) -> Dict[str, DetectorQConfigInfo]:
    r"""
    Compiles every detector's per-module qconfig suggestions into a single
    mapping, merging overlapping modules with the given update function.

    These configs are based on the suggestions provided by the ModelReport API
    and can only be generated once the reports have been generated.

    Args:
        update_qconfig_info_function (Callable) takes in a function that takes in two DetectorQConfigInfo
        and updates the one that is being compiled

    Returns a Dict mapping module_fqns to DetectorQConfigInfo objects

    Note:
        Throws exception if we try to generate mapping on model we already removed observers from
        Throws exception if we try to generate mapping without preparing for callibration
    """
    # if we haven't prepped model for callibration, then we shouldn't generate mapping yet
    if not self._prepared_flag:
        raise Exception("Cannot generate report without preparing model for callibration")

    # if we already removed the observers, we cannot generate the mapping
    if self._removed_observers:
        raise Exception("Cannot generate report on model you already removed observers from")

    # keep track of qconfig info for each module across detectors
    detector_qconfig_info_combined: Dict[str, DetectorQConfigInfo] = {}

    for detector in self._desired_report_detectors:
        # get the info from the detector
        detector_info: Dict[str, DetectorQConfigInfo] = detector.get_qconfig_info(self._model)

        # we go through the modules
        for module_fqn in detector_info:
            # see if we already have info on it
            if module_fqn in detector_qconfig_info_combined:
                # we combine the current options with what is there
                current_options = detector_qconfig_info_combined[module_fqn]
                detector_options = detector_info[module_fqn]

                update_qconfig_info_function(current_options, detector_options)
            else:
                # first detector to mention this module: take its info as-is
                detector_qconfig_info_combined[module_fqn] = detector_info[module_fqn]

    return detector_qconfig_info_combined

def generate_qconfig_mapping(self) -> QConfigMapping:
    r"""
    Generates a QConfigMapping based on the suggestions of the
    ModelReport API. The generated mapping encompasses all the
    different types of feedback from the different detectors
    all into one place.

    These configs are based on the suggestions provided by the ModelReport API
    and can only be generated once the reports have been generated.

    Returns a QConfigMapping for the quantization configuration

    Note:
        Throws exception if we try to generate mapping on model we already removed observers from
        Throws exception if we try to generate mapping without preparing for callibration
    """
    # get the mapping info
    detector_qconfig_info_combined = self._generate_module_fqn_to_detector_info_mapping(
        self._update_detector_quantizaiton_qconfig_info
    )

    # NOTE(review): an earlier comment promised pruning of fqns without an
    # input-weight recommendation, but no such pruning happens here — confirm
    # whether it was intentionally dropped

    # now we generate the QConfig for each of the options
    mapping: QConfigMapping = self._generate_qconfig_mapping_helper(
        detector_qconfig_info_combined,
        self._quantization_config_generator
    )

    # return the generated mapping
    return mapping

def _quantization_config_generator(self, detector_qconfig_info: DetectorQConfigInfo, module: torch.nn.Module) -> QConfig:
    r"""
    Returns the quantization configuration generated by the DetectorQConfigInfo object
    """
    return detector_qconfig_info.generate_quantization_qconfig(module)

def _equalization_config_generator(
    self,
    detector_qconfig_info: DetectorQConfigInfo,
    module: torch.nn.Module
) -> EqualizationQConfig:
    r"""
    We ignore the module argument here, and only focus on the detector_qconfig_info

    Returns the equalization configuration generated by the DetectorQConfigInfo object
    """
    return detector_qconfig_info.generate_equalization_qconfig()

def generate_equalization_mapping(self) -> QConfigMapping:
    r"""
    Generates a QConfigMapping based on the suggestions of the
    ModelReport API for equalization. The generated mapping encompasses all the
    different types of feedback from the input-weight equalization detector.

    These configs are based on the suggestions provided by the ModelReport API
    and can only be generated once the reports have been generated.

    Returns a QConfigMapping for the equalization configuration
    """
    # get the mapping info
    detector_qconfig_info_combined = self._generate_module_fqn_to_detector_info_mapping(
        self._update_detector_equalization_qconfig_info
    )

    # now we generate the QConfig for each of the options
    mapping: QConfigMapping = self._generate_qconfig_mapping_helper(
        detector_qconfig_info_combined,
        self._equalization_config_generator
    )

    # return the generated mapping
    return mapping
def __init__(self, ch_axis: int = 1, comp_percentile: float = 0.9):
    r"""Initialize every tracked statistic at its empty/neutral default.

    Args:
        ch_axis (int, optional): channel axis used for per-channel and outlier stats
            Default: 1
        comp_percentile (float, optional): percentile compared against the 100th
            percentile when looking for outliers; between 0 and 1 exclusive
            Default: 0.9
    """
    super().__init__(torch.qint8)
    self.num_batches_tracked = 0

    # epoch-wide extremes plus the running mean of each batch's range
    self.average_batch_activation_range: torch.Tensor = torch.tensor(float(0))
    self.epoch_activation_min = torch.tensor(float("inf"))
    self.epoch_activation_max = torch.tensor(float("-inf"))

    # per-channel min/max, computed along the configured channel axis
    self.ch_axis: int = ch_axis
    self.min_val: torch.Tensor = torch.tensor([])
    self.max_val: torch.Tensor = torch.tensor([])

    # per-channel percentile-ratio bookkeeping
    self.comp_percentile: torch.Tensor = torch.tensor([comp_percentile])
    self.average_percentile_ratio: torch.Tensor = torch.tensor([])
    self.percentile_batches_tracked: torch.Tensor = torch.tensor([])
    self.constant_channels: torch.Tensor = torch.tensor([])

def forward(self, x):
    r"""Record range, min/max and percentile statistics for x, returning x unchanged."""
    # work on a detached copy so no autograd history is retained
    detached = x.detach()
    detached = detached.to(self.epoch_activation_min.dtype)

    detached = self._calculate_range_stats(detached)
    detached = self._calculate_min_max_stats(detached)
    self._calculate_percentile_stats(detached)

    # the observer is an identity from the caller's point of view
    return x

def _calculate_range_stats(self, x_copy):
    r"""Update the epoch min/max and the running average batch range.

    Args
        x_copy: A copy of the forward data

    Returns the passed in x_copy
    """
    batch_min, batch_max = torch.aminmax(x_copy)

    # widen the epoch-level extremes in place
    self.epoch_activation_min.copy_(torch.min(self.epoch_activation_min, batch_min))
    self.epoch_activation_max.copy_(torch.max(self.epoch_activation_max, batch_max))

    # incremental mean of the per-batch activation range
    seen = self.num_batches_tracked
    self.average_batch_activation_range = (
        self.average_batch_activation_range * seen + (batch_max - batch_min)
    ) / (seen + 1)
    self.num_batches_tracked = seen + 1  # new batch was processed

    return x_copy
def _calculate_min_max_stats(self, x_copy):
    r"""Update the per-channel running min/max along self.ch_axis.

    Args
        x_copy: A copy of the forward data

    Returns the passed in x_copy
    """
    # bring the channel axis to the front, then collapse everything else
    axis_order = list(range(x_copy.dim()))
    axis_order[0], axis_order[self.ch_axis] = axis_order[self.ch_axis], axis_order[0]
    per_channel = x_copy.permute(axis_order)
    # match the buffer dtype: updates are in place and comparisons need it
    per_channel = per_channel.to(self.min_val.dtype)
    per_channel = torch.flatten(per_channel, start_dim=1)

    batch_min, batch_max = torch.aminmax(per_channel, dim=1)
    if self.min_val.numel() != 0 and self.max_val.numel() != 0:
        # fold the new batch into the running extremes
        batch_min = torch.min(batch_min, self.min_val)
        batch_max = torch.max(batch_max, self.max_val)

    # resize + copy keeps the buffers updated in place
    self.min_val.resize_(batch_min.shape)
    self.max_val.resize_(batch_max.shape)
    self.min_val.copy_(batch_min)
    self.max_val.copy_(batch_max)

    return x_copy
def _calculate_percentile_stats(self, x_copy):
    r"""Calculates and stores the per_channel percentile stats with forward values.
    Does calculation based on channel axis: self.ch_axis

    Args
        x_copy: A copy of the forward data

    Returns the passed in x_copy
    """
    # get the dimension of the copy
    x_dim = x_copy.size()

    # swap the channel axis to the front so each row is one channel
    new_axis_list = [i for i in range(len(x_dim))]  # noqa: C416
    new_axis_list[self.ch_axis] = 0
    new_axis_list[0] = self.ch_axis
    y = x_copy.permute(new_axis_list)
    # Need to match dtype of min/max because the updates to buffers
    # are done in place and types need to match for comparisons
    y = y.to(self.min_val.dtype)
    y = torch.flatten(y, start_dim=1)
    y = y.to(self.min_val.dtype)

    # find the percentile values along the axis
    # we want both 100th percentile and comp_percentile
    # we also want to find 0th quartile to see if we have constant channel
    quantiles_list = [0, self.comp_percentile, 1.00]
    quantiles_to_find = torch.tensor(quantiles_list, dtype=self.min_val.dtype)

    # find the quantiles
    # NOTE(review): after the permute+flatten above, channels sit on dim 0 and
    # the reduction should be over dim 1; `dim=self.ch_axis` only equals 1 for
    # the default ch_axis=1 — confirm behavior for other ch_axis values
    desired_quantiles = torch.quantile(y, quantiles_to_find, dim=self.ch_axis, interpolation="lower")
    zero_quantile = desired_quantiles[0]
    comp_quantile = desired_quantiles[1]
    hundreth_quartile = desired_quantiles[2]

    # if any of the channels have 0s, we ignore that channel for this calculation
    any_non_zero_quantile_value: torch.Tensor = (comp_quantile != torch.tensor([0])) | (hundreth_quartile != torch.tensor([0]))
    any_non_zero_quantile_value = any_non_zero_quantile_value.int()  # transform boolean values to int values

    # we also check if we have a constant channel
    any_constant_channels: torch.Tensor = (hundreth_quartile - zero_quantile) == torch.tensor([0])
    any_constant_channels = any_constant_channels.int()  # transform boolean values to int values

    # possibilities to get nan as an answer
    # will ignore any of these three cases with 0s and just not deal with them for now
    # case (1) 0 in numerator: issue if 0 is largest, all negative, and rest are really negative
    # case (2) 0 in denominator: is possible unless case 3, we just ignore
    # case (3) 0 in both: not outlier, channel just kinda useless, ignore

    # get the ratio and get rid of nan values
    quantile_ratios = hundreth_quartile / comp_quantile
    quantile_ratios = torch.nan_to_num(quantile_ratios)
    # update averages, remembering to only update if didn't have zeros
    ratio_if_not_zero = any_non_zero_quantile_value * quantile_ratios

    # if num_batches and average_ratio are not initialized, we want to initialize them
    if self.percentile_batches_tracked.shape[0] == 0 or self.average_percentile_ratio.shape[0] == 0:
        self.percentile_batches_tracked = torch.zeros_like(any_non_zero_quantile_value)
        self.average_percentile_ratio = torch.zeros_like(ratio_if_not_zero)

    # also initialize the constant channel var if that is not initialized separately
    if self.constant_channels.shape[0] == 0:
        self.constant_channels = torch.zeros_like(any_constant_channels)

    # get current num batches and average ratio
    num_batches = self.percentile_batches_tracked
    average_ratio = self.average_percentile_ratio

    # calculate new_number of batches, new_ratios, and get rid of nans because of 0 size batches
    new_number_of_batches: torch.Tensor = num_batches + any_non_zero_quantile_value
    new_ratios: torch.Tensor = ((average_ratio * num_batches) + ratio_if_not_zero) / new_number_of_batches
    new_ratios = torch.nan_to_num(new_ratios)

    # update the number of non-constant channels
    new_constant_count: torch.Tensor = self.constant_channels + any_constant_channels

    # update the values locally (in place, so buffer identity is preserved)
    self.percentile_batches_tracked.copy_(new_number_of_batches)
    self.average_percentile_ratio.copy_(new_ratios)
    self.constant_channels.copy_(new_constant_count)

    return x_copy
@torch.jit.export
def get_batch_to_epoch_ratio(self):
    r"""Return S = average_batch_activation_range / epoch_activation_range.

    Raises:
        ValueError: if the epoch range is zero, or if it is not finite —
            which includes the untouched-observer case where max stays -inf
            and min stays +inf, making the range -inf
    """
    epoch_activation_range = self.epoch_activation_max - self.epoch_activation_min

    if epoch_activation_range == torch.tensor(float(0)):
        raise ValueError("Range for Epoch is 0")
    elif torch.isinf(epoch_activation_range):
        # bug fix: with no observed data the range is (-inf) - (inf) = -inf;
        # the previous `== inf` comparison missed that case and silently
        # returned -0.0 instead of raising
        raise ValueError(
            "No data has been run through observer or infinity value present"
        )
    else:
        return self.average_batch_activation_range / epoch_activation_range

@torch.jit.export
def reset_batch_and_epoch_values(self):
    r"""Restore every tracked statistic to its initial default for a new epoch."""
    # set all the values back to their original defaults for a new epoch
    self.num_batches_tracked = 0
    self.average_batch_activation_range = torch.tensor(float(0))
    self.epoch_activation_min = torch.tensor(float("inf"))
    self.epoch_activation_max = torch.tensor(float("-inf"))
    self.min_val = torch.tensor([])
    self.max_val = torch.tensor([])
    self.average_percentile_ratio = torch.tensor([])
    self.percentile_batches_tracked = torch.tensor([])
    self.constant_channels = torch.tensor([])

@torch.jit.export
def calculate_qparams(self):
    r"""Unsupported: this observer only collects report statistics."""
    raise Exception(
        "calculate_qparams should not be called for ModelReportObserver"
    )
However, at a higher level, the class aims to provide + some level of visualization of statistics to PyTorch in order to make it easier to parse data and + diagnose any potential issues with data or a specific model. With respect to the visualizations, + the ModelReportVisualizer class currently supports several methods of visualizing data. + + Supported Visualization Methods Include: + - Table format + - Plot format (line graph) + - Histogram format + + For all of the existing visualization methods, there is the option to filter data based on: + - A module fqn prefix + - Feature [required for the plot and histogram] + + * :attr:`generated_reports` The reports generated by the ModelReport class in the structure below + Ensure sure that features that are the same across different report contain the same name + Ensure that objects representing the same features are the same type / dimension (where applicable) + + Note: + Currently, the ModelReportVisualizer class supports visualization of data generated by the + ModelReport class. However, this structure is extensible and should allow the visualization of + other information as long as the information is structured in the following general format: + + Report Structure + -- module_fqn [module with attached detectors] + | + -- feature keys [not every detector extracts same information] + [same collected info has same keys, unless can be specific to detector] + + + The goal behind the class is that the generated visualizations can be used in conjunction with the generated + report for people to get a better understanding of issues and what the fix might be. It is also just to provide + a good visualization platform, since it might be hard to parse through the ModelReport returned dictionary as + that grows in size. + + General Use Flow Expected + 1.) Initialize ModelReport object with reports of interest by passing in initialized detector objects + 2.) Prepare your model with prepare_fx + 3.) 
# keys for table dict
TABLE_TENSOR_KEY = "tensor_level_info"
TABLE_CHANNEL_KEY = "channel_level_info"

# Constants for header vals
NUM_NON_FEATURE_TENSOR_HEADERS = 2
NUM_NON_FEATURE_CHANNEL_HEADERS = 3

# Constants for row index in header
CHANNEL_NUM_INDEX = 2

def __init__(self, generated_reports: OrderedDict[str, Any]):
    r"""Store the reports produced by the ModelReport class.

    Args:
        generated_reports (Dict[str, Any]): The reports generated by the ModelReport class
            can also be a dictionary generated in another manner, as long as format is same
    """
    self.generated_reports = generated_reports

def get_all_unique_module_fqns(self) -> Set[str]:
    r"""Return every module fqn present in the stored reports.

    Saves users from parsing the generated_reports dictionary themselves when
    they want to use the filtering capabilities of this class.
    """
    # the report dict is keyed by module fqn
    return {module_fqn for module_fqn in self.generated_reports}

def get_all_unique_feature_names(self, plottable_features_only: bool = True) -> Set[str]:
    r"""Return every unique feature name across all modules in the reports.

    Args:
        plottable_features_only (bool): True if the user is only looking for plottable
            (tensor-valued) features, False otherwise
            Default: True

    Returns the set of unique feature names
    """
    unique_feature_names: Set[str] = set()

    for feature_dict in self.generated_reports.values():
        for feature_name, feature_val in feature_dict.items():
            # only tensor features can be plotted
            if plottable_features_only and type(feature_val) != torch.Tensor:
                continue
            unique_feature_names.add(feature_name)

    return unique_feature_names
def _get_filtered_data(self, feature_filter: str, module_fqn_filter: str) -> OrderedDict[str, Any]:
    r"""Filter the stored reports first by module fqn, then by feature name.

    Args:
        feature_filter (str): substring a feature name must contain to be kept
            If feature_filter = "", no feature-based filtering is done
        module_fqn_filter (str): substring a module fqn must contain to be kept
            If module_fqn_filter = "", no module-based filtering is done

    Returns an OrderedDict (sorted in order of model) mapping:
        module_fqns -> feature_names -> values
    """
    filtered_dict: OrderedDict[str, Any] = OrdDict()

    for module_fqn, module_reports in self.generated_reports.items():
        # skip modules that fail the fqn filter
        if module_fqn_filter != "" and module_fqn_filter not in module_fqn:
            continue

        # keep only features whose name passes the feature filter
        filtered_dict[module_fqn] = {
            feature_name: feature_val
            for feature_name, feature_val in module_reports.items()
            if feature_filter == "" or feature_filter in feature_name
        }

    return filtered_dict
def _generate_tensor_table(
    self,
    filtered_data: OrderedDict[str, Dict[str, Any]],
    tensor_features: List[str]
) -> Tuple[List, List]:
    r"""Build the headers and rows of the tensor-level information table.

    Args:
        filtered_data (OrderedDict[str, Dict[str, Any]]): An OrderedDict (sorted in order of model) mapping:
            module_fqns -> feature_names -> values
        tensor_features (List[str]): A list of the tensor level features

    Returns a tuple with:
        A list of the headers of the tensor table
        A list of lists containing the table information row by row
    """
    tensor_table: List[List[Any]] = []
    tensor_headers: List[str] = []

    # with no tensor features there is nothing to tabulate
    if len(tensor_features) > 0:
        for row_index, module_fqn in enumerate(filtered_data):
            row: List[Any] = [row_index, module_fqn]
            module_features = filtered_data[module_fqn]

            for feature in tensor_features:
                # "Not Applicable" marks features this module didn't report
                feature_val = module_features.get(feature, "Not Applicable")

                # unwrap scalar tensors so the table holds plain numbers
                if isinstance(feature_val, torch.Tensor):
                    feature_val = feature_val.item()

                row.append(feature_val)

            tensor_table.append(row)

    # headers only make sense when there is at least one data row
    if len(tensor_table) != 0:
        tensor_headers = ["idx", "layer_fqn"] + tensor_features

    return (tensor_headers, tensor_table)
+ + Args: + filtered_data (OrderedDict[str, Any]): An OrderedDict (sorted in order of model) mapping: + module_fqns -> feature_names -> values + channel_features (List[str]): A list of the channel level features + num_channels (int): Number of channels in the channel data + + Returns a tuple with: + A list of the headers of the channel table + A list of lists containing the table information row by row + The 0th index row will contain the headers of the columns + The rest of the rows will contain data + """ + # now we compose the table for the channel information table + channel_table: List[List[Any]] = [] + channel_headers: List[str] = [] + + # counter to keep track of number of entries in + channel_table_entry_counter: int = 0 + + if len(channel_features) > 0: + # now we add all channel data + for index, module_fqn in enumerate(filtered_data): + # we iterate over all channels + for channel in range(num_channels): + # we make a new row for the channel + new_channel_row = [channel_table_entry_counter, module_fqn, channel] + for feature in channel_features: + if feature in filtered_data[module_fqn]: + # add value if applicable to module + feature_val = filtered_data[module_fqn][feature][channel] + else: + # add that it is not applicable + feature_val = "Not Applicable" + + # if it's a tensor we want to extract val + if type(feature_val) is torch.Tensor: + feature_val = feature_val.item() + + # add value to channel specific row + new_channel_row.append(feature_val) + + # add to table and increment row index counter + channel_table.append(new_channel_row) + channel_table_entry_counter += 1 + + # add row of headers of we actually have something, otherwise just empty + if len(channel_table) != 0: + channel_headers = ["idx", "layer_fqn", "channel"] + channel_features + + return (channel_headers, channel_table) + + def generate_filtered_tables(self, feature_filter: str = "", module_fqn_filter: str = "") -> Dict[str, Tuple[List, List]]: + r""" + Takes in optional filter 
values and generates two tables with desired information.

        The generated tables are presented in both a list-of-lists format

        The reason for the two tables are that they handle different things:
        1.) the first table handles all tensor level information
        2.) the second table handles and displays all channel based information

        The reasoning for this is that having all the info in one table can make it ambiguous which collected
        statistics are global, and which are actually per-channel, so it's better to split it up into two
        tables. This also makes the information much easier to digest given the plethora of statistics collected

        Tensor table columns:
         idx  layer_fqn  feature_1   feature_2   feature_3   .... feature_n
        ----  ---------  ---------   ---------   ---------        ---------

        Per-Channel table columns:
         idx  layer_fqn  channel  feature_1   feature_2   feature_3   .... feature_n
        ----  ---------  -------  ---------   ---------   ---------        ---------

        Args:
            feature_filter (str, optional): Filters the features presented to only those that
                contain this filter substring
                Default = "", results in all the features being printed
            module_fqn_filter (str, optional): Only includes modules that contains this string
                Default = "", results in all the modules in the reports to be visible in the table

        Returns a dictionary with two keys:
            (Dict[str, Tuple[List, List]]) A dict containing two keys:
            "tensor_level_info", "channel_level_info"
                Each key maps to a tuple with:
                    A list of the headers of each table
                    A list of lists containing the table information row by row
                    The 0th index row will contain the headers of the columns
                    The rest of the rows will contain data

        Example Use:
            >>> # xdoctest: +SKIP("undefined variables")
            >>> mod_report_visualizer.generate_filtered_tables(
            ...     feature_filter = "per_channel_min",
            ...     module_fqn_filter = "block1"
            ...
) # generates table with per_channel_min info for all modules in block 1 of the model
        """
        # first get the filtered data
        filtered_data: OrderedDict[str, Any] = self._get_filtered_data(feature_filter, module_fqn_filter)

        # now we split into tensor and per-channel data
        tensor_features: Set[str] = set()
        channel_features: Set[str] = set()

        # keep track of the number of channels we have
        num_channels: int = 0

        for module_fqn in filtered_data:
            for feature_name in filtered_data[module_fqn]:
                # get the data for that specific feature
                feature_data = filtered_data[module_fqn][feature_name]

                # check if not zero dim tensor
                is_tensor: bool = isinstance(feature_data, torch.Tensor)
                is_not_zero_dim: bool = is_tensor and len(feature_data.shape) != 0

                if is_not_zero_dim or isinstance(feature_data, list):
                    # a non-zero-dim tensor or a list means per-channel data
                    channel_features.add(feature_name)
                    num_channels = len(feature_data)
                else:
                    # means is per-tensor
                    tensor_features.add(feature_name)

        # we make them lists for iteration purposes
        tensor_features_list: List[str] = sorted(tensor_features)
        channel_features_list: List[str] = sorted(channel_features)

        # get the tensor info
        tensor_headers, tensor_table = self._generate_tensor_table(filtered_data, tensor_features_list)

        # get the channel info
        channel_headers, channel_table = self._generate_channels_table(
            filtered_data, channel_features_list, num_channels
        )

        # let's now create the dictionary to return
        table_dict = {
            self.TABLE_TENSOR_KEY : (tensor_headers, tensor_table),
            self.TABLE_CHANNEL_KEY : (channel_headers, channel_table)
        }

        # return the two tables
        return table_dict

    def generate_table_visualization(self, feature_filter: str = "", module_fqn_filter: str = ""):
        r"""
        Takes in optional filter values and prints out formatted tables of the information.

        The reason for the two tables printed out instead of one large one are that they handle different things:
        1.)
the first table handles all tensor level information
        2.) the second table handles and displays all channel based information

        The reasoning for this is that having all the info in one table can make it ambiguous which collected
        statistics are global, and which are actually per-channel, so it's better to split it up into two
        tables. This also makes the information much easier to digest given the plethora of statistics collected

        Tensor table columns:
         idx  layer_fqn  feature_1   feature_2   feature_3   .... feature_n
        ----  ---------  ---------   ---------   ---------        ---------

        Per-Channel table columns:

         idx  layer_fqn  channel  feature_1   feature_2   feature_3   .... feature_n
        ----  ---------  -------  ---------   ---------   ---------        ---------

        Args:
            feature_filter (str, optional): Filters the features presented to only those that
                contain this filter substring
                Default = "", results in all the features being printed
            module_fqn_filter (str, optional): Only includes modules that contains this string
                Default = "", results in all the modules in the reports to be visible in the table

        Example Use:
            >>> # xdoctest: +SKIP("undefined variables")
            >>> mod_report_visualizer.generate_table_visualization(
            ...     feature_filter = "per_channel_min",
            ...     module_fqn_filter = "block1"
            ...
)
        >>> # prints out neatly formatted table with per_channel_min info
        >>> # for all modules in block 1 of the model
        """
        # see if we got tabulate
        if not got_tabulate:
            print("Make sure to install tabulate and try again.")
            return None

        # get the table dict and the specific tables of interest
        table_dict = self.generate_filtered_tables(feature_filter, module_fqn_filter)
        tensor_headers, tensor_table = table_dict[self.TABLE_TENSOR_KEY]
        channel_headers, channel_table = table_dict[self.TABLE_CHANNEL_KEY]

        # get the table string and print it out
        # now we have populated the tables for each one
        # let's create the strings to be returned
        table_str = ""
        # the tables will have some headers columns that are non-feature
        # ex. table index, module name, channel index, etc.
        # we want to look at header columns for features, that come after those headers
        if len(tensor_headers) > self.NUM_NON_FEATURE_TENSOR_HEADERS:
            # if we have at least one tensor level feature to be added we add tensor table
            table_str += "Tensor Level Information \n"
            table_str += tabulate(tensor_table, headers=tensor_headers)
        if len(channel_headers) > self.NUM_NON_FEATURE_CHANNEL_HEADERS:
            # if we have at least one channel level feature to be added we add channel table
            table_str += "\n\n Channel Level Information \n"
            table_str += tabulate(channel_table, headers=channel_headers)

        # if no features at all, let user know
        if table_str == "":
            table_str = "No data points to generate table with."

        print(table_str)

    def _get_plottable_data(self, feature_filter: str, module_fqn_filter: str) -> Tuple[List, List[List], bool]:
        r"""
        Takes in the feature filters and module filters and outputs the x and y data for plotting

        Args:
            feature_filter (str): Filters the features presented to only those that
                contain this filter substring
            module_fqn_filter (str): Only includes modules that contains this string

        Returns a tuple of three elements
            The first is a list containing relevant x-axis data
            The second is a list containing the corresponding y-axis data
            The third is a bool that is True when the data is per-channel

        Raises ValueError when the filters select more than one feature (a plot
        can only be generated for exactly one feature at a time)
        """
        # get the table dict and the specific tables of interest
        table_dict = self.generate_filtered_tables(feature_filter, module_fqn_filter)
        tensor_headers, tensor_table = table_dict[self.TABLE_TENSOR_KEY]
        channel_headers, channel_table = table_dict[self.TABLE_CHANNEL_KEY]

        # make sure it is only 1 feature that is being plotted
        # get the number of features in each of these
        tensor_info_features_count = len(tensor_headers) - ModelReportVisualizer.NUM_NON_FEATURE_TENSOR_HEADERS
        channel_info_features_count = len(channel_headers) - ModelReportVisualizer.NUM_NON_FEATURE_CHANNEL_HEADERS

        # see if valid tensor or channel plot
        is_valid_per_tensor_plot: bool = tensor_info_features_count == 1
        is_valid_per_channel_plot: bool = channel_info_features_count == 1

        # offset should either be one of tensor or channel table or neither
        feature_column_offset = ModelReportVisualizer.NUM_NON_FEATURE_TENSOR_HEADERS
        table = tensor_table

        # if a per_channel plot, we have different offset and table
        if is_valid_per_channel_plot:
            feature_column_offset = ModelReportVisualizer.NUM_NON_FEATURE_CHANNEL_HEADERS
            table = channel_table

        x_data: List = []
        y_data: List[List] = []
        # the feature will either be a tensor feature or channel feature
        if is_valid_per_tensor_plot:
            for table_row_num, row in enumerate(table):
                # get x_value to append
x_val_to_append = table_row_num + # the index of the feature will the 0 + num non feature columns + tensor_feature_index = feature_column_offset + row_value = row[tensor_feature_index] + if not type(row_value) == str: + x_data.append(x_val_to_append) + y_data.append(row_value) + elif is_valid_per_channel_plot: + # gather the x_data and multiple y_data + # calculate the number of channels + num_channels: int = max(row[self.CHANNEL_NUM_INDEX] for row in table) + 1 + for channel in range(num_channels): + y_data.append([]) # separate data list per channel + + for table_row_num, row in enumerate(table): + # get x_value to append + x_val_to_append = table_row_num + current_channel = row[self.CHANNEL_NUM_INDEX] # intially chose current channel + new_module_index: int = table_row_num // num_channels + x_val_to_append = new_module_index + + # the index of the feature will the 0 + num non feature columns + tensor_feature_index = feature_column_offset + row_value = row[tensor_feature_index] + if not type(row_value) == str: + # only append if new index we are appending + if len(x_data) == 0 or x_data[-1] != x_val_to_append: + x_data.append(x_val_to_append) + + # append value for that channel + y_data[current_channel].append(row_value) + else: + # more than one feature was chosen + error_str = "Make sure to pick only a single feature with your filter to plot a graph." + error_str += " We recommend calling get_all_unique_feature_names() to find unique feature names." + error_str += " Pick one of those features to plot." + raise ValueError(error_str) + + # return x, y values, and if data is per-channel + return (x_data, y_data, is_valid_per_channel_plot) + + def generate_plot_visualization(self, feature_filter: str, module_fqn_filter: str = ""): + r""" + Takes in a feature and optional module_filter and plots of the desired data. + + For per channel features, it averages the value across the channels and plots a point + per module. 
The reason for this is that for models with hundreds of channels, it can
        be hard to differentiate one channel line from another, and so the point of generating
        a single average point per module is to give a sense of general trends that encourage
        further deep dives.

        Note:
            Only features in the report that have tensor value data are plottable by this class
            When the tensor information is plotted, it will plot:
                idx as the x val, feature value as the y_val
            When the channel information is plotted, it will plot:
                the first idx of each module as the x val, feature value as the y_val [for each channel]
                The reason for this is that we want to be able to compare values across the
                channels for same layer, and it will be hard if values are staggered by idx
                This means each module is represented by only 1 x value
        Args:
            feature_filter (str): Filters the features presented to only those that
                contain this filter substring
            module_fqn_filter (str, optional): Only includes modules that contains this string
                Default = "", results in all the modules in the reports to be visible in the table

        Example Use:
            >>> # xdoctest: +SKIP("undefined variables")
            >>> mod_report_visualizer.generate_plot_visualization(
            ...     feature_filter = "per_channel_min",
            ...     module_fqn_filter = "block1"
            ...
) + >>> # outputs line plot of per_channel_min information for all + >>> # modules in block1 of model each channel gets it's own line, + >>> # and it's plotted across the in-order modules on the x-axis + """ + # checks if we have matplotlib and let's user know to install it if don't + if not got_matplotlib: + print("make sure to install matplotlib and try again.") + return None + + # get the x and y data and if per channel + x_data, y_data, data_per_channel = self._get_plottable_data(feature_filter, module_fqn_filter) + + # plot based on whether data is per channel or not + ax = plt.subplot() + ax.set_ylabel(feature_filter) + ax.set_title(feature_filter + " Plot") + plt.xticks(x_data) # only show ticks for actual points + + if data_per_channel: + ax.set_xlabel("First idx of module") + # set the legend as well + # plot a single line that is average of the channel values + num_modules = len(y_data[0]) # all y_data have same length, so get num modules + num_channels = len(y_data) # we want num channels to be able to calculate average later + + avg_vals = [sum(y_data[:][index]) / num_channels for index in range(num_modules)] + + # plot the three things we measured + ax.plot(x_data, avg_vals, label="Average Value Across {} Channels".format(num_channels)) + ax.legend(loc='upper right') + else: + ax.set_xlabel("idx") + ax.plot(x_data, y_data) + + # actually show the plot + plt.show() + + def generate_histogram_visualization(self, feature_filter: str, module_fqn_filter: str = "", num_bins: int = 10): + r""" + Takes in a feature and optional module_filter and plots the histogram of desired data. 

        Note:
            Only features in the report that have tensor value data can be viewed as a histogram
            If you want to plot a histogram from all the channel values of a specific feature for
            a specific model, make sure to specify both the model and the feature properly
            in the filters and you should be able to see a distribution of the channel data

        Args:
            feature_filter (str, optional): Filters the features presented to only those that
                contain this filter substring
                Default = "", results in all the features being printed
            module_fqn_filter (str, optional): Only includes modules that contains this string
                Default = "", results in all the modules in the reports to be visible in the table
            num_bins (int, optional): The number of bins to create the histogram with
                Default = 10, the values will be split into 10 equal sized bins

        Example Use:
            >>> # xdoctest: +SKIP
            >>> mod_report_visualizer.generate_histogram_visualization(
            ...     feature_filter = "per_channel_min",
            ...     module_fqn_filter = "block1"
            ...
)
        # outputs histogram of per_channel_min information for all modules in block1 of model
            information is gathered across all channels for all modules in block 1 for the
            per_channel_min and is displayed in a histogram of equally sized bins
        """
        # checks if we have matplotlib and let's user know to install it if don't
        if not got_matplotlib:
            print("make sure to install matplotlib and try again.")
            return None

        # get the x and y data and if per channel
        x_data, y_data, data_per_channel = self._get_plottable_data(feature_filter, module_fqn_filter)

        # for histogram, we just care about plotting the y data
        # plot based on whether data is per channel or not
        ax = plt.subplot()
        ax.set_xlabel(feature_filter)
        ax.set_ylabel("Frequency")
        ax.set_title(feature_filter + " Histogram")

        if data_per_channel:
            # set the legend as well
            # combine all the data
            all_data = []
            for index, channel_info in enumerate(y_data):
                all_data.extend(channel_info)

            # plt.hist returns (counts, bin_edges, patches); only the bin edges
            # are used below, to place the x ticks on the bin boundaries
            val, bins, _ = plt.hist(
                all_data,
                bins=num_bins,
                stacked=True,
                rwidth=0.8,
            )
            plt.xticks(bins)
        else:
            val, bins, _ = plt.hist(
                y_data,
                bins=num_bins,
                stacked=False,
                rwidth=0.8,
            )
            plt.xticks(bins)

        plt.show()
diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/convert.py b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/convert.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d2f012bd38cedf479ed0cbea93e3f8ce16cb9ea
--- /dev/null
+++ b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/convert.py
@@ -0,0 +1,1045 @@
from typing import Any, Dict, List, Optional, Set, Tuple, Union, Type, Callable
from torch.ao.quantization.quant_type import QuantType
import torch
import copy
import warnings
from torch.fx import (
    GraphModule,
)
from torch.fx.graph import (
    Graph,
    Node,
    Argument,
)
from ..utils import (
    activation_is_statically_quantized,
    weight_is_quantized,
get_qparam_dict,
    _parent_name,
    get_swapped_custom_module_class,
)
from ..qconfig import (
    QConfigAny,
    qconfig_equals
)
from ..qconfig_mapping import QConfigMapping
from .qconfig_mapping_utils import (
    _generate_node_name_to_qconfig,
    _compare_prepare_convert_qconfig_mappings,
    _update_qconfig_for_fusion,
    _is_qconfig_supported_by_dtype_configs,
    _update_qconfig_for_qat,
)
from torch.ao.quantization.backend_config.utils import (
    get_root_module_to_quantized_reference_module,
    get_pattern_to_dtype_configs,
    get_fused_module_classes,
    get_qat_module_classes,
)
from torch.ao.quantization.backend_config import (
    BackendConfig,
    get_native_backend_config,
)
from torch.ao.quantization.observer import _is_activation_post_process
from .graph_module import (
    _is_observed_module,
    _is_observed_standalone_module,
)
from ._equalize import update_obs_for_equalization, convert_eq_obs
from torch.nn.utils.parametrize import type_before_parametrizations
from .utils import (
    _get_module,
    _is_custom_module_lstm,
    get_custom_module_class_keys,
    create_getattr_from_value,
    collect_producer_nodes,
    graph_module_from_producer_nodes,
    node_arg_is_weight,
)
from torch.ao.quantization.utils import (
    is_per_channel,
    to_underlying_dtype,
)
from torch.ao.quantization.quantize import (
    _remove_qconfig,
)
from torch.ao.quantization.stubs import DeQuantStub
from .custom_config import (
    ConvertCustomConfig,
    PrepareCustomConfig,
)
from .lower_to_fbgemm import lower_to_fbgemm
# importing the lib so that the quantized_decomposed ops are registered
from ._decomposed import quantized_decomposed_lib  # noqa: F401
import operator

__all__ = [
    "convert",
    "convert_custom_module",
    "convert_standalone_module",
    "convert_weighted_module",
]

def _replace_observer_with_quantize_dequantize_node_decomposed(
        model: torch.nn.Module,
        graph: Graph,
        node: Node,
        modules: Dict[str, torch.nn.Module],
        node_name_to_scope: Dict[str, Tuple[str, type]],
        node_name_to_qconfig: Dict[str, QConfigAny]) -> None:
    """ Replace activation_post_process module call node with quantize and
    dequantize node working with decomposed Tensor

    Before:
    ... -> observer_0(x) -> ...
    After:
    ... -> torch.ops.quantized_decomposed.quantize_per_tensor(x, ...) ->
    torch.ops.quantized_decomposed.dequantize_per_tensor() -> ...

    or quantize_per_channel and dequantize_per_channel
    """
    assert modules is not None
    assert isinstance(node.target, str)
    module_path, prefix = _get_module_path_and_prefix(node, node_name_to_scope, node_name_to_qconfig)
    activation_post_process = modules[node.target]
    # skip replacing observers to quant/dequant nodes if the qconfigs of all
    # consumers and producers of this observer are None
    skip_replacement = all([
        _has_none_qconfig(n, node_name_to_qconfig) for n in
        list(node.args) + list(node.users.keys())])
    if skip_replacement or not _is_conversion_supported(activation_post_process):
        # didn't find corresponding quantize op and info for the activation_post_process
        # so we just remove the observer
        with graph.inserting_before(node):
            node.replace_all_uses_with(node.args[0])
            graph.erase_node(node)
        return

    # otherwise, we can convert the activation_post_process module call to quantize/dequantize node

    # 1. extract the information from activation_post_process module for generating
    # the quantize and dequantize operator
    dtype = activation_post_process.dtype  # type: ignore[attr-defined]

    is_dynamic = False
    if hasattr(activation_post_process, "is_dynamic"):
        is_dynamic = activation_post_process.is_dynamic  # type: ignore[assignment]

    if dtype in [torch.quint8, torch.qint8, torch.qint32] and \
            (not is_dynamic):
        # TODO: probably should cleanup this condition check, it's hard
        # to reason about this if and the following elif

        # uint8/int8/int32 static quantization branch

        # 1. extract information for inserting q/dq node from activation_post_process
        node_type = "call_function"
        quantize_op : Optional[Callable] = None
        scale, zero_point = activation_post_process.calculate_qparams()  # type: ignore[attr-defined, operator]
        if is_per_channel(activation_post_process.qscheme):  # type: ignore[attr-defined]
            ch_axis = int(activation_post_process.ch_axis)  # type: ignore[attr-defined, arg-type]
            quantize_op = torch.ops.quantized_decomposed.quantize_per_channel
            dequantize_op = torch.ops.quantized_decomposed.dequantize_per_channel
            quant_min = activation_post_process.quant_min
            quant_max = activation_post_process.quant_max
            dtype_ = to_underlying_dtype(dtype)
            qparams = {
                "_scale_": scale,
                "_zero_point_": zero_point,
                "_axis_": ch_axis,
                "_quant_min_": quant_min,
                "_quant_max_": quant_max,
                "_dtype_": dtype_
            }
        else:
            quantize_op = torch.ops.quantized_decomposed.quantize_per_tensor
            dequantize_op = torch.ops.quantized_decomposed.dequantize_per_tensor
            scale = float(scale)
            zero_point = int(zero_point)
            quant_min = activation_post_process.quant_min  # type: ignore[attr-defined]
            quant_max = activation_post_process.quant_max  # type: ignore[attr-defined]
            dtype_ = to_underlying_dtype(dtype)
            qparams = {
                "_scale_": scale,
                "_zero_point_": zero_point,
                "_quant_min_": quant_min,
                "_quant_max_": quant_max,
                "_dtype_": dtype_
            }

        # 2. replace activation_post_process node with quantize and dequantize
        with graph.inserting_before(node):
            input_node = node.args[0]
            quantize_op_inputs = [input_node]
            for key, value_or_node in qparams.items():
                # TODO: we can add the information of whether a value needs to
                # be registered as an attribute in qparams dict itself
                if key in ['_scale_', '_zero_point_']:
                    # For scale and zero_point values we register them as buffers in the root module.
                    # TODO: maybe need more complex attr name here
                    qparam_node = create_getattr_from_value(
                        model, graph, module_path + prefix + key, value_or_node)
                    quantize_op_inputs.append(qparam_node)
                else:
                    # for qparams that are not scale/zero_point (like axis, dtype) we store them as literals in the graph.
                    quantize_op_inputs.append(value_or_node)

            quantized_node = graph.create_node(node_type, quantize_op, tuple(quantize_op_inputs), {})
            # use the same qparams from quantize op
            dq_inputs = [quantized_node] + quantize_op_inputs[1:]
            dequantized_node = graph.call_function(
                dequantize_op,
                tuple(dq_inputs),
                {}
            )
            node.replace_all_uses_with(dequantized_node)
            graph.erase_node(node)
    elif is_dynamic:

        # uint8/int8/fp16 dynamic quantization

        # 1. extract information for inserting q/dq node from activation_post_process
        node_type = "call_function"
        quantize_op = torch.ops.quantized_decomposed.quantize_per_tensor.tensor
        # we only use choose_qparams for is_decomposed now,
        # but we should probably align the non-decomposed path with this as well,
        # and that can be done after we remove reduce_range flag
        # 1. extract qparams from activation_post_process module
        dtype_ = to_underlying_dtype(dtype)
        assert dtype_ in [torch.uint8, torch.int8], \
            "only uint8 and int8 are supported in reference flow for " \
            "dynamic quantization right now"
        quant_min = activation_post_process.quant_min  # type: ignore[attr-defined]
        quant_max = activation_post_process.quant_max  # type: ignore[attr-defined]
        # note: scale and zero_point are missing for quantize_per_tensor op
        # we'll need to get this from choose_qparams op, which we'll add after
        # this step
        qparams = {
            "_quant_min_": quant_min,
            "_quant_max_": quant_max,
            "_dtype_": dtype_
        }

        # 2. insert choose_qparams op and update the qparams list
        with graph.inserting_before(node):
            input_node = node.args[0]
            choose_qparams_op_inputs = [node.args[0]]
            for key, value in qparams.items():
                # we have quant_min, quant_max and dtype, all should be stored
                # as literals
                choose_qparams_op_inputs.append(value)
            choose_qparams_node = graph.create_node(
                "call_function",
                torch.ops.quantized_decomposed.choose_qparams.tensor,
                tuple(choose_qparams_op_inputs),
                {}
            )
            # choose_qparms returns (scale, zero_point)
            scale_node = graph.create_node(
                "call_function",
                operator.getitem,
                (choose_qparams_node, 0),
                {}
            )
            zero_point_node = graph.create_node(
                "call_function",
                operator.getitem,
                (choose_qparams_node, 1),
                {}
            )
            quant_min = qparams["_quant_min_"]
            quant_max = qparams["_quant_max_"]
            dtype = qparams["_dtype_"]
            qparams = {
                "_scale_": scale_node,
                "_zero_point_": zero_point_node,
                "_quant_min_": quant_min,
                "_quant_max_": quant_max,
                "_dtype_": dtype
            }

        # 3. replace activation_post_process node to quantize and dequantize node
        with graph.inserting_before(node):
            input_node = node.args[0]
            quantize_op_inputs = [input_node]
            for key, value_or_node in qparams.items():
                # TODO: we can add the information of whether a value needs to
                # be registered as an attribute in qparams dict itself
                if key in ['_scale_', '_zero_point_']:
                    # in this case we have a node in the graph since it's dynamically
                    # computed from the input, with choose_qparams op
                    qparam_node = value_or_node
                    quantize_op_inputs.append(qparam_node)
                else:
                    # for qparams that are not scale/zero_point (like axis, dtype) we
                    # store them as literals in the graph.
                    quantize_op_inputs.append(value_or_node)

            quantized_node = graph.create_node(node_type, quantize_op, tuple(quantize_op_inputs), {})
            # use the same qparams from quantize op
            dq_inputs = [quantized_node] + quantize_op_inputs[1:]
            # need to use the tensor variant of this op, since scale and zero_point
            # from choose_qparam are Tensors, instead of float/int, this is to
            # prevent these nodes being traced away by downstream systems
            dequantize_op = torch.ops.quantized_decomposed.dequantize_per_tensor.tensor
            dequantized_node = graph.call_function(
                dequantize_op,
                tuple(dq_inputs),
                {}
            )
            node.replace_all_uses_with(dequantized_node)
            graph.erase_node(node)
    elif dtype == torch.float16:
        raise NotImplementedError("decomposed to float16 op not implemented yet")

    # should not reach since we have checks in the beginning to make sure the
    # activation_post_process is supported

def _replace_observer_with_quantize_dequantize_node(
        model: torch.nn.Module,
        graph: Graph,
        node: Node,
        modules: Dict[str, torch.nn.Module],
        node_name_to_scope: Dict[str, Tuple[str, type]],
        node_name_to_qconfig: Dict[str, QConfigAny]) -> None:
    """ Replace activation_post_process module call node with quantize and
    dequantize node

    Before:
    ... -> observer_0(x) -> ...
    After:
    ... -> torch.quantize_per_tensor(x, ...) -> x.dequantize() -> ...
+ """ + assert modules is not None + assert isinstance(node.target, str) + module_path, prefix = _get_module_path_and_prefix(node, node_name_to_scope, node_name_to_qconfig) + activation_post_process = modules[node.target] + # skip replacing observers to quant/dequant nodes if the qconfigs of all + # consumers and producers of this observer are None + skip_replacement = all([ + _has_none_qconfig(n, node_name_to_qconfig) for n in + list(node.args) + list(node.users.keys())]) + if skip_replacement or not _is_conversion_supported(activation_post_process): + # didn't find correponding quantize op and info for the activation_post_process + # so we just remove the observer + with graph.inserting_before(node): + node.replace_all_uses_with(node.args[0]) + graph.erase_node(node) + return + + # otherwise, we can convert the activation_post_process module call to quantize/dequantize node + dtype = activation_post_process.dtype # type: ignore[attr-defined] + + is_dynamic = False + if hasattr(activation_post_process, "is_dynamic"): + is_dynamic = activation_post_process.is_dynamic # type: ignore[attr-defined, assignment] + + if dtype in [torch.quint8, torch.qint8, torch.qint32] and \ + (not is_dynamic): + # TODO: probably should cleanup this condition check, it's hard + # to reason about this if and the following elif + + # uint8/int8/int32 static quantization branch + + # 1. 
extract the information from activation_post_process module for generating + # the quantize and dequantize operator + node_type = "call_function" + quantize_op : Optional[Callable] = None + scale, zero_point = activation_post_process.calculate_qparams() # type: ignore[attr-defined, operator] + if is_per_channel(activation_post_process.qscheme): # type: ignore[attr-defined] + ch_axis = int(activation_post_process.ch_axis) # type: ignore[attr-defined, arg-type] + qparams = {"_scale_": scale, "_zero_point_": zero_point, "_axis_": ch_axis, "_dtype_": dtype} + quantize_op = torch.quantize_per_channel + else: + scale = float(scale) + zero_point = int(zero_point) + qparams = {"_scale_": scale, "_zero_point_": zero_point, "_dtype_": dtype} + quantize_op = torch.quantize_per_tensor + + # 2. replace activation_post_process node with quantize and dequantize + with graph.inserting_before(node): + input_node = node.args[0] + quantize_op_inputs = [input_node] + for key, value_or_node in qparams.items(): + # TODO: we can add the information of whether a value needs to + # be registered as an attribute in qparams dict itself + if key in ['_scale_', '_zero_point_']: + # For scale and zero_point values we register them as buffers in the root module. + # TODO: maybe need more complex attr name here + qparam_node = create_getattr_from_value( + model, graph, module_path + prefix + key, value_or_node) + quantize_op_inputs.append(qparam_node) + else: + # for qparams that are not scale/zero_point (like axis, dtype) we store them as literals in the graph. 
+ quantize_op_inputs.append(value_or_node) + + quantized_node = graph.create_node(node_type, quantize_op, tuple(quantize_op_inputs), {}) + dequantized_node = graph.call_method("dequantize", args=(quantized_node,)) + node.replace_all_uses_with(dequantized_node) + graph.erase_node(node) + elif is_dynamic: + + # uint8/int8/fp16 dynamic quantization branch + + node_type = "call_function" + quantize_op = torch.quantize_per_tensor_dynamic + # TODO: get reduce range from observer + # reduce_range = activation_post_process.reduce_range + reduce_range = torch.backends.quantized.engine in ("fbgemm", "x86") + qparams = {"_dtype_": dtype, "_reduce_range_": reduce_range} + + with graph.inserting_before(node): + input_node = node.args[0] + quantize_op_inputs = [input_node] + for key, value in qparams.items(): + quantize_op_inputs.append(value) + + quantized_node = graph.create_node(node_type, quantize_op, tuple(quantize_op_inputs), {}) + dequantized_node = graph.call_method("dequantize", args=(quantized_node,)) + node.replace_all_uses_with(dequantized_node) + graph.erase_node(node) + elif dtype == torch.float16: + node_type = "call_method" + quantize_op = "to" # type: ignore[assignment] + qparams = {"_dtype_": dtype} + with graph.inserting_before(node): + input_node = node.args[0] + quantize_op_inputs = [input_node] + for key, value in qparams.items(): + # TODO: we can add the information of whether a value needs to + # be registered as an attribute in qparams dict itself + quantize_op_inputs.append(value) + + quantized_node = graph.create_node(node_type, quantize_op, tuple(quantize_op_inputs), {}) + dequantized_node = graph.call_method("dequantize", args=(quantized_node,)) + node.replace_all_uses_with(dequantized_node) + graph.erase_node(node) + + # should not reach since we have checks in the begining to make sure the + # activation_post_process is supported + +# this is a temporary hack for custom module, we may want to implement +# this properly after the custom module class 
design is finalized
# TODO: DeQuantStubs are currently inserted only after custom module LSTM, while observers are inserted
# after all other custom modules. In the future, we should simply insert QuantStubs before and DeQuantStubs
# after custom modules in general, and replace these with "quantize" and "dequantize" nodes respectively.
def _replace_observer_or_dequant_stub_with_dequantize_node(node: Node, graph: Graph):
    """ Replace an observer (or DeQuantStub) call_module `node` with a plain
    "dequantize" call_method node placed after the producing custom-module node.

    The observer node is removed from the graph; all of its users are rerouted
    to the custom-module node, and a dequantize node is then inserted after it.
    """
    call_custom_module_node = node.args[0]
    assert isinstance(call_custom_module_node, Node), \
        f"Expecting the for call custom module node to be a Node, but got {call_custom_module_node}"
    node.replace_all_uses_with(call_custom_module_node)
    graph.erase_node(node)
    _insert_dequantize_node(call_custom_module_node, graph)

def _is_conversion_supported(activation_post_process: torch.nn.Module) -> bool:
    """ Return True if the observer's configuration can be converted to
    quantize/dequantize nodes: int8/uint8/int32 static, any dynamic, or fp16.
    Observers that match none of these branches are simply removed by the caller.
    """
    dtype = activation_post_process.dtype  # type: ignore[attr-defined]

    is_dynamic = False
    if hasattr(activation_post_process, "is_dynamic"):
        is_dynamic = activation_post_process.is_dynamic  # type: ignore[attr-defined, assignment]

    return (
        (dtype in [torch.quint8, torch.qint8, torch.qint32] and (not is_dynamic)) or  # type: ignore[return-value]
        is_dynamic or
        dtype == torch.float16
    )

def _has_none_qconfig(node: Argument, node_name_to_qconfig: Dict[str, QConfigAny]) -> bool:
    """ Check if a node has a qconfig of None, i.e. user requested to not quantize
    the node.  Non-Node arguments (constants, etc.) always return False.
    """
    return isinstance(node, Node) and node.name in node_name_to_qconfig and node_name_to_qconfig[node.name] is None

def _run_weight_observers(observed: GraphModule, backend_config: BackendConfig) -> None:
    """ Extract the subgraph that produces the weight for dynamic quant
    or weight only quant node and run the subgraph to observe the weight.
    Note that the observers of dynamic quant or weight only quant ops are
    run during the convert step.

    NOTE(review): only positional args of call_function nodes are scanned here;
    a weight passed as a keyword argument would not be observed.
    """
    for node in observed.graph.nodes:
        if node.op != "call_function":
            continue
        for node_arg in node.args:
            # node_arg is weight
            if node_arg and node_arg_is_weight(node, node_arg, backend_config):
                weight_observer_nodes = collect_producer_nodes(node_arg)
                if weight_observer_nodes is None:
                    continue
                weight_observer_module = \
                    graph_module_from_producer_nodes(
                        observed, weight_observer_nodes)
                # run the weight observer (executes the extracted subgraph,
                # which calls the observer on the produced weight)
                weight_observer_module()

def _maybe_recursive_remove_dequantize(arg: Any, node: Node, graph: Graph):
    """ If the arg is a dequantize Node, or a list/tuple/dict of dequantize Node,
    we'll recursively remove the dequantize Node from `node`'s inputs.
    """
    if isinstance(arg, Node) and \
       arg.op == "call_method" and \
       arg.target == "dequantize":
        quantize_node = arg.args[0]
        # we only replace the specific use since dequantize could be used by other nodes
        # as well
        node.replace_input_with(arg, quantize_node)
    elif isinstance(arg, (list, tuple)):
        for arg_element in arg:
            _maybe_recursive_remove_dequantize(arg_element, node, graph)
    elif isinstance(arg, dict):
        for arg_element in arg.values():
            _maybe_recursive_remove_dequantize(arg_element, node, graph)
    else:
        warnings.warn(f"Unsupported node type in recursive remove dequantize: {type(arg)}")

def _get_module_path_and_prefix(
        obs_node: Node,
        node_name_to_scope: Dict[str, Tuple[str, type]],
        node_name_to_qconfig: Dict[str, QConfigAny]):
    """ Given an observer node, get the `Scope` or the fully qualified name for
    the submodule containing the observed node, also return a prefix of "_input"
    when the observed node is an input of a F.linear op, and not the output of another
    quantized op.
    TODO: this logic is hacky, we should think about how to remove it or make it more
    general
    """
    observed_node = obs_node.args[0]
    # an observer can be inserted for both input of the next operator or output of the previous
    # operator (they can be the same)
    # this flag identifies if the observer is inserted only because the observed node is
    # the input of the next operator
    assert isinstance(observed_node, Node), \
        f"Expecting observed node to be a Node, but got {observed_node}"
    # NOTE: this is None (not False) when the observed node has no qconfig entry,
    # which falls through to the `else` branch below just like False does
    is_input_observer_only = node_name_to_qconfig[observed_node.name] is None \
        if observed_node.name in node_name_to_qconfig else None
    if is_input_observer_only:
        # if the quantize function is at the input of op, then we find the first user of the observer_node
        # to get the path. If a linear call_function is in the user list, we return the first instance
        # of linear node to get the FQN.
        users = list(obs_node.users)
        first_linear_use_or_first_use = users[0] if users else None
        linear_node = None
        for n in users:
            if n.op == "call_function" and n.target == torch.nn.functional.linear:
                linear_node = n
                break
        if linear_node:
            first_linear_use_or_first_use = linear_node
        prefix = "_input"
    else:
        # if the quantize function is at the output of the op, we use the observer input node to get the path
        first_linear_use_or_first_use = observed_node
        prefix = ""

    if first_linear_use_or_first_use and first_linear_use_or_first_use.name in node_name_to_scope:
        module_path, _ = node_name_to_scope[first_linear_use_or_first_use.name]
    else:
        # TODO: it's not used, so actually we can skip quantization
        # but this requires changing return type of quantize_node
        # we can fix it later if needed
        module_path = ""
    return module_path, prefix

def _insert_dequantize_node(
        node: Node,
        graph: Graph):
    """ Inserts dequantize node for `node` in `graph` and reroutes all of
    `node`'s existing users through it.
    """
    with graph.inserting_after(node):
        dequantize_node = graph.call_method("dequantize", (node,))
        # snapshot users via dict() since replace_input_with mutates node.users
        for user_node in dict(node.users):
            if user_node is not dequantize_node:
                user_node.replace_input_with(node, dequantize_node)

def _maybe_get_observer_for_node(
    node: Node,
    modules: Dict[str, torch.nn.Module]
) -> Optional[torch.nn.Module]:
    """
    If the node is observed (one of its users is an activation_post_process
    call_module), return the observer instance. Otherwise, return None.
    """
    for maybe_obs_node, _ in node.users.items():
        if maybe_obs_node.op == 'call_module':
            maybe_obs = modules[str(maybe_obs_node.target)]
            if _is_activation_post_process(maybe_obs):
                return maybe_obs
    return None

def convert_standalone_module(
        node: Node,
        modules: Dict[str, torch.nn.Module],
        model: torch.fx.GraphModule,
        is_reference: bool,
        backend_config: Optional[BackendConfig]):
    """ Converts an observed standalone module to a quantized standalone module by calling
    the fx convert api, currently using the same `is_reference` flag as parent, but we may
    change this behavior in the future (e.g. separating quantization and lowering for
    standalone module as well)

    Args:
        - node: The call_module node of the observed standalone module
        - modules: named_module of original model
        - model: original model
        - is_reference: a flag from parent provided by user to decide if we want to
          produce a reference model or a fbgemm/qnnpack model
        - backend_config: backend configuration of the target backend of quantization
    """
    # TODO: remove is_reference flag
    if is_reference:
        convert_fn = torch.ao.quantization.quantize_fx.convert_to_reference_fx
    else:
        convert_fn = torch.ao.quantization.quantize_fx.convert_fx  # type: ignore[attr-defined]
    # We know that observed standalone module is a GraphModule since
    # it's produced by us
    observed_standalone_module : GraphModule = modules[str(node.target)]  # type: ignore[assignment]
    sm_input_quantized_idxs = \
        observed_standalone_module \
        .meta["_observed_graph_module_attrs"].standalone_module_input_quantized_idxs
    # remove the dequantize nodes for inputs
    args = list(node.args)
    for idx in range(len(args)):
        if idx in sm_input_quantized_idxs:
            arg = args[idx]
            if arg.op == "call_method" and arg.target == "dequantize":  # type: ignore[union-attr]
                quantize_node = arg.args[0]  # type: ignore[union-attr]
                node.replace_input_with(arg, quantize_node)
                if len(arg.users) == 0:  # type: ignore[union-attr]
                    model.graph.erase_node(arg)
    # add dequantize node for output
    sm_output_quantized_idxs = \
        observed_standalone_module \
        .meta["_observed_graph_module_attrs"].standalone_module_output_quantized_idxs
    if len(sm_output_quantized_idxs) > 0:
        # NOTE(review): the second string literal below is a stray expression
        # statement, NOT part of the assert message — the assert message is just
        # "Currently only quantized". The two lines were presumably meant to be
        # implicitly concatenated; confirm and join them (with a space) upstream.
        assert sm_output_quantized_idxs[0] == 0, "Currently only quantized"
        "output idxs = [0] is supported"

        # if it's non-empty, then it means the output is kept in quantized form
        # we'll just add a dequantize node after this node
        _insert_dequantize_node(node, model.graph)

    # TODO: allow convert_custom_config to override backend_config
    # for standalone module
    quantized_standalone_module = convert_fn(
        observed_standalone_module,
        backend_config=backend_config)
    parent_name, name = _parent_name(node.target)
    # update the modules dict
    setattr(modules[parent_name], name, quantized_standalone_module)
    modules[str(node.target)] = quantized_standalone_module

def convert_weighted_module(
        node: Node,
        modules: Dict[str, torch.nn.Module],
        observed_node_names: Set[str],
        node_name_to_qconfig: Dict[str, QConfigAny],
        backend_config: BackendConfig,
        is_decomposed: bool = False):
    """ Convert a weighted module to reference quantized module in the model
    If the QConfig of a QAT module is not set, the module will still be converted to
    a float module.

    Args:
        - node: The call_module node of the observed weighted module
        - modules: named_module of original model
        - observed_node_names: names for the set of observed fx node, we can skip
          this conversion if the node is not observed
        - node_name_to_qconfig: node name to qconfig mapping from prepare
        - backend_config: backend configuration of the target backend
        - is_decomposed: whether decomposed quantized tensors are used
    """
    original_module = modules[str(node.target)]
    qconfig: QConfigAny = original_module.qconfig  # type: ignore[assignment]
    weight_post_process = None
    qat_module_classes = get_qat_module_classes(backend_config)

    if isinstance(
            original_module,
            qat_module_classes):
        # Converting qat module to a float module, we need to attach
        # weight fake_quant to the module, weight fake_quant is assumed to be run during
        # QAT so we don't need to run it again here
        weight_post_process = original_module.weight_fake_quant
        original_module = original_module.to_float()  # type: ignore[operator]
        # change qat module to float module
        parent_name, name = _parent_name(node.target)
        setattr(modules[parent_name], name, original_module)

    is_observed = node.name in observed_node_names
    # If a qconfig is not defined for this node, then skip converting to a reference module
    if qconfig is None or _has_none_qconfig(node, node_name_to_qconfig) or not is_observed:
        return

    # skip converting to reference quantized module if the qconfig is not supported
    pattern_to_dtype_configs = get_pattern_to_dtype_configs(backend_config)
    dtype_configs = pattern_to_dtype_configs.get(type(original_module), [])
    if not _is_qconfig_supported_by_dtype_configs(qconfig, dtype_configs):
        return

    # TODO: rename weight_is_statically_quantized to weight_is_int8_quantized
    is_weight_quantized = weight_is_quantized(qconfig)

    # the condition for swapping the module to reference quantized module is:
    # weights need to be quantized
    if not is_weight_quantized:
        return

    fused_module = None
    float_module = original_module
    # extract the individual float_module and fused module
    if isinstance(original_module, torch.ao.nn.intrinsic._FusedModule):
        fused_module = float_module
        float_module = fused_module[0]  # type: ignore[index]

    # TODO: move this to the reference quantized module
    # weight_qparams or weight_qparams dict
    wq_or_wq_dict = {"is_decomposed": is_decomposed}
    if isinstance(float_module, torch.nn.RNNCellBase):
        # RNN cells carry two weights (input-hidden and hidden-hidden);
        # observe each one with a fresh observer and record both qparam dicts
        weight_post_process_ih = qconfig.weight()  # type: ignore[union-attr, operator]
        weight_post_process_hh = qconfig.weight()  # type: ignore[union-attr, operator]
        weight_post_process_ih(float_module.weight_ih)
        weight_post_process_hh(float_module.weight_hh)
        weight_qparams_ih = get_qparam_dict(weight_post_process_ih)
        weight_qparams_hh = get_qparam_dict(weight_post_process_hh)
        wq_or_wq_dict.update({
            "weight_ih": weight_qparams_ih,
            "weight_hh": weight_qparams_hh,
        })
    elif isinstance(float_module, (torch.nn.LSTM, torch.nn.GRU)):
        # format for wq_or_wq_dict (flattened attributes):
        # {"weight_ih_l0_scale": ..., "weight_ih_l0_qscheme": ..., ...}
        for wn in float_module._flat_weights_names:
            if hasattr(float_module, wn) and wn.startswith("weight"):
                weight = getattr(float_module, wn)
                weight_post_process = qconfig.weight()  # type: ignore[union-attr, operator]
                if weight_post_process.dtype == torch.qint8:  # type: ignore[union-attr]
                    weight_post_process(weight)  # type: ignore[operator, misc]
                wq_or_wq_dict[wn] = get_qparam_dict(weight_post_process)
    else:
        # weight_post_process is None means the original module is not a QAT module
        # we need to get weight_post_process from qconfig in this case
        if weight_post_process is None:
            weight_post_process = qconfig.weight()  # type: ignore[union-attr, operator]
        # run weight observer
        # TODO: This is currently a hack for QAT to get the right shapes for scale and zero point.
        # In the future, we should require the user to calibrate the model after calling prepare
        # Issue: https://github.com/pytorch/pytorch/issues/73941
        weight_post_process(float_module.weight)  # type: ignore[operator]
        wq_or_wq_dict.update(get_qparam_dict(weight_post_process))

    # We use the same reference module for all modes of quantization: static, dynamic, weight_only
    # root_module_to_quantized_reference_module: module mapping from root (floating point) module class
    # to quantized reference module class, e.g. nn.Conv2d to nn.quantized._reference.Conv2d
    root_module_to_quantized_reference_module = get_root_module_to_quantized_reference_module(backend_config)
    ref_qmodule_cls = root_module_to_quantized_reference_module.get(type_before_parametrizations(float_module), None)
    assert (
        ref_qmodule_cls is not None
    ), f"No reference quantized module class configured for {type_before_parametrizations(float_module)}"
    ref_qmodule = ref_qmodule_cls.from_float(float_module, wq_or_wq_dict)  # type: ignore[attr-defined]
    if fused_module is not None:
        fused_module[0] = ref_qmodule  # type: ignore[operator]
    else:
        parent_name, name = _parent_name(node.target)
        setattr(modules[parent_name], name, ref_qmodule)

def _remove_previous_dequantize_in_custom_module(node: Node, prev_node: Node, graph: Graph):
    """
    Given a custom module `node`, if the previous node is a dequantize, reroute the custom as follows:

    Before: quantize - dequantize - custom_module
    After: quantize - custom_module
                 \\ - dequantize
    """
    # expecting the input node for a custom module node to be a Node
    assert isinstance(prev_node, Node), \
        f"Expecting the argument for custom module node to be a Node, but got {prev_node}"
    if prev_node.op == "call_method" and prev_node.target == "dequantize":
        node.replace_input_with(prev_node, prev_node.args[0])
        # Remove the dequantize node if it doesn't have other users
        if len(prev_node.users) == 0:
            graph.erase_node(prev_node)

def convert_custom_module(
        node: Node,
        graph: Graph,
        modules: Dict[str, torch.nn.Module],
        custom_module_class_mapping: Dict[QuantType, Dict[Type, Type]],
        statically_quantized_custom_module_nodes: Set[Node]):
    """ Converts an observed custom module to a quantized custom module based on
    `custom_module_class_mapping`
    For static quantization, we'll also remove the previous `dequantize` node and
    attach the observer node for output to the module, the observer for the node
    will be converted to a dequantize node instead of quantize-dequantize pairs
    later in the graph. In the end we would have a quantized custom module that
    has the same interface as a default quantized module in nn.quantized namespace,
    i.e. quantized input and quantized output.

    Args:
        - node: The call_module node of the observed custom module
        - graph: The graph containing the node
        - modules: named_module of original model
        - custom_module_class_mapping: mapping from observed custom module class to
          quantized custom module class, used to swap custom modules
        - statically_quantized_custom_module_nodes: we'll add the custom module node
          if we find it is statically quantized, this will be used later when converting
          observers to quant/dequant node pairs, if the observed node is a statically
          quantized custom module nodes, we'll convert the observer to a dequantize node,
          this is to keep the interface the same as the default quantized module.
          TODO: maybe we want to redesign this part to align with reference model design
          as well, but there has been some discussions around the interface, so we can do
          it later.
    """
    observed_custom_module = modules[str(node.target)]
    # NOTE(review): `maybe_obs` is assigned but never used; the observer is
    # re-fetched below as `activation_post_process` — candidate for removal
    maybe_obs = _maybe_get_observer_for_node(node, modules)
    qconfig = observed_custom_module.qconfig
    if activation_is_statically_quantized(qconfig):
        statically_quantized_custom_module_nodes.add(node)
        if _is_custom_module_lstm(node, modules):
            # The inputs are tuples in the form (input, (hidden0, hidden1))
            # Ensure all three input nodes are quantized
            assert (
                len(node.args) == 2 and
                isinstance(node.args[1], tuple) and
                len(node.args[1]) == 2
            )
            (inputs, (hidden0, hidden1)) = node.args  # type: ignore[misc]
            assert isinstance(inputs, Node)
            assert isinstance(hidden0, Node)
            assert isinstance(hidden1, Node)
            _remove_previous_dequantize_in_custom_module(node, inputs, graph)
            _remove_previous_dequantize_in_custom_module(node, hidden0, graph)
            _remove_previous_dequantize_in_custom_module(node, hidden1, graph)
        else:
            # remove the previous dequant node to ensure the inputs are quantized
            arg = node.args[0]
            assert isinstance(arg, Node)
            _remove_previous_dequantize_in_custom_module(node, arg, graph)
            # absorb the following observer into the module conversion
            activation_post_process = _maybe_get_observer_for_node(node, modules)
            assert activation_post_process is not None
            observed_custom_module.activation_post_process = activation_post_process

    # swap the observed custom module to quantized custom module
    quantized_custom_module_class = get_swapped_custom_module_class(
        observed_custom_module, custom_module_class_mapping, qconfig)
    quantized_custom_module = \
        quantized_custom_module_class.from_observed(observed_custom_module)
    parent_name, name = _parent_name(node.target)
    setattr(modules[parent_name], name, quantized_custom_module)

def convert(
        model: GraphModule, is_reference: bool = False,
        convert_custom_config: Union[ConvertCustomConfig, Dict[str, Any], None] = None,
        is_standalone_module: bool = False,
        _remove_qconfig_flag: bool = True,
        qconfig_mapping: Union[QConfigMapping, Dict[str, Any], None] = None,
        backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
        is_decomposed: bool = False) -> torch.nn.Module:
    """
    We will convert an observed model (a module with observer calls) to a reference
    quantized model, the rule is simple:
    1. for each observer module call in the graph, we'll convert it to calls to
       quantize and dequantize functions based on the observer instance
    2. for weighted operations like linear/conv, we need to convert them to reference
       quantized module, this requires us to know whether the dtype configured for the
       weight is supported in the backend, this is done in prepare step and the result
       is stored in observed_node_names, we can decide whether we need to swap the
       module based on this set

    Args:
       * `is_standalone_module`: when this flag is True, it means we are quantizing
         a submodule that is not inlined in parent module, and will be quantized
         separately as one unit.

       * `is_decomposed`: a boolean flag to indicate whether we want to use the
         quantize operator for decomposed quantized tensor
         (torch.ops.quantized_decomposed.quantize_per_tensor) or default/standalone
         quantized tensor (torch.quantize_per_tensor)

    Returns:
         a quantized standalone module, whether input/output is quantized is
         specified by prepare_custom_config, with
         input_quantized_idxs, output_quantized_idxs, please
         see docs for :func:`~torch.ao.quantization.prepare_fx` for details
    """
    if convert_custom_config is None:
        convert_custom_config = ConvertCustomConfig()

    if isinstance(convert_custom_config, Dict):
        warnings.warn(
            "Passing a convert_custom_config_dict to convert is deprecated and will not be supported "
            "in a future version. Please pass in a ConvertCustomConfig instead.")
        convert_custom_config = ConvertCustomConfig.from_dict(convert_custom_config)

    if isinstance(qconfig_mapping, Dict):
        warnings.warn(
            "Passing a QConfig dictionary to convert is deprecated and will not be supported "
            "in a future version. Please pass in a QConfigMapping instead.")
        qconfig_mapping = QConfigMapping.from_dict(qconfig_mapping) if qconfig_mapping else None
    qconfig_mapping = copy.deepcopy(qconfig_mapping)
    assert(qconfig_mapping is None or isinstance(qconfig_mapping, QConfigMapping))

    if isinstance(backend_config, Dict):
        warnings.warn(
            "Passing a backend_config_dict to prepare is deprecated and will not be supported "
            "in a future version. Please pass in a BackendConfig instead.")
        backend_config = BackendConfig.from_dict(backend_config)

    if backend_config is None:
        backend_config = get_native_backend_config()

    assert _is_observed_module(model), \
        'incoming model must be produced by prepare_fx'
    observed_graph_module_attrs = model.meta["_observed_graph_module_attrs"]
    node_name_to_scope: Dict[str, Tuple[str, type]] = observed_graph_module_attrs.node_name_to_scope
    prepare_custom_config: PrepareCustomConfig = observed_graph_module_attrs.prepare_custom_config
    observed_node_names: Set[str] = observed_graph_module_attrs.observed_node_names
    node_name_to_qconfig: Dict[str, QConfigAny] = observed_graph_module_attrs.node_name_to_qconfig  # type: ignore[assignment]

    # mapping from fully qualified module name to module instance
    # for example,
    # {
    #   '': Model(...),
    #   'linear': Linear(...),
    #   'linear.weight_fake_quant': PerChannelMinMaxObserver(...),
    # }
    # We use remove_duplicate=False here because torch.cat uses
    # the same activation_post_process module instance but different names
    modules = dict(model.named_modules(remove_duplicate=False))

    # TODO refactor this code once we update the prepare logic to have additional information on
    # which graph nodes have been observed and share that with convert to decide which observers to ignore.
    if qconfig_mapping:
        prepare_qconfig_mapping: QConfigMapping = observed_graph_module_attrs.qconfig_mapping  # type: ignore[assignment]
        modules_copy = copy.deepcopy(modules)

        if observed_graph_module_attrs.is_qat:
            _update_qconfig_for_qat(qconfig_mapping, backend_config)
        _update_qconfig_for_fusion(model, qconfig_mapping)

        _compare_prepare_convert_qconfig_mappings(prepare_qconfig_mapping, qconfig_mapping)  # type: ignore[arg-type]
        convert_node_name_to_qconfig = _generate_node_name_to_qconfig(
            model, modules_copy, model.graph, qconfig_mapping, node_name_to_scope)
        # check the convert_node_name_to_qconfig generated and ensure that
        # all the values either match what was set in prepare node_name_to_qconfig
        # or are set to None in the convert_node_name_to_qconfig.
        for k, v in node_name_to_qconfig.items():
            assert k in convert_node_name_to_qconfig, 'Expected key {} in convert node_name_to_qconfig'.format(k)
            if convert_node_name_to_qconfig[k] is not None:
                assert qconfig_equals(v, convert_node_name_to_qconfig[k]), \
                    "Expected k {} to have the same value in prepare and convert QConfigMappings, " \
                    "but {} was updated to {}".format(k, v, convert_node_name_to_qconfig[k])
        node_name_to_qconfig = convert_node_name_to_qconfig

    custom_module_classes = get_custom_module_class_keys(convert_custom_config.observed_to_quantized_mapping)
    custom_module_class_mapping = convert_custom_config.observed_to_quantized_mapping

    if observed_graph_module_attrs.equalization_node_name_to_qconfig is not None:
        # If we want to do equalization then do the following:
        # Calculate the equalization scale, update the observers with the scaled
        # inputs, and scale the weight
        weight_eq_obs_dict = update_obs_for_equalization(model, modules)
        convert_eq_obs(model, modules, weight_eq_obs_dict)

    # always run weight observers in the top level forward method
    # for dynamic quant ops or weight only quant ops
    _run_weight_observers(model, backend_config)

    graph_inputs: List[str] = []
    for node in model.graph.nodes:
        if node.op == 'placeholder':
            graph_inputs.append(node.name)

    # additional state to override inputs to be quantized, if specified
    # by the user
    placeholder_node_seen_cnt = 0
    input_quantized_idxs: List[int] = prepare_custom_config.input_quantized_indexes
    output_quantized_idxs: List[int] = prepare_custom_config.output_quantized_indexes

    root_module_to_quantized_reference_module = get_root_module_to_quantized_reference_module(backend_config)
    # convert tuples so that it can work with isinstance(module, tuple_of_classes)
    root_module_classes = tuple(root_module_to_quantized_reference_module.keys())
    qat_module_classes = get_qat_module_classes(backend_config)
    fused_module_classes = get_fused_module_classes(backend_config)
    statically_quantized_custom_module_nodes: Set[Node] = set()

    # iterate over a snapshot (list(...)) since the loop body mutates the graph
    for node in list(model.graph.nodes):
        if node.op == 'placeholder':
            cur_placeholder_node_idx = placeholder_node_seen_cnt
            placeholder_node_seen_cnt += 1
            if cur_placeholder_node_idx in input_quantized_idxs:
                # Inputs are assumed to be quantized if the user specified the
                # input_quantized_idxs override.
                # we need to dequantize the inputs since all operators took
                # floating point inputs in reference quantized models
                _insert_dequantize_node(node, model.graph)
        elif node.op == "output":
            # If the argument is empty we don't need to do anything
            if len(output_quantized_idxs) == 0:
                continue
            # Result are kept quantized if the user specified the
            # output_quantized_idxs override.
            # Remove the dequantize operator for the node in the end if any
            return_node = node
            output = node.args[0]
            # outputs can be Node, list, tuple, dict, other cases are not supported yet
            if isinstance(output, (list, tuple)):
                for idx in output_quantized_idxs:
                    _maybe_recursive_remove_dequantize(output[idx], return_node, model.graph)
            elif isinstance(output, (Node, dict)):
                # we treat dict as a single argument currently, but it can be extended
                # to support {"key": dtype} after we change output_quantized_idxs to
                # dict
                if 0 in output_quantized_idxs:
                    _maybe_recursive_remove_dequantize(output, return_node, model.graph)
            else:
                warnings.warn(f"Unsupported node type for output_quantized_idxs: {type(output)}")
        elif node.op == "call_module":
            mod = _get_module(node, modules)
            assert mod is not None
            if _is_activation_post_process(mod):
                observed_node = node.args[0]
                if observed_node in statically_quantized_custom_module_nodes:
                    _replace_observer_or_dequant_stub_with_dequantize_node(node, model.graph)
                else:
                    if is_decomposed:
                        _replace_observer_with_quantize_dequantize_node_decomposed(
                            model, model.graph, node, modules, node_name_to_scope,
                            node_name_to_qconfig)
                    else:
                        _replace_observer_with_quantize_dequantize_node(
                            model, model.graph, node, modules, node_name_to_scope,
                            node_name_to_qconfig)
            elif isinstance(mod, DeQuantStub):
                _replace_observer_or_dequant_stub_with_dequantize_node(node, model.graph)
            elif _is_observed_standalone_module(mod):
                convert_standalone_module(
                    node, modules, model, is_reference, backend_config)
            # below this point `type_before_parametrizations` is used
            # instead of `type` to handle situations with fx quant + sparsity
            elif type_before_parametrizations(mod) in set(
                    root_module_classes).union(qat_module_classes).union(fused_module_classes):
                # extra check for fused module classes to make sure they are fused module classes
                # of target modules
                if type_before_parametrizations(mod) in fused_module_classes and \
                   type_before_parametrizations(mod[0]) not in root_module_classes:  # type: ignore[index]
                    continue
                convert_weighted_module(
                    node, modules, observed_node_names, node_name_to_qconfig, backend_config, is_decomposed)
            elif type_before_parametrizations(mod) in custom_module_classes:
                convert_custom_module(
                    node, model.graph, modules, custom_module_class_mapping,
                    statically_quantized_custom_module_nodes)

    # remove deadcode after converting observers to quant/dequant ops
    model.graph.eliminate_dead_code()
    model = GraphModule(model, model.graph)

    # TODO: maybe move this to quantize_fx.py
    if not is_reference:
        model = lower_to_fbgemm(model, node_name_to_qconfig, node_name_to_scope)

    # TODO: this looks hacky, we want to check why we need this and see if we can
    # remove this
    # removes qconfig and activation_post_process modules
    if _remove_qconfig_flag:
        _remove_qconfig(model)
    model.delete_all_unused_submodules()
    model.meta.pop("_observed_graph_module_attrs", None)
    return model
diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/custom_config.py b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/custom_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef29061796d3a3a8c3ec12acfd7b2b09964c07b8
--- /dev/null
+++ b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/custom_config.py
@@ -0,0 +1,422 @@
+from __future__ import annotations
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional, Tuple, Type
+
+from torch.ao.quantization import QConfigMapping
+from torch.ao.quantization.backend_config import BackendConfig
+from torch.ao.quantization.quant_type import QuantType, _quant_type_from_str, _get_quant_type_to_str
+
+
+__all__ = [
+    "ConvertCustomConfig",
+    "FuseCustomConfig",
+    "PrepareCustomConfig",
+    "StandaloneModuleConfigEntry",
+]
+
+
+# TODO: replace all usages with these constants
+STANDALONE_MODULE_NAME_DICT_KEY = "standalone_module_name" +STANDALONE_MODULE_CLASS_DICT_KEY = "standalone_module_class" +FLOAT_TO_OBSERVED_DICT_KEY = "float_to_observed_custom_module_class" +OBSERVED_TO_QUANTIZED_DICT_KEY = "observed_to_quantized_custom_module_class" +NON_TRACEABLE_MODULE_NAME_DICT_KEY = "non_traceable_module_name" +NON_TRACEABLE_MODULE_CLASS_DICT_KEY = "non_traceable_module_class" +INPUT_QUANTIZED_INDEXES_DICT_KEY = "input_quantized_idxs" +OUTPUT_QUANTIZED_INDEXES_DICT_KEY = "output_quantized_idxs" +PRESERVED_ATTRIBUTES_DICT_KEY = "preserved_attributes" + + +@dataclass +class StandaloneModuleConfigEntry: + # qconfig_mapping for the prepare function called in the submodule, + # None means use qconfig from parent qconfig_mapping + qconfig_mapping: Optional[QConfigMapping] + example_inputs: Tuple[Any, ...] + prepare_custom_config: Optional[PrepareCustomConfig] + backend_config: Optional[BackendConfig] + + +class PrepareCustomConfig: + """ + Custom configuration for :func:`~torch.ao.quantization.quantize_fx.prepare_fx` and + :func:`~torch.ao.quantization.quantize_fx.prepare_qat_fx`. 
+ + Example usage:: + + prepare_custom_config = PrepareCustomConfig() \ + .set_standalone_module_name("module1", qconfig_mapping, example_inputs, \ + child_prepare_custom_config, backend_config) \ + .set_standalone_module_class(MyStandaloneModule, qconfig_mapping, example_inputs, \ + child_prepare_custom_config, backend_config) \ + .set_float_to_observed_mapping(FloatCustomModule, ObservedCustomModule) \ + .set_non_traceable_module_names(["module2", "module3"]) \ + .set_non_traceable_module_classes([NonTraceableModule1, NonTraceableModule2]) \ + .set_input_quantized_indexes([0]) \ + .set_output_quantized_indexes([0]) \ + .set_preserved_attributes(["attr1", "attr2"]) + """ + def __init__(self): + self.standalone_module_names: Dict[str, StandaloneModuleConfigEntry] = {} + self.standalone_module_classes: Dict[Type, StandaloneModuleConfigEntry] = {} + self.float_to_observed_mapping: Dict[QuantType, Dict[Type, Type]] = {} + self.non_traceable_module_names: List[str] = [] + self.non_traceable_module_classes: List[Type] = [] + self.input_quantized_indexes: List[int] = [] + self.output_quantized_indexes: List[int] = [] + self.preserved_attributes: List[str] = [] + + def __repr__(self): + dict_nonempty = { + k: v for k, v in self.__dict__.items() + if len(v) > 0 + } + return f"PrepareCustomConfig({dict_nonempty})" + + def set_standalone_module_name( + self, + module_name: str, + qconfig_mapping: Optional[QConfigMapping], + example_inputs: Tuple[Any, ...], + prepare_custom_config: Optional[PrepareCustomConfig], + backend_config: Optional[BackendConfig]) -> PrepareCustomConfig: + """ + Set the configuration for running a standalone module identified by ``module_name``. + + If ``qconfig_mapping`` is None, the parent ``qconfig_mapping`` will be used instead. + If ``prepare_custom_config`` is None, an empty ``PrepareCustomConfig`` will be used. + If ``backend_config`` is None, the parent ``backend_config`` will be used instead. 
+ """ + self.standalone_module_names[module_name] = \ + StandaloneModuleConfigEntry(qconfig_mapping, example_inputs, prepare_custom_config, backend_config) + return self + + def set_standalone_module_class( + self, + module_class: Type, + qconfig_mapping: Optional[QConfigMapping], + example_inputs: Tuple[Any, ...], + prepare_custom_config: Optional[PrepareCustomConfig], + backend_config: Optional[BackendConfig]) -> PrepareCustomConfig: + """ + Set the configuration for running a standalone module identified by ``module_class``. + + If ``qconfig_mapping`` is None, the parent ``qconfig_mapping`` will be used instead. + If ``prepare_custom_config`` is None, an empty ``PrepareCustomConfig`` will be used. + If ``backend_config`` is None, the parent ``backend_config`` will be used instead. + """ + self.standalone_module_classes[module_class] = \ + StandaloneModuleConfigEntry(qconfig_mapping, example_inputs, prepare_custom_config, backend_config) + return self + + def set_float_to_observed_mapping( + self, + float_class: Type, + observed_class: Type, + quant_type: QuantType = QuantType.STATIC) -> PrepareCustomConfig: + """ + Set the mapping from a custom float module class to a custom observed module class. + + The observed module class must have a ``from_float`` class method that converts the float module class + to the observed module class. This is currently only supported for static quantization. + """ + if quant_type != QuantType.STATIC: + raise ValueError("set_float_to_observed_mapping is currently only supported for static quantization") + if quant_type not in self.float_to_observed_mapping: + self.float_to_observed_mapping[quant_type] = {} + self.float_to_observed_mapping[quant_type][float_class] = observed_class + return self + + def set_non_traceable_module_names(self, module_names: List[str]) -> PrepareCustomConfig: + """ + Set the modules that are not symbolically traceable, identified by name. 
+ """ + self.non_traceable_module_names = module_names + return self + + def set_non_traceable_module_classes(self, module_classes: List[Type]) -> PrepareCustomConfig: + """ + Set the modules that are not symbolically traceable, identified by class. + """ + self.non_traceable_module_classes = module_classes + return self + + def set_input_quantized_indexes(self, indexes: List[int]) -> PrepareCustomConfig: + """ + Set the indexes of the inputs of the graph that should be quantized. + Inputs are otherwise assumed to be in fp32 by default instead. + """ + self.input_quantized_indexes = indexes + return self + + def set_output_quantized_indexes(self, indexes: List[int]) -> PrepareCustomConfig: + """ + Set the indexes of the outputs of the graph that should be quantized. + Outputs are otherwise assumed to be in fp32 by default instead. + """ + self.output_quantized_indexes = indexes + return self + + def set_preserved_attributes(self, attributes: List[str]) -> PrepareCustomConfig: + """ + Set the names of the attributes that will persist in the graph module even if they are not used in + the model's ``forward`` method. + """ + self.preserved_attributes = attributes + return self + + # TODO: remove this + @classmethod + def from_dict(cls, prepare_custom_config_dict: Dict[str, Any]) -> PrepareCustomConfig: + """ + Create a ``PrepareCustomConfig`` from a dictionary with the following items: + + "standalone_module_name": a list of (module_name, qconfig_mapping, example_inputs, + child_prepare_custom_config, backend_config) tuples + + "standalone_module_class" a list of (module_class, qconfig_mapping, example_inputs, + child_prepare_custom_config, backend_config) tuples + + "float_to_observed_custom_module_class": a nested dictionary mapping from quantization + mode to an inner mapping from float module classes to observed module classes, e.g. 
+ {"static": {FloatCustomModule: ObservedCustomModule}} + + "non_traceable_module_name": a list of modules names that are not symbolically traceable + "non_traceable_module_class": a list of module classes that are not symbolically traceable + "input_quantized_idxs": a list of indexes of graph inputs that should be quantized + "output_quantized_idxs": a list of indexes of graph outputs that should be quantized + "preserved_attributes": a list of attributes that persist even if they are not used in ``forward`` + + This function is primarily for backward compatibility and may be removed in the future. + """ + def _get_qconfig_mapping(obj: Any, dict_key: str) -> Optional[QConfigMapping]: + """ + Convert the given object into a QConfigMapping if possible, else throw an exception. + """ + if isinstance(obj, QConfigMapping) or obj is None: + return obj + if isinstance(obj, Dict): + return QConfigMapping.from_dict(obj) + raise ValueError("Expected QConfigMapping in prepare_custom_config_dict[\"%s\"], got '%s'" % + (dict_key, type(obj))) + + def _get_prepare_custom_config(obj: Any, dict_key: str) -> Optional[PrepareCustomConfig]: + """ + Convert the given object into a PrepareCustomConfig if possible, else throw an exception. + """ + if isinstance(obj, PrepareCustomConfig) or obj is None: + return obj + if isinstance(obj, Dict): + return PrepareCustomConfig.from_dict(obj) + raise ValueError("Expected PrepareCustomConfig in prepare_custom_config_dict[\"%s\"], got '%s'" % + (dict_key, type(obj))) + + def _get_backend_config(obj: Any, dict_key: str) -> Optional[BackendConfig]: + """ + Convert the given object into a BackendConfig if possible, else throw an exception. 
+ """ + if isinstance(obj, BackendConfig) or obj is None: + return obj + if isinstance(obj, Dict): + return BackendConfig.from_dict(obj) + raise ValueError("Expected BackendConfig in prepare_custom_config_dict[\"%s\"], got '%s'" % + (dict_key, type(obj))) + + conf = cls() + for (module_name, qconfig_dict, example_inputs, _prepare_custom_config_dict, backend_config_dict) in\ + prepare_custom_config_dict.get(STANDALONE_MODULE_NAME_DICT_KEY, []): + qconfig_mapping = _get_qconfig_mapping(qconfig_dict, STANDALONE_MODULE_NAME_DICT_KEY) + prepare_custom_config = _get_prepare_custom_config(_prepare_custom_config_dict, STANDALONE_MODULE_NAME_DICT_KEY) + backend_config = _get_backend_config(backend_config_dict, STANDALONE_MODULE_NAME_DICT_KEY) + conf.set_standalone_module_name( + module_name, qconfig_mapping, example_inputs, prepare_custom_config, backend_config) + for (module_class, qconfig_dict, example_inputs, _prepare_custom_config_dict, backend_config_dict) in\ + prepare_custom_config_dict.get(STANDALONE_MODULE_CLASS_DICT_KEY, []): + qconfig_mapping = _get_qconfig_mapping(qconfig_dict, STANDALONE_MODULE_CLASS_DICT_KEY) + prepare_custom_config = _get_prepare_custom_config(_prepare_custom_config_dict, STANDALONE_MODULE_CLASS_DICT_KEY) + backend_config = _get_backend_config(backend_config_dict, STANDALONE_MODULE_CLASS_DICT_KEY) + conf.set_standalone_module_class( + module_class, qconfig_mapping, example_inputs, prepare_custom_config, backend_config) + for quant_type_name, custom_module_mapping in prepare_custom_config_dict.get(FLOAT_TO_OBSERVED_DICT_KEY, {}).items(): + quant_type = _quant_type_from_str(quant_type_name) + for float_class, observed_class in custom_module_mapping.items(): + conf.set_float_to_observed_mapping(float_class, observed_class, quant_type) + conf.set_non_traceable_module_names(prepare_custom_config_dict.get(NON_TRACEABLE_MODULE_NAME_DICT_KEY, [])) + 
conf.set_non_traceable_module_classes(prepare_custom_config_dict.get(NON_TRACEABLE_MODULE_CLASS_DICT_KEY, [])) + conf.set_input_quantized_indexes(prepare_custom_config_dict.get(INPUT_QUANTIZED_INDEXES_DICT_KEY, [])) + conf.set_output_quantized_indexes(prepare_custom_config_dict.get(OUTPUT_QUANTIZED_INDEXES_DICT_KEY, [])) + conf.set_preserved_attributes(prepare_custom_config_dict.get(PRESERVED_ATTRIBUTES_DICT_KEY, [])) + return conf + + def to_dict(self) -> Dict[str, Any]: + """ + Convert this ``PrepareCustomConfig`` to a dictionary with the items described in + :func:`~torch.ao.quantization.fx.custom_config.PrepareCustomConfig.from_dict`. + """ + def _make_tuple(key: Any, e: StandaloneModuleConfigEntry): + qconfig_dict = e.qconfig_mapping.to_dict() if e.qconfig_mapping else None + prepare_custom_config_dict = e.prepare_custom_config.to_dict() if e.prepare_custom_config else None + return (key, qconfig_dict, e.example_inputs, prepare_custom_config_dict, e.backend_config) + + d: Dict[str, Any] = {} + for module_name, sm_config_entry in self.standalone_module_names.items(): + if STANDALONE_MODULE_NAME_DICT_KEY not in d: + d[STANDALONE_MODULE_NAME_DICT_KEY] = [] + d[STANDALONE_MODULE_NAME_DICT_KEY].append(_make_tuple(module_name, sm_config_entry)) + for module_class, sm_config_entry in self.standalone_module_classes.items(): + if STANDALONE_MODULE_CLASS_DICT_KEY not in d: + d[STANDALONE_MODULE_CLASS_DICT_KEY] = [] + d[STANDALONE_MODULE_CLASS_DICT_KEY].append(_make_tuple(module_class, sm_config_entry)) + for quant_type, float_to_observed_mapping in self.float_to_observed_mapping.items(): + if FLOAT_TO_OBSERVED_DICT_KEY not in d: + d[FLOAT_TO_OBSERVED_DICT_KEY] = {} + d[FLOAT_TO_OBSERVED_DICT_KEY][_get_quant_type_to_str(quant_type)] = float_to_observed_mapping + if len(self.non_traceable_module_names) > 0: + d[NON_TRACEABLE_MODULE_NAME_DICT_KEY] = self.non_traceable_module_names + if len(self.non_traceable_module_classes) > 0: + d[NON_TRACEABLE_MODULE_CLASS_DICT_KEY] = 
self.non_traceable_module_classes + if len(self.input_quantized_indexes) > 0: + d[INPUT_QUANTIZED_INDEXES_DICT_KEY] = self.input_quantized_indexes + if len(self.output_quantized_indexes) > 0: + d[OUTPUT_QUANTIZED_INDEXES_DICT_KEY] = self.output_quantized_indexes + if len(self.preserved_attributes) > 0: + d[PRESERVED_ATTRIBUTES_DICT_KEY] = self.preserved_attributes + return d + + +class ConvertCustomConfig: + """ + Custom configuration for :func:`~torch.ao.quantization.quantize_fx.convert_fx`. + + Example usage:: + + convert_custom_config = ConvertCustomConfig() \ + .set_observed_to_quantized_mapping(ObservedCustomModule, QuantizedCustomModule) \ + .set_preserved_attributes(["attr1", "attr2"]) + """ + + def __init__(self): + self.observed_to_quantized_mapping: Dict[QuantType, Dict[Type, Type]] = {} + self.preserved_attributes: List[str] = [] + + def __repr__(self): + dict_nonempty = { + k: v for k, v in self.__dict__.items() + if len(v) > 0 + } + return f"ConvertCustomConfig({dict_nonempty})" + + def set_observed_to_quantized_mapping( + self, + observed_class: Type, + quantized_class: Type, + quant_type: QuantType = QuantType.STATIC) -> ConvertCustomConfig: + """ + Set the mapping from a custom observed module class to a custom quantized module class. + + The quantized module class must have a ``from_observed`` class method that converts the observed module class + to the quantized module class. + """ + if quant_type not in self.observed_to_quantized_mapping: + self.observed_to_quantized_mapping[quant_type] = {} + self.observed_to_quantized_mapping[quant_type][observed_class] = quantized_class + return self + + def set_preserved_attributes(self, attributes: List[str]) -> ConvertCustomConfig: + """ + Set the names of the attributes that will persist in the graph module even if they are not used in + the model's ``forward`` method. 
+ """ + self.preserved_attributes = attributes + return self + + # TODO: remove this + @classmethod + def from_dict(cls, convert_custom_config_dict: Dict[str, Any]) -> ConvertCustomConfig: + """ + Create a ``ConvertCustomConfig`` from a dictionary with the following items: + + "observed_to_quantized_custom_module_class": a nested dictionary mapping from quantization + mode to an inner mapping from observed module classes to quantized module classes, e.g.:: + { + "static": {FloatCustomModule: ObservedCustomModule}, + "dynamic": {FloatCustomModule: ObservedCustomModule}, + "weight_only": {FloatCustomModule: ObservedCustomModule} + } + "preserved_attributes": a list of attributes that persist even if they are not used in ``forward`` + + This function is primarily for backward compatibility and may be removed in the future. + """ + conf = cls() + for quant_type_name, custom_module_mapping in convert_custom_config_dict.get(OBSERVED_TO_QUANTIZED_DICT_KEY, {}).items(): + quant_type = _quant_type_from_str(quant_type_name) + for observed_class, quantized_class in custom_module_mapping.items(): + conf.set_observed_to_quantized_mapping(observed_class, quantized_class, quant_type) + conf.set_preserved_attributes(convert_custom_config_dict.get(PRESERVED_ATTRIBUTES_DICT_KEY, [])) + return conf + + def to_dict(self) -> Dict[str, Any]: + """ + Convert this ``ConvertCustomConfig`` to a dictionary with the items described in + :func:`~torch.ao.quantization.fx.custom_config.ConvertCustomConfig.from_dict`. 
+ """ + d: Dict[str, Any] = {} + for quant_type, observed_to_quantized_mapping in self.observed_to_quantized_mapping.items(): + if OBSERVED_TO_QUANTIZED_DICT_KEY not in d: + d[OBSERVED_TO_QUANTIZED_DICT_KEY] = {} + d[OBSERVED_TO_QUANTIZED_DICT_KEY][_get_quant_type_to_str(quant_type)] = observed_to_quantized_mapping + if len(self.preserved_attributes) > 0: + d[PRESERVED_ATTRIBUTES_DICT_KEY] = self.preserved_attributes + return d + + +class FuseCustomConfig: + """ + Custom configuration for :func:`~torch.ao.quantization.quantize_fx.fuse_fx`. + + Example usage:: + + fuse_custom_config = FuseCustomConfig().set_preserved_attributes(["attr1", "attr2"]) + """ + + def __init__(self): + self.preserved_attributes: List[str] = [] + + def __repr__(self): + dict_nonempty = { + k: v for k, v in self.__dict__.items() + if len(v) > 0 + } + return f"FuseCustomConfig({dict_nonempty})" + + def set_preserved_attributes(self, attributes: List[str]) -> FuseCustomConfig: + """ + Set the names of the attributes that will persist in the graph module even if they are not used in + the model's ``forward`` method. + """ + self.preserved_attributes = attributes + return self + + # TODO: remove this + @classmethod + def from_dict(cls, fuse_custom_config_dict: Dict[str, Any]) -> FuseCustomConfig: + """ + Create a ``ConvertCustomConfig`` from a dictionary with the following items: + + "preserved_attributes": a list of attributes that persist even if they are not used in ``forward`` + + This function is primarily for backward compatibility and may be removed in the future. + """ + conf = cls() + conf.set_preserved_attributes(fuse_custom_config_dict.get(PRESERVED_ATTRIBUTES_DICT_KEY, [])) + return conf + + def to_dict(self) -> Dict[str, Any]: + """ + Convert this ``FuseCustomConfig`` to a dictionary with the items described in + :func:`~torch.ao.quantization.fx.custom_config.ConvertCustomConfig.from_dict`. 
+ """ + d: Dict[str, Any] = {} + if len(self.preserved_attributes) > 0: + d[PRESERVED_ATTRIBUTES_DICT_KEY] = self.preserved_attributes + return d diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/fuse.py b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/fuse.py new file mode 100644 index 0000000000000000000000000000000000000000..91b876997d10910e5b411225c2654857eab07f2b --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/fuse.py @@ -0,0 +1,161 @@ +from torch.fx import ( + GraphModule, + Node, + map_arg +) +from torch.fx.graph import Graph +from .match_utils import ( + _is_match, + MatchAllNode, +) +from .pattern_utils import ( + _sorted_patterns_dict, +) + +from ..backend_config import ( + BackendConfig, + get_native_backend_config, +) +from ..backend_config.utils import ( + get_fuser_method_mapping, + get_fusion_pattern_to_root_node_getter, + get_fusion_pattern_to_extra_inputs_getter, +) + +from .custom_config import FuseCustomConfig + +from .fuse_handler import ( + _get_fusion_pattern_to_fuse_handler_cls, + FuseHandler, +) + +from typing import Any, Callable, Dict, List, Tuple, Union +import warnings + +from torch.ao.quantization.utils import Pattern, NodePattern + + +__all__ = [ + "fuse", + # TODO: We should make this private in the future + # This is currently needed for test_public_bindings for some reason + "FuseHandler", +] + + +def fuse( + model: GraphModule, + is_qat: bool, + fuse_custom_config: Union[FuseCustomConfig, Dict[str, Any], None] = None, + backend_config: Union[BackendConfig, Dict[str, Any], None] = None, +) -> GraphModule: + if fuse_custom_config is None: + fuse_custom_config = FuseCustomConfig() + + if isinstance(fuse_custom_config, Dict): + warnings.warn( + "Passing a fuse_custom_config_dict to fuse is deprecated and will not be supported " + "in a future version. 
Please pass in a FuseCustomConfig instead.") + fuse_custom_config = FuseCustomConfig.from_dict(fuse_custom_config) + + if isinstance(backend_config, Dict): + warnings.warn( + "Passing a backend_config_dict to prepare is deprecated and will not be supported " + "in a future version. Please pass in a BackendConfig instead.") + backend_config = BackendConfig.from_dict(backend_config) + + named_modules = dict(model.named_modules()) + + if backend_config is None: + backend_config = get_native_backend_config() + + fusion_pattern_to_fuse_handler_cls = _sorted_patterns_dict(_get_fusion_pattern_to_fuse_handler_cls(backend_config)) + fuser_method_mapping = get_fuser_method_mapping(backend_config) + fusion_pattern_to_root_node_getter = get_fusion_pattern_to_root_node_getter(backend_config) + fusion_pattern_to_extra_inputs_getter = get_fusion_pattern_to_extra_inputs_getter(backend_config) + + # find fusion + fusion_pairs = _find_matches( + model, model.graph, fusion_pattern_to_fuse_handler_cls) + # TODO: change this to inplace changes to graph, since we no longer construct + # new GraphModule anymore + fused_graph = Graph() + env: Dict[Any, Any] = {} + + def load_arg(a): + return map_arg(a, lambda node: env[node.name]) + + def default_root_node_getter(node_pattern): + while not isinstance(node_pattern[-1], Node): + node_pattern = node_pattern[-1] + return node_pattern[-1] + + for node in model.graph.nodes: + maybe_last_node, pattern, matched_node_pattern, obj, node_to_subpattern = \ + fusion_pairs.get(node.name, (None, None, None, None, None)) + # get the corresponding subpattern for the current node + if node_to_subpattern is not None: + node_subpattern = node_to_subpattern.get(node, None) + else: + node_subpattern = None + if maybe_last_node is node: + assert obj is not None + root_node_getter = fusion_pattern_to_root_node_getter.get(pattern, default_root_node_getter) + root_node = root_node_getter(matched_node_pattern) # type: ignore[index] + extra_inputs_getter = 
fusion_pattern_to_extra_inputs_getter.get(pattern, None) + extra_inputs = [] + if extra_inputs_getter is not None: + extra_inputs = extra_inputs_getter(matched_node_pattern) + # TODO: add validation that root_node is a module and has the same type + # as the root_module in the configuration + env[node.name] = obj.fuse( + load_arg, named_modules, fused_graph, root_node, extra_inputs, matched_node_pattern, # type: ignore[arg-type] + fuse_custom_config, fuser_method_mapping, is_qat) + elif maybe_last_node is None or node_subpattern is MatchAllNode: + env[node.name] = fused_graph.node_copy(node, load_arg) + # node matched in patterns and is not root is removed here + + model = GraphModule(model, fused_graph) + return model + +def _find_matches( + root: GraphModule, + graph: Graph, + pattern_to_fuse_handler_cls: Dict[Pattern, Callable], +) -> Dict[str, Tuple[Node, Pattern, NodePattern, FuseHandler, Dict[Node, Any]]]: + modules = dict(root.named_modules()) + # node name -> (root_node, match_value) + match_map : Dict[ + str, Tuple[Node, Pattern, NodePattern, FuseHandler, Dict[Node, Any]]] = {} + # a map from node to the matched subpattern + node_to_subpattern: Dict[Node, Any] = {} + + # TODO: dedup with quantization matching function in match_utils.py + def apply_match(pattern, node, match, matched_node_pattern, node_to_subpattern): + if isinstance(pattern, tuple): + s, *args = pattern + current_node_pattern: List[Node] = [] + apply_match(s, node, match, current_node_pattern, node_to_subpattern) + for subpattern, arg in zip(args, node.args): + apply_match(subpattern, arg, match, current_node_pattern, node_to_subpattern) + matched_node_pattern.append(tuple(current_node_pattern)) + else: + # the first pattern matches will take precedence + if node.name not in match_map: + matched_node_pattern.append(node) + # MatchAllNode here is actually MatchAllInputNode which should not + # be added to match_map + if pattern is not MatchAllNode: + node_to_subpattern[node] = pattern + 
root_node, pattern, handler = match + match_map[node.name] = (root_node, pattern, matched_node_pattern, handler, node_to_subpattern) + + for node in reversed(graph.nodes): + if node.name not in match_map: + for pattern, fuse_handler_cls in pattern_to_fuse_handler_cls.items(): + matched_node_pattern: List[Node] = [] + if _is_match(modules, node, pattern): + apply_match(pattern, node, (node, pattern, fuse_handler_cls(node)), matched_node_pattern, node_to_subpattern) + break + + return match_map diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/fuse_handler.py b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/fuse_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..2706f96fef36873efa5f36bef068a3308d2dbb55 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/fuse_handler.py @@ -0,0 +1,119 @@ +import torch +from torch.ao.quantization.backend_config import BackendConfig +from torch.fx.graph import Node, Graph +from ..utils import _parent_name, NodePattern, Pattern +from ..fuser_method_mappings import get_fuser_method_new +from abc import ABC, abstractmethod +from typing import Any, Callable, Dict, List, Union +from .custom_config import FuseCustomConfig +from .match_utils import MatchAllNode +from torch.nn.utils.parametrize import type_before_parametrizations + +__all__ = [ + "DefaultFuseHandler", + "FuseHandler", +] + + +# ---------------------------- +# Fusion Pattern Registrations +# ---------------------------- + +# Base Pattern Handler +class FuseHandler(ABC): + """ Base handler class for the fusion patterns + """ + def __init__(self, node: Node): + pass + + @abstractmethod + def fuse(self, + load_arg: Callable, + named_modules: Dict[str, torch.nn.Module], + fused_graph: Graph, + root_node: Node, + extra_inputs: List[Any], + matched_node_pattern: NodePattern, + fuse_custom_config: FuseCustomConfig, + fuser_method_mapping: Dict[Pattern, 
Union[torch.nn.Sequential, Callable]], + is_qat: bool) -> Node: + pass + +class DefaultFuseHandler(FuseHandler): + def __init__( + self, + node: Node): + super().__init__(node) + + def fuse(self, + load_arg: Callable, + named_modules: Dict[str, torch.nn.Module], + fused_graph: Graph, + root_node: Node, + extra_inputs: List[Any], + matched_node_pattern: NodePattern, + fuse_custom_config: FuseCustomConfig, + fuser_method_mapping: Dict[Pattern, Union[torch.nn.Sequential, Callable]], + is_qat: bool) -> Node: + assert root_node.op == "call_module", "Expecting module node to be a call_module Node" + root_module = named_modules[str(root_node.target)] + + def get_modules(pattern): + """ Given a node pattern, extract the corresponding modules + e.g. input: (relu_node, (bn_node, conv_node)) + output: (relu_module, (bn_module, conv_module)) + """ + if isinstance(pattern, (tuple, list)): + n, *args = pattern + modules: List[torch.nn.Module] = [] + modules.append(get_modules(n)) + for a in args: + modules.append(get_modules(a)) + return tuple(modules) + else: + n = pattern + if n.op == "call_module": + return named_modules[n.target] + elif n.op == "call_function" and n.target == torch.nn.functional.relu: + relu = torch.nn.ReLU() + relu.training = root_module.training + return relu + elif n.op == "call_function" or n.op == "call_method": + return n.target + else: + return MatchAllNode + + # since relu can be used multiple times, we'll need to create a relu module for each match + matched_modules = get_modules(matched_node_pattern) + + def get_matched_types(m): + if isinstance(m, tuple): + return tuple(map(get_matched_types, m)) + if isinstance(m, torch.nn.Module): + return type_before_parametrizations(m) + return m + + matched_module_types = get_matched_types(matched_modules) + module_parent_name, module_name = _parent_name(root_node.target) + fuser_method = get_fuser_method_new(matched_module_types, fuser_method_mapping) + # TODO: change the signature for fuser_method to take 
matched module patterns + # as input + fused_module = fuser_method(is_qat, *matched_modules) + setattr(named_modules[module_parent_name], module_name, fused_module) + extra_args = [] + for input in extra_inputs: + extra_args.append(load_arg(input)) + node = fused_graph.node_copy(root_node, load_arg) + args = list(node.args) + args.extend(extra_args) + node.args = tuple(args) + return node + +def _get_fusion_pattern_to_fuse_handler_cls( + backend_config: BackendConfig) -> Dict[Pattern, Callable]: + fusion_pattern_to_fuse_handlers: Dict[Pattern, Callable] = {} + for pattern, config in backend_config._pattern_complex_format_to_config.items(): + if config.fuser_method is not None: + # TODO: is this logic right? + fusion_pattern_to_fuse_handlers[pattern] = DefaultFuseHandler + return fusion_pattern_to_fuse_handlers diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/graph_module.py b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/graph_module.py new file mode 100644 index 0000000000000000000000000000000000000000..cc9187285ae6313b07e03fe47e0eaec8ca4a265b --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/graph_module.py @@ -0,0 +1,119 @@ +import torch +import copy +from torch.fx import GraphModule +from torch.fx.graph import Graph +from typing import Union, Dict, Any, Set + +__all__ = [ + "FusedGraphModule", + "ObservedGraphModule", + "ObservedStandaloneGraphModule", + "QuantizedGraphModule", +] + +class FusedGraphModule(GraphModule): + def __init__(self, root: Union[torch.nn.Module, Dict[str, Any]], graph: Graph, preserved_attr_names: Set[str]): + self.preserved_attr_names = preserved_attr_names + preserved_attrs = {attr: getattr(root, attr) for attr in self.preserved_attr_names if hasattr(root, attr)} + super().__init__(root, graph) + for attr in preserved_attrs: + setattr(self, attr, preserved_attrs[attr]) + + # GraphModule does not copy attributes which are not in the __dict__ + # of 
vanilla nn.Module. So, we override __deepcopy__ in order + # to copy the quantization specific attributes correctly. + def __deepcopy__(self, memo): + fake_mod = torch.nn.Module() + fake_mod.__dict__ = copy.deepcopy(self.__dict__) + return FusedGraphModule(fake_mod, copy.deepcopy(self.graph), copy.deepcopy(self.preserved_attr_names)) + +class ObservedGraphModule(GraphModule): + + def __init__(self, root: Union[torch.nn.Module, Dict[str, Any]], graph: Graph, preserved_attr_names: Set[str]): + self.preserved_attr_names = { + '_activation_post_process_map', + '_activation_post_process_indexes', + '_patterns', + '_node_name_to_qconfig', + '_prepare_custom_config', + '_equalization_node_name_to_qconfig', + '_node_name_to_scope', + '_qconfig_mapping', + '_is_qat', + '_observed_node_names'}.union(preserved_attr_names) + preserved_attrs = {attr: getattr(root, attr) for attr in self.preserved_attr_names if hasattr(root, attr)} + super().__init__(root, graph) + for attr in preserved_attrs: + setattr(self, attr, preserved_attrs[attr]) + + # GraphModule does not copy attributes which are not in the __dict__ + # of vanilla nn.Module. So, we override __deepcopy__ in order + # to copy the quantization specific attributes correctly. 
+ def __deepcopy__(self, memo): + fake_mod = torch.nn.Module() + fake_mod.__dict__ = copy.deepcopy(self.__dict__) + return ObservedGraphModule(fake_mod, copy.deepcopy(self.graph), copy.deepcopy(self.preserved_attr_names)) + +def _is_observed_module(module: Any) -> bool: + return hasattr(module, "meta") and "_observed_graph_module_attrs" in module.meta + +def _get_observed_graph_module_attr(model: Union[torch.nn.Module, GraphModule], attr_name: str) -> Any: + if hasattr(model, "meta") and "_observed_graph_module_attrs" in model.meta: # type: ignore[operator, index] + return getattr(model.meta["_observed_graph_module_attrs"], attr_name) # type: ignore[index] + return None + +class ObservedStandaloneGraphModule(ObservedGraphModule): + def __init__(self, root: Union[torch.nn.Module, Dict[str, Any]], graph: Graph, preserved_attr_names: Set[str]): + preserved_attr_names = preserved_attr_names.union({ + "_standalone_module_input_quantized_idxs", + "_standalone_module_output_quantized_idxs"}) + super().__init__(root, graph, preserved_attr_names) + + def __deepcopy__(self, memo): + fake_mod = torch.nn.Module() + fake_mod.__dict__ = copy.deepcopy(self.__dict__) + return ObservedStandaloneGraphModule(fake_mod, copy.deepcopy(self.graph), copy.deepcopy(self.preserved_attr_names)) + +def _is_observed_standalone_module(module: Any) -> bool: + return _is_observed_module(module) and module.meta["_observed_graph_module_attrs"].is_observed_standalone_module + +def _save_packed_weight(self, destination, prefix, keep_vars): + for attr_name in dir(self): + if "_packed_weight" in attr_name and \ + isinstance(getattr(self, attr_name), torch._C.ScriptObject): # type: ignore[attr-defined] + packed_weight = getattr(self, attr_name) + destination[prefix + attr_name] = packed_weight + +class QuantizedGraphModule(GraphModule): + """ This class is created to make sure PackedParams + (e.g. 
LinearPackedParams, Conv2dPackedParams) to appear in state_dict + so that we can serialize and deserialize quantized graph module with + torch.save(m.state_dict()) and m.load_state_dict(state_dict) + """ + def __init__(self, root: Union[torch.nn.Module, Dict[str, Any]], graph: Graph, preserved_attr_names: Set[str]): + self.preserved_attr_names = preserved_attr_names + preserved_attrs = {attr: getattr(root, attr) for attr in self.preserved_attr_names if hasattr(root, attr)} + super().__init__(root, graph) + for attr in preserved_attrs: + setattr(self, attr, preserved_attrs[attr]) + self._register_state_dict_hook(_save_packed_weight) + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + attrs_to_pop = [] + for attr_name in state_dict: + if attr_name.startswith("_packed_weight") and isinstance(state_dict[attr_name], torch._C.ScriptObject): # type: ignore[attr-defined] # noqa: B950 + setattr(self, attr_name, state_dict[attr_name]) + attrs_to_pop.append(attr_name) + + # pop the packed param attributesn + for attr_name in attrs_to_pop: + state_dict.pop(attr_name) + + super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) + + + def __deepcopy__(self, memo): + fake_mod = torch.nn.Module() + fake_mod.__dict__ = copy.deepcopy(self.__dict__) + return QuantizedGraphModule(fake_mod, copy.deepcopy(self.graph), copy.deepcopy(self.preserved_attr_names)) diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/lower_to_fbgemm.py b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/lower_to_fbgemm.py new file mode 100644 index 0000000000000000000000000000000000000000..ef58652b1adda0dc135fbef21afe789d6f538eda --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/lower_to_fbgemm.py @@ -0,0 +1,16 @@ +from ._lower_to_native_backend import _lower_to_native_backend +from ..qconfig import 
QConfigAny +from torch.fx import GraphModule +from typing import Dict, Tuple + +__all__ = ['lower_to_fbgemm'] + +def lower_to_fbgemm( + model: GraphModule, + qconfig_map: Dict[str, QConfigAny], + node_name_to_scope: Dict[str, Tuple[str, type]] +) -> GraphModule: + """ Lower a quantized reference model (with reference quantized operator patterns) + to fbgemm + """ + return _lower_to_native_backend(model, qconfig_map, node_name_to_scope) diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/lower_to_qnnpack.py b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/lower_to_qnnpack.py new file mode 100644 index 0000000000000000000000000000000000000000..a3a82179789dc392132a791632f0397a2dcf7595 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/lower_to_qnnpack.py @@ -0,0 +1,18 @@ +from ._lower_to_native_backend import _lower_to_native_backend +from ..qconfig import QConfigAny +from torch.fx import GraphModule +from typing import Dict, Tuple + +__all__ = [ + "lower_to_qnnpack" +] + +def lower_to_qnnpack( + model: GraphModule, + qconfig_map: Dict[str, QConfigAny], + node_name_to_scope: Dict[str, Tuple[str, type]] +) -> GraphModule: + """ Lower a quantized reference model (with reference quantized operator patterns) + to qnnpack + """ + return _lower_to_native_backend(model, qconfig_map, node_name_to_scope) diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/match_utils.py b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/match_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..77a7f3079906a082d2f0bb154e087ef6f29af258 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/match_utils.py @@ -0,0 +1,237 @@ +import sys +import torch +from torch.fx.graph import ( + Graph, + Node, +) +from torch.ao.quantization.utils import Pattern +from .quantize_handler import ( + QuantizeHandler, +) +from ..qconfig 
import ( + QConfigAny, +) +from ..utils import ( + MatchAllNode +) +from .graph_module import ( + _is_observed_standalone_module, +) +from torch.nn.utils.parametrize import type_before_parametrizations +from typing import Any, Dict, List, Callable, Optional, Tuple, Type, Set, Iterable + + +__all__: List[str] = [] + +# TODO(future PR): the 1st argument is typed as `List[Node]`, but a better type +# would be a recursive `List[Union[Node, Tuple[Union[Node, ...]]]]` +_MatchResult = Tuple[Node, List[Node], Optional[Pattern], QuantizeHandler] + +_MatchResultWithQConfig = Tuple[Node, List[Node], Optional[Pattern], QuantizeHandler, + QConfigAny] + +# Note: The order of patterns is important! match function will take whatever is matched first, so we'll +# need to put the fusion patterns before single patterns. For example, add_relu should be registered come before relu. +# decorators are applied in the reverse order we see. Also when we match the nodes in the graph with these patterns, +# we'll start from the last node of the graph and traverse back. 
+def _is_match(modules, node, pattern, max_uses=sys.maxsize): + """ Matches a node in fx against a pattern + """ + if isinstance(pattern, tuple): + self_match, *arg_matches = pattern + if self_match is getattr: + assert len(pattern) == 2, 'Expecting getattr pattern to have two elements' + arg_matches = [] + else: + self_match = pattern + arg_matches = [] + + if isinstance(self_match, type) and issubclass(self_match, MatchAllNode): + return True + + if node == pattern: + return True + + if not isinstance(node, Node) or len(node.users) > max_uses: + return False + + if isinstance(self_match, type) and issubclass(self_match, torch.nn.Module): + if node.op != 'call_module': + return False + if not type_before_parametrizations(modules[node.target]) == self_match: + return False + elif callable(self_match): + if node.op != 'call_function' or node.target is not self_match: + return False + elif node.target is getattr: + if node.args[1] != pattern[1]: + return False + elif isinstance(self_match, str): + if node.op != 'call_method' or node.target != self_match: + return False + elif node.target != self_match: + return False + + if not arg_matches: + return True + + if len(arg_matches) != len(node.args): + return False + + return all(_is_match(modules, node, arg_match, max_uses=1) for node, arg_match in zip(node.args, arg_matches)) + +def _find_matches( + graph: Graph, + modules: Dict[str, torch.nn.Module], + patterns: Dict[Pattern, QuantizeHandler], + root_node_getter_mapping: Dict[Pattern, Callable], + standalone_module_names: List[str] = None, + standalone_module_classes: List[Type] = None, + custom_module_classes: List[Any] = None) -> Dict[str, _MatchResult]: + """ + Matches the nodes in the input graph to quantization patterns, and + outputs the information needed to quantize them in future steps. 
+ + Inputs: + - graph: an fx.Graph object + - modules: a mapping of fully qualified module name to instance, + for example, {'foo': ModuleFoo, ...} + - patterns: a mapping from a tuple of nodes in reverse order to + uninitialized QuantizeHandler subclass. + + Outputs a map of + node_name -> + (node, matched_values, matched_pattern, QuantizeHandler instance, + qconfig) + + For example, { + 'relu_1': (relu_1, [relu_1], torch.nn.functional.relu, + , QConfig(...)), + ... + } + """ + if custom_module_classes is None: + custom_module_classes = [] + + if standalone_module_classes is None: + standalone_module_classes = [] + + if standalone_module_names is None: + standalone_module_names = [] + + match_map: Dict[str, _MatchResult] = {} + all_matched : Set[str] = set() + + def _recursive_record_node_in_match_map( + last_node, + match_map, + node_pattern, + matched_node_pattern, + pattern, + match_value): + if isinstance(node_pattern, Node): + match_map[node_pattern.name] = ( + last_node, matched_node_pattern, pattern, match_value) + elif not isinstance(node_pattern, Iterable): + return + else: + for n in node_pattern: + _recursive_record_node_in_match_map(last_node, match_map, n, matched_node_pattern, pattern, match_value) + + # TODO: 1. merge with fuse matcher 2. 
document the code + def record_match( + pattern, + node, + last_node, + matched_node_pattern, + match_map): + if isinstance(pattern, tuple): + s, *args = pattern + is_single_arg = len(args) == 1 + current_node_pattern: List[Node] = [] + record_match( + s, + node, + last_node, + matched_node_pattern, + match_map) + if pattern[0] is not getattr: + for subpattern, arg in zip(args, node.args): + record_match( + subpattern, + arg, + node, + current_node_pattern, + match_map) + if len(current_node_pattern) > 1: + # current_node_pattern is the node pattern we get from matching + # the subpattern with arguments of the node + # we use is_single_arg to recover the original structure of the pattern + # if the original pattern has a single argument, we will have + # (original_op, (original_arg, ...)) + # otherwise, we'll have a list of arguments + # (original_op, arg0, arg1, arg2, ...) + if is_single_arg: + matched_node_pattern.append(tuple(current_node_pattern)) + else: + matched_node_pattern.extend(list(current_node_pattern)) + else: + matched_node_pattern.append(current_node_pattern[0]) + else: + matched_node_pattern.append(node) + + for node in reversed(graph.nodes): + if node.name not in match_map and node.name not in all_matched: + for pattern, quantize_handler_cls in patterns.items(): + root_node_getter = root_node_getter_mapping.get(pattern, None) + if _is_match(modules, node, pattern) and node.name not in match_map: + matched_node_pattern: List[Node] = [] + record_match( + pattern, + node, + node, + matched_node_pattern, + match_map) + quantize_handler = quantize_handler_cls( # type: ignore[operator] + matched_node_pattern, + modules, + root_node_getter) + last_node = node + # record the match for all nodes in the pattern + _recursive_record_node_in_match_map( + last_node, + match_map, + # we need to record all nodes in the matched pattern in the match_map + matched_node_pattern, + # this is a part of the value corresponding to the node + matched_node_pattern, + 
pattern, + quantize_handler) + break + + # add custom module instances to the match result + assert modules is not None + for node in graph.nodes: + if node.op == 'call_module' and \ + type(modules[node.target]) in custom_module_classes: + match_map[node.name] = ( + node, node, None, QuantizeHandler(node, modules, is_custom_module=True)) + + def is_standalone_module(node_target: str, modules: Dict[str, torch.nn.Module]): + assert modules is not None + return ( + node_target in standalone_module_names or # type: ignore[operator] + type(modules[node_target]) in standalone_module_classes # type: ignore[operator] + ) + + # add standalone modules to the match + for node in graph.nodes: + if node.op == 'call_module' and \ + (is_standalone_module(node.target, modules) or + _is_observed_standalone_module(modules[node.target])): + # add node to matched nodes + match_map[node.name] = ( + node, node, None, + QuantizeHandler(node, modules, is_standalone_module=True)) + + return match_map diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/pattern_utils.py b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/pattern_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..29063fa2761239d53bd054af5e7987905d6b1532 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/pattern_utils.py @@ -0,0 +1,87 @@ +from collections import OrderedDict +from typing import Dict, Any +from torch.ao.quantization.utils import Pattern +from ..fake_quantize import FixedQParamsFakeQuantize +from ..observer import ObserverBase +import copy + +__all__ = [ + "get_default_fusion_patterns", + "get_default_quant_patterns", + "get_default_output_activation_post_process_map", +] + +# TODO(future PR): fix the typing on QuantizeHandler (currently a circular dependency) +QuantizeHandler = Any + +# pattern for conv bn fusion +_DEFAULT_FUSION_PATTERNS = OrderedDict() +def _register_fusion_pattern(pattern): + def 
insert(fn): + _DEFAULT_FUSION_PATTERNS[pattern] = fn + return fn + return insert + +def get_default_fusion_patterns() -> Dict[Pattern, QuantizeHandler]: + return copy.copy(_DEFAULT_FUSION_PATTERNS) + +_DEFAULT_QUANTIZATION_PATTERNS = OrderedDict() + +# Mapping from pattern to activation_post_process(observer/fake_quant) constructor for output activation +# e.g. pattern: torch.sigmoid, +# output_activation_post_process: default_fixed_qparams_range_0to1_fake_quant +_DEFAULT_OUTPUT_FAKE_QUANTIZE_MAP = {} +_DEFAULT_OUTPUT_OBSERVER_MAP = {} + +# Register pattern for both static quantization and qat +def _register_quant_pattern(pattern, fixed_qparams_observer=None): + def insert(fn): + _DEFAULT_QUANTIZATION_PATTERNS[pattern] = fn + if fixed_qparams_observer is not None: + _DEFAULT_OUTPUT_FAKE_QUANTIZE_MAP[pattern] = FixedQParamsFakeQuantize.with_args(observer=fixed_qparams_observer) + _DEFAULT_OUTPUT_OBSERVER_MAP[pattern] = fixed_qparams_observer + return fn + return insert + +# Get patterns for both static quantization and qat +def get_default_quant_patterns() -> Dict[Pattern, QuantizeHandler]: + return copy.copy(_DEFAULT_QUANTIZATION_PATTERNS) + +# a map from pattern to output activation post process constructor +# e.g. torch.sigmoid -> default_affine_fixed_qparam_fake_quant +def get_default_output_activation_post_process_map(is_training) -> Dict[Pattern, ObserverBase]: + if is_training: + return copy.copy(_DEFAULT_OUTPUT_FAKE_QUANTIZE_MAP) + else: + return copy.copy(_DEFAULT_OUTPUT_OBSERVER_MAP) + +# Example use of register pattern function: +# @_register_fusion_pattern(torch.nn.ReLU, (torch.nn.BatchNorm2d, torch.nn.Conv2d))) +# class ConvOrLinearBNReLUFusion(): +# def __init__(...): +# ... +# + +def _sorted_patterns_dict(patterns_dict: Dict[Pattern, QuantizeHandler]) -> Dict[Pattern, QuantizeHandler]: + """ + Return a sorted version of the patterns dictionary such that longer patterns are matched first, + e.g. match (F.relu, F.linear) before F.relu. 
+ This works for current use cases, but we may need to have a more clever way to sort + things to address more complex patterns + """ + + def get_len(pattern): + """ this will calculate the length of the pattern by counting all the entries + in the pattern. + this will make sure (nn.ReLU, (nn.BatchNorm, nn.Conv2d)) comes before + (nn.BatchNorm, nn.Conv2d) so that we can match the former first + """ + len = 0 + if isinstance(pattern, tuple): + for item in pattern: + len += get_len(item) + else: + len += 1 + return len + + return OrderedDict(sorted(patterns_dict.items(), key=lambda kv: -get_len(kv[0]) if isinstance(kv[0], tuple) else 1)) diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/prepare.py b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/prepare.py new file mode 100644 index 0000000000000000000000000000000000000000..6f5d242d52933d42583d3ff08028560eaecfe094 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/prepare.py @@ -0,0 +1,1665 @@ +import copy +import torch +import warnings +from torch.fx import ( + GraphModule, +) +from torch.fx.graph import ( + Graph, + Node, +) +from torch.fx.node import Argument + +from ..quantize import ( + propagate_qconfig_, +) +from ..observer import ( + ObserverBase, + _is_activation_post_process +) +from ..qconfig import ( + _is_reuse_input_qconfig, + QConfigAny, +) +from ..qconfig_mapping import ( + QConfigMapping, +) +from .qconfig_mapping_utils import ( + _generate_node_name_to_qconfig, + _update_qconfig_for_fusion, + _get_flattened_qconfig_dict, + _update_qconfig_for_qat, +) + +from .quantize_handler import ( + _default_root_node_getter, + _get_pattern_to_quantize_handlers, + QuantizeHandler, +) + +from torch.ao.quantization.utils import ( + Pattern, + NodePattern, +) + +from ._equalize import ( + is_equalization_observer, + node_supports_equalization, +) + +from .pattern_utils import ( + _sorted_patterns_dict, +) + +from .match_utils import 
( + _MatchResultWithQConfig, + _find_matches, +) + +from .utils import ( + _insert_dequant_stubs_for_custom_module_lstm_output, + _is_custom_module_lstm, + _maybe_get_custom_module_lstm_from_node_arg, + _qconfig_satisfies_dtype_config_constraints, + get_custom_module_class_keys, + all_node_args_have_no_tensors, + assert_and_get_unique_device, + get_non_observable_arg_indexes_and_types, + get_new_attr_name_with_prefix, + node_arg_is_weight, + node_arg_is_bias, + NON_QUANTIZABLE_WEIGHT_OPS, + ObservedGraphModuleAttrs, +) + +from torch.ao.quantization import ( + PlaceholderObserver +) +from torch.ao.quantization.quantize import ( + convert +) + +from ..utils import ( + _parent_name, + get_qconfig_dtypes, + get_swapped_custom_module_class, + activation_is_statically_quantized, +) + +from ..backend_config.utils import ( + get_pattern_to_dtype_configs, + get_module_to_qat_module, + get_fusion_pattern_to_root_node_getter, +) +from ..backend_config import ( + BackendConfig, + DTypeConfig, + get_native_backend_config, +) +from .custom_config import ( + PrepareCustomConfig, + StandaloneModuleConfigEntry, +) + +from torch._subclasses import FakeTensor + +from typing import Any, Dict, List, Optional, Set, Tuple, Type, Union, Callable + + +__all__ = [ + "insert_observers_for_model", + "prepare", + "propagate_dtypes_for_known_nodes", +] + + +# list of dtypes to not add observers to +_DO_NOT_OBS_DTYPE_LIST = [int, float, torch.bool, None] + +# note: the following default target dtype info dicts are temporary, +# should be moved to the new programmable API class soon +_DEFAULT_FP32_QCONFIG_FOR_TARGET_DTYPE_INFO = { + "input_act_obs_or_fq_ctr": torch.ao.quantization.qconfig._default_fp32_placeholder_qconfig.activation, + "output_act_obs_or_fq_ctr": torch.ao.quantization.qconfig._default_fp32_placeholder_qconfig.activation +} + +_DEFAULT_QUINT8_QCONFIG_FOR_TARGET_DTYPE_INFO = { + "input_act_obs_or_fq_ctr": torch.ao.quantization.qconfig._default_quint8_placeholder_qconfig.activation, 
+ "output_act_obs_or_fq_ctr": torch.ao.quantization.qconfig._default_quint8_placeholder_qconfig.activation +} + +def _is_activation_post_process_node(node: Node, named_modules: Dict[str, torch.nn.Module]) -> bool: + return isinstance(node, torch.fx.Node) and node.op == "call_module" and \ + _is_activation_post_process(named_modules[str(node.target)]) + +def _get_dtype_and_is_dynamic(obs_or_fq_ctr: Optional[Callable]) -> Tuple[Optional[torch.dtype], bool]: + """ Given a constructor for observer or fake quant module, returns + a Tuple of dtype and is_dynamic + """ + # TODO: instead of instantiating the instance, we can use inspect to get the default args + if obs_or_fq_ctr is None: + return None, False + else: + obs_or_fq = obs_or_fq_ctr() + return obs_or_fq.dtype, getattr(obs_or_fq, "is_dynamic", False) + +def _is_input_arg_dtype_supported_by_backend( + arg: Argument, + node: Node, + qconfig: QConfigAny, + dtype_config: DTypeConfig, + backend_config: BackendConfig, +) -> bool: + """ Check if the configured qconfig for the argument + is supported by the backend or not + """ + if isinstance(arg, (list, tuple)): + return all(_is_input_arg_dtype_supported_by_backend( + a, node, qconfig, + dtype_config, backend_config) for a in arg) + if not isinstance(arg, Node): + return True + # TODO: support check for standalone module + is_weight = node_arg_is_weight(node, arg, backend_config) + is_bias = node_arg_is_bias(node, arg, backend_config) + is_activation = not is_weight and not is_bias + if is_activation: + input_act_obs_or_fq_ctr = node.meta["target_dtype_info"].get("input_act_obs_or_fq_ctr") + qconfig_dtype, qconfig_is_dynamic = _get_dtype_and_is_dynamic(input_act_obs_or_fq_ctr) + # TODO(future PR): remove the cast to bool below after figuring + # out why backend_config has is_dynamic set to None in some cases. 
+ return (dtype_config.input_dtype is None) or ( + dtype_config.input_dtype == qconfig_dtype and + bool(dtype_config.is_dynamic) == bool(qconfig_is_dynamic) and + _qconfig_satisfies_dtype_config_constraints(qconfig, dtype_config.input_dtype_with_constraints) + ) + elif is_weight: + # TODO: move dtype check into `_qconfig_satisfies_dtype_config_constraints` as well + weight_obs_or_fq_ctr = node.meta["target_dtype_info"].get("weight_obs_or_fq_ctr", None) + qconfig_weight_dtype, _ = _get_dtype_and_is_dynamic(weight_obs_or_fq_ctr) + backend_config_weight_dtype = dtype_config.weight_dtype + dtype_matches = qconfig_weight_dtype == backend_config_weight_dtype + qconfig_satisfies_constraints = _qconfig_satisfies_dtype_config_constraints( + qconfig, dtype_config.weight_dtype_with_constraints, is_activation=False) + return backend_config_weight_dtype is None or (dtype_matches and qconfig_satisfies_constraints) + else: # bias + # TODO: move dtype check into `_qconfig_satisfies_dtype_config_constraints` as well + bias_obs_or_fq_ctr = node.meta["target_dtype_info"].get("bias_obs_or_fq_ctr", None) + qconfig_bias_dtype, _ = _get_dtype_and_is_dynamic(bias_obs_or_fq_ctr) + backend_config_bias_dtype = dtype_config.bias_dtype + return backend_config_bias_dtype is None or qconfig_bias_dtype == backend_config_bias_dtype + +def _is_output_dtype_supported_by_backend( + node: Node, + qconfig: QConfigAny, + dtype_config: DTypeConfig, +) -> bool: + """ Check if the configured qconfig for the output + is supported by the backend or not + """ + # TODO: move dtype check into `_qconfig_satisfies_dtype_config_constraints` as well + backend_config_output_dtype = dtype_config.output_dtype + # TODO: we should check is_dynamic here as well, the code from _is_input_arg_dtype_supported_by_backend + # from input activation check can be reused here + qconfig_output_dtype = None + output_act_obs_or_fq_ctr = node.meta["target_dtype_info"].get("output_act_obs_or_fq_ctr") + qconfig_output_dtype, 
qconfig_output_is_dynamic = _get_dtype_and_is_dynamic(output_act_obs_or_fq_ctr) + # TODO: this is a hack because we can only specify one activation_obs_or_fq for + # qconfig (qconfig.activation), and we are only supporting dynamically quantized + # linear op which has fp32 output dtype, this should be removed if we generalize + # the structure of qconfig in the future + if qconfig_output_is_dynamic: + qconfig_output_dtype = torch.float32 + dtype_matches = qconfig_output_dtype == backend_config_output_dtype + qconfig_satisfies_constraints = _qconfig_satisfies_dtype_config_constraints( + qconfig, dtype_config.output_dtype_with_constraints) + return backend_config_output_dtype is None or (dtype_matches and qconfig_satisfies_constraints) + +def _is_observer_in_same_graph(node: Node, named_modules: Dict[str, torch.nn.Module]): + """ Check if observer in same graph + when the node output is not fp32 and input is 'placeholder' + the input is assumed to be quantized, so it is observed + in a different place rather than not observed. + """ + node_output_dtype = _get_arg_target_dtype_as_output(node, named_modules) + if len(node.args) > 0 and isinstance(node.args[0], Node): + if node_output_dtype == torch.quint8 and node.args[0].op == 'placeholder': + return False + return True + +def _is_pattern_dtype_config_and_qconfig_supported_by_backend( + pattern: Optional[Pattern], + matched_node_pattern: Optional[List[Node]], + qconfig: QConfigAny, + backend_config: BackendConfig, +) -> bool: + """ Check if the dtype configuration of a pattern is supported by + the backend or not, and whether the qconfig satisfies constraints + specified in the corresponding dtype config. 
+ """ + if backend_config is None or pattern is None: + return True + assert matched_node_pattern is not None and len(matched_node_pattern) >= 1 + pattern_to_dtype_configs = get_pattern_to_dtype_configs(backend_config) + dtype_configs: List[DTypeConfig] = pattern_to_dtype_configs.get(pattern, []) + pattern_to_root_node_getter = get_fusion_pattern_to_root_node_getter(backend_config) + + root_node_getter = pattern_to_root_node_getter.get(pattern, _default_root_node_getter) + root_node = root_node_getter(matched_node_pattern) + input_node = root_node + output_node = matched_node_pattern[0] + for dtype_config in dtype_configs: + # check if arg dtype are supported + supported = True + for arg in list(input_node.args) + list(input_node.kwargs.values()): + supported = supported and _is_input_arg_dtype_supported_by_backend( + arg, input_node, qconfig, dtype_config, backend_config) + # check if output dtype is supported + supported = supported and _is_output_dtype_supported_by_backend( + output_node, qconfig, dtype_config) + if supported: + return True + return False + +def _get_standalone_module_configs( + node: Node, + named_modules: Dict[str, torch.nn.Module], + prepare_custom_config: PrepareCustomConfig, + parent_qconfig: QConfigAny, + parent_backend_config: Optional[BackendConfig], +) -> Tuple[QConfigMapping, Tuple[Any, ...], PrepareCustomConfig, Optional[BackendConfig]]: + """ + Returns the standalone module QConfigMapping and PrepareCustomConfig + for `node`, assuming that the module pointed to by `node` is + a standalone modules. 
+ """ + module_name = str(node.target) + module_type = type(named_modules[module_name]) # type: ignore[index] + # name config has precedence over type config + config_entry = StandaloneModuleConfigEntry(None, (), None, None) + config_entry = prepare_custom_config.standalone_module_classes.get(module_type, config_entry) + config_entry = prepare_custom_config.standalone_module_names.get(module_name, config_entry) + # fallback to use parent module's qconfig if user didn't specify qconfig dict + qconfig_mapping = config_entry.qconfig_mapping or QConfigMapping().set_global(parent_qconfig) + example_inputs = config_entry.example_inputs + prepare_custom_config = config_entry.prepare_custom_config or PrepareCustomConfig() + backend_config = config_entry.backend_config or parent_backend_config + return (qconfig_mapping, example_inputs, prepare_custom_config, backend_config) + +def _qat_swap_modules( + root: torch.nn.Module, + module_to_qat_module: Dict[Pattern, Type[torch.nn.Module]]) -> None: + convert(root, mapping=module_to_qat_module, inplace=True, remove_qconfig=False) + +def _add_matched_node_name_to_set(matched_node_pattern: NodePattern, s: Set[str]): + if isinstance(matched_node_pattern, Node): + s.add(matched_node_pattern.name) + elif isinstance(matched_node_pattern, (list, tuple)): + for maybe_node in matched_node_pattern: + _add_matched_node_name_to_set(maybe_node, s) + +def _insert_observer( + node: Node, + observer: ObserverBase, + model: torch.nn.Module, + named_modules: Dict[str, torch.nn.Module], + graph: Graph, +) -> Node: + """ + Attaches `observer` to `model`, and creates a node which calls + `observer` on the output of `node`. 
+ """ + model_device = assert_and_get_unique_device(model) + if model_device: + observer.to(model_device) + # add observer module as attribute + if is_equalization_observer(observer): + prefix = node.name + '_equalization_process_' + else: + prefix = 'activation_post_process_' + get_new_observer_name = get_new_attr_name_with_prefix(prefix) + observer_name = get_new_observer_name(model) + setattr(model, observer_name, observer) + named_modules[observer_name] = observer + with graph.inserting_after(node): + new_obs = graph.create_node( + 'call_module', observer_name, (node,), {}) + return new_obs + +def _set_target_dtype_info_for_matched_node_pattern( + matched_node_pattern: NodePattern, + last_node: Node, + qconfig: QConfigAny, + backend_config: BackendConfig, + named_modules: Dict[str, torch.nn.Module], + cache_for_no_tensor_check: Dict[Node, bool], + processed_nodes: Set[Node], +) -> None: + """ Sets the target_dtype_info for each node in matched_node_pattern + Note: processed_nodes is used to ensure we only process each node once + """ + if isinstance(matched_node_pattern, (list, tuple)): + for node_pattern in matched_node_pattern: + _set_target_dtype_info_for_matched_node_pattern( + node_pattern, + last_node, + qconfig, + backend_config, + named_modules, + cache_for_no_tensor_check, + processed_nodes + ) + + # set target_dtype_info if matched_node_pattern is a Node + # other types of matched object, e.g. int, float literals, are ignored + elif isinstance(matched_node_pattern, Node): + # for pyre + assert isinstance(matched_node_pattern, Node) + node = matched_node_pattern + if node in processed_nodes: + return + processed_nodes.add(node) + + if qconfig is None: + return + # TODO: refactor the following code in terms of apply a qconfig to a pattern + # e.g. 
    # for a pattern with op1 -> op2 -> op3, and qconfig = QConfig(input_act=obs0, output_act=obs1)
    # we set the input_obs_or_fq_ctr for the arguments of op1 to based on qconfig.input_act,
    # and set output_obs_or_fq_ctr based on qconfig.output_act
    # this also requires we extend the structure of QConfig to support more fine
    # grained configurations
    target_dtype_info: Dict[str, Optional[Tuple[Union[torch.dtype, type], bool]]] = (
        _get_target_activation_dtype_for_node(
            node,
            qconfig,
            named_modules,
            cache_for_no_tensor_check,
        )
    )
    node.meta["target_dtype_info"] = target_dtype_info

def _get_target_activation_dtype_for_node(
    node: Node,
    qconfig: QConfigAny,
    named_modules: Dict[str, torch.nn.Module],
    cache_for_no_tensor_check: Dict[Node, bool],
) -> Dict[str, Optional[Tuple[Union[torch.dtype, type], bool]]]:
    """
    For each op attribute in the op's input activation, output activation,
    weight, bias - returns the settings of dtype and is_dynamic we expect
    for the `quantize` call in the reference model representation, or None
    if there is no `quantize` call needed.

    For example, if we have a node corresponding to `op0` in

      x0 -> op0 -> x1

    And we want a reference quantized representation to be

      x0 -> quant_static -> dequant -> op0 -> quant_dynamic -> dequant -> x1

    Then this function will return

      {
        "input_act_obs_or_fq_ctr": MinMaxObserver.with_args(dtype=torch.quint8, is_dynamic=False),
        "output_act_obs_or_fq_ctr": MinMaxObserver.with_args(dtype=torch.quint8, is_dynamic=False),
      }

    TODO(future PR, if needed): explicitly spell out the non-Tensor
    dtypes.
    """
    # nodes whose args carry no Tensors (e.g. pure shape arithmetic) never
    # need observation in either direction
    args_have_no_tensors = \
        all_node_args_have_no_tensors(
            node, named_modules, cache_for_no_tensor_check)
    if args_have_no_tensors:
        return {
            "input_act_obs_or_fq_ctr": None,
            "output_act_obs_or_fq_ctr": None,
        }
    # get qconfig to determine the eventual dtype of this node
    if qconfig is not None:
        act_dtype, weight_dtype, input_act_is_dynamic = \
            get_qconfig_dtypes(qconfig)

        # Currently `QConfig` only has one `activation` field.
        # For static quantization, it is reused for both input
        # and output activation. For dynamic quantization, this
        # field is currently only used for the input activation,
        # with the output activation being in fp32.
        # In the future this may change as we add more fields
        # to the `QConfig` object.
        output_act_dtype = act_dtype \
            if (not input_act_is_dynamic) else torch.float

        # bias follows fp16 only when both activation and weight are fp16
        # static; otherwise it stays fp32
        bias_dtype = torch.float16 \
            if (
                act_dtype == torch.float16
                and weight_dtype == torch.float16
                and (not input_act_is_dynamic)
            ) else torch.float
        return {
            "input_act_obs_or_fq_ctr": qconfig.activation,
            "weight_obs_or_fq_ctr": qconfig.weight,
            "bias_obs_or_fq_ctr": PlaceholderObserver.with_args(dtype=bias_dtype),
            "output_act_obs_or_fq_ctr": qconfig.activation,
        }
    # no qconfig: default to a fp32 placeholder configuration
    return copy.copy(_DEFAULT_FP32_QCONFIG_FOR_TARGET_DTYPE_INFO)

def _get_arg_target_dtype_as_output(
    arg: Node,
    named_modules: Dict[str, torch.nn.Module],
) -> Optional[Union[torch.dtype, type]]:
    """ Get the target output activation dtype for
    the argument in the original graph, skipping inserted observers
    We are assuming that the observers are inserted correctly, and the dtype for
    argument in quantized graph will match what is specified by the qconfig
    """
    assert isinstance(arg, Node)
    # Custom module LSTM output is a tuple that we broke down into the internal nodes in order
    # to insert DeQuantStubs (see `_insert_dequant_stubs_for_custom_module_lstm_output`).
    # Since we modified the graph in this case, we must trace back from the args through
    # the specific nodes we added in order to reach the original LSTM node. Otherwise, we would
    # not be able to accurately detect whether this node is a consumer of custom module LSTM.
    custom_module_lstm_node = _maybe_get_custom_module_lstm_from_node_arg(arg, named_modules)
    output_act_obs_or_fq_ctr = None
    if custom_module_lstm_node is not None:
        output_act_obs_or_fq_ctr = custom_module_lstm_node.meta["target_dtype_info"]["output_act_obs_or_fq_ctr"]
    elif _is_activation_post_process_node(arg, named_modules):
        # skip over an already-inserted observer and read the observed node's info
        observed_arg = arg.args[0]
        assert isinstance(observed_arg, Node), "Currently we only support observing Node"
        output_act_obs_or_fq_ctr = observed_arg.meta["target_dtype_info"]["output_act_obs_or_fq_ctr"]
    else:
        output_act_obs_or_fq_ctr = \
            arg.meta["target_dtype_info"]["output_act_obs_or_fq_ctr"]
    output_act_dtype, _ = _get_dtype_and_is_dynamic(output_act_obs_or_fq_ctr)
    # TODO: should support is_dynamic here as well
    return output_act_dtype

def _get_arg_target_dtype_as_input_to_node(
    arg: Node,
    node: Node,
    named_modules: Dict[str, torch.nn.Module],
    backend_config: BackendConfig,
) -> Optional[Union[torch.dtype, type]]:
    """ Get the target argument dtype for the argument `arg`, as input
    to node `node`
    """
    assert isinstance(arg, Node)
    # classify the argument: activation / weight / bias, per backend_config
    is_weight = node_arg_is_weight(node, arg, backend_config)
    is_bias = node_arg_is_bias(node, arg, backend_config)
    is_activation = not is_weight and not is_bias
    if is_activation:
        input_act_obs_or_fq_ctr = node.meta["target_dtype_info"].get("input_act_obs_or_fq_ctr")
        qconfig_dtype, _ = _get_dtype_and_is_dynamic(input_act_obs_or_fq_ctr)
        return qconfig_dtype
    elif is_weight:
        if node.target in NON_QUANTIZABLE_WEIGHT_OPS:
            return None
        else:
            weight_obs_or_fq_ctr = node.meta["target_dtype_info"].get("weight_obs_or_fq_ctr", None)
            qconfig_weight_dtype, _ = _get_dtype_and_is_dynamic(weight_obs_or_fq_ctr)
            return qconfig_weight_dtype
    else:
        bias_obs_or_fq_ctr = node.meta["target_dtype_info"].get("bias_obs_or_fq_ctr", None)
        qconfig_bias_dtype, _ = _get_dtype_and_is_dynamic(bias_obs_or_fq_ctr)
        return qconfig_bias_dtype

def _get_arg_target_is_dynamic_as_input_to_node(
    arg: Node,
    node: Node,
    named_modules: Dict[str, torch.nn.Module],
    backend_config: BackendConfig,
) -> bool:
    """ Return whether the argument `arg`, as input to node `node`, is
    dynamically quantized (i.e. whether the input activation observer
    constructor is configured with is_dynamic=True).
    Only activation arguments can be dynamic; weight/bias return False.
    """
    assert isinstance(arg, Node)
    is_weight = node_arg_is_weight(node, arg, backend_config)
    is_bias = node_arg_is_bias(node, arg, backend_config)
    is_activation = not is_weight and not is_bias
    if is_activation and "input_act_obs_or_fq_ctr" in node.meta["target_dtype_info"]:
        input_act_obs_or_fq_ctr = node.meta["target_dtype_info"].get("input_act_obs_or_fq_ctr")
        _, qconfig_is_dynamic = _get_dtype_and_is_dynamic(input_act_obs_or_fq_ctr)
        return qconfig_is_dynamic
    else:
        return False

def _maybe_insert_input_observer_for_arg_or_kwarg(
    node: Union[Node, Any],
    arg: Argument,
    qconfig: QConfigAny,
    model: torch.nn.Module,
    named_modules: Dict[str, torch.nn.Module],
    graph: Graph,
    qhandler: Optional[QuantizeHandler],
    prepare_custom_config: PrepareCustomConfig,
    backend_config: BackendConfig,
) -> Argument:
    """
    Given a `node` and an `arg`, inserts an input observer between
    `node` and `arg` if necessary.
+ """ + # for ops such as torch.cat([x0, x1]), + # traverse through the list + if isinstance(arg, (list, tuple)): + new_arg_to_return = [] + for inner_arg in arg: + new_inner_arg = _maybe_insert_input_observer_for_arg_or_kwarg( + node, inner_arg, qconfig, model, named_modules, + graph, + qhandler, + prepare_custom_config, + backend_config) + new_arg_to_return.append(new_inner_arg) + return type(arg)(new_arg_to_return) + + if not isinstance(arg, Node): + return arg + assert isinstance(arg, Node) + # default (no observer) + new_arg = arg + + is_standalone_module = qhandler is not None and qhandler.is_standalone_module() + assert qconfig is not None + if not is_standalone_module: + # regular flow for most nodes, except standalone modules + is_weight = node_arg_is_weight(node, arg, backend_config) + + _is_reuse_input_qconfig_ = _is_reuse_input_qconfig(qconfig) + + act_post_process_ctr = qconfig.weight if is_weight else \ + qconfig.activation + + arg_as_output_target_dtype = _get_arg_target_dtype_as_output(arg, named_modules) + arg_as_input_target_dtype = _get_arg_target_dtype_as_input_to_node( + arg, node, named_modules, backend_config) + arg_as_input_target_is_dynamic = \ + _get_arg_target_is_dynamic_as_input_to_node( + arg, node, named_modules, backend_config) # type: ignore[arg-type] + needs_obs = \ + ( + # the following code block is for static quantization + (not arg_as_input_target_is_dynamic) and + # if the dtypes are different, we need an observer + (arg_as_output_target_dtype != arg_as_input_target_dtype) and + # except if the second dtype is float, a dequant will be inserted + # without an observer in convert + # TODO(future PR): change this so a placeholder is inserted for + # future dequants, to make the logic easier to understand + (arg_as_input_target_dtype != torch.float) and + # if arg output dtype is in _DO_NOT_OBS_DTYPE_LIST do not insert observer + (arg_as_output_target_dtype not in _DO_NOT_OBS_DTYPE_LIST) and + # if qconfig is reuse_input qconfig, 
we won't insert extra observer for input + not _is_reuse_input_qconfig_ + ) or ( + # need to add input observer for dynamic quantization + # only add observer for first input for now, we may need to extend + # qconfig_dict and backend_config to support more general configurations + # of dynamic quantization, e.g. dynamically quantizing second input, third + # input etc. + arg_as_input_target_is_dynamic and arg is node.args[0] + ) + + else: + # custom flow for standalone modules + _, _, sm_prepare_custom_config, _ = \ + _get_standalone_module_configs( + node, named_modules, prepare_custom_config, qconfig, backend_config) + sm_input_quantized_idxs = sm_prepare_custom_config.input_quantized_indexes + + # for args, this is set to the index of the current arg + # for kwargs, this is left at None + cur_input_idx = None + for arg_idx, arg_to_check in enumerate(node.args): + if arg_to_check is arg: + cur_input_idx = arg_idx + break + + if cur_input_idx is None: + needs_obs = False + else: + arg_as_output_target_dtype = _get_arg_target_dtype_as_output(arg, named_modules) + arg_as_input_target_dtype = torch.quint8 if cur_input_idx in sm_input_quantized_idxs \ + else torch.float + needs_obs = ( + (arg_as_output_target_dtype != arg_as_input_target_dtype) and + (arg_as_input_target_dtype != torch.float) + ) + + act_post_process_ctr = qconfig.activation + + if needs_obs: + + new_obs_mod = act_post_process_ctr() + existing_obs_node = None + + # Before using the new observer, check if an observer + # of the correct type already exists. If it does, use it. + # This prevents duplicate observer insertions if a node is + # used by multiple nodes. 
+ # TODO: this is looking into how the value is used in the future + # we should remove this + # removing this means we insert one observer for each use, even if they + # have the same dtype, we can have an extra pass that removes the extra observers + for maybe_obs_node, _ in arg.users.items(): + if maybe_obs_node.op == 'call_module': + maybe_obs_mod = named_modules[maybe_obs_node.target] # type: ignore[index] + if ( + type(maybe_obs_mod) == type(new_obs_mod) and + maybe_obs_mod.dtype == arg_as_input_target_dtype + ): + existing_obs_node = maybe_obs_node + break + + if existing_obs_node is None: + new_obs_node = _insert_observer( + arg, new_obs_mod, model, named_modules, graph) + # override this arg to be the observed arg + new_arg = new_obs_node + else: + new_arg = existing_obs_node + + return new_arg + + +def _maybe_insert_input_observers_for_node( + node: Node, + qconfig: QConfigAny, + model: torch.nn.Module, + named_modules: Dict[str, torch.nn.Module], + graph: Graph, + qhandler: Optional[QuantizeHandler], + prepare_custom_config: PrepareCustomConfig, + backend_config: BackendConfig, +) -> None: + """ + If needed, inserts observers to the input args and kwargs of `node`. + Note: modifies `node` inplace. + + For example, if cur_node needs an observer after prev_node, we change from + + prev_node -> cur_node + + To + + prev_node -> obs -> cur_node + """ + if qconfig is None: + # if quantization is turned off for this node, we do not need + # to insert input observers + return + assert qconfig is not None + + # Look through every input arg. If that arg's target dtype does not + # match the current node's target dtype, insert an observer. 
+ new_args = [] + for arg in node.args: + new_arg = _maybe_insert_input_observer_for_arg_or_kwarg( + node, arg, qconfig, model, named_modules, graph, + qhandler, + prepare_custom_config, + backend_config) + new_args.append(new_arg) + + new_kwargs = {} + for k, kwarg in node.kwargs.items(): + new_kwarg = _maybe_insert_input_observer_for_arg_or_kwarg( + node, kwarg, qconfig, model, named_modules, graph, + qhandler, + prepare_custom_config, + backend_config) + new_kwargs[k] = new_kwarg + + # assign the new args and kwargs to the node, inplace + node.args = tuple(new_args) + node.kwargs = new_kwargs + +def _maybe_insert_input_equalization_observers_for_node( + node: Node, + equalization_qconfig: Any, + model: torch.nn.Module, + named_modules: Dict[str, torch.nn.Module], + graph: Graph, + is_branch: bool, + backend_config: BackendConfig, +) -> None: + """ + If `node` needs to be equalized, find the input/weight observers it needs in + `equalization_qconfig`, creates them, and inserts it into `graph`. + + If `node` does not need an equalization observer, returns None. + """ + if equalization_qconfig is None or not node_supports_equalization(node, named_modules): + return + + if is_branch: + warnings.warn( + f"Cannot equalize {node} because it is part of a branch." 
+ ) + return + + new_args = [] + for arg in node.args: + if not isinstance(arg, Node) or node_arg_is_bias(node, arg, backend_config): + new_args.append(arg) + continue + + is_weight = node_arg_is_weight(node, arg, backend_config) + + act_eq_process_ctr = equalization_qconfig.weight if is_weight else \ + equalization_qconfig.input_activation + + new_eq_obs_mod = act_eq_process_ctr() + new_eq_obs_node = _insert_observer( + arg, new_eq_obs_mod, model, named_modules, graph) + + new_args.append(new_eq_obs_node) + + # assign the new args and kwargs to the node, inplace + node.args = tuple(new_args) + +def _maybe_insert_output_observer_for_node( + node: Node, + model: torch.nn.Module, + named_modules: Dict[str, torch.nn.Module], + graph: Graph, + node_name_to_match_result_with_qconfig: Dict[str, _MatchResultWithQConfig], + matched_pattern: Any, + qhandler: Optional[QuantizeHandler], + is_qat: bool, +) -> Optional[Node]: + """ + If `node` needs an output observer, creates it, inserts it into `graph` + and returns it. + + If `node` does not need an output observer, returns None. 
+ """ + root_node, _, pattern, qhandler, qconfig = node_name_to_match_result_with_qconfig.get( + node.name, (None, None, None, None, None)) + + if qhandler is None: + return None + + assert qconfig is not None + assert node.op != 'output', 'observer insertion for outputs is handled elsewhere' + + is_standalone_module = qhandler is not None and qhandler.is_standalone_module() + + output_act_obs_or_fq_ctr = node.meta["target_dtype_info"].get("output_act_obs_or_fq_ctr") + qconfig_dtype, _ = _get_dtype_and_is_dynamic(output_act_obs_or_fq_ctr) + should_insert_observer = qconfig_dtype not in _DO_NOT_OBS_DTYPE_LIST + [torch.float] + # TODO(future PR): move the following logic to + # should_insert_observer_for_output + should_insert_observer = should_insert_observer and \ + activation_is_statically_quantized(qconfig) + + # we never insert observers to output of standalone module, we assume + # if needed, they are inserted inside the standalone module + should_insert_observer = should_insert_observer and \ + (not is_standalone_module) + + if should_insert_observer: + observer = qconfig.activation() + return _insert_observer(node, observer, model, named_modules, graph) + else: + return None + +def _maybe_insert_observers_before_graph_output( + graph_output_node: Node, + output_quantized_idxs: List[int], + node_name_to_qconfig: Dict[str, QConfigAny], + model: torch.nn.Module, + named_modules: Dict[str, torch.nn.Module], + graph: Graph, +) -> None: + """ + If the output needs to be quantized and there are any nodes + in the output which are not already observed, inserts observers + for those nodes. + """ + + # TODO(future PR): update the output_quantized_idxs API to match + # arbitrary data structures. There is always a single output, and + # that output can have arbitrary nesting of values. List[int] is + # not the right data type for this. 
+ assert output_quantized_idxs == [0] or output_quantized_idxs == [], \ + 'unrecognized format of output_quantized_idxs' + + # Currently dequants are inserted in the convert step. So, we only + # have to do anything if the output is hardcoded to be quantized + if output_quantized_idxs == []: + return + # TODO(future PR): support more dtypes in model outputs, if necessary + output_target_dtype = torch.quint8 + + def _recursive_maybe_replace_node_with_obs( + maybe_node: Argument, + target_dtype: torch.dtype, + node_name_to_qconfig: Dict[str, QConfigAny], + model: torch.nn.Module, + named_modules: Dict[str, torch.nn.Module], + graph: Graph, + ) -> Argument: + """ + Navigate an arbitrary data structure of lists, tuples, dicts. + For each container type, recurse on all inputs. Once any Node + is found, insert an observer if needed and do not recurse further. + + For example, given a structure of + + {'foo1': [[bar1]], 'foo2': {'foo3': [[[bar3]]]}} + + we recurse down to bar1 and bar3, observe them if necessary, + and if we inserted an observer then replace the original node + with its observer. + + Returns the data structure with all nodes needing observation being + replaced by their observers. + """ + if isinstance(maybe_node, Node): + # check dtype of this node + this_node_dtype = _get_arg_target_dtype_as_output( + maybe_node, named_modules) + if this_node_dtype != target_dtype: + # insert observer + qconfig = node_name_to_qconfig.get(maybe_node.name) + # TODO(future PR): see if we need to allow specifying qconfig + # on output nodes, to remove the restriction below. 
+ assert qconfig is not None, \ + 'Quantizing the output node without a qconfig is not supported' + observer_mod = qconfig.activation() + observer_node = _insert_observer( + maybe_node, observer_mod, model, named_modules, graph) + return observer_node + else: + return maybe_node + elif isinstance(maybe_node, (list, tuple)): + results = [] + for inner_node in maybe_node: + results.append(_recursive_maybe_replace_node_with_obs( + inner_node, target_dtype, node_name_to_qconfig, model, named_modules, graph)) + if isinstance(maybe_node, list): + return results + else: + return tuple(results) + elif isinstance(maybe_node, dict): + results_dict = {} + for k, inner_v in maybe_node.items(): + results_dict[k] = _recursive_maybe_replace_node_with_obs( + inner_v, target_dtype, node_name_to_qconfig, model, named_modules, graph) + return results_dict + else: + return results + + new_args = [] + for old_arg in graph_output_node.args: + new_args.append( + _recursive_maybe_replace_node_with_obs( + old_arg, output_target_dtype, node_name_to_qconfig, model, named_modules, graph)) + + graph_output_node.args = tuple(new_args) # type: ignore[assignment] + + +def _maybe_propagate_dtype_for_node( + node: Node, + target_dtype: Union[torch.dtype, type], + node_name_to_match_result_with_qconfig: Dict[str, _MatchResultWithQConfig], +) -> None: + """ + Assigns `target_dtype` to `node`, setting `is_dynamic` to False. If `node` + is a general tensor shape op, also call this function recursively on + the first argument, to propagate the dtype to the caller. 
+ """ + node.meta["target_dtype_info"]["input_act_obs_or_fq_ctr"] = None + node.meta["target_dtype_info"]["output_act_obs_or_fq_ctr"] = None + # if this is a copy node, propagate to first arg + root_node, _, pattern, qhandler, qconfig = node_name_to_match_result_with_qconfig.get( + node.name, (None, None, None, None, None)) + # TODO: probably need to remove `is_general_tensor_value_op` + if qhandler is not None and qhandler.is_general_tensor_value_op(): + prev_node = node.args[0] + if isinstance(prev_node, Node): + _maybe_propagate_dtype_for_node( + prev_node, target_dtype, node_name_to_match_result_with_qconfig) + +def propagate_dtypes_for_known_nodes( + graph: Graph, + node_name_to_match_result_with_qconfig: Dict[str, _MatchResultWithQConfig], +) -> None: + """ + Currently we assume that inputs to the graph are either `torch.float` or + `torch.quint8`, which is not always correct. For ops such as + `x.masked_fill(mask, value)`, we know that the dtype of `mask` is a + `BoolTensor`. Propagate this information throughout the graph. + + Note: not all dtypes in the graph will be correct after this pass, but a + higher percentage of them will be correct. Hopefully in the future we can + replace this with a better way to reason about dtypes of tensors. 
+ """ + for node in graph.nodes: + non_observable_arg_dict = get_non_observable_arg_indexes_and_types(node) + + for arg_type in non_observable_arg_dict: + non_observable_indices = non_observable_arg_dict[arg_type](node) + + for index in non_observable_indices: + arg = node.args[index] + + # when an argument is a tuple, it does not show up as another node so we need to go through + # all elements of the tuple manually + if isinstance(arg, (tuple, list)): + arg_list = list(arg) + else: + arg_list = [arg] + + for cur_arg in arg_list: + # hard coded arguments show up but aren't `Node` typed and do not need dtype propgated + if isinstance(cur_arg, torch.fx.node.Node): + _maybe_propagate_dtype_for_node( + cur_arg, arg_type, node_name_to_match_result_with_qconfig) + +def _maybe_make_input_output_share_observers( + node: Node, + model: torch.nn.Module, + named_modules: Dict[str, torch.nn.Module], +) -> bool: + """ + Ensures that we share an observer + for all input arguments as well as the output argument. In detail, given + a graph of + + x0 -> obs0 -> op -> x2 + / + x1 -> obs1 / + + where node obs0 points to observer instance observer0, + obs1 points to observer1 and obs2 points to observer2, we make nodes obs1 + and ob2 point to observer0. 
+ Returns: whether the operation succeeded or not + """ + first_arg = None + # find the first non-Tensor arg + for i in range(len(node.args)): + if isinstance(node.args[i], (Node, list, tuple)): + first_arg = node.args[i] + break + + # if there is no non-Tensor arg, return directly + if first_arg is None: + return False + + if isinstance(first_arg, (list, tuple)): + first_arg_arg = first_arg[0] + elif isinstance(first_arg, Node): + first_arg_arg = first_arg + else: + return False + + # if we have a graph such as + # observed_node -> non_observed_node -> cat + # we need to navigate up to the first observer + iteration_guard = 0 + while not _is_activation_post_process_node(first_arg_arg, named_modules): + if not isinstance(first_arg_arg, Node): + return False + # did not find an activation_post_process for the op + if first_arg_arg.op == "placeholder": + return False + # trace back the args until we found the first Tensor/Node + trace_back_node = None + for i in range(len(first_arg_arg.args)): + trace_back_node = first_arg_arg.args[i] + if isinstance(trace_back_node, Node): + break + if trace_back_node is None: + return False + first_arg_arg = trace_back_node + + iteration_guard += 1 + if iteration_guard > 10000: + raise AssertionError('Unable to find observer of previous node') + + assert isinstance(first_arg_arg, Node) + target_to_use = first_arg_arg.target + assert isinstance(target_to_use, str) + obs_mod_to_use = named_modules[target_to_use] + + if isinstance(first_arg, (list, tuple)): + # set all other input observer nodes to use that module + for input_idx, input_arg in enumerate(first_arg): + if input_idx == 0: + continue + iteration_guard = 0 + while not _is_activation_post_process_node(input_arg, named_modules): + # failed to trace back since no input arg for the current node + if len(input_arg.args) < 1: + return False + input_arg = input_arg.args[0] + iteration_guard += 1 + if iteration_guard > 10000: + raise AssertionError('Unable to find observer of 
previous node') + + parent_name, name = _parent_name(input_arg.target) + setattr(named_modules[parent_name], name, obs_mod_to_use) + + # set the output observer node to use that module + for output_obs_node, _ in node.users.items(): + assert _is_activation_post_process_node(output_obs_node, named_modules) + parent_name, name = _parent_name(output_obs_node.target) + setattr(named_modules[parent_name], name, obs_mod_to_use) + + # TODO(future PR): delete the orphaned observer modules + return True + +def _remove_output_observer( + node: Node, + model: torch.nn.Module, + named_modules: Dict[str, torch.nn.Module]): + items = list(node.users.items()) + for output_obs_node, _ in items: + assert _is_activation_post_process_node(output_obs_node, named_modules) + output_obs_node.replace_all_uses_with(node) + model.graph.erase_node(output_obs_node) # type: ignore[union-attr, operator] + +def _swap_custom_module_to_observed( + node: Node, + qconfig: QConfigAny, + named_modules: Dict[str, torch.nn.Module], + prepare_custom_config: PrepareCustomConfig): + custom_module = named_modules[node.target] # type: ignore[index] + custom_module_class_mapping = prepare_custom_config.float_to_observed_mapping + observed_custom_module_class = \ + get_swapped_custom_module_class( + custom_module, custom_module_class_mapping, qconfig) + observed_custom_module = \ + observed_custom_module_class.from_float(custom_module) + parent_name, name = _parent_name(node.target) + setattr(named_modules[parent_name], name, observed_custom_module) + +def insert_observers_for_model( + model: GraphModule, + node_name_to_match_result_with_qconfig: Dict[str, _MatchResultWithQConfig], + node_name_to_qconfig: Dict[str, QConfigAny], + prepare_custom_config: PrepareCustomConfig, + equalization_config_map: Dict[str, Any], + backend_config: BackendConfig, + observed_node_names: Set[str], + is_qat: bool, +) -> Optional[Node]: + """ + Inserts observers, using the following high level algorithm: + + For each node in the 
    graph:
      1. determine the target dtype of this node in the quantized graph, and save
         it for future steps
      2. determine the target dtype or all args and kwargs of this node
      3. if any arg or kwarg's target dtype does not match the current node's
         dtype, insert an observer
      4. if the current node needs an output observer, insert it

    For example:

    - starting graph:
        x0 -> linear -> x1

    - observed graph after processing x0:
        x0(fp32)

    - observed graph after processing linear:
        x0(fp32) -> x0_obs0(int8) -> linear(int8) -> linear_obs0(int8)

    - observed graph after processing x1:
        x0(fp32) -> x0_obs0(int8) -> linear(int8) -> linear_obs0(int8) -> x1

    After a node is processed, the naive observer placement is guaranteed to be
    complete for that node and all of its predecessors. There can be future
    passes which optimize the graph by deduplicating observers, etc.
    """

    # node.meta["target_dtype_info"] stores the target dtype information
    # that's derived from qconfig for the Node, for example, if we have
    # a conv2d node that has a qconfig
    # qconfig = QConfig(activation=..., weight=...)
    # # information for input and bias node omitted
    # # for getattr node
    # # weight = getattr(self, 'weight')
    # weight.meta["target_dtype_info"] = {
    #    'output_act_obs_or_fq_ctr': qconfig.weight,
    # }
    # # for conv2d node
    # # conv2d = call_function[target=torch.nn.functional.conv2d](
    # #            args=(input, weight, bias))
    # conv2d.meta["target_dtype_info"] = {
    #   'input_act_obs_or_fq_ctr': qconfig.activation
    #   'weight_obs_or_fq_ctr': qconfig.weight,
    #   'bias_obs_or_fq_ctr': PlaceholderObserver.with_args(dtype=torch.float32),
    #   'output_act_obs_or_fq_ctr': qconfig.activation,
    # }
    #
    cache_for_no_tensor_check: Dict[Node, bool] = {}

    # first, populate the dtype map based only on qconfig and qhandler
    # this assumes:
    # graph inputs are fp32 by default, and int8 where overriden
    # other nodes output dtype is specified by the qconfig
    named_modules = dict(model.named_modules(remove_duplicate=False))

    input_quantized_idxs: List[int] = prepare_custom_config.input_quantized_indexes
    output_quantized_idxs: List[int] = prepare_custom_config.output_quantized_indexes
    processed_nodes: Set[Node] = set()
    # initalize target_dtype_info
    for node in model.graph.nodes:
        node.meta["target_dtype_info"] = copy.copy(_DEFAULT_FP32_QCONFIG_FOR_TARGET_DTYPE_INFO)

    inputs_seen_counter = 0
    outputs_seen_counter = 0
    placeholder_node_to_input_index: Dict[Node, int] = {}
    # TODO: we probably don't need this counter since each graph will only have
    # one output node?
    output_node_to_output_index: Dict[Node, int] = {}
    for node in model.graph.nodes:
        if node.op == "placeholder":
            placeholder_node_to_input_index[node] = inputs_seen_counter
            inputs_seen_counter += 1
        if node.op == "output":
            output_node_to_output_index[node] = outputs_seen_counter
            outputs_seen_counter += 1

    # Step 1, set the observer or fake quantize module constructor for each node in the
    # matched_node_pattern

    for node_name, match_res_with_qconfig in node_name_to_match_result_with_qconfig.items():
        last_node, matched_node_pattern, pattern, qhandler, qconfig = match_res_with_qconfig
        assert qhandler is not None
        _set_target_dtype_info_for_matched_node_pattern(
            matched_node_pattern,
            last_node,
            qconfig,
            backend_config,
            named_modules,
            cache_for_no_tensor_check,
            processed_nodes
        )

    # Step 2. Special cases for some operators, we might be able to remove them
    # in the future if we know dtype information of each node better

    # Step 2.1. some settings are not based on patterns, we need to process each node
    # instead
    for node in model.graph.nodes:
        if node.op == "placeholder" and placeholder_node_to_input_index[node] in input_quantized_idxs:
            # users are not supposed to call calculate_qparams on PlaceholderObserver, and
            # this is OK because we are using this as a way to encode the dtypes of input
            # tensor, we won't actually insert these observers in the graph and won't
            # actually call calculate_qparams
            node.meta["target_dtype_info"] = copy.copy(_DEFAULT_QUINT8_QCONFIG_FOR_TARGET_DTYPE_INFO)
        elif node.op in ("call_module", "call_method", "call_function"):
            # ops whose args carry no Tensors never need observation
            args_have_no_tensors = \
                all_node_args_have_no_tensors(
                    node, named_modules, cache_for_no_tensor_check)
            if args_have_no_tensors:
                node.meta["target_dtype_info"] = {
                    "input_act_obs_or_fq_ctr": None,
                    "output_act_obs_or_fq_ctr": None,
                }
        elif node.op == "output" and output_node_to_output_index[node] in output_quantized_idxs:
            node.meta["target_dtype_info"] = copy.copy(_DEFAULT_QUINT8_QCONFIG_FOR_TARGET_DTYPE_INFO)

    # Step 2.2, for nodes with known input dtypes, propagate them throughout the
    # graph. For example, if there is a call such as
    #   x1 = x0.masked_fill(mask, 1)
    # we propagate the type of mask to be torch.bool
    propagate_dtypes_for_known_nodes(model.graph, node_name_to_match_result_with_qconfig)

    # Step 3, check if the requested target_dtype_info is supported by backend or not
    # if not, we'll reset the target_dtye_info to use the default (float Tensor)

    # reset the counters and set of processed_nodes
    processed_nodes = set()
    for node_name, match_res_with_qconfig in node_name_to_match_result_with_qconfig.items():
        last_node, matched_node_pattern, pattern, qhandler, qconfig = match_res_with_qconfig
        is_supported_by_backend = _is_pattern_dtype_config_and_qconfig_supported_by_backend(
            pattern, matched_node_pattern, qconfig, backend_config)
        assert qhandler is not None

        # get output_act_dtype so that we don't also reset the special typed nodes
        # TODO: we might want to handle these more uniformly with the default path
        # this can be improved if we can use node.meta["val"]
        output_act_dtype, _ = _get_dtype_and_is_dynamic(node.meta["target_dtype_info"]["output_act_obs_or_fq_ctr"])
        if not is_supported_by_backend and output_act_dtype not in [None, int, float, torch.bool]:
            # restore target_dtype_info to default if it is not supported by backend
            _set_target_dtype_info_for_matched_node_pattern(
                matched_node_pattern,
                last_node,
                torch.ao.quantization.qconfig._default_fp32_placeholder_qconfig,
                backend_config,
                named_modules,
                cache_for_no_tensor_check,
                processed_nodes
            )

    # After this point, the current node and all of its arguments
    # have a target_dtype_info assigned. Now, we insert observers for inputs
    # of this node (if needed for this node), and the output of this node
    # (if needed for this node).

    # Since we are mutating the graph as we go, we iterate over the original
    # nodes before observer insertion, instead of model.graph.nodes.
    nodes_before_observation = list(model.graph.nodes)

    # Avoid duplicates custom module swaps for multiple nodes with same target.
    custom_module_names_already_swapped: Set[str] = set()

    # TODO: reuse placeholder_node_to_input_index and output_node_to_output_index
    # reset inputs/outputs counters
    inputs_seen_counter = 0
    outputs_seen_counter = 0
    results_node = None

    # TODO: change this to insert obs/fq by pattern instead of by node
    for node in nodes_before_observation:

        if node.op == 'placeholder':
            # if a graph input is in fp32, it does not need observation
            # if a graph input is in int8, we assume the observation happens
            # outside of the graph, and no additional observation is needed
            pass

        elif node.op in ('call_module', 'call_method', 'call_function', 'output'):
            # check for matches
            last_node, matched_node_pattern, pattern, qhandler, qconfig = (
                node_name_to_match_result_with_qconfig.get(node.name, (None, None, None, None, None))  # type: ignore[assignment]
            )
            equalization_qconfig = equalization_config_map.get(node.name, None)

            this_node_dtype_info = node.meta["target_dtype_info"]
            if "val" in node.meta:
                output_is_a_tensor = (
                    this_node_dtype_info is not None and
                    isinstance(node.meta["val"], FakeTensor)
                )
            else:
                output_is_a_tensor = this_node_dtype_info is not None

            skip_inserting_observers = (
                (qconfig is None) or
                not output_is_a_tensor
            ) and (
                not node.op == 'output'
            )

            # TODO: take a closer look to see if we can remove this check
            # right now it is here because of `observed_node_names`, we are using
            # it as an indicator for swapping the modules to reference modules in
            # convert
            is_supported_by_backend = _is_pattern_dtype_config_and_qconfig_supported_by_backend(
                pattern, matched_node_pattern, qconfig, backend_config)

            if not skip_inserting_observers and is_supported_by_backend:
                named_modules = dict(model.named_modules(remove_duplicate=False))
                if node.op != 'output':
                    assert matched_node_pattern is not None
                    # add matched nodes to the observed node name set
                    _add_matched_node_name_to_set(matched_node_pattern, observed_node_names)

                    # This is currently only used for equalization.
                    # Checks if the current node is in a branch in which the two
                    # first layers are both being quantized.
                    #
                    # ex.       conv2
                    #         /
                    #      x -> conv1
                    #
                    # If this is the case, we will not apply equalization to the
                    # initial two layers.
                    is_quantized_branch = False
                    if (
                        len(node.args) > 0 and
                        isinstance(node.args[0], Node) and
                        len(node.args[0].users) > 1
                    ):
                        for user in node.args[0].users:
                            # Checks if there exists another user being quantized
                            is_user_quantized = (
                                node_name_to_qconfig.get(user.name, None) is not None or
                                (user.op == 'call_module' and isinstance(named_modules[str(user.target)], ObserverBase))
                            )
                            if user != node and is_user_quantized:
                                is_quantized_branch = True

                    pattern_to_root_node_getter = get_fusion_pattern_to_root_node_getter(backend_config)
                    root_node_getter = pattern_to_root_node_getter.get(pattern, _default_root_node_getter)
                    root_node = root_node_getter(matched_node_pattern)
                    is_input_node_of_the_pattern = node is root_node
                    if is_input_node_of_the_pattern:
                        # this modifies node inplace
                        _maybe_insert_input_observers_for_node(
                            node, qconfig, model, named_modules, model.graph,
                            qhandler,
                            prepare_custom_config,
                            backend_config)

                        # insert equalization input observers if needed
                        _maybe_insert_input_equalization_observers_for_node(
                            node, equalization_qconfig, model, named_modules, model.graph,
                            is_quantized_branch, backend_config)

                    is_last_node_of_pattern = node is last_node
                    is_general_tensor_value_op = \
                        (qhandler is not None and qhandler.is_general_tensor_value_op())
                    _is_reuse_input_qconfig_ = _is_reuse_input_qconfig(qconfig)

                    if is_last_node_of_pattern:
                        if _is_custom_module_lstm(node, named_modules, qconfig, qhandler):
                            # Currently custom module outputs are assumed to be already quantized,
                            # so we need to insert a DeQuantStub after the output. For custom module
                            # LSTM specifically, the outputs are also a nested tuple, so we must first
                            # break down the tuple to insert DeQuantStubs after the internal nodes.

                            # TODO: This currently diverges from how custom modules are handled today,
                            # where we insert observers after the output instead of DeQuantStubs, and
                            # replace these observers with "dequantize" nodes during convert. Conceptually,
                            # these output observers are the same as DeQuantStubs. In the future, we
                            # should resolve this inconsistency by inserting DeQuantStubs for all custom
                            # modules, not just for LSTM.
                            _insert_dequant_stubs_for_custom_module_lstm_output(node, model, named_modules, model.graph)
                            if(node.target not in custom_module_names_already_swapped):
                                custom_module_names_already_swapped.add(node.target)
                                _swap_custom_module_to_observed(node, qconfig, named_modules, prepare_custom_config)
                        else:
                            # this returns the new observer node if it was needed
                            maybe_output_obs_node = _maybe_insert_output_observer_for_node(
                                node, model, named_modules, model.graph, node_name_to_match_result_with_qconfig,
                                pattern, qhandler, is_qat)

                            if maybe_output_obs_node is not None:
                                # Update users of original node to use the output observer
                                # instead. For example, change
                                #
                                #           next_node
                                #          /
                                #   cur_node -> obs
                                #
                                # to
                                #
                                #                 next_node
                                #                 /
                                #   cur_node -> obs
                                #
                                # We need to save orig users before updating uses because
                                # the list of users will change as we update uses
                                orig_users = list(node.users.keys())
                                for user_node in orig_users:
                                    if user_node is maybe_output_obs_node:
                                        continue
                                    user_node.replace_input_with(node, maybe_output_obs_node)

                                _is_observer_in_same_graph_ = _is_observer_in_same_graph(
                                    node, named_modules)

                                # for general tensor value ops, we modify the graph
                                # to make all inputs and outputs use the first input's
                                # observer
                                if (is_general_tensor_value_op and _is_observer_in_same_graph_) or \
                                        _is_reuse_input_qconfig_:
                                    if not _maybe_make_input_output_share_observers(node, model, named_modules):
                                        _remove_output_observer(node, model, named_modules)

                                if qhandler is not None and qhandler.is_custom_module():
                                    if(node.target not in custom_module_names_already_swapped):
                                        custom_module_names_already_swapped.add(node.target)
                                        _swap_custom_module_to_observed(node, qconfig, named_modules, prepare_custom_config)

                else:  # output
                    _maybe_insert_observers_before_graph_output(
                        node, output_quantized_idxs,
                        node_name_to_qconfig,
                        model, named_modules, model.graph)

        #
        # After this point, the current node has input and output observers
        # that it needs for itself inserted.
        #

        # increment the counters, so future inputs and outputs are assigned
        # correct dtypes
        if node.op == 'placeholder':
            inputs_seen_counter += 1
        elif node.op == 'output':
            outputs_seen_counter += 1
            results_node = node

    return results_node

def _run_prepare_fx_on_standalone_modules(
    model: torch.nn.Module,
    is_qat: bool,
    named_modules: Dict[str, torch.nn.Module],
    node_name_to_match_result_with_qconfig: Any,
    prepare_custom_config: PrepareCustomConfig,
    backend_config: BackendConfig,
) -> None:
    """
    Runs prepare_fx on each standalone module.
Note: this does + not modify the graph, it just replaces the unobserved modules with + their observed versions. + """ + for ( + node_name, + (root_node, _, pattern, qhandler, qconfig), + ) in node_name_to_match_result_with_qconfig.items(): + if qhandler is None: + continue + elif not qhandler.is_standalone_module(): + continue + + sm_qconfig_mapping, sm_example_inputs, sm_prepare_custom_config, \ + sm_backend_config = _get_standalone_module_configs( + root_node, named_modules, prepare_custom_config, qconfig, backend_config) + + standalone_module = named_modules[root_node.target] + prepare = \ + torch.ao.quantization.quantize_fx._prepare_standalone_module_fx # type: ignore[attr-defined] + observed_standalone_module = \ + prepare( + standalone_module, + sm_qconfig_mapping, + is_qat, + example_inputs=sm_example_inputs, + prepare_custom_config=sm_prepare_custom_config, + backend_config=sm_backend_config) + parent_name, name = _parent_name(root_node.target) + setattr(named_modules[parent_name], name, observed_standalone_module) + named_modules[root_node.target] = observed_standalone_module + +def _save_state( + observed: GraphModule, + node_name_to_qconfig: Dict[str, QConfigAny], + node_name_to_scope: Dict[str, Tuple[str, type]], + prepare_custom_config: PrepareCustomConfig, + equalization_node_name_to_qconfig: Dict[str, Any], + qconfig_mapping: QConfigMapping, + is_qat: bool, + observed_node_names: Set[str], +) -> None: + observed.meta["_observed_graph_module_attrs"] = ( + ObservedGraphModuleAttrs( + node_name_to_qconfig=node_name_to_qconfig, + node_name_to_scope=node_name_to_scope, + prepare_custom_config=prepare_custom_config, + equalization_node_name_to_qconfig=equalization_node_name_to_qconfig, + qconfig_mapping=qconfig_mapping, + is_qat=is_qat, + observed_node_names=observed_node_names, + ) + ) + +def prepare( + model: GraphModule, + qconfig_mapping: Union[QConfigMapping, Dict[str, Any]], + is_qat: bool, + node_name_to_scope: Dict[str, Tuple[str, type]], + 
example_inputs: Tuple[Any, ...], + prepare_custom_config: Union[PrepareCustomConfig, Dict[str, Any], None] = None, + _equalization_config: Union[QConfigMapping, Dict[str, Any], None] = None, + backend_config: Union[BackendConfig, Dict[str, Any], None] = None, + is_standalone_module: bool = False) -> GraphModule: + """ standalone_module means it a submodule that is not inlined in + parent module, and will be quantized separately as one unit. + + How the standalone module is observed is specified by `input_quantized_idxs` and + `output_quantized_idxs` in the prepare_custom_config for the standalone module + Args: + node_name_to_scope: mapping from node name to the scope of the module which contains the node. + The scope is a tuple of fully qualified path of the module and the type of the module + Returns: + model(GraphModule): prepared standalone module + attributes related to standalone module + in model.meta["_observed_graph_module_attrs"]: + is_observed_standalone_module (bool): boolean value that shows whether the + current model is a observed standalone module or not + standalone_module_input_quantized_idxs(List[Int]): a list of + indexes for the graph input that is expected to be quantized, + same as input_quantized_idxs configuration provided + for the standalone module + standalone_module_output_quantized_idxs(List[Int]): a list of + indexs for the graph output that is quantized + same as input_quantized_idxs configuration provided + for the standalone module + """ + if prepare_custom_config is None: + prepare_custom_config = PrepareCustomConfig() + if _equalization_config is None: + _equalization_config = QConfigMapping() + + if isinstance(qconfig_mapping, Dict): + warnings.warn( + "Passing a QConfig dictionary to prepare is deprecated and will not be supported " + "in a future version. 
Please pass in a QConfigMapping instead.") + qconfig_mapping = QConfigMapping.from_dict(qconfig_mapping) + + if isinstance(_equalization_config, Dict): + warnings.warn( + "Passing a QConfig dictionary to prepare for equalization is deprecated and will not " + "be supported in a future version. Please pass in a QConfigMapping instead.") + _equalization_config = QConfigMapping.from_dict(_equalization_config) + + if isinstance(prepare_custom_config, Dict): + warnings.warn( + "Passing a prepare_custom_config_dict to prepare is deprecated and will not be supported " + "in a future version. Please pass in a PrepareCustomConfig instead.") + prepare_custom_config = PrepareCustomConfig.from_dict(prepare_custom_config) + + if isinstance(backend_config, Dict): + warnings.warn( + "Passing a backend_config_dict to prepare is deprecated and will not be supported " + "in a future version. Please pass in a BackendConfig instead.") + backend_config = BackendConfig.from_dict(backend_config) + + assert(isinstance(qconfig_mapping, QConfigMapping)) + assert(isinstance(_equalization_config, QConfigMapping)) + qconfig_mapping = copy.deepcopy(qconfig_mapping) + _equalization_config = copy.deepcopy(_equalization_config) + + # mapping from a tuple of nodes in reverse order to uninitialized + # QuantizeHandler subclass. 
For example, + # { + # # match a single node + # (: + # ), + # # match multiple nodes in reverse order + # ((, ): + # ), + # } + + pattern_to_quantize_handler: Dict[Pattern, QuantizeHandler] = {} + if backend_config is None: + backend_config = get_native_backend_config() + pattern_to_quantize_handler = _get_pattern_to_quantize_handlers(backend_config) + pattern_to_quantize_handler = _sorted_patterns_dict(pattern_to_quantize_handler) + + root_node_getter_mapping = \ + get_fusion_pattern_to_root_node_getter(backend_config) + + _update_qconfig_for_fusion(model, qconfig_mapping) + _update_qconfig_for_fusion(model, _equalization_config) + flattened_qconfig_dict = _get_flattened_qconfig_dict(qconfig_mapping) + # TODO: support regex as well + propagate_qconfig_(model, flattened_qconfig_dict, prepare_custom_config.to_dict()) + + if is_qat: + module_to_qat_module = get_module_to_qat_module(backend_config) + _qat_swap_modules(model, module_to_qat_module) + _update_qconfig_for_qat(qconfig_mapping, backend_config) + + # mapping from fully qualified module name to module instance + # for example, + # { + # '': Model(...), + # 'linear': Linear(...), + # 'linear.weight_fake_quant': PerChannelMinMaxObserver(...), + # } + named_modules = dict(model.named_modules(remove_duplicate=False)) + + # fill node_name_to_qconfig, a map from node name to qconfig, used in _find_matches + equalization_node_name_to_qconfig = _generate_node_name_to_qconfig( + model, named_modules, model.graph, _equalization_config, node_name_to_scope) + node_name_to_qconfig = _generate_node_name_to_qconfig(model, named_modules, model.graph, qconfig_mapping, node_name_to_scope) + + # match the patterns that will get quantized + standalone_module_names = list(prepare_custom_config.standalone_module_names.keys()) + standalone_module_classes = list(prepare_custom_config.standalone_module_classes.keys()) + + custom_module_classes = get_custom_module_class_keys(prepare_custom_config.float_to_observed_mapping) + 
matches_without_qconfig = _find_matches( + model.graph, named_modules, pattern_to_quantize_handler, root_node_getter_mapping, + standalone_module_names, standalone_module_classes, custom_module_classes) + + # map qconfig instances to matches + node_name_to_match_result_with_qconfig = {} + for node_name, match_without_qconfig in matches_without_qconfig.items(): + match_with_qconfig = (*match_without_qconfig, node_name_to_qconfig[node_name]) + node_name_to_match_result_with_qconfig[node_name] = match_with_qconfig + + _run_prepare_fx_on_standalone_modules( + model, is_qat, named_modules, node_name_to_match_result_with_qconfig, prepare_custom_config, backend_config) + + # record names for the set of observed node, so that in convert step + # we know whether we need to convert a floating point module to reference + # quantized module or not + observed_node_names: Set[str] = set() + + result_node = insert_observers_for_model( + model, + node_name_to_match_result_with_qconfig, + node_name_to_qconfig, + prepare_custom_config, + equalization_node_name_to_qconfig, + backend_config, + observed_node_names, + is_qat + ) + model = GraphModule(model, model.graph) + + _save_state(model, node_name_to_qconfig, node_name_to_scope, + prepare_custom_config, equalization_node_name_to_qconfig, + qconfig_mapping, is_qat, observed_node_names) + + if is_standalone_module: + assert result_node is not None + assert isinstance(result_node.args[0], Node), \ + "standalone module only supports returning simple value currently"\ + "(not tuple, dict etc.)" + # these inputs are observed in parent + # converting List[int] to Tensor since module attribute is + # Union[Tensor, Module] + input_quantized_idxs: List[int] = prepare_custom_config.input_quantized_indexes + output_quantized_idxs: List[int] = prepare_custom_config.output_quantized_indexes + observed_graph_module_attrs = model.meta["_observed_graph_module_attrs"] + # inplace modification + 
observed_graph_module_attrs.is_observed_standalone_module = True + observed_graph_module_attrs.standalone_module_input_quantized_idxs = \ + input_quantized_idxs + observed_graph_module_attrs.standalone_module_output_quantized_idxs = \ + output_quantized_idxs + return model diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/qconfig_mapping_utils.py b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/qconfig_mapping_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..15d2a94b8304026813dff1cdf3117a80cff47ef1 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/qconfig_mapping_utils.py @@ -0,0 +1,343 @@ +import torch +import re +from collections import defaultdict, OrderedDict +from typing import Callable, Any, Dict, Tuple, Set, List, Union +from torch.ao.quantization import QConfig +from torch.ao.quantization.qconfig import _add_module_to_qconfig_obs_ctr, QConfigAny, qconfig_equals +from torch.ao.quantization.observer import ( + _is_activation_post_process, +) +from torch.ao.quantization.backend_config import ( + BackendConfig, + DTypeConfig, +) +from torch.ao.quantization.backend_config.utils import ( + get_module_to_qat_module, +) + +from torch.fx import ( + GraphModule, +) +from torch.fx.graph import ( + Graph, +) +from torch.ao.nn.intrinsic import _FusedModule + +from ..utils import ( + _parent_name, + get_qconfig_dtypes, +) +from ..qconfig_mapping import ( + _OBJECT_TYPE_DICT_KEY, + _MODULE_NAME_DICT_KEY, + _MODULE_NAME_REGEX_DICT_KEY, + QConfigMapping, +) + +__all__: List[str] = [] + + + +def _maybe_adjust_qconfig_for_module_name_object_type_order( + qconfig_mapping: QConfigMapping, + cur_module_path: str, + cur_object_type: Callable, + cur_object_type_idx: int, + fallback_qconfig: QConfigAny, +) -> QConfigAny: + for (module_name, object_type, index), qconfig in qconfig_mapping.module_name_object_type_order_qconfigs.items(): + if ( + (module_name == 
cur_module_path) and + (object_type == cur_object_type) and + (index == cur_object_type_idx) + ): + return qconfig + return fallback_qconfig + + +def _update_qconfig_for_fusion(model: GraphModule, qconfig_mapping: QConfigMapping): + """ + Update the QConfigMapping to account for fused modules such as LinearReLU. + This assumes the QConfigMapping's attributes have already been converted to OrderedDicts. + """ + object_type_dict = qconfig_mapping.object_type_qconfigs + if len(object_type_dict) == 0: + return qconfig_mapping + + modules = dict(model.named_modules()) + + for node in model.graph.nodes: + if node.op == 'call_module' and node.target in modules: + maybe_fused_module = modules[str(node.target)] + if not isinstance(maybe_fused_module, _FusedModule): + continue + + ops = list(maybe_fused_module._modules.values()) + fused_qconfig = object_type_dict.get(type(ops[0]), None) + + # Raise an error if the modules in the fused module have + # different qconfigs specified in the qconfig_dict + # TODO: currently it only works for modules, + # need to make this work for torch.nn.functional.relu + # TODO: currently it only works for object_type configurations, + # ideally it should work for different types of configurations, + # maybe we want to redesign this part + for op in ops[1:]: + if not qconfig_equals(object_type_dict.get(type(op), None), fused_qconfig): + raise LookupError( + "During fusion, we need to specify the same " + + f"qconfigs for all module types in {type(maybe_fused_module)} " + + f"offending type: {type(op)}") + + if fused_qconfig is not None: + object_type_dict[type(maybe_fused_module)] = fused_qconfig + +def _generate_node_name_to_qconfig( + root: torch.nn.Module, + modules: Dict[str, torch.nn.Module], + input_graph: Graph, + qconfig_mapping: QConfigMapping, + node_name_to_scope: Dict[str, Tuple[str, type]]) -> Dict[str, QConfigAny]: + global_qconfig = qconfig_mapping.global_qconfig + node_name_to_qconfig = {} + + # example: + # + # {'foo.bar': 
{F.linear: 0, F.conv2d: 1, ...}, ...} + # + # meaning in submodule 'foo.bar', we have seen 0 F.linear and + # 1 F.conv2d invocations so far. + submodule_to_object_type_to_cur_idx: Dict[str, Dict[Callable, int]] = \ + defaultdict(lambda: defaultdict(int)) + for node in input_graph.nodes: + qconfig = None + if node.op == "get_attr": + module_name, _ = _parent_name(node.target) + qconfig = _maybe_adjust_qconfig_for_module_type_or_name( + qconfig_mapping, type(modules[module_name]), module_name, global_qconfig) + qconfig_with_device_check = _add_module_to_qconfig_obs_ctr(qconfig, modules.get(node.target, None)) + elif node.op == "call_function": + # precedence: module_name_qconfig + # > function_qconfig > global_qconfig + # module_name takes precedence over function qconfig + function_qconfig = _get_object_type_qconfig( + qconfig_mapping, node.target, global_qconfig) + module_path, module_type = node_name_to_scope[node.name] + qconfig = _maybe_adjust_qconfig_for_module_type_or_name( + qconfig_mapping, module_type, module_path, function_qconfig) + + cur_object_type_idx = \ + submodule_to_object_type_to_cur_idx[module_path][node.target] + submodule_to_object_type_to_cur_idx[module_path][node.target] += 1 + qconfig = _maybe_adjust_qconfig_for_module_name_object_type_order( + qconfig_mapping, module_path, node.target, cur_object_type_idx, qconfig) + qconfig_with_device_check = _add_module_to_qconfig_obs_ctr(qconfig, modules.get(node.target, None)) + + elif node.op == "call_method": + module_path, module_type = node_name_to_scope[node.name] + # first use node.target (string) to get the qconfig + # this is to support configs like + # "object_type": [("reshpe", qconfig)] + qconfig = _maybe_adjust_qconfig_for_module_type_or_name( + qconfig_mapping, node.target, module_path, global_qconfig) + # if there is no special config for the method, we'll fall back to the + # config for the module that contains the call_method node + qconfig = 
_maybe_adjust_qconfig_for_module_type_or_name( + qconfig_mapping, module_type, module_path, qconfig) + # currently call_method does not support modifying qconfig + # by order, we can add this later if it is needed. + qconfig_with_device_check = _add_module_to_qconfig_obs_ctr(qconfig, modules.get(node.target, None)) + + elif node.op == 'call_module': + # if the node is an observer, just continue - don't add it to the qconfig_map + if _is_activation_post_process(modules[node.target]): + continue + qconfig = _maybe_adjust_qconfig_for_module_type_or_name( + qconfig_mapping, type(modules[node.target]), node.target, global_qconfig) + + module_path, module_type = node_name_to_scope[node.name] + # Note: for call_module, the module_path is the current module's name. + # to meaningfully count invocations, we need to count them in the parent + # module. + parent_name, _ = _parent_name(module_path) + cur_object_type_idx = \ + submodule_to_object_type_to_cur_idx[parent_name][module_type] + submodule_to_object_type_to_cur_idx[parent_name][module_type] += 1 + qconfig = _maybe_adjust_qconfig_for_module_name_object_type_order( + qconfig_mapping, parent_name, module_type, cur_object_type_idx, + qconfig) + qconfig_with_device_check = _add_module_to_qconfig_obs_ctr(qconfig, modules.get(node.target, None)) + + # regex is not supported eager mode propagate_qconfig_, we'll + # need to set the qconfig explicitly here in case regex + # is used + modules[node.target].qconfig = qconfig_with_device_check + else: + qconfig_with_device_check = None + + node_name_to_qconfig[node.name] = qconfig_with_device_check + return node_name_to_qconfig + + +def _check_is_valid_config_dict(config_dict: Any, allowed_keys: Set[str], dict_name: str) -> None: + r""" Checks if the given config_dict has the correct keys + + Args: + `config_dict`: dictionary whose keys we want to check + """ + + for k in config_dict.keys(): + if k not in allowed_keys: + raise ValueError( + 'Expected ' + dict_name + ' to have the 
following keys: ' + + str(allowed_keys) + '. But found \'' + k + + '\' instead.') + + +def _compare_prepare_convert_qconfig_mappings( + prepare_qconfig_mapping: QConfigMapping, + convert_qconfig_mapping: QConfigMapping): + r""" Compare the qconfig_mapping passed in convert to the one from prepare and check the values + + Args: + `prepare_qconfig_mapping`: configuration for prepare quantization step + `convert_qconfig_mapping`: configuration for convert quantization step + """ + assert qconfig_equals(prepare_qconfig_mapping.global_qconfig, convert_qconfig_mapping.global_qconfig), \ + "Expected global qconfigs to be the same in the prepare and convert quantization configs" + prepare_dicts: List[OrderedDict] = [ + prepare_qconfig_mapping.object_type_qconfigs, + prepare_qconfig_mapping.module_name_qconfigs, + prepare_qconfig_mapping.module_name_regex_qconfigs, + ] + convert_dicts: List[OrderedDict] = [ + convert_qconfig_mapping.object_type_qconfigs, + convert_qconfig_mapping.module_name_qconfigs, + convert_qconfig_mapping.module_name_regex_qconfigs, + ] + dict_names = [_OBJECT_TYPE_DICT_KEY, _MODULE_NAME_DICT_KEY, _MODULE_NAME_REGEX_DICT_KEY] + for i in range(len(prepare_dicts)): + for name, qconfig in prepare_dicts[i].items(): + assert name in convert_dicts[i], "Missing key {} {} in convert QConfigMapping \ + when it was present in prepare".format(dict_names[i], name) + assert convert_dicts[i][name] is None \ + or qconfig_equals(prepare_dicts[i][name], convert_dicts[i][name]), \ + "Expected convert QConfigMapping to have the same qconfig as prepare for key {} {}; \ + prepare: {}; convert: {}".format(dict_names[i], name, prepare_dicts[i][name], convert_dicts[i][name]) + +def _is_qconfig_supported_by_dtype_configs(qconfig: QConfig, dtype_configs: List[DTypeConfig]): + for dtype_config in dtype_configs: + is_dynamic = dtype_config.is_dynamic + if is_dynamic is None: + is_dynamic = False + input_dtype = dtype_config.input_dtype or torch.float + weight_dtype = 
dtype_config.weight_dtype or torch.float + bias_dtype = dtype_config.bias_dtype or torch.float + output_dtype = dtype_config.output_dtype or torch.float + qconfig_activation_dtype, qconfig_weight_dtype, qconfig_input_act_is_dynamic = \ + get_qconfig_dtypes(qconfig) + qconfig_bias_dtype = torch.float16 \ + if ( + qconfig_activation_dtype == torch.float16 + and qconfig_weight_dtype == torch.float16 + and not is_dynamic + ) else torch.float + + if is_dynamic: + is_match = qconfig_input_act_is_dynamic and \ + input_dtype == qconfig_activation_dtype and \ + output_dtype == torch.float and \ + weight_dtype == qconfig_weight_dtype + else: + is_match = input_dtype == qconfig_activation_dtype and \ + output_dtype == qconfig_activation_dtype and \ + weight_dtype == qconfig_weight_dtype and \ + bias_dtype == qconfig_bias_dtype + if is_match: + return True + return False + +def _get_object_type_qconfig( + qconfig_mapping: QConfigMapping, + object_type: Union[Callable, str], + fallback_qconfig: QConfigAny) -> QConfigAny: + return qconfig_mapping.object_type_qconfigs.get(object_type, fallback_qconfig) + + +def _get_module_name_regex_qconfig(qconfig_mapping, module_name, fallback_qconfig): + for regex_pattern, qconfig in qconfig_mapping.module_name_regex_qconfigs.items(): + if re.match(regex_pattern, module_name): + # first match wins + return qconfig + return fallback_qconfig + + +def _get_module_name_qconfig(qconfig_mapping, module_name, fallback_qconfig): + if module_name == '': + # module name qconfig not found + return fallback_qconfig + if module_name in qconfig_mapping.module_name_qconfigs: + return qconfig_mapping.module_name_qconfigs[module_name] + else: + parent, _ = _parent_name(module_name) + return _get_module_name_qconfig(qconfig_mapping, parent, fallback_qconfig) + + +def _maybe_adjust_qconfig_for_module_type_or_name(qconfig_mapping, module_type, module_name, global_qconfig): + # get qconfig for module_name, + # fallback to module_name_regex_qconfig, 
module_type_qconfig, + # global_qconfig if necessary + module_type_qconfig = _get_object_type_qconfig( + qconfig_mapping, module_type, global_qconfig) + module_name_regex_qconfig = _get_module_name_regex_qconfig( + qconfig_mapping, module_name, module_type_qconfig) + module_name_qconfig = _get_module_name_qconfig( + qconfig_mapping, module_name, module_name_regex_qconfig) + return module_name_qconfig + + +def _get_flattened_qconfig_dict(qconfig_mapping: QConfigMapping) -> Dict[Union[Callable, str], QConfigAny]: + """ flatten the global, object_type and module_name qconfig + to the same qconfig_dict so that it can be used by + propagate_qconfig_ function. + "module_name_regex" is ignored for now since it's not supported + in propagate_qconfig_, but it can be fixed later. + + For example: + Input: { + "": qconfig, + "object_type": [ + (torch.add, qconfig) + ], + "module_name": [ + ("conv", qconfig) + ] + } + + Output: { + "": qconfig, + torch.add: qconfig, + "conv": qconfig + } + """ + flattened: Dict[Union[Callable, str], QConfigAny] = {"": qconfig_mapping.global_qconfig} + for obj, qconfig in qconfig_mapping.object_type_qconfigs.items(): + flattened[obj] = qconfig + for obj, qconfig in qconfig_mapping.module_name_qconfigs.items(): + flattened[obj] = qconfig + return flattened + + +def _update_qconfig_for_qat( + qconfig_mapping: QConfigMapping, + backend_config: BackendConfig): + """ + Update the qconfig_mapping to account for module swaps during QAT. + During QAT we perform a module swap on the nn.Module types to the corresponding nn.qat.modules types. 
+ """ + module_to_qat_module_class = get_module_to_qat_module(backend_config) + object_type_dict = qconfig_mapping.object_type_qconfigs + new_object_type_dict = object_type_dict.copy() + for k, v in new_object_type_dict.items(): + if k in module_to_qat_module_class: + object_type_dict[module_to_qat_module_class[k]] = v diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/quantize_handler.py b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/quantize_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..57e3c97411a5060804f5ab4495d5e6a8bb9bdda9 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/quantize_handler.py @@ -0,0 +1,203 @@ +import torch +from torch.fx.graph import ( + Node, +) + +from .utils import ( + all_node_args_have_no_tensors, +) +from torch.ao.quantization.backend_config import ( + BackendConfig, + DTypeConfig, + ObservationType, +) +from torch.ao.quantization.utils import ( + NodePattern, + Pattern, + QuantizerCls, +) + +from abc import ABC +from typing import Callable, Dict, List, Type + +__all__ = [ + "QuantizeHandler", + "BinaryOpQuantizeHandler", + "CatQuantizeHandler", + "ConvReluQuantizeHandler", + "LinearReLUQuantizeHandler", + "BatchNormQuantizeHandler", + "EmbeddingQuantizeHandler", + "RNNDynamicQuantizeHandler", + "DefaultNodeQuantizeHandler", + "FixedQParamsOpQuantizeHandler", + "CopyNodeQuantizeHandler", + "GeneralTensorShapeOpQuantizeHandler", + "CustomModuleQuantizeHandler", + "StandaloneModuleQuantizeHandler", +] + +def _default_root_node_getter(node_pattern): + if node_pattern is None: + return node_pattern + while not isinstance(node_pattern, Node): + node_pattern = node_pattern[-1] + return node_pattern + +# Base Pattern Handler +class QuantizeHandler(ABC): + """ Base handler class for the quantizer patterns + """ + def __init__( + self, + node_pattern: NodePattern, + modules: Dict[str, torch.nn.Module], + root_node_getter: 
Callable = None, + is_custom_module=False, + is_standalone_module=False): + """ Records pattern information in __init__, which will be used + in convert + """ + self.node_pattern = node_pattern + self.modules = modules + if root_node_getter is None: + root_node_getter = _default_root_node_getter + self.root_node = root_node_getter(node_pattern) + self.is_custom_module_ = is_custom_module + self.is_standalone_module_ = is_standalone_module + self.num_tensor_args = 0 + # determine how many of the first two args are Tensors (versus scalars) + # this distinguishes things like "x + y" from "x + 2" or "2 + x" + if isinstance(self.root_node, Node): + cache_for_no_tensor_check: Dict[Node, bool] = {} + for arg_idx in range(len(self.root_node.args)): + arg = self.root_node.args[arg_idx] + if isinstance(arg, Node) and ( + not all_node_args_have_no_tensors( + arg, self.modules, cache_for_no_tensor_check)): + self.num_tensor_args += 1 + + def is_general_tensor_value_op(self) -> bool: + """ + Returns True if the operator works for both floating point and + quantized input, and does some computation based on the input Tensor, + or the ops that only re-arranges the Tensor values or query some metadata + about the Tensor + so we need to insert observer/fake_quant for the output of the + operator (same observer instance as input) + since the distribution of values is different for input and output + Tensors (for HistogramObserver) while they share the same quantization + parameters + Example operator: avgpool2d, reshape, transpose, maxpool2d + Example observed operator: + observer_0 - avgpool2d - observer_0 (same observer instance as input) + """ + return False + + def is_custom_module(self): + return self.is_custom_module_ + + def is_standalone_module(self): + return self.is_standalone_module_ + +def _get_quantize_handler_cls( + observation_type: ObservationType, + dtype_configs: List[DTypeConfig], + num_tensor_args_to_observation_type: Dict[int, ObservationType]) -> 
Type[QuantizeHandler]: + """ + Return a configurable QuantizeHandler that matches the given specifications from the backend. + """ + + class ConfigurableQuantizeHandler(QuantizeHandler): + def __init__( + self, + node_pattern: NodePattern, + modules: Dict[str, torch.nn.Module], + root_node_getter: Callable = None): + super().__init__(node_pattern, modules, root_node_getter) + if num_tensor_args_to_observation_type: + assert self.num_tensor_args in num_tensor_args_to_observation_type, \ + f"Must provide observation_type config for tensor number {self.num_tensor_args}" \ + f" in num_tensor_args_to_observation_type for {node_pattern}" + self.observation_type = num_tensor_args_to_observation_type[self.num_tensor_args] + else: + self.observation_type = observation_type + self.dtype_configs = dtype_configs + + def is_general_tensor_value_op(self) -> bool: + return self.observation_type == ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT + + return ConfigurableQuantizeHandler + +def _get_pattern_to_quantize_handlers(backend_config: BackendConfig) -> Dict[Pattern, QuantizerCls]: + """ + Note: Quantize handler is just a holder for some check methods like + (should_insert_observer_for_output), maybe this can be a enum as well, + we can refactor this after we convert the path for fbgemm/qnnpack fully to the + new path, this is not exposed to backend developers + """ + pattern_to_quantize_handlers = {} + for pattern, config in backend_config._pattern_complex_format_to_config.items(): + observation_type = config.observation_type + dtype_configs = config.dtype_configs + num_tensor_args_to_observation_type = config._num_tensor_args_to_observation_type + pattern_to_quantize_handlers[pattern] = \ + _get_quantize_handler_cls( + observation_type, + dtype_configs, + num_tensor_args_to_observation_type) + return pattern_to_quantize_handlers + +# TODO: remove this class, this is still exposed in torch.ao.quantization +# but we should be able to break bc +class 
BinaryOpQuantizeHandler(QuantizeHandler): + pass + +class CatQuantizeHandler(QuantizeHandler): + pass + +# TODO: remove this class +class ConvReluQuantizeHandler(QuantizeHandler): + pass + +# TODO: remove this class +class LinearReLUQuantizeHandler(QuantizeHandler): + pass + +# TODO: remove this class +class BatchNormQuantizeHandler(QuantizeHandler): + pass + +# TODO: remove this class +class EmbeddingQuantizeHandler(QuantizeHandler): + pass + +# TODO: remove this class +class RNNDynamicQuantizeHandler(QuantizeHandler): + pass + +# TODO: remove this class +class DefaultNodeQuantizeHandler(QuantizeHandler): + """ Common quantized op, first input and first output will be quantized + """ + pass + +# TODO: remove this class +class FixedQParamsOpQuantizeHandler(QuantizeHandler): + pass + +# TODO: remove +class CopyNodeQuantizeHandler(QuantizeHandler): + pass + +# TODO: remove +class GeneralTensorShapeOpQuantizeHandler(QuantizeHandler): + pass + +# TODO: not used, can be removed after torch.ao.quantization namespace is deprecated +class CustomModuleQuantizeHandler(QuantizeHandler): + pass + +# TODO: not used, can be removed after torch.ao.quantization namespace is deprecated +class StandaloneModuleQuantizeHandler(QuantizeHandler): + pass diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/tracer.py b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/tracer.py new file mode 100644 index 0000000000000000000000000000000000000000..47f326caf7043f54866f860ab464c3434eb91a5d --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/tracer.py @@ -0,0 +1,45 @@ +import torch +from torch.fx._symbolic_trace import Tracer +from torch.fx.proxy import Scope +from torch.ao.nn.intrinsic import _FusedModule +from typing import List, Callable + +__all__ = [ + "QuantizationTracer", +] + +class ScopeContextManager(torch.fx.proxy.ScopeContextManager): + def __init__( + self, + scope: Scope, + current_module: 
torch.nn.Module, + current_module_path: str + ): + super().__init__(scope, Scope(current_module_path, type(current_module))) + + +class QuantizationTracer(Tracer): + def __init__( + self, skipped_module_names: List[str], skipped_module_classes: List[Callable] + ): + super().__init__() + self.skipped_module_names = skipped_module_names + self.skipped_module_classes = skipped_module_classes + # NB: initialized the module_type of top level module to None + # we are assuming people won't configure the model with the type of top level + # module here, since people can use "" for global config + # We can change this if there is a use case that configures + # qconfig using top level module type + self.scope = Scope("", None) + self.record_stack_traces = True + + def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool: + return ( + ( + (m.__module__.startswith("torch.nn") or m.__module__.startswith("torch.ao.nn")) + and not isinstance(m, torch.nn.Sequential) + ) + or module_qualified_name in self.skipped_module_names + or type(m) in self.skipped_module_classes + or isinstance(m, _FusedModule) + ) diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/utils.py b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..cc97e14f07d9ef3ea2e8f6f4928b9c3ae4120468 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/utils.py @@ -0,0 +1,854 @@ +import copy +import torch +import torch.nn as nn +from torch.ao.quantization import ( + QConfigAny, + QuantType, +) +from torch.ao.quantization.backend_config import ( + BackendConfig, + DTypeWithConstraints, +) +from torch.ao.quantization.fake_quantize import ( + FakeQuantizeBase, + FixedQParamsFakeQuantize, +) +from torch.ao.quantization.observer import ( + FixedQParamsObserver, + ObserverBase, +) +from torch.ao.quantization.qconfig import ( + float16_static_qconfig, + 
float16_dynamic_qconfig, + qconfig_equals, +) +from torch.ao.quantization.stubs import DeQuantStub +from torch.ao.quantization.utils import ( + activation_is_statically_quantized, +) +from torch.ao.quantization.observer import _is_activation_post_process +from torch.ao.quantization.qconfig_mapping import QConfigMapping + +from torch.fx import GraphModule, map_arg + +from torch.fx.graph import ( + Graph, + Node, +) +from .custom_config import PrepareCustomConfig +# importing the lib so that the quantized_decomposed ops are registered +from ._decomposed import quantized_decomposed_lib # noqa: F401 + +from typing import Callable, Optional, List, Dict, Any, Set, Tuple, Union, Type +from dataclasses import dataclass +from collections import namedtuple +import operator +import warnings + +# TODO: revisit this list. Many helper methods shouldn't be public +__all__ = [ + "all_node_args_except_first", + "all_node_args_have_no_tensors", + "assert_and_get_unique_device", + "collect_producer_nodes", + "create_getattr_from_value", + "create_node_from_old_node_preserve_meta", + "EMPTY_ARG_DICT", + "get_custom_module_class_keys", + "get_linear_prepack_op_for_dtype", + "get_new_attr_name_with_prefix", + "get_non_observable_arg_indexes_and_types", + "get_qconv_prepack_op", + "get_skipped_module_name_and_classes", + "graph_module_from_producer_nodes", + "maybe_get_next_module", + "NodeInfo", + "node_arg_is_bias", + "node_arg_is_weight", + "NON_OBSERVABLE_ARG_DICT", + "NON_QUANTIZABLE_WEIGHT_OPS", + "return_arg_list", + "ObservedGraphModuleAttrs", +] + +NON_QUANTIZABLE_WEIGHT_OPS = {torch.nn.functional.layer_norm, torch.nn.functional.group_norm, torch.nn.functional.instance_norm} + +@dataclass +class ObservedGraphModuleAttrs: + node_name_to_qconfig: Dict[str, QConfigAny] + node_name_to_scope: Dict[str, Tuple[str, type]] + prepare_custom_config: PrepareCustomConfig + equalization_node_name_to_qconfig: Dict[str, Any] + qconfig_mapping: QConfigMapping + is_qat: bool + 
observed_node_names: Set[str] + is_observed_standalone_module: bool = False + standalone_module_input_quantized_idxs: Optional[List[int]] = None + standalone_module_output_quantized_idxs: Optional[List[int]] = None + +def node_arg_is_weight(node: Node, arg: Any, backend_config: BackendConfig) -> bool: + """Returns if node arg is weight""" + if isinstance(node, Node) and node.op == "call_function" and \ + node.target in backend_config._pattern_complex_format_to_config: + weight_index = backend_config._pattern_complex_format_to_config[node.target]._input_type_to_index.get("weight") + if weight_index is not None and weight_index < len(node.args) and node.args[weight_index] is arg: + return True + return node.kwargs.get("weight") is arg + return False + +def node_arg_is_bias(node: Node, arg: Any, backend_config: BackendConfig) -> bool: + """Returns if node arg is bias""" + if isinstance(node, Node) and node.op == "call_function" and \ + node.target in backend_config._pattern_complex_format_to_config: + bias_index = backend_config._pattern_complex_format_to_config[node.target]._input_type_to_index.get("bias") + if bias_index is not None and bias_index < len(node.args) and node.args[bias_index] is arg: + return True + return node.kwargs.get("bias") is arg + return False + +def get_custom_module_class_keys(custom_module_mapping: Dict[QuantType, Dict[Type, Type]]) -> List[Any]: + r""" Get all the unique custom module keys in the custom config dict + e.g. 
+ Input: + { + QuantType.STATIC: { + CustomModule1: ObservedCustomModule + }, + QuantType.DYNAMIC: { + CustomModule2: DynamicObservedCustomModule + }, + QuantType.WEIGHT_ONLY: { + CustomModule3: WeightOnlyObservedCustomModule + }, + } + + Output: + # extract the keys across all inner STATIC, DYNAMIC, and WEIGHT_ONLY dicts + [CustomModule1, CustomModule2, CustomModule3] + """ + # using set to dedup + float_custom_module_classes : Set[Any] = set() + for quant_mode in [QuantType.STATIC, QuantType.DYNAMIC, QuantType.WEIGHT_ONLY]: + quant_mode_custom_module_config = custom_module_mapping.get(quant_mode, {}) + quant_mode_custom_module_classes = set(quant_mode_custom_module_config.keys()) + float_custom_module_classes |= quant_mode_custom_module_classes + return list(float_custom_module_classes) + +def get_linear_prepack_op_for_dtype(dtype): + if dtype == torch.float16: + return torch.ops.quantized.linear_prepack_fp16 + elif dtype == torch.qint8: + return torch.ops.quantized.linear_prepack + else: + raise Exception("can't get linear prepack op for dtype:", dtype) + +def get_qconv_prepack_op(conv_op: Callable) -> Callable: + prepack_ops = { + torch.nn.functional.conv1d: torch.ops.quantized.conv1d_prepack, + torch.nn.functional.conv2d: torch.ops.quantized.conv2d_prepack, + torch.nn.functional.conv3d: torch.ops.quantized.conv3d_prepack + } + prepack_op = prepack_ops.get(conv_op, None) + assert prepack_op, "Didn't find prepack op for {}".format(conv_op) + return prepack_op + +# Returns a function that can get a new attribute name for module with given +# prefix, for example, +# >> get_new_observer_name = get_new_attr_name_with_prefix('_observer') +# >> new_name = get_new_observer_name(module) +# new_name will be an unused attribute name on module, e.g. 
`_observer_1` +def get_new_attr_name_with_prefix(prefix: str) -> Callable: + prefix = prefix.replace(".", "_") + + def get_new_attr_name(module: torch.nn.Module): + def get_attr_name(i: int): + return prefix + str(i) + i = 0 + attr_name = get_attr_name(i) + while hasattr(module, attr_name): + i += 1 + attr_name = get_attr_name(i) + return attr_name + return get_new_attr_name + +def collect_producer_nodes(node: Node) -> Optional[List[Node]]: + r''' Starting from a target node, trace back until we hit inpu or + getattr node. This is used to extract the chain of operators + starting from getattr to the target node, for example + def forward(self, x): + observed = self.observer(self.weight) + return F.linear(x, observed) + collect_producer_nodes(observed) will either return a list of nodes that + produces the observed node or None if we can't extract a self contained + graph without free variables(inputs of the forward function). + ''' + nodes = [node] + frontier = [node] + while frontier: + node = frontier.pop() + all_args = list(node.args) + list(node.kwargs.values()) + for arg in all_args: + if not isinstance(arg, Node): + continue + if arg.op == 'placeholder': + # hit input, can't fold in this case + return None + nodes.append(arg) + if not (arg.op == 'call_function' and arg.target == getattr): + frontier.append(arg) + return nodes + +def graph_module_from_producer_nodes( + root: GraphModule, producer_nodes: List[Node]) -> GraphModule: + r''' Construct a graph module from extracted producer nodes + from `collect_producer_nodes` function + Args: + root: the root module for the original graph + producer_nodes: a list of nodes we use to construct the graph + Return: + A graph module constructed from the producer nodes + ''' + assert len(producer_nodes) > 0, 'list of producer nodes can not be empty' + # since we traced back from node to getattrr + producer_nodes.reverse() + graph = Graph() + env: Dict[Any, Any] = {} + + def load_arg(a): + return map_arg(a, lambda node: 
env[node]) + for producer_node in producer_nodes: + env[producer_node] = graph.node_copy(producer_node, load_arg) + graph.output(load_arg(producer_nodes[-1])) + graph_module = GraphModule(root, graph) + return graph_module + +def assert_and_get_unique_device(module: torch.nn.Module) -> Any: + """ + Returns the unique device for a module, or None if no device is found. + Throws an error if multiple devices are detected. + """ + devices = {p.device for p in module.parameters()} | \ + {p.device for p in module.buffers()} + assert len(devices) <= 1, ( + "prepare only works with cpu or single-device CUDA modules, " + "but got devices {}".format(devices) + ) + device = next(iter(devices)) if len(devices) > 0 else None + return device + +def create_getattr_from_value(module: torch.nn.Module, graph: Graph, prefix: str, value: Any) -> Node: + """ + Given a value of any type, creates a getattr node corresponding to the value and + registers the value as a buffer to the module. + """ + get_new_attr_name = get_new_attr_name_with_prefix(prefix) + attr_name = get_new_attr_name(module) + device = assert_and_get_unique_device(module) + new_value = value.clone().detach() if isinstance(value, torch.Tensor) \ + else torch.tensor(value, device=device) + module.register_buffer(attr_name, new_value) + # Create get_attr with value + attr_node = graph.create_node("get_attr", attr_name) + return attr_node + +def all_node_args_have_no_tensors(node: Node, modules: Dict[str, torch.nn.Module], cache: Dict[Node, bool]) -> bool: + """ + If we know for sure that all of this node's args have no + tensors (are primitives), return True. If we either + find a tensor or are not sure, return False. Note: this + function is not exact. 
+ """ + if cache and node in cache: + return cache[node] + + result = False # will be overwritten + if not isinstance(node, Node): + result = True + elif node.op == 'placeholder': + result = False + elif node.op == 'call_module': + assert isinstance(node.target, str) + if _is_activation_post_process(modules[node.target]): + result = all_node_args_have_no_tensors(node.args[0], modules, cache) # type: ignore[arg-type] + elif node.op == 'call_module': + result = False + elif node.op == 'call_function' and node.target is operator.getitem: + result = all_node_args_have_no_tensors(node.args[0], modules, cache) # type: ignore[arg-type] + elif node.op == 'get_attr': + result = False + elif node.target is getattr and node.args[1] in ['ndim', 'shape']: + # x1 = x0.ndim + result = True + elif node.op == 'call_method' and node.target == 'size': + # x1 = x0.size(0) + result = True + else: + found_one_tensor = False + for arg in node.args: + if isinstance(arg, list): + for list_el in arg: + if isinstance(list_el, Node): + this_list_el_args_have_no_tensors = \ + all_node_args_have_no_tensors(list_el, modules, cache) + found_one_tensor = found_one_tensor or \ + (not this_list_el_args_have_no_tensors) + # If found_one_tensor is True, there is no point in + # recursing further as the end result will always + # be True. + # TODO(future PR): remove this entire function and + # change to dtype inference without recursion. + if found_one_tensor: + result = not found_one_tensor + if cache: + cache[node] = result + return result + elif isinstance(arg, int): + pass + else: + if isinstance(arg, Node): + this_arg_args_have_no_tensors = all_node_args_have_no_tensors(arg, modules, cache) + found_one_tensor = found_one_tensor or \ + (not this_arg_args_have_no_tensors) + # If found_one_tensor is True, there is no point in + # recursing further as the end result will always + # be True. + # TODO(future PR): remove this entire function and + # change to dtype inference without recursion. 
+ if found_one_tensor: + result = not found_one_tensor + if cache: + cache[node] = result + return result + else: + found_one_tensor = True + result = not found_one_tensor + if cache: + cache[node] = result + return result + +def all_node_args_except_first(node: Node) -> List[int]: + """ + Returns all node arg indices after first + """ + return list(range(1, len(node.args))) + +def return_arg_list(arg_indices: List[int]) -> Callable[[Node], List[int]]: + """ + Constructs a function that takes a node as arg and returns the arg_indices + that are valid for node.args + """ + def arg_indices_func(node: Node) -> List[int]: + return [i for i in arg_indices if i < len(node.args)] + return arg_indices_func + +NodeInfo = namedtuple("NodeInfo", "op target") + +# this dict identifies which indices of a node are non tensors +# so that they can be propagated correctly since inserting observers +# for them would cause errors + +NON_OBSERVABLE_ARG_DICT: Dict[NodeInfo, Dict[Union[type, torch.dtype], Callable[[Node], List[int]]]] = { + NodeInfo("call_method", "masked_fill") : { + torch.bool: return_arg_list([1]), + float: return_arg_list([2]) + }, + NodeInfo("call_method", "permute") : { + int: all_node_args_except_first + }, + NodeInfo("call_method", "repeat") : { + int: all_node_args_except_first + }, + NodeInfo("call_method", "reshape") : { + int: all_node_args_except_first + }, + NodeInfo("call_method", "size") : { + int: return_arg_list([1]) + }, + NodeInfo("call_method", "transpose") : { + int: all_node_args_except_first + }, + NodeInfo("call_method", torch.transpose) : { + int: all_node_args_except_first + }, + NodeInfo("call_method", "unsqueeze") : { + int: return_arg_list([1]) + }, + NodeInfo("call_method", "unsqueeze_") : { + int: return_arg_list([1]) + }, + NodeInfo("call_method", torch.unsqueeze) : { + int: return_arg_list([1]) + }, + NodeInfo("call_method", "view") : { + int: all_node_args_except_first + }, +} + +EMPTY_ARG_DICT: Dict[Union[type, torch.dtype], 
Callable[[Node], List[int]]] = {} + +def get_non_observable_arg_indexes_and_types(node: Node) -> Dict[Union[type, torch.dtype], Callable[[Node], List[int]]]: + """ + Returns a dict with of non float tensor types as keys and values which correspond to a + function to retrieve the list (which takes the node as an argument) + """ + info = NodeInfo(node.op, node.target) + + return NON_OBSERVABLE_ARG_DICT.get(info, EMPTY_ARG_DICT) + +def maybe_get_next_module( + node: Node, + modules: Dict[str, nn.Module], + target_module_type: Optional[Type[nn.Module]] = None, + target_functional_type: Any = None, +) -> Optional[Node]: + """ Gets the next module that matches what is needed in + is_target_module_type if it exists + + Args: + node: The node whose users we want to look at + target_module_type: Module type that we want to check + target_functional_type: Functional type that we want to check + """ + + for user, _ in node.users.items(): + if user.op == 'call_module' and target_module_type is not None and \ + isinstance(modules[str(user.target)], target_module_type): + return user + elif (user.op == 'call_function' and target_functional_type is not None and + user.target == target_functional_type): + return user + + return None + +def create_node_from_old_node_preserve_meta( + quantized_graph: Graph, + create_node_args: Tuple[Any, ...], + old_node: Node, +) -> Node: + """ + Creates `new_node` and copies the necessary metadata to it from `old_node`. 
+ """ + new_node = quantized_graph.create_node(*create_node_args) + new_node.stack_trace = old_node.stack_trace + return new_node + +def get_skipped_module_name_and_classes( + prepare_custom_config: PrepareCustomConfig, + is_standalone_module: bool) -> Tuple[List[str], List[Type[Any]]]: + skipped_module_names = copy.copy(prepare_custom_config.non_traceable_module_names) + skipped_module_classes = copy.copy(prepare_custom_config.non_traceable_module_classes) + if not is_standalone_module: + # standalone module and custom module config are applied in top level module + skipped_module_names += list(prepare_custom_config.standalone_module_names.keys()) + skipped_module_classes += list(prepare_custom_config.standalone_module_classes.keys()) + skipped_module_classes += get_custom_module_class_keys(prepare_custom_config.float_to_observed_mapping) + + return skipped_module_names, skipped_module_classes + +def _is_custom_module_lstm( + node: Node, + named_modules: Dict[str, torch.nn.Module], + qconfig: QConfigAny = None, + # QuantizeHandler, but we cannot include the type here due to circular imports + qhandler: Optional[Any] = None, +) -> bool: + """ + Return whether this refers to the custom module LSTM flow. + """ + mod = _get_module(node, named_modules) + if qconfig is not None and qhandler is not None: + assert isinstance(qhandler, torch.ao.quantization.fx.quantize_handler.QuantizeHandler) # type: ignore[attr-defined] + return isinstance(mod, torch.nn.LSTM) and \ + activation_is_statically_quantized(qconfig) and \ + qhandler.is_custom_module() + else: + return isinstance(mod, torch.ao.nn.quantizable.LSTM) + +def _get_module(node: Node, named_modules: Dict[str, torch.nn.Module]) -> Optional[torch.nn.Module]: + """ + If `node` refers to a call_module node, return the module, else None. 
+ """ + if node.op == "call_module" and str(node.target) in named_modules: + return named_modules[str(node.target)] + else: + return None + +def _insert_dequant_stub( + node: Node, + model: torch.nn.Module, + named_modules: Dict[str, torch.nn.Module], + graph: Graph, +) -> Node: + """ + Attach a `DeQuantStub` to the model and create a node that calls this + `DeQuantStub` on the output of `node`, similar to how observers are inserted. + """ + prefix = "dequant_stub_" + get_new_dequant_stub_name = get_new_attr_name_with_prefix(prefix) + dequant_stub_name = get_new_dequant_stub_name(model) + dequant_stub = DeQuantStub() + setattr(model, dequant_stub_name, dequant_stub) + named_modules[dequant_stub_name] = dequant_stub + with graph.inserting_after(node): + return graph.call_module(dequant_stub_name, (node,)) + +def _insert_dequant_stubs_for_custom_module_lstm_output( + node: Node, + model: torch.nn.Module, + named_modules: Dict[str, torch.nn.Module], + graph: Graph, +) -> Node: + """ + Insert DeQuantStubs after each internal output node of custom module LSTM. + + Custom module LSTM outputs are nested tuples of the sturcture (output, (hidden0, hidden1)), + Since we cannot dequantize a tuple as a whole, we must first break down the tuple into its + components through `getitem`. This function transforms the graph as follows: + + (1) Split the LSTM node into (output, (hidden0, hidden1)) + (2) Insert a DeQuantStub after each internal node + (3) Recombine the DeQuantStubs into the same structure as before + (4) Reroute all consumers of the original LSTM node and its sub-nodes + (e.g. 
lstm[0]) + + Before: + lstm_output + | + v + original_user(s) + After: + lstm_output + / \\ + / (getitem) \\ + / \\ + v v + output hidden + | / \\ + (DeQuantStub) (getitem) + | / \\ + v v v + output_dq hidden0 hidden1 + | | | + | (DeQuantStub) (DeQuantStub) + | | | + | v v + | hidden0_dq hidden1_dq + | \\ / + | (tuple) + | \\ / + | v v + | hidden_dq + \\ / + \\ (tuple) / + v v + lstm_output_dq + | + v + original_user(s) + + For step (4), reroute all users of the original LSTM node(s) as follows: + lstm_output -> lstm_output_dq + lstm_output[0] -> output_dq + lstm_output[1] -> hidden_dq + lstm_output[1][0] -> hidden0_dq + lstm_output[1][1] -> hidden1_dq + + Return the node `lstm_output_dq`. + """ + # (1) Split the LSTM node into (output, (hidden0, hidden1)) + # (2) Insert a DeQuantStub after each internal node + with graph.inserting_after(node): + output = graph.call_function(operator.getitem, (node, 0)) + output_dq = _insert_dequant_stub(output, model, named_modules, graph) + with graph.inserting_after(output_dq): + hidden = graph.call_function(operator.getitem, (node, 1)) + with graph.inserting_after(hidden): + hidden0 = graph.call_function(operator.getitem, (hidden, 0)) + hidden0_dq = _insert_dequant_stub(hidden0, model, named_modules, graph) + with graph.inserting_after(hidden0_dq): + hidden1 = graph.call_function(operator.getitem, (hidden, 1)) + hidden1_dq = _insert_dequant_stub(hidden1, model, named_modules, graph) + + # (3) Recombine the DeQuantStubs into the same structure as before + with graph.inserting_after(hidden1_dq): + hidden_dq = graph.call_function(tuple, ([hidden0_dq, hidden1_dq],)) + with graph.inserting_after(hidden_dq): + lstm_output_dq = graph.call_function(tuple, ([output_dq, hidden_dq],)) + + # (4) Reroute all consumers of the original LSTM node and its sub-nodes + for user in list(node.users.keys()): + if user != output and user != hidden: + user.replace_input_with(node, lstm_output_dq) + # The getitem and tuple nodes we added here may 
interfere with reference quantized + # pattern matching, so we need to redirect the consumers of internal nodes to the + # corresponding nodes with DeQuantStubs (e.g. lstm_output_dq[0] -> output_dq) attached, + # in order to preserve reference patterns like "dequantize - consumer - quantize". + _reroute_tuple_getitem_pattern(graph) + return lstm_output_dq + +def _maybe_get_custom_module_lstm_from_node_arg( + arg: Node, + named_modules: Dict[str, torch.nn.Module], +) -> Optional[Node]: + """ + Given an argument of a node, if the argument refers to the path through which the node + is a consumer of custom module LSTM, return the custom module LSTM node, or None otherwise. + + This is used to determine whether a node is a consumer of custom module LSTM, and, if so, + skip inserting input observers for this node. This is because custom module LSTM produces + quantized outputs, so inserting an input observer for the consumer of custom module LSTM + would unnecessarily quantize the outputs again. + + lstm -> consumer + + In practice, however, custom module LSTM outputs a tuple (output, (hidden0, hidden1)) with + DeQuantStubs attached to each internal node (see `_insert_dequant_stubs_for_custom_module_lstm_output`). + This tuple can be consumed in one of four ways: + + lstm -> getitem -> DeQuantStub -> consumer # consume lstm[0] + lstm -> getitem -> getitem -> DeQuantStub -> tuple -> consumer # consume lstm[1] + lstm -> getitem -> getitem -> DeQuantStub -> consumer # consume lstm[1][0] or lstm[1][1] + lstm -> getitem -> DeQuantStub -> tuple -> consumer # consume lstm + + Thus, we must match against the above patterns instead of simply checking the parent node + to determine whether this node is a consumer of a custom module LSTM. 
+ """ + def match_dq(a): + return isinstance(_get_module(a, named_modules), DeQuantStub) + + def match_lstm(a): + return _is_custom_module_lstm(a, named_modules) + + def match_getitem(a): + return a.op == "call_function" and a.target == operator.getitem + + def match_tuple(a): + return a.op == "call_function" and a.target == tuple + + def _match_pattern(match_pattern: List[Callable]) -> Optional[Node]: + """ + Traverse up the graph and match the args one by one. + If there is a match, return the last matched node, or None otherwise. + """ + a = arg + for i, match in enumerate(match_pattern): + if not match(a): + return None + # Match next arg, for tuple the arg is a tuple of a list, e.g. ([dq_1, other_node],) + if i < len(match_pattern) - 1: + if match == match_tuple: + a = a.args[0][0] # type: ignore[assignment,index] + else: + a = a.args[0] # type: ignore[assignment] + return a + + all_match_patterns = [ + [match_dq, match_getitem, match_lstm], + [match_tuple, match_dq, match_getitem, match_getitem, match_lstm], + [match_dq, match_getitem, match_getitem, match_lstm], + [match_tuple, match_dq, match_getitem, match_lstm], + ] + + for p in all_match_patterns: + matched_node = _match_pattern(p) + if matched_node is not None: + return matched_node + return None + +def _reroute_tuple_getitem_pattern(graph: Graph): + """ + Search for patterns where N consecutive `tuple` call_function nodes are followed by + N consecutive `getitem` call_function nodes that are "reverses" of the `tuple` nodes. + If we find this pattern, reroute the consumers of the last `getitem` to skip these + N `tuple` and `getitem` nodes. 
+ + Before: + + a b c + | \\ / + \\ tuple + \\ / + tuple + | + getitem(1) + | + getitem(0) + | + d + + After: + + b + | + d + """ + def find_patterns( + node: Node, + index_stack: List[int], + current_pattern: List[Node], + matched_patterns: List[List[Node]], + seen: Set[Tuple[Node, Tuple[int, ...]]]): + """ + Traverse the graph recursively to match for the N-tuple - N-getitem patterns, + starting at the given node. + + We use a stack to keep track of the expected `getitem` indices, since these are + reversed from the `tuple` indices. In the above example, the stack after + (b -> tuple -> tuple) will be [0, 1], which will be popped by getitem(1) first + and then by getitem(0). + + TODO: traverse upwards from the output and handle the case when tuple is not a + separate node, e.g. graph.call_function(operator.getitem, args=(a, (b, c))) + """ + if len(index_stack) == 0 and len(current_pattern) > 0: + matched_patterns.append(copy.copy(current_pattern)) + current_pattern.clear() + + # Avoid duplicating work + state = (node, tuple(index_stack)) + if state in seen: + return + seen.add(state) + + # Iterate through users of this node to find tuple/getitem nodes to match + for user in node.users: + if user.op == "call_function" and user.target == tuple: + for i, user_arg in enumerate(user.args[0]): # type: ignore[arg-type] + if user_arg == node: + index_stack.append(i) + current_pattern.append(user) + find_patterns(user, index_stack, current_pattern, matched_patterns, seen) + elif user.op == "call_function" and user.target == operator.getitem: + if len(index_stack) > 0: + if user.args[1] == index_stack[-1]: + index_stack.pop() + current_pattern.append(user) + find_patterns(user, index_stack, current_pattern, matched_patterns, seen) + return matched_patterns + + # Collect all matched patterns + matched_patterns: List[List[Node]] = [] + seen: Set[Tuple[Node, Tuple[int, ...]]] = set() # (node, index_stack) + for node in graph.nodes: + find_patterns(node, [], [], 
matched_patterns, seen) + + # For each pattern, redirect all consumers of the last getitem node to the correct input + # of the first tuple node + for pattern in matched_patterns: + first_tuple = pattern[0] + last_getitem = pattern[-1] + assert first_tuple.op == "call_function" and first_tuple.target == tuple + assert last_getitem.op == "call_function" and last_getitem.target == operator.getitem + last_getitem_index = last_getitem.args[1] + new_input = first_tuple.args[0][last_getitem_index] # type: ignore[index] + for user in list(last_getitem.users.keys()): + user.replace_input_with(last_getitem, new_input) + +def _get_observer_from_activation_post_process( + activation_post_process: Union[ObserverBase, FakeQuantizeBase], +) -> ObserverBase: + """ + If `activation_post_process` is an observer, return the observer. + If `activation_post_process` is a fake quantize, return the internal observer. + """ + if isinstance(activation_post_process, ObserverBase): + return activation_post_process + else: + assert isinstance(activation_post_process, FakeQuantizeBase) + return activation_post_process.activation_post_process # type: ignore[return-value] + +def _qconfig_satisfies_dtype_config_constraints( + qconfig: QConfigAny, + dtype_with_constraints: DTypeWithConstraints, + is_activation: bool = True) -> bool: + """ + Return whether `qconfig` satisfies the following constraints from the backend, + specified through the activation and weight DTypeWithConstraints. + + 1. QConfig specified a quantization range that falls within the backend's, if any + 2. QConfig specified a min scale value that is >= the backend's, if any + 3. QConfig specified a FixedQParamsObserver or FixedQParamsFakeQuantize that has + scale and zero point that match the backend's, if any + + If `is_activation` is True, we check `qconfig.activation`, else we check `qconfig.weight`. + If `qconfig` or `dtype_with_constraints.dtype` is None, or the dtypes do not match, return True. 
+ """ + # TODO: log warnings only when the user enabled a debug flag + def _activation_post_process_satisfies_dtype_config_constraints( + activation_post_process: Union[ObserverBase, FakeQuantizeBase], + dtype_with_constraints: DTypeWithConstraints, + debug_string: str) -> bool: + observer = _get_observer_from_activation_post_process(activation_post_process) + app_quant_min = getattr(observer, "quant_min", None) + app_quant_max = getattr(observer, "quant_max", None) + # TODO: for now, just use the existing eps value as scale_min. In the future, we should + # resolve the differences between the two, either by renaming eps or some other way + app_scale_min = getattr(observer, "eps", None) + backend_quant_min = dtype_with_constraints.quant_min_lower_bound + backend_quant_max = dtype_with_constraints.quant_max_upper_bound + backend_scale_min = dtype_with_constraints.scale_min_lower_bound + backend_scale_exact_match = dtype_with_constraints.scale_exact_match + backend_zero_point_exact_match = dtype_with_constraints.zero_point_exact_match + # check quantization ranges + if backend_quant_min is not None and backend_quant_max is not None: + if app_quant_min is None or app_quant_max is None: + warnings.warn("QConfig %s must specify 'quant_min' and 'quant_max', ignoring %s" % + (debug_string, qconfig)) + return False + elif app_quant_min < backend_quant_min or app_quant_max > backend_quant_max: + warnings.warn(("QConfig %s quantization range must fall within the backend's:\n" + "QConfig range = (%s, %s), BackendConfig range = (%s, %s), ignoring %s") % + (debug_string, app_quant_min, app_quant_max, + backend_quant_min, backend_quant_max, qconfig)) + return False + # check scale min + if backend_scale_min is not None: + if app_scale_min is None: + warnings.warn("QConfig %s must specify 'eps', ignoring %s" % (debug_string, qconfig)) + return False + elif app_scale_min < backend_scale_min: + warnings.warn(("QConfig %s eps (%s) must be greater than or equal to " + "the backend's 
min scale value (%s), ignoring %s") % + (debug_string, app_scale_min, backend_scale_min, qconfig)) + return False + # check fixed scale and zero point + if backend_scale_exact_match is not None and backend_zero_point_exact_match is not None: + # For tests only, accept the following qconfigs for now + # TODO: handle fp16 qconfigs properly + for accepted_qconfig in [float16_static_qconfig, float16_dynamic_qconfig]: + if qconfig_equals(qconfig, accepted_qconfig): + return True + suggestion_str = ( + "Please use torch.ao.quantization.get_default_qconfig_mapping or " + "torch.ao.quantization.get_default_qat_qconfig_mapping. Example:\n" + " qconfig_mapping = get_default_qconfig_mapping(\"fbgemm\")\n" + " model = prepare_fx(model, qconfig_mapping, example_inputs)" + ) + if not isinstance(activation_post_process, FixedQParamsObserver) and \ + not isinstance(activation_post_process, FixedQParamsFakeQuantize): + warnings.warn(("QConfig must specify a FixedQParamsObserver or a FixedQParamsFakeQuantize " + "for fixed qparams ops, ignoring %s.\n%s") % (qconfig, suggestion_str)) + return False + if observer.scale != backend_scale_exact_match or observer.zero_point != backend_zero_point_exact_match: + warnings.warn(("QConfig fixed scale (%s) and zero point (%s) do not match the backend's " + "(%s and %s), ignoring %s.\n%s") % + (observer.scale, observer.zero_point, backend_scale_exact_match, + backend_zero_point_exact_match, qconfig, suggestion_str)) + return False + return True + + if qconfig is None or dtype_with_constraints.dtype is None: + return True + + activation_post_process_ctr = qconfig.activation if is_activation else qconfig.weight + debug_string = "activation" if is_activation else "weight" + satisfies_constraints = True + if activation_post_process_ctr is not None: + activation_post_process = activation_post_process_ctr() + assert _is_activation_post_process(activation_post_process) + # If dtypes don't match, don't check the activation_post_process and return True 
early + if activation_post_process.dtype != dtype_with_constraints.dtype: + return True + satisfies_constraints = _activation_post_process_satisfies_dtype_config_constraints( + activation_post_process, dtype_with_constraints, debug_string) + return satisfies_constraints