ZTWHHH commited on
Commit
4d01aa8
·
verified ·
1 Parent(s): 195a1d4

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. deepseek/lib/python3.10/site-packages/numpy/array_api/__pycache__/__init__.cpython-310.pyc +0 -0
  2. deepseek/lib/python3.10/site-packages/numpy/array_api/__pycache__/_constants.cpython-310.pyc +0 -0
  3. deepseek/lib/python3.10/site-packages/numpy/doc/__init__.py +26 -0
  4. deepseek/lib/python3.10/site-packages/numpy/doc/__pycache__/__init__.cpython-310.pyc +0 -0
  5. deepseek/lib/python3.10/site-packages/numpy/doc/__pycache__/constants.cpython-310.pyc +0 -0
  6. deepseek/lib/python3.10/site-packages/numpy/doc/__pycache__/ufuncs.cpython-310.pyc +0 -0
  7. deepseek/lib/python3.10/site-packages/numpy/random/LICENSE.md +71 -0
  8. deepseek/lib/python3.10/site-packages/numpy/random/__init__.py +215 -0
  9. deepseek/lib/python3.10/site-packages/numpy/random/__init__.pyi +72 -0
  10. deepseek/lib/python3.10/site-packages/numpy/random/_bounded_integers.pxd +29 -0
  11. deepseek/lib/python3.10/site-packages/numpy/random/_common.pxd +106 -0
  12. deepseek/lib/python3.10/site-packages/numpy/random/_generator.pyi +681 -0
  13. deepseek/lib/python3.10/site-packages/numpy/random/_mt19937.pyi +22 -0
  14. deepseek/lib/python3.10/site-packages/numpy/random/_pcg64.pyi +42 -0
  15. deepseek/lib/python3.10/site-packages/numpy/random/_philox.pyi +36 -0
  16. deepseek/lib/python3.10/site-packages/numpy/random/_pickle.py +80 -0
  17. deepseek/lib/python3.10/site-packages/numpy/random/_sfc64.pyi +28 -0
  18. deepseek/lib/python3.10/site-packages/numpy/random/bit_generator.pxd +35 -0
  19. deepseek/lib/python3.10/site-packages/numpy/random/mtrand.pyi +571 -0
  20. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/__init__.cpython-310.pyc +0 -0
  21. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_equalize.cpython-310.pyc +0 -0
  22. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_learnable_fake_quantize.cpython-310.pyc +0 -0
  23. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/qconfig.cpython-310.pyc +0 -0
  24. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantize_jit.cpython-310.pyc +0 -0
  25. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/utils.cpython-310.pyc +0 -0
  26. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/_pt2e/__init__.py +0 -0
  27. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/_pt2e/__pycache__/__init__.cpython-310.pyc +0 -0
  28. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/_pt2e/__pycache__/utils.cpython-310.pyc +0 -0
  29. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/_pt2e/utils.py +138 -0
  30. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__init__.py +3 -0
  31. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/__init__.cpython-310.pyc +0 -0
  32. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_decomposed.cpython-310.pyc +0 -0
  33. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_equalize.cpython-310.pyc +0 -0
  34. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_lower_to_native_backend.cpython-310.pyc +0 -0
  35. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/convert.cpython-310.pyc +0 -0
  36. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/custom_config.cpython-310.pyc +0 -0
  37. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/fuse.cpython-310.pyc +0 -0
  38. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/fuse_handler.cpython-310.pyc +0 -0
  39. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/graph_module.cpython-310.pyc +0 -0
  40. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/lower_to_fbgemm.cpython-310.pyc +0 -0
  41. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/lower_to_qnnpack.cpython-310.pyc +0 -0
  42. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/match_utils.cpython-310.pyc +0 -0
  43. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/pattern_utils.cpython-310.pyc +0 -0
  44. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/prepare.cpython-310.pyc +0 -0
  45. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/qconfig_mapping_utils.cpython-310.pyc +0 -0
  46. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/quantize_handler.cpython-310.pyc +0 -0
  47. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/tracer.cpython-310.pyc +0 -0
  48. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/utils.cpython-310.pyc +0 -0
  49. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_decomposed.py +416 -0
  50. deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_equalize.py +824 -0
deepseek/lib/python3.10/site-packages/numpy/array_api/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (9.86 kB). View file
 
deepseek/lib/python3.10/site-packages/numpy/array_api/__pycache__/_constants.cpython-310.pyc ADDED
Binary file (258 Bytes). View file
 
deepseek/lib/python3.10/site-packages/numpy/doc/__init__.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ ref_dir = os.path.join(os.path.dirname(__file__))
4
+
5
+ __all__ = sorted(f[:-3] for f in os.listdir(ref_dir) if f.endswith('.py') and
6
+ not f.startswith('__'))
7
+
8
+ for f in __all__:
9
+ __import__(__name__ + '.' + f)
10
+
11
+ del f, ref_dir
12
+
13
+ __doc__ = """\
14
+ Topical documentation
15
+ =====================
16
+
17
+ The following topics are available:
18
+ %s
19
+
20
+ You can view them by
21
+
22
+ >>> help(np.doc.TOPIC) #doctest: +SKIP
23
+
24
+ """ % '\n- '.join([''] + __all__)
25
+
26
+ __all__.extend(['__doc__'])
deepseek/lib/python3.10/site-packages/numpy/doc/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (788 Bytes). View file
 
deepseek/lib/python3.10/site-packages/numpy/doc/__pycache__/constants.cpython-310.pyc ADDED
Binary file (8.03 kB). View file
 
deepseek/lib/python3.10/site-packages/numpy/doc/__pycache__/ufuncs.cpython-310.pyc ADDED
Binary file (5.52 kB). View file
 
deepseek/lib/python3.10/site-packages/numpy/random/LICENSE.md ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ **This software is dual-licensed under the The University of Illinois/NCSA
2
+ Open Source License (NCSA) and The 3-Clause BSD License**
3
+
4
+ # NCSA Open Source License
5
+ **Copyright (c) 2019 Kevin Sheppard. All rights reserved.**
6
+
7
+ Developed by: Kevin Sheppard (<kevin.sheppard@economics.ox.ac.uk>,
8
+ <kevin.k.sheppard@gmail.com>)
9
+ [http://www.kevinsheppard.com](http://www.kevinsheppard.com)
10
+
11
+ Permission is hereby granted, free of charge, to any person obtaining a copy of
12
+ this software and associated documentation files (the "Software"), to deal with
13
+ the Software without restriction, including without limitation the rights to
14
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
15
+ of the Software, and to permit persons to whom the Software is furnished to do
16
+ so, subject to the following conditions:
17
+
18
+ Redistributions of source code must retain the above copyright notice, this
19
+ list of conditions and the following disclaimers.
20
+
21
+ Redistributions in binary form must reproduce the above copyright notice, this
22
+ list of conditions and the following disclaimers in the documentation and/or
23
+ other materials provided with the distribution.
24
+
25
+ Neither the names of Kevin Sheppard, nor the names of any contributors may be
26
+ used to endorse or promote products derived from this Software without specific
27
+ prior written permission.
28
+
29
+ **THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
32
+ CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
33
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
34
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
35
+ THE SOFTWARE.**
36
+
37
+
38
+ # 3-Clause BSD License
39
+ **Copyright (c) 2019 Kevin Sheppard. All rights reserved.**
40
+
41
+ Redistribution and use in source and binary forms, with or without
42
+ modification, are permitted provided that the following conditions are met:
43
+
44
+ 1. Redistributions of source code must retain the above copyright notice,
45
+ this list of conditions and the following disclaimer.
46
+
47
+ 2. Redistributions in binary form must reproduce the above copyright notice,
48
+ this list of conditions and the following disclaimer in the documentation
49
+ and/or other materials provided with the distribution.
50
+
51
+ 3. Neither the name of the copyright holder nor the names of its contributors
52
+ may be used to endorse or promote products derived from this software
53
+ without specific prior written permission.
54
+
55
+ **THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
56
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
57
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
58
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
59
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
60
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
61
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
62
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
63
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
64
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
65
+ THE POSSIBILITY OF SUCH DAMAGE.**
66
+
67
+ # Components
68
+
69
+ Many parts of this module have been derived from original sources,
70
+ often the algorithm's designer. Component licenses are located with
71
+ the component code.
deepseek/lib/python3.10/site-packages/numpy/random/__init__.py ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ ========================
3
+ Random Number Generation
4
+ ========================
5
+
6
+ Use ``default_rng()`` to create a `Generator` and call its methods.
7
+
8
+ =============== =========================================================
9
+ Generator
10
+ --------------- ---------------------------------------------------------
11
+ Generator Class implementing all of the random number distributions
12
+ default_rng Default constructor for ``Generator``
13
+ =============== =========================================================
14
+
15
+ ============================================= ===
16
+ BitGenerator Streams that work with Generator
17
+ --------------------------------------------- ---
18
+ MT19937
19
+ PCG64
20
+ PCG64DXSM
21
+ Philox
22
+ SFC64
23
+ ============================================= ===
24
+
25
+ ============================================= ===
26
+ Getting entropy to initialize a BitGenerator
27
+ --------------------------------------------- ---
28
+ SeedSequence
29
+ ============================================= ===
30
+
31
+
32
+ Legacy
33
+ ------
34
+
35
+ For backwards compatibility with previous versions of numpy before 1.17, the
36
+ various aliases to the global `RandomState` methods are left alone and do not
37
+ use the new `Generator` API.
38
+
39
+ ==================== =========================================================
40
+ Utility functions
41
+ -------------------- ---------------------------------------------------------
42
+ random Uniformly distributed floats over ``[0, 1)``
43
+ bytes Uniformly distributed random bytes.
44
+ permutation Randomly permute a sequence / generate a random sequence.
45
+ shuffle Randomly permute a sequence in place.
46
+ choice Random sample from 1-D array.
47
+ ==================== =========================================================
48
+
49
+ ==================== =========================================================
50
+ Compatibility
51
+ functions - removed
52
+ in the new API
53
+ -------------------- ---------------------------------------------------------
54
+ rand Uniformly distributed values.
55
+ randn Normally distributed values.
56
+ ranf Uniformly distributed floating point numbers.
57
+ random_integers Uniformly distributed integers in a given range.
58
+ (deprecated, use ``integers(..., closed=True)`` instead)
59
+ random_sample Alias for `random_sample`
60
+ randint Uniformly distributed integers in a given range
61
+ seed Seed the legacy random number generator.
62
+ ==================== =========================================================
63
+
64
+ ==================== =========================================================
65
+ Univariate
66
+ distributions
67
+ -------------------- ---------------------------------------------------------
68
+ beta Beta distribution over ``[0, 1]``.
69
+ binomial Binomial distribution.
70
+ chisquare :math:`\\chi^2` distribution.
71
+ exponential Exponential distribution.
72
+ f F (Fisher-Snedecor) distribution.
73
+ gamma Gamma distribution.
74
+ geometric Geometric distribution.
75
+ gumbel Gumbel distribution.
76
+ hypergeometric Hypergeometric distribution.
77
+ laplace Laplace distribution.
78
+ logistic Logistic distribution.
79
+ lognormal Log-normal distribution.
80
+ logseries Logarithmic series distribution.
81
+ negative_binomial Negative binomial distribution.
82
+ noncentral_chisquare Non-central chi-square distribution.
83
+ noncentral_f Non-central F distribution.
84
+ normal Normal / Gaussian distribution.
85
+ pareto Pareto distribution.
86
+ poisson Poisson distribution.
87
+ power Power distribution.
88
+ rayleigh Rayleigh distribution.
89
+ triangular Triangular distribution.
90
+ uniform Uniform distribution.
91
+ vonmises Von Mises circular distribution.
92
+ wald Wald (inverse Gaussian) distribution.
93
+ weibull Weibull distribution.
94
+ zipf Zipf's distribution over ranked data.
95
+ ==================== =========================================================
96
+
97
+ ==================== ==========================================================
98
+ Multivariate
99
+ distributions
100
+ -------------------- ----------------------------------------------------------
101
+ dirichlet Multivariate generalization of Beta distribution.
102
+ multinomial Multivariate generalization of the binomial distribution.
103
+ multivariate_normal Multivariate generalization of the normal distribution.
104
+ ==================== ==========================================================
105
+
106
+ ==================== =========================================================
107
+ Standard
108
+ distributions
109
+ -------------------- ---------------------------------------------------------
110
+ standard_cauchy Standard Cauchy-Lorentz distribution.
111
+ standard_exponential Standard exponential distribution.
112
+ standard_gamma Standard Gamma distribution.
113
+ standard_normal Standard normal distribution.
114
+ standard_t Standard Student's t-distribution.
115
+ ==================== =========================================================
116
+
117
+ ==================== =========================================================
118
+ Internal functions
119
+ -------------------- ---------------------------------------------------------
120
+ get_state Get tuple representing internal state of generator.
121
+ set_state Set state of generator.
122
+ ==================== =========================================================
123
+
124
+
125
+ """
126
+ __all__ = [
127
+ 'beta',
128
+ 'binomial',
129
+ 'bytes',
130
+ 'chisquare',
131
+ 'choice',
132
+ 'dirichlet',
133
+ 'exponential',
134
+ 'f',
135
+ 'gamma',
136
+ 'geometric',
137
+ 'get_state',
138
+ 'gumbel',
139
+ 'hypergeometric',
140
+ 'laplace',
141
+ 'logistic',
142
+ 'lognormal',
143
+ 'logseries',
144
+ 'multinomial',
145
+ 'multivariate_normal',
146
+ 'negative_binomial',
147
+ 'noncentral_chisquare',
148
+ 'noncentral_f',
149
+ 'normal',
150
+ 'pareto',
151
+ 'permutation',
152
+ 'poisson',
153
+ 'power',
154
+ 'rand',
155
+ 'randint',
156
+ 'randn',
157
+ 'random',
158
+ 'random_integers',
159
+ 'random_sample',
160
+ 'ranf',
161
+ 'rayleigh',
162
+ 'sample',
163
+ 'seed',
164
+ 'set_state',
165
+ 'shuffle',
166
+ 'standard_cauchy',
167
+ 'standard_exponential',
168
+ 'standard_gamma',
169
+ 'standard_normal',
170
+ 'standard_t',
171
+ 'triangular',
172
+ 'uniform',
173
+ 'vonmises',
174
+ 'wald',
175
+ 'weibull',
176
+ 'zipf',
177
+ ]
178
+
179
+ # add these for module-freeze analysis (like PyInstaller)
180
+ from . import _pickle
181
+ from . import _common
182
+ from . import _bounded_integers
183
+
184
+ from ._generator import Generator, default_rng
185
+ from .bit_generator import SeedSequence, BitGenerator
186
+ from ._mt19937 import MT19937
187
+ from ._pcg64 import PCG64, PCG64DXSM
188
+ from ._philox import Philox
189
+ from ._sfc64 import SFC64
190
+ from .mtrand import *
191
+
192
+ __all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937',
193
+ 'Philox', 'PCG64', 'PCG64DXSM', 'SFC64', 'default_rng',
194
+ 'BitGenerator']
195
+
196
+
197
+ def __RandomState_ctor():
198
+ """Return a RandomState instance.
199
+
200
+ This function exists solely to assist (un)pickling.
201
+
202
+ Note that the state of the RandomState returned here is irrelevant, as this
203
+ function's entire purpose is to return a newly allocated RandomState whose
204
+ state pickle can set. Consequently the RandomState returned by this function
205
+ is a freshly allocated copy with a seed=0.
206
+
207
+ See https://github.com/numpy/numpy/issues/4763 for a detailed discussion
208
+
209
+ """
210
+ return RandomState(seed=0)
211
+
212
+
213
+ from numpy._pytesttester import PytestTester
214
+ test = PytestTester(__name__)
215
+ del PytestTester
deepseek/lib/python3.10/site-packages/numpy/random/__init__.pyi ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numpy._pytesttester import PytestTester
2
+
3
+ from numpy.random._generator import Generator as Generator
4
+ from numpy.random._generator import default_rng as default_rng
5
+ from numpy.random._mt19937 import MT19937 as MT19937
6
+ from numpy.random._pcg64 import (
7
+ PCG64 as PCG64,
8
+ PCG64DXSM as PCG64DXSM,
9
+ )
10
+ from numpy.random._philox import Philox as Philox
11
+ from numpy.random._sfc64 import SFC64 as SFC64
12
+ from numpy.random.bit_generator import BitGenerator as BitGenerator
13
+ from numpy.random.bit_generator import SeedSequence as SeedSequence
14
+ from numpy.random.mtrand import (
15
+ RandomState as RandomState,
16
+ beta as beta,
17
+ binomial as binomial,
18
+ bytes as bytes,
19
+ chisquare as chisquare,
20
+ choice as choice,
21
+ dirichlet as dirichlet,
22
+ exponential as exponential,
23
+ f as f,
24
+ gamma as gamma,
25
+ geometric as geometric,
26
+ get_bit_generator as get_bit_generator,
27
+ get_state as get_state,
28
+ gumbel as gumbel,
29
+ hypergeometric as hypergeometric,
30
+ laplace as laplace,
31
+ logistic as logistic,
32
+ lognormal as lognormal,
33
+ logseries as logseries,
34
+ multinomial as multinomial,
35
+ multivariate_normal as multivariate_normal,
36
+ negative_binomial as negative_binomial,
37
+ noncentral_chisquare as noncentral_chisquare,
38
+ noncentral_f as noncentral_f,
39
+ normal as normal,
40
+ pareto as pareto,
41
+ permutation as permutation,
42
+ poisson as poisson,
43
+ power as power,
44
+ rand as rand,
45
+ randint as randint,
46
+ randn as randn,
47
+ random as random,
48
+ random_integers as random_integers,
49
+ random_sample as random_sample,
50
+ ranf as ranf,
51
+ rayleigh as rayleigh,
52
+ sample as sample,
53
+ seed as seed,
54
+ set_bit_generator as set_bit_generator,
55
+ set_state as set_state,
56
+ shuffle as shuffle,
57
+ standard_cauchy as standard_cauchy,
58
+ standard_exponential as standard_exponential,
59
+ standard_gamma as standard_gamma,
60
+ standard_normal as standard_normal,
61
+ standard_t as standard_t,
62
+ triangular as triangular,
63
+ uniform as uniform,
64
+ vonmises as vonmises,
65
+ wald as wald,
66
+ weibull as weibull,
67
+ zipf as zipf,
68
+ )
69
+
70
+ __all__: list[str]
71
+ __path__: list[str]
72
+ test: PytestTester
deepseek/lib/python3.10/site-packages/numpy/random/_bounded_integers.pxd ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t,
2
+ int8_t, int16_t, int32_t, int64_t, intptr_t)
3
+ import numpy as np
4
+ cimport numpy as np
5
+ ctypedef np.npy_bool bool_t
6
+
7
+ from numpy.random cimport bitgen_t
8
+
9
+ cdef inline uint64_t _gen_mask(uint64_t max_val) nogil:
10
+ """Mask generator for use in bounded random numbers"""
11
+ # Smallest bit mask >= max
12
+ cdef uint64_t mask = max_val
13
+ mask |= mask >> 1
14
+ mask |= mask >> 2
15
+ mask |= mask >> 4
16
+ mask |= mask >> 8
17
+ mask |= mask >> 16
18
+ mask |= mask >> 32
19
+ return mask
20
+
21
+ cdef object _rand_uint64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
22
+ cdef object _rand_uint32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
23
+ cdef object _rand_uint16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
24
+ cdef object _rand_uint8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
25
+ cdef object _rand_bool(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
26
+ cdef object _rand_int64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
27
+ cdef object _rand_int32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
28
+ cdef object _rand_int16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
29
+ cdef object _rand_int8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
deepseek/lib/python3.10/site-packages/numpy/random/_common.pxd ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #cython: language_level=3
2
+
3
+ from libc.stdint cimport uint32_t, uint64_t, int32_t, int64_t
4
+
5
+ import numpy as np
6
+ cimport numpy as np
7
+
8
+ from numpy.random cimport bitgen_t
9
+
10
+ cdef double POISSON_LAM_MAX
11
+ cdef double LEGACY_POISSON_LAM_MAX
12
+ cdef uint64_t MAXSIZE
13
+
14
+ cdef enum ConstraintType:
15
+ CONS_NONE
16
+ CONS_NON_NEGATIVE
17
+ CONS_POSITIVE
18
+ CONS_POSITIVE_NOT_NAN
19
+ CONS_BOUNDED_0_1
20
+ CONS_BOUNDED_GT_0_1
21
+ CONS_BOUNDED_LT_0_1
22
+ CONS_GT_1
23
+ CONS_GTE_1
24
+ CONS_POISSON
25
+ LEGACY_CONS_POISSON
26
+
27
+ ctypedef ConstraintType constraint_type
28
+
29
+ cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method)
30
+ cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output)
31
+ cdef object prepare_cffi(bitgen_t *bitgen)
32
+ cdef object prepare_ctypes(bitgen_t *bitgen)
33
+ cdef int check_constraint(double val, object name, constraint_type cons) except -1
34
+ cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1
35
+
36
+ cdef extern from "include/aligned_malloc.h":
37
+ cdef void *PyArray_realloc_aligned(void *p, size_t n)
38
+ cdef void *PyArray_malloc_aligned(size_t n)
39
+ cdef void *PyArray_calloc_aligned(size_t n, size_t s)
40
+ cdef void PyArray_free_aligned(void *p)
41
+
42
+ ctypedef void (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) noexcept nogil
43
+ ctypedef double (*random_double_0)(void *state) noexcept nogil
44
+ ctypedef double (*random_double_1)(void *state, double a) noexcept nogil
45
+ ctypedef double (*random_double_2)(void *state, double a, double b) noexcept nogil
46
+ ctypedef double (*random_double_3)(void *state, double a, double b, double c) noexcept nogil
47
+
48
+ ctypedef void (*random_float_fill)(bitgen_t *state, np.npy_intp count, float* out) noexcept nogil
49
+ ctypedef float (*random_float_0)(bitgen_t *state) noexcept nogil
50
+ ctypedef float (*random_float_1)(bitgen_t *state, float a) noexcept nogil
51
+
52
+ ctypedef int64_t (*random_uint_0)(void *state) noexcept nogil
53
+ ctypedef int64_t (*random_uint_d)(void *state, double a) noexcept nogil
54
+ ctypedef int64_t (*random_uint_dd)(void *state, double a, double b) noexcept nogil
55
+ ctypedef int64_t (*random_uint_di)(void *state, double a, uint64_t b) noexcept nogil
56
+ ctypedef int64_t (*random_uint_i)(void *state, int64_t a) noexcept nogil
57
+ ctypedef int64_t (*random_uint_iii)(void *state, int64_t a, int64_t b, int64_t c) noexcept nogil
58
+
59
+ ctypedef uint32_t (*random_uint_0_32)(bitgen_t *state) noexcept nogil
60
+ ctypedef uint32_t (*random_uint_1_i_32)(bitgen_t *state, uint32_t a) noexcept nogil
61
+
62
+ ctypedef int32_t (*random_int_2_i_32)(bitgen_t *state, int32_t a, int32_t b) noexcept nogil
63
+ ctypedef int64_t (*random_int_2_i)(bitgen_t *state, int64_t a, int64_t b) noexcept nogil
64
+
65
+ cdef double kahan_sum(double *darr, np.npy_intp n) noexcept
66
+
67
+ cdef inline double uint64_to_double(uint64_t rnd) noexcept nogil:
68
+ return (rnd >> 11) * (1.0 / 9007199254740992.0)
69
+
70
+ cdef object double_fill(void *func, bitgen_t *state, object size, object lock, object out)
71
+
72
+ cdef object float_fill(void *func, bitgen_t *state, object size, object lock, object out)
73
+
74
+ cdef object float_fill_from_double(void *func, bitgen_t *state, object size, object lock, object out)
75
+
76
+ cdef object wrap_int(object val, object bits)
77
+
78
+ cdef np.ndarray int_to_array(object value, object name, object bits, object uint_size)
79
+
80
+ cdef validate_output_shape(iter_shape, np.ndarray output)
81
+
82
+ cdef object cont(void *func, void *state, object size, object lock, int narg,
83
+ object a, object a_name, constraint_type a_constraint,
84
+ object b, object b_name, constraint_type b_constraint,
85
+ object c, object c_name, constraint_type c_constraint,
86
+ object out)
87
+
88
+ cdef object disc(void *func, void *state, object size, object lock,
89
+ int narg_double, int narg_int64,
90
+ object a, object a_name, constraint_type a_constraint,
91
+ object b, object b_name, constraint_type b_constraint,
92
+ object c, object c_name, constraint_type c_constraint)
93
+
94
+ cdef object cont_f(void *func, bitgen_t *state, object size, object lock,
95
+ object a, object a_name, constraint_type a_constraint,
96
+ object out)
97
+
98
+ cdef object cont_broadcast_3(void *func, void *state, object size, object lock,
99
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
100
+ np.ndarray b_arr, object b_name, constraint_type b_constraint,
101
+ np.ndarray c_arr, object c_name, constraint_type c_constraint)
102
+
103
+ cdef object discrete_broadcast_iii(void *func, void *state, object size, object lock,
104
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
105
+ np.ndarray b_arr, object b_name, constraint_type b_constraint,
106
+ np.ndarray c_arr, object c_name, constraint_type c_constraint)
deepseek/lib/python3.10/site-packages/numpy/random/_generator.pyi ADDED
@@ -0,0 +1,681 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections.abc import Callable
2
+ from typing import Any, Union, overload, TypeVar, Literal
3
+
4
+ from numpy import (
5
+ bool_,
6
+ dtype,
7
+ float32,
8
+ float64,
9
+ int8,
10
+ int16,
11
+ int32,
12
+ int64,
13
+ int_,
14
+ ndarray,
15
+ uint,
16
+ uint8,
17
+ uint16,
18
+ uint32,
19
+ uint64,
20
+ )
21
+ from numpy.random import BitGenerator, SeedSequence
22
+ from numpy._typing import (
23
+ ArrayLike,
24
+ _ArrayLikeFloat_co,
25
+ _ArrayLikeInt_co,
26
+ _DoubleCodes,
27
+ _DTypeLikeBool,
28
+ _DTypeLikeInt,
29
+ _DTypeLikeUInt,
30
+ _Float32Codes,
31
+ _Float64Codes,
32
+ _FloatLike_co,
33
+ _Int8Codes,
34
+ _Int16Codes,
35
+ _Int32Codes,
36
+ _Int64Codes,
37
+ _IntCodes,
38
+ _ShapeLike,
39
+ _SingleCodes,
40
+ _SupportsDType,
41
+ _UInt8Codes,
42
+ _UInt16Codes,
43
+ _UInt32Codes,
44
+ _UInt64Codes,
45
+ _UIntCodes,
46
+ )
47
+
48
+ _ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])
49
+
50
+ _DTypeLikeFloat32 = Union[
51
+ dtype[float32],
52
+ _SupportsDType[dtype[float32]],
53
+ type[float32],
54
+ _Float32Codes,
55
+ _SingleCodes,
56
+ ]
57
+
58
+ _DTypeLikeFloat64 = Union[
59
+ dtype[float64],
60
+ _SupportsDType[dtype[float64]],
61
+ type[float],
62
+ type[float64],
63
+ _Float64Codes,
64
+ _DoubleCodes,
65
+ ]
66
+
67
class Generator:
    """Type stub for ``numpy.random.Generator``.

    Declarations only: each method's overloads distinguish scalar inputs
    (``size=None`` returning a Python scalar) from array-like inputs
    (returning an ``ndarray``).  Overload order is significant to type
    checkers; do not reorder.
    """
    def __init__(self, bit_generator: BitGenerator) -> None: ...
    def __repr__(self) -> str: ...
    def __str__(self) -> str: ...
    def __getstate__(self) -> dict[str, Any]: ...
    def __setstate__(self, state: dict[str, Any]) -> None: ...
    def __reduce__(self) -> tuple[Callable[[str], Generator], tuple[str], dict[str, Any]]: ...
    @property
    def bit_generator(self) -> BitGenerator: ...
    def spawn(self, n_children: int) -> list[Generator]: ...
    def bytes(self, length: int) -> bytes: ...
    # Continuous distributions with selectable float32/float64 dtype and
    # optional ``out`` buffer follow the same five-overload pattern.
    @overload
    def standard_normal(  # type: ignore[misc]
        self,
        size: None = ...,
        dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
        out: None = ...,
    ) -> float: ...
    @overload
    def standard_normal(  # type: ignore[misc]
        self,
        size: _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def standard_normal(  # type: ignore[misc]
        self,
        *,
        out: ndarray[Any, dtype[float64]] = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def standard_normal(  # type: ignore[misc]
        self,
        size: _ShapeLike = ...,
        dtype: _DTypeLikeFloat32 = ...,
        out: None | ndarray[Any, dtype[float32]] = ...,
    ) -> ndarray[Any, dtype[float32]]: ...
    @overload
    def standard_normal(  # type: ignore[misc]
        self,
        size: _ShapeLike = ...,
        dtype: _DTypeLikeFloat64 = ...,
        out: None | ndarray[Any, dtype[float64]] = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def permutation(self, x: int, axis: int = ...) -> ndarray[Any, dtype[int64]]: ...
    @overload
    def permutation(self, x: ArrayLike, axis: int = ...) -> ndarray[Any, Any]: ...
    @overload
    def standard_exponential(  # type: ignore[misc]
        self,
        size: None = ...,
        dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
        method: Literal["zig", "inv"] = ...,
        out: None = ...,
    ) -> float: ...
    @overload
    def standard_exponential(
        self,
        size: _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def standard_exponential(
        self,
        *,
        out: ndarray[Any, dtype[float64]] = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def standard_exponential(
        self,
        size: _ShapeLike = ...,
        *,
        method: Literal["zig", "inv"] = ...,
        out: None | ndarray[Any, dtype[float64]] = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def standard_exponential(
        self,
        size: _ShapeLike = ...,
        dtype: _DTypeLikeFloat32 = ...,
        method: Literal["zig", "inv"] = ...,
        out: None | ndarray[Any, dtype[float32]] = ...,
    ) -> ndarray[Any, dtype[float32]]: ...
    @overload
    def standard_exponential(
        self,
        size: _ShapeLike = ...,
        dtype: _DTypeLikeFloat64 = ...,
        method: Literal["zig", "inv"] = ...,
        out: None | ndarray[Any, dtype[float64]] = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def random(  # type: ignore[misc]
        self,
        size: None = ...,
        dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
        out: None = ...,
    ) -> float: ...
    @overload
    def random(
        self,
        *,
        out: ndarray[Any, dtype[float64]] = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def random(
        self,
        size: _ShapeLike = ...,
        *,
        out: None | ndarray[Any, dtype[float64]] = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def random(
        self,
        size: _ShapeLike = ...,
        dtype: _DTypeLikeFloat32 = ...,
        out: None | ndarray[Any, dtype[float32]] = ...,
    ) -> ndarray[Any, dtype[float32]]: ...
    @overload
    def random(
        self,
        size: _ShapeLike = ...,
        dtype: _DTypeLikeFloat64 = ...,
        out: None | ndarray[Any, dtype[float64]] = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def beta(
        self,
        a: _FloatLike_co,
        b: _FloatLike_co,
        size: None = ...,
    ) -> float: ...  # type: ignore[misc]
    @overload
    def beta(
        self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def exponential(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def exponential(
        self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    # ``integers``: one overload per concrete output dtype.
    @overload
    def integers(  # type: ignore[misc]
        self,
        low: int,
        high: None | int = ...,
    ) -> int: ...
    @overload
    def integers(  # type: ignore[misc]
        self,
        low: int,
        high: None | int = ...,
        size: None = ...,
        dtype: _DTypeLikeBool = ...,
        endpoint: bool = ...,
    ) -> bool: ...
    @overload
    def integers(  # type: ignore[misc]
        self,
        low: int,
        high: None | int = ...,
        size: None = ...,
        dtype: _DTypeLikeInt | _DTypeLikeUInt = ...,
        endpoint: bool = ...,
    ) -> int: ...
    @overload
    def integers(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[int64]]: ...
    @overload
    def integers(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: _DTypeLikeBool = ...,
        endpoint: bool = ...,
    ) -> ndarray[Any, dtype[bool_]]: ...
    @overload
    def integers(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ...,
        endpoint: bool = ...,
    ) -> ndarray[Any, dtype[int8]]: ...
    @overload
    def integers(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ...,
        endpoint: bool = ...,
    ) -> ndarray[Any, dtype[int16]]: ...
    @overload
    def integers(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ...,
        endpoint: bool = ...,
    ) -> ndarray[Any, dtype[int32]]: ...
    @overload
    def integers(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ...,
        endpoint: bool = ...,
    ) -> ndarray[Any, dtype[int64]]: ...
    @overload
    def integers(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ...,
        endpoint: bool = ...,
    ) -> ndarray[Any, dtype[uint8]]: ...
    @overload
    def integers(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ...,
        endpoint: bool = ...,
    ) -> ndarray[Any, dtype[uint16]]: ...
    @overload
    def integers(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ...,
        endpoint: bool = ...,
    ) -> ndarray[Any, dtype[uint32]]: ...
    @overload
    def integers(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ...,
        endpoint: bool = ...,
    ) -> ndarray[Any, dtype[uint64]]: ...
    @overload
    def integers(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ...,
        endpoint: bool = ...,
    ) -> ndarray[Any, dtype[int_]]: ...
    @overload
    def integers(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ...,
        endpoint: bool = ...,
    ) -> ndarray[Any, dtype[uint]]: ...
    # TODO: Use a TypeVar _T here to get away from Any output? Should be int->ndarray[Any,dtype[int64]], ArrayLike[_T] -> _T | ndarray[Any,Any]
    @overload
    def choice(
        self,
        a: int,
        size: None = ...,
        replace: bool = ...,
        p: None | _ArrayLikeFloat_co = ...,
        axis: int = ...,
        shuffle: bool = ...,
    ) -> int: ...
    @overload
    def choice(
        self,
        a: int,
        size: _ShapeLike = ...,
        replace: bool = ...,
        p: None | _ArrayLikeFloat_co = ...,
        axis: int = ...,
        shuffle: bool = ...,
    ) -> ndarray[Any, dtype[int64]]: ...
    @overload
    def choice(
        self,
        a: ArrayLike,
        size: None = ...,
        replace: bool = ...,
        p: None | _ArrayLikeFloat_co = ...,
        axis: int = ...,
        shuffle: bool = ...,
    ) -> Any: ...
    @overload
    def choice(
        self,
        a: ArrayLike,
        size: _ShapeLike = ...,
        replace: bool = ...,
        p: None | _ArrayLikeFloat_co = ...,
        axis: int = ...,
        shuffle: bool = ...,
    ) -> ndarray[Any, Any]: ...
    @overload
    def uniform(
        self,
        low: _FloatLike_co = ...,
        high: _FloatLike_co = ...,
        size: None = ...,
    ) -> float: ...  # type: ignore[misc]
    @overload
    def uniform(
        self,
        low: _ArrayLikeFloat_co = ...,
        high: _ArrayLikeFloat_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def normal(
        self,
        loc: _FloatLike_co = ...,
        scale: _FloatLike_co = ...,
        size: None = ...,
    ) -> float: ...  # type: ignore[misc]
    @overload
    def normal(
        self,
        loc: _ArrayLikeFloat_co = ...,
        scale: _ArrayLikeFloat_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def standard_gamma(  # type: ignore[misc]
        self,
        shape: _FloatLike_co,
        size: None = ...,
        dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
        out: None = ...,
    ) -> float: ...
    @overload
    def standard_gamma(
        self,
        shape: _ArrayLikeFloat_co,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def standard_gamma(
        self,
        shape: _ArrayLikeFloat_co,
        *,
        out: ndarray[Any, dtype[float64]] = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def standard_gamma(
        self,
        shape: _ArrayLikeFloat_co,
        size: None | _ShapeLike = ...,
        dtype: _DTypeLikeFloat32 = ...,
        out: None | ndarray[Any, dtype[float32]] = ...,
    ) -> ndarray[Any, dtype[float32]]: ...
    @overload
    def standard_gamma(
        self,
        shape: _ArrayLikeFloat_co,
        size: None | _ShapeLike = ...,
        dtype: _DTypeLikeFloat64 = ...,
        out: None | ndarray[Any, dtype[float64]] = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def gamma(self, shape: _FloatLike_co, scale: _FloatLike_co = ..., size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def gamma(
        self,
        shape: _ArrayLikeFloat_co,
        scale: _ArrayLikeFloat_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def f(
        self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def noncentral_f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def noncentral_f(
        self,
        dfnum: _ArrayLikeFloat_co,
        dfden: _ArrayLikeFloat_co,
        nonc: _ArrayLikeFloat_co,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def chisquare(self, df: _FloatLike_co, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def chisquare(
        self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def noncentral_chisquare(self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def noncentral_chisquare(
        self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def standard_t(self, df: _FloatLike_co, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def standard_t(
        self, df: _ArrayLikeFloat_co, size: None = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def standard_t(
        self, df: _ArrayLikeFloat_co, size: _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def vonmises(self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def vonmises(
        self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def pareto(self, a: _FloatLike_co, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def pareto(
        self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def weibull(self, a: _FloatLike_co, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def weibull(
        self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def power(self, a: _FloatLike_co, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def power(
        self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def standard_cauchy(self, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def laplace(
        self,
        loc: _FloatLike_co = ...,
        scale: _FloatLike_co = ...,
        size: None = ...,
    ) -> float: ...  # type: ignore[misc]
    @overload
    def laplace(
        self,
        loc: _ArrayLikeFloat_co = ...,
        scale: _ArrayLikeFloat_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def gumbel(
        self,
        loc: _FloatLike_co = ...,
        scale: _FloatLike_co = ...,
        size: None = ...,
    ) -> float: ...  # type: ignore[misc]
    @overload
    def gumbel(
        self,
        loc: _ArrayLikeFloat_co = ...,
        scale: _ArrayLikeFloat_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def logistic(
        self,
        loc: _FloatLike_co = ...,
        scale: _FloatLike_co = ...,
        size: None = ...,
    ) -> float: ...  # type: ignore[misc]
    @overload
    def logistic(
        self,
        loc: _ArrayLikeFloat_co = ...,
        scale: _ArrayLikeFloat_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def lognormal(
        self,
        mean: _FloatLike_co = ...,
        sigma: _FloatLike_co = ...,
        size: None = ...,
    ) -> float: ...  # type: ignore[misc]
    @overload
    def lognormal(
        self,
        mean: _ArrayLikeFloat_co = ...,
        sigma: _ArrayLikeFloat_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def rayleigh(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def rayleigh(
        self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def wald(self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def wald(
        self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def triangular(
        self,
        left: _FloatLike_co,
        mode: _FloatLike_co,
        right: _FloatLike_co,
        size: None = ...,
    ) -> float: ...  # type: ignore[misc]
    @overload
    def triangular(
        self,
        left: _ArrayLikeFloat_co,
        mode: _ArrayLikeFloat_co,
        right: _ArrayLikeFloat_co,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    # Discrete distributions: scalar overloads return int, array overloads int64 arrays.
    @overload
    def binomial(self, n: int, p: _FloatLike_co, size: None = ...) -> int: ...  # type: ignore[misc]
    @overload
    def binomial(
        self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[int64]]: ...
    @overload
    def negative_binomial(self, n: _FloatLike_co, p: _FloatLike_co, size: None = ...) -> int: ...  # type: ignore[misc]
    @overload
    def negative_binomial(
        self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[int64]]: ...
    @overload
    def poisson(self, lam: _FloatLike_co = ..., size: None = ...) -> int: ...  # type: ignore[misc]
    @overload
    def poisson(
        self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[int64]]: ...
    @overload
    def zipf(self, a: _FloatLike_co, size: None = ...) -> int: ...  # type: ignore[misc]
    @overload
    def zipf(
        self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[int64]]: ...
    @overload
    def geometric(self, p: _FloatLike_co, size: None = ...) -> int: ...  # type: ignore[misc]
    @overload
    def geometric(
        self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[int64]]: ...
    @overload
    def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ...  # type: ignore[misc]
    @overload
    def hypergeometric(
        self,
        ngood: _ArrayLikeInt_co,
        nbad: _ArrayLikeInt_co,
        nsample: _ArrayLikeInt_co,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[int64]]: ...
    @overload
    def logseries(self, p: _FloatLike_co, size: None = ...) -> int: ...  # type: ignore[misc]
    @overload
    def logseries(
        self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[int64]]: ...
    # Multivariate distributions and in-place utilities (no scalar overloads).
    def multivariate_normal(
        self,
        mean: _ArrayLikeFloat_co,
        cov: _ArrayLikeFloat_co,
        size: None | _ShapeLike = ...,
        check_valid: Literal["warn", "raise", "ignore"] = ...,
        tol: float = ...,
        *,
        method: Literal["svd", "eigh", "cholesky"] = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    def multinomial(
        self, n: _ArrayLikeInt_co,
        pvals: _ArrayLikeFloat_co,
        size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[int64]]: ...
    def multivariate_hypergeometric(
        self,
        colors: _ArrayLikeInt_co,
        nsample: int,
        size: None | _ShapeLike = ...,
        method: Literal["marginals", "count"] = ...,
    ) -> ndarray[Any, dtype[int64]]: ...
    def dirichlet(
        self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    def permuted(
        self, x: ArrayLike, *, axis: None | int = ..., out: None | ndarray[Any, Any] = ...
    ) -> ndarray[Any, Any]: ...
    def shuffle(self, x: ArrayLike, axis: int = ...) -> None: ...
678
+
679
# Construct a new Generator from any accepted seed-like object.
def default_rng(
    seed: None | _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator = ...
) -> Generator: ...
deepseek/lib/python3.10/site-packages/numpy/random/_mt19937.pyi ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, TypedDict
2
+
3
+ from numpy import dtype, ndarray, uint32
4
+ from numpy.random.bit_generator import BitGenerator, SeedSequence
5
+ from numpy._typing import _ArrayLikeInt_co
6
+
7
class _MT19937Internal(TypedDict):
    # Raw Mersenne Twister state: uint32 key vector plus current position.
    key: ndarray[Any, dtype[uint32]]
    pos: int
10
+
11
class _MT19937State(TypedDict):
    # Dict form accepted and returned by ``MT19937.state``.
    bit_generator: str
    state: _MT19937Internal
14
+
15
class MT19937(BitGenerator):
    """Type stub for the MT19937 bit generator (declarations only)."""
    def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
    def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ...
    def jumped(self, jumps: int = ...) -> MT19937: ...
    @property
    def state(self) -> _MT19937State: ...
    @state.setter
    def state(self, value: _MT19937State) -> None: ...
deepseek/lib/python3.10/site-packages/numpy/random/_pcg64.pyi ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import TypedDict
2
+
3
+ from numpy.random.bit_generator import BitGenerator, SeedSequence
4
+ from numpy._typing import _ArrayLikeInt_co
5
+
6
class _PCG64Internal(TypedDict):
    # PCG64 core state and increment, as arbitrary-precision ints.
    state: int
    inc: int
9
+
10
class _PCG64State(TypedDict):
    # Dict form accepted and returned by ``PCG64.state`` / ``PCG64DXSM.state``.
    bit_generator: str
    state: _PCG64Internal
    has_uint32: int
    uinteger: int
15
+
16
class PCG64(BitGenerator):
    """Type stub for the PCG64 bit generator (declarations only)."""
    def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
    def jumped(self, jumps: int = ...) -> PCG64: ...
    @property
    def state(
        self,
    ) -> _PCG64State: ...
    @state.setter
    def state(
        self,
        value: _PCG64State,
    ) -> None: ...
    def advance(self, delta: int) -> PCG64: ...
29
+
30
class PCG64DXSM(BitGenerator):
    """Type stub for the PCG64DXSM bit generator; shares the PCG64 state dict."""
    def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
    def jumped(self, jumps: int = ...) -> PCG64DXSM: ...
    @property
    def state(
        self,
    ) -> _PCG64State: ...
    @state.setter
    def state(
        self,
        value: _PCG64State,
    ) -> None: ...
    def advance(self, delta: int) -> PCG64DXSM: ...
deepseek/lib/python3.10/site-packages/numpy/random/_philox.pyi ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, TypedDict
2
+
3
+ from numpy import dtype, ndarray, uint64
4
+ from numpy.random.bit_generator import BitGenerator, SeedSequence
5
+ from numpy._typing import _ArrayLikeInt_co
6
+
7
class _PhiloxInternal(TypedDict):
    # Philox counter and key vectors, both uint64 arrays.
    counter: ndarray[Any, dtype[uint64]]
    key: ndarray[Any, dtype[uint64]]
10
+
11
class _PhiloxState(TypedDict):
    # Dict form accepted and returned by ``Philox.state``.
    bit_generator: str
    state: _PhiloxInternal
    buffer: ndarray[Any, dtype[uint64]]
    buffer_pos: int
    has_uint32: int
    uinteger: int
18
+
19
class Philox(BitGenerator):
    """Type stub for the Philox bit generator (declarations only)."""
    def __init__(
        self,
        seed: None | _ArrayLikeInt_co | SeedSequence = ...,
        counter: None | _ArrayLikeInt_co = ...,
        key: None | _ArrayLikeInt_co = ...,
    ) -> None: ...
    @property
    def state(
        self,
    ) -> _PhiloxState: ...
    @state.setter
    def state(
        self,
        value: _PhiloxState,
    ) -> None: ...
    def jumped(self, jumps: int = ...) -> Philox: ...
    def advance(self, delta: int) -> Philox: ...
deepseek/lib/python3.10/site-packages/numpy/random/_pickle.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .mtrand import RandomState
2
+ from ._philox import Philox
3
+ from ._pcg64 import PCG64, PCG64DXSM
4
+ from ._sfc64 import SFC64
5
+
6
+ from ._generator import Generator
7
+ from ._mt19937 import MT19937
8
+
9
# Registry mapping BitGenerator names to their classes; the pickling
# helpers below use it to reconstruct generators by name.
BitGenerators = {
    'MT19937': MT19937,
    'PCG64': PCG64,
    'PCG64DXSM': PCG64DXSM,
    'Philox': Philox,
    'SFC64': SFC64,
}
15
+
16
+
17
def __bit_generator_ctor(bit_generator_name='MT19937'):
    """
    Pickling helper function that returns a bit generator object

    Parameters
    ----------
    bit_generator_name : str
        String containing the name of the BitGenerator

    Returns
    -------
    bit_generator : BitGenerator
        BitGenerator instance
    """
    # Guard clause: reject unknown names before constructing anything.
    if bit_generator_name not in BitGenerators:
        raise ValueError(str(bit_generator_name) + ' is not a known '
                         'BitGenerator module.')
    # Look the class up in the registry and return a fresh instance.
    return BitGenerators[bit_generator_name]()
38
+
39
+
40
def __generator_ctor(bit_generator_name="MT19937",
                     bit_generator_ctor=__bit_generator_ctor):
    """
    Pickling helper function that returns a Generator object

    Parameters
    ----------
    bit_generator_name : str
        String containing the core BitGenerator's name
    bit_generator_ctor : callable, optional
        Callable function that takes bit_generator_name as its only argument
        and returns an instantized bit generator.

    Returns
    -------
    rg : Generator
        Generator using the named core BitGenerator
    """
    # Build the underlying bit generator first, then wrap it.
    bit_generator = bit_generator_ctor(bit_generator_name)
    return Generator(bit_generator)
59
+
60
+
61
def __randomstate_ctor(bit_generator_name="MT19937",
                       bit_generator_ctor=__bit_generator_ctor):
    """
    Pickling helper function that returns a legacy RandomState-like object

    Parameters
    ----------
    bit_generator_name : str
        String containing the core BitGenerator's name
    bit_generator_ctor : callable, optional
        Callable function that takes bit_generator_name as its only argument
        and returns an instantized bit generator.

    Returns
    -------
    rs : RandomState
        Legacy RandomState using the named core BitGenerator
    """
    # Build the underlying bit generator first, then wrap it in the
    # legacy RandomState interface.
    bit_generator = bit_generator_ctor(bit_generator_name)
    return RandomState(bit_generator)
deepseek/lib/python3.10/site-packages/numpy/random/_sfc64.pyi ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, TypedDict
2
+
3
+ from numpy import dtype as dtype
4
+ from numpy import ndarray as ndarray
5
+ from numpy import uint64
6
+ from numpy.random.bit_generator import BitGenerator, SeedSequence
7
+ from numpy._typing import _ArrayLikeInt_co
8
+
9
class _SFC64Internal(TypedDict):
    # SFC64 raw state as a uint64 array.
    state: ndarray[Any, dtype[uint64]]
11
+
12
class _SFC64State(TypedDict):
    # Dict form accepted and returned by ``SFC64.state``.
    bit_generator: str
    state: _SFC64Internal
    has_uint32: int
    uinteger: int
17
+
18
class SFC64(BitGenerator):
    """Type stub for the SFC64 bit generator (declarations only)."""
    def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
    @property
    def state(
        self,
    ) -> _SFC64State: ...
    @state.setter
    def state(
        self,
        value: _SFC64State,
    ) -> None: ...
deepseek/lib/python3.10/site-packages/numpy/random/bit_generator.pxd ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ cimport numpy as np
2
+ from libc.stdint cimport uint32_t, uint64_t
3
+
4
cdef extern from "numpy/random/bitgen.h":
    # C-level bit generator interface: an opaque state pointer plus
    # function pointers producing the next uint64/uint32/double/raw value.
    struct bitgen:
        void *state
        uint64_t (*next_uint64)(void *st) nogil
        uint32_t (*next_uint32)(void *st) nogil
        double (*next_double)(void *st) nogil
        uint64_t (*next_raw)(void *st) nogil

    ctypedef bitgen bitgen_t
13
+
14
cdef class BitGenerator():
    # Declaration of the BitGenerator base class attributes.
    cdef readonly object _seed_seq      # seed sequence used to initialize the state
    cdef readonly object lock           # lock guarding access to the generator
    cdef bitgen_t _bitgen               # embedded C-level bitgen struct
    cdef readonly object _ctypes        # ctypes interface wrapper
    cdef readonly object _cffi          # cffi interface wrapper
    cdef readonly object capsule        # PyCapsule exposing the bitgen_t pointer
21
+
22
+
23
cdef class SeedSequence():
    # Declaration of the SeedSequence attributes and C-level methods.
    cdef readonly object entropy                 # user-provided entropy
    cdef readonly tuple spawn_key                # key identifying spawned children
    cdef readonly Py_ssize_t pool_size           # size of the entropy pool
    cdef readonly object pool                    # mixed entropy pool
    cdef readonly uint32_t n_children_spawned    # count of children spawned so far

    # Mix new entropy into the pool (C-level helper).
    cdef mix_entropy(self, np.ndarray[np.npy_uint32, ndim=1] mixer,
                     np.ndarray[np.npy_uint32, ndim=1] entropy_array)
    cdef get_assembled_entropy(self)
33
+
34
cdef class SeedlessSequence():
    # Marker class with no state; declaration only.
    pass
deepseek/lib/python3.10/site-packages/numpy/random/mtrand.pyi ADDED
@@ -0,0 +1,571 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import builtins
2
+ from collections.abc import Callable
3
+ from typing import Any, Union, overload, Literal
4
+
5
+ from numpy import (
6
+ bool_,
7
+ dtype,
8
+ float32,
9
+ float64,
10
+ int8,
11
+ int16,
12
+ int32,
13
+ int64,
14
+ int_,
15
+ ndarray,
16
+ uint,
17
+ uint8,
18
+ uint16,
19
+ uint32,
20
+ uint64,
21
+ )
22
+ from numpy.random.bit_generator import BitGenerator
23
+ from numpy._typing import (
24
+ ArrayLike,
25
+ _ArrayLikeFloat_co,
26
+ _ArrayLikeInt_co,
27
+ _DoubleCodes,
28
+ _DTypeLikeBool,
29
+ _DTypeLikeInt,
30
+ _DTypeLikeUInt,
31
+ _Float32Codes,
32
+ _Float64Codes,
33
+ _Int8Codes,
34
+ _Int16Codes,
35
+ _Int32Codes,
36
+ _Int64Codes,
37
+ _IntCodes,
38
+ _ShapeLike,
39
+ _SingleCodes,
40
+ _SupportsDType,
41
+ _UInt8Codes,
42
+ _UInt16Codes,
43
+ _UInt32Codes,
44
+ _UInt64Codes,
45
+ _UIntCodes,
46
+ )
47
+
48
# DType-like aliases accepted wherever a 32- or 64-bit float dtype may be
# passed: dtype instances, supporting objects, scalar types, character codes.
_DTypeLikeFloat32 = Union[
    dtype[float32],
    _SupportsDType[dtype[float32]],
    type[float32],
    _Float32Codes,
    _SingleCodes,
]

_DTypeLikeFloat64 = Union[
    dtype[float64],
    _SupportsDType[dtype[float64]],
    type[float],
    type[float64],
    _Float64Codes,
    _DoubleCodes,
]
64
+
65
class RandomState:
    """Typed stub for the legacy ``numpy.random.RandomState`` generator.

    Declarations only (all bodies are ``...``).  The recurring pattern: each
    distribution method is overloaded so that ``size=None`` yields a Python
    scalar while an explicit shape (or array-valued parameters) yields an
    ``ndarray``.
    """

    # Bit generator backing every draw made by this instance.
    _bit_generator: BitGenerator
    def __init__(self, seed: None | _ArrayLikeInt_co | BitGenerator = ...) -> None: ...
    def __repr__(self) -> str: ...
    def __str__(self) -> str: ...
    # Pickle support round-trips through a state dict.
    def __getstate__(self) -> dict[str, Any]: ...
    def __setstate__(self, state: dict[str, Any]) -> None: ...
    def __reduce__(self) -> tuple[Callable[[str], RandomState], tuple[str], dict[str, Any]]: ...
    def seed(self, seed: None | _ArrayLikeFloat_co = ...) -> None: ...
    # With legacy=True the positional state tuple may be returned instead of a dict.
    @overload
    def get_state(self, legacy: Literal[False] = ...) -> dict[str, Any]: ...
    @overload
    def get_state(
        self, legacy: Literal[True] = ...
    ) -> dict[str, Any] | tuple[str, ndarray[Any, dtype[uint32]], int, int, float]: ...
    def set_state(
        self, state: dict[str, Any] | tuple[str, ndarray[Any, dtype[uint32]], int, int, float]
    ) -> None: ...
    # --- uniform floats on [0, 1) ---
    @overload
    def random_sample(self, size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def random_sample(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def random(self, size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def random(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def beta(
        self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def exponential(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def exponential(
        self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def standard_exponential(self, size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def standard_exponential(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def tomaxint(self, size: None = ...) -> int: ... # type: ignore[misc]
    @overload
    def tomaxint(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[int_]]: ...
    # --- randint: one overload per accepted output dtype family ---
    @overload
    def randint( # type: ignore[misc]
        self,
        low: int,
        high: None | int = ...,
    ) -> int: ...
    @overload
    def randint( # type: ignore[misc]
        self,
        low: int,
        high: None | int = ...,
        size: None = ...,
        dtype: _DTypeLikeBool = ...,
    ) -> bool: ...
    @overload
    def randint( # type: ignore[misc]
        self,
        low: int,
        high: None | int = ...,
        size: None = ...,
        dtype: _DTypeLikeInt | _DTypeLikeUInt = ...,
    ) -> int: ...
    @overload
    def randint( # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[int_]]: ...
    @overload
    def randint( # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: _DTypeLikeBool = ...,
    ) -> ndarray[Any, dtype[bool_]]: ...
    @overload
    def randint( # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ...,
    ) -> ndarray[Any, dtype[int8]]: ...
    @overload
    def randint( # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ...,
    ) -> ndarray[Any, dtype[int16]]: ...
    @overload
    def randint( # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ...,
    ) -> ndarray[Any, dtype[int32]]: ...
    @overload
    def randint( # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ...,
    ) -> ndarray[Any, dtype[int64]]: ...
    @overload
    def randint( # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ...,
    ) -> ndarray[Any, dtype[uint8]]: ...
    @overload
    def randint( # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ...,
    ) -> ndarray[Any, dtype[uint16]]: ...
    @overload
    def randint( # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ...,
    ) -> ndarray[Any, dtype[uint32]]: ...
    @overload
    def randint( # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ...,
    ) -> ndarray[Any, dtype[uint64]]: ...
    @overload
    def randint( # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ...,
    ) -> ndarray[Any, dtype[int_]]: ...
    @overload
    def randint( # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ...,
    ) -> ndarray[Any, dtype[uint]]: ...
    # ``bytes`` shadows the builtin, hence the builtins-qualified return type.
    def bytes(self, length: int) -> builtins.bytes: ...
    # --- choice: int population vs array-like population, scalar vs shaped ---
    @overload
    def choice(
        self,
        a: int,
        size: None = ...,
        replace: bool = ...,
        p: None | _ArrayLikeFloat_co = ...,
    ) -> int: ...
    @overload
    def choice(
        self,
        a: int,
        size: _ShapeLike = ...,
        replace: bool = ...,
        p: None | _ArrayLikeFloat_co = ...,
    ) -> ndarray[Any, dtype[int_]]: ...
    @overload
    def choice(
        self,
        a: ArrayLike,
        size: None = ...,
        replace: bool = ...,
        p: None | _ArrayLikeFloat_co = ...,
    ) -> Any: ...
    @overload
    def choice(
        self,
        a: ArrayLike,
        size: _ShapeLike = ...,
        replace: bool = ...,
        p: None | _ArrayLikeFloat_co = ...,
    ) -> ndarray[Any, Any]: ...
    @overload
    def uniform(self, low: float = ..., high: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def uniform(
        self,
        low: _ArrayLikeFloat_co = ...,
        high: _ArrayLikeFloat_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    # rand/randn take the shape as positional ints, not a ``size`` tuple.
    @overload
    def rand(self) -> float: ...
    @overload
    def rand(self, *args: int) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def randn(self) -> float: ...
    @overload
    def randn(self, *args: int) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def random_integers(self, low: int, high: None | int = ..., size: None = ...) -> int: ... # type: ignore[misc]
    @overload
    def random_integers(
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[int_]]: ...
    @overload
    def standard_normal(self, size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def standard_normal( # type: ignore[misc]
        self, size: _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def normal(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def normal(
        self,
        loc: _ArrayLikeFloat_co = ...,
        scale: _ArrayLikeFloat_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def standard_gamma( # type: ignore[misc]
        self,
        shape: float,
        size: None = ...,
    ) -> float: ...
    @overload
    def standard_gamma(
        self,
        shape: _ArrayLikeFloat_co,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def gamma(
        self,
        shape: _ArrayLikeFloat_co,
        scale: _ArrayLikeFloat_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def f(
        self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def noncentral_f(self, dfnum: float, dfden: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def noncentral_f(
        self,
        dfnum: _ArrayLikeFloat_co,
        dfden: _ArrayLikeFloat_co,
        nonc: _ArrayLikeFloat_co,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def chisquare(self, df: float, size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def chisquare(
        self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def noncentral_chisquare(self, df: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def noncentral_chisquare(
        self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def standard_t(self, df: float, size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def standard_t(
        self, df: _ArrayLikeFloat_co, size: None = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def standard_t(
        self, df: _ArrayLikeFloat_co, size: _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def vonmises(
        self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def pareto(
        self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def weibull(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def weibull(
        self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def power(
        self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def laplace(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def laplace(
        self,
        loc: _ArrayLikeFloat_co = ...,
        scale: _ArrayLikeFloat_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def gumbel(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def gumbel(
        self,
        loc: _ArrayLikeFloat_co = ...,
        scale: _ArrayLikeFloat_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def logistic(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def logistic(
        self,
        loc: _ArrayLikeFloat_co = ...,
        scale: _ArrayLikeFloat_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def lognormal(self, mean: float = ..., sigma: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def lognormal(
        self,
        mean: _ArrayLikeFloat_co = ...,
        sigma: _ArrayLikeFloat_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def rayleigh(
        self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def wald(
        self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def triangular(self, left: float, mode: float, right: float, size: None = ...) -> float: ... # type: ignore[misc]
    @overload
    def triangular(
        self,
        left: _ArrayLikeFloat_co,
        mode: _ArrayLikeFloat_co,
        right: _ArrayLikeFloat_co,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    # --- discrete distributions: scalar form returns int, shaped form int_ arrays ---
    @overload
    def binomial(self, n: int, p: float, size: None = ...) -> int: ... # type: ignore[misc]
    @overload
    def binomial(
        self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[int_]]: ...
    @overload
    def negative_binomial(self, n: float, p: float, size: None = ...) -> int: ... # type: ignore[misc]
    @overload
    def negative_binomial(
        self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[int_]]: ...
    @overload
    def poisson(self, lam: float = ..., size: None = ...) -> int: ... # type: ignore[misc]
    @overload
    def poisson(
        self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[int_]]: ...
    @overload
    def zipf(self, a: float, size: None = ...) -> int: ... # type: ignore[misc]
    @overload
    def zipf(
        self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[int_]]: ...
    @overload
    def geometric(self, p: float, size: None = ...) -> int: ... # type: ignore[misc]
    @overload
    def geometric(
        self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[int_]]: ...
    @overload
    def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... # type: ignore[misc]
    @overload
    def hypergeometric(
        self,
        ngood: _ArrayLikeInt_co,
        nbad: _ArrayLikeInt_co,
        nsample: _ArrayLikeInt_co,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[int_]]: ...
    @overload
    def logseries(self, p: float, size: None = ...) -> int: ... # type: ignore[misc]
    @overload
    def logseries(
        self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[int_]]: ...
    # --- multivariate distributions always return arrays ---
    def multivariate_normal(
        self,
        mean: _ArrayLikeFloat_co,
        cov: _ArrayLikeFloat_co,
        size: None | _ShapeLike = ...,
        check_valid: Literal["warn", "raise", "ignore"] = ...,
        tol: float = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    def multinomial(
        self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[int_]]: ...
    def dirichlet(
        self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    # In-place shuffle; returns None like the runtime method.
    def shuffle(self, x: ArrayLike) -> None: ...
    @overload
    def permutation(self, x: int) -> ndarray[Any, dtype[int_]]: ...
    @overload
    def permutation(self, x: ArrayLike) -> ndarray[Any, Any]: ...
512
+
513
# Hidden module-level singleton backing the ``numpy.random.*`` convenience
# functions declared below.
_rand: RandomState

# Each module-level function is declared as a bound method of the singleton,
# so its signature and overloads mirror the RandomState method of the same name.
beta = _rand.beta
binomial = _rand.binomial
bytes = _rand.bytes
chisquare = _rand.chisquare
choice = _rand.choice
dirichlet = _rand.dirichlet
exponential = _rand.exponential
f = _rand.f
gamma = _rand.gamma
get_state = _rand.get_state
geometric = _rand.geometric
gumbel = _rand.gumbel
hypergeometric = _rand.hypergeometric
laplace = _rand.laplace
logistic = _rand.logistic
lognormal = _rand.lognormal
logseries = _rand.logseries
multinomial = _rand.multinomial
multivariate_normal = _rand.multivariate_normal
negative_binomial = _rand.negative_binomial
noncentral_chisquare = _rand.noncentral_chisquare
noncentral_f = _rand.noncentral_f
normal = _rand.normal
pareto = _rand.pareto
permutation = _rand.permutation
poisson = _rand.poisson
power = _rand.power
rand = _rand.rand
randint = _rand.randint
randn = _rand.randn
random = _rand.random
random_integers = _rand.random_integers
random_sample = _rand.random_sample
rayleigh = _rand.rayleigh
seed = _rand.seed
set_state = _rand.set_state
shuffle = _rand.shuffle
standard_cauchy = _rand.standard_cauchy
standard_exponential = _rand.standard_exponential
standard_gamma = _rand.standard_gamma
standard_normal = _rand.standard_normal
standard_t = _rand.standard_t
triangular = _rand.triangular
uniform = _rand.uniform
vonmises = _rand.vonmises
wald = _rand.wald
weibull = _rand.weibull
zipf = _rand.zipf
# Two legacy aliases that are trivial wrappers around random_sample
sample = _rand.random_sample
ranf = _rand.random_sample
566
+
567
def set_bit_generator(bitgen: BitGenerator) -> None:
    """Install *bitgen* as the bit generator behind the module singleton."""
    ...
569
+
570
def get_bit_generator() -> BitGenerator:
    """Return the bit generator currently backing the module singleton."""
    ...
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.77 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_equalize.cpython-310.pyc ADDED
Binary file (5.63 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_learnable_fake_quantize.cpython-310.pyc ADDED
Binary file (6.53 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/qconfig.cpython-310.pyc ADDED
Binary file (13.8 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantize_jit.cpython-310.pyc ADDED
Binary file (13.1 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/utils.cpython-310.pyc ADDED
Binary file (21.4 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/_pt2e/__init__.py ADDED
File without changes
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/_pt2e/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (184 Bytes). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/_pt2e/__pycache__/utils.cpython-310.pyc ADDED
Binary file (3.05 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/_pt2e/utils.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch.fx import GraphModule
3
+ from torch.nn.utils.fusion import fuse_conv_bn_weights
4
+ # TODO[jerryzh168]: move this to a more general util function
5
+ from torch.ao.quantization.fx.prepare import (
6
+ _is_activation_post_process_node,
7
+ )
8
+ from collections import OrderedDict
9
+ import operator
10
+
11
+ # TODO[qihan]: longer term, this should happen in the dynamo stack as well
12
+ def _get_renamed_nn_module_stack(nn_module_stack):
13
+ # initialize with top level parent scope
14
+ nn_module_stack_renamed = OrderedDict([("", None)])
15
+ if nn_module_stack:
16
+ # Rename module_key, e.g. "self_layer1_1__conv1" to "self.layer1.1._conv1", for easier downstream parsing
17
+ prev_key = ""
18
+ for key, value in nn_module_stack.items():
19
+ if not prev_key:
20
+ if key.startswith("self_"):
21
+ new_key = key[5:]
22
+ prev_key = new_key
23
+ else:
24
+ new_key = prev_key + "." + key[len(prev_key) + 6 :]
25
+ nn_module_stack_renamed[new_key] = value
26
+ prev_key = new_key
27
+ return nn_module_stack_renamed
28
+
29
+ def _get_tensor_constant_from_node(node, m):
30
+ if node is None:
31
+ return None
32
+ assert node.op == "get_attr"
33
+ return getattr(m, node.target)
34
+
35
# fuse conv bn weights, inplace modification of the graph_module and graph
def _fuse_conv_bn_(m: GraphModule) -> None:
    """Fold each conv -> native_batch_norm pair in ``m`` into the conv weights."""
    for n in m.graph.nodes:
        # Only batch-norm nodes whose direct input is a convolution qualify.
        if n.op != "call_function" or n.target != torch.ops.aten.native_batch_norm.default:
            continue
        bn_op = n
        n = bn_op.args[0]
        if n.op != "call_function" or n.target != torch.ops.aten.convolution.default:
            continue
        conv_op = n

        # conv weight
        conv_w = _get_tensor_constant_from_node(conv_op.args[1], m)
        # conv bias
        conv_b = _get_tensor_constant_from_node(conv_op.args[2], m)
        # NOTE(review): ``transpose`` is read from the conv node but the fusion
        # below hard-codes transpose=False — presumably transposed convolutions
        # are not supported here; confirm.
        transpose = conv_op.args[6]

        # bn weight
        bn_w = _get_tensor_constant_from_node(bn_op.args[1], m)
        # bn bias
        bn_b = _get_tensor_constant_from_node(bn_op.args[2], m)
        # bn running mean
        bn_rm = _get_tensor_constant_from_node(bn_op.args[3], m)
        # bn running variance
        bn_rv = _get_tensor_constant_from_node(bn_op.args[4], m)
        bn_eps = bn_op.args[7]

        fused_weight, fused_bias = fuse_conv_bn_weights(conv_w, conv_b, bn_rm, bn_rv, bn_eps, bn_w, bn_b, transpose=False)

        # update the weight and bias for conv
        conv_args = list(conv_op.args)
        # calling data since the fused_weight and fused_bias are nn.Parameter
        weight_attr_name = conv_args[1].target
        setattr(m, weight_attr_name, fused_weight)
        if conv_args[2] is not None:
            bias_attr_name = conv_args[2].target
        else:
            # Conv had no bias: fusion produces one, so a fresh get_attr node
            # must be inserted before the conv to feed it.
            bias_attr_name = weight_attr_name + "_bias"
            with m.graph.inserting_before(conv_op):
                get_bias_node = m.graph.get_attr(bias_attr_name)
            conv_args[2] = get_bias_node
        setattr(m, bias_attr_name, fused_bias)
        conv_op.args = tuple(conv_args)

        # native_batch_norm has 3 outputs, we expect getitem calls on the output
        # and we want to replace the uses of getitem 0 with the output of conv
        #
        # Before:
        # conv -> bn - (first output) -> users1
        #          \ - (second output) -> users2
        #          \ - (third output) -> users3
        # After:
        # conv -> (first output) -> users1
        #       bn -
        #          \ - (second output) -> users2
        #          \ - (third output) -> users3
        # if users2 and users3 are empty then bn will be removed through dead code elimination

        for user in bn_op.users:
            if user.op != "call_function" or user.target != operator.getitem or user.args[1] != 0:
                continue
            user.replace_all_uses_with(conv_op)
    # Drop the now-unused bn nodes and regenerate the module's forward().
    m.graph.eliminate_dead_code()
    m.recompile()
99
+
100
def _rearrange_weight_observer_for_addmm(
    model: GraphModule,
) -> None:
    """
    Move the weight observer from after the transpose to before it:

    before:
    weight - t - observer \
      input - observer - addmm
    after:
    weight - observer - t \
      input - observer - addmm
    """
    named_modules = dict(model.named_modules(remove_duplicate=False))
    for node in model.graph.nodes:
        if node.target != torch.ops.aten.addmm.default:
            continue
        addmm = node
        # addmm(bias, input, weight): the third argument is the weight path.
        maybe_weight_obs = addmm.args[2]
        if not _is_activation_post_process_node(maybe_weight_obs, named_modules):
            continue
        transpose_node = maybe_weight_obs.args[0]
        if transpose_node.target != torch.ops.aten.t.default:
            continue
        # swap the order of transpose and observation

        # Feed the observer directly from the un-transposed weight...
        maybe_weight_obs.replace_input_with(transpose_node, transpose_node.args[0])
        # ...then insert a new transpose after the observer; the old transpose
        # node becomes dead and is removed below.
        with model.graph.inserting_after(maybe_weight_obs):
            args = list(transpose_node.args)
            args[0] = maybe_weight_obs
            new_transpose_node = model.graph.create_node(
                "call_function",
                torch.ops.aten.t.default,
                tuple(args),
                transpose_node.kwargs
            )
        addmm.replace_input_with(maybe_weight_obs, new_transpose_node)

    model.graph.eliminate_dead_code()
    model.graph.lint()
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from .prepare import prepare
2
+ from .convert import convert
3
+ from .fuse import fuse
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (268 Bytes). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_decomposed.cpython-310.pyc ADDED
Binary file (13.2 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_equalize.cpython-310.pyc ADDED
Binary file (25.2 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_lower_to_native_backend.cpython-310.pyc ADDED
Binary file (26 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/convert.cpython-310.pyc ADDED
Binary file (24.1 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/custom_config.cpython-310.pyc ADDED
Binary file (17.2 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/fuse.cpython-310.pyc ADDED
Binary file (4.03 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/fuse_handler.cpython-310.pyc ADDED
Binary file (4.27 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/graph_module.cpython-310.pyc ADDED
Binary file (5.39 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/lower_to_fbgemm.cpython-310.pyc ADDED
Binary file (745 Bytes). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/lower_to_qnnpack.cpython-310.pyc ADDED
Binary file (750 Bytes). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/match_utils.cpython-310.pyc ADDED
Binary file (4.98 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/pattern_utils.cpython-310.pyc ADDED
Binary file (3 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/prepare.cpython-310.pyc ADDED
Binary file (33 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/qconfig_mapping_utils.cpython-310.pyc ADDED
Binary file (8.7 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/quantize_handler.cpython-310.pyc ADDED
Binary file (7.02 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/tracer.cpython-310.pyc ADDED
Binary file (1.81 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/utils.cpython-310.pyc ADDED
Binary file (27 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_decomposed.py ADDED
@@ -0,0 +1,416 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch.library import Library, impl
3
+ from torch.ao.quantization.utils import determine_qparams, validate_qmin_qmax
4
+ from typing import Tuple
5
+
6
+
7
# Note: decomposed means decomposed quantized tensor, using decomposed so that the
# name is not too long
# Operator library under which the decomposed quantize/dequantize ops below
# are registered ("DEF" = this module owns the schema definitions).
quantized_decomposed_lib = Library("quantized_decomposed", "DEF")
10
+
11
+ _DTYPE_TO_QVALUE_BOUNDS = {
12
+ torch.uint8: (0, 255),
13
+ torch.int8: (-128, 127),
14
+ torch.int32: (-(2**31), 2**31 - 1)
15
+ }
16
+
17
+ # Helper to check the passed in quant min and max are valid for the dtype
18
+ def _quant_min_max_bounds_check(quant_min, quant_max, dtype):
19
+ if dtype not in _DTYPE_TO_QVALUE_BOUNDS:
20
+ raise ValueError(f"Unsupported dtype: {dtype}")
21
+ quant_min_lower_bound, quant_max_upper_bound = _DTYPE_TO_QVALUE_BOUNDS[dtype]
22
+
23
+ assert quant_min >= quant_min_lower_bound, \
24
+ "quant_min out of bound for dtype, " \
25
+ f"quant_min_lower_bound: {quant_min_lower_bound} quant_min: {quant_min}"
26
+
27
+ assert quant_max <= quant_max_upper_bound, \
28
+ "quant_max out of bound for dtype, " \
29
+ f"quant_max_upper_bound: {quant_max_upper_bound} quant_max: {quant_max}"
30
+
31
# Schema for the scalar-parameter quantize op.
quantized_decomposed_lib.define(
    "quantize_per_tensor(Tensor input, float scale, int zero_point, "
    "int quant_min, int quant_max, ScalarType dtype) -> Tensor")

@impl(quantized_decomposed_lib, "quantize_per_tensor", "CompositeExplicitAutograd")
def quantize_per_tensor(
    input: torch.Tensor,
    scale: float,
    zero_point: int,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype
) -> torch.Tensor:
    """ Affine quantization for the Tensor using the same quantization parameters to map
    from floating point to quantized values

    Args:
       input (torch.Tensor): original float32 Tensor
       scale (float): quantization parameter for affine quantization
       zero_point (int): quantization parameter for affine quantization
       quant_min (int): minimum quantized value for output Tensor
       quant_max (int): maximum quantized value for output Tensor
       dtype (torch.dtype): requested dtype (e.g. torch.uint8) for output Tensor

    Returns:
       Tensor with requested dtype (e.g. torch.uint8), note the quantization parameters
       are not stored in the Tensor, we are storing them in function arguments instead
    """
    assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
    _quant_min_max_bounds_check(quant_min, quant_max, dtype)

    # q = clamp(round(x / scale) + zero_point, quant_min, quant_max), cast to dtype.
    inv_scale = 1.0 / scale
    return torch.clamp(torch.round(input * inv_scale) + zero_point, quant_min, quant_max).to(dtype)
64
+
65
# Schema for the variant taking 0-d tensor scale/zero_point.
quantized_decomposed_lib.define(
    "quantize_per_tensor.tensor(Tensor input, Tensor scale, Tensor zero_point, "
    "int quant_min, int quant_max, ScalarType dtype) -> Tensor")

@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor", "CompositeExplicitAutograd")
def quantize_per_tensor_tensor(
    input: torch.Tensor,
    scale: torch.Tensor,
    zero_point: torch.Tensor,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype
) -> torch.Tensor:
    """ Affine quantization for the Tensor using the same quantization parameters to map
    from floating point to quantized values
    Same as `quantize_per_tensor` but scale and zero_point are Scalar Tensor instead of
    scalar values
    """
    # Fix: corrected "Exepecting" -> "Expecting" in the assertion messages.
    assert zero_point.numel() == 1, f"Expecting zero_point tensor to be one element, but received : {zero_point.numel()}"
    assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
    # Delegate to the scalar overload after extracting the 0-d values.
    return quantize_per_tensor(input, scale.item(), zero_point.item(), quant_min, quant_max, dtype)
86
+
87
@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor", "Meta")
def quantize_per_tensor_tensor_meta(
    input: torch.Tensor,
    scale: torch.Tensor,
    zero_point: torch.Tensor,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype
) -> torch.Tensor:
    """ Meta kernel for quantize_per_tensor.tensor: validates arguments and
    returns an empty Tensor with the output dtype (no computation).
    """
    # Fixed typo "Exepecting" -> "Expecting" in the assertion messages.
    assert zero_point.numel() == 1, f"Expecting zero_point tensor to be one element, but received : {zero_point.numel()}"
    assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
    assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
    _quant_min_max_bounds_check(quant_min, quant_max, dtype)
    return torch.empty_like(input, dtype=dtype)
94
+
95
# Note: quant_min/quant_max/dtype are not used in the operator, but for now it's kept in
# the signature as metadata for the input Tensor, this might be useful for pattern
# matching in the future
# We will revisit this later if we found there are no use cases for it
quantized_decomposed_lib.define(
    "dequantize_per_tensor(Tensor input, float scale, int zero_point, "
    "int quant_min, int quant_max, ScalarType dtype) -> Tensor")

@impl(quantized_decomposed_lib, "dequantize_per_tensor", "CompositeExplicitAutograd")
def dequantize_per_tensor(
    input: torch.Tensor,
    scale: float,
    zero_point: int,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype
) -> torch.Tensor:
    """ Per-tensor affine dequantization: maps integer quantized values back to
    float32 using a single (scale, zero_point) pair.

    Args:
       input (torch.Tensor): Tensor with dtype matching the `dtype` argument
         (e.g. `torch.uint8`); together with scale/zero_point it represents a
         per-tensor quantized Tensor
       scale (float): quantization parameter for affine quantization
       zero_point (int): quantization parameter for affine quantization
       quant_min (int): minimum quantized value (unused; reserved for pattern matching)
       quant_max (int): maximum quantized value (unused; reserved for pattern matching)
       dtype (torch.dtype): dtype of the input Tensor (unused in computation;
         reserved for pattern matching)

    Returns:
       dequantized float32 Tensor
    """
    assert input.dtype == dtype, f"Expecting input to have dtype: {dtype}"
    if dtype not in (torch.uint8, torch.int8, torch.int32):
        raise ValueError(f"Unsupported dtype in dequantize_per_tensor: {dtype}")
    # TODO: investigate why
    #   (input - zero_point).to(torch.float32) * scale
    # failed the test
    return (input.to(torch.float32) - zero_point) * scale
144
+
145
+
146
quantized_decomposed_lib.define(
    "dequantize_per_tensor.tensor(Tensor input, Tensor scale, Tensor zero_point, "
    "int quant_min, int quant_max, ScalarType dtype) -> Tensor")

@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor", "CompositeExplicitAutograd")
def dequantize_per_tensor_tensor(
    input: torch.Tensor,
    scale: torch.Tensor,
    zero_point: torch.Tensor,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype
) -> torch.Tensor:
    """ Affine dequantization for the Tensor using the same quantization parameters to map
    from quantized values to floating point values.
    Same as `dequantize_per_tensor` but scale and zero_point are scalar Tensors instead of
    scalar values.
    """
    # Fixed typo "Exepecting" -> "Expecting" in the assertion messages.
    assert zero_point.numel() == 1, f"Expecting zero_point tensor to be one element, but received : {zero_point.numel()}"
    assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
    # Delegate to the scalar variant after extracting the scalar values.
    return dequantize_per_tensor(input, scale.item(), zero_point.item(), quant_min, quant_max, dtype)
167
+
168
@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor", "Meta")
def dequantize_per_tensor_tensor_meta(
    input: torch.Tensor,
    scale: torch.Tensor,
    zero_point: torch.Tensor,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype
) -> torch.Tensor:
    """ Meta kernel for dequantize_per_tensor.tensor: validates arguments and
    returns an empty float32 Tensor (no computation).
    """
    # Fixed typo "Exepecting" -> "Expecting" in the assertion messages.
    assert zero_point.numel() == 1, f"Expecting zero_point tensor to be one element, but received : {zero_point.numel()}"
    assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
    assert input.dtype == dtype, f"Expecting input to have dtype: {dtype}"
    if dtype in [torch.uint8, torch.int8, torch.int32]:
        return torch.empty_like(input, dtype=torch.float32)
    else:
        raise ValueError(f"Unsupported dtype in dequantize_per_tensor: {dtype}")
177
+
178
+
179
quantized_decomposed_lib.define(
    "choose_qparams.tensor(Tensor input, int quant_min, int quant_max, "
    "ScalarType dtype) -> (Tensor, Tensor)")

@impl(quantized_decomposed_lib, "choose_qparams.tensor", "CompositeExplicitAutograd")
def choose_qparams_tensor(
    input: torch.Tensor,
    qmin: int,
    qmax: int,
    dtype: torch.dtype
) -> Tuple[torch.Tensor, torch.Tensor]:
    """ Given an input Tensor, derive the per tensor affine quantization parameters
    (scale and zero_point) for a target quantized Tensor.

    Args:
       input (torch.Tensor): floating point input Tensor
       qmin (int): minimum quantized value for target quantized Tensor
       qmax (int): maximum quantized value for target quantized Tensor
       dtype (torch.dtype): dtype for target quantized Tensor

    Returns:
       scale (torch.Tensor): quantization parameter for the target quantized Tensor
       zero_point (torch.Tensor): quantization parameter for the target quantized Tensor
    """
    assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
    assert dtype == torch.int8 or dtype == torch.uint8 or dtype == torch.int32, \
        f"Expecting target dtype to be int8 uint8 or int32, but got: {dtype}"
    validate_qmin_qmax(qmin, qmax)

    min_val, max_val = torch.aminmax(input)

    # Use the torch.tensor factory instead of the legacy torch.Tensor constructor.
    eps = torch.tensor([torch.finfo(torch.float32).eps])
    return determine_qparams(
        min_val, max_val, qmin, qmax, dtype, eps, has_customized_qrange=False)
212
+
213
quantized_decomposed_lib.define(
    "choose_qparams_symmetric.tensor(Tensor input, int quant_min, int quant_max, "
    "ScalarType dtype) -> (Tensor, Tensor)")

@impl(quantized_decomposed_lib, "choose_qparams_symmetric.tensor", "CompositeExplicitAutograd")
def choose_qparams_symmetric_tensor(
    input: torch.Tensor,
    qmin: int,
    qmax: int,
    dtype: torch.dtype
) -> Tuple[torch.Tensor, torch.Tensor]:
    """ Given an input Tensor, derive the per tensor *symmetric* affine quantization
    parameters (scale and zero_point) for a target quantized Tensor.

    Args:
       input (torch.Tensor): floating point input Tensor
       qmin (int): minimum quantized value for target quantized Tensor
       qmax (int): maximum quantized value for target quantized Tensor
       dtype (torch.dtype): dtype for target quantized Tensor

    Returns:
       scale (torch.Tensor): quantization parameter for the target quantized Tensor
       zero_point (torch.Tensor): quantization parameter for the target quantized Tensor
    """
    assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
    assert dtype == torch.int8 or dtype == torch.uint8 or dtype == torch.int32, \
        f"Expecting target dtype to be int8 uint8 or int32, but got: {dtype}"
    validate_qmin_qmax(qmin, qmax)

    min_val, max_val = torch.aminmax(input)
    # Use the torch.tensor factory instead of the legacy torch.Tensor constructor.
    return determine_qparams(
        min_val,
        max_val,
        qmin,
        qmax,
        dtype,
        torch.tensor([torch.finfo(torch.float32).eps]),
        has_customized_qrange=False,
        qscheme=torch.per_tensor_symmetric
    )
253
+
254
@impl(quantized_decomposed_lib, "choose_qparams.tensor", "Meta")
def choose_qparams_tensor_meta(
    input: torch.Tensor,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype
) -> Tuple[torch.Tensor, torch.Tensor]:
    """ Meta kernel for choose_qparams.tensor: validates arguments and returns
    empty one-element (scale, zero_point) Tensors on the input's device.
    """
    assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
    # Single-line message: the original used a backslash continuation *inside* the
    # f-string, which embedded a newline and an indentation run into the error text.
    assert quant_min < quant_max, \
        f"Expecting quant_min to be smaller than quant_max but received min: {quant_min} max: {quant_max}"
    return torch.empty(1, dtype=torch.float, device=input.device), torch.empty(1, dtype=torch.int32, device=input.device)
265
+
266
@impl(quantized_decomposed_lib, "choose_qparams_symmetric.tensor", "Meta")
def choose_qparams_symmetric_tensor_meta(
    input: torch.Tensor,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype
) -> Tuple[torch.Tensor, torch.Tensor]:
    """ Meta kernel for choose_qparams_symmetric.tensor: returns empty
    one-element (scale, zero_point) Tensors on the input's device.
    """
    device = input.device
    scale = torch.empty(1, dtype=torch.float, device=device)
    zero_point = torch.empty(1, dtype=torch.int32, device=device)
    return scale, zero_point
274
+ # Helper function used to implement per-channel quantization against any axis
275
+ def _permute_to_axis_zero(x, axis):
276
+ new_axis_list = list(range(x.dim()))
277
+ new_axis_list[axis] = 0
278
+ new_axis_list[0] = axis
279
+ y = x.permute(tuple(new_axis_list))
280
+ return y, new_axis_list
281
+
282
quantized_decomposed_lib.define(
    "quantize_per_channel(Tensor input, Tensor scales, Tensor zero_points, int axis, "
    "int quant_min, int quant_max, ScalarType dtype) -> Tensor")

@impl(quantized_decomposed_lib, "quantize_per_channel", "CompositeExplicitAutograd")
def quantize_per_channel(
    input: torch.Tensor,
    scales: torch.Tensor,
    zero_points: torch.Tensor,
    axis: int,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype
) -> torch.Tensor:
    """ Per-channel affine quantization: maps float32 values to integers using a
    separate (scale, zero_point) pair for each slice along `axis`.

    Args:
       input (torch.Tensor): original float32 Tensor
       scales (torch.Tensor): per-channel scale quantization parameters
       zero_points (torch.Tensor): per-channel zero_point quantization parameters
       axis (int): channel axis the parameters are indexed along
       quant_min (int): minimum quantized value for output Tensor
       quant_max (int): maximum quantized value for output Tensor
       dtype (torch.dtype): requested dtype (e.g. torch.uint8) for output Tensor

    Returns:
       Tensor with the requested dtype; the quantization parameters are not
       stored in the Tensor, they live in the call arguments.
    """
    assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
    assert axis < input.dim(), f"Expecting axis to be < {input.dim()}"
    _quant_min_max_bounds_check(quant_min, quant_max, dtype)
    # Move the channel axis to dim 0 so channel i is simply input[i].
    input, permute_axis_list = _permute_to_axis_zero(input, axis)
    quantized = torch.zeros_like(input)

    for channel in range(input.size(0)):
        scaled = torch.round(input[channel] * (1.0 / scales[channel])) + zero_points[channel]
        quantized[channel] = torch.clamp(scaled, quant_min, quant_max)

    # Undo the permutation (it is its own inverse) and cast to the target dtype.
    return quantized.permute(tuple(permute_axis_list)).to(dtype)
328
+
329
@impl(quantized_decomposed_lib, "quantize_per_channel", "Meta")
def quantize_per_channel_meta(
    input: torch.Tensor,
    scales: torch.Tensor,
    zero_points: torch.Tensor,
    axis: int,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype
) -> torch.Tensor:
    """ Meta kernel for quantize_per_channel: validates arguments and returns an
    empty Tensor with the output dtype (no computation).
    """
    assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
    assert axis < input.dim(), f"Expecting axis to be < {input.dim()}"
    _quant_min_max_bounds_check(quant_min, quant_max, dtype)
    return torch.empty_like(input, dtype=dtype)
343
+
344
# Note: quant_min/quant_max/dtype are not used in the operator, but for now it's kept in
# the signature as metadata for the input Tensor, this might be useful for pattern
# matching in the future
# We will revisit this later if we found there are no use cases for it
quantized_decomposed_lib.define(
    "dequantize_per_channel(Tensor input, Tensor scales, Tensor zero_points, int axis, "
    "int quant_min, int quant_max, ScalarType dtype) -> Tensor")

@impl(quantized_decomposed_lib, "dequantize_per_channel", "CompositeExplicitAutograd")
def dequantize_per_channel(
    input: torch.Tensor,
    scales: torch.Tensor,
    zero_points: torch.Tensor,
    axis: int,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype
) -> torch.Tensor:
    """ Per-channel affine dequantization: maps integer quantized values back to
    float32 using a separate (scale, zero_point) pair per slice along `axis`.

    Args:
       input (torch.Tensor): Tensor with dtype matching the `dtype` argument
         (e.g. `torch.uint8`); together with scales/zero_points/axis it
         represents a per-channel quantized Tensor
       scales (torch.Tensor): per-channel scale quantization parameters
       zero_points (torch.Tensor): per-channel zero_point quantization parameters
       axis (int): channel axis the parameters are indexed along
       quant_min (int): minimum quantized value (unused; reserved for pattern matching)
       quant_max (int): maximum quantized value (unused; reserved for pattern matching)
       dtype (torch.dtype): dtype of the input Tensor (unused in computation;
         reserved for pattern matching)

    Returns:
       dequantized float32 Tensor
    """
    assert input.dtype == dtype, f"Expecting input to have dtype {dtype}, but got dtype: {input.dtype}"
    assert axis < input.dim(), f"Expecting axis to be < {input.dim()}"
    _quant_min_max_bounds_check(quant_min, quant_max, dtype)
    # Move the channel axis to dim 0 so channel i is simply input[i].
    input, permute_axis_list = _permute_to_axis_zero(input, axis)
    dequantized = torch.zeros_like(input, dtype=torch.float32)

    for channel in range(input.size(0)):
        # TODO: investigate why
        #   (input[i] - zero_points[i]).to(torch.float32) * scales[i]
        # failed the test
        dequantized[channel] = (input[channel].to(torch.float32) - zero_points[channel]) * scales[channel]

    # Undo the permutation (it is its own inverse).
    return dequantized.permute(tuple(permute_axis_list))
402
+
403
@impl(quantized_decomposed_lib, "dequantize_per_channel", "Meta")
def dequantize_per_channel_meta(
    input: torch.Tensor,
    scales: torch.Tensor,
    zero_points: torch.Tensor,
    axis: int,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype
) -> torch.Tensor:
    """ Meta kernel for dequantize_per_channel: validates arguments and returns
    an empty float32 Tensor (no computation).
    """
    assert input.dtype == dtype, f"Expecting input to have dtype {dtype}, but got dtype: {input.dtype}"
    assert axis < input.dim(), f"Expecting axis to be < {input.dim()}"
    _quant_min_max_bounds_check(quant_min, quant_max, dtype)
    return torch.empty_like(input, dtype=torch.float32)
deepseekvl2/lib/python3.10/site-packages/torch/ao/quantization/fx/_equalize.py ADDED
@@ -0,0 +1,824 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+
3
+ from collections import namedtuple
4
+ from typing import Any, Dict, List, Optional, Tuple
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ import torch.nn.functional as F
9
+ import torch.ao.nn.intrinsic as nni
10
+ from torch.fx import GraphModule
11
+ from torch.fx.graph import Node
12
+ from torch.ao.quantization.fx.graph_module import _get_observed_graph_module_attr
13
+
14
+ from torch.ao.quantization.backend_config import get_native_backend_config
15
+
16
+ from ..observer import _with_args, ObserverBase, PerChannelMinMaxObserver
17
+ from ..utils import _parent_name, check_min_max_valid
18
+
19
+ from .utils import (
20
+ get_new_attr_name_with_prefix,
21
+ maybe_get_next_module,
22
+ node_arg_is_weight,
23
+ )
24
+
25
+ CUSTOM_MODULE_SUPP_LIST: List[Any] = []
26
+
27
def reshape_scale(scale: torch.Tensor, axis: int, input: torch.Tensor) -> torch.Tensor:
    """Reshape `scale` to a broadcastable view: size 1 on every dimension of
    `input` except `axis`, which keeps its original extent.
    """
    broadcast_shape = [input.size(axis) if dim == axis else 1 for dim in range(input.ndim)]
    return scale.view(broadcast_shape)
33
+
34
# Maps a per-tensor qscheme to its per-channel counterpart; used when building
# the per-channel min/max observers inside the equalization observers below.
# NOTE(review): "qsheme" is a typo for "qscheme", but this name is referenced
# elsewhere in the module, so it is kept for backward compatibility.
qsheme_mapping_per_tensor_to_per_channel = {
    torch.per_tensor_affine: torch.per_channel_affine,
    torch.per_tensor_symmetric: torch.per_channel_symmetric,
}
38
+
39
+
40
class _InputEqualizationObserver(nn.Module):
    r"""Observer for tracking the running min/max values of input columns, and
    computing the quantization parameters for the overall min/max input values.

    Args:
        dtype: Quantized data type
        qscheme: Quantization scheme
        quant_min: Minimum quantization value. If unspecified, it will
            follow the 8-bit setup.
        quant_max: Maximum quantization value. If unspecified, it will
            follow the 8-bit setup.

    The running minimum/maximum :math:`x_\text{min/max}` are computed in the
    same way as :class:`~torch.ao.quantization.observer.PerChannelMinMaxObserver`,
    with the difference that the running min/max values are stored per column.
    This observer is intended to be used along with a WeightEqualizationObserver
    to calculate the equalization scale.
    """

    def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine,
                 quant_min=None, quant_max=None, factory_kwargs=None) -> None:
        super().__init__()

        # Equalization only supports per-tensor input quantization schemes.
        if qscheme not in {torch.per_tensor_affine, torch.per_tensor_symmetric}:
            raise TypeError("Input qscheme must be per-tensor")

        self.dtype = dtype
        self.qscheme = qscheme

        # Track min/max per input column (ch_axis=1) using the per-channel
        # counterpart of the requested per-tensor qscheme.
        per_channel_qscheme = qsheme_mapping_per_tensor_to_per_channel[qscheme]
        self.input_obs = PerChannelMinMaxObserver(ch_axis=1, dtype=dtype,
                                                  qscheme=per_channel_qscheme,
                                                  quant_min=quant_min,
                                                  quant_max=quant_max,
                                                  factory_kwargs=factory_kwargs)

        # torch.tensor(1) is the sentinel for "scale not yet computed".
        self.equalization_scale = torch.tensor(1)
        # Broadcast shape for the scale, filled in on the first forward pass.
        self.equalization_shape: List[int] = []

    def forward(self, x_orig):
        # 2-D covers Linear inputs; 3-5-D cover Conv1d/2d/3d inputs.
        if not (x_orig.ndim >= 2 and x_orig.ndim <= 5):
            raise ValueError("InputEqualizationObserver only supports Linear and Conv layers")

        # Calculate the shape needed to reshape the equalization scale later (needed for Conv layers)
        self.equalization_shape = [1] * x_orig.ndim
        self.equalization_shape[1] = x_orig.size(1)

        return self.input_obs(x_orig)

    def get_input_minmax(self):
        # Per-column running min/max recorded by the inner observer.
        return (self.input_obs.min_val, self.input_obs.max_val)

    def set_equalization_scale(self, equalization_scale):
        # Reshape the equalization scale along axis=1 so that it can be
        # multiplied with the input along axis=1
        if equalization_scale.nelement() == 1 and equalization_scale == torch.tensor(1):
            # Sentinel value: leave the stored scale untouched.
            return
        self.equalization_scale = torch.reshape(equalization_scale, self.equalization_shape)

    def calculate_scaled_minmax(self):
        r""" Returns the scaled min/max inputs
        """
        if self.equalization_scale.nelement() == 1 and self.equalization_scale == torch.tensor(1):
            # Scale was never set — warn and signal "nothing to scale".
            warnings.warn(
                "Must call calculate_equalization_scale before calling calculate_scaled_minmax. " +
                "Will not scale the next quantization observer."
            )
            return None, None

        # Calculate qparams for the scaled min/max inputs
        # Scale the input by the equalization scale located at the same column
        # index
        (min_inputs, max_inputs) = self.get_input_minmax()
        equalization_scale_reshaped = reshape_scale(self.equalization_scale, 0, min_inputs)
        min_input_scaled = torch.min(torch.mul(min_inputs, equalization_scale_reshaped))
        max_input_scaled = torch.max(torch.mul(max_inputs, equalization_scale_reshaped))

        return min_input_scaled, max_input_scaled

    # Allows _InputEqualizationObserver.with_args(...) to build a pre-configured
    # observer factory (functools.partial-like), same pattern as other observers.
    with_args = classmethod(_with_args)
120
+
121
+
122
class _WeightEqualizationObserver(nn.Module):
    r"""Observer for tracking the running min/max values of weight columns and
    rows, and computing the quantization parameters for the weight rows.

    Args:
        dtype: Quantized data type
        qscheme: Quantization scheme
        quant_min: Minimum quantization value. If unspecified, it will
            follow the 8-bit setup.
        quant_max: Maximum quantization value. If unspecified, it will
            follow the 8-bit setup.

    This observer is made up of 1 PerChannelMinMaxObserver `weight_col_obs` used
    to record the running minimum and maximum of columns of incoming weight
    tensors. This observer is intended to be used along with an
    InputEqualizationObserver to calculate the equalization scale.

    The running minimum/maximum :math:`w_\text{min/max}` are computed in the
    same way as :class:`~torch.ao.quantization.observer.PerChannelMinMaxObserver`.
    """

    def __init__(self, dtype=torch.qint8, qscheme=torch.per_tensor_affine, quant_min=None,
                 quant_max=None, factory_kwargs=None) -> None:
        super().__init__()

        self.dtype = dtype
        self.qscheme = qscheme
        self.ch_axis = 1

        # Weights accept per-channel qschemes directly; per-tensor schemes are
        # translated to their per-channel counterpart for the column observer.
        per_channel_qscheme = qscheme
        if qscheme in {torch.per_tensor_affine, torch.per_tensor_symmetric}:
            per_channel_qscheme = qsheme_mapping_per_tensor_to_per_channel[qscheme]
        self.weight_col_obs = PerChannelMinMaxObserver(ch_axis=1, dtype=dtype,
                                                       qscheme=per_channel_qscheme,
                                                       quant_min=quant_min,
                                                       quant_max=quant_max,
                                                       factory_kwargs=factory_kwargs)

        # torch.tensor(1) is the sentinel for "scale not yet computed".
        self.equalization_scale = torch.tensor(1)

    def forward(self, w_orig):
        # 2-D covers Linear weights; 3-5-D cover Conv1d/2d/3d weights.
        if not (w_orig.ndim >= 2 and w_orig.ndim <= 5):
            # Fixed copy-paste error: the message previously named
            # "InputEqualizationObserver" although this is the weight observer.
            raise ValueError("WeightEqualizationObserver only supports Linear and Conv layers")

        return self.weight_col_obs(w_orig)

    def get_weight_col_minmax(self):
        # Per-column running min/max recorded by the inner observer.
        return (self.weight_col_obs.min_val, self.weight_col_obs.max_val)

    def set_equalization_scale(self, equalization_scale):
        self.equalization_scale = equalization_scale

    # Allows _WeightEqualizationObserver.with_args(...) to build a
    # pre-configured observer factory (functools.partial-like).
    with_args = classmethod(_with_args)
175
+
176
+
177
def calculate_equalization_scale(input_obs: _InputEqualizationObserver,
                                 weight_obs: _WeightEqualizationObserver) -> torch.Tensor:
    r""" Computes the per-column equalization scale from the recorded input and
    weight ranges: sqrt(weight_range / input_range).

    Args:
        input_obs: Observer that tracks the ranges for the input columns
        weight_obs: Observer that tracks the ranges for the weight columns
    """

    (min_inputs, max_inputs) = input_obs.get_input_minmax()
    (min_weights, max_weights) = weight_obs.get_weight_col_minmax()

    inputs_ok = check_min_max_valid(min_inputs, max_inputs)
    weights_ok = check_min_max_valid(min_weights, max_weights)
    if not (inputs_ok and weights_ok):
        # Observers never ran — fall back to the sentinel scale.
        warnings.warn(
            "Must run observer before calling calculate_equalization_scale. " +
            "Returning default equalization scale torch.tensor(1)."
        )
        return torch.tensor(1)

    if not (min_inputs.shape == min_weights.shape):
        raise ValueError(
            "Input and Weight must have the same column dimension. " +
            f"Found {min_inputs.shape} and {min_weights.shape} shapes instead."
        )

    weight_range = max_weights - min_weights
    input_range = max_inputs - min_inputs
    scale = torch.sqrt(weight_range / input_range)
    # Replace all 'inf', 'nan', 0's with 1s to prevent errors
    scale[scale == 0.] = 1
    return torch.nan_to_num(scale, nan=1, posinf=1, neginf=1)
+ return equalization_scale
208
+
209
+
210
class EqualizationQConfig(namedtuple('EqualizationQConfig', ['input_activation', 'weight'])):
    """
    Describes how to quantize a layer or a part of the network specifically for
    input-weight equalization by providing settings (observer classes) for
    inputs, outputs, and weights.

    Note that EqualizationQConfig needs to contain observer **classes** (like
    MinMaxObserver) or a callable that returns instances on invocation, not the
    concrete observer instances themselves.
    Quantization function will instantiate observers multiple times for each of
    the layers.

    Observer classes have usually reasonable default arguments, but they can be
    overwritten with `with_args` method (that behaves like functools.partial):

    my_qconfig = EqualizationQConfig(input_activation=_InputEqualizationObserver.with_args(dtype=torch.qint8),
                                    weight=_WeightEqualizationObserver.with_args(dtype=torch.qint8))
    """
    def __new__(cls, input_activation=torch.nn.Identity, weight=torch.nn.Identity):
        # Guard against a common mistake: passing instantiated observers
        # instead of observer classes/factories.
        if isinstance(input_activation, nn.Module) or isinstance(weight, nn.Module):
            raise ValueError("EqualizationQConfig received observer instance, please pass observer class instead. " +
                             "Use MyObserver.with_args(x=1) to override arguments to constructor if needed")
        return super().__new__(cls, input_activation, weight)
234
+
235
+
236
# Default observer factories for input-weight equalization, bundled into the
# default EqualizationQConfig used by prepare_fx when equalization is enabled.
input_equalization_observer = _InputEqualizationObserver.with_args(
    dtype=torch.quint8, qscheme=torch.per_tensor_symmetric)
weight_equalization_observer = _WeightEqualizationObserver.with_args(
    dtype=torch.qint8, qscheme=torch.per_channel_symmetric)
default_equalization_qconfig = EqualizationQConfig(
    input_activation=input_equalization_observer,
    weight=weight_equalization_observer)
242
+
243
+
244
def fused_module_supports_equalization(module) -> bool:
    """ Checks if the fused node supports equalization. """
    # Exact type match on purpose: subclasses are not assumed to be supported.
    return type(module) in (nni.LinearReLU, nni.ConvReLU1d, nni.ConvReLU2d, nni.ConvReLU3d)
247
+
248
def nn_module_supports_equalization(module) -> bool:
    """ Checks if the torch.nn node supports equalization. """
    # Exact type match on purpose: subclasses are not assumed to be supported.
    return type(module) in (nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d)
251
+
252
def custom_module_supports_equalization(module) -> bool:
    """ Checks if the custom node supports equalization. """
    # Exact type match against the user-extendable registry.
    return type(module) in CUSTOM_MODULE_SUPP_LIST
255
+
256
+
257
def node_supports_equalization(node: Node, modules) -> bool:
    """ Checks if the current node supports equalization
    Currently we only support nn.Linear/F.Linear and nn.Conv/F.conv layers
    """
    if node.op == 'call_module':
        mod = modules[str(node.target)]
        return (nn_module_supports_equalization(mod)
                or fused_module_supports_equalization(mod)
                or custom_module_supports_equalization(mod))
    if node.op == 'call_function':
        return node.target in [F.linear, F.conv1d, F.conv2d, F.conv3d]
    return False
268
+
269
def is_equalization_observer(observer: nn.Module) -> bool:
    """Return True if `observer` is one of the two equalization observers."""
    return isinstance(observer, (_InputEqualizationObserver, _WeightEqualizationObserver))
271
+
272
+
273
+ ###############################################################################
274
+ # Functions for equalization during convert #
275
+ ###############################################################################
276
+
277
def get_op_node_and_weight_eq_obs(
    input_eq_obs_node: Node,
    model: GraphModule,
    modules: Dict[str, nn.Module]
) -> Tuple[Optional[Node], Optional[_WeightEqualizationObserver]]:
    """ Gets the following weight equalization observer. There should always
    exist a weight equalization observer after an input equalization observer.

    Returns the operation node that follows the input equalizatoin observer node
    and the weight equalization observer
    """

    # Find the op node that comes directly after the input equaliation observer
    op_node = None
    for user in input_eq_obs_node.users.keys():
        if node_supports_equalization(user, modules):
            op_node = user
            break

    assert(op_node is not None)
    if op_node.op == 'call_module':
        # If the op_node is a nn.Linear layer, then it must have a
        # WeightEqualizationObserver configuration
        # Fetch the per-node equalization qconfig map stored on the observed model.
        maybe_equalization_node_name_to_config = _get_observed_graph_module_attr(model, "equalization_node_name_to_qconfig")
        assert maybe_equalization_node_name_to_config is not None
        equalization_node_name_to_qconfig: Dict[str, Any] = maybe_equalization_node_name_to_config  # type: ignore[assignment]
        assert(equalization_node_name_to_qconfig.get(op_node.name, None) is not None)
        # .weight() instantiates a fresh observer from the qconfig factory.
        weight_eq_obs = equalization_node_name_to_qconfig.get(op_node.name, None).weight()

        assert(isinstance(weight_eq_obs, _WeightEqualizationObserver))
        return op_node, weight_eq_obs

    elif op_node.op == 'call_function':
        # Functional ops (F.linear/F.conv*) carry the weight observer as a
        # separate call_module node among the op's arguments.
        weight_node = maybe_get_weight_eq_obs_node(op_node, modules)
        if weight_node is not None:
            weight_eq_obs = modules[str(weight_node.target)]
            assert(isinstance(weight_eq_obs, _WeightEqualizationObserver))
            return op_node, weight_eq_obs

    return None, None
317
+
318
def maybe_get_weight_eq_obs_node(op_node: Node, modules: Dict[str, nn.Module]) -> Optional[Node]:
    """ Gets the weight equalization observer node if it exists.

    Only valid for functional ops (call_function); scans the op's arguments for
    the one identified as the weight by the backend config and asserts it is a
    _WeightEqualizationObserver call_module node.
    """
    assert(op_node.op == 'call_function')
    # TODO: Pass in backend_config into this function and parent functions.
    backend_config = get_native_backend_config()
    for node_arg in op_node.args:
        if node_arg_is_weight(op_node, node_arg, backend_config):
            assert(isinstance(node_arg, Node) and node_arg.op == 'call_module' and
                   isinstance(modules[str(node_arg.target)], _WeightEqualizationObserver))
            return node_arg
    return None
330
+
331
def maybe_get_next_input_eq_obs(node: Node, modules: Dict[str, nn.Module]) -> Optional[_InputEqualizationObserver]:
    """ Gets the following input equalization observer if it exists.

    For example, in the case of connecting linear layers:
        x -> inp_obs1 -> eq_obs1 -> linear1 -> out_obs1 -> eq_obs2 -> linear2 -> out_obs2
    If the node being passed in is the linear1 node, then we want to return eq_obs2,
    the following equalization observer for linear2.

    However, if there are no connecting layers:
        x -> inp_obs1 -> eq_obs1 -> linear1 -> out_obs1 -> add
    Then we want to return None.

    In the case of an unfused linear-relu layer with a connecting linear layer:
        linear1 -> relu -> out_obs1 -> eq_obs2 -> linear2 -> out_obs2
    Since it is unfused, we want to skip over the relu layer and return eq_obs2,
    the following equalization observer for linear2.
    """

    assert(node_supports_equalization(node, modules))

    # Locate the following nn.ReLU or F.relu node if it exists
    maybe_relu_node = maybe_get_next_module(node, modules, nn.ReLU)
    if maybe_relu_node is None:
        maybe_relu_node = maybe_get_next_module(node, modules, target_functional_type=F.relu)

    # Locate the following output observer if it exists.
    # We will skip the relu node if it exists.
    maybe_obs_node = (
        maybe_get_next_module(node, modules, ObserverBase)
        if maybe_relu_node is None
        else maybe_get_next_module(maybe_relu_node, modules, ObserverBase)
    )
    if maybe_obs_node is None:
        return None

    maybe_eq_obs_node = maybe_get_next_module(maybe_obs_node, modules, _InputEqualizationObserver)
    if maybe_eq_obs_node is None:
        return None

    # NOTE(review): this indexes `modules` with str(node) (the node's *name*)
    # rather than str(node.target) as done elsewhere in this file; presumably
    # the two coincide for these call_module nodes — verify against callers.
    maybe_eq_obs = modules[str(maybe_eq_obs_node)]
    assert(isinstance(maybe_eq_obs, _InputEqualizationObserver))
    return maybe_eq_obs
373
+
374
def maybe_get_next_equalization_scale(node: Node, modules: Dict[str, nn.Module]) -> Optional[torch.Tensor]:
    """Return the next layer's equalization scale, or ``None``.

    Used when two linear layers connect:
        linear1 -> LinearOutObs -> InputEqObs -> linear2
    Given ``linear1``, locate ``InputEqObs`` and return its equalization
    scale; a scalar scale of 1 (i.e. "no equalization") is reported as
    ``None``.
    """
    next_inp_eq_obs = maybe_get_next_input_eq_obs(node, modules)
    if not next_inp_eq_obs:
        return None
    scale = next_inp_eq_obs.equalization_scale
    # A scalar 1 means "no scaling needed" — treat it as absent.
    if scale.nelement() == 1 and scale == torch.tensor(1):
        return None
    return scale
389
+
390
def scale_input_observer(node: Node, modules: Dict[str, nn.Module]) -> None:
    """Push scaled min/max values into the preceding input quantization observer.

    ``node`` is an input equalization observer node; its scaled min/max
    (computed by ``calculate_scaled_minmax``) overwrite the min/max of the
    quantization observer that feeds it. No-op when the preceding module is
    not an observer or when no scaled values are available.
    """
    eq_obs = modules[str(node.target)]
    assert isinstance(eq_obs, _InputEqualizationObserver)

    quant_obs_node = node.args[0]
    assert isinstance(quant_obs_node, Node)

    quant_obs = modules[str(quant_obs_node.target)]
    if not isinstance(quant_obs, ObserverBase):
        return

    lo, hi = eq_obs.calculate_scaled_minmax()
    if lo is None and hi is None:
        return
    quant_obs.min_val = lo
    quant_obs.max_val = hi
410
+
411
def scale_weight_node(
    node: Node,
    modules: Dict[str, nn.Module],
    equalization_scale: torch.Tensor,
    next_equalization_scale: Optional[torch.Tensor],
) -> None:
    """Scale a module layer's weights for input-weight equalization.

    The weight is multiplied by ``1 / equalization_scale`` along its input
    axis; when the following layer is also equalized, the weight rows and the
    bias are additionally multiplied by ``next_equalization_scale``.

    Args:
        node: Current node whose weights we want to scale
        modules: Mapping of qualified names to modules
        equalization_scale: Current node's calculated equalization scale
        next_equalization_scale: Next node's calculated equalization scale if
            the following node needs to be equalized, 1 otherwise
    """
    if equalization_scale is None:
        return

    target_module = modules[str(node.target)]
    if fused_module_supports_equalization(target_module):
        # Fused modules (e.g. LinearReLU) hold the real op at index 0
        op_module = target_module[0]  # type: ignore[index]
    else:
        op_module = target_module
    assert nn_module_supports_equalization(op_module) or custom_module_supports_equalization(op_module)

    weight = op_module.weight
    assert isinstance(weight, torch.Tensor)

    # Divide the weight columns (axis=1) by the equalization scale
    col_scale = reshape_scale(equalization_scale, 1, weight)
    new_weight = torch.mul(weight, torch.reciprocal(col_scale))

    if next_equalization_scale is None:
        op_module.weight = nn.Parameter(new_weight)
        return

    # Multiply the weight rows (axis=0) by the next layer's equalization scale
    row_scale = reshape_scale(next_equalization_scale, 0, weight)
    new_weight = torch.mul(new_weight, row_scale)
    op_module.weight = nn.Parameter(new_weight)

    # The bias feeds the next layer directly, so it is scaled element-wise
    # by the next equalization scale as well
    bias = op_module.bias
    if bias is None:
        return
    assert isinstance(bias, torch.Tensor)

    bias_scale = reshape_scale(next_equalization_scale, 0, bias)
    op_module.bias = nn.Parameter(torch.mul(bias, bias_scale))
466
+
467
def scale_weight_functional(
    op_node: Node,
    model: GraphModule,
    modules: Dict[str, nn.Module],
    equalization_scale: torch.Tensor,
    next_equalization_scale: Optional[torch.Tensor],
) -> None:
    """Scale the weight (and bias) attributes feeding a functional layer.

    The weight path into ``op_node`` looks like:
        get_attr(weight) -> weight_quant_obs -> weight_eq_obs -> op_node
    so we walk backwards from ``op_node`` through the equalization observer
    and the quantization observer to reach the ``get_attr`` node holding the
    actual weight values, then rescale those values in place on the model.
    """
    if equalization_scale is None:
        return

    # Step back to the equalization observer node
    weight_eq_obs_node = maybe_get_weight_eq_obs_node(op_node, modules)
    if weight_eq_obs_node is None:
        return

    # Step back to the quantization observer node
    weight_quant_obs_node = weight_eq_obs_node.args[0]
    if weight_quant_obs_node is None:
        return
    assert (isinstance(weight_quant_obs_node, Node)
            and isinstance(modules[str(weight_quant_obs_node.target)], ObserverBase))

    # Step back to the get_attr(weight) node
    weight_node = weight_quant_obs_node.args[0]
    if weight_node is None:
        return
    assert isinstance(weight_node, Node) and weight_node.op == 'get_attr'

    weight_parent_name, weight_name = _parent_name(weight_node.target)
    weight = getattr(modules[weight_parent_name], weight_name)

    # Divide the weight columns (axis=1) by this layer's equalization scale
    scaled_weight = torch.mul(
        weight, torch.reciprocal(reshape_scale(equalization_scale, 1, weight)))

    if next_equalization_scale is None:
        setattr(modules[weight_parent_name], weight_name, scaled_weight)
        return

    # Multiply the weight rows (axis=0) by the next layer's equalization scale
    scaled_weight = torch.mul(
        scaled_weight, reshape_scale(next_equalization_scale, 0, scaled_weight))
    setattr(modules[weight_parent_name], weight_name, scaled_weight)
    assert torch.allclose(model.get_buffer(str(weight_node.target)), scaled_weight)

    # Scale the bias element-wise by the next equalization scale, if a bias
    # get_attr node is among op_node's args
    bias_node = None
    for arg in op_node.args:
        if isinstance(arg, Node) and arg.op == 'get_attr' and 'bias' in arg.name:
            bias_node = arg
            break
    if bias_node is None:
        return

    bias_parent_name, bias_name = _parent_name(bias_node.target)
    bias = getattr(modules[bias_parent_name], bias_name)
    scaled_bias = torch.mul(bias, reshape_scale(next_equalization_scale, 0, bias))
    setattr(modules[bias_parent_name], bias_name, scaled_bias)
541
+
542
def clear_weight_quant_obs_node(op_node: Node, modules: Dict[str, nn.Module]) -> None:
    """Reset the min/max values of the weight quantization observer for ``op_node``.

    Walks back from the op node to its weight equalization observer, then to
    the quantization observer preceding it, and clears that observer's
    recorded min/max so they can be re-collected after weight scaling.
    """
    eq_obs_node = maybe_get_weight_eq_obs_node(op_node, modules)
    if eq_obs_node is None:
        return

    quant_obs_node = eq_obs_node.args[0]
    if quant_obs_node is None:
        return
    assert isinstance(quant_obs_node, Node)

    quant_obs = modules[str(quant_obs_node.target)]
    assert isinstance(quant_obs, ObserverBase)
    quant_obs.reset_min_max_vals()  # type: ignore[operator]
558
+
559
def remove_node(model: GraphModule, node: Node, prev_node: Node):
    """Drop ``node`` from the graph, rewiring its users to ``prev_node``.

    Every consumer of ``node`` has that input replaced with ``prev_node``,
    after which ``node`` (now user-free) is erased from ``model``'s graph.
    """
    # Snapshot the users first: replace_input_with mutates node.users
    for consumer in list(node.users.keys()):
        consumer.replace_input_with(node, prev_node)

    # Node is now dangling; erase it from the graph
    model.graph.erase_node(node)
571
+
572
def update_obs_for_equalization(model: GraphModule, modules: Dict[str, nn.Module]) -> Dict[str, _WeightEqualizationObserver]:
    """Compute and install equalization scales for every observer pair.

    For each ``_InputEqualizationObserver`` in the graph, find its matching
    ``_WeightEqualizationObserver``, calibrate the weight observer if needed,
    compute the equalization scale from the pair, and store the scale on both
    observers.

    Returns:
        Mapping from operation-node name to that operation's
        ``_WeightEqualizationObserver``.
    """
    weight_eq_obs_dict = {}
    for node in model.graph.nodes:
        if node.op != 'call_module' or not isinstance(modules[node.target], _InputEqualizationObserver):
            continue

        input_eq_obs = modules[node.target]
        assert isinstance(input_eq_obs, _InputEqualizationObserver)
        op_node, weight_eq_obs = get_op_node_and_weight_eq_obs(node, model, modules)
        if op_node is None or weight_eq_obs is None:
            continue

        if op_node.op == 'call_module':
            # The weight equalization observer was just created, so run the
            # module's weight through it to calibrate it
            op_module = modules[str(op_node.target)]
            if fused_module_supports_equalization(op_module):
                inner = op_module[0]  # type: ignore[index]
                assert nn_module_supports_equalization(inner)
                weight_eq_obs(inner.weight)
            else:
                weight_eq_obs(op_module.weight)

        # Compute the shared scale and install it on both observers
        equalization_scale = calculate_equalization_scale(input_eq_obs, weight_eq_obs)
        input_eq_obs.set_equalization_scale(equalization_scale)
        weight_eq_obs.set_equalization_scale(equalization_scale)

        weight_eq_obs_dict[op_node.name] = weight_eq_obs

    return weight_eq_obs_dict
609
+
610
def convert_eq_obs(
    model: GraphModule,
    modules: Dict[str, nn.Module],
    weight_eq_obs_dict: Dict[str, _WeightEqualizationObserver],
) -> None:
    """ Converts the equalization operations and updates the other nodes in the
    following way:
        - Removes the input equalization observers and inserts a mul operator
          along with an equalization scale node wherever applicable (we do not
          want to insert a mul operator between connecting linear layers).
        - Updates the input quantization observers with the scaled input min/max
          values.
        - Scales the weights by the current and next equalization scales.
        - Removes the weight equalization observer node if it exists.

    Before (after prepare):
                                    weight values
                                          |
                                    WeightQuantObs
                                          |
                                      WeightEqObs
                                          |
        x -> InpQuantObs -> InpEqObs -> linear -> OutQuantObs

    After this function:
                                              scaled weight values
                                                      |
           equalization scale                   WeightQuantObs
                  |                                   |
        x -> mul -> InpQuantObs (scaled min/max) -> linear -> OutQuantObs

    After convert:
           equalization scale                 scaled weight values
                  |                                    |
        x -> mul -> quantize_per_tensor -> quantized::linear

    Note that although the equalization observer appeared after the quantization
    observer after prepare_fx, the mul node appears before the quantization node
    after convert_fx. This is because placing the equalization observer after
    the quantization observer in prepare_fx would allow us to keep the invariant
    that the graph before the current node inserts its observers is not
    modified.

    Having the equalization observer before the quantization observer would also
    cause some inconsistences between the ordering of the quantization and
    equalization observers.
    For example, a single linear layer would look like:
        x -> InpEqObs1 -> InpQuantObs1 -> linear1 -> OutQuantObs1
    But between two connected linear layers, it would look like:
        linear1 -> OutQuantObs1 -> InpEqObs2 -> linear2 -> OutQuantObs2

    Args:
        model: Observed graph module produced by prepare_fx
        modules: Mapping of qualified names to modules
        weight_eq_obs_dict: Mapping of op-node names to their
            WeightEqualizationObservers (from update_obs_for_equalization)
    """
    for node in model.graph.nodes:
        if node.op == 'call_module' and isinstance(modules[node.target], _InputEqualizationObserver):
            inp_quant_obs_node = node.args[0]
            prev_node = inp_quant_obs_node.args[0]

            # If the previous node is a layer that needs to be equalized, then
            # we will remove the current node because we do not need to add any
            # equalization nodes between two layers that need to be equalized

            # Before: linear1/relu (prev_node) -> output_quant_obs1 (inp_quant_obs_node) -> input_eq_obs2 (node) -> linear2
            # After: linear1/relu (prev_node) -> output_quant_obs1 (inp_quant_obs_node) -> linear2
            if node_supports_equalization(prev_node, modules) or "relu" in prev_node.name:
                remove_node(model, node, inp_quant_obs_node)
                continue

            # Update the following input quantization observer's min/max values
            scale_input_observer(node, modules)

            # Remove the InputEqualization node and add a mul operator before
            # the quantization observer node that appears before the equalization node
            # Before: x -> input_quant_obs -> input_eq_obs -> linear
            # After: x -> mul -> input_quant_obs -> linear

            # Create a node containing the equalization scale
            with model.graph.inserting_before(inp_quant_obs_node):
                get_new_eq_scale_name = get_new_attr_name_with_prefix(prev_node.name + '_equalization_scale')
                name = get_new_eq_scale_name(modules)
                setattr(model, name, modules[node.target].equalization_scale)
                eq_scale_node = model.graph.create_node('get_attr', name)

            # Create a node multiplying the input with the equalization scale
            with model.graph.inserting_after(eq_scale_node):
                inputs = (prev_node, eq_scale_node)
                mul_node = model.graph.create_node("call_function", torch.mul, inputs)

            # Set the mul node to be the input_quant_obs_node's input instead of
            # the previous node
            inp_quant_obs_node.replace_input_with(prev_node, mul_node)
            remove_node(model, node, inp_quant_obs_node)

        elif weight_eq_obs_dict.get(node.name, None) is not None:
            weight_eq_obs = weight_eq_obs_dict.get(node.name)
            assert isinstance(weight_eq_obs, _WeightEqualizationObserver)
            equalization_scale = weight_eq_obs.equalization_scale

            # A scalar scale of 1 means "no equalization needed"
            if equalization_scale.nelement() == 1 and equalization_scale == torch.tensor(1):
                equalization_scale = None  # type: ignore[assignment]
            maybe_next_equalization_scale = maybe_get_next_equalization_scale(node, modules)

            # Scale the weight nodes
            if node.op == 'call_module':
                scale_weight_node(node, modules, equalization_scale, maybe_next_equalization_scale)
            elif node.op == 'call_function':
                scale_weight_functional(node, model, modules, equalization_scale, maybe_next_equalization_scale)

                weight_eq_obs_node = maybe_get_weight_eq_obs_node(node, modules)
                if weight_eq_obs_node is None:
                    # NOTE(review): this aborts the whole conversion loop rather
                    # than skipping just this node — a `continue` may have been
                    # intended; preserved as-is, confirm before changing.
                    return
                assert isinstance(modules[str(weight_eq_obs_node.target)], _WeightEqualizationObserver)

                # Clear the quantization observer's min/max values so that they
                # can get updated later based on the new scale values
                clear_weight_quant_obs_node(node, modules)

                # Erase the weight equalization observer node
                prev_node = weight_eq_obs_node.args[0]
                remove_node(model, weight_eq_obs_node, prev_node)
            else:
                # Bug fix: the original concatenation was missing the closing
                # quote and separator, producing the garbled message
                # "...'call_functionInstead got node ...".
                raise ValueError("Expected operation node to be 'call_module' or 'call_function'. " +
                                 f"Instead got node {node.name} as '{node.op}'.")
731
+
732
def _convert_equalization_ref(model: GraphModule):
    """Reference pass applying equalization changes without quantizing nodes.

    Computes the equalization scales, rescales the observers' inputs and the
    layer weights, and returns the rebuilt GraphModule.
    """
    named_modules = dict(model.named_modules(remove_duplicate=False))

    # Calculate the equalization scale, update the observers with the scaled
    # inputs, and scale the weight
    convert_eq_obs(model, named_modules, update_obs_for_equalization(model, named_modules))

    return GraphModule(model, model.graph)
744
+
745
+
746
+ ###############################################################################
747
+ # Functions for running the equalized model on the Numeric Suite #
748
+ ###############################################################################
749
+
750
def get_layer_sqnr_dict(model_a: nn.Module, model_b: nn.Module, x: torch.Tensor) -> Dict[str, float]:
    """Run the Numeric Suite on two models and return per-layer SQNR values.

    Note: In order to support equalized models, this function has a hacky fix in
    which we do not match any torch.mul operators. This is because equalized
    models contain extra mul operators to scale the input by the equalization
    scale, but this edge case has not been resolved yet within the numeric suite code.

    Args:
        model_a: A float model
        model_b: A quantized model
        x: Inputs to use during calibration
    """
    import torch.ao.ns._numeric_suite_fx as ns
    from torch.ao.ns.fx.mappings import get_unmatchable_types_map

    # Exclude torch.mul from matching (see note above)
    unmatchable_types_map = get_unmatchable_types_map()
    unmatchable_types_map["funs_unmatchable"].add(torch.mul)

    model_a_ns, model_b_ns = ns.add_loggers(
        'fp32', model_a,
        'int8', model_b,
        ns.OutputLogger,
        unmatchable_types_map=unmatchable_types_map
    )

    # Calibrate both logged models on the same input
    model_a_ns(x)
    model_b_ns(x)

    activation_comparison_dict = ns.extract_logger_info(
        model_a_ns,
        model_b_ns,
        ns.OutputLogger,
        'int8')
    ns.extend_logger_results_with_comparison(
        activation_comparison_dict,
        'fp32', 'int8',
        torch.ao.ns.fx.utils.compute_sqnr, 'sqnr'
    )

    # Flatten the comparison results into {layer fqn: sqnr}
    layer_sqnr_dict = {}
    for results in activation_comparison_dict.values():
        stats = results['node_output']['int8'][0]
        layer_sqnr_dict[stats['fqn']] = stats['sqnr'][0]

    return layer_sqnr_dict
799
+
800
def get_equalization_qconfig_dict(
    layer_sqnr_dict: Dict[str, float],
    num_layers_to_equalize: int
) -> Any:
    """Build an equalization_qconfig_dict targeting the noisiest layers.

    Picks the ``num_layers_to_equalize`` layers with the lowest SQNR (i.e. the
    highest quantization error) and returns a qconfig dict that equalizes only
    those layers.

    Args:
        layer_sqnr_dict: Dictionary mapping layer names to SQNR values (found
            when comparing an equalized model against a float model)
        num_layers_to_equalize: Number of layers with the highest quantization
            errors to equalize
    """
    # Lowest SQNR == highest quantization error: take the worst offenders
    ranked = sorted(layer_sqnr_dict.items(), key=lambda item: item[1])
    worst_layers = ranked[:num_layers_to_equalize]

    # Map each selected layer to the default equalization qconfig
    module_to_qconfig_list = [
        (layer_name, default_equalization_qconfig) for layer_name, _ in worst_layers
    ]
    return {"module_name": module_to_qconfig_list}