Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +3 -0
- infer_4_37_2/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/test_function_base.cpython-310.pyc +3 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/constants/__init__.py +358 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/constants/__pycache__/codata.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/constants/_codata.py +0 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/constants/_constants.py +366 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/constants/codata.py +21 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/constants/constants.py +53 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/constants/tests/__init__.py +0 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/constants/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/constants/tests/__pycache__/test_codata.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/constants/tests/__pycache__/test_constants.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/constants/tests/test_codata.py +78 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/constants/tests/test_constants.py +90 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_basic.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_helper.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_pseudo_diffs.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_realtransforms.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/__pycache__/basic.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/__pycache__/helper.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/__pycache__/pseudo_diffs.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/__pycache__/realtransforms.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/convolve.cpython-310-x86_64-linux-gnu.so +3 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_basic.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_helper.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_import.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_pseudo_diffs.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_real_transforms.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/tests/test_import.py +33 -0
- janus/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/_utils.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/common_op_utils.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/op_registry_utils.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/sharder.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/reshard.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/utils.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__init__.py +1 -0
- janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/api.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/api.py +87 -0
- janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__init__.py +10 -0
- janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/chunk_sharding_spec.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/api.py +263 -0
- janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py +218 -0
- janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__init__.py +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py +348 -0
- janus/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_nested_dict.cpython-310.pyc +0 -0
.gitattributes
CHANGED
|
@@ -1152,3 +1152,6 @@ infer_4_37_2/lib/python3.10/site-packages/pandas/core/__pycache__/series.cpython
|
|
| 1152 |
janus/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_quantization.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1153 |
janus/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/rpc_test.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1154 |
infer_4_37_2/lib/python3.10/site-packages/numpy/core/__pycache__/fromnumeric.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
| 1152 |
janus/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_quantization.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1153 |
janus/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/rpc_test.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1154 |
infer_4_37_2/lib/python3.10/site-packages/numpy/core/__pycache__/fromnumeric.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1155 |
+
infer_4_37_2/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/test_function_base.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1156 |
+
infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/convolve.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1157 |
+
janus/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_nn.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
infer_4_37_2/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/test_function_base.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ba81ea6afc3dfd95fc23b52e309351644610bb653ecc6b8a4210df83ad3451e1
|
| 3 |
+
size 143253
|
infer_4_37_2/lib/python3.10/site-packages/scipy/constants/__init__.py
ADDED
|
@@ -0,0 +1,358 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
r"""
|
| 2 |
+
==================================
|
| 3 |
+
Constants (:mod:`scipy.constants`)
|
| 4 |
+
==================================
|
| 5 |
+
|
| 6 |
+
.. currentmodule:: scipy.constants
|
| 7 |
+
|
| 8 |
+
Physical and mathematical constants and units.
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
Mathematical constants
|
| 12 |
+
======================
|
| 13 |
+
|
| 14 |
+
================ =================================================================
|
| 15 |
+
``pi`` Pi
|
| 16 |
+
``golden`` Golden ratio
|
| 17 |
+
``golden_ratio`` Golden ratio
|
| 18 |
+
================ =================================================================
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
Physical constants
|
| 22 |
+
==================
|
| 23 |
+
The following physical constants are available as attributes of `scipy.constants`.
|
| 24 |
+
All units are `SI <https://en.wikipedia.org/wiki/International_System_of_Units>`_.
|
| 25 |
+
|
| 26 |
+
=========================== ================================================================ ===============
|
| 27 |
+
Attribute Quantity Units
|
| 28 |
+
=========================== ================================================================ ===============
|
| 29 |
+
``c`` speed of light in vacuum m s^-1
|
| 30 |
+
``speed_of_light`` speed of light in vacuum m s^-1
|
| 31 |
+
``mu_0`` the magnetic constant :math:`\mu_0` N A^-2
|
| 32 |
+
``epsilon_0`` the electric constant (vacuum permittivity), :math:`\epsilon_0` F m^-1
|
| 33 |
+
``h`` the Planck constant :math:`h` J Hz^-1
|
| 34 |
+
``Planck`` the Planck constant :math:`h` J Hz^-1
|
| 35 |
+
``hbar`` the reduced Planck constant, :math:`\hbar = h/(2\pi)` J s
|
| 36 |
+
``G`` Newtonian constant of gravitation m^3 kg^-1 s^-2
|
| 37 |
+
``gravitational_constant`` Newtonian constant of gravitation m^3 kg^-1 s^-2
|
| 38 |
+
``g`` standard acceleration of gravity m s^-2
|
| 39 |
+
``e`` elementary charge C
|
| 40 |
+
``elementary_charge`` elementary charge C
|
| 41 |
+
``R`` molar gas constant J mol^-1 K^-1
|
| 42 |
+
``gas_constant`` molar gas constant J mol^-1 K^-1
|
| 43 |
+
``alpha`` fine-structure constant (unitless)
|
| 44 |
+
``fine_structure`` fine-structure constant (unitless)
|
| 45 |
+
``N_A`` Avogadro constant mol^-1
|
| 46 |
+
``Avogadro`` Avogadro constant mol^-1
|
| 47 |
+
``k`` Boltzmann constant J K^-1
|
| 48 |
+
``Boltzmann`` Boltzmann constant J K^-1
|
| 49 |
+
``sigma`` Stefan-Boltzmann constant :math:`\sigma` W m^-2 K^-4
|
| 50 |
+
``Stefan_Boltzmann`` Stefan-Boltzmann constant :math:`\sigma` W m^-2 K^-4
|
| 51 |
+
``Wien`` Wien wavelength displacement law constant m K
|
| 52 |
+
``Rydberg`` Rydberg constant m^-1
|
| 53 |
+
``m_e`` electron mass kg
|
| 54 |
+
``electron_mass`` electron mass kg
|
| 55 |
+
``m_p`` proton mass kg
|
| 56 |
+
``proton_mass`` proton mass kg
|
| 57 |
+
``m_n`` neutron mass kg
|
| 58 |
+
``neutron_mass`` neutron mass kg
|
| 59 |
+
=========================== ================================================================ ===============
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
Constants database
|
| 63 |
+
------------------
|
| 64 |
+
|
| 65 |
+
In addition to the above variables, :mod:`scipy.constants` also contains the
|
| 66 |
+
2022 CODATA recommended values [CODATA2022]_ database containing more physical
|
| 67 |
+
constants.
|
| 68 |
+
|
| 69 |
+
.. autosummary::
|
| 70 |
+
:toctree: generated/
|
| 71 |
+
|
| 72 |
+
value -- Value in physical_constants indexed by key
|
| 73 |
+
unit -- Unit in physical_constants indexed by key
|
| 74 |
+
precision -- Relative precision in physical_constants indexed by key
|
| 75 |
+
find -- Return list of physical_constant keys with a given string
|
| 76 |
+
ConstantWarning -- Constant sought not in newest CODATA data set
|
| 77 |
+
|
| 78 |
+
.. data:: physical_constants
|
| 79 |
+
|
| 80 |
+
Dictionary of physical constants, of the format
|
| 81 |
+
``physical_constants[name] = (value, unit, uncertainty)``.
|
| 82 |
+
The CODATA database uses ellipses to indicate that a value is defined
|
| 83 |
+
(exactly) in terms of others but cannot be represented exactly with the
|
| 84 |
+
allocated number of digits. In these cases, SciPy calculates the derived
|
| 85 |
+
value and reports it to the full precision of a Python ``float``. Although
|
| 86 |
+
``physical_constants`` lists the uncertainty as ``0.0`` to indicate that
|
| 87 |
+
the CODATA value is exact, the value in ``physical_constants`` is still
|
| 88 |
+
subject to the truncation error inherent in double-precision representation.
|
| 89 |
+
|
| 90 |
+
Available constants:
|
| 91 |
+
|
| 92 |
+
====================================================================== ====
|
| 93 |
+
%(constant_names)s
|
| 94 |
+
====================================================================== ====
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
Units
|
| 98 |
+
=====
|
| 99 |
+
|
| 100 |
+
SI prefixes
|
| 101 |
+
-----------
|
| 102 |
+
|
| 103 |
+
============ =================================================================
|
| 104 |
+
``quetta`` :math:`10^{30}`
|
| 105 |
+
``ronna`` :math:`10^{27}`
|
| 106 |
+
``yotta`` :math:`10^{24}`
|
| 107 |
+
``zetta`` :math:`10^{21}`
|
| 108 |
+
``exa`` :math:`10^{18}`
|
| 109 |
+
``peta`` :math:`10^{15}`
|
| 110 |
+
``tera`` :math:`10^{12}`
|
| 111 |
+
``giga`` :math:`10^{9}`
|
| 112 |
+
``mega`` :math:`10^{6}`
|
| 113 |
+
``kilo`` :math:`10^{3}`
|
| 114 |
+
``hecto`` :math:`10^{2}`
|
| 115 |
+
``deka`` :math:`10^{1}`
|
| 116 |
+
``deci`` :math:`10^{-1}`
|
| 117 |
+
``centi`` :math:`10^{-2}`
|
| 118 |
+
``milli`` :math:`10^{-3}`
|
| 119 |
+
``micro`` :math:`10^{-6}`
|
| 120 |
+
``nano`` :math:`10^{-9}`
|
| 121 |
+
``pico`` :math:`10^{-12}`
|
| 122 |
+
``femto`` :math:`10^{-15}`
|
| 123 |
+
``atto`` :math:`10^{-18}`
|
| 124 |
+
``zepto`` :math:`10^{-21}`
|
| 125 |
+
``yocto`` :math:`10^{-24}`
|
| 126 |
+
``ronto`` :math:`10^{-27}`
|
| 127 |
+
``quecto`` :math:`10^{-30}`
|
| 128 |
+
============ =================================================================
|
| 129 |
+
|
| 130 |
+
Binary prefixes
|
| 131 |
+
---------------
|
| 132 |
+
|
| 133 |
+
============ =================================================================
|
| 134 |
+
``kibi`` :math:`2^{10}`
|
| 135 |
+
``mebi`` :math:`2^{20}`
|
| 136 |
+
``gibi`` :math:`2^{30}`
|
| 137 |
+
``tebi`` :math:`2^{40}`
|
| 138 |
+
``pebi`` :math:`2^{50}`
|
| 139 |
+
``exbi`` :math:`2^{60}`
|
| 140 |
+
``zebi`` :math:`2^{70}`
|
| 141 |
+
``yobi`` :math:`2^{80}`
|
| 142 |
+
============ =================================================================
|
| 143 |
+
|
| 144 |
+
Mass
|
| 145 |
+
----
|
| 146 |
+
|
| 147 |
+
================= ============================================================
|
| 148 |
+
``gram`` :math:`10^{-3}` kg
|
| 149 |
+
``metric_ton`` :math:`10^{3}` kg
|
| 150 |
+
``grain`` one grain in kg
|
| 151 |
+
``lb`` one pound (avoirdupous) in kg
|
| 152 |
+
``pound`` one pound (avoirdupous) in kg
|
| 153 |
+
``blob`` one inch version of a slug in kg (added in 1.0.0)
|
| 154 |
+
``slinch`` one inch version of a slug in kg (added in 1.0.0)
|
| 155 |
+
``slug`` one slug in kg (added in 1.0.0)
|
| 156 |
+
``oz`` one ounce in kg
|
| 157 |
+
``ounce`` one ounce in kg
|
| 158 |
+
``stone`` one stone in kg
|
| 159 |
+
``grain`` one grain in kg
|
| 160 |
+
``long_ton`` one long ton in kg
|
| 161 |
+
``short_ton`` one short ton in kg
|
| 162 |
+
``troy_ounce`` one Troy ounce in kg
|
| 163 |
+
``troy_pound`` one Troy pound in kg
|
| 164 |
+
``carat`` one carat in kg
|
| 165 |
+
``m_u`` atomic mass constant (in kg)
|
| 166 |
+
``u`` atomic mass constant (in kg)
|
| 167 |
+
``atomic_mass`` atomic mass constant (in kg)
|
| 168 |
+
================= ============================================================
|
| 169 |
+
|
| 170 |
+
Angle
|
| 171 |
+
-----
|
| 172 |
+
|
| 173 |
+
================= ============================================================
|
| 174 |
+
``degree`` degree in radians
|
| 175 |
+
``arcmin`` arc minute in radians
|
| 176 |
+
``arcminute`` arc minute in radians
|
| 177 |
+
``arcsec`` arc second in radians
|
| 178 |
+
``arcsecond`` arc second in radians
|
| 179 |
+
================= ============================================================
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
Time
|
| 183 |
+
----
|
| 184 |
+
|
| 185 |
+
================= ============================================================
|
| 186 |
+
``minute`` one minute in seconds
|
| 187 |
+
``hour`` one hour in seconds
|
| 188 |
+
``day`` one day in seconds
|
| 189 |
+
``week`` one week in seconds
|
| 190 |
+
``year`` one year (365 days) in seconds
|
| 191 |
+
``Julian_year`` one Julian year (365.25 days) in seconds
|
| 192 |
+
================= ============================================================
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
Length
|
| 196 |
+
------
|
| 197 |
+
|
| 198 |
+
===================== ============================================================
|
| 199 |
+
``inch`` one inch in meters
|
| 200 |
+
``foot`` one foot in meters
|
| 201 |
+
``yard`` one yard in meters
|
| 202 |
+
``mile`` one mile in meters
|
| 203 |
+
``mil`` one mil in meters
|
| 204 |
+
``pt`` one point in meters
|
| 205 |
+
``point`` one point in meters
|
| 206 |
+
``survey_foot`` one survey foot in meters
|
| 207 |
+
``survey_mile`` one survey mile in meters
|
| 208 |
+
``nautical_mile`` one nautical mile in meters
|
| 209 |
+
``fermi`` one Fermi in meters
|
| 210 |
+
``angstrom`` one Angstrom in meters
|
| 211 |
+
``micron`` one micron in meters
|
| 212 |
+
``au`` one astronomical unit in meters
|
| 213 |
+
``astronomical_unit`` one astronomical unit in meters
|
| 214 |
+
``light_year`` one light year in meters
|
| 215 |
+
``parsec`` one parsec in meters
|
| 216 |
+
===================== ============================================================
|
| 217 |
+
|
| 218 |
+
Pressure
|
| 219 |
+
--------
|
| 220 |
+
|
| 221 |
+
================= ============================================================
|
| 222 |
+
``atm`` standard atmosphere in pascals
|
| 223 |
+
``atmosphere`` standard atmosphere in pascals
|
| 224 |
+
``bar`` one bar in pascals
|
| 225 |
+
``torr`` one torr (mmHg) in pascals
|
| 226 |
+
``mmHg`` one torr (mmHg) in pascals
|
| 227 |
+
``psi`` one psi in pascals
|
| 228 |
+
================= ============================================================
|
| 229 |
+
|
| 230 |
+
Area
|
| 231 |
+
----
|
| 232 |
+
|
| 233 |
+
================= ============================================================
|
| 234 |
+
``hectare`` one hectare in square meters
|
| 235 |
+
``acre`` one acre in square meters
|
| 236 |
+
================= ============================================================
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
Volume
|
| 240 |
+
------
|
| 241 |
+
|
| 242 |
+
=================== ========================================================
|
| 243 |
+
``liter`` one liter in cubic meters
|
| 244 |
+
``litre`` one liter in cubic meters
|
| 245 |
+
``gallon`` one gallon (US) in cubic meters
|
| 246 |
+
``gallon_US`` one gallon (US) in cubic meters
|
| 247 |
+
``gallon_imp`` one gallon (UK) in cubic meters
|
| 248 |
+
``fluid_ounce`` one fluid ounce (US) in cubic meters
|
| 249 |
+
``fluid_ounce_US`` one fluid ounce (US) in cubic meters
|
| 250 |
+
``fluid_ounce_imp`` one fluid ounce (UK) in cubic meters
|
| 251 |
+
``bbl`` one barrel in cubic meters
|
| 252 |
+
``barrel`` one barrel in cubic meters
|
| 253 |
+
=================== ========================================================
|
| 254 |
+
|
| 255 |
+
Speed
|
| 256 |
+
-----
|
| 257 |
+
|
| 258 |
+
================== ==========================================================
|
| 259 |
+
``kmh`` kilometers per hour in meters per second
|
| 260 |
+
``mph`` miles per hour in meters per second
|
| 261 |
+
``mach`` one Mach (approx., at 15 C, 1 atm) in meters per second
|
| 262 |
+
``speed_of_sound`` one Mach (approx., at 15 C, 1 atm) in meters per second
|
| 263 |
+
``knot`` one knot in meters per second
|
| 264 |
+
================== ==========================================================
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
Temperature
|
| 268 |
+
-----------
|
| 269 |
+
|
| 270 |
+
===================== =======================================================
|
| 271 |
+
``zero_Celsius`` zero of Celsius scale in Kelvin
|
| 272 |
+
``degree_Fahrenheit`` one Fahrenheit (only differences) in Kelvins
|
| 273 |
+
===================== =======================================================
|
| 274 |
+
|
| 275 |
+
.. autosummary::
|
| 276 |
+
:toctree: generated/
|
| 277 |
+
|
| 278 |
+
convert_temperature
|
| 279 |
+
|
| 280 |
+
Energy
|
| 281 |
+
------
|
| 282 |
+
|
| 283 |
+
==================== =======================================================
|
| 284 |
+
``eV`` one electron volt in Joules
|
| 285 |
+
``electron_volt`` one electron volt in Joules
|
| 286 |
+
``calorie`` one calorie (thermochemical) in Joules
|
| 287 |
+
``calorie_th`` one calorie (thermochemical) in Joules
|
| 288 |
+
``calorie_IT`` one calorie (International Steam Table calorie, 1956) in Joules
|
| 289 |
+
``erg`` one erg in Joules
|
| 290 |
+
``Btu`` one British thermal unit (International Steam Table) in Joules
|
| 291 |
+
``Btu_IT`` one British thermal unit (International Steam Table) in Joules
|
| 292 |
+
``Btu_th`` one British thermal unit (thermochemical) in Joules
|
| 293 |
+
``ton_TNT`` one ton of TNT in Joules
|
| 294 |
+
==================== =======================================================
|
| 295 |
+
|
| 296 |
+
Power
|
| 297 |
+
-----
|
| 298 |
+
|
| 299 |
+
==================== =======================================================
|
| 300 |
+
``hp`` one horsepower in watts
|
| 301 |
+
``horsepower`` one horsepower in watts
|
| 302 |
+
==================== =======================================================
|
| 303 |
+
|
| 304 |
+
Force
|
| 305 |
+
-----
|
| 306 |
+
|
| 307 |
+
==================== =======================================================
|
| 308 |
+
``dyn`` one dyne in newtons
|
| 309 |
+
``dyne`` one dyne in newtons
|
| 310 |
+
``lbf`` one pound force in newtons
|
| 311 |
+
``pound_force`` one pound force in newtons
|
| 312 |
+
``kgf`` one kilogram force in newtons
|
| 313 |
+
``kilogram_force`` one kilogram force in newtons
|
| 314 |
+
==================== =======================================================
|
| 315 |
+
|
| 316 |
+
Optics
|
| 317 |
+
------
|
| 318 |
+
|
| 319 |
+
.. autosummary::
|
| 320 |
+
:toctree: generated/
|
| 321 |
+
|
| 322 |
+
lambda2nu
|
| 323 |
+
nu2lambda
|
| 324 |
+
|
| 325 |
+
References
|
| 326 |
+
==========
|
| 327 |
+
|
| 328 |
+
.. [CODATA2022] CODATA Recommended Values of the Fundamental
|
| 329 |
+
Physical Constants 2022.
|
| 330 |
+
|
| 331 |
+
https://physics.nist.gov/cuu/Constants/
|
| 332 |
+
|
| 333 |
+
""" # noqa: E501
|
| 334 |
+
# Modules contributed by BasSw (wegwerp@gmail.com)
|
| 335 |
+
from ._codata import *
|
| 336 |
+
from ._constants import *
|
| 337 |
+
from ._codata import _obsolete_constants, physical_constants
|
| 338 |
+
|
| 339 |
+
# Deprecated namespaces, to be removed in v2.0.0
|
| 340 |
+
from . import codata, constants
|
| 341 |
+
|
| 342 |
+
_constant_names_list = [(_k.lower(), _k, _v)
|
| 343 |
+
for _k, _v in physical_constants.items()
|
| 344 |
+
if _k not in _obsolete_constants]
|
| 345 |
+
_constant_names = "\n".join(["``{}``{} {} {}".format(_x[1], " "*(66-len(_x[1])),
|
| 346 |
+
_x[2][0], _x[2][1])
|
| 347 |
+
for _x in sorted(_constant_names_list)])
|
| 348 |
+
if __doc__:
|
| 349 |
+
__doc__ = __doc__ % dict(constant_names=_constant_names)
|
| 350 |
+
|
| 351 |
+
del _constant_names
|
| 352 |
+
del _constant_names_list
|
| 353 |
+
|
| 354 |
+
__all__ = [s for s in dir() if not s.startswith('_')]
|
| 355 |
+
|
| 356 |
+
from scipy._lib._testutils import PytestTester
|
| 357 |
+
test = PytestTester(__name__)
|
| 358 |
+
del PytestTester
|
infer_4_37_2/lib/python3.10/site-packages/scipy/constants/__pycache__/codata.cpython-310.pyc
ADDED
|
Binary file (661 Bytes). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/scipy/constants/_codata.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
infer_4_37_2/lib/python3.10/site-packages/scipy/constants/_constants.py
ADDED
|
@@ -0,0 +1,366 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Collection of physical constants and conversion factors.
|
| 3 |
+
|
| 4 |
+
Most constants are in SI units, so you can do
|
| 5 |
+
print '10 mile per minute is', 10*mile/minute, 'm/s or', 10*mile/(minute*knot), 'knots'
|
| 6 |
+
|
| 7 |
+
The list is not meant to be comprehensive, but just convenient for everyday use.
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import math as _math
|
| 11 |
+
from typing import TYPE_CHECKING, Any
|
| 12 |
+
|
| 13 |
+
from ._codata import value as _cd
|
| 14 |
+
|
| 15 |
+
if TYPE_CHECKING:
|
| 16 |
+
import numpy.typing as npt
|
| 17 |
+
|
| 18 |
+
from scipy._lib._array_api import array_namespace, _asarray
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
"""
|
| 22 |
+
BasSw 2006
|
| 23 |
+
physical constants: imported from CODATA
|
| 24 |
+
unit conversion: see e.g., NIST special publication 811
|
| 25 |
+
Use at own risk: double-check values before calculating your Mars orbit-insertion burn.
|
| 26 |
+
Some constants exist in a few variants, which are marked with suffixes.
|
| 27 |
+
The ones without any suffix should be the most common ones.
|
| 28 |
+
"""
|
| 29 |
+
|
| 30 |
+
__all__ = [
|
| 31 |
+
'Avogadro', 'Boltzmann', 'Btu', 'Btu_IT', 'Btu_th', 'G',
|
| 32 |
+
'Julian_year', 'N_A', 'Planck', 'R', 'Rydberg',
|
| 33 |
+
'Stefan_Boltzmann', 'Wien', 'acre', 'alpha',
|
| 34 |
+
'angstrom', 'arcmin', 'arcminute', 'arcsec',
|
| 35 |
+
'arcsecond', 'astronomical_unit', 'atm',
|
| 36 |
+
'atmosphere', 'atomic_mass', 'atto', 'au', 'bar',
|
| 37 |
+
'barrel', 'bbl', 'blob', 'c', 'calorie',
|
| 38 |
+
'calorie_IT', 'calorie_th', 'carat', 'centi',
|
| 39 |
+
'convert_temperature', 'day', 'deci', 'degree',
|
| 40 |
+
'degree_Fahrenheit', 'deka', 'dyn', 'dyne', 'e',
|
| 41 |
+
'eV', 'electron_mass', 'electron_volt',
|
| 42 |
+
'elementary_charge', 'epsilon_0', 'erg',
|
| 43 |
+
'exa', 'exbi', 'femto', 'fermi', 'fine_structure',
|
| 44 |
+
'fluid_ounce', 'fluid_ounce_US', 'fluid_ounce_imp',
|
| 45 |
+
'foot', 'g', 'gallon', 'gallon_US', 'gallon_imp',
|
| 46 |
+
'gas_constant', 'gibi', 'giga', 'golden', 'golden_ratio',
|
| 47 |
+
'grain', 'gram', 'gravitational_constant', 'h', 'hbar',
|
| 48 |
+
'hectare', 'hecto', 'horsepower', 'hour', 'hp',
|
| 49 |
+
'inch', 'k', 'kgf', 'kibi', 'kilo', 'kilogram_force',
|
| 50 |
+
'kmh', 'knot', 'lambda2nu', 'lb', 'lbf',
|
| 51 |
+
'light_year', 'liter', 'litre', 'long_ton', 'm_e',
|
| 52 |
+
'm_n', 'm_p', 'm_u', 'mach', 'mebi', 'mega',
|
| 53 |
+
'metric_ton', 'micro', 'micron', 'mil', 'mile',
|
| 54 |
+
'milli', 'minute', 'mmHg', 'mph', 'mu_0', 'nano',
|
| 55 |
+
'nautical_mile', 'neutron_mass', 'nu2lambda',
|
| 56 |
+
'ounce', 'oz', 'parsec', 'pebi', 'peta',
|
| 57 |
+
'pi', 'pico', 'point', 'pound', 'pound_force',
|
| 58 |
+
'proton_mass', 'psi', 'pt', 'quecto', 'quetta', 'ronna', 'ronto',
|
| 59 |
+
'short_ton', 'sigma', 'slinch', 'slug', 'speed_of_light',
|
| 60 |
+
'speed_of_sound', 'stone', 'survey_foot',
|
| 61 |
+
'survey_mile', 'tebi', 'tera', 'ton_TNT',
|
| 62 |
+
'torr', 'troy_ounce', 'troy_pound', 'u',
|
| 63 |
+
'week', 'yard', 'year', 'yobi', 'yocto',
|
| 64 |
+
'yotta', 'zebi', 'zepto', 'zero_Celsius', 'zetta'
|
| 65 |
+
]
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
# mathematical constants
|
| 69 |
+
pi = _math.pi
|
| 70 |
+
golden = golden_ratio = (1 + _math.sqrt(5)) / 2
|
| 71 |
+
|
| 72 |
+
# SI prefixes
|
| 73 |
+
quetta = 1e30
|
| 74 |
+
ronna = 1e27
|
| 75 |
+
yotta = 1e24
|
| 76 |
+
zetta = 1e21
|
| 77 |
+
exa = 1e18
|
| 78 |
+
peta = 1e15
|
| 79 |
+
tera = 1e12
|
| 80 |
+
giga = 1e9
|
| 81 |
+
mega = 1e6
|
| 82 |
+
kilo = 1e3
|
| 83 |
+
hecto = 1e2
|
| 84 |
+
deka = 1e1
|
| 85 |
+
deci = 1e-1
|
| 86 |
+
centi = 1e-2
|
| 87 |
+
milli = 1e-3
|
| 88 |
+
micro = 1e-6
|
| 89 |
+
nano = 1e-9
|
| 90 |
+
pico = 1e-12
|
| 91 |
+
femto = 1e-15
|
| 92 |
+
atto = 1e-18
|
| 93 |
+
zepto = 1e-21
|
| 94 |
+
yocto = 1e-24
|
| 95 |
+
ronto = 1e-27
|
| 96 |
+
quecto = 1e-30
|
| 97 |
+
|
| 98 |
+
# binary prefixes
|
| 99 |
+
kibi = 2**10
|
| 100 |
+
mebi = 2**20
|
| 101 |
+
gibi = 2**30
|
| 102 |
+
tebi = 2**40
|
| 103 |
+
pebi = 2**50
|
| 104 |
+
exbi = 2**60
|
| 105 |
+
zebi = 2**70
|
| 106 |
+
yobi = 2**80
|
| 107 |
+
|
| 108 |
+
# physical constants
|
| 109 |
+
c = speed_of_light = _cd('speed of light in vacuum')
|
| 110 |
+
mu_0 = _cd('vacuum mag. permeability')
|
| 111 |
+
epsilon_0 = _cd('vacuum electric permittivity')
|
| 112 |
+
h = Planck = _cd('Planck constant')
|
| 113 |
+
hbar = _cd('reduced Planck constant')
|
| 114 |
+
G = gravitational_constant = _cd('Newtonian constant of gravitation')
|
| 115 |
+
g = _cd('standard acceleration of gravity')
|
| 116 |
+
e = elementary_charge = _cd('elementary charge')
|
| 117 |
+
R = gas_constant = _cd('molar gas constant')
|
| 118 |
+
alpha = fine_structure = _cd('fine-structure constant')
|
| 119 |
+
N_A = Avogadro = _cd('Avogadro constant')
|
| 120 |
+
k = Boltzmann = _cd('Boltzmann constant')
|
| 121 |
+
sigma = Stefan_Boltzmann = _cd('Stefan-Boltzmann constant')
|
| 122 |
+
Wien = _cd('Wien wavelength displacement law constant')
|
| 123 |
+
Rydberg = _cd('Rydberg constant')
|
| 124 |
+
|
| 125 |
+
# mass in kg
|
| 126 |
+
gram = 1e-3
|
| 127 |
+
metric_ton = 1e3
|
| 128 |
+
grain = 64.79891e-6
|
| 129 |
+
lb = pound = 7000 * grain # avoirdupois
|
| 130 |
+
blob = slinch = pound * g / 0.0254 # lbf*s**2/in (added in 1.0.0)
|
| 131 |
+
slug = blob / 12 # lbf*s**2/foot (added in 1.0.0)
|
| 132 |
+
oz = ounce = pound / 16
|
| 133 |
+
stone = 14 * pound
|
| 134 |
+
long_ton = 2240 * pound
|
| 135 |
+
short_ton = 2000 * pound
|
| 136 |
+
|
| 137 |
+
troy_ounce = 480 * grain # only for metals / gems
|
| 138 |
+
troy_pound = 12 * troy_ounce
|
| 139 |
+
carat = 200e-6
|
| 140 |
+
|
| 141 |
+
m_e = electron_mass = _cd('electron mass')
|
| 142 |
+
m_p = proton_mass = _cd('proton mass')
|
| 143 |
+
m_n = neutron_mass = _cd('neutron mass')
|
| 144 |
+
m_u = u = atomic_mass = _cd('atomic mass constant')
|
| 145 |
+
|
| 146 |
+
# angle in rad
|
| 147 |
+
degree = pi / 180
|
| 148 |
+
arcmin = arcminute = degree / 60
|
| 149 |
+
arcsec = arcsecond = arcmin / 60
|
| 150 |
+
|
| 151 |
+
# time in second
|
| 152 |
+
minute = 60.0
|
| 153 |
+
hour = 60 * minute
|
| 154 |
+
day = 24 * hour
|
| 155 |
+
week = 7 * day
|
| 156 |
+
year = 365 * day
|
| 157 |
+
Julian_year = 365.25 * day
|
| 158 |
+
|
| 159 |
+
# length in meter
|
| 160 |
+
inch = 0.0254
|
| 161 |
+
foot = 12 * inch
|
| 162 |
+
yard = 3 * foot
|
| 163 |
+
mile = 1760 * yard
|
| 164 |
+
mil = inch / 1000
|
| 165 |
+
pt = point = inch / 72 # typography
|
| 166 |
+
survey_foot = 1200.0 / 3937
|
| 167 |
+
survey_mile = 5280 * survey_foot
|
| 168 |
+
nautical_mile = 1852.0
|
| 169 |
+
fermi = 1e-15
|
| 170 |
+
angstrom = 1e-10
|
| 171 |
+
micron = 1e-6
|
| 172 |
+
au = astronomical_unit = 149597870700.0
|
| 173 |
+
light_year = Julian_year * c
|
| 174 |
+
parsec = au / arcsec
|
| 175 |
+
|
| 176 |
+
# pressure in pascal
|
| 177 |
+
atm = atmosphere = _cd('standard atmosphere')
|
| 178 |
+
bar = 1e5
|
| 179 |
+
torr = mmHg = atm / 760
|
| 180 |
+
psi = pound * g / (inch * inch)
|
| 181 |
+
|
| 182 |
+
# area in meter**2
|
| 183 |
+
hectare = 1e4
|
| 184 |
+
acre = 43560 * foot**2
|
| 185 |
+
|
| 186 |
+
# volume in meter**3
|
| 187 |
+
litre = liter = 1e-3
|
| 188 |
+
gallon = gallon_US = 231 * inch**3 # US
|
| 189 |
+
# pint = gallon_US / 8
|
| 190 |
+
fluid_ounce = fluid_ounce_US = gallon_US / 128
|
| 191 |
+
bbl = barrel = 42 * gallon_US # for oil
|
| 192 |
+
|
| 193 |
+
gallon_imp = 4.54609e-3 # UK
|
| 194 |
+
fluid_ounce_imp = gallon_imp / 160
|
| 195 |
+
|
| 196 |
+
# speed in meter per second
|
| 197 |
+
kmh = 1e3 / hour
|
| 198 |
+
mph = mile / hour
|
| 199 |
+
# approx value of mach at 15 degrees in 1 atm. Is this a common value?
|
| 200 |
+
mach = speed_of_sound = 340.5
|
| 201 |
+
knot = nautical_mile / hour
|
| 202 |
+
|
| 203 |
+
# temperature in kelvin
|
| 204 |
+
zero_Celsius = 273.15
|
| 205 |
+
degree_Fahrenheit = 1/1.8 # only for differences
|
| 206 |
+
|
| 207 |
+
# energy in joule
|
| 208 |
+
eV = electron_volt = elementary_charge # * 1 Volt
|
| 209 |
+
calorie = calorie_th = 4.184
|
| 210 |
+
calorie_IT = 4.1868
|
| 211 |
+
erg = 1e-7
|
| 212 |
+
Btu_th = pound * degree_Fahrenheit * calorie_th / gram
|
| 213 |
+
Btu = Btu_IT = pound * degree_Fahrenheit * calorie_IT / gram
|
| 214 |
+
ton_TNT = 1e9 * calorie_th
|
| 215 |
+
# Wh = watt_hour
|
| 216 |
+
|
| 217 |
+
# power in watt
|
| 218 |
+
hp = horsepower = 550 * foot * pound * g
|
| 219 |
+
|
| 220 |
+
# force in newton
|
| 221 |
+
dyn = dyne = 1e-5
|
| 222 |
+
lbf = pound_force = pound * g
|
| 223 |
+
kgf = kilogram_force = g # * 1 kg
|
| 224 |
+
|
| 225 |
+
# functions for conversions that are not linear
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
def convert_temperature(
|
| 229 |
+
val: "npt.ArrayLike",
|
| 230 |
+
old_scale: str,
|
| 231 |
+
new_scale: str,
|
| 232 |
+
) -> Any:
|
| 233 |
+
"""
|
| 234 |
+
Convert from a temperature scale to another one among Celsius, Kelvin,
|
| 235 |
+
Fahrenheit, and Rankine scales.
|
| 236 |
+
|
| 237 |
+
Parameters
|
| 238 |
+
----------
|
| 239 |
+
val : array_like
|
| 240 |
+
Value(s) of the temperature(s) to be converted expressed in the
|
| 241 |
+
original scale.
|
| 242 |
+
old_scale : str
|
| 243 |
+
Specifies as a string the original scale from which the temperature
|
| 244 |
+
value(s) will be converted. Supported scales are Celsius ('Celsius',
|
| 245 |
+
'celsius', 'C' or 'c'), Kelvin ('Kelvin', 'kelvin', 'K', 'k'),
|
| 246 |
+
Fahrenheit ('Fahrenheit', 'fahrenheit', 'F' or 'f'), and Rankine
|
| 247 |
+
('Rankine', 'rankine', 'R', 'r').
|
| 248 |
+
new_scale : str
|
| 249 |
+
Specifies as a string the new scale to which the temperature
|
| 250 |
+
value(s) will be converted. Supported scales are Celsius ('Celsius',
|
| 251 |
+
'celsius', 'C' or 'c'), Kelvin ('Kelvin', 'kelvin', 'K', 'k'),
|
| 252 |
+
Fahrenheit ('Fahrenheit', 'fahrenheit', 'F' or 'f'), and Rankine
|
| 253 |
+
('Rankine', 'rankine', 'R', 'r').
|
| 254 |
+
|
| 255 |
+
Returns
|
| 256 |
+
-------
|
| 257 |
+
res : float or array of floats
|
| 258 |
+
Value(s) of the converted temperature(s) expressed in the new scale.
|
| 259 |
+
|
| 260 |
+
Notes
|
| 261 |
+
-----
|
| 262 |
+
.. versionadded:: 0.18.0
|
| 263 |
+
|
| 264 |
+
Examples
|
| 265 |
+
--------
|
| 266 |
+
>>> from scipy.constants import convert_temperature
|
| 267 |
+
>>> import numpy as np
|
| 268 |
+
>>> convert_temperature(np.array([-40, 40]), 'Celsius', 'Kelvin')
|
| 269 |
+
array([ 233.15, 313.15])
|
| 270 |
+
|
| 271 |
+
"""
|
| 272 |
+
xp = array_namespace(val)
|
| 273 |
+
_val = _asarray(val, xp=xp, subok=True)
|
| 274 |
+
# Convert from `old_scale` to Kelvin
|
| 275 |
+
if old_scale.lower() in ['celsius', 'c']:
|
| 276 |
+
tempo = _val + zero_Celsius
|
| 277 |
+
elif old_scale.lower() in ['kelvin', 'k']:
|
| 278 |
+
tempo = _val
|
| 279 |
+
elif old_scale.lower() in ['fahrenheit', 'f']:
|
| 280 |
+
tempo = (_val - 32) * 5 / 9 + zero_Celsius
|
| 281 |
+
elif old_scale.lower() in ['rankine', 'r']:
|
| 282 |
+
tempo = _val * 5 / 9
|
| 283 |
+
else:
|
| 284 |
+
raise NotImplementedError(f"{old_scale=} is unsupported: supported scales "
|
| 285 |
+
"are Celsius, Kelvin, Fahrenheit, and "
|
| 286 |
+
"Rankine")
|
| 287 |
+
# and from Kelvin to `new_scale`.
|
| 288 |
+
if new_scale.lower() in ['celsius', 'c']:
|
| 289 |
+
res = tempo - zero_Celsius
|
| 290 |
+
elif new_scale.lower() in ['kelvin', 'k']:
|
| 291 |
+
res = tempo
|
| 292 |
+
elif new_scale.lower() in ['fahrenheit', 'f']:
|
| 293 |
+
res = (tempo - zero_Celsius) * 9 / 5 + 32
|
| 294 |
+
elif new_scale.lower() in ['rankine', 'r']:
|
| 295 |
+
res = tempo * 9 / 5
|
| 296 |
+
else:
|
| 297 |
+
raise NotImplementedError(f"{new_scale=} is unsupported: supported "
|
| 298 |
+
"scales are 'Celsius', 'Kelvin', "
|
| 299 |
+
"'Fahrenheit', and 'Rankine'")
|
| 300 |
+
|
| 301 |
+
return res
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
# optics
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
def lambda2nu(lambda_: "npt.ArrayLike") -> Any:
|
| 308 |
+
"""
|
| 309 |
+
Convert wavelength to optical frequency
|
| 310 |
+
|
| 311 |
+
Parameters
|
| 312 |
+
----------
|
| 313 |
+
lambda_ : array_like
|
| 314 |
+
Wavelength(s) to be converted.
|
| 315 |
+
|
| 316 |
+
Returns
|
| 317 |
+
-------
|
| 318 |
+
nu : float or array of floats
|
| 319 |
+
Equivalent optical frequency.
|
| 320 |
+
|
| 321 |
+
Notes
|
| 322 |
+
-----
|
| 323 |
+
Computes ``nu = c / lambda`` where c = 299792458.0, i.e., the
|
| 324 |
+
(vacuum) speed of light in meters/second.
|
| 325 |
+
|
| 326 |
+
Examples
|
| 327 |
+
--------
|
| 328 |
+
>>> from scipy.constants import lambda2nu, speed_of_light
|
| 329 |
+
>>> import numpy as np
|
| 330 |
+
>>> lambda2nu(np.array((1, speed_of_light)))
|
| 331 |
+
array([ 2.99792458e+08, 1.00000000e+00])
|
| 332 |
+
|
| 333 |
+
"""
|
| 334 |
+
xp = array_namespace(lambda_)
|
| 335 |
+
return c / _asarray(lambda_, xp=xp, subok=True)
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
def nu2lambda(nu: "npt.ArrayLike") -> Any:
|
| 339 |
+
"""
|
| 340 |
+
Convert optical frequency to wavelength.
|
| 341 |
+
|
| 342 |
+
Parameters
|
| 343 |
+
----------
|
| 344 |
+
nu : array_like
|
| 345 |
+
Optical frequency to be converted.
|
| 346 |
+
|
| 347 |
+
Returns
|
| 348 |
+
-------
|
| 349 |
+
lambda : float or array of floats
|
| 350 |
+
Equivalent wavelength(s).
|
| 351 |
+
|
| 352 |
+
Notes
|
| 353 |
+
-----
|
| 354 |
+
Computes ``lambda = c / nu`` where c = 299792458.0, i.e., the
|
| 355 |
+
(vacuum) speed of light in meters/second.
|
| 356 |
+
|
| 357 |
+
Examples
|
| 358 |
+
--------
|
| 359 |
+
>>> from scipy.constants import nu2lambda, speed_of_light
|
| 360 |
+
>>> import numpy as np
|
| 361 |
+
>>> nu2lambda(np.array((1, speed_of_light)))
|
| 362 |
+
array([ 2.99792458e+08, 1.00000000e+00])
|
| 363 |
+
|
| 364 |
+
"""
|
| 365 |
+
xp = array_namespace(nu)
|
| 366 |
+
return c / _asarray(nu, xp=xp, subok=True)
|
infer_4_37_2/lib/python3.10/site-packages/scipy/constants/codata.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
|
| 2 |
+
# Use the `scipy.constants` namespace for importing the functions
|
| 3 |
+
# included below.
|
| 4 |
+
|
| 5 |
+
from scipy._lib.deprecation import _sub_module_deprecation
|
| 6 |
+
|
| 7 |
+
__all__ = [ # noqa: F822
|
| 8 |
+
'physical_constants', 'value', 'unit', 'precision', 'find',
|
| 9 |
+
'ConstantWarning', 'k', 'c',
|
| 10 |
+
|
| 11 |
+
]
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def __dir__():
|
| 15 |
+
return __all__
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def __getattr__(name):
|
| 19 |
+
return _sub_module_deprecation(sub_package="constants", module="codata",
|
| 20 |
+
private_modules=["_codata"], all=__all__,
|
| 21 |
+
attribute=name)
|
infer_4_37_2/lib/python3.10/site-packages/scipy/constants/constants.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
|
| 2 |
+
# Use the `scipy.constants` namespace for importing the functions
|
| 3 |
+
# included below.
|
| 4 |
+
|
| 5 |
+
from scipy._lib.deprecation import _sub_module_deprecation
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
__all__ = [ # noqa: F822
|
| 9 |
+
'Avogadro', 'Boltzmann', 'Btu', 'Btu_IT', 'Btu_th', 'G',
|
| 10 |
+
'Julian_year', 'N_A', 'Planck', 'R', 'Rydberg',
|
| 11 |
+
'Stefan_Boltzmann', 'Wien', 'acre', 'alpha',
|
| 12 |
+
'angstrom', 'arcmin', 'arcminute', 'arcsec',
|
| 13 |
+
'arcsecond', 'astronomical_unit', 'atm',
|
| 14 |
+
'atmosphere', 'atomic_mass', 'atto', 'au', 'bar',
|
| 15 |
+
'barrel', 'bbl', 'blob', 'c', 'calorie',
|
| 16 |
+
'calorie_IT', 'calorie_th', 'carat', 'centi',
|
| 17 |
+
'convert_temperature', 'day', 'deci', 'degree',
|
| 18 |
+
'degree_Fahrenheit', 'deka', 'dyn', 'dyne', 'e',
|
| 19 |
+
'eV', 'electron_mass', 'electron_volt',
|
| 20 |
+
'elementary_charge', 'epsilon_0', 'erg',
|
| 21 |
+
'exa', 'exbi', 'femto', 'fermi', 'fine_structure',
|
| 22 |
+
'fluid_ounce', 'fluid_ounce_US', 'fluid_ounce_imp',
|
| 23 |
+
'foot', 'g', 'gallon', 'gallon_US', 'gallon_imp',
|
| 24 |
+
'gas_constant', 'gibi', 'giga', 'golden', 'golden_ratio',
|
| 25 |
+
'grain', 'gram', 'gravitational_constant', 'h', 'hbar',
|
| 26 |
+
'hectare', 'hecto', 'horsepower', 'hour', 'hp',
|
| 27 |
+
'inch', 'k', 'kgf', 'kibi', 'kilo', 'kilogram_force',
|
| 28 |
+
'kmh', 'knot', 'lambda2nu', 'lb', 'lbf',
|
| 29 |
+
'light_year', 'liter', 'litre', 'long_ton', 'm_e',
|
| 30 |
+
'm_n', 'm_p', 'm_u', 'mach', 'mebi', 'mega',
|
| 31 |
+
'metric_ton', 'micro', 'micron', 'mil', 'mile',
|
| 32 |
+
'milli', 'minute', 'mmHg', 'mph', 'mu_0', 'nano',
|
| 33 |
+
'nautical_mile', 'neutron_mass', 'nu2lambda',
|
| 34 |
+
'ounce', 'oz', 'parsec', 'pebi', 'peta',
|
| 35 |
+
'pi', 'pico', 'point', 'pound', 'pound_force',
|
| 36 |
+
'proton_mass', 'psi', 'pt', 'short_ton',
|
| 37 |
+
'sigma', 'slinch', 'slug', 'speed_of_light',
|
| 38 |
+
'speed_of_sound', 'stone', 'survey_foot',
|
| 39 |
+
'survey_mile', 'tebi', 'tera', 'ton_TNT',
|
| 40 |
+
'torr', 'troy_ounce', 'troy_pound', 'u',
|
| 41 |
+
'week', 'yard', 'year', 'yobi', 'yocto',
|
| 42 |
+
'yotta', 'zebi', 'zepto', 'zero_Celsius', 'zetta'
|
| 43 |
+
]
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def __dir__():
|
| 47 |
+
return __all__
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def __getattr__(name):
|
| 51 |
+
return _sub_module_deprecation(sub_package="constants", module="constants",
|
| 52 |
+
private_modules=["_constants"], all=__all__,
|
| 53 |
+
attribute=name)
|
infer_4_37_2/lib/python3.10/site-packages/scipy/constants/tests/__init__.py
ADDED
|
File without changes
|
infer_4_37_2/lib/python3.10/site-packages/scipy/constants/tests/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (179 Bytes). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/scipy/constants/tests/__pycache__/test_codata.cpython-310.pyc
ADDED
|
Binary file (2.96 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/scipy/constants/tests/__pycache__/test_constants.cpython-310.pyc
ADDED
|
Binary file (3.72 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/scipy/constants/tests/test_codata.py
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from scipy.constants import find, value, c, speed_of_light, precision
|
| 2 |
+
from numpy.testing import assert_equal, assert_, assert_almost_equal
|
| 3 |
+
import scipy.constants._codata as _cd
|
| 4 |
+
from scipy import constants
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def test_find():
|
| 8 |
+
keys = find('weak mixing', disp=False)
|
| 9 |
+
assert_equal(keys, ['weak mixing angle'])
|
| 10 |
+
|
| 11 |
+
keys = find('qwertyuiop', disp=False)
|
| 12 |
+
assert_equal(keys, [])
|
| 13 |
+
|
| 14 |
+
keys = find('natural unit', disp=False)
|
| 15 |
+
assert_equal(keys, sorted(['natural unit of velocity',
|
| 16 |
+
'natural unit of action',
|
| 17 |
+
'natural unit of action in eV s',
|
| 18 |
+
'natural unit of mass',
|
| 19 |
+
'natural unit of energy',
|
| 20 |
+
'natural unit of energy in MeV',
|
| 21 |
+
'natural unit of momentum',
|
| 22 |
+
'natural unit of momentum in MeV/c',
|
| 23 |
+
'natural unit of length',
|
| 24 |
+
'natural unit of time']))
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def test_basic_table_parse():
|
| 28 |
+
c_s = 'speed of light in vacuum'
|
| 29 |
+
assert_equal(value(c_s), c)
|
| 30 |
+
assert_equal(value(c_s), speed_of_light)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def test_basic_lookup():
|
| 34 |
+
assert_equal('%d %s' % (_cd.value('speed of light in vacuum'),
|
| 35 |
+
_cd.unit('speed of light in vacuum')),
|
| 36 |
+
'299792458 m s^-1')
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def test_find_all():
|
| 40 |
+
assert_(len(find(disp=False)) > 300)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def test_find_single():
|
| 44 |
+
assert_equal(find('Wien freq', disp=False)[0],
|
| 45 |
+
'Wien frequency displacement law constant')
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def test_2002_vs_2006():
|
| 49 |
+
assert_almost_equal(value('magn. flux quantum'),
|
| 50 |
+
value('mag. flux quantum'))
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def test_exact_values():
|
| 54 |
+
# Check that updating stored values with exact ones worked.
|
| 55 |
+
exact = dict((k, v[0]) for k, v in _cd._physical_constants_2018.items())
|
| 56 |
+
replace = _cd.exact2018(exact)
|
| 57 |
+
for key, val in replace.items():
|
| 58 |
+
assert_equal(val, value(key))
|
| 59 |
+
assert precision(key) == 0
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def test_gh11341():
|
| 63 |
+
# gh-11341 noted that these three constants should exist (for backward
|
| 64 |
+
# compatibility) and should always have the same value:
|
| 65 |
+
a = constants.epsilon_0
|
| 66 |
+
b = constants.physical_constants['electric constant'][0]
|
| 67 |
+
c = constants.physical_constants['vacuum electric permittivity'][0]
|
| 68 |
+
assert a == b == c
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def test_gh14467():
|
| 72 |
+
# gh-14467 noted that some physical constants in CODATA are rounded
|
| 73 |
+
# to only ten significant figures even though they are supposed to be
|
| 74 |
+
# exact. Check that (at least) the case mentioned in the issue is resolved.
|
| 75 |
+
res = constants.physical_constants['Boltzmann constant in eV/K'][0]
|
| 76 |
+
ref = (constants.physical_constants['Boltzmann constant'][0]
|
| 77 |
+
/ constants.physical_constants['elementary charge'][0])
|
| 78 |
+
assert res == ref
|
infer_4_37_2/lib/python3.10/site-packages/scipy/constants/tests/test_constants.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
import scipy.constants as sc
|
| 4 |
+
from scipy.conftest import array_api_compatible
|
| 5 |
+
from scipy._lib._array_api_no_0d import xp_assert_equal, xp_assert_close
|
| 6 |
+
from numpy.testing import assert_allclose
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_xp_backends")]
|
| 10 |
+
skip_xp_backends = pytest.mark.skip_xp_backends
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class TestConvertTemperature:
|
| 14 |
+
def test_convert_temperature(self, xp):
|
| 15 |
+
xp_assert_equal(sc.convert_temperature(xp.asarray(32.), 'f', 'Celsius'),
|
| 16 |
+
xp.asarray(0.0))
|
| 17 |
+
xp_assert_equal(sc.convert_temperature(xp.asarray([0., 0.]),
|
| 18 |
+
'celsius', 'Kelvin'),
|
| 19 |
+
xp.asarray([273.15, 273.15]))
|
| 20 |
+
xp_assert_equal(sc.convert_temperature(xp.asarray([0., 0.]), 'kelvin', 'c'),
|
| 21 |
+
xp.asarray([-273.15, -273.15]))
|
| 22 |
+
xp_assert_equal(sc.convert_temperature(xp.asarray([32., 32.]), 'f', 'k'),
|
| 23 |
+
xp.asarray([273.15, 273.15]))
|
| 24 |
+
xp_assert_equal(sc.convert_temperature(xp.asarray([273.15, 273.15]),
|
| 25 |
+
'kelvin', 'F'),
|
| 26 |
+
xp.asarray([32., 32.]))
|
| 27 |
+
xp_assert_equal(sc.convert_temperature(xp.asarray([0., 0.]), 'C', 'fahrenheit'),
|
| 28 |
+
xp.asarray([32., 32.]))
|
| 29 |
+
xp_assert_close(sc.convert_temperature(xp.asarray([0., 0.], dtype=xp.float64),
|
| 30 |
+
'c', 'r'),
|
| 31 |
+
xp.asarray([491.67, 491.67], dtype=xp.float64),
|
| 32 |
+
rtol=0., atol=1e-13)
|
| 33 |
+
xp_assert_close(sc.convert_temperature(xp.asarray([491.67, 491.67],
|
| 34 |
+
dtype=xp.float64),
|
| 35 |
+
'Rankine', 'C'),
|
| 36 |
+
xp.asarray([0., 0.], dtype=xp.float64), rtol=0., atol=1e-13)
|
| 37 |
+
xp_assert_close(sc.convert_temperature(xp.asarray([491.67, 491.67],
|
| 38 |
+
dtype=xp.float64),
|
| 39 |
+
'r', 'F'),
|
| 40 |
+
xp.asarray([32., 32.], dtype=xp.float64), rtol=0., atol=1e-13)
|
| 41 |
+
xp_assert_close(sc.convert_temperature(xp.asarray([32., 32.], dtype=xp.float64),
|
| 42 |
+
'fahrenheit', 'R'),
|
| 43 |
+
xp.asarray([491.67, 491.67], dtype=xp.float64),
|
| 44 |
+
rtol=0., atol=1e-13)
|
| 45 |
+
xp_assert_close(sc.convert_temperature(xp.asarray([273.15, 273.15],
|
| 46 |
+
dtype=xp.float64),
|
| 47 |
+
'K', 'R'),
|
| 48 |
+
xp.asarray([491.67, 491.67], dtype=xp.float64),
|
| 49 |
+
rtol=0., atol=1e-13)
|
| 50 |
+
xp_assert_close(sc.convert_temperature(xp.asarray([491.67, 0.],
|
| 51 |
+
dtype=xp.float64),
|
| 52 |
+
'rankine', 'kelvin'),
|
| 53 |
+
xp.asarray([273.15, 0.], dtype=xp.float64), rtol=0., atol=1e-13)
|
| 54 |
+
|
| 55 |
+
@skip_xp_backends(np_only=True, reason='Python list input uses NumPy backend')
|
| 56 |
+
def test_convert_temperature_array_like(self):
|
| 57 |
+
assert_allclose(sc.convert_temperature([491.67, 0.], 'rankine', 'kelvin'),
|
| 58 |
+
[273.15, 0.], rtol=0., atol=1e-13)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
@skip_xp_backends(np_only=True, reason='Python int input uses NumPy backend')
|
| 62 |
+
def test_convert_temperature_errors(self, xp):
|
| 63 |
+
with pytest.raises(NotImplementedError, match="old_scale="):
|
| 64 |
+
sc.convert_temperature(1, old_scale="cheddar", new_scale="kelvin")
|
| 65 |
+
with pytest.raises(NotImplementedError, match="new_scale="):
|
| 66 |
+
sc.convert_temperature(1, old_scale="kelvin", new_scale="brie")
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
class TestLambdaToNu:
|
| 70 |
+
def test_lambda_to_nu(self, xp):
|
| 71 |
+
xp_assert_equal(sc.lambda2nu(xp.asarray([sc.speed_of_light, 1])),
|
| 72 |
+
xp.asarray([1, sc.speed_of_light]))
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
@skip_xp_backends(np_only=True, reason='Python list input uses NumPy backend')
|
| 76 |
+
def test_lambda_to_nu_array_like(self, xp):
|
| 77 |
+
assert_allclose(sc.lambda2nu([sc.speed_of_light, 1]),
|
| 78 |
+
[1, sc.speed_of_light])
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
class TestNuToLambda:
|
| 82 |
+
def test_nu_to_lambda(self, xp):
|
| 83 |
+
xp_assert_equal(sc.nu2lambda(xp.asarray([sc.speed_of_light, 1])),
|
| 84 |
+
xp.asarray([1, sc.speed_of_light]))
|
| 85 |
+
|
| 86 |
+
@skip_xp_backends(np_only=True, reason='Python list input uses NumPy backend')
|
| 87 |
+
def test_nu_to_lambda_array_like(self, xp):
|
| 88 |
+
assert_allclose(sc.nu2lambda([sc.speed_of_light, 1]),
|
| 89 |
+
[1, sc.speed_of_light])
|
| 90 |
+
|
infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (3.16 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_basic.cpython-310.pyc
ADDED
|
Binary file (13.2 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_helper.cpython-310.pyc
ADDED
|
Binary file (3.58 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_pseudo_diffs.cpython-310.pyc
ADDED
|
Binary file (13 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_realtransforms.cpython-310.pyc
ADDED
|
Binary file (19.1 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/__pycache__/basic.cpython-310.pyc
ADDED
|
Binary file (631 Bytes). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/__pycache__/helper.cpython-310.pyc
ADDED
|
Binary file (639 Bytes). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/__pycache__/pseudo_diffs.cpython-310.pyc
ADDED
|
Binary file (699 Bytes). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/__pycache__/realtransforms.cpython-310.pyc
ADDED
|
Binary file (656 Bytes). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/convolve.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6f5e5a7834f8e50a5339bfef2eb38a5cef5eebc72f521c88e4370cebfcb6acb3
|
| 3 |
+
size 272968
|
infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (177 Bytes). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_basic.cpython-310.pyc
ADDED
|
Binary file (28.2 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_helper.cpython-310.pyc
ADDED
|
Binary file (2.22 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_import.cpython-310.pyc
ADDED
|
Binary file (1.6 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_pseudo_diffs.cpython-310.pyc
ADDED
|
Binary file (13.3 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_real_transforms.cpython-310.pyc
ADDED
|
Binary file (26.6 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/scipy/fftpack/tests/test_import.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Test possibility of patching fftpack with pyfftw.
|
| 2 |
+
|
| 3 |
+
No module source outside of scipy.fftpack should contain an import of
|
| 4 |
+
the form `from scipy.fftpack import ...`, so that a simple replacement
|
| 5 |
+
of scipy.fftpack by the corresponding fftw interface completely swaps
|
| 6 |
+
the two FFT implementations.
|
| 7 |
+
|
| 8 |
+
Because this simply inspects source files, we only need to run the test
|
| 9 |
+
on one version of Python.
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
from pathlib import Path
|
| 14 |
+
import re
|
| 15 |
+
import tokenize
|
| 16 |
+
import pytest
|
| 17 |
+
from numpy.testing import assert_
|
| 18 |
+
import scipy
|
| 19 |
+
|
| 20 |
+
class TestFFTPackImport:
|
| 21 |
+
@pytest.mark.slow
|
| 22 |
+
def test_fftpack_import(self):
|
| 23 |
+
base = Path(scipy.__file__).parent
|
| 24 |
+
regexp = r"\s*from.+\.fftpack import .*\n"
|
| 25 |
+
for path in base.rglob("*.py"):
|
| 26 |
+
if base / "fftpack" in path.parents:
|
| 27 |
+
continue
|
| 28 |
+
# use tokenize to auto-detect encoding on systems where no
|
| 29 |
+
# default encoding is defined (e.g., LANG='C')
|
| 30 |
+
with tokenize.open(str(path)) as file:
|
| 31 |
+
assert_(all(not re.fullmatch(regexp, line)
|
| 32 |
+
for line in file),
|
| 33 |
+
f"{path} contains an import from fftpack")
|
janus/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/_utils.cpython-310.pyc
ADDED
|
Binary file (1.05 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/common_op_utils.cpython-310.pyc
ADDED
|
Binary file (2.28 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/op_registry_utils.cpython-310.pyc
ADDED
|
Binary file (1.19 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/sharder.cpython-310.pyc
ADDED
|
Binary file (1.33 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/reshard.cpython-310.pyc
ADDED
|
Binary file (8.13 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (5.69 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
from .api import ShardingPlan, ShardingPlanner
|
janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (255 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/api.cpython-310.pyc
ADDED
|
Binary file (4.32 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/api.py
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import abc
|
| 2 |
+
from dataclasses import dataclass
|
| 3 |
+
from typing import Dict, List, Optional, Union
|
| 4 |
+
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
from torch.distributed._shard.sharder import Sharder
|
| 7 |
+
from torch.distributed._shard.sharding_spec import ShardingSpec
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@dataclass
class ShardingPlan:
    """
    Representation of a sharding plan, describes how to shard a module
    across hosts. `plan` is used to shard module parameters according to the spec provided,
    `output_plan` and `return_local_tensor` are optional, they are used to specify the output
    layout of a module with a spec, and when to convert back to data parallel fashion.

    Args:
        plan (Dict[str, Union[:class:`torch.distributed._shard.sharding_spec.ShardingSpec`,
              :class:`torch.distributed._shard.sharder.Sharder`]):
            a dict that describes how to shard a module; there are currently two ways to
            shard a module:
                1. directly shard a module parameter by a `ShardingSpec`, keyed by the name of
                   a parameter to a `ShardingSpec`.
                2. shard a submodule by applying a `Sharder` on it, keyed by the name of a module
                   to a `Sharder` object.
        output_plan (Dict[str, :class:`torch.distributed._shard.sharding_spec.ShardingSpec`], optional):
            a dict that specifies the layout of a module's output which produces a ShardedTensor,
            keyed by the name of module to ShardingSpec ("" in key means the root module).
            Default: `None`
        return_local_tensor (List[str], optional): a list of string, each element enables
            a module's sharded output to be returned as a Tensor from its local shards to
            ensure further processing in a data parallel fashion. ("" in list means the
            root module).
            Default: None

    Example:
        Suppose we want to shard a module with two linear layers and then run it with DDP, we also
        want to convert the output of the second linear layer back to DDP, we can do it as follows:

        >>> # xdoctest: +REQUIRES(module:torch._C._distributed_c10d)
        >>> class MyModule(nn.Module):
        >>>     def __init__(self) -> None:
        >>>         super().__init__()
        >>>         self.fc1 = nn.Linear()
        >>>         self.gelu = nn.GELU()
        >>>         self.fc2 = nn.Linear()
        >>>         self.relu = nn.ReLU()
        >>>
        >>>     def forward(self, input):
        >>>         return self.relu(self.fc2(self.gelu(self.fc1(input))))


        >>> # xdoctest: +SKIP("Undefined spec1, spec2")
        >>> sharding_plan = ShardingPlan(
        >>>     plan={
        >>>         "fc1.weight": spec1,
        >>>         "fc2.weight": spec2
        >>>     },
        >>>     output_plan={
        >>>         "fc2": output_spec
        >>>     },
        >>>     return_local_tensor=["fc2"]
        >>> )
    """

    # Maps parameter name -> ShardingSpec, or submodule name -> Sharder.
    plan: Dict[str, Union[ShardingSpec, Sharder]]
    # Optional output layout per submodule name ("" means the root module).
    output_plan: Optional[Dict[str, ShardingSpec]] = None
    # Submodule names whose sharded output should be gathered back into a
    # regular local Tensor ("" means the root module).
    return_local_tensor: Optional[List[str]] = None
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
class ShardingPlanner(abc.ABC):
    """
    Default ShardingPlanner interface; can be extended to
    implement advanced sharding strategies.
    """

    @abc.abstractmethod
    def build_plan(self, module: nn.Module) -> ShardingPlan:
        """
        Given a nn.Module, define how to shard the module across
        ranks, and return a ShardingPlan.

        Args:
            module (:class:`torch.nn.Module`):
                The module to apply sharding to.

        Returns:
            A :class:`torch.distributed._shard.sharding_plan.ShardingPlan` object that
            represents how to shard the module.
        """
|
janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__init__.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch.distributed._shard.metadata import ShardMetadata
|
| 2 |
+
|
| 3 |
+
from .api import (
|
| 4 |
+
_infer_sharding_spec_from_shards_metadata,
|
| 5 |
+
DevicePlacementSpec,
|
| 6 |
+
EnumerableShardingSpec,
|
| 7 |
+
PlacementSpec,
|
| 8 |
+
ShardingSpec,
|
| 9 |
+
)
|
| 10 |
+
from .chunk_sharding_spec import ChunkShardingSpec as ChunkShardingSpec
|
janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (505 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/chunk_sharding_spec.cpython-310.pyc
ADDED
|
Binary file (5.91 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/api.py
ADDED
|
@@ -0,0 +1,263 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import functools
|
| 3 |
+
import operator
|
| 4 |
+
from abc import ABC, abstractmethod
|
| 5 |
+
from dataclasses import dataclass
|
| 6 |
+
from typing import Callable, Dict, List, TYPE_CHECKING
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
import torch.distributed._shard.sharded_tensor.metadata as sharded_tensor_meta
|
| 10 |
+
from torch.distributed._shard.metadata import ShardMetadata
|
| 11 |
+
from torch.distributed._shard.op_registry_utils import _decorator_func
|
| 12 |
+
|
| 13 |
+
from ._internals import (
|
| 14 |
+
check_tensor,
|
| 15 |
+
get_chunked_dim_size,
|
| 16 |
+
get_split_size,
|
| 17 |
+
validate_non_overlapping_shards_metadata,
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
if TYPE_CHECKING:
|
| 22 |
+
# Only include ShardedTensor when do type checking, exclude it
|
| 23 |
+
# from run-time to resolve circular dependency.
|
| 24 |
+
from torch.distributed._shard.sharded_tensor import ShardedTensor
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class PlacementSpec(ABC):  # noqa: B024
    """
    Base class representing the placement of an entity. Subclasses of this
    class can be used to specify customized placements which might not be
    covered by existing APIs.

    Intentionally defines no abstract methods (hence the B024 suppression);
    it only serves as a common marker type for placement specifications.
    """
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
@dataclass
class DevicePlacementSpec(PlacementSpec):
    """
    Associates placement of an entity with a single device.

    Args:
        device(:class:`torch.distributed._remote_device`): The device to place the
            entity on. Anything accepted by the ``_remote_device`` constructor
            (e.g. a string) is coerced into a ``_remote_device`` on construction.
    """

    device: torch.distributed._remote_device

    def __post_init__(self):
        # Normalize: already the right type, nothing to do.
        if isinstance(self.device, torch.distributed._remote_device):
            return
        self.device = torch.distributed._remote_device(self.device)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class ShardingSpec(ABC):
    """
    Base class representing sharding specifications.
    """

    @abstractmethod
    def build_metadata(
        self,
        tensor_sizes: torch.Size,
        tensor_properties: sharded_tensor_meta.TensorProperties,
    ) -> sharded_tensor_meta.ShardedTensorMetadata:
        """
        Given a global tensor size, define how to shard a tensor like this shape
        across ranks, and return ShardedTensorMetadata.

        Args:
            tensor_sizes (:class:`torch.Size`):
                The tensor shape to shard on, a `torch.Size` object that represents the
                tensor shape to be sharded according to the ShardingSpec.
            tensor_properties (:class:`torch.distributed._shard.sharded_tensor.TensorProperties`):
                Tensor properties used to create a ShardedTensor.

        Returns:
            A :class:`ShardedTensorMetadata` object that encodes the information about
            the layout of the ShardedTensor and its properties.
        """

    @abstractmethod
    def shard(
        self, tensor: torch.Tensor, src_rank: int = 0, process_group=None
    ) -> "ShardedTensor":
        """
        Given a global tensor on src_rank, shard this tensor
        across ranks within the process group, and return a ShardedTensor.

        Args:
            tensor (:class:`torch.Tensor`): Tensor needs to be sharded.

        Keyword args:
            src_rank (int, optional): The source rank which is used as the ground truth of
                the data for the parameter that would be sharded and scattered
                across the rest of the ranks.
                Default: 0.
            process_group (ProcessGroup, optional): The process group to work on. If None,
                the default process group will be used.

        Returns:
            A :class:`ShardedTensor` sharded from the given tensor.
        """
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
# Ops customized for a particular ShardingSpec, keyed by the spec class's
# qualified name, mapping the overridden op to its custom implementation.
_CUSTOM_SHARDING_SPEC_OPS: Dict[str, Dict[Callable, Callable]] = {}


def _has_custom_op(sharding_spec, op):
    """
    Return True if ``sharding_spec``'s class has a registered custom
    implementation for ``op``, False otherwise.
    """
    registered = _CUSTOM_SHARDING_SPEC_OPS.get(type(sharding_spec).__qualname__)
    return registered is not None and op in registered
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def _dispatch_custom_op(
    sharding_spec, op: Callable, types, args, kwargs, process_group
):
    """
    Look up and invoke the custom implementation of ``op`` registered for
    this ShardingSpec's class.

    Raises:
        RuntimeError: if no custom implementation was registered for the class.
    """
    class_name = type(sharding_spec).__qualname__
    if _has_custom_op(sharding_spec, op):
        custom_impl = _CUSTOM_SHARDING_SPEC_OPS[class_name][op]
        return custom_impl(types, args, kwargs, process_group)
    raise RuntimeError(f"Custom op: {op} not registered for {class_name}")
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def custom_sharding_spec_op(sharding_spec_class, func):
    """
    Decorator to allow custom registration of ops.

    Args:
        sharding_spec_class(type): The ShardingSpec for which we need to add this custom op.
        func(Callable): The op to override (ex: torch.bmm)
    """
    # Lazily create the per-class op table, then hand registration off to the
    # shared decorator helper.
    op_table = _CUSTOM_SHARDING_SPEC_OPS.setdefault(
        sharding_spec_class.__qualname__, {}
    )
    return functools.partial(_decorator_func, op=func, op_table=op_table)
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
@dataclass
class EnumerableShardingSpec(ShardingSpec):
    """
    This is a type of PlacementSpec that allows users to specify a generic
    sharding scheme by enumerating exactly how each shard is laid out.

    Args:
        shards(List[ShardMetadata]): List of :class:`ShardMetadata` objects representing
            each shard. Note that none of the shards should overlap.
    """

    shards: List[ShardMetadata]

    def __post_init__(self):
        if len(self.shards) == 0:
            raise ValueError(f"Empty shard list provided: {self.shards}")

        # Every shard must describe the same tensor rank (number of offset dims).
        rank = -1
        for shard in self.shards:
            shard_rank = len(shard.shard_offsets)
            if rank not in (-1, shard_rank):
                raise ValueError(
                    f"Found inconsistent ranks for shards: {rank} and {shard_rank}"
                )
            rank = shard_rank

        validate_non_overlapping_shards_metadata(self.shards)

    def build_metadata(
        self,
        tensor_sizes: torch.Size,
        tensor_properties: sharded_tensor_meta.TensorProperties,
    ) -> sharded_tensor_meta.ShardedTensorMetadata:
        """Validate the shards against ``tensor_sizes`` and build metadata."""
        # Verify the enumerated shards actually tile a tensor of this shape.
        check_tensor(self.shards, tensor_sizes)
        return sharded_tensor_meta.ShardedTensorMetadata(
            self.shards, tensor_sizes, tensor_properties
        )

    def shard(
        self, tensor: torch.Tensor, src_rank: int = 0, process_group=None
    ) -> "ShardedTensor":
        """Sharding a tensor with an enumerable spec is not supported yet."""
        # TODO: figure out a generic and efficient way to scatter the shards for EnumerableShardingSpec
        raise NotImplementedError("EnumerableShardingSpec.shard not implemented yet!")
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
def _infer_sharding_spec_from_shards_metadata(shards_metadata):
    """
    Infer the sharding spec from the metadata of each shard of a ShardedTensor.

    If the tensor is sharded only on one dimension, we can then verify whether it's
    a ChunkShardingSpec or not. The way to verify it is to first get the total length
    and perform a chunk sharding with the given placements to see if we can have the
    same chunk size as the given shards_metadata. If not, we assume it's enum sharded.

    Args:
        shards_metadata (List[ShardMetadata]): List of Metadata of local shards.

    Returns:
        A :class:`torch.distributed._shard.sharding_spec.ShardingSpec` object of sharding
            spec for one sharded tensor.
    """
    placements = []
    chunk_sharding_dim = None
    chunk_offset_list = []
    shard_size_list = []
    shard_offset_list = []
    # collect local shard metadatas from the global sharded_tensor_metadata
    for shard_metadata in shards_metadata:  # type: ignore[attr-defined]
        placements.append(shard_metadata.placement)
        local_offsets = shard_metadata.shard_offsets
        chunk_offset_list.append(sum(local_offsets))
        shard_size_list.append(shard_metadata.shard_sizes)
        shard_offset_list.append(shard_metadata.shard_offsets)
        shard_dims = [idx for idx, e in enumerate(local_offsets) if e != 0]
        # If the offset is [0, 0, ..., 0] (all zeros),
        # this shard alone cannot tell us which dimension the tensor is
        # sharded on (it is the first chunk on whatever that dimension is).
        if len(shard_dims) == 0:
            continue
        # If the offset is non-zero on more than one dimension, the tensor is
        # sharded across multiple dimensions and cannot be a ChunkShardingSpec.
        if len(shard_dims) != 1:
            chunk_sharding_dim = None
            break
        # Sharded on exactly one dimension: all shards must agree on which one.
        # NOTE: compare against None explicitly -- dimension 0 is falsy, so a
        # plain truthiness test (`if not chunk_sharding_dim`) would silently
        # overwrite a previously detected dim-0 sharding with a later shard's
        # dim instead of flagging the inconsistency.
        if chunk_sharding_dim is None:
            chunk_sharding_dim = shard_dims[0]
        elif chunk_sharding_dim != shard_dims[0]:
            chunk_sharding_dim = None
            break

    if chunk_sharding_dim is not None:
        # Ensure we infer the correct placement order from offsets
        placements = [
            x
            for _, x in sorted(
                zip(chunk_offset_list, placements), key=operator.itemgetter(0)
            )
        ]

        from .chunk_sharding_spec import ChunkShardingSpec

        chunk_spec = ChunkShardingSpec(
            dim=chunk_sharding_dim,
            placements=placements,
        )

        shard_sizes = sorted(x[chunk_sharding_dim] for x in shard_size_list)
        shard_total_length = sum(shard_sizes)
        shard_offsets = sorted(x[chunk_sharding_dim] for x in shard_offset_list)

        chunks = len(placements)
        split_size = get_split_size(shard_total_length, chunks)
        chunk_shard_sizes = sorted(
            get_chunked_dim_size(shard_total_length, split_size, idx)
            for idx in range(chunks)
        )
        # Should match ChunkShardingSpec offsets calculation
        chunk_shard_offsets = [split_size * idx for idx in range(chunks)]
        # Only classify as chunk-sharded when both sizes and offsets exactly
        # match what ChunkShardingSpec itself would produce for this length;
        # anything else falls through to the enumerable representation.
        if shard_sizes == chunk_shard_sizes and shard_offsets == chunk_shard_offsets:
            return chunk_spec
    return EnumerableShardingSpec(shards_metadata)
|
janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py
ADDED
|
@@ -0,0 +1,218 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from dataclasses import dataclass
|
| 3 |
+
from typing import cast, List, Optional, TYPE_CHECKING, Union
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.distributed as dist
|
| 7 |
+
import torch.distributed._shard.sharded_tensor.metadata as sharded_tensor_meta
|
| 8 |
+
import torch.distributed.distributed_c10d as distributed_c10d
|
| 9 |
+
from torch.distributed._shard._utils import narrow_tensor
|
| 10 |
+
from torch.distributed._shard.metadata import ShardMetadata
|
| 11 |
+
from torch.distributed._shard.sharded_tensor.shard import Shard
|
| 12 |
+
from torch.distributed._shard.sharded_tensor.utils import (
|
| 13 |
+
_parse_and_validate_remote_device,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
from ._internals import get_chunked_dim_size, get_split_size
|
| 17 |
+
from .api import ShardingSpec
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
if TYPE_CHECKING:
|
| 21 |
+
# Only include ShardedTensor when do type checking, exclude it
|
| 22 |
+
# from run-time to resolve circular dependency.
|
| 23 |
+
from torch.distributed._shard.sharded_tensor import ShardedTensor
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
@dataclass
class ChunkShardingSpec(ShardingSpec):
    """
    This is a type of PlacementSpec that defines the placement as being sharded
    across multiple devices. In particular, it represents sharding a Tensor
    along a single dimension into equal chunks (similar to :meth:`torch.chunk`).

    The semantics of how a tensor is partitioned is inline with
    :meth:`torch.chunk`, where ``dim`` in torch.chunk corresponds to the
    specified ``dim`` and ``chunks`` in torch.chunk is the number of elements
    in the placement specified.

    Args:
        dim (int or str):
            The dimension to shard on, could be an integer representing the
            dimension or a string in case of named tensors where dimensions are
            named. Note that named tensor support is not added yet.
        placement(List[Union[_remote_device, str]]):
            Specifies the placement of each shard of the Tensor. The size of
            the list represents the number of shards to be created. This could
            be a list of
            :class:`torch.distributed._remote_device`'s. This list
            could also contain a string which represents remote
            device as accepted by
            :class:`torch.distributed._remote_device`
    """

    ShardingDim = Union[int, str]

    # Dimension to shard along (int; str for named tensors is not supported yet).
    dim: ShardingDim
    # One entry per shard; strings are coerced to _remote_device in __post_init__.
    placements: List[Union[torch.distributed._remote_device, str]]

    def __post_init__(self):
        # Validate the dim eagerly and normalize all placements to
        # _remote_device so the rest of the code can assume a uniform type.
        self._verify_dim(self.dim)
        for i, remote_device in enumerate(self.placements):
            if not isinstance(remote_device, torch.distributed._remote_device):
                self.placements[i] = torch.distributed._remote_device(remote_device)

    @staticmethod
    def _verify_dim(dim):
        # Validate the sharding spec.
        # TODO: support named dimension
        if isinstance(dim, str):
            raise NotImplementedError(
                "ChunkShardingSpec does not support named dimension yet!"
            )

        if not isinstance(dim, int):
            raise ValueError(f"Sharding dim needs to be an integer, found: {dim}")

    def build_metadata(
        self,
        tensor_sizes: torch.Size,
        tensor_properties: sharded_tensor_meta.TensorProperties,
    ) -> sharded_tensor_meta.ShardedTensorMetadata:
        """
        Chunk ``tensor_sizes`` along ``self.dim`` into ``len(self.placements)``
        shards and return the resulting ShardedTensorMetadata.
        """
        tensor_num_dim = len(tensor_sizes)

        self._verify_dim(self.dim)
        if self.dim >= tensor_num_dim or self.dim < -tensor_num_dim:  # type: ignore[operator]
            raise ValueError(f"Invalid sharding dim: {self.dim}")

        shards_metadata = []
        sharding_dim_size = tensor_sizes[self.dim]  # type: ignore[index]
        chunks = len(self.placements)
        split_size = get_split_size(sharding_dim_size, chunks)
        for idx, placement in enumerate(self.placements):
            # generate ShardMetadata for each placement device
            chunked_dim_size = get_chunked_dim_size(sharding_dim_size, split_size, idx)
            shard_size = list(tensor_sizes)
            current_offsets = [0] * tensor_num_dim
            current_offsets[self.dim] = split_size * idx  # type: ignore[index]
            shard_size[self.dim] = chunked_dim_size  # type: ignore[index]

            shard_metadata = ShardMetadata(
                shard_offsets=current_offsets,
                shard_sizes=shard_size,
                placement=placement,
            )
            shards_metadata.append(shard_metadata)

        return sharded_tensor_meta.ShardedTensorMetadata(
            shards_metadata, tensor_sizes, tensor_properties
        )

    def shard(
        self, tensor: torch.Tensor, src_rank: int = 0, process_group=None
    ) -> "ShardedTensor":
        """
        Scatter ``tensor`` (materialized on ``src_rank``) into one shard per
        placement and return the resulting ShardedTensor.

        Args:
            src_rank: group rank relative to ``process_group``

            N.B. If ``process_group`` is None, ``src_rank`` is a global rank.
        """
        # relative imports to avoid circular dependency
        from torch.distributed._shard.sharded_tensor import ShardedTensor

        tensor_properties = sharded_tensor_meta.TensorProperties(
            dtype=tensor.dtype,
            layout=tensor.layout,
            requires_grad=tensor.requires_grad,
            memory_format=torch.contiguous_format,
            pin_memory=tensor.is_pinned(),
        )
        current_rank = dist.get_rank(process_group)
        current_global_rank = dist.get_rank()
        tensor_meta = self.build_metadata(tensor.size(), tensor_properties)
        local_shards = []
        local_tensor = None
        local_metadata = None
        # One slot per rank in the group; only populated on the source rank.
        tensors_to_scatter = cast(
            List[Optional[torch.Tensor]],
            [None] * dist.get_world_size(process_group),
        )

        sharding_dim_size = tensor.size()[self.dim]  # type: ignore[index]
        chunks = len(self.placements)
        split_size = get_split_size(sharding_dim_size, chunks)
        # All scattered tensors must share this (maximal) chunk shape; smaller
        # trailing shards are padded up to it and trimmed after the scatter.
        scatter_shape = list(tensor.size())
        scatter_shape[self.dim] = split_size  # type: ignore[index]

        for shard_meta in tensor_meta.shards_metadata:
            remote_global_rank, device = _parse_and_validate_remote_device(
                process_group, shard_meta.placement
            )
            if current_rank == src_rank:
                # Reshape to get shard for this rank and we don't want autograd
                # recording here for the narrow op and 'local_shard' should be a
                # leaf variable in the autograd graph.
                narrowed_tensor = narrow_tensor(tensor, shard_meta)
                if shard_meta.shard_sizes[self.dim] < split_size:  # type: ignore[index]
                    # for the last shard that might be smaller to other shards
                    # resize the narrowed tensor to the same size and use it for
                    # the scatter collective as dist.scatter requires same size
                    # inputs on every rank
                    tensor_to_scatter = (
                        narrowed_tensor.detach().clone().resize_(scatter_shape)
                    )
                else:
                    tensor_to_scatter = narrowed_tensor.detach().clone().contiguous()

                tensors_to_scatter[
                    dist.get_group_rank(process_group, remote_global_rank)
                ] = tensor_to_scatter

            if current_global_rank == remote_global_rank:
                # This shard lands on the current rank: allocate its receive
                # buffer and remember its metadata.
                local_tensor = torch.empty(
                    scatter_shape,
                    dtype=tensor.dtype,
                    layout=tensor.layout,
                    device=device,
                )
                local_metadata = shard_meta

        # each rank should have local_tensor and local_metadata initialized if we build
        # the metadata list in a correct way.
        assert local_tensor is not None
        assert local_metadata is not None

        # Scatter the shards to all ranks in the pg
        # scatter takes the global rank as ``src``
        src_for_scatter = src_rank
        if (
            process_group is not None
            and process_group is not distributed_c10d._get_default_group()
        ):
            src_for_scatter = distributed_c10d.get_global_rank(
                process_group, src_for_scatter
            )

        dist.scatter(
            local_tensor,
            scatter_list=tensors_to_scatter if current_rank == src_rank else None,
            src=src_for_scatter,
            group=process_group,
        )

        if list(local_tensor.size()) != local_metadata.shard_sizes:
            # detach again after receiving to ensure local shards remain a leaf node
            local_tensor = local_tensor.resize_(local_metadata.shard_sizes).detach()

        # Sync requires_grad to local_shard.
        local_tensor.requires_grad = tensor.requires_grad

        local_shards.append(Shard(tensor=local_tensor, metadata=local_metadata))

        st = ShardedTensor._init_from_local_shards_and_global_metadata(
            local_shards, tensor_meta, process_group=process_group
        )

        # Manually set sharding_spec
        st._sharding_spec = self

        return st
|
janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__init__.py
ADDED
|
File without changes
|
janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (213 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py
ADDED
|
@@ -0,0 +1,348 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.distributed as dist
|
| 5 |
+
from torch.distributed._shard.sharded_tensor import ShardedTensor
|
| 6 |
+
from torch.distributed._shard.sharded_tensor._ops._common import _sharded_op_common
|
| 7 |
+
from torch.distributed._shard.sharding_spec import ChunkShardingSpec
|
| 8 |
+
from torch.distributed._shard.sharding_spec._internals import (
|
| 9 |
+
get_chunk_sharding_params,
|
| 10 |
+
get_chunked_dim_size,
|
| 11 |
+
get_split_size,
|
| 12 |
+
)
|
| 13 |
+
from torch.distributed._shard.sharding_spec.api import custom_sharding_spec_op
|
| 14 |
+
from torch.distributed.nn.functional import (
|
| 15 |
+
_all_gather_base,
|
| 16 |
+
all_reduce,
|
| 17 |
+
all_to_all_single,
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def _chunk_sharding_spec_check(spec, op):
|
| 22 |
+
"""
|
| 23 |
+
For the given op implementation check if the sharding spec is ChunkShardingSpec.
|
| 24 |
+
"""
|
| 25 |
+
if not isinstance(spec, ChunkShardingSpec):
|
| 26 |
+
raise NotImplementedError(
|
| 27 |
+
f"Only ChunkShardingSpec supported for '{op.__name__}'."
|
| 28 |
+
)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def _register_sharded_op_on_local_tensor(
    op, early_stop_func=None, extra_check=None, customized_func=None
):
    """
    Handles ``__torch_function__`` dispatch for ops which are performed on
    the single local tensor of the sharded tensor such as op like
    ``torch.nn.functional.softmax`` or ``torch.Tensor.view``.

    For more complicated ops, a customized func can be used to generate
    the new local tensor, sharding spec and sharded tensor size.

    Args:
        op: The op to be registered and applied to all shards of the st.
        early_stop_func (Callable, optional): the func for early stop.
            Default: if ``None``, no early stop.
        extra_check (Callable, optional): the func for extra condition check.
            Default: if ``None``, no extra check.
        customized_func (Callable, optional): the func for customized logic
            to generate the new local tensor, sharding spec and sharded tensor size.
            Default: if ``None``, we simply lower to the real op call with
            the single local tensor of the st.

    Return:
        func (Callable): registered implementation for sharded op for
            ``__torch_function__`` dispatch.
    """

    # Registration happens as a side effect of the decorators below:
    # ``custom_sharding_spec_op`` installs the wrapped function as the
    # ChunkShardingSpec handler for ``op``, and ``_sharded_op_common``
    # applies the shared early-stop/extra-check preamble.
    @custom_sharding_spec_op(ChunkShardingSpec, op)
    @_sharded_op_common(op, early_stop_func, extra_check)
    def sharded_tensor_op_on_local_tensor(types, args=(), kwargs=None, pg=None):
        # By convention the sharded tensor is the first positional arg.
        st = args[0]
        sharding_spec = st.sharding_spec()
        # This path only supports a single local shard per rank.
        if len(st.local_shards()) != 1:
            raise TypeError(
                f"torch function '{op.__name__}', with args: {args} and "
                f"kwargs: {kwargs} only supported for single local tensor!"
            )
        st_size = st.size()
        if customized_func:
            # Customized path may change the local tensor, the spec and the
            # global size (e.g. for shape-changing ops like ``view``).
            local_tensor, sharding_spec, st_size = customized_func(args, kwargs, pg)
        else:
            # Default path: swap the sharded tensor for its local tensor and
            # lower to the real op.
            args = (st.local_tensor(), *args[1:])
            local_tensor = op(*args, **kwargs)
        # Re-wrap the result as a ShardedTensor with the (possibly updated)
        # spec and global size.
        return ShardedTensor._init_from_local_tensor(
            local_tensor.contiguous(),
            sharding_spec,
            st_size,  # type: ignore[arg-type]
            process_group=pg,
            init_rrefs=st._init_rrefs,
        )
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def _handle_col_wise_sharding_base(
    op_func,
    col_dim,
    input,
    world_size,
    weight,
    local_shard,
    pg,
    gathered_inputs,
    mode=None,
    gathered_per_sample_weights=None,
    gathered_offsets=None,
    padding_idx=None,
):
    """
    For col-wise sharding of weight, lots of logic are common.
    So we extract the common logic and put in this function:
    Step 1. To get input from each rank and
    Step 2. To perform the op on the concatenated tensor.
    Step 3. To distribute results to each rank with col rearrangement.
    Step 4. To concatenate all results from all ranks.

    Args:
        op_func: operator which is applied to the input tensor.
        col_dim: dim of result tensor after the operation.
        input: tensor to be applied op on.
        world_size: number of ranks.
        weight: sharded weight tensor.
        local_shard: col-wise sharded weight tensor.
        pg: process group.
        gathered_inputs: list of inputs from all ranks. If specified, we
            don't need to communicate with each rank any more.
        mode: aggregation mode of EmbeddingBag.
        gathered_per_sample_weights: per_sample_weights across all ranks.
        gathered_offsets: offsets across all ranks.
        padding_idx: If specified, the entries at padding_idx do
            not contribute to the gradient; therefore, the embedding
            vector at padding_idx is not updated during training,
            i.e. it remains as a fixed "pad".
            Note that the embedding vector at padding_idx is
            excluded from the reduction.

    Return: final result of input being applied with the op.
    """
    # run the operator's function for all the inputs.
    results = []
    for i, inp in enumerate(gathered_inputs):
        # Dispatch on op identity: embedding_bag and embedding take extra
        # keyword args; any other op only gets (input, local_shard).
        if op_func == torch.nn.functional.embedding_bag:
            result = op_func(
                inp,
                local_shard,
                offsets=gathered_offsets[i] if gathered_offsets is not None else None,
                mode=mode,
                per_sample_weights=gathered_per_sample_weights[i]
                if gathered_per_sample_weights is not None
                else None,
                padding_idx=padding_idx,
            )
        elif op_func == torch.nn.functional.embedding:
            result = op_func(
                inp,
                local_shard,
                padding_idx=padding_idx,
            )
        else:
            result = op_func(inp, local_shard)
        # Move the sharded result dim to dim 0 so the all2all exchange in
        # the distribute step can split along dim 0.
        results.append(torch.transpose(result, 0, col_dim))

    # Distribute results to each rank with col rearrangement.
    output = _result_distribute_with_col_rearrange(
        results, input, world_size, weight, pg
    )

    # transpose the output and return result.
    return torch.transpose(output, 0, col_dim)
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def _result_distribute_with_col_rearrange(results, input, world_size, weight, pg):
    """
    For col-wise sharding of weight, we need to distribute
    results to each rank. We do them in this function.
    Note that, if the index in the Sharding Spec is not equal to
    the rank number, we need to do the rearrangement based on the
    order given by the Sharding Spec (placement).

    Args:
        results: results from ops applied to inputs from all ranks.
            We need to distribute them back to their original ranks.
        input: tensor to be applied op to.
        world_size: number of ranks.
        weight: sharded weight tensor.
        pg: process group.

    Return: column rearranged result.
    """
    # Process results and outputs for all2all.
    sharding_dim = weight._sharding_spec.dim
    sharding_dim_size = weight.size(sharding_dim)
    # Output buffer has the full sharding-dim size on dim 0; the other dims
    # match the per-rank result shape.
    dims = list(results[0].size())
    dims[0] = sharding_dim_size
    combined_results = torch.cat(results)
    output = torch.empty(
        *dims, device=combined_results.device, dtype=combined_results.dtype
    )

    # Compute output splits
    split_size = get_split_size(sharding_dim_size, world_size)
    # output_split_sizes[r] = number of rows destined for rank r; placements
    # may map shard index idx to a different rank than idx itself.
    output_split_sizes = [0] * world_size
    for idx, placement in enumerate(weight._sharding_spec.placements):
        output_split_sizes[placement.rank()] = get_chunked_dim_size(
            sharding_dim_size, split_size, idx
        )

    # distribute the outputs using all2all.
    output = all_to_all_single(
        output, combined_results, output_split_sizes=output_split_sizes, group=pg
    )

    # Check if we need to rearrange columns appropriately for output.
    rearrange_columns = any(
        idx != placement.rank()
        for idx, placement in enumerate(weight._sharding_spec.placements)
    )
    if not rearrange_columns:
        # Fast path: placement order already matches rank order.
        return output

    indices = []
    for placement in weight._sharding_spec.placements:
        dim_size = output_split_sizes[placement.rank()]
        # Starting offset of this placement's chunk in rank order.
        # NOTE: the generator's loop variable ``split_size`` shadows the
        # outer ``split_size`` computed above; only the generator-local one
        # is used inside the sum.
        start = sum(
            split_size if i < placement.rank() else 0
            for i, split_size in enumerate(output_split_sizes)
        )
        indices += list(range(start, start + dim_size))

    # Reorder rows from rank order into placement (spec) order.
    return output.index_select(0, torch.tensor(indices, device=output.device))
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
def _handle_max_norm_col_wise(
    max_norm,
    norm_type,
    local_shard,
    input,
    world_size,
    gathered_inputs,
    pg,
):
    """
    For col-wise sharding of weight, we need to aggregate the
    norm across all ranks before we can perform the proper re-norm.
    Note that, the max_norm logic is only applied to the embedding
    indices that are looked up and not the whole shard.

    Args:
        max_norm: If given, each embedding vector with norm larger
            than max_norm is renormalized to have norm max_norm.
            Note: this will modify weight in-place.
        norm_type: The p in the p-norm to compute for the max_norm option.
        local_shard: col-wise shared local weight used for lookup.
        input: tensor to be applied op to.
        world_size: number of ranks.
        gathered_inputs: list of inputs from all ranks.
        pg: process group.

    Return:
        local_shard_norm_renormed: local_shard re-normed to max_norm if the norm is larger
            than it.

    """
    # Default to the Euclidean (2-)norm when no norm_type is given.
    norm_type = norm_type if norm_type is not None else 2.0
    # IDs actually looked up on any rank; only these rows get re-normed.
    unique_inp = torch.unique(torch.cat(gathered_inputs))
    # Per-row sum of |w|^p over the local (column) slice of each row.
    local_shard_sum = torch.sum(
        torch.pow(torch.abs(local_shard), norm_type), dim=1, dtype=local_shard.dtype
    )
    # For col-wise sharding, we need to first aggregate the powered sum
    # from each rank first and then calculate the norm.
    local_shard_sum = all_reduce(local_shard_sum, group=pg)
    local_shard_norm = torch.pow(local_shard_sum, 1.0 / norm_type)
    # Rows not looked up get an infinite cap so the ``where`` below leaves
    # their norm (and hence their values) unchanged.
    max_norm_tensor = torch.full(
        (local_shard.size(0),),
        float("inf"),
        dtype=local_shard.dtype,
        device=input.device,
    )
    max_norm_tensor[unique_inp] = max_norm
    local_shard_t = local_shard.t().contiguous()
    # Target norm per row: clamp to max_norm only where it is exceeded.
    normalized_tensor = torch.where(
        local_shard_norm > max_norm_tensor, max_norm_tensor, local_shard_norm
    )
    # Make sure divisor is not zero.
    local_shard_norm[local_shard_norm == 0.0] = 1.0
    # Scale each row by target_norm / actual_norm (transposed so the
    # per-row factors broadcast over columns), then transpose back.
    local_shard_norm_renormed = (
        torch.div(torch.mul(local_shard_t, normalized_tensor), local_shard_norm)
        .t()
        .contiguous()
    )
    return local_shard_norm_renormed
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
def _all_gather_base_input(input, pg):
    """
    Gather ``input`` from every rank into one tensor concatenated on dim 0.

    Args:
        input: tensor to be applied op on.
        pg: process group.

    Returns:
        gathered_inputs: input gathered from each rank and concat by dim 0.
    """
    # Pre-allocate the flat output buffer: same trailing dims as the local
    # input, dim 0 scaled by the number of ranks in the group.
    out_shape = list(input.size())
    out_shape[0] *= dist.get_world_size(pg)
    gathered = torch.empty(out_shape, device=input.device, dtype=input.dtype)
    return _all_gather_base(gathered, input, group=pg)
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
def _handle_row_wise_mask(gather_inp, padding_idx, weight, world_size, rank):
|
| 301 |
+
"""
|
| 302 |
+
Mask the input for embedding look-up for IDs which are not stored
|
| 303 |
+
on the current rank. This function also adjust the ``padding_idx``
|
| 304 |
+
so that it is only used on the rank where the corresponding row is
|
| 305 |
+
stored.
|
| 306 |
+
|
| 307 |
+
Note that, with ``max_norm`` flag on, only weights of rows being
|
| 308 |
+
looked up will be re-normed. So we need an extra row for masked ID
|
| 309 |
+
so that it does not affect the final result and ``max_norm``.
|
| 310 |
+
|
| 311 |
+
Args:
|
| 312 |
+
gather_inp: tensor to be applied op on gathered from all ranks.
|
| 313 |
+
padding_idx: If specified, the entries at padding_idx do
|
| 314 |
+
not contribute to the gradient; therefore, the embedding
|
| 315 |
+
vector at padding_idx is not updated during training,
|
| 316 |
+
i.e. it remains as a fixed "pad".
|
| 317 |
+
Note that the embedding vector at padding_idx is
|
| 318 |
+
excluded from the reduction.
|
| 319 |
+
weight: weight tensor of Embedding look-up table.
|
| 320 |
+
world_size: number of ranks.
|
| 321 |
+
rank: # of cuda process.
|
| 322 |
+
|
| 323 |
+
Returns:
|
| 324 |
+
lookup_input: Tensor of masked input.
|
| 325 |
+
padding_idx: adjusted padding_idx.
|
| 326 |
+
padding_row: The extra row we used during lookup so that
|
| 327 |
+
looking up does not affect ``max_norm``.
|
| 328 |
+
"""
|
| 329 |
+
(start_pos, chunk_size) = get_chunk_sharding_params(
|
| 330 |
+
weight.size(0), world_size, weight._sharding_spec, rank
|
| 331 |
+
)
|
| 332 |
+
mask = (gather_inp < start_pos) | (gather_inp >= start_pos + chunk_size)
|
| 333 |
+
lookup_input = gather_inp.clone() - start_pos
|
| 334 |
+
lookup_input[mask] = chunk_size
|
| 335 |
+
if (
|
| 336 |
+
padding_idx is not None
|
| 337 |
+
and padding_idx >= start_pos
|
| 338 |
+
and padding_idx < (start_pos + chunk_size)
|
| 339 |
+
):
|
| 340 |
+
padding_idx = padding_idx - start_pos
|
| 341 |
+
else:
|
| 342 |
+
padding_idx = None
|
| 343 |
+
|
| 344 |
+
# When max_norm is set, it will only re-norm the row being looked up.
|
| 345 |
+
padding_row = torch.zeros(
|
| 346 |
+
1, weight.size(1), device=gather_inp.device, dtype=weight.dtype
|
| 347 |
+
)
|
| 348 |
+
return lookup_input, padding_idx, padding_row
|
janus/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_nested_dict.cpython-310.pyc
ADDED
|
Binary file (1.89 kB). View file
|
|
|